source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
for-20.c | /* { dg-do compile } */
/* { dg-options "-fopenmp -fdump-tree-ompexp" } */
extern void bar(int);

/* Worksharing loop with schedule(nonmonotonic:guided); the dg-final
   directives below check that ompexp emits the matching
   GOMP_loop_nonmonotonic_guided_* runtime entry points.  */
void foo (int n)
{
  int i;
#pragma omp for schedule(nonmonotonic:guided)
  for (i = 0; i < n; ++i)
    bar(i);
}
/* { dg-final { scan-tree-dump-times "GOMP_loop_nonmonotonic_guided_start" 1 "ompexp" } } */
/* { dg-final { scan-tree-dump-times "GOMP_loop_nonmonotonic_guided_next" 1 "ompexp" } } */
|
for-1.c | /* { dg-do compile } */
/* Front-end diagnostics test for #pragma omp for: the first four loops
   are canonical-form loops that must be accepted; the remaining ones
   deliberately violate the OpenMP canonical loop form and must produce
   exactly the diagnostics named in the dg-error comments.  Do not
   "fix" the malformed loops -- the errors are the point of the test.  */
void foo (int j, int k)
{
int i;
/* Valid loops. */
#pragma omp for
for (i = 0; i < 10; i++)
baz (i);
#pragma omp for
for (i = j; i <= 10; i+=4)
baz (i);
#pragma omp for
for (i = j; i > 0; i = i - 1)
baz (j);
#pragma omp for
for (i = j; i >= k; i--)
baz (i);
/* Malformed parallel loops. */
#pragma omp for
i = 0; /* { dg-error "for statement expected" } */
for ( ; i < 10; )
{
baz (i);
i++;
}
#pragma omp for
for (i = 0; ; i--) /* { dg-error "missing controlling predicate" } */
{
if (i >= 10)
break; /* { dg-error "break" } */
baz (i);
}
#pragma omp for
for (i = 0; i < 10 && j > 4; i-=3) /* { dg-error "invalid controlling predicate" } */
baz (i);
#pragma omp for
for (i = 0; i < 10; i-=3, j+=2) /* { dg-error "invalid increment expression" } */
baz (i);
}
|
grid_astar.h | /*
* Copyright (c) 2014-2020, the neonavigation authors
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef PLANNER_CSPACE_GRID_ASTAR_H
#define PLANNER_CSPACE_GRID_ASTAR_H
#define _USE_MATH_DEFINES
#include <cfloat>
#include <cmath>
#include <limits>
#include <list>
#include <map>
#include <memory>
#include <unordered_map>
#include <utility>
#include <vector>
#include <boost/chrono.hpp>
#include <planner_cspace/reservable_priority_queue.h>
#include <planner_cspace/cyclic_vec.h>
#include <planner_cspace/blockmem_gridmap.h>
#include <planner_cspace/grid_astar_model.h>
#include <omp.h>
namespace planner_cspace
{
// Progress counters for one search() invocation, handed to the
// user-supplied ProgressCallback at each progress report.
struct SearchStats
{
  size_t num_loop;           // main search-loop iterations so far
  size_t num_search_queue;   // open-list size when the snapshot was taken
  size_t num_prev_updates;   // grid updates since the previous report
  size_t num_total_updates;  // grid updates since the search started
};
// Parallel A* search on a DIM-dimensional grid.  The first NONCYCLIC
// dimensions are linear; the remaining dimensions wrap around
// (cycleUnsigned), e.g. an orientation axis.
template <int DIM = 3, int NONCYCLIC = 2>
class GridAstar
{
public:
  using Vec = CyclicVecInt<DIM, NONCYCLIC>;
  using Vecf = CyclicVecFloat<DIM, NONCYCLIC>;
  using VecWithCost = typename GridAstarModelBase<DIM, NONCYCLIC>::VecWithCost;
  // Periodic progress hook; returning false aborts the search.
  using ProgressCallback = std::function<bool(const std::list<Vec>&, const SearchStats&)>;

  // Cost map backed by block-allocated memory (cache-friendly tiles).
  template <class T, int block_width = 0x20>
  class Gridmap : public BlockMemGridmap<T, DIM, NONCYCLIC, block_width>
  {
    using BlockMemGridmap<T, DIM, NONCYCLIC, block_width>::BlockMemGridmap;
  };

  // Open-list entry.
  class PriorityVec
  {
  public:
    float p_;      // priority: cost-so-far + heuristic estimate
    float p_raw_;  // cost-so-far only
    Vec v_;        // grid position

    PriorityVec(const float p, const float p_raw, const Vec& v)
      : p_(p)
      , p_raw_(p_raw)
      , v_(v)
    {
    }
    bool operator<(const PriorityVec& b) const
    {
      // smaller first
      return p_ > b.p_;
    }
  };

  // One pending cost-map update produced by a parallel expansion task;
  // buffered per thread and applied later inside the critical section.
  class GridmapUpdate
  {
  private:
    const Vec p0_;            // parent position
    const Vec p1_;            // position to be updated
    const float cost_estim_;  // cost-so-far + heuristic for p1_
    const float cost_;        // cost-so-far for p1_

  public:
    GridmapUpdate(
        const Vec& p0, const Vec& p1,
        const float cost_estim, const float cost)
      : p0_(p0)
      , p1_(p1)
      , cost_estim_(cost_estim)
      , cost_(cost)
    {
    }
    const Vec& getParentPos() const
    {
      return p0_;
    }
    const Vec& getPos() const
    {
      return p1_;
    }
    const float getCost() const
    {
      return cost_;
    }
    const PriorityVec getPriorityVec() const
    {
      return PriorityVec(cost_estim_, cost_, p1_);
    }
  };

public:
  constexpr int getDim() const
  {
    return DIM;
  }
  constexpr int getNoncyclic() const
  {
    return NONCYCLIC;
  }
  // Number of open-list entries expanded in parallel per main-loop pass.
  void setSearchTaskNum(const size_t& search_task_num)
  {
    search_task_num_ = search_task_num;
  }

  // Resize the cost map and pre-reserve the parent table and open list
  // (1/16 of the serialized map size is a heuristic reservation).
  void reset(const Vec size)
  {
    g_.reset(size);
    g_.clear(std::numeric_limits<float>::max());
    parents_.reserve(g_.ser_size() / 16);
    open_.reserve(g_.ser_size() / 16);
  }
  GridAstar()
    : queue_size_limit_(0)
    , search_task_num_(1)
  {
  }
  explicit GridAstar(const Vec size)
  {
    reset(size);
    queue_size_limit_ = 0;
  }
  // Cap the open-list size (0 = unlimited); overflow entries are dropped
  // from the back (worst priority) on push.
  void setQueueSizeLimit(const size_t size)
  {
    queue_size_limit_ = size;
  }

  // Search a path from any of the start candidates `ss` (each with an
  // initial cost) to goal `e`.  On success the path is written to `path`
  // and true is returned.  With return_best, the path to the node closest
  // to the goal (by heuristic) is emitted even when the goal is unreached.
  bool search(
      const std::vector<VecWithCost>& ss, const Vec& e,
      std::list<Vec>& path,
      const typename GridAstarModelBase<DIM, NONCYCLIC>::Ptr& model,
      ProgressCallback cb_progress,
      const float cost_leave,
      const float progress_interval,
      const bool return_best = false)
  {
    return searchImpl(
        g_, ss, e, path,
        model, cb_progress,
        cost_leave, progress_interval, return_best);
  }

protected:
  // Core of the search.  Structure per main-loop pass:
  //   omp single:   pop up to search_task_num_ best nodes into `centers`,
  //                 detect goal / emit progress reports
  //   omp for:      expand each center, buffering updates per thread
  //   omp critical: merge the buffered updates into the cost map / open list
  bool searchImpl(
      Gridmap<float>& g,
      const std::vector<VecWithCost>& sts, const Vec& en,
      std::list<Vec>& path,
      const typename GridAstarModelBase<DIM, NONCYCLIC>::Ptr& model,
      ProgressCallback cb_progress,
      const float cost_leave,
      const float progress_interval,
      const bool return_best = false)
  {
    if (sts.size() == 0)
      return false;

    auto ts = boost::chrono::high_resolution_clock::now();

    Vec e = en;
    e.cycleUnsigned(g.size());
    g.clear(std::numeric_limits<float>::max());
    open_.clear();
    parents_.clear();

    std::vector<VecWithCost> ss_normalized;
    Vec better;  // node with the smallest remaining heuristic seen so far
    int cost_estim_min = std::numeric_limits<int>::max();
    for (const VecWithCost& st : sts)
    {
      // Start already at the goal: no path to compute.
      if (st.v_ == en)
        return false;

      Vec s = st.v_;
      s.cycleUnsigned(g.size());
      ss_normalized.emplace_back(s, st.c_);
      g[s] = st.c_;

      const int cost_estim = model->costEstim(s, e);
      open_.emplace(cost_estim + st.c_, st.c_, s);
      if (cost_estim_min > cost_estim)
      {
        cost_estim_min = cost_estim;
        better = s;
      }
    }

    std::vector<PriorityVec> centers;  // shared task list for this pass
    centers.reserve(search_task_num_);

    size_t num_updates(0);
    size_t num_total_updates(0);
    size_t num_loop(0);
    bool found(false);
    bool abort(false);
#pragma omp parallel
    {
      // Per-thread buffers, reused across passes.
      std::vector<GridmapUpdate> updates;
      // Reserve buffer using example search diff list
      updates.reserve(
          search_task_num_ *
          model->searchGrids(ss_normalized[0].v_, ss_normalized, e).size() /
          omp_get_num_threads());
      std::vector<Vec> dont;  // centers that produced no update this pass
      dont.reserve(search_task_num_);

      while (true)
      {
#pragma omp barrier
#pragma omp single
        {
          const size_t num_search_queue = open_.size();
          num_loop++;
          // Fetch tasks to be paralellized
          centers.clear();
          for (size_t i = 0; i < search_task_num_;)
          {
            if (open_.size() == 0)
              break;
            PriorityVec center(open_.top());
            open_.pop();
            // Goal reached, or remaining heuristic dropped below
            // cost_leave: accept this node as the endpoint.
            if (center.v_ == e || center.p_ - center.p_raw_ < cost_leave)
            {
              e = center.v_;
              found = true;
              break;
            }
            centers.emplace_back(std::move(center));
            ++i;
          }
          // Periodic progress report with the best partial path so far.
          const auto tnow = boost::chrono::high_resolution_clock::now();
          if (boost::chrono::duration<float>(tnow - ts).count() >= progress_interval)
          {
            std::list<Vec> path_tmp;
            ts = tnow;
            findPath(ss_normalized, better, path_tmp);
            const SearchStats stats =
                {
                  .num_loop = num_loop,
                  .num_search_queue = num_search_queue,
                  .num_prev_updates = num_updates,
                  .num_total_updates = num_total_updates,
                };
            if (!cb_progress(path_tmp, stats))
            {
              abort = true;
            }
          }
          num_updates = 0;
        }  // omp single (implicit barrier: all threads see centers/found/abort)
        if (centers.size() < 1 || found || abort)
          break;
        updates.clear();
        dont.clear();

#pragma omp for schedule(static)
        for (auto it = centers.cbegin(); it < centers.cend(); ++it)
        {
          const Vec p = it->v_;
          const float c = it->p_raw_;
          const float c_estim = it->p_;
          const float gp = g[p];
          // A cheaper route to p was already recorded: stale entry.
          if (c > gp)
            continue;

          // NOTE(review): cost_estim_min and better are shared and written
          // here from multiple threads without synchronization -- confirm
          // this race is intentional (best-effort "better" tracking).
          if (c_estim - c < cost_estim_min)
          {
            cost_estim_min = c_estim - c;
            better = p;
          }

          const std::vector<Vec> search_list = model->searchGrids(p, ss_normalized, e);

          bool updated(false);
          for (auto it = search_list.cbegin(); it < search_list.cend(); ++it)
          {
            Vec next = p + *it;
            next.cycleUnsigned(g.size());
            if (next.isExceeded(g.size()))
              continue;

            if (g[next] < gp)
            {
              // Skip as this search task has no chance to find better way.
              continue;
            }

            const float cost_estim = model->costEstim(next, e);
            if (cost_estim < 0 || cost_estim == std::numeric_limits<float>::max())
              continue;

            const float cost = model->cost(p, next, ss_normalized, e);
            if (cost < 0 || cost == std::numeric_limits<float>::max())
              continue;

            const float cost_next = c + cost;
            if (g[next] > cost_next)
            {
              updated = true;
              updates.emplace_back(p, next, cost_next + cost_estim, cost_next);
            }
          }
          if (!updated)
            dont.push_back(p);
        }
#pragma omp barrier
#pragma omp critical
        {
          // Merge this thread's buffered updates into the shared state.
          for (const GridmapUpdate& u : updates)
          {
            if (g[u.getPos()] > u.getCost())
            {
              g[u.getPos()] = u.getCost();
              parents_[u.getPos()] = u.getParentPos();
              open_.push(std::move(u.getPriorityVec()));
              if (queue_size_limit_ > 0 && open_.size() > queue_size_limit_)
                open_.pop_back();
            }
          }
          // Dead-end centers: mark with -1 so later tasks skip them
          // (any raw cost compares greater than -1 in the c > gp check).
          for (const Vec& p : dont)
          {
            g[p] = -1;
          }
          const size_t n = updates.size();
          num_updates += n;
          num_total_updates += n;
        }  // omp critical
      }
    }  // omp parallel
    if (!found)
    {
      // No feasible path
      if (return_best)
      {
        findPath(ss_normalized, better, path);
      }
      return false;
    }
    return findPath(ss_normalized, e, path);
  }

  // Reconstruct the path from endpoint e back to any start in ss by
  // walking the parent table; returns false if the chain is broken.
  bool findPath(const std::vector<VecWithCost>& ss, const Vec& e, std::list<Vec>& path) const
  {
    // Work on a copy; erasing visited entries also guards against cycles.
    std::unordered_map<Vec, Vec, Vec> parents = parents_;
    Vec n = e;
    while (true)
    {
      path.push_front(n);
      bool found(false);
      for (const VecWithCost& s : ss)
      {
        if (n == s.v_)
        {
          found = true;
          break;
        }
      }
      if (found)
        break;
      if (parents.find(n) == parents.end())
        return false;
      const Vec child = n;
      n = parents[child];
      parents.erase(child);
    }
    return true;
  }

  Gridmap<float> g_;                              // cost-so-far map
  std::unordered_map<Vec, Vec, Vec> parents_;     // node -> parent
  reservable_priority_queue<PriorityVec> open_;   // A* open list
  size_t queue_size_limit_;                       // 0 = unlimited
  size_t search_task_num_;                        // parallel tasks per pass
};
} // namespace planner_cspace
#endif // PLANNER_CSPACE_GRID_ASTAR_H
|
ocp_nlp_sqp.c | /*
* Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren,
* Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor,
* Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan,
* Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl
*
* This file is part of acados.
*
* The 2-Clause BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.;
*/
#include "acados/ocp_nlp/ocp_nlp_sqp.h"
// external
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#if defined(ACADOS_WITH_OPENMP)
#include <omp.h>
#endif
// blasfeo
#include "blasfeo/include/blasfeo_d_aux.h"
#include "blasfeo/include/blasfeo_d_aux_ext_dep.h"
#include "blasfeo/include/blasfeo_d_blas.h"
// acados
#include "acados/ocp_nlp/ocp_nlp_common.h"
#include "acados/ocp_nlp/ocp_nlp_dynamics_cont.h"
#include "acados/ocp_nlp/ocp_nlp_reg_common.h"
#include "acados/ocp_qp/ocp_qp_common.h"
#include "acados/utils/mem.h"
#include "acados/utils/print.h"
#include "acados/utils/timing.h"
#include "acados/utils/types.h"
/************************************************
* options
************************************************/
/**
 * Number of bytes needed to store the SQP options: the
 * ocp_nlp_sqp_opts struct itself plus the nested NLP-level options.
 */
int ocp_nlp_sqp_opts_calculate_size(void *config_, void *dims_)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;

    int bytes = sizeof(ocp_nlp_sqp_opts);
    bytes += ocp_nlp_opts_calculate_size(config, dims);

    return bytes;
}
/**
 * Place the SQP options into caller-provided raw memory.
 * Layout mirrors ocp_nlp_sqp_opts_calculate_size: the struct first,
 * the nested NLP options directly behind it.
 */
void *ocp_nlp_sqp_opts_assign(void *config_, void *dims_, void *raw_memory)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;

    char *ptr = (char *) raw_memory;

    // SQP options struct at the start of the chunk
    ocp_nlp_sqp_opts *opts = (ocp_nlp_sqp_opts *) ptr;
    ptr += sizeof(ocp_nlp_sqp_opts);

    // nested NLP options
    opts->nlp_opts = ocp_nlp_opts_assign(config, dims, ptr);
    ptr += ocp_nlp_opts_calculate_size(config, dims);

    // must not have written past the computed size
    assert((char *) raw_memory + ocp_nlp_sqp_opts_calculate_size(config, dims) >= ptr);

    return opts;
}
/* Initialize all SQP options to their defaults.  The nested NLP options
 * must be initialized first, since the QP tolerance overrides below write
 * into opts->nlp_opts->qp_solver_opts. */
void ocp_nlp_sqp_opts_initialize_default(void *config_, void *dims_, void *opts_)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = opts_;
ocp_nlp_opts *nlp_opts = opts->nlp_opts;
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
// int ii;
// this first !!!
ocp_nlp_opts_initialize_default(config, dims, nlp_opts);
// SQP opts
opts->max_iter = 20;
opts->tol_stat = 1e-8;
opts->tol_eq = 1e-8;
opts->tol_ineq = 1e-8;
opts->tol_comp = 1e-8;
opts->ext_qp_res = 0;
opts->qp_warm_start = 0;
opts->warm_start_first_qp = false;
opts->rti_phase = 0;
opts->print_level = 0;
// overwrite default submodules opts
// qp tolerance: propagate the SQP tolerances down to the QP solver
qp_solver->opts_set(qp_solver, opts->nlp_opts->qp_solver_opts, "tol_stat", &opts->tol_stat);
qp_solver->opts_set(qp_solver, opts->nlp_opts->qp_solver_opts, "tol_eq", &opts->tol_eq);
qp_solver->opts_set(qp_solver, opts->nlp_opts->qp_solver_opts, "tol_ineq", &opts->tol_ineq);
qp_solver->opts_set(qp_solver, opts->nlp_opts->qp_solver_opts, "tol_comp", &opts->tol_comp);
return;
}
/**
 * Re-validate / refresh the options after the user changed them:
 * simply forwards to the nested NLP-level update.
 */
void ocp_nlp_sqp_opts_update(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_sqp_opts *opts = opts_;

    ocp_nlp_opts_update(config, dims, opts->nlp_opts);

    return;
}
/**
 * Set a single SQP solver option.
 *
 * `field` may carry a module prefix up to the first '_' (e.g. "qp_..."),
 * in which case the option is routed to that submodule; otherwise it is
 * matched against the SQP-level options and finally forwarded to the
 * generic NLP option setter.  `value` points to data of the type the
 * field expects (int, double or bool).
 */
void ocp_nlp_sqp_opts_set(void *config_, void *opts_, const char *field, void* value)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = (ocp_nlp_sqp_opts *) opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    int ii;

    char module[MAX_STR_LEN];
    char *ptr_module = NULL;
    int module_length = 0;

    // extract module name (the part of `field` before the first '_')
    char *char_ = strchr(field, '_');
    if (char_!=NULL)
    {
        module_length = char_-field;
        // Bounds check: an over-long prefix cannot name a known module and
        // would overflow `module`; leave ptr_module NULL so the field falls
        // through to the generic NLP setter below.
        if (module_length < MAX_STR_LEN)
        {
            for (ii=0; ii<module_length; ii++)
                module[ii] = field[ii];
            module[module_length] = '\0'; // add end of string
            ptr_module = module;
        }
    }

    // pass options to QP module
    if ( ptr_module!=NULL && (!strcmp(ptr_module, "qp")) )
    {
        // config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts, field+module_length+1, value);
        ocp_nlp_opts_set(config, nlp_opts, field, value);
        // keep a local copy of the warm-start flag; the SQP loop toggles
        // warm start per iteration and needs to restore this value
        if (!strcmp(field, "qp_warm_start"))
        {
            int* i_ptr = (int *) value;
            opts->qp_warm_start = *i_ptr;
        }
    }
    else // nlp opts
    {
        if (!strcmp(field, "max_iter"))
        {
            int* max_iter = (int *) value;
            opts->max_iter = *max_iter;
        }
        else if (!strcmp(field, "tol_stat"))
        {
            double* tol_stat = (double *) value;
            opts->tol_stat = *tol_stat;
            // TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified.
            config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "tol_stat", value);
        }
        else if (!strcmp(field, "tol_eq"))
        {
            double* tol_eq = (double *) value;
            opts->tol_eq = *tol_eq;
            // TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified.
            config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "tol_eq", value);
        }
        else if (!strcmp(field, "tol_ineq"))
        {
            double* tol_ineq = (double *) value;
            opts->tol_ineq = *tol_ineq;
            // TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified.
            config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "tol_ineq", value);
        }
        else if (!strcmp(field, "tol_comp"))
        {
            double* tol_comp = (double *) value;
            opts->tol_comp = *tol_comp;
            // TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified.
            config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "tol_comp", value);
        }
        else if (!strcmp(field, "ext_qp_res"))
        {
            int* ext_qp_res = (int *) value;
            opts->ext_qp_res = *ext_qp_res;
        }
        else if (!strcmp(field, "warm_start_first_qp"))
        {
            bool* warm_start_first_qp = (bool *) value;
            opts->warm_start_first_qp = *warm_start_first_qp;
        }
        else if (!strcmp(field, "rti_phase"))
        {
            int* rti_phase = (int *) value;
            // only value 0 is supported by the SQP module
            if (*rti_phase < 0 || *rti_phase > 0) {
                printf("\nerror: ocp_nlp_sqp_opts_set: invalid value for rti_phase field.");
                printf("possible values are: 0\n");
                exit(1);
            } else opts->rti_phase = *rti_phase;
        }
        else if (!strcmp(field, "print_level"))
        {
            int* print_level = (int *) value;
            if (*print_level < 0)
            {
                printf("\nerror: ocp_nlp_sqp_opts_set: invalid value for print_level field, need int >=0, got %d.", *print_level);
                exit(1);
            }
            opts->print_level = *print_level;
        }
        else
        {
            // unknown at SQP level: let the generic NLP setter handle it
            ocp_nlp_opts_set(config, nlp_opts, field, value);
            // printf("\nerror: ocp_nlp_sqp_opts_set: wrong field: %s\n", field);
            // exit(1);
        }
    }

    return;
}
/**
 * Set a stage-wise option: forwarded unchanged to the NLP-level
 * per-stage option setter.
 */
void ocp_nlp_sqp_opts_set_at_stage(void *config_, void *opts_, int stage, const char *field, void* value)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = (ocp_nlp_sqp_opts *) opts_;

    ocp_nlp_opts_set_at_stage(config, opts->nlp_opts, stage, field, value);

    return;
}
/************************************************
* memory
************************************************/
/**
 * Number of bytes needed for the SQP memory: the struct, the NLP
 * residuals, the nested NLP memory, and the statistics table.
 */
int ocp_nlp_sqp_memory_calculate_size(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_sqp_opts *opts = opts_;

    int bytes = sizeof(ocp_nlp_sqp_memory);

    // nlp residuals
    bytes += ocp_nlp_res_calculate_size(dims);

    // nested nlp memory
    bytes += ocp_nlp_memory_calculate_size(config, dims, opts->nlp_opts);

    // statistics table: one row per SQP iteration (+1), 6 columns,
    // plus 4 columns when external QP residuals are recorded
    const int rows = opts->max_iter + 1;
    int cols = 6;
    if (opts->ext_qp_res)
        cols += 4;
    bytes += rows * cols * sizeof(double);

    bytes += 8; // initial align
    make_int_multiple_of(8, &bytes);

    return bytes;
}
/* Place the SQP memory into caller-provided raw memory.  The layout must
 * mirror ocp_nlp_sqp_memory_calculate_size exactly: align, struct, nlp
 * residuals, nested nlp memory, statistics table. */
void *ocp_nlp_sqp_memory_assign(void *config_, void *dims_, void *opts_, void *raw_memory)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = opts_;
ocp_nlp_opts *nlp_opts = opts->nlp_opts;
// ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
// ocp_nlp_dynamics_config **dynamics = config->dynamics;
// ocp_nlp_cost_config **cost = config->cost;
// ocp_nlp_constraints_config **constraints = config->constraints;
char *c_ptr = (char *) raw_memory;
// int N = dims->N;
// int *nx = dims->nx;
// int *nu = dims->nu;
// int *nz = dims->nz;
// initial align
align_char_to(8, &c_ptr);
ocp_nlp_sqp_memory *mem = (ocp_nlp_sqp_memory *) c_ptr;
c_ptr += sizeof(ocp_nlp_sqp_memory);
// nlp res
mem->nlp_res = ocp_nlp_res_assign(dims, c_ptr);
c_ptr += mem->nlp_res->memsize;
// nlp mem
mem->nlp_mem = ocp_nlp_memory_assign(config, dims, nlp_opts, c_ptr);
c_ptr += ocp_nlp_memory_calculate_size(config, dims, nlp_opts);
// stat table: (max_iter+1) rows; 6 columns, 4 more with ext_qp_res
mem->stat = (double *) c_ptr;
mem->stat_m = opts->max_iter+1;
mem->stat_n = 6;
if (opts->ext_qp_res)
mem->stat_n += 4;
c_ptr += mem->stat_m*mem->stat_n*sizeof(double);
mem->status = ACADOS_READY;
// must not have written past the computed size
assert((char *) raw_memory + ocp_nlp_sqp_memory_calculate_size(config, dims, opts) >= c_ptr);
return mem;
}
/************************************************
* workspace
************************************************/
/**
 * Number of bytes needed for the SQP workspace: the struct, the nested
 * NLP workspace, the temporary QP in/out buffers, and (optionally) the
 * buffers for external QP residual computation.
 */
int ocp_nlp_sqp_workspace_calculate_size(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_sqp_opts *opts = opts_;

    int bytes = sizeof(ocp_nlp_sqp_workspace);

    // nested nlp workspace
    bytes += ocp_nlp_workspace_calculate_size(config, dims, opts->nlp_opts);

    // temporary qp in / qp out
    bytes += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);
    bytes += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);

    if (opts->ext_qp_res)
    {
        // qp residuals and their workspace
        bytes += ocp_qp_res_calculate_size(dims->qp_solver->orig_dims);
        bytes += ocp_qp_res_workspace_calculate_size(dims->qp_solver->orig_dims);
    }

    return bytes;
}
/* Set up the workspace pointers inside the raw workspace buffer.  The
 * layout must mirror ocp_nlp_sqp_workspace_calculate_size exactly:
 * struct, nlp workspace, tmp qp in, tmp qp out, optional qp residuals. */
static void ocp_nlp_sqp_cast_workspace(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_sqp_opts *opts, ocp_nlp_sqp_memory *mem, ocp_nlp_sqp_workspace *work)
{
ocp_nlp_opts *nlp_opts = opts->nlp_opts;
ocp_nlp_memory *nlp_mem = mem->nlp_mem;
// sqp workspace struct itself
char *c_ptr = (char *) work;
c_ptr += sizeof(ocp_nlp_sqp_workspace);
// nlp workspace
work->nlp_work = ocp_nlp_workspace_assign(config, dims, nlp_opts, nlp_mem, c_ptr);
c_ptr += ocp_nlp_workspace_calculate_size(config, dims, nlp_opts);
// tmp qp in
work->tmp_qp_in = ocp_qp_in_assign(dims->qp_solver->orig_dims, c_ptr);
c_ptr += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);
// tmp qp out
work->tmp_qp_out = ocp_qp_out_assign(dims->qp_solver->orig_dims, c_ptr);
c_ptr += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);
if (opts->ext_qp_res)
{
// qp res
work->qp_res = ocp_qp_res_assign(dims->qp_solver->orig_dims, c_ptr);
c_ptr += ocp_qp_res_calculate_size(dims->qp_solver->orig_dims);
// qp res ws
work->qp_res_ws = ocp_qp_res_workspace_assign(dims->qp_solver->orig_dims, c_ptr);
c_ptr += ocp_qp_res_workspace_calculate_size(dims->qp_solver->orig_dims);
}
// must not have written past the computed size
assert((char *) work + ocp_nlp_sqp_workspace_calculate_size(config, dims, opts) >= c_ptr);
return;
}
/************************************************
* functions
************************************************/
int ocp_nlp_sqp(void *config_, void *dims_, void *nlp_in_, void *nlp_out_,
void *opts_, void *mem_, void *work_)
{
acados_timer timer0, timer1;
acados_tic(&timer0);
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = opts_;
ocp_nlp_opts *nlp_opts = opts->nlp_opts;
ocp_nlp_sqp_memory *mem = mem_;
ocp_nlp_in *nlp_in = nlp_in_;
ocp_nlp_out *nlp_out = nlp_out_;
ocp_nlp_memory *nlp_mem = mem->nlp_mem;
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_sqp_workspace *work = work_;
ocp_nlp_sqp_cast_workspace(config, dims, opts, mem, work);
ocp_nlp_workspace *nlp_work = work->nlp_work;
// zero timers
double total_time = 0.0;
double tmp_time;
mem->time_qp_sol = 0.0;
mem->time_qp_solver_call = 0.0;
mem->time_qp_xcond = 0.0;
mem->time_lin = 0.0;
mem->time_reg = 0.0;
mem->time_tot = 0.0;
int N = dims->N;
int ii;
int qp_iter = 0;
int qp_status = 0;
#if defined(ACADOS_WITH_OPENMP)
// backup number of threads
int num_threads_bkp = omp_get_num_threads();
// set number of threads
omp_set_num_threads(opts->nlp_opts->num_threads);
#pragma omp parallel
{ // beginning of parallel region
#endif
// alias to dynamics_memory
#if defined(ACADOS_WITH_OPENMP)
#pragma omp for
#endif
for (ii = 0; ii < N; ii++)
{
config->dynamics[ii]->memory_set_ux_ptr(nlp_out->ux+ii, nlp_mem->dynamics[ii]);
config->dynamics[ii]->memory_set_tmp_ux_ptr(nlp_work->tmp_nlp_out->ux+ii, nlp_mem->dynamics[ii]);
config->dynamics[ii]->memory_set_ux1_ptr(nlp_out->ux+ii+1, nlp_mem->dynamics[ii]);
config->dynamics[ii]->memory_set_tmp_ux1_ptr(nlp_work->tmp_nlp_out->ux+ii+1, nlp_mem->dynamics[ii]);
config->dynamics[ii]->memory_set_pi_ptr(nlp_out->pi+ii, nlp_mem->dynamics[ii]);
config->dynamics[ii]->memory_set_tmp_pi_ptr(nlp_work->tmp_nlp_out->pi+ii, nlp_mem->dynamics[ii]);
config->dynamics[ii]->memory_set_BAbt_ptr(nlp_mem->qp_in->BAbt+ii, nlp_mem->dynamics[ii]);
config->dynamics[ii]->memory_set_RSQrq_ptr(nlp_mem->qp_in->RSQrq+ii, nlp_mem->dynamics[ii]);
config->dynamics[ii]->memory_set_dzduxt_ptr(nlp_mem->dzduxt+ii, nlp_mem->dynamics[ii]);
config->dynamics[ii]->memory_set_sim_guess_ptr(nlp_mem->sim_guess+ii, nlp_mem->set_sim_guess+ii, nlp_mem->dynamics[ii]);
config->dynamics[ii]->memory_set_z_alg_ptr(nlp_mem->z_alg+ii, nlp_mem->dynamics[ii]);
}
// alias to cost_memory
#if defined(ACADOS_WITH_OPENMP)
#pragma omp for
#endif
for (ii = 0; ii <= N; ii++)
{
config->cost[ii]->memory_set_ux_ptr(nlp_out->ux+ii, nlp_mem->cost[ii]);
config->cost[ii]->memory_set_tmp_ux_ptr(nlp_work->tmp_nlp_out->ux+ii, nlp_mem->cost[ii]);
config->cost[ii]->memory_set_z_alg_ptr(nlp_mem->z_alg+ii, nlp_mem->cost[ii]);
config->cost[ii]->memory_set_dzdux_tran_ptr(nlp_mem->dzduxt+ii, nlp_mem->cost[ii]);
config->cost[ii]->memory_set_RSQrq_ptr(nlp_mem->qp_in->RSQrq+ii, nlp_mem->cost[ii]);
config->cost[ii]->memory_set_Z_ptr(nlp_mem->qp_in->Z+ii, nlp_mem->cost[ii]);
}
// alias to constraints_memory
#if defined(ACADOS_WITH_OPENMP)
#pragma omp for
#endif
for (ii = 0; ii <= N; ii++)
{
config->constraints[ii]->memory_set_ux_ptr(nlp_out->ux+ii, nlp_mem->constraints[ii]);
config->constraints[ii]->memory_set_tmp_ux_ptr(nlp_work->tmp_nlp_out->ux+ii, nlp_mem->constraints[ii]);
config->constraints[ii]->memory_set_lam_ptr(nlp_out->lam+ii, nlp_mem->constraints[ii]);
config->constraints[ii]->memory_set_tmp_lam_ptr(nlp_work->tmp_nlp_out->lam+ii, nlp_mem->constraints[ii]);
config->constraints[ii]->memory_set_z_alg_ptr(nlp_mem->z_alg+ii, nlp_mem->constraints[ii]);
config->constraints[ii]->memory_set_dzdux_tran_ptr(nlp_mem->dzduxt+ii, nlp_mem->constraints[ii]);
config->constraints[ii]->memory_set_DCt_ptr(nlp_mem->qp_in->DCt+ii, nlp_mem->constraints[ii]);
config->constraints[ii]->memory_set_RSQrq_ptr(nlp_mem->qp_in->RSQrq+ii, nlp_mem->constraints[ii]);
config->constraints[ii]->memory_set_idxb_ptr(nlp_mem->qp_in->idxb[ii], nlp_mem->constraints[ii]);
config->constraints[ii]->memory_set_idxs_rev_ptr(nlp_mem->qp_in->idxs_rev[ii], nlp_mem->constraints[ii]);
config->constraints[ii]->memory_set_idxe_ptr(nlp_mem->qp_in->idxe[ii], nlp_mem->constraints[ii]);
}
// alias to regularize memory
config->regularize->memory_set_RSQrq_ptr(dims->regularize, nlp_mem->qp_in->RSQrq, nlp_mem->regularize_mem);
config->regularize->memory_set_rq_ptr(dims->regularize, nlp_mem->qp_in->rqz, nlp_mem->regularize_mem);
config->regularize->memory_set_BAbt_ptr(dims->regularize, nlp_mem->qp_in->BAbt, nlp_mem->regularize_mem);
config->regularize->memory_set_b_ptr(dims->regularize, nlp_mem->qp_in->b, nlp_mem->regularize_mem);
config->regularize->memory_set_idxb_ptr(dims->regularize, nlp_mem->qp_in->idxb, nlp_mem->regularize_mem);
config->regularize->memory_set_DCt_ptr(dims->regularize, nlp_mem->qp_in->DCt, nlp_mem->regularize_mem);
config->regularize->memory_set_ux_ptr(dims->regularize, nlp_mem->qp_out->ux, nlp_mem->regularize_mem);
config->regularize->memory_set_pi_ptr(dims->regularize, nlp_mem->qp_out->pi, nlp_mem->regularize_mem);
config->regularize->memory_set_lam_ptr(dims->regularize, nlp_mem->qp_out->lam, nlp_mem->regularize_mem);
// copy sampling times into dynamics model
#if defined(ACADOS_WITH_OPENMP)
#pragma omp for
#endif
// NOTE(oj): this will lead in an error for irk_gnsf, T must be set in precompute;
// -> remove here and make sure precompute is called everywhere (e.g. Python interface).
for (ii = 0; ii < N; ii++)
{
config->dynamics[ii]->model_set(config->dynamics[ii], dims->dynamics[ii],
nlp_in->dynamics[ii], "T", nlp_in->Ts+ii);
}
#if defined(ACADOS_WITH_OPENMP)
} // end of parallel region
#endif
// initialize QP
ocp_nlp_initialize_qp(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);
// main sqp loop
int sqp_iter = 0;
nlp_mem->sqp_iter = &sqp_iter;
for (; sqp_iter < opts->max_iter; sqp_iter++)
{
if (opts->print_level > 0)
{
printf("\n------- sqp iter %d (max_iter %d) --------\n",
sqp_iter, opts->max_iter);
if (opts->print_level > sqp_iter + 1)
print_ocp_qp_in(nlp_mem->qp_in);
}
// linearizate NLP and update QP matrices
acados_tic(&timer1);
ocp_nlp_approximate_qp_matrices(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);
mem->time_lin += acados_toc(&timer1);
// update QP rhs for SQP (step prim var, abs dual var)
ocp_nlp_approximate_qp_vectors_sqp(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);
// compute nlp residuals
ocp_nlp_res_compute(dims, nlp_in, nlp_out, mem->nlp_res, nlp_mem);
nlp_out->inf_norm_res = mem->nlp_res->inf_norm_res_g;
nlp_out->inf_norm_res = (mem->nlp_res->inf_norm_res_b > nlp_out->inf_norm_res) ?
mem->nlp_res->inf_norm_res_b :
nlp_out->inf_norm_res;
nlp_out->inf_norm_res = (mem->nlp_res->inf_norm_res_d > nlp_out->inf_norm_res) ?
mem->nlp_res->inf_norm_res_d :
nlp_out->inf_norm_res;
nlp_out->inf_norm_res = (mem->nlp_res->inf_norm_res_m > nlp_out->inf_norm_res) ?
mem->nlp_res->inf_norm_res_m :
nlp_out->inf_norm_res;
// save statistics
if (sqp_iter < mem->stat_m)
{
mem->stat[mem->stat_n*sqp_iter+0] = mem->nlp_res->inf_norm_res_g;
mem->stat[mem->stat_n*sqp_iter+1] = mem->nlp_res->inf_norm_res_b;
mem->stat[mem->stat_n*sqp_iter+2] = mem->nlp_res->inf_norm_res_d;
mem->stat[mem->stat_n*sqp_iter+3] = mem->nlp_res->inf_norm_res_m;
}
// exit conditions on residuals
if ((mem->nlp_res->inf_norm_res_g < opts->tol_stat) &
(mem->nlp_res->inf_norm_res_b < opts->tol_eq) &
(mem->nlp_res->inf_norm_res_d < opts->tol_ineq) &
(mem->nlp_res->inf_norm_res_m < opts->tol_comp))
{
// save sqp iterations number
mem->sqp_iter = sqp_iter;
nlp_out->sqp_iter = sqp_iter;
// stop timer
total_time += acados_toc(&timer0);
// save time
nlp_out->total_time = total_time;
mem->time_tot = total_time;
#if defined(ACADOS_WITH_OPENMP)
// restore number of threads
omp_set_num_threads(num_threads_bkp);
#endif
mem->status = ACADOS_SUCCESS;
return mem->status;
}
// regularize Hessian
acados_tic(&timer1);
config->regularize->regularize_hessian(config->regularize, dims->regularize,
opts->nlp_opts->regularize, nlp_mem->regularize_mem);
mem->time_reg += acados_toc(&timer1);
// (typically) no warm start at first iteration
if (sqp_iter == 0 && !opts->warm_start_first_qp)
{
int tmp_int = 0;
config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts,
"warm_start", &tmp_int);
}
// solve qp
acados_tic(&timer1);
qp_status = qp_solver->evaluate(qp_solver, dims->qp_solver, nlp_mem->qp_in, nlp_mem->qp_out,
opts->nlp_opts->qp_solver_opts, nlp_mem->qp_solver_mem, nlp_work->qp_work);
mem->time_qp_sol += acados_toc(&timer1);
qp_solver->memory_get(qp_solver, nlp_mem->qp_solver_mem, "time_qp_solver_call", &tmp_time);
mem->time_qp_solver_call += tmp_time;
qp_solver->memory_get(qp_solver, nlp_mem->qp_solver_mem, "time_qp_xcond", &tmp_time);
mem->time_qp_xcond += tmp_time;
// compute correct dual solution in case of Hessian regularization
acados_tic(&timer1);
config->regularize->correct_dual_sol(config->regularize, dims->regularize,
opts->nlp_opts->regularize, nlp_mem->regularize_mem);
mem->time_reg += acados_toc(&timer1);
// restore default warm start
if (sqp_iter==0)
{
config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts,
"warm_start", &opts->qp_warm_start);
}
// TODO move into QP solver memory ???
qp_info *qp_info_;
ocp_qp_out_get(nlp_mem->qp_out, "qp_info", &qp_info_);
nlp_out->qp_iter = qp_info_->num_iter;
// printf("\nqp_iter = %d, sqp_iter = %d, max_sqp_iter = %d\n", nlp_out->qp_iter, sqp_iter, opts->max_iter);
qp_iter = qp_info_->num_iter;
// save statistics of last qp solver call
if (sqp_iter+1 < mem->stat_m)
{
mem->stat[mem->stat_n*(sqp_iter+1)+4] = qp_status;
mem->stat[mem->stat_n*(sqp_iter+1)+5] = qp_iter;
}
// compute external QP residuals (for debugging)
if (opts->ext_qp_res)
{
ocp_qp_res_compute(nlp_mem->qp_in, nlp_mem->qp_out, work->qp_res, work->qp_res_ws);
if (sqp_iter+1 < mem->stat_m)
ocp_qp_res_compute_nrm_inf(work->qp_res, mem->stat+(mem->stat_n*(sqp_iter+1)+6));
}
if ((qp_status!=ACADOS_SUCCESS) & (qp_status!=ACADOS_MAXITER))
{
// print_ocp_qp_in(nlp_mem->qp_in);
// save sqp iterations number
mem->sqp_iter = sqp_iter;
nlp_out->sqp_iter = sqp_iter;
// stop timer
total_time += acados_toc(&timer0);
// save time
mem->time_tot = total_time;
nlp_out->total_time = total_time;
printf("QP solver returned error status %d in iteration %d\n", qp_status, sqp_iter);
#if defined(ACADOS_WITH_OPENMP)
// restore number of threads
omp_set_num_threads(num_threads_bkp);
#endif
if (opts->print_level > 1)
{
printf("\n Failed to solve the following QP:\n");
if (opts->print_level > sqp_iter + 1)
print_ocp_qp_in(nlp_mem->qp_in);
}
mem->status = ACADOS_QP_FAILURE;
return mem->status;
}
ocp_nlp_update_variables_sqp(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);
// ocp_nlp_dims_print(nlp_out->dims);
// ocp_nlp_out_print(nlp_out);
// exit(1);
// ??? @rien
// for (int_t i = 0; i < N; i++)
// {
// ocp_nlp_dynamics_opts *dynamics_opts = opts->dynamics[i];
// sim_opts *opts = dynamics_opts->sim_solver;
// if (opts->scheme == NULL)
// continue;
// opts->sens_adj = (opts->scheme->type != exact);
// if (nlp_in->freezeSens) {
// // freeze inexact sensitivities after first SQP iteration !!
// opts->scheme->freeze = true;
// }
// }
if (opts->print_level > 0)
{
printf("Residuals: stat: %e, eq: %e, ineq: %e, comp: %e.\n", mem->nlp_res->inf_norm_res_g,
mem->nlp_res->inf_norm_res_b, mem->nlp_res->inf_norm_res_d, mem->nlp_res->inf_norm_res_m );
}
}
// stop timer
total_time += acados_toc(&timer0);
// ocp_nlp_out_print(nlp_out);
// save sqp iterations number
mem->sqp_iter = sqp_iter;
nlp_out->sqp_iter = sqp_iter;
// save time
mem->time_tot = total_time;
nlp_out->total_time = total_time;
// maximum number of iterations reached
#if defined(ACADOS_WITH_OPENMP)
// restore number of threads
omp_set_num_threads(num_threads_bkp);
#endif
mem->status = ACADOS_MAXITER;
printf("\n ocp_nlp_sqp: maximum iterations reached\n");
if (opts->print_level > 0)
{
printf("Residuals: stat: %e, eq: %e, ineq: %e, comp: %e.\n", mem->nlp_res->inf_norm_res_g,
mem->nlp_res->inf_norm_res_b, mem->nlp_res->inf_norm_res_d, mem->nlp_res->inf_norm_res_m );
}
return mem->status;
}
// Precompute stage: validate slack dimensions against the constraint modules,
// then set the shooting-interval lengths T and run each dynamics module's
// own precompute. Returns ACADOS_SUCCESS or the first failing module status.
int ocp_nlp_sqp_precompute(void *config_, void *dims_, void *nlp_in_, void *nlp_out_,
                           void *opts_, void *mem_, void *work_)
{
    // cast opaque arguments to their concrete module types
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_nlp_sqp_memory *mem = mem_;
    ocp_nlp_in *nlp_in = nlp_in_;
    // ocp_nlp_out *nlp_out = nlp_out_;

    ocp_nlp_memory *nlp_mem = mem->nlp_mem;

    ocp_nlp_sqp_workspace *work = work_;
    ocp_nlp_sqp_cast_workspace(config, dims, opts, mem, work);
    ocp_nlp_workspace *nlp_work = work->nlp_work;

    int N = dims->N;

    // TODO(all) add flag to enable/disable checks
    // consistency check: ns reported by each constraint module must match dims
    for (int stage = 0; stage <= N; stage++)
    {
        int module_val;
        config->constraints[stage]->dims_get(config->constraints[stage], dims->constraints[stage], "ns", &module_val);
        if (dims->ns[stage] != module_val)
        {
            printf("ocp_nlp_sqp_precompute: inconsistent dimension ns for stage %d with constraint module, got %d, module: %d.",
                   stage, dims->ns[stage], module_val);
            exit(1);
        }
    }

    // per-stage dynamics precompute; bail out on the first failure
    int status = ACADOS_SUCCESS;
    for (int stage = 0; stage < N; stage++)
    {
        // set the integration horizon T of this interval
        config->dynamics[stage]->model_set(config->dynamics[stage], dims->dynamics[stage],
                                           nlp_in->dynamics[stage], "T", nlp_in->Ts+stage);

        status = config->dynamics[stage]->precompute(config->dynamics[stage], dims->dynamics[stage],
                                                     nlp_in->dynamics[stage], opts->nlp_opts->dynamics[stage],
                                                     nlp_mem->dynamics[stage], nlp_work->dynamics[stage]);
        if (status != ACADOS_SUCCESS)
            return status;
    }

    return status;
}
// Evaluate the sensitivity of the NLP solution with respect to one seed:
// the last QP is copied, its right-hand side zeroed, a unit entry is placed
// at (field, stage, index), and the QP solver's eval_sens produces the
// directional sensitivities, which are copied into sens_nlp_out.
// Only field "ex" (initial state) at stage 0 is currently supported; any
// other request prints an error and exits.
void ocp_nlp_sqp_eval_param_sens(void *config_, void *dims_, void *opts_, void *mem_, void *work_,
    char *field, int stage, int index, void *sens_nlp_out_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_nlp_sqp_memory *mem = mem_;
    ocp_nlp_memory *nlp_mem = mem->nlp_mem;
    ocp_nlp_out *sens_nlp_out = sens_nlp_out_;

    ocp_nlp_sqp_workspace *work = work_;
    ocp_nlp_sqp_cast_workspace(config, dims, opts, mem, work);
    ocp_nlp_workspace *nlp_work = work->nlp_work;

    // copy the last solved QP and zero its right-hand side
    d_ocp_qp_copy_all(nlp_mem->qp_in, work->tmp_qp_in);
    d_ocp_qp_set_rhs_zero(work->tmp_qp_in);

    double one = 1.0;

    // FIX: use logical && instead of bitwise & to combine the two conditions
    // (same result here since both operands are 0/1, but && states the intent
    // and short-circuits)
    if ((!strcmp("ex", field)) && (stage==0))
    {
        // unit seed in the initial-state bounds
        d_ocp_qp_set_el("lbx", stage, index, &one, work->tmp_qp_in);
        d_ocp_qp_set_el("ubx", stage, index, &one, work->tmp_qp_in);

        // d_ocp_qp_print(work->tmp_qp_in->dim, work->tmp_qp_in);
        config->qp_solver->eval_sens(config->qp_solver, dims->qp_solver, work->tmp_qp_in, work->tmp_qp_out,
            opts->nlp_opts->qp_solver_opts, nlp_mem->qp_solver_mem, nlp_work->qp_work);
        // d_ocp_qp_sol_print(work->tmp_qp_out->dim, work->tmp_qp_out);
        // exit(1);

        /* copy tmp_qp_out into sens_nlp_out */
        int i;
        int N = dims->N;
        int *nv = dims->nv;
        int *nx = dims->nx;
        // int *nu = dims->nu;
        int *ni = dims->ni;
        // int *nz = dims->nz;

        for (i = 0; i <= N; i++)
        {
            // primal variables (u, x, slacks)
            blasfeo_dveccp(nv[i], work->tmp_qp_out->ux + i, 0, sens_nlp_out->ux + i, 0);

            // dynamics multipliers exist only on the N intervals
            if (i < N)
                blasfeo_dveccp(nx[i + 1], work->tmp_qp_out->pi + i, 0, sens_nlp_out->pi + i, 0);

            // inequality multipliers and slacks of the constraints
            blasfeo_dveccp(2 * ni[i], work->tmp_qp_out->lam + i, 0, sens_nlp_out->lam + i, 0);
            blasfeo_dveccp(2 * ni[i], work->tmp_qp_out->t + i, 0, sens_nlp_out->t + i, 0);
        }
    }
    else
    {
        printf("\nerror: field %s at stage %d not available in ocp_nlp_sqp_eval_param_sens\n", field, stage);
        exit(1);
    }

    return;
}
// TODO rename memory_get ???
// Getter for SQP solver statistics and internal objects. The requested
// quantity named by `field` is written through `return_value_`, which the
// caller must point at the matching type (int, double, or pointer slot).
// Unknown fields print an error and exit.
void ocp_nlp_sqp_get(void *config_, void *dims_, void *mem_, const char *field, void *return_value_)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_sqp_memory *mem = mem_;

    if (!strcmp("sqp_iter", field))
    {
        int *value = return_value_;
        *value = mem->sqp_iter;
    }
    else if (!strcmp("status", field))
    {
        int *value = return_value_;
        *value = mem->status;
    }
    else if (!strcmp("time_tot", field) || !strcmp("tot_time", field))
    {
        double *value = return_value_;
        *value = mem->time_tot;
    }
    else if (!strcmp("time_qp_sol", field) || !strcmp("time_qp", field))
    {
        double *value = return_value_;
        *value = mem->time_qp_sol;
    }
    else if (!strcmp("time_qp_solver", field) || !strcmp("time_qp_solver_call", field))
    {
        double *value = return_value_;
        *value = mem->time_qp_solver_call;
    }
    else if (!strcmp("time_qp_xcond", field))
    {
        double *value = return_value_;
        *value = mem->time_qp_xcond;
    }
    else if (!strcmp("time_lin", field))
    {
        double *value = return_value_;
        *value = mem->time_lin;
    }
    else if (!strcmp("time_reg", field))
    {
        double *value = return_value_;
        *value = mem->time_reg;
    }
    else if (!strcmp("time_sim", field) || !strcmp("time_sim_ad", field) || !strcmp("time_sim_la", field))
    {
        // sum the requested simulation timing over all shooting intervals
        double tmp = 0.0;
        double *ptr = return_value_;
        int N = dims->N;
        int ii;
        // FIX: initialize the output before accumulating; previously the loop
        // did "*ptr += tmp" on top of whatever the caller's memory contained
        *ptr = 0.0;
        for (ii=0; ii<N; ii++)
        {
            config->dynamics[ii]->memory_get(config->dynamics[ii], dims->dynamics[ii], mem->nlp_mem->dynamics[ii], field, &tmp);
            *ptr += tmp;
        }
    }
    else if (!strcmp("nlp_res", field))
    {
        ocp_nlp_res **value = return_value_;
        *value = mem->nlp_res;
    }
    else if (!strcmp("stat", field))
    {
        double **value = return_value_;
        *value = mem->stat;
    }
    else if (!strcmp("statistics", field))
    {
        // copy the per-iteration statistics table, column-major, with the
        // iteration index prepended as column 0
        int n_row = mem->stat_m<mem->sqp_iter+1 ? mem->stat_m : mem->sqp_iter+1;
        double *value = return_value_;
        for (int ii=0; ii<n_row; ii++)
        {
            value[ii+0] = ii;
            for (int jj=0; jj<mem->stat_n; jj++)
                value[ii+(jj+1)*n_row] = mem->stat[jj+ii*mem->stat_n];
        }
    }
    else if (!strcmp("stat_m", field))
    {
        int *value = return_value_;
        *value = mem->stat_m;
    }
    else if (!strcmp("stat_n", field))
    {
        int *value = return_value_;
        *value = mem->stat_n;
    }
    else if (!strcmp("nlp_mem", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem;
    }
    else if (!strcmp("qp_xcond_dims", field))
    {
        void **value = return_value_;
        *value = dims->qp_solver->xcond_dims;
    }
    else if (!strcmp("qp_xcond_in", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem->qp_solver_mem->xcond_qp_in;
    }
    else if (!strcmp("qp_xcond_out", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem->qp_solver_mem->xcond_qp_out;
    }
    else if (!strcmp("qp_in", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem->qp_in;
    }
    else if (!strcmp("qp_out", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem->qp_out;
    }
    else if (!strcmp("qp_iter", field))
    {
        // forwarded to the QP solver's own memory getter
        config->qp_solver->memory_get(config->qp_solver, mem->nlp_mem->qp_solver_mem, "iter", return_value_);
    }
    else
    {
        printf("\nerror: field %s not available in ocp_nlp_sqp_get\n", field);
        exit(1);
    }
}
// Populate an ocp_nlp_config with the SQP implementation's function pointers.
void ocp_nlp_sqp_config_initialize_default(void *config_)
{
    ocp_nlp_config *config = config_;

    // options interface
    config->opts_calculate_size = ocp_nlp_sqp_opts_calculate_size;
    config->opts_assign = ocp_nlp_sqp_opts_assign;
    config->opts_initialize_default = ocp_nlp_sqp_opts_initialize_default;
    config->opts_update = ocp_nlp_sqp_opts_update;
    config->opts_set = ocp_nlp_sqp_opts_set;
    config->opts_set_at_stage = ocp_nlp_sqp_opts_set_at_stage;

    // memory and workspace sizing
    config->memory_calculate_size = ocp_nlp_sqp_memory_calculate_size;
    config->memory_assign = ocp_nlp_sqp_memory_assign;
    config->workspace_calculate_size = ocp_nlp_sqp_workspace_calculate_size;

    // solver entry points
    config->evaluate = ocp_nlp_sqp;
    config->eval_param_sens = ocp_nlp_sqp_eval_param_sens;
    config->config_initialize_default = ocp_nlp_sqp_config_initialize_default;
    config->precompute = ocp_nlp_sqp_precompute;
    config->get = ocp_nlp_sqp_get;
}
|
api.c | // RUN: %libomptarget-compile-run-and-check-generic
// XFAIL: nvptx64-nvidia-cuda
// XFAIL: nvptx64-nvidia-cuda-newRTL
// Fails on amdgpu with error: GPU Memory Error
// XFAIL: amdgcn-amd-amdhsa
// XFAIL: amdgcn-amd-amdhsa-newRTL
#include <stdio.h>
#include <omp.h>
// ---------------------------------------------------------------------------
// Various definitions copied from OpenMP RTL
extern void __tgt_register_requires(int64_t);
// End of definitions copied from OpenMP RTL.
// ---------------------------------------------------------------------------
#pragma omp requires unified_shared_memory
#define N 1024
// Fill the three length-N test vectors: A <- all zeros, B <- all ones,
// C <- the ramp 0..N-1.
void init(int A[], int B[], int C[]) {
  int i = 0;
  while (i < N) {
    A[i] = 0;
    B[i] = 1;
    C[i] = i;
    ++i;
  }
}
int main(int argc, char *argv[]) {
  const int device = omp_get_default_device();

  // Manual registration of requires flags for Clang versions
  // that do not support requires.
  __tgt_register_requires(8);

  // With unified shared memory, the device count reported equals the
  // initial (host) device id, which is what the patterns below verify.
  // CHECK: Initial device: [[INITIAL_DEVICE:[0-9]+]]
  printf("Initial device: %d\n", omp_get_initial_device());

  // CHECK: Num devices: [[INITIAL_DEVICE]]
  printf("Num devices: %d\n", omp_get_num_devices());

  //
  // Target alloc & target memcpy
  //
  int A[N], B[N], C[N];

  // Init
  init(A, B, C);

  int *pA, *pB, *pC;

  // map ptrs
  pA = &A[0];
  pB = &B[0];
  pC = &C[0];

  // explicitly managed device-side buffers (not mapped by the runtime)
  int *d_A = (int *)omp_target_alloc(N * sizeof(int), device);
  int *d_B = (int *)omp_target_alloc(N * sizeof(int), device);
  int *d_C = (int *)omp_target_alloc(N * sizeof(int), device);

  // CHECK: omp_target_alloc succeeded
  printf("omp_target_alloc %s\n", d_A && d_B && d_C ? "succeeded" : "failed");

  // host -> device copies of the two inputs
  omp_target_memcpy(d_B, pB, N * sizeof(int), 0, 0, device,
                    omp_get_initial_device());
  omp_target_memcpy(d_C, pC, N * sizeof(int), 0, 0, device,
                    omp_get_initial_device());

#pragma omp target is_device_ptr(d_A, d_B, d_C) device(device)
  {
#pragma omp parallel for schedule(static, 1)
    for (int i = 0; i < N; i++) {
      d_A[i] = d_B[i] + d_C[i] + 1;
    }
  }

  // device -> host copy of the result
  omp_target_memcpy(pA, d_A, N * sizeof(int), 0, 0, omp_get_initial_device(),
                    device);

  // verify A[i] == B[i] + C[i] + 1 == i + 2 for every element
  // CHECK: Test omp_target_memcpy: Succeeded
  int fail = 0;
  for (int i = 0; i < N; ++i) {
    if (A[i] != i + 2)
      fail++;
  }
  if (fail) {
    printf("Test omp_target_memcpy: Failed\n");
  } else {
    printf("Test omp_target_memcpy: Succeeded\n");
  }

  //
  // target_is_present and target_associate/disassociate_ptr
  //
  init(A, B, C);

  // CHECK: B is not present, associating it...
  // CHECK: omp_target_associate_ptr B succeeded
  if (!omp_target_is_present(B, device)) {
    printf("B is not present, associating it...\n");
    int rc = omp_target_associate_ptr(B, d_B, N * sizeof(int), 0, device);
    printf("omp_target_associate_ptr B %s\n", !rc ? "succeeded" : "failed");
  }

  // CHECK: C is not present, associating it...
  // CHECK: omp_target_associate_ptr C succeeded
  if (!omp_target_is_present(C, device)) {
    printf("C is not present, associating it...\n");
    int rc = omp_target_associate_ptr(C, d_C, N * sizeof(int), 0, device);
    printf("omp_target_associate_ptr C %s\n", !rc ? "succeeded" : "failed");
  }

  // B and C are associated above, A is not, so only A gets mapped here.
  // CHECK: Inside target data: A is not present
  // CHECK: Inside target data: B is present
  // CHECK: Inside target data: C is present
#pragma omp target data map(from : B, C) device(device)
  {
    printf("Inside target data: A is%s present\n",
           omp_target_is_present(A, device) ? "" : " not");
    printf("Inside target data: B is%s present\n",
           omp_target_is_present(B, device) ? "" : " not");
    printf("Inside target data: C is%s present\n",
           omp_target_is_present(C, device) ? "" : " not");

#pragma omp target map(from : A) device(device)
    {
#pragma omp parallel for schedule(static, 1)
      for (int i = 0; i < N; i++)
        A[i] = B[i] + C[i] + 1;
    }
  }

  // CHECK: B is present, disassociating it...
  // CHECK: omp_target_disassociate_ptr B succeeded
  // CHECK: C is present, disassociating it...
  // CHECK: omp_target_disassociate_ptr C succeeded
  if (omp_target_is_present(B, device)) {
    printf("B is present, disassociating it...\n");
    int rc = omp_target_disassociate_ptr(B, device);
    printf("omp_target_disassociate_ptr B %s\n", !rc ? "succeeded" : "failed");
  }
  if (omp_target_is_present(C, device)) {
    printf("C is present, disassociating it...\n");
    int rc = omp_target_disassociate_ptr(C, device);
    printf("omp_target_disassociate_ptr C %s\n", !rc ? "succeeded" : "failed");
  }

  // same verification as before, now via the associated pointers
  // CHECK: Test omp_target_associate_ptr: Succeeded
  fail = 0;
  for (int i = 0; i < N; ++i) {
    if (A[i] != i + 2)
      fail++;
  }
  if (fail) {
    printf("Test omp_target_associate_ptr: Failed\n");
  } else {
    printf("Test omp_target_associate_ptr: Succeeded\n");
  }

  omp_target_free(d_A, device);
  omp_target_free(d_B, device);
  omp_target_free(d_C, device);

  printf("Done!\n");

  return 0;
}
|
GB_unop__atanh_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__atanh_fc64_fc64)
// op(A') function: GB (_unop_tran__atanh_fc64_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = catanh (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = catanh (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = aij ; \
Cx [pC] = catanh (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ATANH || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply Cx [p] = catanh (Ax [p]) over anz entries, in parallel.
// Auto-generated kernel: the types/operator are baked in via the GB_* macros
// defined above in this file.
GrB_Info GB (_unop_apply__atanh_fc64_fc64)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap; NULL otherwise
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // p is the loop variable of the parallel-for below; OpenMP privatizes it
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // dense/sparse case: every slot 0..anz-1 holds a live entry
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op with identical types: a plain memcpy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;                // cast (no-op here)
            Cx [p] = catanh (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b,
        // so only slots flagged in Ab need the operator applied
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;                // cast (no-op here)
            Cx [p] = catanh (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = catanh (A'): transpose A and apply the unary operator.
// The actual algorithm lives in the shared template GB_unop_transpose.c,
// which is specialized at compile time by the GB_* macros above.
GrB_Info GB (_unop_tran__atanh_fc64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // per-thread workspaces for the transpose
    const int64_t *restrict A_slice,    // partition of A's entries over tasks
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
DRB068-restrictpointer2-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
The restrict type qualifier is an indication to the compiler that,
if the memory addressed by the restrict -qualified pointer is modified, no other pointer will access that same memory.
If a particular chunk of memory is not modified, it can be aliased through more than one restricted pointer.
A C99 restrict feature.
For gcc, you must use -std=c99 to compile this program.
*/
#include <stdlib.h>
#include <stdio.h>
// Initialize the three arrays in parallel: a <- 1, b <- ramp, c <- squares.
// Each iteration writes only its own index i, and the restrict qualifiers
// assert that a, b and c do not alias, so the parallel loop is race-free
// (this is the point of this DataRaceBench case).
void init(int n, int * restrict a, int * restrict b, int * restrict c)
{
  int i;
#pragma omp parallel for private(i)
  for (i = 0; i < n; i++) {
    a[i] = 1;
    b[i] = i;
    c[i] = i * i;
  }
}
// Compute a[i] = b[i] + c[i] elementwise in parallel. The restrict
// qualifiers guarantee the output does not alias the inputs, so there is
// no data race despite the concurrent reads/writes.
void foo(int n, int * restrict a, int * restrict b, int * restrict c)
{
  int i;
#pragma omp parallel for private(i)
  for (i = 0; i < n; i++)
    a[i] = b[i] + c[i];
}
// Print the three arrays, one "a b c" triple per line.
void print(int n, int * restrict a, int * restrict b, int * restrict c)
{
  for (int idx = 0; idx < n; ++idx)
    printf("%d %d %d\n", a[idx], b[idx], c[idx]);
}
// Driver: allocate three length-n vectors, run the parallel init/sum/print
// pipeline, and release the memory. Returns 1 (skip) on allocation failure.
int main()
{
  int n = 1000;
  int * a , *b, *c;

  a = (int*) malloc (n* sizeof (int));
  if (a ==0)
  {
    fprintf (stderr, "skip the execution due to malloc failures.\n");
    return 1;
  }
  b = (int*) malloc (n* sizeof (int));
  if (b ==0)
  {
    fprintf (stderr, "skip the execution due to malloc failures.\n");
    free (a); // FIX: release the earlier allocation before bailing out
    return 1;
  }
  c = (int*) malloc (n* sizeof (int));
  if (c ==0)
  {
    fprintf (stderr, "skip the execution due to malloc failures.\n");
    free (a); // FIX: release the earlier allocations before bailing out
    free (b);
    return 1;
  }

  init (n, a, b,c);
  foo (n, a, b,c);
  print (n, a, b,c);

  free (a);
  free (b);
  free (c);
  return 0;
}
|
mpi_static_mandelbrot.c | //
// mandelbrot.c
//
//
// The Mandelbrot calculation is to iterate the equation
// z = z*z + c, where z and c are complex numbers, z is initially
// zero, and c is the coordinate of the point being tested. If
// the magnitude of z remains less than 2 for ever, then the point
// c is in the Mandelbrot set. In this code We write out the number of iterations
// before the magnitude of z exceeds 2, or UCHAR_MAX, whichever is
// smaller.//
//
//
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <time.h>
#include <mpi.h>
#include <stdio.h>
#include <unistd.h>
#include <omp.h>
/*void color(int red, int green, int blue)
{
fputc((char)red, stdout);
fputc((char)green, stdout);
fputc((char)blue, stdout);
}*/
// Hybrid MPI+OpenMP Mandelbrot renderer. Rows are statically partitioned
// across MPI ranks (the last rank takes the h%size leftovers); each rank
// renders its rows with an OpenMP parallel loop; rank 0 gathers all blocks
// and writes a binary PPM image to "sortida.ppm".
// Usage: prog <width> <height> <maxIterations>
int main (int argc, char* argv[])
{
    int rank, size;                           // process id and communicator size

    MPI_Init (&argc, &argv);                  /* starts MPI */
    MPI_Comm_rank (MPI_COMM_WORLD, &rank);    // get current process id
    MPI_Comm_size (MPI_COMM_WORLD, &size);    // get number of processes

    int w = atoi(argv[1]), h = atoi(argv[2]), x, y; // width, height from arguments
    double pr, pi;                            // real/imaginary part of the point c
    double newRe, newIm, oldRe, oldIm;        // real and imaginary parts of new and old z
    double zoom = 1, moveX = -0.5, moveY = 0; // change these to zoom and reposition
    int maxIterations = atoi(argv[3]);        // iteration cap per pixel
    double start, elapsed;

    int rowsForWorker = h/size;               // rows each worker has to process
    int baseRows = rowsForWorker;             // row count of every rank except (possibly) the last
    int begin, end;

    start = MPI_Wtime();

    begin = rowsForWorker*rank;               // first row this process computes
    end   = rowsForWorker*rank + rowsForWorker; // one past its last row

    typedef unsigned char pixelType[3];       // one RGB pixel
    FILE * sortida;                           // output file for the assembled image

    if (rank == size-1) {
        // the last process also takes the h%size leftover rows
        end = end + h%size;
        rowsForWorker = rowsForWorker + h%size;
    }

    pixelType *pixelsWorker = malloc(sizeof(pixelType)*rowsForWorker*w);

    // render this rank's rows; iterations are independent, so the row loop
    // can be shared among OpenMP threads
    #pragma omp parallel for shared(pixelsWorker,moveX,moveY,zoom) private(x,y,pr,pi,newRe,newIm,oldRe,oldIm) schedule(static)
    for (y = begin; y < end; y++) {
        for (x = 0; x < w; x++)
        {
            // initial real and imaginary part of z, based on the pixel
            // location and the zoom and position values
            pr = 1.5 * (x - w / 2) / (0.5 * zoom * w) + moveX;
            pi = (y - h / 2) / (0.5 * zoom * h) + moveY;
            newRe = newIm = oldRe = oldIm = 0; // these should start at 0,0
            int i; // number of iterations performed
            for (i = 0; i < maxIterations; i++)
            {
                // remember value of previous iteration
                oldRe = newRe;
                oldIm = newIm;
                // the actual iteration: z = z*z + c
                newRe = oldRe * oldRe - oldIm * oldIm + pr;
                newIm = 2 * oldRe * oldIm + pi;
                // if the point is outside the circle with radius 2: stop
                if ((newRe * newRe + newIm * newIm) > 4) break;
            }
            if (i == maxIterations) {
                // never escaped: point is in the set, paint it black
                pixelsWorker[((y%rowsForWorker)*w+x)][0] = (char)0;
                pixelsWorker[((y%rowsForWorker)*w+x)][1] = (char)0;
                pixelsWorker[((y%rowsForWorker)*w+x)][2] = (char)0;
            }
            else
            {
                // escaped: smooth brightness derived from the escape speed
                double z = sqrt(newRe * newRe + newIm * newIm);
                int brightness = 256 * log2(1.75 + i - log2(log2(z))) / log2((double)maxIterations);
                pixelsWorker[((y%rowsForWorker)*w+x)][0] = (char)brightness;
                pixelsWorker[((y%rowsForWorker)*w+x)][1] = (char)brightness;
                pixelsWorker[((y%rowsForWorker)*w+x)][2] = (char)255;
            }
        }
    }

    if (rank == 0) {
        int proc = 0;
        int i = 0;
        int j = 0;
        sortida = fopen("sortida.ppm","wb");
        fprintf(sortida, "P6\n# CREATOR: Eric R. Weeks / mandel program\n");
        fprintf(sortida, "%d %d\n255\n", w, h);
        for (proc = 0; proc < size; proc++) {
            // FIX: the last worker sends baseRows + h%size rows; the old code
            // received (and wrote) only baseRows rows for every worker, which
            // truncated the incoming MPI message and dropped the last rows of
            // the image. Size the receive per worker instead.
            int procRows = (proc == size-1) ? baseRows + h%size : baseRows;
            pixelType *buf = pixelsWorker; // proc 0: rank 0's own pixels
            if (proc != 0) {
                // FIX: use a separate receive buffer (the old code overwrote
                // pixelsWorker, leaking rank 0's block and one malloc per worker)
                buf = malloc(sizeof(pixelType)*procRows*w);
                MPI_Recv(buf, sizeof(pixelType)*procRows*w, MPI_CHAR, proc, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            }
            for (i = 0; i < procRows; i++) {
                for (j = 0; j < w; j++) {
                    fwrite(buf[i*w+j], 1, sizeof(pixelType), sortida);
                }
            }
            if (proc != 0)
                free(buf);
        }
        elapsed = MPI_Wtime() - start;
        fprintf(stderr, "Elapsed time: %.2lf seconds.\n", elapsed);
        fclose(sortida);
        free(pixelsWorker);
    } else {
        MPI_Send(pixelsWorker, sizeof(pixelType)*rowsForWorker*w, MPI_CHAR, 0, 0, MPI_COMM_WORLD);
        free(pixelsWorker); // FIX: workers previously leaked their pixel buffer
    }

    MPI_Finalize();
    return 0;
}
|
SumaVectoresC.c | /* SumaVectoresC.c
Suma de dos vectores: v3 = v1 + v2
Para compilar usar (-lrt: real time library):
gcc -O2 SumaVectores.c -o SumaVectores -lrt
Para ejecutar use: SumaVectoresC longitud
*/
#include <stdlib.h> // biblioteca con funciones atoi(), malloc() y free()
#include <stdio.h> // biblioteca donde se encuentra la función printf()
#include <time.h> // biblioteca donde se encuentra la función clock_gettime()
#include <omp.h>
//#define PRINTF_ALL // comentar para quitar el printf ...
// que imprime todos los componentes
//Sólo puede estar definida una de las tres constantes VECTOR_ (sólo uno de los ...
//tres defines siguientes puede estar descomentado):
//#define VECTOR_LOCAL // descomentar para que los vectores sean variables ...
// locales (si se supera el tamaño de la pila se ...
// generará el error "Violación de Segmento")
#define VECTOR_GLOBAL// descomentar para que los vectores sean variables ...
// globales (su longitud no estará limitada por el ...
// tamaño de la pila del programa)
//#define VECTOR_DYNAMIC // descomentar para que los vectores sean variables ...
// dinámicas (memoria reutilizable durante la ejecución)
#ifdef VECTOR_GLOBAL
#define MAX 33554432 //=2^25
double v1[MAX], v2[MAX], v3[MAX];
#endif
// Sum two vectors (v3 = v1 + v2) of length N (taken from argv[1]) and print
// the elapsed time of the sum. Storage is selected at compile time via the
// VECTOR_LOCAL / VECTOR_GLOBAL / VECTOR_DYNAMIC defines above.
int main(int argc, char** argv){
    int i;
    double ncgt; // elapsed time of the vector sum, in seconds

    // read the input argument (number of vector components)
    if (argc<2){
        printf("Faltan nº componentes del vector\n");
        exit(-1);
    }

    unsigned int N = atoi(argv[1]); // max N = 2^32-1 = 4294967295 (sizeof(unsigned int) = 4 B)
#ifdef VECTOR_LOCAL
    double v1[N], v2[N], v3[N]; // variable-length local arrays (C99);
                                // exceeding the stack size segfaults
#endif
#ifdef VECTOR_GLOBAL
    if (N>MAX) N=MAX;           // clamp to the statically allocated size
#endif
#ifdef VECTOR_DYNAMIC
    double *v1, *v2, *v3;
    v1 = (double*) malloc(N*sizeof(double)); // malloc takes the size in bytes
    v2 = (double*) malloc(N*sizeof(double)); // returns NULL when out of memory
    v3 = (double*) malloc(N*sizeof(double));
    if ( (v1==NULL) || (v2==NULL) || (v3==NULL) ){
        printf("Error en la reserva de espacio para los vectores\n");
        exit(-2);
    }
#endif

    // initialize the input vectors (values depend on N)
    for(i=0; i<N; i++){
        v1[i] = N*0.1+i*0.1; v2[i] = N*0.1-i*0.1;
    }

    //******************************************************************************
    double t1 = omp_get_wtime();
    // compute the vector sum.
    // FIX: the original "#pragma omp for" is an orphaned worksharing construct
    // (no enclosing parallel region), so the loop ran serially; "parallel for"
    // actually distributes the iterations across threads.
#pragma omp parallel for
    for(i=0; i<N; i++)
        v3[i] = v1[i] + v2[i];
    double t2 = omp_get_wtime();
    //******************************************************************************

    // FIX: ncgt was previously computed from the timespec structs cgt1/cgt2,
    // whose clock_gettime() fills were commented out — i.e. from uninitialized
    // memory. Use the omp_get_wtime() difference instead.
    ncgt = t2 - t1;

    // print the result of the sum and the execution time
#ifdef PRINTF_ALL
    printf("Tiempo(seg.):%11.9f\t / Tamaño Vectores:%u\n",ncgt,N);
    for(i=0; i<N; i++)
        printf("/ V1[%d]+V2[%d]=V3[%d](%8.6f+%8.6f=%8.6f) /\n",
               i,i,i,v1[i],v2[i],v3[i]);
#else
    printf("Tiempo(seg.):%11.9f\t / Tamaño Vectores:%u\t/ V1[0]+V2[0]=V3[0](%8.6f+%8.6f=%8.6f) V1[%d]+V2[%d]=V3[%d](%8.6f+%8.6f=%8.6f) /\n", ncgt,N,v1[0],v2[0],v3[0],N-1,N-1,N-1,v1[N-1],v2[N-1],v3[N-1]);
#endif
#ifdef VECTOR_DYNAMIC
    free(v1); // release the space reserved for v1
    free(v2); // release the space reserved for v2
    free(v3); // release the space reserved for v3
#endif
    return 0;
}
|
GB_binop__second_fp32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__second_fp32)
// A.*B function (eWiseMult): GB (_AemultB)
// A.*B function (eWiseMult): GB (_AemultB_02__second_fp32)
// A.*B function (eWiseMult): GB (_AemultB_03__second_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__second_fp32)
// A*D function (colscale): GB (_AxD__second_fp32)
// D*A function (rowscale): GB (_DxB__second_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__second_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__second_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__second_fp32)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB (_bind2nd__second_fp32)
// C=A'+scalar GB (_bind2nd_tran__second_fp32)
// C type: float
// A type: float
// B,b type: float
// BinaryOp: cij = bij
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
float bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = y ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
1
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SECOND || GxB_NO_FP32 || GxB_NO_SECOND_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// C += A+B, all three matrices dense. Disabled (#if 0) for this kernel:
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV —
// SECOND is not in that list, so the generator emitted no function here.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Generated kernel (SECOND, fp32): C = A+B with all three matrices dense.
// The loop body lives entirely in the included template; this wrapper only
// supplies the macro context defined above.
GrB_Info GB (_Cdense_ewise3_noaccum__second_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// Generated kernel: C += B, accumulating a sparse B into a dense C.
// B_ek_slicing describes the precomputed task partition of B's entries.
GrB_Info GB (_Cdense_accumB__second_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// Generated kernel: C += b, accumulating a scalar into a dense C.
// NOTE(review): the template block already returns GrB_SUCCESS, so the
// second return below is unreachable — harmless generator artifact.
GrB_Info GB (_Cdense_accumb__second_fp32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Generated kernel: C = A*D (column scale by diagonal D).
// With SECOND, each cij takes the value from D.
GrB_Info GB (_AxD__second_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Generated kernel: C = D*B (row scale by diagonal D).
// With SECOND, each cij takes the value from B.
GrB_Info GB (_DxB__second_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// Generated kernel: eWiseAdd C = A+B (optionally masked).  Workspace for
// slicing M/A/B is declared here and released via GB_FREE_WORK; the actual
// union-pattern loop is in the included template.
GrB_Info GB (_AaddB__second_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// Generated kernel: eWiseMult C = A.*B (intersection pattern), method 01.
GrB_Info GB (_AemultB_01__second_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// Generated kernel: eWiseMult C<#> = A.*B with A sparse/hyper, B bitmap/full.
// GB_BINOP_FLIP is 0 for SECOND, so only the non-flipped branch compiles in.
GrB_Info GB (_AemultB_02__second_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Generated kernel: eWiseMult C<M> = A.*B with M sparse/hyper and
// A, B both bitmap/full.
GrB_Info GB (_AemultB_03__second_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Generated kernel: eWiseMult where the result C is held in bitmap form,
// with optional (possibly complemented) mask M.
GrB_Info GB (_AemultB_bitmap__second_fp32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
// Compiled-out stub: bind1st of SECOND(x,y) would be a constant-x apply,
// which the generator folds away, so no named kernel is emitted.
GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float   x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        float bij = Bx [p] ;
        Cx [p] = bij ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Generated kernel: Cx = op(Ax, y) with y bound as the second argument.
// Since SECOND returns y, every present entry of C is set to the scalar y;
// the stray ";" statements are generator artifacts of the empty GB_GETA.
GrB_Info GB (_bind2nd__second_fp32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float   y = (*((float *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        ;  ;
        Cx [p] = y ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// Compiled-out stub: bind1st-with-transpose is not emitted for SECOND.
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    float aij = Ax [pA] ;                       \
    Cx [pC] = aij ;                             \
}
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// Generated kernel: C = op(A', y) — transpose A while applying bind2nd.
// For SECOND the per-entry "cast" simply stores the scalar y.
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    ;  ;                                        \
    Cx [pC] = y ;                               \
}
GrB_Info GB (_bind2nd_tran__second_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
prop_container.h | // -*- mode:c++; c-basic-offset:4 -*-
#ifndef PROP_CONTAINER_H_KL3
#define PROP_CONTAINER_H_KL3
#include <vector>
#include <string>
#include <util/gjp.h>
#include <util/verbose.h>
#include <alg/qpropw.h>
#include <omp.h>
#include <cassert>
#include "my_util.h"
// Propagators from all time slices (including the case where the
// source is on the mirrored lattice).
// Propagators from all time slices (including the case where the
// source is on the mirrored lattice).
//
// Precision is chosen at construction: SINGLE stores WilsonMatrixS to
// halve memory, DOUBLE stores WilsonMatrix.  Only the vectors matching
// the chosen precision are ever populated.
class AllProp {
public:
    enum PREC { SINGLE, DOUBLE };

    AllProp(PREC _p)
        :lcl_vol(cps::GJP.VolNodeSites()),
         t_size_glb(cps::GJP.Sites(3)),
         prec(_p)
    {
        // Pre-size one (initially empty) slot per global time slice;
        // add() fills individual slots on demand.
        if(prec == SINGLE) {
            ps.assign(t_size_glb, std::vector<cps::WilsonMatrixS>());
            as.assign(t_size_glb, std::vector<cps::WilsonMatrixS>());
        } else {
            pd.assign(t_size_glb, std::vector<cps::WilsonMatrix>());
            ad.assign(t_size_glb, std::vector<cps::WilsonMatrix>());
        }
    }

    // return a WilsonMatrix according to the type of propagators
    //
    // t: source location (time slice), for P or A propagators this
    // can be any value in [0, t_size_glb), for P+A/P-A propagators
    // the size is doubled: t in [0, 2 * t_size_glb).
    //
    // i: sink location (4D coordinate), for P or A propagators this
    // can be anything in [0, lcl_vol), for P+A/P-A propagators
    // this can be anything in [0, 2 * lcl_vol).
    const cps::WilsonMatrix operator()(size_t i, size_t t, PROP_TYPE ptype)const {
        switch(ptype) {
        case PROP_P:
            assert(t < t_size_glb);
            assert(i < lcl_vol);
            return prec == SINGLE
                ? cps::WilsonMatrix(ps[t][i])
                : pd[t][i];
        case PROP_A:
            assert(t < t_size_glb);
            assert(i < lcl_vol);
            return prec == SINGLE
                ? cps::WilsonMatrix(as[t][i])
                : ad[t][i];
        case PROP_PA:
        {
            assert(t < 2 * t_size_glb);
            assert(i < 2 * lcl_vol);
            // P+A when t and i live on the same copy of the doubled
            // lattice, P-A otherwise.  Parentheses added for clarity:
            // '<' binds tighter than '==', so this matches the original
            // expression (t < t_size_glb == i < lcl_vol).
            const bool add = (t < t_size_glb) == (i < lcl_vol);
            if(t >= t_size_glb) t -= t_size_glb;
            if(i >= lcl_vol) i -= lcl_vol;
            cps::WilsonMatrix pi, ai;
            if(prec == SINGLE) {
                pi = ps[t][i];
                ai = as[t][i];
            } else {
                pi = pd[t][i];
                ai = ad[t][i];
            }
            return 0.5 * (add ? pi + ai : pi - ai);
        }
        default:
            // Unknown PROP_TYPE: abort in debug builds.  The return
            // avoids falling off the end of a non-void function (UB)
            // when NDEBUG compiles the assert away.
            assert(false);
            return cps::WilsonMatrix();
        }
    }

    // Test if a certain type of propagator is NOT calculated on a
    // given time slice.
    //
    // P+A/P-A propagator on a given time slice requires both periodic
    // and antiperiodic propagators.
    bool empty(size_t t, PROP_TYPE ptype)const {
        switch(ptype) {
        case PROP_P:
            assert(t < t_size_glb);
            return prec == SINGLE
                ? ps[t].empty()
                : pd[t].empty();
        case PROP_A:
            assert(t < t_size_glb);
            return prec == SINGLE
                ? as[t].empty()
                : ad[t].empty();
        case PROP_PA:
            assert(t < 2 * t_size_glb);
            if(t >= t_size_glb) t -= t_size_glb;
            if(prec == SINGLE) {
                return ps[t].empty() || as[t].empty();
            } else {
                return pd[t].empty() || ad[t].empty();
            }
        default:
            // Unknown PROP_TYPE: report "not available".  Return keeps
            // the function well-defined under NDEBUG (see operator()).
            assert(false);
            return true;
        }
    }

    // Drop all periodic-BC propagators (both precisions), keeping the
    // per-time-slice slots so add() can refill them.
    void clearP(void) {
        for(unsigned i = 0; i < ps.size(); ++i) {
            ps[i].clear();
        }
        for(unsigned i = 0; i < pd.size(); ++i) {
            pd[i].clear();
        }
    }

    // Drop all antiperiodic-BC propagators (both precisions).
    void clearA(void) {
        for(unsigned i = 0; i < as.size(); ++i) {
            as[i].clear();
        }
        for(unsigned i = 0; i < ad.size(); ++i) {
            ad[i].clear();
        }
    }

    void clear(void) {
        clearP();
        clearA();
    }

    // Add a propagator where the source is located at time slice t.
    // If periodic == true then it will be treated as a P-boundary
    // condition propagator, otherwise it will be treated as an
    // A-boundary condition propagator.
    //
    // Potentially transforms the propagator to single precision to
    // save some memory.
    void add(cps::QPropW &qp, size_t t, bool periodic) {
        if(prec == SINGLE) {
            std::vector<cps::WilsonMatrixS> &wm = periodic ? ps[t] : as[t];
            assert(wm.empty());
            wm.resize(lcl_vol);
#pragma omp parallel for
            for(size_t i = 0; i < lcl_vol; ++i) {
                wm[i] = qp[i];
            }
        } else {
            std::vector<cps::WilsonMatrix> &wm = periodic ? pd[t] : ad[t];
            assert(wm.empty());
            wm.resize(lcl_vol);
#pragma omp parallel for
            for(size_t i = 0; i < lcl_vol; ++i) {
                wm[i] = qp[i];
            }
        }
    }

    // store all propagators I have.
    //
    //! IMPORTANT: This function assumes gauge fixed wall source!!!
    void store_all(const std::string &fn_stem, double mass, int traj)const;
private:
    // Serialize one time slice's worth of prop to file fn.
    void store(const std::string &fn,
               const std::vector<cps::WilsonMatrix> &prop,
               int t)const;
private:
    const size_t lcl_vol;          // local 4D volume (sites per node)
    const size_t t_size_glb;       // global temporal extent
    const PREC prec;               // storage precision, fixed at construction
    std::vector<std::vector<cps::WilsonMatrixS> > ps; // P prop (single)
    std::vector<std::vector<cps::WilsonMatrixS> > as; // A prop (single)
    std::vector<std::vector<cps::WilsonMatrix> >  pd; // P prop (double)
    std::vector<std::vector<cps::WilsonMatrix> >  ad; // A prop (double)
};
#endif
|
utils.c | #ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include "utils.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifndef _USE_MATH_DEFINES
#define _USE_MATH_DEFINES
#endif
#include <math.h>
#include <assert.h>
#include <float.h>
#include <limits.h>
#include "darkunistd.h"
#include <libgen.h>
#ifdef WIN32
#include "gettimeofday.h"
#else
#include <sys/time.h>
#include <sys/stat.h>
#endif
#ifndef USE_CMAKE_LIBS
#pragma warning(disable: 4996)
#endif
// Allocate 'size' bytes; on failure report via malloc_error() (which exits).
void *xmalloc(size_t size) {
    void *mem = malloc(size);
    if (mem == NULL) {
        malloc_error();
    }
    return mem;
}
// Allocate and zero nmemb*size bytes; aborts via calloc_error() on failure.
void *xcalloc(size_t nmemb, size_t size) {
    void *mem = calloc(nmemb, size);
    if (mem == NULL) {
        calloc_error();
    }
    return mem;
}
// Grow/shrink ptr to 'size' bytes; aborts via realloc_error() on failure.
void *xrealloc(void *ptr, size_t size) {
    void *mem = realloc(ptr, size);
    if (mem == NULL) {
        realloc_error();
    }
    return mem;
}
// Wall-clock time in seconds (with microsecond resolution), or 0 if
// gettimeofday() fails.
double what_time_is_it_now()
{
    struct timeval now;
    if (gettimeofday(&now, NULL) != 0) {
        return 0;
    }
    return (double)now.tv_sec + (double)now.tv_usec * .000001;
}
// Read one integer per line from 'filename' into a heap array; exits via
// file_error() if the file cannot be opened.  Caller frees the result.
// NOTE(review): the number of entries read is not returned — presumably
// callers know the expected count; confirm at call sites.
int *read_map(char *filename)
{
    int n = 0;
    int *map = 0;
    char *str;
    FILE *file = fopen(filename, "r");
    if(!file) file_error(filename);
    while((str=fgetl(file))){
        ++n;
        map = (int*)xrealloc(map, n * sizeof(int));
        map[n-1] = atoi(str);
        free(str);
    }
    if (file) fclose(file);
    return map;
}
// Partially shuffle: split the array into 'sections' contiguous chunks and
// shuffle each chunk independently (elements never cross chunk borders).
void sorta_shuffle(void *arr, size_t n, size_t size, size_t sections)
{
    size_t i;
    for(i = 0; i < sections; ++i){
        size_t start = n*i/sections;
        size_t end = n*(i+1)/sections;
        size_t num = end-start;
        shuffle((char*)arr+(start*size), num, size);
    }
}
// In-place Fisher-Yates-style shuffle of n elements of 'size' bytes each.
// Guard added: the original computed n-1 on an unsigned n, so n == 0
// wrapped to SIZE_MAX and the loop read far past the array.
void shuffle(void *arr, size_t n, size_t size)
{
    size_t i;
    void* swp;
    if (n < 2) return;    // 0 or 1 element: nothing to do, avoids n-1 underflow
    swp = (void*)xcalloc(1, size);
    for(i = 0; i < n-1; ++i){
        size_t j = i + random_gen()/(RAND_MAX / (n-i)+1);
        memcpy(swp,                (char*)arr+(j*size), size);
        memcpy((char*)arr+(j*size), (char*)arr+(i*size), size);
        memcpy((char*)arr+(i*size), swp,                size);
    }
    free(swp);
}
// Remove argv[index], shifting the following pointers left and
// NULL-terminating the vacated slot.
void del_arg(int argc, char **argv, int index)
{
    int pos;
    for (pos = index; pos + 1 < argc; ++pos) {
        argv[pos] = argv[pos + 1];
    }
    argv[pos] = 0;
}
// Return 1 and remove 'arg' from argv if present (flag-style option),
// otherwise return 0.  argv entries already deleted (NULL) are skipped.
int find_arg(int argc, char* argv[], char *arg)
{
    int i;
    for(i = 0; i < argc; ++i) {
        if(!argv[i]) continue;
        if(0==strcmp(argv[i], arg)) {
            del_arg(argc, argv, i);
            return 1;
        }
    }
    return 0;
}
// Return the integer value following option 'arg' (removing both tokens
// from argv), or 'def' if the option is absent.
int find_int_arg(int argc, char **argv, char *arg, int def)
{
    int i;
    for(i = 0; i < argc-1; ++i){
        if(!argv[i]) continue;
        if(0==strcmp(argv[i], arg)){
            def = atoi(argv[i+1]);
            // delete the option and its value (second del_arg sees the
            // shifted array, so index i now points at the value)
            del_arg(argc, argv, i);
            del_arg(argc, argv, i);
            break;
        }
    }
    return def;
}
// Return the float value following option 'arg' (removing both tokens
// from argv), or 'def' if the option is absent.
float find_float_arg(int argc, char **argv, char *arg, float def)
{
    int i;
    for(i = 0; i < argc-1; ++i){
        if(!argv[i]) continue;
        if(0==strcmp(argv[i], arg)){
            def = atof(argv[i+1]);
            del_arg(argc, argv, i);
            del_arg(argc, argv, i);
            break;
        }
    }
    return def;
}
// Return the string following option 'arg' (removing both tokens from
// argv), or 'def' if absent.  The returned pointer aliases argv storage.
char *find_char_arg(int argc, char **argv, char *arg, char *def)
{
    int i;
    for(i = 0; i < argc-1; ++i){
        if(!argv[i]) continue;
        if(0==strcmp(argv[i], arg)){
            def = argv[i+1];
            del_arg(argc, argv, i);
            del_arg(argc, argv, i);
            break;
        }
    }
    return def;
}
// Extract the base name of a config path: strip directories ('/' then '\')
// and truncate at the first '.'.  Returns a heap copy (caller frees).
char *basecfg(char *cfgfile)
{
    char *c = cfgfile;
    char *next;
    while((next = strchr(c, '/')))
    {
        c = next+1;
    }
    // no forward slashes: try Windows-style separators instead
    if(!next) while ((next = strchr(c, '\\'))) { c = next + 1; }
    c = copy_string(c);
    next = strchr(c, '.');
    if (next) *next = 0;
    return c;
}
// Map '0'-'9' -> 0-9 and 'a'-'z' -> 10-35 (no validation of the input).
int alphanum_to_int(char c)
{
    if (c < 58) {
        return c - '0';
    }
    return c - 87;
}
// Inverse of alphanum_to_int: 0-9 -> '0'-'9', 10-35 -> 'a'-'z', 36 -> '.'.
char int_to_alphanum(int i)
{
    if (i == 36) {
        return '.';
    }
    if (i < 10) {
        return (char)(i + '0');
    }
    return (char)(i + 87);
}
// Debug helper: print an M x N matrix A (row-major) with 1-based row labels.
void pm(int M, int N, float *A)
{
    int i,j;
    for(i =0 ; i < M; ++i){
        printf("%d ", i+1);
        for(j = 0; j < N; ++j){
            printf("%2.4f, ", A[i*N+j]);
        }
        printf("\n");
    }
    printf("\n");
}
// Copy 'str' into 'output', replacing the FIRST occurrence of 'orig' with
// 'rep'.  Works through a fixed 8 KiB scratch buffer, so inputs must be
// shorter than that; 'output' must be large enough for the result.
void find_replace(const char* str, char* orig, char* rep, char* output)
{
    char* work = (char*)calloc(8192, sizeof(char));
    char* hit;
    sprintf(work, "%s", str);
    hit = strstr(work, orig);
    if (hit == NULL) {
        // 'orig' not present: pass the input through unchanged
        sprintf(output, "%s", work);
    } else {
        *hit = '\0';
        sprintf(output, "%s%s%s", work, rep, hit + strlen(orig));
    }
    free(work);
}
// Strip leading and trailing spaces/tabs from 'str' in place.
// Rewritten without the fixed 8 KiB scratch buffer; this also fixes the
// original's buffer-underrun when the string was empty or all whitespace
// (its 'end' pointer walked backwards past the start of the buffer).
void trim(char *str)
{
    char *start = str;
    size_t len;
    while (*start == ' ' || *start == '\t') ++start;
    len = strlen(start);
    while (len > 0 && (start[len - 1] == ' ' || start[len - 1] == '\t')) --len;
    memmove(str, start, len);   // regions may overlap, so memmove not memcpy
    str[len] = '\0';
}
// Copy 'str' into 'output', replacing 'orig' with 'rep' only when the
// first occurrence of 'orig' sits exactly at the end of the string
// (extension replacement).  Fix: the original computed p - buffer before
// checking p for NULL, which is undefined behavior when 'orig' is absent.
void find_replace_extension(char *str, char *orig, char *rep, char *output)
{
    char* buffer = (char*)calloc(8192, sizeof(char));
    sprintf(buffer, "%s", str);
    char *p = strstr(buffer, orig);
    // strlen(p) equals the distance from the match to the end, so the
    // match is a suffix exactly when strlen(p) == strlen(orig).
    if (!p || strlen(p) != strlen(orig)) {
        sprintf(output, "%s", buffer);
        free(buffer);
        return;
    }
    *p = '\0';
    sprintf(output, "%s%s%s", buffer, rep, p + strlen(orig));
    free(buffer);
}
// Derive the label-file path for an image path: rewrite known dataset
// directory names (COCO / PascalVOC, both path separators) to "labels",
// then swap a recognized image extension for ".txt".  Warns on stderr if
// the result does not end in ".txt".
void replace_image_to_label(const char* input_path, char* output_path)
{
    find_replace(input_path, "/images/train2014/", "/labels/train2014/", output_path);    // COCO
    find_replace(output_path, "/images/val2014/", "/labels/val2014/", output_path);        // COCO
    find_replace(output_path, "/JPEGImages/", "/labels/", output_path);    // PascalVOC
    find_replace(output_path, "\\images\\train2014\\", "\\labels\\train2014\\", output_path);    // COCO
    find_replace(output_path, "\\images\\val2014\\", "\\labels\\val2014\\", output_path);        // COCO
    find_replace(output_path, "\\JPEGImages\\", "\\labels\\", output_path);    // PascalVOC
    //find_replace(output_path, "/images/", "/labels/", output_path);    // COCO
    //find_replace(output_path, "/VOC2007/JPEGImages/", "/VOC2007/labels/", output_path);        // PascalVOC
    //find_replace(output_path, "/VOC2012/JPEGImages/", "/VOC2012/labels/", output_path);        // PascalVOC
    //find_replace(output_path, "/raw/", "/labels/", output_path);
    trim(output_path);
    // replace only ext of files
    find_replace_extension(output_path, ".jpg", ".txt", output_path);
    find_replace_extension(output_path, ".JPG", ".txt", output_path); // error
    find_replace_extension(output_path, ".jpeg", ".txt", output_path);
    find_replace_extension(output_path, ".JPEG", ".txt", output_path);
    find_replace_extension(output_path, ".png", ".txt", output_path);
    find_replace_extension(output_path, ".PNG", ".txt", output_path);
    find_replace_extension(output_path, ".bmp", ".txt", output_path);
    find_replace_extension(output_path, ".BMP", ".txt", output_path);
    find_replace_extension(output_path, ".ppm", ".txt", output_path);
    find_replace_extension(output_path, ".PPM", ".txt", output_path);
    find_replace_extension(output_path, ".tiff", ".txt", output_path);
    find_replace_extension(output_path, ".TIFF", ".txt", output_path);
    // Check file ends with txt:
    if(strlen(output_path) > 4) {
        char *output_path_ext = output_path + strlen(output_path) - 4;
        if( strcmp(".txt", output_path_ext) != 0){
            fprintf(stderr, "Failed to infer label file name (check image extension is supported): %s \n", output_path);
        }
    }else{
        fprintf(stderr, "Label file name is too short: %s \n", output_path);
    }
}
// Convert a clock() tick count to seconds.
float sec(clock_t clocks)
{
    float ticks = (float)clocks;
    return ticks / CLOCKS_PER_SEC;
}
// Write the indices of the k largest values of a[0..n) into index[],
// sorted by descending value.  Unused slots (k > n) remain -1.
void top_k(float *a, int n, int k, int *index)
{
    int i, j;
    for (j = 0; j < k; ++j) {
        index[j] = -1;
    }
    for (i = 0; i < n; ++i) {
        // Insertion pass: push candidate 'cand' down the ranking, swapping
        // it into any slot that is empty or holds a smaller value.  Empty
        // (-1) slots form a suffix, so a[cand] is never read with cand == -1.
        int cand = i;
        for (j = 0; j < k; ++j) {
            if (index[j] < 0 || a[cand] > a[index[j]]) {
                int displaced = index[j];
                index[j] = cand;
                cand = displaced;
            }
        }
    }
}
// Report a fatal error: print s with errno context, abort in debug builds
// (assert), and exit otherwise.  Never returns.
void error(const char *s)
{
    perror(s);
    assert(0);
    exit(EXIT_FAILURE);
}
// Fatal: xmalloc failed.  Never returns.
void malloc_error()
{
    fprintf(stderr, "xMalloc error\n");
    exit(EXIT_FAILURE);
}
// Fatal: xcalloc failed.  Never returns.
void calloc_error()
{
    fprintf(stderr, "Calloc error\n");
    exit(EXIT_FAILURE);
}
// Fatal: xrealloc failed.  Never returns.
void realloc_error()
{
    fprintf(stderr, "Realloc error\n");
    exit(EXIT_FAILURE);
}
// Fatal: a required file could not be opened.  Never returns.
void file_error(char *s)
{
    fprintf(stderr, "Couldn't open file: %s\n", s);
    exit(EXIT_FAILURE);
}
// Split s on 'delim' IN PLACE (each delimiter byte becomes '\0') and
// return a list of pointers into s.  The list aliases s; s must outlive it.
list *split_str(char *s, char delim)
{
    size_t i;
    size_t len = strlen(s);
    list *l = make_list();
    list_insert(l, s);
    for(i = 0; i < len; ++i){
        if(s[i] == delim){
            s[i] = '\0';
            list_insert(l, &(s[i+1]));
        }
    }
    return l;
}
// Remove ALL whitespace characters (space, tab, CR, LF) from s in place,
// including interior ones — not just leading/trailing.
void strip(char *s)
{
    size_t len = strlen(s);
    size_t i;
    char *dst = s;
    for (i = 0; i < len; ++i) {
        char c = s[i];
        int drop = (c == ' ' || c == '\t' || c == '\n' || c == '\r'
                    || c == 0x0d || c == 0x0a);
        if (!drop) {
            *dst++ = c;
        }
    }
    *dst = '\0';
}
// Like strip() but keeps spaces: removes only tabs, CR and LF from s,
// in place (interior occurrences included).
void strip_args(char *s)
{
    size_t i;
    size_t len = strlen(s);
    size_t offset = 0;
    for (i = 0; i < len; ++i) {
        char c = s[i];
        if (c == '\t' || c == '\n' || c == '\r' || c == 0x0d || c == 0x0a) ++offset;
        else s[i - offset] = c;
    }
    s[len - offset] = '\0';
}
// Remove every occurrence of the byte 'bad' from s, in place.
void strip_char(char *s, char bad)
{
    char *src;
    char *dst = s;
    for (src = s; *src != '\0'; ++src) {
        if (*src != bad) {
            *dst++ = *src;
        }
    }
    *dst = '\0';
}
// Free each of the n pointers in ptrs, then the pointer array itself.
void free_ptrs(void **ptrs, int n)
{
    int idx;
    for (idx = n - 1; idx >= 0; --idx) {
        free(ptrs[idx]);
    }
    free(ptrs);
}
// Read one full line from fp into a freshly allocated buffer, growing it
// as needed.  Strips the trailing LF (and a CR before it).  Returns NULL
// at EOF or read failure; caller frees the result.
char *fgetl(FILE *fp)
{
    if(feof(fp)) return 0;
    size_t size = 512;
    char* line = (char*)xmalloc(size * sizeof(char));
    if(!fgets(line, size, fp)){
        free(line);
        return 0;
    }
    size_t curr = strlen(line);
    // keep reading until the buffer ends in '\n' or the file is exhausted
    while((line[curr-1] != '\n') && !feof(fp)){
        if(curr == size-1){
            size *= 2;
            line = (char*)xrealloc(line, size * sizeof(char));
        }
        size_t readsize = size-curr;
        if(readsize > INT_MAX) readsize = INT_MAX-1;   // fgets takes an int count
        fgets(&line[curr], readsize, fp);
        curr = strlen(line);
    }
    // drop DOS CRLF / trailing newline
    if(curr >= 2)
        if(line[curr-2] == 0x0d) line[curr-2] = 0x00;
    if(curr >= 1)
        if(line[curr-1] == 0x0a) line[curr-1] = 0x00;
    return line;
}
// Read one binary int from fd; return -1 on EOF or error.
// NOTE(review): -1 is also a legal payload value, so callers cannot
// distinguish it from failure — confirm acceptable at call sites.
int read_int(int fd)
{
    int n = 0;
    int next = read(fd, &n, sizeof(int));
    if(next <= 0) return -1;
    return n;
}
// Write one binary int to fd; fatal error (exit) on failure.
// Fix: the failure message said "read failed" — copy/paste from read_int.
void write_int(int fd, int n)
{
    int next = write(fd, &n, sizeof(int));
    if(next <= 0) error("write failed");
}
// Read exactly 'bytes' bytes from fd, looping over short reads.
// Returns 0 on success, 1 on EOF/error before completion.
int read_all_fail(int fd, char *buffer, size_t bytes)
{
    size_t n = 0;
    while(n < bytes){
        int next = read(fd, buffer + n, bytes-n);
        if(next <= 0) return 1;
        n += next;
    }
    return 0;
}
// Write exactly 'bytes' bytes to fd, looping over short writes.
// Returns 0 on success, 1 on error.
// Fix: write()'s return was stored in a size_t, so -1 wrapped to SIZE_MAX;
// 'next <= 0' never detected the error and n was corrupted.
int write_all_fail(int fd, char *buffer, size_t bytes)
{
    size_t n = 0;
    while(n < bytes){
        ssize_t next = write(fd, buffer + n, bytes-n);
        if(next <= 0) return 1;
        n += (size_t)next;
    }
    return 0;
}
// Read exactly 'bytes' bytes from fd; fatal error (exit) on EOF/failure.
void read_all(int fd, char *buffer, size_t bytes)
{
    size_t n = 0;
    while(n < bytes){
        int next = read(fd, buffer + n, bytes-n);
        if(next <= 0) error("read failed");
        n += next;
    }
}
// Write exactly 'bytes' bytes to fd; fatal error (exit) on failure.
// Fix: as in write_all_fail, write()'s -1 return wrapped when stored in a
// size_t, so errors were silently missed and n was corrupted.
void write_all(int fd, char *buffer, size_t bytes)
{
    size_t n = 0;
    while(n < bytes){
        ssize_t next = write(fd, buffer + n, bytes-n);
        if(next <= 0) error("write failed");
        n += (size_t)next;
    }
}
// Duplicate s into a freshly allocated buffer (like strdup, but using
// xmalloc so allocation failure is fatal).  Returns NULL if s is NULL.
char *copy_string(char *s)
{
    if(!s) {
        return NULL;
    }
    char* copy = (char*)xmalloc(strlen(s) + 1);
    strncpy(copy, s, strlen(s)+1);
    return copy;
}
// Split a CSV line into a list of heap-allocated field strings.  Commas
// inside double quotes are not treated as separators; 'line' is modified
// in place (separators become '\0').
list *parse_csv_line(char *line)
{
    list *l = make_list();
    char *c, *p;
    int in = 0;   // inside a quoted section?
    for(c = line, p = line; *c != '\0'; ++c){
        if(*c == '"') in = !in;
        else if(*c == ',' && !in){
            *c = '\0';
            list_insert(l, copy_string(p));
            p = c+1;
        }
    }
    list_insert(l, copy_string(p));
    return l;
}
// Number of comma-separated fields in 'line' (commas + 1; quoting is NOT
// honored here, unlike parse_csv_line).
int count_fields(char *line)
{
    int count = 1;          // the terminator closes the final field
    const char *c = line;
    for (;;) {
        if (*c == '\0') {
            break;
        }
        if (*c == ',') {
            ++count;
        }
        ++c;
    }
    return count;
}
// Parse up to n comma-separated floats from 'line' into a heap array.
// Empty or malformed fields become NaN; a field ending in '\r' (DOS line
// endings) is tolerated.  'line' is modified in place; caller frees result.
float *parse_fields(char *line, int n)
{
    float* field = (float*)xcalloc(n, sizeof(float));
    char *c, *p, *end;
    int count = 0;
    int done = 0;
    for(c = line, p = line; !done; ++c){
        done = (*c == '\0');
        if(*c == ',' || done){
            *c = '\0';
            field[count] = strtod(p, &end);
            if(p == c) field[count] = nan("");             // empty field
            if(end != c && (end != c-1 || *end != '\r')) field[count] = nan(""); //DOS file formats!
            p = c+1;
            ++count;
        }
    }
    return field;
}
// Sum of the first n elements of a.
float sum_array(float *a, int n)
{
    float total = 0;
    int idx;
    for (idx = 0; idx < n; ++idx) {
        total += a[idx];
    }
    return total;
}
// Arithmetic mean of a[0..n).  NOTE(review): divides by n with no guard,
// so n == 0 yields inf/NaN — confirm callers never pass 0.
float mean_array(float *a, int n)
{
    return sum_array(a,n)/n;
}
// Element-wise mean of n arrays of length 'els' into avg.
// The inner accumulation and the final division are OpenMP-parallel
// over elements; the outer loop over arrays stays sequential.
void mean_arrays(float **a, int n, int els, float *avg)
{
    int i;
    int j;
    memset(avg, 0, els*sizeof(float));
    for(j = 0; j < n; ++j){
        #pragma omp parallel for
        for(i = 0; i < els; ++i){
            avg[i] += a[j][i];
        }
    }
    #pragma omp parallel for
    for(i = 0; i < els; ++i){
        avg[i] /= n;
    }
}
// Print RMS (labeled MSE), mean and variance of a[0..n) to stdout.
void print_statistics(float *a, int n)
{
    float m = mean_array(a, n);
    float v = variance_array(a, n);
    printf("MSE: %.6f, Mean: %.6f, Variance: %.6f\n", mse_array(a, n), m, v);
}
// Population variance of a[0..n) (divides by n, not n-1).
float variance_array(float *a, int n)
{
    int i;
    float sum = 0;
    float mean = mean_array(a, n);
    for(i = 0; i < n; ++i) sum += (a[i] - mean)*(a[i]-mean);
    float variance = sum/n;
    return variance;
}
// Clamp a into [min, max].
int constrain_int(int a, int min, int max)
{
    return a < min ? min : (a > max ? max : a);
}
// Clamp a into [min, max].  Note the argument order (min, max, a) differs
// from constrain_int(a, min, max) — kept for caller compatibility.
float constrain(float min, float max, float a)
{
    return a < min ? min : (a > max ? max : a);
}
float dist_array(float *a, float *b, int n, int sub)
{
int i;
float sum = 0;
for(i = 0; i < n; i += sub) sum += pow(a[i]-b[i], 2);
return sqrt(sum);
}
float mse_array(float *a, int n)
{
int i;
float sum = 0;
for(i = 0; i < n; ++i) sum += a[i]*a[i];
return sqrt(sum/n);
}
// Standardize a in place: a[i] <- (a[i] - mean) / stddev.
// Cleanup: the original recomputed mean/stddev after the loop and
// discarded the results — dead work removed, behavior unchanged.
void normalize_array(float *a, int n)
{
    int i;
    float mu = mean_array(a,n);
    float sigma = sqrt(variance_array(a,n));
    for(i = 0; i < n; ++i){
        a[i] = (a[i] - mu)/sigma;
    }
}
// Add the scalar s to every element of a[0..n) in place.
void translate_array(float *a, int n, float s)
{
    float *p = a;
    float *end = a + n;
    while (p < end) {
        *p++ += s;
    }
}
float mag_array(float *a, int n)
{
int i;
float sum = 0;
for(i = 0; i < n; ++i){
sum += a[i]*a[i];
}
return sqrt(sum);
}
// indicies to skip is a bit array
float mag_array_skip(float *a, int n, int * indices_to_skip)
{
int i;
float sum = 0;
for (i = 0; i < n; ++i) {
if (indices_to_skip[i] != 1) {
sum += a[i] * a[i];
}
}
return sqrt(sum);
}
// Multiply every element of a[0..n) by s, in place.
void scale_array(float *a, int n, float s)
{
    int k = 0;
    while (k < n) {
        a[k] *= s;
        ++k;
    }
}
// Draw an index from a[] treated as (unnormalized) weights.
// SIDE EFFECT: normalizes a in place so it sums to 1.
int sample_array(float *a, int n)
{
    float sum = sum_array(a, n);
    scale_array(a, n, 1. / sum);
    float r = rand_uniform(0, 1);
    int i;
    for (i = 0; i < n; ++i) {
        r = r - a[i];
        if (r <= 0) return i;
    }
    return n - 1;   // numeric slack: r stayed positive, pick the last index
}
// Variant of sample_array with a rotated starting offset.
// NOTE(review): rand_int(0, 0) always yields 0, so start_index is a
// no-op as written — presumably a debugging leftover; confirm intent.
// SIDE EFFECT: normalizes a in place.
int sample_array_custom(float *a, int n)
{
    float sum = sum_array(a, n);
    scale_array(a, n, 1./sum);
    float r = rand_uniform(0, 1);
    int start_index = rand_int(0, 0);
    int i;
    for(i = 0; i < n; ++i){
        r = r - a[(i + start_index) % n];
        if (r <= 0) return i;
    }
    return n-1;
}
// Index of the largest element of a[0..n); -1 for an empty array.
// Ties resolve to the earliest index.
int max_index(float *a, int n)
{
    int best;
    int k;
    if (n <= 0) {
        return -1;
    }
    best = 0;
    for (k = 1; k < n; ++k) {
        if (a[k] > a[best]) {
            best = k;
        }
    }
    return best;
}
// Pick one index at random among (up to) the k elements captured by a
// single greedy pass over a.  NOTE(review): the pass does not maintain a
// true top-k (it overwrites the first beaten slot rather than the
// smallest), and only strictly-positive values are counted — presumably
// intended for probability-like inputs; confirm before reuse.
int top_max_index(float *a, int n, int k)
{
    if (n <= 0) return -1;
    float *values = (float*)xcalloc(k, sizeof(float));
    int *indexes = (int*)xcalloc(k, sizeof(int));
    int i, j;
    for (i = 0; i < n; ++i) {
        for (j = 0; j < k; ++j) {
            if (a[i] > values[j]) {
                values[j] = a[i];
                indexes[j] = i;
                break;
            }
        }
    }
    int count = 0;
    for (j = 0; j < k; ++j) if (values[j] > 0) count++;
    int get_index = rand_int(0, count-1);
    int val = indexes[get_index];
    free(indexes);
    free(values);
    return val;
}
// First index of 'val' in a[0..n), or -1 if not found.
int int_index(int *a, int val, int n)
{
    int k = 0;
    while (k < n) {
        if (a[k] == val) {
            return k;
        }
        ++k;
    }
    return -1;
}
// Uniform-ish random int in [min, max] (inclusive); arguments may be
// given in either order.  Uses modulo, so slight bias when the range
// does not divide the generator's period.
int rand_int(int min, int max)
{
    if (max < min){
        int s = min;
        min = max;
        max = s;
    }
    int r = (random_gen()%(max - min + 1)) + min;
    return r;
}
// From http://en.wikipedia.org/wiki/Box%E2%80%93Muller_transform
float rand_normal()
{
static int haveSpare = 0;
static double rand1, rand2;
if(haveSpare)
{
haveSpare = 0;
return sqrt(rand1) * sin(rand2);
}
haveSpare = 1;
rand1 = random_gen() / ((double) RAND_MAX);
if(rand1 < 1e-100) rand1 = 1e-100;
rand1 = -2 * log(rand1);
rand2 = (random_gen() / ((double)RAND_MAX)) * 2.0 * M_PI;
return sqrt(rand1) * cos(rand2);
}
/*
float rand_normal()
{
int n = 12;
int i;
float sum= 0;
for(i = 0; i < n; ++i) sum += (float)random_gen()/RAND_MAX;
return sum-n/2.;
}
*/
// Build a 64-bit random value from eight independent random bytes
// (assumes size_t is 64 bits wide on this platform).
size_t rand_size_t()
{
    return  ((size_t)(random_gen()&0xff) << 56) |
            ((size_t)(random_gen()&0xff) << 48) |
            ((size_t)(random_gen()&0xff) << 40) |
            ((size_t)(random_gen()&0xff) << 32) |
            ((size_t)(random_gen()&0xff) << 24) |
            ((size_t)(random_gen()&0xff) << 16) |
            ((size_t)(random_gen()&0xff) << 8) |
            ((size_t)(random_gen()&0xff) << 0);
}
// Uniform random float in [min, max]; arguments may be swapped.
// On platforms with a 16-bit RAND_MAX, two rand() calls are combined
// to widen the sample space.
float rand_uniform(float min, float max)
{
    if (max < min) {
        float tmp = min;
        min = max;
        max = tmp;
    }

#if (RAND_MAX < 65536)
        int rnd = rand()*(RAND_MAX + 1) + rand();
        return ((float)rnd / (RAND_MAX*RAND_MAX) * (max - min)) + min;
#else
        float frac = (float)rand() / RAND_MAX;
        return frac * (max - min) + min;
#endif
}
// Random multiplicative jitter: returns a factor in [1, s] or its
// reciprocal in [1/s, 1], each with probability 1/2.
float rand_scale(float s)
{
    float scale = rand_uniform_strong(1, s);
    if(random_gen()%2) return scale;
    return 1./scale;
}
// One-hot encode n class labels (a[i] truncated to int, assumed in
// [0, k)) into an n x k matrix of freshly allocated rows.
float **one_hot_encode(float *a, int n, int k)
{
    int i;
    float** t = (float**)xcalloc(n, sizeof(float*));
    for(i = 0; i < n; ++i){
        t[i] = (float*)xcalloc(k, sizeof(float));
        int index = (int)a[i];
        t[i][index] = 1;
    }
    return t;
}
// Shared state for the fast generator below (NOT thread-safe).
static unsigned int x = 123456789, y = 362436069, z = 521288629;

// Marsaglia's xorshf96 generator: period 2^96-1
// NOTE(review): the classic xorshf96 uses shifts 16/5/1 on x as here,
// but this variant omits the usual t-mixing shifts — output quality is
// lower than the textbook version; fine for data augmentation.
unsigned int random_gen_fast(void)
{
    unsigned int t;
    x ^= x << 16;
    x ^= x >> 5;
    x ^= x << 1;

    t = x;
    x = y;
    y = z;
    z = t ^ x ^ y;

    return z;
}
// Fast uniform float in [0, 1] using the xorshift generator above.
float random_float_fast()
{
    return ((float)random_gen_fast() / (float)UINT_MAX);
}
// Fast random int in [min, max] (inclusive); arguments may be swapped.
// Modulo introduces slight bias, acceptable for augmentation use.
int rand_int_fast(int min, int max)
{
    if (max < min) {
        int s = min;
        min = max;
        max = s;
    }
    int r = (random_gen_fast() % (max - min + 1)) + min;
    return r;
}
// Portable random unsigned int: rand_s on Windows, rand() elsewhere
// (two calls combined when RAND_MAX is only 16 bits).
unsigned int random_gen()
{
    unsigned int rnd = 0;
#ifdef WIN32
    rand_s(&rnd);
#else   // WIN32
    rnd = rand();
#if (RAND_MAX < 65536)
        rnd = rand()*(RAND_MAX + 1) + rnd;
#endif  //(RAND_MAX < 65536)
#endif  // WIN32
    return rnd;
}
// Portable uniform float in [0, 1], mirroring random_gen()'s
// platform-specific sources.
float random_float()
{
    unsigned int rnd = 0;
#ifdef WIN32
    rand_s(&rnd);
    return ((float)rnd / (float)UINT_MAX);
#else   // WIN32

    rnd = rand();
#if (RAND_MAX < 65536)
    rnd = rand()*(RAND_MAX + 1) + rnd;
    return((float)rnd / (float)(RAND_MAX*RAND_MAX));
#endif  //(RAND_MAX < 65536)
    return ((float)rnd / (float)RAND_MAX);

#endif  // WIN32
}
// Uniform float in [min, max] built on random_float() (the "strong"
// platform source); arguments may be swapped.
float rand_uniform_strong(float min, float max)
{
    if (max < min) {
        float swap = min;
        min = max;
        max = swap;
    }
    return (random_float() * (max - min)) + min;
}
// Map a pre-drawn uniform sample random_part (expected in [0, 1]) onto
// the interval [min, max]; arguments may be swapped.
float rand_precalc_random(float min, float max, float random_part)
{
    if (max < min) {
        float tmp = max;
        max = min;
        min = tmp;
    }
    return random_part * (max - min) + min;
}
#define RS_SCALE (1.0 / (1.0 + RAND_MAX))

// Uniform double in [0, 1) built from three stacked rand() calls for
// extra mantissa bits; the loop rejects the rare round-up to 1.0.
double double_rand(void)
{
    double val;
    do {
        val = rand() * RS_SCALE;
        val = (val + rand()) * RS_SCALE;
        val = (val + rand()) * RS_SCALE;
    } while (val >= 1);   // Round off
    return val;
}
// Uniform unsigned int in [0, less_than).
unsigned int uint_rand(unsigned int less_than)
{
    return (unsigned int)((less_than)* double_rand());
}
// Return 1 if any element of arr[0..size) is NaN, else 0.
int check_array_is_nan(float *arr, int size)
{
    int k = 0;
    while (k < size) {
        if (isnan(arr[k])) {
            return 1;
        }
        ++k;
    }
    return 0;
}
// Return 1 if any element of arr[0..size) is +/- infinity, else 0.
int check_array_is_inf(float *arr, int size)
{
    int k = 0;
    while (k < size) {
        if (isinf(arr[k])) {
            return 1;
        }
        ++k;
    }
    return 0;
}
// Returns a heap-allocated (xcalloc) array of the max-min integers
// [min, max) in random order; caller owns and must free it.
// Shuffled with a Fisher-Yates-style forward pass driven by rand();
// the `rand() % (max - i)` step carries a slight modulo bias.
// NOTE(review): assumes max >= min — a negative count would be passed to
// xcalloc otherwise; confirm callers guarantee this.
int *random_index_order(int min, int max)
{
    int *inds = (int *)xcalloc(max - min, sizeof(int));
    int i;
    // fill with the identity sequence min..max-1
    for (i = min; i < max; ++i) {
        inds[i - min] = i;
    }
    // swap each slot with a uniformly chosen slot at or after it
    for (i = min; i < max - 1; ++i) {
        int swap = inds[i - min];
        int index = i + rand() % (max - i);
        inds[i - min] = inds[index - min];
        inds[index - min] = swap;
    }
    return inds;
}
// Returns the index of the largest element in a[0..n-1]; on ties the first
// occurrence wins. Returns -1 for an empty (n <= 0) array.
int max_int_index(int *a, int n)
{
    if (n <= 0) return -1;
    int best = 0;
    for (int i = 1; i < n; ++i) {
        if (a[i] > a[best])
            best = i;
    }
    return best;
}
// Absolute box from relative coordinate bounding box and image size.
// The input box stores a normalized center (x, y) and size (w, h) in [0,1]
// fractions of the image; the result is pixel-space left/right/top/bottom.
// When bounds_check is nonzero, edges are clamped to [0, img_w-1]/[0, img_h-1].
boxabs box_to_boxabs(const box* b, const int img_w, const int img_h, const int bounds_check)
{
    boxabs ba;
    // center +/- half extent, scaled to pixels
    ba.left = (b->x - b->w / 2.)*img_w;
    ba.right = (b->x + b->w / 2.)*img_w;
    ba.top = (b->y - b->h / 2.)*img_h;
    ba.bot = (b->y + b->h / 2.)*img_h;
    if (bounds_check) {
        if (ba.left < 0) ba.left = 0;
        if (ba.right > img_w - 1) ba.right = img_w - 1;
        if (ba.top < 0) ba.top = 0;
        if (ba.bot > img_h - 1) ba.bot = img_h - 1;
    }
    return ba;
}
// Creates a directory at `path`, returning the platform mkdir result
// (0 on success, -1 on error with errno set).
// Note: the permission `mode` is only honored on POSIX; _mkdir on Windows
// takes no mode argument, so it is silently ignored there.
int make_directory(char *path, int mode)
{
#ifdef WIN32
    return _mkdir(path);
#else
    return mkdir(path, mode);
#endif
}
// Returns the base file name with everything from the FIRST '.' removed
// (e.g. "/tmp/a.tar.gz" -> "a").
// NOTE(review): basename() may modify its argument and strtok() writes a
// NUL terminator into the buffer, so the input string is mutated and the
// returned pointer aliases it; strtok is also not thread-safe. Confirm
// callers tolerate both before reusing this elsewhere.
char *get_file_name(char *filename)
{
    char* file = basename(filename);
    char* name = strtok(file, ".");
    return name;
}
// Formats the current local time as "YYYY-MM-DD_HH:MM:SS\n" into *timestamp.
// NOTE(review): assumes *timestamp already points to a writable buffer of at
// least ~21 bytes — nothing is allocated or bounds-checked here; verify
// callers. The trailing '\n' and the ':' separators make the string
// unsuitable as a filename on some platforms — confirm intended use.
void get_timestamp(char **timestamp)
{
    time_t t = time(NULL);
    struct tm tm = *localtime(&t);
    sprintf(*timestamp, "%d-%02d-%02d_%02d:%02d:%02d\n", tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
    // sprintf(*timestamp, "%d-%02d-%02d", tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday);
}
// Returns 0 when the file exists (access() success) and -1 otherwise.
// Note the inverted sense relative to the name: "exists" == 0, not nonzero.
int check_if_file_exists(char* filename)
{
    return access(filename, F_OK);
} |
TAD.h | //
// @author Adam Gibson
//
#ifndef LIBND4J_TAD_H
#define LIBND4J_TAD_H
#include <helpers/shape.h>
#include <pointercast.h>
namespace shape {
/**
* Dimension collapse is an algorithm
* for collapsing singular dimensions.
* This algorithm will adjust the dimensions
* wrt the original.
*
* The algorithm has 3 components:
* trailing ones
* middle ones
* beginning ones
*
* dimensions that are specified to reduce along
* that are singular should be truncated
*
* dimensions that are specified that are singular
* at the beginning should be removed with middle dimensions
* decremented.
*
* For any time there is a no op, a collapse will
* set the first dimension to be -1.
*
*
*/
    // TAD = "tensor along dimension": given a source array's shape descriptor
    // and a set of dimensions, this class computes the shape of one sub-tensor
    // (tad) and the element offset of every tad inside the source buffer.
    class TAD {
    public:
        // index of the single tad this instance targets (0 when iterating all)
        Nd4jLong tadIndex = 0;
        int dimensionLength;                  // number of entries in `dimension`
        int* dimension = nullptr;             // working dims; owned only when createdNewDimension
        Nd4jLong *shapeInfo = nullptr;        // working shape descriptor of the source array
        Nd4jLong *tadOnlyShapeInfo = nullptr; // shape descriptor of a single tad (built lazily)
        Nd4jLong numTads = 0;                 // number of tads along the given dimensions
        int tadRank = 0;                      // rank of one tad
        Nd4jLong *tadShape = nullptr;         // shape of one tad (points into tadOnlyShapeInfo or shape::keep result)
        Nd4jLong *tadStride = nullptr;        // strides of one tad (points into tadOnlyShapeInfo)
        Nd4jLong *tadOffsets = nullptr;       // per-tad element offsets, filled by createOffsets()
        Nd4jLong tadOffsetForBlock = 0;       // cached offset for the current CUDA block
        int rank = 0;                         // rank of the source array
        int numOnes = 0;                      // count of singular dims, used by collapse()
        //pointers to original
        int originalDimensionLength;
        int *originalDimension = nullptr;
        Nd4jLong *originalShapeInfo = nullptr;
        bool squeezed = false;
        bool newSqueezeDimensions = false;
        int numOnesInMiddle = 0;
        // true when one tad covers the whole array, so tadOffset(i) == i
        bool wholeThing = false;
        //need to track whether we create a new dimension array or not, we could have just moved the pointer forward
        //due to leading ones
        bool createdNewDimension = false;

        // special case for CUDA, we're passing in __shared__ memory pointers to be used instead of new/malloc
        void *ptrManager = nullptr;
        int *ptrOutput = nullptr;

#ifdef __CUDACC__
        __host__ __device__
#endif
        INLINEDEF TAD() {}

#ifdef __CUDACC__
        __host__ __device__
#endif
        TAD(int tadIndex,Nd4jLong *shapeInfo,int *dimension,int dimensionLength);

#ifdef __CUDACC__
        __host__ __device__
#endif
        TAD(Nd4jLong *shapeInfo,int *dimension,int dimensionLength);

#ifdef __CUDACC__
        __host__ __device__
#endif
        INLINEDEF void setExternalBuffers(void *ptrManager);

#ifdef __CUDACC__
        __host__ __device__
#endif
        INLINEDEF void setOutputBuffer(int *ptrOutput);

#ifdef __CUDACC__
        __host__ __device__
#endif
        /**
         * This method is for GPU mostly, it allows to initialize TAD instance with precalculated tadOnlyShapeInfo
         */
        INLINEDEF void initWithExternalTAD(Nd4jLong *existingTAD, Nd4jLong *originalShape, int *dimension, int dimensionLength);

#ifdef __CUDACC__
        __host__ __device__
#endif
        INLINEDEF void init(Nd4jLong *shapeInfo,int *dimension,int dimensionLength);

        template <typename T>
#ifdef __CUDACC__
        __host__ __device__
#endif
        INLINEDEF void printTADsND(T *x);

#ifdef __CUDACC__
        __host__ __device__
#endif
        INLINEDEF void permuteShapeBufferInPlace(Nd4jLong *shapeBuffer, int* rearrange, Nd4jLong *out);

#ifdef __CUDACC__
        __host__ __device__
#endif
        INLINEDEF Nd4jLong* permuteShapeBuffer(Nd4jLong *shapeBuffer, int *rearrange);

#ifdef __CUDACC__
        __host__ __device__
#endif
        INLINEDEF void createTadOnlyShapeInfo();

#ifdef __CUDACC__
        __host__ __device__
#endif
        INLINEDEF Nd4jLong lengthPerSlice(Nd4jLong *shapeBuffer);

#ifdef __CUDACC__
        __host__ __device__
#endif
        INLINEDEF Nd4jLong* tad2Sub(Nd4jLong index);

#ifdef __CUDACC__
        __host__ __device__
#endif
        INLINEDEF ~TAD();

#ifdef __CUDACC__
        __host__ __device__
#endif
        INLINEDEF int* permuteDims();

        /**
         * Compute the tad offset given a dimension.
         *
         * The general pattern for computing a tad offset is as follows:
         * Every $STRIDE that was removed (the first dimension)
         * do a jump by the major stride of the parent array
         * (stride[0] of the parent array)
         *
         * For example given a c ordered 2,2,3,2 with stride 12,6,2,1
         * A tad of dimension 1 will jump 12 every 6 tads.
         *
         * You then end up with offsets of:
         * 0
         * 1
         * 2
         * 3
         * 4
         * 5
         * 12
         * 13
         * 14
         * 15
         * 16
         * 17
         *
         * notice there are 12 tads here. This same incremental jump will happen
         * every time.
         * Note here that by default the
         * stride of element wise stride is used for the hops.
         *
         * Sometimes a jump doesn't happen. If there are less tads
         * than the stride of the dimension you removed, the
         * element wise stride will always be used.
         *
         * For example in a dimension of 0,1, you end up with offsets of:
         * 0,1,2,3,4,5
         *
         * Given that the inner most stride of the dimensions that was removed (1)
         * had a stride of 6, we never need to do a major stride jump.
         *
         */
#ifdef __CUDACC__
        __host__ __device__
#endif
        INLINEDEF Nd4jLong tadOffset(Nd4jLong index);

#ifdef __CUDACC__
        __host__ __device__
#endif
        INLINEDEF Nd4jLong* tensorShape();

#ifdef __CUDACC__
        __host__ __device__
#endif
        INLINEDEF Nd4jLong* tad2Sub(Nd4jLong index, void *ptrManager);

#ifdef __CUDACC__
        __host__ __device__
#endif
        INLINEDEF void createOffsets();

#ifdef __CUDACC__
        __host__ __device__
#endif
        INLINEDEF Nd4jLong* shapeInfoOnlyShapeAndStride();

        /**
         * Length of a tad given
         * the shape information
         */
#ifdef __CUDACC__
        __host__ __device__
#endif
        INLINEDEF Nd4jLong tadLength(Nd4jLong *shapeInfo, int *dimension, int dimensionLength);

        /**
         * Computes the number
         * of tensors along
         * a given dimension
         */
#ifdef __CUDACC__
        __host__ __device__
#endif
        INLINEDEF Nd4jLong tensorsAlongDimension(Nd4jLong *shapeInfo, int *dimension, int dimensionLength);

#ifdef __CUDACC__
        __host__ __device__
        INLINEDEF void createOffsetForBlock(int blockIdx) {
            this->tadOffsetForBlock = this->tadOffset(blockIdx);
        }
#endif

#ifdef __CUDACC__
        __host__ __device__
#endif
        INLINEDEF void collapse();
    };
    ////
#ifdef __CUDACC__
    __host__ __device__
#endif
    // Builds a TAD view focused on one specific tad index; all shared setup
    // is delegated to init().
    INLINEDEF TAD::TAD(int tadIndex,Nd4jLong *shapeInfo,int *dimension,int dimensionLength) {
        this->tadIndex = tadIndex;
        this->init(shapeInfo, dimension, dimensionLength);
    }
#ifdef __CUDACC__
    __host__ __device__
#endif
    // Builds a TAD over all tads of the given dimensions (tadIndex stays 0).
    INLINEDEF TAD::TAD(Nd4jLong *shapeInfo,int *dimension,int dimensionLength) {
        this->init(shapeInfo, dimension, dimensionLength);
    }
    // Installs an external scratch-memory manager (CUDA __shared__ buffers);
    // when set, tad2Sub() borrows its buffers instead of calling new/delete.
    INLINEDEF void TAD::setExternalBuffers(void *ptrManager) {
        this->ptrManager = ptrManager;
    }
    // Stores a caller-provided output buffer pointer (not owned, not freed).
    INLINEDEF void TAD::setOutputBuffer(int *ptrOutput) {
        this->ptrOutput = ptrOutput;
    }
    // Initializes this instance from a PREcomputed tad shape descriptor
    // (GPU path): no buffers are allocated here — shapeInfo/dimension and the
    // existing tad descriptor are all borrowed, and tadShape/tadStride point
    // into existingTAD. numTads is derived as total length / tad length.
    INLINEDEF void TAD::initWithExternalTAD(Nd4jLong *existingTAD, Nd4jLong *originalShape, int *dimension, int dimensionLength) {
        this->tadOnlyShapeInfo = existingTAD;
        this->rank = shape::rank(originalShape);

        this->originalShapeInfo = originalShape;
        this->originalDimension = dimension;
        this->originalDimensionLength = dimensionLength;

        this->shapeInfo = originalShape;
        this->dimension = dimension;
        this->dimensionLength = dimensionLength;

        this->tadShape = shape::shapeOf(existingTAD);
        this->tadStride = shape::stride(existingTAD);

        Nd4jLong ews = shape::elementWiseStride(originalShape);

        this->numTads = shape::length(originalShape) / shape::length(existingTAD); // this->tensorsAlongDimension(this->shapeInfo, this->dimension, this->dimensionLength);//shape::length(originalShape) / shape::length(existingTAD);
        // fast path: a single tad, or a non-view (ews == 1) where the tads
        // degenerate to the whole array / individual elements
        this->wholeThing = this->numTads == 1 || ((this->dimensionLength == this->rank || this->numTads == shape::length(this->shapeInfo)) && ews == 1);
    }
INLINEDEF void TAD::init(Nd4jLong *shapeInfo, int *dimension,int dimensionLength) {
this->originalShapeInfo = shapeInfo;
this->originalDimension = dimension;
this->originalDimensionLength = dimensionLength;
//start off as original references
this->shapeInfo = shapeInfo;
this->dimensionLength = dimensionLength;
this->dimension = dimension;
this->rank = shape::rank(shapeInfo);
this->numTads = dimensionLength == 0 ? 1 : this->tensorsAlongDimension(this->shapeInfo, this->dimension, this->dimensionLength);
Nd4jLong ews = shape::elementWiseStride(shapeInfo);
if(!shape::isVector(shapeInfo)) {
wholeThing = this->numTads == 1 // if number of TADs is 1, we just have input shape == TAD shape
|| ((this->dimensionLength == this->rank // if number of dimensions is the same as input rank, that'll be wholeTad too, but only if EWS==1 (aka - not a View)
|| (this->numTads == shape::length(shapeInfo) && shape::order(shapeInfo) == 'c')) // OR number of tads equals to shapeInfo length AND input is in C order. if order is F - we'll have to calculate offsets
&& ews == 1); // as mentioned above - last 2 rules apply only to non-views
} else if(shape::isScalar(shapeInfo)) {
wholeThing = true;
//vector case
} else {
// if(dimensionLength == 1 && shape::shapeOf(shapeInfo)[dimension[0]] == 1) {
if(dimension == 0 && shape::shapeOf(shapeInfo)[dimension[0]] == 1) {
wholeThing = true;
}
}
}
    // Debug helper: prints every tad of x to stdout. Fast path dumps the flat
    // buffer when the tad covers the whole array; otherwise each tad is walked
    // with the raw-array iterator macros using tadOnlyShapeInfo strides.
    // NOTE(review): assumes createTadOnlyShapeInfo()/createOffsets() already
    // ran — tadOnlyShapeInfo and tadOffsets are dereferenced unchecked.
    template <typename T>
    INLINEDEF void TAD::printTADsND(T *x) {
        if(wholeThing) {
            for(int i = 0; i < shape::length(tadOnlyShapeInfo); i++) {
                printf(" %f ",x[i]);
            }
            printf("\n");
        }
        else {
            for (int i = 0; i <  numTads; i++) {
                auto  offset = tadOffsets[i];
                Nd4jLong shapeIter[MAX_RANK];
                Nd4jLong coord[MAX_RANK];
                int dim;
                int rankIter = shape::rank(tadOnlyShapeInfo);
                Nd4jLong xStridesIter[MAX_RANK];
                T *xPointer = x + offset;
                if (PrepareOneRawArrayIter<T>(rankIter,
                                              shape::shapeOf(tadOnlyShapeInfo),
                                              xPointer,
                                              shape::stride(tadOnlyShapeInfo),
                                              &rankIter,
                                              shapeIter,
                                              &xPointer,
                                              xStridesIter) >= 0) {
                    ND4J_RAW_ITER_START(dim, shape::rank(tadOnlyShapeInfo), coord, shapeIter); {
                        /* Process the innermost dimension */
                        printf(" %f ",xPointer[0]);
                    }
                    ND4J_RAW_ITER_ONE_NEXT(dim,
                                           rankIter,
                                           coord,
                                           shapeIter,
                                           xPointer,
                                           xStridesIter);
                    printf("\n");

                }
                else {
                    printf("Unable to prepare array\n");
                }
            }
        }
    }
    // Copies shapeBuffer into `out` (sized for this->rank) and permutes the
    // copy's shape/stride entries according to `rearrange`.
    INLINEDEF void TAD::permuteShapeBufferInPlace(Nd4jLong* shapeBuffer, int* rearrange, Nd4jLong* out) {
        memcpy(out, shapeBuffer, sizeof(Nd4jLong) * shape::shapeInfoLength(this->rank));
        doPermuteShapeBuffer(this->rank, out, rearrange);
    }
    // Returns a freshly allocated permuted copy of shapeBuffer (permutation
    // given by `rearrange`); the caller owns the returned buffer.
    INLINEDEF Nd4jLong* TAD::permuteShapeBuffer(Nd4jLong* shapeBuffer, int *rearrange) {
        int len = shape::shapeInfoLength(this->rank);
        Nd4jLong *copy = shape::copyOf(len,shapeBuffer);
        doPermuteShapeBuffer(rank, copy,rearrange);
        return copy;
    }
    // Builds tadOnlyShapeInfo (the shape descriptor of a single tad) and
    // repoints tadShape/tadStride into it.
    // NOTE(review): the delete[] below assumes tadShape owns a standalone
    // allocation (as set by tensorShape()); after initWithExternalTAD() it
    // instead points INTO an existing descriptor, for which delete[] would be
    // undefined behavior — confirm the call ordering callers rely on.
    INLINEDEF void TAD::createTadOnlyShapeInfo() {
        this->tadOnlyShapeInfo = this->shapeInfoOnlyShapeAndStride();

        if (this->tadShape != nullptr)
            delete[] this->tadShape;

        // views into the freshly built descriptor — not separately owned
        this->tadShape = shape::shapeOf(this->tadOnlyShapeInfo);
        this->tadStride = shape::stride(this->tadOnlyShapeInfo);
        /* if(tadIndex > 0) {
            this->createOffsets();
            this->tadOnlyShapeInfo[shape::shapeInfoLength(shape::rank(this->tadOnlyShapeInfo)) - 3] = this->tadOffsets[tadIndex];
        }*/
    }
    // Number of elements in one slice of shapeBuffer: the product of all
    // extents except dimension 0.
    INLINEDEF Nd4jLong TAD::lengthPerSlice(Nd4jLong* shapeBuffer) {
        int dimension = 0;
        // drop dimension 0 from the shape, multiply the rest
        Nd4jLong *remove  = shape::removeIndex(shape::shapeOf(shapeBuffer),&dimension,shape::rank(shapeBuffer),1);
        Nd4jLong prod = shape::prodLong(remove, shape::rank(shapeBuffer) - 1);
        delete[] remove;
        return prod;
    }
INLINEDEF Nd4jLong* TAD::tad2Sub(Nd4jLong index) {
Nd4jLong *shape = shape::shapeOf(shapeInfo);
int rank = shape::rank(shapeInfo);
int leftOverIndexLen = rank - originalDimensionLength;
#ifdef __CUDACC__
Nd4jLong *ret;
Nd4jLong *tadShape;
Nd4jLong *leftOverIndexes;
Nd4jLong *sub;
if (ptrManager != nullptr) {
UnifiedSharedMemory *manager = (UnifiedSharedMemory *) ptrManager;
ret = (Nd4jLong *) manager->getTempRankBuffer1();
tadShape = (Nd4jLong *) manager->getTempRankBuffer2();
leftOverIndexes = (Nd4jLong *) manager->getTempRankBuffer3();
sub = (Nd4jLong *) manager->getTempRankBuffer4();
} else {
ret = new Nd4jLong[rank];
tadShape = new Nd4jLong[leftOverIndexLen];
leftOverIndexes = new Nd4jLong[leftOverIndexLen];
sub = new Nd4jLong[rank];
}
#else
Nd4jLong *ret = new Nd4jLong[rank];
//shape of the tad
Nd4jLong *tadShape = new Nd4jLong[leftOverIndexLen];
Nd4jLong *leftOverIndexes = new Nd4jLong[leftOverIndexLen];
Nd4jLong *sub = new Nd4jLong[rank];
#endif
//indexes not specified in the tad indexes
//every coordinate starts as zero
memset(ret,0, shape::shapeInfoByteLength(rank));
//find the length of the elements we
//are iterating over
int len = 1;
//left over index cursor for initializing elements
int leftOverIndex = 0;
for(int i = 0; i < rank; i++) {
//look for dimensions NOT found in dimension length (basically compute shape - dimension (set difference)
bool found = false;
for(int j = 0; j < originalDimensionLength; j++) {
//skip over specified dimensions when computing left over length
if(i == originalDimension[j]) {
found = true;
break;
}
}
//add to the indexes that aren't specified as part of the tad dimension
//indexes
if(!found) {
//accumulate the list of indexes left over used for initializing the return value
leftOverIndexes[leftOverIndex] = i;
//accumulate the tad shape
tadShape[leftOverIndex] = shape[i];
//accumulate the length (product) of the indexes that will be iterated over
len *= shape[i];
leftOverIndex++;
}
}
//sub for indices
/* int *sub = new int[leftOverIndexLen];
shape::ind2subOrder(tadShape,index,len,sub);
*/
shape::ind2subC(leftOverIndexLen,tadShape, index,len, sub);
for(int i = 0; i < leftOverIndexLen; i++) {
ret[leftOverIndexes[i]] = sub[i];
}
if (ptrManager == nullptr) {
delete[] tadShape;
delete[] leftOverIndexes;
delete[] sub;
}
return ret;
}
    // Frees only buffers this instance actually owns: the working pointers may
    // simply alias (or be offset into) the originals handed to init(), so each
    // delete is guarded by an identity / ownership-flag check.
    INLINEDEF TAD::~TAD() {
        //we may have just moved the pointer forward, we may not need to delete the pointer here
        if(originalDimension != this->dimension && createdNewDimension) {
            delete[] this->dimension;
        }
        if(this->originalShapeInfo != this->shapeInfo) {
            delete[] this->shapeInfo;
        }
        if(this->tadOffsets != nullptr) {
            delete[] this->tadOffsets;
        }

        if(this->tadOnlyShapeInfo != nullptr && this->tadOnlyShapeInfo != shapeInfo) {
            delete[] this->tadOnlyShapeInfo;
        }
    }
    // Builds the permutation used to move the tad dimensions to the end:
    // first every axis NOT in originalDimension (in ascending order), then the
    // tad dimensions in REVERSE order. Returns a new int[rank] owned by the
    // caller. Assumes originalDimension is sorted (see loop comment).
    INLINEDEF int* TAD::permuteDims() {
        //permute dimensions for tad
        int dimIdx = 0;

        //loop backwards assuming dimension is sorted

        int *permuteDims = new int[shape::rank(shapeInfo)];

        for(int i = 0; i < shape::rank(shapeInfo); i++) {
            bool found = false;
            for(int j = 0; j < originalDimensionLength; j++) {
                if(i == originalDimension[j]) {
                    found = true;
                    break;
                }
            }

            //not found, append it to the end for permute
            if(!found)
                permuteDims[dimIdx++] = i;
        }

        for(int i = originalDimensionLength - 1; i >= 0; i--) {
            permuteDims[dimIdx++] = originalDimension[i];
        }

/*
        for (int i = 0; i < originalDimensionLength; i++) {
            permuteDims[i] = originalDimension[i];
        }
*/

        //permute dimensions for tad
        return permuteDims;
    }
    // Returns the element offset of tad `index` within the source buffer.
    // Lazily builds tadOnlyShapeInfo; when the tad covers the whole array the
    // offset is the identity. Otherwise the index is unraveled to full-rank
    // coordinates (tad2Sub) and folded with the source strides. The
    // multi-dimension branch additionally reports a negative getOffset result
    // as -1; the single-dimension branch returns the raw offset.
    INLINEDEF Nd4jLong TAD::tadOffset(Nd4jLong index) {
        if(tadOnlyShapeInfo == nullptr) {
            this->createTadOnlyShapeInfo();
        }

        if(wholeThing)
            return index;

        if(dimensionLength > 1) {
            Nd4jLong *tad2Sub = this->tad2Sub(index, ptrManager);

            Nd4jLong ret = shape::getOffset(0,shape::shapeOf(shapeInfo),shape::stride(shapeInfo),tad2Sub,shape::rank(shapeInfo));

            if(ret < 0) {
                if (ptrManager == nullptr)
                    delete[] tad2Sub;
                return -1;
            }
            if (ptrManager == nullptr)
                delete[] tad2Sub;

            return ret;

        }
        else {
            Nd4jLong *tad2Sub = this->tad2Sub(index, ptrManager);

            Nd4jLong ret = shape::getOffset(0,shape::shapeOf(shapeInfo),shape::stride(shapeInfo),tad2Sub,shape::rank(shapeInfo));

            if (ptrManager == nullptr)
                delete[] tad2Sub;

            return ret;
        }
    }
    // Returns (and caches in tadShape/tadRank) the shape of one tad: the
    // source extents kept at the tad dimensions.
    // NOTE(review): shape::keep allocates here, but ~TAD never frees tadShape
    // — possible leak unless createTadOnlyShapeInfo() later replaces it;
    // confirm ownership with the call sites.
    INLINEDEF Nd4jLong* TAD::tensorShape() {
        if(this->tadShape != nullptr)
            return this->tadShape;

        Nd4jLong *theShape = shape::shapeOf(shapeInfo);
        Nd4jLong *tensorShape = shape::keep(theShape, this->dimension, dimensionLength,shape::rank(shapeInfo));
        this->tadShape = tensorShape;
        this->tadRank = dimensionLength;
        return tensorShape;
    }
    // Same mapping as tad2Sub(index) — tad index to full-rank coordinates of
    // the non-tad dimensions — but scratch buffers come from the supplied
    // CUDA memory manager when present; otherwise they are heap-allocated and
    // the returned Nd4jLong[rank] is owned by the caller.
    INLINEDEF Nd4jLong* TAD::tad2Sub(Nd4jLong index, void *ptrManager) {
        auto shape = shape::shapeOf(shapeInfo);

        int rank = shape::rank(shapeInfo);
        int leftOverIndexLen = rank - originalDimensionLength;
        Nd4jLong *tadShape;
        Nd4jLong *leftOverIndexes;
        Nd4jLong *sub;
        Nd4jLong *ret;

#ifdef __CUDACC__
        if (ptrManager != nullptr) {
            UnifiedSharedMemory *manager = (UnifiedSharedMemory *) ptrManager;
            ret = (Nd4jLong *) manager->getTempRankBuffer1();
            tadShape = (Nd4jLong *) manager->getTempRankBuffer2();
            leftOverIndexes = (Nd4jLong *) manager->getTempRankBuffer3();
            sub = (Nd4jLong *) manager->getTempRankBuffer4();
        } else {
            ret = new Nd4jLong[rank];
            //shape of the tad
            leftOverIndexes = new Nd4jLong[leftOverIndexLen];
            sub = new Nd4jLong[rank];
            tadShape = new Nd4jLong[leftOverIndexLen];
        }
#else
        ret = new Nd4jLong[rank];
        //shape of the tad
        leftOverIndexes = new Nd4jLong[leftOverIndexLen];
        sub = new Nd4jLong[rank];
        tadShape = new Nd4jLong[leftOverIndexLen];
#endif

        //indexes not specified in the tad indexes

        //every coordinate starts as zero
        memset(ret,0,sizeof(Nd4jLong) * rank);

        //find the length of the elements we
        //are iterating over
        int len = 1;
        //left over index cursor for initializing elements
        int leftOverIndex = 0;
        for(int i = 0; i < rank; i++) {
            //look for dimensions NOT found in dimension length (basically compute shape - dimension (set difference)
            bool found = false;
            for(int j = 0; j < originalDimensionLength; j++) {
                //skip over specified dimensions when computing left over length
                if(i == originalDimension[j]) {
                    found = true;
                    break;
                }

            }

            //add to the indexes that aren't specified as part of the tad dimension
            //indexes
            if(!found) {
                //accumulate the list of indexes left over used for initializing the return value
                leftOverIndexes[leftOverIndex] = i;
                //accumulate the tad shape
                tadShape[leftOverIndex] = shape[i];
                //accumulate the length (product) of the indexes that will be iterated over
                leftOverIndex++;
                len *= shape[i];

            }
        }


        //sub for indices
        /* int *sub = new int[leftOverIndexLen];
         shape::ind2subOrder(tadShape,index,len,sub);
        */
        shape::ind2subC(leftOverIndexLen,tadShape,index,len, sub);

        // scatter the unraveled coordinates into their full-rank slots
        for(int i = 0; i < leftOverIndexLen; i++) {
            ret[leftOverIndexes[i]] = sub[i];
        }

        if (ptrManager == nullptr) {
            delete[] leftOverIndexes;
            delete[] tadShape;
            delete[] sub;
        }

        return ret;
    }
    // Precomputes tadOffsets[i] = tadOffset(i) for every tad, in parallel for
    // large tad counts. The array is owned by this instance (freed in ~TAD).
    INLINEDEF void TAD::createOffsets() {
        this->tadOffsets = new Nd4jLong[this->numTads];

#pragma omp parallel for if (this->numTads > 128) schedule(static) proc_bind(close) default(shared)
        for(int i = 0; i < this->numTads; i++) {
            this->tadOffsets[i] = this->tadOffset(i);
        }
    }
INLINEDEF Nd4jLong* TAD::shapeInfoOnlyShapeAndStride() {
if(wholeThing && (dimensionLength == 1 && dimension[0] == MAX_DIMENSION) || shape::isScalar(shapeInfo))
return shape::createScalarShapeInfo();
//ensure tad shapes get setup right for vectors
if(dimensionLength > 1 && shape::isVector(shapeInfo))
return shape::copyOf(shape::shapeInfoLength(shape::rank(shapeInfo)),shapeInfo);
// case when tad coincides with whole array
if( this->numTads == 1 && ((shape::rank(originalShapeInfo) == originalDimensionLength) || originalDimensionLength == 0)) {
// we might have special case here: skipped dimensions might be just full of ones
Nd4jLong *ret = shape::copyOf(shape::shapeInfoLength(shape::rank(shapeInfo)), shapeInfo);
if (shape::isDimPermuted<int>(dimension, (Nd4jLong) dimensionLength)) // check whether we need permutation
shape::doPermuteShapeBuffer(ret, dimension);
return ret;
}
Nd4jLong *theShape = shape::shapeOf(shapeInfo);
int rank = shape::rank(shapeInfo);
if(dimensionLength == 1) {
if(dimension[0] == 0 && shape::isVector(shapeInfo) && theShape[1] == 1) {
int permuted[2] = {1,0};
Nd4jLong *permutedRet2 = shape::permuteShapeBuffer(shapeInfo, permuted);
return permutedRet2;
} else if(dimension[0] == 1 && shape::isVector(shapeInfo) && theShape[0] == 1) {
Nd4jLong *ret = shape::copyOf(shape::shapeInfoLength(shape::rank(shapeInfo)),shapeInfo);
return ret;
}
else if(shape::shapeOf(shapeInfo)[dimension[0]] == 1) {
Nd4jLong *scalarInfo = shape::createScalarShapeInfo();
scalarInfo[shape::shapeInfoLength(shape::rank(scalarInfo)) - 3] = this->tadIndex;
return scalarInfo;
}
}
Nd4jLong *tensorShape = this->tensorShape();
int *reverseDimensions = shape::reverseCopy(dimension, dimensionLength);
int *rankRange = shape::range<int>(0, rank);
int *remove = shape::removeIndex<int>(rankRange, dimension, (Nd4jLong) rank, (Nd4jLong) dimensionLength);
//concat is wrong here with the length
int *newPermuteDims = shape::concat(remove,rank - dimensionLength,reverseDimensions,dimensionLength);
Nd4jLong* permuted = shape::permuteShapeBuffer(shapeInfo,newPermuteDims);
Nd4jLong sliceIndex = shape::sliceOffsetForTensor(shape::rank(permuted),
this->tadIndex,
shape::shapeOf(shapeInfo),
tensorShape,
dimensionLength,
dimension,
dimensionLength);
Nd4jLong *ret2 = shape::sliceOfShapeBuffer(sliceIndex, permuted);
Nd4jLong tensorLength = shape::prodLong(tensorShape,tadRank);
int compLength = shape::isVector(ret2) ? shape::length(ret2) : shape::prod(tensorShape,tadRank);
// int temp;
// const bool isLikeVector = shape::isLikeVector(ret2, temp);
// if(dimensionLength == tadRank && compLength == shape::length(ret2) && !isLikeVector) {
if(dimensionLength == tadRank && compLength == shape::length(ret2)) {
if(dimensionLength == 1 && shape::isVector(ret2) && shape::shapeOf(ret2)[0] == 1) {
//go to the bottom and return ret2 after proper freeing of pointers
//basic idea; we *don't* permute row vectors
}
else if(dimensionLength > 1) {
//permute *then* return ret2
int *finalPermuteDims = new int[shape::rank(ret2)];
int forward = 0;
for(int i = shape::rank(ret2) - 1; i >= 0; i--) {
finalPermuteDims[forward++] = i;
}
shape::permuteShapeBufferInPlace(ret2,finalPermuteDims,ret2);
delete[] finalPermuteDims;
}
}
else {
Nd4jLong length = tensorLength;
Nd4jLong lengthPerSlice = this->lengthPerSlice(ret2);
if(lengthPerSlice < 1) {
return ret2;
}
Nd4jLong offset = tadIndex * tensorLength /lengthPerSlice;
if(sliceIndex == 0 && length == lengthPerSlice) {
Nd4jLong *newRet2 = shape::sliceOfShapeBuffer(offset, ret2);
delete[] ret2;
ret2 = newRet2;
int *finalPermuteDims = new int[shape::rank(ret2)];
int forward = 0;
for(int i = shape::rank(ret2) - 1; i >= 0; i--) {
finalPermuteDims[forward++] = i;
}
// bool isRowVector2 = shape::isRowVector(ret2) && !isLikeVector;
bool isRowVector2 = shape::isRowVector(ret2);
if(isRowVector2 == false) {
shape::permuteShapeBufferInPlace(ret2, finalPermuteDims, ret2);
}
delete[] finalPermuteDims;
}
else if(length == lengthPerSlice) {
offset -= shape::slices(ret2) * (offset / shape::slices(ret2));
Nd4jLong *newRet2 = shape::sliceOfShapeBuffer(offset,ret2);
delete[] ret2;
ret2 = newRet2;
if(dimensionLength == 1 && shape::isVector(ret2) && shape::shapeOf(ret2)[0] == 1) {
//go to the bottom and return ret2 after proper freeing of pointers
//basic idea; we *don't* permute row vectors
}
else {
int *finalPermuteDims = new int[shape::rank(ret2)];
int forward = 0;
for(int i = shape::rank(ret2) - 1; i >= 0; i--) {
finalPermuteDims[forward++] = i;
}
Nd4jLong *newRet = shape::permuteShapeBuffer(ret2, finalPermuteDims);
delete[] ret2;
delete[] finalPermuteDims;
ret2 = newRet;
}
}
else {
//execute final part, note that this is mainly so delete[] gets called
//at the bottom of the method
while(shape::length(ret2) > length) {
auto lengthPerSlice2 = this->lengthPerSlice(ret2);
sliceIndex = sliceOffsetForTensor(sliceIndex,shape::length(ret2),lengthPerSlice2);
sliceIndex -= shape::slices(ret2) * (sliceIndex / shape::slices(ret2));
auto newRet2 = shape::sliceOfShapeBuffer(sliceIndex,ret2);
delete[] ret2;
ret2 = newRet2;
}
//don't permute on a row vector
if(dimensionLength == 1 && shape::isVector(ret2) && shape::shapeOf(ret2)[0] == 1) {
//go to the bottom and return ret2 after proper freeing of pointers
//basic idea; we *don't* permute row vectors
}
else if(dimensionLength > 1){
//permute *then* return ret
int *finalPermuteDims = new int[shape::rank(ret2)];
int forward = 0;
for(int i = shape::rank(ret2) - 1; i >= 0; i--) {
finalPermuteDims[forward++] = i;
}
auto newPermute = shape::permuteShapeBuffer(ret2,finalPermuteDims);
delete[] ret2;
delete[] finalPermuteDims;
ret2 = newPermute;
}
}
}
delete[] permuted;
delete[] newPermuteDims;
delete[] rankRange;
delete[] remove;
delete[] reverseDimensions;
return ret2;
}
INLINEDEF Nd4jLong TAD::tadLength(Nd4jLong *shapeInfo, int *dimension, int dimensionLength) {
if(dimensionLength == 1) {
return shape::shapeOf(shapeInfo)[dimension[0]];
}
else {
Nd4jLong ret = 1;
for(int i = 0; i < shape::rank(shapeInfo); i++) {
for(int j = 0; j < dimensionLength; j++) {
if(i == dimension[j])
ret *= shape::shapeOf(shapeInfo)[dimension[j]];
}
}
return ret;
}
}
    // Number of tads: total element count divided by the length of one tad.
    INLINEDEF Nd4jLong TAD::tensorsAlongDimension(Nd4jLong *shapeInfo, int *dimension, int dimensionLength) {
        return shape::length(shapeInfo) / this->tadLength(shapeInfo,dimension,dimensionLength);
    }
    // Dimension-collapse algorithm (see the class doc comment): removes
    // singular (extent-1) dimensions from the reduce-dimension list —
    // trailing ones are dropped, leading ones advance the dimension pointer,
    // middle ones are removed with the remaining dims renumbered. A no-op
    // reduce is signaled by dimension[0] == -1. Mutates this->dimension /
    // this->dimensionLength in place.
    INLINEDEF void TAD::collapse() {
        auto shape = shape::shapeOf(shapeInfo);
        //handle negative dimensions/backwards indexing
        for(int i = 0; i < dimensionLength; i++) {
            if((dimension)[i] < 0)
                (dimension)[i] += shape::rank(this->shapeInfo);
        }

        // work on an owned copy of the original dimension list
        this->dimension =  new int[dimensionLength];
        memcpy(this->dimension,this->originalDimension, sizeof(int) * dimensionLength);

        //we can drop trailing dimensions where it's all singular for example:
        // shape: 4,3,1,2
        //dimension: 0,2
        // the problem for 0,2 is equivalent to: 0
        //the rest of the algorithm handles cases such as
        //shape: 4,1,1,2
        //dimension: 0,1
        //when this happens there are other dimensions (eg: at the end) that matter
        int trailingOneDimensions = 0;
        //trailing ones
        for(int i = dimensionLength - 1; i >= 0; i--) {
            if(shape[dimension[i]] != 1) {
                break;
            }
            else if(shape[dimension[i]] == 1)
                trailingOneDimensions++;
        }

        dimensionLength -= trailingOneDimensions;

        int leadingOneDimensions = 0;
        //trailing ones
        for(int i = 0; i < dimensionLength; i++) {
            if(shape[dimension[i]] != 1) {
                break;
            }
            else if(shape[dimension[i]] == 1)
                leadingOneDimensions++;
        }

        //bump the dimension pointer forward for however many leadingones there are
        dimension += leadingOneDimensions;
        //decrease the dimension length by the amount of leading ones
        dimensionLength -= leadingOneDimensions;

        // if no singular dimensions remain, nothing left to collapse
        bool preConverged = true;
        for(int i = 0; i < dimensionLength; i++) {
            if(shape[dimension[i]] == 1) {
                preConverged = false;
                break;
            }
        }

        //we took away all the singular dimensions, we can just return
        if(preConverged)
            return;

        //no more singular dimensions specified
        bool done = false;
        int onesDecrement = 0;
        bool changed = false;
        // iterate until no singular dimension remains (or we hit the front)
        while(!done) {
            //terminate early: only singular dimensions specified for reduce
            if((dimensionLength) < 1) {
                done = true;
                //signal as a no op
                dimension[0] = -1;
                break;
            }

            //captures intermediary result from the for loop
            traceNew(3);

            int intermediaryResult[MAX_RANK];
            for(int i = 0; i < dimensionLength; i++) {
                intermediaryResult[i] = (dimension)[i];
            }

            bool oneEncountered = false;
            bool nonOneEncountered = false;
            bool hitBeginning = false;
            //assume intermediate collapsing of dimensions
            bool collapseMiddleDimensions = true;
            //note here that dimension length MAY end up being zero
            for(int i = (dimensionLength) - 1; i >= 0; i--) {
                if(shape[(dimension)[i]] == 1) {
                    oneEncountered = true;
                    //trailing ones
                    if(!nonOneEncountered) {
                        //just drop trailing ones
                        dimensionLength--;
                        nonOneEncountered = false;
                        collapseMiddleDimensions = false;
                        //intermediary result just needs to have the results copied from dimension since we're just removing the tail
                        memcpy(intermediaryResult,dimension, sizeof(int) * dimensionLength);
                        changed = true;
                        //break the for loop and force it to go back around starting from the new index
                        break;
                    }
                    else {
                        //already decremented all dimensions
                        //this was a result of hitting beginning ones
                        //we will only need to loop once
                        if(i == 0) {
                            hitBeginning = true;
                        }
                        //will need to shift dimensions that aren't trailing ones
                        //back by onesDecrement
                        //mark the intermediary result as -1 for non inclusion
                        intermediaryResult[i] = -1;
                        onesDecrement++;
                    }
                }
                else {
                    intermediaryResult[i] = (dimension)[i];
                    nonOneEncountered = true;
                }
            }

            if(collapseMiddleDimensions && oneEncountered) {
                //collapse dimensions
                int newIntermediary[MAX_RANK];
                int idx = 0;
                for(int i = 0; i < dimensionLength; i++) {
                    //of note: dimension will decrease by the number of ones encountered
                    if(intermediaryResult[i] >= 0) {
                        //dimension 0 doesn't need to be decremented
                        if(intermediaryResult[i] > 0)
                            newIntermediary[idx++] = intermediaryResult[i] - onesDecrement;
                        else
                            newIntermediary[idx++] = intermediaryResult[i];
                    }
                }

                //decrement by the number of dimensions where ones appeared
                (dimensionLength) -= onesDecrement;
                //update to current result
                memcpy(dimension,newIntermediary, sizeof(int) * (dimensionLength));
                changed = true;
            }
                //converged: no need to change result
            else {
                //update to current result
                memcpy(dimension,intermediaryResult, sizeof(int) * dimensionLength);
            }

            //converge when there are no singular dimensions specified in the reduce
            done = (!oneEncountered && nonOneEncountered) || hitBeginning;
            //delete[] intermediaryResult;
        }

        //nothing changed but need to collapse dimension
        if(!changed && this->numOnes > 0) {
            for(int i = 0; i < dimensionLength ;i++) {
                dimension[i] -= numOnes;
            }
        }
    }
}
#endif //LIBND4J_TAD_H
|
hoTvOperator.h | #pragma once
#include "hoNDArray_math.h"
#include "generalOperator.h"
#include "vector_td_operators.h"
#ifdef USE_OMP
#include <omp.h>
#endif
namespace Gadgetron{
  // Total-variation (TV) regularization operator over a D-dimensional
  // hoNDArray with periodic boundary conditions. `gradient` accumulates the
  // derivative of the (smoothed) TV functional; `magnitude` evaluates the
  // functional itself. `limit_` is the smoothing threshold that guards the
  // 1/|grad| division near zero.
  template<class T, unsigned int D> class hoTvOperator
  : public generalOperator< hoNDArray<T> >
  {
  protected:
    typedef typename realType<T>::Type REAL;

  public:
    hoTvOperator() : generalOperator< hoNDArray<T> >(){
      limit_ = REAL(1e-8);
    }

    virtual ~hoTvOperator() {}

    // Sets the denominator threshold below which gradient terms are skipped.
    void set_limit(REAL limit){
      limit_ = limit;
    }

    // Adds weight_ * dTV/dx to out_array (overwrites unless accumulate).
    // Throws std::runtime_error when the element counts disagree.
    virtual void gradient( hoNDArray<T> *in_array, hoNDArray<T> *out_array, bool accumulate=false )
    {
      if (in_array->get_number_of_elements() != out_array->get_number_of_elements()){
        throw std::runtime_error("hoTvOperator: input/output array dimensions mismatch");
      }

      T* in = in_array->get_data_ptr();
      T* out = out_array->get_data_ptr();

      auto dims = vector_td<int64_t,D>(from_std_vector<size_t, D>(in_array->dimensions()));

      if (!accumulate)
        clear(out_array);

#ifdef USE_OMP
#pragma omp parallel for
#endif
      for (int64_t idx=0; idx < in_array->get_number_of_elements(); idx++){

        T xi = in[idx];
        T result = T(0);

        vector_td<int64_t,D> co = idx_to_co(idx, dims);

        // forward-difference magnitude at this voxel; (co+dims)%dims wraps
        // the neighbor coordinates (periodic boundary)
        REAL grad = gradient_(in,dims,co);

        if (grad > limit_) {
          result += REAL(D)*xi/grad;
          for (int i = 0; i < D; i++){
            co[i]+=1;
            result -= in[co_to_idx((co+dims)%dims,dims)]/grad;
            co[i]-=1;
          }
        }

        // contributions from the backward neighbors' gradient terms
        for (int i = 0; i < D; i++){
          co[i]-=1;
          grad = gradient_(in,dims,co);
          if (grad > limit_) {
            result +=(xi-in[co_to_idx((co+dims)%dims,dims)])/grad;
          }
          co[i]+=1;
        }
        out[idx] += this->weight_*result;
      }
    }

    // Returns weight_ * sum over voxels of the local TV gradient magnitude.
    virtual REAL magnitude( hoNDArray<T> *in_array )
    {
      T* in = in_array->get_data_ptr();
      auto dims = vector_td<int64_t,D>(from_std_vector<size_t, D>(in_array->dimensions()));

      REAL result =0;
#ifdef USE_OMP
#pragma omp parallel for reduction(+:result)
#endif
      for (int64_t idx=0; idx < in_array->get_number_of_elements(); idx++){
        auto co = idx_to_co(idx, dims);
        REAL grad = gradient_(in,dims,co);
        result += this->weight_*grad;
      }
      return result;
    }

  private:
    // Euclidean norm of the forward differences at `co`, with periodic wrap.
    REAL inline gradient_(T* in, const vector_td<int64_t,D> dims, vector_td<int64_t,D> co)
    {
      REAL grad = REAL(0);
      T xi = in[co_to_idx((co+dims)%dims,dims)];
      for (int i = 0; i < D; i++){
        co[i]+=1;
        T dt = in[co_to_idx((co+dims)%dims,dims)];
        grad += norm(xi-dt);
        co[i]-=1;
      }
      return std::sqrt(grad);
    }

  protected:
    REAL limit_;   // smoothing threshold for the 1/|grad| division
  };
}
|
compatibility.h | // -*- C++ -*-
// Copyright (C) 2007-2019 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the terms
// of the GNU General Public License as published by the Free Software
// Foundation; either version 3, or (at your option) any later
// version.
// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
/** @file parallel/compatibility.h
* @brief Compatibility layer, mostly concerned with atomic operations.
*
* This file is a GNU parallel extension to the Standard C++ Library
* and contains implementation details for the library's internal use.
*/
// Written by Felix Putze.
#ifndef _GLIBCXX_PARALLEL_COMPATIBILITY_H
#define _GLIBCXX_PARALLEL_COMPATIBILITY_H 1
#include <parallel/types.h>
#include <parallel/base.h>
#if !defined(_WIN32) || defined (__CYGWIN__)
#include <sched.h>
#endif
#ifdef __MINGW32__
// Including <windows.h> will drag in all the windows32 names. Since
// that can cause user code portability problems, we just declare the
// one needed function here.
extern "C"
__attribute((dllimport)) void __attribute__((stdcall)) Sleep (unsigned long);
#endif
namespace __gnu_parallel
{
  /** @brief Fetch-and-add fallback using an OpenMP critical section.
   *
   *  Returns the old value of @c *__ptr and adds @c __addend to it.
   *  Used when no always-lock-free hardware atomic exists for _Tp.
   *
   *  @param __ptr Pointer to the variable to update.
   *  @param __addend Value to add.
   *  @return The value @c *__ptr held before the addition.
   */
  template<typename _Tp>
    inline _Tp
    __add_omp(volatile _Tp* __ptr, _Tp __addend)
    {
      // FIX: the temporary must have the element type _Tp. The previous
      // int64_t temporary round-tripped the old value through a 64-bit
      // signed integer, which is lossy for unsigned 64-bit values above
      // INT64_MAX (implementation-defined conversion pre-C++20) and
      // incorrect for any non-integral _Tp.
      _Tp __res;
#pragma omp critical
      {
        __res = *__ptr;
        *(__ptr) += __addend;
      }
      return __res;
    }

  /** @brief Add a value to a variable, atomically.
   *
   *  Uses a hardware atomic when it is always lock-free for _Tp,
   *  otherwise falls back to the OpenMP critical section above.
   *
   *  @param __ptr Pointer to a signed integer.
   *  @param __addend Value to add.
   *  @return The previous value of @c *__ptr.
   */
  template<typename _Tp>
    inline _Tp
    __fetch_and_add(volatile _Tp* __ptr, _Tp __addend)
    {
      if (__atomic_always_lock_free(sizeof(_Tp), __ptr))
        return __atomic_fetch_add(__ptr, __addend, __ATOMIC_ACQ_REL);
      return __add_omp(__ptr, __addend);
    }

  /** @brief Compare-and-swap fallback using an OpenMP critical section.
   *
   *  @return @c true iff the swap was performed.
   */
  template<typename _Tp>
    inline bool
    __cas_omp(volatile _Tp* __ptr, _Tp __comparand, _Tp __replacement)
    {
      bool __res = false;
#pragma omp critical
      {
        if (*__ptr == __comparand)
          {
            *__ptr = __replacement;
            __res = true;
          }
      }
      return __res;
    }

  /** @brief Compare-and-swap.
   *
   *  Compare @c *__ptr and @c __comparand. If equal, let
   *  @c *__ptr=__replacement and return @c true, return @c false otherwise.
   *
   *  @param __ptr Pointer to signed integer.
   *  @param __comparand Compare value.
   *  @param __replacement Replacement value.
   */
  template<typename _Tp>
    inline bool
    __compare_and_swap(volatile _Tp* __ptr, _Tp __comparand, _Tp __replacement)
    {
      if (__atomic_always_lock_free(sizeof(_Tp), __ptr))
        return __atomic_compare_exchange_n(__ptr, &__comparand, __replacement,
                                           false, __ATOMIC_ACQ_REL,
                                           __ATOMIC_RELAXED);
      return __cas_omp(__ptr, __comparand, __replacement);
    }

  /** @brief Yield control to another thread, without waiting for
   *  the end of the time slice.
   */
  inline void
  __yield()
  {
#if defined (_WIN32) && !defined (__CYGWIN__)
    Sleep(0);
#else
    sched_yield();
#endif
  }
} // end namespace __gnu_parallel
#endif /* _GLIBCXX_PARALLEL_COMPATIBILITY_H */
|
GB_unop__abs_fp32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__abs_fp32_fc32)
// op(A') function: GB (_unop_tran__abs_fp32_fc32)
// C type: float
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = (aij)
// unaryop: cij = cabsf (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = cabsf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = (aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = (aij) ; \
Cx [pC] = cabsf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_FP32 || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = cabsf (Ax [p]) for all anz entries, parallelized over nthreads.
// Handles both the full case (Ab == NULL) and the bitmap case, where
// Ab [p] == 0 marks entries that must be skipped (C->b was already copied
// from A->b by the caller). Auto-generated file; do not edit.
GrB_Info GB (_unop_apply__abs_fp32_fc32)
(
float *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = (aij) ;
Cx [p] = cabsf (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = (aij) ;
Cx [p] = cabsf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = cabsf (A'): transpose, typecast, and apply the unary operator.
// The whole kernel body lives in the included template, driven by the
// GB_* macro bindings above. Auto-generated file; do not edit.
GrB_Info GB (_unop_tran__abs_fp32_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
mypaint-tiled-surface.c | /* brushlib - The MyPaint Brush Library
* Copyright (C) 2007-2014 Martin Renold <martinxyz@gmx.ch> et. al.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "mypaint-tiled-surface.h"
#include "tiled-surface-private.h"
#include "helpers.h"
#include "brushmodes.h"
#include "operationqueue.h"
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
void process_tile(MyPaintTiledSurface *self, int tx, int ty);
/* Default MyPaintSurface::begin_atomic vfunc: forward to the tiled
 * surface implementation. */
static void
begin_atomic_default(MyPaintSurface *surface)
{
    MyPaintTiledSurface *tiled_surface = (MyPaintTiledSurface *)surface;
    mypaint_tiled_surface_begin_atomic(tiled_surface);
}
/* Default MyPaintSurface::end_atomic vfunc: forward to the tiled
 * surface implementation. */
static void
end_atomic_default(MyPaintSurface *surface, MyPaintRectangle *roi)
{
    MyPaintTiledSurface *tiled_surface = (MyPaintTiledSurface *)surface;
    mypaint_tiled_surface_end_atomic(tiled_surface, roi);
}
/**
* mypaint_tiled_surface_begin_atomic: (skip)
*
* Implementation of #MyPaintSurface::being_atomic vfunc
* Note: Only intended to be used from #MyPaintTiledSurface subclasses, which should chain up to this
* if implementing their own #MyPaintSurface::begin_atomic vfunc.
* Application code should only use mypaint_surface_being_atomic()
*/
void
mypaint_tiled_surface_begin_atomic(MyPaintTiledSurface *self)
{
    /* Reset the dirty bounding box; end_atomic reports whatever has been
     * accumulated into it since this call. */
    self->dirty_bbox.x = 0;
    self->dirty_bbox.y = 0;
    self->dirty_bbox.width = 0;
    self->dirty_bbox.height = 0;
}
/**
* mypaint_tiled_surface_end_atomic: (skip)
*
* Implementation of #MyPaintSurface::end_atomic vfunc
* Note: Only intended to be used from #MyPaintTiledSurface subclasses, which should chain up to this
* if implementing their own #MyPaintSurface::end_atomic vfunc.
* Application code should only use mypaint_surface_end_atomic().
*/
void
mypaint_tiled_surface_end_atomic(MyPaintTiledSurface *self, MyPaintRectangle *roi)
{
    // Flush every queued dab operation, one tile at a time.
    TileIndex *tiles;
    int tiles_n = operation_queue_get_dirty_tiles(self->operation_queue, &tiles);
    int i;
    // FIX: the loop index declaration must come *before* the pragma.
    // "#pragma omp parallel for" must be immediately followed by the for
    // statement; a declaration in between is a compile error when OpenMP
    // is enabled. The index of a parallel for is implicitly private, so
    // declaring it outside the construct is safe.
    #pragma omp parallel for schedule(static) if(self->threadsafe_tile_requests && tiles_n > 3)
    for (i = 0; i < tiles_n; i++) {
        process_tile(self, tiles[i].x, tiles[i].y);
    }
    operation_queue_clear_dirty_tiles(self->operation_queue);
    // Report the area touched since begin_atomic, if the caller asked.
    if (roi) {
        *roi = self->dirty_bbox;
    }
}
/**
* mypaint_tiled_surface_tile_request_start:
*
* Fetch a tile out from the underlying tile store.
* When successfull, request->data will be set to point to the fetched tile.
* Consumers must *always* call mypaint_tiled_surface_tile_request_end() with the same
* request to complete the transaction.
*/
void mypaint_tiled_surface_tile_request_start(MyPaintTiledSurface *self, MyPaintTileRequest *request)
{
    /* The backend hook is mandatory; subclasses install it in init(). */
    MyPaintTileRequestStartFunction start = self->tile_request_start;
    assert(start);
    start(self, request);
}
/**
* mypaint_tiled_surface_tile_request_end:
*
* Put a (potentially modified) tile back into the underlying tile store.
*
* Consumers must *always* call mypaint_tiled_surface_tile_request_start() with the same
* request to start the transaction before calling this function.
*/
void mypaint_tiled_surface_tile_request_end(MyPaintTiledSurface *self, MyPaintTileRequest *request)
{
    /* The backend hook is mandatory; subclasses install it in init(). */
    MyPaintTileRequestEndFunction end = self->tile_request_end;
    assert(end);
    end(self, request);
}
/* FIXME: either expose this through MyPaintSurface, or move it into the brush engine */
/**
* mypaint_tiled_surface_set_symmetry_state:
*
* @active: TRUE to enable, FALSE to disable.
* @center_x: X axis to mirror events across.
*
* Enable/Disable symmetric brush painting across an X axis.
*/
void
mypaint_tiled_surface_set_symmetry_state(MyPaintTiledSurface *self, gboolean active, float center_x)
{
    /* Mirror axis first, then the enable flag; draw_dab consults both. */
    self->surface_center_x = center_x;
    self->surface_do_symmetry = active;
}
/**
* mypaint_tile_request_init:
*
* Initialize a request for use with mypaint_tiled_surface_tile_request_start()
* and mypaint_tiled_surface_tile_request_end()
*/
void
mypaint_tile_request_init(MyPaintTileRequest *data, int level,
                          int tx, int ty, gboolean readonly)
{
    /* Identify the requested tile and access mode. */
    data->mipmap_level = level;
    data->tx = tx;
    data->ty = ty;
    data->readonly = readonly;
    /* The backend fills in buffer/context in tile_request_start(). */
    data->buffer = NULL;
    data->context = NULL;
    /* Record which worker thread issued the request (-1 without OpenMP). */
#ifdef _OPENMP
    data->thread_id = omp_get_thread_num();
#else
    data->thread_id = -1;
#endif
}
// Must be threadsafe.
// Rotate (x, y) by the dab angle (given as sin/cos), scale the rotated y
// by the aspect ratio, and return the squared length of the result.
static inline float
calculate_r_sample(float x, float y, float aspect_ratio,
                   float sn, float cs)
{
    const float rotated_y = (y * cs - x * sn) * aspect_ratio;
    const float rotated_x = y * sn + x * cs;
    return rotated_y * rotated_y + rotated_x * rotated_x;
}
/* Normalized squared distance of pixel (xp, yp)'s center from the dab
 * center, in the dab's rotated/aspect-scaled frame: 0.0 at the center,
 * 1.0 at the fringe (range 0.0..1.0*sqrt(2)).
 * Code duplication, see brush::count_dabs_to(). */
static inline float
calculate_rr(int xp, int yp, float x, float y, float aspect_ratio,
             float sn, float cs, float one_over_radius2)
{
    const float dy = (yp + 0.5f - y);
    const float dx = (xp + 0.5f - x);
    const float ry = (dy * cs - dx * sn) * aspect_ratio;
    const float rx = dy * sn + dx * cs;
    return (ry * ry + rx * rx) * one_over_radius2;
}
/* Signed test telling on which side of the line with direction (vx, vy)
 * the point (px, py) lies; the sign of the returned value selects the
 * side, zero means exactly on the line. */
static inline float
sign_point_in_line( float px, float py, float vx, float vy )
{
    const float a = (px - vx) * (-vy);
    const float b = vx * (py - vy);
    return a - b;
}
/* Orthogonal projection of point (px, py) onto the line through the
 * origin with direction (lx, ly): out = l * (p.l / l.l). */
static inline void
closest_point_to_line( float lx, float ly, float px, float py, float *ox, float *oy )
{
    const float t = (px * lx + py * ly) / (lx * lx + ly * ly);
    *ox = t * lx;
    *oy = t * ly;
}
// Must be threadsafe
//
// Antialiased variant of calculate_rr(): returns an effective normalized
// squared distance for pixel (xp, yp) that accounts for partial pixel
// coverage near the dab fringe.
//
// This works by taking the visibility at the nearest point
// and dividing by 1.0 + delta.
//
// - nearest point: point where the dab has more influence
// - farthest point: point at a fixed distance away from
// the nearest point
// - delta: how much occluded is the farthest point relative
// to the nearest point
static inline float
calculate_rr_antialiased(int xp, int yp, float x, float y, float aspect_ratio,
float sn, float cs, float one_over_radius2,
float r_aa_start)
{
// calculate pixel position and borders in a way
// that the dab's center is always at zero
float pixel_right = x - (float)xp;
float pixel_bottom = y - (float)yp;
float pixel_center_x = pixel_right - 0.5f;
float pixel_center_y = pixel_bottom - 0.5f;
float pixel_left = pixel_right - 1.0f;
float pixel_top = pixel_bottom - 1.0f;
float nearest_x, nearest_y; // nearest to origin, but still inside pixel
float farthest_x, farthest_y; // farthest from origin, but still inside pixel
float r_near, r_far, rr_near, rr_far;
// Dab's center is inside pixel?
if( pixel_left<0 && pixel_right>0 &&
pixel_top<0 && pixel_bottom>0 )
{
nearest_x = 0;
nearest_y = 0;
r_near = rr_near = 0;
}
else
{
// Project the pixel center onto the dab's axis, then clamp the result
// back into the pixel's rectangle.
closest_point_to_line( cs, sn, pixel_center_x, pixel_center_y, &nearest_x, &nearest_y );
nearest_x = CLAMP( nearest_x, pixel_left, pixel_right );
nearest_y = CLAMP( nearest_y, pixel_top, pixel_bottom );
// XXX: precision of "nearest" values could be improved
// by intersecting the line that goes from nearest_x/Y to 0
// with the pixel's borders here, however the improvements
// would probably not justify the performance cost.
r_near = calculate_r_sample( nearest_x, nearest_y, aspect_ratio, sn, cs );
rr_near = r_near * one_over_radius2;
}
// out of dab's reach?
if( rr_near > 1.0f )
return rr_near;
// check on which side of the dab's line is the pixel center
float center_sign = sign_point_in_line( pixel_center_x, pixel_center_y, cs, -sn );
// radius of a circle with area=1
// A = pi * r * r
// r = sqrt(1/pi)
const float rad_area_1 = sqrtf( 1.0f / M_PI );
// center is below dab
if( center_sign < 0 )
{
farthest_x = nearest_x - sn*rad_area_1;
farthest_y = nearest_y + cs*rad_area_1;
}
// above dab
else
{
farthest_x = nearest_x + sn*rad_area_1;
farthest_y = nearest_y - cs*rad_area_1;
}
r_far = calculate_r_sample( farthest_x, farthest_y, aspect_ratio, sn, cs );
rr_far = r_far * one_over_radius2;
// check if we can skip heavier AA
if( r_far < r_aa_start )
return (rr_far+rr_near) * 0.5f;
// calculate AA approximate
float visibilityNear = 1.0f - rr_near;
float delta = rr_far - rr_near;
float delta2 = 1.0f + delta;
visibilityNear /= delta2;
return 1.0f - visibilityNear;
}
/* Piecewise-linear opacity falloff: segment 1 applies up to rr==hardness,
 * segment 2 after it; beyond the dab radius (rr > 1) opacity is zero.
 * The offsets/slopes are pre-computed once per dab in render_dab_mask().
 * Must be threadsafe. */
static inline float
calculate_opa(float rr, float hardness,
              float segment1_offset, float segment1_slope,
              float segment2_offset, float segment2_slope) {
    float opa;
    if (rr <= hardness) {
        opa = segment1_offset + rr * segment1_slope;
    } else {
        opa = segment2_offset + rr * segment2_slope;
    }
    if (rr > 1.0f) {
        opa = 0.0f;
    }
#ifdef HEAVY_DEBUG
    assert(isfinite(opa));
    assert(opa >= 0.0f && opa <= 1.0f);
#endif
    return opa;
}
// Must be threadsafe
//
// Render the per-pixel opacity mask of one dab into `mask`, run-length
// encoded: a zero opacity value is followed by a 16-bit count of array
// elements (pixels*4) to skip; the encoding is terminated by two zeros.
// Opacities are 15-bit fixed point (1<<15 == fully opaque).
void render_dab_mask (uint16_t * mask,
float x, float y,
float radius,
float hardness,
float aspect_ratio, float angle
)
{
hardness = CLAMP(hardness, 0.0, 1.0);
if (aspect_ratio<1.0) aspect_ratio=1.0;
assert(hardness != 0.0); // assured by caller
// For a graphical explanation, see:
// http://wiki.mypaint.info/Development/Documentation/Brushlib
//
// The hardness calculation is explained below:
//
// Dab opacity gradually fades out from the center (rr=0) to
// fringe (rr=1) of the dab. How exactly depends on the hardness.
// We use two linear segments, for which we pre-calculate slope
// and offset here.
//
// opa
// ^
// * .
// | *
// | .
// +-----------*> rr = (distance_from_center/radius)^2
// 0 1
//
float segment1_offset = 1.0f;
float segment1_slope = -(1.0f/hardness - 1.0f);
float segment2_offset = hardness/(1.0f-hardness);
float segment2_slope = -hardness/(1.0f-hardness);
// for hardness == 1.0, segment2 will never be used
float angle_rad=angle/360*2*M_PI;
float cs=cos(angle_rad);
float sn=sin(angle_rad);
// Clipped pixel bounding box of the dab inside the tile.
const float r_fringe = radius + 1.0f; // +1.0 should not be required, only to be sure
int x0 = floor (x - r_fringe);
int y0 = floor (y - r_fringe);
int x1 = floor (x + r_fringe);
int y1 = floor (y + r_fringe);
if (x0 < 0) x0 = 0;
if (y0 < 0) y0 = 0;
if (x1 > MYPAINT_TILE_SIZE-1) x1 = MYPAINT_TILE_SIZE-1;
if (y1 > MYPAINT_TILE_SIZE-1) y1 = MYPAINT_TILE_SIZE-1;
const float one_over_radius2 = 1.0f/(radius*radius);
// Pre-calculate rr and put it in the mask.
// This an optimization that makes use of auto-vectorization
// OPTIMIZE: if using floats for the brush engine, store these directly in the mask
float rr_mask[MYPAINT_TILE_SIZE*MYPAINT_TILE_SIZE+2*MYPAINT_TILE_SIZE];
// Small dabs get the antialiased distance; AA is unnecessary above ~3px.
if (radius < 3.0f)
{
const float aa_border = 1.0f;
float r_aa_start = ((radius>aa_border) ? (radius-aa_border) : 0);
r_aa_start *= r_aa_start / aspect_ratio;
int yp;
for (yp = y0; yp <= y1; yp++) {
int xp;
for (xp = x0; xp <= x1; xp++) {
const float rr = calculate_rr_antialiased(xp, yp,
x, y, aspect_ratio,
sn, cs, one_over_radius2,
r_aa_start);
rr_mask[(yp*MYPAINT_TILE_SIZE)+xp] = rr;
}
}
}
else
{
int yp;
for (yp = y0; yp <= y1; yp++) {
int xp;
for (xp = x0; xp <= x1; xp++) {
const float rr = calculate_rr(xp, yp,
x, y, aspect_ratio,
sn, cs, one_over_radius2);
rr_mask[(yp*MYPAINT_TILE_SIZE)+xp] = rr;
}
}
}
// we do run length encoding: if opacity is zero, the next
// value in the mask is the number of pixels that can be skipped.
uint16_t * mask_p = mask;
int skip=0;
skip += y0*MYPAINT_TILE_SIZE;
int yp;
for (yp = y0; yp <= y1; yp++) {
skip += x0;
int xp;
for (xp = x0; xp <= x1; xp++) {
const float rr = rr_mask[(yp*MYPAINT_TILE_SIZE)+xp];
const float opa = calculate_opa(rr, hardness,
segment1_offset, segment1_slope,
segment2_offset, segment2_slope);
const uint16_t opa_ = opa * (1<<15);
if (!opa_) {
skip++;
} else {
if (skip) {
// Skip counts are stored in units of 4 (RGBA stride).
*mask_p++ = 0;
*mask_p++ = skip*4;
skip = 0;
}
*mask_p++ = opa_;
}
}
// Account for the pixels to the right of the box on this row.
skip += MYPAINT_TILE_SIZE-xp;
}
// Terminator: zero opacity followed by zero skip.
*mask_p++ = 0;
*mask_p++ = 0;
}
// Must be threadsafe
//
// Apply one queued draw-dab operation to the RGBA tile buffer `rgba_p`
// of tile (tx, ty), using `mask` as scratch space for the dab mask.
void
process_op(uint16_t *rgba_p, uint16_t *mask,
int tx, int ty, OperationDataDrawDab *op)
{
// first, we calculate the mask (opacity for each pixel)
render_dab_mask(mask,
op->x - tx*MYPAINT_TILE_SIZE,
op->y - ty*MYPAINT_TILE_SIZE,
op->radius,
op->hardness,
op->aspect_ratio, op->angle
);
// second, we use the mask to stamp a dab for each activated blend mode
if (op->normal) {
if (op->color_a == 1.0) {
draw_dab_pixels_BlendMode_Normal(mask, rgba_p,
op->color_r, op->color_g, op->color_b, op->normal*op->opaque*(1<<15));
} else {
// normal case for brushes that use smudging (eg. watercolor)
draw_dab_pixels_BlendMode_Normal_and_Eraser(mask, rgba_p,
op->color_r, op->color_g, op->color_b, op->color_a*(1<<15), op->normal*op->opaque*(1<<15));
}
}
if (op->lock_alpha) {
draw_dab_pixels_BlendMode_LockAlpha(mask, rgba_p,
op->color_r, op->color_g, op->color_b, op->lock_alpha*op->opaque*(1<<15));
}
if (op->colorize) {
draw_dab_pixels_BlendMode_Color(mask, rgba_p,
op->color_r, op->color_g, op->color_b,
op->colorize*op->opaque*(1<<15));
}
}
// Must be threadsafe
//
// Drain and apply all queued draw-dab operations for tile (tx, ty).
void
process_tile(MyPaintTiledSurface *self, int tx, int ty)
{
    TileIndex tile_index = {tx, ty};
    OperationDataDrawDab *op = operation_queue_pop(self->operation_queue, tile_index);
    if (!op) {
        return;
    }
    MyPaintTileRequest request_data;
    const int mipmap_level = 0;
    mypaint_tile_request_init(&request_data, mipmap_level, tx, ty, FALSE);
    mypaint_tiled_surface_tile_request_start(self, &request_data);
    uint16_t * rgba_p = request_data.buffer;
    if (!rgba_p) {
        printf("Warning: Unable to get tile!\n");
        // FIX: previously the popped op leaked and the request was left
        // open. Free the op and complete the transaction, as the
        // tile_request_start() contract requires a matching
        // tile_request_end() in all cases.
        free(op);
        mypaint_tiled_surface_tile_request_end(self, &request_data);
        return;
    }
    uint16_t mask[MYPAINT_TILE_SIZE*MYPAINT_TILE_SIZE+2*MYPAINT_TILE_SIZE];
    while (op) {
        process_op(rgba_p, mask, tile_index.x, tile_index.y, op);
        free(op);
        op = operation_queue_pop(self->operation_queue, tile_index);
    }
    mypaint_tiled_surface_tile_request_end(self, &request_data);
}
// OPTIMIZE: send a list of the exact changed rects instead of a bounding box
// to minimize the area being composited? Profile to see the effect first.
//
// Grow the surface's dirty bounding box to cover one dab operation.
void
update_dirty_bbox(MyPaintTiledSurface *self, OperationDataDrawDab *op)
{
    /* Conservative pixel extent of the dab (+1.0 fringe for safety). */
    const float r_fringe = op->radius + 1.0f;
    const int x0 = floor (op->x - r_fringe);
    const int y0 = floor (op->y - r_fringe);
    const int x1 = floor (op->x + r_fringe);
    const int y1 = floor (op->y + r_fringe);
    mypaint_rectangle_expand_to_include_point(&self->dirty_bbox, x0, y0);
    mypaint_rectangle_expand_to_include_point(&self->dirty_bbox, x1, y1);
}
// returns TRUE if the surface was modified
//
// Validate and clamp one dab's parameters, convert colors to 15-bit fixed
// point, and queue a copy of the operation for every tile the dab touches.
// The actual pixel work happens later in process_tile().
gboolean draw_dab_internal (MyPaintTiledSurface *self, float x, float y,
float radius,
float color_r, float color_g, float color_b,
float opaque, float hardness,
float color_a,
float aspect_ratio, float angle,
float lock_alpha,
float colorize
)
{
OperationDataDrawDab op_struct;
OperationDataDrawDab *op = &op_struct;
op->x = x;
op->y = y;
op->radius = radius;
op->aspect_ratio = aspect_ratio;
op->angle = angle;
op->opaque = CLAMP(opaque, 0.0f, 1.0f);
op->hardness = CLAMP(hardness, 0.0f, 1.0f);
op->lock_alpha = CLAMP(lock_alpha, 0.0f, 1.0f);
op->colorize = CLAMP(colorize, 0.0f, 1.0f);
// Early-outs for dabs that cannot have any visible effect.
if (op->radius < 0.1f) return FALSE; // don't bother with dabs smaller than 0.1 pixel
if (op->hardness == 0.0f) return FALSE; // infintly small center point, fully transparent outside
if (op->opaque == 0.0f) return FALSE;
color_r = CLAMP(color_r, 0.0f, 1.0f);
color_g = CLAMP(color_g, 0.0f, 1.0f);
color_b = CLAMP(color_b, 0.0f, 1.0f);
color_a = CLAMP(color_a, 0.0f, 1.0f);
// RGB in 15-bit fixed point; alpha stays float (scaled in process_op()).
op->color_r = color_r * (1<<15);
op->color_g = color_g * (1<<15);
op->color_b = color_b * (1<<15);
op->color_a = color_a;
// blending mode preparation: lock_alpha/colorize take their share away
// from the normal blend mode.
op->normal = 1.0f;
op->normal *= 1.0f-op->lock_alpha;
op->normal *= 1.0f-op->colorize;
if (op->aspect_ratio<1.0f) op->aspect_ratio=1.0f;
// Determine the tiles influenced by operation, and queue it for processing for each tile
float r_fringe = radius + 1.0f; // +1.0 should not be required, only to be sure
int tx1 = floor(floor(x - r_fringe) / MYPAINT_TILE_SIZE);
int tx2 = floor(floor(x + r_fringe) / MYPAINT_TILE_SIZE);
int ty1 = floor(floor(y - r_fringe) / MYPAINT_TILE_SIZE);
int ty2 = floor(floor(y + r_fringe) / MYPAINT_TILE_SIZE);
int ty;
for (ty = ty1; ty <= ty2; ty++) {
int tx;
for (tx = tx1; tx <= tx2; tx++) {
const TileIndex tile_index = {tx, ty};
// Each tile owns its own heap copy; process_tile() frees it.
// NOTE(review): malloc result is not checked — on OOM this crashes
// in the *op_copy write below.
OperationDataDrawDab *op_copy = (OperationDataDrawDab *)malloc(sizeof(OperationDataDrawDab));
*op_copy = *op;
operation_queue_add(self->operation_queue, tile_index, op_copy);
}
}
update_dirty_bbox(self, op);
return TRUE;
}
// returns TRUE if the surface was modified
//
// MyPaintSurface::draw_dab vfunc: queue the dab, plus a mirrored copy
// (with the angle negated) when X-axis symmetry is enabled.
int draw_dab (MyPaintSurface *surface, float x, float y,
              float radius,
              float color_r, float color_g, float color_b,
              float opaque, float hardness,
              float color_a,
              float aspect_ratio, float angle,
              float lock_alpha,
              float colorize)
{
    MyPaintTiledSurface *self = (MyPaintTiledSurface *)surface;
    // Normal pass.
    gboolean modified = draw_dab_internal(self, x, y, radius,
                                          color_r, color_g, color_b,
                                          opaque, hardness, color_a,
                                          aspect_ratio, angle,
                                          lock_alpha, colorize);
    // Symmetry pass: reflect x across the configured center axis.
    if (self->surface_do_symmetry) {
        const float symm_x = self->surface_center_x + (self->surface_center_x - x);
        if (draw_dab_internal(self, symm_x, y, radius,
                              color_r, color_g, color_b,
                              opaque, hardness, color_a,
                              aspect_ratio, -angle,
                              lock_alpha, colorize)) {
            modified = TRUE;
        }
    }
    return modified;
}
// MyPaintSurface::get_color vfunc: sample the average (un-premultiplied)
// color under a soft circular dab centered at (x, y).
void get_color (MyPaintSurface *surface, float x, float y,
                float radius,
                float * color_r, float * color_g, float * color_b, float * color_a
                )
{
    MyPaintTiledSurface *self = (MyPaintTiledSurface *)surface;
    if (radius < 1.0f) radius = 1.0f;
    const float hardness = 0.5f;
    const float aspect_ratio = 1.0f;
    const float angle = 0.0f;
    float sum_weight, sum_r, sum_g, sum_b, sum_a;
    sum_weight = sum_r = sum_g = sum_b = sum_a = 0.0f;
    // in case we return with an error
    *color_r = 0.0f;
    *color_g = 1.0f;
    *color_b = 0.0f;
    // WARNING: some code duplication with draw_dab
    float r_fringe = radius + 1.0f; // +1 should not be required, only to be sure
    int tx1 = floor(floor(x - r_fringe) / MYPAINT_TILE_SIZE);
    int tx2 = floor(floor(x + r_fringe) / MYPAINT_TILE_SIZE);
    int ty1 = floor(floor(y - r_fringe) / MYPAINT_TILE_SIZE);
    int ty2 = floor(floor(y + r_fringe) / MYPAINT_TILE_SIZE);
    // FIX: the tile ranges are inclusive, so the count needs +1 in each
    // dimension; without it, single-tile queries computed tiles_n == 0.
    // tiles_n only gates the OpenMP "if" clause below, so this changes
    // when parallelism kicks in, not the result.
    int tiles_n = (tx2 - tx1 + 1) * (ty2 - ty1 + 1);
    int ty;
    // FIX: the loop index declaration must come *before* the pragma.
    // "#pragma omp parallel for" must be immediately followed by the for
    // statement; a declaration in between is a compile error with OpenMP
    // enabled. The index of a parallel for is implicitly private.
    #pragma omp parallel for schedule(static) if(self->threadsafe_tile_requests && tiles_n > 3)
    for (ty = ty1; ty <= ty2; ty++) {
        int tx;
        for (tx = tx1; tx <= tx2; tx++) {
            // Flush queued draw_dab operations so we read settled pixels.
            process_tile(self, tx, ty);
            MyPaintTileRequest request_data;
            const int mipmap_level = 0;
            mypaint_tile_request_init(&request_data, mipmap_level, tx, ty, TRUE);
            mypaint_tiled_surface_tile_request_start(self, &request_data);
            uint16_t * rgba_p = request_data.buffer;
            if (!rgba_p) {
                printf("Warning: Unable to get tile!\n");
                break;
            }
            // first, we calculate the mask (opacity for each pixel)
            uint16_t mask[MYPAINT_TILE_SIZE*MYPAINT_TILE_SIZE+2*MYPAINT_TILE_SIZE];
            render_dab_mask(mask,
                            x - tx*MYPAINT_TILE_SIZE,
                            y - ty*MYPAINT_TILE_SIZE,
                            radius,
                            hardness,
                            aspect_ratio, angle
                            );
            // The sums are shared between worker threads; serialize the
            // accumulation. TODO: try atomic operations instead.
            #pragma omp critical
            {
                get_color_pixels_accumulate (mask, rgba_p,
                                             &sum_weight, &sum_r, &sum_g, &sum_b, &sum_a);
            }
            mypaint_tiled_surface_tile_request_end(self, &request_data);
        }
    }
    assert(sum_weight > 0.0f);
    sum_a /= sum_weight;
    sum_r /= sum_weight;
    sum_g /= sum_weight;
    sum_b /= sum_weight;
    *color_a = sum_a;
    // now un-premultiply the alpha
    if (sum_a > 0.0f) {
        *color_r = sum_r / sum_a;
        *color_g = sum_g / sum_a;
        *color_b = sum_b / sum_a;
    } else {
        // it is all transparent, so don't care about the colors
        // (let's make them ugly so bugs will be visible)
        *color_r = 0.0f;
        *color_g = 1.0f;
        *color_b = 0.0f;
    }
    // fix rounding problems that do happen due to floating point math
    *color_r = CLAMP(*color_r, 0.0f, 1.0f);
    *color_g = CLAMP(*color_g, 0.0f, 1.0f);
    *color_b = CLAMP(*color_b, 0.0f, 1.0f);
    *color_a = CLAMP(*color_a, 0.0f, 1.0f);
}
/**
* mypaint_tiled_surface_init: (skip)
*
* Initialize the surface, passing in implementations of the tile backend.
* Note: Only intended to be called from subclasses of #MyPaintTiledSurface
**/
void
mypaint_tiled_surface_init(MyPaintTiledSurface *self,
                           MyPaintTileRequestStartFunction tile_request_start,
                           MyPaintTileRequestEndFunction tile_request_end)
{
    /* Chain up to the plain surface init, then install our vfuncs. */
    mypaint_surface_init(&self->parent);
    self->parent.draw_dab = draw_dab;
    self->parent.get_color = get_color;
    self->parent.begin_atomic = begin_atomic_default;
    self->parent.end_atomic = end_atomic_default;
    /* Tile backend hooks supplied by the subclass. */
    self->tile_request_start = tile_request_start;
    self->tile_request_end = tile_request_end;
    self->tile_size = MYPAINT_TILE_SIZE;
    self->threadsafe_tile_requests = FALSE;
    /* Start with an empty dirty region and symmetry disabled. */
    self->dirty_bbox.x = 0;
    self->dirty_bbox.y = 0;
    self->dirty_bbox.width = 0;
    self->dirty_bbox.height = 0;
    self->surface_do_symmetry = FALSE;
    self->surface_center_x = 0.0f;
    self->operation_queue = operation_queue_new();
}
/**
* mypaint_tiled_surface_destroy: (skip)
*
* Deallocate resources set up by mypaint_tiled_surface_init()
* Does not free the #MyPaintTiledSurface itself.
* Note: Only intended to be called from subclasses of #MyPaintTiledSurface
*/
// Release the operation queue created in mypaint_tiled_surface_init().
// Does not free the MyPaintTiledSurface struct itself.
void
mypaint_tiled_surface_destroy(MyPaintTiledSurface *self)
{
operation_queue_free(self->operation_queue);
}
|
GB_binop__rminus_fp32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__rminus_fp32
// A.*B function (eWiseMult): GB_AemultB__rminus_fp32
// A*D function (colscale): GB_AxD__rminus_fp32
// D*A function (rowscale): GB_DxB__rminus_fp32
// C+=B function (dense accum): GB_Cdense_accumB__rminus_fp32
// C+=b function (dense accum): GB_Cdense_accumb__rminus_fp32
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__rminus_fp32
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__rminus_fp32
// C=scalar+B GB_bind1st__rminus_fp32
// C=scalar+B' GB_bind1st_tran__rminus_fp32
// C=A+scalar GB_bind2nd__rminus_fp32
// C=A'+scalar GB_bind2nd_tran__rminus_fp32
// C type: float
// A type: float
// B,b type: float
// BinaryOp: cij = (bij - aij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
float bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (y - x) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RMINUS || GxB_NO_FP32 || GxB_NO_RMINUS_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A and B are all dense. The kernel body lives in the
// included template; this wrapper only binds the rminus_fp32 macros.
// NOTE(review): unlike its siblings this wrapper has no GB_DISABLE guard —
// presumably the caller checks; confirm against the generic dispatch.
void GB_Cdense_ewise3_accum__rminus_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A and B are all dense; returns GrB_NO_VALUE when this
// hard-coded kernel is compiled out (GB_DISABLE) so the caller falls back
// to the generic implementation. Auto-generated; do not edit.
GrB_Info GB_Cdense_ewise3_noaccum__rminus_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix, with the work
// pre-split into ntasks slices (kfirst/klast/pstart arrays) over nthreads.
// Auto-generated; do not edit.
GrB_Info GB_Cdense_accumB__rminus_fp32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// Accumulate a scalar into a dense matrix: C += b, using RMINUS fp32.
// Fix: the original had a second, unreachable `return (GrB_SUCCESS) ;` after
// the inner block, which already returns on every path; the dead statement
// has been removed.
GrB_Info GB_Cdense_accumb__rminus_fp32
(
    GrB_Matrix C,
    const GB_void *p_bwork,     // points to the scalar b, of type float
    const int nthreads          // # of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Column scale: C = A*D where D is diagonal, applying RMINUS fp32 entrywise.
GrB_Info GB_AxD__rminus_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,  // if true, A's values are ignored
    const GrB_Matrix D, bool D_is_pattern,  // if true, D's values are ignored
    // slice arrays for the parallel partition of A (TODO confirm in caller):
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the template writes results directly into C's value array
    float *GB_RESTRICT Cx = (float *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Row scale: C = D*B where D is diagonal, applying RMINUS fp32 entrywise.
GrB_Info GB_DxB__rminus_fp32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,  // if true, D's values are ignored
    const GrB_Matrix B, bool B_is_pattern,  // if true, B's values are ignored
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the template writes results directly into C's value array
    float *GB_RESTRICT Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B with the RMINUS fp32 operator.  The result
// pattern is the union of A and B.  Frees the slice workspaces on exit via
// GB_FREE_ALL (defined just above).
GrB_Info GB_AaddB__rminus_fp32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,             // optional mask (may be NULL)
    const bool Mask_struct,         // if true, use only M's structure
    const bool Mask_comp,           // if true, the mask is complemented
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,            // whether C's hyperlist matches M's
                                    // (presumably — TODO confirm in caller)
    const int64_t *GB_RESTRICT C_to_M,  // mappings from C's vectors to M/A/B
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // slice workspaces; allocated inside the template, freed by GB_FREE_ALL
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B with the RMINUS fp32 operator.  The
// result pattern is the intersection of A and B.  Frees the slice workspaces
// on exit via GB_FREE_ALL (defined above GB_AaddB).
GrB_Info GB_AemultB__rminus_fp32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,             // optional mask (may be NULL)
    const bool Mask_struct,         // if true, use only M's structure
    const bool Mask_comp,           // if true, the mask is complemented
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,  // mappings from C's vectors to M/A/B
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // slice workspaces; allocated inside the template, freed by GB_FREE_ALL
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Apply the RMINUS fp32 operator with its first argument bound to a scalar:
// Cx [p] = rminus (x, Bx [p]) = Bx [p] - x, for every entry present in B.
GrB_Info GB_bind1st__rminus_fp32
(
    GB_void *Cx_output,             // Cx and Bx may be aliased
    const GB_void *x_input,         // the bound scalar x
    const GB_void *Bx_input,        // B's value array
    const int8_t *GB_RESTRICT Bb,   // B's bitmap (NULL if B is not bitmap)
    int64_t anz,                    // # of entries to process
    int nthreads                    // # of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip positions not present in the bitmap
        if (GBB (Bb, p))
        {
            // z = rminus (x, bij) = bij - x
            Cx [p] = (Bx [p] - x) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Apply the RMINUS fp32 operator with its second argument bound to a scalar:
// Cx [p] = rminus (Ax [p], y) = y - Ax [p], for every entry present in A.
GrB_Info GB_bind2nd__rminus_fp32
(
    GB_void *Cx_output,             // Cx and Ax may be aliased
    const GB_void *Ax_input,        // A's value array
    const GB_void *y_input,         // the bound scalar y
    const int8_t *GB_RESTRICT Ab,   // A's bitmap (NULL if A is not bitmap)
    int64_t anz,                    // # of entries to process
    int nthreads                    // # of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float y = (*((float *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip positions not present in the bitmap
        if (GBB (Ab, p))
        {
            // z = rminus (aij, y) = y - aij
            Cx [p] = (y - Ax [p]) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = (aij - x) ; \
}
// C = op (x, A'): transpose A and apply RMINUS with x bound as the first
// argument, so each entry becomes aij - x (via GB_CAST_OP defined above).
GrB_Info GB_bind1st_tran__rminus_fp32
(
    GrB_Matrix C,
    const GB_void *x_input,         // the bound scalar x
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    float
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows this kernel
    #undef  GB_ATYPE
    #define GB_ATYPE \
    float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = (y - aij) ; \
}
// C = op (A', y): transpose A and apply RMINUS with y bound as the second
// argument, so each entry becomes y - aij (via GB_CAST_OP defined above).
GrB_Info GB_bind2nd_tran__rminus_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,         // the bound scalar y
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
DRB003-antidep2-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
A two-level loop nest with loop carried anti-dependence on the outer level.
Data race pair: a[i][j]@67:7 vs. a[i+1][j]@67:18
*/
#include "omprace.h"
#include <omp.h>
#include <stdio.h>
// DataRaceBench DRB003: a two-level loop nest with a loop-carried
// anti-dependence on the outer loop.  The data race below is INTENTIONAL
// (this is a "yes-race" benchmark); do not "fix" it.
int main(int argc,char *argv[])
{
  omprace_init();
  int i, j;
  int len = 20;
  double a[20][20];
  // initialize every cell so the reduction below starts from known values
  for (i=0; i< len; i++)
    for (j=0; j<len; j++)
      a[i][j] = 0.5;
  // RACE: iteration i reads a[i+1][j] while iteration i+1 (possibly on
  // another thread) writes a[i+1][j] — the documented race pair.
  #pragma omp parallel for private(j)
  for (i = 0; i < len - 1; i += 1) {
    for (j = 0; j < len ; j += 1) {
      a[i][j] += a[i + 1][j];
    }
  }
  printf ("a[10][10]=%f\n", a[10][10]);
  omprace_fini();
  return 0;
}
|
7132.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"
/* Array initialization. */
/* Array initialization: fill A with the deterministic pattern (i+j)/nj so
   runs are reproducible. */
static
void init_array (int ni, int nj,
   DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
  int row, col;
  for (row = 0; row < ni; row++)
  {
    for (col = 0; col < nj; col++)
      A[row][col] = ((DATA_TYPE) (row + col) / nj);
  }
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* DCE code.  Must scan the entire live-out data; also usable to check the
   correctness of the output.  Prints to stderr, one value per element with a
   newline every 20 flattened indices. */
static
void print_array(int ni, int nj,
   DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int r, c;
  for (r = 0; r < ni; r++)
  {
    for (c = 0; c < nj; c++)
    {
      fprintf(stderr, DATA_PRINTF_MODIFIER, B[r][c]);
      if ((r * NJ + c) % 20 == 0)
        fprintf(stderr, "\n");
    }
  }
  fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Main computational kernel: 3x3 convolution of A into B, skipping the
   one-cell border.  The whole function is timed, including call/return.
   Fix: the original pragmas contained unfilled template placeholders
   (`collapse(#P12)`, `schedule(#P9, #P11)`, a bare `#pragma omp`, and
   `distribute #p #p`) which are not valid OpenMP and break compilation.
   They are replaced with a single parallel-for over the outer loop;
   iterations are independent (B is written, A only read), so this is safe. */
static
void kernel_conv2d(int ni,
      int nj,
      DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
      DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;
#pragma scop
  #pragma omp parallel for private(j)
  for (i = 1; i < _PB_NI - 1; ++i)
  {
    for (j = 1; j < _PB_NJ - 1; ++j)
    {
      B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
              + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
              + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
    }
  }
#pragma endscop
  // printf("Kernal computation complete !!\n");
}
int main(int argc, char** argv)
{
  /* Retrieve problem size (compile-time constants from convolution-2d.h). */
  int ni = NI;
  int nj = NJ;
  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);
  /* Initialize array(s). */
  init_array (ni, nj, POLYBENCH_ARRAY(A));
  /* Start timer. */
  //polybench_start_instruments;
  polybench_timer_start();
  /* Run kernel. */
  kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));
  /* Stop and print timer (wall-clock time for the kernel only). */
  polybench_timer_stop();
  polybench_timer_print();
  //polybench_stop_instruments;
  //polybench_print_instruments;
  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));
  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);
  return 0;
}
|
tsp_rnd07.c | /*
Description:
This program executes my "Random Swapping" algorithm to solve the "Travelling Salesman Problem"
Abides by Lab 3 Exercise 2 requirements
Author:
Georgios Evangelou (1046900)
Year: 5
Parallel Programming in Machine Learning Problems
Electrical and Computer Engineering Department, University of Patras
System Specifications:
CPU: AMD Ryzen 2600 (6 cores/12 threads, @3.8 GHz, 6786.23 bogomips)
GPU: Nvidia GTX 1050 (dual-fan, overclocked)
RAM: 8GB (dual-channel, @2666 MHz)
Version Notes:
Compiles/Runs/Debugs with: gcc tsp_rnd07.c -o tsp_rnd07 -lm -fopt-info -fopenmp -O3 -pg && time ./tsp_rnd07 && gprof ./tsp_rnd07
Executes the algorithm for 10.000 cities, spanning in an area of 1.000x1.000 km and produces correct results
Inherits all settings of the version tsp_rnd05 unless stated otherwise
Needs: ~02 seconds to find a path of length 600.000 (3x-better than tsp_rnd07)
At (15repetitions*12threads*200.000.000steps/repetition/thread) steps it calculated an optimal path of length 454403.3
totaling 08 minutes and 06 seconds in time duration
*/
// ****************************************************************************************************************
#pragma GCC optimize("O3","unroll-loops","omit-frame-pointer","inline") //Apply O3 and extra optimizations
#pragma GCC option("arch=native","tune=native","no-zero-upper") //Adapt to the current system
#pragma GCC target("avx") //Enable AVX
// ****************************************************************************************************************
#include "stdio.h"
#include "stdlib.h"
#include "math.h"
#include "time.h"
#include "omp.h"
// ****************************************************************************************************************
#define N 10000
#define Nx 1000
#define Ny 1000
#define VACANT_POSITION_CODE -999999
#define THREADS 12
#define STEPS_PER_THREAD_PER_REPETITION 100000//200000000
#define DEFAULT_MAX_REPETITIONS 1e6//1e6
#define THRESHOLD_DISTANCE 0//600000
#define DEBUG 1
// ****************************************************************************************************************
float CitiesX[N];
float CitiesY[N];
int Path[N+1];
omp_lock_t Locks[N+1];
// ****************************************************************************************************************
// Initializes the cities' positions
// ****************************************************************************************************************
// Place every city at a uniformly random position inside the Nx x Ny area.
void SetCities() {
    printf("Now initializing the positions of the cities...\n");
    int c = 0;
    while (c < N) {
        CitiesX[c] = Nx * (float) rand() / RAND_MAX;
        CitiesY[c] = Ny * (float) rand() / RAND_MAX;
        c++;
    }
}
// ****************************************************************************************************************
// Initializes the traveling path
// ****************************************************************************************************************
// Mark every slot of the path (N+1 entries) as unassigned (-1).
void ResetPath() {
    printf("Now initializing the path...\n");
    int idx = 0;
    while (idx < N + 1) {
        Path[idx] = -1;
        idx++;
    }
}
// ****************************************************************************************************************
// Checks if a city is already in the path
// ****************************************************************************************************************
// Return 1 if city k already appears among the first N path slots, else 0.
int IsInPath(int k) {
    int idx;
    for (idx = 0; idx < N; idx++) {
        if (Path[idx] == k) {
            return 1;
        }
    }
    return 0;
}
// ****************************************************************************************************************
// Creates a random path
// ****************************************************************************************************************
// Build a random permutation path over the N cities, closing the tour by
// copying the first city into slot N.
// Fixes: the original used (N*rand())/RAND_MAX, where N*rand() overflows a
// signed int (undefined behavior) and the quotient can equal N when
// rand() == RAND_MAX, producing an out-of-range city index.  rand() % N
// avoids both problems.
void RandomizePath() {
    printf("Now randomizing the path...\n");
    Path[0] = rand() % N;
    Path[N] = Path[0];           // close the tour: last slot mirrors the first
    for (int i = 1; i < N; i++) {
        int k;
        do {
            k = rand() % N;      // retry until an unused city is drawn
        } while (IsInPath(k) == 1);
        Path[i] = k;
    }
}
// ****************************************************************************************************************
// Prints the cities' positions
// ****************************************************************************************************************
// Print the coordinates of every city.
// Fix: removed the unused locals `x` and `y` (they were declared but never
// read or written).
void PrintCities() {
    printf("> The cities are:\n");
    for (int i=0; i<N; i++) {
        printf(">> City: %6d X:%5.2f Y:%5.2f\n", i, CitiesX[i], CitiesY[i] );
    }
    printf("\n");
}
// ****************************************************************************************************************
// Visually maps the cities' positions
// ****************************************************************************************************************
// Print an Ny x Nx character map of the grid: each cell shows a city id+1,
// -1 for a cell holding more than one city, or VACANT_POSITION_CODE.
// Fixes: the ~4 MB int[Ny+1][Nx+1] array lived on the stack and would likely
// overflow the default stack size — it is now `static` (safe here: the
// function is called at most once, from single-threaded code).  Also removed
// the meaningless (float) cast of an int constant stored into an int array.
void MapCities() {
    static int Map[Ny+1][Nx+1];
    printf("Now creating a visual map of the cities...\n");
    for (int i=0; i<Nx+1; i++)
        for (int j=0; j<Ny+1; j++)
            Map[j][i] = VACANT_POSITION_CODE;
    for (int c=0; c<N; c++) {
        int x = (int) CitiesX[c] ;
        int y = (int) CitiesY[c] ;
        // first city in a cell records its id+1; any collision records -1
        if (Map[y][x] == VACANT_POSITION_CODE) Map[y][x] = c+1;
        else Map[y][x] = -1;
    }
    printf("This is the cities' map:\n");
    printf("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n");
    for (int y=0; y<Ny+1; y++){
        for (int x=0; x<Nx+1; x++)
            printf("%8d ", Map[y][x]);
        printf("\n");
    }
    printf("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n");
    printf("\n");
}
// ****************************************************************************************************************
// Finds Euclidean Distance between two cities
// ****************************************************************************************************************
// Euclidean distance between cities A and B.  The differences and products
// are kept in float (matching the float coordinate arrays) before the final
// double sqrt, preserving the original rounding behavior.
double Distance(int A, int B) {
    float dx = CitiesX[A] - CitiesX[B];
    float dy = CitiesY[A] - CitiesY[B];
    return sqrt( dx*dx + dy*dy );
}
// ****************************************************************************************************************
// Finds Euclidean Distance in current path
// ****************************************************************************************************************
// Total Euclidean length of the current path.  Sums the N consecutive edges
// Path[0..N], then adds the Path[N]->Path[0] edge exactly as the original
// did (that edge is zero-length whenever Path[N] == Path[0], the invariant
// RandomizePath establishes).
double PathDistance() {
    double total = 0.0;
    int i = 0;
    while (i < N) {
        total += Distance(Path[i], Path[i+1]);
        i++;
    }
    total += Distance(Path[N], Path[0]);
    return total;
}
// ****************************************************************************************************************
// Swaps cities if swapping results in shorter Distance
// ****************************************************************************************************************
// Randomized improvement pass: each of THREADS threads repeatedly picks two
// random interior path positions A < B, and swaps the cities there iff the
// swap shortens the tour.  Per-position locks guard the two swapped slots;
// neighboring slots (A-1, A+1, B-1, B+1) are read WITHOUT locks, so the
// returned estimate can drift from the true length (main() reconciles by
// recomputing PathDistance()).  Returns totDist plus the summed (negative)
// length changes accepted by all threads.
double SwapCities(double totDist, int repetitions) {
    double totDistChange = 0.0;
    #pragma omp parallel reduction(+:totDistChange) num_threads(THREADS)
    {
        int r = 0;
        do {
            // per-thread seed mixing the iteration, thread id, outer
            // repetition count, and wall-clock time
            unsigned seed = (r++) + 83*omp_get_thread_num() + 1297*repetitions + 11*omp_get_wtime();
            int A = 1 + (int)( ((float) rand_r(&seed) )*(N-2)/((float)RAND_MAX) );
            seed++;
            int B = 1 + (int)( ((float) rand_r(&seed) )*(N-2)/((float)RAND_MAX) );
            while (A==B) {
                // rand() is not thread-safe, hence the critical section
                #pragma omp critical
                B = 1 + (int)( ((float) rand())*(N-2)/((float)RAND_MAX) );
            }
            if (A>B) { int temp = A; A = B; B = temp; } //always: A<B
            int flag = B-A-1; //always:flag=0 when A+1==B
            // lock in ascending order (A<B) so no two threads can deadlock
            omp_set_lock(&Locks[A]); omp_set_lock(&Locks[B]);
            double dist1_old = Distance(Path[A-1], Path[A]); //is always needed
            double dist2_old = (!flag) ? 0 : Distance(Path[A], Path[A+1]); //dist omitted when A,B consecutive
            double dist3_old = (!flag) ? 0 : Distance(Path[B-1], Path[B]); //dist omitted when A,B consecutive
            double dist4_old = Distance(Path[B], Path[B+1]); //is always needed
            double dist1_new = Distance(Path[A-1], Path[B]); //is always needed
            double dist2_new = (!flag) ? 0 : Distance(Path[B], Path[A+1]); //dist omitted when A,B consecutive
            double dist3_new = (!flag) ? 0 : Distance(Path[B-1], Path[A]); //dist omitted when A,B consecutive
            double dist4_new = Distance(Path[A], Path[B+1]); //is always needed
            double distChange = - dist1_old - dist2_old - dist3_old - dist4_old + dist1_new + dist2_new + dist3_new + dist4_new;
            if (distChange < 0) { //Must be <0 if it decreases the total Distance
                // accept: swapping only the two cities leaves all other edges intact
                int temp = Path[A];
                Path[A] = Path[B];
                Path[B] = temp;
            } else distChange=0;
            omp_unset_lock(&Locks[A]); omp_unset_lock(&Locks[B]);
            totDistChange += distChange;
        } while (r < STEPS_PER_THREAD_PER_REPETITION) ;
    }
    return totDist + totDistChange;
}
// ****************************************************************************************************************
// Checks if current program parameters lead to feasible spacial states
// ****************************************************************************************************************
// Sanity check: the grid must have at least one integer cell per city.
// Returns 1 when the configuration is feasible, 0 otherwise.
int ValidateParameters() {
    return (Nx*Ny < N) ? 0 : 1;
}
// ****************************************************************************************************************
// Initializes the locks
// ****************************************************************************************************************
// Create one OpenMP lock per path slot (N+1 of them), used by SwapCities.
void InitializeLocks() {
    int idx = 0;
    while (idx < N + 1) {
        omp_init_lock(&Locks[idx]);
        idx++;
    }
}
// ****************************************************************************************************************
// The main program
// ****************************************************************************************************************
// Entry point: validates the configuration, builds a random tour, then runs
// SwapCities passes until the repetition cap or threshold length is reached.
// Fixes: (1) srand(__TIME__) passed a string literal where srand() requires
// an unsigned int — replaced with srand((unsigned) time(NULL)), preserving
// the intent of reseeding rand() each repetition (needs time.h, added to the
// include block); (2) the %llu printfs were fed plain int expressions whose
// products overflow int (undefined behavior) — the arithmetic is now done in
// unsigned long long.
int main( int argc, const char* argv[] ) {
    printf("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n");
    printf("This program searches for the optimal traveling Distance between %d cities,\n", N);
    printf("spanning in an area of X=(0,%d) and Y=(0,%d)\n", Nx, Ny);
    printf("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n");
    if (ValidateParameters() == 0) {
        printf("\nERROR: NOT ENOUGH SPACE ALLOCATED FOR GIVEN NUMBER OF CITIES\nThe program will now exit.\n"); return 1; }
    int repetitions = 0, MaxRepetitions = DEFAULT_MAX_REPETITIONS; omp_set_dynamic(0);
    if (argc>1) MaxRepetitions = atoi(argv[1]);
    printf("Maximum number of repetitions set at: %d\n", MaxRepetitions);
    printf("Maximum number of steps per thread per repetition set at: %llu\n",
           (unsigned long long) MaxRepetitions * STEPS_PER_THREAD_PER_REPETITION);
    srand(1046900);             // fixed seed: reproducible city layout and start path
    SetCities();
    ResetPath();
    RandomizePath();
    InitializeLocks();
    double prevDist, totDist = PathDistance();
    printf("Now running the main algorithm...\n");
    do {
        srand((unsigned) time(NULL));   // reseed rand() used inside SwapCities
        prevDist = totDist;
        if (repetitions%1==0) printf(">>REPETITION:%5d >>BATCH:%11llu >>ESTIMATED PATH_LENGTH: %.2lf", repetitions,
                                     (unsigned long long) repetitions * STEPS_PER_THREAD_PER_REPETITION * THREADS, totDist);
        if (DEBUG) printf(" >>ACTUAL PATH_LENGTH: %.2lf", PathDistance());
        printf("\n");
        repetitions ++;
        totDist = SwapCities(totDist, repetitions);
    } while ((repetitions<MaxRepetitions+1) && (totDist>THRESHOLD_DISTANCE));
    printf("\nCalculations completed. Results:\n");
    printf("Main-routine Repetitions: %d\n", repetitions);
    printf(" Sub-routine Repetitions: %llu\n",
           (unsigned long long) repetitions * STEPS_PER_THREAD_PER_REPETITION * THREADS);
    //printf("Estimation of the optimal path length: %.2lf\n", totDist);
    printf("Actual optimal path length: %.2lf\n", PathDistance());
    return 0 ;
}
|
ffmygridOMP.c | /* ffmygrid.c
Usage in Matlab: [k lx ly bx by] = ffmygrid(uu,vv,z,xi,yi,Lx,Ly,'Option')
(uu,vv) is k space coordinates.
z is the value at those points.
xi,yi are Cartesian coordinates you wish to have data.
Lx, Ly is convolution kernel size in grid.
'Option' specifies the kind of kernel
'KB' 2d separable Kaiser Bessel function
this is based on 'a Fast Sinc Function Gridding Alogrithm
for Fourier Inversion in Computer Tomography'
by J.K.O'sullivan, IEEE t-mi, Dec 1985
lx ly bx by
Sangwoo Lee, 03/02/2000
*/
#include "math.h"
#include "mex.h"
#include "omp.h"
#include "string.h"
/* function for the zero order modified Bessel funciton of the first kind
*/
/* Zero-order modified Bessel function of the first kind, I0(x).
   Uses the classic Numerical Recipes-style polynomial approximations:
   a power series for |x| < 3.75 and an asymptotic exp(|x|)/sqrt(|x|)
   expansion otherwise.  I0 is even, so only |x| matters.
   Fix: removed the redundant locals `xx` (a plain copy of x) and `fans`
   (a no-op double-to-double cast of ans); behavior is unchanged. */
double bessi0(double x) {
  double y, ax, ans;
  ax = fabs(x);                 /* I0(-x) == I0(x) */
  if (ax < 3.75) {
    /* small-argument polynomial in (x/3.75)^2 */
    y = ax / 3.75;
    y *= y;
    ans =
        1.0 +
        y * (3.5156229 +
             y * (3.0899424 +
                  y * (1.2067492 +
                       y * (0.2659732 + y * (0.360768e-1 + y * 0.45813e-2)))));
  } else {
    /* large-argument asymptotic form */
    y = 3.75 / ax;
    ans = (exp(ax) / sqrt(ax)) *
          (0.39894228 +
           y * (0.1328592e-1 +
                y * (0.225319e-2 +
                     y * (-0.157565e-2 +
                          y * (0.916281e-2 +
                               y * (-0.2057706e-1 +
                                    y * (0.2635537e-1 +
                                         y * (-0.1647633e-1 +
                                              y * 0.392377e-2))))))));
  }
  return ans;
}
/* This function returns the minimum value in an array of n elements*/
/* Return the smallest value among the first n elements of x (n >= 1). */
double min(double x[], int n) {
  double best = x[0];
  for (int idx = 1; idx <= n - 1; idx++) {
    if (x[idx] < best) {
      best = x[idx];
    }
  }
  return best;
}
/* MEX gateway: grid scattered k-space samples (uu,vv,z) onto a Cartesian
   grid via convolution with a separable Kaiser-Bessel kernel, in parallel
   over samples with OpenMP.  Each thread accumulates into a private copy of
   the output and the copies are merged under a critical section.
   NOTE(review): several conditions below use bitwise |/& where logical
   ||/&& was presumably intended; operands are 0/1 so the results coincide,
   but the intent should be confirmed.  The `weight` output (plhs[5]) is
   allocated and zeroed but never populated — the normalization code that
   used it is commented out. */
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
  double *xx, *yy, *xi, *yi, *zzr, *zzi, *lx, *ly;
  double *resultr, *resulti, *rlx, *rly, *rbx, *rby, Lx, Ly, Bx, By, Lxy;
  double *resultr_private, *resulti_private;  /* per-thread accumulators */
  double Kxstep, Kystep;
  double kernelVal;
  char *opt;
  int optlen;
  int i, j, k, counter = 0, starti, startj, endi, endj;
  float pi = 3.141592;  /* NOTE(review): truncated pi; affects Bx/By slightly */
  int M, N, L, N1, N2;
  int flag = 0;         /* set to 1 when the input data z is complex */
  double distx, disty, *weight, minxi, minyi;
  /* get input arguments */
  xx = mxGetPr(prhs[0]);
  yy = mxGetPr(prhs[1]);
  zzr = mxGetPr(prhs[2]);
  zzi = mxGetPi(prhs[2]);  /* NULL when z is real; guarded by `flag` below */
  xi = mxGetPr(prhs[3]);
  yi = mxGetPr(prhs[4]);
  lx = mxGetPr(prhs[5]);
  ly = mxGetPr(prhs[6]);
  if (nrhs > 8 | nrhs < 6) {  /* NOTE(review): bitwise |, works but likely meant || */
    mexErrMsgTxt("\ninvalid number of input");
  }
  optlen = mxGetNumberOfElements(prhs[7]) + 1;
  opt = mxCalloc(optlen, sizeof(char));
  mxGetString(prhs[7], opt, optlen);
  /* Check for real or complex datatypes */
  if (mxIsComplex(prhs[0])) {
    mexErrMsgTxt("xx is not real");
  }
  if (mxIsComplex(prhs[1])) {
    mexErrMsgTxt("yy is not real");
  }
  if (mxIsComplex(prhs[2])) {
    flag = 1;
  }
  if (mxIsComplex(prhs[3])) {
    mexErrMsgTxt("xi is not real");
  }
  if (mxIsComplex(prhs[4])) {
    mexErrMsgTxt("yi is not real");
  }
  /* Check for double datatype. Single is not supported */
  if (!(mxIsDouble(prhs[0]))) {
    mexErrMsgTxt("xx is not double precision ");
  }
  if (!(mxIsDouble(prhs[1]))) {
    mexErrMsgTxt("yy is not double precision");
  }
  if (!(mxIsDouble(prhs[2]))) {
    mexErrMsgTxt("data is not double precision");
  }
  if (!(mxIsDouble(prhs[3]))) {
    mexErrMsgTxt("xi is not double precision");
  }
  if (!(mxIsDouble(prhs[4]))) {
    mexErrMsgTxt("yi is not double precision");
  }
  /* Determine lengths, accepting either row or column vectors */
  if ((L = mxGetM(prhs[3])) == 1) {
    L = mxGetN(prhs[3]);
  }
  if ((M = mxGetM(prhs[4])) == 1) {
    M = mxGetN(prhs[4]);
  }
  if ((N = mxGetM(prhs[0])) == 1) {
    N = mxGetN(prhs[0]);
  }
  if ((N1 = mxGetM(prhs[1])) == 1) {
    N1 = mxGetN(prhs[1]);
  }
  if ((N2 = mxGetM(prhs[2])) == 1) {
    N2 = mxGetN(prhs[2]);
  }
  if ((N != N1) | (N1 != N2)) {  /* NOTE(review): bitwise |, works here */
    mexErrMsgTxt("input grid dimension does not match");
  }
  /*mexPrintf("\n%s",opt);
  mexPrintf("\nL=%d, M=%d, N=%d, N1=%d, N2=%d\n",L,M,N,N1,N2);
  */
  /* Allocate outputs: gridded data, kernel sizes/betas, and weight map */
  if (!(plhs[0] = mxCreateDoubleMatrix(M, L, mxCOMPLEX))) {
    mexErrMsgTxt("\nfailed to create a matrix");
  }
  if (!(plhs[1] = mxCreateDoubleMatrix(1, 1, mxREAL))) {
    mexErrMsgTxt("\nfailed to create a matrix");
  }
  if (!(plhs[2] = mxCreateDoubleMatrix(1, 1, mxREAL))) {
    mexErrMsgTxt("\nfailed to create a matrix");
  }
  if (!(plhs[3] = mxCreateDoubleMatrix(1, 1, mxREAL))) {
    mexErrMsgTxt("\nfailed to create a matrix");
  }
  if (!(plhs[4] = mxCreateDoubleMatrix(1, 1, mxREAL))) {
    mexErrMsgTxt("\nfailed to create a matrix");
  }
  if (!(plhs[5] = mxCreateDoubleMatrix(M, L, mxREAL))) {
    mexErrMsgTxt("\nfailed to create a matrix");
  }
  if (!(N == N1 & N == N2)) {  /* NOTE(review): bitwise &, redundant with check above */
    mexErrMsgTxt("\ninput (x,y,z) is not matching");
  }
  resultr = mxGetPr(plhs[0]);
  resulti = mxGetPi(plhs[0]);
  rlx = mxGetPr(plhs[1]);
  rly = mxGetPr(plhs[2]);
  rbx = mxGetPr(plhs[3]);
  rby = mxGetPr(plhs[4]);
  weight = mxGetPr(plhs[5]);
  /* grid spacing inferred from the first two target coordinates */
  Kxstep = fabs(xi[1] - xi[0]);
  Kystep = fabs(yi[1] - yi[0]);
  /* kernel footprint in coordinate units (lx, ly are in grid cells) */
  Lx = Kxstep * (*lx);
  *rlx = Lx;
  Ly = Kystep * (*ly);
  *rly = Ly;
  /**rlx=Lx=Kxstep*(*lx);
  *rly=Ly=Kystep*(*ly);
  */
  /* *rbx=Bx=pi*Lx/2/fabs(xi[1]-xi[0]);
  *rby=By=pi*Ly/2/fabs(yi[1]-yi[0]);
  NOTE: Lx/fabs() = *lx
  */
  /* Kaiser-Bessel beta parameters */
  *rbx = Bx = pi * 1.45 / 2 * (*lx);
  *rby = By = pi * 1.45 / 2 * (*ly);
  /* minxi=xi[0];
  minyi=yi[M-1];
  mexPrintf("min x = %g",minxi);
  mexPrintf("min y = %g",minyi);
  */
  /* minxi = min(xi,M);
  mexPrintf("min x = %g \n",minxi);
  minyi = min(yi,L);
  mexPrintf("min y = %g \n",minyi);*/
  /* NOTE(review): grid origin is hard-coded instead of derived from xi/yi
     (the derivations above are commented out) — confirm this matches callers */
  minxi = -L / 4.0;
  minyi = -M / 4.0;
  /* mexPrintf("min y = %g \n",minyi);
  mexPrintf("min x = %g \n",minxi); */
  /* zero the (M x L) outputs, stored column-major as flat arrays */
  for (i = 0; i < L * M; i++) {
    resultr[i] = 0;
    resulti[i] = 0;
    weight[i] = 0;
  }
  if (!strcmp(opt, "KB") | (nrhs == 7)) {  /* NOTE(review): bitwise |, works here */
#pragma omp parallel private(i, j, k, distx, disty, starti, startj, endi,      \
                             endj, kernelVal, resultr_private,                 \
                             resulti_private)
    {
      /* each thread scatters into its own private grid, merged at the end */
      resultr_private = (double *)malloc(sizeof(double) * L * M);
      resulti_private = (double *)malloc(sizeof(double) * L * M);
      for (i = 0; i < L * M; i++) {
        resultr_private[i] = 0;
        resulti_private[i] = 0;
      }
#pragma omp for schedule(dynamic, 1)
      for (k = 0; k < N; k++) {
        /* x-range of grid cells touched by sample k, clamped to [0, L) */
        starti = (int)ceil((xx[k] - Lx / 2 - minxi) / Kxstep);
        if (starti < 0) {
          starti = 0;
        }
        endi = (int)floor((xx[k] + Lx / 2 - minxi) / Kxstep);
        if (endi >= L) {
          endi = (L - 1);
        }
        /*mexPrintf("endi = %d \n",endi);*/
        for (i = starti; i < endi + 1; i++) {
          /* y-range of grid cells touched by sample k, clamped to [0, M) */
          startj = (int)ceil((yy[k] - Ly / 2 - minyi) / Kystep);
          if (startj < 0) {
            startj = 0;
          }
          endj = (int)floor((yy[k] + Ly / 2 - minyi) / Kystep);
          if (endj >= M) {
            endj = (M - 1);
          }
          /* mexPrintf("yy = %g, start = %d, end = %d \n",yy[k],startj,endj);*/
          for (j = startj; j < endj + 1; j++) {
            /* mexPrintf("%d %d \n",i,j); */
            distx = fabs(xi[i] - xx[k]);
            /* mexPrintf(" %g ",distx); */
            disty = fabs(yi[j] - yy[k]);
            /* mexPrintf(" %g ",disty); */
            /*if (distx > (Lx / 2)) {
              mexPrintf("dx = %g, Lx = %g", distx, Lx);
            }
            if (disty > (Ly / 2)) {
              mexPrintf("dy = %g, Ly = %g", disty, Ly);
            }
            */
            /* These operations were repeated, so using a variable to avoid the
             * extra cycles */
            /* separable 2-D Kaiser-Bessel kernel value at (distx, disty) */
            kernelVal = 1.0 / Lx / Ly *
                        bessi0(Bx * sqrt(1 - 4 * distx * distx / Lx / Lx)) *
                        bessi0(By * sqrt(1 - 4 * disty * disty / Ly / Ly));
            resultr_private[M * i + j] += zzr[k] * kernelVal;
            if (flag) {
              resulti_private[M * i + j] += zzi[k] * kernelVal;
            }
            /* weight[M*i+j]+=1/Lx/Ly*bessi0(Bx*sqrt(1-4*distx*distx/Lx/Lx))*bessi0(By*sqrt(1-4*disty*disty/Ly/Ly));
            if ((i=44) && (j=0)){
            mexPrintf("dx = %g, dy = %g",distx, disty);}*/
          }
        }
      }
/* merge this thread's private grid into the shared output */
#pragma omp critical
      {
        for (i = 0; i < L * M; i++) {
          resultr[i] += resultr_private[i];
          resulti[i] += resulti_private[i];
        }
      }
      free(resultr_private);
      free(resulti_private);
    }
    /* for(i=0;i<L;i++){
      for(j=0;j<M;j++){
        if(weight[M*i+j] >= 0.000001){
          resultr[M*i+j]=resultr[M*i+j]/weight[M*i+j];
          resulti[M*i+j]=resulti[M*i+j]/weight[M*i+j];
        }
      }
    }
    */
  } else {
    mexErrMsgTxt("\ninvalid kernel name");
  }
  mxFree(opt);
}
|
declare-target-2.c | /* { dg-do compile } */
/* { dg-options "-fopenmp" } */
extern int a;
#pragma omp declare target
#pragma omp declare target to (a)
#pragma omp end declare target
int b;
#pragma omp declare target to (b) link (b) /* { dg-error "appears more than once on the same .declare target. directive" } */
int c;
#pragma omp declare target (c)
#pragma omp declare target link (c) /* { dg-error "specified both in declare target" } */
int foo (void);
#pragma omp declare target link (foo) /* { dg-error "is not a variable in clause" } */
struct S;
extern struct S d[]; /* { dg-error "array type has incomplete element type" "" { target c } } */
#pragma omp declare target to (d) /* { dg-error "does not have a mappable type in" } */
extern struct S e;
#pragma omp declare target link (e) /* { dg-error "does not have a mappable type in" } */
extern int f[];
#pragma omp declare target to (f) /* { dg-error "does not have a mappable type in" } */
int g, h;
#pragma omp threadprivate (g, h)
#pragma omp declare target to (g) /* { dg-error "is threadprivate variable in" } */
#pragma omp declare target link (h) /* { dg-error "is threadprivate variable in" } */
int j[10];
#pragma omp declare target to (j[0:4]) /* { dg-error "expected" } */
int k, l;
#pragma omp declare target
int m;
#pragma omp end declare target
#pragma omp declare target to (k)
#pragma omp declare target (k)
#pragma omp declare target to (k, m) link (l)
#pragma omp declare target link (l)
int n, o, s, t;
#pragma omp declare target to (n) to (n) /* { dg-error "appears more than once on the same .declare target. directive" } */
#pragma omp declare target link (o, o) /* { dg-error "appears more than once on the same .declare target. directive" } */
#pragma omp declare target (s, t, s) /* { dg-error "appears more than once on the same .declare target. directive" } */
int p, q, r;
#pragma omp declare target (p) to (q) /* { dg-error "expected end of line before .to." } */
#pragma omp declare target to (p) (q) link (r) /* { dg-error "expected .#pragma omp. clause before" } */
#pragma omp declare target link (r) (p) /* { dg-error "expected .#pragma omp. clause before" } */
#pragma omp declare target
#pragma omp end declare target to (p) /* { dg-error "expected end of line before .to." } */
|
driver.c | /*
Copyright (C) 2014, The University of Texas at Austin
This file is part of libflame and is available under the 3-Clause
BSD license, which can be found in the LICENSE file at the top-level
directory, or at http://opensource.org/licenses/BSD-3-Clause
*/
#include <stdio.h>
#include <math.h>
#include <time.h>
#include "omp.h"
#include "FLAME.h"
#include "LU_prototypes.h"
#define ENABLE_OMP_THREADS 1
/*
 * Benchmark driver: times a reference LU, FLA_LU_nopiv, and ten course
 * LU variants (unblocked/blocked var1..var5) over matrix sizes
 * nfirst..nlast, emitting MATLAB code that plots GFLOPS and the maximum
 * elementwise deviation from the reference factorization.
 *
 * Review notes on changes versus the original:
 *  - The eleven timing loops were near-identical copies; they are now
 *    generated by TIME_LU / REPORT_DIFF_PERF below.
 *  - The original OpenMP loops ("#pragma omp parallel" + "#pragma omp
 *    for") wrote the shared variables dtime and dtime_best from every
 *    thread — a data race that could record a bogus best time.  The
 *    loop now times into a thread-local variable and combines with
 *    reduction(min:dtime_best) (requires OpenMP 3.1+).
 *  - As in the original, FLA_Init()/FLA_Finalize() are invoked from
 *    worker threads inside the parallel loop; confirm the FLAME build
 *    tolerates that (NOTE(review): looks questionable, preserved as-is).
 */

#if ENABLE_OMP_THREADS
#define OMP_TIMING_PRAGMA _Pragma( "omp parallel for reduction(min:dtime_best)" )
#else
#define OMP_TIMING_PRAGMA
#endif

/* Time LU_CALL nrepeats times (possibly in parallel), leaving the
   fastest wall-clock time in dtime_best.  Each iteration re-inits the
   FLAME runtime, runs PRE_CALL (extra per-iteration setup, or (void) 0),
   copies the pristine matrix Aold into A[irep], and factors it; only
   the factorization itself is inside the timed region. */
#define TIME_LU( PRE_CALL, LU_CALL )                                    \
  do {                                                                  \
    dtime_best = 1.0e300;  /* sentinel: larger than any real timing */  \
    OMP_TIMING_PRAGMA                                                   \
    for ( irep = 0; irep < nrepeats; irep++ ){                          \
      double t;                                                         \
      FLA_Init( );                                                      \
      PRE_CALL;                                                         \
      FLA_Copy( Aold, A[irep] );                                        \
      t = FLA_Clock();                                                  \
      LU_CALL;                                                          \
      t = FLA_Clock() - t;                                              \
      if ( t < dtime_best ) dtime_best = t;                             \
      FLA_Finalize( );                                                  \
    }                                                                   \
  } while (0)

/* Average the elementwise deviation of the three timed results against
   the reference factorization and print one MATLAB data row. */
#define REPORT_DIFF_PERF( DATA_NAME )                                   \
  do {                                                                  \
    diff  = FLA_Max_elemwise_diff( A[0], Aref );                        \
    diff += FLA_Max_elemwise_diff( A[1], Aref );                        \
    diff += FLA_Max_elemwise_diff( A[2], Aref );                        \
    diff = diff / 3.0;                                                  \
    printf( "data_" DATA_NAME "( %d, 1:3 ) = [ %d %le %le];\n", i, n,   \
            gflops / dtime_best, diff );                                \
    fflush( stdout );                                                   \
  } while (0)

int main(int argc, char *argv[])
{
  int
    n, nfirst, nlast, ninc, i, irep,
    nrepeats, nb_alg;

  double
    dtime,
    dtime_best,
    gflops,
    max_gflops,
    diff,
    d_n;

  FLA_Obj
    A[3], Aref, Aold, delta;

  /* Initialize FLAME */
  FLA_Init( );

  /* Every time trial is repeated "nrepeats" times; the fastest run is kept. */
  printf( "%% number of repeats:" );
  //scanf( "%d", &nrepeats );
  nrepeats = 3;
  printf( "%% %d\n", nrepeats );

  /* Enter the max GFLOPS attainable.
     This is used to set the y-axis range for the graphs. Here is how
     you figure out what to enter (on Linux machines):
     1) more /proc/cpuinfo (this lists the contents of this file).
     2) read through this and figure out the clock rate of the machine (in GHz).
     3) Find out (from an expert or from the web) the number of floating point
        instructions that can be performed per core per clock cycle.
     4) Figure out if you are using "multithreaded BLAS" which automatically
        parallelize calls to the Basic Linear Algebra Subprograms. If so,
        check how many cores are available.
     5) Multiply 2) x 3) x 4) and enter this in response to the below.
  */
  printf( "%% enter max GFLOPS:" );
  //scanf( "%lf", &max_gflops );
  max_gflops = 6.8;
  printf( "%% %lf\n", max_gflops );

  /* Enter the algorithmic block size */
  printf( "%% enter nb_alg:" );
  //scanf( "%d", &nb_alg );
  nb_alg = 128;
  printf( "%% %d\n", nb_alg );

  /* Turn on parameter checking */
  FLA_Check_error_level_set( FLA_FULL_ERROR_CHECKING );

  /* Timing trials for matrix sizes n=nfirst to nlast in increments
     of ninc will be performed */
  printf( "%% enter nfirst, nlast, ninc:" );
  //scanf( "%d%d%d", &nfirst, &nlast, &ninc );
  nfirst = 50; nlast = 500; ninc = 50;
  printf( "%% %d %d %d\n", nfirst, nlast, ninc );

  i = 1;
  for ( n = nfirst; n <= nlast; n += ninc ){

    /* Allocate space for the matrices */
    FLA_Obj_create( FLA_DOUBLE, n, n, 0, 0, &A[0] );
    FLA_Obj_create( FLA_DOUBLE, n, n, 0, 0, &A[1] );
    FLA_Obj_create( FLA_DOUBLE, n, n, 0, 0, &A[2] );
    FLA_Obj_create( FLA_DOUBLE, n, n, 0, 0, &Aref );
    FLA_Obj_create( FLA_DOUBLE, n, n, 0, 0, &Aold );
    FLA_Obj_create( FLA_DOUBLE, 1, 1, 0, 0, &delta );

    /* Generate random matrix A and save in Aold */
    FLA_Random_matrix( Aold );

    /* Add something large to the diagonal to make sure it isn't ill-conditioned */
    d_n = ( double ) n;
    *( ( double * ) FLA_Obj_base_buffer( delta ) ) = d_n;
    FLA_Shift_diag( FLA_NO_CONJUGATE, delta, Aold );

    /* Set gflops = billions of floating point operations that will be performed */
    gflops = 2.0/3.0 * n * n * n * 1.0e-09;

    /* Time the reference implementation (serial; no race here). */
    for ( irep = 0; irep < nrepeats; irep++ ){
      FLA_Copy( Aold, Aref );

      dtime = FLA_Clock();
      REF_LU( Aref );
      dtime = FLA_Clock() - dtime;

      if ( irep == 0 )
        dtime_best = dtime;
      else
        dtime_best = ( dtime < dtime_best ? dtime : dtime_best );
    }
    printf( "data_REF( %d, 1:2 ) = [ %d %le ];\n", i, n,
            gflops / dtime_best );
    fflush( stdout );

    /* As in the original code, the runtime is shut down here and
       re-initialized inside every timing iteration below. */
    FLA_Finalize( );

    /* Time FLA_LU (the only loop that re-enabled full error checking
       per iteration in the original; preserved via PRE_CALL). */
    TIME_LU( FLA_Check_error_level_set( FLA_FULL_ERROR_CHECKING ),
             FLA_LU_nopiv( A[irep] ) );
    printf( "data_FLAME( %d, 1:2 ) = [ %d %le ];\n", i, n,
            gflops / dtime_best );
    fflush( stdout );

    /* Time each course variant and report performance + deviation. */
    TIME_LU( (void) 0, LU_unb_var1( A[irep] ) );
    REPORT_DIFF_PERF( "unb_var1" );

    TIME_LU( (void) 0, LU_blk_var1( A[irep], nb_alg ) );
    REPORT_DIFF_PERF( "blk_var1" );

    TIME_LU( (void) 0, LU_unb_var2( A[irep] ) );
    REPORT_DIFF_PERF( "unb_var2" );

    TIME_LU( (void) 0, LU_blk_var2( A[irep], nb_alg ) );
    REPORT_DIFF_PERF( "blk_var2" );

    TIME_LU( (void) 0, LU_unb_var3( A[irep] ) );
    REPORT_DIFF_PERF( "unb_var3" );

    TIME_LU( (void) 0, LU_blk_var3( A[irep], nb_alg ) );
    REPORT_DIFF_PERF( "blk_var3" );

    TIME_LU( (void) 0, LU_unb_var4( A[irep] ) );
    REPORT_DIFF_PERF( "unb_var4" );

    TIME_LU( (void) 0, LU_blk_var4( A[irep], nb_alg ) );
    REPORT_DIFF_PERF( "blk_var4" );

    TIME_LU( (void) 0, LU_unb_var5( A[irep] ) );
    REPORT_DIFF_PERF( "unb_var5" );

    TIME_LU( (void) 0, LU_blk_var5( A[irep], nb_alg ) );
    REPORT_DIFF_PERF( "blk_var5" );

    FLA_Obj_free( &A[0] );
    FLA_Obj_free( &A[1] );
    FLA_Obj_free( &A[2] );
    FLA_Obj_free( &Aold );
    FLA_Obj_free( &Aref );
    FLA_Obj_free( &delta );
    printf( "\n" );

    i++;
  }

  /* Print the MATLAB commands to plot the data */

  /* Delete all existing figures */
  printf( "close all\n" );

  /* Plot the performance of FLAME */
  printf( "plot( data_FLAME( :,1 ), data_FLAME( :, 2 ), 'k--' ); \n" );

  /* Indicate that you want to add to the existing plot */
  printf( "hold on\n" );

  /* Plot the performance of the reference implementation */
  printf( "plot( data_REF( :,1 ), data_REF( :, 2 ), 'k-' ); \n" );

  /* Plot the performance of your implementations */
  printf( "plot( data_unb_var1( :,1 ), data_unb_var1( :, 2 ), 'r-.' ); \n" );
  printf( "plot( data_unb_var2( :,1 ), data_unb_var2( :, 2 ), 'g-.' ); \n" );
  printf( "plot( data_unb_var3( :,1 ), data_unb_var3( :, 2 ), 'b-.' ); \n" );
  printf( "plot( data_unb_var4( :,1 ), data_unb_var4( :, 2 ), 'm-.' ); \n" );
  printf( "plot( data_unb_var5( :,1 ), data_unb_var5( :, 2 ), 'c-.' ); \n" );
  printf( "plot( data_blk_var1( :,1 ), data_blk_var1( :, 2 ), 'r--' ); \n" );
  printf( "plot( data_blk_var2( :,1 ), data_blk_var2( :, 2 ), 'g--' ); \n" );
  printf( "plot( data_blk_var3( :,1 ), data_blk_var3( :, 2 ), 'b--' ); \n" );
  printf( "plot( data_blk_var4( :,1 ), data_blk_var4( :, 2 ), 'm--' ); \n" );
  printf( "plot( data_blk_var5( :,1 ), data_blk_var5( :, 2 ), 'c--' ); \n" );

  printf( "hold on \n");
  printf( "xlabel( 'matrix dimension m=n' );\n");
  printf( "ylabel( 'GFLOPS/sec.' );\n");
  printf( "axis( [ 0 %d 0 %3.1f ] ); \n", nlast, max_gflops );
  printf( "legend( 'FLA LU nopiv', ...\n");
  printf( " 'Simple loops', ...\n");
  printf( " 'unb var1', ...\n");
  printf( " 'unb var2', ...\n");
  printf( " 'unb var3', ...\n");
  printf( " 'unb var4', ...\n");
  printf( " 'unb var5', ...\n");
  printf( " 'blk var1', ...\n");
  printf( " 'blk var2', ...\n");
  printf( " 'blk var3', ...\n");
  printf( " 'blk var4', ...\n");
  printf( " 'blk var5', 2);\n");
  printf( "print -r100 -depsc LU.eps\n");

  FLA_Finalize( );

  return 0;
}

#undef TIME_LU
#undef REPORT_DIFF_PERF
#undef OMP_TIMING_PRAGMA
|
GB_binop__min_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__min_int8
// A.*B function (eWiseMult): GB_AemultB__min_int8
// A*D function (colscale): GB_AxD__min_int8
// D*A function (rowscale): GB_DxB__min_int8
// C+=B function (dense accum): GB_Cdense_accumB__min_int8
// C+=b function (dense accum): GB_Cdense_accumb__min_int8
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__min_int8
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__min_int8
// C=scalar+B GB_bind1st__min_int8
// C=scalar+B' GB_bind1st_tran__min_int8
// C=A+scalar GB_bind2nd__min_int8
// C=A'+scalar GB_bind2nd_tran__min_int8
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = GB_IMIN (aij, bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = GB_IMIN (x, y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MIN || GxB_NO_INT8 || GxB_NO_MIN_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense; the operator is fixed to
// MIN on int8_t.  Unlike the kernels below, this routine has no
// GB_DISABLE guard and no return value -- presumably the caller checks
// that the kernel is enabled before dispatching here (confirm).
void GB_Cdense_ewise3_accum__min_int8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// the template expands using the GB_* macros defined earlier in this file
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense (no accumulation).
// Returns GrB_NO_VALUE when this operator/type kernel is compiled out
// (GB_DISABLE), GrB_SUCCESS otherwise.
GrB_Info GB_Cdense_ewise3_noaccum__min_int8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C using MIN.
// The kfirst/klast/pstart slice arrays partition B's entries into
// `ntasks` pieces for `nthreads` threads -- presumably produced by the
// GB_ek_slice machinery included at the top of this file (confirm).
GrB_Info GB_Cdense_accumB__min_int8
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar (passed type-erased via p_bwork) into a
// dense matrix C using MIN on int8_t.
GrB_Info GB_Cdense_accumb__min_int8
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): the return below is unreachable -- the block above has
// already returned.  Harmless, and this file is auto-generated, so the
// generator (not this file) is the place to fix it.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column scale A by the diagonal matrix D, combining entries
// with MIN on int8_t.  The *_slice arrays / ntasks describe the
// parallel partition of A's entries.
GrB_Info GB_AxD__min_int8
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the template writes the result directly into C's value array
int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row scale B by the diagonal matrix D, combining entries with
// MIN on int8_t.
GrB_Info GB_DxB__min_int8
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the template writes the result directly into C's value array
int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, where "+" is MIN on int8_t.
// M is the optional mask (Mask_struct: use only its structure); the
// C_to_* maps and TaskList describe the precomputed parallel schedule.
GrB_Info GB_AaddB__min_int8
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, where ".*" is MIN on int8_t
// applied at the intersection of A's and B's patterns.
GrB_Info GB_AemultB__min_int8
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = min (x, Bx [p]) for p = 0..anz-1: apply the binary operator
// with the scalar bound to the first argument.  All arguments are
// type-erased GB_void pointers cast back to int8_t here.
GrB_Info GB_bind1st__min_int8
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
// embarrassingly parallel elementwise loop; static schedule since all
// iterations cost the same
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int8_t bij = Bx [p] ;
Cx [p] = GB_IMIN (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = min (Ax [p], y) for p = 0..anz-1: apply the binary operator
// with the scalar bound to the second argument.
GrB_Info GB_bind2nd__min_int8
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
// embarrassingly parallel elementwise loop
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int8_t aij = Ax [p] ;
Cx [p] = GB_IMIN (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = GB_IMIN (x, aij) ; \
}
// C = min (x, A'): transpose A and apply the operator with the scalar
// bound to the first argument, via the GB_CAST_OP macro defined just
// above and the shared transpose template.
GrB_Info GB_bind1st_tran__min_int8
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// generated boilerplate: restore GB_ATYPE for the code that follows
// (here it happens to be the same type, int8_t)
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = GB_IMIN (aij, y) ; \
}
// C = min (A', y): transpose A and apply the operator with the scalar
// bound to the second argument, via the GB_CAST_OP macro defined just
// above and the shared transpose template.
GrB_Info GB_bind2nd_tran__min_int8
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
parallel_measurement.c | #include<stdio.h>
#include<math.h>
#include<omp.h>
#include<time.h>
#include<string.h>
#include<stdlib.h>
int nborSize;
int halfwidth = 3;
int p;
// Using the MONOTONIC clock
#define CLK CLOCK_MONOTONIC
/* Elapsed time end - start, normalized so tv_nsec is non-negative. */
struct timespec diff(struct timespec start, struct timespec end){
    struct timespec d;
    d.tv_sec  = end.tv_sec  - start.tv_sec;
    d.tv_nsec = end.tv_nsec - start.tv_nsec;
    if (d.tv_nsec < 0) {
        /* borrow one second from the seconds field */
        d.tv_sec  -= 1;
        d.tv_nsec += 1000000000;
    }
    return d;
}
typedef struct {
unsigned char red,green,blue;
} PPMPixel;
typedef struct {
int x, y;
PPMPixel *data;
} PPMImage;
typedef struct {
unsigned char gs;
} PPMPixelGS;
typedef struct {
int x, y;
PPMPixelGS *data;
} PPMImageGS;
#define RGB_COMPONENT_COLOR 255
int red_neighbours[1024];
int blue_neighbours[1024];
int green_neighbours[1024];
/* Copy the (2*halfwidth+1)^2 neighbourhood of pixel (x,y) into the
 * global red/green/blue_neighbours arrays, replicating edge pixels at
 * the image borders.  Always returns 0. */
int getNbors(PPMImage * im,int x,int y) {
    int rows = im->x;
    int cols = im->y;
    int count = 0;
    int r, c;
    for (r = x - halfwidth; r <= x + halfwidth; r++) {
        /* clamp the row index into [0, rows-1] */
        int rr = r < 0 ? 0 : (r >= rows ? rows - 1 : r);
        for (c = y - halfwidth; c <= y + halfwidth; c++) {
            /* clamp the column index into [0, cols-1] */
            int cc = c < 0 ? 0 : (c >= cols ? cols - 1 : c);
            PPMPixel *px = im->data + (rr * cols) + cc;
            red_neighbours[count]   = px->red;
            green_neighbours[count] = px->green;
            blue_neighbours[count]  = px->blue;
            count++;
        }
    }
    return 0;
}
/* qsort comparator for int: negative/zero/positive as *a is less than,
 * equal to, or greater than *b.  The original returned `*a - *b`, which
 * overflows (undefined behavior) when the operands differ by more than
 * INT_MAX; the two-comparison form is overflow-free. */
int cmpfunc (const void * a, const void * b)
{
    int x = *(const int *)a;
    int y = *(const int *)b;
    return (x > y) - (x < y);
}
/* Median filter: each output pixel is the per-channel median of the
 * (2*halfwidth+1)^2 window around it, with edge replication at the
 * borders.  Rows are processed in parallel by `p` OpenMP threads.
 *
 * Fixes vs. the original:
 *  - The original sorted and indexed with the global `nborSize`, which
 *    is never assigned anywhere in this file (so it is 0): qsort sorted
 *    nothing and the "median" was an arbitrary element.  The actual
 *    window population `count` is used instead.
 *  - The median of an odd-length sorted window is index count/2, not
 *    count/2 + 1.
 *  - The scratch buffers are now stack-local to each thread, so the
 *    firstprivate copies of the global arrays are no longer needed.
 *  - malloc results are checked; unused locals (qq, halfWidth) removed.
 */
PPMImage* changeImage(PPMImage * im)
{
    int rows = im->x;
    int cols = im->y;
    int x;
    PPMImage *out = (PPMImage*) malloc(sizeof(PPMImage));
    if (!out) {
        fprintf(stderr, "Unable to allocate memory\n");
        exit(1);
    }
    out->x = rows;
    out->y = cols;
    out->data = (PPMPixel *) malloc(rows * cols * sizeof(PPMPixel));
    if (!out->data) {
        fprintf(stderr, "Unable to allocate memory\n");
        exit(1);
    }
    omp_set_num_threads(p);
#pragma omp parallel for private(x) shared(im, out, rows, cols)
    for (x = 0; x < rows; x++) {
        /* thread-private scratch; the window holds at most 1024 pixels */
        int reds[1024], greens[1024], blues[1024];
        int y;
        for (y = 0; y < cols; y++) {
            int count = 0;
            int i, j;
            for (i = x - halfwidth; i <= x + halfwidth; i++) {
                int m = i < 0 ? 0 : (i >= rows ? rows - 1 : i);   /* clamp row */
                for (j = y - halfwidth; j <= y + halfwidth; j++) {
                    int c = j < 0 ? 0 : (j >= cols ? cols - 1 : j); /* clamp col */
                    PPMPixel *src = im->data + (m * cols) + c;
                    reds[count]   = src->red;
                    greens[count] = src->green;
                    blues[count]  = src->blue;
                    count++;
                }
            }
            qsort(reds,   count, sizeof(int), cmpfunc);
            qsort(greens, count, sizeof(int), cmpfunc);
            qsort(blues,  count, sizeof(int), cmpfunc);
            {
                PPMPixel *dst = out->data + (x * cols) + y;
                dst->red   = reds[count / 2];
                dst->green = greens[count / 2];
                dst->blue  = blues[count / 2];
            }
        }
    }
    return out;
}
/* Load a binary ("P6") PPM image from `filename`, exiting with a
 * diagnostic on any error.  Returns a heap-allocated PPMImage whose
 * caller owns both the struct and its pixel buffer.
 *
 * Fixes vs. the original:
 *  - after allocating img->data the original re-checked `img` instead
 *    of `img->data`, so a failed pixel allocation went undetected;
 *  - the newline-skipping loops now stop at EOF instead of spinning
 *    forever on a truncated file;
 *  - the fread result is compared as size_t (no signed/unsigned mix).
 */
static PPMImage *readPPM(const char *filename)
{
    char buff[16];
    PPMImage *img;
    FILE *fp;
    int c, rgb_comp_color;

    /* open PPM file for reading */
    fp = fopen(filename, "rb");
    if (!fp) {
        fprintf(stderr, "Unable to open file '%s'\n", filename);
        exit(1);
    }
    /* read image format */
    if (!fgets(buff, sizeof(buff), fp)) {
        perror(filename);
        exit(1);
    }
    /* check the image format */
    if (buff[0] != 'P' || buff[1] != '6') {
        fprintf(stderr, "Invalid image format (must be 'P6')\n");
        exit(1);
    }
    /* alloc memory for image */
    img = (PPMImage *)malloc(sizeof(PPMImage));
    if (!img) {
        fprintf(stderr, "Unable to allocate memory\n");
        exit(1);
    }
    /* skip '#' comment lines (EOF-safe) */
    c = getc(fp);
    while (c == '#') {
        while ((c = getc(fp)) != '\n' && c != EOF) ;
        c = getc(fp);
    }
    ungetc(c, fp);
    /* read image size information */
    if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) {
        fprintf(stderr, "Invalid image size (error loading '%s')\n", filename);
        exit(1);
    }
    /* read rgb component */
    if (fscanf(fp, "%d", &rgb_comp_color) != 1) {
        fprintf(stderr, "Invalid rgb component (error loading '%s')\n", filename);
        exit(1);
    }
    /* check rgb component depth */
    if (rgb_comp_color != RGB_COMPONENT_COLOR) {
        fprintf(stderr, "'%s' does not have 8-bits components\n", filename);
        exit(1);
    }
    /* consume the single whitespace ending the header (EOF-safe) */
    while ((c = fgetc(fp)) != '\n' && c != EOF) ;
    /* memory allocation for pixel data */
    img->data = (PPMPixel*)malloc((size_t)img->x * img->y * sizeof(PPMPixel));
    if (!img->data) {
        fprintf(stderr, "Unable to allocate memory\n");
        exit(1);
    }
    /* read pixel data from file: img->y rows of 3*img->x bytes */
    if (fread(img->data, 3 * (size_t)img->x, img->y, fp) != (size_t)img->y) {
        fprintf(stderr, "Error loading image '%s'\n", filename);
        exit(1);
    }
    fclose(fp);
    return img;
}
/* Serialize `img` to `filename` as a binary (P6) PPM file.
 * Exits with a diagnostic if the file cannot be opened. */
void writePPM(const char *filename, PPMImage *img)
{
    FILE *out = fopen(filename, "wb");
    if (out == NULL) {
        fprintf(stderr, "Unable to open file '%s'\n", filename);
        exit(1);
    }
    /* header: magic number, dimensions, max component value */
    fprintf(out, "P6\n");
    fprintf(out, "%d %d\n", img->x, img->y);
    fprintf(out, "%d\n", 255);
    /* raw interleaved RGB bytes: img->y rows of 3*img->x bytes */
    fwrite(img->data, 3 * img->x, img->y, out);
    fclose(out);
}
int main(int argc, char* argv[])
{
struct timespec start_e2e, end_e2e, start_alg, end_alg, e2e, alg;
/* Should start before anything else */
clock_gettime(CLK, &start_e2e);
/* Check if enough command-line arguments are taken in. */
if(argc < 3){
printf( "Usage: %s n p \n", argv[0] );
return -1;
}
int n=atoi(argv[1]); /* size of input array */
p=atoi(argv[2]); /* number of processors*/
char *problem_name = "median_filtering";
char *approach_name = "qsort";
FILE* outputFile;
char* c=argv[1];
char* str="../../Lenna";
char* str2=malloc(15);
strcpy(str2,str);
strcat(str2,c);
char* str3=".ppm";
strcat(str2,str3);
char* filename=str2;
PPMImage *im;
im = readPPM(filename);
char outputFileName[50];
sprintf(outputFileName,"output/%s_%s_%s_%s_output.txt",problem_name,approach_name,argv[1],argv[2]);
clock_gettime(CLK, &start_alg); /* Start the algo timer */
/*----------------------Core algorithm starts here----------------------------------------------*/
double start_time = omp_get_wtime();
PPMImage* im2 = changeImage(im);
double end_time = omp_get_wtime();
/*----------------------Core algorithm finished--------------------------------------------------*/
clock_gettime(CLK, &end_alg); /* End the algo timer */
/* Ensure that only the algorithm is present between these two
timers. Further, the whole algorithm should be present. */
char outputImageName[1024];
outputImageName[0] = '\0';
strcat(outputImageName, "../../Lenna_");
strcat(outputImageName, argv[1]);
strcat(outputImageName,"_filtering_parallel.ppm");
writePPM(outputImageName,im2);
/* Should end before anything else (printing comes later) */
clock_gettime(CLK, &end_e2e);
e2e = diff(start_e2e, end_e2e);
alg = diff(start_alg, end_alg);
printf("%s,%s,%d,%d,%d,%ld,%d,%ld\n", problem_name, approach_name, n, p, e2e.tv_sec, e2e.tv_nsec, alg.tv_sec, alg.tv_nsec);
return 0;
}
|
bli_dotv_opt_var1.c | /*
BLIS
An object-based framework for developing high-performance BLAS-like
libraries.
Copyright (C) 2014, The University of Texas at Austin
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of The University of Texas at Austin nor the names
of its contributors may be used to endorse or promote products
derived derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "blis.h"
// Optimized double-precision dot product, variant 1: *rho := sum x[i]*y[i].
// The vectorized path requires unit strides and 32-byte-aligned x and y;
// anything else falls through to the reference kernel.  vec_splats /
// vec_lda / vec_madd / vec_extract operate on 4-wide double SIMD
// vectors (vector4double) -- presumably IBM QPX (Blue Gene/Q); confirm
// against the BLIS configuration this file is built for.
void bli_ddotv_opt_var1(
conj_t conjx,   // conjugation of x: only forwarded to the reference kernel (no effect on the real-valued fast path)
conj_t conjy,   // conjugation of y: likewise unused in the fast path
dim_t n,
double* restrict x, inc_t incx,
double* restrict y, inc_t incy,
double* restrict rho   // out: the scalar dot-product result
)
{
bool_t use_ref = FALSE;
// If the vector lengths are zero, set rho to zero and return.
if ( bli_zero_dim1( n ) ) {
PASTEMAC(d,set0s)( rho );
return;
}
// If there is anything that would interfere with our use of aligned
// vector loads/stores, call the reference implementation.
if ( incx != 1 || incy != 1 || bli_is_unaligned_to( x, 32 ) || bli_is_unaligned_to( y, 32 ) )
use_ref = TRUE;
// Call the reference implementation if needed.
if ( use_ref ) {
BLIS_DDOTV_KERNEL_REF( conjx, conjy, n, x, incx, y, incy, rho );
return;
}
// Split n into 4-element SIMD chunks plus a scalar remainder.
dim_t n_run = n / 4;
dim_t n_left = n % 4;
double rhos = 0.0;
// Each thread accumulates into a private 4-lane vector; the per-thread
// scalar partials are then combined by the OpenMP reduction on rhos.
#pragma omp parallel reduction(+:rhos)
{
dim_t n_threads;
dim_t t_id = omp_get_thread_num();
n_threads = omp_get_num_threads();
vector4double rhov = vec_splats( 0.0 );
vector4double xv, yv;
// Chunks are dealt round-robin: thread t takes chunks t, t+T, t+2T, ...
for ( dim_t i = t_id; i < n_run; i += n_threads )
{
xv = vec_lda( 0 * sizeof(double), &x[i*4] );
yv = vec_lda( 0 * sizeof(double), &y[i*4] );
rhov = vec_madd( xv, yv, rhov );
}
// Horizontal sum of the four lanes into this thread's partial result.
rhos += vec_extract( rhov, 0 );
rhos += vec_extract( rhov, 1 );
rhos += vec_extract( rhov, 2 );
rhos += vec_extract( rhov, 3 );
}
// Scalar tail for the final n % 4 elements.
for ( dim_t i = 0; i < n_left; i++ )
{
rhos += x[4*n_run + i] * y[4*n_run + i];
}
*rho = rhos;
}
|
00069-vb-pp.c |
void f()
{
if (0)
#pragma omp atomic
{
i++;
}
}
void f()
{
if (0)
#if foo
{
i++;
}
#else
{
i += 2;
}
#endif
}
void f()
{
while (108)
{
if (42)
#pragma omp critical
{ }
if (23)
#pragma omp critical
{
++i;
}
while (16)
{
}
int i = 15;
if (8)
#pragma omp atomic
{
i += 4;
}
}
}
|
nesting-1.c | /* { dg-do compile } */
/* { dg-options "-fopenmp" } */
void
f1 (void)
{
int i, j;
#pragma omp for
for (i = 0; i < 3; i++)
{
#pragma omp for /* { dg-error "may not be closely nested" } */
for (j = 0; j < 3; j++)
;
#pragma omp sections /* { dg-error "may not be closely nested" } */
{
;
#pragma omp section
;
}
#pragma omp single /* { dg-error "may not be closely nested" } */
;
#pragma omp master /* { dg-error "may not be closely nested" } */
;
#pragma omp barrier /* { dg-error "may not be closely nested" } */
}
#pragma omp sections
{
#pragma omp for /* { dg-error "may not be closely nested" } */
for (j = 0; j < 3; j++)
;
}
#pragma omp sections
{
#pragma omp sections /* { dg-error "may not be closely nested" } */
{
;
#pragma omp section
;
}
}
#pragma omp sections
{
#pragma omp single /* { dg-error "may not be closely nested" } */
;
}
#pragma omp sections
{
#pragma omp master /* { dg-error "may not be closely nested" } */
;
}
#pragma omp sections
{
#pragma omp section
;
}
#pragma omp sections
{
#pragma omp section
#pragma omp for /* { dg-error "may not be closely nested" } */
for (j = 0; j < 3; j++)
;
}
#pragma omp sections
{
#pragma omp section
#pragma omp sections /* { dg-error "may not be closely nested" } */
{
;
#pragma omp section
;
}
}
#pragma omp sections
{
#pragma omp section
#pragma omp single /* { dg-error "may not be closely nested" } */
;
}
#pragma omp sections
{
#pragma omp section
#pragma omp master /* { dg-error "may not be closely nested" } */
;
}
#pragma omp single
{
#pragma omp for /* { dg-error "may not be closely nested" } */
for (j = 0; j < 3; j++)
;
#pragma omp sections /* { dg-error "may not be closely nested" } */
{
;
#pragma omp section
;
}
#pragma omp single /* { dg-error "may not be closely nested" } */
;
#pragma omp master /* { dg-error "may not be closely nested" } */
;
#pragma omp barrier /* { dg-error "may not be closely nested" } */
}
#pragma omp master
{
#pragma omp for /* { dg-error "may not be closely nested" } */
for (j = 0; j < 3; j++)
;
#pragma omp sections /* { dg-error "may not be closely nested" } */
{
;
#pragma omp section
;
}
#pragma omp single /* { dg-error "may not be closely nested" } */
;
#pragma omp master
;
#pragma omp barrier /* { dg-error "may not be closely nested" } */
}
#pragma omp task
{
#pragma omp for /* { dg-error "may not be closely nested" } */
for (j = 0; j < 3; j++)
;
#pragma omp sections /* { dg-error "may not be closely nested" } */
{
;
#pragma omp section
;
}
#pragma omp single /* { dg-error "may not be closely nested" } */
;
#pragma omp master /* { dg-error "may not be closely nested" } */
;
#pragma omp barrier /* { dg-error "may not be closely nested" } */
}
#pragma omp parallel
{
#pragma omp for
for (j = 0; j < 3; j++)
;
#pragma omp sections
{
;
#pragma omp section
;
}
#pragma omp single
;
#pragma omp master
;
#pragma omp barrier
}
}
void
f2 (void)
{
int i, j;
#pragma omp ordered
{
#pragma omp for /* { dg-error "may not be closely nested" } */
for (j = 0; j < 3; j++)
;
#pragma omp sections /* { dg-error "may not be closely nested" } */
{
;
#pragma omp section
;
}
#pragma omp single /* { dg-error "may not be closely nested" } */
;
#pragma omp master
;
#pragma omp barrier /* { dg-error "may not be closely nested" } */
}
}
void
f3 (void)
{
#pragma omp critical
{
#pragma omp ordered /* { dg-error "may not be closely nested" } */
;
}
}
void
f4 (void)
{
#pragma omp task
{
#pragma omp ordered /* { dg-error "may not be closely nested" } */
;
}
}
void
f5 (void)
{
int i;
#pragma omp for
for (i = 0; i < 10; i++)
{
#pragma omp ordered /* { dg-error "must be closely nested" } */
;
}
#pragma omp for ordered
for (i = 0; i < 10; i++)
{
#pragma omp ordered
;
}
}
void
f6 (void)
{
#pragma omp critical (foo)
#pragma omp critical (bar)
;
#pragma omp critical
#pragma omp critical (baz)
;
}
void
f7 (void)
{
#pragma omp critical (foo2)
#pragma omp critical
;
#pragma omp critical (bar)
#pragma omp critical (bar) /* { dg-error "may not be nested" } */
;
#pragma omp critical
#pragma omp critical /* { dg-error "may not be nested" } */
;
}
|
partial.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "aux_interp.h"
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildPartialExtPIInterp
* Comment:
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildPartialExtPIInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global,
HYPRE_BigInt *num_old_cpts_global,
HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag,
HYPRE_Real trunc_factor, HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_PARTIAL_INTERP] -= hypre_MPI_Wtime();
#endif
/* Communication Variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int my_id, num_procs;
/* Variables to store input variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
/*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;
HYPRE_BigInt total_global_cpts, my_first_cpt;
/* Variables to store strong connection matrix info */
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data = NULL;
HYPRE_Int *P_diag_i, *P_diag_j = NULL;
HYPRE_Real *P_offd_data = NULL;
HYPRE_Int *P_offd_i, *P_offd_j = NULL;
/*HYPRE_Int *col_map_offd_P = NULL;*/
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
/*HYPRE_Int *P_marker = NULL;
HYPRE_Int *P_marker_offd = NULL;*/
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *tmp_CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
/* Full row information for columns of A that are off diag*/
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data;
HYPRE_Int *A_ext_i;
HYPRE_BigInt *A_ext_j;
HYPRE_Int *fine_to_coarse = NULL;
HYPRE_BigInt *fine_to_coarse_offd = NULL;
HYPRE_Int *old_coarse_to_fine = NULL;
HYPRE_Int full_off_procNodes;
hypre_CSRMatrix *Sop;
HYPRE_Int *Sop_i;
HYPRE_BigInt *Sop_j;
HYPRE_Int sgn;
/* Variables to keep count of interpolatory points */
/*HYPRE_Int jj_counter, jj_counter_offd;
HYPRE_Int jj_begin_row, jj_end_row;
HYPRE_Int jj_begin_row_offd = 0;
HYPRE_Int jj_end_row_offd = 0;
HYPRE_Int coarse_counter, coarse_counter_offd; */
HYPRE_Int n_coarse_old;
HYPRE_BigInt total_old_global_cpts;
/* Interpolation weight variables */
HYPRE_Real sum, diagonal, distribute;
/*HYPRE_Int strong_f_marker = -2;*/
/* Loop variables */
/*HYPRE_Int index;*/
HYPRE_Int cnt, old_cnt;
HYPRE_Int start_indexing = 0;
HYPRE_Int i;
/*HYPRE_Int i, ii, i1, i2, j, jj, kk, k1, jj1;*/
/* Definitions */
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Real wall_time;
HYPRE_Int max_num_threads;
HYPRE_Int *P_diag_array = NULL;
HYPRE_Int *P_offd_array = NULL;
hypre_ParCSRCommPkg *extend_comm_pkg = NULL;
if (debug_flag==4) wall_time = time_getWallclockSeconds();
/* BEGIN */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
max_num_threads = hypre_NumThreads();
#ifdef HYPRE_NO_GLOBAL_PARTITION
my_first_cpt = num_cpts_global[0];
/*my_first_old_cpt = num_old_cpts_global[0];*/
n_coarse_old = (HYPRE_Int)(num_old_cpts_global[1] - num_old_cpts_global[0]);
/*n_coarse = num_cpts_global[1] - num_cpts_global[0];*/
if (my_id == (num_procs -1))
{
total_global_cpts = num_cpts_global[1];
total_old_global_cpts = num_old_cpts_global[1];
}
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
hypre_MPI_Bcast(&total_old_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
my_first_cpt = num_cpts_global[my_id];
/*my_first_old_cpt = num_old_cpts_global[my_id];*/
total_global_cpts = num_cpts_global[num_procs];
total_old_global_cpts = num_old_cpts_global[num_procs];
n_coarse_old = (HYPRE_Int)(num_old_cpts_global[my_id+1] - num_old_cpts_global[my_id]);
/*n_coarse = num_cpts_global[my_id+1] - num_cpts_global[my_id];*/
#endif
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
/* Set up off processor information (specifically for neighbors of
* neighbors */
full_off_procNodes = 0;
if (num_procs > 1)
{
if (hypre_exchange_interp_data(
&CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg,
A, CF_marker, S, num_functions, dof_func, 1))
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
return hypre_error_flag;
}
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixBigJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
Sop_i = hypre_CSRMatrixI(Sop);
Sop_j = hypre_CSRMatrixBigJ(Sop);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Intialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_coarse_old+1, HYPRE_MEMORY_HOST);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_coarse_old+1, HYPRE_MEMORY_HOST);
if (n_fine)
{
old_coarse_to_fine = hypre_CTAlloc(HYPRE_Int, n_coarse_old, HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
/*P_marker = hypre_CTAlloc(HYPRE_Int, n_fine); */
}
if (full_off_procNodes)
{
/*P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);*/
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST);
tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
}
/*hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
fine_to_coarse_offd, P_marker, P_marker_offd,
tmp_CF_marker_offd);*/
for (i=0; i < full_off_procNodes; i++)
{
fine_to_coarse_offd[i] = -1;
tmp_CF_marker_offd[i] = -1;
}
cnt = 0;
old_cnt = 0;
for (i = 0; i < n_fine; i++)
{
fine_to_coarse[i] = -1;
if (CF_marker[i] == 1)
{
fine_to_coarse[i] = cnt++;
old_coarse_to_fine[old_cnt++] = i;
}
else if (CF_marker[i] == -2)
{
old_coarse_to_fine[old_cnt++] = i;
}
}
P_diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads+1, HYPRE_MEMORY_HOST);
P_offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads+1, HYPRE_MEMORY_HOST);
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i, diagonal, distribute, sgn, sum)
#endif
{
HYPRE_Int ii, jj_counter, jj_counter_offd, jj, kk, i1, i2, k1, jj1;
HYPRE_BigInt big_k1;
HYPRE_Int loc_col, jj_begin_row, jj_begin_row_offd;
HYPRE_Int jj_end_row, jj_end_row_offd, strong_f_marker;
HYPRE_Int size, rest, ne, ns;
HYPRE_Int num_threads, my_thread_num;
HYPRE_Int *P_marker = NULL;
HYPRE_Int *P_marker_offd = NULL;
strong_f_marker = -2;
num_threads = hypre_NumActiveThreads();
my_thread_num = hypre_GetThreadNum();
size = n_coarse_old/num_threads;
rest = n_coarse_old - size*num_threads;
if (my_thread_num < rest)
{
ns = my_thread_num*(size+1);
ne = (my_thread_num+1)*(size+1);
}
else
{
ns = my_thread_num*size+rest;
ne = (my_thread_num+1)*size+rest;
}
if (n_fine) P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
for (ii=0; ii < n_fine; ii++)
P_marker[ii] = -1;
if (full_off_procNodes) P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
for (ii=0; ii < full_off_procNodes; ii++)
P_marker_offd[ii] = -1;
/*coarse_counter = 0;
coarse_counter_offd = 0;*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
for (ii = ns; ii < ne; ii++)
{
jj_begin_row = jj_counter;
jj_begin_row_offd = jj_counter_offd;
/*P_diag_i[ii] = jj_counter;
if (num_procs > 1)
P_offd_i[ii] = jj_counter_offd;*/
i = old_coarse_to_fine[ii];
if (CF_marker[i] > 0)
{
jj_counter++;
/*coarse_counter++;*/
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i, or C-points that stronly influence F-points
* that strongly influence i.
*--------------------------------------------------------------------*/
else if (CF_marker[i] == -2)
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] > 0)
{ /* i1 is a C point */
if (P_marker[i1] < jj_begin_row)
{
P_marker[i1] = jj_counter;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{ /* i1 is a F point, loop through it's strong neighbors */
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] > 0)
{
if(P_marker[k1] < jj_begin_row)
{
P_marker[k1] = jj_counter;
jj_counter++;
}
}
}
if(num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if(col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] > 0)
{
if(P_marker_offd[k1] < jj_begin_row_offd)
{
tmp_CF_marker_offd[k1] = 1;
P_marker_offd[k1] = jj_counter_offd;
jj_counter_offd++;
}
}
}
}
}
}
/* Look at off diag strong connections of i */
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[i1];
if (CF_marker_offd[i1] > 0)
{
if(P_marker_offd[i1] < jj_begin_row_offd)
{
tmp_CF_marker_offd[i1] = 1;
P_marker_offd[i1] = jj_counter_offd;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{ /* F point; look at neighbors of i1. Sop contains global col
* numbers and entries that could be in S_diag or S_offd or
* neither. */
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
big_k1 = Sop_j[kk];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* In S_diag */
loc_col = (HYPRE_Int)(big_k1-col_1);
if(P_marker[loc_col] < jj_begin_row)
{
P_marker[loc_col] = jj_counter;
jj_counter++;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(P_marker_offd[loc_col] < jj_begin_row_offd)
{
P_marker_offd[loc_col] = jj_counter_offd;
tmp_CF_marker_offd[loc_col] = 1;
jj_counter_offd++;
}
}
}
}
}
}
}
P_diag_array[my_thread_num] = jj_counter;
P_offd_array[my_thread_num] = jj_counter_offd;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (my_thread_num == 0)
{
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d determine structure %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
if (debug_flag== 4) wall_time = time_getWallclockSeconds();
for (i=0; i < max_num_threads; i++)
{
P_diag_array[i+1] += P_diag_array[i];
P_offd_array[i+1] += P_offd_array[i];
}
P_diag_size = P_diag_array[max_num_threads];
P_offd_size = P_offd_array[max_num_threads];
if (P_diag_size)
{
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);
}
if (P_offd_size)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);
}
P_diag_i[n_coarse_old] = P_diag_size;
P_offd_i[n_coarse_old] = P_offd_size;
/* Fine to coarse mapping */
if(num_procs > 1)
{
hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
full_off_procNodes, my_first_cpt,
fine_to_coarse_offd);
}
}
for (i = 0; i < n_fine; i++)
P_marker[i] = -1;
for (i = 0; i < full_off_procNodes; i++)
P_marker_offd[i] = -1;
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (my_thread_num)
{
jj_counter = P_diag_array[my_thread_num-1];
jj_counter_offd = P_offd_array[my_thread_num-1];
}
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
for (ii = ns; ii < ne; ii++)
{
jj_begin_row = jj_counter;
jj_begin_row_offd = jj_counter_offd;
P_diag_i[ii] = jj_counter;
P_offd_i[ii] = jj_counter_offd;
i = old_coarse_to_fine[ii];
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] > 0)
{
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else if (CF_marker[i] == -2)
{
strong_f_marker--;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
if (P_marker[i1] < jj_begin_row)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{
P_marker[i1] = strong_f_marker;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] >= 0)
{
if(P_marker[k1] < jj_begin_row)
{
P_marker[k1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[k1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
if(num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if(col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if(CF_marker_offd[k1] >= 0)
{
if(P_marker_offd[k1] < jj_begin_row_offd)
{
P_marker_offd[k1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = k1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[i1];
if ( CF_marker_offd[i1] >= 0)
{
if(P_marker_offd[i1] < jj_begin_row_offd)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
big_k1 = Sop_j[kk];
/* Find local col number */
if(big_k1 >= col_1 && big_k1 < col_n)
{
loc_col = (HYPRE_Int)(big_k1-col_1);
if(P_marker[loc_col] < jj_begin_row)
{
P_marker[loc_col] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[loc_col];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(P_marker_offd[loc_col] < jj_begin_row_offd)
{
P_marker_offd[loc_col] = jj_counter_offd;
P_offd_j[jj_counter_offd]=loc_col;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
jj_end_row = jj_counter;
jj_end_row_offd = jj_counter_offd;
diagonal = A_diag_data[A_diag_i[i]];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{ /* i1 is a c-point and strongly influences i, accumulate
* a_(i,i1) into interpolation weight */
i1 = A_diag_j[jj];
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
else if(P_marker[i1] == strong_f_marker)
{
sum = zero;
sgn = 1;
if(A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
/* Loop over row of A for point i1 and calculate the sum
* of the connections to c-points that strongly incluence i. */
for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if((P_marker[i2] >= jj_begin_row || i2 == i) && (sgn*A_diag_data[jj1]) < 0)
sum += A_diag_data[jj1];
}
if(num_procs > 1)
{
for(jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if(P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn*A_offd_data[jj1]) < 0)
sum += A_offd_data[jj1];
}
}
if(sum != 0)
{
distribute = A_diag_data[jj]/sum;
/* Loop over row of A for point i1 and do the distribution */
for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if(P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0)
P_diag_data[P_marker[i2]] +=
distribute*A_diag_data[jj1];
if(i2 == i && (sgn*A_diag_data[jj1]) < 0)
diagonal += distribute*A_diag_data[jj1];
}
if(num_procs > 1)
{
for(jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if(P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn*A_offd_data[jj1]) < 0)
P_offd_data[P_marker_offd[i2]] +=
distribute*A_offd_data[jj1];
}
}
}
else
{
diagonal += A_diag_data[jj];
}
}
/* neighbor i1 weakly influences i, accumulate a_(i,i1) into
* diagonal */
else if (CF_marker[i1] != -3)
{
if(num_functions == 1 || dof_func[i] == dof_func[i1])
diagonal += A_diag_data[jj];
}
}
if(num_procs > 1)
{
for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
if(P_marker_offd[i1] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
else if(P_marker_offd[i1] == strong_f_marker)
{
sum = zero;
for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
{
big_k1 = A_ext_j[jj1];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if(P_marker[loc_col] >= jj_begin_row || loc_col == i)
sum += A_ext_data[jj1];
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(P_marker_offd[loc_col] >= jj_begin_row_offd)
sum += A_ext_data[jj1];
}
}
if(sum != 0)
{
distribute = A_offd_data[jj] / sum;
for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
{
big_k1 = A_ext_j[jj1];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if(P_marker[loc_col] >= jj_begin_row)
P_diag_data[P_marker[loc_col]] += distribute*
A_ext_data[jj1];
if(loc_col == i)
diagonal += distribute*A_ext_data[jj1];
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(P_marker_offd[loc_col] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[loc_col]] += distribute*
A_ext_data[jj1];
}
}
}
else
{
diagonal += A_offd_data[jj];
}
}
else if (CF_marker_offd[i1] != -3)
{
if(num_functions == 1 || dof_func[i] == dof_func_offd[i1])
diagonal += A_offd_data[jj];
}
}
}
if (diagonal)
{
for(jj = jj_begin_row; jj < jj_end_row; jj++)
P_diag_data[jj] /= -diagonal;
for(jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
P_offd_data[jj] /= -diagonal;
}
}
strong_f_marker--;
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
} /* end parallel region */
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d fill structure %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
P = hypre_ParCSRMatrixCreate(comm,
total_old_global_cpts,
total_global_cpts,
num_old_cpts_global,
num_cpts_global,
0,
P_diag_i[n_coarse_old],
P_offd_i[n_coarse_old]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
hypre_CSRMatrixMemoryLocation(P_diag) = HYPRE_MEMORY_HOST;
hypre_CSRMatrixMemoryLocation(P_offd) = HYPRE_MEMORY_HOST;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_coarse_old];
P_offd_size = P_offd_i[n_coarse_old];
}
/* This builds col_map, col_map should be monotone increasing and contain
* global numbers. */
if(P_offd_size)
{
hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
}
hypre_MatvecCommPkgCreate(P);
for (i=0; i < n_fine; i++)
if (CF_marker[i] < -1) CF_marker[i] = -1;
*P_ptr = P;
/* Deallocate memory */
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(old_coarse_to_fine, HYPRE_MEMORY_HOST);
hypre_TFree(P_diag_array, HYPRE_MEMORY_HOST);
hypre_TFree(P_offd_array, HYPRE_MEMORY_HOST);
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(Sop);
hypre_CSRMatrixDestroy(A_ext);
hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST);
if(num_functions > 1)
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
hypre_MatvecCommPkgDestroy(extend_comm_pkg);
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_PARTIAL_INTERP] += hypre_MPI_Wtime();
#endif
return hypre_error_flag;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildPartialStdInterp
* Comment: The interpolatory weighting can be changed with the sep_weight
* variable. This can enable not separating negative and positive
* off diagonals in the weight formula.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildPartialStdInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global,
HYPRE_BigInt *num_old_cpts_global,
HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag,
HYPRE_Real trunc_factor, HYPRE_Int max_elmts,
HYPRE_Int sep_weight, HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
/* Communication Variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int my_id, num_procs;
/* Variables to store input variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
/*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;
HYPRE_BigInt total_global_cpts, my_first_cpt;
/* Variables to store strong connection matrix info */
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data = NULL;
HYPRE_Int *P_diag_i, *P_diag_j = NULL;
HYPRE_Real *P_offd_data = NULL;
HYPRE_Int *P_offd_i, *P_offd_j = NULL;
/*HYPRE_Int *col_map_offd_P = NULL;*/
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
HYPRE_Int *P_marker = NULL;
HYPRE_Int *P_marker_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *tmp_CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
/* Full row information for columns of A that are off diag*/
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data;
HYPRE_Int *A_ext_i;
HYPRE_BigInt *A_ext_j;
HYPRE_Int *fine_to_coarse = NULL;
HYPRE_BigInt *fine_to_coarse_offd = NULL;
HYPRE_Int *old_coarse_to_fine = NULL;
HYPRE_Int loc_col;
HYPRE_Int full_off_procNodes;
hypre_CSRMatrix *Sop;
HYPRE_Int *Sop_i;
HYPRE_BigInt *Sop_j;
/* Variables to keep count of interpolatory points */
HYPRE_Int jj_counter, jj_counter_offd;
HYPRE_Int jj_begin_row, jj_end_row;
HYPRE_Int jj_begin_row_offd = 0;
HYPRE_Int jj_end_row_offd = 0;
HYPRE_Int coarse_counter;
HYPRE_Int n_coarse_old;
HYPRE_BigInt total_old_global_cpts;
HYPRE_Int *ihat = NULL;
HYPRE_Int *ihat_offd = NULL;
HYPRE_Int *ipnt = NULL;
HYPRE_Int *ipnt_offd = NULL;
HYPRE_Int strong_f_marker = -2;
/* Interpolation weight variables */
HYPRE_Real *ahat = NULL;
HYPRE_Real *ahat_offd = NULL;
HYPRE_Real sum_pos, sum_pos_C, sum_neg, sum_neg_C, sum, sum_C;
HYPRE_Real diagonal, distribute;
HYPRE_Real alfa, beta;
/* Loop variables */
/*HYPRE_Int index;*/
HYPRE_Int cnt, old_cnt;
HYPRE_Int start_indexing = 0;
HYPRE_Int i, ii, i1, j1, jj, kk, k1;
HYPRE_BigInt big_k1;
HYPRE_Int cnt_c, cnt_f, cnt_c_offd, cnt_f_offd, indx;
/* Definitions */
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Real wall_time;
HYPRE_Real wall_1 = 0;
HYPRE_Real wall_2 = 0;
HYPRE_Real wall_3 = 0;
hypre_ParCSRCommPkg *extend_comm_pkg = NULL;
if (debug_flag== 4) wall_time = time_getWallclockSeconds();
/* BEGIN */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
#ifdef HYPRE_NO_GLOBAL_PARTITION
my_first_cpt = num_cpts_global[0];
/*my_first_old_cpt = num_old_cpts_global[0];*/
n_coarse_old = (HYPRE_Int)(num_old_cpts_global[1] - num_old_cpts_global[0]);
/*n_coarse = num_cpts_global[1] - num_cpts_global[0];*/
if (my_id == (num_procs -1))
{
total_global_cpts = num_cpts_global[1];
total_old_global_cpts = num_old_cpts_global[1];
}
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
hypre_MPI_Bcast(&total_old_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
my_first_cpt = num_cpts_global[my_id];
/*my_first_old_cpt = num_old_cpts_global[my_id];*/
total_global_cpts = num_cpts_global[num_procs];
total_old_global_cpts = num_old_cpts_global[num_procs];
n_coarse_old = (HYPRE_Int)(num_old_cpts_global[my_id+1] - num_old_cpts_global[my_id]);
/*n_coarse = num_cpts_global[my_id+1] - num_cpts_global[my_id];*/
#endif
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
/* Set up off processor information (specifically for neighbors of
 * neighbors) */
full_off_procNodes = 0;
if (num_procs > 1)
{
if (hypre_exchange_interp_data(
&CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg,
A, CF_marker, S, num_functions, dof_func, 0))
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
return hypre_error_flag;
}
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixBigJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
Sop_i = hypre_CSRMatrixI(Sop);
Sop_j = hypre_CSRMatrixBigJ(Sop);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
 * Initialize counters and allocate mapping vector.
 *-----------------------------------------------------------------------*/
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_coarse_old+1, HYPRE_MEMORY_HOST);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_coarse_old+1, HYPRE_MEMORY_HOST);
if (n_fine)
{
old_coarse_to_fine = hypre_CTAlloc(HYPRE_Int, n_coarse_old, HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
}
if (full_off_procNodes)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST);
tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
}
hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
fine_to_coarse_offd, P_marker, P_marker_offd,
tmp_CF_marker_offd);
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
coarse_counter = 0;
cnt = 0;
old_cnt = 0;
for (i = 0; i < n_fine; i++)
{
fine_to_coarse[i] = -1;
if (CF_marker[i] == 1)
{
fine_to_coarse[i] = cnt++;
old_coarse_to_fine[old_cnt++] = i;
}
else if (CF_marker[i] == -2)
{
old_coarse_to_fine[old_cnt++] = i;
}
}
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
for (ii = 0; ii < n_coarse_old; ii++)
{
P_diag_i[ii] = jj_counter;
if (num_procs > 1)
P_offd_i[ii] = jj_counter_offd;
i = old_coarse_to_fine[ii];
if (CF_marker[i] > 0)
{
jj_counter++;
coarse_counter++;
}
/*--------------------------------------------------------------------
 * If i is an F-point, interpolation is from the C-points that
 * strongly influence i, or C-points that strongly influence F-points
 * that strongly influence i.
 *--------------------------------------------------------------------*/
else if (CF_marker[i] == -2)
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] > 0)
{ /* i1 is a C point */
if (P_marker[i1] < P_diag_i[ii])
{
P_marker[i1] = jj_counter;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{ /* i1 is a F point, loop through it's strong neighbors */
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] > 0)
{
if(P_marker[k1] < P_diag_i[ii])
{
P_marker[k1] = jj_counter;
jj_counter++;
}
}
}
if(num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if(col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] > 0)
{
if(P_marker_offd[k1] < P_offd_i[ii])
{
tmp_CF_marker_offd[k1] = 1;
P_marker_offd[k1] = jj_counter_offd;
jj_counter_offd++;
}
}
}
}
}
}
/* Look at off diag strong connections of i */
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[i1];
if (CF_marker_offd[i1] > 0)
{
if(P_marker_offd[i1] < P_offd_i[ii])
{
tmp_CF_marker_offd[i1] = 1;
P_marker_offd[i1] = jj_counter_offd;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{ /* F point; look at neighbors of i1. Sop contains global col
* numbers and entries that could be in S_diag or S_offd or
* neither. */
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
big_k1 = Sop_j[kk];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* In S_diag */
loc_col = (HYPRE_Int)(big_k1-col_1);
if(CF_marker[loc_col] >= 0)
{
if(P_marker[loc_col] < P_diag_i[ii])
{
P_marker[loc_col] = jj_counter;
jj_counter++;
}
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(CF_marker_offd[loc_col] >= 0)
{
if(P_marker_offd[loc_col] < P_offd_i[ii])
{
P_marker_offd[loc_col] = jj_counter_offd;
tmp_CF_marker_offd[loc_col] = 1;
jj_counter_offd++;
}
}
}
}
}
}
}
}
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d determine structure %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
P_diag_size = jj_counter;
P_offd_size = jj_counter_offd;
if (P_diag_size)
{
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);
}
if (P_offd_size)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);
}
P_diag_i[n_coarse_old] = jj_counter;
P_offd_i[n_coarse_old] = jj_counter_offd;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/* Fine to coarse mapping */
if(num_procs > 1)
{
hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
full_off_procNodes, my_first_cpt,
fine_to_coarse_offd);
}
/* Initialize ahat, which is a modification to a, used in the standard
* interpolation routine. */
if (n_fine)
{
ahat = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST);
ihat = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
ipnt = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
}
if (full_off_procNodes)
{
ahat_offd = hypre_CTAlloc(HYPRE_Real, full_off_procNodes, HYPRE_MEMORY_HOST);
ihat_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
ipnt_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
}
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
ahat[i] = 0;
ihat[i] = -1;
}
for (i = 0; i < full_off_procNodes; i++)
{
P_marker_offd[i] = -1;
ahat_offd[i] = 0;
ihat_offd[i] = -1;
}
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
for (ii = 0; ii < n_coarse_old; ii++)
{
jj_begin_row = jj_counter;
jj_begin_row_offd = jj_counter_offd;
i = old_coarse_to_fine[ii];
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] > 0)
{
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else if (CF_marker[i] == -2)
{
if (debug_flag==4) wall_time = time_getWallclockSeconds();
strong_f_marker--;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] > 0)
{
if (P_marker[i1] < jj_begin_row)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = i1;
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{
P_marker[i1] = strong_f_marker;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] > 0)
{
if(P_marker[k1] < jj_begin_row)
{
P_marker[k1] = jj_counter;
P_diag_j[jj_counter] = k1;
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
if(num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if(col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if(CF_marker_offd[k1] > 0)
{
if(P_marker_offd[k1] < jj_begin_row_offd)
{
P_marker_offd[k1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = k1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[i1];
if ( CF_marker_offd[i1] > 0)
{
if(P_marker_offd[i1] < jj_begin_row_offd)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd]=i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
big_k1 = Sop_j[kk];
if(big_k1 >= col_1 && big_k1 < col_n)
{
loc_col = (HYPRE_Int)(big_k1-col_1);
if(CF_marker[loc_col] > 0)
{
if(P_marker[loc_col] < jj_begin_row)
{
P_marker[loc_col] = jj_counter;
P_diag_j[jj_counter] = loc_col;
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(CF_marker_offd[loc_col] > 0)
{
if(P_marker_offd[loc_col] < jj_begin_row_offd)
{
P_marker_offd[loc_col] = jj_counter_offd;
P_offd_j[jj_counter_offd]=loc_col;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
}
jj_end_row = jj_counter;
jj_end_row_offd = jj_counter_offd;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
wall_1 += wall_time;
fflush(NULL);
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
cnt_c = 0;
cnt_f = jj_end_row-jj_begin_row;
cnt_c_offd = 0;
cnt_f_offd = jj_end_row_offd-jj_begin_row_offd;
ihat[i] = cnt_f;
ipnt[cnt_f] = i;
ahat[cnt_f++] = A_diag_data[A_diag_i[i]];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{ /* i1 is direct neighbor */
i1 = A_diag_j[jj];
if (P_marker[i1] != strong_f_marker)
{
indx = ihat[i1];
if (indx > -1)
ahat[indx] += A_diag_data[jj];
else if (P_marker[i1] >= jj_begin_row)
{
ihat[i1] = cnt_c;
ipnt[cnt_c] = i1;
ahat[cnt_c++] += A_diag_data[jj];
}
else if (CF_marker[i1] != -3)
{
ihat[i1] = cnt_f;
ipnt[cnt_f] = i1;
ahat[cnt_f++] += A_diag_data[jj];
}
}
else
{
if(num_functions == 1 || dof_func[i] == dof_func[i1])
{
distribute = A_diag_data[jj]/A_diag_data[A_diag_i[i1]];
for (kk = A_diag_i[i1]+1; kk < A_diag_i[i1+1]; kk++)
{
k1 = A_diag_j[kk];
indx = ihat[k1];
if (indx > -1)
ahat[indx] -= A_diag_data[kk]*distribute;
else if (P_marker[k1] >= jj_begin_row)
{
ihat[k1] = cnt_c;
ipnt[cnt_c] = k1;
ahat[cnt_c++] -= A_diag_data[kk]*distribute;
}
else
{
ihat[k1] = cnt_f;
ipnt[cnt_f] = k1;
ahat[cnt_f++] -= A_diag_data[kk]*distribute;
}
}
if(num_procs > 1)
{
for (kk = A_offd_i[i1]; kk < A_offd_i[i1+1]; kk++)
{
k1 = A_offd_j[kk];
indx = ihat_offd[k1];
if(num_functions == 1 || dof_func[i1] == dof_func_offd[k1])
{
if (indx > -1)
ahat_offd[indx] -= A_offd_data[kk]*distribute;
else if (P_marker_offd[k1] >= jj_begin_row_offd)
{
ihat_offd[k1] = cnt_c_offd;
ipnt_offd[cnt_c_offd] = k1;
ahat_offd[cnt_c_offd++] -= A_offd_data[kk]*distribute;
}
else
{
ihat_offd[k1] = cnt_f_offd;
ipnt_offd[cnt_f_offd] = k1;
ahat_offd[cnt_f_offd++] -= A_offd_data[kk]*distribute;
}
}
}
}
}
}
}
if(num_procs > 1)
{
for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
if(P_marker_offd[i1] != strong_f_marker)
{
indx = ihat_offd[i1];
if (indx > -1)
ahat_offd[indx] += A_offd_data[jj];
else if (P_marker_offd[i1] >= jj_begin_row_offd)
{
ihat_offd[i1] = cnt_c_offd;
ipnt_offd[cnt_c_offd] = i1;
ahat_offd[cnt_c_offd++] += A_offd_data[jj];
}
else if (CF_marker_offd[i1] != -3)
{
ihat_offd[i1] = cnt_f_offd;
ipnt_offd[cnt_f_offd] = i1;
ahat_offd[cnt_f_offd++] += A_offd_data[jj];
}
}
else
{
if(num_functions == 1 || dof_func[i] == dof_func_offd[i1])
{
distribute = A_offd_data[jj]/A_ext_data[A_ext_i[i1]];
for (kk = A_ext_i[i1]+1; kk < A_ext_i[i1+1]; kk++)
{
big_k1 = A_ext_j[kk];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /*diag*/
loc_col = (HYPRE_Int)(big_k1 - col_1);
indx = ihat[loc_col];
if (indx > -1)
ahat[indx] -= A_ext_data[kk]*distribute;
else if (P_marker[loc_col] >= jj_begin_row)
{
ihat[loc_col] = cnt_c;
ipnt[cnt_c] = loc_col;
ahat[cnt_c++] -= A_ext_data[kk]*distribute;
}
else
{
ihat[loc_col] = cnt_f;
ipnt[cnt_f] = loc_col;
ahat[cnt_f++] -= A_ext_data[kk]*distribute;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(num_functions == 1 ||
dof_func_offd[loc_col] == dof_func_offd[i1])
{
indx = ihat_offd[loc_col];
if (indx > -1)
ahat_offd[indx] -= A_ext_data[kk]*distribute;
else if(P_marker_offd[loc_col] >= jj_begin_row_offd)
{
ihat_offd[loc_col] = cnt_c_offd;
ipnt_offd[cnt_c_offd] = loc_col;
ahat_offd[cnt_c_offd++] -= A_ext_data[kk]*distribute;
}
else
{
ihat_offd[loc_col] = cnt_f_offd;
ipnt_offd[cnt_f_offd] = loc_col;
ahat_offd[cnt_f_offd++] -= A_ext_data[kk]*distribute;
}
}
}
}
}
}
}
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
wall_2 += wall_time;
fflush(NULL);
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
diagonal = ahat[cnt_c];
ahat[cnt_c] = 0;
sum_pos = 0;
sum_pos_C = 0;
sum_neg = 0;
sum_neg_C = 0;
sum = 0;
sum_C = 0;
if(sep_weight == 1)
{
for (jj=0; jj < cnt_c; jj++)
{
if (ahat[jj] > 0)
{
sum_pos_C += ahat[jj];
}
else
{
sum_neg_C += ahat[jj];
}
}
if(num_procs > 1)
{
for (jj=0; jj < cnt_c_offd; jj++)
{
if (ahat_offd[jj] > 0)
{
sum_pos_C += ahat_offd[jj];
}
else
{
sum_neg_C += ahat_offd[jj];
}
}
}
sum_pos = sum_pos_C;
sum_neg = sum_neg_C;
for (jj=cnt_c+1; jj < cnt_f; jj++)
{
if (ahat[jj] > 0)
{
sum_pos += ahat[jj];
}
else
{
sum_neg += ahat[jj];
}
ahat[jj] = 0;
}
if(num_procs > 1)
{
for (jj=cnt_c_offd; jj < cnt_f_offd; jj++)
{
if (ahat_offd[jj] > 0)
{
sum_pos += ahat_offd[jj];
}
else
{
sum_neg += ahat_offd[jj];
}
ahat_offd[jj] = 0;
}
}
if (sum_neg_C*diagonal) alfa = sum_neg/sum_neg_C/diagonal;
if (sum_pos_C*diagonal) beta = sum_pos/sum_pos_C/diagonal;
/*-----------------------------------------------------------------
* Set interpolation weight by dividing by the diagonal.
*-----------------------------------------------------------------*/
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
j1 = ihat[P_diag_j[jj]];
if (ahat[j1] > 0)
P_diag_data[jj] = -beta*ahat[j1];
else
P_diag_data[jj] = -alfa*ahat[j1];
P_diag_j[jj] = fine_to_coarse[P_diag_j[jj]];
ahat[j1] = 0;
}
for (jj=0; jj < cnt_f; jj++)
ihat[ipnt[jj]] = -1;
if(num_procs > 1)
{
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
j1 = ihat_offd[P_offd_j[jj]];
if (ahat_offd[j1] > 0)
P_offd_data[jj] = -beta*ahat_offd[j1];
else
P_offd_data[jj] = -alfa*ahat_offd[j1];
ahat_offd[j1] = 0;
}
for (jj=0; jj < cnt_f_offd; jj++)
ihat_offd[ipnt_offd[jj]] = -1;
}
}
else
{
for (jj=0; jj < cnt_c; jj++)
{
sum_C += ahat[jj];
}
if(num_procs > 1)
{
for (jj=0; jj < cnt_c_offd; jj++)
{
sum_C += ahat_offd[jj];
}
}
sum = sum_C;
for (jj=cnt_c+1; jj < cnt_f; jj++)
{
sum += ahat[jj];
ahat[jj] = 0;
}
if(num_procs > 1)
{
for (jj=cnt_c_offd; jj < cnt_f_offd; jj++)
{
sum += ahat_offd[jj];
ahat_offd[jj] = 0;
}
}
if (sum_C*diagonal) alfa = sum/sum_C/diagonal;
/*-----------------------------------------------------------------
* Set interpolation weight by dividing by the diagonal.
*-----------------------------------------------------------------*/
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
j1 = ihat[P_diag_j[jj]];
P_diag_data[jj] = -alfa*ahat[j1];
P_diag_j[jj] = fine_to_coarse[P_diag_j[jj]];
ahat[j1] = 0;
}
for (jj=0; jj < cnt_f; jj++)
ihat[ipnt[jj]] = -1;
if(num_procs > 1)
{
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
j1 = ihat_offd[P_offd_j[jj]];
P_offd_data[jj] = -alfa*ahat_offd[j1];
ahat_offd[j1] = 0;
}
for (jj=0; jj < cnt_f_offd; jj++)
ihat_offd[ipnt_offd[jj]] = -1;
}
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
wall_3 += wall_time;
fflush(NULL);
}
}
}
if (debug_flag==4)
{
hypre_printf("Proc = %d fill part 1 %f part 2 %f part 3 %f\n",
my_id, wall_1, wall_2, wall_3);
fflush(NULL);
}
P = hypre_ParCSRMatrixCreate(comm,
total_old_global_cpts,
total_global_cpts,
num_old_cpts_global,
num_cpts_global,
0,
P_diag_i[n_coarse_old],
P_offd_i[n_coarse_old]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
hypre_CSRMatrixMemoryLocation(P_diag) = HYPRE_MEMORY_HOST;
hypre_CSRMatrixMemoryLocation(P_offd) = HYPRE_MEMORY_HOST;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_coarse_old];
P_offd_size = P_offd_i[n_coarse_old];
}
/* This builds col_map, col_map should be monotone increasing and contain
* global numbers. */
if(P_offd_size)
{
hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
}
hypre_MatvecCommPkgCreate(P);
for (i=0; i < n_fine; i++)
if (CF_marker[i] < -1) CF_marker[i] = -1;
*P_ptr = P;
/* Deallocate memory */
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(old_coarse_to_fine, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
hypre_TFree(ahat, HYPRE_MEMORY_HOST);
hypre_TFree(ihat, HYPRE_MEMORY_HOST);
hypre_TFree(ipnt, HYPRE_MEMORY_HOST);
if (full_off_procNodes)
{
hypre_TFree(ahat_offd, HYPRE_MEMORY_HOST);
hypre_TFree(ihat_offd, HYPRE_MEMORY_HOST);
hypre_TFree(ipnt_offd, HYPRE_MEMORY_HOST);
}
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(Sop);
hypre_CSRMatrixDestroy(A_ext);
hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST);
if(num_functions > 1)
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
hypre_MatvecCommPkgDestroy(extend_comm_pkg);
}
return hypre_error_flag;
}
/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildPartialExtInterp
 * Comment: Builds a partial extended interpolation operator whose rows
 *          correspond to the old coarse grid: C-points (CF_marker > 0)
 *          interpolate by identity, while new F-points (CF_marker == -2)
 *          interpolate from strongly influencing C-points up to distance
 *          two, including off-processor neighbors.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildPartialExtInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global,
HYPRE_BigInt *num_old_cpts_global,
HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag,
HYPRE_Real trunc_factor, HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
/* Communication Variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int my_id, num_procs;
/* Variables to store input variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
/*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;
HYPRE_BigInt total_global_cpts, my_first_cpt;
/* Variables to store strong connection matrix info */
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data = NULL;
HYPRE_Int *P_diag_i, *P_diag_j = NULL;
HYPRE_Real *P_offd_data = NULL;
HYPRE_Int *P_offd_i, *P_offd_j = NULL;
/*HYPRE_Int *col_map_offd_P = NULL;*/
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
HYPRE_Int *P_marker = NULL;
HYPRE_Int *P_marker_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *tmp_CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
/* Full row information for columns of A that are off diag*/
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data;
HYPRE_Int *A_ext_i;
HYPRE_BigInt *A_ext_j;
HYPRE_Int *fine_to_coarse = NULL;
HYPRE_BigInt *fine_to_coarse_offd = NULL;
HYPRE_Int *old_coarse_to_fine = NULL;
HYPRE_Int loc_col;
HYPRE_Int full_off_procNodes;
hypre_CSRMatrix *Sop;
HYPRE_Int *Sop_i;
HYPRE_BigInt *Sop_j;
HYPRE_Int sgn;
/* Variables to keep count of interpolatory points */
HYPRE_Int jj_counter, jj_counter_offd;
HYPRE_Int jj_begin_row, jj_end_row;
HYPRE_Int jj_begin_row_offd = 0;
HYPRE_Int jj_end_row_offd = 0;
HYPRE_Int coarse_counter;
HYPRE_Int n_coarse_old;
HYPRE_BigInt total_old_global_cpts;
/* Interpolation weight variables */
HYPRE_Real sum, diagonal, distribute;
HYPRE_Int strong_f_marker = -2;
/* Loop variables */
/*HYPRE_Int index;*/
HYPRE_Int cnt, old_cnt;
HYPRE_Int start_indexing = 0;
HYPRE_Int i, ii, i1, i2, jj, kk, k1, jj1;
HYPRE_BigInt big_k1;
/* Definitions */
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Real wall_time;
hypre_ParCSRCommPkg *extend_comm_pkg = NULL;
if (debug_flag==4) wall_time = time_getWallclockSeconds();
/* BEGIN */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
#ifdef HYPRE_NO_GLOBAL_PARTITION
my_first_cpt = num_cpts_global[0];
/*my_first_old_cpt = num_old_cpts_global[0];*/
n_coarse_old = (HYPRE_Int)(num_old_cpts_global[1] - num_old_cpts_global[0]);
/*n_coarse = num_cpts_global[1] - num_cpts_global[0];*/
if (my_id == (num_procs -1))
{
total_global_cpts = num_cpts_global[1];
total_old_global_cpts = num_old_cpts_global[1];
}
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
hypre_MPI_Bcast(&total_old_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
my_first_cpt = num_cpts_global[my_id];
/*my_first_old_cpt = num_old_cpts_global[my_id];*/
total_global_cpts = num_cpts_global[num_procs];
total_old_global_cpts = num_old_cpts_global[num_procs];
n_coarse_old = (HYPRE_Int)(num_old_cpts_global[my_id+1] - num_old_cpts_global[my_id]);
/*n_coarse = num_cpts_global[my_id+1] - num_cpts_global[my_id];*/
#endif
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
/* Set up off processor information (specifically for neighbors of
 * neighbors) */
full_off_procNodes = 0;
if (num_procs > 1)
{
if (hypre_exchange_interp_data(
&CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg,
A, CF_marker, S, num_functions, dof_func, 1))
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
return hypre_error_flag;
}
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixBigJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
Sop_i = hypre_CSRMatrixI(Sop);
Sop_j = hypre_CSRMatrixBigJ(Sop);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
 * Initialize counters and allocate mapping vector.
 *-----------------------------------------------------------------------*/
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_coarse_old+1, HYPRE_MEMORY_HOST);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_coarse_old+1, HYPRE_MEMORY_HOST);
if (n_fine)
{
old_coarse_to_fine = hypre_CTAlloc(HYPRE_Int, n_coarse_old, HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
}
if (full_off_procNodes)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST);
tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
}
hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
fine_to_coarse_offd, P_marker, P_marker_offd,
tmp_CF_marker_offd);
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
coarse_counter = 0;
cnt = 0;
old_cnt = 0;
for (i = 0; i < n_fine; i++)
{
fine_to_coarse[i] = -1;
if (CF_marker[i] == 1)
{
fine_to_coarse[i] = cnt++;
old_coarse_to_fine[old_cnt++] = i;
}
else if (CF_marker[i] == -2)
{
old_coarse_to_fine[old_cnt++] = i;
}
}
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
for (ii = 0; ii < n_coarse_old; ii++)
{
P_diag_i[ii] = jj_counter;
if (num_procs > 1)
P_offd_i[ii] = jj_counter_offd;
i = old_coarse_to_fine[ii];
if (CF_marker[i] > 0)
{
jj_counter++;
coarse_counter++;
}
/*--------------------------------------------------------------------
 * If i is an F-point, interpolation is from the C-points that
 * strongly influence i, or C-points that strongly influence F-points
 * that strongly influence i.
 *--------------------------------------------------------------------*/
else if (CF_marker[i] == -2)
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] > 0)
{ /* i1 is a C point */
if (P_marker[i1] < P_diag_i[ii])
{
P_marker[i1] = jj_counter;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{ /* i1 is a F point, loop through it's strong neighbors */
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] > 0)
{
if(P_marker[k1] < P_diag_i[ii])
{
P_marker[k1] = jj_counter;
jj_counter++;
}
}
}
if(num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if(col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] > 0)
{
if(P_marker_offd[k1] < P_offd_i[ii])
{
tmp_CF_marker_offd[k1] = 1;
P_marker_offd[k1] = jj_counter_offd;
jj_counter_offd++;
}
}
}
}
}
}
/* Look at off diag strong connections of i */
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[i1];
if (CF_marker_offd[i1] > 0)
{
if(P_marker_offd[i1] < P_offd_i[ii])
{
tmp_CF_marker_offd[i1] = 1;
P_marker_offd[i1] = jj_counter_offd;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{ /* F point; look at neighbors of i1. Sop contains global col
* numbers and entries that could be in S_diag or S_offd or
* neither. */
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
big_k1 = Sop_j[kk];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* In S_diag */
loc_col = (HYPRE_Int)(big_k1-col_1);
if(P_marker[loc_col] < P_diag_i[ii])
{
P_marker[loc_col] = jj_counter;
jj_counter++;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(P_marker_offd[loc_col] < P_offd_i[ii])
{
P_marker_offd[loc_col] = jj_counter_offd;
tmp_CF_marker_offd[loc_col] = 1;
jj_counter_offd++;
}
}
}
}
}
}
}
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d determine structure %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
if (debug_flag== 4) wall_time = time_getWallclockSeconds();
P_diag_size = jj_counter;
P_offd_size = jj_counter_offd;
if (P_diag_size)
{
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);
}
if (P_offd_size)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);
}
P_diag_i[n_coarse_old] = jj_counter;
P_offd_i[n_coarse_old] = jj_counter_offd;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/* Fine to coarse mapping */
if(num_procs > 1)
{
hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
full_off_procNodes, my_first_cpt,
fine_to_coarse_offd);
}
for (i = 0; i < n_fine; i++)
P_marker[i] = -1;
for (i = 0; i < full_off_procNodes; i++)
P_marker_offd[i] = -1;
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
for (ii = 0; ii < n_coarse_old; ii++)
{
jj_begin_row = jj_counter;
jj_begin_row_offd = jj_counter_offd;
i = old_coarse_to_fine[ii];
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] > 0)
{
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else if (CF_marker[i] == -2)
{
strong_f_marker--;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
if (P_marker[i1] < jj_begin_row)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{
P_marker[i1] = strong_f_marker;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] >= 0)
{
if(P_marker[k1] < jj_begin_row)
{
P_marker[k1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[k1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
if(num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if(col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if(CF_marker_offd[k1] >= 0)
{
if(P_marker_offd[k1] < jj_begin_row_offd)
{
P_marker_offd[k1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = k1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[i1];
if ( CF_marker_offd[i1] >= 0)
{
if(P_marker_offd[i1] < jj_begin_row_offd)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
big_k1 = Sop_j[kk];
/* Find local col number */
if(big_k1 >= col_1 && big_k1 < col_n)
{
loc_col = (HYPRE_Int)(big_k1-col_1);
if(P_marker[loc_col] < jj_begin_row)
{
P_marker[loc_col] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[loc_col];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(P_marker_offd[loc_col] < jj_begin_row_offd)
{
P_marker_offd[loc_col] = jj_counter_offd;
P_offd_j[jj_counter_offd]=loc_col;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
jj_end_row = jj_counter;
jj_end_row_offd = jj_counter_offd;
diagonal = A_diag_data[A_diag_i[i]];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{ /* i1 is a c-point and strongly influences i, accumulate
* a_(i,i1) into interpolation weight */
i1 = A_diag_j[jj];
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
else if(P_marker[i1] == strong_f_marker)
{
sum = zero;
sgn = 1;
if(A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
/* Loop over row of A for point i1 and calculate the sum
 * of the connections to c-points that strongly influence i. */
for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if((P_marker[i2] >= jj_begin_row) && (sgn*A_diag_data[jj1]) < 0)
sum += A_diag_data[jj1];
}
if(num_procs > 1)
{
for(jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if(P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn*A_offd_data[jj1]) < 0)
sum += A_offd_data[jj1];
}
}
if(sum != 0)
{
distribute = A_diag_data[jj]/sum;
/* Loop over row of A for point i1 and do the distribution */
for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if(P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0)
P_diag_data[P_marker[i2]] +=
distribute*A_diag_data[jj1];
}
if(num_procs > 1)
{
for(jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if(P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn*A_offd_data[jj1]) < 0)
P_offd_data[P_marker_offd[i2]] +=
distribute*A_offd_data[jj1];
}
}
}
else
{
diagonal += A_diag_data[jj];
}
}
/* neighbor i1 weakly influences i, accumulate a_(i,i1) into
* diagonal */
else if (CF_marker[i1] != -3)
{
if(num_functions == 1 || dof_func[i] == dof_func[i1])
diagonal += A_diag_data[jj];
}
}
if(num_procs > 1)
{
for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
if(P_marker_offd[i1] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
else if(P_marker_offd[i1] == strong_f_marker)
{
sum = zero;
for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
{
big_k1 = A_ext_j[jj1];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if(P_marker[loc_col] >= jj_begin_row )
sum += A_ext_data[jj1];
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(P_marker_offd[loc_col] >= jj_begin_row_offd &&
(sgn*A_ext_data[jj1]) < 0)
sum += A_ext_data[jj1];
}
}
if(sum != 0)
{
distribute = A_offd_data[jj] / sum;
for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
{
big_k1 = A_ext_j[jj1];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if(P_marker[loc_col] >= jj_begin_row)
P_diag_data[P_marker[loc_col]] += distribute*
A_ext_data[jj1];
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(P_marker_offd[loc_col] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[loc_col]] += distribute*
A_ext_data[jj1];
}
}
}
else
{
diagonal += A_offd_data[jj];
}
}
else if (CF_marker_offd[i1] != -3)
{
if(num_functions == 1 || dof_func[i] == dof_func_offd[i1])
diagonal += A_offd_data[jj];
}
}
}
if (diagonal)
{
for(jj = jj_begin_row; jj < jj_end_row; jj++)
P_diag_data[jj] /= -diagonal;
for(jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
P_offd_data[jj] /= -diagonal;
}
}
strong_f_marker--;
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d fill structure %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
P = hypre_ParCSRMatrixCreate(comm,
total_old_global_cpts,
total_global_cpts,
num_old_cpts_global,
num_cpts_global,
0,
P_diag_i[n_coarse_old],
P_offd_i[n_coarse_old]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
hypre_CSRMatrixMemoryLocation(P_diag) = HYPRE_MEMORY_HOST;
hypre_CSRMatrixMemoryLocation(P_offd) = HYPRE_MEMORY_HOST;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_coarse_old];
P_offd_size = P_offd_i[n_coarse_old];
}
/* This builds col_map, col_map should be monotone increasing and contain
* global numbers. */
if(P_offd_size)
{
hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
}
hypre_MatvecCommPkgCreate(P);
for (i=0; i < n_fine; i++)
if (CF_marker[i] < -1) CF_marker[i] = -1;
*P_ptr = P;
/* Deallocate memory */
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(old_coarse_to_fine, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(Sop);
hypre_CSRMatrixDestroy(A_ext);
hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST);
if(num_functions > 1)
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
hypre_MatvecCommPkgDestroy(extend_comm_pkg);
}
return hypre_error_flag;
}
|
dataset.h | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_DATASET_H_
#define LIGHTGBM_DATASET_H_
#include <LightGBM/config.h>
#include <LightGBM/feature_group.h>
#include <LightGBM/meta.h>
#include <LightGBM/utils/common.h>
#include <LightGBM/utils/openmp_wrapper.h>
#include <LightGBM/utils/random.h>
#include <LightGBM/utils/text_reader.h>
#include <string>
#include <functional>
#include <memory>
#include <mutex>
#include <unordered_set>
#include <utility>
#include <vector>
namespace LightGBM {
/*! \brief forward declaration */
class DatasetLoader;
/*!
* \brief This class is used to store some meta(non-feature) data for training data,
* e.g. labels, weights, initial scores, query level informations.
*
* Some details:
* 1. Label, used for training.
* 2. Weights, weighs of records, optional
* 3. Query Boundaries, necessary for lambdarank.
* The documents of i-th query is in [ query_boundaries[i], query_boundaries[i+1] )
* 4. Query Weights, automatically calculated from weights and query_boundaries (if both exist);
* the weight of the i-th query is sum(weights[query_boundaries[i]], ..., weights[query_boundaries[i+1]-1]) / (query_boundaries[i+1] - query_boundaries[i])
* 5. Initial score. optional. if existing, the model will boost from this score, otherwise will start from 0.
*/
class Metadata {
 public:
  /*!
   * \brief Null constructor
   */
  Metadata();
  /*!
   * \brief Initialization will load query level information, since it is needed for sampling data
   * \param data_filename Filename of data
   * \param initscore_file Filename of initial scores
   */
  void Init(const char* data_filename, const char* initscore_file);
  /*!
   * \brief Initialize as a subset of another Metadata object
   * \param metadata Source metadata object
   * \param used_indices Indices (into the source) of the used records
   * \param num_used_indices Number of entries in used_indices
   */
  void Init(const Metadata& metadata, const data_size_t* used_indices, data_size_t num_used_indices);
  /*!
   * \brief Initialize with binary memory
   * \param memory Pointer to memory
   */
  void LoadFromMemory(const void* memory);
  /*! \brief Destructor */
  ~Metadata();
  /*!
   * \brief Initial work, will allocate space for label, weight (if exists) and query (if exists)
   * \param num_data Number of training data
   * \param weight_idx Index of weight column, < 0 means it doesn't exist
   * \param query_idx Index of query id column, < 0 means it doesn't exist
   */
  void Init(data_size_t num_data, int weight_idx, int query_idx);
  /*!
   * \brief Partition label by used indices
   * \param used_indices Indices of locally used records
   */
  void PartitionLabel(const std::vector<data_size_t>& used_indices);
  /*!
   * \brief Partition meta data according to local used indices if needed
   * \param num_all_data Number of total training data, including other machines' data on parallel learning
   * \param used_data_indices Indices of local used training data
   */
  void CheckOrPartition(data_size_t num_all_data,
                        const std::vector<data_size_t>& used_data_indices);
  /*!
   * \brief Set all labels
   * \param label Pointer to label data
   * \param len Number of elements in label
   */
  void SetLabel(const label_t* label, data_size_t len);
  /*!
   * \brief Set all weights
   * \param weights Pointer to weight data
   * \param len Number of elements in weights
   */
  void SetWeights(const label_t* weights, data_size_t len);
  /*!
   * \brief Set query ids
   * \param query Pointer to query id data
   * \param len Number of elements in query
   */
  void SetQuery(const data_size_t* query, data_size_t len);
  /*!
   * \brief Set initial scores
   * \param init_score Initial scores, this class will manage memory for init_score.
   * \param len Number of elements in init_score
   */
  void SetInitScore(const double* init_score, data_size_t len);
  /*!
   * \brief Save binary data to file
   * \param writer File writer to write to
   */
  void SaveBinaryToFile(const VirtualFileWriter* writer) const;
  /*!
   * \brief Get size in bytes of this object
   */
  size_t SizesInByte() const;
  /*!
   * \brief Get pointer of label
   * \return Pointer of label
   */
  inline const label_t* label() const { return label_.data(); }
  /*!
   * \brief Set label for one record
   * \param idx Index of this record
   * \param value Label value of this record
   */
  inline void SetLabelAt(data_size_t idx, label_t value) {
    label_[idx] = value;
  }
  /*!
   * \brief Set weight for one record
   * \param idx Index of this record
   * \param value Weight value of this record
   */
  inline void SetWeightAt(data_size_t idx, label_t value) {
    weights_[idx] = value;
  }
  /*!
   * \brief Set query id for one record
   * \param idx Index of this record
   * \param value Query id value of this record
   */
  inline void SetQueryAt(data_size_t idx, data_size_t value) {
    // value is already data_size_t, so the cast is a no-op
    queries_[idx] = static_cast<data_size_t>(value);
  }
  /*!
   * \brief Get weights, if they don't exist, will return nullptr
   * \return Pointer of weights
   */
  inline const label_t* weights() const {
    if (!weights_.empty()) {
      return weights_.data();
    } else {
      return nullptr;
    }
  }
  /*!
   * \brief Get data boundaries on queries, if they don't exist, will return nullptr
   *        we assume data is ordered by query;
   *        the interval [query_boundaries[i], query_boundaries[i+1])
   *        holds the data indices of query i.
   * \return Pointer of data boundaries on queries
   */
  inline const data_size_t* query_boundaries() const {
    if (!query_boundaries_.empty()) {
      return query_boundaries_.data();
    } else {
      return nullptr;
    }
  }
  /*!
   * \brief Get number of queries
   * \return Number of queries
   */
  inline data_size_t num_queries() const { return num_queries_; }
  /*!
   * \brief Get weights for queries, if they don't exist, will return nullptr
   * \return Pointer of weights for queries
   */
  inline const label_t* query_weights() const {
    if (!query_weights_.empty()) {
      return query_weights_.data();
    } else {
      return nullptr;
    }
  }
  /*!
   * \brief Get initial scores, if they don't exist, will return nullptr
   * \return Pointer of initial scores
   */
  inline const double* init_score() const {
    if (!init_score_.empty()) {
      return init_score_.data();
    } else {
      return nullptr;
    }
  }
  /*!
   * \brief Get size of initial scores
   */
  inline int64_t num_init_score() const { return num_init_score_; }
  /*! \brief Disable copy */
  Metadata& operator=(const Metadata&) = delete;
  /*! \brief Disable copy */
  Metadata(const Metadata&) = delete;

 private:
  /*! \brief Load initial scores from file */
  void LoadInitialScore(const char* initscore_file);
  /*! \brief Load weights from file */
  void LoadWeights();
  /*! \brief Load query boundaries from file */
  void LoadQueryBoundaries();
  /*! \brief Load query weights */
  void LoadQueryWeights();
  /*! \brief Filename of current data */
  std::string data_filename_;
  /*! \brief Number of data */
  data_size_t num_data_;
  /*! \brief Number of weights, used to check correct weight file */
  data_size_t num_weights_;
  /*! \brief Label data */
  std::vector<label_t> label_;
  /*! \brief Weights data */
  std::vector<label_t> weights_;
  /*! \brief Query boundaries */
  std::vector<data_size_t> query_boundaries_;
  /*! \brief Query weights */
  std::vector<label_t> query_weights_;
  /*! \brief Number of queries */
  data_size_t num_queries_;
  /*! \brief Number of initial scores, used to check correct initial-score file */
  int64_t num_init_score_;
  /*! \brief Initial score */
  std::vector<double> init_score_;
  /*! \brief Queries data */
  std::vector<data_size_t> queries_;
  /*! \brief Mutex for thread-safe calls */
  std::mutex mutex_;
  /*! \brief Whether weights were loaded from file */
  bool weight_load_from_file_;
  /*! \brief Whether queries were loaded from file */
  bool query_load_from_file_;
  /*! \brief Whether initial scores were loaded from file */
  bool init_score_load_from_file_;
};
/*! \brief Interface for Parser */
class Parser {
 public:
  /*! \brief virtual destructor */
  virtual ~Parser() {}
  /*!
   * \brief Parse one line with label
   * \param str One line record, string format, should end with '\0'
   * \param out_features Output columns, stored as (column_idx, value) pairs
   * \param out_label Label will be stored here if it exists
   */
  virtual void ParseOneLine(const char* str,
                            std::vector<std::pair<int, double>>* out_features, double* out_label) const = 0;
  /*! \brief Number of feature columns this parser produces */
  virtual int NumFeatures() const = 0;
  /*!
   * \brief Create an object of parser, will auto choose the format depending on the file
   * \param filename Filename of data
   * \param header Whether the file contains a header line
   * \param num_features Pass num_features of this data file if you know, <=0 means don't know
   * \param label_idx Index of label column
   * \return Pointer to a new parser object (presumably the caller owns and must delete it — confirm at call sites)
   */
  static Parser* CreateParser(const char* filename, bool header, int num_features, int label_idx);
};
/*! \brief The main class of data set,
 *         which is used for training or validation
 */
class Dataset {
 public:
  friend DatasetLoader;
  /*! \brief Null constructor */
  LIGHTGBM_EXPORT Dataset();
  /*!
   * \brief Constructor
   * \param num_data Number of data rows
   */
  LIGHTGBM_EXPORT Dataset(data_size_t num_data);
  /*!
   * \brief Build the internal feature-group representation from per-feature bin mappers
   * \param bin_mappers Bin mappers of the original features
   * \param num_total_features Number of original features
   * \param forced_bins Forced bin upper bounds for each feature
   * \param sample_non_zero_indices Sampled non-zero row indices, per column
   * \param num_per_col Number of sampled values, per column
   * \param num_sample_col Number of sampled columns
   * \param total_sample_cnt Total number of sampled rows
   * \param io_config Configuration
   */
  void Construct(
    std::vector<std::unique_ptr<BinMapper>>* bin_mappers,
    int num_total_features,
    const std::vector<std::vector<double>>& forced_bins,
    int** sample_non_zero_indices,
    const int* num_per_col,
    int num_sample_col,
    size_t total_sample_cnt,
    const Config& io_config);
  /*! \brief Destructor */
  LIGHTGBM_EXPORT ~Dataset();
  /*!
   * \brief Check whether another dataset has an identical feature layout
   *        (same feature counts, label index and aligned bin mappers)
   */
  LIGHTGBM_EXPORT bool CheckAlign(const Dataset& other) const {
    if (num_features_ != other.num_features_) {
      return false;
    }
    if (num_total_features_ != other.num_total_features_) {
      return false;
    }
    if (label_idx_ != other.label_idx_) {
      return false;
    }
    for (int i = 0; i < num_features_; ++i) {
      if (!FeatureBinMapper(i)->CheckAlign(*(other.FeatureBinMapper(i)))) {
        return false;
      }
    }
    return true;
  }
  /*!
   * \brief Push one dense row of raw values into the bin data
   * \param tid Id of the calling thread
   * \param row_idx Row index
   * \param feature_values Raw value of every original column
   */
  inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<double>& feature_values) {
    if (is_finish_load_) { return; }  // no-op once loading has finished
    for (size_t i = 0; i < feature_values.size() && i < static_cast<size_t>(num_total_features_); ++i) {
      int feature_idx = used_feature_map_[i];
      if (feature_idx >= 0) {  // negative entries mark unused columns
        const int group = feature2group_[feature_idx];
        const int sub_feature = feature2subfeature_[feature_idx];
        feature_groups_[group]->PushData(tid, sub_feature, row_idx, feature_values[i]);
      }
    }
  }
  /*!
   * \brief Push one sparse row, given as (column_idx, value) pairs
   * \param tid Id of the calling thread
   * \param row_idx Row index
   * \param feature_values Pairs of (original column index, raw value)
   */
  inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<std::pair<int, double>>& feature_values) {
    if (is_finish_load_) { return; }  // no-op once loading has finished
    for (auto& inner_data : feature_values) {
      if (inner_data.first >= num_total_features_) { continue; }  // ignore out-of-range columns
      int feature_idx = used_feature_map_[inner_data.first];
      if (feature_idx >= 0) {  // negative entries mark unused columns
        const int group = feature2group_[feature_idx];
        const int sub_feature = feature2subfeature_[feature_idx];
        feature_groups_[group]->PushData(tid, sub_feature, row_idx, inner_data.second);
      }
    }
  }
  /*! \brief Push one value directly, addressed by (group, sub_feature) */
  inline void PushOneData(int tid, data_size_t row_idx, int group, int sub_feature, double value) {
    feature_groups_[group]->PushData(tid, sub_feature, row_idx, value);
  }
  /*! \brief Map an inner (used) feature index back to the original column index */
  inline int RealFeatureIndex(int fidx) const {
    return real_feature_idx_[fidx];
  }
  /*! \brief Map an original column index to the inner feature index (< 0 if unused) */
  inline int InnerFeatureIndex(int col_idx) const {
    return used_feature_map_[col_idx];
  }
  /*! \brief Get the feature group an inner feature belongs to */
  inline int Feature2Group(int feature_idx) const {
    return feature2group_[feature_idx];
  }
  /*! \brief Get the position of an inner feature inside its group.
   *         NOTE: "Feture" is a historical misspelling; the name is kept
   *         unchanged because external callers depend on it. */
  inline int Feture2SubFeature(int feature_idx) const {
    return feature2subfeature_[feature_idx];
  }
  /*! \brief Get the cumulative bin boundary of a feature group */
  inline uint64_t GroupBinBoundary(int group_idx) const {
    return group_bin_boundaries_[group_idx];
  }
  /*! \brief Get the total number of bins across all feature groups */
  inline uint64_t NumTotalBin() const {
    return group_bin_boundaries_.back();
  }
  /*! \brief Get the original column indices of all used features */
  inline std::vector<int> ValidFeatureIndices() const {
    std::vector<int> ret;
    for (int i = 0; i < num_total_features_; ++i) {
      if (used_feature_map_[i] >= 0) {
        ret.push_back(i);
      }
    }
    return ret;
  }
  /*! \brief Resize to a new number of data rows */
  void ReSize(data_size_t num_data);
  /*! \brief Copy a subset of rows (and optionally meta data) from another dataset */
  void CopySubset(const Dataset* fullset, const data_size_t* used_indices, data_size_t num_used_indices, bool need_meta_data);
  /*! \brief Finish the loading phase; afterwards PushOneRow becomes a no-op */
  LIGHTGBM_EXPORT void FinishLoad();
  /*! \brief Set a float-typed meta data field by name */
  LIGHTGBM_EXPORT bool SetFloatField(const char* field_name, const float* field_data, data_size_t num_element);
  /*! \brief Set a double-typed meta data field by name */
  LIGHTGBM_EXPORT bool SetDoubleField(const char* field_name, const double* field_data, data_size_t num_element);
  /*! \brief Set an int-typed meta data field by name */
  LIGHTGBM_EXPORT bool SetIntField(const char* field_name, const int* field_data, data_size_t num_element);
  /*! \brief Get a float-typed meta data field by name */
  LIGHTGBM_EXPORT bool GetFloatField(const char* field_name, data_size_t* out_len, const float** out_ptr);
  /*! \brief Get a double-typed meta data field by name */
  LIGHTGBM_EXPORT bool GetDoubleField(const char* field_name, data_size_t* out_len, const double** out_ptr);
  /*! \brief Get an int-typed meta data field by name */
  LIGHTGBM_EXPORT bool GetIntField(const char* field_name, data_size_t* out_len, const int** out_ptr);
  /*! \brief Get an int8-typed meta data field by name */
  LIGHTGBM_EXPORT bool GetInt8Field(const char* field_name, data_size_t* out_len, const int8_t** out_ptr);
  /*!
   * \brief Save current dataset into binary file, will save to "filename.bin"
   */
  LIGHTGBM_EXPORT void SaveBinaryFile(const char* bin_filename);
  /*! \brief Dump the dataset content as text */
  LIGHTGBM_EXPORT void DumpTextFile(const char* text_filename);
  /*! \brief Copy the feature mapper (groups, bin mappers) from another dataset */
  LIGHTGBM_EXPORT void CopyFeatureMapperFrom(const Dataset* dataset);
  /*! \brief Create a validation dataset aligned with the given dataset */
  LIGHTGBM_EXPORT void CreateValid(const Dataset* dataset);
  /*!
   * \brief Construct feature histograms for one leaf
   * \param is_feature_used Mask of features to build histograms for
   * \param data_indices Indices of the data rows in the leaf
   * \param num_data Number of rows in the leaf
   * \param leaf_idx Index of the leaf
   * \param ordered_bins Ordered-bin representation, per group
   * \param gradients First-order gradients
   * \param hessians Second-order gradients
   * \param ordered_gradients Buffer for gradients reordered by data_indices
   * \param ordered_hessians Buffer for hessians reordered by data_indices
   * \param is_constant_hessian Whether the hessian is constant
   * \param histogram_data Output histogram entries
   */
  void ConstructHistograms(const std::vector<int8_t>& is_feature_used,
                           const data_size_t* data_indices, data_size_t num_data,
                           int leaf_idx,
                           std::vector<std::unique_ptr<OrderedBin>>* ordered_bins,
                           const score_t* gradients, const score_t* hessians,
                           score_t* ordered_gradients, score_t* ordered_hessians,
                           bool is_constant_hessian,
                           HistogramBinEntry* histogram_data) const;
  /*! \brief Fix a feature's histogram using the total gradient/hessian sums */
  void FixHistogram(int feature_idx, double sum_gradient, double sum_hessian, data_size_t num_data,
                    HistogramBinEntry* data) const;
  /*!
   * \brief Partition data indices by a threshold split on one feature
   * \return Value returned by the group's Split (presumably the count on the
   *         lte side — confirm in FeatureGroup::Split)
   */
  inline data_size_t Split(int feature,
                           const uint32_t* threshold, int num_threshold, bool default_left,
                           data_size_t* data_indices, data_size_t num_data,
                           data_size_t* lte_indices, data_size_t* gt_indices) const {
    const int group = feature2group_[feature];
    const int sub_feature = feature2subfeature_[feature];
    return feature_groups_[group]->Split(sub_feature, threshold, num_threshold, default_left, data_indices, num_data, lte_indices, gt_indices);
  }
  /*! \brief Bin offset of a feature inside its group: 1 for the first sub-feature, 0 otherwise */
  inline int SubFeatureBinOffset(int i) const {
    const int sub_feature = feature2subfeature_[i];
    if (sub_feature == 0) {
      return 1;
    } else {
      return 0;
    }
  }
  /*! \brief Number of bins of one inner feature */
  inline int FeatureNumBin(int i) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature]->num_bin();
  }
  /*! \brief Monotone-constraint type of a feature; 0 when no constraints are configured */
  inline int8_t FeatureMonotone(int i) const {
    if (monotone_types_.empty()) {
      return 0;
    } else {
      return monotone_types_[i];
    }
  }
  /*! \brief Penalty of a feature; 1 when no penalties are configured.
   *         NOTE: "Penalte" is a historical misspelling; the name is kept
   *         unchanged because external callers depend on it. */
  inline double FeaturePenalte(int i) const {
    if (feature_penalty_.empty()) {
      return 1;
    } else {
      return feature_penalty_[i];
    }
  }
  /*! \brief Whether any feature has a non-zero monotone constraint */
  bool HasMonotone() const {
    if (monotone_types_.empty()) {
      return false;
    } else {
      for (size_t i = 0; i < monotone_types_.size(); ++i) {
        if (monotone_types_[i] != 0) {
          return true;
        }
      }
      return false;
    }
  }
  /*! \brief Total number of bins of one feature group */
  inline int FeatureGroupNumBin(int group) const {
    return feature_groups_[group]->num_total_bin_;
  }
  /*! \brief Bin mapper of one inner feature */
  inline const BinMapper* FeatureBinMapper(int i) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature].get();
  }
  /*! \brief Bin data of the group containing feature i */
  inline const Bin* FeatureBin(int i) const {
    const int group = feature2group_[i];
    return feature_groups_[group]->bin_data_.get();
  }
  /*! \brief Bin data of one feature group */
  inline const Bin* FeatureGroupBin(int group) const {
    return feature_groups_[group]->bin_data_.get();
  }
  /*! \brief Whether a feature group is stored sparsely */
  inline bool FeatureGroupIsSparse(int group) const {
    return feature_groups_[group]->is_sparse_;
  }
  /*! \brief Iterator over the bins of one inner feature */
  inline BinIterator* FeatureIterator(int i) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->SubFeatureIterator(sub_feature);
  }
  /*! \brief Iterator over the bins of one feature group */
  inline BinIterator* FeatureGroupIterator(int group) const {
    return feature_groups_[group]->FeatureGroupIterator();
  }
  /*! \brief Convert a bin index of feature i back to a real threshold value */
  inline double RealThreshold(int i, uint32_t threshold) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature]->BinToValue(threshold);
  }
  // given a real threshold, find the closest threshold bin
  inline uint32_t BinThreshold(int i, double threshold_double) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature]->ValueToBin(threshold_double);
  }
  /*! \brief Create the ordered-bin representation of every group in parallel.
   *         Exceptions thrown inside the parallel region are captured by the
   *         OMP_* macros and re-thrown after the loop. */
  inline void CreateOrderedBins(std::vector<std::unique_ptr<OrderedBin>>* ordered_bins) const {
    ordered_bins->resize(num_groups_);
    OMP_INIT_EX();
    #pragma omp parallel for schedule(guided)
    for (int i = 0; i < num_groups_; ++i) {
      OMP_LOOP_EX_BEGIN();
      ordered_bins->at(i).reset(feature_groups_[i]->bin_data_->CreateOrderedBin());
      OMP_LOOP_EX_END();
    }
    OMP_THROW_EX();
  }
  /*!
   * \brief Get meta data pointer
   * \return Pointer of meta data
   */
  inline const Metadata& metadata() const { return metadata_; }
  /*! \brief Get Number of used features */
  inline int num_features() const { return num_features_; }
  /*! \brief Get Number of feature groups */
  inline int num_feature_groups() const { return num_groups_;}
  /*! \brief Get Number of total features */
  inline int num_total_features() const { return num_total_features_; }
  /*! \brief Get the index of label column */
  inline int label_idx() const { return label_idx_; }
  /*! \brief Get names of current data set */
  inline const std::vector<std::string>& feature_names() const { return feature_names_; }
  /*! \brief Set (and sanitize) the feature names; aborts on size mismatch,
   *         non-ASCII or special JSON characters, or duplicate names;
   *         spaces are replaced with underscores. */
  inline void set_feature_names(const std::vector<std::string>& feature_names) {
    if (feature_names.size() != static_cast<size_t>(num_total_features_)) {
      Log::Fatal("Size of feature_names error, should equal with total number of features");
    }
    feature_names_ = std::vector<std::string>(feature_names);
    std::unordered_set<std::string> feature_name_set;
    // replace ' ' in feature_names with '_'
    bool spaceInFeatureName = false;
    for (auto& feature_name : feature_names_) {
      // check ascii
      if (!Common::CheckASCII(feature_name)) {
        Log::Fatal("Do not support non-ASCII characters in feature name.");
      }
      // check json
      if (!Common::CheckAllowedJSON(feature_name)) {
        Log::Fatal("Do not support special JSON characters in feature name.");
      }
      if (feature_name.find(' ') != std::string::npos) {
        spaceInFeatureName = true;
        std::replace(feature_name.begin(), feature_name.end(), ' ', '_');
      }
      if (feature_name_set.count(feature_name) > 0) {
        Log::Fatal("Feature (%s) appears more than one time.", feature_name.c_str());
      }
      feature_name_set.insert(feature_name);
    }
    if (spaceInFeatureName) {
      Log::Warning("Find whitespaces in feature_names, replace with underlines");
    }
  }
  /*! \brief Get the bin info string of every original column ("none" for unused columns) */
  inline std::vector<std::string> feature_infos() const {
    std::vector<std::string> bufs;
    for (int i = 0; i < num_total_features_; i++) {
      int fidx = used_feature_map_[i];
      if (fidx == -1) {
        bufs.push_back("none");
      } else {
        const auto bin_mapper = FeatureBinMapper(fidx);
        bufs.push_back(bin_mapper->bin_info());
      }
    }
    return bufs;
  }
  /*! \brief Re-apply configuration parameters */
  void ResetConfig(const char* parameters);
  /*! \brief Get Number of data */
  inline data_size_t num_data() const { return num_data_; }
  /*! \brief Disable copy */
  Dataset& operator=(const Dataset&) = delete;
  /*! \brief Disable copy */
  Dataset(const Dataset&) = delete;
  /*! \brief Append the features of another dataset to this one */
  void addFeaturesFrom(Dataset* other);

 private:
  /*! \brief Filename of current data */
  std::string data_filename_;
  /*! \brief Store used features */
  std::vector<std::unique_ptr<FeatureGroup>> feature_groups_;
  /*! \brief Mapper from real feature index to used index*/
  std::vector<int> used_feature_map_;
  /*! \brief Number of used features*/
  int num_features_;
  /*! \brief Number of total features*/
  int num_total_features_;
  /*! \brief Number of total data*/
  data_size_t num_data_;
  /*! \brief Store some label level data*/
  Metadata metadata_;
  /*! \brief index of label column */
  int label_idx_ = 0;
  /*! \brief Threshold for treating a feature as a sparse feature */
  double sparse_threshold_;
  /*! \brief store feature names */
  std::vector<std::string> feature_names_;
  /*! \brief Token identifying binary dataset files (see name; previous comment was a copy-paste error) */
  static const char* binary_file_token;
  /*! \brief Number of feature groups */
  int num_groups_;
  /*! \brief Map from inner feature index to original column index */
  std::vector<int> real_feature_idx_;
  /*! \brief Map from inner feature index to its feature group */
  std::vector<int> feature2group_;
  /*! \brief Map from inner feature index to its position inside the group */
  std::vector<int> feature2subfeature_;
  /*! \brief Cumulative bin boundaries of the feature groups */
  std::vector<uint64_t> group_bin_boundaries_;
  /*! \brief First inner feature index of each group */
  std::vector<int> group_feature_start_;
  /*! \brief Number of features in each group */
  std::vector<int> group_feature_cnt_;
  /*! \brief Monotone-constraint type per feature (empty when unconstrained) */
  std::vector<int8_t> monotone_types_;
  /*! \brief Penalty per feature (empty when no penalties) */
  std::vector<double> feature_penalty_;
  /*! \brief Whether loading has finished; when true PushOneRow is a no-op */
  bool is_finish_load_;
  /*! \brief Maximum number of bins (from config) */
  int max_bin_;
  /*! \brief Per-feature maximum number of bins (from config) */
  std::vector<int32_t> max_bin_by_feature_;
  /*! \brief Forced bin upper bounds per feature */
  std::vector<std::vector<double>> forced_bin_bounds_;
  /*! \brief Number of sampled rows used to construct bins (from config) */
  int bin_construct_sample_cnt_;
  /*! \brief Minimum number of data per bin (from config) */
  int min_data_in_bin_;
  /*! \brief Whether missing values are handled specially (from config) */
  bool use_missing_;
  /*! \brief Whether zero is treated as missing (from config) */
  bool zero_as_missing_;
};
} // namespace LightGBM
#endif // LIGHTGBM_DATASET_H_
|
HybridRepCenterOrbitals.h | //////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2019 QMCPACK developers.
//
// File developed by: Ye Luo, yeluo@anl.gov, Argonne National Laboratory
//
// File created by: Ye Luo, yeluo@anl.gov, Argonne National Laboratory
//
//////////////////////////////////////////////////////////////////////////////////////
/** @file HybridRepCenterOrbitals.h
*
* Hybrid representation atomic centered orbitals
*/
#ifndef QMCPLUSPLUS_HYBRIDREP_CENTER_ORBITALS_H
#define QMCPLUSPLUS_HYBRIDREP_CENTER_ORBITALS_H
#include <Particle/DistanceTableData.h>
#include <QMCWaveFunctions/LCAO/SoaSphericalTensor.h>
#include <spline2/MultiBspline1D.hpp>
#include <Numerics/SmoothFunctions.hpp>
namespace qmcplusplus
{
template<typename ST>
class AtomicOrbitals
{
public:
static const int D = 3;
using AtomicSplineType = typename bspline_traits<ST, 1>::SplineType;
using AtomicBCType = typename bspline_traits<ST, 1>::BCType;
using AtomicSingleSplineType = UBspline_1d_d;
using PointType = TinyVector<ST, D>;
using value_type = ST;
using vContainer_type = aligned_vector<ST>;
private:
// near core cutoff
ST rmin;
// far from core cutoff, rmin_sqrt>=rmin
ST rmin_sqrt;
ST cutoff, cutoff_buffer, spline_radius, non_overlapping_radius;
int spline_npoints, BaseN;
int NumBands, Npad;
PointType center_pos;
const int lmax, lm_tot;
SoaSphericalTensor<ST> Ylm;
vContainer_type l_vals;
vContainer_type r_power_minus_l;
///1D spline of radial functions of all the orbitals
std::shared_ptr<MultiBspline1D<ST>> SplineInst;
vContainer_type localV, localG, localL;
public:
AtomicOrbitals(int Lmax) : lmax(Lmax), lm_tot((Lmax + 1) * (Lmax + 1)), Ylm(Lmax)
{
r_power_minus_l.resize(lm_tot);
l_vals.resize(lm_tot);
for (int l = 0; l <= lmax; l++)
for (int m = -l; m <= l; m++)
l_vals[l * (l + 1) + m] = l;
rmin = std::exp(std::log(std::numeric_limits<ST>::min()) / std::max(Lmax, 1));
rmin = std::max(rmin, std::numeric_limits<ST>::epsilon());
rmin_sqrt = std::max(rmin, std::sqrt(std::numeric_limits<ST>::epsilon()));
}
// accessing functions, const only
ST getCutoff() const { return cutoff; }                                ///< cutoff radius
ST getCutoffBuffer() const { return cutoff_buffer; }                   ///< cutoff buffer radius
ST getSplineRadius() const { return spline_radius; }                   ///< outer radius of the radial spline grid
ST getNonOverlappingRadius() const { return non_overlapping_radius; }  ///< non-overlapping radius
int getSplineNpoints() const { return spline_npoints; }                ///< number of radial grid points
int getLmax() const { return lmax; }                                   ///< maximum angular momentum l
const PointType& getCenterPos() const { return center_pos; }           ///< position of this atomic center
/** Allocate per-band work storage and (re)create the radial spline table.
 *  \param Nb number of bands (orbitals); Npad is Nb rounded up by
 *  getAlignedSize for aligned access.  Each local* array holds one
 *  padded band-block per (l,m) channel.
 */
inline void resizeStorage(size_t Nb)
{
  NumBands = Nb;
  Npad     = getAlignedSize<ST>(Nb);
  localV.resize(Npad * lm_tot);
  localG.resize(Npad * lm_tot);
  localL.resize(Npad * lm_tot);
  create_spline();
}
/// broadcast the spline coefficient table to all ranks (via chunked_bcast)
void bcast_tables(Communicate* comm) { chunked_bcast(comm, SplineInst->getSplinePtr()); }
/// gather per-rank pieces of the spline table; Npad is the per-band stride,
/// offset gives each rank's portion (units defined by gatherv — confirm with caller)
void gather_tables(Communicate* comm, std::vector<int>& offset)
{
  gatherv(comm, SplineInst->getSplinePtr(), Npad, offset);
}
template<typename PT, typename VT>
inline void set_info(const PT& R,
const VT& cutoff_in,
const VT& cutoff_buffer_in,
const VT& spline_radius_in,
const VT& non_overlapping_radius_in,
const int spline_npoints_in)
{
center_pos[0] = R[0];
center_pos[1] = R[1];
center_pos[2] = R[2];
cutoff = cutoff_in;
cutoff_buffer = cutoff_buffer_in;
spline_radius = spline_radius_in;
spline_npoints = spline_npoints_in;
non_overlapping_radius = non_overlapping_radius_in;
BaseN = spline_npoints + 2;
}
/** Allocate the multi-band 1D radial spline.
 *  The grid spans [0, spline_radius] with spline_npoints points; the boundary
 *  code is FLAT at r = 0 and NATURAL at the outer end.  The table holds
 *  lm_tot * Npad splines: one per (l,m) channel per padded band.
 */
inline void create_spline()
{
  AtomicBCType bc;
  bc.lCode = FLAT;
  bc.rCode = NATURAL;
  Ugrid grid;
  grid.start = 0.0;
  grid.end   = spline_radius;
  grid.num   = spline_npoints;
  SplineInst = std::make_shared<MultiBspline1D<ST>>();
  SplineInst->create(grid, bc, lm_tot * Npad);
}
/// total memory footprint of the spline table in bytes
inline size_t getSplineSizeInBytes() const { return SplineInst->sizeInByte(); }
/// zero out all spline coefficients
inline void flush_zero() { SplineInst->flush_zero(); }
/// copy one single-orbital radial spline into slot (lm, ispline) of the big table
inline void set_spline(AtomicSingleSplineType* spline, int lm, int ispline)
{
  SplineInst->copy_spline(spline, lm * Npad + ispline, 0, BaseN);
}
/** Read the radial spline table from an HDF5 archive.
 *  Returns false unless l_max, spline_radius and spline_npoints stored in the
 *  file match the current values.  Note: spline_radius is compared with exact
 *  floating-point equality, so the stored value must be bit-identical.
 */
bool read_splines(hdf_archive& h5f)
{
  einspline_engine<AtomicSplineType> bigtable(SplineInst->getSplinePtr());
  int lmax_in, spline_npoints_in;
  ST spline_radius_in;
  if (!h5f.readEntry(lmax_in, "l_max") || lmax_in != lmax)
    return false;
  if (!h5f.readEntry(spline_radius_in, "spline_radius") || spline_radius_in != spline_radius)
    return false;
  if (!h5f.readEntry(spline_npoints_in, "spline_npoints") || spline_npoints_in != spline_npoints)
    return false;
  return h5f.readEntry(bigtable, "radial_spline");
}
/** Write the center metadata and the radial spline table to an HDF5 archive.
 *  \return true only if every entry was written successfully; subsequent
 *  writes are skipped after the first failure (short-circuit).
 */
bool write_splines(hdf_archive& h5f)
{
  bool ok = h5f.writeEntry(spline_radius, "spline_radius");
  ok      = ok && h5f.writeEntry(spline_npoints, "spline_npoints");
  ok      = ok && h5f.writeEntry(lmax, "l_max");
  ok      = ok && h5f.writeEntry(center_pos, "position");
  einspline_engine<AtomicSplineType> bigtable(SplineInst->getSplinePtr());
  return ok && h5f.writeEntry(bigtable, "radial_spline");
}
//evaluate only V
/** Evaluate the values of all stored orbitals at one point:
 *  myV[ib] = sum_lm Ylm(dr/r) * radial_lm,ib(r).
 *  \param r   distance to the center
 *  \param dr  displacement used for the angular part (note: evaluateValues
 *             negates dr — confirm the caller's sign convention)
 *  \param myV output values, one entry per band
 */
template<typename VV>
inline void evaluate_v(const ST& r, const PointType& dr, VV& myV)
{
  // fall back to the z direction when r is numerically zero (dr/r undefined)
  if (r > std::numeric_limits<ST>::epsilon())
    Ylm.evaluateV(dr[0] / r, dr[1] / r, dr[2] / r);
  else
    Ylm.evaluateV(0, 0, 1);
  const ST* restrict Ylm_v = Ylm[0];
  constexpr ST czero(0);
  ST* restrict val       = myV.data();
  ST* restrict local_val = localV.data();
  std::fill(myV.begin(), myV.end(), czero);
  // radial part of every (l,m) channel and band at r
  SplineInst->evaluate(r, localV);
  // accumulate Ylm-weighted radial values; bands are stored in blocks of Npad per channel
  for (size_t lm = 0; lm < lm_tot; lm++)
  {
#pragma omp simd aligned(val, local_val)
    for (size_t ib = 0; ib < myV.size(); ib++)
      val[ib] += Ylm_v[lm] * local_val[ib];
    local_val += Npad;
  }
}
/** Evaluate orbital values for a batch of displaced points that share the same
 *  distance r to this center (virtual-particle moves — confirm callers).
 *  The radial splines are evaluated once at r; only the angular part is
 *  recomputed per point, with -dr (opposite sign convention to evaluate_v).
 *  \param Displacements displacement table indexed [point][center]
 *  \param center_idx    index of this center in the displacement table
 *  \param r             common distance to the center
 *  \param multi_myV     output matrix, one row of band values per point
 */
template<typename DISPL, typename VM>
inline void evaluateValues(const DISPL& Displacements, const int center_idx, const ST& r, VM& multi_myV)
{
  // degenerate direction when r is numerically zero; evaluated once up front
  if (r <= std::numeric_limits<ST>::epsilon())
    Ylm.evaluateV(0, 0, 1);
  const ST* restrict Ylm_v = Ylm[0];
  const size_t m = multi_myV.cols();
  constexpr ST czero(0);
  std::fill(multi_myV.begin(), multi_myV.end(), czero);
  // radial part is shared by all points (same r)
  SplineInst->evaluate(r, localV);
  for (int ivp = 0; ivp < Displacements.size(); ivp++)
  {
    PointType dr = Displacements[ivp][center_idx];
    if (r > std::numeric_limits<ST>::epsilon())
      Ylm.evaluateV(-dr[0] / r, -dr[1] / r, -dr[2] / r);
    ST* restrict val       = multi_myV[ivp];
    ST* restrict local_val = localV.data();
    for (size_t lm = 0; lm < lm_tot; lm++)
    {
#pragma omp simd aligned(val, local_val)
      for (size_t ib = 0; ib < m; ib++)
        val[ib] += Ylm_v[lm] * local_val[ib];
      local_val += Npad;
    }
  }
}
//evaluate VGL
template<typename VV, typename GV>
inline void evaluate_vgl(const ST& r, const PointType& dr, VV& myV, GV& myG, VV& myL)
{
ST drx, dry, drz, rhatx, rhaty, rhatz, rinv;
if (r > rmin)
{
rinv = 1.0 / r;
}
else
{
rinv = 0;
}
drx = dr[0];
dry = dr[1];
drz = dr[2];
rhatx = drx * rinv;
rhaty = dry * rinv;
rhatz = drz * rinv;
Ylm.evaluateVGL(drx, dry, drz);
const ST* restrict Ylm_v = Ylm[0];
const ST* restrict Ylm_gx = Ylm[1];
const ST* restrict Ylm_gy = Ylm[2];
const ST* restrict Ylm_gz = Ylm[3];
ST* restrict g0 = myG.data(0);
ST* restrict g1 = myG.data(1);
ST* restrict g2 = myG.data(2);
constexpr ST czero(0), cone(1), chalf(0.5);
std::fill(myV.begin(), myV.end(), czero);
std::fill(g0, g0 + Npad, czero);
std::fill(g1, g1 + Npad, czero);
std::fill(g2, g2 + Npad, czero);
std::fill(myL.begin(), myL.end(), czero);
ST* restrict val = myV.data();
ST* restrict lapl = myL.data();
ST* restrict local_val = localV.data();
ST* restrict local_grad = localG.data();
ST* restrict local_lapl = localL.data();
SplineInst->evaluate_vgl(r, localV, localG, localL);
if (r > rmin_sqrt)
{
// far from core
r_power_minus_l[0] = cone;
ST r_power_temp = cone;
for (int l = 1; l <= lmax; l++)
{
r_power_temp *= rinv;
for (int m = -l, lm = l * l; m <= l; m++, lm++)
r_power_minus_l[lm] = r_power_temp;
}
for (size_t lm = 0; lm < lm_tot; lm++)
{
const ST& l_val = l_vals[lm];
const ST& r_power = r_power_minus_l[lm];
const ST Ylm_rescale = Ylm_v[lm] * r_power;
const ST rhat_dot_G = (rhatx * Ylm_gx[lm] + rhaty * Ylm_gy[lm] + rhatz * Ylm_gz[lm]) * r_power;
#pragma omp simd aligned(val, g0, g1, g2, lapl, local_val, local_grad, local_lapl)
for (size_t ib = 0; ib < myV.size(); ib++)
{
const ST local_v = local_val[ib];
const ST local_g = local_grad[ib];
const ST local_l = local_lapl[ib];
// value
const ST Vpart = l_val * rinv * local_v;
val[ib] += Ylm_rescale * local_v;
// grad
const ST factor1 = local_g * Ylm_rescale;
const ST factor2 = local_v * r_power;
const ST factor3 = -Vpart * Ylm_rescale;
g0[ib] += factor1 * rhatx + factor2 * Ylm_gx[lm] + factor3 * rhatx;
g1[ib] += factor1 * rhaty + factor2 * Ylm_gy[lm] + factor3 * rhaty;
g2[ib] += factor1 * rhatz + factor2 * Ylm_gz[lm] + factor3 * rhatz;
// laplacian
lapl[ib] += (local_l + (local_g * (2 - l_val) - Vpart) * rinv) * Ylm_rescale + (local_g - Vpart) * rhat_dot_G;
}
local_val += Npad;
local_grad += Npad;
local_lapl += Npad;
}
}
else if (r > rmin)
{
// the possibility of reaching here is very very low
std::cout << "Warning: an electron is very close to an ion, distance=" << r << " be careful!" << std::endl;
// near core, kill divergence in the laplacian
r_power_minus_l[0] = cone;
ST r_power_temp = cone;
for (int l = 1; l <= lmax; l++)
{
r_power_temp *= rinv;
for (int m = -l, lm = l * l; m <= l; m++, lm++)
r_power_minus_l[lm] = r_power_temp;
}
for (size_t lm = 0; lm < lm_tot; lm++)
{
const ST& l_val = l_vals[lm];
const ST& r_power = r_power_minus_l[lm];
const ST Ylm_rescale = Ylm_v[lm] * r_power;
const ST rhat_dot_G = (Ylm_gx[lm] * rhatx + Ylm_gy[lm] * rhaty + Ylm_gz[lm] * rhatz) * r_power * r;
#pragma omp simd aligned(val, g0, g1, g2, lapl, local_val, local_grad, local_lapl)
for (size_t ib = 0; ib < myV.size(); ib++)
{
const ST local_v = local_val[ib];
const ST local_g = local_grad[ib];
const ST local_l = local_lapl[ib];
// value
const ST Vpart = Ylm_rescale * local_v;
val[ib] += Vpart;
// grad
const ST factor1 = local_g * Ylm_rescale;
const ST factor2 = local_v * r_power;
const ST factor3 = -l_val * Vpart * rinv;
g0[ib] += factor1 * rhatx + factor2 * Ylm_gx[lm] + factor3 * rhatx;
g1[ib] += factor1 * rhaty + factor2 * Ylm_gy[lm] + factor3 * rhaty;
g2[ib] += factor1 * rhatz + factor2 * Ylm_gz[lm] + factor3 * rhatz;
// laplacian
lapl[ib] += local_l * (cone - chalf * l_val) * (3 * Ylm_rescale + rhat_dot_G);
}
local_val += Npad;
local_grad += Npad;
local_lapl += Npad;
}
}
else
{
std::cout << "Warning: an electron is on top of an ion!" << std::endl;
// strictly zero
#pragma omp simd aligned(val, lapl, local_val, local_lapl)
for (size_t ib = 0; ib < myV.size(); ib++)
{
// value
val[ib] = Ylm_v[0] * local_val[ib];
// laplacian
lapl[ib] = local_lapl[ib] * static_cast<ST>(3) * Ylm_v[0];
}
local_val += Npad;
local_grad += Npad;
local_lapl += Npad;
if (lm_tot > 0)
{
//std::cout << std::endl;
for (size_t lm = 1; lm < 4; lm++)
{
#pragma omp simd aligned(g0, g1, g2, local_grad)
for (size_t ib = 0; ib < myV.size(); ib++)
{
const ST local_g = local_grad[ib];
// grad
g0[ib] += local_g * Ylm_gx[lm];
g1[ib] += local_g * Ylm_gy[lm];
g2[ib] += local_g * Ylm_gz[lm];
}
local_grad += Npad;
}
}
}
}
template<typename VV, typename GV, typename HT>
void evaluate_vgh(const ST& r, const PointType& dr, VV& myV, GV& myG, HT& myH)
{
//Needed to do tensor product here
APP_ABORT("AtomicOrbitals::evaluate_vgh");
}
};
template<typename ST>
class HybridRepCenterOrbitals
{
public:
static const int D = 3;
using PointType = typename AtomicOrbitals<ST>::PointType;
using RealType = typename DistanceTableData::RealType;
using PosType = typename DistanceTableData::PosType;
private:
///atomic centers
std::vector<AtomicOrbitals<ST>> AtomicCenters;
///table index
int myTableID;
///mapping supercell to primitive cell
std::vector<int> Super2Prim;
///r from distance table
RealType dist_r;
///dr from distance table
PosType dist_dr;
///for APBC
PointType r_image;
///smooth function value
RealType f;
///smooth function first derivative
RealType df_dr;
///smooth function second derivative
RealType d2f_dr2;
///smoothing schemes
enum class smoothing_schemes
{
CONSISTENT = 0,
SMOOTHALL,
SMOOTHPARTIAL
} smooth_scheme;
/// smoothing function
smoothing_functions smooth_func_id;
public:
HybridRepCenterOrbitals() {}
void set_info(const ParticleSet& ions, ParticleSet& els, const std::vector<int>& mapping)
{
myTableID = els.addTable(ions);
Super2Prim = mapping;
}
inline void resizeStorage(size_t Nb)
{
size_t SplineCoefsBytes = 0;
for (int ic = 0; ic < AtomicCenters.size(); ic++)
{
AtomicCenters[ic].resizeStorage(Nb);
SplineCoefsBytes += AtomicCenters[ic].getSplineSizeInBytes();
}
app_log() << "MEMORY " << SplineCoefsBytes / (1 << 20) << " MB allocated "
<< "for the atomic radial splines in hybrid orbital representation" << std::endl;
}
void bcast_tables(Communicate* comm)
{
for (int ic = 0; ic < AtomicCenters.size(); ic++)
AtomicCenters[ic].bcast_tables(comm);
}
void gather_atomic_tables(Communicate* comm, std::vector<int>& offset)
{
if (comm->size() == 1)
return;
for (int ic = 0; ic < AtomicCenters.size(); ic++)
AtomicCenters[ic].gather_tables(comm, offset);
}
inline void flush_zero()
{
for (int ic = 0; ic < AtomicCenters.size(); ic++)
AtomicCenters[ic].flush_zero();
}
bool read_splines(hdf_archive& h5f)
{
bool success = true;
size_t ncenter;
success = success && h5f.push("atomic_centers", false);
success = success && h5f.readEntry(ncenter, "number_of_centers");
if (!success)
return success;
if (ncenter != AtomicCenters.size())
success = false;
// read splines of each center
for (int ic = 0; ic < AtomicCenters.size(); ic++)
{
std::ostringstream gname;
gname << "center_" << ic;
success = success && h5f.push(gname.str().c_str(), false);
success = success && AtomicCenters[ic].read_splines(h5f);
h5f.pop();
}
h5f.pop();
return success;
}
bool write_splines(hdf_archive& h5f)
{
bool success = true;
int ncenter = AtomicCenters.size();
success = success && h5f.push("atomic_centers", true);
success = success && h5f.writeEntry(ncenter, "number_of_centers");
// write splines of each center
for (int ic = 0; ic < AtomicCenters.size(); ic++)
{
std::ostringstream gname;
gname << "center_" << ic;
success = success && h5f.push(gname.str().c_str(), true);
success = success && AtomicCenters[ic].write_splines(h5f);
h5f.pop();
}
h5f.pop();
return success;
}
template<typename Cell>
inline int get_bc_sign(const PointType& r, const Cell& PrimLattice, TinyVector<int, D>& HalfG)
{
int bc_sign = 0;
PointType shift_unit = PrimLattice.toUnit(r - r_image);
for (int i = 0; i < D; i++)
{
ST img = round(shift_unit[i]);
bc_sign += HalfG[i] * (int)img;
}
return bc_sign;
}
//evaluate only V
template<typename VV>
inline RealType evaluate_v(const ParticleSet& P, const int iat, VV& myV)
{
const auto& ei_dist = P.getDistTable(myTableID);
const int center_idx = ei_dist.get_first_neighbor(iat, dist_r, dist_dr, P.activePtcl == iat);
if (center_idx < 0)
abort();
auto& myCenter = AtomicCenters[Super2Prim[center_idx]];
if (dist_r < myCenter.getCutoff())
{
PointType dr(-dist_dr[0], -dist_dr[1], -dist_dr[2]);
r_image = myCenter.getCenterPos() + dr;
myCenter.evaluate_v(dist_r, dr, myV);
return smooth_function(myCenter.getCutoffBuffer(), myCenter.getCutoff(), dist_r);
}
return RealType(-1);
}
/* check if the batched algorithm is safe to operate
* @param VP virtual particle set
* @return true if it is safe
*
* When the reference electron in the NLPP evaluation has a distance larger than the non overlapping radius of the reference center.
* Some qudrature points may get its SPOs evaluated from the nearest center which is not the reference center.
* The batched algorthm forces the evaluation on the reference center and introduce some error.
* In this case, the non-batched algorithm should be used.
*/
bool is_batched_safe(const VirtualParticleSet& VP)
{
const int center_idx = VP.refSourcePtcl;
auto& myCenter = AtomicCenters[Super2Prim[center_idx]];
return VP.refPS.getDistTable(myTableID).getDistRow(VP.refPtcl)[center_idx] < myCenter.getNonOverlappingRadius();
}
// C2C, C2R cases
template<typename VM>
inline RealType evaluateValuesC2X(const VirtualParticleSet& VP, VM& multi_myV)
{
const int center_idx = VP.refSourcePtcl;
dist_r = VP.refPS.getDistTable(myTableID).getDistRow(VP.refPtcl)[center_idx];
auto& myCenter = AtomicCenters[Super2Prim[center_idx]];
if (dist_r < myCenter.getCutoff())
{
myCenter.evaluateValues(VP.getDistTable(myTableID).getDisplacements(), center_idx, dist_r, multi_myV);
return smooth_function(myCenter.getCutoffBuffer(), myCenter.getCutoff(), dist_r);
}
return RealType(-1);
}
// R2R case
template<typename VM, typename Cell, typename SV>
inline RealType evaluateValuesR2R(const VirtualParticleSet& VP,
const Cell& PrimLattice,
TinyVector<int, D>& HalfG,
VM& multi_myV,
SV& bc_signs)
{
const int center_idx = VP.refSourcePtcl;
dist_r = VP.refPS.getDistTable(myTableID).getDistRow(VP.refPtcl)[center_idx];
auto& myCenter = AtomicCenters[Super2Prim[center_idx]];
if (dist_r < myCenter.getCutoff())
{
const auto& displ = VP.getDistTable(myTableID).getDisplacements();
for (int ivp = 0; ivp < VP.getTotalNum(); ivp++)
{
r_image = myCenter.getCenterPos() - displ[ivp][center_idx];
bc_signs[ivp] = get_bc_sign(VP.R[ivp], PrimLattice, HalfG);
;
}
myCenter.evaluateValues(displ, center_idx, dist_r, multi_myV);
return smooth_function(myCenter.getCutoffBuffer(), myCenter.getCutoff(), dist_r);
}
return RealType(-1);
}
//evaluate only VGL
template<typename VV, typename GV>
inline RealType evaluate_vgl(const ParticleSet& P, const int iat, VV& myV, GV& myG, VV& myL)
{
const auto& ei_dist = P.getDistTable(myTableID);
const int center_idx = ei_dist.get_first_neighbor(iat, dist_r, dist_dr, P.activePtcl == iat);
if (center_idx < 0)
abort();
auto& myCenter = AtomicCenters[Super2Prim[center_idx]];
if (dist_r < myCenter.getCutoff())
{
PointType dr(-dist_dr[0], -dist_dr[1], -dist_dr[2]);
r_image = myCenter.getCenterPos() + dr;
myCenter.evaluate_vgl(dist_r, dr, myV, myG, myL);
return smooth_function(myCenter.getCutoffBuffer(), myCenter.getCutoff(), dist_r);
}
return RealType(-1);
}
//evaluate only VGH
template<typename VV, typename GV, typename HT>
inline RealType evaluate_vgh(const ParticleSet& P, const int iat, VV& myV, GV& myG, HT& myH)
{
const auto& ei_dist = P.getDistTable(myTableID);
const int center_idx = ei_dist.get_first_neighbor(iat, dist_r, dist_dr, P.activePtcl == iat);
if (center_idx < 0)
abort();
auto& myCenter = AtomicCenters[Super2Prim[center_idx]];
if (dist_r < myCenter.getCutoff())
{
PointType dr(-dist_dr[0], -dist_dr[1], -dist_dr[2]);
r_image = myCenter.getCenterPos() + dr;
myCenter.evaluate_vgh(dist_r, dr, myV, myG, myH);
return smooth_function(myCenter.getCutoffBuffer(), myCenter.getCutoff(), dist_r);
}
return RealType(-1);
}
// interpolate buffer region, value only
template<typename VV>
inline void interpolate_buffer_v(VV& psi, const VV& psi_AO) const
{
const RealType cone(1);
for (size_t i = 0; i < psi.size(); i++)
psi[i] = psi_AO[i] * f + psi[i] * (cone - f);
}
// interpolate buffer region, value, gradients and laplacian
template<typename VV, typename GV>
inline void interpolate_buffer_vgl(VV& psi,
GV& dpsi,
VV& d2psi,
const VV& psi_AO,
const GV& dpsi_AO,
const VV& d2psi_AO) const
{
const RealType cone(1), ctwo(2);
const RealType rinv(1.0 / dist_r);
if (smooth_scheme == smoothing_schemes::CONSISTENT)
for (size_t i = 0; i < psi.size(); i++)
{ // psi, dpsi, d2psi are all consistent
d2psi[i] = d2psi_AO[i] * f + d2psi[i] * (cone - f) + df_dr * rinv * ctwo * dot(dpsi[i] - dpsi_AO[i], dist_dr) +
(psi_AO[i] - psi[i]) * (d2f_dr2 + ctwo * rinv * df_dr);
dpsi[i] = dpsi_AO[i] * f + dpsi[i] * (cone - f) + df_dr * rinv * dist_dr * (psi[i] - psi_AO[i]);
psi[i] = psi_AO[i] * f + psi[i] * (cone - f);
}
else if (smooth_scheme == smoothing_schemes::SMOOTHALL)
for (size_t i = 0; i < psi.size(); i++)
{
d2psi[i] = d2psi_AO[i] * f + d2psi[i] * (cone - f);
dpsi[i] = dpsi_AO[i] * f + dpsi[i] * (cone - f);
psi[i] = psi_AO[i] * f + psi[i] * (cone - f);
}
else if (smooth_scheme == smoothing_schemes::SMOOTHPARTIAL)
for (size_t i = 0; i < psi.size(); i++)
{ // dpsi, d2psi are consistent but psi is not.
d2psi[i] = d2psi_AO[i] * f + d2psi[i] * (cone - f) + df_dr * rinv * ctwo * dot(dpsi[i] - dpsi_AO[i], dist_dr);
dpsi[i] = dpsi_AO[i] * f + dpsi[i] * (cone - f);
psi[i] = psi_AO[i] * f + psi[i] * (cone - f);
}
else
throw std::runtime_error("Unknown smooth scheme!");
}
inline RealType smooth_function(const ST& cutoff_buffer, const ST& cutoff, const RealType r)
{
const RealType cone(1);
if (r < cutoff_buffer)
return cone;
const RealType scale = cone / (cutoff - cutoff_buffer);
const RealType x = (r - cutoff_buffer) * scale;
f = smoothing(smooth_func_id, x, df_dr, d2f_dr2);
df_dr *= scale;
d2f_dr2 *= scale * scale;
return f;
}
template<class BSPLINESPO>
friend class HybridRepSetReader;
};
} // namespace qmcplusplus
#endif
|
tinyexr.h | #ifndef TINYEXR_H_
#define TINYEXR_H_
/*
Copyright (c) 2014 - 2021, Syoyo Fujita and many contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Syoyo Fujita nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// TinyEXR contains some OpenEXR code, which is licensed under ------------
///////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2002, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Industrial Light & Magic nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////
// End of OpenEXR license -------------------------------------------------
//
//
// Do this:
// #define TINYEXR_IMPLEMENTATION
// before you include this file in *one* C or C++ file to create the
// implementation.
//
// // i.e. it should look like this:
// #include ...
// #include ...
// #include ...
// #define TINYEXR_IMPLEMENTATION
// #include "tinyexr.h"
//
//
#include <stddef.h> // for size_t
#include <stdint.h> // guess stdint.h is available(C99)
#ifdef __cplusplus
extern "C" {
#endif
#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \
defined(__i386) || defined(__i486__) || defined(__i486) || \
defined(i386) || defined(__ia64__) || defined(__x86_64__)
#define TINYEXR_X86_OR_X64_CPU 1
#else
#define TINYEXR_X86_OR_X64_CPU 0
#endif
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || TINYEXR_X86_OR_X64_CPU
#define TINYEXR_LITTLE_ENDIAN 1
#else
#define TINYEXR_LITTLE_ENDIAN 0
#endif
// Use miniz or not to decode ZIP format pixel. Linking with zlib
// required if this flas is 0.
#ifndef TINYEXR_USE_MINIZ
#define TINYEXR_USE_MINIZ (1)
#endif
// Disable PIZ comporession when applying cpplint.
#ifndef TINYEXR_USE_PIZ
#define TINYEXR_USE_PIZ (1)
#endif
#ifndef TINYEXR_USE_ZFP
#define TINYEXR_USE_ZFP (0) // TinyEXR extension.
// http://computation.llnl.gov/projects/floating-point-compression
#endif
#ifndef TINYEXR_USE_THREAD
#define TINYEXR_USE_THREAD (0) // No threaded loading.
// http://computation.llnl.gov/projects/floating-point-compression
#endif
#ifndef TINYEXR_USE_OPENMP
#ifdef _OPENMP
#define TINYEXR_USE_OPENMP (1)
#else
#define TINYEXR_USE_OPENMP (0)
#endif
#endif
#define TINYEXR_SUCCESS (0)
#define TINYEXR_ERROR_INVALID_MAGIC_NUMBER (-1)
#define TINYEXR_ERROR_INVALID_EXR_VERSION (-2)
#define TINYEXR_ERROR_INVALID_ARGUMENT (-3)
#define TINYEXR_ERROR_INVALID_DATA (-4)
#define TINYEXR_ERROR_INVALID_FILE (-5)
#define TINYEXR_ERROR_INVALID_PARAMETER (-6)
#define TINYEXR_ERROR_CANT_OPEN_FILE (-7)
#define TINYEXR_ERROR_UNSUPPORTED_FORMAT (-8)
#define TINYEXR_ERROR_INVALID_HEADER (-9)
#define TINYEXR_ERROR_UNSUPPORTED_FEATURE (-10)
#define TINYEXR_ERROR_CANT_WRITE_FILE (-11)
#define TINYEXR_ERROR_SERIALZATION_FAILED (-12)
#define TINYEXR_ERROR_LAYER_NOT_FOUND (-13)
// @note { OpenEXR file format: http://www.openexr.com/openexrfilelayout.pdf }
// pixel type: possible values are: UINT = 0 HALF = 1 FLOAT = 2
#define TINYEXR_PIXELTYPE_UINT (0)
#define TINYEXR_PIXELTYPE_HALF (1)
#define TINYEXR_PIXELTYPE_FLOAT (2)
#define TINYEXR_MAX_HEADER_ATTRIBUTES (1024)
#define TINYEXR_MAX_CUSTOM_ATTRIBUTES (128)
#define TINYEXR_COMPRESSIONTYPE_NONE (0)
#define TINYEXR_COMPRESSIONTYPE_RLE (1)
#define TINYEXR_COMPRESSIONTYPE_ZIPS (2)
#define TINYEXR_COMPRESSIONTYPE_ZIP (3)
#define TINYEXR_COMPRESSIONTYPE_PIZ (4)
#define TINYEXR_COMPRESSIONTYPE_ZFP (128) // TinyEXR extension
#define TINYEXR_ZFP_COMPRESSIONTYPE_RATE (0)
#define TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION (1)
#define TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY (2)
#define TINYEXR_TILE_ONE_LEVEL (0)
#define TINYEXR_TILE_MIPMAP_LEVELS (1)
#define TINYEXR_TILE_RIPMAP_LEVELS (2)
#define TINYEXR_TILE_ROUND_DOWN (0)
#define TINYEXR_TILE_ROUND_UP (1)
typedef struct _EXRVersion {
int version; // this must be 2
// tile format image;
// not zero for only a single-part "normal" tiled file (according to spec.)
int tiled;
int long_name; // long name attribute
// deep image(EXR 2.0);
// for a multi-part file, indicates that at least one part is of type deep* (according to spec.)
int non_image;
int multipart; // multi-part(EXR 2.0)
} EXRVersion;
typedef struct _EXRAttribute {
char name[256]; // name and type are up to 255 chars long.
char type[256];
unsigned char *value; // uint8_t*
int size;
int pad0;
} EXRAttribute;
typedef struct _EXRChannelInfo {
char name[256]; // less than 255 bytes long
int pixel_type;
int x_sampling;
int y_sampling;
unsigned char p_linear;
unsigned char pad[3];
} EXRChannelInfo;
typedef struct _EXRTile {
int offset_x;
int offset_y;
int level_x;
int level_y;
int width; // actual width in a tile.
int height; // actual height int a tile.
unsigned char **images; // image[channels][pixels]
} EXRTile;
typedef struct _EXRBox2i {
int min_x;
int min_y;
int max_x;
int max_y;
} EXRBox2i;
typedef struct _EXRHeader {
float pixel_aspect_ratio;
int line_order;
EXRBox2i data_window;
EXRBox2i display_window;
float screen_window_center[2];
float screen_window_width;
int chunk_count;
// Properties for tiled format(`tiledesc`).
int tiled;
int tile_size_x;
int tile_size_y;
int tile_level_mode;
int tile_rounding_mode;
int long_name;
// for a single-part file, agree with the version field bit 11
// for a multi-part file, it is consistent with the type of part
int non_image;
int multipart;
unsigned int header_len;
// Custom attributes(exludes required attributes(e.g. `channels`,
// `compression`, etc)
int num_custom_attributes;
EXRAttribute *custom_attributes; // array of EXRAttribute. size =
// `num_custom_attributes`.
EXRChannelInfo *channels; // [num_channels]
int *pixel_types; // Loaded pixel type(TINYEXR_PIXELTYPE_*) of `images` for
// each channel. This is overwritten with `requested_pixel_types` when
// loading.
int num_channels;
int compression_type; // compression type(TINYEXR_COMPRESSIONTYPE_*)
int *requested_pixel_types; // Filled initially by
// ParseEXRHeaderFrom(Meomory|File), then users
// can edit it(only valid for HALF pixel type
// channel)
// name attribute required for multipart files;
// must be unique and non empty (according to spec.);
// use EXRSetNameAttr for setting value;
// max 255 character allowed - excluding terminating zero
char name[256];
} EXRHeader;
typedef struct _EXRMultiPartHeader {
int num_headers;
EXRHeader *headers;
} EXRMultiPartHeader;
typedef struct _EXRImage {
EXRTile *tiles; // Tiled pixel data. The application must reconstruct image
// from tiles manually. NULL if scanline format.
struct _EXRImage* next_level; // NULL if scanline format or image is the last level.
int level_x; // x level index
int level_y; // y level index
unsigned char **images; // image[channels][pixels]. NULL if tiled format.
int width;
int height;
int num_channels;
// Properties for tile format.
int num_tiles;
} EXRImage;
typedef struct _EXRMultiPartImage {
int num_images;
EXRImage *images;
} EXRMultiPartImage;
typedef struct _DeepImage {
const char **channel_names;
float ***image; // image[channels][scanlines][samples]
int **offset_table; // offset_table[scanline][offsets]
int num_channels;
int width;
int height;
int pad0;
} DeepImage;
// @deprecated { For backward compatibility. Not recommended to use. }
// Loads single-frame OpenEXR image. Assume EXR image contains A(single channel
// alpha) or RGB(A) channels.
// Application must free image data as returned by `out_rgba`
// Result image format is: float x RGBA x width x hight
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXR(float **out_rgba, int *width, int *height,
const char *filename, const char **err);
// Loads single-frame OpenEXR image by specifying layer name. Assume EXR image
// contains A(single channel alpha) or RGB(A) channels. Application must free
// image data as returned by `out_rgba` Result image format is: float x RGBA x
// width x hight Returns negative value and may set error string in `err` when
// there's an error When the specified layer name is not found in the EXR file,
// the function will return `TINYEXR_ERROR_LAYER_NOT_FOUND`.
extern int LoadEXRWithLayer(float **out_rgba, int *width, int *height,
const char *filename, const char *layer_name,
const char **err);
//
// Get layer infos from EXR file.
//
// @param[out] layer_names List of layer names. Application must free memory
// after using this.
// @param[out] num_layers The number of layers
// @param[out] err Error string(will be filled when the function returns error
// code). Free it using FreeEXRErrorMessage after using this value.
//
// @return TINYEXR_SUCCEES upon success.
//
extern int EXRLayers(const char *filename, const char **layer_names[],
int *num_layers, const char **err);
// @deprecated { to be removed. }
// Simple wrapper API for ParseEXRHeaderFromFile.
// checking given file is a EXR file(by just look up header)
// @return TINYEXR_SUCCEES for EXR image, TINYEXR_ERROR_INVALID_HEADER for
// others
extern int IsEXR(const char *filename);
// @deprecated { to be removed. }
// Saves single-frame OpenEXR image. Assume EXR image contains RGB(A) channels.
// components must be 1(Grayscale), 3(RGB) or 4(RGBA).
// Input image format is: `float x width x height`, or `float x RGB(A) x width x
// hight`
// Save image as fp16(HALF) format when `save_as_fp16` is positive non-zero
// value.
// Save image as fp32(FLOAT) format when `save_as_fp16` is 0.
// Use ZIP compression by default.
// Returns negative value and may set error string in `err` when there's an
// error
extern int SaveEXR(const float *data, const int width, const int height,
const int components, const int save_as_fp16,
const char *filename, const char **err);
// Returns the number of resolution levels of the image (including the base)
extern int EXRNumLevels(const EXRImage* exr_image);
// Initialize EXRHeader struct
extern void InitEXRHeader(EXRHeader *exr_header);
// Set name attribute of EXRHeader struct (it makes a copy)
extern void EXRSetNameAttr(EXRHeader *exr_header, const char* name);
// Initialize EXRImage struct
extern void InitEXRImage(EXRImage *exr_image);
// Frees internal data of EXRHeader struct
extern int FreeEXRHeader(EXRHeader *exr_header);
// Frees internal data of EXRImage struct
extern int FreeEXRImage(EXRImage *exr_image);
// Frees error message
extern void FreeEXRErrorMessage(const char *msg);
// Parse EXR version header of a file.
extern int ParseEXRVersionFromFile(EXRVersion *version, const char *filename);
// Parse EXR version header from memory-mapped EXR data.
extern int ParseEXRVersionFromMemory(EXRVersion *version,
const unsigned char *memory, size_t size);
// Parse single-part OpenEXR header from a file and initialize `EXRHeader`.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRHeaderFromFile(EXRHeader *header, const EXRVersion *version,
const char *filename, const char **err);
// Parse single-part OpenEXR header from a memory and initialize `EXRHeader`.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRHeaderFromMemory(EXRHeader *header,
const EXRVersion *version,
const unsigned char *memory, size_t size,
const char **err);
// Parse multi-part OpenEXR headers from a file and initialize `EXRHeader*`
// array.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRMultipartHeaderFromFile(EXRHeader ***headers,
int *num_headers,
const EXRVersion *version,
const char *filename,
const char **err);
// Parse multi-part OpenEXR headers from a memory and initialize `EXRHeader*`
// array
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRMultipartHeaderFromMemory(EXRHeader ***headers,
int *num_headers,
const EXRVersion *version,
const unsigned char *memory,
size_t size, const char **err);
// Loads single-part OpenEXR image from a file.
// Application must setup `ParseEXRHeaderFromFile` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRImageFromFile(EXRImage *image, const EXRHeader *header,
const char *filename, const char **err);
// Loads single-part OpenEXR image from a memory.
// Application must setup `EXRHeader` with
// `ParseEXRHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRImageFromMemory(EXRImage *image, const EXRHeader *header,
const unsigned char *memory,
const size_t size, const char **err);
// Loads multi-part OpenEXR image from a file.
// Application must setup `ParseEXRMultipartHeaderFromFile` before calling this
// function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRMultipartImageFromFile(EXRImage *images,
const EXRHeader **headers,
unsigned int num_parts,
const char *filename,
const char **err);
// Loads multi-part OpenEXR image from a memory.
// Application must setup `EXRHeader*` array with
// `ParseEXRMultipartHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRMultipartImageFromMemory(EXRImage *images,
const EXRHeader **headers,
unsigned int num_parts,
const unsigned char *memory,
const size_t size, const char **err);
// Saves multi-channel, single-frame OpenEXR image to a file.
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int SaveEXRImageToFile(const EXRImage *image,
const EXRHeader *exr_header, const char *filename,
const char **err);
// Saves multi-channel, single-frame OpenEXR image to a memory.
// Image is compressed using EXRImage.compression value.
// Return the number of bytes if success.
// Return zero and will set error string in `err` when there's an
// error.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern size_t SaveEXRImageToMemory(const EXRImage *image,
const EXRHeader *exr_header,
unsigned char **memory, const char **err);
// Saves multi-channel, multi-frame OpenEXR image to a memory.
// Image is compressed using EXRImage.compression value.
// File global attributes (eg. display_window) must be set in the first header.
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int SaveEXRMultipartImageToFile(const EXRImage *images,
const EXRHeader **exr_headers,
unsigned int num_parts,
const char *filename, const char **err);
// Saves multi-channel, multi-frame OpenEXR image to a memory.
// Image is compressed using EXRImage.compression value.
// File global attributes (eg. display_window) must be set in the first header.
// Return the number of bytes if success.
// Return zero and will set error string in `err` when there's an
// error.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern size_t SaveEXRMultipartImageToMemory(const EXRImage *images,
const EXRHeader **exr_headers,
unsigned int num_parts,
unsigned char **memory, const char **err);
// Loads single-frame OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadDeepEXR(DeepImage *out_image, const char *filename,
const char **err);
// NOT YET IMPLEMENTED:
// Saves single-frame OpenEXR deep image.
// Returns negative value and may set error string in `err` when there's an
// error
// extern int SaveDeepEXR(const DeepImage *in_image, const char *filename,
// const char **err);
// NOT YET IMPLEMENTED:
// Loads multi-part OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts, const
// char *filename,
// const char **err);
// For emscripten.
// Loads single-frame OpenEXR image from memory. Assume EXR image contains
// RGB(A) channels.
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
const unsigned char *memory, size_t size,
const char **err);
#ifdef __cplusplus
}
#endif
#endif // TINYEXR_H_
#ifdef TINYEXR_IMPLEMENTATION
#ifndef TINYEXR_IMPLEMENTATION_DEFINED
#define TINYEXR_IMPLEMENTATION_DEFINED
#ifdef _WIN32
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h> // for UTF-8
#endif
#include <algorithm>
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <sstream>
// #include <iostream> // debug
#include <limits>
#include <string>
#include <vector>
#include <set>
// https://stackoverflow.com/questions/5047971/how-do-i-check-for-c11-support
#if __cplusplus > 199711L || (defined(_MSC_VER) && _MSC_VER >= 1900)
#define TINYEXR_HAS_CXX11 (1)
// C++11
#include <cstdint>
#if TINYEXR_USE_THREAD
#include <atomic>
#include <thread>
#endif
#endif // __cplusplus > 199711L
#if TINYEXR_USE_OPENMP
#include <omp.h>
#endif
#if TINYEXR_USE_MINIZ
#include "deps/miniz/miniz.h"
#else
// Issue #46. Please include your own zlib-compatible API header before
// including `tinyexr.h`
//#include "zlib.h"
#endif
#if TINYEXR_USE_ZFP
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Weverything"
#endif
#include "zfp.h"
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#endif
namespace tinyexr {
#if __cplusplus > 199711L
// C++11
typedef uint64_t tinyexr_uint64;
typedef int64_t tinyexr_int64;
#else
// Although `long long` is not a standard type pre C++11, assume it is defined
// as a compiler's extension.
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#endif
typedef unsigned long long tinyexr_uint64;
typedef long long tinyexr_int64;
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#endif
// static bool IsBigEndian(void) {
// union {
// unsigned int i;
// char c[4];
// } bint = {0x01020304};
//
// return bint.c[0] == 1;
//}
// Stores a heap-allocated copy of `msg` into `*err` when `err` is non-NULL.
// The caller owns the duplicated string and must release it (the public API
// exposes this via FreeEXRErrorMessage()).
static void SetErrorMessage(const std::string &msg, const char **err) {
  if (!err) {
    return;
  }
#ifdef _WIN32
  (*err) = _strdup(msg.c_str());
#else
  (*err) = strdup(msg.c_str());
#endif
}
static const int kEXRVersionSize = 8;
// Byte-wise copy of a 2-byte value; safe for unaligned source/destination
// pointers (no 16-bit load/store is issued).
static void cpy2(unsigned short *dst_val, const unsigned short *src_val) {
  unsigned char *d = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *s = reinterpret_cast<const unsigned char *>(src_val);
  for (int i = 0; i < 2; i++) {
    d[i] = s[i];
  }
}
// Converts a 2-byte value between little-endian storage order and the host
// byte order. On little-endian hosts this is a no-op.
static void swap2(unsigned short *val) {
#ifdef TINYEXR_LITTLE_ENDIAN
  (void)val;
#else
  // Big-endian host: reverse the two bytes via a temporary copy.
  unsigned short tmp = *val;
  unsigned char *dst = reinterpret_cast<unsigned char *>(val);
  unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
  dst[0] = src[1];
  dst[1] = src[0];
#endif
}
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-function"
#endif
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-function"
#endif
// Byte-wise copies of 4-byte values; safe for unaligned pointers. Bytes are
// copied in storage order, so the result is independent of host endianness.
static void cpy4(int *dst_val, const int *src_val) {
  unsigned char *d = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *s = reinterpret_cast<const unsigned char *>(src_val);
  for (int i = 0; i < 4; i++) {
    d[i] = s[i];
  }
}
// Unsigned-int overload of cpy4().
static void cpy4(unsigned int *dst_val, const unsigned int *src_val) {
  unsigned char *d = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *s = reinterpret_cast<const unsigned char *>(src_val);
  for (int i = 0; i < 4; i++) {
    d[i] = s[i];
  }
}
// Float overload of cpy4().
static void cpy4(float *dst_val, const float *src_val) {
  unsigned char *d = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *s = reinterpret_cast<const unsigned char *>(src_val);
  for (int i = 0; i < 4; i++) {
    d[i] = s[i];
  }
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
// Converts a 4-byte unsigned value between little-endian storage order and
// the host byte order. On little-endian hosts this is a no-op.
static void swap4(unsigned int *val) {
#ifdef TINYEXR_LITTLE_ENDIAN
  (void)val;
#else
  // Big-endian host: reverse all four bytes via a temporary copy.
  unsigned int tmp = *val;
  unsigned char *dst = reinterpret_cast<unsigned char *>(val);
  unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
  dst[0] = src[3];
  dst[1] = src[2];
  dst[2] = src[1];
  dst[3] = src[0];
#endif
}
// Signed-int variant of swap4().
static void swap4(int *val) {
#ifdef TINYEXR_LITTLE_ENDIAN
  (void)val;
#else
  int tmp = *val;
  unsigned char *dst = reinterpret_cast<unsigned char *>(val);
  unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
  dst[0] = src[3];
  dst[1] = src[2];
  dst[2] = src[1];
  dst[3] = src[0];
#endif
}
// Float variant of swap4(); swaps the raw bytes of the float representation.
static void swap4(float *val) {
#ifdef TINYEXR_LITTLE_ENDIAN
  (void)val;
#else
  float tmp = *val;
  unsigned char *dst = reinterpret_cast<unsigned char *>(val);
  unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
  dst[0] = src[3];
  dst[1] = src[2];
  dst[2] = src[1];
  dst[3] = src[0];
#endif
}
#if 0
static void cpy8(tinyexr::tinyexr_uint64 *dst_val, const tinyexr::tinyexr_uint64 *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
dst[4] = src[4];
dst[5] = src[5];
dst[6] = src[6];
dst[7] = src[7];
}
#endif
// Converts an 8-byte value between little-endian storage order and the host
// byte order. On little-endian hosts this is a no-op.
static void swap8(tinyexr::tinyexr_uint64 *val) {
#ifdef TINYEXR_LITTLE_ENDIAN
  (void)val;
#else
  // Big-endian host: reverse all eight bytes via a temporary copy.
  tinyexr::tinyexr_uint64 tmp = (*val);
  unsigned char *dst = reinterpret_cast<unsigned char *>(val);
  unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
  dst[0] = src[7];
  dst[1] = src[6];
  dst[2] = src[5];
  dst[3] = src[4];
  dst[4] = src[3];
  dst[5] = src[2];
  dst[6] = src[1];
  dst[7] = src[0];
#endif
}
// https://gist.github.com/rygorous/2156668
// 32-bit IEEE-754 float viewed either as raw bits (`u`), as a float (`f`),
// or as sign/exponent/mantissa bit fields (`s`). Bit-field order depends on
// host endianness so that the fields line up with the raw representation.
union FP32 {
  unsigned int u;
  float f;
  struct {
#if TINYEXR_LITTLE_ENDIAN
    unsigned int Mantissa : 23;
    unsigned int Exponent : 8;
    unsigned int Sign : 1;
#else
    unsigned int Sign : 1;
    unsigned int Exponent : 8;
    unsigned int Mantissa : 23;
#endif
  } s;
};
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
#endif
// 16-bit half-float viewed either as raw bits (`u`) or as
// sign/exponent/mantissa bit fields (`s`); see FP32 above.
union FP16 {
  unsigned short u;
  struct {
#if TINYEXR_LITTLE_ENDIAN
    unsigned int Mantissa : 10;
    unsigned int Exponent : 5;
    unsigned int Sign : 1;
#else
    unsigned int Sign : 1;
    unsigned int Exponent : 5;
    unsigned int Mantissa : 10;
#endif
  } s;
};
#ifdef __clang__
#pragma clang diagnostic pop
#endif
// Converts an IEEE-754 binary16 value to binary32, handling denormals, Inf
// and NaN. Operates purely on the raw bits (`u`), so it works regardless of
// the bit-field layout above. From the rygorous gist linked above FP32.
static FP32 half_to_float(FP16 h) {
  static const FP32 magic = {113 << 23};
  static const unsigned int shifted_exp = 0x7c00
                                          << 13;  // exponent mask after shift
  FP32 o;
  o.u = (h.u & 0x7fffU) << 13U;           // exponent/mantissa bits
  unsigned int exp_ = shifted_exp & o.u;  // just the exponent
  o.u += (127 - 15) << 23;                // exponent adjust (rebias 15 -> 127)
  // handle exponent special cases
  if (exp_ == shifted_exp)    // Inf/NaN?
    o.u += (128 - 16) << 23;  // extra exp adjust
  else if (exp_ == 0)         // Zero/Denormal?
  {
    o.u += 1 << 23;  // extra exp adjust
    o.f -= magic.f;  // renormalize via float subtraction
  }
  o.u |= (h.u & 0x8000U) << 16U;  // sign bit
  return o;
}
// Converts an IEEE-754 binary32 value to binary16 with rounding, handling
// signed zero/denormals, underflow, overflow (-> Inf) and NaN (-> qNaN).
// Uses the endian-aware bit fields of FP32/FP16 above.
static FP16 float_to_half_full(FP32 f) {
  FP16 o = {0};
  // Based on ISPC reference code (with minor modifications)
  if (f.s.Exponent == 0)  // Signed zero/denormal (which will underflow)
    o.s.Exponent = 0;
  else if (f.s.Exponent == 255)  // Inf or NaN (all exponent bits set)
  {
    o.s.Exponent = 31;
    o.s.Mantissa = f.s.Mantissa ? 0x200 : 0;  // NaN->qNaN and Inf->Inf
  } else                                      // Normalized number
  {
    // Exponent unbias the single, then bias the halfp
    int newexp = f.s.Exponent - 127 + 15;
    if (newexp >= 31)  // Overflow, return signed infinity
      o.s.Exponent = 31;
    else if (newexp <= 0)  // Underflow
    {
      if ((14 - newexp) <= 24)  // Mantissa might be non-zero
      {
        unsigned int mant = f.s.Mantissa | 0x800000;  // Hidden 1 bit
        o.s.Mantissa = mant >> (14 - newexp);
        if ((mant >> (13 - newexp)) & 1)  // Check for rounding
          o.u++;  // Round, might overflow into exp bit, but this is OK
      }
    } else {
      o.s.Exponent = static_cast<unsigned int>(newexp);
      o.s.Mantissa = f.s.Mantissa >> 13;
      if (f.s.Mantissa & 0x1000)  // Check for rounding
        o.u++;                    // Round, might overflow to inf, this is OK
    }
  }
  o.s.Sign = f.s.Sign;
  return o;
}
// NOTE: From OpenEXR code
// #define IMF_INCREASING_Y 0
// #define IMF_DECREASING_Y 1
// #define IMF_RAMDOM_Y 2
//
// #define IMF_NO_COMPRESSION 0
// #define IMF_RLE_COMPRESSION 1
// #define IMF_ZIPS_COMPRESSION 2
// #define IMF_ZIP_COMPRESSION 3
// #define IMF_PIZ_COMPRESSION 4
// #define IMF_PXR24_COMPRESSION 5
// #define IMF_B44_COMPRESSION 6
// #define IMF_B44A_COMPRESSION 7
#ifdef __clang__
#pragma clang diagnostic push
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#endif
// Reads a NUL-terminated string starting at `ptr`, scanning at most `len`
// bytes. On success stores the string (without the terminator) in `*s` and
// returns the pointer just past the '\0'. Returns NULL (and clears `*s`)
// when no terminator is found within `len` bytes.
static const char *ReadString(std::string *s, const char *ptr, size_t len) {
  size_t n = 0;
  while (n < len && ptr[n] != '\0') {
    n++;
  }
  if (n >= len) {
    // Unterminated (or len == 0): report failure.
    (*s) = std::string();
    return NULL;
  }
  (*s) = std::string(ptr, ptr + n);
  return ptr + n + 1;  // skip the '\0'
}
// Parses a single EXR header attribute from `marker` (at most `size` bytes).
// On-disk layout: name '\0' type '\0' uint32(data_len) data[data_len].
// On success fills `name`, `type` and `data`, stores the total number of
// bytes consumed in `*marker_size`, and returns true. Returns false when a
// string is unterminated or the input is truncated; `*marker_size` is left
// unmodified in that case.
static bool ReadAttribute(std::string *name, std::string *type,
                          std::vector<unsigned char> *data, size_t *marker_size,
                          const char *marker, size_t size) {
  size_t name_len = strnlen(marker, size);
  if (name_len == size) {
    // String does not have a terminating character.
    return false;
  }
  *name = std::string(marker, name_len);
  marker += name_len + 1;
  size -= name_len + 1;
  size_t type_len = strnlen(marker, size);
  if (type_len == size) {
    // Type string is unterminated.
    return false;
  }
  *type = std::string(marker, type_len);
  marker += type_len + 1;
  size -= type_len + 1;
  if (size < sizeof(uint32_t)) {
    // Not enough bytes left for the payload length field.
    return false;
  }
  uint32_t data_len;
  memcpy(&data_len, marker, sizeof(uint32_t));
  tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));  // LE -> host
  if (data_len == 0) {
    if ((*type).compare("string") == 0) {
      // Accept empty string attribute.
      marker += sizeof(uint32_t);
      size -= sizeof(uint32_t);
      *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t);
      // Store a single NUL so callers always see a terminated buffer.
      data->resize(1);
      (*data)[0] = '\0';
      return true;
    } else {
      // A zero-length payload is only valid for `string` attributes.
      return false;
    }
  }
  marker += sizeof(uint32_t);
  size -= sizeof(uint32_t);
  if (size < data_len) {
    // Truncated payload.
    return false;
  }
  data->resize(static_cast<size_t>(data_len));
  memcpy(&data->at(0), marker, static_cast<size_t>(data_len));
  *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t) + data_len;
  return true;
}
// Appends one serialized EXR attribute to `out` in the on-disk layout:
// name '\0' type '\0' int32(len) data[len]. The length field is stored
// little-endian (swap4 is a no-op on little-endian hosts).
static void WriteAttributeToMemory(std::vector<unsigned char> *out,
                                   const char *name, const char *type,
                                   const unsigned char *data, int len) {
  const size_t name_len = strlen(name);
  const size_t type_len = strlen(type);
  out->insert(out->end(), name, name + name_len + 1);  // include '\0'
  out->insert(out->end(), type, type + type_len + 1);  // include '\0'

  int outLen = len;
  tinyexr::swap4(&outLen);  // host -> little-endian on big-endian hosts
  const unsigned char *len_bytes =
      reinterpret_cast<const unsigned char *>(&outLen);
  out->insert(out->end(), len_bytes, len_bytes + sizeof(int));

  out->insert(out->end(), data, data + len);
}
// One entry of an EXR `chlist` attribute (see ReadChannelInfo /
// WriteChannelInfo for the on-disk record layout).
typedef struct {
  std::string name;  // less than 255 bytes long
  int pixel_type;            // pixel type as read from the file
  int requested_pixel_type;  // pixel type written out by WriteChannelInfo
  int x_sampling;            // horizontal sampling from the chlist record
  int y_sampling;            // vertical sampling from the chlist record
  unsigned char p_linear;    // pLinear byte from the chlist record
  unsigned char pad[3];      // explicit padding; keeps following ints aligned
} ChannelInfo;
// Integer box with min/max corners; mirrors the EXR `box2i` attribute used
// for the data and display windows (see HeaderInfo below).
typedef struct {
  int min_x;
  int min_y;
  int max_x;
  int max_y;
} Box2iInfo;
// Parsed contents of a single EXR part header, used internally while
// reading and writing files. clear() resets every field to a zero/empty
// default before a header is (re)parsed.
struct HeaderInfo {
  std::vector<tinyexr::ChannelInfo> channels;  // `chlist` attribute entries
  std::vector<EXRAttribute> attributes;        // remaining/custom attributes
  Box2iInfo data_window;
  int line_order;
  Box2iInfo display_window;
  float screen_window_center[2];
  float screen_window_width;
  float pixel_aspect_ratio;
  int chunk_count;
  // Tiled format
  int tiled;  // Non-zero if the part is tiled.
  int tile_size_x;
  int tile_size_y;
  int tile_level_mode;
  int tile_rounding_mode;
  unsigned int header_len;  // presumably the serialized header byte length — set by the parser (not shown here)
  int compression_type;  // one of the IMF_*_COMPRESSION codes listed above
  // required for multi-part or non-image files
  std::string name;
  // required for multi-part or non-image files
  std::string type;
  // Resets all fields to their zero/empty defaults.
  void clear() {
    channels.clear();
    attributes.clear();
    data_window.min_x = 0;
    data_window.min_y = 0;
    data_window.max_x = 0;
    data_window.max_y = 0;
    line_order = 0;
    display_window.min_x = 0;
    display_window.min_y = 0;
    display_window.max_x = 0;
    display_window.max_y = 0;
    screen_window_center[0] = 0.0f;
    screen_window_center[1] = 0.0f;
    screen_window_width = 0.0f;
    pixel_aspect_ratio = 0.0f;
    chunk_count = 0;
    // Tiled format
    tiled = 0;
    tile_size_x = 0;
    tile_size_y = 0;
    tile_level_mode = 0;
    tile_rounding_mode = 0;
    header_len = 0;
    compression_type = 0;
    name.clear();
    type.clear();
  }
};
// Parses the payload of a `chlist` attribute into `channels`.
// Each record is: name '\0', int32 pixel_type, uchar pLinear,
// uchar[3] reserved, int32 x_sampling, int32 y_sampling; the list ends with
// a lone '\0'. Returns false on truncated or malformed data.
static bool ReadChannelInfo(std::vector<ChannelInfo> &channels,
                            const std::vector<unsigned char> &data) {
  const char *p = reinterpret_cast<const char *>(&data.at(0));
  for (;;) {
    if ((*p) == 0) {
      // End-of-list terminator.
      break;
    }
    ChannelInfo info;
    // Bytes remaining from `p` to the end of the attribute payload.
    tinyexr_int64 data_len = static_cast<tinyexr_int64>(data.size()) -
                             (p - reinterpret_cast<const char *>(data.data()));
    if (data_len < 0) {
      return false;
    }
    p = ReadString(&info.name, p, size_t(data_len));
    if ((p == NULL) && (info.name.empty())) {
      // Buffer overrun. Issue #51.
      return false;
    }
    // The 16 fixed bytes plus the list terminator must still fit.
    const unsigned char *data_end =
        reinterpret_cast<const unsigned char *>(p) + 16;
    if (data_end >= (data.data() + data.size())) {
      return false;
    }
    memcpy(&info.pixel_type, p, sizeof(int));
    p += 4;
    info.p_linear = static_cast<unsigned char>(p[0]);  // uchar
    p += 1 + 3;                                        // reserved: uchar[3]
    memcpy(&info.x_sampling, p, sizeof(int));          // int
    p += 4;
    memcpy(&info.y_sampling, p, sizeof(int));          // int
    p += 4;
    // Fields are stored little-endian; convert to host order.
    tinyexr::swap4(&info.pixel_type);
    tinyexr::swap4(&info.x_sampling);
    tinyexr::swap4(&info.y_sampling);
    channels.push_back(info);
  }
  return true;
}
// Serializes `channels` into the on-disk `chlist` attribute layout (the
// inverse of ReadChannelInfo). `data` is resized to the exact output size,
// including the trailing '\0' list terminator.
static void WriteChannelInfo(std::vector<unsigned char> &data,
                             const std::vector<ChannelInfo> &channels) {
  size_t sz = 0;
  // Calculate total size.
  for (size_t c = 0; c < channels.size(); c++) {
    sz += strlen(channels[c].name.c_str()) + 1;  // +1 for \0
    sz += 16;                                    // 4 * int
  }
  // +1 for the list terminator; resize() zero-fills, which also zeroes the
  // 3 reserved bytes after each pLinear byte.
  data.resize(sz + 1);
  unsigned char *p = &data.at(0);
  for (size_t c = 0; c < channels.size(); c++) {
    memcpy(p, channels[c].name.c_str(), strlen(channels[c].name.c_str()));
    p += strlen(channels[c].name.c_str());
    (*p) = '\0';
    p++;
    // Note: the *requested* pixel type is written, not the in-memory type.
    int pixel_type = channels[c].requested_pixel_type;
    int x_sampling = channels[c].x_sampling;
    int y_sampling = channels[c].y_sampling;
    // Convert to little-endian storage order (no-op on LE hosts).
    tinyexr::swap4(&pixel_type);
    tinyexr::swap4(&x_sampling);
    tinyexr::swap4(&y_sampling);
    memcpy(p, &pixel_type, sizeof(int));
    p += sizeof(int);
    (*p) = channels[c].p_linear;
    // Advance past pLinear plus the 3 reserved (already zeroed) bytes.
    p += 4;
    memcpy(p, &x_sampling, sizeof(int));
    p += sizeof(int);
    memcpy(p, &y_sampling, sizeof(int));
    p += sizeof(int);
  }
  (*p) = '\0';
}
// Compresses `src` (src_size bytes) into `dst` using the EXR ZIP scheme:
// split the bytes into two half-buffers, delta-encode, then deflate.
// `compressedSize` receives the output byte count. If deflate does not
// shrink the data, the raw input is stored instead and
// `compressedSize == src_size` (Issue 40); DecompressZip detects this case.
// `dst` must be at least compressBound(src_size) bytes.
static void CompressZip(unsigned char *dst,
                        tinyexr::tinyexr_uint64 &compressedSize,
                        const unsigned char *src, unsigned long src_size) {
  std::vector<unsigned char> tmpBuf(src_size);
  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfZipCompressor.cpp
  //
  //
  // Reorder the pixel data: even-indexed bytes go to the first half of
  // tmpBuf, odd-indexed bytes to the second half.
  //
  const char *srcPtr = reinterpret_cast<const char *>(src);
  {
    char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
    char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
    const char *stop = srcPtr + src_size;
    for (;;) {
      if (srcPtr < stop)
        *(t1++) = *(srcPtr++);
      else
        break;
      if (srcPtr < stop)
        *(t2++) = *(srcPtr++);
      else
        break;
    }
  }
  //
  // Predictor: store biased deltas between adjacent bytes so that
  // slowly-varying data deflates better.
  //
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + src_size;
    int p = t[-1];
    while (t < stop) {
      int d = int(t[0]) - p + (128 + 256);
      p = t[0];
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }
#if TINYEXR_USE_MINIZ
  //
  // Compress the data using miniz
  //
  mz_ulong outSize = mz_compressBound(src_size);
  int ret = mz_compress(
      dst, &outSize, static_cast<const unsigned char *>(&tmpBuf.at(0)),
      src_size);
  assert(ret == MZ_OK);
  (void)ret;
  compressedSize = outSize;
#else
  uLong outSize = compressBound(static_cast<uLong>(src_size));
  int ret = compress(dst, &outSize, static_cast<const Bytef *>(&tmpBuf.at(0)),
                     src_size);
  assert(ret == Z_OK);
  compressedSize = outSize;
#endif
  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if (compressedSize >= src_size) {
    compressedSize = src_size;
    memcpy(dst, src, src_size);
  }
}
// Inverse of CompressZip(): inflate `src` into a temporary buffer, undo the
// delta predictor, then re-interleave the two half-buffers into `dst`.
// `uncompressed_size` is in/out (the inflate call may update it). Returns
// false when inflation fails. When `*uncompressed_size == src_size` the
// chunk was stored uncompressed and is copied through verbatim (Issue 40).
// NOTE(review): `*uncompressed_size == 0` would make tmpBuf.at(0) throw;
// callers appear to guarantee a non-empty destination — confirm.
static bool DecompressZip(unsigned char *dst,
                          unsigned long *uncompressed_size /* inout */,
                          const unsigned char *src, unsigned long src_size) {
  if ((*uncompressed_size) == src_size) {
    // Data is not compressed(Issue 40).
    memcpy(dst, src, src_size);
    return true;
  }
  std::vector<unsigned char> tmpBuf(*uncompressed_size);
#if TINYEXR_USE_MINIZ
  int ret =
      mz_uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
  if (MZ_OK != ret) {
    return false;
  }
#else
  int ret = uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
  if (Z_OK != ret) {
    return false;
  }
#endif
  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfZipCompressor.cpp
  //
  // Predictor: each byte was stored as a biased delta; integrate to recover.
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + (*uncompressed_size);
    while (t < stop) {
      int d = int(t[-1]) + int(t[0]) - 128;
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }
  // Reorder the pixel data: interleave the two half-buffers back together.
  {
    const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
    const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
                     (*uncompressed_size + 1) / 2;
    char *s = reinterpret_cast<char *>(dst);
    char *stop = s + (*uncompressed_size);
    for (;;) {
      if (s < stop)
        *(s++) = *(t1++);
      else
        break;
      if (s < stop)
        *(s++) = *(t2++);
      else
        break;
    }
  }
  return true;
}
// RLE code from OpenEXR --------------------------------------
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wsign-conversion"
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204) // nonstandard extension used : non-constant
// aggregate initializer (also supported by GNU
// C and C99, so no big deal)
#pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4267) // 'argument': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is
// deprecated. Instead, use the ISO C and C++
// conformant name: _strdup.
#endif
// Runs shorter than MIN_RUN_LENGTH are emitted as literals; both run kinds
// are capped at MAX_RUN_LENGTH bytes per record.
const int MIN_RUN_LENGTH = 3;
const int MAX_RUN_LENGTH = 127;

//
// Compress an array of bytes, using run-length encoding,
// and return the length of the compressed data.
// Encoding: a non-negative count byte c followed by one value byte means
// (c + 1) repeats; a negative count byte -n is followed by n literal bytes.
//
static int rleCompress(int inLength, const char in[], signed char out[]) {
  const char *end = in + inLength;
  const char *runBegin = in;
  const char *runCursor = in + 1;
  signed char *dst = out;

  while (runBegin < end) {
    // Extend the run while the byte repeats (up to MAX_RUN_LENGTH).
    while (runCursor < end && *runBegin == *runCursor &&
           runCursor - runBegin - 1 < MAX_RUN_LENGTH) {
      ++runCursor;
    }

    if (runCursor - runBegin >= MIN_RUN_LENGTH) {
      //
      // Long enough to pay off: emit (length - 1, value).
      //
      *dst++ = static_cast<char>(runCursor - runBegin) - 1;
      *dst++ = *(reinterpret_cast<const signed char *>(runBegin));
      runBegin = runCursor;
    } else {
      //
      // Too short to compress: gather literals until a 3-byte repeat starts
      // or MAX_RUN_LENGTH literals are collected.
      //
      while (runCursor < end &&
             ((runCursor + 1 >= end || *runCursor != *(runCursor + 1)) ||
              (runCursor + 2 >= end ||
               *(runCursor + 1) != *(runCursor + 2))) &&
             runCursor - runBegin < MAX_RUN_LENGTH) {
        ++runCursor;
      }
      // Negative count marks a literal run, followed by the raw bytes.
      *dst++ = static_cast<char>(runBegin - runCursor);
      while (runBegin < runCursor) {
        *dst++ = *(reinterpret_cast<const signed char *>(runBegin++));
      }
    }

    ++runCursor;
  }

  return static_cast<int>(dst - out);
}
//
// Uncompress an array of bytes compressed with rleCompress().
// Returns the length of the uncompressed data, or 0 if the
// length of the uncompressed data would be more than maxLength,
// or if the input stream is truncated/malformed.
//
static int rleUncompress(int inLength, int maxLength, const signed char in[],
                         char out[]) {
  char *outStart = out;
  while (inLength > 0) {
    if (*in < 0) {
      // Literal run: -(*in) raw bytes follow the count byte.
      int count = -(static_cast<int>(*in++));
      inLength -= count + 1;
      // Fixes #116: Add bounds check to in buffer.
      if ((0 > (maxLength -= count)) || (inLength < 0)) return 0;
      memcpy(out, in, count);
      out += count;
      in += count;
    } else {
      // Repeated run: the next byte is replicated (count + 1) times.
      int count = *in++;
      inLength -= 2;
      // Bounds check: the replicated value byte must actually be present in
      // `in` (truncated input would otherwise be read out of bounds), and
      // the expanded run must fit in `out`.
      if (inLength < 0) return 0;
      if (0 > (maxLength -= count + 1)) return 0;
      memset(out, *reinterpret_cast<const char *>(in), count + 1);
      out += count + 1;
      in++;
    }
  }
  return static_cast<int>(out - outStart);
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif
// End of RLE code from OpenEXR -----------------------------------
// Compresses `src` (src_size bytes) into `dst` using the EXR RLE scheme:
// split the bytes into two half-buffers, delta-encode, then run-length
// encode with rleCompress(). `compressedSize` receives the output size;
// falls back to a raw copy when RLE does not shrink the data (Issue 40).
// `dst` must have room for the rleCompress worst case, (src_size * 3) / 2.
static void CompressRle(unsigned char *dst,
                        tinyexr::tinyexr_uint64 &compressedSize,
                        const unsigned char *src, unsigned long src_size) {
  std::vector<unsigned char> tmpBuf(src_size);
  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfRleCompressor.cpp
  //
  //
  // Reorder the pixel data: even-indexed bytes go to the first half of
  // tmpBuf, odd-indexed bytes to the second half.
  //
  const char *srcPtr = reinterpret_cast<const char *>(src);
  {
    char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
    char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
    const char *stop = srcPtr + src_size;
    for (;;) {
      if (srcPtr < stop)
        *(t1++) = *(srcPtr++);
      else
        break;
      if (srcPtr < stop)
        *(t2++) = *(srcPtr++);
      else
        break;
    }
  }
  //
  // Predictor: store biased deltas between adjacent bytes so runs of
  // similar values RLE-encode better.
  //
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + src_size;
    int p = t[-1];
    while (t < stop) {
      int d = int(t[0]) - p + (128 + 256);
      p = t[0];
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }
  // outSize will be (src_size * 3) / 2 at max.
  int outSize = rleCompress(static_cast<int>(src_size),
                            reinterpret_cast<const char *>(&tmpBuf.at(0)),
                            reinterpret_cast<signed char *>(dst));
  assert(outSize > 0);
  compressedSize = static_cast<tinyexr::tinyexr_uint64>(outSize);
  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if (compressedSize >= src_size) {
    compressedSize = src_size;
    memcpy(dst, src, src_size);
  }
}
// Inverse of CompressRle(): RLE-decode `src` into a temporary buffer, undo
// the delta predictor, then re-interleave the two half-buffers into `dst`.
// Returns false when the decoded size does not match `uncompressed_size`.
// When `uncompressed_size == src_size` the chunk was stored uncompressed
// and is copied through verbatim (Issue 40).
static bool DecompressRle(unsigned char *dst,
                          const unsigned long uncompressed_size,
                          const unsigned char *src, unsigned long src_size) {
  if (uncompressed_size == src_size) {
    // Data is not compressed(Issue 40).
    memcpy(dst, src, src_size);
    return true;
  }
  // Workaround for issue #112.
  // TODO(syoyo): Add more robust out-of-bounds check in `rleUncompress`.
  if (src_size <= 2) {
    return false;
  }
  std::vector<unsigned char> tmpBuf(uncompressed_size);
  int ret = rleUncompress(static_cast<int>(src_size),
                          static_cast<int>(uncompressed_size),
                          reinterpret_cast<const signed char *>(src),
                          reinterpret_cast<char *>(&tmpBuf.at(0)));
  if (ret != static_cast<int>(uncompressed_size)) {
    // Short or over-long decode: treat as corrupt input.
    return false;
  }
  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfRleCompressor.cpp
  //
  // Predictor: each byte was stored as a biased delta; integrate to recover.
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + uncompressed_size;
    while (t < stop) {
      int d = int(t[-1]) + int(t[0]) - 128;
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }
  // Reorder the pixel data: interleave the two half-buffers back together.
  {
    const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
    const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
                     (uncompressed_size + 1) / 2;
    char *s = reinterpret_cast<char *>(dst);
    char *stop = s + uncompressed_size;
    for (;;) {
      if (s < stop)
        *(s++) = *(t1++);
      else
        break;
      if (s < stop)
        *(s++) = *(t2++);
      else
        break;
    }
  }
  return true;
}
#if TINYEXR_USE_PIZ
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif
//
// PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp
//
// -----------------------------------------------------------------
// Copyright (c) 2004, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC)
// (3 clause BSD license)
//
// Per-channel bookkeeping used by the PIZ compressor/decompressor.
// NOTE(review): field semantics inferred from OpenEXR's ImfPizCompressor
// (start/end = sample range cursor, nx/ny = dimensions, ys = y sampling,
// size = samples per pixel) — confirm against the PIZ routines that fill
// this struct (not visible in this chunk).
struct PIZChannelData {
  unsigned short *start;
  unsigned short *end;
  int nx;
  int ny;
  int ys;
  int size;
};
//-----------------------------------------------------------------------------
//
// 16-bit Haar Wavelet encoding and decoding
//
// The source code in this file is derived from the encoding
// and decoding routines written by Christian Rouet for his
// PIZ image file format.
//
//-----------------------------------------------------------------------------
//
// Wavelet basis functions without modulo arithmetic; they produce
// the best compression ratios when the wavelet-transformed data are
// Huffman-encoded, but the wavelet transform works only for 14-bit
// data (untransformed data values must be less than (1 << 14)).
//
// Forward 14-bit wavelet step (no modulo arithmetic): writes the rounded
// average of a and b to `l` (low band) and their difference to `h` (high
// band). Exact only when inputs are below 1 << 14.
inline void wenc14(unsigned short a, unsigned short b, unsigned short &l,
                   unsigned short &h) {
  const short sa = static_cast<short>(a);
  const short sb = static_cast<short>(b);
  const short avg = (sa + sb) >> 1;
  const short diff = sa - sb;
  l = static_cast<unsigned short>(avg);
  h = static_cast<unsigned short>(diff);
}
// Inverse of wenc14(): reconstructs the original pair (a, b) from the low
// band `l` (rounded average) and high band `h` (difference).
inline void wdec14(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  const short sl = static_cast<short>(l);
  const short sh = static_cast<short>(h);
  const int d = sh;
  // Undo the floor division in the encoder's average using d's parity.
  const int sum = sl + (d & 1) + (d >> 1);
  a = static_cast<unsigned short>(static_cast<short>(sum));
  b = static_cast<unsigned short>(static_cast<short>(sum - d));
}
//
// Wavelet basis functions with modulo arithmetic; they work with full
// 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't
// compress the data quite as well.
//
// Parameters for the 16-bit modulo-arithmetic wavelet basis.
const int NBITS = 16;
const int A_OFFSET = 1 << (NBITS - 1);
const int M_OFFSET = 1 << (NBITS - 1);
const int MOD_MASK = (1 << NBITS) - 1;

// Forward wavelet step with modulo arithmetic; valid for the full 16-bit
// input range (unlike wenc14), at the cost of worse Huffman compressibility.
inline void wenc16(unsigned short a, unsigned short b, unsigned short &l,
                   unsigned short &h) {
  int ao = (a + A_OFFSET) & MOD_MASK;
  int m = (ao + b) >> 1;
  int d = ao - b;
  if (d < 0) {
    m = (m + M_OFFSET) & MOD_MASK;
  }
  d &= MOD_MASK;
  l = static_cast<unsigned short>(m);
  h = static_cast<unsigned short>(d);
}

// Inverse of wenc16(): reconstructs (a, b) from the (m, d) pair.
inline void wdec16(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  const int m = l;
  const int d = h;
  const int bb = (m - (d >> 1)) & MOD_MASK;
  const int aa = (d + bb - A_OFFSET) & MOD_MASK;
  b = static_cast<unsigned short>(bb);
  a = static_cast<unsigned short>(aa);
}
//
// 2D Wavelet encoding:
//
// Forward 2D wavelet transform, performed in place on an nx-by-ny slice
// with element strides ox (x) and oy (y). Uses the exact 14-bit basis when
// the caller-supplied maximum `mx` is below 1 << 14, otherwise the
// modulo-arithmetic 16-bit basis. Derived from OpenEXR's PIZ code.
static void wav2Encode(
    unsigned short *in,  // io: values are transformed in place
    int nx,              // i : x size
    int ox,              // i : x offset
    int ny,              // i : y size
    int oy,              // i : y offset
    unsigned short mx)   // i : maximum in[x][y] value
{
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ? ny : nx;
  int p = 1;   // == 1 <<  level
  int p2 = 2;  // == 1 << (level + 1)
  //
  // Hierarchical loop on smaller dimension n
  //
  while (p2 <= n) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;
    int oy2 = oy * p2;
    int ox1 = ox * p;
    int ox2 = ox * p2;
    unsigned short i00, i01, i10, i11;
    //
    // Y loop
    //
    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);
      //
      // X loop
      //
      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;
        //
        // 2D wavelet encoding
        //
        if (w14) {
          wenc14(*px, *p01, i00, i01);
          wenc14(*p10, *p11, i10, i11);
          wenc14(i00, i10, *px, *p10);
          wenc14(i01, i11, *p01, *p11);
        } else {
          wenc16(*px, *p01, i00, i01);
          wenc16(*p10, *p11, i10, i11);
          wenc16(i00, i10, *px, *p10);
          wenc16(i01, i11, *p01, *p11);
        }
      }
      //
      // Encode (1D) odd column (still in Y loop)
      //
      if (nx & p) {
        unsigned short *p10 = px + oy1;
        if (w14)
          wenc14(*px, *p10, i00, *p10);
        else
          wenc16(*px, *p10, i00, *p10);
        *px = i00;
      }
    }
    //
    // Encode (1D) odd line (must loop in X)
    //
    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);
      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        if (w14)
          wenc14(*px, *p01, i00, *p01);
        else
          wenc16(*px, *p01, i00, *p01);
        *px = i00;
      }
    }
    //
    // Next level
    //
    p = p2;
    p2 <<= 1;
  }
}
//
// 2D Wavelet decoding:
//
// Inverse 2D wavelet transform of wav2Encode(), performed in place on an
// nx-by-ny slice with element strides ox (x) and oy (y). `mx` must match
// the value used at encode time so the same (14- or 16-bit) basis is chosen.
static void wav2Decode(
    unsigned short *in,  // io: values are transformed in place
    int nx,              // i : x size
    int ox,              // i : x offset
    int ny,              // i : y size
    int oy,              // i : y offset
    unsigned short mx)   // i : maximum in[x][y] value
{
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ? ny : nx;
  int p = 1;
  int p2;
  //
  // Search max level
  //
  while (p <= n) p <<= 1;
  p >>= 1;
  p2 = p;
  p >>= 1;
  //
  // Hierarchical loop on smaller dimension n
  // (levels are walked coarse-to-fine, the reverse of wav2Encode)
  //
  while (p >= 1) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;
    int oy2 = oy * p2;
    int ox1 = ox * p;
    int ox2 = ox * p2;
    unsigned short i00, i01, i10, i11;
    //
    // Y loop
    //
    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);
      //
      // X loop
      //
      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;
        //
        // 2D wavelet decoding
        //
        if (w14) {
          wdec14(*px, *p10, i00, i10);
          wdec14(*p01, *p11, i01, i11);
          wdec14(i00, i01, *px, *p01);
          wdec14(i10, i11, *p10, *p11);
        } else {
          wdec16(*px, *p10, i00, i10);
          wdec16(*p01, *p11, i01, i11);
          wdec16(i00, i01, *px, *p01);
          wdec16(i10, i11, *p10, *p11);
        }
      }
      //
      // Decode (1D) odd column (still in Y loop)
      //
      if (nx & p) {
        unsigned short *p10 = px + oy1;
        if (w14)
          wdec14(*px, *p10, i00, *p10);
        else
          wdec16(*px, *p10, i00, *p10);
        *px = i00;
      }
    }
    //
    // Decode (1D) odd line (must loop in X)
    //
    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);
      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        if (w14)
          wdec14(*px, *p01, i00, *p01);
        else
          wdec16(*px, *p01, i00, *p01);
        *px = i00;
      }
    }
    //
    // Next level
    //
    p2 = p;
    p >>= 1;
  }
}
//-----------------------------------------------------------------------------
//
// 16-bit Huffman compression and decompression.
//
// The source code in this file is derived from the 8-bit
// Huffman compression and decompression routines written
// by Christian Rouet for his PIZ image file format.
//
//-----------------------------------------------------------------------------
// Adds some modification for tinyexr.
// Huffman coding parameters for 16-bit symbols.
const int HUF_ENCBITS = 16;  // literal (value) bit length
const int HUF_DECBITS = 14;  // decoding bit size (>= 8)

const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1;  // encoding table size
const int HUF_DECSIZE = 1 << HUF_DECBITS;        // decoding table size
const int HUF_DECMASK = HUF_DECSIZE - 1;

// One slot of the fast Huffman decoding table. Short codes decode directly
// (len/lit); long codes share a slot via the `p` overflow list.
struct HufDec {  // short code           long code
  //-------------------------------
  unsigned int len : 8;   // code length    0
  unsigned int lit : 24;  // lit            p size
  unsigned int *p;        // 0              lits
};
// A Huffman table entry packs the code value in bits [63:6] and its bit
// length in bits [5:0] (see hufCanonicalCodeTable below).
inline long long hufLength(long long code) { return code & 63; }
inline long long hufCode(long long code) { return code >> 6; }
// Appends the low `nBits` of `bits` to the MSB-first bit accumulator `c`
// (current bit count in `lc`) and flushes every complete byte to `out`.
inline void outputBits(int nBits, long long bits, long long &c, int &lc,
                       char *&out) {
  c = (c << nBits) | bits;
  lc += nBits;
  while (lc >= 8) {
    lc -= 8;
    *out++ = static_cast<char>(c >> lc);
  }
}
// Reads `nBits` from the MSB-first bit stream `in`, refilling the
// accumulator `c` (bit count in `lc`) one byte at a time, and returns them
// as the low bits of the result.
inline long long getBits(int nBits, long long &c, int &lc, const char *&in) {
  while (lc < nBits) {
    const unsigned char next = static_cast<unsigned char>(*in++);
    c = (c << 8) | next;
    lc += 8;
  }
  lc -= nBits;
  return (c >> lc) & ((1 << nBits) - 1);
}
//
// ENCODING TABLE BUILDING & (UN)PACKING
//
//
// Build a "canonical" Huffman code table:
// - for each (uncompressed) symbol, hcode contains the length
// of the corresponding code (in the compressed data)
// - canonical codes are computed and stored in hcode
// - the rules for constructing canonical codes are as follows:
// * shorter codes (if filled with zeroes to the right)
// have a numerically higher value than longer codes
// * for codes with the same length, numerical values
// increase with numerical symbol values
// - because the canonical code table can be constructed from
// symbol lengths alone, the code table can be transmitted
// without sending the actual code values
// - see http://www.compressconsult.com/huffman/
//
// Turn a table of code lengths into a canonical Huffman code table:
// on entry hcode[i] is the bit length of symbol i's code; on exit it is the
// packed (code << 6) | length entry. Canonical codes are fully determined by
// the lengths, so only lengths need to be transmitted.
static void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) {
  // n[l] first counts how many codes have length l (l = 0..58) ...
  long long n[59] = {};
  for (int i = 0; i < HUF_ENCSIZE; ++i) n[hcode[i]] += 1;
  // ... then, walking from the longest length down, n[l] becomes the
  // numerically lowest code of length l.
  long long c = 0;
  for (int l = 58; l > 0; --l) {
    const long long next = (c + n[l]) >> 1;
    n[l] = c;
    c = next;
  }
  // Hand out consecutive codes of each length and pack them with the length.
  for (int i = 0; i < HUF_ENCSIZE; ++i) {
    const int len = static_cast<int>(hcode[i]);
    if (len > 0) hcode[i] = len | (n[len]++ << 6);
  }
}
//
// Compute Huffman codes (based on frq input) and store them in frq:
// - code structure is : [63:lsb - 6:msb] | [5-0: bit length];
// - max code length is 58 bits;
// - codes outside the range [im-iM] have a null length (unused values);
// - original frequencies are destroyed;
// - encoding tables are used by hufEncode() and hufBuildDecTable();
//
// Orders frequency pointers by their pointees, inverted so that the
// std::*_heap family builds a MIN-heap (least frequent symbol on top).
struct FHeapCompare {
  bool operator()(long long *lhs, long long *rhs) { return *lhs > *rhs; }
};
// Build the Huffman encoding table in place: `frq` holds per-symbol
// frequencies on entry and packed (code, length) entries on exit.
// Also reports the min/max indices of used symbols via im/iM.
static void hufBuildEncTable(
    long long *frq, // io: input frequencies [HUF_ENCSIZE], output table
    int *im, // o: min frq index
    int *iM) // o: max frq index
{
  //
  // This function assumes that when it is called, array frq
  // indicates the frequency of all possible symbols in the data
  // that are to be Huffman-encoded. (frq[i] contains the number
  // of occurrences of symbol i in the data.)
  //
  // The loop below does three things:
  //
  // 1) Finds the minimum and maximum indices that point
  // to non-zero entries in frq:
  //
  // frq[im] != 0, and frq[i] == 0 for all i < im
  // frq[iM] != 0, and frq[i] == 0 for all i > iM
  //
  // 2) Fills array fHeap with pointers to all non-zero
  // entries in frq.
  //
  // 3) Initializes array hlink such that hlink[i] == i
  // for all array entries.
  //
  std::vector<int> hlink(HUF_ENCSIZE);
  std::vector<long long *> fHeap(HUF_ENCSIZE);
  *im = 0;
  // NOTE(review): assumes at least one frequency is non-zero; otherwise this
  // scan would run past the array. Callers feed real pixel data, so frq has
  // at least one count — TODO confirm for hostile inputs.
  while (!frq[*im]) (*im)++;
  int nf = 0;
  for (int i = *im; i < HUF_ENCSIZE; i++) {
    hlink[i] = i;
    if (frq[i]) {
      fHeap[nf] = &frq[i];
      nf++;
      *iM = i;
    }
  }
  //
  // Add a pseudo-symbol, with a frequency count of 1, to frq;
  // adjust the fHeap and hlink array accordingly. Function
  // hufEncode() uses the pseudo-symbol for run-length encoding.
  //
  (*iM)++;
  frq[*iM] = 1;
  fHeap[nf] = &frq[*iM];
  nf++;
  //
  // Build an array, scode, such that scode[i] contains the number
  // of bits assigned to symbol i. Conceptually this is done by
  // constructing a tree whose leaves are the symbols with non-zero
  // frequency:
  //
  // Make a heap that contains all symbols with a non-zero frequency,
  // with the least frequent symbol on top.
  //
  // Repeat until only one symbol is left on the heap:
  //
  // Take the two least frequent symbols off the top of the heap.
  // Create a new node that has first two nodes as children, and
  // whose frequency is the sum of the frequencies of the first
  // two nodes. Put the new node back into the heap.
  //
  // The last node left on the heap is the root of the tree. For each
  // leaf node, the distance between the root and the leaf is the length
  // of the code for the corresponding symbol.
  //
  // The loop below doesn't actually build the tree; instead we compute
  // the distances of the leaves from the root on the fly. When a new
  // node is added to the heap, then that node's descendants are linked
  // into a single linear list that starts at the new node, and the code
  // lengths of the descendants (that is, their distance from the root
  // of the tree) are incremented by one.
  //
  std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
  std::vector<long long> scode(HUF_ENCSIZE);
  memset(scode.data(), 0, sizeof(long long) * HUF_ENCSIZE);
  while (nf > 1) {
    //
    // Find the indices, mm and m, of the two smallest non-zero frq
    // values in fHeap, add the smallest frq to the second-smallest
    // frq, and remove the smallest frq value from fHeap.
    //
    int mm = fHeap[0] - frq;
    std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
    --nf;
    int m = fHeap[0] - frq;
    std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
    frq[m] += frq[mm];
    std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
    //
    // The entries in scode are linked into lists with the
    // entries in hlink serving as "next" pointers and with
    // the end of a list marked by hlink[j] == j.
    //
    // Traverse the lists that start at scode[m] and scode[mm].
    // For each element visited, increment the length of the
    // corresponding code by one bit. (If we visit scode[j]
    // during the traversal, then the code for symbol j becomes
    // one bit longer.)
    //
    // Merge the lists that start at scode[m] and scode[mm]
    // into a single list that starts at scode[m].
    //
    //
    // Add a bit to all codes in the first list.
    //
    for (int j = m;; j = hlink[j]) {
      scode[j]++;
      assert(scode[j] <= 58);
      if (hlink[j] == j) {
        //
        // Merge the two lists.
        //
        hlink[j] = mm;
        break;
      }
    }
    //
    // Add a bit to all codes in the second list
    //
    for (int j = mm;; j = hlink[j]) {
      scode[j]++;
      assert(scode[j] <= 58);
      if (hlink[j] == j) break;
    }
  }
  //
  // Build a canonical Huffman code table, replacing the code
  // lengths in scode with (code, code length) pairs. Copy the
  // code table from scode into frq.
  //
  hufCanonicalCodeTable(scode.data());
  memcpy(frq, scode.data(), sizeof(long long) * HUF_ENCSIZE);
}
//
// Pack an encoding table:
// - only code lengths, not actual codes, are stored
// - runs of zeroes are compressed as follows:
//
// unpacked packed
// --------------------------------
// 1 zero 0 (6 bits)
// 2 zeroes 59
// 3 zeroes 60
// 4 zeroes 61
// 5 zeroes 62
// n zeroes (6 or more) 63 n-6 (6 + 8 bits)
//
// Zero-run escape codes used by the packed-length-table format above:
// 59..62 encode runs of 2..5 zeroes directly; 63 is followed by an 8-bit
// count and encodes runs of SHORTEST_LONG_RUN..LONGEST_LONG_RUN zeroes.
const int SHORT_ZEROCODE_RUN = 59;
const int LONG_ZEROCODE_RUN = 63;
const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN;
const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN;
// Serialize the code lengths of hcode[im..iM] into *pcode as 6-bit values,
// with runs of zero lengths collapsed using the escape codes above.
// Advances *pcode past the packed data.
static void hufPackEncTable(
    const long long *hcode, // i : encoding table [HUF_ENCSIZE]
    int im, // i : min hcode index
    int iM, // i : max hcode index
    char **pcode) // o: ptr to packed table (updated)
{
  char *p = *pcode;
  long long c = 0; // pending output bits
  int lc = 0; // number of valid bits in c
  for (; im <= iM; im++) {
    int l = hufLength(hcode[im]);
    if (l == 0) {
      // Count how many consecutive symbols have zero-length codes.
      int zerun = 1;
      while ((im < iM) && (zerun < LONGEST_LONG_RUN)) {
        if (hufLength(hcode[im + 1]) > 0) break;
        im++;
        zerun++;
      }
      if (zerun >= 2) {
        if (zerun >= SHORTEST_LONG_RUN) {
          // Long run: escape code 63 plus an 8-bit (zerun - 6) count.
          outputBits(6, LONG_ZEROCODE_RUN, c, lc, p);
          outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, p);
        } else {
          // Short run of 2..5 zeroes: codes 59..62.
          outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, p);
        }
        continue;
      }
      // A single zero falls through and is written as a literal length 0.
    }
    outputBits(6, l, c, lc, p);
  }
  // Flush the final partial byte, left-aligned.
  if (lc > 0) *p++ = (unsigned char)(c << (8 - lc));
  *pcode = p;
}
//
// Unpack an encoding table packed by hufPackEncTable():
//
// Inverse of hufPackEncTable(): read 6-bit code lengths (expanding zero-run
// escapes) into hcode[im..iM], then rebuild the canonical codes from the
// lengths. Returns false if the packed data is truncated or a run overflows
// the [im, iM] range. Advances *pcode past the consumed bytes.
static bool hufUnpackEncTable(
    const char **pcode, // io: ptr to packed table (updated)
    int ni, // i : input size (in bytes)
    int im, // i : min hcode index
    int iM, // i : max hcode index
    long long *hcode) // o: encoding table [HUF_ENCSIZE]
{
  memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE);
  const char *p = *pcode;
  long long c = 0;
  int lc = 0;
  for (; im <= iM; im++) {
    // Truncation check before pulling the next 6-bit length.
    if (p - *pcode >= ni) {
      return false;
    }
    long long l = hcode[im] = getBits(6, c, lc, p); // code length
    if (l == (long long)LONG_ZEROCODE_RUN) {
      // NOTE(review): this uses `>` while the loop check above uses `>=`;
      // getBits may still read one byte when the buffer is exactly consumed
      // — looks intentional-but-loose upstream, verify against tinyexr.
      if (p - *pcode > ni) {
        return false;
      }
      // Long zero run: 8-bit count biased by SHORTEST_LONG_RUN.
      int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN;
      if (im + zerun > iM + 1) {
        return false;
      }
      while (zerun--) hcode[im++] = 0;
      im--; // the for-loop's im++ accounts for the last zero
    } else if (l >= (long long)SHORT_ZEROCODE_RUN) {
      // Short zero run: codes 59..62 encode 2..5 zeroes.
      int zerun = l - SHORT_ZEROCODE_RUN + 2;
      if (im + zerun > iM + 1) {
        return false;
      }
      while (zerun--) hcode[im++] = 0;
      im--;
    }
  }
  *pcode = const_cast<char *>(p);
  // Lengths -> canonical (code, length) entries.
  hufCanonicalCodeTable(hcode);
  return true;
}
//
// DECODING TABLE BUILDING
//
//
// Clear a newly allocated decoding table so that it contains only zeroes.
//
// Reset every entry of a decoding table to "empty" (no length, no symbol,
// no secondary long-code list). Field-by-field stores rather than memset
// keep the pointer member well-defined.
static void hufClearDecTable(HufDec *hdecod) // io: (allocated by caller)
// decoding table [HUF_DECSIZE]
{
  for (HufDec *e = hdecod; e != hdecod + HUF_DECSIZE; ++e) {
    e->len = 0;
    e->lit = 0;
    e->p = NULL;
  }
}
//
// Build a decoding hash table based on the encoding table hcode:
// - short codes (<= HUF_DECBITS) are resolved with a single table access;
// - long code entry allocations are not optimized, because long codes are
// unfrequent;
// - decoding tables are used by hufDecode();
//
// Build the primary decoding table from the encoding table: short codes
// (<= HUF_DECBITS bits) fill every table slot their prefix can reach; long
// codes append their symbol to the secondary list of the slot addressed by
// their top HUF_DECBITS bits. Returns false on a malformed code table.
static bool hufBuildDecTable(const long long *hcode, // i : encoding table
                             int im, // i : min index in hcode
                             int iM, // i : max index in hcode
                             HufDec *hdecod) // o: (allocated by caller)
// decoding table [HUF_DECSIZE]
{
  //
  // Init hashtable & loop on all codes.
  // Assumes that hufClearDecTable(hdecod) has already been called.
  //
  for (; im <= iM; im++) {
    long long c = hufCode(hcode[im]);
    int l = hufLength(hcode[im]);
    if (c >> l) {
      //
      // Error: c is supposed to be an l-bit code,
      // but c contains a value that is greater
      // than the largest l-bit number.
      //
      // invalidTableEntry();
      return false;
    }
    if (l > HUF_DECBITS) {
      //
      // Long code: add a secondary entry
      //
      HufDec *pl = hdecod + (c >> (l - HUF_DECBITS));
      if (pl->len) {
        //
        // Error: a short code has already
        // been stored in table entry *pl.
        //
        // invalidTableEntry();
        return false;
      }
      pl->lit++;
      if (pl->p) {
        // Grow the candidate-symbol array by one (realloc-by-copy).
        unsigned int *p = pl->p;
        pl->p = new unsigned int[pl->lit];
        for (int i = 0; i < pl->lit - 1; ++i) pl->p[i] = p[i];
        delete[] p;
      } else {
        pl->p = new unsigned int[1];
      }
      pl->p[pl->lit - 1] = im;
    } else if (l) {
      //
      // Short code: init all primary entries
      //
      // Every table index whose top l bits equal c decodes to this symbol,
      // so fill all 2^(HUF_DECBITS - l) consecutive slots.
      HufDec *pl = hdecod + (c << (HUF_DECBITS - l));
      for (long long i = 1ULL << (HUF_DECBITS - l); i > 0; i--, pl++) {
        if (pl->len || pl->p) {
          //
          // Error: a short code or a long code has
          // already been stored in table entry *pl.
          //
          // invalidTableEntry();
          return false;
        }
        pl->len = l;
        pl->lit = im;
      }
    }
  }
  return true;
}
//
// Free the long code entries of a decoding table built by hufBuildDecTable()
//
// Release the secondary (long-code) symbol lists allocated by
// hufBuildDecTable() and null the pointers so a second call is harmless.
static void hufFreeDecTable(HufDec *hdecod) // io: Decoding table
{
  for (int i = 0; i < HUF_DECSIZE; i++) {
    if (!hdecod[i].p) continue;
    delete[] hdecod[i].p;
    hdecod[i].p = 0;
  }
}
//
// ENCODING
//
// Emit one packed (code, length) table entry to the output bit stream.
inline void outputCode(long long code, long long &c, int &lc, char *&out) {
  const int nBits = static_cast<int>(hufLength(code));
  outputBits(nBits, hufCode(code), c, lc, out);
}
inline void sendCode(long long sCode, int runCount, long long runCode,
                     long long &c, int &lc, char *&out) {
  //
  // Output runCount + 1 instances of the symbol whose code is sCode:
  // either explicitly, or — when that is shorter — as a single sCode
  // followed by the runCode escape symbol and runCount as an 8-bit number.
  //
  const long long escaped = hufLength(sCode) + hufLength(runCode) + 8;
  if (escaped < hufLength(sCode) * runCount) {
    outputCode(sCode, c, lc, out);
    outputCode(runCode, c, lc, out);
    outputBits(8, runCount, c, lc, out);
  } else {
    int copies = runCount + 1; // the symbol itself plus runCount repeats
    while (copies-- > 0) outputCode(sCode, c, lc, out);
  }
}
//
// Encode (compress) ni values based on the Huffman encoding table hcode:
//
// Huffman-encode ni input values into `out` using the packed table hcode,
// run-length-compressing repeats via the rlc pseudo-symbol.
// Returns the output size in BITS (the last byte may be partial).
static int hufEncode // return: output size (in bits)
    (const long long *hcode, // i : encoding table
     const unsigned short *in, // i : uncompressed input buffer
     const int ni, // i : input buffer size (in bytes)
     int rlc, // i : rl code
     char *out) // o: compressed output buffer
{
  // The run scan below reads in[0] unconditionally, which would be an
  // out-of-bounds read for an empty input. hufCompress() currently guards
  // nRaw == 0, but keep this function safe on its own.
  if (ni <= 0) return 0;
  char *outStart = out;
  long long c = 0; // bits not yet written to out
  int lc = 0; // number of valid bits in c (LSB)
  int s = in[0]; // current run symbol
  int cs = 0; // repeats of s seen beyond the first (capped at 255)
  //
  // Loop on input values
  //
  for (int i = 1; i < ni; i++) {
    //
    // Count same values or send code
    //
    if (s == in[i] && cs < 255) {
      cs++;
    } else {
      sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
      cs = 0;
    }
    s = in[i];
  }
  //
  // Send remaining code
  //
  sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
  // Flush the final partial byte, left-aligned; `out` is deliberately not
  // advanced — the bit count below accounts for the lc leftover bits.
  if (lc) *out = (c << (8 - lc)) & 0xff;
  return (out - outStart) * 8 + lc;
}
//
// DECODING
//
//
// In order to force the compiler to inline them,
// getChar() and getCode() are implemented as macros
// instead of "inline" functions.
//
// getChar: pull one byte from the input stream into the bit accumulator c
// (lc counts how many bits of c are currently valid).
#define getChar(c, lc, in) \
 { \
 c = (c << 8) | *(unsigned char *)(in++); \
 lc += 8; \
 }
// Disabled macro variant of getCode, kept for reference only; the function
// below (used instead) adds the input-underflow check from TinyEXR issue 78
// and the output-underflow check from issue 100.
#if 0
#define getCode(po, rlc, c, lc, in, out, ob, oe) \
 { \
 if (po == rlc) { \
 if (lc < 8) getChar(c, lc, in); \
 \
 lc -= 8; \
 \
 unsigned char cs = (c >> lc); \
 \
 if (out + cs > oe) return false; \
 \
 /* TinyEXR issue 78 */ \
 unsigned short s = out[-1]; \
 \
 while (cs-- > 0) *out++ = s; \
 } else if (out < oe) { \
 *out++ = po; \
 } else { \
 return false; \
 } \
 }
#else
// Emit one decoded symbol `po` into the output stream, expanding the
// run-length escape: when po is the rlc pseudo-symbol, the next 8 input bits
// give a repeat count and the PREVIOUS output value is duplicated that many
// times. Returns false on input underflow or output overflow.
static bool getCode(int po, int rlc, long long &c, int &lc, const char *&in,
                    const char *in_end, unsigned short *&out,
                    const unsigned short *ob, const unsigned short *oe) {
  (void)ob;
  if (po == rlc) {
    if (lc < 8) {
      /* TinyEXR issue 78 */
      // NOTE(review): `(in + 1) >= in_end` also rejects reading the very
      // last byte (getChar consumes only one); conservative bound — verify
      // against upstream before tightening.
      if ((in + 1) >= in_end) {
        return false;
      }
      getChar(c, lc, in);
    }
    lc -= 8;
    // Repeat count: next 8 bits (truncation to unsigned char is intended).
    unsigned char cs = (c >> lc);
    if (out + cs > oe) return false;
    // Bounds check for safety
    // Issue 100.
    // A run at the very start of the output has no previous value to repeat.
    if ((out - 1) < ob) return false;
    unsigned short s = out[-1];
    while (cs-- > 0) *out++ = s;
  } else if (out < oe) {
    *out++ = po;
  } else {
    return false;
  }
  return true;
}
#endif
//
// Decode (uncompress) ni bits based on encoding & decoding tables:
//
// Huffman-decode ni BITS from `in` into exactly `no` output values,
// using the primary table for short codes and a linear search of the
// secondary lists for long codes. Returns false on any malformed input.
static bool hufDecode(const long long *hcode, // i : encoding table
                      const HufDec *hdecod, // i : decoding table
                      const char *in, // i : compressed input buffer
                      int ni, // i : input size (in bits)
                      int rlc, // i : run-length code
                      int no, // i : expected output size (in bytes)
                      unsigned short *out) // o: uncompressed output buffer
{
  long long c = 0; // bit accumulator
  int lc = 0; // valid bits in c
  unsigned short *outb = out; // begin
  unsigned short *oe = out + no; // end
  const char *ie = in + (ni + 7) / 8; // input byte size
  //
  // Loop on input bytes
  //
  while (in < ie) {
    getChar(c, lc, in);
    //
    // Access decoding table
    //
    while (lc >= HUF_DECBITS) {
      // Index the primary table by the next HUF_DECBITS bits.
      const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK];
      if (pl.len) {
        //
        // Get short code
        //
        lc -= pl.len;
        // std::cout << "lit = " << pl.lit << std::endl;
        // std::cout << "rlc = " << rlc << std::endl;
        // std::cout << "c = " << c << std::endl;
        // std::cout << "lc = " << lc << std::endl;
        // std::cout << "in = " << in << std::endl;
        // std::cout << "out = " << out << std::endl;
        // std::cout << "oe = " << oe << std::endl;
        if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) {
          return false;
        }
      } else {
        // No short code for this prefix: there must be a secondary list.
        if (!pl.p) {
          return false;
        }
        // invalidCode(); // wrong code
        //
        // Search long code
        //
        int j;
        for (j = 0; j < pl.lit; j++) {
          int l = hufLength(hcode[pl.p[j]]);
          while (lc < l && in < ie) // get more bits
            getChar(c, lc, in);
          if (lc >= l) {
            // Compare the candidate's full code against the next l bits.
            if (hufCode(hcode[pl.p[j]]) ==
                ((c >> (lc - l)) & (((long long)(1) << l) - 1))) {
              //
              // Found : get long code
              //
              lc -= l;
              if (!getCode(pl.p[j], rlc, c, lc, in, ie, out, outb, oe)) {
                return false;
              }
              break;
            }
          }
        }
        if (j == pl.lit) {
          return false;
          // invalidCode(); // Not found
        }
      }
    }
  }
  //
  // Get remaining (short) codes
  //
  // Drop the padding bits of the final partial byte, then drain what's left.
  int i = (8 - ni) & 7;
  c >>= i;
  lc -= i;
  while (lc > 0) {
    const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK];
    if (pl.len) {
      lc -= pl.len;
      if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) {
        return false;
      }
    } else {
      return false;
      // invalidCode(); // wrong (long) code
    }
  }
  // The stream must produce exactly the expected number of output values.
  if (out - outb != no) {
    return false;
  }
  // notEnoughData ();
  return true;
}
// Histogram the input: freq[v] = number of occurrences of 16-bit value v.
// All HUF_ENCSIZE counters are reset first.
static void countFrequencies(std::vector<long long> &freq,
                             const unsigned short data[/*n*/], int n) {
  for (int sym = 0; sym < HUF_ENCSIZE; ++sym) freq[sym] = 0;
  while (n-- > 0) ++freq[*data++];
}
// Serialize i into buf as 4 little-endian bytes.
static void writeUInt(char buf[4], unsigned int i) {
  unsigned char *b = reinterpret_cast<unsigned char *>(buf);
  for (int k = 0; k < 4; ++k) {
    b[k] = static_cast<unsigned char>(i >> (8 * k));
  }
}
// Deserialize 4 little-endian bytes into an unsigned int.
// Each byte is widened to unsigned int BEFORE shifting: the original
// `b[3] << 24` shifted a promoted (signed) int, which is undefined
// behavior whenever b[3] >= 0x80.
static unsigned int readUInt(const char buf[4]) {
  const unsigned char *b = reinterpret_cast<const unsigned char *>(buf);
  return static_cast<unsigned int>(b[0]) |
         (static_cast<unsigned int>(b[1]) << 8) |
         (static_cast<unsigned int>(b[2]) << 16) |
         (static_cast<unsigned int>(b[3]) << 24);
}
//
// EXTERNAL INTERFACE
//
// Huffman-compress nRaw 16-bit values into `compressed`.
// Output layout: 20-byte header (im, iM, packed-table length, bit count,
// reserved), then the packed code-length table, then the bit stream.
// Returns the total number of bytes written (0 for empty input).
static int hufCompress(const unsigned short raw[], int nRaw,
                       char compressed[]) {
  if (nRaw == 0) return 0;
  std::vector<long long> freq(HUF_ENCSIZE);
  countFrequencies(freq, raw, nRaw);
  int im = 0;
  int iM = 0;
  // freq becomes the packed (code, length) table; iM is also the
  // run-length pseudo-symbol added by hufBuildEncTable().
  hufBuildEncTable(freq.data(), &im, &iM);
  char *tableStart = compressed + 20;
  char *tableEnd = tableStart;
  hufPackEncTable(freq.data(), im, iM, &tableEnd);
  int tableLength = tableEnd - tableStart;
  char *dataStart = tableEnd;
  int nBits = hufEncode(freq.data(), raw, nRaw, iM, dataStart);
  int data_length = (nBits + 7) / 8;
  writeUInt(compressed, im);
  writeUInt(compressed + 4, iM);
  writeUInt(compressed + 8, tableLength);
  writeUInt(compressed + 12, nBits);
  writeUInt(compressed + 16, 0); // room for future extensions
  return dataStart + data_length - compressed;
}
// Inverse of hufCompress(): decode `nCompressed` bytes into raw->size()
// 16-bit values. Returns false on malformed or truncated input.
//
// Fix: the return values of hufUnpackEncTable(), hufBuildDecTable() and
// hufDecode() were previously ignored, so corrupt streams were reported as
// success with garbage output; all three are now checked.
static bool hufUncompress(const char compressed[], int nCompressed,
                          std::vector<unsigned short> *raw) {
  if (nCompressed == 0) {
    // An empty stream never decodes successfully (preserves the original
    // behavior, which returned false for both branches of this case).
    return false;
  }
  int im = readUInt(compressed);
  int iM = readUInt(compressed + 4);
  // int tableLength = readUInt (compressed + 8);
  int nBits = readUInt(compressed + 12);
  if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE) return false;
  const char *ptr = compressed + 20;
  //
  // Fast decoder needs at least 2x64-bits of compressed data, and
  // needs to be run-able on this platform. Otherwise, fall back
  // to the original decoder
  //
  // if (FastHufDecoder::enabled() && nBits > 128)
  //{
  // FastHufDecoder fhd (ptr, nCompressed - (ptr - compressed), im, iM, iM);
  // fhd.decode ((unsigned char*)ptr, nBits, raw, nRaw);
  //}
  // else
  {
    std::vector<long long> freq(HUF_ENCSIZE);
    std::vector<HufDec> hdec(HUF_DECSIZE);
    hufClearDecTable(&hdec.at(0));
    // Rebuild the encoding table; fail on truncated/overflowing run data.
    if (!hufUnpackEncTable(&ptr, nCompressed - (ptr - compressed), im, iM,
                           &freq.at(0))) {
      return false;
    }
    // The bit count must fit inside the remaining input bytes.
    if (nBits > 8 * (nCompressed - (ptr - compressed))) {
      return false;
    }
    if (!hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0))) {
      hufFreeDecTable(&hdec.at(0));
      return false;
    }
    // iM doubles as the run-length pseudo-symbol (see hufCompress()).
    bool ok = hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM,
                        static_cast<int>(raw->size()), raw->data());
    hufFreeDecTable(&hdec.at(0));
    if (!ok) {
      return false;
    }
  }
  return true;
}
//
// Functions to compress the range of values in the pixel data
//
// Range-compression tables: one bit per possible 16-bit value.
const int USHORT_RANGE = (1 << 16);
const int BITMAP_SIZE = (USHORT_RANGE >> 3);
// Build a presence bitmap of all 16-bit values occurring in `data`, and
// report the first/last non-zero bitmap byte via minNonZero/maxNonZero.
static void bitmapFromData(const unsigned short data[/*nData*/], int nData,
                           unsigned char bitmap[BITMAP_SIZE],
                           unsigned short &minNonZero,
                           unsigned short &maxNonZero) {
  for (int i = 0; i < BITMAP_SIZE; ++i) bitmap[i] = 0;
  while (nData-- > 0) {
    const unsigned short v = *data++;
    bitmap[v >> 3] |= static_cast<unsigned char>(1u << (v & 7));
  }
  // Zero is never stored explicitly: the decoder assumes the data always
  // contains zeroes.
  bitmap[0] &= ~1;
  minNonZero = BITMAP_SIZE - 1;
  maxNonZero = 0;
  for (int i = 0; i < BITMAP_SIZE; ++i) {
    if (!bitmap[i]) continue;
    if (i < minNonZero) minNonZero = static_cast<unsigned short>(i);
    if (i > maxNonZero) maxNonZero = static_cast<unsigned short>(i);
  }
}
// Build the forward remapping table: zero and every value present in the
// bitmap get consecutive indices; absent values map to 0.
static unsigned short forwardLutFromBitmap(
    const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
  int next = 0;
  for (int v = 0; v < USHORT_RANGE; ++v) {
    const bool present = (v == 0) || (bitmap[v >> 3] & (1 << (v & 7)));
    lut[v] = present ? static_cast<unsigned short>(next++) : 0;
  }
  return static_cast<unsigned short>(next - 1); // maximum value stored in
} // lut[], i.e. number of ones in bitmap minus 1
// Build the inverse remapping table: lut[k] is the k-th value that is
// present (zero always counts as present); unused slots are zeroed.
static unsigned short reverseLutFromBitmap(
    const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
  int k = 0;
  for (int v = 0; v < USHORT_RANGE; ++v) {
    if ((v == 0) || (bitmap[v >> 3] & (1 << (v & 7)))) {
      lut[k++] = static_cast<unsigned short>(v);
    }
  }
  const int n = k - 1;
  for (; k < USHORT_RANGE; ++k) lut[k] = 0;
  return static_cast<unsigned short>(n); // maximum k where lut[k] is
} // non-zero, i.e. number of ones in bitmap minus 1
// Remap every sample in place through the lookup table.
static void applyLut(const unsigned short lut[USHORT_RANGE],
                     unsigned short data[/*nData*/], int nData) {
  for (unsigned short *p = data, *end = data + nData; p != end; ++p) {
    *p = lut[*p];
  }
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif // __clang__
#ifdef _MSC_VER
#pragma warning(pop)
#endif
// PIZ-compress `inSize` bytes of interleaved scanline data into outPtr:
// range-compress values through a LUT, wavelet-encode each channel, then
// Huffman-compress the result. Falls back to a raw copy when compression
// would grow the data (Issue 40). Output layout: min/max bitmap bounds,
// bitmap slice, 4-byte Huffman length, Huffman data.
static bool CompressPiz(unsigned char *outPtr, unsigned int *outSize,
                        const unsigned char *inPtr, size_t inSize,
                        const std::vector<ChannelInfo> &channelInfo,
                        int data_width, int num_lines) {
  std::vector<unsigned char> bitmap(BITMAP_SIZE);
  unsigned short minNonZero;
  unsigned short maxNonZero;
#if !TINYEXR_LITTLE_ENDIAN
  // @todo { PIZ compression on BigEndian architecture. }
  assert(0);
  return false;
#endif
  // Assume `inSize` is multiple of 2 or 4.
  std::vector<unsigned short> tmpBuffer(inSize / sizeof(unsigned short));
  std::vector<PIZChannelData> channelData(channelInfo.size());
  unsigned short *tmpBufferEnd = &tmpBuffer.at(0);
  // Partition tmpBuffer into one contiguous region per channel.
  // `size` is the channel's sample size in 16-bit units (1 = HALF, 2 = else).
  for (size_t c = 0; c < channelData.size(); c++) {
    PIZChannelData &cd = channelData[c];
    cd.start = tmpBufferEnd;
    cd.end = cd.start;
    cd.nx = data_width;
    cd.ny = num_lines;
    // cd.ys = c.channel().ySampling;
    size_t pixelSize = sizeof(int); // UINT and FLOAT
    if (channelInfo[c].requested_pixel_type == TINYEXR_PIXELTYPE_HALF) {
      pixelSize = sizeof(short);
    }
    cd.size = static_cast<int>(pixelSize / sizeof(short));
    tmpBufferEnd += cd.nx * cd.ny * cd.size;
  }
  // De-interleave: input is per-scanline, per-channel; copy each channel's
  // rows into its contiguous region.
  const unsigned char *ptr = inPtr;
  for (int y = 0; y < num_lines; ++y) {
    for (size_t i = 0; i < channelData.size(); ++i) {
      PIZChannelData &cd = channelData[i];
      // if (modp (y, cd.ys) != 0)
      // continue;
      size_t n = static_cast<size_t>(cd.nx * cd.size);
      memcpy(cd.end, ptr, n * sizeof(unsigned short));
      ptr += n * sizeof(unsigned short);
      cd.end += n;
    }
  }
  bitmapFromData(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()),
                 bitmap.data(), minNonZero, maxNonZero);
  std::vector<unsigned short> lut(USHORT_RANGE);
  unsigned short maxValue = forwardLutFromBitmap(bitmap.data(), lut.data());
  applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()));
  //
  // Store range compression info in _outBuffer
  //
  char *buf = reinterpret_cast<char *>(outPtr);
  memcpy(buf, &minNonZero, sizeof(unsigned short));
  buf += sizeof(unsigned short);
  memcpy(buf, &maxNonZero, sizeof(unsigned short));
  buf += sizeof(unsigned short);
  if (minNonZero <= maxNonZero) {
    memcpy(buf, reinterpret_cast<char *>(&bitmap[0] + minNonZero),
           maxNonZero - minNonZero + 1);
    buf += maxNonZero - minNonZero + 1;
  }
  //
  // Apply wavelet encoding
  //
  for (size_t i = 0; i < channelData.size(); ++i) {
    PIZChannelData &cd = channelData[i];
    for (int j = 0; j < cd.size; ++j) {
      wav2Encode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
                 maxValue);
    }
  }
  //
  // Apply Huffman encoding; append the result to _outBuffer
  //
  // length header(4byte), then huff data. Initialize length header with zero,
  // then later fill it by `length`.
  char *lengthPtr = buf;
  int zero = 0;
  memcpy(buf, &zero, sizeof(int));
  buf += sizeof(int);
  int length =
      hufCompress(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), buf);
  memcpy(lengthPtr, &length, sizeof(int));
  (*outSize) = static_cast<unsigned int>(
      (reinterpret_cast<unsigned char *>(buf) - outPtr) +
      static_cast<unsigned int>(length));
  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if ((*outSize) >= inSize) {
    (*outSize) = static_cast<unsigned int>(inSize);
    memcpy(outPtr, inPtr, inSize);
  }
  return true;
}
// Inverse of CompressPiz(): Huffman-decode, wavelet-decode per channel,
// remap through the reverse LUT, and re-interleave scanlines into outPtr.
//
// Fixes: (1) the hufUncompress() return value was ignored, so corrupt
// Huffman data was wavelet-decoded as garbage; (2) the per-channel layout
// derived from the header was never checked against the decode buffer size,
// allowing an out-of-bounds wavelet decode on hostile headers.
static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr,
                          size_t tmpBufSize, size_t inLen, int num_channels,
                          const EXRChannelInfo *channels, int data_width,
                          int num_lines) {
  if (inLen == tmpBufSize) {
    // Data is not compressed(Issue 40).
    memcpy(outPtr, inPtr, inLen);
    return true;
  }
  std::vector<unsigned char> bitmap(BITMAP_SIZE);
  unsigned short minNonZero;
  unsigned short maxNonZero;
#if !TINYEXR_LITTLE_ENDIAN
  // @todo { PIZ compression on BigEndian architecture. }
  assert(0);
  return false;
#endif
  memset(bitmap.data(), 0, BITMAP_SIZE);
  // Header: two 16-bit bitmap bounds, then the bitmap slice they delimit.
  const unsigned char *ptr = inPtr;
  tinyexr::cpy2(&minNonZero, reinterpret_cast<const unsigned short *>(ptr));
  tinyexr::cpy2(&maxNonZero, reinterpret_cast<const unsigned short *>(ptr + 2));
  ptr += 4;
  if (maxNonZero >= BITMAP_SIZE) {
    return false;
  }
  if (minNonZero <= maxNonZero) {
    memcpy(reinterpret_cast<char *>(&bitmap[0] + minNonZero), ptr,
           maxNonZero - minNonZero + 1);
    ptr += maxNonZero - minNonZero + 1;
  }
  std::vector<unsigned short> lut(USHORT_RANGE);
  memset(lut.data(), 0, sizeof(unsigned short) * USHORT_RANGE);
  unsigned short maxValue = reverseLutFromBitmap(bitmap.data(), lut.data());
  //
  // Huffman decoding
  //
  int length;
  tinyexr::cpy4(&length, reinterpret_cast<const int *>(ptr));
  ptr += sizeof(int);
  if (size_t((ptr - inPtr) + length) > inLen) {
    return false;
  }
  std::vector<unsigned short> tmpBuffer(tmpBufSize);
  // Propagate decode failure instead of wavelet-decoding garbage.
  if (!hufUncompress(reinterpret_cast<const char *>(ptr), length,
                     &tmpBuffer)) {
    return false;
  }
  //
  // Wavelet decoding
  //
  std::vector<PIZChannelData> channelData(static_cast<size_t>(num_channels));
  unsigned short *tmpBufferEnd = &tmpBuffer.at(0);
  for (size_t i = 0; i < static_cast<size_t>(num_channels); ++i) {
    const EXRChannelInfo &chan = channels[i];
    size_t pixelSize = sizeof(int); // UINT and FLOAT
    if (chan.pixel_type == TINYEXR_PIXELTYPE_HALF) {
      pixelSize = sizeof(short);
    }
    channelData[i].start = tmpBufferEnd;
    channelData[i].end = channelData[i].start;
    channelData[i].nx = data_width;
    channelData[i].ny = num_lines;
    // channelData[i].ys = 1;
    channelData[i].size = static_cast<int>(pixelSize / sizeof(short));
    tmpBufferEnd += channelData[i].nx * channelData[i].ny * channelData[i].size;
    // Reject headers whose channel layout would overrun the decode buffer.
    if (static_cast<size_t>(tmpBufferEnd - &tmpBuffer.at(0)) >
        tmpBuffer.size()) {
      return false;
    }
  }
  for (size_t i = 0; i < channelData.size(); ++i) {
    PIZChannelData &cd = channelData[i];
    for (int j = 0; j < cd.size; ++j) {
      wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
                 maxValue);
    }
  }
  //
  // Expand the pixel data to their original range
  //
  applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBufSize));
  // Re-interleave: write each channel's row back in scanline order.
  for (int y = 0; y < num_lines; y++) {
    for (size_t i = 0; i < channelData.size(); ++i) {
      PIZChannelData &cd = channelData[i];
      // if (modp (y, cd.ys) != 0)
      // continue;
      size_t n = static_cast<size_t>(cd.nx * cd.size);
      memcpy(outPtr, cd.end, static_cast<size_t>(n * sizeof(unsigned short)));
      outPtr += n * sizeof(unsigned short);
      cd.end += n;
    }
  }
  return true;
}
#endif // TINYEXR_USE_PIZ
#if TINYEXR_USE_ZFP
// Parameters for the (optional) ZFP codec. Exactly one of rate / precision /
// tolerance is meaningful, selected by `type`. __pad members keep the layout
// of the original declaration and are intentionally left uninitialized.
struct ZFPCompressionParam {
  double rate;
  unsigned int precision;
  unsigned int __pad0;
  double tolerance;
  int type; // TINYEXR_ZFP_COMPRESSIONTYPE_*
  unsigned int __pad1;
  // Default: fixed-rate mode at 2.0 bits per value.
  ZFPCompressionParam()
      : rate(2.0),
        precision(0),
        tolerance(0.0),
        type(TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {}
};
// Extract ZFP codec parameters from the EXR attribute list. Requires a
// 1-byte `zfpCompressionType` attribute, then the mode-specific attribute
// (rate / precision / tolerance). Returns false (with *err appended) when
// anything is missing or malformed.
//
// Fix: the PRECISION branch previously assigned the value to `param->rate`
// instead of `param->precision`, so the precision setting was silently lost.
static bool FindZFPCompressionParam(ZFPCompressionParam *param,
                                    const EXRAttribute *attributes,
                                    int num_attributes, std::string *err) {
  bool foundType = false;
  for (int i = 0; i < num_attributes; i++) {
    if ((strcmp(attributes[i].name, "zfpCompressionType") == 0)) {
      if (attributes[i].size == 1) {
        param->type = static_cast<int>(attributes[i].value[0]);
        foundType = true;
        break;
      } else {
        if (err) {
          (*err) +=
              "zfpCompressionType attribute must be uchar(1 byte) type.\n";
        }
        return false;
      }
    }
  }
  if (!foundType) {
    if (err) {
      (*err) += "`zfpCompressionType` attribute not found.\n";
    }
    return false;
  }
  if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionRate") == 0) &&
          (attributes[i].size == 8)) {
        param->rate = *(reinterpret_cast<double *>(attributes[i].value));
        return true;
      }
    }
    if (err) {
      (*err) += "`zfpCompressionRate` attribute not found.\n";
    }
  } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionPrecision") == 0) &&
          (attributes[i].size == 4)) {
        // Store into `precision` (was mistakenly written to `rate`).
        param->precision = static_cast<unsigned int>(
            *(reinterpret_cast<int *>(attributes[i].value)));
        return true;
      }
    }
    if (err) {
      (*err) += "`zfpCompressionPrecision` attribute not found.\n";
    }
  } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionTolerance") == 0) &&
          (attributes[i].size == 8)) {
        param->tolerance = *(reinterpret_cast<double *>(attributes[i].value));
        return true;
      }
    }
    if (err) {
      (*err) += "`zfpCompressionTolerance` attribute not found.\n";
    }
  } else {
    if (err) {
      (*err) += "Unknown value specified for `zfpCompressionType`.\n";
    }
  }
  return false;
}
// Assume pixel format is FLOAT for all channels.
static bool DecompressZfp(float *dst, int dst_width, int dst_num_lines,
size_t num_channels, const unsigned char *src,
unsigned long src_size,
const ZFPCompressionParam ¶m) {
size_t uncompressed_size =
size_t(dst_width) * size_t(dst_num_lines) * num_channels;
if (uncompressed_size == src_size) {
// Data is not compressed(Issue 40).
memcpy(dst, src, src_size);
}
zfp_stream *zfp = NULL;
zfp_field *field = NULL;
assert((dst_width % 4) == 0);
assert((dst_num_lines % 4) == 0);
if ((size_t(dst_width) & 3U) || (size_t(dst_num_lines) & 3U)) {
return false;
}
field =
zfp_field_2d(reinterpret_cast<void *>(const_cast<unsigned char *>(src)),
zfp_type_float, static_cast<unsigned int>(dst_width),
static_cast<unsigned int>(dst_num_lines) *
static_cast<unsigned int>(num_channels));
zfp = zfp_stream_open(NULL);
if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
zfp_stream_set_rate(zfp, param.rate, zfp_type_float, /* dimension */ 2,
/* write random access */ 0);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
zfp_stream_set_precision(zfp, param.precision);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
zfp_stream_set_accuracy(zfp, param.tolerance);
} else {
assert(0);
}
size_t buf_size = zfp_stream_maximum_size(zfp, field);
std::vector<unsigned char> buf(buf_size);
memcpy(&buf.at(0), src, src_size);
bitstream *stream = stream_open(&buf.at(0), buf_size);
zfp_stream_set_bit_stream(zfp, stream);
zfp_stream_rewind(zfp);
size_t image_size = size_t(dst_width) * size_t(dst_num_lines);
for (size_t c = 0; c < size_t(num_channels); c++) {
// decompress 4x4 pixel block.
for (size_t y = 0; y < size_t(dst_num_lines); y += 4) {
for (size_t x = 0; x < size_t(dst_width); x += 4) {
float fblock[16];
zfp_decode_block_float_2(zfp, fblock);
for (size_t j = 0; j < 4; j++) {
for (size_t i = 0; i < 4; i++) {
dst[c * image_size + ((y + j) * size_t(dst_width) + (x + i))] =
fblock[j * 4 + i];
}
}
}
}
}
zfp_field_free(field);
zfp_stream_close(zfp);
stream_close(stream);
return true;
}
// Assume pixel format is FLOAT for all channels.
// ZFP-compress the channel planes of inPtr into outBuf (resized to the
// stream's maximum size); *outSize receives the actual compressed byte
// count. Width and height must be multiples of 4 (ZFP block size).
static bool CompressZfp(std::vector<unsigned char> *outBuf,
                        unsigned int *outSize, const float *inPtr, int width,
                        int num_lines, int num_channels,
                        const ZFPCompressionParam &param) {
  zfp_stream *zfp = NULL;
  zfp_field *field = NULL;
  assert((width % 4) == 0);
  assert((num_lines % 4) == 0);
  if ((size_t(width) & 3U) || (size_t(num_lines) & 3U)) {
    return false;
  }
  // create input array.
  // All channel planes are described as one 2-D field stacked vertically.
  field = zfp_field_2d(reinterpret_cast<void *>(const_cast<float *>(inPtr)),
                       zfp_type_float, static_cast<unsigned int>(width),
                       static_cast<unsigned int>(num_lines * num_channels));
  zfp = zfp_stream_open(NULL);
  if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
    zfp_stream_set_rate(zfp, param.rate, zfp_type_float, 2, 0);
  } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
    zfp_stream_set_precision(zfp, param.precision);
  } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
    zfp_stream_set_accuracy(zfp, param.tolerance);
  } else {
    assert(0);
  }
  size_t buf_size = zfp_stream_maximum_size(zfp, field);
  outBuf->resize(buf_size);
  bitstream *stream = stream_open(&outBuf->at(0), buf_size);
  zfp_stream_set_bit_stream(zfp, stream);
  // The field descriptor is only needed for the size query above.
  zfp_field_free(field);
  size_t image_size = size_t(width) * size_t(num_lines);
  for (size_t c = 0; c < size_t(num_channels); c++) {
    // compress 4x4 pixel block.
    for (size_t y = 0; y < size_t(num_lines); y += 4) {
      for (size_t x = 0; x < size_t(width); x += 4) {
        float fblock[16];
        for (size_t j = 0; j < 4; j++) {
          for (size_t i = 0; i < 4; i++) {
            fblock[j * 4 + i] =
                inPtr[c * image_size + ((y + j) * size_t(width) + (x + i))];
          }
        }
        zfp_encode_block_float_2(zfp, fblock);
      }
    }
  }
  zfp_stream_flush(zfp);
  (*outSize) = static_cast<unsigned int>(zfp_stream_compressed_size(zfp));
  zfp_stream_close(zfp);
  return true;
}
#endif
//
// -----------------------------------------------------------------
//
// heuristics
#define TINYEXR_DIMENSION_THRESHOLD (1024 * 8192)
// TODO(syoyo): Refactor function arguments.
// Decodes one chunk of (possibly compressed) scanline/tile pixel data into
// per-channel destination images.
//
// out_images           : per-channel destination buffers; the element type of
//                        each buffer is given by `requested_pixel_types`
//                        (HALF source channels may be converted to FLOAT).
// data_ptr / data_len  : compressed (or raw) source bytes for this chunk.
// width / num_lines    : dimensions of the decoded block.
// x_stride             : row stride (in pixels) of the destination images.
// y / line_no          : destination scanline position of the first line
//                        (`y` is used for uncompressed data, `line_no` for
//                        compressed data).
// line_order           : 0 = increasing Y, otherwise decreasing Y.
// Returns false on invalid or corrupt input.
static bool DecodePixelData(/* out */ unsigned char **out_images,
                            const int *requested_pixel_types,
                            const unsigned char *data_ptr, size_t data_len,
                            int compression_type, int line_order, int width,
                            int height, int x_stride, int y, int line_no,
                            int num_lines, size_t pixel_data_size,
                            size_t num_attributes,
                            const EXRAttribute *attributes, size_t num_channels,
                            const EXRChannelInfo *channels,
                            const std::vector<size_t> &channel_offset_list) {
  if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {  // PIZ
#if TINYEXR_USE_PIZ
    if ((width == 0) || (num_lines == 0) || (pixel_data_size == 0)) {
      // Invalid input #90
      return false;
    }

    // Allocate original data size.
    // NOTE: cast each factor before multiplying to avoid `int` overflow in
    // `width * num_lines`.
    std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
                                      static_cast<size_t>(num_lines) *
                                      pixel_data_size);
    size_t tmpBufLen = outBuf.size();

    bool ret = tinyexr::DecompressPiz(
        reinterpret_cast<unsigned char *>(&outBuf.at(0)), data_ptr, tmpBufLen,
        data_len, static_cast<int>(num_channels), channels, width, num_lines);

    if (!ret) {
      return false;
    }

    // For PIZ_COMPRESSION:
    //   pixel sample data for channel 0 for scanline 0
    //   pixel sample data for channel 1 for scanline 0
    //   pixel sample data for channel ... for scanline 0
    //   pixel sample data for channel n for scanline 0
    //   pixel sample data for channel 0 for scanline 1
    //   pixel sample data for channel 1 for scanline 1
    //   pixel sample data for channel ... for scanline 1
    //   pixel sample data for channel n for scanline 1
    //   ...
    for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
      if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            FP16 hf;

            // hf.u = line_ptr[u];
            // use `cpy` to avoid unaligned memory access when compiler's
            // optimization is on.
            tinyexr::cpy2(&(hf.u), line_ptr + u);

            tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));

            if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
              unsigned short *image =
                  reinterpret_cast<unsigned short **>(out_images)[c];
              if (line_order == 0) {
                image += (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                image += static_cast<size_t>(
                             (height - 1 - (line_no + static_cast<int>(v)))) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              *image = hf.u;
            } else {  // HALF -> FLOAT
              FP32 f32 = half_to_float(hf);
              float *image = reinterpret_cast<float **>(out_images)[c];
              size_t offset = 0;
              if (line_order == 0) {
                offset = (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                offset = static_cast<size_t>(
                             (height - 1 - (line_no + static_cast<int>(v)))) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              image += offset;
              *image = f32.f;
            }
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);

        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            unsigned int val;
            // val = line_ptr[u];
            tinyexr::cpy4(&val, line_ptr + u);

            tinyexr::swap4(&val);

            unsigned int *image =
                reinterpret_cast<unsigned int **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += static_cast<size_t>(
                           (height - 1 - (line_no + static_cast<int>(v)))) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          // The PIZ-decompressed buffer is laid out with `width` pixels per
          // line (not `x_stride`); index it consistently with the HALF/UINT
          // branches above. Using x_stride here over-indexed edge tiles
          // whose width is smaller than the stride.
          const float *line_ptr = reinterpret_cast<float *>(&outBuf.at(
              v * pixel_data_size * static_cast<size_t>(width) +
              channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            float val;
            // val = line_ptr[u];
            tinyexr::cpy4(&val, line_ptr + u);

            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));

            float *image = reinterpret_cast<float **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += static_cast<size_t>(
                           (height - 1 - (line_no + static_cast<int>(v)))) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else {
        assert(0);
      }
    }
#else
    assert(0 && "PIZ is not enabled in this build");
    return false;
#endif

  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS ||
             compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    // Allocate original data size.
    std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
                                      static_cast<size_t>(num_lines) *
                                      pixel_data_size);

    unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
    assert(dstLen > 0);
    if (dstLen == 0) {
      // Guard `outBuf.at(0)` below against an empty buffer (NDEBUG builds).
      return false;
    }
    if (!tinyexr::DecompressZip(
            reinterpret_cast<unsigned char *>(&outBuf.at(0)), &dstLen, data_ptr,
            static_cast<unsigned long>(data_len))) {
      return false;
    }

    // For ZIP_COMPRESSION:
    //   pixel sample data for channel 0 for scanline 0
    //   pixel sample data for channel 1 for scanline 0
    //   pixel sample data for channel ... for scanline 0
    //   pixel sample data for channel n for scanline 0
    //   pixel sample data for channel 0 for scanline 1
    //   pixel sample data for channel 1 for scanline 1
    //   pixel sample data for channel ... for scanline 1
    //   pixel sample data for channel n for scanline 1
    //   ...
    for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
      if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
              &outBuf.at(v * static_cast<size_t>(pixel_data_size) *
                             static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            tinyexr::FP16 hf;

            // hf.u = line_ptr[u];
            tinyexr::cpy2(&(hf.u), line_ptr + u);

            tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));

            if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
              unsigned short *image =
                  reinterpret_cast<unsigned short **>(out_images)[c];
              if (line_order == 0) {
                image += (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                image += (static_cast<size_t>(height) - 1U -
                          (static_cast<size_t>(line_no) + v)) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              *image = hf.u;
            } else {  // HALF -> FLOAT
              tinyexr::FP32 f32 = half_to_float(hf);
              float *image = reinterpret_cast<float **>(out_images)[c];
              size_t offset = 0;
              if (line_order == 0) {
                offset = (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                offset = (static_cast<size_t>(height) - 1U -
                          (static_cast<size_t>(line_no) + v)) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              image += offset;
              *image = f32.f;
            }
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);

        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            unsigned int val;
            // val = line_ptr[u];
            tinyexr::cpy4(&val, line_ptr + u);

            tinyexr::swap4(&val);

            unsigned int *image =
                reinterpret_cast<unsigned int **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += (static_cast<size_t>(height) - 1U -
                        (static_cast<size_t>(line_no) + v)) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const float *line_ptr = reinterpret_cast<float *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            float val;
            // val = line_ptr[u];
            tinyexr::cpy4(&val, line_ptr + u);

            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));

            float *image = reinterpret_cast<float **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += (static_cast<size_t>(height) - 1U -
                        (static_cast<size_t>(line_no) + v)) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else {
        assert(0);
        return false;
      }
    }
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
    // Allocate original data size.
    std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
                                      static_cast<size_t>(num_lines) *
                                      pixel_data_size);

    unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
    if (dstLen == 0) {
      return false;
    }

    if (!tinyexr::DecompressRle(
            reinterpret_cast<unsigned char *>(&outBuf.at(0)), dstLen, data_ptr,
            static_cast<unsigned long>(data_len))) {
      return false;
    }

    // For RLE_COMPRESSION:
    //   pixel sample data for channel 0 for scanline 0
    //   pixel sample data for channel 1 for scanline 0
    //   pixel sample data for channel ... for scanline 0
    //   pixel sample data for channel n for scanline 0
    //   pixel sample data for channel 0 for scanline 1
    //   pixel sample data for channel 1 for scanline 1
    //   pixel sample data for channel ... for scanline 1
    //   pixel sample data for channel n for scanline 1
    //   ...
    for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
      if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
              &outBuf.at(v * static_cast<size_t>(pixel_data_size) *
                             static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            tinyexr::FP16 hf;

            // hf.u = line_ptr[u];
            tinyexr::cpy2(&(hf.u), line_ptr + u);

            tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));

            if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
              unsigned short *image =
                  reinterpret_cast<unsigned short **>(out_images)[c];
              if (line_order == 0) {
                image += (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                image += (static_cast<size_t>(height) - 1U -
                          (static_cast<size_t>(line_no) + v)) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              *image = hf.u;
            } else {  // HALF -> FLOAT
              tinyexr::FP32 f32 = half_to_float(hf);
              float *image = reinterpret_cast<float **>(out_images)[c];
              if (line_order == 0) {
                image += (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                image += (static_cast<size_t>(height) - 1U -
                          (static_cast<size_t>(line_no) + v)) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              *image = f32.f;
            }
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);

        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            unsigned int val;
            // val = line_ptr[u];
            tinyexr::cpy4(&val, line_ptr + u);

            tinyexr::swap4(&val);

            unsigned int *image =
                reinterpret_cast<unsigned int **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += (static_cast<size_t>(height) - 1U -
                        (static_cast<size_t>(line_no) + v)) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const float *line_ptr = reinterpret_cast<float *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            float val;
            // val = line_ptr[u];
            tinyexr::cpy4(&val, line_ptr + u);

            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));

            float *image = reinterpret_cast<float **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += (static_cast<size_t>(height) - 1U -
                        (static_cast<size_t>(line_no) + v)) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else {
        assert(0);
        return false;
      }
    }
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
    tinyexr::ZFPCompressionParam zfp_compression_param;
    std::string e;
    if (!tinyexr::FindZFPCompressionParam(&zfp_compression_param, attributes,
                                          int(num_attributes), &e)) {
      // This code path should not be reachable.
      assert(0);
      return false;
    }

    // Allocate original data size.
    std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
                                      static_cast<size_t>(num_lines) *
                                      pixel_data_size);

    unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
    assert(dstLen > 0);
    if (dstLen == 0) {
      // Guard `outBuf.at(0)` below against an empty buffer (NDEBUG builds).
      return false;
    }
    tinyexr::DecompressZfp(reinterpret_cast<float *>(&outBuf.at(0)), width,
                           num_lines, num_channels, data_ptr,
                           static_cast<unsigned long>(data_len),
                           zfp_compression_param);

    // For ZFP_COMPRESSION:
    //   pixel sample data for channel 0 for scanline 0
    //   pixel sample data for channel 1 for scanline 0
    //   pixel sample data for channel ... for scanline 0
    //   pixel sample data for channel n for scanline 0
    //   pixel sample data for channel 0 for scanline 1
    //   pixel sample data for channel 1 for scanline 1
    //   pixel sample data for channel ... for scanline 1
    //   pixel sample data for channel n for scanline 1
    //   ...
    for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
      // ZFP decoding in tinyexr only supports FLOAT channels.
      assert(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT);
      if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const float *line_ptr = reinterpret_cast<float *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            float val;
            tinyexr::cpy4(&val, line_ptr + u);

            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));

            float *image = reinterpret_cast<float **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += (static_cast<size_t>(height) - 1U -
                        (static_cast<size_t>(line_no) + v)) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else {
        assert(0);
        return false;
      }
    }
#else
    (void)attributes;
    (void)num_attributes;
    (void)num_channels;
    assert(0);
    return false;
#endif
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
    for (size_t c = 0; c < num_channels; c++) {
      for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
        if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
          const unsigned short *line_ptr =
              reinterpret_cast<const unsigned short *>(
                  data_ptr + v * pixel_data_size * size_t(width) +
                  channel_offset_list[c] * static_cast<size_t>(width));

          if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
            unsigned short *outLine =
                reinterpret_cast<unsigned short *>(out_images[c]);
            if (line_order == 0) {
              outLine += (size_t(y) + v) * size_t(x_stride);
            } else {
              outLine +=
                  (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
            }

            // Bounds check mirroring the HALF -> FLOAT branch below.
            if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
                (data_ptr + data_len)) {
              // Insufficient data size
              return false;
            }

            for (int u = 0; u < width; u++) {
              tinyexr::FP16 hf;

              // hf.u = line_ptr[u];
              tinyexr::cpy2(&(hf.u), line_ptr + u);

              tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));

              outLine[u] = hf.u;
            }
          } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
            float *outLine = reinterpret_cast<float *>(out_images[c]);
            if (line_order == 0) {
              outLine += (size_t(y) + v) * size_t(x_stride);
            } else {
              outLine +=
                  (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
            }

            if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
                (data_ptr + data_len)) {
              // Insufficient data size
              return false;
            }

            for (int u = 0; u < width; u++) {
              tinyexr::FP16 hf;

              // address may not be aligned. use byte-wise copy for safety.#76
              // hf.u = line_ptr[u];
              tinyexr::cpy2(&(hf.u), line_ptr + u);

              tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));

              tinyexr::FP32 f32 = half_to_float(hf);

              outLine[u] = f32.f;
            }
          } else {
            assert(0);
            return false;
          }
        } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
          const float *line_ptr = reinterpret_cast<const float *>(
              data_ptr + v * pixel_data_size * size_t(width) +
              channel_offset_list[c] * static_cast<size_t>(width));

          float *outLine = reinterpret_cast<float *>(out_images[c]);
          if (line_order == 0) {
            outLine += (size_t(y) + v) * size_t(x_stride);
          } else {
            outLine +=
                (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
          }

          if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
              (data_ptr + data_len)) {
            // Insufficient data size
            return false;
          }

          for (int u = 0; u < width; u++) {
            float val;
            tinyexr::cpy4(&val, line_ptr + u);

            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));

            outLine[u] = val;
          }
        } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
          const unsigned int *line_ptr = reinterpret_cast<const unsigned int *>(
              data_ptr + v * pixel_data_size * size_t(width) +
              channel_offset_list[c] * static_cast<size_t>(width));

          unsigned int *outLine =
              reinterpret_cast<unsigned int *>(out_images[c]);
          if (line_order == 0) {
            outLine += (size_t(y) + v) * size_t(x_stride);
          } else {
            outLine +=
                (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
          }

          // Whole-line bounds check (the previous per-element check compared
          // only the element's start address, allowing a 3-byte over-read of
          // the final element on corrupted data).
          if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
              (data_ptr + data_len)) {
            // Insufficient data size; corrupted data?
            return false;
          }

          for (int u = 0; u < width; u++) {
            unsigned int val;
            tinyexr::cpy4(&val, line_ptr + u);

            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));

            outLine[u] = val;
          }
        }
      }
    }
  }

  return true;
}
// Decodes one tile of pixel data. `data_width`/`data_height` are the
// dimensions of the current (sub)level; the actual tile extent (clamped at
// the level boundary for edge tiles) is returned through `width`/`height`.
static bool DecodeTiledPixelData(
    unsigned char **out_images, int *width, int *height,
    const int *requested_pixel_types, const unsigned char *data_ptr,
    size_t data_len, int compression_type, int line_order, int data_width,
    int data_height, int tile_offset_x, int tile_offset_y, int tile_size_x,
    int tile_size_y, size_t pixel_data_size, size_t num_attributes,
    const EXRAttribute *attributes, size_t num_channels,
    const EXRChannelInfo *channels,
    const std::vector<size_t> &channel_offset_list) {
  // Reject tiles whose origin lies outside of the level.
  if ((tile_size_x * tile_offset_x > data_width) ||
      (tile_size_y * tile_offset_y > data_height)) {
    return false;
  }

  // Compute the actual image size inside this tile; tiles on the right and
  // bottom edges may be smaller than the nominal tile size.
  const int x_begin = tile_offset_x * tile_size_x;
  const int y_begin = tile_offset_y * tile_size_y;
  (*width) =
      (x_begin + tile_size_x >= data_width) ? (data_width - x_begin)
                                            : tile_size_x;
  (*height) =
      (y_begin + tile_size_y >= data_height) ? (data_height - y_begin)
                                             : tile_size_y;

  // Decode into the tile-sized destination (stride = nominal tile width).
  return DecodePixelData(out_images, requested_pixel_types, data_ptr, data_len,
                         compression_type, line_order, (*width), tile_size_y,
                         /* stride */ tile_size_x, /* y */ 0, /* line_no */ 0,
                         (*height), pixel_data_size, num_attributes, attributes,
                         num_channels, channels, channel_offset_list);
}
// Computes the interleaved per-pixel layout of `num_channels` channels:
// fills `channel_offset_list` with each channel's byte offset within one
// pixel, and reports the total bytes per pixel via `pixel_data_size` and
// `channel_offset`. Returns false on an unknown pixel type.
static bool ComputeChannelLayout(std::vector<size_t> *channel_offset_list,
                                 int *pixel_data_size, size_t *channel_offset,
                                 int num_channels,
                                 const EXRChannelInfo *channels) {
  channel_offset_list->resize(static_cast<size_t>(num_channels));

  (*pixel_data_size) = 0;
  (*channel_offset) = 0;

  for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
    (*channel_offset_list)[c] = (*channel_offset);

    size_t component_size = 0;
    switch (channels[c].pixel_type) {
      case TINYEXR_PIXELTYPE_HALF:
        component_size = sizeof(unsigned short);
        break;
      case TINYEXR_PIXELTYPE_FLOAT:
        component_size = sizeof(float);
        break;
      case TINYEXR_PIXELTYPE_UINT:
        component_size = sizeof(unsigned int);
        break;
      default:
        // Unknown pixel type.
        return false;
    }

    (*pixel_data_size) += static_cast<int>(component_size);
    (*channel_offset) += component_size;
  }
  return true;
}
// Allocates one destination buffer per channel, each sized for
// data_width x data_height pixels of the *requested* pixel type (a HALF
// source channel may be promoted to FLOAT on request).
// Returns a malloc'ed array of malloc'ed per-channel buffers; the caller
// owns both levels. Slots for unsupported type combinations are set to NULL.
static unsigned char **AllocateImage(int num_channels,
                                     const EXRChannelInfo *channels,
                                     const int *requested_pixel_types,
                                     int data_width, int data_height) {
  unsigned char **images =
      reinterpret_cast<unsigned char **>(static_cast<float **>(
          malloc(sizeof(float *) * static_cast<size_t>(num_channels))));

  for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
    size_t data_len =
        static_cast<size_t>(data_width) * static_cast<size_t>(data_height);
    if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
      // Alloc internal image for half type.
      if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
        images[c] =
            reinterpret_cast<unsigned char *>(static_cast<unsigned short *>(
                malloc(sizeof(unsigned short) * data_len)));
      } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
        images[c] = reinterpret_cast<unsigned char *>(
            static_cast<float *>(malloc(sizeof(float) * data_len)));
      } else {
        // Avoid leaving the slot uninitialized (UB on a later free()) when
        // asserts are compiled out.
        images[c] = NULL;
        assert(0);
      }
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
      images[c] = reinterpret_cast<unsigned char *>(
          static_cast<float *>(malloc(sizeof(float) * data_len)));
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
      images[c] = reinterpret_cast<unsigned char *>(
          static_cast<unsigned int *>(malloc(sizeof(unsigned int) * data_len)));
    } else {
      // Unknown pixel type: keep the slot NULL so callers can free() safely.
      images[c] = NULL;
      assert(0);
    }
  }

  return images;
}
#ifdef _WIN32
// Converts a UTF-8 encoded string to a UTF-16 std::wstring (Win32 only).
static inline std::wstring UTF8ToWchar(const std::string &str) {
  // First query the required length, then perform the actual conversion.
  const int required_len =
      MultiByteToWideChar(CP_UTF8, 0, str.data(), (int)str.size(), NULL, 0);
  std::wstring result(required_len, 0);
  MultiByteToWideChar(CP_UTF8, 0, str.data(), (int)str.size(), &result[0],
                      (int)result.size());
  return result;
}
#endif
// Parses one EXR (part) header starting at `buf` into `info`.
// For multipart files, a header consisting of a single NUL byte marks the
// end of the header list; `empty_header` is set accordingly.
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code (details in `err`).
static int ParseEXRHeader(HeaderInfo *info, bool *empty_header,
                          const EXRVersion *version, std::string *err,
                          const unsigned char *buf, size_t size) {
  const char *marker = reinterpret_cast<const char *>(&buf[0]);

  if (empty_header) {
    (*empty_header) = false;
  }

  if (version->multipart) {
    if (size > 0 && marker[0] == '\0') {
      // End of header list.
      if (empty_header) {
        (*empty_header) = true;
      }
      return TINYEXR_SUCCESS;
    }
  }

  // According to the spec, the header of every OpenEXR file must contain at
  // least the following attributes:
  //
  // channels chlist
  // compression compression
  // dataWindow box2i
  // displayWindow box2i
  // lineOrder lineOrder
  // pixelAspectRatio float
  // screenWindowCenter v2f
  // screenWindowWidth float
  bool has_channels = false;
  bool has_compression = false;
  bool has_data_window = false;
  bool has_display_window = false;
  bool has_line_order = false;
  bool has_pixel_aspect_ratio = false;
  bool has_screen_window_center = false;
  bool has_screen_window_width = false;
  bool has_name = false;
  bool has_type = false;

  info->name.clear();
  info->type.clear();

  info->data_window.min_x = 0;
  info->data_window.min_y = 0;
  info->data_window.max_x = 0;
  info->data_window.max_y = 0;
  info->line_order = 0;  // @fixme
  info->display_window.min_x = 0;
  info->display_window.min_y = 0;
  info->display_window.max_x = 0;
  info->display_window.max_y = 0;
  info->screen_window_center[0] = 0.0f;
  info->screen_window_center[1] = 0.0f;
  info->screen_window_width = -1.0f;
  info->pixel_aspect_ratio = -1.0f;

  info->tiled = 0;
  info->tile_size_x = -1;
  info->tile_size_y = -1;
  info->tile_level_mode = -1;
  info->tile_rounding_mode = -1;

  info->attributes.clear();

  // Read attributes
  size_t orig_size = size;
  for (size_t nattr = 0; nattr < TINYEXR_MAX_HEADER_ATTRIBUTES; nattr++) {
    if (0 == size) {
      if (err) {
        (*err) += "Insufficient data size for attributes.\n";
      }
      return TINYEXR_ERROR_INVALID_DATA;
    } else if (marker[0] == '\0') {
      size--;
      break;
    }

    std::string attr_name;
    std::string attr_type;
    std::vector<unsigned char> data;
    size_t marker_size;
    if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
                                marker, size)) {
      if (err) {
        (*err) += "Failed to read attribute.\n";
      }
      return TINYEXR_ERROR_INVALID_DATA;
    }
    marker += marker_size;
    size -= marker_size;

    // For a multipart file, the version field 9th bit is 0.
    if ((version->tiled || version->multipart || version->non_image) && attr_name.compare("tiles") == 0) {
      unsigned int x_size, y_size;
      unsigned char tile_mode;
      // A `tiledesc` attribute is two uints + one byte; reject anything else
      // before memcpy'ing (the previous assert vanished in NDEBUG builds,
      // permitting an out-of-bounds read on corrupted input).
      if (data.size() != 9) {
        if (err) {
          (*err) += "Invalid attribute data size for `tiles`.\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }
      memcpy(&x_size, &data.at(0), sizeof(int));
      memcpy(&y_size, &data.at(4), sizeof(int));
      tile_mode = data[8];
      tinyexr::swap4(&x_size);
      tinyexr::swap4(&y_size);

      if (x_size > static_cast<unsigned int>(std::numeric_limits<int>::max()) ||
          y_size > static_cast<unsigned int>(std::numeric_limits<int>::max())) {
        if (err) {
          (*err) = "Tile sizes were invalid.";
        }
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
      }

      info->tile_size_x = static_cast<int>(x_size);
      info->tile_size_y = static_cast<int>(y_size);

      // mode = levelMode + roundingMode * 16
      info->tile_level_mode = tile_mode & 0x3;
      info->tile_rounding_mode = (tile_mode >> 4) & 0x1;
      info->tiled = 1;
    } else if (attr_name.compare("compression") == 0) {
      // Guard the data[0] reads below against an empty attribute payload.
      if (data.empty()) {
        if (err) {
          (*err) += "Invalid attribute data size for `compression`.\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }
      bool ok = false;
      if (data[0] < TINYEXR_COMPRESSIONTYPE_PIZ) {
        ok = true;
      }

      if (data[0] == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
        ok = true;
#else
        if (err) {
          (*err) = "PIZ compression is not supported.";
        }
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
      }

      if (data[0] == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
        ok = true;
#else
        if (err) {
          (*err) = "ZFP compression is not supported.";
        }
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
      }

      if (!ok) {
        if (err) {
          (*err) = "Unknown compression type.";
        }
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
      }

      info->compression_type = static_cast<int>(data[0]);
      has_compression = true;

    } else if (attr_name.compare("channels") == 0) {
      // name: zero-terminated string, from 1 to 255 bytes long
      // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
      // pLinear: unsigned char, possible values are 0 and 1
      // reserved: three chars, should be zero
      // xSampling: int
      // ySampling: int
      if (!ReadChannelInfo(info->channels, data)) {
        if (err) {
          (*err) += "Failed to parse channel info.\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }

      if (info->channels.size() < 1) {
        if (err) {
          (*err) += "# of channels is zero.\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }

      has_channels = true;

    } else if (attr_name.compare("dataWindow") == 0) {
      if (data.size() >= 16) {
        memcpy(&info->data_window.min_x, &data.at(0), sizeof(int));
        memcpy(&info->data_window.min_y, &data.at(4), sizeof(int));
        memcpy(&info->data_window.max_x, &data.at(8), sizeof(int));
        memcpy(&info->data_window.max_y, &data.at(12), sizeof(int));
        tinyexr::swap4(&info->data_window.min_x);
        tinyexr::swap4(&info->data_window.min_y);
        tinyexr::swap4(&info->data_window.max_x);
        tinyexr::swap4(&info->data_window.max_y);
        has_data_window = true;
      }
    } else if (attr_name.compare("displayWindow") == 0) {
      if (data.size() >= 16) {
        memcpy(&info->display_window.min_x, &data.at(0), sizeof(int));
        memcpy(&info->display_window.min_y, &data.at(4), sizeof(int));
        memcpy(&info->display_window.max_x, &data.at(8), sizeof(int));
        memcpy(&info->display_window.max_y, &data.at(12), sizeof(int));
        tinyexr::swap4(&info->display_window.min_x);
        tinyexr::swap4(&info->display_window.min_y);
        tinyexr::swap4(&info->display_window.max_x);
        tinyexr::swap4(&info->display_window.max_y);
        has_display_window = true;
      }
    } else if (attr_name.compare("lineOrder") == 0) {
      if (data.size() >= 1) {
        info->line_order = static_cast<int>(data[0]);
        has_line_order = true;
      }
    } else if (attr_name.compare("pixelAspectRatio") == 0) {
      if (data.size() >= sizeof(float)) {
        memcpy(&info->pixel_aspect_ratio, &data.at(0), sizeof(float));
        tinyexr::swap4(&info->pixel_aspect_ratio);
        has_pixel_aspect_ratio = true;
      }
    } else if (attr_name.compare("screenWindowCenter") == 0) {
      if (data.size() >= 8) {
        memcpy(&info->screen_window_center[0], &data.at(0), sizeof(float));
        memcpy(&info->screen_window_center[1], &data.at(4), sizeof(float));
        tinyexr::swap4(&info->screen_window_center[0]);
        tinyexr::swap4(&info->screen_window_center[1]);
        has_screen_window_center = true;
      }
    } else if (attr_name.compare("screenWindowWidth") == 0) {
      if (data.size() >= sizeof(float)) {
        memcpy(&info->screen_window_width, &data.at(0), sizeof(float));
        tinyexr::swap4(&info->screen_window_width);

        has_screen_window_width = true;
      }
    } else if (attr_name.compare("chunkCount") == 0) {
      if (data.size() >= sizeof(int)) {
        memcpy(&info->chunk_count, &data.at(0), sizeof(int));
        tinyexr::swap4(&info->chunk_count);
      }
    } else if (attr_name.compare("name") == 0) {
      if (!data.empty() && data[0]) {
        data.push_back(0);
        size_t len = strlen(reinterpret_cast<const char*>(&data[0]));
        info->name.resize(len);
        info->name.assign(reinterpret_cast<const char*>(&data[0]), len);
        has_name = true;
      }
    } else if (attr_name.compare("type") == 0) {
      if (!data.empty() && data[0]) {
        data.push_back(0);
        size_t len = strlen(reinterpret_cast<const char*>(&data[0]));
        info->type.resize(len);
        info->type.assign(reinterpret_cast<const char*>(&data[0]), len);
        has_type = true;
      }
    } else {
      // Custom attribute(up to TINYEXR_MAX_CUSTOM_ATTRIBUTES)
      if (info->attributes.size() < TINYEXR_MAX_CUSTOM_ATTRIBUTES) {
        EXRAttribute attrib;
#ifdef _MSC_VER
        strncpy_s(attrib.name, attr_name.c_str(), 255);
        strncpy_s(attrib.type, attr_type.c_str(), 255);
#else
        strncpy(attrib.name, attr_name.c_str(), 255);
        strncpy(attrib.type, attr_type.c_str(), 255);
#endif
        attrib.name[255] = '\0';
        attrib.type[255] = '\0';
        attrib.size = static_cast<int>(data.size());
        if (data.empty()) {
          // Avoid data.at(0) throwing on an empty payload; callers may
          // free(value) so NULL is a safe sentinel here.
          attrib.value = NULL;
        } else {
          attrib.value = static_cast<unsigned char *>(malloc(data.size()));
          memcpy(reinterpret_cast<char *>(attrib.value), &data.at(0),
                 data.size());
        }
        info->attributes.push_back(attrib);
      }
    }
  }

  // Check if required attributes exist
  {
    std::stringstream ss_err;

    if (!has_compression) {
      ss_err << "\"compression\" attribute not found in the header."
             << std::endl;
    }

    if (!has_channels) {
      ss_err << "\"channels\" attribute not found in the header." << std::endl;
    }

    if (!has_line_order) {
      ss_err << "\"lineOrder\" attribute not found in the header." << std::endl;
    }

    if (!has_display_window) {
      ss_err << "\"displayWindow\" attribute not found in the header."
             << std::endl;
    }

    if (!has_data_window) {
      ss_err << "\"dataWindow\" attribute not found in the header or invalid."
             << std::endl;
    }

    if (!has_pixel_aspect_ratio) {
      ss_err << "\"pixelAspectRatio\" attribute not found in the header."
             << std::endl;
    }

    if (!has_screen_window_width) {
      ss_err << "\"screenWindowWidth\" attribute not found in the header."
             << std::endl;
    }

    if (!has_screen_window_center) {
      ss_err << "\"screenWindowCenter\" attribute not found in the header."
             << std::endl;
    }

    if (version->multipart || version->non_image) {
      if (!has_name) {
        ss_err << "\"name\" attribute not found in the header."
               << std::endl;
      }
      if (!has_type) {
        ss_err << "\"type\" attribute not found in the header."
               << std::endl;
      }
    }

    if (!(ss_err.str().empty())) {
      if (err) {
        (*err) += ss_err.str();
      }
      return TINYEXR_ERROR_INVALID_HEADER;
    }
  }

  info->header_len = static_cast<unsigned int>(orig_size - size);

  return TINYEXR_SUCCESS;
}
// C++ HeaderInfo to C EXRHeader conversion.
// Converts the internal C++ HeaderInfo into the public C EXRHeader.
// Allocates exr_header->channels, pixel_types, requested_pixel_types and
// (optionally) custom_attributes with malloc; ownership passes to the caller
// (released via FreeEXRHeader). Custom attribute `value` pointers are
// shallow-copied from `info`, not duplicated.
static void ConvertHeader(EXRHeader *exr_header, const HeaderInfo &info) {
  exr_header->pixel_aspect_ratio = info.pixel_aspect_ratio;
  exr_header->screen_window_center[0] = info.screen_window_center[0];
  exr_header->screen_window_center[1] = info.screen_window_center[1];
  exr_header->screen_window_width = info.screen_window_width;
  exr_header->chunk_count = info.chunk_count;
  exr_header->display_window.min_x = info.display_window.min_x;
  exr_header->display_window.min_y = info.display_window.min_y;
  exr_header->display_window.max_x = info.display_window.max_x;
  exr_header->display_window.max_y = info.display_window.max_y;
  exr_header->data_window.min_x = info.data_window.min_x;
  exr_header->data_window.min_y = info.data_window.min_y;
  exr_header->data_window.max_x = info.data_window.max_x;
  exr_header->data_window.max_y = info.data_window.max_y;
  exr_header->line_order = info.line_order;
  exr_header->compression_type = info.compression_type;
  exr_header->tiled = info.tiled;
  exr_header->tile_size_x = info.tile_size_x;
  exr_header->tile_size_y = info.tile_size_y;
  exr_header->tile_level_mode = info.tile_level_mode;
  exr_header->tile_rounding_mode = info.tile_rounding_mode;
  EXRSetNameAttr(exr_header, info.name.c_str());
  // Cross-check the optional `type` attribute against the tiled flag and mark
  // deep images via `non_image`.
  if (!info.type.empty()) {
    if (info.type == "scanlineimage") {
      assert(!exr_header->tiled);
    } else if (info.type == "tiledimage") {
      assert(exr_header->tiled);
    } else if (info.type == "deeptile") {
      exr_header->non_image = 1;
      assert(exr_header->tiled);
    } else if (info.type == "deepscanline") {
      exr_header->non_image = 1;
      assert(!exr_header->tiled);
    } else {
      assert(false);
    }
  }
  exr_header->num_channels = static_cast<int>(info.channels.size());
  exr_header->channels = static_cast<EXRChannelInfo *>(malloc(
      sizeof(EXRChannelInfo) * static_cast<size_t>(exr_header->num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
#ifdef _MSC_VER
    strncpy_s(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#else
    strncpy(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#endif
    // manually add '\0' for safety.
    exr_header->channels[c].name[255] = '\0';
    exr_header->channels[c].pixel_type = info.channels[c].pixel_type;
    exr_header->channels[c].p_linear = info.channels[c].p_linear;
    exr_header->channels[c].x_sampling = info.channels[c].x_sampling;
    exr_header->channels[c].y_sampling = info.channels[c].y_sampling;
  }
  exr_header->pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
    exr_header->pixel_types[c] = info.channels[c].pixel_type;
  }
  // Initially fill with values of `pixel_types`
  exr_header->requested_pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
    exr_header->requested_pixel_types[c] = info.channels[c].pixel_type;
  }
  exr_header->num_custom_attributes = static_cast<int>(info.attributes.size());
  if (exr_header->num_custom_attributes > 0) {
    // TODO(syoyo): Report warning when # of attributes exceeds
    // `TINYEXR_MAX_CUSTOM_ATTRIBUTES`
    if (exr_header->num_custom_attributes > TINYEXR_MAX_CUSTOM_ATTRIBUTES) {
      exr_header->num_custom_attributes = TINYEXR_MAX_CUSTOM_ATTRIBUTES;
    }
    exr_header->custom_attributes = static_cast<EXRAttribute *>(malloc(
        sizeof(EXRAttribute) * size_t(exr_header->num_custom_attributes)));
    // Copy only up to the clamped count. Iterating over the full
    // info.attributes.size() (as before) would write past the allocation
    // above whenever the source holds more than
    // TINYEXR_MAX_CUSTOM_ATTRIBUTES entries.
    for (size_t i = 0; i < size_t(exr_header->num_custom_attributes); i++) {
      memcpy(exr_header->custom_attributes[i].name, info.attributes[i].name,
             256);
      memcpy(exr_header->custom_attributes[i].type, info.attributes[i].type,
             256);
      exr_header->custom_attributes[i].size = info.attributes[i].size;
      // Just copy pointer
      exr_header->custom_attributes[i].value = info.attributes[i].value;
    }
  } else {
    exr_header->custom_attributes = NULL;
  }
  exr_header->header_len = info.header_len;
}
// Chunk offset tables for every resolution level of an image part.
// offsets[level][tile_y][tile_x] holds the absolute file offset of the
// corresponding chunk; scanline images use a single level (offsets[0][0][i]).
struct OffsetData {
  OffsetData() : num_x_levels(0), num_y_levels(0) {}
  std::vector<std::vector<std::vector <tinyexr::tinyexr_uint64> > > offsets;
  int num_x_levels;  // level count along x (1 for single-resolution images)
  int num_y_levels;  // level count along y
};
// Maps a (lx, ly) level pair to an index into OffsetData::offsets.
// - ONE_LEVEL: always 0.
// - MIPMAP: one table per level, indexed by lx alone.
// - RIPMAP: levels form a 2D grid stored row-major.
// Marked `static` for internal linkage, matching every other file-local
// helper here; a non-static definition in a header included by multiple
// translation units would produce duplicate-symbol link errors.
static int LevelIndex(int lx, int ly, int tile_level_mode, int num_x_levels) {
  switch (tile_level_mode) {
  case TINYEXR_TILE_ONE_LEVEL:
    return 0;
  case TINYEXR_TILE_MIPMAP_LEVELS:
    return lx;
  case TINYEXR_TILE_RIPMAP_LEVELS:
    return lx + ly * num_x_levels;
  default:
    assert(false);
  }
  return 0;
}
// Extent of mip/rip level `level` given the level-0 extent `toplevel_size`.
// TINYEXR_TILE_ROUND_UP rounds the halving division up; result is never < 1.
static int LevelSize(int toplevel_size, int level, int tile_rounding_mode) {
  assert(level >= 0);
  const int divisor = static_cast<int>(1u << static_cast<unsigned>(level));
  int result = toplevel_size / divisor;
  if (tile_rounding_mode == TINYEXR_TILE_ROUND_UP) {
    // Bump the quotient when the division truncated.
    if (result * divisor < toplevel_size) {
      result += 1;
    }
  }
  return std::max(result, 1);
}
// Decodes every tile of one resolution level into exr_image->tiles.
// exr_image->level_x / level_y select the level; offset_data supplies the
// absolute chunk offset of each tile. Tiles are decoded in parallel when
// C++11 threads (or OpenMP) are enabled. Returns TINYEXR_SUCCESS or
// TINYEXR_ERROR_INVALID_DATA, appending detail to *err when non-NULL.
static int DecodeTiledLevel(EXRImage* exr_image, const EXRHeader* exr_header,
  const OffsetData& offset_data,
  const std::vector<size_t>& channel_offset_list,
  int pixel_data_size,
  const unsigned char* head, const size_t size,
  std::string* err) {
  int num_channels = exr_header->num_channels;
  int level_index = LevelIndex(exr_image->level_x, exr_image->level_y, exr_header->tile_level_mode, offset_data.num_x_levels);
  int num_y_tiles = (int)offset_data.offsets[level_index].size();
  assert(num_y_tiles);
  int num_x_tiles = (int)offset_data.offsets[level_index][0].size();
  assert(num_x_tiles);
  int num_tiles = num_x_tiles * num_y_tiles;
  int err_code = TINYEXR_SUCCESS;
  // Per-tile failure reasons, OR-ed together (possibly from worker threads).
  enum {
    EF_SUCCESS = 0,
    EF_INVALID_DATA = 1,
    EF_INSUFFICIENT_DATA = 2,
    EF_FAILED_TO_DECODE = 4
  };
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
  std::atomic<unsigned> error_flag(EF_SUCCESS);
#else
  unsigned error_flag(EF_SUCCESS);
#endif
  // Although the spec says : "...the data window is subdivided into an array of smaller rectangles...",
  // the IlmImf library allows the dimensions of the tile to be larger (or equal) than the dimensions of the data window.
#if 0
  if ((exr_header->tile_size_x > exr_image->width || exr_header->tile_size_y > exr_image->height) &&
    exr_image->level_x == 0 && exr_image->level_y == 0) {
    if (err) {
      (*err) += "Failed to decode tile data.\n";
    }
    err_code = TINYEXR_ERROR_INVALID_DATA;
  }
#endif
  exr_image->tiles = static_cast<EXRTile*>(
    calloc(sizeof(EXRTile), static_cast<size_t>(num_tiles)));
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
  // Each worker thread pulls the next tile index from the shared atomic
  // counter until all tiles are claimed.
  std::vector<std::thread> workers;
  std::atomic<int> tile_count(0);
  int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
  if (num_threads > int(num_tiles)) {
    num_threads = int(num_tiles);
  }
  for (int t = 0; t < num_threads; t++) {
    workers.emplace_back(std::thread([&]()
      {
        int tile_idx = 0;
        while ((tile_idx = tile_count++) < num_tiles) {
#else
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
  for (int tile_idx = 0; tile_idx < num_tiles; tile_idx++) {
#endif
    // Allocate memory for each tile.
    exr_image->tiles[tile_idx].images = tinyexr::AllocateImage(
      num_channels, exr_header->channels,
      exr_header->requested_pixel_types, exr_header->tile_size_x,
      exr_header->tile_size_y);
    int x_tile = tile_idx % num_x_tiles;
    int y_tile = tile_idx / num_x_tiles;
    // 16 byte: tile coordinates
    // 4 byte : data size
    // ~      : data(uncompressed or compressed)
    tinyexr::tinyexr_uint64 offset = offset_data.offsets[level_index][y_tile][x_tile];
    if (offset + sizeof(int) * 5 > size) {
      // Insufficient data size.
      error_flag |= EF_INSUFFICIENT_DATA;
      continue;
    }
    size_t data_size =
      size_t(size - (offset + sizeof(int) * 5));
    const unsigned char* data_ptr =
      reinterpret_cast<const unsigned char*>(head + offset);
    int tile_coordinates[4];
    memcpy(tile_coordinates, data_ptr, sizeof(int) * 4);
    tinyexr::swap4(&tile_coordinates[0]);
    tinyexr::swap4(&tile_coordinates[1]);
    tinyexr::swap4(&tile_coordinates[2]);
    tinyexr::swap4(&tile_coordinates[3]);
    // The chunk's stored level must match the level being decoded.
    if (tile_coordinates[2] != exr_image->level_x) {
      // Invalid data.
      error_flag |= EF_INVALID_DATA;
      continue;
    }
    if (tile_coordinates[3] != exr_image->level_y) {
      // Invalid data.
      error_flag |= EF_INVALID_DATA;
      continue;
    }
    int data_len;
    memcpy(&data_len, data_ptr + 16,
           sizeof(int));  // 16 = sizeof(tile_coordinates)
    tinyexr::swap4(&data_len);
    if (data_len < 2 || size_t(data_len) > data_size) {
      // Insufficient data size.
      error_flag |= EF_INSUFFICIENT_DATA;
      continue;
    }
    // Move to data addr: 20 = 16 + 4;
    data_ptr += 20;
    bool ret = tinyexr::DecodeTiledPixelData(
      exr_image->tiles[tile_idx].images,
      &(exr_image->tiles[tile_idx].width),
      &(exr_image->tiles[tile_idx].height),
      exr_header->requested_pixel_types, data_ptr,
      static_cast<size_t>(data_len), exr_header->compression_type,
      exr_header->line_order,
      exr_image->width, exr_image->height,
      tile_coordinates[0], tile_coordinates[1], exr_header->tile_size_x,
      exr_header->tile_size_y, static_cast<size_t>(pixel_data_size),
      static_cast<size_t>(exr_header->num_custom_attributes),
      exr_header->custom_attributes,
      static_cast<size_t>(exr_header->num_channels),
      exr_header->channels, channel_offset_list);
    if (!ret) {
      // Failed to decode tile data.
      error_flag |= EF_FAILED_TO_DECODE;
    }
    exr_image->tiles[tile_idx].offset_x = tile_coordinates[0];
    exr_image->tiles[tile_idx].offset_y = tile_coordinates[1];
    exr_image->tiles[tile_idx].level_x = tile_coordinates[2];
    exr_image->tiles[tile_idx].level_y = tile_coordinates[3];
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
        }
      }));
  }  // num_thread loop
  for (auto& t : workers) {
    t.join();
  }
#else
  }  // parallel for
#endif
  // Even in the event of an error, the reserved memory may be freed.
  exr_image->num_channels = num_channels;
  exr_image->num_tiles = static_cast<int>(num_tiles);
  if (error_flag) err_code = TINYEXR_ERROR_INVALID_DATA;
  if (err) {
    if (error_flag & EF_INSUFFICIENT_DATA) {
      (*err) += "Insufficient data length.\n";
    }
    if (error_flag & EF_FAILED_TO_DECODE) {
      (*err) += "Failed to decode tile data.\n";
    }
  }
  return err_code;
}
// Decodes all chunks of one image part into `exr_image`: either every tile of
// every level (tiled files, building a ->next_level chain) or every scanline
// block (scanline files). Allocates exr_image->images or ->tiles. On success
// also overwrites exr_header->pixel_types with the requested pixel types.
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code, appending detail to *err
// when non-NULL.
static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header,
                       const OffsetData& offset_data,
                       const unsigned char *head, const size_t size,
                       std::string *err) {
  int num_channels = exr_header->num_channels;
  // Scanlines per chunk depends on the compression scheme.
  int num_scanline_blocks = 1;
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    num_scanline_blocks = 16;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    num_scanline_blocks = 32;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    num_scanline_blocks = 16;
#if TINYEXR_USE_ZFP
    tinyexr::ZFPCompressionParam zfp_compression_param;
    if (!FindZFPCompressionParam(&zfp_compression_param,
                                 exr_header->custom_attributes,
                                 int(exr_header->num_custom_attributes), err)) {
      return TINYEXR_ERROR_INVALID_HEADER;
    }
#endif
  }
  if (exr_header->data_window.max_x < exr_header->data_window.min_x ||
      exr_header->data_window.max_y < exr_header->data_window.min_y) {
    if (err) {
      (*err) += "Invalid data window.\n";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }
  int data_width =
      exr_header->data_window.max_x - exr_header->data_window.min_x + 1;
  int data_height =
      exr_header->data_window.max_y - exr_header->data_window.min_y + 1;
  // Do not allow too large data_width and data_height. header invalid?
  {
    if ((data_width > TINYEXR_DIMENSION_THRESHOLD) || (data_height > TINYEXR_DIMENSION_THRESHOLD)) {
      if (err) {
        std::stringstream ss;
        ss << "data_with or data_height too large. data_width: " << data_width
           << ", "
           << "data_height = " << data_height << std::endl;
        (*err) += ss.str();
      }
      return TINYEXR_ERROR_INVALID_DATA;
    }
    if (exr_header->tiled) {
      if ((exr_header->tile_size_x > TINYEXR_DIMENSION_THRESHOLD) || (exr_header->tile_size_y > TINYEXR_DIMENSION_THRESHOLD)) {
        if (err) {
          std::stringstream ss;
          ss << "tile with or tile height too large. tile width: " << exr_header->tile_size_x
             << ", "
             << "tile height = " << exr_header->tile_size_y << std::endl;
          (*err) += ss.str();
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }
    }
  }
  const std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data.offsets[0][0];
  size_t num_blocks = offsets.size();
  // Per-channel byte offsets within one (interleaved) pixel.
  std::vector<size_t> channel_offset_list;
  int pixel_data_size = 0;
  size_t channel_offset = 0;
  if (!tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size,
                                     &channel_offset, num_channels,
                                     exr_header->channels)) {
    if (err) {
      (*err) += "Failed to compute channel layout.\n";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
  std::atomic<bool> invalid_data(false);
#else
  bool invalid_data(false);
#endif
  if (exr_header->tiled) {
    // value check
    if (exr_header->tile_size_x < 0) {
      if (err) {
        std::stringstream ss;
        ss << "Invalid tile size x : " << exr_header->tile_size_x << "\n";
        (*err) += ss.str();
      }
      return TINYEXR_ERROR_INVALID_HEADER;
    }
    if (exr_header->tile_size_y < 0) {
      if (err) {
        std::stringstream ss;
        ss << "Invalid tile size y : " << exr_header->tile_size_y << "\n";
        (*err) += ss.str();
      }
      return TINYEXR_ERROR_INVALID_HEADER;
    }
    if (exr_header->tile_level_mode != TINYEXR_TILE_RIPMAP_LEVELS) {
      // ONE_LEVEL / MIPMAP: one image per level, chained via next_level.
      EXRImage* level_image = NULL;
      for (int level = 0; level < offset_data.num_x_levels; ++level) {
        if (!level_image) {
          level_image = exr_image;
        } else {
          level_image->next_level = new EXRImage;
          InitEXRImage(level_image->next_level);
          level_image = level_image->next_level;
        }
        level_image->width =
          LevelSize(exr_header->data_window.max_x - exr_header->data_window.min_x + 1, level, exr_header->tile_rounding_mode);
        level_image->height =
          LevelSize(exr_header->data_window.max_y - exr_header->data_window.min_y + 1, level, exr_header->tile_rounding_mode);
        level_image->level_x = level;
        level_image->level_y = level;
        int ret = DecodeTiledLevel(level_image, exr_header,
          offset_data,
          channel_offset_list,
          pixel_data_size,
          head, size,
          err);
        if (ret != TINYEXR_SUCCESS) return ret;
      }
    } else {
      // RIPMAP: one image per (level_x, level_y) pair.
      EXRImage* level_image = NULL;
      for (int level_y = 0; level_y < offset_data.num_y_levels; ++level_y)
        for (int level_x = 0; level_x < offset_data.num_x_levels; ++level_x) {
          if (!level_image) {
            level_image = exr_image;
          } else {
            level_image->next_level = new EXRImage;
            InitEXRImage(level_image->next_level);
            level_image = level_image->next_level;
          }
          level_image->width =
            LevelSize(exr_header->data_window.max_x - exr_header->data_window.min_x + 1, level_x, exr_header->tile_rounding_mode);
          level_image->height =
            LevelSize(exr_header->data_window.max_y - exr_header->data_window.min_y + 1, level_y, exr_header->tile_rounding_mode);
          level_image->level_x = level_x;
          level_image->level_y = level_y;
          int ret = DecodeTiledLevel(level_image, exr_header,
            offset_data,
            channel_offset_list,
            pixel_data_size,
            head, size,
            err);
          if (ret != TINYEXR_SUCCESS) return ret;
        }
    }
  } else {  // scanline format
    // Don't allow too large image(256GB * pixel_data_size or more). Workaround
    // for #104.
    size_t total_data_len =
        size_t(data_width) * size_t(data_height) * size_t(num_channels);
    const bool total_data_len_overflown =
        sizeof(void *) == 8 ? (total_data_len >= 0x4000000000) : false;
    if ((total_data_len == 0) || total_data_len_overflown) {
      if (err) {
        std::stringstream ss;
        ss << "Image data size is zero or too large: width = " << data_width
           << ", height = " << data_height << ", channels = " << num_channels
           << std::endl;
        (*err) += ss.str();
      }
      return TINYEXR_ERROR_INVALID_DATA;
    }
    exr_image->images = tinyexr::AllocateImage(
        num_channels, exr_header->channels, exr_header->requested_pixel_types,
        data_width, data_height);
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
    // Worker threads pull scanline-block indices from a shared atomic counter.
    std::vector<std::thread> workers;
    std::atomic<int> y_count(0);
    int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
    if (num_threads > int(num_blocks)) {
      num_threads = int(num_blocks);
    }
    for (int t = 0; t < num_threads; t++) {
      workers.emplace_back(std::thread([&]() {
        int y = 0;
        while ((y = y_count++) < int(num_blocks)) {
#else
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
    for (int y = 0; y < static_cast<int>(num_blocks); y++) {
#endif
      size_t y_idx = static_cast<size_t>(y);
      if (offsets[y_idx] + sizeof(int) * 2 > size) {
        invalid_data = true;
      } else {
        // 4 byte: scan line
        // 4 byte: data size
        // ~     : pixel data(uncompressed or compressed)
        size_t data_size =
            size_t(size - (offsets[y_idx] + sizeof(int) * 2));
        const unsigned char *data_ptr =
            reinterpret_cast<const unsigned char *>(head + offsets[y_idx]);
        int line_no;
        memcpy(&line_no, data_ptr, sizeof(int));
        int data_len;
        memcpy(&data_len, data_ptr + 4, sizeof(int));
        tinyexr::swap4(&line_no);
        tinyexr::swap4(&data_len);
        if (size_t(data_len) > data_size) {
          invalid_data = true;
        } else if ((line_no > (2 << 20)) || (line_no < -(2 << 20))) {
          // Too large value. Assume this is invalid
          // 2**20 = 1048576 = heuristic value.
          invalid_data = true;
        } else if (data_len == 0) {
          // TODO(syoyo): May be ok to raise the threshold for example
          // `data_len < 4`
          invalid_data = true;
        } else {
          // line_no may be negative.
          int end_line_no = (std::min)(line_no + num_scanline_blocks,
                                       (exr_header->data_window.max_y + 1));
          int num_lines = end_line_no - line_no;
          if (num_lines <= 0) {
            invalid_data = true;
          } else {
            // Move to data addr: 8 = 4 + 4;
            data_ptr += 8;
            // Adjust line_no with data_window.bmin.y
            // overflow check
            tinyexr_int64 lno =
                static_cast<tinyexr_int64>(line_no) -
                static_cast<tinyexr_int64>(exr_header->data_window.min_y);
            if (lno > std::numeric_limits<int>::max()) {
              line_no = -1;  // invalid
            } else if (lno < -std::numeric_limits<int>::max()) {
              line_no = -1;  // invalid
            } else {
              line_no -= exr_header->data_window.min_y;
            }
            if (line_no < 0) {
              invalid_data = true;
            } else {
              if (!tinyexr::DecodePixelData(
                      exr_image->images, exr_header->requested_pixel_types,
                      data_ptr, static_cast<size_t>(data_len),
                      exr_header->compression_type, exr_header->line_order,
                      data_width, data_height, data_width, y, line_no,
                      num_lines, static_cast<size_t>(pixel_data_size),
                      static_cast<size_t>(
                          exr_header->num_custom_attributes),
                      exr_header->custom_attributes,
                      static_cast<size_t>(exr_header->num_channels),
                      exr_header->channels, channel_offset_list)) {
                invalid_data = true;
              }
            }
          }
        }
      }
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
        }
      }));
    }
    for (auto &t : workers) {
      t.join();
    }
#else
    }  // omp parallel
#endif
  }
  if (invalid_data) {
    if (err) {
      // NOTE: removed an unused local `std::stringstream ss;` that was
      // declared here but never written to or read.
      (*err) += "Invalid data found when decoding pixels.\n";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }
  // Overwrite `pixel_type` with `requested_pixel_type`.
  {
    for (int c = 0; c < exr_header->num_channels; c++) {
      exr_header->pixel_types[c] = exr_header->requested_pixel_types[c];
    }
  }
  {
    exr_image->num_channels = num_channels;
    exr_image->width = data_width;
    exr_image->height = data_height;
  }
  return TINYEXR_SUCCESS;
}
// Rebuilds a scanline offset table by walking the chunk stream starting at
// `marker`. Each chunk is laid out as: 4-byte scanline y, 4-byte data_len,
// then data_len bytes of payload. Returns false when the walk would leave
// the [head, head + size) range.
static bool ReconstructLineOffsets(
    std::vector<tinyexr::tinyexr_uint64> *offsets, size_t n,
    const unsigned char *head, const unsigned char *marker, const size_t size) {
  assert(head < marker);
  assert(offsets->size() == n);
  for (size_t i = 0; i < n; i++) {
    size_t offset = static_cast<size_t>(marker - head);
    // Offset should not exceed whole EXR file/data size.
    if ((offset + sizeof(tinyexr::tinyexr_uint64)) >= size) {
      return false;
    }
    int y;
    unsigned int data_len;
    memcpy(&y, marker, sizeof(int));
    memcpy(&data_len, marker + 4, sizeof(unsigned int));
    // Convert to host byte order BEFORE validating. The previous code checked
    // `data_len >= size` on the raw little-endian value and swapped
    // afterwards, so on big-endian hosts the bounds check ran on garbage.
    tinyexr::swap4(&y);
    tinyexr::swap4(&data_len);
    if (data_len >= size) {
      return false;
    }
    (*offsets)[i] = offset;
    marker += data_len + 8;  // 8 = 4 bytes(y) + 4 bytes(data_len)
  }
  return true;
}
// floor(log2(x)) for x > 0; yields 0 for x <= 1.
static int FloorLog2(unsigned x) {
  int result = 0;
  for (; x > 1; x >>= 1u) {
    ++result;
  }
  return result;
}
// ceil(log2(x)) for x > 0; yields 0 for x <= 1.
static int CeilLog2(unsigned x) {
  int floor_log = 0;
  int round_up = 0;
  for (; x > 1; x >>= 1u) {
    // A set low bit at any step means x was not an exact power of two,
    // so the ceiling exceeds the floor by one.
    if (x & 1) {
      round_up = 1;
    }
    ++floor_log;
  }
  return floor_log + round_up;
}
// Rounded log2 of x, dispatching on the tile rounding mode:
// ROUND_DOWN uses the floor, anything else the ceiling.
static int RoundLog2(int x, int tile_rounding_mode) {
  if (tile_rounding_mode == TINYEXR_TILE_ROUND_DOWN) {
    return FloorLog2(static_cast<unsigned>(x));
  }
  return CeilLog2(static_cast<unsigned>(x));
}
// Number of resolution levels along x implied by the header's tile level
// mode and data-window extents.
static int CalculateNumXLevels(const EXRHeader* exr_header) {
  const int w =
      exr_header->data_window.max_x - exr_header->data_window.min_x + 1;
  const int h =
      exr_header->data_window.max_y - exr_header->data_window.min_y + 1;
  switch (exr_header->tile_level_mode) {
  case TINYEXR_TILE_ONE_LEVEL:
    return 1;
  case TINYEXR_TILE_MIPMAP_LEVELS:
    // Mipmaps shrink both axes together, so the level count follows the
    // larger axis.
    return RoundLog2(std::max(w, h), exr_header->tile_rounding_mode) + 1;
  case TINYEXR_TILE_RIPMAP_LEVELS:
    // Ripmaps scale x independently of y.
    return RoundLog2(w, exr_header->tile_rounding_mode) + 1;
  default:
    assert(false);
  }
  return 0;
}
// Number of resolution levels along y implied by the header's tile level
// mode and data-window extents.
static int CalculateNumYLevels(const EXRHeader* exr_header) {
  const int w =
      exr_header->data_window.max_x - exr_header->data_window.min_x + 1;
  const int h =
      exr_header->data_window.max_y - exr_header->data_window.min_y + 1;
  switch (exr_header->tile_level_mode) {
  case TINYEXR_TILE_ONE_LEVEL:
    return 1;
  case TINYEXR_TILE_MIPMAP_LEVELS:
    // Mipmaps shrink both axes together, so the level count follows the
    // larger axis.
    return RoundLog2(std::max(w, h), exr_header->tile_rounding_mode) + 1;
  case TINYEXR_TILE_RIPMAP_LEVELS:
    // Ripmaps scale y independently of x.
    return RoundLog2(h, exr_header->tile_rounding_mode) + 1;
  default:
    assert(false);
  }
  return 0;
}
// Fills numTiles[i] with the tile count covering level i:
// ceil(level_extent / size), where level_extent halves per level.
static void CalculateNumTiles(std::vector<int>& numTiles,
                              int toplevel_size,
                              int size,
                              int tile_rounding_mode) {
  for (unsigned i = 0; i < numTiles.size(); i++) {
    const int level_extent =
        LevelSize(toplevel_size, static_cast<int>(i), tile_rounding_mode);
    assert(level_extent <= std::numeric_limits<int>::max() - size + 1);
    // Ceiling division by the tile size.
    numTiles[i] = (level_extent + size - 1) / size;
  }
}
// Computes per-level tile counts for both axes from the header's data window,
// tile sizes, and level/rounding modes.
static void PrecalculateTileInfo(std::vector<int>& num_x_tiles,
                                 std::vector<int>& num_y_tiles,
                                 const EXRHeader* exr_header) {
  const int data_w =
      exr_header->data_window.max_x - exr_header->data_window.min_x + 1;
  const int data_h =
      exr_header->data_window.max_y - exr_header->data_window.min_y + 1;
  num_x_tiles.resize(CalculateNumXLevels(exr_header));
  num_y_tiles.resize(CalculateNumYLevels(exr_header));
  CalculateNumTiles(num_x_tiles, data_w, exr_header->tile_size_x,
                    exr_header->tile_rounding_mode);
  CalculateNumTiles(num_y_tiles, data_h, exr_header->tile_size_y,
                    exr_header->tile_rounding_mode);
}
// Shapes the offset table for a single-resolution (scanline / chunk-count)
// image: one level, one row, `num_blocks` zero-initialized entries.
static void InitSingleResolutionOffsets(OffsetData& offset_data, size_t num_blocks) {
  offset_data.num_x_levels = 1;
  offset_data.num_y_levels = 1;
  offset_data.offsets.assign(
      1, std::vector<std::vector<tinyexr::tinyexr_uint64> >(
             1, std::vector<tinyexr::tinyexr_uint64>(num_blocks)));
}
// Return sum of tile blocks.
// Allocates the 3-D offset table (level / tile-y / tile-x) according to the
// header's tile level mode and returns the total number of tile chunks.
static int InitTileOffsets(OffsetData& offset_data,
                           const EXRHeader* exr_header,
                           const std::vector<int>& num_x_tiles,
                           const std::vector<int>& num_y_tiles) {
  int num_tile_blocks = 0;
  offset_data.num_x_levels = static_cast<int>(num_x_tiles.size());
  offset_data.num_y_levels = static_cast<int>(num_y_tiles.size());
  switch (exr_header->tile_level_mode) {
  case TINYEXR_TILE_ONE_LEVEL:
  case TINYEXR_TILE_MIPMAP_LEVELS:
    // One/mipmap levels: x and y level counts coincide; one table per level,
    // indexed by the single level number.
    assert(offset_data.num_x_levels == offset_data.num_y_levels);
    offset_data.offsets.resize(offset_data.num_x_levels);
    for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) {
      offset_data.offsets[l].resize(num_y_tiles[l]);
      for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
        offset_data.offsets[l][dy].resize(num_x_tiles[l]);
        num_tile_blocks += num_x_tiles[l];
      }
    }
    break;
  case TINYEXR_TILE_RIPMAP_LEVELS:
    // Ripmap: one table per (lx, ly) pair, laid out row-major — the same
    // layout LevelIndex() computes.
    offset_data.offsets.resize(static_cast<size_t>(offset_data.num_x_levels) * static_cast<size_t>(offset_data.num_y_levels));
    for (int ly = 0; ly < offset_data.num_y_levels; ++ly) {
      for (int lx = 0; lx < offset_data.num_x_levels; ++lx) {
        int l = ly * offset_data.num_x_levels + lx;
        offset_data.offsets[l].resize(num_y_tiles[ly]);
        for (size_t dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
          offset_data.offsets[l][dy].resize(num_x_tiles[lx]);
          num_tile_blocks += num_x_tiles[lx];
        }
      }
    }
    break;
  default:
    assert(false);
  }
  return num_tile_blocks;
}
static bool IsAnyOffsetsAreInvalid(const OffsetData& offset_data) {
for (unsigned int l = 0; l < offset_data.offsets.size(); ++l)
for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy)
for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx)
if (reinterpret_cast<const tinyexr::tinyexr_int64&>(offset_data.offsets[l][dy][dx]) <= 0)
return true;
return false;
}
// Returns true when tile coordinates (dx, dy) at level (lx, ly) address an
// existing entry of the offset table for the header's level mode. Used to
// validate tile headers read back from the chunk stream during offset-table
// reconstruction.
static bool isValidTile(const EXRHeader* exr_header,
                        const OffsetData& offset_data,
                        int dx, int dy, int lx, int ly) {
  if (lx < 0 || ly < 0 || dx < 0 || dy < 0) return false;
  int num_x_levels = offset_data.num_x_levels;
  int num_y_levels = offset_data.num_y_levels;
  switch (exr_header->tile_level_mode) {
  case TINYEXR_TILE_ONE_LEVEL:
    // Single level: only (0, 0) is a valid level pair.
    if (lx == 0 &&
        ly == 0 &&
        offset_data.offsets.size() > 0 &&
        offset_data.offsets[0].size() > static_cast<size_t>(dy) &&
        offset_data.offsets[0][dy].size() > static_cast<size_t>(dx)) {
      return true;
    }
    break;
  case TINYEXR_TILE_MIPMAP_LEVELS:
    // Mipmap: the table is indexed by lx alone (see LevelIndex).
    if (lx < num_x_levels &&
        ly < num_y_levels &&
        offset_data.offsets.size() > static_cast<size_t>(lx) &&
        offset_data.offsets[lx].size() > static_cast<size_t>(dy) &&
        offset_data.offsets[lx][dy].size() > static_cast<size_t>(dx)) {
      return true;
    }
    break;
  case TINYEXR_TILE_RIPMAP_LEVELS:
    {
      // Ripmap: row-major (lx, ly) table index, matching LevelIndex().
      size_t idx = static_cast<size_t>(lx) + static_cast<size_t>(ly)* static_cast<size_t>(num_x_levels);
      if (lx < num_x_levels &&
          ly < num_y_levels &&
          (offset_data.offsets.size() > idx) &&
          offset_data.offsets[idx].size() > static_cast<size_t>(dy) &&
          offset_data.offsets[idx][dy].size() > static_cast<size_t>(dx)) {
        return true;
      }
    }
    break;
  default:
    return false;
  }
  return false;
}
// Rebuilds the tile offset table by walking the chunk stream from `marker`,
// used when the stored table contains invalid (zero/negative) entries.
// Each tile chunk begins with: [partNumber (multipart only)], tileX, tileY,
// levelX, levelY, then either a 4-byte dataSize + payload (regular tiles) or
// two 8-byte packed sizes, an 8-byte unpacked size and payload (deep data).
// Stops silently (best-effort, as before) on the first inconsistent or
// out-of-bounds chunk.
static void ReconstructTileOffsets(OffsetData& offset_data,
                                   const EXRHeader* exr_header,
                                   const unsigned char* head, const unsigned char* marker, const size_t size,
                                   bool isMultiPartFile,
                                   bool isDeep) {
  int numXLevels = offset_data.num_x_levels;
  const unsigned char* const end = head + size;
  for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) {
    for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
      for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) {
        tinyexr::tinyexr_uint64 tileOffset = marker - head;
        // Bounds-check the fixed-size chunk header before reading it; the
        // previous code read past `size` on truncated input (the size
        // parameter was ignored entirely).
        size_t header_size = 4 * sizeof(int);  // tileX, tileY, levelX, levelY
        if (isMultiPartFile) header_size += sizeof(int);  // partNumber
        header_size += isDeep ? 2 * sizeof(tinyexr::tinyexr_int64)  // packed sizes
                              : sizeof(int);                        // dataSize
        if (marker > end || header_size > size_t(end - marker)) {
          return;
        }
        if (isMultiPartFile) {
          //int partNumber;
          marker += sizeof(int);
        }
        int tileX;
        memcpy(&tileX, marker, sizeof(int));
        tinyexr::swap4(&tileX);
        marker += sizeof(int);
        int tileY;
        memcpy(&tileY, marker, sizeof(int));
        tinyexr::swap4(&tileY);
        marker += sizeof(int);
        int levelX;
        memcpy(&levelX, marker, sizeof(int));
        tinyexr::swap4(&levelX);
        marker += sizeof(int);
        int levelY;
        memcpy(&levelY, marker, sizeof(int));
        tinyexr::swap4(&levelY);
        marker += sizeof(int);
        if (isDeep) {
          tinyexr::tinyexr_int64 packed_offset_table_size;
          memcpy(&packed_offset_table_size, marker, sizeof(tinyexr::tinyexr_int64));
          tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64*>(&packed_offset_table_size));
          marker += sizeof(tinyexr::tinyexr_int64);
          tinyexr::tinyexr_int64 packed_sample_size;
          memcpy(&packed_sample_size, marker, sizeof(tinyexr::tinyexr_int64));
          tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64*>(&packed_sample_size));
          marker += sizeof(tinyexr::tinyexr_int64);
          // Reject negative sizes; a bogus value would move `marker`
          // backwards or overflow the skip below.
          if (packed_offset_table_size < 0 || packed_sample_size < 0) {
            return;
          }
          // next Int64 is unpacked sample size - skip that too
          tinyexr::tinyexr_int64 skip_size =
              packed_offset_table_size + packed_sample_size + 8;
          if (skip_size < 0 ||
              tinyexr::tinyexr_uint64(skip_size) >
                  tinyexr::tinyexr_uint64(end - marker)) {
            return;
          }
          marker += skip_size;
        } else {
          int dataSize;
          memcpy(&dataSize, marker, sizeof(int));
          tinyexr::swap4(&dataSize);
          marker += sizeof(int);
          // Reject negative or out-of-range payload sizes.
          if (dataSize < 0 || dataSize > (end - marker)) {
            return;
          }
          marker += dataSize;
        }
        if (!isValidTile(exr_header, offset_data,
                         tileX, tileY, levelX, levelY))
          return;
        int level_idx = LevelIndex(levelX, levelY, exr_header->tile_level_mode, numXLevels);
        offset_data.offsets[level_idx][tileY][tileX] = tileOffset;
      }
    }
  }
}
// Reads the offset table that follows the header; `marker` is an in/out
// parameter and is advanced past the table on success.
// Fills `offset_data` from the on-disk offset table at `marker`, validating
// that every entry stays inside [0, size). Advances `marker` past the table.
// Returns TINYEXR_SUCCESS or TINYEXR_ERROR_INVALID_DATA (with *err set).
static int ReadOffsets(OffsetData& offset_data,
                       const unsigned char* head,
                       const unsigned char*& marker,
                       const size_t size,
                       const char** err) {
  for (unsigned int level = 0; level < offset_data.offsets.size(); ++level) {
    for (unsigned int ty = 0; ty < offset_data.offsets[level].size(); ++ty) {
      for (unsigned int tx = 0; tx < offset_data.offsets[level][ty].size(); ++tx) {
        // Make sure a full 8-byte entry remains before reading it.
        if ((marker + sizeof(tinyexr::tinyexr_uint64)) >= (head + size)) {
          tinyexr::SetErrorMessage("Insufficient data size in offset table.", err);
          return TINYEXR_ERROR_INVALID_DATA;
        }
        tinyexr::tinyexr_uint64 chunk_offset;
        memcpy(&chunk_offset, marker, sizeof(tinyexr::tinyexr_uint64));
        tinyexr::swap8(&chunk_offset);
        if (chunk_offset >= size) {
          tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err);
          return TINYEXR_ERROR_INVALID_DATA;
        }
        marker += sizeof(tinyexr::tinyexr_uint64);  // = 8
        offset_data.offsets[level][ty][tx] = chunk_offset;
      }
    }
  }
  return TINYEXR_SUCCESS;
}
// Top-level decode for one image part: validates the header's windows and
// tile sizes, reads (or reconstructs) the chunk offset table starting at
// `marker`, then hands off to DecodeChunk(). `head` points at the start of
// the whole EXR memory, `size` is its total length. On failure, frees any
// partially-filled exr_image and returns a TINYEXR_ERROR_* code.
static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header,
                          const unsigned char *head,
                          const unsigned char *marker, const size_t size,
                          const char **err) {
  if (exr_image == NULL || exr_header == NULL || head == NULL ||
      marker == NULL || (size <= tinyexr::kEXRVersionSize)) {
    tinyexr::SetErrorMessage("Invalid argument for DecodeEXRImage().", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  // Scanlines per chunk, determined by the compression scheme.
  int num_scanline_blocks = 1;
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    num_scanline_blocks = 16;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    num_scanline_blocks = 32;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    num_scanline_blocks = 16;
  }
  if (exr_header->data_window.max_x < exr_header->data_window.min_x ||
      exr_header->data_window.max_x - exr_header->data_window.min_x ==
          std::numeric_limits<int>::max()) {
    // Issue 63
    tinyexr::SetErrorMessage("Invalid data width value", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }
  int data_width =
      exr_header->data_window.max_x - exr_header->data_window.min_x + 1;
  if (exr_header->data_window.max_y < exr_header->data_window.min_y ||
      exr_header->data_window.max_y - exr_header->data_window.min_y ==
          std::numeric_limits<int>::max()) {
    tinyexr::SetErrorMessage("Invalid data height value", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }
  int data_height =
      exr_header->data_window.max_y - exr_header->data_window.min_y + 1;
  // Do not allow too large data_width and data_height. header invalid?
  {
    if (data_width > TINYEXR_DIMENSION_THRESHOLD) {
      tinyexr::SetErrorMessage("data width too large.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    if (data_height > TINYEXR_DIMENSION_THRESHOLD) {
      tinyexr::SetErrorMessage("data height too large.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
  }
  if (exr_header->tiled) {
    if (exr_header->tile_size_x > TINYEXR_DIMENSION_THRESHOLD) {
      tinyexr::SetErrorMessage("tile width too large.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    if (exr_header->tile_size_y > TINYEXR_DIMENSION_THRESHOLD) {
      tinyexr::SetErrorMessage("tile height too large.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
  }
  // Read offset tables.
  OffsetData offset_data;
  size_t num_blocks = 0;
  // For a multi-resolution image, the size of the offset table will be calculated from the other attributes of the header.
  // If chunk_count > 0 then chunk_count must be equal to the calculated tile count.
  if (exr_header->tiled) {
    {
      std::vector<int> num_x_tiles, num_y_tiles;
      PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_header);
      num_blocks = InitTileOffsets(offset_data, exr_header, num_x_tiles, num_y_tiles);
      if (exr_header->chunk_count > 0) {
        if (exr_header->chunk_count != static_cast<int>(num_blocks)) {
          tinyexr::SetErrorMessage("Invalid offset table size.", err);
          return TINYEXR_ERROR_INVALID_DATA;
        }
      }
    }
    int ret = ReadOffsets(offset_data, head, marker, size, err);
    if (ret != TINYEXR_SUCCESS) return ret;
    // Incomplete (zero/negative) entries: rebuild the table by walking the
    // tile chunks themselves.
    if (IsAnyOffsetsAreInvalid(offset_data)) {
      ReconstructTileOffsets(offset_data, exr_header,
        head, marker, size,
        exr_header->multipart, exr_header->non_image);
    }
  } else if (exr_header->chunk_count > 0) {
    // Use `chunkCount` attribute.
    num_blocks = static_cast<size_t>(exr_header->chunk_count);
    InitSingleResolutionOffsets(offset_data, num_blocks);
  } else {
    // No chunkCount: derive the block count from the image height
    // (ceil(data_height / num_scanline_blocks)).
    num_blocks = static_cast<size_t>(data_height) /
                 static_cast<size_t>(num_scanline_blocks);
    if (num_blocks * static_cast<size_t>(num_scanline_blocks) <
        static_cast<size_t>(data_height)) {
      num_blocks++;
    }
    InitSingleResolutionOffsets(offset_data, num_blocks);
  }
  if (!exr_header->tiled) {
    std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data.offsets[0][0];
    for (size_t y = 0; y < num_blocks; y++) {
      tinyexr::tinyexr_uint64 offset;
      // Issue #81
      if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) {
        tinyexr::SetErrorMessage("Insufficient data size in offset table.", err);
        return TINYEXR_ERROR_INVALID_DATA;
      }
      memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
      tinyexr::swap8(&offset);
      if (offset >= size) {
        tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err);
        return TINYEXR_ERROR_INVALID_DATA;
      }
      marker += sizeof(tinyexr::tinyexr_uint64);  // = 8
      offsets[y] = offset;
    }
    // If line offsets are invalid, we try to reconstruct it.
    // See OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets() for details.
    for (size_t y = 0; y < num_blocks; y++) {
      if (offsets[y] <= 0) {
        // TODO(syoyo) Report as warning?
        // if (err) {
        //  stringstream ss;
        //  ss << "Incomplete lineOffsets." << std::endl;
        //  (*err) += ss.str();
        //}
        bool ret =
            ReconstructLineOffsets(&offsets, num_blocks, head, marker, size);
        if (ret) {
          // OK
          break;
        } else {
          tinyexr::SetErrorMessage(
              "Cannot reconstruct lineOffset table in DecodeEXRImage.", err);
          return TINYEXR_ERROR_INVALID_DATA;
        }
      }
    }
  }
  {
    std::string e;
    int ret = DecodeChunk(exr_image, exr_header, offset_data, head, size, &e);
    if (ret != TINYEXR_SUCCESS) {
      if (!e.empty()) {
        tinyexr::SetErrorMessage(e, err);
      }
#if 1
      // Release any partially-decoded image data before propagating the error.
      FreeEXRImage(exr_image);
#else
      // release memory(if exists)
      if ((exr_header->num_channels > 0) && exr_image && exr_image->images) {
        for (size_t c = 0; c < size_t(exr_header->num_channels); c++) {
          if (exr_image->images[c]) {
            free(exr_image->images[c]);
            exr_image->images[c] = NULL;
          }
        }
        free(exr_image->images);
        exr_image->images = NULL;
      }
#endif
    }
    return ret;
  }
}
// Collects the unique layer-name prefixes appearing in the header's channel
// names, in order of first appearance. A channel "diffuse.R" contributes the
// layer "diffuse"; names without a usable '.'-separated prefix (no dot,
// leading dot, or trailing dot) contribute nothing.
static void GetLayers(const EXRHeader &exr_header,
                      std::vector<std::string> &layer_names) {
  layer_names.clear();
  for (int c = 0; c < exr_header.num_channels; c++) {
    std::string name(exr_header.channels[c].name);
    const size_t dot = name.find_last_of('.');
    // Reject names with no separator, a separator at the front, or one at
    // the very end — none of these form a valid "<layer>.<channel>" pair.
    const bool has_prefix =
        (dot != std::string::npos) && (dot != 0) && (dot + 1 < name.size());
    if (!has_prefix) {
      continue;
    }
    name.erase(dot);  // keep only the layer prefix
    const bool already_seen =
        std::find(layer_names.begin(), layer_names.end(), name) !=
        layer_names.end();
    if (!already_seen) {
      layer_names.push_back(name);
    }
  }
}
// A channel entry within a layer: the channel's index into
// EXRHeader::channels plus its display name (layer prefix stripped by
// ChannelsInLayer when applicable, e.g. "R").
struct LayerChannel {
  // `n` is taken by value and moved into place; the original ctor copied
  // the string a second time via the member init.
  explicit LayerChannel(size_t i, std::string n)
      : index(i), name(std::move(n)) {}
  size_t index;       // position in EXRHeader::channels
  std::string name;   // channel name relative to its layer
};
// Collects the channels belonging to `layer_name` into `channels`.
// - Empty layer name: every channel is included; a channel's name is
//   stripped to the part after its last '.' (if any).
// - Non-empty layer name: only channels whose name contains
//   "<layer_name>." are included; when that prefix is at position 0 it is
//   stripped, otherwise the full channel name is kept (preserves the
//   original matching behavior).
// Fixes: `layer_name` was passed by value (needless copy) and the search
// key `layer_name + '.'` was rebuilt on every loop iteration.
static void ChannelsInLayer(const EXRHeader &exr_header,
                            const std::string &layer_name,
                            std::vector<LayerChannel> &channels) {
  channels.clear();
  // Loop-invariant search key, hoisted out of the per-channel loop.
  const std::string prefix = layer_name + '.';
  for (int c = 0; c < exr_header.num_channels; c++) {
    std::string ch_name(exr_header.channels[c].name);
    if (layer_name.empty()) {
      const size_t pos = ch_name.find_last_of('.');
      if (pos != std::string::npos && pos < ch_name.size()) {
        ch_name = ch_name.substr(pos + 1);
      }
    } else {
      const size_t pos = ch_name.find(prefix);
      if (pos == std::string::npos) continue;
      if (pos == 0) {
        ch_name = ch_name.substr(layer_name.size() + 1);
      }
    }
    LayerChannel ch(size_t(c), ch_name);
    channels.push_back(ch);
  }
}
} // namespace tinyexr
int EXRLayers(const char *filename, const char **layer_names[], int *num_layers,
const char **err) {
EXRVersion exr_version;
EXRHeader exr_header;
InitEXRHeader(&exr_header);
{
int ret = ParseEXRVersionFromFile(&exr_version, filename);
if (ret != TINYEXR_SUCCESS) {
tinyexr::SetErrorMessage("Invalid EXR header.", err);
return ret;
}
if (exr_version.multipart || exr_version.non_image) {
tinyexr::SetErrorMessage(
"Loading multipart or DeepImage is not supported in LoadEXR() API",
err);
return TINYEXR_ERROR_INVALID_DATA; // @fixme.
}
}
int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err);
if (ret != TINYEXR_SUCCESS) {
FreeEXRHeader(&exr_header);
return ret;
}
std::vector<std::string> layer_vec;
tinyexr::GetLayers(exr_header, layer_vec);
(*num_layers) = int(layer_vec.size());
(*layer_names) = static_cast<const char **>(
malloc(sizeof(const char *) * static_cast<size_t>(layer_vec.size())));
for (size_t c = 0; c < static_cast<size_t>(layer_vec.size()); c++) {
#ifdef _MSC_VER
(*layer_names)[c] = _strdup(layer_vec[c].c_str());
#else
(*layer_names)[c] = strdup(layer_vec[c].c_str());
#endif
}
FreeEXRHeader(&exr_header);
return TINYEXR_SUCCESS;
}
// Loads an EXR image from `filename` as a packed RGBA float buffer.
// Thin convenience wrapper: forwards to LoadEXRWithLayer() with no layer
// selected (the default/unnamed layer). See LoadEXRWithLayer() for
// ownership and error-reporting details.
int LoadEXR(float **out_rgba, int *width, int *height, const char *filename,
            const char **err) {
  return LoadEXRWithLayer(out_rgba, width, height, filename,
                          /* layername */ NULL, err);
}
// Loads an EXR image from `filename` into a tightly packed RGBA float
// buffer. *out_rgba is malloc()ed here (4 floats per pixel, row-major) and
// must be released by the caller with free(). `layername` selects a channel
// layer (e.g. "diffuse" picks "diffuse.R" etc.); NULL selects the
// default/unnamed layer. HALF channels are requested as FLOAT before
// decoding. Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code; on failure a
// message may be stored in *err.
int LoadEXRWithLayer(float **out_rgba, int *width, int *height,
                     const char *filename, const char *layername,
                     const char **err) {
  if (out_rgba == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXR()", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  EXRVersion exr_version;
  EXRImage exr_image;
  EXRHeader exr_header;
  InitEXRHeader(&exr_header);
  InitEXRImage(&exr_image);
  {
    int ret = ParseEXRVersionFromFile(&exr_version, filename);
    if (ret != TINYEXR_SUCCESS) {
      std::stringstream ss;
      ss << "Failed to open EXR file or read version info from EXR file. code("
         << ret << ")";
      tinyexr::SetErrorMessage(ss.str(), err);
      return ret;
    }
    if (exr_version.multipart || exr_version.non_image) {
      tinyexr::SetErrorMessage(
          "Loading multipart or DeepImage is not supported in LoadEXR() API",
          err);
      return TINYEXR_ERROR_INVALID_DATA;  // @fixme.
    }
  }
  {
    int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err);
    if (ret != TINYEXR_SUCCESS) {
      FreeEXRHeader(&exr_header);
      return ret;
    }
  }
  // Read HALF channel as FLOAT.
  for (int i = 0; i < exr_header.num_channels; i++) {
    if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
      exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
    }
  }
  // TODO: Probably limit loading to layers (channels) selected by layer index
  {
    int ret = LoadEXRImageFromFile(&exr_image, &exr_header, filename, err);
    if (ret != TINYEXR_SUCCESS) {
      FreeEXRHeader(&exr_header);
      return ret;
    }
  }
  // RGBA
  // Channel indices (into the decoded planar channel arrays) for each of
  // the R/G/B/A components of the selected layer; -1 = not present.
  int idxR = -1;
  int idxG = -1;
  int idxB = -1;
  int idxA = -1;
  std::vector<std::string> layer_names;
  tinyexr::GetLayers(exr_header, layer_names);
  std::vector<tinyexr::LayerChannel> channels;
  tinyexr::ChannelsInLayer(
      exr_header, layername == NULL ? "" : std::string(layername), channels);
  if (channels.size() < 1) {
    tinyexr::SetErrorMessage("Layer Not Found", err);
    FreeEXRHeader(&exr_header);
    FreeEXRImage(&exr_image);
    return TINYEXR_ERROR_LAYER_NOT_FOUND;
  }
  // Only the first four channels of the layer are considered for mapping.
  size_t ch_count = channels.size() < 4 ? channels.size() : 4;
  for (size_t c = 0; c < ch_count; c++) {
    const tinyexr::LayerChannel &ch = channels[c];
    if (ch.name == "R") {
      idxR = int(ch.index);
    } else if (ch.name == "G") {
      idxG = int(ch.index);
    } else if (ch.name == "B") {
      idxB = int(ch.index);
    } else if (ch.name == "A") {
      idxA = int(ch.index);
    }
  }
  if (channels.size() == 1) {
    int chIdx = int(channels.front().index);
    // Grayscale channel only: replicate the single channel into R, G, B
    // and A of the packed output.
    (*out_rgba) = reinterpret_cast<float *>(
        malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
               static_cast<size_t>(exr_image.height)));
    if (exr_header.tiled) {
      // Copy tile-by-tile, skipping pixels of edge tiles that extend past
      // the image border.
      for (int it = 0; it < exr_image.num_tiles; it++) {
        for (int j = 0; j < exr_header.tile_size_y; j++) {
          for (int i = 0; i < exr_header.tile_size_x; i++) {
            const int ii = exr_image.tiles[it].offset_x *
                               static_cast<int>(exr_header.tile_size_x) +
                           i;
            const int jj = exr_image.tiles[it].offset_y *
                               static_cast<int>(exr_header.tile_size_y) +
                           j;
            const int idx = ii + jj * static_cast<int>(exr_image.width);
            // out of region check.
            if (ii >= exr_image.width) {
              continue;
            }
            if (jj >= exr_image.height) {
              continue;
            }
            const int srcIdx = i + j * exr_header.tile_size_x;
            unsigned char **src = exr_image.tiles[it].images;
            (*out_rgba)[4 * idx + 0] =
                reinterpret_cast<float **>(src)[chIdx][srcIdx];
            (*out_rgba)[4 * idx + 1] =
                reinterpret_cast<float **>(src)[chIdx][srcIdx];
            (*out_rgba)[4 * idx + 2] =
                reinterpret_cast<float **>(src)[chIdx][srcIdx];
            (*out_rgba)[4 * idx + 3] =
                reinterpret_cast<float **>(src)[chIdx][srcIdx];
          }
        }
      }
    } else {
      // Scanline image: decoded channels are planar arrays.
      for (int i = 0; i < exr_image.width * exr_image.height; i++) {
        const float val =
            reinterpret_cast<float **>(exr_image.images)[chIdx][i];
        (*out_rgba)[4 * i + 0] = val;
        (*out_rgba)[4 * i + 1] = val;
        (*out_rgba)[4 * i + 2] = val;
        (*out_rgba)[4 * i + 3] = val;
      }
    }
  } else {
    // Assume RGB(A)
    if (idxR == -1) {
      tinyexr::SetErrorMessage("R channel not found", err);
      FreeEXRHeader(&exr_header);
      FreeEXRImage(&exr_image);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    if (idxG == -1) {
      tinyexr::SetErrorMessage("G channel not found", err);
      FreeEXRHeader(&exr_header);
      FreeEXRImage(&exr_image);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    if (idxB == -1) {
      tinyexr::SetErrorMessage("B channel not found", err);
      FreeEXRHeader(&exr_header);
      FreeEXRImage(&exr_image);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    (*out_rgba) = reinterpret_cast<float *>(
        malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
               static_cast<size_t>(exr_image.height)));
    if (exr_header.tiled) {
      // Tiled copy, clipping edge tiles as above.
      for (int it = 0; it < exr_image.num_tiles; it++) {
        for (int j = 0; j < exr_header.tile_size_y; j++) {
          for (int i = 0; i < exr_header.tile_size_x; i++) {
            const int ii =
                exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
            const int jj =
                exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
            const int idx = ii + jj * exr_image.width;
            // out of region check.
            if (ii >= exr_image.width) {
              continue;
            }
            if (jj >= exr_image.height) {
              continue;
            }
            const int srcIdx = i + j * exr_header.tile_size_x;
            unsigned char **src = exr_image.tiles[it].images;
            (*out_rgba)[4 * idx + 0] =
                reinterpret_cast<float **>(src)[idxR][srcIdx];
            (*out_rgba)[4 * idx + 1] =
                reinterpret_cast<float **>(src)[idxG][srcIdx];
            (*out_rgba)[4 * idx + 2] =
                reinterpret_cast<float **>(src)[idxB][srcIdx];
            if (idxA != -1) {
              (*out_rgba)[4 * idx + 3] =
                  reinterpret_cast<float **>(src)[idxA][srcIdx];
            } else {
              // No alpha channel: emit fully opaque.
              (*out_rgba)[4 * idx + 3] = 1.0;
            }
          }
        }
      }
    } else {
      for (int i = 0; i < exr_image.width * exr_image.height; i++) {
        (*out_rgba)[4 * i + 0] =
            reinterpret_cast<float **>(exr_image.images)[idxR][i];
        (*out_rgba)[4 * i + 1] =
            reinterpret_cast<float **>(exr_image.images)[idxG][i];
        (*out_rgba)[4 * i + 2] =
            reinterpret_cast<float **>(exr_image.images)[idxB][i];
        if (idxA != -1) {
          (*out_rgba)[4 * i + 3] =
              reinterpret_cast<float **>(exr_image.images)[idxA][i];
        } else {
          // No alpha channel: emit fully opaque.
          (*out_rgba)[4 * i + 3] = 1.0;
        }
      }
    }
  }
  (*width) = exr_image.width;
  (*height) = exr_image.height;
  FreeEXRHeader(&exr_header);
  FreeEXRImage(&exr_image);
  return TINYEXR_SUCCESS;
}
// Quick probe: returns TINYEXR_SUCCESS when `filename` starts with a valid
// EXR version/signature block, otherwise the parse error code.
int IsEXR(const char *filename) {
  EXRVersion exr_version;
  // ParseEXRVersionFromFile() already returns TINYEXR_SUCCESS on a valid
  // file, so its result can be propagated directly.
  return ParseEXRVersionFromFile(&exr_version, filename);
}
// Parses the EXR header attributes that follow the version block in
// `memory` (of `size` bytes) and fills `exr_header`.
// Note: `exr_header` is populated from the parsed info even when parsing
// reports an error, and the parse result code is still returned —
// this matches the long-standing behavior callers rely on.
int ParseEXRHeaderFromMemory(EXRHeader *exr_header, const EXRVersion *version,
                             const unsigned char *memory, size_t size,
                             const char **err) {
  if ((memory == NULL) || (exr_header == NULL)) {
    tinyexr::SetErrorMessage(
        "Invalid argument. `memory` or `exr_header` argument is null in "
        "ParseEXRHeaderFromMemory()",
        err);
    // Invalid argument
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  if (size < tinyexr::kEXRVersionSize) {
    tinyexr::SetErrorMessage("Insufficient header/data size.\n", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }
  // Skip the magic number + version block; header attributes follow it.
  const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
  const size_t marker_size = size - tinyexr::kEXRVersionSize;
  tinyexr::HeaderInfo info;
  info.clear();
  std::string err_str;
  const int ret =
      ParseEXRHeader(&info, NULL, version, &err_str, marker, marker_size);
  if ((ret != TINYEXR_SUCCESS) && err && !err_str.empty()) {
    tinyexr::SetErrorMessage(err_str, err);
  }
  ConvertHeader(exr_header, info);
  // Propagate the multipart/deep-image flags from the version block.
  exr_header->multipart = version->multipart ? 1 : 0;
  exr_header->non_image = version->non_image ? 1 : 0;
  return ret;
}
// Loads an EXR image from an in-memory buffer into a tightly packed RGBA
// float buffer. *out_rgba is malloc()ed here (4 floats per pixel) and must
// be released by the caller with free(). HALF channels are requested as
// FLOAT before decoding. Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code
// with a message in *err.
// Fixes vs. original: `exr_header` (and, in the channel-not-found paths,
// `exr_image`) was leaked on every error return after the header had been
// converted — the "@todo { free exr_image }" paths. Freeing now matches
// LoadEXRWithLayer()'s error handling.
int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
                      const unsigned char *memory, size_t size,
                      const char **err) {
  if (out_rgba == NULL || memory == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXRFromMemory", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  EXRVersion exr_version;
  EXRImage exr_image;
  EXRHeader exr_header;
  InitEXRHeader(&exr_header);
  int ret = ParseEXRVersionFromMemory(&exr_version, memory, size);
  if (ret != TINYEXR_SUCCESS) {
    std::stringstream ss;
    ss << "Failed to parse EXR version. code(" << ret << ")";
    tinyexr::SetErrorMessage(ss.str(), err);
    return ret;
  }
  ret = ParseEXRHeaderFromMemory(&exr_header, &exr_version, memory, size, err);
  if (ret != TINYEXR_SUCCESS) {
    // ParseEXRHeaderFromMemory() converts into exr_header even on failure,
    // so release whatever it allocated (previously leaked).
    FreeEXRHeader(&exr_header);
    return ret;
  }
  // Read HALF channel as FLOAT.
  for (int i = 0; i < exr_header.num_channels; i++) {
    if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
      exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
    }
  }
  InitEXRImage(&exr_image);
  ret = LoadEXRImageFromMemory(&exr_image, &exr_header, memory, size, err);
  if (ret != TINYEXR_SUCCESS) {
    FreeEXRHeader(&exr_header);  // previously leaked
    return ret;
  }
  // RGBA channel indices into the decoded planar channels; -1 = absent.
  int idxR = -1;
  int idxG = -1;
  int idxB = -1;
  int idxA = -1;
  for (int c = 0; c < exr_header.num_channels; c++) {
    if (strcmp(exr_header.channels[c].name, "R") == 0) {
      idxR = c;
    } else if (strcmp(exr_header.channels[c].name, "G") == 0) {
      idxG = c;
    } else if (strcmp(exr_header.channels[c].name, "B") == 0) {
      idxB = c;
    } else if (strcmp(exr_header.channels[c].name, "A") == 0) {
      idxA = c;
    }
  }
  // TODO(syoyo): Refactor removing same code as used in LoadEXR().
  if (exr_header.num_channels == 1) {
    // Grayscale channel only: replicate it into R, G, B and A.
    (*out_rgba) = reinterpret_cast<float *>(
        malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
               static_cast<size_t>(exr_image.height)));
    if (exr_header.tiled) {
      // Copy tile-by-tile, skipping pixels of edge tiles that extend past
      // the image border.
      for (int it = 0; it < exr_image.num_tiles; it++) {
        for (int j = 0; j < exr_header.tile_size_y; j++) {
          for (int i = 0; i < exr_header.tile_size_x; i++) {
            const int ii =
                exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
            const int jj =
                exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
            const int idx = ii + jj * exr_image.width;
            // out of region check.
            if (ii >= exr_image.width) {
              continue;
            }
            if (jj >= exr_image.height) {
              continue;
            }
            const int srcIdx = i + j * exr_header.tile_size_x;
            unsigned char **src = exr_image.tiles[it].images;
            (*out_rgba)[4 * idx + 0] =
                reinterpret_cast<float **>(src)[0][srcIdx];
            (*out_rgba)[4 * idx + 1] =
                reinterpret_cast<float **>(src)[0][srcIdx];
            (*out_rgba)[4 * idx + 2] =
                reinterpret_cast<float **>(src)[0][srcIdx];
            (*out_rgba)[4 * idx + 3] =
                reinterpret_cast<float **>(src)[0][srcIdx];
          }
        }
      }
    } else {
      for (int i = 0; i < exr_image.width * exr_image.height; i++) {
        const float val = reinterpret_cast<float **>(exr_image.images)[0][i];
        (*out_rgba)[4 * i + 0] = val;
        (*out_rgba)[4 * i + 1] = val;
        (*out_rgba)[4 * i + 2] = val;
        (*out_rgba)[4 * i + 3] = val;
      }
    }
  } else {
    // TODO(syoyo): Support non RGBA image.
    if (idxR == -1) {
      tinyexr::SetErrorMessage("R channel not found", err);
      FreeEXRHeader(&exr_header);
      FreeEXRImage(&exr_image);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    if (idxG == -1) {
      tinyexr::SetErrorMessage("G channel not found", err);
      FreeEXRHeader(&exr_header);
      FreeEXRImage(&exr_image);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    if (idxB == -1) {
      tinyexr::SetErrorMessage("B channel not found", err);
      FreeEXRHeader(&exr_header);
      FreeEXRImage(&exr_image);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    (*out_rgba) = reinterpret_cast<float *>(
        malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
               static_cast<size_t>(exr_image.height)));
    if (exr_header.tiled) {
      for (int it = 0; it < exr_image.num_tiles; it++) {
        for (int j = 0; j < exr_header.tile_size_y; j++) {
          for (int i = 0; i < exr_header.tile_size_x; i++) {
            const int ii =
                exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
            const int jj =
                exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
            const int idx = ii + jj * exr_image.width;
            // out of region check.
            if (ii >= exr_image.width) {
              continue;
            }
            if (jj >= exr_image.height) {
              continue;
            }
            const int srcIdx = i + j * exr_header.tile_size_x;
            unsigned char **src = exr_image.tiles[it].images;
            (*out_rgba)[4 * idx + 0] =
                reinterpret_cast<float **>(src)[idxR][srcIdx];
            (*out_rgba)[4 * idx + 1] =
                reinterpret_cast<float **>(src)[idxG][srcIdx];
            (*out_rgba)[4 * idx + 2] =
                reinterpret_cast<float **>(src)[idxB][srcIdx];
            if (idxA != -1) {
              (*out_rgba)[4 * idx + 3] =
                  reinterpret_cast<float **>(src)[idxA][srcIdx];
            } else {
              // No alpha channel: emit fully opaque.
              (*out_rgba)[4 * idx + 3] = 1.0;
            }
          }
        }
      }
    } else {
      for (int i = 0; i < exr_image.width * exr_image.height; i++) {
        (*out_rgba)[4 * i + 0] =
            reinterpret_cast<float **>(exr_image.images)[idxR][i];
        (*out_rgba)[4 * i + 1] =
            reinterpret_cast<float **>(exr_image.images)[idxG][i];
        (*out_rgba)[4 * i + 2] =
            reinterpret_cast<float **>(exr_image.images)[idxB][i];
        if (idxA != -1) {
          (*out_rgba)[4 * i + 3] =
              reinterpret_cast<float **>(exr_image.images)[idxA][i];
        } else {
          // No alpha channel: emit fully opaque.
          (*out_rgba)[4 * i + 3] = 1.0;
        }
      }
    }
  }
  (*width) = exr_image.width;
  (*height) = exr_image.height;
  FreeEXRHeader(&exr_header);
  FreeEXRImage(&exr_image);
  return TINYEXR_SUCCESS;
}
// Reads the whole of `filename` into memory and decodes it via
// LoadEXRImageFromMemory(). Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_*
// code with a message in *err.
// Fixes vs. original: `fp` was leaked on the "file too short" early
// return; ftell() failure was unchecked (would yield a huge size_t); a
// short fread() was only guarded by assert(), a no-op in release builds.
int LoadEXRImageFromFile(EXRImage *exr_image, const EXRHeader *exr_header,
                         const char *filename, const char **err) {
  if (exr_image == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromFile", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__)  // MSVC, MinGW gcc or clang
  errno_t errcode =
      _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    // TODO(syoyo): return wfopen_s erro code
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#else
  // Unknown compiler
  fp = fopen(filename, "rb");
#endif
#else
  fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
  size_t filesize;
  // Compute size
  fseek(fp, 0, SEEK_END);
  const long file_offset = ftell(fp);
  if (file_offset < 0) {
    // ftell() failed (e.g. non-seekable stream); bail out cleanly.
    fclose(fp);
    tinyexr::SetErrorMessage(
        "Cannot determine size of file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
  filesize = static_cast<size_t>(file_offset);
  fseek(fp, 0, SEEK_SET);
  if (filesize < 16) {
    fclose(fp);  // was leaked on this early-error path
    tinyexr::SetErrorMessage("File size too short " + std::string(filename),
                             err);
    return TINYEXR_ERROR_INVALID_FILE;
  }
  std::vector<unsigned char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    fclose(fp);
    if (ret != filesize) {
      // Short read: report instead of relying on assert() (release no-op).
      tinyexr::SetErrorMessage("fread() error on " + std::string(filename),
                               err);
      return TINYEXR_ERROR_INVALID_FILE;
    }
  }
  return LoadEXRImageFromMemory(exr_image, exr_header, &buf.at(0), filesize,
                                err);
}
// Decodes the EXR pixel data located after the (already-parsed) header in
// `memory` of `size` bytes. `exr_header` must come from
// ParseEXRHeaderFromMemory() so that header_len is set.
// Fix: header_len + 8 was never validated against `size`, so a corrupt
// header could make `marker` point past the end of the buffer.
int LoadEXRImageFromMemory(EXRImage *exr_image, const EXRHeader *exr_header,
                           const unsigned char *memory, const size_t size,
                           const char **err) {
  if (exr_image == NULL || memory == NULL ||
      (size < tinyexr::kEXRVersionSize)) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromMemory",
                             err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  if (exr_header->header_len == 0) {
    tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  // Reject a header_len that would place the data marker beyond the end of
  // `memory` (would otherwise form an out-of-bounds pointer below).
  // Widen to 64-bit before adding to avoid overflow on a hostile value.
  if (static_cast<tinyexr::tinyexr_uint64>(exr_header->header_len) + 8ull >
      static_cast<tinyexr::tinyexr_uint64>(size)) {
    tinyexr::SetErrorMessage("Invalid header length.", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }
  const unsigned char *head = memory;
  const unsigned char *marker = reinterpret_cast<const unsigned char *>(
      memory + exr_header->header_len +
      8);  // +8 for magic number + version header.
  return tinyexr::DecodeEXRImage(exr_image, exr_header, head, marker, size,
                                 err);
}
namespace tinyexr
{
// out_data must be allocated initially with the block-header size
// of the current image(-part) type
// Encodes one block of planar image data (`images`, one pointer per
// channel) into EXR on-disk form and appends it to `out_data`:
// 1) gather `num_lines` scanlines starting at `line_no` into a scratch
//    buffer, converting each channel to its requested_pixel_type
//    (HALF<->FLOAT, pass-through for UINT) with byte-swapping applied;
// 2) compress the scratch buffer per `compression_type` and append.
// Returns false on an unsupported compression type (also asserts).
// NOTE(review): callers pre-size `out_data` with the block-header bytes
// (see the comment above this function) — this routine only appends.
static bool EncodePixelData(/* out */ std::vector<unsigned char>& out_data,
                            const unsigned char* const* images,
                            int compression_type,
                            int /*line_order*/,
                            int width,       // for tiled : tile.width
                            int /*height*/,  // for tiled : header.tile_size_y
                            int x_stride,    // for tiled : header.tile_size_x
                            int line_no,     // for tiled : 0
                            int num_lines,   // for tiled : tile.height
                            size_t pixel_data_size,
                            const std::vector<ChannelInfo>& channels,
                            const std::vector<size_t>& channel_offset_list,
                            const void* compression_param = 0) // zfp compression param
{
  // Uncompressed block size: width x lines x (sum of channel byte sizes).
  size_t buf_size = static_cast<size_t>(width) *
                    static_cast<size_t>(num_lines) *
                    static_cast<size_t>(pixel_data_size);
  //int last2bit = (buf_size & 3);
  // buf_size must be multiple of four
  //if(last2bit) buf_size += 4 - last2bit;
  std::vector<unsigned char> buf(buf_size);
  size_t start_y = static_cast<size_t>(line_no);
  // Gather & convert per channel. Within each scanline of `buf`, channel
  // data is laid out back-to-back at channel_offset_list[c] * width.
  for (size_t c = 0; c < channels.size(); c++) {
    if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
      if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        // Stored as HALF in memory, written as FLOAT on disk.
        for (int y = 0; y < num_lines; y++) {
          // Assume increasing Y
          float *line_ptr = reinterpret_cast<float *>(&buf.at(
              static_cast<size_t>(pixel_data_size * y * width) +
              channel_offset_list[c] *
                  static_cast<size_t>(width)));
          for (int x = 0; x < width; x++) {
            tinyexr::FP16 h16;
            h16.u = reinterpret_cast<const unsigned short * const *>(
                images)[c][(y + start_y) * x_stride + x];
            tinyexr::FP32 f32 = half_to_float(h16);
            tinyexr::swap4(&f32.f);
            // line_ptr[x] = f32.f;
            tinyexr::cpy4(line_ptr + x, &(f32.f));
          }
        }
      } else if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_HALF) {
        // HALF -> HALF: byte-swap only.
        for (int y = 0; y < num_lines; y++) {
          // Assume increasing Y
          unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
              &buf.at(static_cast<size_t>(pixel_data_size * y *
                                          width) +
                      channel_offset_list[c] *
                          static_cast<size_t>(width)));
          for (int x = 0; x < width; x++) {
            unsigned short val = reinterpret_cast<const unsigned short * const *>(
                images)[c][(y + start_y) * x_stride + x];
            tinyexr::swap2(&val);
            // line_ptr[x] = val;
            tinyexr::cpy2(line_ptr + x, &val);
          }
        }
      } else {
        assert(0);
      }
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
      if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_HALF) {
        // Stored as FLOAT in memory, written as HALF on disk.
        for (int y = 0; y < num_lines; y++) {
          // Assume increasing Y
          unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
              &buf.at(static_cast<size_t>(pixel_data_size * y *
                                          width) +
                      channel_offset_list[c] *
                          static_cast<size_t>(width)));
          for (int x = 0; x < width; x++) {
            tinyexr::FP32 f32;
            f32.f = reinterpret_cast<const float * const *>(
                images)[c][(y + start_y) * x_stride + x];
            tinyexr::FP16 h16;
            h16 = float_to_half_full(f32);
            tinyexr::swap2(reinterpret_cast<unsigned short *>(&h16.u));
            // line_ptr[x] = h16.u;
            tinyexr::cpy2(line_ptr + x, &(h16.u));
          }
        }
      } else if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        // FLOAT -> FLOAT: byte-swap only.
        for (int y = 0; y < num_lines; y++) {
          // Assume increasing Y
          float *line_ptr = reinterpret_cast<float *>(&buf.at(
              static_cast<size_t>(pixel_data_size * y * width) +
              channel_offset_list[c] *
                  static_cast<size_t>(width)));
          for (int x = 0; x < width; x++) {
            float val = reinterpret_cast<const float * const *>(
                images)[c][(y + start_y) * x_stride + x];
            tinyexr::swap4(&val);
            // line_ptr[x] = val;
            tinyexr::cpy4(line_ptr + x, &val);
          }
        }
      } else {
        assert(0);
      }
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
      // UINT: byte-swap only (no conversion path for UINT).
      for (int y = 0; y < num_lines; y++) {
        // Assume increasing Y
        unsigned int *line_ptr = reinterpret_cast<unsigned int *>(&buf.at(
            static_cast<size_t>(pixel_data_size * y * width) +
            channel_offset_list[c] * static_cast<size_t>(width)));
        for (int x = 0; x < width; x++) {
          unsigned int val = reinterpret_cast<const unsigned int * const *>(
              images)[c][(y + start_y) * x_stride + x];
          tinyexr::swap4(&val);
          // line_ptr[x] = val;
          tinyexr::cpy4(line_ptr + x, &val);
        }
      }
    }
  }
  // Compression stage: append the (possibly compressed) block to out_data.
  if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
    // 4 byte: scan line
    // 4 byte: data size
    // ~     : pixel data(uncompressed)
    out_data.insert(out_data.end(), buf.begin(), buf.end());
  } else if ((compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
             (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#if TINYEXR_USE_MINIZ
    std::vector<unsigned char> block(mz_compressBound(
        static_cast<unsigned long>(buf.size())));
#else
    std::vector<unsigned char> block(
        compressBound(static_cast<uLong>(buf.size())));
#endif
    tinyexr::tinyexr_uint64 outSize = block.size();
    tinyexr::CompressZip(&block.at(0), outSize,
                         reinterpret_cast<const unsigned char *>(&buf.at(0)),
                         static_cast<unsigned long>(buf.size()));
    // 4 byte: scan line
    // 4 byte: data size
    // ~     : pixel data(compressed)
    unsigned int data_len = static_cast<unsigned int>(outSize);  // truncate
    out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
    // (buf.size() * 3) / 2 would be enough.
    std::vector<unsigned char> block((buf.size() * 3) / 2);
    tinyexr::tinyexr_uint64 outSize = block.size();
    tinyexr::CompressRle(&block.at(0), outSize,
                         reinterpret_cast<const unsigned char *>(&buf.at(0)),
                         static_cast<unsigned long>(buf.size()));
    // 4 byte: scan line
    // 4 byte: data size
    // ~     : pixel data(compressed)
    unsigned int data_len = static_cast<unsigned int>(outSize);  // truncate
    out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
    unsigned int bufLen =
        8192 + static_cast<unsigned int>(
                   2 * static_cast<unsigned int>(
                           buf.size()));  // @fixme { compute good bound. }
    std::vector<unsigned char> block(bufLen);
    unsigned int outSize = static_cast<unsigned int>(block.size());
    CompressPiz(&block.at(0), &outSize,
                reinterpret_cast<const unsigned char *>(&buf.at(0)),
                buf.size(), channels, width, num_lines);
    // 4 byte: scan line
    // 4 byte: data size
    // ~     : pixel data(compressed)
    unsigned int data_len = outSize;
    out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);
#else
    assert(0);
#endif
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
    // NOTE(review): compression_param must point at a ZFPCompressionParam
    // when ZFP is selected — see callers.
    const ZFPCompressionParam* zfp_compression_param = reinterpret_cast<const ZFPCompressionParam*>(compression_param);
    std::vector<unsigned char> block;
    unsigned int outSize;
    tinyexr::CompressZfp(
        &block, &outSize, reinterpret_cast<const float *>(&buf.at(0)),
        width, num_lines, static_cast<int>(channels.size()), *zfp_compression_param);
    // 4 byte: scan line
    // 4 byte: data size
    // ~     : pixel data(compressed)
    unsigned int data_len = outSize;
    out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);
#else
    (void)compression_param;
    assert(0);
#endif
  } else {
    assert(0);
    return false;
  }
  return true;
}
// Encodes every tile of a single (mip/rip) level of a tiled image into
// data_list[start_index .. start_index + num_x_tiles*num_y_tiles).  Each
// entry becomes one tile chunk: a 5-int header (tileX, tileY, levelX,
// levelY, payload size — byte-swapped in place) followed by the pixel data
// compressed by EncodePixelData().  Tiles are encoded in parallel via
// std::thread (C++11 builds) or OpenMP when available; note the loop
// braces below deliberately span the #if/#else branches.
// Returns TINYEXR_SUCCESS or TINYEXR_ERROR_INVALID_DATA (message in *err).
static int EncodeTiledLevel(const EXRImage* level_image, const EXRHeader* exr_header,
                            const std::vector<tinyexr::ChannelInfo>& channels,
                            std::vector<std::vector<unsigned char> >& data_list,
                            size_t start_index,  // for data_list
                            int num_x_tiles, int num_y_tiles,
                            const std::vector<size_t>& channel_offset_list,
                            int pixel_data_size,
                            const void* compression_param,  // must be set if zfp compression is enabled
                            std::string* err) {
  int num_tiles = num_x_tiles * num_y_tiles;
  assert(num_tiles == level_image->num_tiles);
  // The base level (0,0) must be at least one tile in each dimension.
  if ((exr_header->tile_size_x > level_image->width || exr_header->tile_size_y > level_image->height) &&
      level_image->level_x == 0 && level_image->level_y == 0) {
    if (err) {
      (*err) += "Failed to encode tile data.\n";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }
  // Shared failure flag; atomic when worker threads are used.
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
  std::atomic<bool> invalid_data(false);
#else
  bool invalid_data(false);
#endif
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
  std::vector<std::thread> workers;
  std::atomic<int> tile_count(0);
  int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
  if (num_threads > int(num_tiles)) {
    num_threads = int(num_tiles);
  }
  // Each worker pulls the next tile index from the shared atomic counter.
  for (int t = 0; t < num_threads; t++) {
    workers.emplace_back(std::thread([&]() {
      int i = 0;
      while ((i = tile_count++) < num_tiles) {
#else
  // Use signed int since some OpenMP compiler doesn't allow unsigned type for
  // `parallel for`
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
  for (int i = 0; i < num_tiles; i++) {
#endif
    size_t tile_idx = static_cast<size_t>(i);
    size_t data_idx = tile_idx + start_index;
    // Tile grid coordinates from the linear index (row-major).
    int x_tile = i % num_x_tiles;
    int y_tile = i / num_x_tiles;
    EXRTile& tile = level_image->tiles[tile_idx];
    const unsigned char* const* images =
        static_cast<const unsigned char* const*>(tile.images);
    // Reserve the 5-int tile-chunk header up front; pixel data follows it.
    data_list[data_idx].resize(5*sizeof(int));
    size_t data_header_size = data_list[data_idx].size();
    bool ret = EncodePixelData(data_list[data_idx],
                               images,
                               exr_header->compression_type,
                               0,  // increasing y
                               tile.width,
                               exr_header->tile_size_y,
                               exr_header->tile_size_x,
                               0,
                               tile.height,
                               pixel_data_size,
                               channels,
                               channel_offset_list,
                               compression_param);
    if (!ret) {
      invalid_data = true;
      continue;
    }
    assert(data_list[data_idx].size() > data_header_size);
    int data_len = static_cast<int>(data_list[data_idx].size() - data_header_size);
    //tileX, tileY, levelX, levelY // pixel_data_size(int)
    memcpy(&data_list[data_idx][0], &x_tile, sizeof(int));
    memcpy(&data_list[data_idx][4], &y_tile, sizeof(int));
    memcpy(&data_list[data_idx][8], &level_image->level_x, sizeof(int));
    memcpy(&data_list[data_idx][12], &level_image->level_y, sizeof(int));
    memcpy(&data_list[data_idx][16], &data_len, sizeof(int));
    // Byte-swap each header field in place for the on-disk byte order.
    swap4(reinterpret_cast<int*>(&data_list[data_idx][0]));
    swap4(reinterpret_cast<int*>(&data_list[data_idx][4]));
    swap4(reinterpret_cast<int*>(&data_list[data_idx][8]));
    swap4(reinterpret_cast<int*>(&data_list[data_idx][12]));
    swap4(reinterpret_cast<int*>(&data_list[data_idx][16]));
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
      }
    }));
  }
  for (auto &t : workers) {
    t.join();
  }
#else
  }  // omp parallel
#endif
  if (invalid_data) {
    if (err) {
      (*err) += "Failed to encode tile data.\n";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }
  return TINYEXR_SUCCESS;
}
// Number of scanlines stored per chunk for a given compression scheme:
// PIZ uses 32, ZIP and ZFP use 16, everything else uses 1.
static int NumScanlines(int compression_type) {
  if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    return 32;
  }
  if ((compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP)) {
    return 16;
  }
  return 1;
}
static int EncodeChunk(const EXRImage* exr_image, const EXRHeader* exr_header,
const std::vector<ChannelInfo>& channels,
int num_blocks,
tinyexr_uint64 chunk_offset, // starting offset of current chunk
bool is_multipart,
OffsetData& offset_data, // output block offsets, must be initialized
std::vector<std::vector<unsigned char> >& data_list, // output
tinyexr_uint64& total_size, // output: ending offset of current chunk
std::string* err) {
int num_scanlines = NumScanlines(exr_header->compression_type);
data_list.resize(num_blocks);
std::vector<size_t> channel_offset_list(
static_cast<size_t>(exr_header->num_channels));
int pixel_data_size = 0;
{
size_t channel_offset = 0;
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
channel_offset_list[c] = channel_offset;
if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_HALF) {
pixel_data_size += sizeof(unsigned short);
channel_offset += sizeof(unsigned short);
} else if (channels[c].requested_pixel_type ==
TINYEXR_PIXELTYPE_FLOAT) {
pixel_data_size += sizeof(float);
channel_offset += sizeof(float);
} else if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_UINT) {
pixel_data_size += sizeof(unsigned int);
channel_offset += sizeof(unsigned int);
} else {
assert(0);
}
}
}
const void* compression_param = 0;
#if TINYEXR_USE_ZFP
tinyexr::ZFPCompressionParam zfp_compression_param;
// Use ZFP compression parameter from custom attributes(if such a parameter
// exists)
{
std::string e;
bool ret = tinyexr::FindZFPCompressionParam(
&zfp_compression_param, exr_header->custom_attributes,
exr_header->num_custom_attributes, &e);
if (!ret) {
// Use predefined compression parameter.
zfp_compression_param.type = 0;
zfp_compression_param.rate = 2;
}
compression_param = &zfp_compression_param;
}
#endif
tinyexr_uint64 offset = chunk_offset;
tinyexr_uint64 doffset = is_multipart ? 4u : 0u;
if (exr_image->tiles) {
const EXRImage* level_image = exr_image;
size_t block_idx = 0;
tinyexr::tinyexr_uint64 block_data_size = 0;
int num_levels = (exr_header->tile_level_mode != TINYEXR_TILE_RIPMAP_LEVELS) ?
offset_data.num_x_levels : (offset_data.num_x_levels * offset_data.num_y_levels);
for (int level_index = 0; level_index < num_levels; ++level_index) {
if (!level_image) {
if (err) {
(*err) += "Invalid number of tiled levels for EncodeChunk\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
int level_index_from_image = LevelIndex(level_image->level_x, level_image->level_y,
exr_header->tile_level_mode, offset_data.num_x_levels);
if (level_index_from_image != level_index) {
if (err) {
(*err) += "Incorrect level ordering in tiled image\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
int num_y_tiles = (int)offset_data.offsets[level_index].size();
assert(num_y_tiles);
int num_x_tiles = (int)offset_data.offsets[level_index][0].size();
assert(num_x_tiles);
std::string e;
int ret = EncodeTiledLevel(level_image,
exr_header,
channels,
data_list,
block_idx,
num_x_tiles,
num_y_tiles,
channel_offset_list,
pixel_data_size,
compression_param,
&e);
if (ret != TINYEXR_SUCCESS) {
if (!e.empty() && err) {
(*err) += e;
}
return ret;
}
for (size_t j = 0; j < static_cast<size_t>(num_y_tiles); ++j)
for (size_t i = 0; i < static_cast<size_t>(num_x_tiles); ++i) {
offset_data.offsets[level_index][j][i] = offset;
swap8(reinterpret_cast<tinyexr_uint64*>(&offset_data.offsets[level_index][j][i]));
offset += data_list[block_idx].size() + doffset;
block_data_size += data_list[block_idx].size();
++block_idx;
}
level_image = level_image->next_level;
}
assert(static_cast<int>(block_idx) == num_blocks);
total_size = offset;
} else { // scanlines
std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data.offsets[0][0];
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
std::atomic<bool> invalid_data(false);
std::vector<std::thread> workers;
std::atomic<int> block_count(0);
int num_threads = std::min(std::max(1, int(std::thread::hardware_concurrency())), num_blocks);
for (int t = 0; t < num_threads; t++) {
workers.emplace_back(std::thread([&]() {
int i = 0;
while ((i = block_count++) < num_blocks) {
#else
bool invalid_data(false);
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
for (int i = 0; i < num_blocks; i++) {
#endif
int start_y = num_scanlines * i;
int end_Y = (std::min)(num_scanlines * (i + 1), exr_image->height);
int num_lines = end_Y - start_y;
const unsigned char* const* images =
static_cast<const unsigned char* const*>(exr_image->images);
data_list[i].resize(2*sizeof(int));
size_t data_header_size = data_list[i].size();
bool ret = EncodePixelData(data_list[i],
images,
exr_header->compression_type,
0, // increasing y
exr_image->width,
exr_image->height,
exr_image->width,
start_y,
num_lines,
pixel_data_size,
channels,
channel_offset_list,
compression_param);
if (!ret) {
invalid_data = true;
continue; // "break" cannot be used with OpenMP
}
assert(data_list[i].size() > data_header_size);
int data_len = static_cast<int>(data_list[i].size() - data_header_size);
memcpy(&data_list[i][0], &start_y, sizeof(int));
memcpy(&data_list[i][4], &data_len, sizeof(int));
swap4(reinterpret_cast<int*>(&data_list[i][0]));
swap4(reinterpret_cast<int*>(&data_list[i][4]));
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
}
}));
}
for (auto &t : workers) {
t.join();
}
#else
} // omp parallel
#endif
if (invalid_data) {
if (err) {
(*err) += "Failed to encode scanline data.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) {
offsets[i] = offset;
tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offsets[i]));
offset += data_list[i].size() + doffset;
}
total_size = static_cast<size_t>(offset);
}
return TINYEXR_SUCCESS;
}
// Serializes a single- or multi-part EXR image (scanline or tiled; no deep
// formats) into a freshly malloc()ed buffer.
//
// exr_images  : array of `num_parts` images.
// exr_headers : array of `num_parts` header pointers (one per image).
// num_parts   : number of parts; > 1 produces a multipart file.
// memory_out  : receives the malloc()ed buffer; caller must free() it.
// err         : optional error string (free with FreeEXRErrorMessage).
// Returns the number of bytes written, or 0 on failure.
static size_t SaveEXRNPartImageToMemory(const EXRImage* exr_images,
                                        const EXRHeader** exr_headers,
                                        unsigned int num_parts,
                                        unsigned char** memory_out, const char** err) {
  if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
      memory_out == NULL) {
    SetErrorMessage("Invalid argument for SaveEXRNPartImageToMemory",
                    err);
    return 0;
  }
  // Validate the compression settings of every part up front.
  {
    for (unsigned int i = 0; i < num_parts; ++i) {
      if (exr_headers[i]->compression_type < 0) {
        SetErrorMessage("Invalid argument for SaveEXRNPartImageToMemory",
                        err);
        return 0;
      }
#if !TINYEXR_USE_PIZ
      if (exr_headers[i]->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
        SetErrorMessage("PIZ compression is not supported in this build",
                        err);
        return 0;
      }
#endif
#if !TINYEXR_USE_ZFP
      if (exr_headers[i]->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
        SetErrorMessage("ZFP compression is not supported in this build",
                        err);
        return 0;
      }
#else
      // FIX: this loop previously read `exr_header->num_channels`, an
      // identifier that does not exist in this scope, so the function
      // failed to compile whenever TINYEXR_USE_ZFP was enabled.
      for (int c = 0; c < exr_headers[i]->num_channels; ++c) {
        if (exr_headers[i]->requested_pixel_types[c] != TINYEXR_PIXELTYPE_FLOAT) {
          SetErrorMessage("Pixel type must be FLOAT for ZFP compression",
                          err);
          return 0;
        }
      }
#endif
    }
  }
  std::vector<unsigned char> memory;
  // Magic number.
  {
    const char header[] = { 0x76, 0x2f, 0x31, 0x01 };
    memory.insert(memory.end(), header, header + 4);
  }
  // Version field.
  // using value from the first header
  int long_name = exr_headers[0]->long_name;
  {
    char marker[] = { 2, 0, 0, 0 };
    /* @todo
    if (exr_header->non_image) {
      marker[1] |= 0x8;
    }
    */
    // tiled
    if (num_parts == 1 && exr_images[0].tiles) {
      marker[1] |= 0x2;
    }
    // long_name
    if (long_name) {
      marker[1] |= 0x4;
    }
    // multipart
    if (num_parts > 1) {
      marker[1] |= 0x10;
    }
    memory.insert(memory.end(), marker, marker + 4);
  }
  // Count chunks per part and set up (empty) offset tables.
  int total_chunk_count = 0;
  std::vector<int> chunk_count(num_parts);
  std::vector<OffsetData> offset_data(num_parts);
  for (unsigned int i = 0; i < num_parts; ++i) {
    if (!exr_images[i].tiles) {
      int num_scanlines = NumScanlines(exr_headers[i]->compression_type);
      chunk_count[i] =
        (exr_images[i].height + num_scanlines - 1) / num_scanlines;
      InitSingleResolutionOffsets(offset_data[i], chunk_count[i]);
      total_chunk_count += chunk_count[i];
    } else {
      {
        std::vector<int> num_x_tiles, num_y_tiles;
        PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_headers[i]);
        chunk_count[i] =
          InitTileOffsets(offset_data[i], exr_headers[i], num_x_tiles, num_y_tiles);
        total_chunk_count += chunk_count[i];
      }
    }
  }
  // Write attributes to memory buffer.
  std::vector< std::vector<tinyexr::ChannelInfo> > channels(num_parts);
  {
    std::set<std::string> partnames;
    for (unsigned int i = 0; i < num_parts; ++i) {
      //channels
      {
        std::vector<unsigned char> data;
        for (int c = 0; c < exr_headers[i]->num_channels; c++) {
          tinyexr::ChannelInfo info;
          info.p_linear = 0;
          info.pixel_type = exr_headers[i]->pixel_types[c];
          info.requested_pixel_type = exr_headers[i]->requested_pixel_types[c];
          info.x_sampling = 1;
          info.y_sampling = 1;
          info.name = std::string(exr_headers[i]->channels[c].name);
          channels[i].push_back(info);
        }
        tinyexr::WriteChannelInfo(data, channels[i]);
        tinyexr::WriteAttributeToMemory(&memory, "channels", "chlist", &data.at(0),
                                        static_cast<int>(data.size()));
      }
      {
        int comp = exr_headers[i]->compression_type;
        swap4(&comp);
        WriteAttributeToMemory(
          &memory, "compression", "compression",
          reinterpret_cast<const unsigned char*>(&comp), 1);
      }
      {
        int data[4] = { 0, 0, exr_images[i].width - 1, exr_images[i].height - 1 };
        swap4(&data[0]);
        swap4(&data[1]);
        swap4(&data[2]);
        swap4(&data[3]);
        WriteAttributeToMemory(
          &memory, "dataWindow", "box2i",
          reinterpret_cast<const unsigned char*>(data), sizeof(int) * 4);
        int data0[4] = { 0, 0, exr_images[0].width - 1, exr_images[0].height - 1 };
        swap4(&data0[0]);
        swap4(&data0[1]);
        swap4(&data0[2]);
        swap4(&data0[3]);
        // Note: must be the same across parts (currently, using value from the first header)
        WriteAttributeToMemory(
          &memory, "displayWindow", "box2i",
          reinterpret_cast<const unsigned char*>(data0), sizeof(int) * 4);
      }
      {
        unsigned char line_order = 0; // @fixme { read line_order from EXRHeader }
        WriteAttributeToMemory(&memory, "lineOrder", "lineOrder",
                               &line_order, 1);
      }
      {
        // Note: must be the same across parts
        float aspectRatio = 1.0f;
        swap4(&aspectRatio);
        WriteAttributeToMemory(
          &memory, "pixelAspectRatio", "float",
          reinterpret_cast<const unsigned char*>(&aspectRatio), sizeof(float));
      }
      {
        float center[2] = { 0.0f, 0.0f };
        swap4(&center[0]);
        swap4(&center[1]);
        WriteAttributeToMemory(
          &memory, "screenWindowCenter", "v2f",
          reinterpret_cast<const unsigned char*>(center), 2 * sizeof(float));
      }
      {
        float w = 1.0f;
        swap4(&w);
        WriteAttributeToMemory(&memory, "screenWindowWidth", "float",
                               reinterpret_cast<const unsigned char*>(&w),
                               sizeof(float));
      }
      if (exr_images[i].tiles) {
        // tiledesc: 2 x uint (tile size) + 1 byte (level/rounding mode).
        unsigned char tile_mode = static_cast<unsigned char>(exr_headers[i]->tile_level_mode & 0x3);
        if (exr_headers[i]->tile_rounding_mode) tile_mode |= (1u << 4u);
        //unsigned char data[9] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
        unsigned int datai[3] = { 0, 0, 0 };
        unsigned char* data = reinterpret_cast<unsigned char*>(&datai[0]);
        datai[0] = static_cast<unsigned int>(exr_headers[i]->tile_size_x);
        datai[1] = static_cast<unsigned int>(exr_headers[i]->tile_size_y);
        data[8] = tile_mode;
        swap4(reinterpret_cast<unsigned int*>(&data[0]));
        swap4(reinterpret_cast<unsigned int*>(&data[4]));
        WriteAttributeToMemory(
          &memory, "tiles", "tiledesc",
          reinterpret_cast<const unsigned char*>(data), 9);
      }
      // must be present for multi-part files - according to spec.
      if (num_parts > 1) {
        // name
        {
          size_t len = 0;
          if ((len = strlen(exr_headers[i]->name)) > 0) {
            partnames.insert(std::string(exr_headers[i]->name));
            if (partnames.size() != i + 1) {
              SetErrorMessage("'name' attributes must be unique for a multi-part file", err);
              return 0;
            }
            WriteAttributeToMemory(
              &memory, "name", "string",
              reinterpret_cast<const unsigned char*>(exr_headers[i]->name),
              static_cast<int>(len));
          } else {
            SetErrorMessage("Invalid 'name' attribute for a multi-part file", err);
            return 0;
          }
        }
        // type
        {
          const char* type = "scanlineimage";
          if (exr_images[i].tiles) type = "tiledimage";
          WriteAttributeToMemory(
            &memory, "type", "string",
            reinterpret_cast<const unsigned char*>(type),
            static_cast<int>(strlen(type)));
        }
        // chunkCount
        {
          WriteAttributeToMemory(
            &memory, "chunkCount", "int",
            reinterpret_cast<const unsigned char*>(&chunk_count[i]),
            4);
        }
      }
      // Custom attributes
      if (exr_headers[i]->num_custom_attributes > 0) {
        for (int j = 0; j < exr_headers[i]->num_custom_attributes; j++) {
          tinyexr::WriteAttributeToMemory(
            &memory, exr_headers[i]->custom_attributes[j].name,
            exr_headers[i]->custom_attributes[j].type,
            reinterpret_cast<const unsigned char*>(
              exr_headers[i]->custom_attributes[j].value),
            exr_headers[i]->custom_attributes[j].size);
        }
      }
      { // end of header
        memory.push_back(0);
      }
    }
  }
  if (num_parts > 1) {
    // end of header list
    memory.push_back(0);
  }
  // First chunk starts right after the offset tables.
  tinyexr_uint64 chunk_offset = memory.size() + size_t(total_chunk_count) * sizeof(tinyexr_uint64);
  tinyexr_uint64 total_size = 0;
  std::vector< std::vector< std::vector<unsigned char> > > data_lists(num_parts);
  for (unsigned int i = 0; i < num_parts; ++i) {
    std::string e;
    int ret = EncodeChunk(&exr_images[i], exr_headers[i],
                          channels[i],
                          chunk_count[i],
                          // starting offset of current chunk after part-number
                          chunk_offset,
                          num_parts > 1,
                          offset_data[i], // output: block offsets, must be initialized
                          data_lists[i], // output
                          total_size, // output
                          &e);
    if (ret != TINYEXR_SUCCESS) {
      if (!e.empty()) {
        tinyexr::SetErrorMessage(e, err);
      }
      return 0;
    }
    chunk_offset = total_size;
  }
  // Allocating required memory
  if (total_size == 0) { // something went wrong
    tinyexr::SetErrorMessage("Output memory size is zero", err);
    return 0;
  }
  (*memory_out) = static_cast<unsigned char*>(malloc(size_t(total_size)));
  // FIX: the malloc() result was previously used unchecked.
  if (*memory_out == NULL) {
    tinyexr::SetErrorMessage("Memory allocation failed", err);
    return 0;
  }
  // Writing header
  memcpy((*memory_out), &memory[0], memory.size());
  unsigned char* memory_ptr = *memory_out + memory.size();
  size_t sum = memory.size();
  // Writing offset data for chunks
  for (unsigned int i = 0; i < num_parts; ++i) {
    if (exr_images[i].tiles) {
      const EXRImage* level_image = &exr_images[i];
      int num_levels = (exr_headers[i]->tile_level_mode != TINYEXR_TILE_RIPMAP_LEVELS) ?
        offset_data[i].num_x_levels : (offset_data[i].num_x_levels * offset_data[i].num_y_levels);
      for (int level_index = 0; level_index < num_levels; ++level_index) {
        for (size_t j = 0; j < offset_data[i].offsets[level_index].size(); ++j) {
          size_t num_bytes = sizeof(tinyexr_uint64) * offset_data[i].offsets[level_index][j].size();
          sum += num_bytes;
          assert(sum <= total_size);
          memcpy(memory_ptr,
                 reinterpret_cast<unsigned char*>(&offset_data[i].offsets[level_index][j][0]),
                 num_bytes);
          memory_ptr += num_bytes;
        }
        level_image = level_image->next_level;
      }
    } else {
      size_t num_bytes = sizeof(tinyexr::tinyexr_uint64) * static_cast<size_t>(chunk_count[i]);
      sum += num_bytes;
      assert(sum <= total_size);
      std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data[i].offsets[0][0];
      memcpy(memory_ptr, reinterpret_cast<unsigned char*>(&offsets[0]), num_bytes);
      memory_ptr += num_bytes;
    }
  }
  // Writing chunk data
  for (unsigned int i = 0; i < num_parts; ++i) {
    for (size_t j = 0; j < static_cast<size_t>(chunk_count[i]); ++j) {
      if (num_parts > 1) {
        // Multipart chunks are prefixed with their part number.
        sum += 4;
        assert(sum <= total_size);
        unsigned int part_number = i;
        swap4(&part_number);
        memcpy(memory_ptr, &part_number, 4);
        memory_ptr += 4;
      }
      sum += data_lists[i][j].size();
      assert(sum <= total_size);
      memcpy(memory_ptr, &data_lists[i][j][0], data_lists[i][j].size());
      memory_ptr += data_lists[i][j].size();
    }
  }
  assert(sum == total_size);
  return size_t(total_size); // OK
}
} // tinyexr
// Serializes a single-part EXR image to a malloc()ed buffer by treating
// it as an N-part image with exactly one part.
size_t SaveEXRImageToMemory(const EXRImage* exr_image,
                            const EXRHeader* exr_header,
                            unsigned char** memory_out, const char** err) {
  const EXRHeader* headers[1] = { exr_header };
  return tinyexr::SaveEXRNPartImageToMemory(exr_image, headers, 1, memory_out,
                                            err);
}
// Serializes a single-part EXR image and writes it to `filename`.
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code; `err` (optional)
// receives a message to be released with FreeEXRErrorMessage().
int SaveEXRImageToFile(const EXRImage *exr_image, const EXRHeader *exr_header,
                       const char *filename, const char **err) {
  // FIX: also reject a NULL exr_header — the original dereferenced
  // exr_header->compression_type before any NULL check.
  if (exr_image == NULL || exr_header == NULL || filename == NULL ||
      exr_header->compression_type < 0) {
    tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToFile", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
#if !TINYEXR_USE_PIZ
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    tinyexr::SetErrorMessage("PIZ compression is not supported in this build",
                             err);
    return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
  }
#endif
#if !TINYEXR_USE_ZFP
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    tinyexr::SetErrorMessage("ZFP compression is not supported in this build",
                             err);
    return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
  }
#endif
  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang
  errno_t errcode =
    _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"wb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }
#else
  // Unknown compiler
  fp = fopen(filename, "wb");
#endif
#else
  fp = fopen(filename, "wb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }
  unsigned char *mem = NULL;
  size_t mem_size = SaveEXRImageToMemory(exr_image, exr_header, &mem, err);
  if (mem_size == 0) {
    // FIX: the FILE handle was previously leaked on this error path.
    fclose(fp);
    return TINYEXR_ERROR_SERIALZATION_FAILED;
  }
  size_t written_size = 0;
  if ((mem_size > 0) && mem) {
    written_size = fwrite(mem, 1, mem_size, fp);
  }
  free(mem);
  fclose(fp);
  if (written_size != mem_size) {
    tinyexr::SetErrorMessage("Cannot write a file", err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }
  return TINYEXR_SUCCESS;
}
// Serializes a multi-part (>= 2 parts) EXR image to a malloc()ed buffer.
// Returns the number of bytes written, or 0 on failure.
size_t SaveEXRMultipartImageToMemory(const EXRImage* exr_images,
                                     const EXRHeader** exr_headers,
                                     unsigned int num_parts,
                                     unsigned char** memory_out, const char** err) {
  if (exr_images == NULL || exr_headers == NULL || num_parts < 2 ||
      memory_out == NULL) {
    // FIX: the message previously named the internal helper
    // (SaveEXRNPartImageToMemory) instead of this public entry point,
    // unlike the matching check in SaveEXRMultipartImageToFile.
    tinyexr::SetErrorMessage(
        "Invalid argument for SaveEXRMultipartImageToMemory", err);
    return 0;
  }
  return tinyexr::SaveEXRNPartImageToMemory(exr_images, exr_headers, num_parts,
                                            memory_out, err);
}
// Serializes a multi-part (>= 2 parts) EXR image and writes it to `filename`.
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code.
int SaveEXRMultipartImageToFile(const EXRImage* exr_images,
                                const EXRHeader** exr_headers,
                                unsigned int num_parts,
                                const char* filename,
                                const char** err) {
  // FIX: `filename` was previously dereferenced (fopen/std::string) without
  // being validated here.
  if (exr_images == NULL || exr_headers == NULL || num_parts < 2 ||
      filename == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for SaveEXRMultipartImageToFile",
                             err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang
  errno_t errcode =
    _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"wb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }
#else
  // Unknown compiler
  fp = fopen(filename, "wb");
#endif
#else
  fp = fopen(filename, "wb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }
  unsigned char *mem = NULL;
  size_t mem_size = SaveEXRMultipartImageToMemory(exr_images, exr_headers,
                                                  num_parts, &mem, err);
  if (mem_size == 0) {
    // FIX: the FILE handle was previously leaked on this error path.
    fclose(fp);
    return TINYEXR_ERROR_SERIALZATION_FAILED;
  }
  size_t written_size = 0;
  if ((mem_size > 0) && mem) {
    written_size = fwrite(mem, 1, mem_size, fp);
  }
  free(mem);
  fclose(fp);
  if (written_size != mem_size) {
    tinyexr::SetErrorMessage("Cannot write a file", err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }
  return TINYEXR_SUCCESS;
}
// Loads a deep-scanline EXR file into `deep_image`. Only ZIPS/ZIP-style
// deep scanline images produced by OpenEXR 2.x are supported.
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code.
// NOTE(review): allocations made before a mid-function error return are not
// released — pre-existing behavior, left unchanged here.
int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) {
  // FIX: `filename` was previously dereferenced without being validated.
  if (deep_image == NULL || filename == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for LoadDeepEXR", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
#ifdef _WIN32
  FILE *fp = NULL;
#if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang
  errno_t errcode =
    _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#else
  // Unknown compiler
  fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#else
  FILE *fp = fopen(filename, "rb");
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#endif
  size_t filesize;
  // Compute size
  fseek(fp, 0, SEEK_END);
  filesize = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);
  if (filesize == 0) {
    fclose(fp);
    tinyexr::SetErrorMessage("File size is zero : " + std::string(filename),
                             err);
    return TINYEXR_ERROR_INVALID_FILE;
  }
  std::vector<char> buf(filesize); // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    assert(ret == filesize);
    (void)ret;
  }
  fclose(fp);
  const char *head = &buf[0];
  const char *marker = &buf[0];
  // Header check.
  {
    const char header[] = {0x76, 0x2f, 0x31, 0x01};
    if (memcmp(marker, header, 4) != 0) {
      tinyexr::SetErrorMessage("Invalid magic number", err);
      return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
    }
    marker += 4;
  }
  // Version field.
  {
    // Only version 2, scanline, with the deep bit (0x8 in the second byte)
    // set, i.e. [2, 8, 0, 0], is supported.
    if (marker[0] != 2 || marker[1] != 8 || marker[2] != 0 || marker[3] != 0) {
      tinyexr::SetErrorMessage("Unsupported version or scanline", err);
      return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
    }
    marker += 4;
  }
  int dx = -1;
  int dy = -1;
  int dw = -1;
  int dh = -1;
  int num_scanline_blocks = 1; // 16 for ZIP compression.
  int compression_type = -1;
  int num_channels = -1;
  std::vector<tinyexr::ChannelInfo> channels;
  // Read attributes
  size_t size = filesize - tinyexr::kEXRVersionSize;
  for (;;) {
    if (0 == size) {
      return TINYEXR_ERROR_INVALID_DATA;
    } else if (marker[0] == '\0') {
      marker++;
      size--;
      break;
    }
    std::string attr_name;
    std::string attr_type;
    std::vector<unsigned char> data;
    size_t marker_size;
    if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
                                marker, size)) {
      std::stringstream ss;
      ss << "Failed to parse attribute\n";
      tinyexr::SetErrorMessage(ss.str(), err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    marker += marker_size;
    size -= marker_size;
    if (attr_name.compare("compression") == 0) {
      compression_type = data[0];
      if (compression_type > TINYEXR_COMPRESSIONTYPE_PIZ) {
        std::stringstream ss;
        ss << "Unsupported compression type : " << compression_type;
        tinyexr::SetErrorMessage(ss.str(), err);
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
      }
      if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
        num_scanline_blocks = 16;
      }
    } else if (attr_name.compare("channels") == 0) {
      // name: zero-terminated string, from 1 to 255 bytes long
      // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
      // pLinear: unsigned char, possible values are 0 and 1
      // reserved: three chars, should be zero
      // xSampling: int
      // ySampling: int
      if (!tinyexr::ReadChannelInfo(channels, data)) {
        tinyexr::SetErrorMessage("Failed to parse channel info", err);
        return TINYEXR_ERROR_INVALID_DATA;
      }
      num_channels = static_cast<int>(channels.size());
      if (num_channels < 1) {
        tinyexr::SetErrorMessage("Invalid channels format", err);
        return TINYEXR_ERROR_INVALID_DATA;
      }
    } else if (attr_name.compare("dataWindow") == 0) {
      memcpy(&dx, &data.at(0), sizeof(int));
      memcpy(&dy, &data.at(4), sizeof(int));
      memcpy(&dw, &data.at(8), sizeof(int));
      memcpy(&dh, &data.at(12), sizeof(int));
      tinyexr::swap4(&dx);
      tinyexr::swap4(&dy);
      tinyexr::swap4(&dw);
      tinyexr::swap4(&dh);
    } else if (attr_name.compare("displayWindow") == 0) {
      // Parsed but currently unused.
      int x;
      int y;
      int w;
      int h;
      memcpy(&x, &data.at(0), sizeof(int));
      memcpy(&y, &data.at(4), sizeof(int));
      memcpy(&w, &data.at(8), sizeof(int));
      memcpy(&h, &data.at(12), sizeof(int));
      tinyexr::swap4(&x);
      tinyexr::swap4(&y);
      tinyexr::swap4(&w);
      tinyexr::swap4(&h);
    }
  }
  assert(dx >= 0);
  assert(dy >= 0);
  assert(dw >= 0);
  assert(dh >= 0);
  assert(num_channels >= 1);
  int data_width = dw - dx + 1;
  int data_height = dh - dy + 1;
  // Read offset tables.
  int num_blocks = data_height / num_scanline_blocks;
  if (num_blocks * num_scanline_blocks < data_height) {
    num_blocks++;
  }
  std::vector<tinyexr::tinyexr_int64> offsets(static_cast<size_t>(num_blocks));
  for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
    tinyexr::tinyexr_int64 offset;
    memcpy(&offset, marker, sizeof(tinyexr::tinyexr_int64));
    tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offset));
    marker += sizeof(tinyexr::tinyexr_int64); // = 8
    offsets[y] = offset;
  }
#if TINYEXR_USE_PIZ
  if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ)) {
#else
  if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#endif
    // OK
  } else {
    tinyexr::SetErrorMessage("Unsupported compression format", err);
    return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
  }
  // Allocate per-channel row-pointer arrays; the row buffers themselves are
  // allocated below, once the per-row sample count is known.
  deep_image->image = static_cast<float ***>(
    malloc(sizeof(float **) * static_cast<size_t>(num_channels)));
  for (int c = 0; c < num_channels; c++) {
    deep_image->image[c] = static_cast<float **>(
      malloc(sizeof(float *) * static_cast<size_t>(data_height)));
  }
  deep_image->offset_table = static_cast<int **>(
    malloc(sizeof(int *) * static_cast<size_t>(data_height)));
  for (int y = 0; y < data_height; y++) {
    deep_image->offset_table[y] = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(data_width)));
  }
  for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
    const unsigned char *data_ptr =
      reinterpret_cast<const unsigned char *>(head + offsets[y]);
    // Deep scanline block layout:
    // int: y coordinate
    // int64: packed size of pixel offset table
    // int64: packed size of sample data
    // int64: unpacked size of sample data
    // compressed pixel offset table
    // compressed sample data
    int line_no;
    tinyexr::tinyexr_int64 packedOffsetTableSize;
    tinyexr::tinyexr_int64 packedSampleDataSize;
    tinyexr::tinyexr_int64 unpackedSampleDataSize;
    memcpy(&line_no, data_ptr, sizeof(int));
    memcpy(&packedOffsetTableSize, data_ptr + 4,
           sizeof(tinyexr::tinyexr_int64));
    memcpy(&packedSampleDataSize, data_ptr + 12,
           sizeof(tinyexr::tinyexr_int64));
    memcpy(&unpackedSampleDataSize, data_ptr + 20,
           sizeof(tinyexr::tinyexr_int64));
    tinyexr::swap4(&line_no);
    tinyexr::swap8(
      reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedOffsetTableSize));
    tinyexr::swap8(
      reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedSampleDataSize));
    tinyexr::swap8(
      reinterpret_cast<tinyexr::tinyexr_uint64 *>(&unpackedSampleDataSize));
    std::vector<int> pixelOffsetTable(static_cast<size_t>(data_width));
    // decode pixel offset table.
    {
      unsigned long dstLen =
        static_cast<unsigned long>(pixelOffsetTable.size() * sizeof(int));
      if (!tinyexr::DecompressZip(
            reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)),
            &dstLen, data_ptr + 28,
            static_cast<unsigned long>(packedOffsetTableSize))) {
        // FIX: was `return false;` — false converts to 0, which equals
        // TINYEXR_SUCCESS, so decode failures were reported as success.
        tinyexr::SetErrorMessage("Failed to decode pixel offset table", err);
        return TINYEXR_ERROR_INVALID_DATA;
      }
      assert(dstLen == pixelOffsetTable.size() * sizeof(int));
      for (size_t i = 0; i < static_cast<size_t>(data_width); i++) {
        deep_image->offset_table[y][i] = pixelOffsetTable[i];
      }
    }
    std::vector<unsigned char> sample_data(
      static_cast<size_t>(unpackedSampleDataSize));
    // decode sample data.
    {
      unsigned long dstLen = static_cast<unsigned long>(unpackedSampleDataSize);
      if (dstLen) {
        if (!tinyexr::DecompressZip(
              reinterpret_cast<unsigned char *>(&sample_data.at(0)), &dstLen,
              data_ptr + 28 + packedOffsetTableSize,
              static_cast<unsigned long>(packedSampleDataSize))) {
          // FIX: same silent-success bug as above.
          tinyexr::SetErrorMessage("Failed to decode sample data", err);
          return TINYEXR_ERROR_INVALID_DATA;
        }
        assert(dstLen == static_cast<unsigned long>(unpackedSampleDataSize));
      }
    }
    // decode sample
    int sampleSize = -1;
    std::vector<int> channel_offset_list(static_cast<size_t>(num_channels));
    {
      // Byte offset of each channel within one interleaved sample.
      int channel_offset = 0;
      for (size_t i = 0; i < static_cast<size_t>(num_channels); i++) {
        channel_offset_list[i] = channel_offset;
        if (channels[i].pixel_type == TINYEXR_PIXELTYPE_UINT) { // UINT
          channel_offset += 4;
        } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_HALF) { // half
          channel_offset += 2;
        } else if (channels[i].pixel_type ==
                   TINYEXR_PIXELTYPE_FLOAT) { // float
          channel_offset += 4;
        } else {
          assert(0);
        }
      }
      sampleSize = channel_offset;
    }
    assert(sampleSize >= 2);
    assert(static_cast<size_t>(
             pixelOffsetTable[static_cast<size_t>(data_width - 1)] *
             sampleSize) == sample_data.size());
    int samples_per_line = static_cast<int>(sample_data.size()) / sampleSize;
    //
    // Alloc memory
    //
    //
    // pixel data is stored as image[channels][pixel_samples]
    //
    {
      tinyexr::tinyexr_uint64 data_offset = 0;
      for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
        deep_image->image[c][y] = static_cast<float *>(
          malloc(sizeof(float) * static_cast<size_t>(samples_per_line)));
        if (channels[c].pixel_type == 0) { // UINT
          for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
            unsigned int ui;
            unsigned int *src_ptr = reinterpret_cast<unsigned int *>(
              &sample_data.at(size_t(data_offset) + x * sizeof(int)));
            tinyexr::cpy4(&ui, src_ptr);
            deep_image->image[c][y][x] = static_cast<float>(ui); // @fixme
          }
          data_offset +=
            sizeof(unsigned int) * static_cast<size_t>(samples_per_line);
        } else if (channels[c].pixel_type == 1) { // half
          for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
            tinyexr::FP16 f16;
            const unsigned short *src_ptr = reinterpret_cast<unsigned short *>(
              &sample_data.at(size_t(data_offset) + x * sizeof(short)));
            tinyexr::cpy2(&(f16.u), src_ptr);
            tinyexr::FP32 f32 = half_to_float(f16);
            deep_image->image[c][y][x] = f32.f;
          }
          data_offset += sizeof(short) * static_cast<size_t>(samples_per_line);
        } else { // float
          for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
            float f;
            const float *src_ptr = reinterpret_cast<float *>(
              &sample_data.at(size_t(data_offset) + x * sizeof(float)));
            tinyexr::cpy4(&f, src_ptr);
            deep_image->image[c][y][x] = f;
          }
          data_offset += sizeof(float) * static_cast<size_t>(samples_per_line);
        }
      }
    }
  } // y
  deep_image->width = data_width;
  deep_image->height = data_height;
  deep_image->channel_names = static_cast<const char **>(
    malloc(sizeof(const char *) * static_cast<size_t>(num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
#ifdef _WIN32
    deep_image->channel_names[c] = _strdup(channels[c].name.c_str());
#else
    deep_image->channel_names[c] = strdup(channels[c].name.c_str());
#endif
  }
  deep_image->num_channels = num_channels;
  return TINYEXR_SUCCESS;
}
// Resets every field of an EXRImage to its empty state. Safe to call with
// NULL (no-op).
void InitEXRImage(EXRImage *exr_image) {
  if (!exr_image) {
    return;
  }
  // Scalar fields first, then pointers.
  exr_image->width = 0;
  exr_image->height = 0;
  exr_image->num_channels = 0;
  exr_image->num_tiles = 0;
  exr_image->level_x = 0;
  exr_image->level_y = 0;
  exr_image->images = NULL;
  exr_image->tiles = NULL;
  exr_image->next_level = NULL;
}
// Releases an error message previously handed out through a `const char**`
// `err` out-parameter. NULL is accepted and ignored.
void FreeEXRErrorMessage(const char *msg) {
  if (msg != NULL) {
    free(const_cast<char *>(msg));
  }
}
// Zero-initializes an EXRHeader. Safe to call with NULL (no-op).
void InitEXRHeader(EXRHeader *exr_header) {
  if (!exr_header) {
    return;
  }
  memset(exr_header, 0, sizeof(EXRHeader));
}
// Releases every heap allocation owned by an EXRHeader.
// FIX: freed pointers are now set to NULL (and the custom-attribute count
// reset) so calling FreeEXRHeader twice on the same struct no longer
// double-frees.
int FreeEXRHeader(EXRHeader *exr_header) {
  if (exr_header == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  if (exr_header->channels) {
    free(exr_header->channels);
    exr_header->channels = NULL;
  }
  if (exr_header->pixel_types) {
    free(exr_header->pixel_types);
    exr_header->pixel_types = NULL;
  }
  if (exr_header->requested_pixel_types) {
    free(exr_header->requested_pixel_types);
    exr_header->requested_pixel_types = NULL;
  }
  for (int i = 0; i < exr_header->num_custom_attributes; i++) {
    if (exr_header->custom_attributes[i].value) {
      free(exr_header->custom_attributes[i].value);
    }
  }
  if (exr_header->custom_attributes) {
    free(exr_header->custom_attributes);
    exr_header->custom_attributes = NULL;
  }
  exr_header->num_custom_attributes = 0;
  EXRSetNameAttr(exr_header, NULL);
  return TINYEXR_SUCCESS;
}
// Stores `name` in the header's fixed 256-byte name field. The buffer is
// cleared first and at most 255 bytes are copied, so the stored name is
// always NUL-terminated. Passing NULL just clears the field.
void EXRSetNameAttr(EXRHeader* exr_header, const char* name) {
  if (exr_header == NULL) {
    return;
  }
  memset(exr_header->name, 0, 256);
  if (name == NULL) {
    return;
  }
  const size_t n = std::min(strlen(name), static_cast<size_t>(255));
  if (n > 0) {
    memcpy(exr_header->name, name, n);
  }
}
int EXRNumLevels(const EXRImage* exr_image) {
if (exr_image == NULL) return 0;
if(exr_image->images) return 1; // scanlines
int levels = 1;
const EXRImage* level_image = exr_image;
while((level_image = level_image->next_level)) ++levels;
return levels;
}
// Releases every allocation owned by an EXRImage, recursing through the
// `next_level` chain (levels are allocated with `new`, hence `delete`).
// FIX: released pointers are now set to NULL and counts reset so a second
// FreeEXRImage call on the same struct no longer double-frees.
int FreeEXRImage(EXRImage *exr_image) {
  if (exr_image == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  if (exr_image->next_level) {
    FreeEXRImage(exr_image->next_level);
    delete exr_image->next_level;
    exr_image->next_level = NULL;
  }
  for (int i = 0; i < exr_image->num_channels; i++) {
    if (exr_image->images && exr_image->images[i]) {
      free(exr_image->images[i]);
    }
  }
  if (exr_image->images) {
    free(exr_image->images);
    exr_image->images = NULL;
  }
  if (exr_image->tiles) {
    for (int tid = 0; tid < exr_image->num_tiles; tid++) {
      for (int i = 0; i < exr_image->num_channels; i++) {
        if (exr_image->tiles[tid].images && exr_image->tiles[tid].images[i]) {
          free(exr_image->tiles[tid].images[i]);
        }
      }
      if (exr_image->tiles[tid].images) {
        free(exr_image->tiles[tid].images);
      }
    }
    free(exr_image->tiles);
    exr_image->tiles = NULL;
    exr_image->num_tiles = 0;
  }
  return TINYEXR_SUCCESS;
}
// Reads `filename` into memory and parses its EXR header into `exr_header`.
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code.
int ParseEXRHeaderFromFile(EXRHeader *exr_header, const EXRVersion *exr_version,
                           const char *filename, const char **err) {
  if (exr_header == NULL || exr_version == NULL || filename == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for ParseEXRHeaderFromFile",
                             err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang
  errno_t errcode =
    _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_INVALID_FILE;
  }
#else
  // Unknown compiler
  fp = fopen(filename, "rb");
#endif
#else
  fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
  size_t filesize;
  // Compute size
  fseek(fp, 0, SEEK_END);
  filesize = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);
  // FIX: an empty file previously led to fread()/&buf.at(0) on an empty
  // vector (undefined behavior / std::out_of_range); reject it up front.
  if (filesize == 0) {
    fclose(fp);
    tinyexr::SetErrorMessage("File size is zero : " + std::string(filename),
                             err);
    return TINYEXR_ERROR_INVALID_FILE;
  }
  std::vector<unsigned char> buf(filesize); // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    assert(ret == filesize);
    fclose(fp);
    if (ret != filesize) {
      tinyexr::SetErrorMessage("fread() error on " + std::string(filename),
                               err);
      return TINYEXR_ERROR_INVALID_FILE;
    }
  }
  return ParseEXRHeaderFromMemory(exr_header, exr_version, &buf.at(0), filesize,
                                  err);
}
// Parses every part header of a multipart EXR image from `memory` and
// returns a malloc()ed array of malloc()ed EXRHeader structs in
// `*exr_headers` (count in `*num_headers`). The caller owns the result.
int ParseEXRMultipartHeaderFromMemory(EXRHeader ***exr_headers,
                                      int *num_headers,
                                      const EXRVersion *exr_version,
                                      const unsigned char *memory, size_t size,
                                      const char **err) {
  if (memory == NULL || exr_headers == NULL || num_headers == NULL ||
      exr_version == NULL) {
    // Invalid argument
    tinyexr::SetErrorMessage(
        "Invalid argument for ParseEXRMultipartHeaderFromMemory", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  if (size < tinyexr::kEXRVersionSize) {
    tinyexr::SetErrorMessage("Data size too short", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }
  const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
  size_t marker_size = size - tinyexr::kEXRVersionSize;
  std::vector<tinyexr::HeaderInfo> infos;
  for (;;) {
    tinyexr::HeaderInfo info;
    info.clear();
    std::string err_str;
    bool empty_header = false;
    int ret = ParseEXRHeader(&info, &empty_header, exr_version, &err_str,
                             marker, marker_size);
    if (ret != TINYEXR_SUCCESS) {
      tinyexr::SetErrorMessage(err_str, err);
      return ret;
    }
    if (empty_header) {
      marker += 1; // skip '\0'
      break;
    }
    // `chunkCount` must exist in the header.
    if (info.chunk_count == 0) {
      tinyexr::SetErrorMessage(
          "`chunkCount' attribute is not found in the header.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    infos.push_back(info);
    // Move to the next header.
    // FIX: `marker_size` was previously never reduced (the useless by-value
    // `size` was decremented instead), so every ParseEXRHeader call after
    // the first was handed a stale, too-large remaining size.
    marker += info.header_len;
    marker_size -= info.header_len;
  }
  // allocate memory for EXRHeader and create array of EXRHeader pointers.
  (*exr_headers) =
    static_cast<EXRHeader **>(malloc(sizeof(EXRHeader *) * infos.size()));
  for (size_t i = 0; i < infos.size(); i++) {
    EXRHeader *exr_header = static_cast<EXRHeader *>(malloc(sizeof(EXRHeader)));
    memset(exr_header, 0, sizeof(EXRHeader));
    ConvertHeader(exr_header, infos[i]);
    exr_header->multipart = exr_version->multipart ? 1 : 0;
    (*exr_headers)[i] = exr_header;
  }
  (*num_headers) = static_cast<int>(infos.size());
  return TINYEXR_SUCCESS;
}
// Reads `filename` fully into memory and parses all multi-part EXR headers.
// See ParseEXRMultipartHeaderFromMemory() for output ownership.
int ParseEXRMultipartHeaderFromFile(EXRHeader ***exr_headers, int *num_headers,
                                    const EXRVersion *exr_version,
                                    const char *filename, const char **err) {
  if (exr_headers == NULL || num_headers == NULL || exr_version == NULL ||
      filename == NULL) {
    tinyexr::SetErrorMessage(
        "Invalid argument for ParseEXRMultipartHeaderFromFile()", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__)  // MSVC, MinGW gcc or clang
  errno_t errcode =
      _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_INVALID_FILE;
  }
#else
  // Unknown compiler
  fp = fopen(filename, "rb");
#endif
#else
  fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
  // Compute the file size. Guard against ftell() failure (-1) and files too
  // small to hold even the version header; the original code fed a zero or
  // huge size straight into std::vector and then read &buf[0], which is
  // undefined behavior for an empty vector.
  fseek(fp, 0, SEEK_END);
  long file_offset = ftell(fp);
  if (file_offset < static_cast<long>(tinyexr::kEXRVersionSize)) {
    fclose(fp);
    tinyexr::SetErrorMessage("Invalid EXR file " + std::string(filename), err);
    return TINYEXR_ERROR_INVALID_FILE;
  }
  size_t filesize = static_cast<size_t>(file_offset);
  fseek(fp, 0, SEEK_SET);
  std::vector<unsigned char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret = fread(&buf[0], 1, filesize, fp);
    fclose(fp);
    if (ret != filesize) {
      tinyexr::SetErrorMessage("`fread' error. file may be corrupted.", err);
      return TINYEXR_ERROR_INVALID_FILE;
    }
  }
  return ParseEXRMultipartHeaderFromMemory(
      exr_headers, num_headers, exr_version, &buf.at(0), filesize, err);
}
// Parses the 8-byte EXR version header (magic number, version byte and flag
// bits) from `memory` into `version`.
int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory,
                              size_t size) {
  if (version == NULL || memory == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  if (size < tinyexr::kEXRVersionSize) {
    return TINYEXR_ERROR_INVALID_DATA;
  }
  const unsigned char *marker = memory;
  // Magic number check: 0x76 0x2f 0x31 0x01 ("v/1\x01").
  {
    const char header[] = {0x76, 0x2f, 0x31, 0x01};
    if (memcmp(marker, header, 4) != 0) {
      return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
    }
    marker += 4;
  }
  // Initialize all flags before parsing the flag byte.
  version->tiled = false;
  version->long_name = false;
  version->non_image = false;
  version->multipart = false;
  // Parse version byte (must be 2) and the flag bits.
  // NOTE: the original code re-checked `version == NULL` here; that branch
  // was unreachable (NULL already rejected at entry, and `version` had been
  // written above), so it has been removed.
  {
    if (marker[0] != 2) {
      return TINYEXR_ERROR_INVALID_EXR_VERSION;
    }
    version->version = 2;
    if (marker[1] & 0x2) {  // 9th bit: tiled (single-part)
      version->tiled = true;
    }
    if (marker[1] & 0x4) {  // 10th bit: long attribute/channel names
      version->long_name = true;
    }
    if (marker[1] & 0x8) {  // 11th bit: non-image data (deep image)
      version->non_image = true;
    }
    if (marker[1] & 0x10) {  // 12th bit: multipart file
      version->multipart = true;
    }
  }
  return TINYEXR_SUCCESS;
}
// Reads the 8-byte EXR version header from `filename` and parses it into
// `version`. Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code.
int ParseEXRVersionFromFile(EXRVersion *version, const char *filename) {
  if (filename == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__)  // MSVC, MinGW gcc or clang
  errno_t err = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  if (err != 0) {
    // TODO(syoyo): return wfopen_s error code
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#else
  // Unknown compiler
  fp = fopen(filename, "rb");
#endif
#else
  fp = fopen(filename, "rb");
#endif
  if (!fp) {
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
  // Compute size
  size_t file_size;
  fseek(fp, 0, SEEK_END);
  file_size = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);
  if (file_size < tinyexr::kEXRVersionSize) {
    // BUGFIX: the original early return leaked `fp` here.
    fclose(fp);
    return TINYEXR_ERROR_INVALID_FILE;
  }
  unsigned char buf[tinyexr::kEXRVersionSize];
  size_t ret = fread(&buf[0], 1, tinyexr::kEXRVersionSize, fp);
  fclose(fp);
  if (ret != tinyexr::kEXRVersionSize) {
    return TINYEXR_ERROR_INVALID_FILE;
  }
  return ParseEXRVersionFromMemory(version, buf, tinyexr::kEXRVersionSize);
}
// Decodes every part of a multi-part EXR image that is already resident in
// memory. `exr_headers` must hold `num_parts` fully parsed headers (with
// `header_len` set by the header parser), and `exr_images` must provide room
// for `num_parts` output images.
int LoadEXRMultipartImageFromMemory(EXRImage *exr_images,
                                    const EXRHeader **exr_headers,
                                    unsigned int num_parts,
                                    const unsigned char *memory,
                                    const size_t size, const char **err) {
  if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
      memory == NULL || (size <= tinyexr::kEXRVersionSize)) {
    tinyexr::SetErrorMessage(
        "Invalid argument for LoadEXRMultipartImageFromMemory()", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  // compute total header size.
  size_t total_header_size = 0;
  for (unsigned int i = 0; i < num_parts; i++) {
    if (exr_headers[i]->header_len == 0) {
      // header_len == 0 means the header was never run through the parser.
      tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err);
      return TINYEXR_ERROR_INVALID_ARGUMENT;
    }
    total_header_size += exr_headers[i]->header_len;
  }
  // Position `marker` at the start of the chunk offset tables, which follow
  // the magic number, version header, and all part headers.
  const char *marker = reinterpret_cast<const char *>(
      memory + total_header_size + 4 +
      4);  // +8 for magic number and version header.
  marker += 1;  // Skip empty header.
  // NOTE 1:
  // In multipart image, There is 'part number' before chunk data.
  // 4 byte : part number
  // 4+ : chunk
  //
  // NOTE 2:
  // EXR spec says 'part number' is 'unsigned long' but actually this is
  // 'unsigned int(4 bytes)' in OpenEXR implementation...
  // http://www.openexr.com/openexrfilelayout.pdf
  // Load chunk offset table.
  std::vector<tinyexr::OffsetData> chunk_offset_table_list;
  chunk_offset_table_list.reserve(num_parts);
  for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
    chunk_offset_table_list.resize(chunk_offset_table_list.size() + 1);
    tinyexr::OffsetData& offset_data = chunk_offset_table_list.back();
    if (!exr_headers[i]->tiled || exr_headers[i]->tile_level_mode == TINYEXR_TILE_ONE_LEVEL) {
      // Scanline (or single-level tiled) part: a flat offset table with one
      // 8-byte little-endian entry per chunk.
      tinyexr::InitSingleResolutionOffsets(offset_data, exr_headers[i]->chunk_count);
      std::vector<tinyexr::tinyexr_uint64>& offset_table = offset_data.offsets[0][0];
      for (size_t c = 0; c < offset_table.size(); c++) {
        tinyexr::tinyexr_uint64 offset;
        memcpy(&offset, marker, 8);
        tinyexr::swap8(&offset);  // file data is little-endian
        if (offset >= size) {
          tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.",
                                   err);
          return TINYEXR_ERROR_INVALID_DATA;
        }
        offset_table[c] = offset + 4;  // +4 to skip 'part number'
        marker += 8;
      }
    } else {
      // Multi-level tiled part: one offset table per (level, tile-y) row.
      {
        std::vector<int> num_x_tiles, num_y_tiles;
        tinyexr::PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_headers[i]);
        int num_blocks = InitTileOffsets(offset_data, exr_headers[i], num_x_tiles, num_y_tiles);
        if (num_blocks != exr_headers[i]->chunk_count) {
          tinyexr::SetErrorMessage("Invalid offset table size.", err);
          return TINYEXR_ERROR_INVALID_DATA;
        }
      }
      for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) {
        for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
          for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) {
            tinyexr::tinyexr_uint64 offset;
            memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
            tinyexr::swap8(&offset);
            if (offset >= size) {
              tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.",
                                       err);
              return TINYEXR_ERROR_INVALID_DATA;
            }
            offset_data.offsets[l][dy][dx] = offset + 4;  // +4 to skip 'part number'
            marker += sizeof(tinyexr::tinyexr_uint64);  // = 8
          }
        }
      }
    }
  }
  // Decode image.
  for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
    tinyexr::OffsetData &offset_data = chunk_offset_table_list[i];
    // First check 'part number' is identitical to 'i'
    for (unsigned int l = 0; l < offset_data.offsets.size(); ++l)
      for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy)
        for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) {
          const unsigned char *part_number_addr =
              memory + offset_data.offsets[l][dy][dx] - 4;  // -4 to move to 'part number' field.
          unsigned int part_no;
          memcpy(&part_no, part_number_addr, sizeof(unsigned int));  // 4
          tinyexr::swap4(&part_no);
          if (part_no != i) {
            tinyexr::SetErrorMessage("Invalid `part number' in EXR header chunks.",
                                     err);
            return TINYEXR_ERROR_INVALID_DATA;
          }
        }
    std::string e;
    int ret = tinyexr::DecodeChunk(&exr_images[i], exr_headers[i], offset_data,
                                   memory, size, &e);
    if (ret != TINYEXR_SUCCESS) {
      if (!e.empty()) {
        tinyexr::SetErrorMessage(e, err);
      }
      return ret;
    }
  }
  return TINYEXR_SUCCESS;
}
// Reads `filename` fully into memory and decodes all `num_parts` parts into
// `exr_images` using the already-parsed `exr_headers`.
int LoadEXRMultipartImageFromFile(EXRImage *exr_images,
                                  const EXRHeader **exr_headers,
                                  unsigned int num_parts, const char *filename,
                                  const char **err) {
  // BUGFIX: also reject a NULL filename; the original passed it into
  // fopen()/std::string(filename) unchecked.
  if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
      filename == NULL) {
    tinyexr::SetErrorMessage(
        "Invalid argument for LoadEXRMultipartImageFromFile", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__)  // MSVC, MinGW gcc or clang
  errno_t errcode =
      _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#else
  // Unknown compiler
  fp = fopen(filename, "rb");
#endif
#else
  fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
  // Compute the file size, guarding against ftell() failure and files too
  // small to contain the version header (&buf[0] on an empty vector is UB).
  fseek(fp, 0, SEEK_END);
  long file_offset = ftell(fp);
  if (file_offset < static_cast<long>(tinyexr::kEXRVersionSize)) {
    fclose(fp);
    tinyexr::SetErrorMessage("Invalid EXR file " + std::string(filename), err);
    return TINYEXR_ERROR_INVALID_FILE;
  }
  size_t filesize = static_cast<size_t>(file_offset);
  fseek(fp, 0, SEEK_SET);
  std::vector<unsigned char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret = fread(&buf[0], 1, filesize, fp);
    fclose(fp);
    // BUGFIX: the original only assert()ed on a short read and then ignored
    // it (`(void)ret;`), silently decoding a truncated buffer in release
    // builds. Fail explicitly, matching the sibling loaders.
    if (ret != filesize) {
      tinyexr::SetErrorMessage("`fread' error. file may be corrupted.", err);
      return TINYEXR_ERROR_INVALID_FILE;
    }
  }
  return LoadEXRMultipartImageFromMemory(exr_images, exr_headers, num_parts,
                                         &buf.at(0), filesize, err);
}
// Saves interleaved float pixel data (1, 3 or 4 components) as an EXR file.
// Channels are written in (A)BGR order, which most EXR viewers expect.
// `save_as_fp16` > 0 stores half-precision pixels, otherwise full float.
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code (message in *err).
int SaveEXR(const float *data, int width, int height, int components,
            const int save_as_fp16, const char *outfilename, const char **err) {
  if ((components == 1) || components == 3 || components == 4) {
    // OK
  } else {
    std::stringstream ss;
    ss << "Unsupported component value : " << components << std::endl;
    tinyexr::SetErrorMessage(ss.str(), err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  // Reject non-positive dimensions up front; they would otherwise produce
  // zero-sized buffers and out-of-range .at(0) accesses below.
  if ((width <= 0) || (height <= 0)) {
    tinyexr::SetErrorMessage("Invalid image size", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  EXRHeader header;
  InitEXRHeader(&header);
  if ((width < 16) && (height < 16)) {
    // No compression for small image.
    header.compression_type = TINYEXR_COMPRESSIONTYPE_NONE;
  } else {
    header.compression_type = TINYEXR_COMPRESSIONTYPE_ZIP;
  }
  EXRImage image;
  InitEXRImage(&image);
  image.num_channels = components;
  // BUGFIX: compute the pixel count in size_t; the original multiplied
  // `width * height` in int before widening, overflowing for large images.
  const size_t pixel_count =
      static_cast<size_t>(width) * static_cast<size_t>(height);
  std::vector<float> images[4];
  if (components == 1) {
    images[0].resize(pixel_count);
    memcpy(images[0].data(), data, sizeof(float) * pixel_count);
  } else {
    images[0].resize(pixel_count);
    images[1].resize(pixel_count);
    images[2].resize(pixel_count);
    images[3].resize(pixel_count);
    // Split RGB(A)RGB(A)RGB(A)... into R, G and B(and A) layers
    for (size_t i = 0; i < pixel_count; i++) {
      images[0][i] = data[static_cast<size_t>(components) * i + 0];
      images[1][i] = data[static_cast<size_t>(components) * i + 1];
      images[2][i] = data[static_cast<size_t>(components) * i + 2];
      if (components == 4) {
        images[3][i] = data[static_cast<size_t>(components) * i + 3];
      }
    }
  }
  float *image_ptr[4] = {0, 0, 0, 0};
  if (components == 4) {
    image_ptr[0] = &(images[3].at(0));  // A
    image_ptr[1] = &(images[2].at(0));  // B
    image_ptr[2] = &(images[1].at(0));  // G
    image_ptr[3] = &(images[0].at(0));  // R
  } else if (components == 3) {
    image_ptr[0] = &(images[2].at(0));  // B
    image_ptr[1] = &(images[1].at(0));  // G
    image_ptr[2] = &(images[0].at(0));  // R
  } else if (components == 1) {
    image_ptr[0] = &(images[0].at(0));  // A
  }
  image.images = reinterpret_cast<unsigned char **>(image_ptr);
  image.width = width;
  image.height = height;
  header.num_channels = components;
  header.channels = static_cast<EXRChannelInfo *>(malloc(
      sizeof(EXRChannelInfo) * static_cast<size_t>(header.num_channels)));
  // Must be (A)BGR order, since most of EXR viewers expect this channel order.
  if (components == 4) {
#ifdef _MSC_VER
    strncpy_s(header.channels[0].name, "A", 255);
    strncpy_s(header.channels[1].name, "B", 255);
    strncpy_s(header.channels[2].name, "G", 255);
    strncpy_s(header.channels[3].name, "R", 255);
#else
    strncpy(header.channels[0].name, "A", 255);
    strncpy(header.channels[1].name, "B", 255);
    strncpy(header.channels[2].name, "G", 255);
    strncpy(header.channels[3].name, "R", 255);
#endif
    header.channels[0].name[strlen("A")] = '\0';
    header.channels[1].name[strlen("B")] = '\0';
    header.channels[2].name[strlen("G")] = '\0';
    header.channels[3].name[strlen("R")] = '\0';
  } else if (components == 3) {
#ifdef _MSC_VER
    strncpy_s(header.channels[0].name, "B", 255);
    strncpy_s(header.channels[1].name, "G", 255);
    strncpy_s(header.channels[2].name, "R", 255);
#else
    strncpy(header.channels[0].name, "B", 255);
    strncpy(header.channels[1].name, "G", 255);
    strncpy(header.channels[2].name, "R", 255);
#endif
    header.channels[0].name[strlen("B")] = '\0';
    header.channels[1].name[strlen("G")] = '\0';
    header.channels[2].name[strlen("R")] = '\0';
  } else {
#ifdef _MSC_VER
    strncpy_s(header.channels[0].name, "A", 255);
#else
    strncpy(header.channels[0].name, "A", 255);
#endif
    header.channels[0].name[strlen("A")] = '\0';
  }
  header.pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
  header.requested_pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
  for (int i = 0; i < header.num_channels; i++) {
    header.pixel_types[i] =
        TINYEXR_PIXELTYPE_FLOAT;  // pixel type of input image
    if (save_as_fp16 > 0) {
      header.requested_pixel_types[i] =
          TINYEXR_PIXELTYPE_HALF;  // save with half(fp16) pixel format
    } else {
      header.requested_pixel_types[i] =
          TINYEXR_PIXELTYPE_FLOAT;  // save with float(fp32) pixel format(i.e.
                                    // no precision reduction)
    }
  }
  int ret = SaveEXRImageToFile(&image, &header, outfilename, err);
  // BUGFIX: free the header buffers unconditionally. The original returned
  // early when SaveEXRImageToFile() failed and leaked channels,
  // pixel_types and requested_pixel_types.
  free(header.channels);
  free(header.pixel_types);
  free(header.requested_pixel_types);
  return ret;
}
#ifdef __clang__
// zero-as-null-ppinter-constant
#pragma clang diagnostic pop
#endif
#endif // TINYEXR_IMPLEMENTATION_DEFINED
#endif // TINYEXR_IMPLEMENTATION
|
spacetime_heat_hs_kernel_antiderivative.h | /*
Copyright (c) 2020, VSB - Technical University of Ostrava and Graz University of
Technology
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* Neither the names of VSB - Technical University of Ostrava and Graz
University of Technology nor the names of its contributors may be used to
endorse or promote products derived from this software without specific prior
written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS “AS IS”
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL VSB - TECHNICAL UNIVERSITY OF OSTRAVA AND
GRAZ UNIVERSITY OF TECHNOLOGY BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/** @file spacetime_heat_hs_kernel_antiderivative.h
* @brief Kernel for uniform_spacetime_tensor_mesh.h.
*/
#ifndef INCLUDE_BESTHEA_SPACETIME_HEAT_HS_KERNEL_ANTIDERIVATIVE_H_
#define INCLUDE_BESTHEA_SPACETIME_HEAT_HS_KERNEL_ANTIDERIVATIVE_H_
#include <besthea/spacetime_heat_kernel_antiderivative.h>
#include "besthea/settings.h"
#include <vector>
namespace besthea {
namespace bem {
class spacetime_heat_hs_kernel_antiderivative;
}
}
/**
* Class representing a first and second antiderivative of the hypersingular
* spacetime kernel.
*/
class besthea::bem::spacetime_heat_hs_kernel_antiderivative
  : public besthea::bem::spacetime_heat_kernel_antiderivative<
      spacetime_heat_hs_kernel_antiderivative > {
  // NOTE(review): the single-valued do_* evaluators below all return _zero;
  // for this hypersingular kernel the actual contributions appear to be
  // produced by the two-valued *_and_anti_t methods further down — confirm
  // against how the CRTP base class dispatches to them.
 public:
  /**
   * Constructor.
   * @param[in] alpha Heat conductivity.
   */
  spacetime_heat_hs_kernel_antiderivative( sc alpha )
    : spacetime_heat_kernel_antiderivative<
        spacetime_heat_hs_kernel_antiderivative >( alpha ) {
  }

  /**
   * Destructor.
   */
  virtual ~spacetime_heat_hs_kernel_antiderivative( ) {
  }

  /**
   * Evaluates the second antiderivative.
   * @param[in] xy1 First coordinate of `x - y`.
   * @param[in] xy2 Second coordinate of `x - y`.
   * @param[in] xy3 Third coordinate of `x - y`.
   * @param[in] nx Normal in the `x` variable.
   * @param[in] ny Normal in the `y` variable.
   * @param[in] ttau `t-tau`.
   */
#pragma omp declare simd uniform( this, nx, ny, ttau ) simdlen( DATA_WIDTH )
  sc do_anti_tau_anti_t( [[maybe_unused]] sc xy1, [[maybe_unused]] sc xy2,
    [[maybe_unused]] sc xy3, [[maybe_unused]] const sc * nx,
    [[maybe_unused]] const sc * ny, [[maybe_unused]] sc ttau ) const {
    return _zero;
  }

  /**
   * Evaluates the second antiderivative.
   * @param[in] xy1 First coordinate of `x - y`.
   * @param[in] xy2 Second coordinate of `x - y`.
   * @param[in] xy3 Third coordinate of `x - y`.
   * @param[in] nx Normal in the `x` variable.
   * @param[in] ny Normal in the `y` variable.
   * @param[in] ttau `t-tau`.
   */
#pragma omp declare simd uniform( this, nx, ny, ttau ) simdlen( DATA_WIDTH )
  sc do_anti_tau_anti_t_regular_in_time( [[maybe_unused]] sc xy1,
    [[maybe_unused]] sc xy2, [[maybe_unused]] sc xy3,
    [[maybe_unused]] const sc * nx, [[maybe_unused]] const sc * ny,
    [[maybe_unused]] sc ttau ) const {
    return _zero;
  }

  /**
   * Evaluates the second antiderivative.
   * @param[in] xy1 First coordinate of `x - y`.
   * @param[in] xy2 Second coordinate of `x - y`.
   * @param[in] xy3 Third coordinate of `x - y`.
   * @param[in] nx Normal in the `x` variable.
   * @param[in] ny Normal in the `y` variable.
   * @param[in] ttau `t-tau`.
   */
#pragma omp declare simd uniform( this, nx, ny, ttau ) simdlen( DATA_WIDTH )
  sc do_anti_tau_anti_t_regular_in_time_regular_in_space(
    [[maybe_unused]] sc xy1, [[maybe_unused]] sc xy2, [[maybe_unused]] sc xy3,
    [[maybe_unused]] const sc * nx, [[maybe_unused]] const sc * ny,
    [[maybe_unused]] sc ttau ) const {
    return _zero;
  }

  /**
   * Evaluates the second antiderivative.
   * @param[in] xy1 First coordinate of `x - y`.
   * @param[in] xy2 Second coordinate of `x - y`.
   * @param[in] xy3 Third coordinate of `x - y`.
   * @param[in] nx Normal in the `x` variable.
   * @param[in] ny Normal in the `y` variable.
   */
#pragma omp declare simd uniform( this, nx, ny ) simdlen( DATA_WIDTH )
  sc do_anti_tau_anti_t_limit_in_time_regular_in_space( [[maybe_unused]] sc xy1,
    [[maybe_unused]] sc xy2, [[maybe_unused]] sc xy3,
    [[maybe_unused]] const sc * nx, [[maybe_unused]] const sc * ny ) const {
    return _zero;
  }

  /**
   * ONLY NEEDED FOR POTENTIALS!
   * Evaluates the first antiderivative.
   * @param[in] xy1 First coordinate of `x - y`.
   * @param[in] xy2 Second coordinate of `x - y`.
   * @param[in] xy3 Third coordinate of `x - y`.
   * @param[in] nx Normal in the `x` variable.
   * @param[in] ny Normal in the `y` variable.
   * @param[in] ttau `t-tau`.
   */
#pragma omp declare simd uniform( this, nx, ny, ttau ) simdlen( DATA_WIDTH )
  sc do_anti_tau_regular( [[maybe_unused]] sc xy1, [[maybe_unused]] sc xy2,
    [[maybe_unused]] sc xy3, [[maybe_unused]] const sc * nx,
    [[maybe_unused]] const sc * ny, [[maybe_unused]] sc ttau ) const {
    return _zero;
  }

  /**
   * Evaluates the definite integral over the same time interval.
   * @param[in] xy1 First coordinate of `x - y`.
   * @param[in] xy2 Second coordinate of `x - y`.
   * @param[in] xy3 Third coordinate of `x - y`.
   * @param[in] nx Normal in the `x` variable.
   * @param[in] ny Normal in the `y` variable.
   * @param[in] t0 Start of interval.
   * @param[in] t1 End of interval.
   */
#pragma omp declare simd uniform( this, nx, ny, t0, t1 ) simdlen( DATA_WIDTH )
  sc do_definite_integral_over_same_interval( [[maybe_unused]] sc xy1,
    [[maybe_unused]] sc xy2, [[maybe_unused]] sc xy3,
    [[maybe_unused]] const sc * nx, [[maybe_unused]] const sc * ny,
    [[maybe_unused]] sc t0, [[maybe_unused]] sc t1 ) const {
    return _zero;
  }

  /**
   * Evaluates the definite integral over the different time intervals.
   * @param[in] xy1 First coordinate of `x - y`.
   * @param[in] xy2 Second coordinate of `x - y`.
   * @param[in] xy3 Third coordinate of `x - y`.
   * @param[in] nx Normal in the `x` variable.
   * @param[in] ny Normal in the `y` variable.
   * @param[in] t0 Start of interval in `t`.
   * @param[in] t1 End of interval in `t`.
   * @param[in] tau0 Start of interval in `tau`.
   * @param[in] tau1 End of interval in `tau`.
   */
#pragma omp declare simd uniform( this, nx, ny, t0, t1, tau0, tau1 ) \
  simdlen( DATA_WIDTH )
  sc do_definite_integral_over_different_intervals( [[maybe_unused]] sc xy1,
    [[maybe_unused]] sc xy2, [[maybe_unused]] sc xy3,
    [[maybe_unused]] const sc * nx, [[maybe_unused]] const sc * ny,
    [[maybe_unused]] sc t0, [[maybe_unused]] sc t1, [[maybe_unused]] sc tau0,
    [[maybe_unused]] sc tau1 ) const {
    return _zero;
  }

  /**
   * Evaluates the first antiderivative.
   * @param[in] xy1 First coordinate of `x - y`.
   * @param[in] xy2 Second coordinate of `x - y`.
   * @param[in] xy3 Third coordinate of `x - y`.
   * @param[in] nx Normal in the `x` variable.
   * @param[in] ny Normal in the `y` variable.
   */
#pragma omp declare simd uniform( this, nx, ny ) simdlen( DATA_WIDTH )
  sc do_anti_tau_limit( sc xy1, sc xy2, sc xy3, [[maybe_unused]] const sc * nx,
    [[maybe_unused]] const sc * ny ) const {
    // 1 / (4 pi alpha |x-y|), then rescaled by _alpha2 (net factor alpha).
    sc norm = std::sqrt( xy1 * xy1 + xy2 * xy2 + xy3 * xy3 );
    sc value = _one / ( _four * _pi * _alpha * norm );
    value *= _alpha2;
    return value;
  }

  /**
   * Evaluates the second antiderivative.
   * @param[in] xy1 First coordinate of `x - y`.
   * @param[in] xy2 Second coordinate of `x - y`.
   * @param[in] xy3 Third coordinate of `x - y`.
   * @param[in] nx Normal in the `x` variable.
   * @param[in] ny Normal in the `y` variable.
   * @param[in] ttau `t-tau`.
   * @param[out] value1 Return value for anti_tau_anti_t part.
   * @param[out] value2 Return value for anti_t part.
   */
#pragma omp declare simd uniform( this, nx, ny, ttau ) simdlen( DATA_WIDTH )
  void anti_tau_anti_t_and_anti_t( sc xy1, sc xy2, sc xy3, const sc * nx,
    const sc * ny, sc ttau, sc * value1, sc * value2 ) const {
    sc dot = nx[ 0 ] * ny[ 0 ] + nx[ 1 ] * ny[ 1 ] + nx[ 2 ] * ny[ 2 ];
    sc norm = std::sqrt( xy1 * xy1 + xy2 * xy2 + xy3 * xy3 );
    sc sqrt_d = std::sqrt( ttau );
    sc erf_value;
    sc four_pi_alpha_norm;
    sc pi_alpha_sqrtpi_sqrta;
    // Four cases depending on whether t-tau and |x-y| are (numerically)
    // positive; limits are substituted where either tends to zero.
    if ( ttau > _eps ) {
      pi_alpha_sqrtpi_sqrta = _pi * _alpha * _sqrt_pi * _sqrt_alpha;
      if ( norm > _eps ) {  // ttau > 0, norm > 0
        erf_value = std::erf( norm / ( _two * sqrt_d * _sqrt_alpha ) );
        four_pi_alpha_norm = _four * _pi * _alpha * norm;
        *value1
          = ( ttau / four_pi_alpha_norm + norm / ( _eight * _pi * _alpha2 ) )
            * erf_value
          + sqrt_d / ( _four * _pi * _alpha * _sqrt_pi * _sqrt_alpha )
            * std::exp( -( norm * norm ) / ( _four * ttau * _alpha ) );
        *value2 = erf_value / four_pi_alpha_norm;
      } else {  // ttau > 0, limit for norm -> 0
        *value1 = sqrt_d / ( _two * pi_alpha_sqrtpi_sqrta );
        *value2 = _one / ( _four * pi_alpha_sqrtpi_sqrta * sqrt_d );
      }
    } else {  // limit for ttau -> 0, assuming norm > 0
      *value1 = norm / ( _eight * _pi * _alpha2 );
      *value2 = _one / ( _four * _pi * _alpha * norm );
    }
    // Final scaling: value1 by alpha^2, value2 by (n_x . n_y) * alpha.
    *value1 *= _alpha2;
    *value2 *= dot * _alpha;
  }

  /**
   * Evaluates the second antiderivative.
   * @param[in] xy1 First coordinate of `x - y`.
   * @param[in] xy2 Second coordinate of `x - y`.
   * @param[in] xy3 Third coordinate of `x - y`.
   * @param[in] nx Normal in the `x` variable.
   * @param[in] ny Normal in the `y` variable.
   * @param[in] ttau `t-tau`.
   * @param[out] value1 Return value for anti_tau_anti_t part.
   * @param[out] value2 Return value for anti_t part.
   */
#pragma omp declare simd uniform( this, nx, ny, ttau ) simdlen( DATA_WIDTH )
  void anti_tau_anti_t_and_anti_t_regular_in_time( sc xy1, sc xy2, sc xy3,
    const sc * nx, const sc * ny, sc ttau, sc * value1, sc * value2 ) const {
    // Same as anti_tau_anti_t_and_anti_t, but the caller guarantees
    // ttau > 0, so only the norm branch is needed.
    sc dot = nx[ 0 ] * ny[ 0 ] + nx[ 1 ] * ny[ 1 ] + nx[ 2 ] * ny[ 2 ];
    sc norm = std::sqrt( xy1 * xy1 + xy2 * xy2 + xy3 * xy3 );
    sc sqrt_d = std::sqrt( ttau );
    if ( norm > _eps ) {  // ttau > 0, norm > 0
      sc erf_value = std::erf( norm / ( _two * sqrt_d * _sqrt_alpha ) );
      sc four_pi_alpha_norm = _four * _pi * _alpha * norm;
      *value1
        = ( ttau / four_pi_alpha_norm + norm / ( _eight * _pi * _alpha2 ) )
          * erf_value
        + sqrt_d / ( _four * _pi * _alpha * _sqrt_pi * _sqrt_alpha )
          * std::exp( -( norm * norm ) / ( _four * ttau * _alpha ) );
      *value2 = erf_value / four_pi_alpha_norm;
    } else {  // ttau > 0, limit for norm -> 0
      sc pi_alpha_sqrtpi_sqrta = _pi * _alpha * _sqrt_pi * _sqrt_alpha;
      *value1 = sqrt_d / ( _two * pi_alpha_sqrtpi_sqrta );
      *value2 = _one / ( _four * pi_alpha_sqrtpi_sqrta * sqrt_d );
    }
    *value1 *= _alpha2;
    *value2 *= dot * _alpha;
  }

  /**
   * Evaluates the second antiderivative.
   * @param[in] xy1 First coordinate of `x - y`.
   * @param[in] xy2 Second coordinate of `x - y`.
   * @param[in] xy3 Third coordinate of `x - y`.
   * @param[in] nx Normal in the `x` variable.
   * @param[in] ny Normal in the `y` variable.
   * @param[in] ttau `t-tau`.
   * @param[out] value1 Return value for anti_tau_anti_t part.
   * @param[out] value2 Return value for anti_t part.
   */
#pragma omp declare simd uniform( this, nx, ny, ttau ) simdlen( DATA_WIDTH )
  void anti_tau_anti_t_and_anti_t_regular_in_time_regular_in_space( sc xy1,
    sc xy2, sc xy3, const sc * nx, const sc * ny, sc ttau, sc * value1,
    sc * value2 ) const {
    // Caller guarantees both ttau > 0 and norm > 0, so no branches.
    sc dot = nx[ 0 ] * ny[ 0 ] + nx[ 1 ] * ny[ 1 ] + nx[ 2 ] * ny[ 2 ];
    sc norm = std::sqrt( xy1 * xy1 + xy2 * xy2 + xy3 * xy3 );
    sc sqrt_d = std::sqrt( ttau );
    sc erf_value = std::erf( norm / ( _two * sqrt_d * _sqrt_alpha ) );
    sc four_pi_alpha_norm = _four * _pi * _alpha * norm;
    // ttau > 0, norm > 0
    *value1 = ( ttau / four_pi_alpha_norm + norm / ( _eight * _pi * _alpha2 ) )
        * erf_value
      + sqrt_d / ( _four * _pi * _alpha * _sqrt_pi * _sqrt_alpha )
        * std::exp( -( norm * norm ) / ( _four * ttau * _alpha ) );
    *value2 = erf_value / four_pi_alpha_norm;
    *value1 *= _alpha2;
    *value2 *= dot * _alpha;
  }

  /**
   * Evaluates the second antiderivative.
   * @param[in] xy1 First coordinate of `x - y`.
   * @param[in] xy2 Second coordinate of `x - y`.
   * @param[in] xy3 Third coordinate of `x - y`.
   * @param[in] nx Normal in the `x` variable.
   * @param[in] ny Normal in the `y` variable.
   * @param[out] value1 Return value for anti_tau_anti_t part.
   * @param[out] value2 Return value for anti_t part.
   */
#pragma omp declare simd uniform( this, nx, ny ) simdlen( DATA_WIDTH )
  void anti_tau_anti_t_and_anti_t_limit_in_time_regular_in_space( sc xy1,
    sc xy2, sc xy3, const sc * nx, const sc * ny, sc * value1,
    sc * value2 ) const {
    sc dot = nx[ 0 ] * ny[ 0 ] + nx[ 1 ] * ny[ 1 ] + nx[ 2 ] * ny[ 2 ];
    sc norm = std::sqrt( xy1 * xy1 + xy2 * xy2 + xy3 * xy3 );
    // limit for ttau -> 0, assuming norm > 0
    *value1 = norm / ( _eight * _pi * _alpha2 );
    *value2 = _one / ( _four * _pi * _alpha * norm );
    *value1 *= _alpha2;
    *value2 *= dot * _alpha;
  }

  /**
   * Evaluates the definite integral over the same time interval.
   * @param[in] xy1 First coordinate of `x - y`.
   * @param[in] xy2 Second coordinate of `x - y`.
   * @param[in] xy3 Third coordinate of `x - y`.
   * @param[in] nx Normal in the `x` variable.
   * @param[in] ny Normal in the `y` variable.
   * @param[in] t0 Start of interval.
   * @param[in] t1 End of interval.
   * @param[out] value1 Return value for anti_tau_anti_t part.
   * @param[out] value2 Return value for anti_t part.
   */
#pragma omp declare simd uniform( this, nx, ny, t0, t1 ) simdlen( DATA_WIDTH )
  void definite_integral_over_same_interval( sc xy1, sc xy2, sc xy3,
    const sc * nx, const sc * ny, sc t0, sc t1, sc * value1,
    sc * value2 ) const {
    // Combines the antiderivatives evaluated at the interval length (t1-t0)
    // and at the ttau -> 0 limit.
    sc val1, val2;
    *value1 = ( t1 - t0 ) * do_anti_tau_limit( xy1, xy2, xy3, nx, ny );
    *value2 = 0.0;
    anti_tau_anti_t_and_anti_t_regular_in_time(
      xy1, xy2, xy3, nx, ny, t1 - t0, &val1, &val2 );
    *value1 -= val1;
    *value2 -= val2;
    anti_tau_anti_t_and_anti_t_limit_in_time_regular_in_space(
      xy1, xy2, xy3, nx, ny, &val1, &val2 );
    *value1 += val1;
    *value2 += val2;
  }

  /**
   * Evaluates the definite integral over the different time intervals.
   * @param[in] xy1 First coordinate of `x - y`.
   * @param[in] xy2 Second coordinate of `x - y`.
   * @param[in] xy3 Third coordinate of `x - y`.
   * @param[in] nx Normal in the `x` variable.
   * @param[in] ny Normal in the `y` variable.
   * @param[in] t0 Start of interval in `t`.
   * @param[in] t1 End of interval in `t`.
   * @param[in] tau0 Start of interval in `tau`.
   * @param[in] tau1 End of interval in `tau`.
   * @param[out] value1 Return value for anti_tau_anti_t part.
   * @param[out] value2 Return value for anti_t part.
   */
#pragma omp declare simd uniform( this, nx, ny, t0, t1, tau0, tau1 ) \
  simdlen( DATA_WIDTH )
  void definite_integral_over_different_intervals( sc xy1, sc xy2, sc xy3,
    const sc * nx, const sc * ny, sc t0, sc t1, sc tau0, sc tau1, sc * value1,
    sc * value2 ) const {
    // Inclusion-exclusion over the four corner evaluations of the second
    // antiderivative: F(t1-tau1) - F(t1-tau0) - F(t0-tau1) + F(t0-tau0).
    sc val1, val2;
    anti_tau_anti_t_and_anti_t(
      xy1, xy2, xy3, nx, ny, t1 - tau1, value1, value2 );
    anti_tau_anti_t_and_anti_t(
      xy1, xy2, xy3, nx, ny, t1 - tau0, &val1, &val2 );
    *value1 -= val1;
    *value2 -= val2;
    anti_tau_anti_t_and_anti_t(
      xy1, xy2, xy3, nx, ny, t0 - tau1, &val1, &val2 );
    *value1 -= val1;
    *value2 -= val2;
    anti_tau_anti_t_and_anti_t(
      xy1, xy2, xy3, nx, ny, t0 - tau0, &val1, &val2 );
    *value1 += val1;
    *value2 += val2;
  }
};
#endif /* INCLUDE_BESTHEA_SPACETIME_HEAT_HS_KERNEL_ANTIDERIVATIVE_H_ \
*/
|
GB_unop__lgamma_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__lgamma_fp32_fp32)
// op(A') function: GB (_unop_tran__lgamma_fp32_fp32)
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = lgammaf (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = lgammaf (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = lgammaf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LGAMMA || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Applies the unary operator entrywise: Cx [p] = lgammaf ((float) Ax [p]),
// parallelized over nthreads.  File is auto-generated; comments only.
GrB_Info GB (_unop_apply__lgamma_fp32_fp32)
(
    float *Cx,                  // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full/sparse/hypersparse case: all anz entries are present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = lgammaf (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = lgammaf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A and apply cij = lgammaf (aij).
// The actual work is done by the shared template GB_unop_transpose.c,
// which expands using the GB_* macros defined above.
GrB_Info GB (_unop_tran__lgamma_fp32_fp32)
(
    GrB_Matrix C,                       // output matrix
    const GrB_Matrix A,                 // input matrix, transposed into C
    int64_t *restrict *Workspaces,      // one workspace per thread/bucket
    const int64_t *restrict A_slice,    // how A's vectors are split into tasks
    int nworkspaces,                    // number of workspaces
    int nthreads                        // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // this kernel was compiled out; caller falls back to the generic case
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
HannWindow.h | #ifndef _H_HANN_WINDOW_
#define _H_HANN_WINDOW_
#include <cmath>
#include <cstdio>
#include <cstdlib>  // for exit() used in the constructor's error path
#ifndef M_PI        // M_PI is non-standard; only define it if the libm headers did not
#define M_PI 3.14159265358979323846
#endif
// Precomputed Hann analysis window applied in place to audio frames, with
// optional 1/32767 scaling (int16 full-scale normalization).
//
// Supported overlap factors (frame_size / shift_size):
//   4 : periodic Hann window scaled by sqrt(2/3) (75% overlap-add)
//   2 : half-cycle sine window (50% overlap-add)
// Any other ratio prints an error and calls exit(-1), as before.
class HannWindow {
private:
  // Local pi constant so the window does not depend on the non-standard
  // M_PI macro being visible at this point.
  static constexpr double kPi = 3.14159265358979323846;

  double *hann;    // owned coefficient array of length frame_size
  int shift_size;  // hop size between consecutive frames
  int frame_size;  // samples per frame

public:
  inline HannWindow(int _frame_size, int _shift_size);
  inline ~HannWindow();

  // BUG FIX: this class owns `hann` through a raw pointer; the implicitly
  // generated copy operations would make two objects delete[] the same
  // buffer.  Copying is therefore disabled (Rule of Five).
  HannWindow(const HannWindow &) = delete;
  HannWindow &operator=(const HannWindow &) = delete;

  // Multiply each sample by the window, in place.
  // 2D buffer: buf[channel][sample], frame_size samples per channel.
  inline void Process(double **buf, int channels);
  // 1D multi-channel buffer with a stride of frame_size+2 per channel
  // (the +2 padding matches the layout expected by in-place real FFTs).
  inline void Process(double *buf, int channels);
  // 1D single-channel buffer of frame_size samples.
  inline void Process(double *buf);

  // Same as Process, but additionally divides each sample by 32767.
  inline void WindowWithScaling(double **buf, int channels);
  inline void WindowWithScaling(double *buf, int channels);
  inline void WindowWithScaling(double *buf);
};

inline HannWindow::HannWindow(int _frame_size, int _shift_size) {
  shift_size = _shift_size;
  frame_size = _frame_size;
  hann = new double[frame_size];

  switch (frame_size / shift_size) {
    case 4: {
      // Periodic Hann window; index 0 is exactly zero by construction.
      hann[0] = 0.0;
      for (int i = 1; i < frame_size; ++i)
        hann[i] =
            0.5 * (1.0 - cos(2.0 * kPi * (double)i / (double)frame_size));
      // sqrt(2/3) gain keeps the 75%-overlap analysis/synthesis sum at unity.
      const double gain = sqrt((double)2 / 3);
      for (int i = 1; i < frame_size; ++i) hann[i] *= gain;
      break;
    }
    case 2:
      // Half-cycle sine window for 50% overlap.
      for (int i = 0; i < frame_size; ++i)
        hann[i] = sin(kPi * (i + 0.5) / frame_size);
      break;
    default:
      printf("ERROR::frame_size/shift_size(%d) is Not Supported\n",
             frame_size / shift_size);
      exit(-1);
  }
}

inline HannWindow::~HannWindow() { delete[] hann; }

inline void HannWindow::Process(double **buffer, int channels) {
  for (int i = 0; i < channels; i++) {
#pragma omp parallel for
    for (int j = 0; j < frame_size; j++) {
      buffer[i][j] *= hann[j];
    }
  }
}

inline void HannWindow::Process(double *buffer, int channels) {
  for (int i = 0; i < channels; i++) {
#pragma omp parallel for
    for (int j = 0; j < frame_size; j++) {
      buffer[i * (frame_size + 2) + j] *= hann[j];
    }
  }
}

inline void HannWindow::Process(double *buffer) {
  for (int j = 0; j < frame_size; j++) {
    buffer[j] *= hann[j];
  }
}

inline void HannWindow::WindowWithScaling(double **buffer, int channels) {
  for (int i = 0; i < channels; i++) {
#pragma omp parallel for
    for (int j = 0; j < frame_size; j++) {
      buffer[i][j] *= hann[j];
      buffer[i][j] /= 32767.0;
    }
  }
}

inline void HannWindow::WindowWithScaling(double *buffer) {
  for (int j = 0; j < frame_size; j++) {
    buffer[j] *= hann[j];
    buffer[j] /= 32767.0;
  }
}

inline void HannWindow::WindowWithScaling(double *buffer, int channels) {
  for (int i = 0; i < channels; i++) {
#pragma omp parallel for
    for (int j = 0; j < frame_size; j++) {
      buffer[i * (frame_size + 2) + j] *= hann[j];
      buffer[i * (frame_size + 2) + j] /= 32767.0;
    }
  }
}
#endif
|
GB_unop__identity_uint16_uint16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_uint16_uint16
// op(A') function: GB_unop_tran__identity_uint16_uint16
// C type: uint16_t
// A type: uint16_t
// cast: uint16_t cij = aij
// unaryop: cij = aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint16_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint16_t z = aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
1
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// C = op (A): apply the identity operator cij = aij with no typecast
// (both A and C hold uint16_t).  Auto-generated kernel.
GrB_Info GB_unop_apply__identity_uint16_uint16
(
    uint16_t *Cx,                   // output values; Cx and Ax may be aliased
    const uint16_t *Ax,             // input values of A
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap; NULL otherwise
    int64_t anz,                    // number of entries to process
    int nthreads                    // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // this kernel was compiled out; caller falls back to the generic case
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity with no typecast: a single parallel memcpy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (uint16_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint16_t aij = Ax [p] ;
            uint16_t z = aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            uint16_t aij = Ax [p] ;
            uint16_t z = aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A and apply the identity operator.
// The actual work is done by the shared template GB_unop_transpose.c,
// which expands using the GB_* macros defined above.
GrB_Info GB_unop_tran__identity_uint16_uint16
(
    GrB_Matrix C,                           // output matrix
    const GrB_Matrix A,                     // input matrix, transposed into C
    int64_t *GB_RESTRICT *Workspaces,       // one workspace per thread/bucket
    const int64_t *GB_RESTRICT A_slice,     // how A's vectors are split
    int nworkspaces,                        // number of workspaces
    int nthreads                            // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // this kernel was compiled out; caller falls back to the generic case
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
CALPHADConcSolverBinary3Ph2Sl.h | #ifndef included_CALPHADConcSolverBinary3Ph2Sl
#define included_CALPHADConcSolverBinary3Ph2Sl
#include "NewtonSolver.h"
#include "datatypes.h"
namespace Thermo4PFM
{
/// Newton solver for the "internal" phase compositions of a binary alloy
/// with three phases (L, S0, S1) under a two-sublattice (2Sl) CALPHAD
/// model.  Solves a 3x3 nonlinear KKS system via the CRTP NewtonSolver base.
class CALPHADConcSolverBinary3Ph2Sl
    : public NewtonSolver<3, CALPHADConcSolverBinary3Ph2Sl, JacobianDataType>
{
public:
#ifdef HAVE_OPENMP_OFFLOAD
#pragma omp declare target
#endif
    /// compute "internal" concentrations cL, cS1, cS2 by solving KKS
    /// equations
    /// conc: initial guess and final solution (concentration in each phase)
    /// tol/max_iters/alpha: Newton tolerance, iteration cap, damping factor
    /// returns the NewtonSolver status code
    int ComputeConcentration(double* const conc, const double tol,
        const int max_iters, const double alpha = 1.)
    {
        return NewtonSolver::ComputeSolution(conc, tol, max_iters, alpha);
    }
    /// setup model parameter values to be used by solver,
    /// including composition "c0" and phase fractions "hphi0..2"
    /// to solve for
    void setup(const double c0, const double hphi0, const double hphi1,
        const double hphi2, const double RTinv,
        const CalphadDataType* const Lmix_L_,
        const CalphadDataType* const Lmix_S0_,
        const CalphadDataType* const Lmix_S1_, const CalphadDataType* const fA,
        const CalphadDataType* const fB, const int* const p,
        const int* const q);
    /// evaluate RHS of the system of equations to solve for
    /// specific to this solver
    void RHS(const double* const x, double* const fvec);
    /// evaluate Jacobian of system of equations
    /// specific to this solver
    void Jacobian(const double* const x, JacobianDataType** const fjac);
#ifdef HAVE_OPENMP_OFFLOAD
#pragma omp end declare target
#endif
private:
    ///
    /// Nominal composition to solve for
    ///
    double c0_;
    ///
    /// phase fractions to solve for
    ///
    double hphi0_;
    double hphi1_;
    double hphi2_;
    /// inverse of R*T (gas constant times temperature)
    double RTinv_;
    ///
    /// 4 L coefficients for each of the 3 possible phases (L, S0, S1)
    ///
    CalphadDataType Lmix_L_[4];
    CalphadDataType Lmix_S0_[4];
    CalphadDataType Lmix_S1_[4];
    ///
    /// energies of 2 species, in three phases each
    ///
    CalphadDataType fA_[3];
    CalphadDataType fB_[3];
    ///
    /// sublattice stoichiometry per phase
    /// NOTE(review): stored as double although setup() receives int*,
    /// presumably for use in floating-point expressions -- confirm in setup()
    ///
    double p_[3];
    double q_[3];
    // internal functions to help evaluate RHS and Jacobian
    void computeXi(const double* const c, double xi[3]) const;
    void computeDxiDc(const double* const c, double dxidc[3]) const;
};
}
#endif
|
GB_unaryop__identity_uint64_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_uint64_uint32
// op(A') function: GB_tran__identity_uint64_uint32
// C type: uint64_t
// A type: uint32_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, aij) \
uint64_t z = (uint64_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the identity operator with a uint32 -> uint64
// typecast to every entry.  Auto-generated kernel (older 2020 generator:
// no bitmap path; all anz entries are assumed present).
GrB_Info GB_unop__identity_uint64_uint32
(
    uint64_t *Cx,       // output values; Cx and Ax may be aliased
    uint32_t *Ax,       // input values of A
    int64_t anz,        // number of entries to process
    int nthreads        // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // this kernel was compiled out; caller falls back to the generic case
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // Cx [p] = (uint64_t) Ax [p], via the GB_* macros above
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast uint32 -> uint64, and apply the
// identity operator.  The work is done by the shared template
// GB_unaryop_transpose.c (phase 2), expanded with the GB_* macros above.
GrB_Info GB_tran__identity_uint64_uint32
(
    GrB_Matrix C,                       // output matrix
    const GrB_Matrix A,                 // input matrix, transposed into C
    int64_t *GB_RESTRICT *Rowcounts,    // per-slice row counts from phase 1
    GBI_single_iterator Iter,           // iterator over A's vectors
    const int64_t *GB_RESTRICT A_slice, // how A is split across tasks
    int naslice                         // number of slices of A
)
{
    #if GB_DISABLE
    // this kernel was compiled out; caller falls back to the generic case
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
opi.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
// Monte-Carlo estimate of pi: throw Ntrials darts at [-1,1]^2 and count how
// many land inside the unit circle.  Usage: ./opi <num_threads>
int main(int argc, char **argv) {
  // Q2b: thread count comes from the command line.
  // BUG FIX: the original read argv[0] (the program name) and never checked
  // argc; read argv[1] and validate it instead.
  if (argc < 2) {
    fprintf(stderr, "usage: %s <num_threads>\n", argv[0]);
    return 1;
  }
  int Nthreads = atoi(argv[1]);
  if (Nthreads < 1) Nthreads = 1;  // guard against non-numeric/zero input
  omp_set_num_threads(Nthreads);

  // One drand48 state per thread so generators are independent.
  struct drand48_data *drandData =
      (struct drand48_data *)malloc(Nthreads * sizeof(struct drand48_data));
  if (drandData == NULL) {
    fprintf(stderr, "out of memory\n");
    return 1;
  }

  // Q2c: each thread seeds its own entry of drandData from its thread id.
  #pragma omp parallel
  {
    int rank = omp_get_thread_num();
    long int seed = rank;
    srand48_r(seed, drandData + rank);
  }

  long long int Ntrials = 10000000;

  // running tallies; both are updated by every iteration, so both must be
  // in the reduction.  BUG FIX: the original incremented Ntotal outside the
  // reduction clause, a data race that silently corrupted the count.
  long long int Ntotal = 0;
  long long int Ncircle = 0;

  double start = omp_get_wtime();

  #pragma omp parallel for reduction(+ : Ncircle, Ntotal)
  for (long long int n = 0; n < Ntrials; n++) {
    double rand1;
    double rand2;
    // BUG FIX: the original passed drandData+0 for every thread, so all
    // threads shared (and raced on) one generator state; offset by the
    // thread id as the per-thread seeding above intends.
    int rank = omp_get_thread_num();
    drand48_r(drandData + rank, &rand1);
    drand48_r(drandData + rank, &rand2);
    double x = -1 + 2 * rand1;  // shift to [-1,1]
    double y = -1 + 2 * rand2;
    // inside the unit circle?  x*x+y*y <= 1 iff sqrt(x*x+y*y) <= 1, so the
    // sqrt is unnecessary work in the hot loop.
    if (x * x + y * y <= 1.0) Ncircle++;
    Ntotal++;
  }

  double pi = 4.0 * Ncircle / (double)(Ntotal);
  printf("Our final estimate of pi is %g \n", pi);
  free(drandData);

  double end = omp_get_wtime() - start;
  printf("The total time was %f\n", end);
  return 0;
}
|
GB_sparse_add_template.c | //------------------------------------------------------------------------------
// GB_sparse_add_template: C=A+B, C<M>=A+B when C is sparse/hypersparse
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// C is sparse or hypersparse:
// ------------------------------------------
// C = A + B
// ------------------------------------------
// sparse . sparse sparse
// ------------------------------------------
// C <M> = A + B
// ------------------------------------------
// sparse sparse sparse sparse
// sparse sparse sparse bitmap
// sparse sparse sparse full
// sparse sparse bitmap sparse
// sparse sparse bitmap bitmap
// sparse sparse bitmap full
// sparse sparse full sparse
// sparse sparse full bitmap
// sparse sparse full full
// sparse bitmap sparse sparse
// sparse full sparse sparse
// ------------------------------------------
// C <!M> = A + B
// ------------------------------------------
// sparse bitmap sparse sparse
// sparse full sparse sparse
// If all four matrices are sparse/hypersparse, and C<!M>=A+B is being
// computed, then M is passed in as NULL to GB_add_phase*. GB_add_sparsity
// returns apply_mask as false. The methods below do not handle the case when
// C is sparse, M is sparse, and !M is used. All other uses of !M when M
// is sparse result in a bitmap structure for C, and this is handled by
// GB_bitmap_add_template.
// For this case: the mask is done later, so C=A+B is computed here:
// ------------------------------------------
// C <!M> = A + B
// ------------------------------------------
// sparse sparse sparse sparse (mask later)
{
//--------------------------------------------------------------------------
// phase1: count entries in each C(:,j)
// phase2: compute C
//--------------------------------------------------------------------------
#pragma omp parallel for num_threads(C_nthreads) schedule(dynamic,1)
for (taskid = 0 ; taskid < C_ntasks ; taskid++)
{
//----------------------------------------------------------------------
// get the task descriptor
//----------------------------------------------------------------------
int64_t kfirst = TaskList [taskid].kfirst ;
int64_t klast = TaskList [taskid].klast ;
bool fine_task = (klast == -1) ;
int64_t len ;
if (fine_task)
{
// a fine task operates on a slice of a single vector
klast = kfirst ;
len = TaskList [taskid].len ;
}
else
{
// a coarse task operates on one or more whole vectors
len = vlen ;
}
//----------------------------------------------------------------------
// compute all vectors in this task
//----------------------------------------------------------------------
for (int64_t k = kfirst ; k <= klast ; k++)
{
//------------------------------------------------------------------
// get j, the kth vector of C
//------------------------------------------------------------------
int64_t j = GBH (Ch, k) ;
#if defined ( GB_PHASE_1_OF_2 )
int64_t cjnz = 0 ;
#else
int64_t pC, pC_end ;
if (fine_task)
{
// A fine task computes a slice of C(:,j)
pC = TaskList [taskid ].pC ;
pC_end = TaskList [taskid+1].pC ;
ASSERT (Cp [k] <= pC && pC <= pC_end && pC_end <= Cp [k+1]) ;
}
else
{
// The vectors of C are never sliced for a coarse task.
pC = Cp [k ] ;
pC_end = Cp [k+1] ;
}
int64_t cjnz = pC_end - pC ;
if (cjnz == 0) continue ;
#endif
//------------------------------------------------------------------
// get A(:,j)
//------------------------------------------------------------------
int64_t pA = -1, pA_end = -1 ;
if (fine_task)
{
// A fine task operates on Ai,Ax [pA...pA_end-1], which is
// a subset of the vector A(:,j)
pA = TaskList [taskid].pA ;
pA_end = TaskList [taskid].pA_end ;
}
else
{
// A coarse task operates on the entire vector A (:,j)
int64_t kA = (C_to_A == NULL) ? j : C_to_A [k] ;
if (kA >= 0)
{
pA = GBP (Ap, kA, vlen) ;
pA_end = GBP (Ap, kA+1, vlen) ;
}
}
int64_t ajnz = pA_end - pA ; // nnz in A(:,j) for this slice
int64_t pA_start = pA ;
bool adense = (ajnz == len) ;
// get the first and last indices in A(:,j) for this vector
int64_t iA_first = -1, iA_last = -1 ;
if (ajnz > 0)
{
iA_first = GBI (Ai, pA, vlen) ;
iA_last = GBI (Ai, pA_end-1, vlen) ;
}
//------------------------------------------------------------------
// get B(:,j)
//------------------------------------------------------------------
int64_t pB = -1, pB_end = -1 ;
if (fine_task)
{
// A fine task operates on Bi,Bx [pB...pB_end-1], which is
// a subset of the vector B(:,j)
pB = TaskList [taskid].pB ;
pB_end = TaskList [taskid].pB_end ;
}
else
{
// A coarse task operates on the entire vector B (:,j)
int64_t kB = (C_to_B == NULL) ? j : C_to_B [k] ;
if (kB >= 0)
{
pB = GBP (Bp, kB, vlen) ;
pB_end = GBP (Bp, kB+1, vlen) ;
}
}
int64_t bjnz = pB_end - pB ; // nnz in B(:,j) for this slice
int64_t pB_start = pB ;
bool bdense = (bjnz == len) ;
// get the first and last indices in B(:,j) for this vector
int64_t iB_first = -1, iB_last = -1 ;
if (bjnz > 0)
{
iB_first = GBI (Bi, pB, vlen) ;
iB_last = GBI (Bi, pB_end-1, vlen) ;
}
//------------------------------------------------------------------
// get M(:,j) if M is sparse or hypersparse
//------------------------------------------------------------------
bool sparse_mask_is_easy = false ;
int64_t pM = -1 ;
int64_t pM_end = -1 ;
if (M_is_sparse_or_hyper)
{
if (fine_task)
{
// A fine task operates on Mi,Mx [pM...pM_end-1],
// which is a subset of the vector M(:,j)
pM = TaskList [taskid].pM ;
pM_end = TaskList [taskid].pM_end ;
}
else
{
int64_t kM = -1 ;
if (Ch_is_Mh)
{
// Ch is the same as Mh (a deep copy)
ASSERT (Ch != NULL) ;
ASSERT (M_is_hyper) ;
ASSERT (Ch [k] == M->h [k]) ;
kM = k ;
}
else
{
kM = (C_to_M == NULL) ? j : C_to_M [k] ;
}
if (kM >= 0)
{
pM = GBP (Mp, kM , vlen) ;
pM_end = GBP (Mp, kM+1, vlen) ;
}
}
// The "easy mask" condition requires M to be sparse/hyper
// and structural. A and B cannot be bitmap. Also one of
// the following 3 conditions must hold:
// (1) all entries are present in A(:,j) and B == M
// (2) all entries are present in B(:,j) and A == M
// (3) both A and B are aliased to M
sparse_mask_is_easy =
Mask_struct && // M must be structural
!A_is_bitmap && // A must not be bitmap
!B_is_bitmap && // B must not be bitmap
((adense && B == M) || // one of 3 conditions holds
(bdense && A == M) ||
(A == M && B == M)) ;
// TODO: add the condition above to GB_add_sparsity,
// where adense/bdense are true for the whole matrix
// (adense is true if A is full, or sparse/hypersparse with
// all entries present). The test here is done vector by
// vector, for each A(:,j) and B(:,j). This is a finer grain
// test, as compared to a test for all of A and B.
}
//------------------------------------------------------------------
// C(:,j)<optional mask> = A (:,j) + B (:,j) or subvector
//------------------------------------------------------------------
if (M == NULL)
{
//--------------------------------------------------------------
// M is not present, or !M is sparse but not applied here
//--------------------------------------------------------------
// ------------------------------------------
// C = A + B
// ------------------------------------------
// sparse . sparse sparse
// ------------------------------------------
// C <!M> = A + B
// ------------------------------------------
// sparse sparse sparse sparse (mask later)
// If all four matrices are sparse or hypersparse, and
// Mask_comp is true, the mask M is passed in to this method as
// NULL. C=A+B is computed with no mask, and !M is applied
// later.
// A and B are both sparse or hypersparse, not bitmap or
// full, but individual vectors of A and B might have all
// entries present (adense and/or bdense).
ASSERT (A_is_sparse || A_is_hyper) ;
ASSERT (B_is_sparse || B_is_hyper) ;
#if defined ( GB_PHASE_1_OF_2 )
if (A_and_B_are_disjoint)
{
// only used by GB_wait, which computes A+T where T is the
// matrix of pending tuples for A. The pattern of pending
// tuples is always disjoint with the pattern of A.
cjnz = ajnz + bjnz ;
}
else
#endif
if (adense && bdense)
{
//----------------------------------------------------------
// Method01: A(:,j) and B(:,j) dense: thus C(:,j) dense
//----------------------------------------------------------
ASSERT (ajnz == bjnz) ;
ASSERT (iA_first == iB_first) ;
ASSERT (iA_last == iB_last ) ;
#if defined ( GB_PHASE_1_OF_2 )
cjnz = ajnz ;
#else
ASSERT (cjnz == ajnz) ;
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < ajnz ; p++)
{
// C (i,j) = A (i,j) + B (i,j)
int64_t i = p + iA_first ;
Ci [pC + p] = i ;
ASSERT (Ai [pA + p] == i) ;
ASSERT (Bi [pB + p] == i) ;
#ifndef GB_ISO_ADD
GB_LOAD_A (aij, Ax, pA + p, A_iso) ;
GB_LOAD_B (bij, Bx, pB + p, B_iso) ;
GB_BINOP (GB_CX (pC + p), aij, bij, i, j) ;
#endif
}
#endif
}
else if (adense)
{
//----------------------------------------------------------
// Method02: A(:,j) dense, B(:,j) sparse: C(:,j) dense
//----------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
cjnz = ajnz ;
#else
ASSERT (cjnz == ajnz) ;
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < ajnz ; p++)
{
int64_t i = p + iA_first ;
Ci [pC + p] = i ;
ASSERT (Ai [pA + p] == i) ;
#ifndef GB_ISO_ADD
#ifdef GB_EWISEUNION
{
// C (i,j) = A(i,j) + beta
GB_LOAD_A (aij, Ax, pA+p, A_iso) ;
GB_BINOP (GB_CX (pC+p), aij, beta_scalar, i, j) ;
}
#else
{
// C (i,j) = A (i,j)
GB_COPY_A_TO_C (GB_CX (pC+p), Ax, pA+p, A_iso) ;
}
#endif
#endif
}
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < bjnz ; p++)
{
// C (i,j) = A (i,j) + B (i,j)
int64_t i = Bi [pB + p] ;
int64_t ii = i - iA_first ;
ASSERT (Ai [pA + ii] == i) ;
#ifndef GB_ISO_ADD
GB_LOAD_A (aij, Ax, pA + ii, A_iso) ;
GB_LOAD_B (bij, Bx, pB + p, B_iso) ;
GB_BINOP (GB_CX (pC + ii), aij, bij, i, j) ;
#endif
}
#endif
}
else if (bdense)
{
//----------------------------------------------------------
// Method03: A(:,j) sparse, B(:,j) dense: C(:,j) dense
//----------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
cjnz = bjnz ;
#else
ASSERT (cjnz == bjnz) ;
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < bjnz ; p++)
{
int64_t i = p + iB_first ;
Ci [pC + p] = i ;
ASSERT (Bi [pB + p] == i) ;
#ifndef GB_ISO_ADD
#ifdef GB_EWISEUNION
{
// C (i,j) = alpha + B(i,j)
GB_LOAD_B (bij, Bx, pB+p, B_iso) ;
GB_BINOP (GB_CX (pC+p), alpha_scalar, bij, i, j) ;
}
#else
{
// C (i,j) = B (i,j)
GB_COPY_B_TO_C (GB_CX (pC+p), Bx, pB+p, B_iso) ;
}
#endif
#endif
}
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < ajnz ; p++)
{
// C (i,j) = A (i,j) + B (i,j)
int64_t i = Ai [pA + p] ;
int64_t ii = i - iB_first ;
ASSERT (Bi [pB + ii] == i) ;
#ifndef GB_ISO_ADD
GB_LOAD_A (aij, Ax, pA + p, A_iso) ;
GB_LOAD_B (bij, Bx, pB + ii, B_iso) ;
GB_BINOP (GB_CX (pC + ii), aij, bij, i, j) ;
#endif
}
#endif
}
else if (ajnz == 0)
{
//----------------------------------------------------------
// Method04: A(:,j) is empty
//----------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
cjnz = bjnz ;
#else
ASSERT (cjnz == bjnz) ;
memcpy (Ci + pC, Bi + pB, bjnz * sizeof (int64_t)) ;
#ifndef GB_ISO_ADD
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < bjnz ; p++)
{
#ifdef GB_EWISEUNION
{
// C (i,j) = alpha + B(i,j)
GB_LOAD_B (bij, Bx, pB+p, B_iso) ;
GB_BINOP (GB_CX (pC+p), alpha_scalar, bij,
Bi [pB+p], j) ;
}
#else
{
// C (i,j) = B (i,j)
GB_COPY_B_TO_C (GB_CX (pC+p), Bx, pB+p, B_iso) ;
}
#endif
}
#endif
#endif
}
else if (bjnz == 0)
{
//----------------------------------------------------------
// Method05: B(:,j) is empty
//----------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
cjnz = ajnz ;
#else
ASSERT (cjnz == ajnz) ;
memcpy (Ci + pC, Ai + pA, ajnz * sizeof (int64_t)) ;
#ifndef GB_ISO_ADD
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < ajnz ; p++)
{
#ifdef GB_EWISEUNION
{
// C (i,j) = A(i,j) + beta
GB_LOAD_A (aij, Ax, pA+p, A_iso) ;
GB_BINOP (GB_CX (pC+p), aij, beta_scalar,
Ai [pA+p], j) ;
}
#else
{
// C (i,j) = A (i,j)
GB_COPY_A_TO_C (GB_CX (pC+p), Ax, pA+p, A_iso) ;
}
#endif
}
#endif
#endif
}
else if (iA_last < iB_first)
{
//----------------------------------------------------------
// Method06: last A(:,j) comes before 1st B(:,j)
//----------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
cjnz = ajnz + bjnz ;
#else
ASSERT (cjnz == ajnz + bjnz) ;
memcpy (Ci + pC, Ai + pA, ajnz * sizeof (int64_t)) ;
#ifndef GB_ISO_ADD
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < ajnz ; p++)
{
#ifdef GB_EWISEUNION
{
// C (i,j) = A(i,j) + beta
GB_LOAD_A (aij, Ax, pA+p, A_iso) ;
GB_BINOP (GB_CX (pC+p), aij, beta_scalar,
Ai [pA+p], j) ;
}
#else
{
// C (i,j) = A (i,j)
GB_COPY_A_TO_C (GB_CX (pC+p), Ax, pA+p, A_iso) ;
}
#endif
}
#endif
pC += ajnz ;
memcpy (Ci + pC, Bi + pB, bjnz * sizeof (int64_t)) ;
#ifndef GB_ISO_ADD
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < bjnz ; p++)
{
#ifdef GB_EWISEUNION
{
// C (i,j) = alpha + B(i,j)
GB_LOAD_B (bij, Bx, pB+p, B_iso) ;
GB_BINOP (GB_CX (pC+p), alpha_scalar, bij,
Bi [pB+p], j) ;
}
#else
{
// C (i,j) = B (i,j)
GB_COPY_B_TO_C (GB_CX (pC+p), Bx, pB+p, B_iso) ;
}
#endif
}
#endif
#endif
}
else if (iB_last < iA_first)
{
//----------------------------------------------------------
// Method07: last B(:,j) comes before 1st A(:,j)
//----------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
cjnz = ajnz + bjnz ;
#else
ASSERT (cjnz == ajnz + bjnz) ;
memcpy (Ci + pC, Bi + pB, bjnz * sizeof (int64_t)) ;
#ifndef GB_ISO_ADD
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < bjnz ; p++)
{
#ifdef GB_EWISEUNION
{
// C (i,j) = alpha + B(i,j)
GB_LOAD_B (bij, Bx, pB+p, B_iso) ;
GB_BINOP (GB_CX (pC+p), alpha_scalar, bij,
Bi [pB+p], j) ;
}
#else
{
// C (i,j) = B (i,j)
GB_COPY_B_TO_C (GB_CX (pC+p), Bx, pB+p, B_iso) ;
}
#endif
}
#endif
pC += bjnz ;
memcpy (Ci + pC, Ai + pA, ajnz * sizeof (int64_t)) ;
#ifndef GB_ISO_ADD
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < ajnz ; p++)
{
#ifdef GB_EWISEUNION
{
// C (i,j) = A(i,j) + beta
GB_LOAD_A (aij, Ax, pA+p, A_iso) ;
GB_BINOP (GB_CX (pC+p), aij, beta_scalar,
Ai [pA+p], j) ;
}
#else
{
// C (i,j) = A (i,j)
GB_COPY_A_TO_C (GB_CX (pC+p), Ax, pA+p, A_iso) ;
}
#endif
}
#endif
#endif
}
#if defined ( GB_PHASE_1_OF_2 )
else if (ajnz > 32 * bjnz)
{
//----------------------------------------------------------
// Method08: A(:,j) is much denser than B(:,j)
//----------------------------------------------------------
// cjnz = ajnz + bjnz - nnz in the intersection
cjnz = ajnz + bjnz ;
for ( ; pB < pB_end ; pB++)
{
int64_t i = Bi [pB] ;
// find i in A(:,j)
int64_t pright = pA_end - 1 ;
bool found ;
GB_BINARY_SEARCH (i, Ai, pA, pright, found) ;
if (found) cjnz-- ;
}
}
else if (bjnz > 32 * ajnz)
{
//----------------------------------------------------------
// Method09: B(:,j) is much denser than A(:,j)
//----------------------------------------------------------
// cjnz = ajnz + bjnz - nnz in the intersection
cjnz = ajnz + bjnz ;
for ( ; pA < pA_end ; pA++)
{
int64_t i = Ai [pA] ;
// find i in B(:,j)
int64_t pright = pB_end - 1 ;
bool found ;
GB_BINARY_SEARCH (i, Bi, pB, pright, found) ;
if (found) cjnz-- ;
}
}
#endif
else
{
//----------------------------------------------------------
// Method10: A(:,j) and B(:,j) about the same sparsity
//----------------------------------------------------------
while (pA < pA_end && pB < pB_end)
{
int64_t iA = Ai [pA] ;
int64_t iB = Bi [pB] ;
if (iA < iB)
{
#if defined ( GB_PHASE_2_OF_2 )
Ci [pC] = iA ;
#ifndef GB_ISO_ADD
#ifdef GB_EWISEUNION
{
// C (iA,j) = A(iA,j) + beta
GB_LOAD_A (aij, Ax, pA, A_iso) ;
GB_BINOP (GB_CX (pC), aij, beta_scalar, iA, j) ;
}
#else
{
// C (iA,j) = A (iA,j)
GB_COPY_A_TO_C (GB_CX (pC), Ax, pA, A_iso) ;
}
#endif
#endif
#endif
pA++ ;
}
else if (iA > iB)
{
#if defined ( GB_PHASE_2_OF_2 )
Ci [pC] = iB ;
#ifndef GB_ISO_ADD
#ifdef GB_EWISEUNION
{
// C (iB,j) = alpha + B(iB,j)
GB_LOAD_B (bij, Bx, pB, B_iso) ;
GB_BINOP (GB_CX (pC), alpha_scalar, bij,
iB, j) ;
}
#else
{
// C (iB,j) = B (iB,j)
GB_COPY_B_TO_C (GB_CX (pC), Bx, pB, B_iso) ;
}
#endif
#endif
#endif
pB++ ;
}
else
{
// C (i,j) = A (i,j) + B (i,j)
#if defined ( GB_PHASE_2_OF_2 )
Ci [pC] = iB ;
#ifndef GB_ISO_ADD
GB_LOAD_A (aij, Ax, pA, A_iso) ;
GB_LOAD_B (bij, Bx, pB, B_iso) ;
GB_BINOP (GB_CX (pC), aij, bij, iB, j) ;
#endif
#endif
pA++ ;
pB++ ;
}
#if defined ( GB_PHASE_2_OF_2 )
pC++ ;
#else
cjnz++ ;
#endif
}
//----------------------------------------------------------
// A (:,j) or B (:,j) have entries left; not both
//----------------------------------------------------------
ajnz = (pA_end - pA) ;
bjnz = (pB_end - pB) ;
ASSERT (ajnz == 0 || bjnz == 0) ;
#if defined ( GB_PHASE_1_OF_2 )
cjnz += ajnz + bjnz ;
#else
memcpy (Ci + pC, Ai + pA, ajnz * sizeof (int64_t)) ;
#ifndef GB_ISO_ADD
for (int64_t p = 0 ; p < ajnz ; p++)
{
#ifdef GB_EWISEUNION
{
// C (i,j) = A(i,j) + beta
GB_LOAD_A (aij, Ax, pA+p, A_iso) ;
GB_BINOP (GB_CX (pC+p), aij, beta_scalar,
Ai [pA+p], j) ;
}
#else
{
// C (i,j) = A (i,j)
GB_COPY_A_TO_C (GB_CX (pC+p), Ax, pA+p, A_iso) ;
}
#endif
}
#endif
memcpy (Ci + pC, Bi + pB, bjnz * sizeof (int64_t)) ;
#ifndef GB_ISO_ADD
for (int64_t p = 0 ; p < bjnz ; p++)
{
#ifdef GB_EWISEUNION
{
// C (i,j) = alpha + B(i,j)
GB_LOAD_B (bij, Bx, pB+p, B_iso) ;
GB_BINOP (GB_CX (pC+p), alpha_scalar, bij,
Bi [pB+p], j) ;
}
#else
{
// C (i,j) = B (i,j)
GB_COPY_B_TO_C (GB_CX (pC+p), Bx, pB+p, B_iso) ;
}
#endif
}
#endif
ASSERT (pC + ajnz + bjnz == pC_end) ;
#endif
}
}
else if (sparse_mask_is_easy)
{
//--------------------------------------------------------------
// special case: M is present and very easy to use
//--------------------------------------------------------------
// ------------------------------------------
// C <M> = A + B
// ------------------------------------------
// sparse sparse sparse sparse
// sparse sparse sparse full
// sparse sparse full sparse
// sparse sparse full full
// A and B are sparse, hypersparse or full, not bitmap.
ASSERT (!A_is_bitmap) ;
ASSERT (!B_is_bitmap) ;
ASSERT (Mask_struct) ;
int64_t mjnz = pM_end - pM ; // nnz (M (:,j))
#if defined ( GB_PHASE_1_OF_2 )
// M is structural, and sparse or hypersparse, so every entry
// in the mask is guaranteed to appear in A+B. The symbolic
// count is thus trivial.
cjnz = mjnz ;
#else
// copy the pattern into C (:,j)
int64_t pC_start = pC ;
int64_t pM_start = pM ;
memcpy (Ci + pC, Mi + pM, mjnz * sizeof (int64_t)) ;
int64_t pA_offset = pA_start - iA_first ;
int64_t pB_offset = pB_start - iB_first ;
if (adense && B == M)
{
//----------------------------------------------------------
// Method11: A dense, B == M
//----------------------------------------------------------
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < mjnz ; p++)
{
int64_t pM = p + pM_start ;
int64_t pC = p + pC_start ;
int64_t i = Mi [pM] ;
ASSERT (GB_mcast (Mx, pM, msize)) ;
ASSERT (GBI (Ai, pA_offset + i, vlen) == i) ;
ASSERT (GBI (Bi, pM, vlen) == i) ;
#ifndef GB_ISO_ADD
GB_LOAD_A (aij, Ax, pA_offset + i, A_iso) ;
GB_LOAD_B (bij, Bx, pM, B_iso) ;
GB_BINOP (GB_CX (pC), aij, bij, i, j) ;
#endif
}
}
else if (bdense && A == M)
{
//----------------------------------------------------------
// Method12: B dense, A == M
//----------------------------------------------------------
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < mjnz ; p++)
{
int64_t pM = p + pM_start ;
int64_t pC = p + pC_start ;
int64_t i = Mi [pM] ;
ASSERT (GB_mcast (Mx, pM, msize)) ;
ASSERT (GBI (Ai, pM, vlen) == i) ;
ASSERT (GBI (Bi, pB_offset + i, vlen) == i) ;
#ifndef GB_ISO_ADD
GB_LOAD_A (aij, Ax, pM, A_iso) ;
GB_LOAD_B (bij, Bx, pB_offset + i, B_iso) ;
GB_BINOP (GB_CX (pC), aij, bij, i, j) ;
#endif
}
}
else // (A == M) && (B == M)
{
//----------------------------------------------------------
// Method13: A == M == B: all three matrices the same
//----------------------------------------------------------
#ifndef GB_ISO_ADD
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = 0 ; p < mjnz ; p++)
{
int64_t pM = p + pM_start ;
int64_t pC = p + pC_start ;
#if GB_OP_IS_SECOND
GB_LOAD_B (t, Bx, pM, B_iso) ;
#else
GB_LOAD_A (t, Ax, pM, A_iso) ;
#endif
GB_BINOP (GB_CX (pC), t, t, Mi [pM], j) ;
}
#endif
}
#endif
}
else if (M_is_sparse_or_hyper)
{
//--------------------------------------------------------------
// Method14: C and M are sparse or hypersparse
//--------------------------------------------------------------
// ------------------------------------------
// C <M> = A + B
// ------------------------------------------
// sparse sparse sparse sparse (*)
// sparse sparse sparse bitmap (*)
// sparse sparse sparse full (*)
// sparse sparse bitmap sparse (*)
// sparse sparse bitmap bitmap (+)
// sparse sparse bitmap full (+)
// sparse sparse full sparse (*)
// sparse sparse full bitmap (+)
// sparse sparse full full (+)
// (*) This method is efficient except when either A or B are
// sparse, and when M is sparse but with many entries. When M
// is sparse and either A or B are sparse, the method is
// designed to be very efficient when M is very sparse compared
// with A and/or B. It traverses all entries in the sparse M,
// and (for sparse A or B) does a binary search for entries in
// A or B. In that case, if M has many entries, the mask M
// should be ignored, and C=A+B should be computed without any
// mask. The test for when to use M here should ignore A or B
// if they are bitmap or full.
// (+) TODO: if C and M are sparse/hyper, and A and B are
// both bitmap/full, then use GB_emult_04_template instead,
// but with (Ab [p] || Bb [p]) instead of (Ab [p] && Bb [p]).
// A and B can have any sparsity pattern (hypersparse,
// sparse, bitmap, or full).
for ( ; pM < pM_end ; pM++)
{
//----------------------------------------------------------
// get M(i,j) for A(i,j) + B (i,j)
//----------------------------------------------------------
int64_t i = Mi [pM] ;
bool mij = GB_mcast (Mx, pM, msize) ;
if (!mij) continue ;
//----------------------------------------------------------
// get A(i,j)
//----------------------------------------------------------
bool afound ;
if (adense)
{
// A is dense, bitmap, or full; use quick lookup
pA = pA_start + (i - iA_first) ;
afound = GBB (Ab, pA) ;
}
else if (A == M)
{
// A is aliased to M
pA = pM ;
afound = true ;
}
else
{
// A is sparse; use binary search. This is slow unless
// M is very sparse compared with A.
int64_t apright = pA_end - 1 ;
GB_BINARY_SEARCH (i, Ai, pA, apright, afound) ;
}
ASSERT (GB_IMPLIES (afound, GBI (Ai, pA, vlen) == i)) ;
//----------------------------------------------------------
// get B(i,j)
//----------------------------------------------------------
bool bfound ;
if (bdense)
{
// B is dense; use quick lookup
pB = pB_start + (i - iB_first) ;
bfound = GBB (Bb, pB) ;
}
else if (B == M)
{
// B is aliased to M
pB = pM ;
bfound = true ;
}
else
{
// B is sparse; use binary search. This is slow unless
// M is very sparse compared with B.
int64_t bpright = pB_end - 1 ;
GB_BINARY_SEARCH (i, Bi, pB, bpright, bfound) ;
}
ASSERT (GB_IMPLIES (bfound, GBI (Bi, pB, vlen) == i)) ;
//----------------------------------------------------------
// C(i,j) = A(i,j) + B(i,j)
//----------------------------------------------------------
if (afound && bfound)
{
// C (i,j) = A (i,j) + B (i,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = i ;
#ifndef GB_ISO_ADD
GB_LOAD_A (aij, Ax, pA, A_iso) ;
GB_LOAD_B (bij, Bx, pB, B_iso) ;
GB_BINOP (GB_CX (pC), aij, bij, i, j) ;
#endif
pC++ ;
#endif
}
else if (afound)
{
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = i ;
#ifndef GB_ISO_ADD
#ifdef GB_EWISEUNION
{
// C (i,j) = A(i,j) + beta
GB_LOAD_A (aij, Ax, pA, A_iso) ;
GB_BINOP (GB_CX (pC), aij, beta_scalar, i, j) ;
}
#else
{
// C (i,j) = A (i,j)
GB_COPY_A_TO_C (GB_CX (pC), Ax, pA, A_iso) ;
}
#endif
#endif
pC++ ;
#endif
}
else if (bfound)
{
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = i ;
#ifndef GB_ISO_ADD
#ifdef GB_EWISEUNION
{
// C (i,j) = alpha + B(i,j)
GB_LOAD_B (bij, Bx, pB, B_iso) ;
GB_BINOP (GB_CX (pC), alpha_scalar, bij, i, j) ;
}
#else
{
// C (i,j) = B (i,j)
GB_COPY_B_TO_C (GB_CX (pC), Bx, pB, B_iso) ;
}
#endif
#endif
pC++ ;
#endif
}
}
#if defined ( GB_PHASE_2_OF_2 )
ASSERT (pC == pC_end) ;
#endif
}
else
{
//--------------------------------------------------------------
// M is bitmap or full, for either C<M>=A+B or C<!M>=A+B
//--------------------------------------------------------------
// ------------------------------------------
// C <M> = A + B
// ------------------------------------------
// sparse bitmap sparse sparse
// sparse full sparse sparse
// ------------------------------------------
// C <!M> = A + B
// ------------------------------------------
// sparse bitmap sparse sparse
// sparse full sparse sparse
// This method is very efficient for any mask, and should
// always be used if M is bitmap or full, even if the mask must
// also be applied later in GB_mask or GB_accum_mask.
// Exploiting the mask here adds no extra search time, and it
// reduces the size of C on output.
// GB_GET_MIJ: get M(i,j) where M is bitmap or full
#undef GB_GET_MIJ
#define GB_GET_MIJ(i) \
int64_t pM = pM_start + i ; \
bool mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ; \
if (Mask_comp) mij = !mij ;
// A and B are sparse or hypersparse, not bitmap or full,
// but individual vectors of A and B might have all entries
// present (adense and/or bdense).
ASSERT (A_is_sparse || A_is_hyper) ;
ASSERT (B_is_sparse || B_is_hyper) ;
int64_t pM_start = j * vlen ;
if (adense && bdense)
{
//----------------------------------------------------------
// Method15: A(:,j) and B(:,j) dense, M bitmap/full
//----------------------------------------------------------
ASSERT (ajnz == bjnz) ;
ASSERT (iA_first == iB_first) ;
ASSERT (iA_last == iB_last ) ;
for (int64_t p = 0 ; p < ajnz ; p++)
{
int64_t i = p + iA_first ;
ASSERT (Ai [pA + p] == i) ;
ASSERT (Bi [pB + p] == i) ;
GB_GET_MIJ (i) ;
if (mij)
{
// C (i,j) = A (i,j) + B (i,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = i ;
#ifndef GB_ISO_ADD
GB_LOAD_A (aij, Ax, pA + p, A_iso) ;
GB_LOAD_B (bij, Bx, pB + p, B_iso) ;
GB_BINOP (GB_CX (pC), aij, bij, i, j) ;
#endif
pC++ ;
#endif
}
}
}
else if (ajnz == 0)
{
//----------------------------------------------------------
// Method16: A(:,j) is empty, M bitmap/full
//----------------------------------------------------------
for ( ; pB < pB_end ; pB++)
{
int64_t i = Bi [pB] ;
GB_GET_MIJ (i) ;
if (mij)
{
// C (i,j) = B (i,j), or alpha + B(i,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = i ;
#ifndef GB_ISO_ADD
#ifdef GB_EWISEUNION
{
// C (i,j) = alpha + B(i,j)
GB_LOAD_B (bij, Bx, pB, B_iso) ;
GB_BINOP (GB_CX (pC), alpha_scalar, bij, i, j) ;
}
#else
{
// C (i,j) = B (i,j)
GB_COPY_B_TO_C (GB_CX (pC), Bx, pB, B_iso) ;
}
#endif
#endif
pC++ ;
#endif
}
}
}
else if (bjnz == 0)
{
//----------------------------------------------------------
// Method17: B(:,j) is empty, M bitmap/full
//----------------------------------------------------------
for ( ; pA < pA_end ; pA++)
{
int64_t i = Ai [pA] ;
GB_GET_MIJ (i) ;
if (mij)
{
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = i ;
#ifndef GB_ISO_ADD
#ifdef GB_EWISEUNION
{
// C (i,j) = A(i,j) + beta
GB_LOAD_A (aij, Ax, pA, A_iso) ;
GB_BINOP (GB_CX (pC), aij, beta_scalar, i, j) ;
}
#else
{
// C (i,j) = A (i,j)
GB_COPY_A_TO_C (GB_CX (pC), Ax, pA, A_iso) ;
}
#endif
#endif
pC++ ;
#endif
}
}
}
else if (iA_last < iB_first)
{
//----------------------------------------------------------
// Method18:last A(:,j) before 1st B(:,j), M bitmap/full
//----------------------------------------------------------
for ( ; pA < pA_end ; pA++)
{
int64_t i = Ai [pA] ;
GB_GET_MIJ (i) ;
if (mij)
{
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = i ;
#ifndef GB_ISO_ADD
#ifdef GB_EWISEUNION
{
// C (i,j) = A(i,j) + beta
GB_LOAD_A (aij, Ax, pA, A_iso) ;
GB_BINOP (GB_CX (pC), aij, beta_scalar, i, j) ;
}
#else
{
// C (i,j) = A (i,j)
GB_COPY_A_TO_C (GB_CX (pC), Ax, pA, A_iso) ;
}
#endif
#endif
pC++ ;
#endif
}
}
for ( ; pB < pB_end ; pB++)
{
int64_t i = Bi [pB] ;
GB_GET_MIJ (i) ;
if (mij)
{
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = i ;
#ifndef GB_ISO_ADD
#ifdef GB_EWISEUNION
{
// C (i,j) = alpha + B(i,j)
GB_LOAD_B (bij, Bx, pB, B_iso) ;
GB_BINOP (GB_CX (pC), alpha_scalar, bij, i, j) ;
}
#else
{
// C (i,j) = B (i,j)
GB_COPY_B_TO_C (GB_CX (pC), Bx, pB, B_iso) ;
}
#endif
#endif
pC++ ;
#endif
}
}
}
else if (iB_last < iA_first)
{
//----------------------------------------------------------
// Method19:last B(:,j) before 1st A(:,j), M bitmap/full
//----------------------------------------------------------
for ( ; pB < pB_end ; pB++)
{
int64_t i = Bi [pB] ;
GB_GET_MIJ (i) ;
if (mij)
{
// C (i,j) = B (i,j), or alpha + B(i,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = i ;
#ifndef GB_ISO_ADD
#ifdef GB_EWISEUNION
{
// C (i,j) = alpha + B(i,j)
GB_LOAD_B (bij, Bx, pB, B_iso) ;
GB_BINOP (GB_CX (pC), alpha_scalar, bij, i, j) ;
}
#else
{
// C (i,j) = B (i,j)
GB_COPY_B_TO_C (GB_CX (pC), Bx, pB, B_iso) ;
}
#endif
#endif
pC++ ;
#endif
}
}
for ( ; pA < pA_end ; pA++)
{
int64_t i = Ai [pA] ;
GB_GET_MIJ (i) ;
if (mij)
{
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = i ;
#ifndef GB_ISO_ADD
#ifdef GB_EWISEUNION
{
// C (i,j) = A(i,j) + beta
GB_LOAD_A (aij, Ax, pA, A_iso) ;
GB_BINOP (GB_CX (pC), aij, beta_scalar, i, j) ;
}
#else
{
// C (i,j) = A (i,j)
GB_COPY_A_TO_C (GB_CX (pC), Ax, pA, A_iso) ;
}
#endif
#endif
pC++ ;
#endif
}
}
}
else
{
//----------------------------------------------------------
// Method20: merge A(:,j) and B(:,j), M bitmap/full
//----------------------------------------------------------
while (pA < pA_end && pB < pB_end)
{
int64_t iA = Ai [pA] ;
int64_t iB = Bi [pB] ;
if (iA < iB)
{
GB_GET_MIJ (iA) ;
if (mij)
{
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = iA ;
#ifndef GB_ISO_ADD
#ifdef GB_EWISEUNION
{
// C (iA,j) = A(iA,j) + beta
GB_LOAD_A (aij, Ax, pA, A_iso) ;
GB_BINOP (GB_CX (pC), aij, beta_scalar,
iA, j);
}
#else
{
// C (iA,j) = A (iA,j)
GB_COPY_A_TO_C (GB_CX (pC), Ax, pA, A_iso) ;
}
#endif
#endif
pC++ ;
#endif
}
pA++ ;
}
else if (iA > iB)
{
GB_GET_MIJ (iB) ;
if (mij)
{
// C (iB,j) = B (iB,j), or alpha + B(iB,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = iB ;
#ifndef GB_ISO_ADD
#ifdef GB_EWISEUNION
{
// C (iB,j) = alpha + B(iB,j)
GB_LOAD_B (bij, Bx, pB, B_iso) ;
GB_BINOP (GB_CX (pC), alpha_scalar, bij,
iB, j) ;
}
#else
{
// C (iB,j) = B (iB,j)
GB_COPY_B_TO_C (GB_CX (pC), Bx, pB, B_iso) ;
}
#endif
#endif
pC++ ;
#endif
}
pB++ ;
}
else
{
GB_GET_MIJ (iB) ;
if (mij)
{
// C (i,j) = A (i,j) + B (i,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = iB ;
#ifndef GB_ISO_ADD
GB_LOAD_A (aij, Ax, pA, A_iso) ;
GB_LOAD_B (bij, Bx, pB, B_iso) ;
GB_BINOP (GB_CX (pC), aij, bij, iB, j) ;
#endif
pC++ ;
#endif
}
pA++ ;
pB++ ;
}
}
//----------------------------------------------------------
// A (:,j) or B (:,j) have entries left; not both
//----------------------------------------------------------
for ( ; pA < pA_end ; pA++)
{
int64_t iA = Ai [pA] ;
GB_GET_MIJ (iA) ;
if (mij)
{
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = iA ;
#ifndef GB_ISO_ADD
#ifdef GB_EWISEUNION
{
// C (iA,j) = A(iA,j) + beta
GB_LOAD_A (aij, Ax, pA, A_iso) ;
GB_BINOP (GB_CX (pC), aij, beta_scalar, iA, j) ;
}
#else
{
// C (iA,j) = A (iA,j)
GB_COPY_A_TO_C (GB_CX (pC), Ax, pA, A_iso) ;
}
#endif
#endif
pC++ ;
#endif
}
}
for ( ; pB < pB_end ; pB++)
{
int64_t iB = Bi [pB] ;
GB_GET_MIJ (iB) ;
if (mij)
{
// C (iB,j) = B (iB,j), or alpha + B(iB,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = iB ;
#ifndef GB_ISO_ADD
#ifdef GB_EWISEUNION
{
// C (iB,j) = alpha + B(iB,j)
GB_LOAD_B (bij, Bx, pB, B_iso) ;
GB_BINOP (GB_CX (pC), alpha_scalar, bij,
iB, j) ;
}
#else
{
// C (iB,j) = B (iB,j)
GB_COPY_B_TO_C (GB_CX (pC), Bx, pB, B_iso) ;
}
#endif
#endif
pC++ ;
#endif
}
}
}
}
//------------------------------------------------------------------
// final count of nnz (C (:,j))
//------------------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
if (fine_task)
{
TaskList [taskid].pC = cjnz ;
}
else
{
Cp [k] = cjnz ;
}
#endif
}
}
}
|
rawSHA256_ng_fmt_plug.c | /*
* Copyright 2013, epixoip.
* AVX2 support, Copyright (c) 2015 magnum
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that redistribution of source
* retains the above copyright.
*/
#include "arch.h"
#if SIMD_COEF_32
#if FMT_EXTERNS_H
extern struct fmt_main fmt_rawSHA256_ng;
#elif FMT_REGISTERS_H
john_register_one(&fmt_rawSHA256_ng);
#else
#if !FAST_FORMATS_OMP
#undef _OPENMP
#endif
#if _OPENMP
#include <omp.h>
#if __XOP__
#ifndef OMP_SCALE
#define OMP_SCALE 512 /* AMD */
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 512 /* Intel */
#endif
#endif
#endif
#include "misc.h"
#if !defined(DEBUG) && !defined(WITH_ASAN)
// These compilers claim to be __GNUC__ but warn on gcc pragmas.
#if __GNUC__ && !__INTEL_COMPILER && !__clang__ && !__llvm__ && !_MSC_VER
#pragma GCC optimize 3
#endif
#endif
#include <string.h>
#include "stdint.h"
#include "pseudo_intrinsics.h"
#include "common.h"
#include "formats.h"
#include "aligned.h"
#include "memdbg.h"
#if __MIC__
#define SIMD_TYPE "512/512 MIC 16x"
#elif __AVX512F__
#define SIMD_TYPE "512/512 AVX512 16x"
#elif __AVX2__
#define SIMD_TYPE "256/256 AVX2 8x"
#elif __ALTIVEC__
#define SIMD_TYPE "128/128 AltiVec 4x"
#elif __ARM_NEON
#define SIMD_TYPE "128/128 NEON 4x"
#elif __XOP__
#define SIMD_TYPE "128/128 XOP 4x"
#elif __SSE4_1__
#define SIMD_TYPE "128/128 SSE4.1 4x"
#elif __SSSE3__
#define SIMD_TYPE "128/128 SSSE3 4x"
#else
#define SIMD_TYPE "128/128 SSE2 4x"
#endif
#define BINARY_SIZE 4
#define FORMAT_LABEL "Raw-SHA256-ng"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "SHA256 " SIMD_TYPE
#define VWIDTH SIMD_COEF_32
#define MAXLEN 55
#define PLAINTEXT_LENGTH MAXLEN
#define CIPHERTEXT_LENGTH 64
#define DIGEST_SIZE 32
#define _RAWSHA256_H
#include "rawSHA256_common.h"
#undef _RAWSHA256_H
#define SALT_SIZE 0
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT VWIDTH
#define MAX_KEYS_PER_CRYPT VWIDTH
#if __SSE4_1__ && !__AVX2__
#undef GATHER
/*
 * Gather word z of four consecutive candidates' key buffers into one
 * 128-bit vector. SSE4.1 has no hardware gather, so it is synthesized
 * from _mm_insert_epi32; `index` is taken from the enclosing scope.
 */
#define GATHER(x, y, z)                                 \
{                                                       \
    x = _mm_cvtsi32_si128( y[index][z]   );             \
    x = _mm_insert_epi32(x, y[index + 1][z], 1);        \
    x = _mm_insert_epi32(x, y[index + 2][z], 2);        \
    x = _mm_insert_epi32(x, y[index + 3][z], 3);        \
}
#endif

/*
 * SHA-256 Sigma0(x) = ROTR2(x) ^ ROTR13(x) ^ ROTR22(x) (FIPS 180-4).
 * NOTE(review): negative counts to vroti_epi32 appear to denote right
 * rotation in JtR's pseudo-intrinsics — confirm against pseudo_intrinsics.h.
 */
#define S0(x)                                           \
(                                                       \
    vxor(                                               \
        vroti_epi32(x, -22),                            \
        vxor(                                           \
            vroti_epi32(x,  -2),                        \
            vroti_epi32(x, -13)                         \
        )                                               \
    )                                                   \
)

/* SHA-256 Sigma1(x) = ROTR6(x) ^ ROTR11(x) ^ ROTR25(x). */
#define S1(x)                                           \
(                                                       \
    vxor(                                               \
        vroti_epi32(x, -25),                            \
        vxor(                                           \
            vroti_epi32(x,  -6),                        \
            vroti_epi32(x, -11)                         \
        )                                               \
    )                                                   \
)

/* SHA-256 sigma0(x) = ROTR7(x) ^ ROTR18(x) ^ SHR3(x). */
#define s0(x)                                           \
(                                                       \
    vxor(                                               \
        vsrli_epi32(x, 3),                              \
        vxor(                                           \
            vroti_epi32(x,  -7),                        \
            vroti_epi32(x, -18)                         \
        )                                               \
    )                                                   \
)

/* SHA-256 sigma1(x) = ROTR17(x) ^ ROTR19(x) ^ SHR10(x). */
#define s1(x)                                           \
(                                                       \
    vxor(                                               \
        vsrli_epi32(x, 10),                             \
        vxor(                                           \
            vroti_epi32(x, -17),                        \
            vroti_epi32(x, -19)                         \
        )                                               \
    )                                                   \
)

/*
 * Maj(x,y,z): majority function. When a vector conditional move is
 * available, Maj can be expressed with a single vcmov; otherwise fall
 * back to the canonical and/or formulation.
 */
#if !VCMOV_EMULATED
#define Maj(x,y,z) vcmov(x, y, vxor(z, y))
#else
#define Maj(x,y,z) vor(vand(x, y), vand(vor(x, y), z))
#endif

/* Ch(x,y,z): choose y or z depending on each bit of x. */
#define Ch(x,y,z) vcmov(y, z, x)

/*
 * Message-schedule expansion (FIPS 180-4):
 *   w[t] = sigma1(w[t-2]) + w[t-7] + sigma0(w[t-15]) + w[t-16]
 */
#define R(t)                                        \
{                                                   \
    w[t] = vadd_epi32(s1(w[t -  2]), w[t - 7]);     \
    w[t] = vadd_epi32(s0(w[t - 15]), w[t]);         \
    w[t] = vadd_epi32(   w[t - 16],  w[t]);         \
}

/*
 * One SHA-256 round. The caller rotates the register names a..h between
 * invocations instead of shuffling values; rounds > 15 lazily expand the
 * message schedule via R(x) first. K is the round constant.
 */
#define SHA256_STEP(a,b,c,d,e,f,g,h,x,K)            \
{                                                   \
    if (x > 15) R(x);                               \
    tmp1 = vadd_epi32(h,    S1(e));                 \
    tmp1 = vadd_epi32(tmp1, Ch(e,f,g));             \
    tmp1 = vadd_epi32(tmp1, vset1_epi32(K));        \
    tmp1 = vadd_epi32(tmp1, w[x]);                  \
    tmp2 = vadd_epi32(S0(a),Maj(a,b,c));            \
    d    = vadd_epi32(tmp1, d);                     \
    h    = vadd_epi32(tmp1, tmp2);                  \
}
/* Per-candidate key buffer; set_key() uses the first 16 words as one
 * padded SHA-256 message block (declared with 64 words — TODO confirm
 * whether the extra space is required elsewhere). */
static uint32_t (*saved_key)[64];
/* Digest storage in planar (split) layout: crypt_key[word][candidate],
 * so cmp/get_hash can scan one output word contiguously. */
static uint32_t *crypt_key[ 8];
/*
 * Allocate the key and digest buffers, scaling the keys-per-crypt
 * parameters for OpenMP first: min scales by thread count only, max by
 * thread count times OMP_SCALE.
 */
static void init(struct fmt_main *self)
{
	int slot;
#ifdef _OPENMP
	int scale = omp_get_max_threads();

	self->params.min_keys_per_crypt *= scale;
	scale *= OMP_SCALE;
	self->params.max_keys_per_crypt *= scale;
#endif
	saved_key = mem_calloc_align(self->params.max_keys_per_crypt,
	                             sizeof(*saved_key), VWIDTH * 4);
	for (slot = 0; slot < 8; slot++)
		crypt_key[slot] = mem_calloc_align(self->params.max_keys_per_crypt,
		                                   sizeof(uint32_t), VWIDTH * 4);
}
/* Release everything allocated by init(). */
static void done(void)
{
	int slot;

	for (slot = 0; slot < 8; slot++)
		MEM_FREE(crypt_key[slot]);
	MEM_FREE(saved_key);
}
/*
 * Partial-hash lookup functions: mask progressively larger portions of
 * the first SHA-256 output word (crypt_key[0]) for the cracker's hash
 * tables of increasing size.
 */
static int get_hash_0(int index) { return crypt_key[0][index] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_key[0][index] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_key[0][index] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_key[0][index] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_key[0][index] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_key[0][index] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_key[0][index] & PH_MASK_6; }
/*
 * Store one candidate password into saved_key[index] as a pre-padded
 * SHA-256 message block: key bytes, 0x80 terminator, zero fill, and the
 * bit length in the last 32-bit word (kept in native byte order; see
 * crypt_all, which does not byte-swap w[15]).
 */
static void set_key(char *key, int index)
{
	uint32_t *buf32 = (uint32_t*) &saved_key[index];
	uint8_t *buf8 = (uint8_t*) buf32;
	int len = 0;

	/* Bound the copy at MAXLEN so an oversized key (which valid()
	   should have rejected) cannot overrun the message block. */
	while (*key && len < MAXLEN)
		buf8[len++] = *key++;
	buf32[15] = len << 3;
	buf8[len++] = 0x80;
	/* Clear the remnants of a previous, longer key. Stopping at the
	   first zero byte is safe: keys contain no NUL bytes, so anything
	   beyond the first zero is already clean. */
	while (buf8[len] && len <= MAXLEN)
		buf8[len++] = 0;
}
/*
 * Recover the plaintext for one candidate from its padded message
 * block; the byte length is read back from the bit-length word.
 */
static char *get_key(int index)
{
	static char out[MAXLEN + 1];
	const uint32_t *words = (uint32_t*) &saved_key[index];
	const int length = words[15] >> 3;

	memset(out, 0, sizeof(out));
	memcpy(out, words, length);
	return out;
}
/*
 * Hash `count` stored candidates, VWIDTH at a time with SIMD. Results
 * go to crypt_key[] in planar layout (crypt_key[word][candidate]).
 * NOTE(review): without _OPENMP the braced body runs once with
 * index == 0, so count is presumably always exactly VWIDTH in that
 * configuration (min/max keys per crypt are only scaled under OpenMP)
 * — confirm against the format parameters above.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index += VWIDTH)
#endif
	{
		vtype a, b, c, d, e, f, g, h;
		vtype w[64], tmp1, tmp2;
		int i;

		/* Transpose VWIDTH key blocks into w[0..15]: one vector per
		   message word, one lane per candidate. */
#if __SSE4_1__ && !__AVX2__
		for (i=0; i < 16; i++) GATHER(w[i], saved_key, i);
		/* Byte-swap the 15 data words to big-endian; w[15] is the bit
		   length, stored native by set_key, so it is not swapped. */
		for (i=0; i < 15; i++) vswap32(w[i]);
#else
		JTR_ALIGN(VWIDTH * 4) uint32_t __w[16][VWIDTH];
		int j;

		for (i=0; i < VWIDTH; i++)
			for (j=0; j < 16; j++)
				__w[j][i] = saved_key[index + i][j];

		for (i=0; i < 15; i++)
		{
			w[i] = vload((vtype*) __w[i]);
			vswap32(w[i]);
		}
		/* Bit-length word: loaded without swapping (see set_key). */
		w[15] = vload((vtype*) __w[15]);
#endif

		/* SHA-256 initial state H0..H7 (FIPS 180-4). */
		a = vset1_epi32(0x6a09e667);
		b = vset1_epi32(0xbb67ae85);
		c = vset1_epi32(0x3c6ef372);
		d = vset1_epi32(0xa54ff53a);
		e = vset1_epi32(0x510e527f);
		f = vset1_epi32(0x9b05688c);
		g = vset1_epi32(0x1f83d9ab);
		h = vset1_epi32(0x5be0cd19);

		/* 64 rounds; register names rotate instead of values moving. */
		SHA256_STEP(a, b, c, d, e, f, g, h,  0, 0x428a2f98);
		SHA256_STEP(h, a, b, c, d, e, f, g,  1, 0x71374491);
		SHA256_STEP(g, h, a, b, c, d, e, f,  2, 0xb5c0fbcf);
		SHA256_STEP(f, g, h, a, b, c, d, e,  3, 0xe9b5dba5);
		SHA256_STEP(e, f, g, h, a, b, c, d,  4, 0x3956c25b);
		SHA256_STEP(d, e, f, g, h, a, b, c,  5, 0x59f111f1);
		SHA256_STEP(c, d, e, f, g, h, a, b,  6, 0x923f82a4);
		SHA256_STEP(b, c, d, e, f, g, h, a,  7, 0xab1c5ed5);
		SHA256_STEP(a, b, c, d, e, f, g, h,  8, 0xd807aa98);
		SHA256_STEP(h, a, b, c, d, e, f, g,  9, 0x12835b01);
		SHA256_STEP(g, h, a, b, c, d, e, f, 10, 0x243185be);
		SHA256_STEP(f, g, h, a, b, c, d, e, 11, 0x550c7dc3);
		SHA256_STEP(e, f, g, h, a, b, c, d, 12, 0x72be5d74);
		SHA256_STEP(d, e, f, g, h, a, b, c, 13, 0x80deb1fe);
		SHA256_STEP(c, d, e, f, g, h, a, b, 14, 0x9bdc06a7);
		SHA256_STEP(b, c, d, e, f, g, h, a, 15, 0xc19bf174);
		SHA256_STEP(a, b, c, d, e, f, g, h, 16, 0xe49b69c1);
		SHA256_STEP(h, a, b, c, d, e, f, g, 17, 0xefbe4786);
		SHA256_STEP(g, h, a, b, c, d, e, f, 18, 0x0fc19dc6);
		SHA256_STEP(f, g, h, a, b, c, d, e, 19, 0x240ca1cc);
		SHA256_STEP(e, f, g, h, a, b, c, d, 20, 0x2de92c6f);
		SHA256_STEP(d, e, f, g, h, a, b, c, 21, 0x4a7484aa);
		SHA256_STEP(c, d, e, f, g, h, a, b, 22, 0x5cb0a9dc);
		SHA256_STEP(b, c, d, e, f, g, h, a, 23, 0x76f988da);
		SHA256_STEP(a, b, c, d, e, f, g, h, 24, 0x983e5152);
		SHA256_STEP(h, a, b, c, d, e, f, g, 25, 0xa831c66d);
		SHA256_STEP(g, h, a, b, c, d, e, f, 26, 0xb00327c8);
		SHA256_STEP(f, g, h, a, b, c, d, e, 27, 0xbf597fc7);
		SHA256_STEP(e, f, g, h, a, b, c, d, 28, 0xc6e00bf3);
		SHA256_STEP(d, e, f, g, h, a, b, c, 29, 0xd5a79147);
		SHA256_STEP(c, d, e, f, g, h, a, b, 30, 0x06ca6351);
		SHA256_STEP(b, c, d, e, f, g, h, a, 31, 0x14292967);
		SHA256_STEP(a, b, c, d, e, f, g, h, 32, 0x27b70a85);
		SHA256_STEP(h, a, b, c, d, e, f, g, 33, 0x2e1b2138);
		SHA256_STEP(g, h, a, b, c, d, e, f, 34, 0x4d2c6dfc);
		SHA256_STEP(f, g, h, a, b, c, d, e, 35, 0x53380d13);
		SHA256_STEP(e, f, g, h, a, b, c, d, 36, 0x650a7354);
		SHA256_STEP(d, e, f, g, h, a, b, c, 37, 0x766a0abb);
		SHA256_STEP(c, d, e, f, g, h, a, b, 38, 0x81c2c92e);
		SHA256_STEP(b, c, d, e, f, g, h, a, 39, 0x92722c85);
		SHA256_STEP(a, b, c, d, e, f, g, h, 40, 0xa2bfe8a1);
		SHA256_STEP(h, a, b, c, d, e, f, g, 41, 0xa81a664b);
		SHA256_STEP(g, h, a, b, c, d, e, f, 42, 0xc24b8b70);
		SHA256_STEP(f, g, h, a, b, c, d, e, 43, 0xc76c51a3);
		SHA256_STEP(e, f, g, h, a, b, c, d, 44, 0xd192e819);
		SHA256_STEP(d, e, f, g, h, a, b, c, 45, 0xd6990624);
		SHA256_STEP(c, d, e, f, g, h, a, b, 46, 0xf40e3585);
		SHA256_STEP(b, c, d, e, f, g, h, a, 47, 0x106aa070);
		SHA256_STEP(a, b, c, d, e, f, g, h, 48, 0x19a4c116);
		SHA256_STEP(h, a, b, c, d, e, f, g, 49, 0x1e376c08);
		SHA256_STEP(g, h, a, b, c, d, e, f, 50, 0x2748774c);
		SHA256_STEP(f, g, h, a, b, c, d, e, 51, 0x34b0bcb5);
		SHA256_STEP(e, f, g, h, a, b, c, d, 52, 0x391c0cb3);
		SHA256_STEP(d, e, f, g, h, a, b, c, 53, 0x4ed8aa4a);
		SHA256_STEP(c, d, e, f, g, h, a, b, 54, 0x5b9cca4f);
		SHA256_STEP(b, c, d, e, f, g, h, a, 55, 0x682e6ff3);
		SHA256_STEP(a, b, c, d, e, f, g, h, 56, 0x748f82ee);
		SHA256_STEP(h, a, b, c, d, e, f, g, 57, 0x78a5636f);
		SHA256_STEP(g, h, a, b, c, d, e, f, 58, 0x84c87814);
		SHA256_STEP(f, g, h, a, b, c, d, e, 59, 0x8cc70208);
		SHA256_STEP(e, f, g, h, a, b, c, d, 60, 0x90befffa);
		SHA256_STEP(d, e, f, g, h, a, b, c, 61, 0xa4506ceb);
		SHA256_STEP(c, d, e, f, g, h, a, b, 62, 0xbef9a3f7);
		SHA256_STEP(b, c, d, e, f, g, h, a, 63, 0xc67178f2);

		/* Final state addition (compression-function feedforward). */
		a = vadd_epi32(a, vset1_epi32(0x6a09e667));
		b = vadd_epi32(b, vset1_epi32(0xbb67ae85));
		c = vadd_epi32(c, vset1_epi32(0x3c6ef372));
		d = vadd_epi32(d, vset1_epi32(0xa54ff53a));
		e = vadd_epi32(e, vset1_epi32(0x510e527f));
		f = vadd_epi32(f, vset1_epi32(0x9b05688c));
		g = vadd_epi32(g, vset1_epi32(0x1f83d9ab));
		h = vadd_epi32(h, vset1_epi32(0x5be0cd19));

		/* Store the eight digest words in planar layout. */
		vstore((vtype*) &crypt_key[0][index], a);
		vstore((vtype*) &crypt_key[1][index], b);
		vstore((vtype*) &crypt_key[2][index], c);
		vstore((vtype*) &crypt_key[3][index], d);
		vstore((vtype*) &crypt_key[4][index], e);
		vstore((vtype*) &crypt_key[5][index], f);
		vstore((vtype*) &crypt_key[6][index], g);
		vstore((vtype*) &crypt_key[7][index], h);
	}
	return count;
}
/*
 * Quick scan: does ANY computed digest match the target's first word?
 * Only the first 32-bit output word is compared here (one vector of
 * lanes at a time); full verification happens in cmp_exact.
 * NOTE(review): without _OPENMP only the first VWIDTH lanes are
 * scanned — presumably count == VWIDTH in that configuration; confirm
 * against crypt_all.
 */
static int cmp_all(void *binary, int count)
{
	vtype bin;
	vtype digest;
	int i = 0;

#ifdef _OPENMP
	for (i = 0; i < count; i += VWIDTH)
#endif
	{
		digest = vload((vtype*) &crypt_key[0][i]);
		bin = vset1_epi32(((uint32_t*) binary)[0]);
		/* True if any lane of digest equals the broadcast target word. */
		if (vanyeq_epi32(bin, digest))
			return 1;
	}
	return 0;
}
static int cmp_one(void *binary, int index)
{
return ((uint32_t*) binary)[0] == crypt_key[0][index];
}
/* Full comparison: all eight digest words against the decoded hash. */
static int cmp_exact(char *source, int index)
{
	ARCH_WORD_32 *binary = sha256_common_binary(source);
	int word;

	for (word = 0; word < 8; word++) {
		if (((uint32_t*) binary)[word] != crypt_key[word][index])
			return 0;
	}
	return 1;
}
/* Format registration: parameters block followed by the method table
 * wiring the functions above into the John the Ripper format API. */
struct fmt_main fmt_rawSHA256_ng = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		MAXLEN,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
		FMT_OMP | FMT_OMP_BAD |
#endif
		FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE,
		{ NULL },
		sha256_common_tests
	}, {
		init,
		done,
		fmt_default_reset,
		sha256_common_prepare,
		sha256_common_valid,
		sha256_common_split,
		sha256_common_binary,
		fmt_default_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		fmt_default_set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
|
tree.h | #ifndef LIGHTGBM_TREE_H_
#define LIGHTGBM_TREE_H_
#include <LightGBM/meta.h>
#include <LightGBM/dataset.h>
#include <string>
#include <vector>
#include <memory>
namespace LightGBM {
#define kMaxTreeOutput (100)
/*!
* \brief Tree model
*/
class Tree {
public:
  /*!
  * \brief Constructor
  * \param max_leaves The number of max leaves
  */
  explicit Tree(int max_leaves);

  /*!
  * \brief Constructor, from a string
  * \param str Model string
  */
  explicit Tree(const std::string& str);

  ~Tree();

  /*!
  * \brief Performing a split on tree leaves.
  * \param leaf Index of leaf to be split
  * \param feature Index of feature; the converted index after removing useless features
  * \param bin_type type of this feature, numerical or categorical
  * \param threshold Threshold(bin) of split
  * \param real_feature Index of feature, the original index on data
  * \param threshold_double Threshold on feature value
  * \param left_value Model Left child output
  * \param right_value Model Right child output
  * \param left_cnt Count of left child
  * \param right_cnt Count of right child
  * \param gain Split gain
  * \return The index of new leaf.
  */
  int Split(int leaf, int feature, BinType bin_type, uint32_t threshold, int real_feature,
            double threshold_double, double left_value,
            double right_value, data_size_t left_cnt, data_size_t right_cnt, double gain);

  /*! \brief Get the output of one leaf */
  inline double LeafOutput(int leaf) const { return leaf_value_[leaf]; }

  /*! \brief Set the output of one leaf */
  inline void SetLeafOutput(int leaf, double output) {
    leaf_value_[leaf] = output;
  }

  /*!
  * \brief Adding prediction value of this tree model to scores
  * \param data The dataset
  * \param num_data Number of total data
  * \param score Will add prediction to score
  */
  void AddPredictionToScore(const Dataset* data,
                            data_size_t num_data,
                            double* score) const;

  /*!
  * \brief Adding prediction value of this tree model to scores
  * \param data The dataset
  * \param used_data_indices Indices of used data
  * \param num_data Number of total data
  * \param score Will add prediction to score
  */
  void AddPredictionToScore(const Dataset* data,
                            const data_size_t* used_data_indices,
                            data_size_t num_data, double* score) const;

  /*!
  * \brief Prediction on one record
  * \param feature_values Feature value of this record
  * \return Prediction result
  */
  inline double Predict(const double* feature_values) const;

  inline int PredictLeafIndex(const double* feature_values) const;

  /*! \brief Get Number of leaves*/
  inline int num_leaves() const { return num_leaves_; }

  /*! \brief Get depth of specific leaf*/
  inline int leaf_depth(int leaf_idx) const { return leaf_depth_[leaf_idx]; }

  /*! \brief Get feature of specific split*/
  inline int split_feature(int split_idx) const { return split_feature_[split_idx]; }

  /*!
  * \brief Shrinkage for the tree's output
  *        shrinkage rate (a.k.a learning rate) is used to tune the training process
  * \param rate The factor of shrinkage
  */
  inline void Shrinkage(double rate) {
#pragma omp parallel for schedule(static, 512) if (num_leaves_ >= 1024)
    for (int i = 0; i < num_leaves_; ++i) {
      leaf_value_[i] *= rate;
      // Clamp outputs so a single leaf cannot dominate the model.
      if (leaf_value_[i] > kMaxTreeOutput) { leaf_value_[i] = kMaxTreeOutput; }
      else if (leaf_value_[i] < -kMaxTreeOutput) { leaf_value_[i] = -kMaxTreeOutput; }
    }
    shrinkage_ *= rate;
  }

  /*! \brief Serialize this object to string*/
  std::string ToString();

  /*! \brief Serialize this object to json*/
  std::string ToJSON();

  /*! \brief Categorical split decision: go left iff the value matches the category */
  template<typename T>
  static bool CategoricalDecision(T fval, T threshold) {
    return static_cast<int>(fval) == static_cast<int>(threshold);
  }

  /*! \brief Numerical split decision: go left iff fval <= threshold */
  template<typename T>
  static bool NumericalDecision(T fval, T threshold) {
    return fval <= threshold;
  }

  /*! \brief Human-readable name for a decision type (0: numerical, otherwise categorical) */
  static const char* GetDecisionTypeName(int8_t type) {
    return (type == 0) ? "no_greater" : "is";
  }

  static std::vector<bool(*)(uint32_t, uint32_t)> inner_decision_funs;
  static std::vector<bool(*)(double, double)> decision_funs;

private:
  /*!
  * \brief Find leaf index of which record belongs by features
  * \param feature_values Feature value of this record
  * \return Leaf index
  */
  inline int GetLeaf(const double* feature_values) const;

  /*! \brief Serialize one node to json*/
  inline std::string NodeToJSON(int index);

  /*! \brief Number of max leaves*/
  int max_leaves_;
  /*! \brief Number of current leaves*/
  int num_leaves_;
  // following values used for non-leaf node
  /*! \brief A non-leaf node's left child */
  std::vector<int> left_child_;
  /*! \brief A non-leaf node's right child */
  std::vector<int> right_child_;
  /*! \brief A non-leaf node's split feature */
  std::vector<int> split_feature_inner;
  /*! \brief A non-leaf node's split feature, the original index */
  std::vector<int> split_feature_;
  /*! \brief A non-leaf node's split threshold in bin */
  std::vector<uint32_t> threshold_in_bin_;
  /*! \brief A non-leaf node's split threshold in feature value */
  std::vector<double> threshold_;
  /*! \brief Decision type, 0 for '<='(numerical feature), 1 for 'is'(categorical feature) */
  std::vector<int8_t> decision_type_;
  /*! \brief A non-leaf node's split gain */
  std::vector<double> split_gain_;
  // used for leaf node
  /*! \brief The parent of leaf */
  std::vector<int> leaf_parent_;
  /*! \brief Output of leaves */
  std::vector<double> leaf_value_;
  /*! \brief DataCount of leaves */
  std::vector<data_size_t> leaf_count_;
  /*! \brief Output of non-leaf nodes */
  std::vector<double> internal_value_;
  /*! \brief DataCount of non-leaf nodes */
  std::vector<data_size_t> internal_count_;
  /*! \brief Depth for leaves */
  std::vector<int> leaf_depth_;
  /*! \brief Accumulated shrinkage (learning rate) applied to this tree */
  double shrinkage_;
  /*! \brief Whether the tree contains any categorical split */
  bool has_categorical_;
};
inline double Tree::Predict(const double* feature_values) const {
  // A tree with a single leaf has no splits and contributes nothing.
  if (num_leaves_ <= 1) {
    return 0.0f;
  }
  return LeafOutput(GetLeaf(feature_values));
}
inline int Tree::PredictLeafIndex(const double* feature_values) const {
  // Single-leaf trees trivially map every record to leaf 0.
  return (num_leaves_ > 1) ? GetLeaf(feature_values) : 0;
}
inline int Tree::GetLeaf(const double* feature_values) const {
  // Walk from the root. Internal nodes have non-negative indices; a leaf
  // is encoded as the bitwise complement of its index (a negative value),
  // which terminates the loop.
  int node = 0;
  while (node >= 0) {
    // decision_type_[node] selects the comparison (0: numerical '<=',
    // 1: categorical 'is') via the decision_funs dispatch table.
    if (decision_funs[decision_type_[node]](
        feature_values[split_feature_[node]],
        threshold_[node])) {
      node = left_child_[node];
    } else {
      node = right_child_[node];
    }
  }
  // Undo the complement encoding to recover the leaf index.
  return ~node;
}
} // namespace LightGBM
#endif // LightGBM_TREE_H_
|
effect.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% EEEEE FFFFF FFFFF EEEEE CCCC TTTTT %
% E F F E C T %
% EEE FFF FFF EEE C T %
% E F F E C T %
% EEEEE F F EEEEE CCCC T %
% %
% %
% MagickCore Image Effects Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/constitute.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/effect.h"
#include "MagickCore/fx.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/matrix.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/montage.h"
#include "MagickCore/morphology.h"
#include "MagickCore/morphology-private.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/shear.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
#include "MagickCore/threshold.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveBlurImage() adaptively blurs the image by blurring less
% intensely near image edges and more intensely far from edges. We blur the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma). For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and AdaptiveBlurImage() selects a suitable radius for you.
%
% The format of the AdaptiveBlurImage method is:
%
% Image *AdaptiveBlurImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveBlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define AdaptiveBlurImageTag "Convolve/Image"
/* Substitute a tiny epsilon for a (near) zero sigma so the Gaussian below
   never divides by zero. */
#define MagickSigma (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)

  CacheView
    *blur_view,
    *edge_view,
    *image_view;

  double
    normalize,
    **kernel;

  Image
    *blur_image,
    *edge_image,
    *gaussian_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v,
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  blur_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    A (near) zero sigma blurs nothing: return the unmodified clone.
  */
  if (fabs(sigma) < MagickEpsilon)
    return(blur_image);
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  /*
    Edge detect the image brightness channel, level, blur, and level again.
    The result serves as a per-pixel kernel-selection map below.
  */
  edge_image=EdgeImage(image,radius,exception);
  if (edge_image == (Image *) NULL)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  (void) AutoLevelImage(edge_image,exception);
  gaussian_image=BlurImage(edge_image,radius,sigma,exception);
  if (gaussian_image != (Image *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      edge_image=gaussian_image;
    }
  (void) AutoLevelImage(edge_image,exception);
  /*
    Create a set of kernels from maximum (radius,sigma) to minimum.  Only the
    even-indexed slots of kernel[] are populated (the loop steps by 2); the
    per-pixel kernel selection below forces its index even to match.
  */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,
    sizeof(*kernel)));
  if (kernel == (double **) NULL)
    {
      edge_image=DestroyImage(edge_image);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  (void) ResetMagickMemory(kernel,0,(size_t) width*sizeof(*kernel));
  for (i=0; i < (ssize_t) width; i+=2)
  {
    /*
      Build a (width-i) x (width-i) Gaussian kernel.
    */
    kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory(
      (size_t) (width-i),(width-i)*sizeof(**kernel)));
    if (kernel[i] == (double *) NULL)
      break;
    normalize=0.0;
    j=(ssize_t) (width-i-1)/2;
    k=0;
    for (v=(-j); v <= j; v++)
    {
      for (u=(-j); u <= j; u++)
      {
        kernel[i][k]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
          MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
        normalize+=kernel[i][k];
        k++;
      }
    }
    /*
      Fold the residual weight into the center tap so the kernel sums to 1.
    */
    kernel[i][(k-1)/2]+=(double) (1.0-normalize);
    if (sigma < MagickEpsilon)
      kernel[i][(k-1)/2]=1.0;
  }
  if (i < (ssize_t) width)
    {
      /*
        A kernel allocation failed: unwind the kernels built so far.
      */
      for (i-=2; i >= 0; i-=2)
        kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
      kernel=(double **) RelinquishAlignedMemory(kernel);
      edge_image=DestroyImage(edge_image);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Adaptively blur image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,blur_image,blur_image->rows,1)
#endif
  for (y=0; y < (ssize_t) blur_image->rows; y++)
  {
    register const Quantum
      *magick_restrict r;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) blur_image->columns; x++)
    {
      register const Quantum
        *magick_restrict p;

      register ssize_t
        i;

      ssize_t
        center,
        j;

      /*
        Map the edge-map intensity at this pixel to a kernel slot j; the
        kernel applied here is (width-j) taps on a side.
      */
      j=(ssize_t) ceil((double) width*(1.0-QuantumScale*
        GetPixelIntensity(edge_image,r))-0.5);
      if (j < 0)
        j=0;
      else
        if (j > (ssize_t) width)
          j=(ssize_t) width;
      if ((j & 0x01) != 0)
        j--;  /* only even kernel slots were allocated above */
      p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y-
        (ssize_t) ((width-j)/2L),width-j,width-j,exception);
      if (p == (const Quantum *) NULL)
        break;
      /*
        Offset of the central pixel within the (width-j) x (width-j) patch.
      */
      center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+
        GetPixelChannels(image)*((width-j)/2);
      for (i=0; i < (ssize_t) GetPixelChannels(blur_image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        register const double
          *magick_restrict k;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        if (((blur_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(image,p+center) <= (QuantumRange/2)))
          {
            /*
              Copy-only channel or write-masked pixel: pass the source
              value through untouched.
            */
            SetPixelChannel(blur_image,channel,p[center+i],q);
            continue;
          }
        k=kernel[j];
        pixels=p;
        pixel=0.0;
        gamma=0.0;
        if ((blur_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (v=0; v < (ssize_t) (width-j); v++)
            {
              for (u=0; u < (ssize_t) (width-j); u++)
              {
                pixel+=(*k)*pixels[i];
                gamma+=(*k);
                k++;
                pixels+=GetPixelChannels(image);
              }
            }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /*
          Alpha blending: weight each tap by its alpha so transparent
          neighbors do not bleed color into the result.
        */
        for (v=0; v < (ssize_t) (width-j); v++)
        {
          for (u=0; u < (ssize_t) (width-j); u++)
          {
            alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
            pixel+=(*k)*alpha*pixels[i];
            gamma+=(*k)*alpha;
            k++;
            pixels+=GetPixelChannels(image);
          }
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(blur_image);
      r+=GetPixelChannels(edge_image);
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_AdaptiveBlurImage)
#endif
        proceed=SetImageProgress(image,AdaptiveBlurImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_image->type=image->type;
  blur_view=DestroyCacheView(blur_view);
  edge_view=DestroyCacheView(edge_view);
  image_view=DestroyCacheView(image_view);
  edge_image=DestroyImage(edge_image);
  for (i=0; i < (ssize_t) width; i+=2)
    kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
  kernel=(double **) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e S h a r p e n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveSharpenImage() adaptively sharpens the image by sharpening more
% intensely near image edges and less intensely far from edges. We sharpen the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma). For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and AdaptiveSharpenImage() selects a suitable radius for you.
%
% The format of the AdaptiveSharpenImage method is:
%
% Image *AdaptiveSharpenImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Laplacian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveSharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define AdaptiveSharpenImageTag "Convolve/Image"
/* Substitute a tiny epsilon for a (near) zero sigma so the Gaussian below
   never divides by zero. */
#define MagickSigma (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)

  CacheView
    *sharp_view,
    *edge_view,
    *image_view;

  double
    normalize,
    **kernel;

  Image
    *sharp_image,
    *edge_image,
    *gaussian_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v,
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  sharp_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (sharp_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    A (near) zero sigma sharpens nothing: return the unmodified clone.
  */
  if (fabs(sigma) < MagickEpsilon)
    return(sharp_image);
  if (SetImageStorageClass(sharp_image,DirectClass,exception) == MagickFalse)
    {
      sharp_image=DestroyImage(sharp_image);
      return((Image *) NULL);
    }
  /*
    Edge detect the image brightness channel, level, sharp, and level again.
    The result serves as a per-pixel kernel-selection map below.
  */
  edge_image=EdgeImage(image,radius,exception);
  if (edge_image == (Image *) NULL)
    {
      sharp_image=DestroyImage(sharp_image);
      return((Image *) NULL);
    }
  (void) AutoLevelImage(edge_image,exception);
  gaussian_image=BlurImage(edge_image,radius,sigma,exception);
  if (gaussian_image != (Image *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      edge_image=gaussian_image;
    }
  (void) AutoLevelImage(edge_image,exception);
  /*
    Create a set of kernels from maximum (radius,sigma) to minimum.  Only the
    even-indexed slots of kernel[] are populated (the loop steps by 2); the
    per-pixel kernel selection below forces its index even to match.
  */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t)
    width,sizeof(*kernel)));
  if (kernel == (double **) NULL)
    {
      edge_image=DestroyImage(edge_image);
      sharp_image=DestroyImage(sharp_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  (void) ResetMagickMemory(kernel,0,(size_t) width*sizeof(*kernel));
  for (i=0; i < (ssize_t) width; i+=2)
  {
    /*
      Build a (width-i) x (width-i) sharpening kernel: a negated Gaussian
      (all taps < 0) whose center tap is replaced below with a positive
      value, yielding a high-pass kernel.
    */
    kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
      (width-i),(width-i)*sizeof(**kernel)));
    if (kernel[i] == (double *) NULL)
      break;
    normalize=0.0;
    j=(ssize_t) (width-i-1)/2;
    k=0;
    for (v=(-j); v <= j; v++)
    {
      for (u=(-j); u <= j; u++)
      {
        kernel[i][k]=(double) (-exp(-((double) u*u+v*v)/(2.0*MagickSigma*
          MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
        normalize+=kernel[i][k];
        k++;
      }
    }
    /*
      normalize is negative here, so the center tap becomes positive.
    */
    kernel[i][(k-1)/2]=(double) ((-2.0)*normalize);
    if (sigma < MagickEpsilon)
      kernel[i][(k-1)/2]=1.0;
  }
  if (i < (ssize_t) width)
    {
      /*
        A kernel allocation failed: unwind the kernels built so far.
      */
      for (i-=2; i >= 0; i-=2)
        kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
      kernel=(double **) RelinquishAlignedMemory(kernel);
      edge_image=DestroyImage(edge_image);
      sharp_image=DestroyImage(sharp_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Adaptively sharpen image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  sharp_view=AcquireAuthenticCacheView(sharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,sharp_image,sharp_image->rows,1)
#endif
  for (y=0; y < (ssize_t) sharp_image->rows; y++)
  {
    register const Quantum
      *magick_restrict r;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(sharp_view,0,y,sharp_image->columns,1,
      exception);
    if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) sharp_image->columns; x++)
    {
      register const Quantum
        *magick_restrict p;

      register ssize_t
        i;

      ssize_t
        center,
        j;

      /*
        Map the edge-map intensity at this pixel to a kernel slot j; the
        kernel applied here is (width-j) taps on a side.
      */
      j=(ssize_t) ceil((double) width*(1.0-QuantumScale*
        GetPixelIntensity(edge_image,r))-0.5);
      if (j < 0)
        j=0;
      else
        if (j > (ssize_t) width)
          j=(ssize_t) width;
      if ((j & 0x01) != 0)
        j--;  /* only even kernel slots were allocated above */
      p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y-
        (ssize_t) ((width-j)/2L),width-j,width-j,exception);
      if (p == (const Quantum *) NULL)
        break;
      /*
        Offset of the central pixel within the (width-j) x (width-j) patch.
      */
      center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+
        GetPixelChannels(image)*((width-j)/2);
      for (i=0; i < (ssize_t) GetPixelChannels(sharp_image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          sharp_traits,
          traits;

        register const double
          *magick_restrict k;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        sharp_traits=GetPixelChannelTraits(sharp_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (sharp_traits == UndefinedPixelTrait))
          continue;
        if (((sharp_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(image,p+center) <= (QuantumRange/2)))
          {
            /*
              Copy-only channel or write-masked pixel: pass the source
              value through untouched.
            */
            SetPixelChannel(sharp_image,channel,p[center+i],q);
            continue;
          }
        k=kernel[j];
        pixels=p;
        pixel=0.0;
        gamma=0.0;
        if ((sharp_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (v=0; v < (ssize_t) (width-j); v++)
            {
              for (u=0; u < (ssize_t) (width-j); u++)
              {
                pixel+=(*k)*pixels[i];
                gamma+=(*k);
                k++;
                pixels+=GetPixelChannels(image);
              }
            }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /*
          Alpha blending: weight each tap by its alpha so transparent
          neighbors do not bleed color into the result.
        */
        for (v=0; v < (ssize_t) (width-j); v++)
        {
          for (u=0; u < (ssize_t) (width-j); u++)
          {
            alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
            pixel+=(*k)*alpha*pixels[i];
            gamma+=(*k)*alpha;
            k++;
            pixels+=GetPixelChannels(image);
          }
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(sharp_image);
      r+=GetPixelChannels(edge_image);
    }
    if (SyncCacheViewAuthenticPixels(sharp_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_AdaptiveSharpenImage)
#endif
        proceed=SetImageProgress(image,AdaptiveSharpenImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  sharp_image->type=image->type;
  sharp_view=DestroyCacheView(sharp_view);
  edge_view=DestroyCacheView(edge_view);
  image_view=DestroyCacheView(image_view);
  edge_image=DestroyImage(edge_image);
  for (i=0; i < (ssize_t) width; i+=2)
    kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
  kernel=(double **) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    sharp_image=DestroyImage(sharp_image);
  return(sharp_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlurImage() blurs an image. We convolve the image with a Gaussian operator
% of the given radius and standard deviation (sigma). For reasonable results,
% the radius should be larger than sigma. Use a radius of 0 and BlurImage()
% selects a suitable radius for you.
%
% The format of the BlurImage method is:
%
% Image *BlurImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *BlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  char
    kernel_geometry[MagickPathExtent];

  Image
    *blur_image;

  KernelInfo
    *blur_kernel;

  /*
    Gaussian blur expressed as two orthogonal 1-D "blur" kernels (0 and 90
    degrees) applied by ConvolveImage().
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /*
    Prefer the OpenCL-accelerated path when the runtime provides one.
  */
  blur_image=AccelerateBlurImage(image,radius,sigma,exception);
  if (blur_image != (Image *) NULL)
    return(blur_image);
#endif
  (void) FormatLocaleString(kernel_geometry,MagickPathExtent,
    "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma);
  blur_kernel=AcquireKernelInfo(kernel_geometry,exception);
  if (blur_kernel == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  blur_image=ConvolveImage(image,blur_kernel,exception);
  blur_kernel=DestroyKernelInfo(blur_kernel);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n v o l v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvolveImage() applies a custom convolution kernel to the image.
%
% The format of the ConvolveImage method is:
%
% Image *ConvolveImage(const Image *image,const KernelInfo *kernel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o kernel: the filtering kernel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConvolveImage(const Image *image,
  const KernelInfo *kernel_info,ExceptionInfo *exception)
{
  Image
    *result_image;

#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /*
    Try the OpenCL-accelerated convolution first; fall through on failure.
  */
  result_image=AccelerateConvolveImage(image,kernel_info,exception);
  if (result_image != (Image *) NULL)
    return(result_image);
#endif
  /*
    Convolution is a single-iteration convolve morphology.
  */
  result_image=MorphologyImage(image,ConvolveMorphology,1,kernel_info,
    exception);
  return(result_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s p e c k l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DespeckleImage() reduces the speckle noise in an image while preserving the
% edges of the original image. A speckle removing filter uses a complementary
% hulling technique (raising pixels that are darker than their surrounding
% neighbors, then complementarily lowering pixels that are brighter than their
% surrounding neighbors) to reduce the speckle index of that image (reference
% Crimmins speckle removal).
%
% The format of the DespeckleImage method is:
%
% Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Hull() performs one hull pass of the Crimmins speckle-removal algorithm on
  a single channel.  f and g are (columns+2) x (rows+2) buffers holding the
  channel with a one-pixel border; polarity > 0 raises pixels toward their
  neighbor at (x_offset,y_offset), polarity <= 0 lowers them.  The first
  sweep writes g from f; the second sweep writes f back from g.
*/
static void Hull(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,const size_t columns,const size_t rows,
  const int polarity,Quantum *magick_restrict f,Quantum *magick_restrict g)
{
  register Quantum
    *p,
    *q,
    *r,
    *s;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(f != (Quantum *) NULL);
  assert(g != (Quantum *) NULL);
  /*
    p/q point at the first interior row of f/g (skip the top border row);
    r is p displaced by the hull offset, i.e. the neighbor being compared.
  */
  p=f+(columns+2);
  q=g+(columns+2);
  r=p+(y_offset*(columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) \
    magick_threads(image,image,1,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickRealType
      v;

    register ssize_t
      i,
      x;

    /*
      Index of the first interior pixel of row y within the padded buffer:
      y*(columns+2)+1 == (2*y+1)+y*columns.
    */
    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        /*
          Raise the pixel one step if the offset neighbor is at least two
          steps brighter.
        */
        v=(MagickRealType) p[i];
        if ((MagickRealType) r[i] >= (v+ScaleCharToQuantum(2)))
          v+=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        /*
          Lower the pixel one step if the offset neighbor is at least two
          steps darker.
        */
        v=(MagickRealType) p[i];
        if ((MagickRealType) r[i] <= (v-ScaleCharToQuantum(2)))
          v-=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
  }
  /*
    Second sweep: compare against both the neighbor (r) and its opposite
    (s), writing the result back into f via p.
  */
  p=f+(columns+2);
  q=g+(columns+2);
  r=q+(y_offset*(columns+2)+x_offset);
  s=q-(y_offset*(columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) \
    magick_threads(image,image,1,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    register ssize_t
      i,
      x;

    MagickRealType
      v;

    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) q[i];
        if (((MagickRealType) s[i] >= (v+ScaleCharToQuantum(2))) &&
            ((MagickRealType) r[i] > v))
          v+=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) q[i];
        if (((MagickRealType) s[i] <= (v-ScaleCharToQuantum(2))) &&
            ((MagickRealType) r[i] < v))
          v-=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
  }
}
MagickExport Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
{
#define DespeckleImageTag "Despeckle/Image"

  CacheView
    *despeckle_view,
    *image_view;

  Image
    *despeckle_image;

  MagickBooleanType
    status;

  MemoryInfo
    *buffer_info,
    *pixel_info;

  Quantum
    *magick_restrict buffer,
    *magick_restrict pixels;

  register ssize_t
    i;

  size_t
    length;

  /*
    Hull offsets: south, east, south-east, and south-west; each is also
    applied negated below, covering all eight compass directions.
  */
  static const ssize_t
    X[4] = {0, 1, 1,-1},
    Y[4] = {1, 0, 1, 1};

  /*
    Allocate despeckled image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  despeckle_image=AccelerateDespeckleImage(image,exception);
  if (despeckle_image != (Image *) NULL)
    return(despeckle_image);
#endif
  despeckle_image=CloneImage(image,0,0,MagickTrue,exception);
  if (despeckle_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(despeckle_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      despeckle_image=DestroyImage(despeckle_image);
      return((Image *) NULL);
    }
  /*
    Allocate image buffer.  Both scratch buffers carry a one-pixel border
    on every side, hence the +2 in each dimension.
  */
  length=(size_t) ((image->columns+2)*(image->rows+2));
  pixel_info=AcquireVirtualMemory(length,sizeof(*pixels));
  buffer_info=AcquireVirtualMemory(length,sizeof(*buffer));
  if ((pixel_info == (MemoryInfo *) NULL) ||
      (buffer_info == (MemoryInfo *) NULL))
    {
      if (buffer_info != (MemoryInfo *) NULL)
        buffer_info=RelinquishVirtualMemory(buffer_info);
      if (pixel_info != (MemoryInfo *) NULL)
        pixel_info=RelinquishVirtualMemory(pixel_info);
      despeckle_image=DestroyImage(despeckle_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  pixels=(Quantum *) GetVirtualMemoryBlob(pixel_info);
  buffer=(Quantum *) GetVirtualMemoryBlob(buffer_info);
  /*
    Reduce speckle in the image, one channel at a time.
  */
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  despeckle_view=AcquireAuthenticCacheView(despeckle_image,exception);
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel
      channel;

    PixelTrait
      despeckle_traits,
      traits;

    register ssize_t
      k,
      x;

    ssize_t
      j,
      y;

    if (status == MagickFalse)
      continue;
    channel=GetPixelChannelChannel(image,i);
    traits=GetPixelChannelTraits(image,channel);
    despeckle_traits=GetPixelChannelTraits(despeckle_image,channel);
    if ((traits == UndefinedPixelTrait) ||
        (despeckle_traits == UndefinedPixelTrait))
      continue;
    if ((despeckle_traits & CopyPixelTrait) != 0)
      continue;
    /*
      Copy channel i into the padded buffer.  j starts past the top border
      row; the two lone j++ per row skip the left and right border pixels.
    */
    (void) ResetMagickMemory(pixels,0,length*sizeof(*pixels));
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      register const Quantum
        *magick_restrict p;

      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        pixels[j++]=p[i];
        p+=GetPixelChannels(image);
      }
      j++;
    }
    (void) ResetMagickMemory(buffer,0,length*sizeof(*buffer));
    /*
      Crimmins speckle removal: for each direction pair, raise darker-than-
      neighborhood pixels (polarity 1) then lower brighter ones (-1).
    */
    for (k=0; k < 4; k++)
    {
      Hull(image,X[k],Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,-1,pixels,buffer);
      Hull(image,X[k],Y[k],image->columns,image->rows,-1,pixels,buffer);
    }
    /*
      Write the processed channel back, mirroring the copy-in indexing.
    */
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      MagickBooleanType
        sync;

      register Quantum
        *magick_restrict q;

      q=GetCacheViewAuthenticPixels(despeckle_view,0,y,despeckle_image->columns,
        1,exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        SetPixelChannel(despeckle_image,channel,pixels[j++],q);
        q+=GetPixelChannels(despeckle_image);
      }
      sync=SyncCacheViewAuthenticPixels(despeckle_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
      j++;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,DespeckleImageTag,(MagickOffsetType) i,
          GetPixelChannels(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  despeckle_view=DestroyCacheView(despeckle_view);
  image_view=DestroyCacheView(image_view);
  buffer_info=RelinquishVirtualMemory(buffer_info);
  pixel_info=RelinquishVirtualMemory(pixel_info);
  despeckle_image->type=image->type;
  if (status == MagickFalse)
    despeckle_image=DestroyImage(despeckle_image);
  return(despeckle_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E d g e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EdgeImage() finds edges in an image. Radius defines the radius of the
% convolution filter. Use a radius of 0 and EdgeImage() selects a suitable
% radius for you.
%
% The format of the EdgeImage method is:
%
% Image *EdgeImage(const Image *image,const double radius,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EdgeImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
  Image
    *edge_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  size_t
    width;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,0.5);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /*
    NOTE(review): this zeroes the whole struct returned by AcquireKernelInfo
    and then re-populates only the fields needed below -- assumes
    AcquireKernelInfo(NULL,...) holds no internal allocations; confirm.
  */
  (void) ResetMagickMemory(kernel_info,0,sizeof(*kernel_info));
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (kernel_info->width-1)/2;
  kernel_info->y=(ssize_t) (kernel_info->height-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->height*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Edge-detection kernel: every tap is -1 except the center, which is set
    to (taps-1) so the kernel sums to zero (flat areas map to black).
  */
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]=(-1.0);
  kernel_info->values[i/2]=(double) kernel_info->width*kernel_info->height-1.0;
  edge_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(edge_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E m b o s s I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EmbossImage() returns a grayscale image with a three-dimensional effect.
% We convolve the image with a Gaussian operator of the given radius and
% standard deviation (sigma). For reasonable results, radius should be
% larger than sigma. Use a radius of 0 and Emboss() selects a suitable
% radius for you.
%
% The format of the EmbossImage method is:
%
% Image *EmbossImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EmbossImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  double
    gamma,
    normalize;

  Image
    *emboss_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->width*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Build the emboss kernel: Gaussian-weighted taps of magnitude 8,
    negative above/left of center and positive below/right.  k tracks one
    column per row (it starts at j and decrements), so each row keeps a
    single non-zero tap -- a diagonal line across the kernel.
  */
  j=(ssize_t) (kernel_info->width-1)/2;
  k=j;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
    {
      kernel_info->values[i]=(MagickRealType) (((u < 0) || (v < 0) ? -8.0 :
        8.0)*exp(-((double) u*u+v*v)/(2.0*MagickSigma*MagickSigma))/
        (2.0*MagickPI*MagickSigma*MagickSigma));
      if (u != k)
        kernel_info->values[i]=0.0;
      i++;
    }
    k--;
  }
  /*
    Scale the kernel so its taps sum to (approximately) one.
  */
  normalize=0.0;
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    normalize+=kernel_info->values[i];
  gamma=PerceptibleReciprocal(normalize);
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]*=gamma;
  emboss_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  /*
    Equalize the convolved result to spread the emboss relief over the
    full intensity range.
  */
  if (emboss_image != (Image *) NULL)
    (void) EqualizeImage(emboss_image,exception);
  return(emboss_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G a u s s i a n B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GaussianBlurImage() blurs an image. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma).
% For reasonable results, the radius should be larger than sigma. Use a
% radius of 0 and GaussianBlurImage() selects a suitable radius for you.
%
% The format of the GaussianBlurImage method is:
%
% Image *GaussianBlurImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *GaussianBlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  char
    option[MagickPathExtent];

  Image
    *smooth_image;

  KernelInfo
    *kernel;

  /*
    Gaussian blur is a convolution: describe the kernel with a
    "gaussian:RxS" geometry string and delegate to ConvolveImage().
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  (void) FormatLocaleString(option,MagickPathExtent,"gaussian:%.20gx%.20g",
    radius,sigma);
  kernel=AcquireKernelInfo(option,exception);
  if (kernel == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  smooth_image=ConvolveImage(image,kernel,exception);
  kernel=DestroyKernelInfo(kernel);
  return(smooth_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% K u w a h a r a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% KuwaharaImage() is an edge preserving noise reduction filter.
%
% The format of the KuwaharaImage method is:
%
% Image *KuwaharaImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the square window radius.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickRealType GetMeanLuma(const Image *magick_restrict image,
  const double *magick_restrict pixel)
{
  /*
    Rec. 709 luma of a pixel whose samples are located through the image's
    channel map.
  */
  const double
    blue = pixel[image->channel_map[BluePixelChannel].offset],
    green = pixel[image->channel_map[GreenPixelChannel].offset],
    red = pixel[image->channel_map[RedPixelChannel].offset];

  return(0.212656f*red+0.715158f*green+0.072186f*blue);  /* Rec709 */
}
MagickExport Image *KuwaharaImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define KuwaharaImageTag "Kuwahara/Image"

  CacheView
    *image_view,
    *kuwahara_view;

  Image
    *gaussian_image,
    *kuwahara_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  size_t
    width;  /* edge length of each sampling quadrant */

  ssize_t
    y;

  /*
    Initialize Kuwahara image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=(size_t) radius+1;
  /*
    Pre-smooth with a Gaussian blur; the quadrant statistics below are
    computed on this blurred copy, not on the original pixels.
  */
  gaussian_image=BlurImage(image,radius,sigma,exception);
  if (gaussian_image == (Image *) NULL)
    return((Image *) NULL);
  kuwahara_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (kuwahara_image == (Image *) NULL)
    {
      gaussian_image=DestroyImage(gaussian_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(kuwahara_image,DirectClass,exception) == MagickFalse)
    {
      gaussian_image=DestroyImage(gaussian_image);
      kuwahara_image=DestroyImage(kuwahara_image);
      return((Image *) NULL);
    }
  /*
    Edge preserving noise reduction filter.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(gaussian_image,exception);
  kuwahara_view=AcquireAuthenticCacheView(kuwahara_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,kuwahara_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) gaussian_image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(kuwahara_view,0,y,kuwahara_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) gaussian_image->columns; x++)
    {
      const Quantum
        *magick_restrict p;

      double
        min_variance;

      RectangleInfo
        quadrant,
        target;

      register size_t
        i;

      min_variance=MagickMaximumValue;
      SetGeometry(gaussian_image,&target);
      quadrant.width=width;
      quadrant.height=width;
      /*
        Examine the four width x width windows that touch (x,y) and keep
        the one whose luma variance is smallest.
      */
      for (i=0; i < 4; i++)
      {
        const Quantum
          *magick_restrict k;

        double
          mean[MaxPixelChannels],
          variance;

        register ssize_t
          n;

        ssize_t
          j;

        /*
          Offset the window so (x,y) is its lower-right, lower-left,
          upper-right, or upper-left corner, respectively.
        */
        quadrant.x=x;
        quadrant.y=y;
        switch (i)
        {
          case 0:
          {
            quadrant.x=x-(ssize_t) (width-1);
            quadrant.y=y-(ssize_t) (width-1);
            break;
          }
          case 1:
          {
            quadrant.y=y-(ssize_t) (width-1);
            break;
          }
          case 2:
          {
            quadrant.x=x-(ssize_t) (width-1);
            break;
          }
          case 3:
          default:
            break;
        }
        p=GetCacheViewVirtualPixels(image_view,quadrant.x,quadrant.y,
          quadrant.width,quadrant.height,exception);
        if (p == (const Quantum *) NULL)
          break;
        /*
          Per-channel mean of the window.
        */
        for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
          mean[j]=0.0;
        k=p;
        for (n=0; n < (ssize_t) (width*width); n++)
        {
          for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
            mean[j]+=(double) k[j];
          k+=GetPixelChannels(gaussian_image);
        }
        for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
          mean[j]/=(double) (width*width);
        /*
          Luma variance of the window about that mean.
        */
        k=p;
        variance=0.0;
        for (n=0; n < (ssize_t) (width*width); n++)
        {
          double
            luma;

          luma=GetPixelLuma(gaussian_image,k);
          variance+=(luma-GetMeanLuma(gaussian_image,mean))*
            (luma-GetMeanLuma(gaussian_image,mean));
          k+=GetPixelChannels(gaussian_image);
        }
        if (variance < min_variance)
          {
            min_variance=variance;
            target=quadrant;
          }
      }
      if (i < 4)  /* a virtual-pixel read failed above */
        {
          status=MagickFalse;
          break;
        }
      /*
        Write the pixel interpolated at the winning window's center.
      */
      status=InterpolatePixelChannels(gaussian_image,image_view,kuwahara_image,
        UndefinedInterpolatePixel,(double) target.x+target.width/2.0,(double)
        target.y+target.height/2.0,q,exception);
      q+=GetPixelChannels(kuwahara_image);
    }
    if (SyncCacheViewAuthenticPixels(kuwahara_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_KuwaharaImage)
#endif
        proceed=SetImageProgress(image,KuwaharaImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  kuwahara_view=DestroyCacheView(kuwahara_view);
  image_view=DestroyCacheView(image_view);
  gaussian_image=DestroyImage(gaussian_image);
  if (status == MagickFalse)
    kuwahara_image=DestroyImage(kuwahara_image);
  return(kuwahara_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L o c a l C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LocalContrastImage() attempts to increase the appearance of large-scale
% light-dark transitions. Local contrast enhancement works similarly to
% sharpening with an unsharp mask, however the mask is instead created using
% an image with a greater blur distance.
%
% The format of the LocalContrastImage method is:
%
% Image *LocalContrastImage(const Image *image, const double radius,
% const double strength,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian blur, in percentage with 100%
% resulting in a blur radius of 20% of largest dimension.
%
% o strength: the strength of the blur mask in percentage.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *LocalContrastImage(const Image *image,const double radius,
  const double strength,ExceptionInfo *exception)
{
#define LocalContrastImageTag "LocalContrast/Image"

  CacheView
    *image_view,
    *contrast_view;

  float
    *interImage,      /* vertically blurred luma, with horizontal padding */
    *scanLinePixels,  /* per-thread scan-line work buffers */
    totalWeight;      /* normalization constant for the weighted sums */

  Image
    *contrast_image;

  MagickBooleanType
    status;

  MemoryInfo
    *scanLinePixels_info,
    *interImage_info;

  ssize_t
    scanLineSize,
    width;  /* blur half-width, in pixels */

  /*
    Initialize contrast image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /*
    Prefer the OpenCL-accelerated path when it is available and succeeds.
  */
  contrast_image=AccelerateLocalContrastImage(image,radius,strength,exception);
  if (contrast_image != (Image *) NULL)
    return(contrast_image);
#endif
  contrast_image=CloneImage(image,0,0,MagickTrue,exception);
  if (contrast_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(contrast_image,DirectClass,exception) == MagickFalse)
    {
      contrast_image=DestroyImage(contrast_image);
      return((Image *) NULL);
    }
  image_view=AcquireVirtualCacheView(image,exception);
  contrast_view=AcquireAuthenticCacheView(contrast_image,exception);
  /*
    radius is a percentage: each unit adds 0.2% of the larger image
    dimension to the blur half-width.  Scan lines carry `width' extra
    pixels of padding at each end.
  */
  scanLineSize=(ssize_t) MagickMax(image->columns,image->rows);
  width=(ssize_t) scanLineSize*0.002f*fabs(radius);
  scanLineSize+=(2*width);
  /*
    One scan-line slice per OpenMP thread, all in a single allocation.
  */
  scanLinePixels_info=AcquireVirtualMemory((size_t) GetOpenMPMaximumThreads()*
    scanLineSize,sizeof(*scanLinePixels));
  if (scanLinePixels_info == (MemoryInfo *) NULL)
    {
      contrast_view=DestroyCacheView(contrast_view);
      image_view=DestroyCacheView(image_view);
      contrast_image=DestroyImage(contrast_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  scanLinePixels=(float *) GetVirtualMemoryBlob(scanLinePixels_info);
  /*
    Create intermediate buffer.
  */
  interImage_info=AcquireVirtualMemory(image->rows*(image->columns+(2*width)),
    sizeof(*interImage));
  if (interImage_info == (MemoryInfo *) NULL)
    {
      scanLinePixels_info=RelinquishVirtualMemory(scanLinePixels_info);
      contrast_view=DestroyCacheView(contrast_view);
      image_view=DestroyCacheView(image_view);
      contrast_image=DestroyImage(contrast_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  interImage=(float *) GetVirtualMemoryBlob(interImage_info);
  totalWeight=(float) ((width+1)*(width+1));
  /*
    Vertical pass.
  */
  status=MagickTrue;
  {
    ssize_t
      x;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static,4) \
      magick_threads(image,image,image->columns,1)
#endif
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      const int
        id = GetOpenMPThreadId();

      const Quantum
        *magick_restrict p;

      float
        *out,
        *pix,
        *pixels;

      register ssize_t
        y;

      ssize_t
        i;

      if (status == MagickFalse)
        continue;
      /*
        Each thread owns one scanLineSize slice of the shared buffer.
      */
      pixels=scanLinePixels;
      pixels+=id*scanLineSize;
      pix=pixels;
      p=GetCacheViewVirtualPixels(image_view,x,-width,1,image->rows+(2*width),
        exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      /*
        Cache the column's luma (with `width' virtual rows of padding
        above and below) into the thread's scan line.
      */
      for (y=0; y < (ssize_t) image->rows+(2*width); y++)
      {
        *pix++=(float)GetPixelLuma(image,p);
        p+=image->number_channels;
      }
      out=interImage+x+width;
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        float
          sum,
          weight;

        /*
          Weighted sum over 2*width-1 consecutive samples: the weight
          ramps up across the first loop and back down across the second;
          totalWeight normalizes the result.
        */
        weight=1.0f;
        sum=0;
        pix=pixels+y;
        for (i=0; i < width; i++)
        {
          sum+=weight*(*pix++);
          weight+=1.0f;
        }
        for (i=width+1; i < (2*width); i++)
        {
          sum+=weight*(*pix++);
          weight-=1.0f;
        }
        /* write to output */
        *out=sum/totalWeight;
        /* mirror into padding (appears to reflect edge columns into the
           buffer's horizontal padding -- verify against caller usage) */
        if (x <= width && x != 0)
          *(out-(x*2))=*out;
        if ((x > (ssize_t) image->columns-width-2) &&
            (x != (ssize_t) image->columns-1))
          *(out+((image->columns-x-1)*2))=*out;
        out+=image->columns+(width*2);
      }
    }
  }
  /*
    Horizontal pass.
  */
  {
    ssize_t
      y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static,4) \
      magick_threads(image,image,image->rows,1)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      const int
        id = GetOpenMPThreadId();

      const Quantum
        *magick_restrict p;

      float
        *pix,
        *pixels;

      register Quantum
        *magick_restrict q;

      register ssize_t
        x;

      ssize_t
        i;

      if (status == MagickFalse)
        continue;
      pixels=scanLinePixels;
      pixels+=id*scanLineSize;
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      q=GetCacheViewAuthenticPixels(contrast_view,0,y,image->columns,1,
        exception);
      if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
        {
          status=MagickFalse;
          continue;
        }
      /*
        Copy the vertically blurred (padded) row into the thread's scan
        line, then blur it horizontally with the same weighting.
      */
      memcpy(pixels,interImage+(y*(image->columns+(2*width))),(image->columns+
        (2*width))*sizeof(float));
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        float
          mult,
          srcVal,
          sum,
          weight;

        weight=1.0f;
        sum=0;
        pix=pixels+x;
        for (i=0; i < width; i++)
        {
          sum+=weight*(*pix++);
          weight+=1.0f;
        }
        for (i=width+1; i < (2*width); i++)
        {
          sum+=weight*(*pix++);
          weight-=1.0f;
        }
        /* Apply and write */
        srcVal=(float) GetPixelLuma(image,p);
        /*
          Unsharp-style gain: boost the luma difference by `strength'
          percent and scale each channel by boosted/original luma.
          NOTE(review): srcVal == 0 divides by zero here -- confirm black
          pixels cannot reach this path.
        */
        mult=(srcVal-(sum/totalWeight))*(strength/100.0f);
        mult=(srcVal+mult)/srcVal;
        SetPixelRed(contrast_image,ClampToQuantum(GetPixelRed(image,p)*mult),
          q);
        SetPixelGreen(contrast_image,ClampToQuantum(GetPixelGreen(image,p)*
          mult),q);
        SetPixelBlue(contrast_image,ClampToQuantum(GetPixelBlue(image,p)*mult),
          q);
        p+=image->number_channels;
        q+=contrast_image->number_channels;
      }
      if (SyncCacheViewAuthenticPixels(contrast_view,exception) == MagickFalse)
        status=MagickFalse;
    }
  }
  scanLinePixels_info=RelinquishVirtualMemory(scanLinePixels_info);
  interImage_info=RelinquishVirtualMemory(interImage_info);
  contrast_view=DestroyCacheView(contrast_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    contrast_image=DestroyImage(contrast_image);
  return(contrast_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o t i o n B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MotionBlurImage() simulates motion blur. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma).
% For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and MotionBlurImage() selects a suitable radius for you.
% Angle gives the angle of the blurring motion.
%
% Andrew Protano contributed this effect.
%
% The format of the MotionBlurImage method is:
%
% Image *MotionBlurImage(const Image *image,const double radius,
% const double sigma,const double angle,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting
% the center pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o angle: Apply the effect along this angle.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickRealType *GetMotionBlurKernel(const size_t width,
  const double sigma)
{
  MagickRealType
    *kernel,
    total;

  register ssize_t
    n;

  /*
    Generate a 1-D convolution kernel.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
    width,sizeof(*kernel)));
  if (kernel == (MagickRealType *) NULL)
    return(kernel);
  /*
    Half-Gaussian taps, accumulated so the kernel can be normalized to a
    unit sum in a second sweep.
  */
  total=0.0;
  for (n=0; n < (ssize_t) width; n++)
  {
    kernel[n]=(MagickRealType) (exp((-((double) n*n)/(double) (2.0*MagickSigma*
      MagickSigma)))/(MagickSQ2PI*MagickSigma));
    total+=kernel[n];
  }
  for (n=0; n < (ssize_t) width; n++)
    kernel[n]/=total;
  return(kernel);
}
MagickExport Image *MotionBlurImage(const Image *image,const double radius,
  const double sigma,const double angle,ExceptionInfo *exception)
{
#define BlurImageTag "Blur/Image"

  CacheView
    *blur_view,
    *image_view,
    *motion_view;

  Image
    *blur_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickRealType
    *kernel;  /* 1-D Gaussian weights, one per tap */

  OffsetInfo
    *offset;  /* per-tap pixel displacement along the blur direction */

  PointInfo
    point;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  /*
    Build the 1-D kernel and the per-tap offsets that lay it along the
    requested angle.
  */
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel=GetMotionBlurKernel(width,sigma);
  if (kernel == (MagickRealType *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  offset=(OffsetInfo *) AcquireQuantumMemory(width,sizeof(*offset));
  if (offset == (OffsetInfo *) NULL)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  blur_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      offset=(OffsetInfo *) RelinquishMagickMemory(offset);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      offset=(OffsetInfo *) RelinquishMagickMemory(offset);
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  point.x=(double) width*sin(DegreesToRadians(angle));
  point.y=(double) width*cos(DegreesToRadians(angle));
  /*
    NOTE(review): offset.x advances with point.y (cos term) and offset.y
    with point.x (sin term) -- confirm this axis pairing matches the
    intended blur orientation.
  */
  for (i=0; i < (ssize_t) width; i++)
  {
    offset[i].x=(ssize_t) ceil((double) (i*point.y)/hypot(point.x,point.y)-0.5);
    offset[i].y=(ssize_t) ceil((double) (i*point.x)/hypot(point.x,point.y)-0.5);
  }
  /*
    Motion blur image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  motion_view=AcquireVirtualCacheView(image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,blur_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        register const Quantum
          *magick_restrict r;

        register MagickRealType
          *magick_restrict k;

        register ssize_t
          j;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        /*
          Copy-through channels and write-masked pixels pass unchanged.
        */
        if (((blur_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(image,p) <= (QuantumRange/2)))
          {
            SetPixelChannel(blur_image,channel,p[i],q);
            continue;
          }
        k=kernel;
        pixel=0.0;
        if ((blur_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending: accumulate the weighted taps directly.
              NOTE(review): a NULL virtual-pixel read `continue's without
              advancing k, so later taps pair with the wrong weights --
              verify this is intended.
            */
            for (j=0; j < (ssize_t) width; j++)
            {
              r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+
                offset[j].y,1,1,exception);
              if (r == (const Quantum *) NULL)
                {
                  status=MagickFalse;
                  continue;
                }
              pixel+=(*k)*r[i];
              k++;
            }
            SetPixelChannel(blur_image,channel,ClampToQuantum(pixel),q);
            continue;
          }
        /*
          Alpha-weighted accumulation; gamma renormalizes by the total
          alpha seen across the taps.
        */
        alpha=0.0;
        gamma=0.0;
        for (j=0; j < (ssize_t) width; j++)
        {
          r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+offset[j].y,1,
            1,exception);
          if (r == (const Quantum *) NULL)
            {
              status=MagickFalse;
              continue;
            }
          alpha=(double) (QuantumScale*GetPixelAlpha(image,r));
          pixel+=(*k)*alpha*r[i];
          gamma+=(*k)*alpha;
          k++;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(blur_image);
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_MotionBlurImage)
#endif
        proceed=SetImageProgress(image,BlurImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_view=DestroyCacheView(blur_view);
  motion_view=DestroyCacheView(motion_view);
  image_view=DestroyCacheView(image_view);
  kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
  offset=(OffsetInfo *) RelinquishMagickMemory(offset);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P r e v i e w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PreviewImage() tiles 9 thumbnails of the specified image with an image
% processing operation applied with varying parameters. This may be helpful
% in pin-pointing an appropriate parameter for a particular image processing
% operation.
%
% The format of the PreviewImage method is:
%
% Image *PreviewImage(const Image *image,const PreviewType preview,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o preview: the image processing operation.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PreviewImage(const Image *image,const PreviewType preview,
ExceptionInfo *exception)
{
#define NumberTiles 9
#define PreviewImageTag "Preview/Image"
#define DefaultPreviewGeometry "204x204+10+10"
char
factor[MagickPathExtent],
label[MagickPathExtent];
double
degrees,
gamma,
percentage,
radius,
sigma,
threshold;
extern const char
DefaultTileFrame[];
Image
*images,
*montage_image,
*preview_image,
*thumbnail;
ImageInfo
*preview_info;
MagickBooleanType
proceed;
MontageInfo
*montage_info;
QuantizeInfo
quantize_info;
RectangleInfo
geometry;
register ssize_t
i,
x;
size_t
colors;
ssize_t
y;
/*
Open output image file.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
colors=2;
degrees=0.0;
gamma=(-0.2f);
preview_info=AcquireImageInfo();
SetGeometry(image,&geometry);
(void) ParseMetaGeometry(DefaultPreviewGeometry,&geometry.x,&geometry.y,
&geometry.width,&geometry.height);
images=NewImageList();
percentage=12.5;
GetQuantizeInfo(&quantize_info);
radius=0.0;
sigma=1.0;
threshold=0.0;
x=0;
y=0;
for (i=0; i < NumberTiles; i++)
{
thumbnail=ThumbnailImage(image,geometry.width,geometry.height,exception);
if (thumbnail == (Image *) NULL)
break;
(void) SetImageProgressMonitor(thumbnail,(MagickProgressMonitor) NULL,
(void *) NULL);
(void) SetImageProperty(thumbnail,"label",DefaultTileLabel,exception);
if (i == (NumberTiles/2))
{
(void) QueryColorCompliance("#dfdfdf",AllCompliance,
&thumbnail->matte_color,exception);
AppendImageToList(&images,thumbnail);
continue;
}
switch (preview)
{
case RotatePreview:
{
degrees+=45.0;
preview_image=RotateImage(thumbnail,degrees,exception);
(void) FormatLocaleString(label,MagickPathExtent,"rotate %g",degrees);
break;
}
case ShearPreview:
{
degrees+=5.0;
preview_image=ShearImage(thumbnail,degrees,degrees,exception);
(void) FormatLocaleString(label,MagickPathExtent,"shear %gx%g",degrees,
2.0*degrees);
break;
}
case RollPreview:
{
x=(ssize_t) ((i+1)*thumbnail->columns)/NumberTiles;
y=(ssize_t) ((i+1)*thumbnail->rows)/NumberTiles;
preview_image=RollImage(thumbnail,x,y,exception);
(void) FormatLocaleString(label,MagickPathExtent,"roll %+.20gx%+.20g",
(double) x,(double) y);
break;
}
case HuePreview:
{
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
(void) FormatLocaleString(factor,MagickPathExtent,"100,100,%g",2.0*
percentage);
(void) ModulateImage(preview_image,factor,exception);
(void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor);
break;
}
case SaturationPreview:
{
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
(void) FormatLocaleString(factor,MagickPathExtent,"100,%g",2.0*
percentage);
(void) ModulateImage(preview_image,factor,exception);
(void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor);
break;
}
case BrightnessPreview:
{
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
(void) FormatLocaleString(factor,MagickPathExtent,"%g",2.0*percentage);
(void) ModulateImage(preview_image,factor,exception);
(void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor);
break;
}
case GammaPreview:
default:
{
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
gamma+=0.4f;
(void) GammaImage(preview_image,gamma,exception);
(void) FormatLocaleString(label,MagickPathExtent,"gamma %g",gamma);
break;
}
case SpiffPreview:
{
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image != (Image *) NULL)
for (x=0; x < i; x++)
(void) ContrastImage(preview_image,MagickTrue,exception);
(void) FormatLocaleString(label,MagickPathExtent,"contrast (%.20g)",
(double) i+1);
break;
}
case DullPreview:
{
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
for (x=0; x < i; x++)
(void) ContrastImage(preview_image,MagickFalse,exception);
(void) FormatLocaleString(label,MagickPathExtent,"+contrast (%.20g)",
(double) i+1);
break;
}
case GrayscalePreview:
{
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
colors<<=1;
quantize_info.number_colors=colors;
quantize_info.colorspace=GRAYColorspace;
(void) QuantizeImage(&quantize_info,preview_image,exception);
(void) FormatLocaleString(label,MagickPathExtent,
"-colorspace gray -colors %.20g",(double) colors);
break;
}
case QuantizePreview:
{
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
colors<<=1;
quantize_info.number_colors=colors;
(void) QuantizeImage(&quantize_info,preview_image,exception);
(void) FormatLocaleString(label,MagickPathExtent,"colors %.20g",
(double) colors);
break;
}
case DespecklePreview:
{
for (x=0; x < (i-1); x++)
{
preview_image=DespeckleImage(thumbnail,exception);
if (preview_image == (Image *) NULL)
break;
thumbnail=DestroyImage(thumbnail);
thumbnail=preview_image;
}
preview_image=DespeckleImage(thumbnail,exception);
if (preview_image == (Image *) NULL)
break;
(void) FormatLocaleString(label,MagickPathExtent,"despeckle (%.20g)",
(double) i+1);
break;
}
case ReduceNoisePreview:
{
preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t)
radius,(size_t) radius,exception);
(void) FormatLocaleString(label,MagickPathExtent,"noise %g",radius);
break;
}
case AddNoisePreview:
{
switch ((int) i)
{
case 0:
{
(void) CopyMagickString(factor,"uniform",MagickPathExtent);
break;
}
case 1:
{
(void) CopyMagickString(factor,"gaussian",MagickPathExtent);
break;
}
case 2:
{
(void) CopyMagickString(factor,"multiplicative",MagickPathExtent);
break;
}
case 3:
{
(void) CopyMagickString(factor,"impulse",MagickPathExtent);
break;
}
case 5:
{
(void) CopyMagickString(factor,"laplacian",MagickPathExtent);
break;
}
case 6:
{
(void) CopyMagickString(factor,"Poisson",MagickPathExtent);
break;
}
default:
{
(void) CopyMagickString(thumbnail->magick,"NULL",MagickPathExtent);
break;
}
}
preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) i,
(size_t) i,exception);
(void) FormatLocaleString(label,MagickPathExtent,"+noise %s",factor);
break;
}
case SharpenPreview:
{
preview_image=SharpenImage(thumbnail,radius,sigma,exception);
(void) FormatLocaleString(label,MagickPathExtent,"sharpen %gx%g",
radius,sigma);
break;
}
case BlurPreview:
{
preview_image=BlurImage(thumbnail,radius,sigma,exception);
(void) FormatLocaleString(label,MagickPathExtent,"blur %gx%g",radius,
sigma);
break;
}
case ThresholdPreview:
{
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
(void) BilevelImage(thumbnail,(double) (percentage*((double)
QuantumRange+1.0))/100.0,exception);
(void) FormatLocaleString(label,MagickPathExtent,"threshold %g",
(double) (percentage*((double) QuantumRange+1.0))/100.0);
break;
}
case EdgeDetectPreview:
{
preview_image=EdgeImage(thumbnail,radius,exception);
(void) FormatLocaleString(label,MagickPathExtent,"edge %g",radius);
break;
}
case SpreadPreview:
{
preview_image=SpreadImage(thumbnail,image->interpolate,radius,
exception);
(void) FormatLocaleString(label,MagickPathExtent,"spread %g",
radius+0.5);
break;
}
case SolarizePreview:
{
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
(void) SolarizeImage(preview_image,(double) QuantumRange*percentage/
100.0,exception);
(void) FormatLocaleString(label,MagickPathExtent,"solarize %g",
(QuantumRange*percentage)/100.0);
break;
}
case ShadePreview:
{
degrees+=10.0;
preview_image=ShadeImage(thumbnail,MagickTrue,degrees,degrees,
exception);
(void) FormatLocaleString(label,MagickPathExtent,"shade %gx%g",degrees,
degrees);
break;
}
case RaisePreview:
{
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
geometry.width=(size_t) (2*i+2);
geometry.height=(size_t) (2*i+2);
geometry.x=(i-1)/2;
geometry.y=(i-1)/2;
(void) RaiseImage(preview_image,&geometry,MagickTrue,exception);
(void) FormatLocaleString(label,MagickPathExtent,
"raise %.20gx%.20g%+.20g%+.20g",(double) geometry.width,(double)
geometry.height,(double) geometry.x,(double) geometry.y);
break;
}
case SegmentPreview:
{
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
threshold+=0.4f;
(void) SegmentImage(preview_image,sRGBColorspace,MagickFalse,threshold,
threshold,exception);
(void) FormatLocaleString(label,MagickPathExtent,"segment %gx%g",
threshold,threshold);
break;
}
case SwirlPreview:
{
preview_image=SwirlImage(thumbnail,degrees,image->interpolate,
exception);
(void) FormatLocaleString(label,MagickPathExtent,"swirl %g",degrees);
degrees+=45.0;
break;
}
case ImplodePreview:
{
degrees+=0.1f;
preview_image=ImplodeImage(thumbnail,degrees,image->interpolate,
exception);
(void) FormatLocaleString(label,MagickPathExtent,"implode %g",degrees);
break;
}
case WavePreview:
{
degrees+=5.0f;
preview_image=WaveImage(thumbnail,0.5*degrees,2.0*degrees,
image->interpolate,exception);
(void) FormatLocaleString(label,MagickPathExtent,"wave %gx%g",0.5*
degrees,2.0*degrees);
break;
}
case OilPaintPreview:
{
preview_image=OilPaintImage(thumbnail,(double) radius,(double) sigma,
exception);
(void) FormatLocaleString(label,MagickPathExtent,"charcoal %gx%g",
radius,sigma);
break;
}
case CharcoalDrawingPreview:
{
preview_image=CharcoalImage(thumbnail,(double) radius,(double) sigma,
exception);
(void) FormatLocaleString(label,MagickPathExtent,"charcoal %gx%g",
radius,sigma);
break;
}
case JPEGPreview:
{
char
filename[MagickPathExtent];
int
file;
MagickBooleanType
status;
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
preview_info->quality=(size_t) percentage;
(void) FormatLocaleString(factor,MagickPathExtent,"%.20g",(double)
preview_info->quality);
file=AcquireUniqueFileResource(filename);
if (file != -1)
file=close(file)-1;
(void) FormatLocaleString(preview_image->filename,MagickPathExtent,
"jpeg:%s",filename);
status=WriteImage(preview_info,preview_image,exception);
if (status != MagickFalse)
{
Image
*quality_image;
(void) CopyMagickString(preview_info->filename,
preview_image->filename,MagickPathExtent);
quality_image=ReadImage(preview_info,exception);
if (quality_image != (Image *) NULL)
{
preview_image=DestroyImage(preview_image);
preview_image=quality_image;
}
}
(void) RelinquishUniqueFileResource(preview_image->filename);
if ((GetBlobSize(preview_image)/1024) >= 1024)
(void) FormatLocaleString(label,MagickPathExtent,"quality %s\n%gmb ",
factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/
1024.0/1024.0);
else
if (GetBlobSize(preview_image) >= 1024)
(void) FormatLocaleString(label,MagickPathExtent,
"quality %s\n%gkb ",factor,(double) ((MagickOffsetType)
GetBlobSize(preview_image))/1024.0);
else
(void) FormatLocaleString(label,MagickPathExtent,
"quality %s\n%.20gb ",factor,(double) ((MagickOffsetType)
GetBlobSize(thumbnail)));
break;
}
}
thumbnail=DestroyImage(thumbnail);
percentage+=12.5;
radius+=0.5;
sigma+=0.25;
if (preview_image == (Image *) NULL)
break;
(void) DeleteImageProperty(preview_image,"label");
(void) SetImageProperty(preview_image,"label",label,exception);
AppendImageToList(&images,preview_image);
proceed=SetImageProgress(image,PreviewImageTag,(MagickOffsetType) i,
NumberTiles);
if (proceed == MagickFalse)
break;
}
if (images == (Image *) NULL)
{
preview_info=DestroyImageInfo(preview_info);
return((Image *) NULL);
}
/*
Create the montage.
*/
montage_info=CloneMontageInfo(preview_info,(MontageInfo *) NULL);
(void) CopyMagickString(montage_info->filename,image->filename,
MagickPathExtent);
montage_info->shadow=MagickTrue;
(void) CloneString(&montage_info->tile,"3x3");
(void) CloneString(&montage_info->geometry,DefaultPreviewGeometry);
(void) CloneString(&montage_info->frame,DefaultTileFrame);
montage_image=MontageImages(images,montage_info,exception);
montage_info=DestroyMontageInfo(montage_info);
images=DestroyImageList(images);
if (montage_image == (Image *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
if (montage_image->montage != (char *) NULL)
{
/*
Free image directory.
*/
montage_image->montage=(char *) RelinquishMagickMemory(
montage_image->montage);
if (image->directory != (char *) NULL)
montage_image->directory=(char *) RelinquishMagickMemory(
montage_image->directory);
}
preview_info=DestroyImageInfo(preview_info);
return(montage_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o t a t i o n a l B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotationalBlurImage() applies a radial blur to the image.
%
% Andrew Protano contributed this effect.
%
% The format of the RotationalBlurImage method is:
%
% Image *RotationalBlurImage(const Image *image,const double angle,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o angle: the angle of the radial blur.
%
% o blur: the blur.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RotationalBlurImage(const Image *image,const double angle,
  ExceptionInfo *exception)
{
  CacheView
    *blur_view,
    *image_view,
    *radial_view;

  double
    blur_radius,
    *cos_theta,   /* precomputed cosines of the sample angles */
    offset,
    *sin_theta,   /* precomputed sines of the sample angles */
    theta;

  Image
    *blur_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PointInfo
    blur_center;  /* pixel-space center the blur rotates around */

  register ssize_t
    i;

  size_t
    n;            /* number of samples taken along the blur arc */

  ssize_t
    y;

  /*
    Allocate blur image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the OpenCL-accelerated path when it is available and succeeds. */
  blur_image=AccelerateRotationalBlurImage(image,angle,exception);
  if (blur_image != (Image *) NULL)
    return(blur_image);
#endif
  blur_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  /*
    The blur rotates about the image center; blur_radius is the distance
    from the center to a corner.
  */
  blur_center.x=(double) (image->columns-1)/2.0;
  blur_center.y=(double) (image->rows-1)/2.0;
  blur_radius=hypot(blur_center.x,blur_center.y);
  /*
    Sample count grows with both the blur angle and the image radius, so
    larger images / wider angles get a smoother arc.
  */
  n=(size_t) fabs(4.0*DegreesToRadians(angle)*sqrt((double) blur_radius)+2UL);
  theta=DegreesToRadians(angle)/(double) (n-1);
  cos_theta=(double *) AcquireQuantumMemory((size_t) n,
    sizeof(*cos_theta));
  sin_theta=(double *) AcquireQuantumMemory((size_t) n,
    sizeof(*sin_theta));
  if ((cos_theta == (double *) NULL) ||
      (sin_theta == (double *) NULL))
    {
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Center the sample angles on zero so the arc straddles the source pixel.
  */
  offset=theta*(double) (n-1)/2.0;
  for (i=0; i < (ssize_t) n; i++)
  {
    cos_theta[i]=cos((double) (theta*i-offset));
    sin_theta[i]=sin((double) (theta*i-offset));
  }
  /*
    Radial blur image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  radial_view=AcquireVirtualCacheView(image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,blur_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        radius;

      PointInfo
        center;

      register ssize_t
        i;

      size_t
        step;

      /*
        Pixel position relative to the blur center.
      */
      center.x=(double) x-blur_center.x;
      center.y=(double) y-blur_center.y;
      radius=hypot((double) center.x,center.y);
      /*
        Pixels near the center trace a shorter arc, so they stride through
        the angle tables more coarsely (larger step); step is clamped to
        [1, n-1].
      */
      if (radius == 0)
        step=1;
      else
        {
          step=(size_t) (blur_radius/radius);
          if (step == 0)
            step=1;
          else
            if (step >= n)
              step=n-1;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        register const Quantum
          *magick_restrict r;

        register ssize_t
          j;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        /*
          Copy-through channels and write-masked pixels pass unchanged.
        */
        if (((blur_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(image,p) <= (QuantumRange/2)))
          {
            SetPixelChannel(blur_image,channel,p[i],q);
            continue;
          }
        gamma=0.0;
        pixel=0.0;
        if ((GetPixelChannelTraits(image,AlphaPixelChannel) == UndefinedPixelTrait) ||
            (channel == AlphaPixelChannel))
          {
            /*
              No alpha channel (or this IS the alpha channel): average the
              rotated samples with equal weight.
            */
            for (j=0; j < (ssize_t) n; j+=(ssize_t) step)
            {
              /* Rotate (center.x,center.y) by sample angle j and fetch. */
              r=GetCacheViewVirtualPixels(radial_view, (ssize_t) (blur_center.x+
                center.x*cos_theta[j]-center.y*sin_theta[j]+0.5),(ssize_t)
                (blur_center.y+center.x*sin_theta[j]+center.y*cos_theta[j]+0.5),
                1,1,exception);
              if (r == (const Quantum *) NULL)
                {
                  status=MagickFalse;
                  continue;
                }
              pixel+=r[i];
              gamma++;
            }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /*
          Alpha-weighted average: each sample contributes in proportion to
          its opacity.
        */
        for (j=0; j < (ssize_t) n; j+=(ssize_t) step)
        {
          double
            alpha;

          r=GetCacheViewVirtualPixels(radial_view, (ssize_t) (blur_center.x+
            center.x*cos_theta[j]-center.y*sin_theta[j]+0.5),(ssize_t)
            (blur_center.y+center.x*sin_theta[j]+center.y*cos_theta[j]+0.5),
            1,1,exception);
          if (r == (const Quantum *) NULL)
            {
              status=MagickFalse;
              continue;
            }
          alpha=(double) QuantumScale*GetPixelAlpha(image,r);
          pixel+=alpha*r[i];
          gamma+=alpha;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(blur_image);
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_RotationalBlurImage)
#endif
        proceed=SetImageProgress(image,BlurImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_view=DestroyCacheView(blur_view);
  radial_view=DestroyCacheView(radial_view);
  image_view=DestroyCacheView(image_view);
  cos_theta=(double *) RelinquishMagickMemory(cos_theta);
  sin_theta=(double *) RelinquishMagickMemory(sin_theta);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e l e c t i v e B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SelectiveBlurImage() selectively blur pixels within a contrast threshold.
% It is similar to the unsharpen mask that sharpens everything with contrast
% above a certain threshold.
%
% The format of the SelectiveBlurImage method is:
%
% Image *SelectiveBlurImage(const Image *image,const double radius,
% const double sigma,const double threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o threshold: only pixels within this contrast threshold are included
% in the blur operation.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SelectiveBlurImage(const Image *image,const double radius,
  const double sigma,const double threshold,ExceptionInfo *exception)
{
#define SelectiveBlurImageTag  "SelectiveBlur/Image"

  CacheView
    *blur_view,
    *image_view,
    *luminance_view;

  Image
    *blur_image,
    *luminance_image;  /* grayscale copy used for the contrast test */

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickRealType
    *kernel;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    center,  /* offset (in Quantum units) of the window's center pixel */
    j,
    u,
    v,
    y;

  /*
    Initialize blur image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Build a width x width Gaussian kernel.
  */
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
    width,width*sizeof(*kernel)));
  if (kernel == (MagickRealType *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  j=(ssize_t) (width-1)/2;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
      kernel[i++]=(MagickRealType) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
        MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
  }
  if (image->debug != MagickFalse)
    {
      /*
        Log the kernel one row per line for debugging.
      */
      char
        format[MagickPathExtent],
        *message;

      register const MagickRealType
        *k;

      ssize_t
        u,
        v;

      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  SelectiveBlurImage with %.20gx%.20g kernel:",(double) width,(double)
        width);
      message=AcquireString("");
      k=kernel;
      for (v=0; v < (ssize_t) width; v++)
      {
        *message='\0';
        (void) FormatLocaleString(format,MagickPathExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < (ssize_t) width; u++)
        {
          (void) FormatLocaleString(format,MagickPathExtent,"%+f ",(double) *k++);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  blur_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      blur_image=DestroyImage(blur_image);
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      return((Image *) NULL);
    }
  luminance_image=CloneImage(image,0,0,MagickTrue,exception);
  if (luminance_image == (Image *) NULL)
    {
      blur_image=DestroyImage(blur_image);
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      return((Image *) NULL);
    }
  status=TransformImageColorspace(luminance_image,GRAYColorspace,exception);
  if (status == MagickFalse)
    {
      luminance_image=DestroyImage(luminance_image);
      blur_image=DestroyImage(blur_image);
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      return((Image *) NULL);
    }
  /*
    Threshold blur image.
  */
  status=MagickTrue;
  progress=0;
  /*
    Each row fetches a (columns+width) x width window; center indexes the
    window's middle pixel in Quantum units.
  */
  center=(ssize_t) (GetPixelChannels(image)*(image->columns+width)*
    ((width-1)/2L)+GetPixelChannels(image)*((width-1)/2L));
  image_view=AcquireVirtualCacheView(image,exception);
  luminance_view=AcquireVirtualCacheView(luminance_image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,blur_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      contrast;

    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict l,
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) (width-1)/2L),y-(ssize_t)
      ((width-1)/2L),image->columns+width,width,exception);
    l=GetCacheViewVirtualPixels(luminance_view,-((ssize_t) (width-1)/2L),y-
      (ssize_t) ((width-1)/2L),luminance_image->columns+width,width,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        intensity;

      register ssize_t
        i;

      /* Reference intensity of the center pixel for the contrast test. */
      intensity=GetPixelIntensity(image,p+center);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        register const MagickRealType
          *magick_restrict k;

        register const Quantum
          *magick_restrict luminance_pixels,
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        /*
          Copy-through channels and write-masked pixels pass unchanged.
        */
        if (((blur_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(image,p+center) <= (QuantumRange/2)))
          {
            SetPixelChannel(blur_image,channel,p[center+i],q);
            continue;
          }
        k=kernel;
        pixel=0.0;
        pixels=p;
        luminance_pixels=l;
        gamma=0.0;
        if ((blur_traits & BlendPixelTrait) == 0)
          {
            /*
              Non-blending channel: only kernel taps whose luminance is
              within threshold of the center pixel contribute.
            */
            for (v=0; v < (ssize_t) width; v++)
            {
              for (u=0; u < (ssize_t) width; u++)
              {
                contrast=GetPixelIntensity(luminance_image,luminance_pixels)-
                  intensity;
                if (fabs(contrast) < threshold)
                  {
                    pixel+=(*k)*pixels[i];
                    gamma+=(*k);
                  }
                k++;
                pixels+=GetPixelChannels(image);
                luminance_pixels+=GetPixelChannels(luminance_image);
              }
              /* Advance to the next window row. */
              pixels+=GetPixelChannels(image)*image->columns;
              luminance_pixels+=GetPixelChannels(luminance_image)*
                luminance_image->columns;
            }
            if (fabs((double) gamma) < MagickEpsilon)
              {
                /* No taps qualified: keep the original pixel. */
                SetPixelChannel(blur_image,channel,p[center+i],q);
                continue;
              }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /*
          Blending channel: alpha-weight each qualifying tap.
        */
        for (v=0; v < (ssize_t) width; v++)
        {
          for (u=0; u < (ssize_t) width; u++)
          {
            contrast=GetPixelIntensity(image,pixels)-intensity;
            if (fabs(contrast) < threshold)
              {
                alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
                pixel+=(*k)*alpha*pixels[i];
                gamma+=(*k)*alpha;
              }
            k++;
            pixels+=GetPixelChannels(image);
            luminance_pixels+=GetPixelChannels(luminance_image);
          }
          pixels+=GetPixelChannels(image)*image->columns;
          luminance_pixels+=GetPixelChannels(luminance_image)*
            luminance_image->columns;
        }
        if (fabs((double) gamma) < MagickEpsilon)
          {
            SetPixelChannel(blur_image,channel,p[center+i],q);
            continue;
          }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      p+=GetPixelChannels(image);
      l+=GetPixelChannels(luminance_image);
      q+=GetPixelChannels(blur_image);
    }
    sync=SyncCacheViewAuthenticPixels(blur_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SelectiveBlurImage)
#endif
        proceed=SetImageProgress(image,SelectiveBlurImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_image->type=image->type;
  blur_view=DestroyCacheView(blur_view);
  image_view=DestroyCacheView(image_view);
  luminance_image=DestroyImage(luminance_image);
  kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShadeImage() shines a distant light on an image to create a
% three-dimensional effect. You control the positioning of the light with
% azimuth and elevation; azimuth is measured in degrees off the x axis
% and elevation is measured in pixels above the Z axis.
%
% The format of the ShadeImage method is:
%
% Image *ShadeImage(const Image *image,const MagickBooleanType gray,
% const double azimuth,const double elevation,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o gray: A value other than zero shades the intensity of each pixel.
%
% o azimuth, elevation: Define the light source direction.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShadeImage(const Image *image,const MagickBooleanType gray,
  const double azimuth,const double elevation,ExceptionInfo *exception)
{
#define ShadeImageTag  "Shade/Image"

  CacheView
    *image_view,
    *shade_view;

  Image
    *linear_image,
    *shade_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PrimaryInfo
    light;  /* direction vector of the distant light source */

  ssize_t
    y;

  /*
    Initialize shaded image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  linear_image=CloneImage(image,0,0,MagickTrue,exception);
  shade_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if ((linear_image == (Image *) NULL) || (shade_image == (Image *) NULL))
    {
      if (linear_image != (Image *) NULL)
        linear_image=DestroyImage(linear_image);
      if (shade_image != (Image *) NULL)
        shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(shade_image,DirectClass,exception) == MagickFalse)
    {
      linear_image=DestroyImage(linear_image);
      shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  /*
    Compute the light vector.
  */
  light.x=(double) QuantumRange*cos(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.y=(double) QuantumRange*sin(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.z=(double) QuantumRange*sin(DegreesToRadians(elevation));
  /*
    Shade image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(linear_image,exception);
  shade_view=AcquireAuthenticCacheView(shade_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(linear_image,shade_image,linear_image->rows,1)
#endif
  for (y=0; y < (ssize_t) linear_image->rows; y++)
  {
    double
      distance,
      normal_distance,
      shade;

    PrimaryInfo
      normal;

    register const Quantum
      *magick_restrict center,
      *magick_restrict p,
      *magick_restrict post,
      *magick_restrict pre;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Fetch a 3-row window (one pixel of padding on each side) centered on
      the current row.
    */
    p=GetCacheViewVirtualPixels(image_view,-1,y-1,linear_image->columns+2,3,
      exception);
    q=QueueCacheViewAuthenticPixels(shade_view,0,y,shade_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Shade this row of pixels.
    */
    normal.z=2.0*(double) QuantumRange;  /* constant Z of surface normal */
    for (x=0; x < (ssize_t) linear_image->columns; x++)
    {
      register ssize_t
        i;

      /*
        Determine the surface normal and compute shading.
      */
      /* pre/center/post point at the previous, current, and next rows of
         the 3-row window. */
      pre=p+GetPixelChannels(linear_image);
      center=pre+(linear_image->columns+2)*GetPixelChannels(linear_image);
      post=center+(linear_image->columns+2)*GetPixelChannels(linear_image);
      /*
        Approximate the normal's X/Y from intensity differences across the
        3x3 neighborhood (left-minus-right, bottom-minus-top).
      */
      normal.x=(double) (
        GetPixelIntensity(linear_image,pre-GetPixelChannels(linear_image))+
        GetPixelIntensity(linear_image,center-GetPixelChannels(linear_image))+
        GetPixelIntensity(linear_image,post-GetPixelChannels(linear_image))-
        GetPixelIntensity(linear_image,pre+GetPixelChannels(linear_image))-
        GetPixelIntensity(linear_image,center+GetPixelChannels(linear_image))-
        GetPixelIntensity(linear_image,post+GetPixelChannels(linear_image)));
      normal.y=(double) (
        GetPixelIntensity(linear_image,post-GetPixelChannels(linear_image))+
        GetPixelIntensity(linear_image,post)+
        GetPixelIntensity(linear_image,post+GetPixelChannels(linear_image))-
        GetPixelIntensity(linear_image,pre-GetPixelChannels(linear_image))-
        GetPixelIntensity(linear_image,pre)-
        GetPixelIntensity(linear_image,pre+GetPixelChannels(linear_image)));
      if ((fabs(normal.x) <= MagickEpsilon) &&
          (fabs(normal.y) <= MagickEpsilon))
        shade=light.z;  /* flat surface: lit by the vertical component only */
      else
        {
          shade=0.0;
          /* Lambertian-style term: (normal . light) / |normal|. */
          distance=normal.x*light.x+normal.y*light.y+normal.z*light.z;
          if (distance > MagickEpsilon)
            {
              normal_distance=normal.x*normal.x+normal.y*normal.y+
                normal.z*normal.z;
              if (normal_distance > (MagickEpsilon*MagickEpsilon))
                shade=distance/sqrt((double) normal_distance);
            }
        }
      for (i=0; i < (ssize_t) GetPixelChannels(linear_image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          shade_traits,
          traits;

        channel=GetPixelChannelChannel(linear_image,i);
        traits=GetPixelChannelTraits(linear_image,channel);
        shade_traits=GetPixelChannelTraits(shade_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (shade_traits == UndefinedPixelTrait))
          continue;
        /*
          Copy-through channels and write-masked pixels pass unchanged.
        */
        if (((shade_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(linear_image,center) <= (QuantumRange/2)))
          {
            SetPixelChannel(shade_image,channel,center[i],q);
            continue;
          }
        if ((traits & UpdatePixelTrait) == 0)
          {
            SetPixelChannel(shade_image,channel,center[i],q);
            continue;
          }
        if (gray != MagickFalse)
          {
            /* Gray shading: write the shade value itself. */
            SetPixelChannel(shade_image,channel,ClampToQuantum(shade),q);
            continue;
          }
        /* Color shading: scale the source channel by the shade. */
        SetPixelChannel(shade_image,channel,ClampToQuantum(QuantumScale*shade*
          center[i]),q);
      }
      p+=GetPixelChannels(linear_image);
      q+=GetPixelChannels(shade_image);
    }
    if (SyncCacheViewAuthenticPixels(shade_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ShadeImage)
#endif
        proceed=SetImageProgress(image,ShadeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  shade_view=DestroyCacheView(shade_view);
  image_view=DestroyCacheView(image_view);
  linear_image=DestroyImage(linear_image);
  if (status == MagickFalse)
    shade_image=DestroyImage(shade_image);
  return(shade_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a r p e n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SharpenImage() sharpens the image. We convolve the image with a Gaussian
% operator of the given radius and standard deviation (sigma). For
% reasonable results, radius should be larger than sigma. Use a radius of 0
% and SharpenImage() selects a suitable radius for you.
%
% Using a separable kernel would be faster, but the negative weights cancel
% out on the corners of the kernel producing often undesirable ringing in the
% filtered result; this can be avoided by using a 2D gaussian shaped image
% sharpening kernel instead.
%
% The format of the SharpenImage method is:
%
% Image *SharpenImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Laplacian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  double
    gamma,
    normalize;

  Image
    *sharp_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    half_width,
    u,
    v;

  /*
    Validate arguments.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Acquire and initialize a square (width x width) convolution kernel.
  */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(kernel_info,0,sizeof(*kernel_info));
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->height*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Fill the kernel with negated Gaussian weights, accumulating their sum.
  */
  half_width=(ssize_t) (kernel_info->width-1)/2;
  normalize=0.0;
  i=0;
  for (v=(-half_width); v <= half_width; v++)
    for (u=(-half_width); u <= half_width; u++)
    {
      kernel_info->values[i]=(MagickRealType) (-exp(-((double) u*u+v*v)/(2.0*
        MagickSigma*MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
      normalize+=kernel_info->values[i];
      i++;
    }
  /*
    Replace the center tap with a strong positive weight so the kernel
    sharpens instead of blurs.
  */
  kernel_info->values[i/2]=(double) ((-2.0)*normalize);
  /*
    Rescale the kernel so its weights sum to unity, then convolve.
  */
  normalize=0.0;
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    normalize+=kernel_info->values[i];
  gamma=PerceptibleReciprocal(normalize);
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]*=gamma;
  sharp_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(sharp_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p r e a d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SpreadImage() is a special effects method that randomly displaces each
% pixel in a square area defined by the radius parameter.
%
% The format of the SpreadImage method is:
%
% Image *SpreadImage(const Image *image,
% const PixelInterpolateMethod method,const double radius,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
%    o method: interpolation method.
%
% o radius: choose a random pixel in a neighborhood of this extent.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SpreadImage(const Image *image,
  const PixelInterpolateMethod method,const double radius,
  ExceptionInfo *exception)
{
#define SpreadImageTag  "Spread/Image"

  CacheView
    *image_view,
    *spread_view;

  Image
    *spread_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **magick_restrict random_info;  /* one random generator per thread */

  size_t
    width;  /* extent of the square neighborhood to displace within */

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Initialize spread image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  spread_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (spread_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(spread_image,DirectClass,exception) == MagickFalse)
    {
      spread_image=DestroyImage(spread_image);
      return((Image *) NULL);
    }
  /*
    Spread image.
  */
  status=MagickTrue;
  progress=0;
  width=GetOptimalKernelWidth1D(radius,0.5);
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireVirtualCacheView(image,exception);
  spread_view=AcquireAuthenticCacheView(spread_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /*
    Only parallelize when the random generator is unseeded (key == ~0UL);
    a seeded generator must run single-threaded to stay reproducible.
  */
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,spread_image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(spread_view,0,y,spread_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PointInfo
        point;

      /*
        Pick a random offset in [-width/2, width/2) on each axis and
        interpolate the source pixel there.
      */
      point.x=GetPseudoRandomValue(random_info[id]);
      point.y=GetPseudoRandomValue(random_info[id]);
      status=InterpolatePixelChannels(image,image_view,spread_image,method,
        (double) x+width*(point.x-0.5),(double) y+width*(point.y-0.5),q,
        exception);
      q+=GetPixelChannels(spread_image);
    }
    if (SyncCacheViewAuthenticPixels(spread_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SpreadImage)
#endif
        proceed=SetImageProgress(image,SpreadImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  spread_view=DestroyCacheView(spread_view);
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    spread_image=DestroyImage(spread_image);
  return(spread_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n s h a r p M a s k I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnsharpMaskImage() sharpens one or more image channels. We convolve the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma). For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and UnsharpMaskImage() selects a suitable radius for you.
%
% The format of the UnsharpMaskImage method is:
%
%      Image *UnsharpMaskImage(const Image *image,const double radius,
%        const double sigma,const double gain,const double threshold,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o gain: the percentage of the difference between the original and the
% blur image that is added back into the original.
%
%    o threshold: the threshold in pixels needed to apply the difference gain.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *UnsharpMaskImage(const Image *image,const double radius,
  const double sigma,const double gain,const double threshold,
  ExceptionInfo *exception)
{
#define SharpenImageTag "Sharpen/Image"

  CacheView
    *image_view,
    *unsharp_view;

  Image
    *unsharp_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  double
    quantum_threshold;

  ssize_t
    y;

  /*
    Validate arguments and, when built with OpenCL support, try the
    accelerated implementation first.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  unsharp_image=AccelerateUnsharpMaskImage(image,radius,sigma,gain,threshold,
    exception);
  if (unsharp_image != (Image *) NULL)
    return(unsharp_image);
#endif
  /*
    The blurred clone serves as both workspace and result: each channel of
    it is overwritten below with the sharpened value.
  */
  unsharp_image=BlurImage(image,radius,sigma,exception);
  if (unsharp_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Scale the caller's normalized threshold into the quantum range.
  */
  quantum_threshold=(double) QuantumRange*threshold;
  /*
    Unsharp-mask image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  unsharp_view=AcquireAuthenticCacheView(unsharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,unsharp_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /* another row already failed; skip the remaining work for this row */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(unsharp_view,0,y,unsharp_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelChannel
          channel;

        PixelTrait
          traits,
          unsharp_traits;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        unsharp_traits=GetPixelChannelTraits(unsharp_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (unsharp_traits == UndefinedPixelTrait))
          continue;
        /*
          Copy-only channel or masked-out pixel: keep the original value.
        */
        if (((unsharp_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(image,p) <= (QuantumRange/2)))
          {
            SetPixelChannel(unsharp_image,channel,p[i],q);
            continue;
          }
        /*
          'pixel' is the difference between the original and the blurred
          channel value; amplify it by 'gain' unless it is below the
          threshold, in which case the original value is kept.
        */
        pixel=p[i]-(double) GetPixelChannel(unsharp_image,channel,q);
        if (fabs(2.0*pixel) < quantum_threshold)
          pixel=(double) p[i];
        else
          pixel=(double) p[i]+gain*pixel;
        SetPixelChannel(unsharp_image,channel,ClampToQuantum(pixel),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(unsharp_image);
    }
    if (SyncCacheViewAuthenticPixels(unsharp_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress counter is shared between threads; serialize updates */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_UnsharpMaskImage)
#endif
        proceed=SetImageProgress(image,SharpenImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  unsharp_image->type=image->type;
  unsharp_view=DestroyCacheView(unsharp_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    unsharp_image=DestroyImage(unsharp_image);
  return(unsharp_image);
}
|
triangle_mesh.h | /**
* Copyright (c) 2021 Darius Rückert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
#pragma once
#include "saiga/core/geometry/aabb.h"
#include "saiga/core/geometry/triangle.h"
#include "saiga/core/geometry/vertex.h"
#include "saiga/core/math/math.h"
#include "saiga/core/util/assert.h"
#include "Mesh.h"
#include <algorithm>
#include <cstring>
#include <numeric>
namespace Saiga
{
/*
 * Data structure for simple triangle meshes.
 * Can be turned into an IndexedVertexBuffer for drawing with OpenGL.
 */
template <typename vertex_t, typename index_t>
class TriangleMesh : public Mesh<vertex_t>
{
   public:
    using VertexType = vertex_t;
    using IndexType  = index_t;

    using Base = Mesh<vertex_t>;
    using Base::aabb;
    using Base::addVertex;
    using Base::size;
    using Base::vertices;

    // One triangle, stored as the three indices of its corner vertices.
    using Face = Vector<IndexType, 3>;

    // Applies 'trafo' to every vertex normal (direction only, w is preserved).
    void transformNormal(const mat4& trafo);

    /*
     * Deletes all vertices and faces.
     */
    void clear()
    {
        Base::clear();
        faces.resize(0);
    }

    /*
     * Adds face to mesh.
     * The indices of the face should match existing vertices
     * return: index of new face
     */
    int addFace(const Face& f)
    {
        faces.push_back(f);
        return faces.size() - 1;
    }

    int addFace(index_t f[3]) { return addFace(Face(f[0], f[1], f[2])); }

    int addFace(index_t v0, index_t v1, index_t v2) { return addFace(Face(v0, v1, v2)); }

    /*
     * Adds given vertices and the 2 corresponding triangles to mesh
     */
    void addQuad(vertex_t verts[4]);
    void addTriangle(vertex_t verts[3]);
    void addTriangle(const Triangle& t);

    /*
     * Adds 2 Triangles given by 4 vertices and form a quad.
     * The vertices should be ordered counter clockwise.
     */
    void addQuad(index_t inds[4]);

    /*
     * Subdivides the triangle at index 'face' into 4 triangles.
     * The new triangles will be added to the mesh and the old will be overwritten
     */
    void subdivideFace(int face);

    /*
     * Inverts the triangle at index 'face'.
     * The order of the indices will be reversed.
     */
    void invertFace(int face);
    void invertMesh();

    /*
     * Converts the indexed face data structure to a simple triangle list.
     */
    std::vector<Triangle> toTriangleList() const;

    /*
     * Adds the complete mesh 'other' to the current mesh.
     */
    void addMesh(const TriangleMesh<vertex_t, index_t>& other);

    // Same as above, but converts from a mesh with different vertex/index types.
    template <typename mesh_vertex_t, typename mesh_index_t>
    void addMesh(const TriangleMesh<mesh_vertex_t, mesh_index_t>& other);

    /**
     * Computes the per vertex normal by weighting each face normal by its surface area.
     */
    void computePerVertexNormal();

    /**
     * Removes all vertices that are not referenced by a triangle.
     * Computes the new vertex indices for each triangle.
     */
    void removeUnusedVertices();

    /**
     * Computes the size in bytes for this triangle mesh.
     */
    size_t size() const;

    // Releases the vertex and face storage (including the capacity).
    void free();

    int numIndices() const { return faces.size() * 3; }

    // True iff every face index references an existing vertex.
    bool isValid() const;
    bool empty() const { return faces.empty() || vertices.empty(); }

    /**
     * Sorts the vertices by (x,y,z) lexical.
     * The face indices are corrected to match the new vertex order.
     */
    void sortVerticesByPosition(double epsilon = 1e-5);

    /**
     * Removes subsequent vertices if they have identical position.
     * It makes sense to call it after 'sortVerticesByPosition'.
     *
     * The face indices are updated accordingly.
     */
    void removeSubsequentDuplicates(double epsilon = 1e-5);

    /**
     * Removes all triangles, which reference a vertex twice
     */
    void removeDegenerateFaces();

    // Minimum distance from point 'x' to any triangle of the mesh.
    float distancePointMesh(const vec3& x);

    template <typename v, typename i>
    friend std::ostream& operator<<(std::ostream& os, const TriangleMesh<v, i>& dt);

    // Flattens 'faces' into a plain index list of length numIndices().
    // NOTE(review): assumes the Face storage (Vector<IndexType,3>) is
    // contiguous across the std::vector -- confirm for the used math backend.
    std::vector<index_t> getIndexList() const
    {
        std::vector<index_t> indices(numIndices());
        std::copy(&faces[0](0), &faces[0](0) + numIndices(), indices.data());
        return indices;
    }

    /**
     * Writes this mesh in OFF format to the given output stream.
     */
    //    void saveMeshOff(std::ostream& strm) const;
    //    void saveMeshOffColor(std::ostream& strm) const;

   public:
    //    std::vector<vertex_t> vertices;
    std::vector<Face> faces;
};
template <typename vertex_t, typename index_t>
void TriangleMesh<vertex_t, index_t>::transformNormal(const mat4& trafo)
{
    // Rotate every vertex normal by 'trafo'. Only the direction part is
    // transformed (homogeneous w of the direction is 0, so translations
    // have no effect); the stored w component of the normal is preserved.
    for (auto& vert : vertices)
    {
        const vec4 dir = trafo * make_vec4(make_vec3(vert.normal), 0);
        vert.normal    = make_vec4(make_vec3(dir), vert.normal[3]);
    }
}
template <typename vertex_t, typename index_t>
void TriangleMesh<vertex_t, index_t>::addQuad(vertex_t verts[])
{
    // Append the four corners, then triangulate the quad as (0,1,2) and
    // (0,2,3), keeping the input's winding.
    const int base = vertices.size();
    addVertex(verts[0]);
    addVertex(verts[1]);
    addVertex(verts[2]);
    addVertex(verts[3]);
    faces.emplace_back(base, base + 1, base + 2);
    faces.emplace_back(base, base + 2, base + 3);
}
template <typename vertex_t, typename index_t>
void TriangleMesh<vertex_t, index_t>::addTriangle(vertex_t verts[])
{
    // Append the three corners and one face referencing them in input order.
    const int base = vertices.size();
    addVertex(verts[0]);
    addVertex(verts[1]);
    addVertex(verts[2]);
    faces.emplace_back(base, base + 1, base + 2);
}
template <typename vertex_t, typename index_t>
void TriangleMesh<vertex_t, index_t>::addTriangle(const Triangle& t)
{
    // Wrap the three corner positions into full vertices (homogeneous w = 1)
    // and forward to the array overload, which also creates the face.
    vertex_t corners[3];
    corners[0].position = make_vec4(t.a, 1);
    corners[1].position = make_vec4(t.b, 1);
    corners[2].position = make_vec4(t.c, 1);
    addTriangle(corners);
}
template <typename vertex_t, typename index_t>
void TriangleMesh<vertex_t, index_t>::addQuad(index_t inds[])
{
    // Split the (counter clockwise) quad into the triangles (0,1,2) and (2,3,0).
    faces.emplace_back(inds[0], inds[1], inds[2]);
    faces.emplace_back(inds[2], inds[3], inds[0]);
}
template <typename vertex_t, typename index_t>
void TriangleMesh<vertex_t, index_t>::subdivideFace(int f)
{
    // 1-to-4 subdivision: insert a new vertex on the midpoint of each edge
    // and connect them. Three triangles are appended; the corner triangle
    // at the first original vertex reuses the slot of the input face.
    const Face old = faces[f];

    const auto pa = vertices[old(0)].position;
    const auto pb = vertices[old(1)].position;
    const auto pc = vertices[old(2)].position;

    const int mab = addVertex(vertex_t(vec4((pa + pb) / 2.0f)));  // midpoint edge 0-1
    const int mac = addVertex(vertex_t(vec4((pa + pc) / 2.0f)));  // midpoint edge 0-2
    const int mbc = addVertex(vertex_t(vec4((pb + pc) / 2.0f)));  // midpoint edge 1-2

    faces.emplace_back(old(1), mbc, mab);
    faces.emplace_back(old(2), mac, mbc);
    faces.emplace_back(mab, mbc, mac);
    faces[f] = Face(old(0), mab, mac);
}
template <typename vertex_t, typename index_t>
void TriangleMesh<vertex_t, index_t>::invertFace(int f)
{
    // Reverse the winding of one triangle by exchanging its first and last
    // index; the middle index stays where it is.
    auto& tri = faces[f];
    std::swap(tri(0), tri(2));
}
template <typename vertex_t, typename index_t>
void TriangleMesh<vertex_t, index_t>::invertMesh()
{
    // Flip the winding of every face ...
    for (auto& tri : faces)
    {
        std::swap(tri(0), tri(2));
    }
    // ... and negate all vertex normals so they match the new orientation.
    for (auto& vert : vertices)
    {
        vert.normal = -vert.normal;
    }
}
template <typename vertex_t, typename index_t>
std::vector<Triangle> TriangleMesh<vertex_t, index_t>::toTriangleList() const
{
    // Expand the indexed representation into a flat list of independent
    // triangles (positions only; normals/colors are dropped).
    std::vector<Triangle> list;
    list.reserve(faces.size());
    for (const Face& face : faces)
    {
        Triangle tri;
        tri.a = make_vec3(vertices[face(0)].position);
        tri.b = make_vec3(vertices[face(1)].position);
        tri.c = make_vec3(vertices[face(2)].position);
        list.push_back(tri);
    }
    return list;
}
template <typename vertex_t, typename index_t>
void TriangleMesh<vertex_t, index_t>::addMesh(const TriangleMesh<vertex_t, index_t>& other)
{
    // Appends all vertices and faces of 'other' to this mesh.
    // Face indices of the appended faces are shifted by the previous vertex
    // count so they keep referencing the same positions.
    //
    // Improvement over the previous version: bulk-insert the vertices
    // (one allocation, no per-element copy loop) and reserve the face
    // storage up front.
    const int offset = (int)this->vertices.size();
    this->vertices.insert(this->vertices.end(), other.vertices.begin(), other.vertices.end());

    faces.reserve(faces.size() + other.faces.size());
    for (Face f : other.faces)  // copy on purpose: the indices are rewritten
    {
        f(0) += offset;
        f(1) += offset;
        f(2) += offset;
        this->addFace(f);
    }
}
template <typename vertex_t, typename index_t>
template <typename mesh_vertex_t, typename mesh_index_t>
void TriangleMesh<vertex_t, index_t>::addMesh(const TriangleMesh<mesh_vertex_t, mesh_index_t>& other)
{
    // Appends a mesh with (possibly) different vertex/index types.
    // Requires vertex_t to be implicitly convertible from mesh_vertex_t.
    //
    // Improvement over the previous version: reserve both arrays up front
    // and convert each source vertex once (the old loop copied into a
    // temporary and then copied again into the vector).
    const int offset = (int)this->vertices.size();
    this->vertices.reserve(this->vertices.size() + other.vertices.size());
    for (const auto& v : other.vertices)
    {
        this->vertices.push_back(v);  // implicit mesh_vertex_t -> vertex_t conversion
    }

    faces.reserve(faces.size() + other.faces.size());
    for (auto f : other.faces)  // copy on purpose: the indices are rewritten
    {
        f(0) += offset;
        f(1) += offset;
        f(2) += offset;
        this->addFace(f);
    }
}
template <typename vertex_t, typename index_t>
size_t TriangleMesh<vertex_t, index_t>::size() const
{
    // Approximate memory footprint in bytes: allocated (capacity) storage
    // of both arrays plus the object itself.
    const size_t faceBytes   = faces.capacity() * sizeof(Face);
    const size_t vertexBytes = vertices.capacity() * sizeof(vertex_t);
    return faceBytes + vertexBytes + sizeof(TriangleMesh<vertex_t, index_t>);
}
template <typename vertex_t, typename index_t>
void TriangleMesh<vertex_t, index_t>::free()
{
    // Drop all elements AND return the heap memory (a plain clear() would
    // keep the capacity). Swapping with empty temporaries guarantees the
    // release, which shrink_to_fit only requests.
    std::vector<Face>().swap(faces);
    std::vector<vertex_t>().swap(vertices);
}
template <typename vertex_t, typename index_t>
void TriangleMesh<vertex_t, index_t>::computePerVertexNormal()
{
    // Area-weighted vertex normals: the cross product of two triangle edges
    // has a length proportional to the face area, so summing un-normalized
    // face normals weights larger faces more strongly.
    // The w component of every normal is left intact, because it might be
    // used by the application. (Each pass below could be parallelized.)

    // Pass 1: zero the direction part of all normals.
    for (auto& vert : vertices)
    {
        vert.normal = make_vec4(make_vec3(0), vert.normal[3]);
    }

    // Pass 2: accumulate the (area-scaled) face normal on each corner.
    for (auto& face : faces)
    {
        const vec3 pa = make_vec3(vertices[face(0)].position);
        const vec3 pb = make_vec3(vertices[face(1)].position);
        const vec3 pc = make_vec3(vertices[face(2)].position);
        // Not normalized on purpose: the length encodes the surface area.
        const vec4 weighted = make_vec4(cross(pb - pa, pc - pa), 0);
        vertices[face(0)].normal += weighted;
        vertices[face(1)].normal += weighted;
        vertices[face(2)].normal += weighted;
    }

    // Pass 3: normalize the accumulated directions.
    for (auto& vert : vertices)
    {
        vert.normal = make_vec4(normalize(make_vec3(vert.normal)), vert.normal[3]);
    }
}
template <typename vertex_t, typename index_t>
void TriangleMesh<vertex_t, index_t>::removeUnusedVertices()
{
    // Compacts the vertex array to only those vertices referenced by at
    // least one face and remaps all face indices accordingly. Kept vertices
    // are re-emitted in order of first use by a face.
    //
    // Fix: the inner loop previously reused the outer loop variable name
    // 'i', shadowing it; the corner loop now has its own name.
    std::vector<int> vmap(vertices.size(), -1);  // old index -> new index, -1 = unseen
    auto vcopy = vertices;
    vertices.clear();
    for (auto& f : faces)
    {
        for (int c = 0; c < 3; ++c)
        {
            auto& v = f[c];
            if (vmap[v] == -1)
            {
                // First reference: emit the vertex and remember its new slot.
                vmap[v] = (int)vertices.size();
                vertices.push_back(vcopy[v]);
            }
            v = vmap[v];
        }
    }
}
template <typename vertex_t, typename index_t>
bool TriangleMesh<vertex_t, index_t>::isValid() const
{
    // A mesh is valid when every corner index of every face points at an
    // existing vertex.
    const int numVertices = (int)vertices.size();
    for (const Face& face : faces)
    {
        for (int c = 0; c < 3; ++c)
        {
            if (face(c) < 0 || face(c) >= numVertices) return false;
        }
    }
    return true;
}
template <typename vertex_t, typename index_t>
void TriangleMesh<vertex_t, index_t>::sortVerticesByPosition(double epsilon)
{
    // Sorts the vertices lexicographically by (x, y, z) and rewrites the
    // face indices so they keep referencing the same positions.
    // Components that differ by less than 'epsilon' are treated as equal,
    // which groups near-duplicates next to each other (a prerequisite for
    // removeSubsequentDuplicates()).
    double eps_squared = epsilon * epsilon;
    std::vector<int> tmp_indices(vertices.size());   // new slot -> old index
    std::vector<int> tmp_indices2(vertices.size());  // old index -> new slot
    std::iota(tmp_indices.begin(), tmp_indices.end(), 0);
    std::sort(tmp_indices.begin(), tmp_indices.end(), [&](int a, int b) {
        vec4 p1 = vertices[a].position;
        vec4 p2 = vertices[b].position;
        // Per-component squared differences; components of p1 within
        // epsilon of p2 are snapped to p2 before the comparison.
        // NOTE(review): with this fuzzy equality the comparator is not a
        // strict weak ordering for all inputs -- confirm callers only use
        // it on benign data.
        vec4 p3 = (p1 - p2);
        p3      = p3.array() * p3.array();
        p1[0]   = p3[0] < eps_squared ? p2[0] : p1[0];
        p1[1]   = p3[1] < eps_squared ? p2[1] : p1[1];
        p1[2]   = p3[2] < eps_squared ? p2[2] : p1[2];
        return std::tie(p1[0], p1[1], p1[2]) < std::tie(p2[0], p2[1], p2[2]);
    });
    // Build the reordered vertex array and the inverse permutation.
    std::vector<vertex_t> new_vertices(vertices.size());
    for (int i = 0; i < (int)new_vertices.size(); ++i)
    {
        new_vertices[i]              = vertices[tmp_indices[i]];
        tmp_indices2[tmp_indices[i]] = i;
    }
    // Remap every face to the new vertex slots.
    for (auto& f : faces)
    {
        f(0) = tmp_indices2[f(0)];
        f(1) = tmp_indices2[f(1)];
        f(2) = tmp_indices2[f(2)];
    }
    vertices.swap(new_vertices);
}
template <typename vertex_t, typename index_t>
void TriangleMesh<vertex_t, index_t>::removeSubsequentDuplicates(double epsilon)
{
    // Collapses runs of *adjacent* vertices whose positions are closer than
    // 'epsilon' into one vertex, and remaps the face indices. For a global
    // deduplication, sort with sortVerticesByPosition() first.
    if (vertices.size() <= 1) return;
    double eps_squared = epsilon * epsilon;
    std::vector<int> tmp_indices(vertices.size());    // old index -> new index
    std::vector<bool> valid(vertices.size(), false);  // marks first vertex of each run
    std::vector<vertex_t> new_vertices;
    int currentIdx = -1;
    vec4 currentPos;
    for (int i = 0; i < (int)vertices.size(); ++i)
    {
        auto& p = vertices[i].position;
        // Start a new output vertex when this position is far enough from
        // the last emitted one (the very first vertex always starts a run).
        if (i == 0 || (p - currentPos).squaredNorm() > eps_squared)
        {
            new_vertices.push_back(vertices[i]);
            currentIdx++;
            currentPos = p;
            valid[i] = true;
        }
        tmp_indices[i] = currentIdx;
    }
    // NOTE(review): this pass rewrites each kept slot with the vertex that
    // was already pushed above, so it looks redundant -- confirm before
    // relying on it for anything else.
    for (int i = 0; i < (int)vertices.size(); ++i)
    {
        if (valid[i]) new_vertices[tmp_indices[i]] = vertices[i];
    }
    // Redirect all faces to the surviving representative of each run.
    for (auto& f : faces)
    {
        f(0) = tmp_indices[f(0)];
        f(1) = tmp_indices[f(1)];
        f(2) = tmp_indices[f(2)];
    }
    vertices.swap(new_vertices);
}
template <typename vertex_t, typename index_t>
void TriangleMesh<vertex_t, index_t>::removeDegenerateFaces()
{
    // A face is degenerate when it references the same vertex more than
    // once (a zero-area triangle). Classic erase-remove idiom.
    auto isDegenerate = [](const Face& f) {
        return f(0) == f(1) || f(0) == f(2) || f(1) == f(2);
    };
    faces.erase(std::remove_if(faces.begin(), faces.end(), isDegenerate), faces.end());
}
template <typename vertex_t, typename index_t>
float TriangleMesh<vertex_t, index_t>::distancePointMesh(const vec3& x)
{
    // Brute force: minimum distance from 'x' to any triangle of the mesh.
    // Returns +infinity for a mesh without faces.
    float best = std::numeric_limits<float>::infinity();
    for (const Face& face : faces)
    {
        Triangle tri;
        tri.a = make_vec3(vertices[face(0)].position);
        tri.b = make_vec3(vertices[face(1)].position);
        tri.c = make_vec3(vertices[face(2)].position);
        best  = std::min(best, tri.Distance(x));
    }
    return best;
}
template <typename vertex_t, typename index_t>
std::ostream& operator<<(std::ostream& os, const TriangleMesh<vertex_t, index_t>& dt)
{
    // One-line summary: vertex and face counts.
    return os << "TriangleMesh. V=" << dt.vertices.size() << " F=" << dt.faces.size();
}
template <typename vertex_t, typename index_t>
void saveMeshOff(const TriangleMesh<vertex_t, index_t>& mesh, std::ostream& strm)
{
    // Serializes the mesh in ASCII OFF format: header keyword, the counts
    // line "<#vertices> <#faces> <#edges>" (edges unused, written as 0),
    // one position per line, then one face line per triangle.
    strm << "OFF"
         << "\n";
    strm << mesh.vertices.size() << " " << mesh.faces.size() << " 0"
         << "\n";
    for (const auto& vert : mesh.vertices)
    {
        const auto& p = vert.position;
        strm << p[0] << " " << p[1] << " " << p[2] << "\n";
    }
    for (const auto& face : mesh.faces)
    {
        // Each face line starts with its vertex count (always 3 here).
        strm << "3"
             << " " << face[0] << " " << face[1] << " " << face[2] << "\n";
    }
}
template <typename vertex_t, typename index_t>
void saveMeshOffColor(const TriangleMesh<vertex_t, index_t>& mesh, std::ostream& strm)
{
    // Serializes the mesh in ASCII COFF format (OFF with per-vertex RGBA
    // colors appended to each position line).
    strm << "COFF"
         << "\n";
    // counts line: #vertices #faces #edges (edges unused, written as 0)
    strm << mesh.vertices.size() << " " << mesh.faces.size() << " 0"
         << "\n";
    for (const auto& vert : mesh.vertices)
    {
        const auto& p = vert.position;
        const auto& c = vert.color;
        strm << p[0] << " " << p[1] << " " << p[2] << " ";
        strm << c[0] << " " << c[1] << " " << c[2] << " " << c[3] << "\n";
    }
    for (const auto& face : mesh.faces)
    {
        // Each face line starts with its vertex count (always 3 here).
        strm << "3"
             << " " << face[0] << " " << face[1] << " " << face[2] << "\n";
    }
}
} // namespace Saiga
|
RingSettlementCircuit.h | #ifndef _RINGSETTLEMENTCIRCUIT_H_
#define _RINGSETTLEMENTCIRCUIT_H_
#include "Circuit.h"
#include "../Utils/Constants.h"
#include "../Utils/Data.h"
#include "../Utils/Utils.h"
#include "../Gadgets/MatchingGadgets.h"
#include "../Gadgets/AccountGadgets.h"
#include "../Gadgets/TradingHistoryGadgets.h"
#include "../Gadgets/MathGadgets.h"
#include "ethsnarks.hpp"
#include "utils.hpp"
#include "gadgets/subadd.hpp"
using namespace ethsnarks;
namespace Loopring
{
// Transforms the DA data for ring settlements
class TransformRingSettlementDataGadget : public GadgetT
{
public:
    // Number of data-availability bits per ring settlement (21 bytes).
    const unsigned int ringSize = 21 * 8;

    VariableArrayT data;        // raw per-ring DA bits, numRings * ringSize wide
    Bitstream transformedData;  // field-grouped output bits
    unsigned int numRings;

    std::vector<XorArrayGadget> xorGadgets;

    TransformRingSettlementDataGadget(
        ProtoboardT& pb,
        const std::string& prefix
    ) :
        GadgetT(pb, prefix)
    {
        numRings = 0;
    }

    // Returns the transformed DA bits as one flat bit array.
    VariableArrayT result()
    {
        return flatten(transformedData.data);
    }

    void generate_r1cs_witness()
    {
        // No-op while the XOR compression below is disabled (xorGadgets stays empty).
        for (unsigned int i = 0; i < xorGadgets.size(); i++)
        {
            xorGadgets[i].generate_r1cs_witness();
        }
    }

    // Reorders 'data' (numRings * ringSize bits) so that the same field of
    // all rings is stored consecutively, which helps off-chain compression.
    void generate_r1cs_constraints(unsigned int numRings, const VariableArrayT& data)
    {
        this->numRings = numRings;
        this->data = data;
        assert(numRings > 0);
        assert(numRings * ringSize == data.size());

        // XOR compress
        // (currently a pass-through: the XOR delta-compression of the first
        // 5 bytes of consecutive rings is commented out below)
        Bitstream compressedData;
        compressedData.add(subArray(data, 0, numRings * ringSize));
        /*for (unsigned int i = 1; i < numRings; i++)
        {
            unsigned int previousRingStart = (i - 1) * ringSize;
            unsigned int ringStart = i * ringSize;

            xorGadgets.emplace_back(pb, subArray(data, previousRingStart, 5 * 8),
                                        subArray(data, ringStart, 5 * 8),
                                        std::string("xor_") + std::to_string(i));
            xorGadgets.back().generate_r1cs_constraints();
            compressedData.add(xorGadgets.back().result());
            compressedData.add(subArray(data, ringStart + 5 * 8, ringSize - 5 * 8));
        }*/

        // Transform: for every field range, emit that range of every ring.
        struct Range
        {
            unsigned int offset;  // bit offset within one ring
            unsigned int length;  // field length in bits
        };
        std::vector<std::vector<Range>> ranges;
        ranges.push_back({{0, 32}});    // orderA.tradeHistoryData + orderB.tradeHistoryData
        ranges.push_back({{32, 48}});   // orderA.accountID + orderB.accountID
        ranges.push_back({{80, 24}});   // orderA.tokenS + orderB.tokenS
        ranges.push_back({{104, 48}});  // orderA.fillS + orderB.fillS
        ranges.push_back({{152, 8}});   // orderA.data
        ranges.push_back({{160, 8}});   // orderB.data
        for (const std::vector<Range>& subRanges : ranges)
        {
            for (unsigned int i = 0; i < numRings; i++)
            {
                for (const Range& subRange : subRanges)
                {
                    unsigned int ringStart = i * ringSize;
                    transformedData.add(subArray(flatten(compressedData.data), ringStart + subRange.offset, subRange.length));
                }
            }
        }
    }
};
class RingSettlementGadget : public GadgetT
{
public:
const Constants& constants;
// Orders
OrderGadget orderA;
OrderGadget orderB;
// Balances
DynamicVariableGadget balanceS_A;
DynamicVariableGadget balanceB_A;
DynamicVariableGadget balanceS_B;
DynamicVariableGadget balanceB_B;
DynamicVariableGadget balanceA_P;
DynamicVariableGadget balanceB_P;
DynamicVariableGadget balanceA_O;
DynamicVariableGadget balanceB_O;
// Initial trading history roots
const VariableT tradingHistoryRootA_O;
const VariableT tradingHistoryRootB_O;
// Order fills
FloatGadget fillS_A;
FloatGadget fillS_B;
// Match orders
OrderMatchingGadget orderMatching;
// Calculate fees
FeeCalculatorGadget feeCalculatorA;
FeeCalculatorGadget feeCalculatorB;
/* Token Transfers */
// Actual trade
TransferGadget fillBB_from_balanceSA_to_balanceBB;
TransferGadget fillSB_from_balanceSB_to_balanceBA;
// Fees
TransferGadget feeA_from_balanceBA_to_balanceAO;
TransferGadget feeB_from_balanceBB_to_balanceBO;
// Rebates
TransferGadget rebateA_from_balanceAO_to_balanceBA;
TransferGadget rebateB_from_balanceBO_to_balanceBB;
// Protocol fees
TransferGadget protocolFeeA_from_balanceAO_to_balanceAP;
TransferGadget protocolFeeB_from_balanceBO_to_balanceBP;
// Update UserA
UpdateTradeHistoryGadget updateTradeHistory_A;
UpdateBalanceGadget updateBalanceS_A;
UpdateBalanceGadget updateBalanceB_A;
UpdateAccountGadget updateAccount_A;
// Update UserB
UpdateTradeHistoryGadget updateTradeHistory_B;
UpdateBalanceGadget updateBalanceS_B;
UpdateBalanceGadget updateBalanceB_B;
UpdateAccountGadget updateAccount_B;
// Update Protocol pool
UpdateBalanceGadget updateBalanceA_P;
UpdateBalanceGadget updateBalanceB_P;
// Update Operator
UpdateBalanceGadget updateBalanceA_O;
UpdateBalanceGadget updateBalanceB_O;
RingSettlementGadget(
ProtoboardT& pb,
const jubjub::Params& params,
const Constants& _constants,
const VariableT& exchangeID,
const VariableT& accountsRoot,
const VariableT& timestamp,
const VariableT& protocolTakerFeeBips,
const VariableT& protocolMakerFeeBips,
const VariableT& protocolBalancesRoot,
const VariableT& operatorBalancesRoot,
const std::string& prefix
) :
GadgetT(pb, prefix),
constants(_constants),
// Orders
orderA(pb, params, constants, exchangeID, FMT(prefix, ".orderA")),
orderB(pb, params, constants, exchangeID, FMT(prefix, ".orderB")),
// Balances
balanceS_A(pb, orderA.balanceSBefore.balance, FMT(prefix, ".balanceS_A")),
balanceB_A(pb, orderA.balanceBBefore.balance, FMT(prefix, ".balanceB_A")),
balanceS_B(pb, orderB.balanceSBefore.balance, FMT(prefix, ".balanceS_B")),
balanceB_B(pb, orderB.balanceBBefore.balance, FMT(prefix, ".balanceB_B")),
balanceA_P(pb, FMT(prefix, ".balanceA_P")),
balanceB_P(pb, FMT(prefix, ".balanceB_P")),
balanceA_O(pb, FMT(prefix, ".balanceA_O")),
balanceB_O(pb, FMT(prefix, ".balanceB_O")),
// Initial trading history roots
tradingHistoryRootA_O(make_variable(pb, FMT(prefix, ".tradingHistoryRootA_O"))),
tradingHistoryRootB_O(make_variable(pb, FMT(prefix, ".tradingHistoryRootB_O"))),
// Order fills
fillS_A(pb, constants, Float24Encoding, FMT(prefix, ".fillS_A")),
fillS_B(pb, constants, Float24Encoding, FMT(prefix, ".fillS_B")),
// Match orders
orderMatching(pb, constants, timestamp, orderA, orderB, fillS_A.value(), fillS_B.value(), FMT(prefix, ".orderMatching")),
// Calculate fees
feeCalculatorA(pb, constants, fillS_B.value(), protocolTakerFeeBips, orderA.feeBips.packed, orderA.rebateBips.packed, FMT(prefix, ".feeCalculatorA")),
feeCalculatorB(pb, constants, fillS_A.value(), protocolMakerFeeBips, orderB.feeBips.packed, orderB.rebateBips.packed, FMT(prefix, ".feeCalculatorB")),
/* Token Transfers */
// Actual trade
fillBB_from_balanceSA_to_balanceBB(pb, balanceS_A, balanceB_B, fillS_A.value(), FMT(prefix, ".fillBB_from_balanceSA_to_balanceBB")),
fillSB_from_balanceSB_to_balanceBA(pb, balanceS_B, balanceB_A, fillS_B.value(), FMT(prefix, ".fillSB_from_balanceSB_to_balanceBA")),
// Fees
feeA_from_balanceBA_to_balanceAO(pb, balanceB_A, balanceA_O, feeCalculatorA.getFee(), FMT(prefix, ".feeA_from_balanceBA_to_balanceAO")),
feeB_from_balanceBB_to_balanceBO(pb, balanceB_B, balanceB_O, feeCalculatorB.getFee(), FMT(prefix, ".feeB_from_balanceBB_to_balanceBO")),
// Rebates
rebateA_from_balanceAO_to_balanceBA(pb, balanceA_O, balanceB_A, feeCalculatorA.getRebate(), FMT(prefix, ".rebateA_from_balanceAO_to_balanceBA")),
rebateB_from_balanceBO_to_balanceBB(pb, balanceB_O, balanceB_B, feeCalculatorB.getRebate(), FMT(prefix, ".rebateB_from_balanceBO_to_balanceBB")),
// Protocol fees
protocolFeeA_from_balanceAO_to_balanceAP(pb, balanceA_O, balanceA_P, feeCalculatorA.getProtocolFee(), FMT(prefix, ".protocolFeeA_from_balanceAO_to_balanceAP")),
protocolFeeB_from_balanceBO_to_balanceBP(pb, balanceB_O, balanceB_P, feeCalculatorB.getProtocolFee(), FMT(prefix, ".protocolFeeB_from_balanceBO_to_balanceBP")),
// Update UserA
updateTradeHistory_A(pb, orderA.balanceSBefore.tradingHistory, subArray(orderA.orderID.bits, 0, NUM_BITS_TRADING_HISTORY),
{orderA.tradeHistoryBefore.filled, orderA.tradeHistoryBefore.orderID},
{orderMatching.getFilledAfter_A(), orderA.orderID.packed},
FMT(prefix, ".updateTradeHistory_A")),
updateBalanceS_A(pb, orderA.accountBefore.balancesRoot, orderA.tokenS.bits,
{balanceS_A.front(), orderA.balanceSBefore.tradingHistory},
{balanceS_A.back(), updateTradeHistory_A.result()},
FMT(prefix, ".updateBalanceS_A")),
updateBalanceB_A(pb, updateBalanceS_A.result(), orderA.tokenB.bits,
{balanceB_A.front(), orderA.balanceBBefore.tradingHistory},
{balanceB_A.back(), orderA.balanceBBefore.tradingHistory},
FMT(prefix, ".updateBalanceB_A")),
updateAccount_A(pb, accountsRoot, orderA.accountID.bits,
{orderA.accountBefore.publicKey.x, orderA.accountBefore.publicKey.y, orderA.accountBefore.nonce, orderA.accountBefore.balancesRoot},
{orderA.accountBefore.publicKey.x, orderA.accountBefore.publicKey.y, orderA.accountBefore.nonce, updateBalanceB_A.result()},
FMT(prefix, ".updateAccount_A")),
// Update UserB
updateTradeHistory_B(pb, orderB.balanceSBefore.tradingHistory, subArray(orderB.orderID.bits, 0, NUM_BITS_TRADING_HISTORY),
{orderB.tradeHistoryBefore.filled, orderB.tradeHistoryBefore.orderID},
{orderMatching.getFilledAfter_B(), orderB.orderID.packed},
FMT(prefix, ".updateTradeHistory_B")),
updateBalanceS_B(pb, orderB.accountBefore.balancesRoot, orderB.tokenS.bits,
{balanceS_B.front(), orderB.balanceSBefore.tradingHistory},
{balanceS_B.back(), updateTradeHistory_B.result()},
FMT(prefix, ".updateBalanceS_B")),
updateBalanceB_B(pb, updateBalanceS_B.result(), orderB.tokenB.bits,
{balanceB_B.front(), orderB.balanceBBefore.tradingHistory},
{balanceB_B.back(), orderB.balanceBBefore.tradingHistory},
FMT(prefix, ".updateBalanceB_B")),
updateAccount_B(pb, updateAccount_A.result(), orderB.accountID.bits,
{orderB.accountBefore.publicKey.x, orderB.accountBefore.publicKey.y, orderB.accountBefore.nonce, orderB.accountBefore.balancesRoot},
{orderB.accountBefore.publicKey.x, orderB.accountBefore.publicKey.y, orderB.accountBefore.nonce, updateBalanceB_B.result()},
FMT(prefix, ".updateAccount_B")),
// Update Protocol pool
updateBalanceA_P(pb, protocolBalancesRoot, orderA.tokenB.bits,
{balanceA_P.front(), constants.emptyTradeHistory},
{balanceA_P.back(), constants.emptyTradeHistory},
FMT(prefix, ".updateBalanceA_P")),
updateBalanceB_P(pb, updateBalanceA_P.result(), orderB.tokenB.bits,
{balanceB_P.front(), constants.emptyTradeHistory},
{balanceB_P.back(), constants.emptyTradeHistory},
FMT(prefix, ".updateBalanceB_P")),
// Update Operator
updateBalanceA_O(pb, operatorBalancesRoot, orderA.tokenB.bits,
{balanceA_O.front(), tradingHistoryRootA_O},
{balanceA_O.back(), tradingHistoryRootA_O},
FMT(prefix, ".updateBalanceA_O")),
updateBalanceB_O(pb, updateBalanceA_O.result(), orderB.tokenB.bits,
{balanceB_O.front(), tradingHistoryRootB_O},
{balanceB_O.back(), tradingHistoryRootB_O},
FMT(prefix, ".updateBalanceB_O"))
{
}
void generate_r1cs_witness(const RingSettlement& ringSettlement)
{
// Orders
orderA.generate_r1cs_witness(ringSettlement.ring.orderA,
ringSettlement.accountUpdate_A.before,
ringSettlement.balanceUpdateS_A.before,
ringSettlement.balanceUpdateB_A.before,
ringSettlement.tradeHistoryUpdate_A.before);
orderB.generate_r1cs_witness(ringSettlement.ring.orderB,
ringSettlement.accountUpdate_B.before,
ringSettlement.balanceUpdateS_B.before,
ringSettlement.balanceUpdateB_B.before,
ringSettlement.tradeHistoryUpdate_B.before);
// Balances before
balanceA_P.generate_r1cs_witness(ringSettlement.balanceUpdateA_P.before.balance);
balanceB_P.generate_r1cs_witness(ringSettlement.balanceUpdateB_P.before.balance);
balanceA_O.generate_r1cs_witness(ringSettlement.balanceUpdateA_O.before.balance);
balanceB_O.generate_r1cs_witness(ringSettlement.balanceUpdateB_O.before.balance);
// Trading history roots before
pb.val(tradingHistoryRootA_O) = ringSettlement.balanceUpdateA_O.before.tradingHistoryRoot;
pb.val(tradingHistoryRootB_O) = ringSettlement.balanceUpdateB_O.before.tradingHistoryRoot;
// Order fills
fillS_A.generate_r1cs_witness(ringSettlement.ring.fillS_A);
fillS_B.generate_r1cs_witness(ringSettlement.ring.fillS_B);
// Match orders
orderMatching.generate_r1cs_witness();
// Calculate fees
feeCalculatorA.generate_r1cs_witness();
feeCalculatorB.generate_r1cs_witness();
/* Token Transfers */
// Actual trade
fillBB_from_balanceSA_to_balanceBB.generate_r1cs_witness();
fillSB_from_balanceSB_to_balanceBA.generate_r1cs_witness();
// Fees
feeA_from_balanceBA_to_balanceAO.generate_r1cs_witness();
feeB_from_balanceBB_to_balanceBO.generate_r1cs_witness();
// Rebates
rebateA_from_balanceAO_to_balanceBA.generate_r1cs_witness();
rebateB_from_balanceBO_to_balanceBB.generate_r1cs_witness();
// Protocol fees
protocolFeeA_from_balanceAO_to_balanceAP.generate_r1cs_witness();
protocolFeeB_from_balanceBO_to_balanceBP.generate_r1cs_witness();
// Update UserA
updateTradeHistory_A.generate_r1cs_witness(ringSettlement.tradeHistoryUpdate_A.proof);
updateBalanceS_A.generate_r1cs_witness(ringSettlement.balanceUpdateS_A.proof);
updateBalanceB_A.generate_r1cs_witness(ringSettlement.balanceUpdateB_A.proof);
updateAccount_A.generate_r1cs_witness(ringSettlement.accountUpdate_A.proof);
// Update UserB
updateTradeHistory_B.generate_r1cs_witness(ringSettlement.tradeHistoryUpdate_B.proof);
updateBalanceS_B.generate_r1cs_witness(ringSettlement.balanceUpdateS_B.proof);
updateBalanceB_B.generate_r1cs_witness(ringSettlement.balanceUpdateB_B.proof);
updateAccount_B.generate_r1cs_witness(ringSettlement.accountUpdate_B.proof);
// Update Protocol pool
updateBalanceA_P.generate_r1cs_witness(ringSettlement.balanceUpdateA_P.proof);
updateBalanceB_P.generate_r1cs_witness(ringSettlement.balanceUpdateB_P.proof);
// Update Operator
updateBalanceA_O.generate_r1cs_witness(ringSettlement.balanceUpdateA_O.proof);
updateBalanceB_O.generate_r1cs_witness(ringSettlement.balanceUpdateB_O.proof);
}
void generate_r1cs_constraints()
{
// Orders
orderA.generate_r1cs_constraints();
orderB.generate_r1cs_constraints();
// Order fills
fillS_A.generate_r1cs_constraints();
fillS_B.generate_r1cs_constraints();
// Match orders
orderMatching.generate_r1cs_constraints();
// Calculate fees
feeCalculatorA.generate_r1cs_constraints();
feeCalculatorB.generate_r1cs_constraints();
/* Token Transfers */
// Actual trade
fillBB_from_balanceSA_to_balanceBB.generate_r1cs_constraints();
fillSB_from_balanceSB_to_balanceBA.generate_r1cs_constraints();
// Fees
feeA_from_balanceBA_to_balanceAO.generate_r1cs_constraints();
feeB_from_balanceBB_to_balanceBO.generate_r1cs_constraints();
// Rebates
rebateA_from_balanceAO_to_balanceBA.generate_r1cs_constraints();
rebateB_from_balanceBO_to_balanceBB.generate_r1cs_constraints();
// Protocol fees
protocolFeeA_from_balanceAO_to_balanceAP.generate_r1cs_constraints();
protocolFeeB_from_balanceBO_to_balanceBP.generate_r1cs_constraints();
// Update UserA
updateTradeHistory_A.generate_r1cs_constraints();
updateBalanceS_A.generate_r1cs_constraints();
updateBalanceB_A.generate_r1cs_constraints();
updateAccount_A.generate_r1cs_constraints();
// Update UserB
updateTradeHistory_B.generate_r1cs_constraints();
updateBalanceS_B.generate_r1cs_constraints();
updateBalanceB_B.generate_r1cs_constraints();
updateAccount_B.generate_r1cs_constraints();
// Update Protocol fee pool
updateBalanceA_P.generate_r1cs_constraints();
updateBalanceB_P.generate_r1cs_constraints();
// Update Operator
updateBalanceA_O.generate_r1cs_constraints();
updateBalanceB_O.generate_r1cs_constraints();
}
// Serialize this ring's on-chain data-availability record as bit arrays.
// Layout per order: 1 padding zero bit, 1 trade-history-overwrite bit,
// the orderID truncated to NUM_BITS_TRADING_HISTORY bits; then both
// account IDs, both tokens (2 zero padding bits each), both fill
// amounts, and per order: buy flag, rebate flag, feeOrRebateBips.
const std::vector<VariableArrayT> getPublicData() const
{
return
{
VariableArrayT(1, constants.zero), VariableArrayT(1, orderA.tradeHistory.getOverwrite()), subArray(orderA.orderID.bits, 0, NUM_BITS_TRADING_HISTORY),
VariableArrayT(1, constants.zero), VariableArrayT(1, orderB.tradeHistory.getOverwrite()), subArray(orderB.orderID.bits, 0, NUM_BITS_TRADING_HISTORY),
orderA.accountID.bits,
orderB.accountID.bits,
VariableArrayT(2, constants.zero), orderA.tokenS.bits,
VariableArrayT(2, constants.zero), orderB.tokenS.bits,
fillS_A.bits(),
fillS_B.bits(),
orderA.buy.bits, VariableArrayT(1, orderA.hasRebate()), orderA.feeOrRebateBips.bits,
orderB.buy.bits, VariableArrayT(1, orderB.hasRebate()), orderB.feeOrRebateBips.bits,
};
}
// Accounts Merkle root after both user account updates (B is applied last).
const VariableT& getNewAccountsRoot() const
{
return updateAccount_B.result();
}
// Protocol-pool balances root after both protocol fee deposits (B last).
const VariableT& getNewProtocolBalancesRoot() const
{
return updateBalanceB_P.result();
}
// Operator balances root after both fee/rebate settlements (B last).
const VariableT& getNewOperatorBalancesRoot() const
{
return updateBalanceB_O.result();
}
};
// Top-level circuit proving a block of `numRings` ring settlements (trades).
// Each ring consumes the Merkle roots produced by the previous ring; after
// the last ring the protocol fee pool account and the operator account are
// updated, the public data is committed, and the operator's signature over
// (publicInput, nonce) is verified.
class RingSettlementCircuit : public Circuit
{
public:
PublicDataGadget publicData;
Constants constants;
jubjub::Params params;
// State
AccountGadget accountBefore_O;
AccountGadget accountBefore_P;
// Inputs
DualVariableGadget exchangeID;
DualVariableGadget merkleRootBefore;
DualVariableGadget merkleRootAfter;
DualVariableGadget timestamp;
DualVariableGadget protocolTakerFeeBips;
DualVariableGadget protocolMakerFeeBips;
DualVariableGadget operatorAccountID;
// Increment the nonce of the Operator
AddGadget nonce_after;
// Transform the ring data
TransformRingSettlementDataGadget transformData;
// Signature
Poseidon_gadget_T<3, 1, 6, 51, 2, 1> hash;
SignatureVerifier signatureVerifier;
// Ring settlements
bool onchainDataAvailability;
unsigned int numRings;
std::vector<RingSettlementGadget> ringSettlements;
// Accumulated per-ring on-chain data (member name typo "Availabity" kept as-is)
Bitstream dataAvailabityData;
// Update Protocol pool
std::unique_ptr<UpdateAccountGadget> updateAccount_P;
// Update Operator
std::unique_ptr<UpdateAccountGadget> updateAccount_O;
RingSettlementCircuit(ProtoboardT& pb, const std::string& prefix) :
Circuit(pb, prefix),
publicData(pb, FMT(prefix, ".publicData")),
constants(pb, FMT(prefix, ".constants")),
// State
accountBefore_O(pb, FMT(prefix, ".accountBefore_O")),
accountBefore_P(pb, FMT(prefix, ".accountBefore_P")),
// Inputs
exchangeID(pb, NUM_BITS_EXCHANGE_ID, FMT(prefix, ".exchangeID")),
merkleRootBefore(pb, 256, FMT(prefix, ".merkleRootBefore")),
merkleRootAfter(pb, 256, FMT(prefix, ".merkleRootAfter")),
timestamp(pb, NUM_BITS_TIMESTAMP, FMT(prefix, ".timestamp")),
protocolTakerFeeBips(pb, NUM_BITS_PROTOCOL_FEE_BIPS, FMT(prefix, ".protocolTakerFeeBips")),
protocolMakerFeeBips(pb, NUM_BITS_PROTOCOL_FEE_BIPS, FMT(prefix, ".protocolMakerFeeBips")),
operatorAccountID(pb, NUM_BITS_ACCOUNT, FMT(prefix, ".operatorAccountID")),
// Increment the nonce of the Operator
nonce_after(pb, accountBefore_O.nonce, constants.one, NUM_BITS_NONCE, FMT(prefix, ".nonce_after")),
// Transform the ring data
transformData(pb, FMT(prefix, ".transformData")),
// Signature: operator signs the Poseidon hash of (publicInput, nonce)
hash(pb, var_array({
publicData.publicInput,
accountBefore_O.nonce
}), FMT(this->annotation_prefix, ".hash")),
signatureVerifier(pb, params, constants, accountBefore_O.publicKey, hash.result(), FMT(prefix, ".signatureVerifier"))
{
}
// Builds all constraints for a block of `blockSize` rings.
// NOTE(review): assumes blockSize >= 1 — ringSettlements.back() below is
// UB for an empty block; confirm callers never pass blockSize == 0.
void generateConstraints(bool onchainDataAvailability, unsigned int blockSize) override
{
this->onchainDataAvailability = onchainDataAvailability;
this->numRings = blockSize;
constants.generate_r1cs_constraints();
// Inputs
exchangeID.generate_r1cs_constraints(true);
merkleRootBefore.generate_r1cs_constraints(true);
merkleRootAfter.generate_r1cs_constraints(true);
timestamp.generate_r1cs_constraints(true);
protocolTakerFeeBips.generate_r1cs_constraints(true);
protocolMakerFeeBips.generate_r1cs_constraints(true);
operatorAccountID.generate_r1cs_constraints(true);
// Increment the nonce of the Operator
nonce_after.generate_r1cs_constraints();
// Ring settlements
// reserve() is required: the loop reads ringSettlements.back() while
// emplacing, so reallocation would invalidate the referenced roots.
ringSettlements.reserve(numRings);
for (size_t j = 0; j < numRings; j++)
{
// Chain the Merkle roots: each ring starts from the previous ring's outputs.
const VariableT ringAccountsRoot = (j == 0) ? merkleRootBefore.packed : ringSettlements.back().getNewAccountsRoot();
const VariableT& ringProtocolBalancesRoot = (j == 0) ? accountBefore_P.balancesRoot : ringSettlements.back().getNewProtocolBalancesRoot();
const VariableT& ringOperatorBalancesRoot = (j == 0) ? accountBefore_O.balancesRoot : ringSettlements.back().getNewOperatorBalancesRoot();
ringSettlements.emplace_back(
pb,
params,
constants,
exchangeID.packed,
ringAccountsRoot,
timestamp.packed,
protocolTakerFeeBips.packed,
protocolMakerFeeBips.packed,
ringProtocolBalancesRoot,
ringOperatorBalancesRoot,
std::string("trade_") + std::to_string(j)
);
ringSettlements.back().generate_r1cs_constraints();
if (onchainDataAvailability)
{
// Store data from ring settlement
dataAvailabityData.add(ringSettlements.back().getPublicData());
}
}
// Update Protocol pool
updateAccount_P.reset(new UpdateAccountGadget(pb, ringSettlements.back().getNewAccountsRoot(), constants.zeroAccount,
{accountBefore_P.publicKey.x, accountBefore_P.publicKey.y, accountBefore_P.nonce, accountBefore_P.balancesRoot},
{accountBefore_P.publicKey.x, accountBefore_P.publicKey.y, accountBefore_P.nonce, ringSettlements.back().getNewProtocolBalancesRoot()},
FMT(annotation_prefix, ".updateAccount_P")));
updateAccount_P->generate_r1cs_constraints();
// Update Operator
updateAccount_O.reset(new UpdateAccountGadget(pb, updateAccount_P->result(), operatorAccountID.bits,
{accountBefore_O.publicKey.x, accountBefore_O.publicKey.y, accountBefore_O.nonce, accountBefore_O.balancesRoot},
{accountBefore_O.publicKey.x, accountBefore_O.publicKey.y, nonce_after.result(), ringSettlements.back().getNewOperatorBalancesRoot()},
FMT(annotation_prefix, ".updateAccount_O")));
updateAccount_O->generate_r1cs_constraints();
// Public data
publicData.add(exchangeID.bits);
publicData.add(merkleRootBefore.bits);
publicData.add(merkleRootAfter.bits);
publicData.add(timestamp.bits);
publicData.add(protocolTakerFeeBips.bits);
publicData.add(protocolMakerFeeBips.bits);
if (onchainDataAvailability)
{
publicData.add(operatorAccountID.bits);
// Transform the ring data
transformData.generate_r1cs_constraints(numRings, flattenReverse(dataAvailabityData.data));
publicData.add(reverse(transformData.result()));
}
publicData.generate_r1cs_constraints();
// Signature
hash.generate_r1cs_constraints();
signatureVerifier.generate_r1cs_constraints();
// Check the new merkle root
requireEqual(pb, updateAccount_O->result(), merkleRootAfter.packed, "newMerkleRoot");
}
// Fills in the witness for a whole block; returns false on a size mismatch.
bool generateWitness(const RingSettlementBlock& block)
{
if (block.ringSettlements.size() != numRings)
{
std::cout << "Invalid number of rings: " << block.ringSettlements.size() << std::endl;
return false;
}
constants.generate_r1cs_witness();
// State
accountBefore_O.generate_r1cs_witness(block.accountUpdate_O.before);
accountBefore_P.generate_r1cs_witness(block.accountUpdate_P.before);
// Inputs
exchangeID.generate_r1cs_witness(pb, block.exchangeID);
merkleRootBefore.generate_r1cs_witness(pb, block.merkleRootBefore);
merkleRootAfter.generate_r1cs_witness(pb, block.merkleRootAfter);
timestamp.generate_r1cs_witness(pb, block.timestamp);
protocolTakerFeeBips.generate_r1cs_witness(pb, block.protocolTakerFeeBips);
protocolMakerFeeBips.generate_r1cs_witness(pb, block.protocolMakerFeeBips);
operatorAccountID.generate_r1cs_witness(pb, block.operatorAccountID);
// Increment the nonce of the Operator
nonce_after.generate_r1cs_witness();
// Ring settlements
// NOTE(review): rings are witnessed in parallel; this assumes each ring
// writes a disjoint set of protoboard variables — confirm before changing.
#ifdef MULTICORE
#pragma omp parallel for
#endif
for(unsigned int i = 0; i < block.ringSettlements.size(); i++)
{
ringSettlements[i].generate_r1cs_witness(block.ringSettlements[i]);
}
// Update Protocol pool
updateAccount_P->generate_r1cs_witness(block.accountUpdate_P.proof);
// Update Operator
updateAccount_O->generate_r1cs_witness(block.accountUpdate_O.proof);
// Transform the ring data
if (onchainDataAvailability)
{
transformData.generate_r1cs_witness();
}
// Public data
publicData.generate_r1cs_witness();
// Signature
hash.generate_r1cs_witness();
signatureVerifier.generate_r1cs_witness(block.signature);
return true;
}
bool generateWitness(const json& input) override
{
return generateWitness(input.get<Loopring::RingSettlementBlock>());
}
BlockType getBlockType() override
{
return BlockType::RingSettlement;
}
unsigned int getBlockSize() override
{
return numRings;
}
void printInfo() override
{
std::cout << pb.num_constraints() << " constraints (" << (pb.num_constraints() / numRings) << "/ring)" << std::endl;
}
};
}
#endif
|
sidh_vow_base.c | #include <assert.h>
#include "sidh_vow_base.h"
#include "types/triples.h"
#include "types/state.h"
#include "curve_math.h"
#include "bintree.h"
#include "triples.h"
// Functions for swig interface
#include "swig_helpers.c"
#include "state.c"
/* Initializations */
/* Allocate a zero-initialized state of nwords_state digits.
 * NOTE(review): the calloc result is not checked; callers assume
 * allocation always succeeds — confirm that is acceptable here. */
static st_t init_st(uint64_t nwords_state)
{
st_t s;
s.words = calloc(nwords_state, sizeof(digit_t));
return s;
}
/* Release a state's word buffer.
 * Improvement: null the pointer afterwards so an accidental double
 * free_st() (or a stale use of s->words) fails predictably instead of
 * corrupting the heap; free(NULL) is defined to be a no-op. */
static void free_st(st_t *s)
{
free(s->words);
s->words = NULL;
}
/* Write A (lenA words) followed by B (lenB words) into cat starting at
 * index 1. Slot cat[0] is deliberately left untouched: the caller
 * (OptStrat) stores the strategy split point there before calling. */
static void ConcatArray(unsigned long *cat, unsigned long *A, unsigned long lenA, unsigned long *B, unsigned long lenB)
{
unsigned long k;
for (k = 0; k < lenA; k++)
cat[k + 1] = A[k];
for (k = 0; k < lenB; k++)
cat[lenA + 1 + k] = B[k];
}
/* Dynamic program computing an optimal isogeny-walk strategy for n
 * leaves, where p and q weight the two step types. Writes the strategy
 * (sequence of split points) to strat and returns its length.
 * Caller must ensure n <= 250. Ties pick the smallest split point,
 * matching the original running-minimum scan. */
static unsigned long OptStrat(unsigned long *strat, const unsigned long n, const double p, const double q)
{
/* Maximum size of strategy = 250 */
double cost[250], splitCost[250];
unsigned long S[250][250];
unsigned long lens[250];
unsigned int i, j, best;
/* For n < 2 the loop below never runs and the (empty) base row is used. */
const unsigned long top = (n < 2) ? 1 : n;
lens[1] = 0;
S[1][0] = 0;
cost[1] = 0;
for (i = 2; i <= n; i++) {
/* Cost of splitting an i-leaf strategy after b leaves, for every b. */
for (j = 1; j < i; j++)
splitCost[j] = cost[i - j] + cost[j] + j * p + (i - j) * q;
/* Cheapest split; strict '<' keeps the smallest b on ties. */
best = 1;
for (j = 2; j < i; j++) {
if (splitCost[j] < splitCost[best])
best = j;
}
/* S[i] = [best] ++ S[i - best] ++ S[best] (concatenation inlined). */
S[i][0] = best;
for (j = 0; j < lens[i - best]; j++)
S[i][j + 1] = S[i - best][j];
for (j = 0; j < lens[best]; j++)
S[i][lens[i - best] + 1 + j] = S[best][j];
cost[i] = splitCost[best];
lens[i] = 1 + lens[i - best] + lens[best];
}
for (i = 0; i < lens[top]; i++)
strat[i] = S[top][i];
return lens[top];
}
/* Montgomery x-only doubling Q = [2]P using an affine curve constant
 * a24 (presumably (A+2)/4 — confirm against callers).
 * Original comments referenced C24/A24plus from the projective routine;
 * they are corrected here to match the operations actually performed. */
static void xDBL_affine(const point_proj_t P, point_proj_t Q, const f2elm_t a24)
{
f2elm_t t0, t1;
fp2sub(P->X, P->Z, t0); // t0 = X1-Z1
fp2add(P->X, P->Z, t1); // t1 = X1+Z1
fp2sqr_mont(t0, t0); // t0 = (X1-Z1)^2
fp2sqr_mont(t1, t1); // t1 = (X1+Z1)^2
fp2mul_mont(t0, t1, Q->X); // X2 = (X1-Z1)^2*(X1+Z1)^2
fp2sub(t1, t0, t1); // t1 = (X1+Z1)^2-(X1-Z1)^2 = 4*X1*Z1
fp2mul_mont(a24, t1, Q->Z); // Z2 = a24*4*X1*Z1
fp2add(Q->Z, t0, Q->Z); // Z2 = a24*4*X1*Z1 + (X1-Z1)^2
fp2mul_mont(Q->Z, t1, Q->Z); // Z2 = [a24*4*X1*Z1 + (X1-Z1)^2]*4*X1*Z1
}
static void xDBLe_affine(const point_proj_t P, point_proj_t Q, const f2elm_t a24, const int e)
{ // Computes [2^e](X:Z) on a Montgomery curve via e repeated doublings.
// Input: projective Montgomery x-coordinates P = (XP:ZP), such that xP=XP/ZP, and the affine constant a24.
// Output: projective Montgomery x-coordinates Q <- (2^e)*P.
// Copy both fp2 coordinates (X and Z) of P into Q, then double in place.
copy_words((digit_t *)P, (digit_t *)Q, 2 * 2 * NWORDS_FIELD);
for (int i = 0; i < e; i++)
xDBL_affine(Q, Q, a24);
}
/* 4-isogeny whose kernel point has X == Z.
 * Outputs the new curve as A24 = (a24 : a24 - 1); note fpsub (not
 * fp2sub) subtracts Montgomery_one from the real component only. */
static void GetFourIsogenyWithKernelXeqZ(point_proj_t A24, const f2elm_t a24)
{
fp2copy(a24, A24->X);
fp2copy(a24, A24->Z);
fpsub((digit_t *)A24->Z, (digit_t *)&Montgomery_one, (digit_t *)A24->Z);
}
/* Push the point S through the 4-isogeny with kernel X == Z (in place).
 * Step comments track the fp2 values actually computed. */
static void EvalFourIsogenyWithKernelXeqZ(point_proj_t S, const f2elm_t a24)
{
f2elm_t T0, T1, T2;
fpzero(T1[1]); // Set T1 = 1 (imaginary part 0, real part 1)
fpcopy((digit_t *)&Montgomery_one, T1[0]);
fp2add(S->X, S->Z, T0); // T0 = X+Z
fp2sub(S->X, S->Z, T2); // T2 = X-Z
fp2sqr_mont(T0, T0); // T0 = (X+Z)^2
fp2sqr_mont(T2, T2); // T2 = (X-Z)^2
fp2sub(T1, a24, T1); // T1 = 1-a24
fp2add(T1, T1, T1); // T1 = 2(1-a24)
fp2add(T1, T1, T1); // T1 = 4(1-a24)
fp2mul_mont(T1, S->X, T1); // T1 = 4(1-a24)*X
fp2mul_mont(T1, S->Z, T1); // T1 = 4(1-a24)*X*Z
fp2mul_mont(T1, T2, S->Z); // Z' = 4(1-a24)*X*Z*(X-Z)^2
fp2sub(T0, T1, T1); // T1 = (X+Z)^2 - 4(1-a24)*X*Z
fp2mul_mont(T0, T1, S->X); // X' = (X+Z)^2 * [(X+Z)^2 - 4(1-a24)*X*Z]
}
/* 4-isogeny whose kernel point has X == -Z.
 * Outputs the new curve as A24 = (a24 - 1 : a24); fpsub adjusts the
 * real component of A24->X only. */
static void GetFourIsogenyWithKernelXeqMinZ(point_proj_t A24, const f2elm_t a24)
{
fp2copy(a24, A24->X);
fp2copy(a24, A24->Z);
fpsub((digit_t *)A24->X, (digit_t *)&Montgomery_one, (digit_t *)A24->X);
}
/* Push the point S through the 4-isogeny with kernel X == -Z (in place).
 * Step comments track the fp2 values actually computed. */
static void EvalFourIsogenyWithKernelXeqMinZ(point_proj_t S, const f2elm_t a24)
{
f2elm_t T0, T1, T2;
fp2add(S->X, S->Z, T2); // T2 = X+Z
fp2sub(S->X, S->Z, T0); // T0 = X-Z
fp2sqr_mont(T2, T2); // T2 = (X+Z)^2
fp2sqr_mont(T0, T0); // T0 = (X-Z)^2
fp2add(a24, a24, T1); // T1 = 2*a24
fp2add(T1, T1, T1); // T1 = 4*a24
fp2mul_mont(T1, S->X, T1); // T1 = 4*a24*X
fp2mul_mont(T1, S->Z, T1); // T1 = 4*a24*X*Z
fp2mul_mont(T1, T2, S->Z); // Z' = 4*a24*X*Z*(X+Z)^2 ...
fp2neg(S->Z); // ... negated
fp2add(T0, T1, T1); // T1 = (X-Z)^2 + 4*a24*X*Z
fp2mul_mont(T0, T1, S->X); // X' = (X-Z)^2 * [(X-Z)^2 + 4*a24*X*Z]
}
/* Simultaneous (Montgomery-trick) inversion: replaces X, Y, Z, T by
 * 1/X, 1/Y, 1/Z, 1/T using a single field inversion.
 * Note the variable names after the shared inversion: ZT holds 1/(X*Y)
 * and XY holds 1/(Z*T). */
static void FourwayInv(f2elm_t X, f2elm_t Y, f2elm_t Z, f2elm_t T)
{
f2elm_t Xt, Zt, XY, ZT, XYZT;
fp2copy(X, Xt); // save original X
fp2copy(Z, Zt); // save original Z
fp2mul_mont(X, Y, XY); // XY = X*Y
fp2mul_mont(Z, T, ZT); // ZT = Z*T
fp2mul_mont(XY, ZT, XYZT);
fp2inv_mont(XYZT); /* 1 / XYZT */
fp2mul_mont(ZT, XYZT, ZT); /* 1 / XY */
fp2mul_mont(XY, XYZT, XY); /* 1 / ZT */
fp2mul_mont(ZT, Y, X); // X = Y/(X*Y) = 1/X
fp2mul_mont(ZT, Xt, Y); // Y = X/(X*Y) = 1/Y
fp2mul_mont(XY, T, Z); // Z = T/(Z*T) = 1/Z
fp2mul_mont(XY, Zt, T); // T = Z/(Z*T) = 1/T
}
/* Simple functions on states */
/* Side/curve bit of the state: the lowest bit of its byte encoding. */
static unsigned char GetC_SIDH(const st_t *s)
{
return (unsigned char)(s->bytes[0] & 0x01);
}
/* Two-bit b field of the state: bits 1-2 of its byte encoding. */
static unsigned char GetB_SIDH(const st_t *s)
{
return (unsigned char)((s->bytes[0] >> 1) & 0x03);
}
/* Store t into the two-bit b field (bits 1-2) of the state encoding.
 * Assumes the field currently holds 0x03, so masking alone suffices. */
static void SetB_SIDH(st_t *s, const unsigned char t)
{
s->bytes[0] &= (unsigned char)(0xF9 | (t << 1));
}
/* Word-wise copy of state s into r (r must already own its buffer). */
static void copy_st(st_t *r, const st_t *s, const uint64_t nwords_state)
{
uint64_t w = 0;
while (w < nwords_state) {
r->words[w] = s->words[w];
w++;
}
}
/* Export a state's words into a raw uint64_t buffer. */
static void copy_st2uint64(uint64_t *r, const st_t *s, const uint64_t nwords_state)
{
uint64_t w = 0;
while (w < nwords_state) {
r[w] = s->words[w];
w++;
}
}
/* Import a raw uint64_t buffer into a state's words. */
static void copy_uint642st(st_t *r, const uint64_t *s, const uint64_t nwords_state)
{
uint64_t w = 0;
while (w < nwords_state) {
r->words[w] = s[w];
w++;
}
}
/* Swap the contents of two states, word by word.
 * Improvement: the original allocated a temporary heap state via
 * init_st (with an unchecked calloc) on every swap; an element-wise
 * in-place swap has identical effect, needs no allocation and cannot
 * fail. The buffers themselves stay where they are, so any aliases to
 * r->words / s->words remain valid. */
static void SwapStSIDH(st_t *r, st_t *s, uint64_t nwords_state)
{
for (uint64_t i = 0; i < nwords_state; i++) {
digit_t t = r->words[i];
r->words[i] = s->words[i];
s->words[i] = t;
}
}
/* True iff the first nwords_state words of both states are identical. */
bool is_equal_st(const st_t *s, const st_t *t, const uint64_t nwords_state)
{
uint64_t w = 0;
while (w < nwords_state && s->words[w] == t->words[w])
w++;
return w == nwords_state;
}
/* True iff the state matches a raw uint64_t word buffer. */
static bool is_equal_st_words(const st_t *s, const uint64_t *r, const uint64_t nwords_state)
{
uint64_t w = 0;
while (w < nwords_state && s->words[w] == r[w])
w++;
return w == nwords_state;
}
/* Compare two encoded j-invariants byte by byte.
 * Accumulates XOR differences (no early exit); same result as the
 * original short-circuiting loop. */
bool IsEqualJinvSIDH(unsigned char j0[FP2_ENCODED_BYTES], unsigned char j1[FP2_ENCODED_BYTES])
{
unsigned char diff = 0;
for (unsigned int k = 0; k < FP2_ENCODED_BYTES; k++)
diff |= (unsigned char)(j0[k] ^ j1[k]);
return diff == 0;
}
/* Deep-copy a trail triple (current state, step count, initial state).
 * Destination s must already own buffers of nwords_state words. */
void copy_trip(trip_t *s, const trip_t *t, const uint64_t nwords_state)
{
copy_st(&s->current_state, &t->current_state, nwords_state);
s->current_steps = t->current_steps;
copy_st(&s->initial_state, &t->initial_state, nwords_state);
}
/* Functions for vOW */
/* Decide whether the current state is a distinguished point.
 * Takes the bits of word 0 above the memory-index/extra bits, offsets
 * them by a per-function-version multiple of DIST_BOUND (so the
 * distinguished set changes with every random function), masks to the
 * remaining state width, and compares against DIST_BOUND. */
bool DistinguishedSIDH(private_state_t *private_state)
{
/* Divide distinguishedness over interval to avoid bad cases */
assert(private_state->MEMORY_LOG_SIZE > EXTRA_MEM_LOG_SIZE);
uint64_t val;
val = private_state->current.current_state.words[0] >> (private_state->MEMORY_LOG_SIZE + EXTRA_MEM_LOG_SIZE);
val += (uint64_t)private_state->function_version * (uint64_t)private_state->DIST_BOUND;
val &= (((uint64_t)1 << (private_state->NBITS_STATE - EXTRA_MEM_LOG_SIZE - private_state->MEMORY_LOG_SIZE)) - 1);
return (val <= (uint64_t)private_state->DIST_BOUND);
}
/* Memory slot for the current distinguished point: low state bits
 * (above the EXTRA_MEM bits) offset by the random-function counter,
 * masked to the memory size (MEMORY_SIZE_MASK presumably MEMORY_SIZE-1,
 * a power of two — confirm in state setup). */
uint64_t MemIndexSIDH(private_state_t *private_state)
{
assert(private_state->MEMORY_SIZE <= (pow(2, RADIX - 3) - 1)); /* Assumes that MEMORY_SIZE <= 2^RADIX */
return (uint64_t)(((private_state->current.current_state.words[0] >> EXTRA_MEM_LOG_SIZE) + private_state->random_functions) & private_state->MEMORY_SIZE_MASK);
}
/* Returns the 1-based index of the most significant set bit among the
 * low nbits_state bits of m, or 0 if all of those bits are zero.
 * Bug fix: the original computed the bit at (msb - 1) AFTER decrementing
 * msb to 0, reading m[-1] (out-of-bounds, UB) whenever all bits were
 * zero. The bounds check now precedes every read; results for non-zero
 * inputs are unchanged. */
static unsigned int GetMSBSIDH(const unsigned char *m, unsigned long nbits_state)
{
unsigned int msb = (unsigned int)nbits_state;
while (msb > 0) {
if ((m[(msb - 1) >> 3] >> ((msb - 1) & 0x07)) & 1)
return msb;
msb--;
}
return 0;
}
/* Advance the walk by one pseudo-random step and, when Hansel & Gretel
 * mode is on, maintain the bounded table of "crumbs" (intermediate
 * states kept at geometrically growing spacing so backtracking can
 * restart from the nearest crumb instead of the trail start).
 * NOTE(review): crumb spacing bookkeeping (swap/scratch positions,
 * real_dist/max_dist doubling) is taken on faith from the vOW paper's
 * scheme — confirm against the reference implementation before editing. */
void UpdateSIDH(private_state_t *private_state)
{
uint64_t i, temp;
unsigned char j[FP2_ENCODED_BYTES]; // j-invariant output of the step (unused here)
UpdateStSIDH(j, &private_state->current.current_state, &private_state->current.current_state, private_state);
private_state->number_steps_collect += 1;
if (private_state->HANSEL_GRETEL) {
if (private_state->crumbs.num_crumbs < private_state->crumbs.max_crumbs) {
// Table not yet full: append the new crumb directly.
copy_st2uint64(&private_state->crumbs.crumbs[private_state->crumbs.position], &private_state->current.current_state, private_state->NWORDS_STATE);
private_state->crumbs.positions[private_state->crumbs.position] = private_state->crumbs.position;
private_state->crumbs.index_crumbs[private_state->crumbs.position] = private_state->crumbs.position;
private_state->crumbs.num_crumbs++;
} else if (private_state->crumbs.position - private_state->crumbs.positions[private_state->crumbs.max_crumbs - 1] == private_state->crumbs.max_dist) {
// Table full and the spacing interval elapsed: evict one crumb,
// shift the index/position tables left, and append the new crumb.
temp = private_state->crumbs.index_crumbs[private_state->crumbs.index_position];
for (i = private_state->crumbs.index_position; i < private_state->crumbs.max_crumbs - 1; i++) {
// Updating table with crumb indices for the crumb table
private_state->crumbs.index_crumbs[i] = private_state->crumbs.index_crumbs[i + 1];
}
private_state->crumbs.index_crumbs[private_state->crumbs.max_crumbs - 1] = temp;
private_state->crumbs.index_position++;
if (private_state->crumbs.index_position > private_state->crumbs.max_crumbs - 1)
private_state->crumbs.index_position = 0;
copy_st2uint64(&private_state->crumbs.crumbs[temp], &private_state->current.current_state, private_state->NWORDS_STATE); // Inserting a new crumb at the end of the crumb table
for (i = private_state->crumbs.scratch_position; i < private_state->crumbs.max_crumbs - 1; i++) {
// Updating table with crumb positions
private_state->crumbs.positions[i] = private_state->crumbs.positions[i + 1];
}
private_state->crumbs.positions[private_state->crumbs.max_crumbs - 1] = private_state->crumbs.position;
private_state->crumbs.swap_position += 2 * private_state->crumbs.real_dist;
private_state->crumbs.scratch_position++;
if (private_state->crumbs.swap_position > private_state->crumbs.max_crumbs - 1) {
// Kind of cumbersome, maybe this can be simplified (but not time critical)
private_state->crumbs.swap_position = 0;
private_state->crumbs.real_dist <<= 1;
}
if (private_state->crumbs.scratch_position > private_state->crumbs.max_crumbs - 1) {
// A full sweep completed: double the crumb spacing.
private_state->crumbs.scratch_position = 0;
private_state->crumbs.max_dist <<= 1;
private_state->crumbs.swap_position = private_state->crumbs.max_dist;
}
}
private_state->crumbs.position++;
}
}
/* Switch this thread to the next random step function; thread 0 also
 * clears the shared "resync done" flag so all cores re-synchronize. */
void UpdateRandomFunctionSIDH(shared_state_t *S, private_state_t *private_state)
{
private_state->function_version++;
// reset "resync done" flag
if (private_state->thread_id == 0) {
S->resync_cores[0] = 0;
}
}
/* Walk the two colliding trails back from their starts to locate the
 * actual collision point. Returns true only for the golden collision
 * (different curve bit c); false for Robin Hoods and same-side
 * collisions.
 * NOTE(review): only the initial states are swapped below (the step
 * counters are not), so after the catch-up loop both trails have the
 * shorter remaining length; the c1-based loop bound is then an upper
 * bound that relies on an early return at the collision.
 * NOTE(review): c0_/c1_ leak on the "should never reach here" path. */
static inline bool BacktrackSIDH_core(trip_t *c0, trip_t *c1, shared_state_t *S, private_state_t *private_state)
{
// printf("Backtracking\n");
unsigned long L, i;
st_t c0_, c1_;
unsigned char jinv0[FP2_ENCODED_BYTES], jinv1[FP2_ENCODED_BYTES];
f2elm_t jinv;
(void)private_state;
// Make c0 have the largest number of steps
if (c0->current_steps < c1->current_steps) {
SwapStSIDH(&c0->initial_state, &c1->initial_state, private_state->NWORDS_STATE);
L = (unsigned long)(c1->current_steps - c0->current_steps);
} else {
L = (unsigned long)(c0->current_steps - c1->current_steps);
}
// Catch up the trails
for (i = 0; i < L; i++) {
UpdateStSIDH(jinv0, &c0->initial_state, &c0->initial_state, private_state);
private_state->number_steps_locate += 1;
}
if (is_equal_st(&c0->initial_state, &c1->initial_state, private_state->NWORDS_STATE)) {
// printf("Robin Hood\n");
return false; // Robin Hood
}
c0_ = init_st(private_state->NWORDS_STATE);
c1_ = init_st(private_state->NWORDS_STATE);
// Step both trails in lockstep until their j-invariant outputs agree.
for (i = 0; i < c1->current_steps + 1; i++) {
UpdateStSIDH(jinv0, &c0_, &c0->initial_state, private_state);
private_state->number_steps_locate += 1;
UpdateStSIDH(jinv1, &c1_, &c1->initial_state, private_state);
private_state->number_steps_locate += 1;
if (IsEqualJinvSIDH(jinv0, jinv1)) {
/* Record collision */
private_state->collisions += 1;
// printf("collision\n");
if (private_state->collect_vow_stats) {
#pragma omp critical
{
insertTree(&S->dist_cols, c0->initial_state, c1->initial_state, private_state->NWORDS_STATE);
}
}
// free tmp states
free_st(&c0_);
free_st(&c1_);
if (GetC_SIDH(&c0->initial_state) == GetC_SIDH(&c1->initial_state)) {
// printf("GetC\n");
return false;
} else {
printf("GOLDEN COLLISION\n");
fp2_decode(jinv0, jinv);
assert(fp2_is_equal(jinv, private_state->jinv)); /* Verify that we found the right one*/
return true;
}
} else {
copy_st(&c0->initial_state, &c0_, private_state->NWORDS_STATE);
copy_st(&c1->initial_state, &c1_, private_state->NWORDS_STATE);
}
}
/* Should never reach here */
return false;
}
/* Hansel & Gretel backtracking: instead of replaying the crumbs trail
 * from its start, restart from the nearest stored crumb and hand the
 * (much shorter) pair of sub-trails to BacktrackSIDH_core.
 * NOTE(review): `crumb` is a single uint64_t, but &crumb is passed to
 * copy_uint642st / is_equal_st_words as an NWORDS_STATE-word buffer —
 * this is only in-bounds if NWORDS_STATE == 1; confirm for larger
 * parameter sets. */
static inline bool BacktrackSIDH_Hansel_Gretel(trip_t *c_mem, trip_t *c_crumbs, shared_state_t *S, private_state_t *private_state)
{
uint64_t L;
trip_t c0_, cmem;
uint64_t i, k, index;
uint64_t crumb;
bool resp, equal;
unsigned char j[FP2_ENCODED_BYTES];
cmem = init_trip(private_state->NWORDS_STATE);
copy_trip(&cmem, c_mem, private_state->NWORDS_STATE);
// Make the memory trail (without crumbs) at most the length of the crumbs trail.
if (cmem.current_steps > c_crumbs->current_steps) {
L = cmem.current_steps - c_crumbs->current_steps;
for (i = 0; i < L; i++) {
UpdateStSIDH(j, &cmem.initial_state, &cmem.initial_state, private_state);
private_state->number_steps_locate += 1;
}
cmem.current_steps = c_crumbs->current_steps;
}
// Check for Robin Hood
if (is_equal_st(&cmem.initial_state, &c_crumbs->initial_state, private_state->NWORDS_STATE))
return false;
// The memory path is L steps shorter than the crumbs path.
L = c_crumbs->current_steps - cmem.current_steps;
k = 0;
// Since there has been at least one step, there is at least one crumb.
// Crumbs only store intermediate points, not the initial state and not
// necessarily the current state.
index = private_state->crumbs.positions[0] + 1;
while ((L > index) && (k + 1 < private_state->crumbs.num_crumbs)) {
// There are still crumbs to check and we haven't found the next crumb to reach.
k++;
index = private_state->crumbs.positions[k] + 1;
}
// Either have found the next crumb or ran out of crumbs to check.
if (L > index) {
// Ran out of crumbs to check, i.e. already in the interval beyond the last crumb.
// Trails collide after last crumb.
// Call original BacktrackGen on memory trail and shortened crumbs trail.
copy_uint642st(&c_crumbs->initial_state, &private_state->crumbs.crumbs[private_state->crumbs.index_crumbs[k]], private_state->NWORDS_STATE);
c_crumbs->current_steps -= (private_state->crumbs.positions[k] + 1);
resp = BacktrackSIDH_core(&cmem, c_crumbs, S, private_state);
} else {
// Next crumb to check lies before (or is) the last crumb.
c0_ = init_trip(private_state->NWORDS_STATE);
copy_trip(&c0_, &cmem, private_state->NWORDS_STATE);
// Advance through the crumbs until the memory trail lands on one.
do
{
cmem.current_steps = c0_.current_steps;
copy_st(&cmem.initial_state, &c0_.initial_state, private_state->NWORDS_STATE);
crumb = private_state->crumbs.crumbs[private_state->crumbs.index_crumbs[k]];
index = private_state->crumbs.positions[k] + 1;
L = cmem.current_steps - (c_crumbs->current_steps - index);
for (i = 0; i < L; i++) {
UpdateStSIDH(j, &c0_.initial_state, &c0_.initial_state, private_state);
private_state->number_steps_locate += 1;
}
c0_.current_steps -= L;
k++;
equal = is_equal_st_words(&c0_.initial_state, &crumb, private_state->NWORDS_STATE);
} while (!equal && k < private_state->crumbs.num_crumbs);
// Either found the colliding crumb or moved to the interval beyond the last crumb.
if (equal) { // Have a colliding crumb
copy_uint642st(&cmem.current_state, &crumb, private_state->NWORDS_STATE);
cmem.current_steps -= c0_.current_steps;
if (k == 1) {
c0_.current_steps = private_state->crumbs.positions[0] + 1;
copy_uint642st(&c0_.initial_state, c_crumbs->initial_state.words, private_state->NWORDS_STATE);
} else {
c0_.current_steps = private_state->crumbs.positions[k - 1] - private_state->crumbs.positions[k - 2];
copy_uint642st(&c0_.initial_state, &private_state->crumbs.crumbs[private_state->crumbs.index_crumbs[k - 2]], private_state->NWORDS_STATE);
}
copy_uint642st(&c0_.current_state, &crumb, private_state->NWORDS_STATE);
} else { // Collision happens after the last crumb.
cmem.current_steps = c0_.current_steps;
copy_uint642st(&cmem.initial_state, &crumb, private_state->NWORDS_STATE);
}
resp = BacktrackSIDH_core(&cmem, &c0_, S, private_state);
free_trip(&c0_);
}
free_trip(&cmem);
return resp;
}
/* Backtrack entry point: dispatch to the Hansel & Gretel variant when
 * crumbs are enabled, otherwise to the plain core routine. */
bool BacktrackSIDH(trip_t *c0, trip_t *c1, shared_state_t *S, private_state_t *private_state)
{
return private_state->HANSEL_GRETEL
? BacktrackSIDH_Hansel_Gretel(c0, c1, S, private_state)
: BacktrackSIDH_core(c0, c1, S, private_state);
}
|
ParticleFilterOMP.h | //------------------------------------------------------------------------
// ____ _ _
// / ___|____ _ _ ____ ____| |__ | |
// | | / ___| | | | _ \/ ___| _ \| |
// | |___| | | |_| | | | | |___| | | ||_|
// \____|_| \_____|_| |_|\____|_| |_|(_) Media benchmarks
//
// 2006, Intel Corporation, licensed under Apache 2.0
//
// file : ParticleFilterOMP.h
// author : Scott Ettinger - scott.m.ettinger@intel.com
//
// description : OpenMP parallelized version of the particle filter
// object derived from ParticleFilter.h
//
// modified :
//--------------------------------------------------------------------------
#ifndef PARTICLEFILTEROMP_H
#define PARTICLEFILTEROMP_H
#include <omp.h>
#include "ParticleFilter.h"
//OpenMP-parallelized particle filter. Overrides only the two
//embarrassingly-parallel steps of ParticleFilter<T> (weight
//computation and particle regeneration) with "#pragma omp parallel for"
//loops; all other behavior is inherited unchanged.
template<class T>
class ParticleFilterOMP : public ParticleFilter<T> {
using ParticleFilter<T>:: mModel;
using ParticleFilter<T>:: mWeights;
using ParticleFilter<T>:: mParticles;
using ParticleFilter<T>:: mNewParticles;
using ParticleFilter<T>:: mBestParticle;
using ParticleFilter<T>:: mNParticles;
using ParticleFilter<T>:: mMinParticles;
using ParticleFilter<T>:: mBins;
using ParticleFilter<T>:: mRnd;
typedef typename ParticleFilter<T>::fpType fpType;
typedef typename ParticleFilter<T>::Vectorf Vectorf;
protected:
std::vector<int> mIndex; //list of particles to regenerate
//calculate particle weights - threaded version
//calculate particle weights based on model likelihood
void CalcWeights(std::vector<Vectorf > &particles);
//New particle generation - threaded version
void GenerateNewParticles(int k);
};
//Calculate particle weights (mWeights) and find highest likelihood particle.
//computes an optimal annealing factor and scales the likelihoods.
template<class T>
void ParticleFilterOMP<T>::CalcWeights(std::vector<Vectorf > &particles)
{
std::vector<unsigned char> valid(particles.size());
mBestParticle = 0;
fpType total = 0, best = 0, minWeight = 1e30f, annealingFactor = 1;
mWeights.resize(particles.size());
int np = (int)particles.size(), j;
//OpenMP parallelized loop to compute log-likelihoods
//(thread id n selects per-thread scratch inside the model — assumed
// thread-safe per thread index; confirm in TrackingModel)
#pragma omp parallel for
for(j = 0; j < np; j++)
{
bool vflag;
int n = omp_get_thread_num();
//compute log-likelihood weights for each particle
mWeights[j] = mModel->LogLikelihood(particles[j], vflag, n);
valid[j] = vflag ? 1 : 0;
}
//serial pass: drop invalid particles via swap-with-last (order not preserved)
uint i = 0;
while(i < particles.size())
{
if(!valid[i]) //if not valid(model prior), remove the particle from the list
{
particles[i] = particles[particles.size() - 1];
mWeights[i] = mWeights[particles.size() - 1];
valid[i] = valid[valid.size() - 1];
particles.pop_back(); mWeights.pop_back(); valid.pop_back();
}
else
{
minWeight = std::min(mWeights[i++], minWeight); //find minimum log-likelihood
}
}
//bail out if not enough valid particles
if((int)particles.size() < mMinParticles) return;
mWeights -= minWeight; //shift weights to zero for numerical stability
if(mModel->StdDevs().size() > 1) {
//calculate annealing factor if more than 1 step
annealingFactor = BetaAnnealingFactor(mWeights, 0.5f);
}
//exponentiate, accumulate the normalizer and track the best particle
for(i = 0; i < mWeights.size(); i++)
{
double wa = annealingFactor * mWeights[i];
//exponentiate log-likelihoods scaled by annealing factor
mWeights[i] = (float)exp(wa);
total += mWeights[i]; //save sum of all weights
if(i == 0 || mWeights[i] > best) //find highest likelihood particle
{
best = mWeights[i];
mBestParticle = i;
}
}
mWeights *= fpType(1.0) / total; //normalize weights
}
//generate new particles distributed with std deviation given
//by the model annealing parameter - threaded
template<class T>
void ParticleFilterOMP<T>::GenerateNewParticles(int k)
{
//Resample: bin i contributes mBins[i] copies of particle i, each
//perturbed with Gaussian noise scaled by the annealing step k.
mNewParticles.resize(mNParticles);
mIndex.resize(mNParticles);
//build the regeneration index list (serial - cheap)
int slot = 0;
for(int bin = 0; bin < (int)mBins.size(); bin++)
for(uint c = 0; c < mBins[bin]; c++)
mIndex[slot++] = bin;
//distribute new particles randomly according to model stdDevs
#pragma omp parallel for
for(int i = 0; i < mNParticles; i++)
{
//duplicate the indexed particle, then perturb it about its source
//using this particle's own RNG stream (mRnd[i]) for determinism
mNewParticles[i] = mParticles[mIndex[i]];
this->AddGaussianNoise(mNewParticles[i], mModel->StdDevs()[k], mRnd[i]);
}
}
#endif
|
image.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% IIIII M M AAA GGGG EEEEE %
% I MM MM A A G E %
% I M M M AAAAA G GG EEE %
% I M M A A G G E %
% IIIII M M A A GGGG EEEEE %
% %
% %
% MagickCore Image Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/animate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/compress.h"
#include "MagickCore/constitute.h"
#include "MagickCore/delegate.h"
#include "MagickCore/display.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/histogram.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/magic.h"
#include "MagickCore/magick.h"
#include "MagickCore/magick-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/random_.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/timer.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#include "MagickCore/version.h"
#include "MagickCore/xwindow-private.h"
/*
Constant declaration.
*/
/*
  Default colors, geometries, and progress-monitor tags shared across
  MagickCore.
*/
const char
  BackgroundColor[] = "#ffffff",  /* white */
  BorderColor[] = "#dfdfdf",  /* gray */
  DefaultTileFrame[] = "15x15+3+3",  /* montage tile frame geometry */
  DefaultTileGeometry[] = "120x120+4+3>",  /* montage tile geometry */
  DefaultTileLabel[] = "%f\n%G\n%b",  /* filename, geometry, file size */
  ForegroundColor[] = "#000",  /* black */
  LoadImageTag[] = "Load/Image",  /* progress-monitor tags */
  LoadImagesTag[] = "Load/Images",
  MatteColor[] = "#bdbdbd",  /* gray */
  PSDensityGeometry[] = "72.0x72.0",  /* default Postscript density (DPI) */
  PSPageGeometry[] = "612x792",  /* US-letter page in points */
  SaveImageTag[] = "Save/Image",
  SaveImagesTag[] = "Save/Images",
  TransparentColor[] = "#00000000";  /* transparent black */

const double
  DefaultResolution = 72.0;  /* pixels per inch */
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImage() returns a pointer to an image structure initialized to
% default values.
%
% The format of the AcquireImage method is:
%
% Image *AcquireImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AcquireImage(const ImageInfo *image_info,
  ExceptionInfo *exception)
{
  const char
    *option;

  Image
    *image;

  MagickStatusType
    flags;

  /*
    Allocate image structure.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  image=(Image *) AcquireCriticalMemory(sizeof(*image));
  (void) ResetMagickMemory(image,0,sizeof(*image));
  /*
    Initialize Image structure: MIFF defaults, sRGB colorspace with the
    standard sRGB primaries and D65-style white point.
  */
  (void) CopyMagickString(image->magick,"MIFF",MagickPathExtent);
  image->storage_class=DirectClass;
  image->depth=MAGICKCORE_QUANTUM_DEPTH;
  image->colorspace=sRGBColorspace;
  image->rendering_intent=PerceptualIntent;
  image->gamma=1.000f/2.200f;
  image->chromaticity.red_primary.x=0.6400f;
  image->chromaticity.red_primary.y=0.3300f;
  image->chromaticity.red_primary.z=0.0300f;
  image->chromaticity.green_primary.x=0.3000f;
  image->chromaticity.green_primary.y=0.6000f;
  image->chromaticity.green_primary.z=0.1000f;
  image->chromaticity.blue_primary.x=0.1500f;
  image->chromaticity.blue_primary.y=0.0600f;
  image->chromaticity.blue_primary.z=0.7900f;
  image->chromaticity.white_point.x=0.3127f;
  image->chromaticity.white_point.y=0.3290f;
  image->chromaticity.white_point.z=0.3583f;
  image->interlace=NoInterlace;
  image->ticks_per_second=UndefinedTicksPerSecond;
  image->compose=OverCompositeOp;
  (void) QueryColorCompliance(MatteColor,AllCompliance,&image->matte_color,
    exception);
  (void) QueryColorCompliance(BackgroundColor,AllCompliance,
    &image->background_color,exception);
  (void) QueryColorCompliance(BorderColor,AllCompliance,&image->border_color,
    exception);
  (void) QueryColorCompliance(TransparentColor,AllCompliance,
    &image->transparent_color,exception);
  GetTimerInfo(&image->timer);
  image->cache=AcquirePixelCache(0);
  image->channel_mask=DefaultChannels;
  image->channel_map=AcquirePixelChannelMap();
  image->blob=CloneBlobInfo((BlobInfo *) NULL);
  image->timestamp=time((time_t *) NULL);
  image->debug=IsEventLogging();
  image->reference_count=1;
  image->semaphore=AcquireSemaphoreInfo();
  image->signature=MagickCoreSignature;
  if (image_info == (ImageInfo *) NULL)
    return(image);
  /*
    Transfer image info.
  */
  SetBlobExempt(image,image_info->file != (FILE *) NULL ? MagickTrue :
    MagickFalse);
  (void) CopyMagickString(image->filename,image_info->filename,
    MagickPathExtent);
  (void) CopyMagickString(image->magick_filename,image_info->filename,
    MagickPathExtent);
  (void) CopyMagickString(image->magick,image_info->magick,MagickPathExtent);
  if (image_info->size != (char *) NULL)
    {
      (void) ParseAbsoluteGeometry(image_info->size,&image->extract_info);
      image->columns=image->extract_info.width;
      image->rows=image->extract_info.height;
      image->offset=image->extract_info.x;
      image->extract_info.x=0;
      image->extract_info.y=0;
    }
  if (image_info->extract != (char *) NULL)
    {
      RectangleInfo
        geometry;

      /*
        Only an extract geometry with an explicit offset selects a region;
        width/height are swapped with the image dimensions.
      */
      flags=ParseAbsoluteGeometry(image_info->extract,&geometry);
      if (((flags & XValue) != 0) || ((flags & YValue) != 0))
        {
          image->extract_info=geometry;
          Swap(image->columns,image->extract_info.width);
          Swap(image->rows,image->extract_info.height);
        }
    }
  image->compression=image_info->compression;
  image->quality=image_info->quality;
  image->endian=image_info->endian;
  image->interlace=image_info->interlace;
  image->units=image_info->units;
  if (image_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;

      flags=ParseGeometry(image_info->density,&geometry_info);
      image->resolution.x=geometry_info.rho;
      image->resolution.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->resolution.y=image->resolution.x;  /* square pixels */
    }
  if (image_info->page != (char *) NULL)
    {
      char
        *geometry;

      image->page=image->extract_info;
      geometry=GetPageGeometry(image_info->page);
      (void) ParseAbsoluteGeometry(geometry,&image->page);
      geometry=DestroyString(geometry);
    }
  if (image_info->depth != 0)
    image->depth=image_info->depth;
  image->dither=image_info->dither;
  image->matte_color=image_info->matte_color;
  image->background_color=image_info->background_color;
  image->border_color=image_info->border_color;
  image->transparent_color=image_info->transparent_color;
  image->ping=image_info->ping;
  image->progress_monitor=image_info->progress_monitor;
  image->client_data=image_info->client_data;
  if (image_info->cache != (void *) NULL)
    ClonePixelCacheMethods(image->cache,image_info->cache);
  /*
    Set all global options that map to per-image settings.
  */
  (void) SyncImageSettings(image_info,image,exception);
  /*
    Global options that are only set for new images.
  */
  option=GetImageOption(image_info,"delay");
  if (option != (const char *) NULL)
    {
      GeometryInfo
        geometry_info;

      flags=ParseGeometry(option,&geometry_info);
      if ((flags & GreaterValue) != 0)
        {
          /*
            ">" only shortens an existing longer delay.
          */
          if (image->delay > (size_t) floor(geometry_info.rho+0.5))
            image->delay=(size_t) floor(geometry_info.rho+0.5);
        }
      else
        if ((flags & LessValue) != 0)
          {
            /*
              "<" only lengthens an existing shorter delay.  Fix: this
              branch previously assigned ticks_per_second from sigma
              instead of updating the delay from rho, making the option
              a no-op for its documented purpose.
            */
            if (image->delay < (size_t) floor(geometry_info.rho+0.5))
              image->delay=(size_t) floor(geometry_info.rho+0.5);
          }
        else
          image->delay=(size_t) floor(geometry_info.rho+0.5);
      if ((flags & SigmaValue) != 0)
        image->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5);
    }
  option=GetImageOption(image_info,"dispose");
  if (option != (const char *) NULL)
    image->dispose=(DisposeType) ParseCommandOption(MagickDisposeOptions,
      MagickFalse,option);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImageInfo() allocates the ImageInfo structure.
%
% The format of the AcquireImageInfo method is:
%
% ImageInfo *AcquireImageInfo(void)
%
*/
MagickExport ImageInfo *AcquireImageInfo(void)
{
  ImageInfo
    *info;

  /*
    Allocate the structure (AcquireCriticalMemory aborts on failure) and
    reset every field to its default value.
  */
  info=(ImageInfo *) AcquireCriticalMemory(sizeof(*info));
  GetImageInfo(info);
  return(info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e N e x t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireNextImage() initializes the next image in a sequence to
% default values. The next member of image points to the newly allocated
% image. If there is a memory shortage, next is assigned NULL.
%
% The format of the AcquireNextImage method is:
%
% void AcquireNextImage(const ImageInfo *image_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void AcquireNextImage(const ImageInfo *image_info,Image *image,
  ExceptionInfo *exception)
{
  Image
    *next;

  /*
    Allocate the successor image; image->next stays NULL on failure.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->next=AcquireImage(image_info,exception);
  next=GetNextImageInList(image);
  if (next == (Image *) NULL)
    return;
  /*
    Seed the new frame's filename from the current image, preferring the
    image-info filename when one was supplied.
  */
  (void) CopyMagickString(next->filename,image->filename,MagickPathExtent);
  if (image_info != (ImageInfo *) NULL)
    (void) CopyMagickString(next->filename,image_info->filename,
      MagickPathExtent);
  /*
    Share the I/O blob with the current frame and link it into the list.
  */
  DestroyBlob(next);
  next->blob=ReferenceBlob(image->blob);
  next->endian=image->endian;
  next->scene=image->scene+1;
  next->previous=image;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A p p e n d I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AppendImages() takes all images from the current image pointer to the end
% of the image list and appends them to each other top-to-bottom if the
% stack parameter is true, otherwise left-to-right.
%
% The current gravity setting effects how the image is justified in the
% final image.
%
% The format of the AppendImages method is:
%
% Image *AppendImages(const Image *images,const MagickBooleanType stack,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o stack: A value other than 0 stacks the images top-to-bottom.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AppendImages(const Image *images,
  const MagickBooleanType stack,ExceptionInfo *exception)
{
#define AppendImageTag "Append/Image"

  CacheView
    *append_view;

  Image
    *append_image;

  MagickBooleanType
    homogeneous_colorspace,
    status;

  MagickOffsetType
    n;

  PixelTrait
    alpha_trait;

  RectangleInfo
    geometry;

  register const Image
    *next;

  size_t
    depth,
    height,
    number_images,
    width;

  ssize_t
    x_offset,
    y,
    y_offset;

  /*
    First pass: compute maximum area of the appended canvas.  For stacking
    (top-to-bottom) heights add and widths take the maximum; for
    left-to-right appending the roles are reversed.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  alpha_trait=images->alpha_trait;
  number_images=1;
  width=images->columns;
  height=images->rows;
  depth=images->depth;
  homogeneous_colorspace=MagickTrue;
  next=GetNextImageInList(images);
  for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    /* Promote depth/alpha to the most demanding member of the list. */
    if (next->depth > depth)
      depth=next->depth;
    if (next->colorspace != images->colorspace)
      homogeneous_colorspace=MagickFalse;
    if (next->alpha_trait != UndefinedPixelTrait)
      alpha_trait=BlendPixelTrait;
    number_images++;
    if (stack != MagickFalse)
      {
        /* Top-to-bottom: widest column wins, rows accumulate. */
        if (next->columns > width)
          width=next->columns;
        height+=next->rows;
        continue;
      }
    /* Left-to-right: columns accumulate, tallest row wins. */
    width+=next->columns;
    if (next->rows > height)
      height=next->rows;
  }
  /*
    Append images.
  */
  append_image=CloneImage(images,width,height,MagickTrue,exception);
  if (append_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(append_image,DirectClass,exception) == MagickFalse)
    {
      append_image=DestroyImage(append_image);
      return((Image *) NULL);
    }
  /* Mixed colorspaces are reconciled by converting the canvas to sRGB. */
  if (homogeneous_colorspace == MagickFalse)
    (void) SetImageColorspace(append_image,sRGBColorspace,exception);
  append_image->depth=depth;
  append_image->alpha_trait=alpha_trait;
  append_image->page=images->page;
  (void) SetImageBackgroundColor(append_image,exception);
  status=MagickTrue;
  x_offset=0;
  y_offset=0;
  next=images;
  append_view=AcquireAuthenticCacheView(append_image,exception);
  for (n=0; n < (MagickOffsetType) number_images; n++)
  {
    CacheView
      *image_view;

    MagickBooleanType
      proceed;

    /* Justify this image within its strip according to its gravity. */
    SetGeometry(append_image,&geometry);
    GravityAdjustGeometry(next->columns,next->rows,next->gravity,&geometry);
    if (stack != MagickFalse)
      x_offset-=geometry.x;
    else
      y_offset-=geometry.y;
    image_view=AcquireVirtualCacheView(next,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(next,next,next->rows,1)
#endif
    for (y=0; y < (ssize_t) next->rows; y++)
    {
      MagickBooleanType
        sync;

      PixelInfo
        pixel;

      register const Quantum
        *magick_restrict p;

      register Quantum
        *magick_restrict q;

      register ssize_t
        x;

      /* A prior row failure cancels remaining iterations cheaply. */
      if (status == MagickFalse)
        continue;
      p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
      q=QueueCacheViewAuthenticPixels(append_view,x_offset,y+y_offset,
        next->columns,1,exception);
      if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
        {
          status=MagickFalse;
          continue;
        }
      GetPixelInfo(next,&pixel);
      for (x=0; x < (ssize_t) next->columns; x++)
      {
        if (GetPixelWriteMask(next,p) <= (QuantumRange/2))
          {
            /* Masked-off source pixel: paint the background instead. */
            SetPixelBackgoundColor(append_image,q);
            p+=GetPixelChannels(next);
            q+=GetPixelChannels(append_image);
            continue;
          }
        GetPixelInfoPixel(next,p,&pixel);
        SetPixelViaPixelInfo(append_image,&pixel,q);
        p+=GetPixelChannels(next);
        q+=GetPixelChannels(append_image);
      }
      sync=SyncCacheViewAuthenticPixels(append_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
    }
    image_view=DestroyCacheView(image_view);
    /* Advance the insertion point for the next image in the sequence. */
    if (stack == MagickFalse)
      {
        x_offset+=(ssize_t) next->columns;
        y_offset=0;
      }
    else
      {
        x_offset=0;
        y_offset+=(ssize_t) next->rows;
      }
    proceed=SetImageProgress(append_image,AppendImageTag,n,number_images);
    if (proceed == MagickFalse)
      break;
    next=GetNextImageInList(next);
  }
  append_view=DestroyCacheView(append_view);
  if (status == MagickFalse)
    append_image=DestroyImage(append_image);
  return(append_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C a t c h I m a g e E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CatchImageException() returns if no exceptions are found in the image
% sequence, otherwise it determines the most severe exception and reports
% it as a warning or error depending on the severity.
%
% The format of the CatchImageException method is:
%
% ExceptionType CatchImageException(Image *image)
%
% A description of each parameter follows:
%
% o image: An image sequence.
%
*/
MagickExport ExceptionType CatchImageException(Image *image)
{
  ExceptionInfo
    *sink;

  ExceptionType
    worst;

  /*
    Drain pending exceptions into a scratch ExceptionInfo (CatchException
    reports them) and return the most severe one observed.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  sink=AcquireExceptionInfo();
  CatchException(sink);
  worst=sink->severity;
  sink=DestroyExceptionInfo(sink);
  return(worst);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l i p I m a g e P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClipImagePath() sets the image clip mask based any clipping path information
% if it exists.
%
% The format of the ClipImagePath method is:
%
% MagickBooleanType ClipImagePath(Image *image,const char *pathname,
% const MagickBooleanType inside,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o pathname: name of clipping path resource. If name is preceded by #, use
% clipping path numbered by name.
%
% o inside: if non-zero, later operations take effect inside clipping path.
% Otherwise later operations take effect outside clipping path.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ClipImage() is a convenience wrapper: clip to path "#1" with subsequent
  operations taking effect inside the clipping path.
*/
MagickExport MagickBooleanType ClipImage(Image *image,ExceptionInfo *exception)
{
  return(ClipImagePath(image,"#1",MagickTrue,exception));
}
MagickExport MagickBooleanType ClipImagePath(Image *image,const char *pathname,
  const MagickBooleanType inside,ExceptionInfo *exception)
{
#define ClipImagePathTag "ClipPath/Image"

  char
    *property;

  const char
    *value;

  Image
    *clip_mask;

  ImageInfo
    *image_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pathname != NULL);
  /*
    Look up the clipping path stored as an 8BIM image property.
  */
  property=AcquireString(pathname);
  (void) FormatLocaleString(property,MagickPathExtent,"8BIM:1999,2998:%s",
    pathname);
  value=GetImageProperty(image,property,exception);
  property=DestroyString(property);
  if (value == (const char *) NULL)
    {
      ThrowFileException(exception,OptionError,"NoClipPathDefined",
        image->filename);
      return(MagickFalse);
    }
  /*
    Render the path data into a mask image.
  */
  image_info=AcquireImageInfo();
  (void) CopyMagickString(image_info->filename,image->filename,
    MagickPathExtent);
  (void) ConcatenateMagickString(image_info->filename,pathname,
    MagickPathExtent);
  clip_mask=BlobToImage(image_info,value,strlen(value),exception);
  image_info=DestroyImageInfo(image_info);
  if (clip_mask == (Image *) NULL)
    return(MagickFalse);
  if (clip_mask->storage_class == PseudoClass)
    {
      (void) SyncImage(clip_mask,exception);
      if (SetImageStorageClass(clip_mask,DirectClass,exception) == MagickFalse)
        {
          /*
            Fix: release the mask before bailing out; the original code
            leaked clip_mask on this error path.
          */
          clip_mask=DestroyImage(clip_mask);
          return(MagickFalse);
        }
    }
  /* inside != 0: negate so later operations apply inside the path. */
  if (inside != MagickFalse)
    (void) NegateImage(clip_mask,MagickFalse,exception);
  (void) FormatLocaleString(clip_mask->magick_filename,MagickPathExtent,
    "8BIM:1999,2998:%s\nPS",pathname);
  (void) SetImageMask(image,WritePixelMask,clip_mask,exception);
  clip_mask=DestroyImage(clip_mask);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImage() copies an image and returns the copy as a new image object.
%
% If the specified columns and rows is 0, an exact copy of the image is
% returned, otherwise the pixel data is undefined and must be initialized
% with the QueueAuthenticPixels() and SyncAuthenticPixels() methods. On
% failure, a NULL image is returned and exception describes the reason for the
% failure.
%
% The format of the CloneImage method is:
%
% Image *CloneImage(const Image *image,const size_t columns,
% const size_t rows,const MagickBooleanType orphan,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the cloned image.
%
% o rows: the number of rows in the cloned image.
%
% o detach: With a value other than 0, the cloned image is detached from
% its parent I/O stream.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CloneImage(const Image *image,const size_t columns,
  const size_t rows,const MagickBooleanType detach,ExceptionInfo *exception)
{
  Image
    *clone_image;

  double
    scale;

  size_t
    length;

  /*
    Clone the image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((image->columns == 0) || (image->rows == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError,
        "NegativeOrZeroImageSize","`%s'",image->filename);
      return((Image *) NULL);
    }
  clone_image=(Image *) AcquireCriticalMemory(sizeof(*clone_image));
  (void) ResetMagickMemory(clone_image,0,sizeof(*clone_image));
  /* Copy scalar attributes field by field. */
  clone_image->signature=MagickCoreSignature;
  clone_image->storage_class=image->storage_class;
  clone_image->number_channels=image->number_channels;
  clone_image->number_meta_channels=image->number_meta_channels;
  clone_image->metacontent_extent=image->metacontent_extent;
  clone_image->colorspace=image->colorspace;
  clone_image->read_mask=image->read_mask;
  clone_image->write_mask=image->write_mask;
  clone_image->alpha_trait=image->alpha_trait;
  clone_image->columns=image->columns;
  clone_image->rows=image->rows;
  clone_image->dither=image->dither;
  /* Deep-copy ancillary metadata (info, profiles, properties, artifacts). */
  clone_image->image_info=CloneImageInfo(image->image_info);
  (void) CloneImageProfiles(clone_image,image);
  (void) CloneImageProperties(clone_image,image);
  (void) CloneImageArtifacts(clone_image,image);
  GetTimerInfo(&clone_image->timer);
  if (image->ascii85 != (void *) NULL)
    Ascii85Initialize(clone_image);
  clone_image->magick_columns=image->magick_columns;
  clone_image->magick_rows=image->magick_rows;
  clone_image->type=image->type;
  clone_image->channel_mask=image->channel_mask;
  clone_image->channel_map=ClonePixelChannelMap(image->channel_map);
  (void) CopyMagickString(clone_image->magick_filename,image->magick_filename,
    MagickPathExtent);
  (void) CopyMagickString(clone_image->magick,image->magick,MagickPathExtent);
  (void) CopyMagickString(clone_image->filename,image->filename,
    MagickPathExtent);
  clone_image->progress_monitor=image->progress_monitor;
  clone_image->client_data=image->client_data;
  clone_image->reference_count=1;
  clone_image->next=image->next;
  clone_image->previous=image->previous;
  clone_image->list=NewImageList();
  if (detach == MagickFalse)
    clone_image->blob=ReferenceBlob(image->blob);
  else
    {
      /* Detached clone: unlink from the list and give it a fresh blob. */
      clone_image->next=NewImageList();
      clone_image->previous=NewImageList();
      clone_image->blob=CloneBlobInfo((BlobInfo *) NULL);
    }
  clone_image->ping=image->ping;
  clone_image->debug=IsEventLogging();
  clone_image->semaphore=AcquireSemaphoreInfo();
  if (image->colormap != (PixelInfo *) NULL)
    {
      /*
        Allocate and copy the image colormap (+1 slot of slack).
      */
      clone_image->colors=image->colors;
      length=(size_t) image->colors;
      clone_image->colormap=(PixelInfo *) AcquireQuantumMemory(length+1,
        sizeof(*clone_image->colormap));
      if (clone_image->colormap == (PixelInfo *) NULL)
        {
          clone_image=DestroyImage(clone_image);
          ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
        }
      (void) CopyMagickMemory(clone_image->colormap,image->colormap,length*
        sizeof(*clone_image->colormap));
    }
  if ((columns == 0) || (rows == 0))
    {
      /* Exact copy: share the pixel cache by reference and return. */
      if (image->montage != (char *) NULL)
        (void) CloneString(&clone_image->montage,image->montage);
      if (image->directory != (char *) NULL)
        (void) CloneString(&clone_image->directory,image->directory);
      clone_image->cache=ReferencePixelCache(image->cache);
      return(clone_image);
    }
  /*
    Resized clone: scale page geometry and tile offsets proportionally;
    pixel data is left undefined for the caller to initialize.
  */
  scale=1.0;
  if (image->columns != 0)
    scale=(double) columns/(double) image->columns;
  clone_image->page.width=(size_t) floor(scale*image->page.width+0.5);
  clone_image->page.x=(ssize_t) ceil(scale*image->page.x-0.5);
  clone_image->tile_offset.x=(ssize_t) ceil(scale*image->tile_offset.x-0.5);
  scale=1.0;
  if (image->rows != 0)
    scale=(double) rows/(double) image->rows;
  clone_image->page.height=(size_t) floor(scale*image->page.height+0.5);
  clone_image->page.y=(ssize_t) ceil(scale*image->page.y-0.5);
  clone_image->tile_offset.y=(ssize_t) ceil(scale*image->tile_offset.y-0.5);
  clone_image->cache=ClonePixelCache(image->cache);
  if (SetImageExtent(clone_image,columns,rows,exception) == MagickFalse)
    clone_image=DestroyImage(clone_image);
  return(clone_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageInfo() makes a copy of the given image info structure. If
% NULL is specified, a new image info structure is created initialized to
% default values.
%
% The format of the CloneImageInfo method is:
%
% ImageInfo *CloneImageInfo(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport ImageInfo *CloneImageInfo(const ImageInfo *image_info)
{
  ImageInfo
    *clone_info;

  clone_info=AcquireImageInfo();
  /* NULL source: return a default-initialized ImageInfo. */
  if (image_info == (ImageInfo *) NULL)
    return(clone_info);
  /* Scalar settings copied by value. */
  clone_info->compression=image_info->compression;
  clone_info->temporary=image_info->temporary;
  clone_info->adjoin=image_info->adjoin;
  clone_info->antialias=image_info->antialias;
  clone_info->scene=image_info->scene;
  clone_info->number_scenes=image_info->number_scenes;
  clone_info->depth=image_info->depth;
  /* String members are deep-copied only when present. */
  if (image_info->size != (char *) NULL)
    (void) CloneString(&clone_info->size,image_info->size);
  if (image_info->extract != (char *) NULL)
    (void) CloneString(&clone_info->extract,image_info->extract);
  if (image_info->scenes != (char *) NULL)
    (void) CloneString(&clone_info->scenes,image_info->scenes);
  if (image_info->page != (char *) NULL)
    (void) CloneString(&clone_info->page,image_info->page);
  clone_info->interlace=image_info->interlace;
  clone_info->endian=image_info->endian;
  clone_info->units=image_info->units;
  clone_info->quality=image_info->quality;
  if (image_info->sampling_factor != (char *) NULL)
    (void) CloneString(&clone_info->sampling_factor,
      image_info->sampling_factor);
  if (image_info->server_name != (char *) NULL)
    (void) CloneString(&clone_info->server_name,image_info->server_name);
  if (image_info->font != (char *) NULL)
    (void) CloneString(&clone_info->font,image_info->font);
  if (image_info->texture != (char *) NULL)
    (void) CloneString(&clone_info->texture,image_info->texture);
  if (image_info->density != (char *) NULL)
    (void) CloneString(&clone_info->density,image_info->density);
  clone_info->pointsize=image_info->pointsize;
  clone_info->fuzz=image_info->fuzz;
  clone_info->matte_color=image_info->matte_color;
  clone_info->background_color=image_info->background_color;
  clone_info->border_color=image_info->border_color;
  clone_info->transparent_color=image_info->transparent_color;
  clone_info->dither=image_info->dither;
  clone_info->monochrome=image_info->monochrome;
  clone_info->colorspace=image_info->colorspace;
  clone_info->type=image_info->type;
  clone_info->orientation=image_info->orientation;
  clone_info->ping=image_info->ping;
  clone_info->verbose=image_info->verbose;
  clone_info->progress_monitor=image_info->progress_monitor;
  clone_info->client_data=image_info->client_data;
  /* The pixel cache is shared by reference (reference-counted) if set. */
  clone_info->cache=image_info->cache;
  if (image_info->cache != (void *) NULL)
    clone_info->cache=ReferencePixelCache(image_info->cache);
  /* The profile blob is deep-copied. */
  if (image_info->profile != (void *) NULL)
    clone_info->profile=(void *) CloneStringInfo((StringInfo *)
      image_info->profile);
  /* File handles, blobs, and streams are shared, not duplicated. */
  SetImageInfoFile(clone_info,image_info->file);
  SetImageInfoBlob(clone_info,image_info->blob,image_info->length);
  clone_info->stream=image_info->stream;
  clone_info->custom_stream=image_info->custom_stream;
  (void) CopyMagickString(clone_info->magick,image_info->magick,
    MagickPathExtent);
  (void) CopyMagickString(clone_info->unique,image_info->unique,
    MagickPathExtent);
  (void) CopyMagickString(clone_info->filename,image_info->filename,
    MagickPathExtent);
  clone_info->channel=image_info->channel;
  (void) CloneImageOptions(clone_info,image_info);
  clone_info->debug=IsEventLogging();
  clone_info->signature=image_info->signature;
  return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o p y I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CopyImagePixels() copies pixels from the source image as defined by the
% geometry the destination image at the specified offset.
%
% The format of the CopyImagePixels method is:
%
% MagickBooleanType CopyImagePixels(Image *image,const Image *source_image,
% const RectangleInfo *geometry,const OffsetInfo *offset,
% ExceptionInfo *exception);
%
% A description of each parameter follows:
%
% o image: the destination image.
%
% o source_image: the source image.
%
% o geometry: define the dimensions of the source pixel rectangle.
%
% o offset: define the offset in the destination image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType CopyImagePixels(Image *image,
  const Image *source_image,const RectangleInfo *geometry,
  const OffsetInfo *offset,ExceptionInfo *exception)
{
#define CopyImageTag "Copy/Image"

  CacheView
    *image_view,
    *source_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  /* Fix: validate the signature like every sibling entry point does. */
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(source_image != (Image *) NULL);
  assert(geometry != (RectangleInfo *) NULL);
  assert(offset != (OffsetInfo *) NULL);
  /*
    The destination rectangle must lie entirely within the image.
  */
  if ((offset->x < 0) || (offset->y < 0) ||
      ((ssize_t) (offset->x+geometry->width) > (ssize_t) image->columns) ||
      ((ssize_t) (offset->y+geometry->height) > (ssize_t) image->rows))
    ThrowBinaryException(OptionError,"GeometryDoesNotContainImage",
      image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Copy image pixels.
  */
  status=MagickTrue;
  progress=0;
  source_view=AcquireVirtualCacheView(source_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,source_image,geometry->height,1)
#endif
  for (y=0; y < (ssize_t) geometry->height; y++)
  {
    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(source_view,geometry->x,y+geometry->y,
      geometry->width,1,exception);
    q=QueueCacheViewAuthenticPixels(image_view,offset->x,y+offset->y,
      geometry->width,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) geometry->width; x++)
    {
      register ssize_t
        i;

      /*
        Copy only channels that exist in both images and are updatable.
        NOTE(review): p is indexed by the destination channel index;
        this assumes matching channel layouts — confirm against callers.
      */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait source_traits=GetPixelChannelTraits(source_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            ((traits & UpdatePixelTrait) == 0) ||
            (source_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(image,channel,p[i],q);
      }
      p+=GetPixelChannels(source_image);
      q+=GetPixelChannels(image);
    }
    sync=SyncCacheViewAuthenticPixels(image_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_CopyImage)
#endif
        /*
          Fix: report progress against the number of copied rows
          (geometry->height, the loop extent), not the full image height,
          so partial-region copies report an accurate fraction.
        */
        proceed=SetImageProgress(image,CopyImageTag,progress++,
          geometry->height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImage() dereferences an image, deallocating memory associated with
% the image if the reference count becomes zero.
%
% The format of the DestroyImage method is:
%
% Image *DestroyImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *DestroyImage(Image *image)
{
  MagickBooleanType
    destroy;

  /*
    Dereference image: drop one reference under the image semaphore and only
    tear the image down when the count reaches zero.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  destroy=MagickFalse;
  LockSemaphoreInfo(image->semaphore);
  image->reference_count--;
  if (image->reference_count == 0)
    destroy=MagickTrue;
  UnlockSemaphoreInfo(image->semaphore);
  if (destroy == MagickFalse)
    return((Image *) NULL);  /* other references remain; nothing is freed */
  /*
    Destroy image: release every owned resource before the structure itself.
    The semaphore is relinquished last among the members since the structure
    is freed immediately afterwards.
  */
  DestroyImagePixels(image);
  image->channel_map=DestroyPixelChannelMap(image->channel_map);
  if (image->montage != (char *) NULL)
    image->montage=DestroyString(image->montage);
  if (image->directory != (char *) NULL)
    image->directory=DestroyString(image->directory);
  if (image->colormap != (PixelInfo *) NULL)
    image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap);
  if (image->geometry != (char *) NULL)
    image->geometry=DestroyString(image->geometry);
  DestroyImageProfiles(image);
  DestroyImageProperties(image);
  DestroyImageArtifacts(image);
  if (image->ascii85 != (Ascii85Info *) NULL)
    image->ascii85=(Ascii85Info *) RelinquishMagickMemory(image->ascii85);
  if (image->image_info != (ImageInfo *) NULL)
    image->image_info=DestroyImageInfo(image->image_info);
  DestroyBlob(image);
  if (image->semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&image->semaphore);
  /* Invalidate the signature so any stale pointer trips the signature
     asserts used throughout this file. */
  image->signature=(~MagickCoreSignature);
  image=(Image *) RelinquishMagickMemory(image);
  return(image);  /* always NULL at this point */
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageInfo() deallocates memory associated with an ImageInfo
% structure.
%
% The format of the DestroyImageInfo method is:
%
% ImageInfo *DestroyImageInfo(ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport ImageInfo *DestroyImageInfo(ImageInfo *image_info)
{
  /*
    Release every heap-allocated member owned by the image info, then the
    structure itself.  Each member is NULL-checked and reset through its
    matching destroy routine.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  if (image_info->size != (char *) NULL)
    image_info->size=DestroyString(image_info->size);
  if (image_info->extract != (char *) NULL)
    image_info->extract=DestroyString(image_info->extract);
  if (image_info->scenes != (char *) NULL)
    image_info->scenes=DestroyString(image_info->scenes);
  if (image_info->page != (char *) NULL)
    image_info->page=DestroyString(image_info->page);
  if (image_info->sampling_factor != (char *) NULL)
    image_info->sampling_factor=DestroyString(
      image_info->sampling_factor);
  if (image_info->server_name != (char *) NULL)
    image_info->server_name=DestroyString(
      image_info->server_name);
  if (image_info->font != (char *) NULL)
    image_info->font=DestroyString(image_info->font);
  if (image_info->texture != (char *) NULL)
    image_info->texture=DestroyString(image_info->texture);
  if (image_info->density != (char *) NULL)
    image_info->density=DestroyString(image_info->density);
  if (image_info->cache != (void *) NULL)
    image_info->cache=DestroyPixelCache(image_info->cache);
  if (image_info->profile != (StringInfo *) NULL)
    image_info->profile=(void *) DestroyStringInfo((StringInfo *)
      image_info->profile);
  DestroyImageOptions(image_info);
  /* Invalidate the signature so stale pointers fail the signature assert. */
  image_info->signature=(~MagickCoreSignature);
  image_info=(ImageInfo *) RelinquishMagickMemory(image_info);
  return(image_info);  /* always NULL at this point */
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i s a s s o c i a t e I m a g e S t r e a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DisassociateImageStream() disassociates the image stream. It checks if the
% blob of the specified image is referenced by other images. If the reference
% count is higher than 1, a new blob is assigned to the specified image.
%
% The format of the DisassociateImageStream method is:
%
% void DisassociateImageStream(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DisassociateImageStream(Image *image)
{
  /*
    Detach the image from its blob stream; all of the real work (reference
    checking, blob reassignment) is delegated to DisassociateBlob().
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  DisassociateBlob(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageInfo() initializes image_info to default values.
%
% The format of the GetImageInfo method is:
%
% void GetImageInfo(ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport void GetImageInfo(ImageInfo *image_info)
{
  char
    *synchronize;

  ExceptionInfo
    *exception;

  /*
    File and image dimension members: zero the whole structure, then set the
    non-zero defaults explicitly.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image_info != (ImageInfo *) NULL);
  (void) ResetMagickMemory(image_info,0,sizeof(*image_info));
  image_info->adjoin=MagickTrue;
  image_info->interlace=NoInterlace;
  image_info->channel=DefaultChannels;
  image_info->quality=UndefinedCompressionQuality;
  image_info->antialias=MagickTrue;
  image_info->dither=MagickTrue;
  /* Honor the MAGICK_SYNCHRONIZE environment override, if present. */
  synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (synchronize != (const char *) NULL)
    {
      image_info->synchronize=IsStringTrue(synchronize);
      synchronize=DestroyString(synchronize);
    }
  /*
    Resolve the default background/border/matte/transparent colors; a
    throwaway exception info collects (and discards) any color-lookup
    warnings.
  */
  exception=AcquireExceptionInfo();
  (void) QueryColorCompliance(BackgroundColor,AllCompliance,
    &image_info->background_color,exception);
  (void) QueryColorCompliance(BorderColor,AllCompliance,
    &image_info->border_color,exception);
  (void) QueryColorCompliance(MatteColor,AllCompliance,&image_info->matte_color,
    exception);
  (void) QueryColorCompliance(TransparentColor,AllCompliance,
    &image_info->transparent_color,exception);
  exception=DestroyExceptionInfo(exception);
  image_info->debug=IsEventLogging();
  image_info->signature=MagickCoreSignature;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e I n f o F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageInfoFile() returns the image info file member.
%
% The format of the GetImageInfoFile method is:
%
% FILE *GetImageInfoFile(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport FILE *GetImageInfoFile(const ImageInfo *image_info)
{
  /* Plain accessor: return the FILE handle stored in the image info. */
  return(image_info->file);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMask() returns the mask associated with the image.
%
% The format of the GetImageMask method is:
%
% Image *GetImageMask(const Image *image,const PixelMask type,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the mask type, ReadPixelMask or WritePixelMask.
%
*/
MagickExport Image *GetImageMask(const Image *image,const PixelMask type,
  ExceptionInfo *exception)
{
  CacheView
    *mask_view,
    *image_view;

  Image
    *mask_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Get image mask: render the requested mask channel of `image` (write mask
    for WritePixelMask, read mask otherwise) into a new grayscale image of
    the same dimensions.  Returns NULL on failure.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  mask_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (mask_image == (Image *) NULL)
    return((Image *) NULL);
  status=MagickTrue;
  mask_image->alpha_trait=UndefinedPixelTrait;
  (void) SetImageColorspace(mask_image,GRAYColorspace,exception);
  /* NOTE(review): presumably cleared so pixel writes below are not
     themselves filtered by the clone's inherited read mask -- confirm
     against the pixel cache mask semantics. */
  mask_image->read_mask=MagickFalse;
  image_view=AcquireVirtualCacheView(image,exception);
  mask_view=AcquireAuthenticCacheView(mask_image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* Read one source row, write one destination row. */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(mask_view,0,y,mask_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      switch (type)
      {
        case WritePixelMask:
        {
          SetPixelGray(mask_image,GetPixelWriteMask(image,p),q);
          break;
        }
        default:
        {
          SetPixelGray(mask_image,GetPixelReadMask(image,p),q);
          break;
        }
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(mask_image);
    }
    if (SyncCacheViewAuthenticPixels(mask_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  mask_view=DestroyCacheView(mask_view);
  image_view=DestroyCacheView(image_view);
  /* On any row failure, discard the partial mask and signal failure. */
  if (status == MagickFalse)
    mask_image=DestroyImage(mask_image);
  return(mask_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e R e f e r e n c e C o u n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageReferenceCount() returns the image reference count.
%
% The format of the GetImageReferenceCount method is:
%
% ssize_t GetImageReferenceCount(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport ssize_t GetImageReferenceCount(Image *image)
{
  ssize_t
    count;

  /*
    Snapshot the image reference count under the image semaphore so the
    value read is consistent with concurrent Reference/Destroy calls.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  LockSemaphoreInfo(image->semaphore);
  count=image->reference_count;
  UnlockSemaphoreInfo(image->semaphore);
  return(count);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageVirtualPixelMethod() gets the "virtual pixels" method for the
% image. A virtual pixel is any pixel access that is outside the boundaries
% of the image cache.
%
% The format of the GetImageVirtualPixelMethod() method is:
%
% VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
{
  /*
    Thin wrapper: the virtual pixel method lives in the pixel cache, so
    delegate to GetPixelCacheVirtualMethod().
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  return(GetPixelCacheVirtualMethod(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p r e t I m a g e F i l e n a m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpretImageFilename() interprets embedded characters in an image filename.
% The filename length is returned.
%
% The format of the InterpretImageFilename method is:
%
% size_t InterpretImageFilename(const ImageInfo *image_info,Image *image,
% const char *format,int value,char *filename,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: the image.
%
% o format: A filename describing the format to use to write the numeric
% argument. Only the first numeric format identifier is replaced.
%
% o value: Numeric value to substitute into format filename.
%
% o filename: return the formatted filename in this character buffer.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport size_t InterpretImageFilename(const ImageInfo *image_info,
  Image *image,const char *format,int value,char *filename,
  ExceptionInfo *exception)
{
  char
    *q;

  int
    c;

  MagickBooleanType
    canonical;  /* set once any substitution is performed */

  register const char
    *p;

  size_t
    length;  /* net growth from %[filename:...] expansions seen so far */

  /*
    Expand embedded escapes in `format` into `filename`:
      %d/%o/%x  -- printf-style substitution of `value` (optionally %0Nd)
      %[filename:...] -- image property/artifact/option lookup
      %%        -- literal percent
    Returns strlen(filename).  If nothing was substituted, `filename` is
    reset to a verbatim copy of `format`.
  */
  canonical=MagickFalse;
  length=0;
  (void) CopyMagickString(filename,format,MagickPathExtent);
  for (p=strchr(format,'%'); p != (char *) NULL; p=strchr(p+1,'%'))
  {
    q=(char *) p+1;
    if (*q == '%')
      {
        /* NOTE(review): advancing p past "%%" here means the loop's
           strchr(p+1,...) resumes one char later still; a conversion
           immediately following "%%" looks skippable -- confirm intended. */
        p=q+1;
        continue;
      }
    if (*q == '0')
      {
        ssize_t
          foo;

        /* Consume a leading zero-padded width (e.g. %03d); the value is
           only needed to advance q past the digits. */
        foo=(ssize_t) strtol(q,&q,10);
        (void) foo;
      }
    switch (*q)
    {
      case 'd':
      case 'o':
      case 'x':
      {
        /* Temporarily terminate at the conversion char, format `value` in
           place, then restore and re-append the tail. */
        q++;
        c=(*q);
        *q='\0';
        (void) FormatLocaleString(filename+(p-format),(size_t)
          (MagickPathExtent-(p-format)),p,value);
        *q=c;
        (void) ConcatenateMagickString(filename,q,MagickPathExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      case '[':
      {
        char
          pattern[MagickPathExtent];

        const char
          *option;

        register char
          *r;

        register ssize_t
          i;

        ssize_t
          depth;  /* bracket nesting depth while scanning for the closing ] */

        /*
          Image option.
        */
        /* FUTURE: Compare update with code from InterpretImageProperties()
           Note that a 'filename:' property should not need depth recursion.
        */
        if (strchr(p,']') == (char *) NULL)
          break;
        depth=1;
        r=q+1;
        /* Copy the bracketed key (supporting nested [ ]) into pattern. */
        for (i=0; (i < (MagickPathExtent-1L)) && (*r != '\0'); i++)
        {
          if (*r == '[')
            depth++;
          if (*r == ']')
            depth--;
          if (depth <= 0)
            break;
          pattern[i]=(*r++);
        }
        pattern[i]='\0';
        /* Only "filename:" keys are interpreted here. */
        if (LocaleNCompare(pattern,"filename:",9) != 0)
          break;
        /* Lookup order: image property, image artifact, image option. */
        option=(const char *) NULL;
        if (image != (Image *) NULL)
          option=GetImageProperty(image,pattern,exception);
        if ((option == (const char *) NULL) && (image != (Image *) NULL))
          option=GetImageArtifact(image,pattern);
        if ((option == (const char *) NULL) &&
            (image_info != (ImageInfo *) NULL))
          option=GetImageOption(image_info,pattern);
        if (option == (const char *) NULL)
          break;
        /* Splice the option value over the "%[...]" span; `length` tracks
           accumulated offset drift from earlier expansions. */
        q--;
        c=(*q);
        *q='\0';
        (void) CopyMagickString(filename+(p-format-length),option,(size_t)
          (MagickPathExtent-(p-format-length)));
        length+=strlen(pattern)-1;
        *q=c;
        (void) ConcatenateMagickString(filename,r+1,MagickPathExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      default:
        break;
    }
  }
  /* Collapse any remaining "%%" pairs to a single '%'. */
  for (q=filename; *q != '\0'; q++)
    if ((*q == '%') && (*(q+1) == '%'))
      {
        (void) CopyMagickString(q,q+1,(size_t) (MagickPathExtent-(q-filename)));
        canonical=MagickTrue;
      }
  if (canonical == MagickFalse)
    (void) CopyMagickString(filename,format,MagickPathExtent);
  return(strlen(filename));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s H i g h D y n a m i c R a n g e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsHighDynamicRangeImage() returns MagickTrue if any pixel component is
% non-integer or exceeds the bounds of the quantum depth (e.g. for Q16,
% 0..65535).
%
% The format of the IsHighDynamicRangeImage method is:
%
% MagickBooleanType IsHighDynamicRangeImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsHighDynamicRangeImage(const Image *image,
  ExceptionInfo *exception)
{
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  /* Non-HDRI builds store quantized integer pixels, so no pixel can ever
     be out of range or fractional. */
  (void) image;
  (void) exception;
  return(MagickFalse);
#else
  CacheView
    *image_view;

  MagickBooleanType
    status;  /* MagickTrue while every pixel seen is an in-range integer */

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      /* Skip pixels masked out for writing. */
      if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
        {
          p+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelTrait
          traits;

        traits=GetPixelChannelTraits(image,(PixelChannel) i);
        if (traits == UndefinedPixelTrait)
          continue;
        pixel=(double) p[i];
        /* Out of [0, QuantumRange] or non-integral => HDR sample found. */
        if ((pixel < 0.0) || (pixel > QuantumRange) ||
            (pixel != (double) ((QuantumAny) pixel)))
          break;
      }
      p+=GetPixelChannels(image);
      /* Early break above means an HDR sample was found in this pixel. */
      if (i < (ssize_t) GetPixelChannels(image))
        status=MagickFalse;
    }
    /* Defensive: the x loop has no break, so x == image->columns here. */
    if (x < (ssize_t) image->columns)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  /* status still MagickTrue => all samples in range => not HDR. */
  return(status != MagickFalse ? MagickFalse : MagickTrue);
#endif
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e O b j e c t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageObject() returns MagickTrue if the image sequence contains a valid
% set of image objects.
%
% The format of the IsImageObject method is:
%
% MagickBooleanType IsImageObject(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsImageObject(const Image *image)
{
  register const Image
    *next;

  /*
    Walk the image sequence; it is a valid object list only if every image
    in it carries the MagickCore signature.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  next=image;
  while (next != (Image *) NULL)
  {
    if (next->signature != MagickCoreSignature)
      return(MagickFalse);
    next=GetNextImageInList(next);
  }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s T a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsTaintImage() returns MagickTrue if any pixel in the image has been altered
% since it was first constituted.
%
% The format of the IsTaintImage method is:
%
% MagickBooleanType IsTaintImage(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsTaintImage(const Image *image)
{
  char
    filename[MagickPathExtent],
    magick[MagickPathExtent];

  register const Image
    *next;

  /*
    An image sequence is tainted when any frame is flagged as altered, or
    any frame's magick/filename no longer matches those recorded for the
    first frame.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  (void) CopyMagickString(magick,image->magick,MagickPathExtent);
  (void) CopyMagickString(filename,image->filename,MagickPathExtent);
  for (next=image; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    if ((next->taint != MagickFalse) ||
        (LocaleCompare(next->magick,magick) != 0) ||
        (LocaleCompare(next->filename,filename) != 0))
      return(MagickTrue);
  }
  return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o d i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ModifyImage() ensures that there is only a single reference to the image
% to be modified, updating the provided image pointer to point to a clone of
% the original image if necessary.
%
% The format of the ModifyImage method is:
%
% MagickBooleanType ModifyImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ModifyImage(Image **image,
  ExceptionInfo *exception)
{
  Image
    *clone_image;

  /*
    Ensure the caller holds the only reference to *image, cloning it if it
    is shared.  Returns MagickFalse (with *image untouched) if the clone
    cannot be created.
  */
  assert(image != (Image **) NULL);
  assert(*image != (Image *) NULL);
  assert((*image)->signature == MagickCoreSignature);
  if ((*image)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
  if (GetImageReferenceCount(*image) <= 1)
    return(MagickTrue);  /* already exclusive; nothing to do */
  /*
    Clone before dropping our reference: if CloneImage() fails we must not
    decrement the shared reference count or hand the caller a NULL image.
  */
  clone_image=CloneImage(*image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return(MagickFalse);
  LockSemaphoreInfo((*image)->semaphore);
  (*image)->reference_count--;
  UnlockSemaphoreInfo((*image)->semaphore);
  *image=clone_image;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w M a g i c k I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewMagickImage() creates a blank image canvas of the specified size and
% background color.
%
% The format of the NewMagickImage method is:
%
% Image *NewMagickImage(const ImageInfo *image_info,const size_t width,
% const size_t height,const PixelInfo *background,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the image width.
%
% o height: the image height.
%
% o background: the image color.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *NewMagickImage(const ImageInfo *image_info,
  const size_t width,const size_t height,const PixelInfo *background,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  Image
    *image;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Create a width x height canvas whose colorspace/alpha/fuzz/depth come
    from `background`, then flood every pixel with that color.  Returns NULL
    if any row cannot be written.
  */
  assert(image_info != (const ImageInfo *) NULL);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image_info->signature == MagickCoreSignature);
  assert(background != (const PixelInfo *) NULL);
  image=AcquireImage(image_info,exception);
  image->columns=width;
  image->rows=height;
  image->colorspace=background->colorspace;
  image->alpha_trait=background->alpha_trait;
  image->fuzz=background->fuzz;
  image->depth=background->depth;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /* A prior row failure short-circuits remaining iterations. */
    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelViaPixelInfo(image,background,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  /* Discard the partially-initialized canvas on failure. */
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e f e r e n c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReferenceImage() increments the reference count associated with an image
% returning a pointer to the image.
%
% The format of the ReferenceImage method is:
%
% Image *ReferenceImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *ReferenceImage(Image *image)
{
  /*
    Increment the image reference count under the image semaphore and
    return the same image pointer for caller convenience.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  LockSemaphoreInfo(image->semaphore);
  image->reference_count++;
  UnlockSemaphoreInfo(image->semaphore);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t I m a g e P a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImagePage() resets the image page canvas and position.
%
% The format of the ResetImagePage method is:
%
% MagickBooleanType ResetImagePage(Image *image,const char *page)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o page: the relative page specification.
%
*/
MagickExport MagickBooleanType ResetImagePage(Image *image,const char *page)
{
  MagickStatusType
    flags;

  RectangleInfo
    geometry;

  /*
    Parse the relative page specification and fold it into the image page
    canvas: width/height replace the canvas size, while x/y either offset
    (aspect flag) or replace the canvas position.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  flags=ParseAbsoluteGeometry(page,&geometry);
  if ((flags & WidthValue) != 0)
    {
      /* A lone width implies a square canvas. */
      if ((flags & HeightValue) == 0)
        geometry.height=geometry.width;
      image->page.width=geometry.width;
      image->page.height=geometry.height;
    }
  if ((flags & XValue) != 0)
    {
      if ((flags & AspectValue) != 0)
        image->page.x+=geometry.x;
      else
        {
          image->page.x=geometry.x;
          /* Grow an unset canvas width to cover a positive offset. */
          if ((image->page.width == 0) && (geometry.x > 0))
            image->page.width=image->columns+geometry.x;
        }
    }
  if ((flags & YValue) != 0)
    {
      if ((flags & AspectValue) != 0)
        image->page.y+=geometry.y;
      else
        {
          image->page.y=geometry.y;
          /* Grow an unset canvas height to cover a positive offset. */
          if ((image->page.height == 0) && (geometry.y > 0))
            image->page.height=image->rows+geometry.y;
        }
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e A l p h a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageAlpha() sets the alpha levels of the image.
%
% The format of the SetImageAlpha method is:
%
% MagickBooleanType SetImageAlpha(Image *image,const Quantum alpha,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o Alpha: the level of transparency: 0 is fully opaque and QuantumRange is
% fully transparent.
%
*/
MagickExport MagickBooleanType SetImageAlpha(Image *image,const Quantum alpha,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Set the alpha channel of every (write-unmasked) pixel to `alpha` and
    enable alpha blending on the image.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  image->alpha_trait=BlendPixelTrait;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Respect the write mask: only update unmasked pixels. */
      if (GetPixelWriteMask(image,q) > (QuantumRange/2))
        SetPixelAlpha(image,alpha,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e B a c k g r o u n d C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageBackgroundColor() initializes the image pixels to the image
% background color. The background color is defined by the background_color
% member of the image structure.
%
% The format of the SetImageBackgroundColor method is:
%
% MagickBooleanType SetImageBackgroundColor(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageBackgroundColor(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  PixelInfo
    background;

  ssize_t
    y;

  /*
    Flood every pixel with the image's background_color member.  The color
    is first conformed to the image's colorspace/traits; a translucent
    background forces the alpha channel on.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if ((image->background_color.alpha != OpaqueAlpha) &&
      (image->alpha_trait == UndefinedPixelTrait))
    (void) SetImageAlphaChannel(image,OnAlphaChannel,exception);
  ConformPixelInfo(image,&image->background_color,&background,exception);
  /*
    Set image background color.
  */
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelViaPixelInfo(image,&background,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C h a n n e l M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageChannelMask() sets the image channel mask from the specified channel
% mask.
%
% The format of the SetImageChannelMask method is:
%
% ChannelType SetImageChannelMask(Image *image,
% const ChannelType channel_mask)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel_mask: the channel mask.
%
*/
MagickExport ChannelType SetImageChannelMask(Image *image,
  const ChannelType channel_mask)
{
  /* Thin wrapper: delegate to SetPixelChannelMask() and return its result
     (presumably the prior mask -- confirm against the pixel-channel API). */
  return(SetPixelChannelMask(image,channel_mask));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColor() set the entire image canvas to the specified color.
%
% The format of the SetImageColor method is:
%
% MagickBooleanType SetImageColor(Image *image,const PixelInfo *color,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o background: the image color.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageColor(Image *image,
  const PixelInfo *color,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Set the entire canvas to `color`, adopting the color's colorspace,
    alpha trait, fuzz and depth for the image.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  assert(color != (const PixelInfo *) NULL);
  image->colorspace=color->colorspace;
  image->alpha_trait=color->alpha_trait;
  image->fuzz=color->fuzz;
  image->depth=color->depth;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelViaPixelInfo(image,color,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e S t o r a g e C l a s s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageStorageClass() sets the image class: DirectClass for true color
% images or PseudoClass for colormapped images.
%
% The format of the SetImageStorageClass method is:
%
% MagickBooleanType SetImageStorageClass(Image *image,
% const ClassType storage_class,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o storage_class: The image class.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageStorageClass(Image *image,
  const ClassType storage_class,ExceptionInfo *exception)
{
  /*
    Record the new storage class (DirectClass or PseudoClass), then
    resynchronize the pixel cache so its layout matches.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image->storage_class=storage_class;
  return(SyncImagePixelCache(image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageExtent() sets the image size (i.e. columns & rows).
%
% The format of the SetImageExtent method is:
%
% MagickBooleanType SetImageExtent(Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: The image width in pixels.
%
% o rows: The image height in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageExtent(Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
  /*
    Set the image size (columns & rows) and re-sync the pixel cache.
    Adds the NULL/signature asserts used by sibling setters, and validates
    the depth BEFORE mutating the geometry so a failed call does not leave
    the image with new dimensions but an unsynced cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowBinaryException(ImageError,"NegativeOrZeroImageSize",image->filename);
  if (image->depth > (8*sizeof(MagickSizeType)))
    ThrowBinaryException(ImageError,"ImageDepthNotSupported",image->filename);
  image->columns=columns;
  image->rows=rows;
  return(SyncImagePixelCache(image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfo() initializes the 'magick' field of the ImageInfo structure.
% It is set to a type of image format based on the prefix or suffix of the
% filename. For example, 'ps:image' returns PS indicating a Postscript image.
% JPEG is returned for this filename: 'image.jpg'. The filename prefix has
% precedence over the suffix. Use an optional index enclosed in brackets
% after a file name to specify a desired scene of a multi-resolution image
% format like Photo CD (e.g. img0001.pcd[4]). A True (non-zero) return value
% indicates success.
%
% The format of the SetImageInfo method is:
%
% MagickBooleanType SetImageInfo(ImageInfo *image_info,
% const unsigned int frames,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o frames: the number of images you intend to write.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageInfo(ImageInfo *image_info,
  const unsigned int frames,ExceptionInfo *exception)
{
  char
    component[MagickPathExtent],
    magic[MagickPathExtent],
    *q;

  const MagicInfo
    *magic_info;

  const MagickInfo
    *magick_info;

  ExceptionInfo
    *sans_exception;

  Image
    *image;

  MagickBooleanType
    status;

  register const char
    *p;

  ssize_t
    count;

  /*
    Look for 'image.format' in filename.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  *component='\0';
  GetPathComponent(image_info->filename,SubimagePath,component);
  if (*component != '\0')
    {
      /*
        Look for scene specification (e.g. img0001.pcd[4]).
      */
      if (IsSceneGeometry(component,MagickFalse) == MagickFalse)
        {
          /* Not a scene list; treat a geometry-like subimage spec as an
             extract region instead. */
          if (IsGeometry(component) != MagickFalse)
            (void) CloneString(&image_info->extract,component);
        }
      else
        {
          size_t
            first,
            last;

          (void) CloneString(&image_info->scenes,component);
          image_info->scene=StringToUnsignedLong(image_info->scenes);
          image_info->number_scenes=image_info->scene;
          p=image_info->scenes;
          /* Parse a comma-separated list of scene ranges (e.g. "2,4-7").
             p walks forward over separators; q is set by strtol() to just
             past each parsed number, and the loop ends when q reaches the
             terminating '\0'. */
          for (q=(char *) image_info->scenes; *q != '\0'; p++)
          {
            while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ','))
              p++;
            first=(size_t) strtol(p,&q,10);
            last=first;
            while (isspace((int) ((unsigned char) *q)) != 0)
              q++;
            if (*q == '-')
              last=(size_t) strtol(q+1,&q,10);
            if (first > last)
              Swap(first,last);
            /* Track the overall [min,max] bounds across all ranges. */
            if (first < image_info->scene)
              image_info->scene=first;
            if (last > image_info->number_scenes)
              image_info->number_scenes=last;
            p=q;
          }
          /* Convert the inclusive [scene,number_scenes] bounds into a
             start + count pair. */
          image_info->number_scenes-=image_info->scene-1;
        }
    }
  *component='\0';
  if (*image_info->magick == '\0')
    GetPathComponent(image_info->filename,ExtensionPath,component);
#if defined(MAGICKCORE_ZLIB_DELEGATE)
  /* For gzip-style wrappers (e.g. image.jpg.gz) strip the compression
     suffix and re-extract the underlying image extension. */
  if (*component != '\0')
    if ((LocaleCompare(component,"gz") == 0) ||
        (LocaleCompare(component,"Z") == 0) ||
        (LocaleCompare(component,"svgz") == 0) ||
        (LocaleCompare(component,"wmz") == 0))
      {
        char
          path[MagickPathExtent];

        (void) CopyMagickString(path,image_info->filename,MagickPathExtent);
        path[strlen(path)-strlen(component)-1]='\0';
        GetPathComponent(path,ExtensionPath,component);
      }
#endif
#if defined(MAGICKCORE_BZLIB_DELEGATE)
  /* Likewise for bzip2 (.bz2) wrappers. */
  if (*component != '\0')
    if (LocaleCompare(component,"bz2") == 0)
      {
        char
          path[MagickPathExtent];

        (void) CopyMagickString(path,image_info->filename,MagickPathExtent);
        path[strlen(path)-strlen(component)-1]='\0';
        GetPathComponent(path,ExtensionPath,component);
      }
#endif
  image_info->affirm=MagickFalse;
  sans_exception=AcquireExceptionInfo();
  if (*component != '\0')
    {
      MagickFormatType
        format_type;

      register ssize_t
        i;

      /* Pseudo-formats that name an operation rather than a codec; a
         match marks the format as explicit (affirmed). */
      static const char
        *format_type_formats[] =
        {
          "AUTOTRACE",
          "BROWSE",
          "DCRAW",
          "EDIT",
          "LAUNCH",
          "MPEG:DECODE",
          "MPEG:ENCODE",
          "PRINT",
          "PS:ALPHA",
          "PS:CMYK",
          "PS:COLOR",
          "PS:GRAY",
          "PS:MONO",
          "SCAN",
          "SHOW",
          "WIN",
          (char *) NULL
        };

      /*
        User specified image format.
      */
      (void) CopyMagickString(magic,component,MagickPathExtent);
      LocaleUpper(magic);
      /*
        Look for explicit image formats.
      */
      format_type=UndefinedFormatType;
      magick_info=GetMagickInfo(magic,sans_exception);
      if ((magick_info != (const MagickInfo *) NULL) &&
          (magick_info->format_type != UndefinedFormatType))
        format_type=magick_info->format_type;
      i=0;
      while ((format_type == UndefinedFormatType) &&
             (format_type_formats[i] != (char *) NULL))
      {
        /* First-character check is a cheap pre-filter before the full
           case-insensitive compare. */
        if ((*magic == *format_type_formats[i]) &&
            (LocaleCompare(magic,format_type_formats[i]) == 0))
          format_type=ExplicitFormatType;
        i++;
      }
      if (format_type == UndefinedFormatType)
        (void) CopyMagickString(image_info->magick,magic,MagickPathExtent);
      else
        if (format_type == ExplicitFormatType)
          {
            image_info->affirm=MagickTrue;
            (void) CopyMagickString(image_info->magick,magic,MagickPathExtent);
          }
      if (LocaleCompare(magic,"RGB") == 0)
        image_info->affirm=MagickFalse;  /* maybe SGI disguised as RGB */
    }
  /*
    Look for explicit 'format:image' in filename.
  */
  *magic='\0';
  GetPathComponent(image_info->filename,MagickPath,magic);
  if (*magic == '\0')
    {
      /* No explicit prefix: fall back to whatever magick was derived from
         the extension above. */
      (void) CopyMagickString(magic,image_info->magick,MagickPathExtent);
      magick_info=GetMagickInfo(magic,sans_exception);
      GetPathComponent(image_info->filename,CanonicalPath,component);
      (void) CopyMagickString(image_info->filename,component,MagickPathExtent);
    }
  else
    {
      const DelegateInfo
        *delegate_info;

      /*
        User specified image format.
      */
      LocaleUpper(magic);
      magick_info=GetMagickInfo(magic,sans_exception);
      delegate_info=GetDelegateInfo(magic,"*",sans_exception);
      if (delegate_info == (const DelegateInfo *) NULL)
        delegate_info=GetDelegateInfo("*",magic,sans_exception);
      /* Honor the prefix only if a coder or delegate exists for it and
         it does not conflict with a reserved name. */
      if (((magick_info != (const MagickInfo *) NULL) ||
           (delegate_info != (const DelegateInfo *) NULL)) &&
          (IsMagickConflict(magic) == MagickFalse))
        {
          image_info->affirm=MagickTrue;
          (void) CopyMagickString(image_info->magick,magic,MagickPathExtent);
          GetPathComponent(image_info->filename,CanonicalPath,component);
          (void) CopyMagickString(image_info->filename,component,
            MagickPathExtent);
        }
    }
  sans_exception=DestroyExceptionInfo(sans_exception);
  if ((magick_info == (const MagickInfo *) NULL) ||
      (GetMagickEndianSupport(magick_info) == MagickFalse))
    image_info->endian=UndefinedEndian;
  if ((image_info->adjoin != MagickFalse) && (frames > 1))
    {
      /*
        Test for multiple image support (e.g. image%02d.png).
      */
      (void) InterpretImageFilename(image_info,(Image *) NULL,
        image_info->filename,(int) image_info->scene,component,exception);
      /* A filename that expands a scene template (and has no remaining
         '%') writes one file per frame, so adjoin is disabled. */
      if ((LocaleCompare(component,image_info->filename) != 0) &&
          (strchr(component,'%') == (char *) NULL))
        image_info->adjoin=MagickFalse;
    }
  if ((image_info->adjoin != MagickFalse) && (frames > 0))
    {
      /*
        Some image formats do not support multiple frames per file.
      */
      magick_info=GetMagickInfo(magic,exception);
      if (magick_info != (const MagickInfo *) NULL)
        if (GetMagickAdjoin(magick_info) == MagickFalse)
          image_info->adjoin=MagickFalse;
    }
  if (image_info->affirm != MagickFalse)
    return(MagickTrue);
  if (frames == 0)
    {
      /* Note: this local 'magick' (a byte buffer) is distinct from the
         outer 'magic' string. */
      unsigned char
        *magick;

      size_t
        magick_size;

      /*
        Determine the image format from the first few bytes of the file.
      */
      magick_size=GetMagicPatternExtent(exception);
      if (magick_size == 0)
        return(MagickFalse);
      image=AcquireImage(image_info,exception);
      (void) CopyMagickString(image->filename,image_info->filename,
        MagickPathExtent);
      status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
      if (status == MagickFalse)
        {
          image=DestroyImage(image);
          return(MagickFalse);
        }
      if ((IsBlobSeekable(image) == MagickFalse) ||
          (IsBlobExempt(image) != MagickFalse))
        {
          /*
            Copy standard input or pipe to temporary file.
          */
          *component='\0';
          status=ImageToFile(image,component,exception);
          (void) CloseBlob(image);
          if (status == MagickFalse)
            {
              image=DestroyImage(image);
              return(MagickFalse);
            }
          SetImageInfoFile(image_info,(FILE *) NULL);
          (void) CopyMagickString(image->filename,component,MagickPathExtent);
          status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
          if (status == MagickFalse)
            {
              image=DestroyImage(image);
              return(MagickFalse);
            }
          (void) CopyMagickString(image_info->filename,component,
            MagickPathExtent);
          /* Mark the spooled file so the caller knows to remove it. */
          image_info->temporary=MagickTrue;
        }
      magick=(unsigned char *) AcquireMagickMemory(magick_size);
      if (magick == (unsigned char *) NULL)
        {
          (void) CloseBlob(image);
          image=DestroyImage(image);
          return(MagickFalse);
        }
      (void) ResetMagickMemory(magick,0,magick_size);
      count=ReadBlob(image,magick_size,magick);
      /* Rewind past the sniffed header so a subsequent read starts at the
         beginning of the stream. */
      (void) SeekBlob(image,-((MagickOffsetType) count),SEEK_CUR);
      (void) CloseBlob(image);
      image=DestroyImage(image);
      /*
        Check magic.xml configuration file.
      */
      sans_exception=AcquireExceptionInfo();
      magic_info=GetMagicInfo(magick,(size_t) count,sans_exception);
      magick=(unsigned char *) RelinquishMagickMemory(magick);
      if ((magic_info != (const MagicInfo *) NULL) &&
          (GetMagicName(magic_info) != (char *) NULL))
        {
          /*
            Try to use magick_info that was determined earlier by the extension
          */
          if ((magick_info != (const MagickInfo *) NULL) &&
              (GetMagickUseExtension(magick_info) != MagickFalse) &&
              (LocaleCompare(magick_info->module,GetMagicName(
                magic_info)) == 0))
            (void) CopyMagickString(image_info->magick,magick_info->name,
              MagickPathExtent);
          else
            {
              (void) CopyMagickString(image_info->magick,GetMagicName(
                magic_info),MagickPathExtent);
              magick_info=GetMagickInfo(image_info->magick,sans_exception);
            }
          if ((magick_info == (const MagickInfo *) NULL) ||
              (GetMagickEndianSupport(magick_info) == MagickFalse))
            image_info->endian=UndefinedEndian;
          sans_exception=DestroyExceptionInfo(sans_exception);
          return(MagickTrue);
        }
      magick_info=GetMagickInfo(image_info->magick,sans_exception);
      if ((magick_info == (const MagickInfo *) NULL) ||
          (GetMagickEndianSupport(magick_info) == MagickFalse))
        image_info->endian=UndefinedEndian;
      sans_exception=DestroyExceptionInfo(sans_exception);
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o B l o b %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoBlob() sets the image info blob member.
%
% The format of the SetImageInfoBlob method is:
%
% void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
% const size_t length)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o blob: the blob.
%
% o length: the blob length.
%
*/
MagickExport void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
  const size_t length)
{
  /*
    Attach an in-memory blob (and its length) to the image info.  The
    pointer is stored as-is -- the blob is referenced, not copied, so the
    caller must keep it alive while the image info uses it.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  image_info->blob=(void *) blob;
  image_info->length=length;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o C u s t o m S t r e a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoCustomStream() sets the image info custom stream handlers.
%
% The format of the SetImageInfoCustomStream method is:
%
% void SetImageInfoCustomStream(ImageInfo *image_info,
% CustomStreamInfo *custom_stream)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o custom_stream: your custom stream methods.
%
*/
MagickExport void SetImageInfoCustomStream(ImageInfo *image_info,
  CustomStreamInfo *custom_stream)
{
  /*
    Install the caller's custom stream handlers on the image info.  The
    handle is stored by reference; ownership stays with the caller.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  image_info->custom_stream=(CustomStreamInfo *) custom_stream;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoFile() sets the image info file member.
%
% The format of the SetImageInfoFile method is:
%
% void SetImageInfoFile(ImageInfo *image_info,FILE *file)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o file: the file.
%
*/
MagickExport void SetImageInfoFile(ImageInfo *image_info,FILE *file)
{
  /*
    Set the stdio stream the image info reads from / writes to.  Pass
    NULL to detach a previously set stream; the FILE is not closed here.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  image_info->file=file;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageMask() associates a mask with the image. The mask must be the same
% dimensions as the image.
%
% The format of the SetImageMask method is:
%
% MagickBooleanType SetImageMask(Image *image,const PixelMask type,
% const Image *mask,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the mask type, ReadPixelMask or WritePixelMask.
%
% o mask: the image mask.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageMask(Image *image,const PixelMask type,
  const Image *mask,ExceptionInfo *exception)
{
  CacheView
    *mask_view,
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Set image mask.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (mask == (const Image *) NULL)
    {
      /* A NULL mask clears the selected mask flag and re-syncs the cache. */
      switch (type)
      {
        case WritePixelMask: image->write_mask=MagickFalse; break;
        default: image->read_mask=MagickFalse; break;
      }
      return(SyncImagePixelCache(image,exception));
    }
  /* Raise the flag first so the cache sync allocates the mask channel. */
  switch (type)
  {
    case WritePixelMask: image->write_mask=MagickTrue; break;
    default: image->read_mask=MagickTrue; break;
  }
  if (SyncImagePixelCache(image,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  mask_view=AcquireVirtualCacheView(mask,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(mask,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(mask_view,0,y,mask->columns,1,exception);
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        intensity;

      /* Image pixels outside the mask's extent get intensity 0. */
      intensity=0.0;
      if ((x < (ssize_t) mask->columns) && (y < (ssize_t) mask->rows))
        intensity=GetPixelIntensity(mask,p);
      switch (type)
      {
        case WritePixelMask:
        {
          SetPixelWriteMask(image,ClampToQuantum(intensity),q);
          break;
        }
        default:
        {
          SetPixelReadMask(image,ClampToQuantum(intensity),q);
          break;
        }
      }
      p+=GetPixelChannels(mask);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  mask_view=DestroyCacheView(mask_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e R e g i o n M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageRegionMask() associates a mask with the image as defined by the
% specified region.
%
% The format of the SetImageRegionMask method is:
%
% MagickBooleanType SetImageRegionMask(Image *image,const PixelMask type,
% const RectangleInfo *region,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the mask type, ReadPixelMask or WritePixelMask.
%
% o geometry: the mask region.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageRegionMask(Image *image,
  const PixelMask type,const RectangleInfo *region,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Set image mask as defined by the region.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (region == (const RectangleInfo *) NULL)
    {
      /* A NULL region clears the selected mask flag and re-syncs the
         cache. */
      switch (type)
      {
        case WritePixelMask: image->write_mask=MagickFalse; break;
        default: image->read_mask=MagickFalse; break;
      }
      return(SyncImagePixelCache(image,exception));
    }
  switch (type)
  {
    case WritePixelMask: image->write_mask=MagickTrue; break;
    default: image->read_mask=MagickTrue; break;
  }
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      Quantum
        pixel;

      /* Mask is QuantumRange inside the rectangle, 0 outside. */
      pixel=(Quantum) 0;
      if (((x >= region->x) && (x < (region->x+(ssize_t) region->width))) &&
          ((y >= region->y) && (y < (region->y+(ssize_t) region->height))))
        pixel=QuantumRange;
      switch (type)
      {
        case WritePixelMask:
        {
          SetPixelWriteMask(image,pixel,q);
          break;
        }
        default:
        {
          SetPixelReadMask(image,pixel,q);
          break;
        }
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageVirtualPixelMethod() sets the "virtual pixels" method for the
% image and returns the previous setting. A virtual pixel is any pixel access
% that is outside the boundaries of the image cache.
%
% The format of the SetImageVirtualPixelMethod() method is:
%
% VirtualPixelMethod SetImageVirtualPixelMethod(Image *image,
% const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: choose the type of virtual pixel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport VirtualPixelMethod SetImageVirtualPixelMethod(Image *image,
const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
{
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
return(SetPixelCacheVirtualMethod(image,virtual_pixel_method,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S m u s h I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SmushImages() takes all images from the current image pointer to the end
% of the image list and smushes them to each other top-to-bottom if the
% stack parameter is true, otherwise left-to-right.
%
% The current gravity setting now affects how the image is justified in the
% final image.
%
% The format of the SmushImages method is:
%
% Image *SmushImages(const Image *images,const MagickBooleanType stack,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o stack: A value other than 0 stacks the images top-to-bottom.
%
% o offset: minimum distance in pixels between images.
%
% o exception: return any errors or warnings in this structure.
%
*/
static ssize_t SmushXGap(const Image *smush_image,const Image *images,
  const ssize_t offset,ExceptionInfo *exception)
{
  CacheView
    *left_view,
    *right_view;

  const Image
    *left_image,
    *right_image;

  RectangleInfo
    left_geometry,
    right_geometry;

  register const Quantum
    *p;

  register ssize_t
    i,
    y;

  size_t
    gap;

  ssize_t
    x;

  /*
    Measure how far the current image ('images') can slide left into its
    predecessor: per row, count transparent pixels at the right edge of
    the left image and the left edge of the right image; the minimum
    combined run over all rows (minus the requested offset) is returned.
  */
  if (images->previous == (Image *) NULL)
    return(0);
  right_image=images;
  SetGeometry(smush_image,&right_geometry);
  GravityAdjustGeometry(right_image->columns,right_image->rows,
    right_image->gravity,&right_geometry);
  left_image=images->previous;
  SetGeometry(smush_image,&left_geometry);
  GravityAdjustGeometry(left_image->columns,left_image->rows,
    left_image->gravity,&left_geometry);
  gap=right_image->columns;
  left_view=AcquireVirtualCacheView(left_image,exception);
  right_view=AcquireVirtualCacheView(right_image,exception);
  for (y=0; y < (ssize_t) smush_image->rows; y++)
  {
    /* Scan inward from the right edge of the left image. */
    for (x=(ssize_t) left_image->columns-1; x > 0; x--)
    {
      p=GetCacheViewVirtualPixels(left_view,x,left_geometry.y+y,1,1,exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(left_image,p) != TransparentAlpha) ||
          ((left_image->columns-x-1) >= gap))
        break;
    }
    i=(ssize_t) left_image->columns-x-1;
    /* Scan inward from the left edge of the right image; i carries over
       the transparent run already found on the left side. */
    for (x=0; x < (ssize_t) right_image->columns; x++)
    {
      p=GetCacheViewVirtualPixels(right_view,x,right_geometry.y+y,1,1,
        exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(right_image,p) != TransparentAlpha) ||
          ((x+i) >= (ssize_t) gap))
        break;
    }
    if ((x+i) < (ssize_t) gap)
      gap=(size_t) (x+i);
  }
  right_view=DestroyCacheView(right_view);
  left_view=DestroyCacheView(left_view);
  /* NOTE(review): the row loop above contains no break, so y always
     equals smush_image->rows here and this early return looks
     unreachable -- confirm before relying on it. */
  if (y < (ssize_t) smush_image->rows)
    return(offset);
  return((ssize_t) gap-offset);
}
static ssize_t SmushYGap(const Image *smush_image,const Image *images,
  const ssize_t offset,ExceptionInfo *exception)
{
  CacheView
    *bottom_view,
    *top_view;

  const Image
    *bottom_image,
    *top_image;

  RectangleInfo
    bottom_geometry,
    top_geometry;

  register const Quantum
    *p;

  register ssize_t
    i,
    x;

  size_t
    gap;

  ssize_t
    y;

  /*
    Vertical counterpart of SmushXGap: measure how far the current image
    ('images') can slide up into its predecessor.  Per column, count
    transparent pixels at the bottom edge of the top image and the top
    edge of the bottom image; the minimum combined run over all columns
    (minus the requested offset) is returned.
  */
  if (images->previous == (Image *) NULL)
    return(0);
  bottom_image=images;
  SetGeometry(smush_image,&bottom_geometry);
  GravityAdjustGeometry(bottom_image->columns,bottom_image->rows,
    bottom_image->gravity,&bottom_geometry);
  top_image=images->previous;
  SetGeometry(smush_image,&top_geometry);
  GravityAdjustGeometry(top_image->columns,top_image->rows,top_image->gravity,
    &top_geometry);
  gap=bottom_image->rows;
  top_view=AcquireVirtualCacheView(top_image,exception);
  bottom_view=AcquireVirtualCacheView(bottom_image,exception);
  for (x=0; x < (ssize_t) smush_image->columns; x++)
  {
    /* Scan upward from the bottom edge of the top image. */
    for (y=(ssize_t) top_image->rows-1; y > 0; y--)
    {
      p=GetCacheViewVirtualPixels(top_view,top_geometry.x+x,y,1,1,exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(top_image,p) != TransparentAlpha) ||
          ((top_image->rows-y-1) >= gap))
        break;
    }
    i=(ssize_t) top_image->rows-y-1;
    /* Scan downward from the top edge of the bottom image; i carries over
       the transparent run already found above. */
    for (y=0; y < (ssize_t) bottom_image->rows; y++)
    {
      p=GetCacheViewVirtualPixels(bottom_view,bottom_geometry.x+x,y,1,1,
        exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(bottom_image,p) != TransparentAlpha) ||
          ((y+i) >= (ssize_t) gap))
        break;
    }
    if ((y+i) < (ssize_t) gap)
      gap=(size_t) (y+i);
  }
  bottom_view=DestroyCacheView(bottom_view);
  top_view=DestroyCacheView(top_view);
  /* NOTE(review): the column loop above contains no break, so x always
     equals smush_image->columns here and this early return looks
     unreachable -- confirm before relying on it. */
  if (x < (ssize_t) smush_image->columns)
    return(offset);
  return((ssize_t) gap-offset);
}
MagickExport Image *SmushImages(const Image *images,
  const MagickBooleanType stack,const ssize_t offset,ExceptionInfo *exception)
{
#define SmushImageTag "Smush/Image"

  const Image
    *image;

  Image
    *smush_image;

  MagickBooleanType
    proceed,
    status;

  MagickOffsetType
    n;

  PixelTrait
    alpha_trait;

  RectangleInfo
    geometry;

  register const Image
    *next;

  size_t
    height,
    number_images,
    width;

  ssize_t
    x_offset,
    y_offset;

  /*
    Compute maximum area of smushed area.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=images;
  alpha_trait=image->alpha_trait;
  number_images=1;
  width=image->columns;
  height=image->rows;
  next=GetNextImageInList(image);
  for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    /* Any image with alpha forces the result to blend alpha. */
    if (next->alpha_trait != UndefinedPixelTrait)
      alpha_trait=BlendPixelTrait;
    number_images++;
    if (stack != MagickFalse)
      {
        /* Vertical smush: widest column wins, heights accumulate. */
        if (next->columns > width)
          width=next->columns;
        height+=next->rows;
        if (next->previous != (Image *) NULL)
          height+=offset;
        continue;
      }
    /* Horizontal smush: widths accumulate, tallest row wins. */
    width+=next->columns;
    if (next->previous != (Image *) NULL)
      width+=offset;
    if (next->rows > height)
      height=next->rows;
  }
  /*
    Smush images.
  */
  smush_image=CloneImage(image,width,height,MagickTrue,exception);
  if (smush_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(smush_image,DirectClass,exception) == MagickFalse)
    {
      smush_image=DestroyImage(smush_image);
      return((Image *) NULL);
    }
  smush_image->alpha_trait=alpha_trait;
  (void) SetImageBackgroundColor(smush_image,exception);
  status=MagickTrue;
  x_offset=0;
  y_offset=0;
  for (n=0; n < (MagickOffsetType) number_images; n++)
  {
    SetGeometry(smush_image,&geometry);
    GravityAdjustGeometry(image->columns,image->rows,image->gravity,&geometry);
    if (stack != MagickFalse)
      {
        /* Pull this image up by the transparent overlap it shares with
           its predecessor. */
        x_offset-=geometry.x;
        y_offset-=SmushYGap(smush_image,image,offset,exception);
      }
    else
      {
        x_offset-=SmushXGap(smush_image,image,offset,exception);
        y_offset-=geometry.y;
      }
    /* NOTE(review): status is overwritten each iteration, so an early
       composite failure can be masked by a later success -- confirm. */
    status=CompositeImage(smush_image,image,OverCompositeOp,MagickTrue,x_offset,
      y_offset,exception);
    proceed=SetImageProgress(image,SmushImageTag,n,number_images);
    if (proceed == MagickFalse)
      break;
    if (stack == MagickFalse)
      {
        x_offset+=(ssize_t) image->columns;
        y_offset=0;
      }
    else
      {
        x_offset=0;
        y_offset+=(ssize_t) image->rows;
      }
    image=GetNextImageInList(image);
  }
  /* Trim the canvas to the extent actually composited. */
  if (stack == MagickFalse)
    smush_image->columns=(size_t) x_offset;
  else
    smush_image->rows=(size_t) y_offset;
  if (status == MagickFalse)
    smush_image=DestroyImage(smush_image);
  return(smush_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t r i p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StripImage() strips an image of all profiles and comments.
%
% The format of the StripImage method is:
%
% MagickBooleanType StripImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType StripImage(Image *image,ExceptionInfo *exception)
{
  /*
    Strip the image of all profiles and the standard annotation
    properties, then ask the PNG encoder to exclude ancillary chunks.
  */
  static const char
    *properties[] =
    {
      "comment",
      "date:create",
      "date:modify"
    };

  MagickBooleanType
    status;

  register size_t
    i;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  (void) exception;
  DestroyImageProfiles(image);
  for (i=0; i < (sizeof(properties)/sizeof(*properties)); i++)
    (void) DeleteImageProperty(image,properties[i]);
  status=SetImageArtifact(image,"png:exclude-chunk",
    "bKGD,caNv,cHRM,eXIf,gAMA,iCCP,iTXt,pHYs,sRGB,tEXt,zCCP,zTXt,date");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImage() initializes the red, green, and blue intensities of each pixel
% as defined by the colormap index.
%
% The format of the SyncImage method is:
%
% MagickBooleanType SyncImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline Quantum PushColormapIndex(Image *image,const Quantum index,
  MagickBooleanType *range_exception)
{
  /*
    Validate a colormap index: anything at or beyond image->colors raises
    the range flag and maps to colormap entry 0; valid indexes pass
    through unchanged.
  */
  if ((size_t) index >= image->colors)
    {
      *range_exception=MagickTrue;
      return((Quantum) 0);
    }
  return(index);
}
MagickExport MagickBooleanType SyncImage(Image *image,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    range_exception,
    status,
    taint;

  ssize_t
    y;

  /*
    Rewrite each pixel's color channels from its colormap entry; a no-op
    for ping or DirectClass images.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (image->ping != MagickFalse)
    return(MagickTrue);
  if (image->storage_class != PseudoClass)
    return(MagickFalse);
  assert(image->colormap != (PixelInfo *) NULL);
  range_exception=MagickFalse;
  status=MagickTrue;
  /* Remember taint: re-deriving pixels from the colormap must not mark
     the image as modified, so it is restored after the loop. */
  taint=image->taint;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* range_exception is only ever set to MagickTrue by worker threads. */
  #pragma omp parallel for schedule(static,4) shared(range_exception,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      index;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Out-of-range indexes clamp to entry 0 and flag range_exception. */
      index=PushColormapIndex(image,GetPixelIndex(image,q),&range_exception);
      SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  image->taint=taint;
  if ((image->ping == MagickFalse) && (range_exception != MagickFalse))
    (void) ThrowMagickException(exception,GetMagickModule(),
      CorruptImageWarning,"InvalidColormapIndex","`%s'",image->filename);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c I m a g e S e t t i n g s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImageSettings() syncs any image_info global options into per-image
% attributes.
%
% Note: in IMv6 free form 'options' were always mapped into 'artifacts', so
% that operations and coders can find such settings. In IMv7 if a desired
% per-image artifact is not set, then it will directly look for a global
% option as a fallback, as such this copy is no longer needed, only the
% link set up.
%
% The format of the SyncImageSettings method is:
%
% MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
% Image *image,ExceptionInfo *exception)
% MagickBooleanType SyncImagesSettings(const ImageInfo *image_info,
% Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncImagesSettings(ImageInfo *image_info,
  Image *images,ExceptionInfo *exception)
{
  register Image
    *next;

  /*
    Propagate the global image_info options into every image in the list,
    then discard the one-shot "page" option so it is not applied again.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  for (next=images; next != (Image *) NULL; next=GetNextImageInList(next))
    (void) SyncImageSettings(image_info,next,exception);
  (void) DeleteImageOption(image_info,"page");
  return(MagickTrue);
}
MagickExport MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  const char
    *option;
  GeometryInfo
    geometry_info;
  MagickStatusType
    flags;
  ResolutionType
    units;
  /*
    Sync image options: for each recognized global option that is set in
    image_info, copy its value into the matching per-image attribute.
    Options that are not set leave the image attribute untouched.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  option=GetImageOption(image_info,"background");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&image->background_color,
      exception);
  option=GetImageOption(image_info,"black-point-compensation");
  if (option != (const char *) NULL)
    image->black_point_compensation=(MagickBooleanType) ParseCommandOption(
      MagickBooleanOptions,MagickFalse,option);
  option=GetImageOption(image_info,"blue-primary");
  if (option != (const char *) NULL)
    {
      /*
        Chromaticity primaries are "x,y" geometries; when the y (sigma)
        component is omitted it defaults to the x value.
      */
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.blue_primary.x=geometry_info.rho;
      image->chromaticity.blue_primary.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->chromaticity.blue_primary.y=image->chromaticity.blue_primary.x;
    }
  option=GetImageOption(image_info,"bordercolor");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&image->border_color,
      exception);
  /* FUTURE: do not sync compose to per-image compose setting here */
  option=GetImageOption(image_info,"compose");
  if (option != (const char *) NULL)
    image->compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions,
      MagickFalse,option);
  /* -- */
  option=GetImageOption(image_info,"compress");
  if (option != (const char *) NULL)
    image->compression=(CompressionType) ParseCommandOption(
      MagickCompressOptions,MagickFalse,option);
  option=GetImageOption(image_info,"debug");
  if (option != (const char *) NULL)
    image->debug=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"density");
  if (option != (const char *) NULL)
    {
      /*
        Density is "x[,y]"; a missing y resolution defaults to x.
      */
      flags=ParseGeometry(option,&geometry_info);
      image->resolution.x=geometry_info.rho;
      image->resolution.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->resolution.y=image->resolution.x;
    }
  option=GetImageOption(image_info,"depth");
  if (option != (const char *) NULL)
    image->depth=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"endian");
  if (option != (const char *) NULL)
    image->endian=(EndianType) ParseCommandOption(MagickEndianOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"filter");
  if (option != (const char *) NULL)
    image->filter=(FilterType) ParseCommandOption(MagickFilterOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"fuzz");
  if (option != (const char *) NULL)
    image->fuzz=StringToDoubleInterval(option,(double) QuantumRange+1.0);
  option=GetImageOption(image_info,"gravity");
  if (option != (const char *) NULL)
    image->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"green-primary");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.green_primary.x=geometry_info.rho;
      image->chromaticity.green_primary.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->chromaticity.green_primary.y=image->chromaticity.green_primary.x;
    }
  option=GetImageOption(image_info,"intent");
  if (option != (const char *) NULL)
    image->rendering_intent=(RenderingIntent) ParseCommandOption(
      MagickIntentOptions,MagickFalse,option);
  option=GetImageOption(image_info,"intensity");
  if (option != (const char *) NULL)
    image->intensity=(PixelIntensityMethod) ParseCommandOption(
      MagickPixelIntensityOptions,MagickFalse,option);
  option=GetImageOption(image_info,"interlace");
  if (option != (const char *) NULL)
    image->interlace=(InterlaceType) ParseCommandOption(MagickInterlaceOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"interpolate");
  if (option != (const char *) NULL)
    image->interpolate=(PixelInterpolateMethod) ParseCommandOption(
      MagickInterpolateOptions,MagickFalse,option);
  option=GetImageOption(image_info,"loop");
  if (option != (const char *) NULL)
    image->iterations=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"mattecolor");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&image->matte_color,
      exception);
  option=GetImageOption(image_info,"orient");
  if (option != (const char *) NULL)
    image->orientation=(OrientationType) ParseCommandOption(
      MagickOrientationOptions,MagickFalse,option);
  option=GetImageOption(image_info,"page");
  if (option != (const char *) NULL)
    {
      char
        *geometry;
      /*
        Expand page shorthands (e.g. paper sizes) before parsing.
      */
      geometry=GetPageGeometry(option);
      flags=ParseAbsoluteGeometry(geometry,&image->page);
      geometry=DestroyString(geometry);
    }
  option=GetImageOption(image_info,"quality");
  if (option != (const char *) NULL)
    image->quality=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"red-primary");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.red_primary.x=geometry_info.rho;
      image->chromaticity.red_primary.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->chromaticity.red_primary.y=image->chromaticity.red_primary.x;
    }
  /*
    An explicit image_info->quality overrides the "quality" option synced
    above.
  */
  if (image_info->quality != UndefinedCompressionQuality)
    image->quality=image_info->quality;
  option=GetImageOption(image_info,"scene");
  if (option != (const char *) NULL)
    image->scene=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"taint");
  if (option != (const char *) NULL)
    image->taint=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"tile-offset");
  if (option != (const char *) NULL)
    {
      char
        *geometry;
      geometry=GetPageGeometry(option);
      flags=ParseAbsoluteGeometry(geometry,&image->tile_offset);
      geometry=DestroyString(geometry);
    }
  option=GetImageOption(image_info,"transparent-color");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&image->transparent_color,
      exception);
  option=GetImageOption(image_info,"type");
  if (option != (const char *) NULL)
    image->type=(ImageType) ParseCommandOption(MagickTypeOptions,MagickFalse,
      option);
  /*
    The "units" option (when set) takes precedence over image_info->units.
  */
  option=GetImageOption(image_info,"units");
  units=image_info->units;
  if (option != (const char *) NULL)
    units=(ResolutionType) ParseCommandOption(MagickResolutionOptions,
      MagickFalse,option);
  if (units != UndefinedResolution)
    {
      /*
        Convert the stored resolution when the unit system changes:
        PPI -> PPCM divides by 2.54; PPCM -> PPI multiplies by 2.54 and
        rounds the result to two decimal places.
      */
      if (image->units != units)
        switch (image->units)
        {
          case PixelsPerInchResolution:
          {
            if (units == PixelsPerCentimeterResolution)
              {
                image->resolution.x/=2.54;
                image->resolution.y/=2.54;
              }
            break;
          }
          case PixelsPerCentimeterResolution:
          {
            if (units == PixelsPerInchResolution)
              {
                image->resolution.x=(double) ((size_t) (100.0*2.54*
                  image->resolution.x+0.5))/100.0;
                image->resolution.y=(double) ((size_t) (100.0*2.54*
                  image->resolution.y+0.5))/100.0;
              }
            break;
          }
          default:
            break;
        }
      image->units=units;
    }
  option=GetImageOption(image_info,"virtual-pixel");
  if (option != (const char *) NULL)
    (void) SetImageVirtualPixelMethod(image,(VirtualPixelMethod)
      ParseCommandOption(MagickVirtualPixelOptions,MagickFalse,option),
      exception);
  option=GetImageOption(image_info,"white-point");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.white_point.x=geometry_info.rho;
      image->chromaticity.white_point.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->chromaticity.white_point.y=image->chromaticity.white_point.x;
    }
  /*
    Pointer to allow the lookup of per-image artifact will fallback to a global
    option setting/define.  This saves a lot of duplication of global options
    into per-image artifacts, while ensuring only specifically set per-image
    artifacts are preserved when parenthesis ends.
  */
  if (image->image_info != (ImageInfo *) NULL)
    image->image_info=DestroyImageInfo(image->image_info);
  image->image_info=CloneImageInfo(image_info);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% V V IIIII SSSSS U U AAA L %
% V V I SS U U A A L %
% V V I SSS U U AAAAA L %
% V V I SS U U A A L %
% V IIIII SSSSS UUU A A LLLLL %
% %
% EEEEE FFFFF FFFFF EEEEE CCCC TTTTT SSSSS %
% E F F E C T SS %
% EEE FFF FFF EEE C T SSS %
% E F F E C T SS %
% EEEEE F F EEEEE CCCC T SSSSS %
% %
% %
% MagickCore Image Special Effects Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/effect.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/layer.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/transform.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
#include "MagickCore/visual-effects.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d d N o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AddNoiseImage() adds random noise to the image.
%
% The format of the AddNoiseImage method is:
%
% Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
% const double attenuate,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o noise_type: The type of noise: Uniform, Gaussian, Multiplicative,
% Impulse, Laplacian, or Poisson.
%
% o attenuate: attenuate the random distribution.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
  const double attenuate,ExceptionInfo *exception)
{
#define AddNoiseImageTag "AddNoise/Image"
  CacheView
    *image_view,
    *noise_view;
  Image
    *noise_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  RandomInfo
    **magick_restrict random_info;  /* one random generator per thread */
  ssize_t
    y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif
  /*
    Initialize noise image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /*
    Try the OpenCL-accelerated implementation first; fall through to the
    CPU path when it returns NULL.
  */
  noise_image=AccelerateAddNoiseImage(image,noise_type,attenuate,exception);
  if (noise_image != (Image *) NULL)
    return(noise_image);
#endif
  noise_image=CloneImage(image,0,0,MagickTrue,exception);
  if (noise_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(noise_image,DirectClass,exception) == MagickFalse)
    {
      noise_image=DestroyImage(noise_image);
      return((Image *) NULL);
    }
  /*
    Add noise in each row.
  */
  status=MagickTrue;
  progress=0;
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireVirtualCacheView(image,exception);
  noise_view=AcquireAuthenticCacheView(noise_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* NOTE(review): threading is gated on the random secret key
     (presumably disabled for a fixed/reproducible seed) — confirm
     against magick_number_threads(). */
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,noise_image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();
    MagickBooleanType
      sync;
    register const Quantum
      *magick_restrict p;
    register ssize_t
      x;
    register Quantum
      *magick_restrict q;
    /* Once any row fails, skip the remaining rows. */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait noise_traits=GetPixelChannelTraits(noise_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (noise_traits == UndefinedPixelTrait))
          continue;
        /* Channels marked copy-only are passed through unchanged. */
        if ((noise_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(noise_image,channel,p[i],q);
            continue;
          }
        SetPixelChannel(noise_image,channel,ClampToQuantum(
          GenerateDifferentialNoise(random_info[id],p[i],noise_type,attenuate)),
          q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(noise_image);
    }
    sync=SyncCacheViewAuthenticPixels(noise_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,AddNoiseImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  noise_view=DestroyCacheView(noise_view);
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    noise_image=DestroyImage(noise_image);
  return(noise_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l u e S h i f t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlueShiftImage() mutes the colors of the image to simulate a scene at
% nighttime in the moonlight.
%
% The format of the BlueShiftImage method is:
%
% Image *BlueShiftImage(const Image *image,const double factor,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o factor: the shift factor.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *BlueShiftImage(const Image *image,const double factor,
  ExceptionInfo *exception)
{
#define BlueShiftImageTag "BlueShift/Image"
  CacheView
    *image_view,
    *shift_view;
  Image
    *shift_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  ssize_t
    y;
  /*
    Allocate blue shift image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  shift_image=CloneImage(image,0,0,MagickTrue,exception);
  if (shift_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(shift_image,DirectClass,exception) == MagickFalse)
    {
      shift_image=DestroyImage(shift_image);
      return((Image *) NULL);
    }
  /*
    Blue-shift DirectClass image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  shift_view=AcquireAuthenticCacheView(shift_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,shift_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;
    PixelInfo
      pixel;
    Quantum
      quantum;
    register const Quantum
      *magick_restrict p;
    register ssize_t
      x;
    register Quantum
      *magick_restrict q;
    /* Once any row fails, skip the remaining rows. */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(shift_view,0,y,shift_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        First pass: average each channel with factor times the minimum of
        the R, G, and B channels.
      */
      quantum=GetPixelRed(image,p);
      if (GetPixelGreen(image,p) < quantum)
        quantum=GetPixelGreen(image,p);
      if (GetPixelBlue(image,p) < quantum)
        quantum=GetPixelBlue(image,p);
      pixel.red=0.5*(GetPixelRed(image,p)+factor*quantum);
      pixel.green=0.5*(GetPixelGreen(image,p)+factor*quantum);
      pixel.blue=0.5*(GetPixelBlue(image,p)+factor*quantum);
      /*
        Second pass: average the result with factor times the maximum of
        the R, G, and B channels, muting the colors.
      */
      quantum=GetPixelRed(image,p);
      if (GetPixelGreen(image,p) > quantum)
        quantum=GetPixelGreen(image,p);
      if (GetPixelBlue(image,p) > quantum)
        quantum=GetPixelBlue(image,p);
      pixel.red=0.5*(pixel.red+factor*quantum);
      pixel.green=0.5*(pixel.green+factor*quantum);
      pixel.blue=0.5*(pixel.blue+factor*quantum);
      SetPixelRed(shift_image,ClampToQuantum(pixel.red),q);
      SetPixelGreen(shift_image,ClampToQuantum(pixel.green),q);
      SetPixelBlue(shift_image,ClampToQuantum(pixel.blue),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(shift_image);
    }
    sync=SyncCacheViewAuthenticPixels(shift_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,BlueShiftImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  shift_view=DestroyCacheView(shift_view);
  if (status == MagickFalse)
    shift_image=DestroyImage(shift_image);
  return(shift_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h a r c o a l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CharcoalImage() creates a new image that is a copy of an existing one with
% the edge highlighted. It allocates the memory necessary for the new Image
% structure and returns a pointer to the new image.
%
% The format of the CharcoalImage method is:
%
% Image *CharcoalImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CharcoalImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  Image
    *blur_image,
    *outline_image;

  MagickBooleanType
    ok;

  /*
    Simulate a charcoal drawing: edge-detect the image, blur the edges,
    then normalize, negate, and grayscale the result.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  outline_image=EdgeImage(image,radius,exception);
  if (outline_image == (Image *) NULL)
    return((Image *) NULL);
  outline_image->alpha_trait=UndefinedPixelTrait;
  /*
    Clamp the edge result into quantum range, then soften it; the
    intermediate edge image is released either way.
  */
  blur_image=(Image *) NULL;
  if (ClampImage(outline_image,exception) != MagickFalse)
    blur_image=BlurImage(outline_image,radius,sigma,exception);
  outline_image=DestroyImage(outline_image);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  ok=NormalizeImage(blur_image,exception);
  if (ok != MagickFalse)
    ok=NegateImage(blur_image,MagickFalse,exception);
  if (ok != MagickFalse)
    ok=GrayscaleImage(blur_image,image->intensity,exception);
  if (ok == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorizeImage() blends the fill color with each pixel in the image.
% A percentage blend is specified with opacity. Control the application
% of different color components by specifying a different percentage for
% each component (e.g. 90/100/10 is 90% red, 100% green, and 10% blue).
%
% The format of the ColorizeImage method is:
%
% Image *ColorizeImage(const Image *image,const char *blend,
% const PixelInfo *colorize,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o blend: A character string indicating the level of blending as a
% percentage.
%
% o colorize: A color value.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ColorizeImage(const Image *image,const char *blend,
  const PixelInfo *colorize,ExceptionInfo *exception)
{
#define ColorizeImageTag "Colorize/Image"
  /* Linear blend: blend_percentage percent of colorize, the rest pixel. */
#define Colorize(pixel,blend_percentage,colorize) \
  (((pixel)*(100.0-(blend_percentage))+(colorize)*(blend_percentage))/100.0)
  CacheView
    *image_view;
  GeometryInfo
    geometry_info;
  Image
    *colorize_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  MagickStatusType
    flags;
  PixelInfo
    blend_percentage;
  ssize_t
    y;
  /*
    Allocate colorized image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  colorize_image=CloneImage(image,0,0,MagickTrue,exception);
  if (colorize_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(colorize_image,DirectClass,exception) == MagickFalse)
    {
      colorize_image=DestroyImage(colorize_image);
      return((Image *) NULL);
    }
  /* A gray image or gray fill still yields a color result. */
  if ((IsGrayColorspace(colorize_image->colorspace) != MagickFalse) ||
      (IsPixelInfoGray(colorize) != MagickFalse))
    (void) SetImageColorspace(colorize_image,sRGBColorspace,exception);
  if ((colorize_image->alpha_trait == UndefinedPixelTrait) &&
      (colorize->alpha_trait != UndefinedPixelTrait))
    (void) SetImageAlpha(colorize_image,OpaqueAlpha,exception);
  /* No blend given: return the (possibly colorspace-adjusted) clone. */
  if (blend == (const char *) NULL)
    return(colorize_image);
  /*
    Parse the blend geometry: rho applies to all channels; sigma, xi, and
    psi override green, blue, and alpha respectively.  For CMYK, psi sets
    black and chi sets alpha instead.
  */
  GetPixelInfo(colorize_image,&blend_percentage);
  flags=ParseGeometry(blend,&geometry_info);
  blend_percentage.red=geometry_info.rho;
  blend_percentage.green=geometry_info.rho;
  blend_percentage.blue=geometry_info.rho;
  blend_percentage.black=geometry_info.rho;
  blend_percentage.alpha=(MagickRealType) TransparentAlpha;
  if ((flags & SigmaValue) != 0)
    blend_percentage.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    blend_percentage.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    blend_percentage.alpha=geometry_info.psi;
  if (blend_percentage.colorspace == CMYKColorspace)
    {
      if ((flags & PsiValue) != 0)
        blend_percentage.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        blend_percentage.alpha=geometry_info.chi;
    }
  /*
    Colorize DirectClass image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(colorize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(colorize_image,colorize_image,colorize_image->rows,1)
#endif
  for (y=0; y < (ssize_t) colorize_image->rows; y++)
  {
    MagickBooleanType
      sync;
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    /* Once any row fails, skip the remaining rows. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,colorize_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) colorize_image->columns; x++)
    {
      register ssize_t
        i;
      for (i=0; i < (ssize_t) GetPixelChannels(colorize_image); i++)
      {
        PixelTrait traits = GetPixelChannelTraits(colorize_image,
          (PixelChannel) i);
        if (traits == UndefinedPixelTrait)
          continue;
        /* Copy-only channels are left untouched. */
        if ((traits & CopyPixelTrait) != 0)
          continue;
        SetPixelChannel(colorize_image,(PixelChannel) i,ClampToQuantum(
          Colorize(q[i],GetPixelInfoChannel(&blend_percentage,(PixelChannel) i),
          GetPixelInfoChannel(colorize,(PixelChannel) i))),q);
      }
      q+=GetPixelChannels(colorize_image);
    }
    sync=SyncCacheViewAuthenticPixels(image_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ColorizeImageTag,progress,
          colorize_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    colorize_image=DestroyImage(colorize_image);
  return(colorize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r M a t r i x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorMatrixImage() applies color transformation to an image. This method
% permits saturation changes, hue rotation, luminance to alpha, and various
% other effects. Although variable-sized transformation matrices can be used,
% typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA
% (or RGBA with offsets). The matrix is similar to those used by Adobe Flash
% except offsets are in column 6 rather than 5 (in support of CMYKA images)
% and offsets are normalized (divide Flash offset by 255).
%
% The format of the ColorMatrixImage method is:
%
% Image *ColorMatrixImage(const Image *image,
% const KernelInfo *color_matrix,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o color_matrix: the color matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* FUTURE: modify to make use of a MagickMatrix Multiply function
That should be provided in "matrix.c"
(ASIDE: actually distorts should do this too but currently doesn't)
*/
MagickExport Image *ColorMatrixImage(const Image *image,
  const KernelInfo *color_matrix,ExceptionInfo *exception)
{
#define ColorMatrixImageTag "ColorMatrix/Image"
  CacheView
    *color_view,
    *image_view;
  double
    ColorMatrix[6][6] =  /* identity matrix; kernel values overlay it below */
    {
      { 1.0, 0.0, 0.0, 0.0, 0.0, 0.0 },
      { 0.0, 1.0, 0.0, 0.0, 0.0, 0.0 },
      { 0.0, 0.0, 1.0, 0.0, 0.0, 0.0 },
      { 0.0, 0.0, 0.0, 1.0, 0.0, 0.0 },
      { 0.0, 0.0, 0.0, 0.0, 1.0, 0.0 },
      { 0.0, 0.0, 0.0, 0.0, 0.0, 1.0 }
    };
  Image
    *color_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  register ssize_t
    i;
  ssize_t
    u,
    v,
    y;
  /*
    Map given color_matrix, into a 6x6 matrix RGBKA and a constant
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Copy the kernel into the top-left of the 6x6 matrix.  Kernel entries
    outside that window are skipped but still consume their position in
    the values array.
  */
  i=0;
  for (v=0; v < (ssize_t) color_matrix->height; v++)
    for (u=0; u < (ssize_t) color_matrix->width; u++)
    {
      if ((v < 6) && (u < 6))
        ColorMatrix[v][u]=color_matrix->values[i];
      i++;
    }
  /*
    Initialize color image.
  */
  color_image=CloneImage(image,0,0,MagickTrue,exception);
  if (color_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(color_image,DirectClass,exception) == MagickFalse)
    {
      color_image=DestroyImage(color_image);
      return((Image *) NULL);
    }
  if (image->debug != MagickFalse)
    {
      char
        format[MagickPathExtent],
        *message;
      /* Log the effective 6x6 matrix, one row per line. */
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  ColorMatrix image with color matrix:");
      message=AcquireString("");
      for (v=0; v < 6; v++)
      {
        *message='\0';
        (void) FormatLocaleString(format,MagickPathExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < 6; u++)
        {
          (void) FormatLocaleString(format,MagickPathExtent,"%+f ",
            ColorMatrix[v][u]);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  /*
    Apply the ColorMatrix to image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  color_view=AcquireAuthenticCacheView(color_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,color_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;
    register const Quantum
      *magick_restrict p;
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    /* Once any row fails, skip the remaining rows. */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(color_view,0,y,color_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        v;
      size_t
        height;
      GetPixelInfoPixel(image,p,&pixel);
      height=color_matrix->height > 6 ? 6UL : color_matrix->height;
      /*
        Each output channel is a weighted sum of R, G, B (plus K and
        alpha when present) and a constant term scaled by QuantumRange.
      */
      for (v=0; v < (ssize_t) height; v++)
      {
        double
          sum;
        sum=ColorMatrix[v][0]*GetPixelRed(image,p)+ColorMatrix[v][1]*
          GetPixelGreen(image,p)+ColorMatrix[v][2]*GetPixelBlue(image,p);
        if (image->colorspace == CMYKColorspace)
          sum+=ColorMatrix[v][3]*GetPixelBlack(image,p);
        if (image->alpha_trait != UndefinedPixelTrait)
          sum+=ColorMatrix[v][4]*GetPixelAlpha(image,p);
        sum+=QuantumRange*ColorMatrix[v][5];
        switch (v)
        {
          case 0: pixel.red=sum; break;
          case 1: pixel.green=sum; break;
          case 2: pixel.blue=sum; break;
          case 3: pixel.black=sum; break;
          case 4: pixel.alpha=sum; break;
          default: break;
        }
      }
      SetPixelViaPixelInfo(color_image,&pixel,q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(color_image);
    }
    if (SyncCacheViewAuthenticPixels(color_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ColorMatrixImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  color_view=DestroyCacheView(color_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    color_image=DestroyImage(color_image);
  return(color_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I m p l o d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ImplodeImage() creates a new image that is a copy of an existing
% one with the image pixels "implode" by the specified percentage. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ImplodeImage method is:
%
% Image *ImplodeImage(const Image *image,const double amount,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o implode_image: Method ImplodeImage returns a pointer to the image
% after it is implode. A null image is returned if there is a memory
% shortage.
%
% o image: the image.
%
% o amount: Define the extent of the implosion.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ImplodeImage(const Image *image,const double amount,
  const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define ImplodeImageTag "Implode/Image"

  CacheView
    *canvas_view,
    *implode_view,
    *interpolate_view;

  double
    radius;

  Image
    *canvas_image,
    *implode_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PointInfo
    center,
    scale;

  ssize_t
    y;

  /*
    Initialize implode image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  canvas_image=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    A non-opaque background requires an alpha channel on the canvas copy.
  */
  if ((canvas_image->alpha_trait == UndefinedPixelTrait) &&
      (canvas_image->background_color.alpha != OpaqueAlpha))
    (void) SetImageAlphaChannel(canvas_image,OpaqueAlphaChannel,exception);
  implode_image=CloneImage(canvas_image,0,0,MagickTrue,exception);
  if (implode_image == (Image *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(implode_image,DirectClass,exception) == MagickFalse)
    {
      canvas_image=DestroyImage(canvas_image);
      implode_image=DestroyImage(implode_image);
      return((Image *) NULL);
    }
  /*
    Compute scaling factor: the shorter axis is stretched so the effect
    operates within the largest centered ellipse that fits the image;
    radius is half the shorter dimension.
  */
  scale.x=1.0;
  scale.y=1.0;
  center.x=0.5*canvas_image->columns;
  center.y=0.5*canvas_image->rows;
  radius=center.x;
  if (canvas_image->columns > canvas_image->rows)
    scale.y=(double) canvas_image->columns/(double) canvas_image->rows;
  else
    if (canvas_image->columns < canvas_image->rows)
      {
        scale.x=(double) canvas_image->rows/(double) canvas_image->columns;
        radius=center.y;
      }
  /*
    Implode image: each destination row is computed independently, so the
    row loop may run in parallel.
  */
  status=MagickTrue;
  progress=0;
  canvas_view=AcquireVirtualCacheView(canvas_image,exception);
  interpolate_view=AcquireVirtualCacheView(canvas_image,exception);
  implode_view=AcquireAuthenticCacheView(implode_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(canvas_image,implode_image,canvas_image->rows,1)
#endif
  for (y=0; y < (ssize_t) canvas_image->rows; y++)
  {
    double
      distance;

    PointInfo
      delta;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(canvas_view,0,y,canvas_image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(implode_view,0,y,implode_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    delta.y=scale.y*(double) (y-center.y);
    for (x=0; x < (ssize_t) canvas_image->columns; x++)
    {
      register ssize_t
        i;

      /*
        Determine if the pixel is within an ellipse.
      */
      delta.x=scale.x*(double) (x-center.x);
      distance=delta.x*delta.x+delta.y*delta.y;
      if (distance >= (radius*radius))
        /*
          Outside the ellipse: copy the source pixel through unchanged.
        */
        for (i=0; i < (ssize_t) GetPixelChannels(canvas_image); i++)
        {
          PixelChannel channel = GetPixelChannelChannel(canvas_image,i);
          PixelTrait traits = GetPixelChannelTraits(canvas_image,channel);
          PixelTrait implode_traits = GetPixelChannelTraits(implode_image,
            channel);
          if ((traits == UndefinedPixelTrait) ||
              (implode_traits == UndefinedPixelTrait))
            continue;
          SetPixelChannel(implode_image,channel,p[i],q);
        }
      else
        {
          double
            factor;

          /*
            Implode the pixel: pull the sample point toward (or away from,
            for negative amount) the center, interpolating from the source.
          */
          factor=1.0;
          if (distance > 0.0)
            factor=pow(sin(MagickPI*sqrt((double) distance)/radius/2),-amount);
          status=InterpolatePixelChannels(canvas_image,interpolate_view,
            implode_image,method,(double) (factor*delta.x/scale.x+center.x),
            (double) (factor*delta.y/scale.y+center.y),q,exception);
          if (status == MagickFalse)
            break;
        }
      p+=GetPixelChannels(canvas_image);
      q+=GetPixelChannels(implode_image);
    }
    if (SyncCacheViewAuthenticPixels(implode_view,exception) == MagickFalse)
      status=MagickFalse;
    if (canvas_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(canvas_image,ImplodeImageTag,progress,
          canvas_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  implode_view=DestroyCacheView(implode_view);
  interpolate_view=DestroyCacheView(interpolate_view);
  canvas_view=DestroyCacheView(canvas_view);
  canvas_image=DestroyImage(canvas_image);
  if (status == MagickFalse)
    implode_image=DestroyImage(implode_image);
  return(implode_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The MorphImages() method requires a minimum of two images. The first
% image is transformed into the second by a number of intervening images
% as specified by frames.
%
% The format of the MorphImage method is:
%
% Image *MorphImages(const Image *image,const size_t number_frames,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o number_frames: Define the number of in-between image to generate.
% The more in-between frames, the smoother the morph.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MorphImages(const Image *image,const size_t number_frames,
  ExceptionInfo *exception)
{
#define MorphImageTag "Morph/Image"

  double
    alpha,
    beta;

  Image
    *morph_image,
    *morph_images;

  MagickBooleanType
    status;

  MagickOffsetType
    scene;

  register const Image
    *next;

  register ssize_t
    n;

  ssize_t
    y;

  /*
    Clone first frame in sequence.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  morph_images=CloneImage(image,0,0,MagickTrue,exception);
  if (morph_images == (Image *) NULL)
    return((Image *) NULL);
  /*
    Initialize status before either branch; the single-image branch below
    previously wrote to it uninitialized.
  */
  status=MagickTrue;
  if (GetNextImageInList(image) == (Image *) NULL)
    {
      /*
        Morph single image: pad the sequence with identical frames.
      */
      for (n=1; n < (ssize_t) number_frames; n++)
      {
        morph_image=CloneImage(image,0,0,MagickTrue,exception);
        if (morph_image == (Image *) NULL)
          {
            morph_images=DestroyImageList(morph_images);
            return((Image *) NULL);
          }
        AppendImageToList(&morph_images,morph_image);
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,MorphImageTag,(MagickOffsetType) n,
              number_frames);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      return(GetFirstImageInList(morph_images));
    }
  /*
    Morph image sequence: for every adjacent pair of frames, generate
    number_frames in-between frames that blend geometry and color.
  */
  scene=0;
  next=image;
  for ( ; GetNextImageInList(next) != (Image *) NULL; next=GetNextImageInList(next))
  {
    for (n=0; n < (ssize_t) number_frames; n++)
    {
      CacheView
        *image_view,
        *morph_view;

      /*
        beta ramps from near 0 to near 1 across the in-between frames;
        alpha is its complement.  The in-between frame geometry is the
        alpha/beta-weighted blend of the two neighbors.
      */
      beta=(double) (n+1.0)/(double) (number_frames+1.0);
      alpha=1.0-beta;
      morph_image=ResizeImage(next,(size_t) (alpha*next->columns+beta*
        GetNextImageInList(next)->columns+0.5),(size_t) (alpha*next->rows+beta*
        GetNextImageInList(next)->rows+0.5),next->filter,exception);
      if (morph_image == (Image *) NULL)
        {
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      status=SetImageStorageClass(morph_image,DirectClass,exception);
      if (status == MagickFalse)
        {
          /*
            Destroy the accumulated list as well; the original code
            leaked morph_images on this error path.
          */
          morph_image=DestroyImage(morph_image);
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      AppendImageToList(&morph_images,morph_image);
      morph_images=GetLastImageInList(morph_images);
      /*
        Resize the following frame to the in-between geometry so the two
        can be blended pixel-for-pixel.
      */
      morph_image=ResizeImage(GetNextImageInList(next),morph_images->columns,
        morph_images->rows,GetNextImageInList(next)->filter,exception);
      if (morph_image == (Image *) NULL)
        {
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      image_view=AcquireVirtualCacheView(morph_image,exception);
      morph_view=AcquireAuthenticCacheView(morph_images,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(morph_image,morph_image,morph_image->rows,1)
#endif
      for (y=0; y < (ssize_t) morph_images->rows; y++)
      {
        MagickBooleanType
          sync;

        register const Quantum
          *magick_restrict p;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        p=GetCacheViewVirtualPixels(image_view,0,y,morph_image->columns,1,
          exception);
        q=GetCacheViewAuthenticPixels(morph_view,0,y,morph_images->columns,1,
          exception);
        if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) morph_images->columns; x++)
        {
          register ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(morph_image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(morph_image,i);
            PixelTrait traits = GetPixelChannelTraits(morph_image,channel);
            PixelTrait morph_traits=GetPixelChannelTraits(morph_images,channel);
            if ((traits == UndefinedPixelTrait) ||
                (morph_traits == UndefinedPixelTrait))
              continue;
            if ((morph_traits & CopyPixelTrait) != 0)
              {
                SetPixelChannel(morph_image,channel,p[i],q);
                continue;
              }
            /*
              Blend: alpha weights the current in-between pixel, beta the
              next frame's pixel.
            */
            SetPixelChannel(morph_image,channel,ClampToQuantum(alpha*
              GetPixelChannel(morph_images,channel,q)+beta*p[i]),q);
          }
          p+=GetPixelChannels(morph_image);
          q+=GetPixelChannels(morph_images);
        }
        sync=SyncCacheViewAuthenticPixels(morph_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      morph_view=DestroyCacheView(morph_view);
      image_view=DestroyCacheView(image_view);
      morph_image=DestroyImage(morph_image);
    }
    if (n < (ssize_t) number_frames)
      break;
    /*
      Clone last frame in sequence.
    */
    morph_image=CloneImage(GetNextImageInList(next),0,0,MagickTrue,exception);
    if (morph_image == (Image *) NULL)
      {
        morph_images=DestroyImageList(morph_images);
        return((Image *) NULL);
      }
    AppendImageToList(&morph_images,morph_image);
    morph_images=GetLastImageInList(morph_images);
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,MorphImageTag,scene,
          GetImageListLength(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
    scene++;
  }
  /*
    If the walk ended early (inner loop broke out), the morph is incomplete.
  */
  if (GetNextImageInList(next) != (Image *) NULL)
    {
      morph_images=DestroyImageList(morph_images);
      return((Image *) NULL);
    }
  return(GetFirstImageInList(morph_images));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P l a s m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PlasmaImage() initializes an image with plasma fractal values. The image
% must be initialized with a base color and the random number generator
% seeded before this method is called.
%
% The format of the PlasmaImage method is:
%
% MagickBooleanType PlasmaImage(Image *image,const SegmentInfo *segment,
% size_t attenuate,size_t depth,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o segment: Define the region to apply plasma fractals values.
%
% o attenuate: Define the plasma attenuation factor.
%
% o depth: Limit the plasma recursion depth.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  PlasmaPixel() offsets `pixel' by a uniform random value drawn from
  [-noise/2, +noise/2) and clamps the result to the quantum range.
*/
static inline Quantum PlasmaPixel(RandomInfo *magick_restrict random_info,
  const double pixel,const double noise)
{
  double
    offset;

  offset=noise*GetPseudoRandomValue(random_info)-noise/2.0;
  return(ClampToQuantum((MagickRealType) (pixel+offset)));
}
/*
  PlasmaImageProxy() is the recursive worker for PlasmaImage().  While
  depth > 0 it splits `segment' into four quadrants and recurses with an
  incremented attenuation; at depth 0 it perturbs the segment's edge
  midpoints and center by averaging the two opposing corner pixels and
  adding random "plasma" noise whose amplitude decays with `attenuate'.
*/
static MagickBooleanType PlasmaImageProxy(Image *image,CacheView *image_view,
  CacheView *u_view,CacheView *v_view,RandomInfo *magick_restrict random_info,
  const SegmentInfo *magick_restrict segment,size_t attenuate,size_t depth,
  ExceptionInfo *exception)
{
  double
    plasma;

  MagickStatusType
    status;

  register const Quantum
    *magick_restrict u,
    *magick_restrict v;

  register Quantum
    *magick_restrict q;

  register ssize_t
    i;

  ssize_t
    x,
    x_mid,
    y,
    y_mid;

  /*
    A degenerate (zero-area) segment needs no work.
  */
  if ((fabs(segment->x2-segment->x1) < MagickEpsilon) &&
      (fabs(segment->y2-segment->y1) < MagickEpsilon))
    return(MagickTrue);
  if (depth != 0)
    {
      SegmentInfo
        local_info;

      /*
        Divide the area into quadrants and recurse.
      */
      depth--;
      attenuate++;
      x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5);
      y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5);
      /*
        Top-left quadrant.
      */
      local_info=(*segment);
      local_info.x2=(double) x_mid;
      local_info.y2=(double) y_mid;
      status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      /*
        Bottom-left quadrant.
      */
      local_info=(*segment);
      local_info.y1=(double) y_mid;
      local_info.x2=(double) x_mid;
      status&=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      /*
        Top-right quadrant.
      */
      local_info=(*segment);
      local_info.x1=(double) x_mid;
      local_info.y2=(double) y_mid;
      status&=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      /*
        Bottom-right quadrant.
      */
      local_info=(*segment);
      local_info.x1=(double) x_mid;
      local_info.y1=(double) y_mid;
      status&=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      return(status == 0 ? MagickFalse : MagickTrue);
    }
  x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5);
  y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5);
  /*
    If all corners already coincide with the midpoint there is nothing to
    subdivide further.
  */
  if ((fabs(segment->x1-x_mid) < MagickEpsilon) &&
      (fabs(segment->x2-x_mid) < MagickEpsilon) &&
      (fabs(segment->y1-y_mid) < MagickEpsilon) &&
      (fabs(segment->y2-y_mid) < MagickEpsilon))
    return(MagickFalse);
  /*
    Average pixels and apply plasma.  Noise amplitude halves for each
    level of attenuation.
  */
  status=MagickTrue;
  plasma=(double) QuantumRange/(2.0*attenuate);
  if ((fabs(segment->x1-x_mid) >= MagickEpsilon) ||
      (fabs(segment->x2-x_mid) >= MagickEpsilon))
    {
      /*
        Left pixel: average the two left corners into (x1, y_mid).
      */
      x=(ssize_t) ceil(segment->x1-0.5);
      u=GetCacheViewVirtualPixels(u_view,x,(ssize_t) ceil(segment->y1-0.5),1,1,
        exception);
      v=GetCacheViewVirtualPixels(v_view,x,(ssize_t) ceil(segment->y2-0.5),1,1,
        exception);
      q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
      if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
          (q == (Quantum *) NULL))
        /*
          NOTE(review): a NULL pixel returns MagickTrue here but
          MagickFalse in the right-pixel branch below -- confirm intent.
        */
        return(MagickTrue);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=PlasmaPixel(random_info,((double) u[i]+v[i])/2.0,plasma);
      }
      status=SyncCacheViewAuthenticPixels(image_view,exception);
      if (fabs(segment->x1-segment->x2) >= MagickEpsilon)
        {
          /*
            Right pixel: average the two right corners into (x2, y_mid).
          */
          x=(ssize_t) ceil(segment->x2-0.5);
          u=GetCacheViewVirtualPixels(u_view,x,(ssize_t) ceil(segment->y1-0.5),
            1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,x,(ssize_t) ceil(segment->y2-0.5),
            1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickFalse);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,((double) u[i]+v[i])/2.0,plasma);
          }
          status=SyncCacheViewAuthenticPixels(image_view,exception);
        }
    }
  if ((fabs(segment->y1-y_mid) >= MagickEpsilon) ||
      (fabs(segment->y2-y_mid) >= MagickEpsilon))
    {
      /*
        NOTE(review): this guard mixes x1/x_mid with y2/y_mid; it looks
        asymmetric compared with the left/right branch above -- confirm.
      */
      if ((fabs(segment->x1-x_mid) >= MagickEpsilon) ||
          (fabs(segment->y2-y_mid) >= MagickEpsilon))
        {
          /*
            Bottom pixel: average the two bottom corners into (x_mid, y2).
          */
          y=(ssize_t) ceil(segment->y2-0.5);
          u=GetCacheViewVirtualPixels(u_view,(ssize_t) ceil(segment->x1-0.5),y,
            1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,(ssize_t) ceil(segment->x2-0.5),y,
            1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickTrue);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,((double) u[i]+v[i])/2.0,plasma);
          }
          status=SyncCacheViewAuthenticPixels(image_view,exception);
        }
      if (fabs(segment->y1-segment->y2) >= MagickEpsilon)
        {
          /*
            Top pixel: average the two top corners into (x_mid, y1).
          */
          y=(ssize_t) ceil(segment->y1-0.5);
          u=GetCacheViewVirtualPixels(u_view,(ssize_t) ceil(segment->x1-0.5),y,
            1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,(ssize_t) ceil(segment->x2-0.5),y,
            1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickTrue);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,((double) u[i]+v[i])/2.0,plasma);
          }
          status=SyncCacheViewAuthenticPixels(image_view,exception);
        }
    }
  if ((fabs(segment->x1-segment->x2) >= MagickEpsilon) ||
      (fabs(segment->y1-segment->y2) >= MagickEpsilon))
    {
      /*
        Middle pixel: average the top-left and bottom-right corners into
        (x_mid, y_mid).
      */
      x=(ssize_t) ceil(segment->x1-0.5);
      y=(ssize_t) ceil(segment->y1-0.5);
      u=GetCacheViewVirtualPixels(u_view,x,y,1,1,exception);
      x=(ssize_t) ceil(segment->x2-0.5);
      y=(ssize_t) ceil(segment->y2-0.5);
      v=GetCacheViewVirtualPixels(v_view,x,y,1,1,exception);
      q=QueueCacheViewAuthenticPixels(image_view,x_mid,y_mid,1,1,exception);
      if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
          (q == (Quantum *) NULL))
        return(MagickTrue);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=PlasmaPixel(random_info,((double) u[i]+v[i])/2.0,plasma);
      }
      status=SyncCacheViewAuthenticPixels(image_view,exception);
    }
  /*
    Segments smaller than 3x3 pixels are done; larger ones signal the
    caller (via MagickFalse) to keep subdividing.
  */
  if ((fabs(segment->x2-segment->x1) < 3.0) &&
      (fabs(segment->y2-segment->y1) < 3.0))
    return(status == 0 ? MagickFalse : MagickTrue);
  return(MagickFalse);
}
/*
  PlasmaImage() fills the given segment of the image with plasma fractal
  values by delegating to the recursive PlasmaImageProxy() worker.  The
  image must have a base color and the RNG must be seeded by the caller.
  Returns MagickTrue on success; the image is promoted to DirectClass.
*/
MagickExport MagickBooleanType PlasmaImage(Image *image,
  const SegmentInfo *segment,size_t attenuate,size_t depth,
  ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *u_view,
    *v_view;

  MagickBooleanType
    status;

  RandomInfo
    *random_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  /*
    The original code emitted this trace event twice; once is enough.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    u_view/v_view are read-only views over the same image used to sample
    corner pixels while image_view receives the perturbed midpoints.
  */
  image_view=AcquireAuthenticCacheView(image,exception);
  u_view=AcquireVirtualCacheView(image,exception);
  v_view=AcquireVirtualCacheView(image,exception);
  random_info=AcquireRandomInfo();
  status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,segment,
    attenuate,depth,exception);
  random_info=DestroyRandomInfo(random_info);
  v_view=DestroyCacheView(v_view);
  u_view=DestroyCacheView(u_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o l a r o i d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PolaroidImage() simulates a Polaroid picture.
%
% The format of the PolaroidImage method is:
%
% Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
% const char *caption,const double angle,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o caption: the Polaroid caption.
%
% o angle: Apply the effect along this angle.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
  const char *caption,const double angle,const PixelInterpolateMethod method,
  ExceptionInfo *exception)
{
  Image
    *bend_image,
    *caption_image,
    *flop_image,
    *picture_image,
    *polaroid_image,
    *rotate_image,
    *trim_image;

  size_t
    height;

  ssize_t
    quantum;

  /*
    Simulate a Polaroid picture.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    quantum is the white border width: 1/25th of the longer dimension,
    but at least 10 pixels.
  */
  quantum=(ssize_t) MagickMax(MagickMax((double) image->columns,(double)
    image->rows)/25.0,10.0);
  height=image->rows+2*quantum;
  caption_image=(Image *) NULL;
  if (caption != (const char *) NULL)
    {
      char
        *text;

      /*
        Generate caption image.
      */
      caption_image=CloneImage(image,image->columns,1,MagickTrue,exception);
      if (caption_image == (Image *) NULL)
        return((Image *) NULL);
      text=InterpretImageProperties((ImageInfo *) NULL,(Image *) image,caption,
        exception);
      if (text != (char *) NULL)
        {
          char
            geometry[MagickPathExtent];

          DrawInfo
            *annotate_info;

          MagickBooleanType
            status;

          ssize_t
            count;

          TypeMetric
            metrics;

          annotate_info=CloneDrawInfo((const ImageInfo *) NULL,draw_info);
          (void) CloneString(&annotate_info->text,text);
          /*
            Word-wrap the caption, then size the caption strip to the
            wrapped line count times the font's line height.
          */
          count=FormatMagickCaption(caption_image,annotate_info,MagickTrue,
            &metrics,&text,exception);
          status=SetImageExtent(caption_image,image->columns,(size_t)
            ((count+1)*(metrics.ascent-metrics.descent)+0.5),exception);
          if (status == MagickFalse)
            caption_image=DestroyImage(caption_image);
          else
            {
              caption_image->background_color=image->border_color;
              (void) SetImageBackgroundColor(caption_image,exception);
              (void) CloneString(&annotate_info->text,text);
              (void) FormatLocaleString(geometry,MagickPathExtent,"+0+%.20g",
                metrics.ascent);
              if (annotate_info->gravity == UndefinedGravity)
                (void) CloneString(&annotate_info->geometry,AcquireString(
                  geometry));
              (void) AnnotateImage(caption_image,annotate_info,exception);
              height+=caption_image->rows;
            }
          annotate_info=DestroyDrawInfo(annotate_info);
          text=DestroyString(text);
        }
    }
  /*
    Compose the bordered "picture": the source image inset by quantum on
    all sides, with the caption strip (if any) below it.
  */
  picture_image=CloneImage(image,image->columns+2*quantum,height,MagickTrue,
    exception);
  if (picture_image == (Image *) NULL)
    {
      if (caption_image != (Image *) NULL)
        caption_image=DestroyImage(caption_image);
      return((Image *) NULL);
    }
  picture_image->background_color=image->border_color;
  (void) SetImageBackgroundColor(picture_image,exception);
  (void) CompositeImage(picture_image,image,OverCompositeOp,MagickTrue,quantum,
    quantum,exception);
  if (caption_image != (Image *) NULL)
    {
      (void) CompositeImage(picture_image,caption_image,OverCompositeOp,
        MagickTrue,quantum,(ssize_t) (image->rows+3*quantum/2),exception);
      caption_image=DestroyImage(caption_image);
    }
  (void) QueryColorCompliance("none",AllCompliance,
    &picture_image->background_color,exception);
  (void) SetImageAlphaChannel(picture_image,OpaqueAlphaChannel,exception);
  /*
    Bend the picture: rotate 90, wave horizontally, rotate back.  Each
    step replaces picture_image; a NULL result aborts.
  */
  rotate_image=RotateImage(picture_image,90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  bend_image=WaveImage(picture_image,0.01*picture_image->rows,2.0*
    picture_image->columns,method,exception);
  picture_image=DestroyImage(picture_image);
  if (bend_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=bend_image;
  rotate_image=RotateImage(picture_image,-90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  picture_image->background_color=image->background_color;
  /*
    Add a drop shadow, then flop so the shadow falls on the other side
    before the picture is composited back over it.
  */
  polaroid_image=ShadowImage(picture_image,80.0,2.0,quantum/3,quantum/3,
    exception);
  if (polaroid_image == (Image *) NULL)
    {
      /* DestroyImage() returns NULL, so this returns (Image *) NULL. */
      picture_image=DestroyImage(picture_image);
      return(picture_image);
    }
  flop_image=FlopImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (flop_image == (Image *) NULL)
    {
      /* DestroyImage() returns NULL, so this returns (Image *) NULL. */
      picture_image=DestroyImage(picture_image);
      return(picture_image);
    }
  polaroid_image=flop_image;
  (void) CompositeImage(polaroid_image,picture_image,OverCompositeOp,
    MagickTrue,(ssize_t) (-0.01*picture_image->columns/2.0),0L,exception);
  picture_image=DestroyImage(picture_image);
  (void) QueryColorCompliance("none",AllCompliance,
    &polaroid_image->background_color,exception);
  /*
    Final tilt by the caller's angle, then trim excess transparent border.
  */
  rotate_image=RotateImage(polaroid_image,angle,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=rotate_image;
  trim_image=TrimImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (trim_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=trim_image;
  return(polaroid_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p i a T o n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SepiaToneImage() applies a special effect to the image, similar to the
% effect achieved in a photo darkroom by sepia toning. Threshold ranges from
% 0 to QuantumRange and is a measure of the extent of the sepia toning. A
% threshold of 80% is a good starting point for a reasonable tone.
%
% The format of the SepiaToneImage method is:
%
% Image *SepiaToneImage(const Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: the tone threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SepiaToneImage(const Image *image,const double threshold,
  ExceptionInfo *exception)
{
#define SepiaToneImageTag "SepiaTone/Image"

  CacheView
    *image_view,
    *sepia_view;

  Image
    *sepia_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Initialize sepia-toned image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  sepia_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sepia_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(sepia_image,DirectClass,exception) == MagickFalse)
    {
      sepia_image=DestroyImage(sepia_image);
      return((Image *) NULL);
    }
  /*
    Tone each row of the image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  sepia_view=AcquireAuthenticCacheView(sepia_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,sepia_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(sepia_view,0,y,sepia_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        intensity,
        tone;

      /*
        Map the pixel's intensity into a sepia palette: red saturates at
        `threshold', green at 7/6*threshold, and blue is shifted down by
        threshold/6, giving the characteristic brown tint.
      */
      intensity=GetPixelIntensity(image,p);
      tone=intensity > threshold ? (double) QuantumRange : intensity+
        (double) QuantumRange-threshold;
      SetPixelRed(sepia_image,ClampToQuantum(tone),q);
      tone=intensity > (7.0*threshold/6.0) ? (double) QuantumRange :
        intensity+(double) QuantumRange-7.0*threshold/6.0;
      SetPixelGreen(sepia_image,ClampToQuantum(tone),q);
      tone=intensity < (threshold/6.0) ? 0 : intensity-threshold/6.0;
      SetPixelBlue(sepia_image,ClampToQuantum(tone),q);
      /*
        Floor green/blue at threshold/7 to keep shadows warm.
        NOTE(review): these reads use q (the sepia pixel just written)
        through `image's channel map -- valid since sepia_image is a
        clone of image, but confirm that was the intent.
      */
      tone=threshold/7.0;
      if ((double) GetPixelGreen(image,q) < tone)
        SetPixelGreen(sepia_image,ClampToQuantum(tone),q);
      if ((double) GetPixelBlue(image,q) < tone)
        SetPixelBlue(sepia_image,ClampToQuantum(tone),q);
      SetPixelAlpha(sepia_image,GetPixelAlpha(image,p),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(sepia_image);
    }
    if (SyncCacheViewAuthenticPixels(sepia_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SepiaToneImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  sepia_view=DestroyCacheView(sepia_view);
  image_view=DestroyCacheView(image_view);
  /*
    Stretch contrast to fill the full dynamic range of the toned result.
  */
  (void) NormalizeImage(sepia_image,exception);
  (void) ContrastImage(sepia_image,MagickTrue,exception);
  if (status == MagickFalse)
    sepia_image=DestroyImage(sepia_image);
  return(sepia_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a d o w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShadowImage() simulates a shadow from the specified image and returns it.
%
% The format of the ShadowImage method is:
%
% Image *ShadowImage(const Image *image,const double alpha,
% const double sigma,const ssize_t x_offset,const ssize_t y_offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o alpha: percentage transparency.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o x_offset: the shadow x-offset.
%
% o y_offset: the shadow y-offset.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShadowImage(const Image *image,const double alpha,
  const double sigma,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
#define ShadowImageTag "Shadow/Image"

  CacheView
    *image_view;

  ChannelType
    channel_mask;

  Image
    *border_image,
    *clone_image,
    *shadow_image;

  MagickBooleanType
    status;

  PixelInfo
    background_color;

  RectangleInfo
    border_info;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(clone_image,sRGBColorspace,exception);
  (void) SetImageVirtualPixelMethod(clone_image,EdgeVirtualPixelMethod,
    exception);
  /*
    Pad the clone with a transparent border of 2*sigma on every side so
    the blurred shadow has room to spread.
  */
  border_info.width=(size_t) floor(2.0*sigma+0.5);
  border_info.height=(size_t) floor(2.0*sigma+0.5);
  border_info.x=0;
  border_info.y=0;
  (void) QueryColorCompliance("none",AllCompliance,&clone_image->border_color,
    exception);
  clone_image->alpha_trait=BlendPixelTrait;
  border_image=BorderImage(clone_image,&border_info,OverCompositeOp,exception);
  clone_image=DestroyImage(clone_image);
  if (border_image == (Image *) NULL)
    return((Image *) NULL);
  if (border_image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(border_image,OpaqueAlphaChannel,exception);
  /*
    Shadow image: replace every pixel with the background color, keeping
    only the source's alpha scaled by the caller's opacity percentage.
  */
  status=MagickTrue;
  background_color=border_image->background_color;
  background_color.alpha_trait=BlendPixelTrait;
  image_view=AcquireAuthenticCacheView(border_image,exception);
  for (y=0; y < (ssize_t) border_image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,border_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) border_image->columns; x++)
    {
      if (border_image->alpha_trait != UndefinedPixelTrait)
        background_color.alpha=GetPixelAlpha(border_image,q)*alpha/100.0;
      SetPixelViaPixelInfo(border_image,&background_color,q);
      q+=GetPixelChannels(border_image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    {
      border_image=DestroyImage(border_image);
      return((Image *) NULL);
    }
  /*
    Blur only the alpha channel to soften the shadow's silhouette, then
    restore the original channel mask on the result.
  */
  channel_mask=SetImageChannelMask(border_image,AlphaChannel);
  shadow_image=BlurImage(border_image,0.0,sigma,exception);
  border_image=DestroyImage(border_image);
  if (shadow_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetPixelChannelMask(shadow_image,channel_mask);
  /*
    Shift the page geometry so the shadow lands at the requested offset
    relative to the original image, compensating for the added border.
  */
  if (shadow_image->page.width == 0)
    shadow_image->page.width=shadow_image->columns;
  if (shadow_image->page.height == 0)
    shadow_image->page.height=shadow_image->rows;
  shadow_image->page.width+=x_offset-(ssize_t) border_info.width;
  shadow_image->page.height+=y_offset-(ssize_t) border_info.height;
  shadow_image->page.x+=x_offset-(ssize_t) border_info.width;
  shadow_image->page.y+=y_offset-(ssize_t) border_info.height;
  return(shadow_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S k e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SketchImage() simulates a pencil sketch. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma). For
% reasonable results, radius should be larger than sigma. Use a radius of 0
% and SketchImage() selects a suitable radius for you. Angle gives the angle
% of the sketch.
%
% The format of the SketchImage method is:
%
% Image *SketchImage(const Image *image,const double radius,
% const double sigma,const double angle,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the
% center pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o angle: apply the effect along this angle.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SketchImage(const Image *image,const double radius,
  const double sigma,const double angle,ExceptionInfo *exception)
{
  /*
    Simulate a pencil sketch: fill a double-sized canvas with per-pixel
    random gray, motion-blur it along `angle`, edge-detect/normalize/negate
    the result, color-dodge composite it over the original image, then blend
    the original back in at a 20/80 ratio.  Returns NULL on failure.
  */
  CacheView
    *random_view;
  Image
    *blend_image,
    *blur_image,
    *dodge_image,
    *random_image,
    *sketch_image;
  MagickBooleanType
    status;
  RandomInfo
    **magick_restrict random_info;
  ssize_t
    y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif
  /*
    Sketch image.
  */
  random_image=CloneImage(image,image->columns << 1,image->rows << 1,
    MagickTrue,exception);
  if (random_image == (Image *) NULL)
    return((Image *) NULL);
  status=MagickTrue;
  random_info=AcquireRandomInfoThreadSet();
  random_view=AcquireAuthenticCacheView(random_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /*
    Only parallelize when no secret key is set (key == ~0UL); a keyed
    generator must produce a reproducible single-threaded sequence.
  */
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(random_image,random_image,random_image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) random_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(random_view,0,y,random_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) random_image->columns; x++)
    {
      double
        value;
      register ssize_t
        i;
      /*
        One random value per pixel, replicated into every defined channel
        so the noise is gray.
      */
      value=GetPseudoRandomValue(random_info[id]);
      for (i=0; i < (ssize_t) GetPixelChannels(random_image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=ClampToQuantum(QuantumRange*value);
      }
      q+=GetPixelChannels(random_image);
    }
    if (SyncCacheViewAuthenticPixels(random_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  random_view=DestroyCacheView(random_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    {
      /* DestroyImage() yields NULL, so this returns NULL to the caller. */
      random_image=DestroyImage(random_image);
      return(random_image);
    }
  blur_image=MotionBlurImage(random_image,radius,sigma,angle,exception);
  random_image=DestroyImage(random_image);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  dodge_image=EdgeImage(blur_image,radius,exception);
  blur_image=DestroyImage(blur_image);
  if (dodge_image == (Image *) NULL)
    return((Image *) NULL);
  status=ClampImage(dodge_image,exception);
  if (status != MagickFalse)
    status=NormalizeImage(dodge_image,exception);
  if (status != MagickFalse)
    status=NegateImage(dodge_image,MagickFalse,exception);
  if (status != MagickFalse)
    status=TransformImage(&dodge_image,(char *) NULL,"50%",exception);
  sketch_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sketch_image == (Image *) NULL)
    {
      dodge_image=DestroyImage(dodge_image);
      return((Image *) NULL);
    }
  (void) CompositeImage(sketch_image,dodge_image,ColorDodgeCompositeOp,
    MagickTrue,0,0,exception);
  dodge_image=DestroyImage(dodge_image);
  blend_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blend_image == (Image *) NULL)
    {
      sketch_image=DestroyImage(sketch_image);
      return((Image *) NULL);
    }
  if (blend_image->alpha_trait != BlendPixelTrait)
    (void) SetImageAlpha(blend_image,TransparentAlpha,exception);
  /* Blend the original back over the sketch at 20%/80%. */
  (void) SetImageArtifact(blend_image,"compose:args","20x80");
  (void) CompositeImage(sketch_image,blend_image,BlendCompositeOp,MagickTrue,
    0,0,exception);
  blend_image=DestroyImage(blend_image);
  return(sketch_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S o l a r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SolarizeImage() applies a special effect to the image, similar to the effect
% achieved in a photo darkroom by selectively exposing areas of photo
% sensitive paper to light. Threshold ranges from 0 to QuantumRange and is a
% measure of the extent of the solarization.
%
% The format of the SolarizeImage method is:
%
% MagickBooleanType SolarizeImage(Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: Define the extent of the solarization.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SolarizeImage(Image *image,
  const double threshold,ExceptionInfo *exception)
{
#define SolarizeImageTag "Solarize/Image"
  /*
    Negate, in place, every channel value that exceeds `threshold`,
    mimicking darkroom solarization.  PseudoClass images only need their
    colormap rewritten; DirectClass images are processed row by row.
    Returns MagickFalse on failure or caller-requested cancellation.
  */
  CacheView
    *image_view;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;
      /*
        Solarize colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((double) image->colormap[i].red > threshold)
          image->colormap[i].red=QuantumRange-image->colormap[i].red;
        if ((double) image->colormap[i].green > threshold)
          image->colormap[i].green=QuantumRange-image->colormap[i].green;
        if ((double) image->colormap[i].blue > threshold)
          image->colormap[i].blue=QuantumRange-image->colormap[i].blue;
      }
      return(SyncImage(image,exception));
    }
  /*
    Solarize image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;
    register Quantum
      *magick_restrict q;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        /* Only channels flagged for update (e.g. not alpha) are touched. */
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if ((double) q[i] > threshold)
          q[i]=QuantumRange-q[i];
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SolarizeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t e g a n o I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SteganoImage() hides a digital watermark within the image. Recover
% the hidden watermark later to prove the authenticity of an image.
% Offset defines the start position within the image to hide the watermark.
%
% The format of the SteganoImage method is:
%
% Image *SteganoImage(const Image *image,Image *watermark,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o watermark: the watermark image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SteganoImage(const Image *image,const Image *watermark,
  ExceptionInfo *exception)
{
#define GetBit(alpha,i) ((((size_t) (alpha) >> (size_t) (i)) & 0x01) != 0)
#define SetBit(alpha,i,set) (Quantum) ((set) != 0 ? (size_t) (alpha) \
  | (one << (size_t) (i)) : (size_t) (alpha) & ~(one << (size_t) (i)))
#define SteganoImageTag "Stegano/Image"
  /*
    Hide the watermark's intensity bits inside the low-order bits of a
    clone of `image`.  Bit `i` of each watermark pixel's intensity is
    written into bit `j` of one channel (cycling red, green, blue) of
    successive pixels, starting at pixel offset `image->offset`.  Returns
    the steganographic image, or NULL on failure.
  */
  CacheView
    *stegano_view,
    *watermark_view;
  Image
    *stegano_image;
  int
    c;
  MagickBooleanType
    status;
  PixelInfo
    pixel;
  register Quantum
    *q;
  register ssize_t
    x;
  size_t
    depth,
    one;
  ssize_t
    i,
    j,
    k,
    y;
  /*
    Initialize steganographic image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(watermark != (const Image *) NULL);
  assert(watermark->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  one=1UL;
  stegano_image=CloneImage(image,0,0,MagickTrue,exception);
  if (stegano_image == (Image *) NULL)
    return((Image *) NULL);
  stegano_image->depth=MAGICKCORE_QUANTUM_DEPTH;
  if (SetImageStorageClass(stegano_image,DirectClass,exception) == MagickFalse)
    {
      stegano_image=DestroyImage(stegano_image);
      return((Image *) NULL);
    }
  /*
    Hide watermark in low-order bits of image.
  */
  c=0;
  i=0;
  j=0;
  depth=stegano_image->depth;
  k=stegano_image->offset;
  status=MagickTrue;
  watermark_view=AcquireVirtualCacheView(watermark,exception);
  stegano_view=AcquireAuthenticCacheView(stegano_image,exception);
  /* i walks the watermark intensity bits (MSB first), j the target bit. */
  for (i=(ssize_t) depth-1; (i >= 0) && (j < (ssize_t) depth); i--)
  {
    for (y=0; (y < (ssize_t) watermark->rows) && (j < (ssize_t) depth); y++)
    {
      for (x=0; (x < (ssize_t) watermark->columns) && (j < (ssize_t) depth); x++)
      {
        ssize_t
          offset;
        (void) GetOneCacheViewVirtualPixelInfo(watermark_view,x,y,&pixel,
          exception);
        offset=k/(ssize_t) stegano_image->columns;
        if (offset >= (ssize_t) stegano_image->rows)
          break;
        q=GetCacheViewAuthenticPixels(stegano_view,k % (ssize_t)
          stegano_image->columns,k/(ssize_t) stegano_image->columns,1,1,
          exception);
        if (q == (Quantum *) NULL)
          break;
        /* Cycle the carrier channel: red, green, blue, red, ... */
        switch (c)
        {
          case 0:
          {
            SetPixelRed(stegano_image,SetBit(GetPixelRed(stegano_image,q),j,
              GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
            break;
          }
          case 1:
          {
            SetPixelGreen(stegano_image,SetBit(GetPixelGreen(stegano_image,q),j,
              GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
            break;
          }
          case 2:
          {
            SetPixelBlue(stegano_image,SetBit(GetPixelBlue(stegano_image,q),j,
              GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
            break;
          }
        }
        if (SyncCacheViewAuthenticPixels(stegano_view,exception) == MagickFalse)
          break;
        c++;
        if (c == 3)
          c=0;
        k++;
        /*
          Wrap the pixel offset at the total pixel count (columns*rows);
          the previous columns*columns wrapped at the wrong boundary for
          non-square images.
        */
        if (k == (ssize_t) (stegano_image->columns*stegano_image->rows))
          k=0;
        /* A full lap back to the start offset means advance to bit j+1. */
        if (k == stegano_image->offset)
          j++;
      }
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
        proceed=SetImageProgress(image,SteganoImageTag,(MagickOffsetType)
          (depth-i),depth);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  stegano_view=DestroyCacheView(stegano_view);
  watermark_view=DestroyCacheView(watermark_view);
  if (status == MagickFalse)
    stegano_image=DestroyImage(stegano_image);
  return(stegano_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t e r e o A n a g l y p h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StereoAnaglyphImage() combines two images and produces a single image that
% is the composite of a left and right image of a stereo pair. Special
% red-green stereo glasses are required to view this effect.
%
% The format of the StereoAnaglyphImage method is:
%
% Image *StereoImage(const Image *left_image,const Image *right_image,
% ExceptionInfo *exception)
% Image *StereoAnaglyphImage(const Image *left_image,
% const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o left_image: the left image.
%
% o right_image: the right image.
%
% o exception: return any errors or warnings in this structure.
%
% o x_offset: amount, in pixels, by which the left image is offset to the
% right of the right image.
%
% o y_offset: amount, in pixels, by which the left image is offset to the
% bottom of the right image.
%
%
*/
MagickExport Image *StereoImage(const Image *left_image,
  const Image *right_image,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: a stereo anaglyph with zero offset between the
    left and right frames.
  */
  Image
    *anaglyph_image;
  anaglyph_image=StereoAnaglyphImage(left_image,right_image,0,0,exception);
  return(anaglyph_image);
}
MagickExport Image *StereoAnaglyphImage(const Image *left_image,
  const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
#define StereoImageTag "Stereo/Image"
  /*
    Compose a red/cyan anaglyph: the red channel comes from the left image
    (shifted by x_offset/y_offset) and the green/blue channels from the
    right image.  Both inputs must have identical dimensions.  Returns the
    anaglyph, or NULL on failure.
  */
  const Image
    *image;
  Image
    *stereo_image;
  MagickBooleanType
    status;
  ssize_t
    y;
  assert(left_image != (const Image *) NULL);
  assert(left_image->signature == MagickCoreSignature);
  if (left_image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      left_image->filename);
  assert(right_image != (const Image *) NULL);
  assert(right_image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=left_image;
  if ((left_image->columns != right_image->columns) ||
      (left_image->rows != right_image->rows))
    ThrowImageException(ImageError,"LeftAndRightImageSizesDiffer");
  /*
    Initialize stereo image attributes.
  */
  stereo_image=CloneImage(left_image,left_image->columns,left_image->rows,
    MagickTrue,exception);
  if (stereo_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(stereo_image,DirectClass,exception) == MagickFalse)
    {
      stereo_image=DestroyImage(stereo_image);
      return((Image *) NULL);
    }
  (void) SetImageColorspace(stereo_image,sRGBColorspace,exception);
  /*
    Copy left image to red channel and right image to blue channel.
  */
  status=MagickTrue;
  for (y=0; y < (ssize_t) stereo_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;
    register ssize_t
      x;
    register Quantum
      *magick_restrict r;
    /* Negative offsets pull the left frame toward the right frame. */
    p=GetVirtualPixels(left_image,-x_offset,y-y_offset,image->columns,1,
      exception);
    q=GetVirtualPixels(right_image,0,y,right_image->columns,1,exception);
    r=QueueAuthenticPixels(stereo_image,0,y,stereo_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL) ||
        (r == (Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) stereo_image->columns; x++)
    {
      SetPixelRed(stereo_image,GetPixelRed(left_image,p),r);
      SetPixelGreen(stereo_image,GetPixelGreen(right_image,q),r);
      SetPixelBlue(stereo_image,GetPixelBlue(right_image,q),r);
      /* Alpha, when copyable, is the average of the two frames. */
      if ((GetPixelAlphaTraits(stereo_image) & CopyPixelTrait) != 0)
        SetPixelAlpha(stereo_image,(GetPixelAlpha(left_image,p)+
          GetPixelAlpha(right_image,q))/2,r);
      p+=GetPixelChannels(left_image);
      q+=GetPixelChannels(right_image);
      r+=GetPixelChannels(stereo_image);
    }
    if (SyncAuthenticPixels(stereo_image,exception) == MagickFalse)
      break;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
        proceed=SetImageProgress(image,StereoImageTag,(MagickOffsetType) y,
          stereo_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  if (status == MagickFalse)
    stereo_image=DestroyImage(stereo_image);
  return(stereo_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S w i r l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SwirlImage() swirls the pixels about the center of the image, where
% degrees indicates the sweep of the arc through which each pixel is moved.
% You get a more dramatic effect as the degrees move from 1 to 360.
%
% The format of the SwirlImage method is:
%
% Image *SwirlImage(const Image *image,double degrees,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o degrees: Define the tightness of the swirling effect.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SwirlImage(const Image *image,double degrees,
  const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define SwirlImageTag "Swirl/Image"
  /*
    Rotate each pixel about the image center by an angle that decays
    quadratically with distance from the center; pixels outside the
    inscribed ellipse are copied unchanged.  Returns the swirled image,
    or NULL on failure.
  */
  CacheView
    *canvas_view,
    *interpolate_view,
    *swirl_view;
  double
    radius;
  Image
    *canvas_image,
    *swirl_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  PointInfo
    center,
    scale;
  ssize_t
    y;
  /*
    Initialize swirl image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  canvas_image=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas_image == (Image *) NULL)
    return((Image *) NULL);
  swirl_image=CloneImage(canvas_image,0,0,MagickTrue,exception);
  if (swirl_image == (Image *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(swirl_image,DirectClass,exception) == MagickFalse)
    {
      canvas_image=DestroyImage(canvas_image);
      swirl_image=DestroyImage(swirl_image);
      return((Image *) NULL);
    }
  if (swirl_image->background_color.alpha_trait != UndefinedPixelTrait)
    (void) SetImageAlphaChannel(swirl_image,OnAlphaChannel,exception);
  /*
    Compute scaling factor.
  */
  center.x=(double) canvas_image->columns/2.0;
  center.y=(double) canvas_image->rows/2.0;
  radius=MagickMax(center.x,center.y);
  /* Scale the shorter axis so the swirl region is a circle in scaled space. */
  scale.x=1.0;
  scale.y=1.0;
  if (canvas_image->columns > canvas_image->rows)
    scale.y=(double) canvas_image->columns/(double) canvas_image->rows;
  else
    if (canvas_image->columns < canvas_image->rows)
      scale.x=(double) canvas_image->rows/(double) canvas_image->columns;
  degrees=(double) DegreesToRadians(degrees);
  /*
    Swirl image.
  */
  status=MagickTrue;
  progress=0;
  canvas_view=AcquireVirtualCacheView(canvas_image,exception);
  interpolate_view=AcquireVirtualCacheView(image,exception);
  swirl_view=AcquireAuthenticCacheView(swirl_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(canvas_image,swirl_image,canvas_image->rows,1)
#endif
  for (y=0; y < (ssize_t) canvas_image->rows; y++)
  {
    double
      distance;
    PointInfo
      delta;
    register const Quantum
      *magick_restrict p;
    register ssize_t
      x;
    register Quantum
      *magick_restrict q;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(canvas_view,0,y,canvas_image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(swirl_view,0,y,swirl_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    delta.y=scale.y*(double) (y-center.y);
    for (x=0; x < (ssize_t) canvas_image->columns; x++)
    {
      /*
        Determine if the pixel is within an ellipse.
      */
      delta.x=scale.x*(double) (x-center.x);
      distance=delta.x*delta.x+delta.y*delta.y;
      if (distance >= (radius*radius))
        {
          register ssize_t
            i;
          /* Outside the swirl radius: copy the pixel through unchanged. */
          for (i=0; i < (ssize_t) GetPixelChannels(canvas_image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(canvas_image,i);
            PixelTrait traits = GetPixelChannelTraits(canvas_image,channel);
            PixelTrait swirl_traits = GetPixelChannelTraits(swirl_image,
              channel);
            if ((traits == UndefinedPixelTrait) ||
                (swirl_traits == UndefinedPixelTrait))
              continue;
            SetPixelChannel(swirl_image,channel,p[i],q);
          }
        }
      else
        {
          double
            cosine,
            factor,
            sine;
          /*
            Swirl the pixel.
          */
          /* factor falls from 1 at the center to 0 at the radius; the
             rotation angle decays with factor squared. */
          factor=1.0-sqrt((double) distance)/radius;
          sine=sin((double) (degrees*factor*factor));
          cosine=cos((double) (degrees*factor*factor));
          status=InterpolatePixelChannels(canvas_image,interpolate_view,
            swirl_image,method,((cosine*delta.x-sine*delta.y)/scale.x+center.x),
            (double) ((sine*delta.x+cosine*delta.y)/scale.y+center.y),q,
            exception);
          if (status == MagickFalse)
            break;
        }
      p+=GetPixelChannels(canvas_image);
      q+=GetPixelChannels(swirl_image);
    }
    if (SyncCacheViewAuthenticPixels(swirl_view,exception) == MagickFalse)
      status=MagickFalse;
    if (canvas_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(canvas_image,SwirlImageTag,progress,
          canvas_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  swirl_view=DestroyCacheView(swirl_view);
  interpolate_view=DestroyCacheView(interpolate_view);
  canvas_view=DestroyCacheView(canvas_view);
  canvas_image=DestroyImage(canvas_image);
  if (status == MagickFalse)
    swirl_image=DestroyImage(swirl_image);
  return(swirl_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TintImage() applies a color vector to each pixel in the image. The length
% of the vector is 0 for black and white and at its maximum for the midtones.
% The vector weighting function is f(x)=(1-(4.0*((x-0.5)*(x-0.5))))
%
% The format of the TintImage method is:
%
% Image *TintImage(const Image *image,const char *blend,
% const PixelInfo *tint,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o blend: A color value used for tinting.
%
% o tint: A color value used for tinting.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TintImage(const Image *image,const char *blend,
  const PixelInfo *tint,ExceptionInfo *exception)
{
#define TintImageTag "Tint/Image"
  /*
    Tint each pixel toward `tint`, weighted per channel by
    f(x)=(1-(4.0*((x-0.5)*(x-0.5)))) so midtones receive the full vector
    and black/white are untouched.  `blend` is a geometry string of
    per-channel percentages (rho/sigma/xi/psi); a NULL blend returns the
    untinted clone.  Returns NULL on failure.
  */
  CacheView
    *image_view,
    *tint_view;
  double
    intensity;
  GeometryInfo
    geometry_info;
  Image
    *tint_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  PixelInfo
    color_vector;
  MagickStatusType
    flags;
  ssize_t
    y;
  /*
    Allocate tint image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  tint_image=CloneImage(image,0,0,MagickTrue,exception);
  if (tint_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(tint_image,DirectClass,exception) == MagickFalse)
    {
      tint_image=DestroyImage(tint_image);
      return((Image *) NULL);
    }
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      (IsPixelInfoGray(tint) == MagickFalse))
    (void) SetImageColorspace(tint_image,sRGBColorspace,exception);
  if (blend == (const char *) NULL)
    return(tint_image);
  /*
    Determine RGB values of the color.
  */
  GetPixelInfo(image,&color_vector);
  flags=ParseGeometry(blend,&geometry_info);
  /* rho alone tints all channels; sigma/xi/psi override green/blue/alpha. */
  color_vector.red=geometry_info.rho;
  color_vector.green=geometry_info.rho;
  color_vector.blue=geometry_info.rho;
  color_vector.alpha=(MagickRealType) OpaqueAlpha;
  if ((flags & SigmaValue) != 0)
    color_vector.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    color_vector.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    color_vector.alpha=geometry_info.psi;
  if (image->colorspace == CMYKColorspace)
    {
      /* For CMYK, psi maps to black and chi to alpha instead. */
      color_vector.black=geometry_info.rho;
      if ((flags & PsiValue) != 0)
        color_vector.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        color_vector.alpha=geometry_info.chi;
    }
  intensity=(double) GetPixelInfoIntensity((const Image *) NULL,tint);
  color_vector.red=(double) (color_vector.red*tint->red/100.0-intensity);
  color_vector.green=(double) (color_vector.green*tint->green/100.0-intensity);
  color_vector.blue=(double) (color_vector.blue*tint->blue/100.0-intensity);
  color_vector.black=(double) (color_vector.black*tint->black/100.0-intensity);
  color_vector.alpha=(double) (color_vector.alpha*tint->alpha/100.0-intensity);
  /*
    Tint image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  tint_view=AcquireAuthenticCacheView(tint_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,tint_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(tint_view,0,y,tint_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PixelInfo
        pixel;
      double
        weight;
      /* weight is x-0.5 with x in [0,1]; the 1-4w^2 factor peaks at
         midtones and vanishes at black and white. */
      GetPixelInfo(image,&pixel);
      weight=QuantumScale*GetPixelRed(image,p)-0.5;
      pixel.red=(MagickRealType) GetPixelRed(image,p)+color_vector.red*
        (1.0-(4.0*(weight*weight)));
      weight=QuantumScale*GetPixelGreen(image,p)-0.5;
      pixel.green=(MagickRealType) GetPixelGreen(image,p)+color_vector.green*
        (1.0-(4.0*(weight*weight)));
      weight=QuantumScale*GetPixelBlue(image,p)-0.5;
      pixel.blue=(MagickRealType) GetPixelBlue(image,p)+color_vector.blue*
        (1.0-(4.0*(weight*weight)));
      weight=QuantumScale*GetPixelBlack(image,p)-0.5;
      pixel.black=(MagickRealType) GetPixelBlack(image,p)+color_vector.black*
        (1.0-(4.0*(weight*weight)));
      pixel.alpha=(MagickRealType) GetPixelAlpha(image,p);
      SetPixelViaPixelInfo(tint_image,&pixel,q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(tint_image);
    }
    if (SyncCacheViewAuthenticPixels(tint_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,TintImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  tint_view=DestroyCacheView(tint_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    tint_image=DestroyImage(tint_image);
  return(tint_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% V i g n e t t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% VignetteImage() softens the edges of the image in vignette style.
%
% The format of the VignetteImage method is:
%
% Image *VignetteImage(const Image *image,const double radius,
% const double sigma,const ssize_t x,const ssize_t y,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o x, y: Define the x and y ellipse offset.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *VignetteImage(const Image *image,const double radius,
  const double sigma,const ssize_t x,const ssize_t y,ExceptionInfo *exception)
{
  /*
    Soften the image edges in vignette style: draw a centered white
    ellipse (inset by x/y) on a black canvas, blur it, use the result as
    the image's alpha mask, then flatten against the background color.
    Returns the vignetted image, or NULL on failure.
  */
  char
    ellipse[MagickPathExtent];
  DrawInfo
    *draw_info;
  Image
    *canvas,
    *blur_image,
    *oval_image,
    *vignette_image;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  canvas=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(canvas,DirectClass,exception) == MagickFalse)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  canvas->alpha_trait=BlendPixelTrait;
  oval_image=CloneImage(canvas,canvas->columns,canvas->rows,MagickTrue,
    exception);
  if (oval_image == (Image *) NULL)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  /* Black background with a white ellipse forms the vignette mask. */
  (void) QueryColorCompliance("#000000",AllCompliance,
    &oval_image->background_color,exception);
  (void) SetImageBackgroundColor(oval_image,exception);
  draw_info=CloneDrawInfo((const ImageInfo *) NULL,(const DrawInfo *) NULL);
  (void) QueryColorCompliance("#ffffff",AllCompliance,&draw_info->fill,
    exception);
  (void) QueryColorCompliance("#ffffff",AllCompliance,&draw_info->stroke,
    exception);
  (void) FormatLocaleString(ellipse,MagickPathExtent,"ellipse %g,%g,%g,%g,"
    "0.0,360.0",image->columns/2.0,image->rows/2.0,image->columns/2.0-x,
    image->rows/2.0-y);
  draw_info->primitive=AcquireString(ellipse);
  (void) DrawImage(oval_image,draw_info,exception);
  draw_info=DestroyDrawInfo(draw_info);
  /* Blurring the mask produces the soft falloff at the edges. */
  blur_image=BlurImage(oval_image,radius,sigma,exception);
  oval_image=DestroyImage(oval_image);
  if (blur_image == (Image *) NULL)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  blur_image->alpha_trait=UndefinedPixelTrait;
  /* Copy the blurred mask intensity into the canvas alpha channel. */
  (void) CompositeImage(canvas,blur_image,IntensityCompositeOp,MagickTrue,
    0,0,exception);
  blur_image=DestroyImage(blur_image);
  vignette_image=MergeImageLayers(canvas,FlattenLayer,exception);
  canvas=DestroyImage(canvas);
  if (vignette_image != (Image *) NULL)
    (void) TransformImageColorspace(vignette_image,image->colorspace,exception);
  return(vignette_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W a v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WaveImage() creates a "ripple" effect in the image by shifting the pixels
% vertically along a sine wave whose amplitude and wavelength is specified
% by the given parameters.
%
% The format of the WaveImage method is:
%
% Image *WaveImage(const Image *image,const double amplitude,
% const double wave_length,const PixelInterpolateMethod method,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o amplitude, wave_length: Define the amplitude and wave length of the
% sine wave.
%
% o interpolate: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *WaveImage(const Image *image,const double amplitude,
  const double wave_length,const PixelInterpolateMethod method,
  ExceptionInfo *exception)
{
#define WaveImageTag "Wave/Image"
  /*
    Shift each column of pixels vertically along a sine wave of the given
    amplitude and wave length.  The output image is taller than the input
    by 2*|amplitude| to make room for the displacement; uncovered areas
    take the virtual background color.  Returns NULL on failure.
  */
  CacheView
    *canvas_image_view,
    *wave_view;
  float
    *sine_map;
  Image
    *canvas_image,
    *wave_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  register ssize_t
    i;
  ssize_t
    y;
  /*
    Initialize wave image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  canvas_image=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas_image == (Image *) NULL)
    return((Image *) NULL);
  if ((canvas_image->alpha_trait == UndefinedPixelTrait) &&
      (canvas_image->background_color.alpha != OpaqueAlpha))
    (void) SetImageAlpha(canvas_image,OpaqueAlpha,exception);
  wave_image=CloneImage(canvas_image,canvas_image->columns,(size_t)
    (canvas_image->rows+2.0*fabs(amplitude)),MagickTrue,exception);
  if (wave_image == (Image *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(wave_image,DirectClass,exception) == MagickFalse)
    {
      canvas_image=DestroyImage(canvas_image);
      wave_image=DestroyImage(wave_image);
      return((Image *) NULL);
    }
  /*
    Allocate sine map.
  */
  sine_map=(float *) AcquireQuantumMemory((size_t) wave_image->columns,
    sizeof(*sine_map));
  if (sine_map == (float *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      wave_image=DestroyImage(wave_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /* Precompute the per-column vertical displacement, biased by
     |amplitude| so every value is non-negative. */
  for (i=0; i < (ssize_t) wave_image->columns; i++)
    sine_map[i]=(float) fabs(amplitude)+amplitude*sin((double)
      ((2.0*MagickPI*i)/wave_length));
  /*
    Wave image.
  */
  status=MagickTrue;
  progress=0;
  canvas_image_view=AcquireVirtualCacheView(canvas_image,exception);
  wave_view=AcquireAuthenticCacheView(wave_image,exception);
  (void) SetCacheViewVirtualPixelMethod(canvas_image_view,
    BackgroundVirtualPixelMethod);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(canvas_image,wave_image,wave_image->rows,1)
#endif
  for (y=0; y < (ssize_t) wave_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(canvas_image_view,0,y,canvas_image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(wave_view,0,y,wave_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) wave_image->columns; x++)
    {
      /* Sample the source at (x, y - sine_map[x]) with interpolation. */
      status=InterpolatePixelChannels(canvas_image,canvas_image_view,
        wave_image,method,(double) x,(double) (y-sine_map[x]),q,exception);
      if (status == MagickFalse)
        break;
      p+=GetPixelChannels(canvas_image);
      q+=GetPixelChannels(wave_image);
    }
    if (SyncCacheViewAuthenticPixels(wave_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(canvas_image,WaveImageTag,progress,
          canvas_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  wave_view=DestroyCacheView(wave_view);
  canvas_image_view=DestroyCacheView(canvas_image_view);
  canvas_image=DestroyImage(canvas_image);
  sine_map=(float *) RelinquishMagickMemory(sine_map);
  if (status == MagickFalse)
    wave_image=DestroyImage(wave_image);
  return(wave_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W a v e l e t D e n o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WaveletDenoiseImage() removes noise from the image using a wavelet
% transform. The wavelet transform is a fast hierarchical scheme for
% processing an image using a set of consecutive lowpass and high_pass filters,
% followed by a decimation. This results in a decomposition into different
% scales which can be regarded as different "frequency bands", determined by
% the mother wavelet. Adapted from dcraw.c by David Coffin.
%
% The format of the WaveletDenoiseImage method is:
%
% Image *WaveletDenoiseImage(const Image *image,const double threshold,
% const double softness,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: set the threshold for smoothing.
%
% o softness: attenuate the smoothing threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  HatTransform() convolves one row (stride == 1) or one column
  (stride == image columns) of 'pixels' with a 1-2-1 "hat" kernel whose side
  taps are 'scale' samples apart, writing the 'extent' filtered values into
  'kernel'.  The first and last 'scale' samples mirror at the boundaries.
*/
static inline void HatTransform(const float *magick_restrict pixels,
  const size_t stride,const size_t extent,const size_t scale,float *kernel)
{
  const float
    *magick_restrict p,
    *magick_restrict q,
    *magick_restrict r;
  register ssize_t
    i;
  /*
    Leading boundary: the left tap would fall before the start of the
    buffer, so 'q' walks backwards from pixels[scale] (mirrored sample).
  */
  p=pixels;
  q=pixels+scale*stride;
  r=pixels+scale*stride;
  for (i=0; i < (ssize_t) scale; i++)
  {
    kernel[i]=0.25f*(*p+(*p)+(*q)+(*r));  /* 0.25*(2*center+left+right) */
    p+=stride;
    q-=stride;
    r+=stride;
  }
  /*
    Interior: both side taps are in range; weights are 1/2 center and
    1/4 for each side tap.
  */
  for ( ; i < (ssize_t) (extent-scale); i++)
  {
    kernel[i]=0.25f*(2.0f*(*p)+*(p-scale*stride)+*(p+scale*stride));
    p+=stride;
  }
  /*
    Trailing boundary: the right tap would run past the end, so 'r' walks
    backwards from the next-to-last sample (mirrored sample).
  */
  q=p-scale*stride;
  r=pixels+stride*(extent-2);
  for ( ; i < (ssize_t) extent; i++)
  {
    kernel[i]=0.25f*(*p+(*p)+(*q)+(*r));
    p+=stride;
    q+=stride;
    r-=stride;
  }
}
MagickExport Image *WaveletDenoiseImage(const Image *image,
  const double threshold,const double softness,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *noise_view;
  float
    *kernel,
    *pixels;
  Image
    *noise_image;
  MagickBooleanType
    status;
  MagickSizeType
    number_pixels;
  MemoryInfo
    *pixels_info;
  ssize_t
    channel;
  /*
    Per-level scaling applied to the user threshold; one entry per wavelet
    decomposition level (only the first 5 are used below).
  */
  static const float
    noise_levels[] = { 0.8002f, 0.2735f, 0.1202f, 0.0585f, 0.0291f, 0.0152f,
      0.0080f, 0.0044f };
  /*
    Initialize noise image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the OpenCL-accelerated path when it is available. */
  noise_image=AccelerateWaveletDenoiseImage(image,threshold,exception);
  if (noise_image != (Image *) NULL)
    return(noise_image);
#endif
  noise_image=CloneImage(image,0,0,MagickTrue,exception);
  if (noise_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(noise_image,DirectClass,exception) == MagickFalse)
    {
      noise_image=DestroyImage(noise_image);
      return((Image *) NULL);
    }
  if (AcquireMagickResource(WidthResource,4*image->columns) == MagickFalse)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /*
    Three full-image float planes: plane 0 holds the working channel, the
    other two are ping-pong scratch planes for the wavelet levels.  The
    kernel scratch gives each OpenMP thread its own row/column buffer.
  */
  pixels_info=AcquireVirtualMemory(3*image->columns,image->rows*
    sizeof(*pixels));
  kernel=(float *) AcquireQuantumMemory(MagickMax(image->rows,image->columns)+1,
    GetOpenMPMaximumThreads()*sizeof(*kernel));
  if ((pixels_info == (MemoryInfo *) NULL) || (kernel == (float *) NULL))
    {
      if (kernel != (float *) NULL)
        kernel=(float *) RelinquishMagickMemory(kernel);
      if (pixels_info != (MemoryInfo *) NULL)
        pixels_info=RelinquishVirtualMemory(pixels_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  pixels=(float *) GetVirtualMemoryBlob(pixels_info);
  status=MagickTrue;
  number_pixels=(MagickSizeType) image->columns*image->rows;
  image_view=AcquireAuthenticCacheView(image,exception);
  noise_view=AcquireAuthenticCacheView(noise_image,exception);
  /* Denoise the red, green, and blue channels independently. */
  for (channel=0; channel < (ssize_t) GetPixelChannels(image); channel++)
  {
    register ssize_t
      i;
    size_t
      high_pass,
      low_pass;
    ssize_t
      level,
      y;
    PixelChannel
      pixel_channel;
    PixelTrait
      traits;
    if (status == MagickFalse)
      continue;
    traits=GetPixelChannelTraits(image,(PixelChannel) channel);
    if (traits == UndefinedPixelTrait)
      continue;
    pixel_channel=GetPixelChannelChannel(image,channel);
    if ((pixel_channel != RedPixelChannel) &&
        (pixel_channel != GreenPixelChannel) &&
        (pixel_channel != BluePixelChannel))
      continue;
    /*
      Copy channel from image to wavelet pixel array.
    */
    i=0;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      register const Quantum
        *magick_restrict p;
      ssize_t
        x;
      p=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        pixels[i++]=(float) p[channel];
        p+=GetPixelChannels(image);
      }
    }
    /*
      Low pass filter outputs are called approximation kernel & high pass
      filters are referred to as detail kernel. The detail kernel
      have high values in the noisy parts of the signal.
    */
    high_pass=0;
    for (level=0; level < 5; level++)
    {
      double
        magnitude;
      ssize_t
        x,
        y;
      /* Ping-pong between scratch planes 1 and 2 by level parity. */
      low_pass=(size_t) (number_pixels*((level & 0x01)+1));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,1) \
        magick_number_threads(image,image,image->rows,1)
#endif
      /* Horizontal hat transform of every row into the low-pass plane. */
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();
        register float
          *magick_restrict p,
          *magick_restrict q;
        register ssize_t
          x;
        p=kernel+id*image->columns;
        q=pixels+y*image->columns;
        HatTransform(q+high_pass,1,image->columns,(size_t) (1UL << level),p);
        q+=low_pass;
        for (x=0; x < (ssize_t) image->columns; x++)
          *q++=(*p++);
      }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,1) \
        magick_number_threads(image,image,image->columns,1)
#endif
      /* Vertical hat transform of every column, in place. */
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        const int
          id = GetOpenMPThreadId();
        register float
          *magick_restrict p,
          *magick_restrict q;
        register ssize_t
          y;
        p=kernel+id*image->rows;
        q=pixels+x+low_pass;
        HatTransform(q,image->columns,image->rows,(size_t) (1UL << level),p);
        for (y=0; y < (ssize_t) image->rows; y++)
        {
          *q=(*p++);
          q+=image->columns;
        }
      }
      /*
        To threshold, each coefficient is compared to a threshold value and
        attenuated / shrunk by some factor.
      */
      magnitude=threshold*noise_levels[level];
      for (i=0; i < (ssize_t) number_pixels; ++i)
      {
        /* Detail coefficient = previous band minus new low-pass band. */
        pixels[high_pass+i]-=pixels[low_pass+i];
        if (pixels[high_pass+i] < -magnitude)
          pixels[high_pass+i]+=magnitude-softness*magnitude;
        else
          if (pixels[high_pass+i] > magnitude)
            pixels[high_pass+i]-=magnitude-softness*magnitude;
          else
            pixels[high_pass+i]*=softness;
        /* Accumulate the shrunk detail back into the base plane. */
        if (high_pass != 0)
          pixels[i]+=pixels[high_pass+i];
      }
      high_pass=low_pass;
    }
    /*
      Reconstruct image from the thresholded wavelet kernel.
    */
    i=0;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      MagickBooleanType
        sync;
      register Quantum
        *magick_restrict q;
      register ssize_t
        x;
      ssize_t
        offset;
      q=GetCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1,
        exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      offset=GetPixelChannelOffset(noise_image,pixel_channel);
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        MagickRealType
          pixel;
        /* Accumulated details plus the final residual low-pass band. */
        pixel=(MagickRealType) pixels[i]+pixels[low_pass+i];
        q[offset]=ClampToQuantum(pixel);
        i++;
        q+=GetPixelChannels(noise_image);
      }
      sync=SyncCacheViewAuthenticPixels(noise_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
    }
    /* NOTE(review): progress is reported under AddNoiseImageTag -- looks
       like a copy/paste from AddNoiseImage; confirm intended tag. */
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
        proceed=SetImageProgress(image,AddNoiseImageTag,(MagickOffsetType)
          channel,GetPixelChannels(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  noise_view=DestroyCacheView(noise_view);
  image_view=DestroyCacheView(image_view);
  kernel=(float *) RelinquishMagickMemory(kernel);
  pixels_info=RelinquishVirtualMemory(pixels_info);
  if (status == MagickFalse)
    noise_image=DestroyImage(noise_image);
  return(noise_image);
}
|
tool_available_search.c | // RUN: %clang %flags -shared -fPIC %s -o %T/first_tool.so
// RUN: %clang %flags -DTOOL -DSECOND_TOOL -shared -fPIC %s -o %T/second_tool.so
// RUN: %clang %flags -DTOOL -DTHIRD_TOOL -shared -fPIC %s -o %T/third_tool.so
// RUN: %libomp-compile -DCODE
// RUN: env OMP_TOOL_LIBRARIES=%T/non_existing_file.so:%T/first_tool.so:%T/second_tool.so:%T/third_tool.so \
// RUN: OMP_TOOL_VERBOSE_INIT=stdout %libomp-run | FileCheck %s -DPARENTPATH=%T
// REQUIRES: ompt
/*
* This file contains code for three OMPT shared library tool to be
* loaded and the code for the OpenMP executable.
* No option enables code for the first shared library
* (without an implementation of ompt_start_tool) during compilation
* -DTOOL -DSECOND_TOOL enables the code for the second tool during compilation
* -DTOOL -DTHIRD_TOOL enables the code for the third tool during compilation
* -DCODE enables the code for the executable during compilation
*/
// CHECK: ----- START LOGGING OF TOOL REGISTRATION -----
// CHECK-NEXT: Search for OMP tool in current address space... Failed.
// CHECK-NEXT: Searching tool libraries...
// CHECK-NEXT: OMP_TOOL_LIBRARIES = [[PARENTPATH]]/non_existing_file.so
// CHECK-SAME: [[PARENTPATH]]/first_tool.so
// CHECK-SAME: [[PARENTPATH]]/second_tool.so
// CHECK-SAME: [[PARENTPATH]]/third_tool.so
// CHECK-NEXT: Opening [[PARENTPATH]]/non_existing_file.so... Failed:
// CHECK-SAME: [[PARENTPATH]]/non_existing_file.so: cannot open shared object
// CHECK-SAME: file: No such file or directory
// CHECK-NEXT: Opening [[PARENTPATH]]/first_tool.so... Success.
// CHECK-NEXT: Searching for ompt_start_tool in
// CHECK-SAME: [[PARENTPATH]]/first_tool.so... Failed:
// CHECK-SAME: [[PARENTPATH]]/first_tool.so: undefined symbol: ompt_start_tool
// CHECK-NEXT: Opening [[PARENTPATH]]/second_tool.so... Success.
// CHECK-NEXT: Searching for ompt_start_tool in
// CHECK-SAME: [[PARENTPATH]]/second_tool.so... 0: Do not initialize tool
// CHECK-NEXT: Found but not using the OMPT interface.
// CHECK-NEXT: Continuing search...
// CHECK-NEXT: Opening [[PARENTPATH]]/third_tool.so... Success.
// CHECK-NEXT: Searching for ompt_start_tool in
// CHECK-SAME: [[PARENTPATH]]/third_tool.so... 0: Do initialize tool
// CHECK-NEXT: Success.
// CHECK-NEXT: Tool was started and is using the OMPT interface.
// CHECK-NEXT: ----- END LOGGING OF TOOL REGISTRATION -----
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback
// CHECK: {{^}}0: Tool initialized
// CHECK: {{^}}0: ompt_event_thread_begin
// CHECK-DAG: {{^}}0: ompt_event_thread_begin
// CHECK-DAG: {{^}}0: control_tool()=-1
// CHECK: {{^}}0: Tool finalized
#ifdef CODE
#include "stdio.h"
#include "omp.h"
#include "omp-tools.h"
/* Spawn two OpenMP threads; only the master thread pokes the loaded tool
 * through omp_control_tool() and reports the returned status code. */
int main()
{
#pragma omp parallel num_threads(2)
  {
#pragma omp master
    {
      int rc;
      rc = omp_control_tool(omp_control_tool_start, 0, NULL);
      printf("0: control_tool()=%d\n", rc);
    }
  }
  return 0;
}
#endif /* CODE */
#ifdef TOOL
#include <omp-tools.h>
#include "stdio.h"
#ifdef SECOND_TOOL
// The second tool has an implementation of ompt_start_tool that returns NULL
/* Decline activation: returning NULL tells the runtime to keep searching
 * the remaining libraries listed in OMP_TOOL_LIBRARIES. */
ompt_start_tool_result_t* ompt_start_tool(unsigned int omp_version,
                                          const char *runtime_version)
{
  printf("0: Do not initialize tool\n");
  return NULL;
}
#elif defined(THIRD_TOOL)
// The third tool has an implementation of ompt_start_tool that returns a
// pointer to a valid instance of ompt_start_tool_result_t
/* Thread-begin callback: only records that the event fired. */
static void on_ompt_callback_thread_begin(ompt_thread_t thread_type,
                                          ompt_data_t *thread_data)
{
  printf("0: ompt_event_thread_begin\n");
}
/* Tool initializer: look up the callback-registration entry point and
 * subscribe to thread-begin events.  Returns 1 to keep the tool active. */
int ompt_initialize(ompt_function_lookup_t lookup, int initial_device_num,
                    ompt_data_t *tool_data) {
  ompt_set_callback_t set_callback =
      (ompt_set_callback_t)lookup("ompt_set_callback");
  set_callback(ompt_callback_thread_begin,
               (ompt_callback_t)on_ompt_callback_thread_begin);
  printf("0: Tool initialized\n");
  return 1;
}
/* Tool teardown: invoked once when the OpenMP runtime shuts down. */
void ompt_finalize(ompt_data_t *tool_data)
{
  printf("0: Tool finalized\n");
}
/* Accept activation: hand the runtime our initialize/finalize pair. */
ompt_start_tool_result_t* ompt_start_tool(unsigned int omp_version,
                                          const char *runtime_version)
{
  static ompt_start_tool_result_t result = {&ompt_initialize,
                                            &ompt_finalize, 0};
  printf("0: Do initialize tool\n");
  return &result;
}
#endif
#endif /* TOOL */
|
GB_binop__atan2_fp32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__atan2_fp32)
// A.*B function (eWiseMult): GB (_AemultB_08__atan2_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__atan2_fp32)
// A.*B function (eWiseMult): GB (_AemultB_04__atan2_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__atan2_fp32)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__atan2_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__atan2_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__atan2_fp32)
// C=scalar+B GB (_bind1st__atan2_fp32)
// C=scalar+B' GB (_bind1st_tran__atan2_fp32)
// C=A+scalar GB (_bind2nd__atan2_fp32)
// C=A'+scalar GB (_bind2nd_tran__atan2_fp32)
// C type: float
// A type: float
// B,b type: float
// BinaryOp: cij = atan2f (aij, bij)
// A matrix entry type
#define GB_ATYPE \
    float
// B matrix entry type
#define GB_BTYPE \
    float
// C matrix entry type
#define GB_CTYPE \
    float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    float aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    float bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)
// C entry accessor used by the template files
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = atan2f (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
    1
// op is second
#define GB_OP_IS_SECOND \
    0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ATAN2 || GxB_NO_FP32 || GxB_NO_ATAN2_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Disabled: ATAN2 is not in that list, so no dense C += A+B kernel is
// generated for this operator.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; no accumulator is applied.
GrB_Info GB (_Cdense_ewise3_noaccum__atan2_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C.
GrB_Info GB (_Cdense_accumB__atan2_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into the dense matrix C.
GrB_Info GB (_Cdense_accumb__atan2_fp32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable -- the block above always returns.  Kept
    // as-is because this file is auto-generated from Generator/*.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled: no C = A*D colscale kernel is generated for the ATAN2 operator.
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled: no C = D*B rowscale kernel is generated for the ATAN2 operator.
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B with atan2f as the operator.
GrB_Info GB (_AaddB__atan2_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // slicing workspaces for M, A, and B; released by GB_FREE_WORK
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B (C sparse/hypersparse), optionally under a mask.
GrB_Info GB (_AemultB_08__atan2_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
GrB_Info GB (_AemultB_02__atan2_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x), so swap the template's argument
    // order instead.
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hyper and A, B are bitmap/full.
GrB_Info GB (_AemultB_04__atan2_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B where C is bitmap, with an optional (complemented) mask.
GrB_Info GB (_AemultB_bitmap__atan2_fp32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = atan2f (x, Bx [p]) for every entry present in B, binding the
// scalar x as the first operator argument.
GrB_Info GB (_bind1st__atan2_fp32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float *Bx = (float *) Bx_input ;
    float x = (*((float *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // only process entries present in the bitmap
        if (GBB (Bb, k))
        {
            float bij = GBX (Bx, k, false) ;
            Cx [k] = atan2f (x, bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = atan2f (Ax [p], y) for every entry present in A, binding the
// scalar y as the second operator argument.
GrB_Info GB (_bind2nd__atan2_fp32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float y = (*((float *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // only process entries present in the bitmap
        if (GBB (Ab, k))
        {
            float aij = GBX (Ax, k, false) ;
            Cx [k] = atan2f (aij, y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    float aij = GBX (Ax, pA, false) ; \
    Cx [pC] = atan2f (x, aij) ; \
}
// C = op (x, A'): transpose A and apply the operator with x bound first.
GrB_Info GB (_bind1st_tran__atan2_fp32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    float
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function
    #undef GB_ATYPE
    #define GB_ATYPE \
    float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    float aij = GBX (Ax, pA, false) ; \
    Cx [pC] = atan2f (aij, y) ; \
}
// C = op (A', y): transpose A and apply the operator with y bound second.
GrB_Info GB (_bind2nd_tran__atan2_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
libperf.c | /**
* Copyright (C) Mellanox Technologies Ltd. 2001-2014. ALL RIGHTS RESERVED.
* Copyright (C) UT-Battelle, LLC. 2015. ALL RIGHTS RESERVED.
* Copyright (C) The University of Tennessee and The University
* of Tennessee Research Foundation. 2015-2016. ALL RIGHTS RESERVED.
* Copyright (C) ARM Ltd. 2017. ALL RIGHTS RESERVED.
* See file LICENSE for terms.
*/
#include "libperf_int.h"
#include <ucs/debug/log.h>
#include <string.h>
#include <malloc.h>
#include <unistd.h>
/* Per-endpoint bootstrap record: address lengths for the UCT or UCP flavor,
 * the packed rkey size, and the peer's receive-buffer address.  Presumably
 * exchanged with the remote side during setup -- confirm against callers. */
typedef struct {
    union {
        struct {
            size_t dev_addr_len;
            size_t iface_addr_len;
            size_t ep_addr_len;
        } uct;
        struct {
            size_t addr_len;
        } ucp;
    };
    size_t rkey_size;
    unsigned long recv_buffer;
} ucx_perf_ep_info_t;
/*
* This Quickselect routine is based on the algorithm described in
* "Numerical recipes in C", Second Edition,
* Cambridge University Press, 1992, Section 8.5, ISBN 0-521-43108-5
* This code by Nicolas Devillard - 1998. Public domain.
*/
/* Return the median of arr[0..n-1] using Hoare's quickselect; the array is
 * reordered in place.  Used below for the "typical" latency estimate. */
static ucs_time_t __find_median_quick_select(ucs_time_t arr[], int n)
{
    int low, high ;
    int median;
    int middle, ll, hh;
/* In-place swap of two timing samples. */
#define ELEM_SWAP(a,b) { register ucs_time_t t=(a);(a)=(b);(b)=t; }
    low = 0 ; high = n-1 ; median = (low + high) / 2;
    for (;;) {
        if (high <= low) /* One element only */
            return arr[median] ;
        if (high == low + 1) { /* Two elements only */
            if (arr[low] > arr[high])
                ELEM_SWAP(arr[low], arr[high]) ;
            return arr[median] ;
        }
        /* Find median of low, middle and high items; swap into position low */
        middle = (low + high) / 2;
        if (arr[middle] > arr[high]) ELEM_SWAP(arr[middle], arr[high]) ;
        if (arr[low] > arr[high]) ELEM_SWAP(arr[low], arr[high]) ;
        if (arr[middle] > arr[low]) ELEM_SWAP(arr[middle], arr[low]) ;
        /* Swap low item (now in position middle) into position (low+1) */
        ELEM_SWAP(arr[middle], arr[low+1]) ;
        /* Nibble from each end towards middle, swapping items when stuck */
        ll = low + 1;
        hh = high;
        for (;;) {
            /* arr[low] is the pivot; scans cannot run off the partition. */
            do ll++; while (arr[low] > arr[ll]) ;
            do hh--; while (arr[hh] > arr[low]) ;
            if (hh < ll)
                break;
            ELEM_SWAP(arr[ll], arr[hh]) ;
        }
        /* Swap middle item (in position low) back into correct position */
        ELEM_SWAP(arr[low], arr[hh]) ;
        /* Re-set active partition */
        if (hh <= median)
            low = ll;
        if (hh >= median)
            high = hh - 1;
    }
}
/* Allocate the send/receive buffers (one slot per thread) and the IOV array
 * for a UCT test run.
 *
 * @param perf    Benchmark context; on success its send/recv buffer pointers
 *                and IOV array are initialized.
 * @param params  Test parameters (layout, message sizes, thread count).
 * @return UCS_OK on success, or an error status with all partially-acquired
 *         resources released.
 */
static ucs_status_t uct_perf_test_alloc_mem(ucx_perf_context_t *perf,
                                            ucx_perf_params_t *params)
{
    ucs_status_t status;
    unsigned flags;
    size_t buffer_size;

    /* ZCOPY with a stride needs room for msg_size_cnt strided IOV entries;
     * otherwise a single message-sized buffer suffices. */
    if ((UCT_PERF_DATA_LAYOUT_ZCOPY == params->uct.data_layout) && params->iov_stride) {
        buffer_size = params->msg_size_cnt * params->iov_stride;
    } else {
        buffer_size = ucx_perf_get_message_size(params);
    }

    /* TODO use params->alignment */

    flags = (params->flags & UCX_PERF_TEST_FLAG_MAP_NONBLOCK) ?
            UCT_MD_MEM_FLAG_NONBLOCK : 0;

    /* Allocate send buffer memory */
    status = uct_iface_mem_alloc(perf->uct.iface,
                                 buffer_size * params->thread_count,
                                 flags, "perftest", &perf->uct.send_mem);
    if (status != UCS_OK) {
        ucs_error("Failed allocate send buffer: %s", ucs_status_string(status));
        goto err;
    }
    ucs_assert(perf->uct.send_mem.md == perf->uct.md);
    perf->send_buffer = perf->uct.send_mem.address;

    /* Allocate receive buffer memory */
    status = uct_iface_mem_alloc(perf->uct.iface,
                                 buffer_size * params->thread_count,
                                 flags, "perftest", &perf->uct.recv_mem);
    if (status != UCS_OK) {
        ucs_error("Failed allocate receive buffer: %s", ucs_status_string(status));
        goto err_free_send;
    }
    ucs_assert(perf->uct.recv_mem.md == perf->uct.md);
    perf->recv_buffer = perf->uct.recv_mem.address;

    /* Allocate IOV datatype memory */
    perf->params.msg_size_cnt = params->msg_size_cnt;
    perf->uct.iov = malloc(sizeof(*perf->uct.iov) *
                           perf->params.msg_size_cnt *
                           params->thread_count);
    if (NULL == perf->uct.iov) {
        status = UCS_ERR_NO_MEMORY;
        ucs_error("Failed allocate send IOV(%lu) buffer: %s",
                  perf->params.msg_size_cnt, ucs_status_string(status));
        /* BUGFIX: previously jumped to err_free_send, leaking recv_mem
         * which was already successfully allocated above. */
        goto err_free_recv;
    }

    perf->offset = 0;
    ucs_debug("allocated memory. Send buffer %p, Recv buffer %p",
              perf->send_buffer, perf->recv_buffer);
    return UCS_OK;

err_free_recv:
    uct_iface_mem_free(&perf->uct.recv_mem);
err_free_send:
    uct_iface_mem_free(&perf->uct.send_mem);
err:
    return status;
}
/* Release everything uct_perf_test_alloc_mem() acquired: the IOV array and
 * both registered buffers (the three resources are independent). */
static void uct_perf_test_free_mem(ucx_perf_context_t *perf)
{
    free(perf->uct.iov);
    uct_iface_mem_free(&perf->uct.recv_mem);
    uct_iface_mem_free(&perf->uct.send_mem);
}
/* Stamp the benchmark start time and seed both "previous" timestamps so the
 * first report interval measures from now. */
void ucx_perf_test_start_clock(ucx_perf_context_t *perf)
{
    ucs_time_t now = ucs_get_time();

    perf->start_time = now;
    perf->prev_time  = now;
    perf->prev.time  = now;
}
/* Re-initialize the benchmark context from 'params': restart the clock,
 * derive the stop conditions, and zero every running counter. */
static void ucx_perf_test_reset(ucx_perf_context_t *perf,
                                ucx_perf_params_t *params)
{
    unsigned idx;

    perf->params     = *params;
    perf->start_time = ucs_get_time();
    perf->prev_time  = perf->start_time;

    /* Zero limits mean "run unbounded". */
    if (perf->params.max_time == 0.0) {
        perf->end_time = UINT64_MAX;
    } else {
        perf->end_time = ucs_time_from_sec(perf->params.max_time) +
                         perf->start_time;
    }
    if (perf->params.max_iter == 0) {
        perf->max_iter = UINT64_MAX;
    } else {
        perf->max_iter = perf->params.max_iter;
    }
    perf->report_interval = ucs_time_from_sec(perf->params.report_interval);

    perf->current.time  = 0;
    perf->current.msgs  = 0;
    perf->current.bytes = 0;
    perf->current.iters = 0;
    perf->prev.time     = perf->start_time;
    perf->prev.msgs     = 0;
    perf->prev.bytes    = 0;
    perf->prev.iters    = 0;

    perf->timing_queue_head = 0;
    perf->offset            = 0;
    for (idx = 0; idx < TIMING_QUEUE_SIZE; ++idx) {
        perf->timing_queue[idx] = 0;
    }
}
/* Derive latency, bandwidth, and message-rate statistics from the counters
 * accumulated in 'perf'.  Ping-pong tests count each iteration as two
 * transfers, hence factor == 2.  "moment" averages cover the interval since
 * the previous report; "total" averages cover the whole run. */
void ucx_perf_calc_result(ucx_perf_context_t *perf, ucx_perf_result_t *result)
{
    double factor;
    double sec_value;
    sec_value = ucs_time_from_sec(1.0);
    if (perf->params.test_type == UCX_PERF_TEST_TYPE_PINGPONG) {
        factor = 2.0;
    } else {
        factor = 1.0;
    }
    result->iters = perf->current.iters;
    result->bytes = perf->current.bytes;
    result->elapsed_time = perf->current.time - perf->start_time;
    /* Latency */
    /* "typical" = median of the last TIMING_QUEUE_SIZE per-iteration samples
     * (note: __find_median_quick_select reorders the queue in place). */
    result->latency.typical =
        __find_median_quick_select(perf->timing_queue, TIMING_QUEUE_SIZE)
        / sec_value
        / factor;
    result->latency.moment_average =
        (double)(perf->current.time - perf->prev.time)
        / (perf->current.iters - perf->prev.iters)
        / sec_value
        / factor;
    result->latency.total_average =
        (double)(perf->current.time - perf->start_time)
        / perf->current.iters
        / sec_value
        / factor;
    /* Bandwidth */
    result->bandwidth.typical = 0.0; // Undefined
    result->bandwidth.moment_average =
        (perf->current.bytes - perf->prev.bytes) * sec_value
        / (double)(perf->current.time - perf->prev.time) * factor;
    result->bandwidth.total_average =
        perf->current.bytes * sec_value
        / (double)(perf->current.time - perf->start_time) * factor;
    /* Packet rate */
    result->msgrate.typical = 0.0; // Undefined
    result->msgrate.moment_average =
        (perf->current.msgs - perf->prev.msgs) * sec_value
        / (double)(perf->current.time - perf->prev.time) * factor;
    result->msgrate.total_average =
        perf->current.msgs * sec_value
        / (double)(perf->current.time - perf->start_time) * factor;
}
/*
 * Validate the transport-independent test parameters: the message must be at
 * least one byte, at least one outstanding operation must be allowed, and
 * when an IOV stride is configured every message size must fit inside it.
 */
static ucs_status_t ucx_perf_test_check_params(ucx_perf_params_t *params)
{
    size_t idx;

    if (ucx_perf_get_message_size(params) < 1) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Message size too small, need to be at least 1");
        }
        return UCS_ERR_INVALID_PARAM;
    }

    if (params->max_outstanding < 1) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("max_outstanding, need to be at least 1");
        }
        return UCS_ERR_INVALID_PARAM;
    }

    /* check if particular message size fit into stride size */
    if (params->iov_stride != 0) {
        for (idx = 0; idx < params->msg_size_cnt; ++idx) {
            if (params->msg_size_list[idx] <= params->iov_stride) {
                continue;
            }
            if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("Buffer size %lu bigger than stride %lu",
                          params->msg_size_list[idx], params->iov_stride);
            }
            return UCS_ERR_INVALID_PARAM;
        }
    }

    return UCS_OK;
}
/*
 * Flush the UCT interface, blocking until the flush completes while driving
 * progress on the worker.
 */
void uct_perf_iface_flush_b(ucx_perf_context_t *perf)
{
    for (;;) {
        ucs_status_t flush_status = uct_iface_flush(perf->uct.iface, 0, NULL);
        uct_worker_progress(perf->uct.worker);
        if (flush_status != UCS_INPROGRESS) {
            break;
        }
    }
}
/*
 * Map a data layout to the matching capability flag; returns 0 for a
 * layout that has no corresponding flag.
 */
static inline uint64_t __get_flag(uct_perf_data_layout_t layout, uint64_t short_f,
                                  uint64_t bcopy_f, uint64_t zcopy_f)
{
    switch (layout) {
    case UCT_PERF_DATA_LAYOUT_SHORT:
        return short_f;
    case UCT_PERF_DATA_LAYOUT_BCOPY:
        return bcopy_f;
    case UCT_PERF_DATA_LAYOUT_ZCOPY:
        return zcopy_f;
    default:
        return 0;
    }
}
/*
 * Pick the 32- or 64-bit atomic capability flag by operand size; returns 0
 * for any other size.
 */
static inline uint64_t __get_atomic_flag(size_t size, uint64_t flag32, uint64_t flag64)
{
    if (size == 4) {
        return flag32;
    }
    if (size == 8) {
        return flag64;
    }
    return 0;
}
/*
 * Map a data layout to the matching size limit (call sites pass either the
 * max_* capability fields or, for the minimum, the min_zcopy field); returns
 * 0 for a layout without a limit.
 *
 * Fix: zcopy_m was declared uint64_t while its siblings are size_t; all
 * callers pass size_t capability fields, so use size_t consistently (the
 * mismatch silently widened/narrowed on non-LP64 targets).
 */
static inline size_t __get_max_size(uct_perf_data_layout_t layout, size_t short_m,
                                    size_t bcopy_m, size_t zcopy_m)
{
    return (layout == UCT_PERF_DATA_LAYOUT_SHORT) ? short_m :
           (layout == UCT_PERF_DATA_LAYOUT_BCOPY) ? bcopy_m :
           (layout == UCT_PERF_DATA_LAYOUT_ZCOPY) ? zcopy_m :
           0;
}
/*
 * Check that the selected transport interface can run the requested test:
 * the operation's capability flags, the message size limits, the AM header
 * constraints, and - for zero-copy - the number of IOV entries.
 *
 * Returns UCS_OK if the test can run, UCS_ERR_INVALID_PARAM for malformed
 * parameters, or UCS_ERR_UNSUPPORTED when the device lacks a capability.
 *
 * Fix: corrected the "on-sided" typo in the one-sided-progress warning.
 */
static ucs_status_t uct_perf_test_check_capabilities(ucx_perf_params_t *params,
                                                     uct_iface_h iface)
{
    uct_iface_attr_t attr;
    ucs_status_t status;
    uint64_t required_flags;
    size_t min_size, max_size, max_iov, message_size;

    status = uct_iface_query(iface, &attr);
    if (status != UCS_OK) {
        return status;
    }

    min_size     = 0;
    max_iov      = 1;
    message_size = ucx_perf_get_message_size(params);

    /* Derive capability flags and size limits for the tested command */
    switch (params->command) {
    case UCX_PERF_CMD_AM:
        required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_AM_SHORT,
                                    UCT_IFACE_FLAG_AM_BCOPY, UCT_IFACE_FLAG_AM_ZCOPY);
        required_flags |= UCT_IFACE_FLAG_AM_CB_SYNC;
        min_size = __get_max_size(params->uct.data_layout, 0, 0,
                                  attr.cap.am.min_zcopy);
        max_size = __get_max_size(params->uct.data_layout, attr.cap.am.max_short,
                                  attr.cap.am.max_bcopy, attr.cap.am.max_zcopy);
        max_iov  = attr.cap.am.max_iov;
        break;
    case UCX_PERF_CMD_PUT:
        required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_PUT_SHORT,
                                    UCT_IFACE_FLAG_PUT_BCOPY, UCT_IFACE_FLAG_PUT_ZCOPY);
        min_size = __get_max_size(params->uct.data_layout, 0, 0,
                                  attr.cap.put.min_zcopy);
        max_size = __get_max_size(params->uct.data_layout, attr.cap.put.max_short,
                                  attr.cap.put.max_bcopy, attr.cap.put.max_zcopy);
        max_iov  = attr.cap.put.max_iov;
        break;
    case UCX_PERF_CMD_GET:
        /* GET has no "short" variant, hence the zero flag/limit */
        required_flags = __get_flag(params->uct.data_layout, 0,
                                    UCT_IFACE_FLAG_GET_BCOPY, UCT_IFACE_FLAG_GET_ZCOPY);
        min_size = __get_max_size(params->uct.data_layout, 0, 0,
                                  attr.cap.get.min_zcopy);
        max_size = __get_max_size(params->uct.data_layout, 0,
                                  attr.cap.get.max_bcopy, attr.cap.get.max_zcopy);
        max_iov  = attr.cap.get.max_iov;
        break;
    case UCX_PERF_CMD_ADD:
        required_flags = __get_atomic_flag(message_size, UCT_IFACE_FLAG_ATOMIC_ADD32,
                                           UCT_IFACE_FLAG_ATOMIC_ADD64);
        max_size = 8;
        break;
    case UCX_PERF_CMD_FADD:
        required_flags = __get_atomic_flag(message_size, UCT_IFACE_FLAG_ATOMIC_FADD32,
                                           UCT_IFACE_FLAG_ATOMIC_FADD64);
        max_size = 8;
        break;
    case UCX_PERF_CMD_SWAP:
        required_flags = __get_atomic_flag(message_size, UCT_IFACE_FLAG_ATOMIC_SWAP32,
                                           UCT_IFACE_FLAG_ATOMIC_SWAP64);
        max_size = 8;
        break;
    case UCX_PERF_CMD_CSWAP:
        required_flags = __get_atomic_flag(message_size, UCT_IFACE_FLAG_ATOMIC_CSWAP32,
                                           UCT_IFACE_FLAG_ATOMIC_CSWAP64);
        max_size = 8;
        break;
    default:
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Invalid test command");
        }
        return UCS_ERR_INVALID_PARAM;
    }

    status = ucx_perf_test_check_params(params);
    if (status != UCS_OK) {
        return status;
    }

    /* required_flags == 0 means the layout/size is invalid for this command */
    if (!ucs_test_all_flags(attr.cap.flags, required_flags) || !required_flags) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Device does not support required operation");
        }
        return UCS_ERR_UNSUPPORTED;
    }

    if (message_size < min_size) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Message size too small");
        }
        return UCS_ERR_UNSUPPORTED;
    }

    if (message_size > max_size) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Message size too big");
        }
        return UCS_ERR_UNSUPPORTED;
    }

    if (params->command == UCX_PERF_CMD_AM) {
        if ((params->uct.data_layout == UCT_PERF_DATA_LAYOUT_SHORT) &&
            (params->am_hdr_size != sizeof(uint64_t)))
        {
            if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("Short AM header size must be 8 bytes");
            }
            return UCS_ERR_INVALID_PARAM;
        }

        if ((params->uct.data_layout == UCT_PERF_DATA_LAYOUT_ZCOPY) &&
            (params->am_hdr_size > attr.cap.am.max_hdr))
        {
            if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("AM header size too big");
            }
            return UCS_ERR_UNSUPPORTED;
        }

        if (params->am_hdr_size > message_size) {
            if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("AM header size larger than message size");
            }
            return UCS_ERR_INVALID_PARAM;
        }

        if (params->uct.fc_window > UCT_PERF_TEST_MAX_FC_WINDOW) {
            if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("AM flow-control window too large (should be <= %d)",
                          UCT_PERF_TEST_MAX_FC_WINDOW);
            }
            return UCS_ERR_INVALID_PARAM;
        }

        if ((params->flags & UCX_PERF_TEST_FLAG_ONE_SIDED) &&
            (params->flags & UCX_PERF_TEST_FLAG_VERBOSE))
        {
            /* Fixed typo: "on-sided" -> "one-sided" */
            ucs_warn("Running active-message test with one-sided progress");
        }
    }

    if (UCT_PERF_DATA_LAYOUT_ZCOPY == params->uct.data_layout) {
        if (params->msg_size_cnt > max_iov) {
            if ((params->flags & UCX_PERF_TEST_FLAG_VERBOSE) ||
                !params->msg_size_cnt) {
                ucs_error("Wrong number of IOV entries. Requested is %lu, "
                          "should be in the range 1...%lu", params->msg_size_cnt,
                          max_iov);
            }
            return UCS_ERR_UNSUPPORTED;
        }
        /* if msg_size_cnt == 1 the message size checked above */
        if ((UCX_PERF_CMD_AM == params->command) && (params->msg_size_cnt > 1)) {
            if (params->am_hdr_size > params->msg_size_list[0]) {
                if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                    ucs_error("AM header size (%lu) larger than the first IOV "
                              "message size (%lu)", params->am_hdr_size,
                              params->msg_size_list[0]);
                }
                return UCS_ERR_INVALID_PARAM;
            }
        }
    }

    return UCS_OK;
}
/*
 * Exchange addresses and rkeys with all peers through the RTE and connect a
 * UCT endpoint to each of them.
 *
 * A single scratch buffer holds, back to back: the packed rkey, the device
 * address, the iface address and the ep address. The same layout is used to
 * parse the blob received from each peer.
 *
 * Fix: on calloc() failure the function previously jumped to the error path
 * without setting 'status', returning the (successful) status of the last
 * UCT call; it now returns UCS_ERR_NO_MEMORY.
 */
static ucs_status_t uct_perf_test_setup_endpoints(ucx_perf_context_t *perf)
{
    const size_t buffer_size = 2048;
    ucx_perf_ep_info_t info, *remote_info;
    unsigned group_size, i, group_index;
    uct_device_addr_t *dev_addr;
    uct_iface_addr_t *iface_addr;
    uct_ep_addr_t *ep_addr;
    uct_iface_attr_t iface_attr;
    uct_md_attr_t md_attr;
    void *rkey_buffer;
    ucs_status_t status;
    struct iovec vec[5];
    void *buffer;
    void *req;

    buffer = malloc(buffer_size);
    if (buffer == NULL) {
        ucs_error("Failed to allocate RTE buffer");
        status = UCS_ERR_NO_MEMORY;
        goto err;
    }

    status = uct_iface_query(perf->uct.iface, &iface_attr);
    if (status != UCS_OK) {
        ucs_error("Failed to uct_iface_query: %s", ucs_status_string(status));
        goto err_free;
    }

    status = uct_md_query(perf->uct.md, &md_attr);
    if (status != UCS_OK) {
        ucs_error("Failed to uct_md_query: %s", ucs_status_string(status));
        goto err_free;
    }

    /* Only MDs that allocate/register memory produce a packed rkey */
    if (md_attr.cap.flags & (UCT_MD_FLAG_ALLOC|UCT_MD_FLAG_REG)) {
        info.rkey_size = md_attr.rkey_packed_size;
    } else {
        info.rkey_size = 0;
    }
    info.uct.dev_addr_len   = iface_attr.device_addr_len;
    info.uct.iface_addr_len = iface_attr.iface_addr_len;
    info.uct.ep_addr_len    = iface_attr.ep_addr_len;
    info.recv_buffer        = (uintptr_t)perf->recv_buffer;

    /* Lay out the outgoing addresses inside the scratch buffer */
    rkey_buffer = buffer;
    dev_addr    = (void*)rkey_buffer + info.rkey_size;
    iface_addr  = (void*)dev_addr    + info.uct.dev_addr_len;
    ep_addr     = (void*)iface_addr  + info.uct.iface_addr_len;
    ucs_assert_always((void*)ep_addr + info.uct.ep_addr_len <= buffer + buffer_size);

    status = uct_iface_get_device_address(perf->uct.iface, dev_addr);
    if (status != UCS_OK) {
        ucs_error("Failed to uct_iface_get_device_address: %s",
                  ucs_status_string(status));
        goto err_free;
    }

    if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_IFACE) {
        status = uct_iface_get_address(perf->uct.iface, iface_addr);
        if (status != UCS_OK) {
            ucs_error("Failed to uct_iface_get_address: %s", ucs_status_string(status));
            goto err_free;
        }
    }

    if (info.rkey_size > 0) {
        status = uct_md_mkey_pack(perf->uct.md, perf->uct.recv_mem.memh, rkey_buffer);
        if (status != UCS_OK) {
            ucs_error("Failed to uct_rkey_pack: %s", ucs_status_string(status));
            goto err_free;
        }
    }

    group_size  = rte_call(perf, group_size);
    group_index = rte_call(perf, group_index);

    perf->uct.peers = calloc(group_size, sizeof(*perf->uct.peers));
    if (perf->uct.peers == NULL) {
        /* Fix: previously fell through with a stale (UCS_OK) status */
        status = UCS_ERR_NO_MEMORY;
        goto err_free;
    }

    if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_EP) {
        for (i = 0; i < group_size; ++i) {
            if (i == group_index) {
                continue;
            }
            status = uct_ep_create(perf->uct.iface, &perf->uct.peers[i].ep);
            if (status != UCS_OK) {
                ucs_error("Failed to uct_ep_create: %s", ucs_status_string(status));
                goto err_destroy_eps;
            }
            status = uct_ep_get_address(perf->uct.peers[i].ep, ep_addr);
            if (status != UCS_OK) {
                ucs_error("Failed to uct_ep_get_address: %s", ucs_status_string(status));
                goto err_destroy_eps;
            }
        }
    }

    /* Publish our info and the packed addresses to all peers */
    vec[0].iov_base = &info;
    vec[0].iov_len  = sizeof(info);
    vec[1].iov_base = buffer;
    vec[1].iov_len  = info.rkey_size + info.uct.dev_addr_len +
                      info.uct.iface_addr_len + info.uct.ep_addr_len;

    rte_call(perf, post_vec, vec, 2, &req);
    rte_call(perf, exchange_vec, req);

    for (i = 0; i < group_size; ++i) {
        if (i == group_index) {
            continue;
        }

        rte_call(perf, recv, i, buffer, buffer_size, req);

        /* Parse the peer blob using the same layout we sent */
        remote_info = buffer;
        rkey_buffer = remote_info + 1;
        dev_addr    = (void*)rkey_buffer + remote_info->rkey_size;
        iface_addr  = (void*)dev_addr    + remote_info->uct.dev_addr_len;
        ep_addr     = (void*)iface_addr  + remote_info->uct.iface_addr_len;

        perf->uct.peers[i].remote_addr = remote_info->recv_buffer;

        if (remote_info->rkey_size > 0) {
            status = uct_rkey_unpack(rkey_buffer, &perf->uct.peers[i].rkey);
            if (status != UCS_OK) {
                ucs_error("Failed to uct_rkey_unpack: %s", ucs_status_string(status));
                goto err_destroy_eps;
            }
        } else {
            perf->uct.peers[i].rkey.handle = NULL;
            perf->uct.peers[i].rkey.type   = NULL;
            perf->uct.peers[i].rkey.rkey   = UCT_INVALID_RKEY;
        }

        if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_EP) {
            status = uct_ep_connect_to_ep(perf->uct.peers[i].ep, dev_addr, ep_addr);
        } else if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_IFACE) {
            status = uct_ep_create_connected(perf->uct.iface, dev_addr, iface_addr,
                                             &perf->uct.peers[i].ep);
        } else {
            status = UCS_ERR_UNSUPPORTED;
        }
        if (status != UCS_OK) {
            ucs_error("Failed to connect endpoint: %s", ucs_status_string(status));
            goto err_destroy_eps;
        }
    }

    uct_perf_iface_flush_b(perf);
    free(buffer);
    rte_call(perf, barrier);
    return UCS_OK;

err_destroy_eps:
    for (i = 0; i < group_size; ++i) {
        if (perf->uct.peers[i].rkey.type != NULL) {
            uct_rkey_release(&perf->uct.peers[i].rkey);
        }
        if (perf->uct.peers[i].ep != NULL) {
            uct_ep_destroy(perf->uct.peers[i].ep);
        }
    }
    free(perf->uct.peers);
err_free:
    free(buffer);
err:
    return status;
}
/*
 * Release every peer endpoint and rkey created by
 * uct_perf_test_setup_endpoints(), after a barrier that ensures no peer is
 * still communicating with us; also detaches the test AM handler.
 */
static void uct_perf_test_cleanup_endpoints(ucx_perf_context_t *perf)
{
    unsigned num_peers, self, peer;

    rte_call(perf, barrier);

    uct_iface_set_am_handler(perf->uct.iface, UCT_PERF_TEST_AM_ID, NULL, NULL, UCT_AM_CB_FLAG_SYNC);

    num_peers = rte_call(perf, group_size);
    self      = rte_call(perf, group_index);

    for (peer = 0; peer < num_peers; ++peer) {
        if (peer == self) {
            continue; /* no endpoint to ourselves */
        }
        if (perf->uct.peers[peer].rkey.rkey != UCT_INVALID_RKEY) {
            uct_rkey_release(&perf->uct.peers[peer].rkey);
        }
        if (perf->uct.peers[peer].ep) {
            uct_ep_destroy(perf->uct.peers[peer].ep);
        }
    }

    free(perf->uct.peers);
}
/*
 * Validate the UCP test parameters and derive the UCP feature bits the
 * context must be initialized with (RMA, AMO32/AMO64 or TAG, depending on
 * the test command).
 *
 * Fix: message_size was declared with type ucs_status_t; it holds a byte
 * count and is now a size_t.
 */
static ucs_status_t ucp_perf_test_check_params(ucx_perf_params_t *params,
                                               uint64_t *features)
{
    ucs_status_t status;
    size_t message_size;

    message_size = ucx_perf_get_message_size(params);
    switch (params->command) {
    case UCX_PERF_CMD_PUT:
    case UCX_PERF_CMD_GET:
        *features = UCP_FEATURE_RMA;
        break;
    case UCX_PERF_CMD_ADD:
    case UCX_PERF_CMD_FADD:
    case UCX_PERF_CMD_SWAP:
    case UCX_PERF_CMD_CSWAP:
        /* Atomics exist only for 32- and 64-bit operands */
        if (message_size == sizeof(uint32_t)) {
            *features = UCP_FEATURE_AMO32;
        } else if (message_size == sizeof(uint64_t)) {
            *features = UCP_FEATURE_AMO64;
        } else {
            if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("Atomic size should be either 32 or 64 bit");
            }
            return UCS_ERR_INVALID_PARAM;
        }
        break;
    case UCX_PERF_CMD_TAG:
        *features = UCP_FEATURE_TAG;
        break;
    default:
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Invalid test command");
        }
        return UCS_ERR_INVALID_PARAM;
    }

    /* Generic (API-independent) parameter checks */
    status = ucx_perf_test_check_params(params);
    if (status != UCS_OK) {
        return status;
    }

    return UCS_OK;
}
/*
 * Allocate the per-thread IOV descriptor array when the datatype is IOV;
 * for any other datatype this is a no-op. On success *iov_p receives the
 * array (caller owns it).
 */
static ucs_status_t ucp_perf_test_alloc_iov_mem(ucp_perf_datatype_t datatype,
                                                size_t iovcnt, unsigned thread_count,
                                                ucp_dt_iov_t **iov_p)
{
    ucp_dt_iov_t *iov_buffer;

    if (UCP_PERF_DATATYPE_IOV != datatype) {
        return UCS_OK; /* contiguous datatype - nothing to allocate */
    }

    iov_buffer = malloc(sizeof(*iov_buffer) * iovcnt * thread_count);
    if (iov_buffer == NULL) {
        ucs_error("Failed allocate IOV buffer with iovcnt=%lu", iovcnt);
        return UCS_ERR_NO_MEMORY;
    }
    *iov_p = iov_buffer;
    return UCS_OK;
}
/*
 * Allocate and map the send/receive buffers (one region per thread) and,
 * for IOV datatypes, the IOV descriptor arrays.
 *
 * Fixes:
 *  - a failed ucp_mem_query() on the send memh jumped to 'err', leaking the
 *    just-mapped send region; a failed query on the recv memh jumped to
 *    'err_free_send_buffer', leaking the recv region. Both now unwind fully.
 *  - the error path returned UCS_ERR_NO_MEMORY unconditionally; it now
 *    returns the actual failure status (every goto sets one).
 */
static ucs_status_t ucp_perf_test_alloc_mem(ucx_perf_context_t *perf, ucx_perf_params_t *params)
{
    ucs_status_t status;
    ucp_mem_map_params_t mem_map_params;
    ucp_mem_attr_t mem_attr;
    size_t buffer_size;

    if (params->iov_stride) {
        buffer_size = params->msg_size_cnt * params->iov_stride;
    } else {
        buffer_size = ucx_perf_get_message_size(params);
    }

    /* Allocate send buffer memory */
    perf->send_buffer = NULL;

    mem_map_params.field_mask = UCP_MEM_MAP_PARAM_FIELD_ADDRESS |
                                UCP_MEM_MAP_PARAM_FIELD_LENGTH |
                                UCP_MEM_MAP_PARAM_FIELD_FLAGS;
    mem_map_params.address    = perf->send_buffer;
    mem_map_params.length     = buffer_size * params->thread_count;
    mem_map_params.flags      = (params->flags & UCX_PERF_TEST_FLAG_MAP_NONBLOCK) ?
                                UCP_MEM_MAP_NONBLOCK : 0;
    mem_map_params.flags     |= UCP_MEM_MAP_ALLOCATE;

    status = ucp_mem_map(perf->ucp.context, &mem_map_params,
                         &perf->ucp.send_memh);
    if (status != UCS_OK) {
        goto err;
    }

    mem_attr.field_mask = UCP_MEM_ATTR_FIELD_ADDRESS;
    status = ucp_mem_query(perf->ucp.send_memh, &mem_attr);
    if (status != UCS_OK) {
        goto err_unmap_send; /* was: goto err (leaked send_memh) */
    }
    perf->send_buffer = mem_attr.address;

    /* Allocate receive buffer memory */
    perf->recv_buffer = NULL;

    mem_map_params.field_mask = UCP_MEM_MAP_PARAM_FIELD_ADDRESS |
                                UCP_MEM_MAP_PARAM_FIELD_LENGTH |
                                UCP_MEM_MAP_PARAM_FIELD_FLAGS;
    mem_map_params.address    = perf->recv_buffer;
    mem_map_params.length     = buffer_size * params->thread_count;
    mem_map_params.flags      = UCP_MEM_MAP_ALLOCATE;

    status = ucp_mem_map(perf->ucp.context, &mem_map_params, &perf->ucp.recv_memh);
    if (status != UCS_OK) {
        goto err_unmap_send;
    }

    mem_attr.field_mask = UCP_MEM_ATTR_FIELD_ADDRESS;
    status = ucp_mem_query(perf->ucp.recv_memh, &mem_attr);
    if (status != UCS_OK) {
        goto err_unmap_recv; /* was: goto err_free_send_buffer (leaked recv_memh) */
    }
    perf->recv_buffer = mem_attr.address;

    /* Allocate IOV datatype memory */
    perf->params.msg_size_cnt = params->msg_size_cnt;
    perf->ucp.send_iov        = NULL;
    status = ucp_perf_test_alloc_iov_mem(params->ucp.send_datatype, perf->params.msg_size_cnt,
                                         params->thread_count, &perf->ucp.send_iov);
    if (UCS_OK != status) {
        goto err_unmap_recv;
    }

    perf->ucp.recv_iov = NULL;
    status = ucp_perf_test_alloc_iov_mem(params->ucp.recv_datatype, perf->params.msg_size_cnt,
                                         params->thread_count, &perf->ucp.recv_iov);
    if (UCS_OK != status) {
        goto err_free_send_iov;
    }

    return UCS_OK;

err_free_send_iov:
    free(perf->ucp.send_iov);
err_unmap_recv:
    ucp_mem_unmap(perf->ucp.context, perf->ucp.recv_memh);
err_unmap_send:
    ucp_mem_unmap(perf->ucp.context, perf->ucp.send_memh);
err:
    return status;
}
/*
 * Release all test memory allocated by ucp_perf_test_alloc_mem():
 * the IOV arrays first, then the two mapped buffer regions.
 */
static void ucp_perf_test_free_mem(ucx_perf_context_t *perf)
{
    free(perf->ucp.recv_iov);
    free(perf->ucp.send_iov);
    ucp_mem_unmap(perf->ucp.context, perf->ucp.recv_memh);
    ucp_mem_unmap(perf->ucp.context, perf->ucp.send_memh);
}
/*
 * Destroy up to group_size peer endpoints and their unpacked rkeys, then
 * free the peers array itself. Safe on partially-initialized arrays, since
 * the array is calloc()'d and unused slots are NULL.
 */
static void ucp_perf_test_destroy_eps(ucx_perf_context_t* perf,
                                      unsigned group_size)
{
    unsigned peer;

    for (peer = 0; peer < group_size; ++peer) {
        if (NULL != perf->ucp.peers[peer].rkey) {
            ucp_rkey_destroy(perf->ucp.peers[peer].rkey);
        }
        if (NULL != perf->ucp.peers[peer].ep) {
            ucp_ep_destroy(perf->ucp.peers[peer].ep);
        }
    }
    free(perf->ucp.peers);
}
/*
 * Collective status exchange: broadcast our local status to every peer via
 * the RTE, then receive each peer's status. Returns UCS_OK only if all
 * participants (including ourselves) reported UCS_OK; otherwise returns one
 * of the failing statuses.
 */
static ucs_status_t ucp_perf_test_exchange_status(ucx_perf_context_t *perf,
                                                  ucs_status_t status)
{
    unsigned num_peers = rte_call(perf, group_size);
    ucs_status_t collective_status = UCS_OK;
    struct iovec vec;
    void *req = NULL;
    unsigned idx;

    /* Publish our own status */
    vec.iov_base = &status;
    vec.iov_len  = sizeof(status);
    rte_call(perf, post_vec, &vec, 1, &req);
    rte_call(perf, exchange_vec, req);

    /* Collect everyone else's; remember any failure */
    for (idx = 0; idx < num_peers; ++idx) {
        rte_call(perf, recv, idx, &status, sizeof(status), req);
        if (status != UCS_OK) {
            collective_status = status;
        }
    }
    return collective_status;
}
/*
 * Exchange worker addresses (and, for RMA/AMO tests, packed rkeys) with all
 * peers through the RTE, and create one UCP endpoint + unpacked rkey per
 * peer. A collective status exchange at the end guarantees that either all
 * ranks succeeded or all of them clean up.
 *
 * Fixes:
 *  - the peers array was sized with sizeof(*perf->uct.peers) (the UCT peer
 *    struct) instead of sizeof(*perf->ucp.peers);
 *  - on calloc() failure the function jumped to 'err' with 'status' still
 *    UCS_OK, reporting success; it now returns UCS_ERR_NO_MEMORY.
 */
static ucs_status_t ucp_perf_test_setup_endpoints(ucx_perf_context_t *perf,
                                                  uint64_t features)
{
    const size_t buffer_size = 2048;
    ucx_perf_ep_info_t info, *remote_info;
    unsigned group_size, i, group_index;
    ucp_address_t *address;
    size_t address_length = 0;
    ucp_ep_params_t ep_params;
    ucs_status_t status;
    struct iovec vec[3];
    void *rkey_buffer;
    void *req = NULL;
    void *buffer;

    group_size  = rte_call(perf, group_size);
    group_index = rte_call(perf, group_index);

    status = ucp_worker_get_address(perf->ucp.worker, &address, &address_length);
    if (status != UCS_OK) {
        if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("ucp_worker_get_address() failed: %s", ucs_status_string(status));
        }
        goto err;
    }

    info.ucp.addr_len = address_length;
    info.recv_buffer  = (uintptr_t)perf->recv_buffer;

    vec[0].iov_base = &info;
    vec[0].iov_len  = sizeof(info);
    vec[1].iov_base = address;
    vec[1].iov_len  = address_length;

    if (features & (UCP_FEATURE_RMA|UCP_FEATURE_AMO32|UCP_FEATURE_AMO64)) {
        /* RMA/AMO tests also need the rkey of our receive buffer */
        status = ucp_rkey_pack(perf->ucp.context, perf->ucp.recv_memh,
                               &rkey_buffer, &info.rkey_size);
        if (status != UCS_OK) {
            if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("ucp_rkey_pack() failed: %s", ucs_status_string(status));
            }
            ucp_worker_release_address(perf->ucp.worker, address);
            goto err;
        }

        vec[2].iov_base = rkey_buffer;
        vec[2].iov_len  = info.rkey_size;
        rte_call(perf, post_vec, vec, 3, &req);
        ucp_rkey_buffer_release(rkey_buffer);
    } else {
        info.rkey_size = 0;
        rte_call(perf, post_vec, vec, 2, &req);
    }

    ucp_worker_release_address(perf->ucp.worker, address);
    rte_call(perf, exchange_vec, req);

    /* Fix: sized with the UCP peer struct (was sizeof(*perf->uct.peers)) */
    perf->ucp.peers = calloc(group_size, sizeof(*perf->ucp.peers));
    if (perf->ucp.peers == NULL) {
        status = UCS_ERR_NO_MEMORY; /* was missing: returned stale UCS_OK */
        goto err;
    }

    buffer = malloc(buffer_size);
    if (buffer == NULL) {
        ucs_error("Failed to allocate RTE receive buffer");
        status = UCS_ERR_NO_MEMORY;
        goto err_destroy_eps;
    }

    for (i = 0; i < group_size; ++i) {
        if (i == group_index) {
            continue;
        }

        rte_call(perf, recv, i, buffer, buffer_size, req);

        /* Peer blob layout: info header, then address, then packed rkey */
        remote_info = buffer;
        address     = (void*)(remote_info + 1);
        rkey_buffer = (void*)address + remote_info->ucp.addr_len;
        perf->ucp.peers[i].remote_addr = remote_info->recv_buffer;

        ep_params.field_mask = UCP_EP_PARAM_FIELD_REMOTE_ADDRESS;
        ep_params.address    = address;

        status = ucp_ep_create(perf->ucp.worker, &ep_params, &perf->ucp.peers[i].ep);
        if (status != UCS_OK) {
            if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("ucp_ep_create() failed: %s", ucs_status_string(status));
            }
            goto err_free_buffer;
        }

        if (remote_info->rkey_size > 0) {
            status = ucp_ep_rkey_unpack(perf->ucp.peers[i].ep, rkey_buffer,
                                        &perf->ucp.peers[i].rkey);
            if (status != UCS_OK) {
                if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                    ucs_fatal("ucp_rkey_unpack() failed: %s", ucs_status_string(status));
                }
                goto err_free_buffer;
            }
        } else {
            perf->ucp.peers[i].rkey = NULL;
        }
    }

    free(buffer);

    /* All ranks must agree on success before the test starts */
    status = ucp_perf_test_exchange_status(perf, UCS_OK);
    if (status != UCS_OK) {
        ucp_perf_test_destroy_eps(perf, group_size);
    }
    return status;

err_free_buffer:
    free(buffer);
err_destroy_eps:
    ucp_perf_test_destroy_eps(perf, group_size);
err:
    (void)ucp_perf_test_exchange_status(perf, status);
    return status;
}
/*
 * Synchronize with all peers, then destroy every endpoint and rkey.
 * The barrier guarantees no peer is still sending to us during teardown.
 */
static void ucp_perf_test_cleanup_endpoints(ucx_perf_context_t *perf)
{
    unsigned group_size;

    rte_call(perf, barrier);
    group_size = rte_call(perf, group_size);
    ucp_perf_test_destroy_eps(perf, group_size);
}
/*
 * Configure *perf for a warmup pass: cap the iteration count at the warmup
 * budget (never more than a tenth of the real run) and suppress periodic
 * reporting by setting the interval to -1.
 * NOTE(review): -1 presumably wraps to a huge value if report_interval is
 * unsigned -- confirm the field's type.
 */
static void ucx_perf_set_warmup(ucx_perf_context_t* perf, ucx_perf_params_t* params)
{
    perf->max_iter = ucs_min(params->warmup_iter, params->max_iter / 10);
    perf->report_interval = -1;
}
/*
 * Find and open the memory domain that provides the transport/device pair
 * requested in perf->params.uct, storing it in perf->uct.md.
 *
 * Iterates over all MD resources; each MD is opened and its transport
 * resources scanned for a tl_name/dev_name match. MDs without a match are
 * closed again. Returns UCS_ERR_NO_DEVICE if nothing matches.
 */
static ucs_status_t uct_perf_create_md(ucx_perf_context_t *perf)
{
    uct_md_resource_desc_t *md_resources;
    uct_tl_resource_desc_t *tl_resources;
    unsigned i, num_md_resources;
    unsigned j, num_tl_resources;
    ucs_status_t status;
    uct_md_h md;
    uct_md_config_t *md_config;

    status = uct_query_md_resources(&md_resources, &num_md_resources);
    if (status != UCS_OK) {
        goto out;
    }

    for (i = 0; i < num_md_resources; ++i) {
        status = uct_md_config_read(md_resources[i].md_name, NULL, NULL, &md_config);
        if (status != UCS_OK) {
            goto out_release_md_resources;
        }

        status = uct_md_open(md_resources[i].md_name, md_config, &md);
        uct_config_release(md_config);
        if (status != UCS_OK) {
            goto out_release_md_resources;
        }

        status = uct_md_query_tl_resources(md, &tl_resources, &num_tl_resources);
        if (status != UCS_OK) {
            uct_md_close(md);
            goto out_release_md_resources;
        }

        for (j = 0; j < num_tl_resources; ++j) {
            if (!strcmp(perf->params.uct.tl_name, tl_resources[j].tl_name) &&
                !strcmp(perf->params.uct.dev_name, tl_resources[j].dev_name))
            {
                /* Match: keep this MD open and hand it to the caller */
                uct_release_tl_resource_list(tl_resources);
                perf->uct.md = md;
                status = UCS_OK;
                goto out_release_md_resources;
            }
        }

        /* No match on this MD - close it and try the next one */
        uct_md_close(md);
        uct_release_tl_resource_list(tl_resources);
    }

    ucs_error("Cannot use transport %s on device %s", perf->params.uct.tl_name,
              perf->params.uct.dev_name);
    status = UCS_ERR_NO_DEVICE;

out_release_md_resources:
    uct_release_md_resource_list(md_resources);
out:
    return status;
}
/*
 * Initialize the UCT side of the test, in order: async context, worker,
 * memory domain, interface, capability check, test memory, and endpoints.
 * On failure, everything created so far is torn down in reverse order via
 * the goto ladder at the bottom.
 */
static ucs_status_t uct_perf_setup(ucx_perf_context_t *perf, ucx_perf_params_t *params)
{
    uct_iface_config_t *iface_config;
    ucs_status_t status;
    uct_iface_params_t iface_params = {
        .tl_name     = params->uct.tl_name,
        .dev_name    = params->uct.dev_name,
        .stats_root  = ucs_stats_get_root(),
        .rx_headroom = 0
    };

    UCS_CPU_ZERO(&iface_params.cpu_mask);

    status = ucs_async_context_init(&perf->uct.async, params->async_mode);
    if (status != UCS_OK) {
        goto out;
    }

    status = uct_worker_create(&perf->uct.async, params->thread_mode,
                               &perf->uct.worker);
    if (status != UCS_OK) {
        goto out_cleanup_async;
    }

    status = uct_perf_create_md(perf);
    if (status != UCS_OK) {
        goto out_destroy_worker;
    }

    status = uct_iface_config_read(params->uct.tl_name, NULL, NULL, &iface_config);
    if (status != UCS_OK) {
        goto out_destroy_md;
    }

    status = uct_iface_open(perf->uct.md, perf->uct.worker, &iface_params,
                            iface_config, &perf->uct.iface);
    uct_config_release(iface_config);
    if (status != UCS_OK) {
        ucs_error("Failed to open iface: %s", ucs_status_string(status));
        goto out_destroy_md;
    }

    /* Verify the interface can actually run the requested test */
    status = uct_perf_test_check_capabilities(params, perf->uct.iface);
    if (status != UCS_OK) {
        goto out_iface_close;
    }

    status = uct_perf_test_alloc_mem(perf, params);
    if (status != UCS_OK) {
        goto out_iface_close;
    }

    status = uct_perf_test_setup_endpoints(perf);
    if (status != UCS_OK) {
        ucs_error("Failed to setup endpoints: %s", ucs_status_string(status));
        goto out_free_mem;
    }

    return UCS_OK;

out_free_mem:
    uct_perf_test_free_mem(perf);
out_iface_close:
    uct_iface_close(perf->uct.iface);
out_destroy_md:
    uct_md_close(perf->uct.md);
out_destroy_worker:
    uct_worker_destroy(perf->uct.worker);
out_cleanup_async:
    ucs_async_context_cleanup(&perf->uct.async);
out:
    return status;
}
/* Tear down the UCT test state in exact reverse order of uct_perf_setup(). */
static void uct_perf_cleanup(ucx_perf_context_t *perf)
{
    uct_perf_test_cleanup_endpoints(perf);
    uct_perf_test_free_mem(perf);
    uct_iface_close(perf->uct.iface);
    uct_md_close(perf->uct.md);
    uct_worker_destroy(perf->uct.worker);
    ucs_async_context_cleanup(&perf->uct.async);
}
/*
 * Initialize the UCP side of the test: create a context with the features
 * required by the test command, then a worker, the test buffers, and
 * endpoints to all peers. On failure, earlier steps are undone in reverse
 * order via the goto ladder.
 *
 * Fix: corrected the "alocate" typo in the allocation-failure warning.
 */
static ucs_status_t ucp_perf_setup(ucx_perf_context_t *perf, ucx_perf_params_t *params)
{
    ucp_params_t ucp_params;
    ucp_worker_params_t worker_params;
    ucp_config_t *config;
    ucs_status_t status;
    uint64_t features;

    status = ucp_perf_test_check_params(params, &features);
    if (status != UCS_OK) {
        goto err;
    }

    status = ucp_config_read(NULL, NULL, &config);
    if (status != UCS_OK) {
        goto err;
    }

    ucp_params.field_mask = UCP_PARAM_FIELD_FEATURES;
    ucp_params.features   = features;

    status = ucp_init(&ucp_params, config, &perf->ucp.context);
    ucp_config_release(config);
    if (status != UCS_OK) {
        goto err;
    }

    worker_params.field_mask  = UCP_WORKER_PARAM_FIELD_THREAD_MODE;
    worker_params.thread_mode = params->thread_mode;

    status = ucp_worker_create(perf->ucp.context, &worker_params,
                               &perf->ucp.worker);
    if (status != UCS_OK) {
        goto err_cleanup;
    }

    status = ucp_perf_test_alloc_mem(perf, params);
    if (status != UCS_OK) {
        ucs_warn("ucp test failed to allocate memory");
        goto err_destroy_worker;
    }

    status = ucp_perf_test_setup_endpoints(perf, features);
    if (status != UCS_OK) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Failed to setup endpoints: %s", ucs_status_string(status));
        }
        goto err_free_mem;
    }

    return UCS_OK;

err_free_mem:
    ucp_perf_test_free_mem(perf);
err_destroy_worker:
    ucp_worker_destroy(perf->ucp.worker);
err_cleanup:
    ucp_cleanup(perf->ucp.context);
err:
    return status;
}
/*
 * Tear down the UCP test state in reverse order of ucp_perf_setup().
 * The barrier after endpoint cleanup keeps all ranks in lock-step before
 * the buffers they may still reference are freed.
 */
static void ucp_perf_cleanup(ucx_perf_context_t *perf)
{
    ucp_perf_test_cleanup_endpoints(perf);
    rte_call(perf, barrier);
    ucp_perf_test_free_mem(perf);
    ucp_worker_destroy(perf->ucp.worker);
    ucp_cleanup(perf->ucp.context);
}
/*
 * Dispatch table mapping the selected API (UCT or UCP) to its setup,
 * cleanup and test-run entry points; indexed by ucx_perf_run().
 */
static struct {
    ucs_status_t (*setup)(ucx_perf_context_t *perf, ucx_perf_params_t *params);
    void         (*cleanup)(ucx_perf_context_t *perf);
    ucs_status_t (*run)(ucx_perf_context_t *perf);
} ucx_perf_funcs[] = {
    [UCX_PERF_API_UCT] = {uct_perf_setup, uct_perf_cleanup, uct_perf_test_dispatch},
    [UCX_PERF_API_UCP] = {ucp_perf_setup, ucp_perf_cleanup, ucp_perf_test_dispatch}
};
static int ucx_perf_thread_spawn(ucx_perf_context_t *perf,
ucx_perf_result_t* result);
/*
 * Top-level test driver: validates parameters, sets up the chosen API via
 * the dispatch table, optionally performs a warmup pass, runs the measured
 * pass, and reports the result through the RTE. Returns the status of the
 * run, or of whichever setup step failed.
 */
ucs_status_t ucx_perf_run(ucx_perf_params_t *params, ucx_perf_result_t *result)
{
    ucx_perf_context_t *perf;
    ucs_status_t status;

    if (params->command == UCX_PERF_CMD_LAST) {
        ucs_error("Test is not selected");
        status = UCS_ERR_INVALID_PARAM;
        goto out;
    }

    if ((params->api != UCX_PERF_API_UCT) && (params->api != UCX_PERF_API_UCP)) {
        ucs_error("Invalid test API parameter (should be UCT or UCP)");
        status = UCS_ERR_INVALID_PARAM;
        goto out;
    }

    perf = malloc(sizeof(*perf));
    if (perf == NULL) {
        status = UCS_ERR_NO_MEMORY;
        goto out;
    }

    ucx_perf_test_reset(perf, params);

    status = ucx_perf_funcs[params->api].setup(perf, params);
    if (status != UCS_OK) {
        goto out_free;
    }

    if (UCS_THREAD_MODE_SINGLE == params->thread_mode) {
        /* Warmup: run a shortened pass, then reset all counters */
        if (params->warmup_iter > 0) {
            ucx_perf_set_warmup(perf, params);
            status = ucx_perf_funcs[params->api].run(perf);
            if (status != UCS_OK) {
                goto out_cleanup;
            }
            rte_call(perf, barrier);
            ucx_perf_test_reset(perf, params);
        }

        /* Run test */
        status = ucx_perf_funcs[params->api].run(perf);
        rte_call(perf, barrier);
        if (status == UCS_OK) {
            ucx_perf_calc_result(perf, result);
            rte_call(perf, report, result, perf->params.report_arg, 1);
        }
    } else {
        /* Threaded mode: each thread runs with its own context copy */
        status = ucx_perf_thread_spawn(perf, result);
    }

out_cleanup:
    ucx_perf_funcs[params->api].cleanup(perf);
out_free:
    free(perf);
out:
    return status;
}
#if _OPENMP
/* multiple threads sharing the same worker/iface */
#include <omp.h>
/* Per-thread state for the OpenMP-threaded test mode. */
typedef struct {
    pthread_t pt;             /* NOTE(review): not referenced in this chunk */
    int tid;                  /* this thread's index */
    int ntid;                 /* total number of threads */
    ucs_status_t* statuses;   /* shared array of per-thread statuses */
    ucx_perf_context_t perf;  /* private copy of the test context */
    ucx_perf_result_t result; /* this thread's measured result */
} ucx_perf_thread_context_t;
/*
 * Body executed by each OpenMP thread: an optional warmup pass followed by
 * the measured run. Every thread publishes its status into the shared
 * 'statuses' array and bails out if any thread failed; only the master
 * thread reports the result.
 */
static void* ucx_perf_thread_run_test(void* arg)
{
    ucx_perf_thread_context_t* tctx = (ucx_perf_thread_context_t*) arg;
    ucx_perf_result_t* result = &tctx->result;
    ucx_perf_context_t* perf = &tctx->perf;
    ucx_perf_params_t* params = &perf->params;
    ucs_status_t* statuses = tctx->statuses;
    int tid = tctx->tid;
    int i;

    if (params->warmup_iter > 0) {
        ucx_perf_set_warmup(perf, params);
        statuses[tid] = ucx_perf_funcs[params->api].run(perf);
        rte_call(perf, barrier);
        /* Abort if any thread's warmup failed */
        for (i = 0; i < tctx->ntid; i++) {
            if (UCS_OK != statuses[i]) {
                goto out;
            }
        }
/* NOTE(review): 'perf' is a per-thread copy, yet only the master thread
 * resets it after warmup -- confirm the other threads are meant to keep
 * their warmup counters. */
#pragma omp master
        ucx_perf_test_reset(perf, params);
    }

    /* Run test */
#pragma omp barrier
    statuses[tid] = ucx_perf_funcs[params->api].run(perf);
    rte_call(perf, barrier);
    for (i = 0; i < tctx->ntid; i++) {
        if (UCS_OK != statuses[i]) {
            goto out;
        }
    }

#pragma omp master
    {
        /* Assuming all threads are fairly treated, reporting only tid==0
           TODO: aggregate reports */
        ucx_perf_calc_result(perf, result);
        rte_call(perf, report, result, perf->params.report_arg, 1);
    }

out:
    return &statuses[tid];
}
/*
 * Run the test with perf->params.thread_count OpenMP threads sharing the
 * same worker/iface. Each thread receives its own copy of *perf with the
 * send/recv buffers offset by (tid * message_size), so threads operate on
 * disjoint memory. Returns UCS_OK, or the status of a failed thread.
 * NOTE(review): declared to return int but actually carries ucs_status_t
 * values -- confirm callers treat it as a status.
 */
static int ucx_perf_thread_spawn(ucx_perf_context_t *perf,
                                 ucx_perf_result_t* result)
{
    ucx_perf_thread_context_t* tctx;
    ucs_status_t* statuses;
    size_t message_size;
    ucs_status_t status;
    int ti, nti;

    message_size = ucx_perf_get_message_size(&perf->params);
    omp_set_num_threads(perf->params.thread_count);
    nti = perf->params.thread_count;

    tctx = calloc(nti, sizeof(ucx_perf_thread_context_t));
    statuses = calloc(nti, sizeof(ucs_status_t));
    if ((tctx == NULL) || (statuses == NULL)) {
        status = UCS_ERR_NO_MEMORY;
        goto out_free;
    }

#pragma omp parallel private(ti)
    {
        ti = omp_get_thread_num();
        tctx[ti].tid = ti;
        tctx[ti].ntid = nti;
        tctx[ti].statuses = statuses;
        tctx[ti].perf = *perf;
        /* Doctor the src and dst buffers to make them thread specific */
        tctx[ti].perf.send_buffer += ti * message_size;
        tctx[ti].perf.recv_buffer += ti * message_size;
        tctx[ti].perf.offset = ti * message_size;
        ucx_perf_thread_run_test((void*)&tctx[ti]);
    }

    status = UCS_OK;
    for (ti = 0; ti < nti; ti++) {
        if (UCS_OK != statuses[ti]) {
            ucs_error("Thread %d failed to run test: %s", tctx[ti].tid,
                      ucs_status_string(statuses[ti]));
            status = statuses[ti];
        }
    }

out_free:
    /* free(NULL) is a no-op, so a partial allocation failure is safe here */
    free(statuses);
    free(tctx);
    return status;
}
#else
/* Stub used when built without OpenMP: threaded test mode is unavailable. */
static int ucx_perf_thread_spawn(ucx_perf_context_t *perf,
                                 ucx_perf_result_t* result) {
    ucs_error("Invalid test parameter (thread mode requested without OpenMP capabilities)");
    return UCS_ERR_INVALID_PARAM;
}
#endif /* _OPENMP */
|
GB_binop__rdiv_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rdiv_uint64)
// A.*B function (eWiseMult): GB (_AemultB_08__rdiv_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__rdiv_uint64)
// A.*B function (eWiseMult): GB (_AemultB_04__rdiv_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_uint64)
// A*D function (colscale): GB (_AxD__rdiv_uint64)
// D*A function (rowscale): GB (_DxB__rdiv_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__rdiv_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__rdiv_uint64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_uint64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_uint64)
// C=scalar+B GB (_bind1st__rdiv_uint64)
// C=scalar+B' GB (_bind1st_tran__rdiv_uint64)
// C=A+scalar GB (_bind2nd__rdiv_uint64)
// C=A'+scalar GB (_bind2nd_tran__rdiv_uint64)
// C type: uint64_t
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = GB_IDIV_UNSIGNED (bij, aij, 64)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IDIV_UNSIGNED (y, x, 64) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RDIV || GxB_NO_UINT64 || GxB_NO_RDIV_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense; op is RDIV on uint64.
// NOTE(review): unlike the other kernels in this file there is no
// GB_DISABLE guard and no status return here — presumably the caller
// checks GB_DISABLE before dispatching; confirm against the dispatcher.
void GB (_Cdense_ewise3_accum__rdiv_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// the whole loop body lives in the shared template, specialized by the
// GB_* macros defined above for the rdiv_uint64 operator
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; op is RDIV on uint64.
GrB_Info GB (_Cdense_ewise3_noaccum__rdiv_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// this operator/type combination was compiled out; the caller must
// fall back to the generic kernel
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C.
GrB_Info GB (_Cdense_accumB__rdiv_uint64)
(
GrB_Matrix C,
const GrB_Matrix B,
// B pre-sliced into B_ntasks tasks for B_nthreads threads
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
// operator compiled out: tell the caller to use the generic kernel
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C.
// Fix: the inner block always returned GrB_SUCCESS, making the second
// (outer) "return (GrB_SUCCESS) ;" unreachable dead code; it is removed.
GrB_Info GB (_Cdense_accumb__rdiv_uint64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
// operator compiled out: tell the caller to use the generic kernel
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__rdiv_uint64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
// A pre-sliced into A_ntasks tasks for A_nthreads threads
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed view of C's value array, filled in by the template below
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__rdiv_uint64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed view of C's value array, filled in by the template below
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B (union of patterns).
GrB_Info GB (_AaddB__rdiv_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
// mappings from C's vectors to the vectors of M, A, and B
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace for slicing M, A, and B; released by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where the
// result C is sparse or hypersparse (intersection of patterns).
GrB_Info GB (_AemultB_08__rdiv_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
// mappings from C's vectors to the vectors of M, A, and B
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full. GB_FLIPPED selects which argument order the template uses.
GrB_Info GB (_AemultB_02__rdiv_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
// (GB_BINOP_FLIP is 0 for rdiv, so this is the branch compiled here.)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B where M is sparse/hyper and both
// A and B are bitmap/full.
GrB_Info GB (_AemultB_04__rdiv_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
// M pre-sliced into M_ntasks tasks for M_nthreads threads
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where the result C is bitmap.
GrB_Info GB (_AemultB_bitmap__rdiv_uint64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the rdiv operator with the scalar bound as the
// first argument: cij = rdiv (x, bij) = bij / x (unsigned 64-bit division).
GrB_Info GB (_bind1st__rdiv_uint64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the output, the input B, and the bound scalar x
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Bx = (uint64_t *) Bx_input ;
uint64_t x = (*((uint64_t *) x_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < bnz ; k++)
{
// only entries present in B's bitmap are computed
if (GBB (Bb, k))
{
uint64_t bij = GBX (Bx, k, false) ;
Cx [k] = GB_IDIV_UNSIGNED (bij, x, 64) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the rdiv operator with the scalar bound as the
// second argument: cij = rdiv (aij, y) = y / aij (unsigned 64-bit division).
GrB_Info GB (_bind2nd__rdiv_uint64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the output, the input A, and the bound scalar y
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t y = (*((uint64_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// only entries present in A's bitmap are computed
if (GBB (Ab, k))
{
uint64_t aij = GBX (Ax, k, false) ;
Cx [k] = GB_IDIV_UNSIGNED (y, aij, 64) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name).
// GB_CAST_OP is consumed by GB_unop_transpose.c below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_UNSIGNED (aij, x, 64) ; \
}
// C = op (x, A'): transpose A and apply the operator with the scalar
// bound as the first argument.
GrB_Info GB (_bind1st_tran__rdiv_uint64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows; the definition happens to
// be the same here since A and B share the uint64_t type
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name).
// GB_CAST_OP is consumed by GB_unop_transpose.c below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_UNSIGNED (y, aij, 64) ; \
}
// C = op (A', y): transpose A and apply the operator with the scalar
// bound as the second argument.
GrB_Info GB (_bind2nd_tran__rdiv_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
convolution_3x3_int8.h | // BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Naive 3x3 stride-1 int8 convolution (reference path, no SIMD).
// top_blob must be pre-sized to (outw, outh, outch); results accumulate
// over all input channels into int32.
static void conv3x3s1_int8_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt)
{
    const int w = bottom_blob.w;
    const int inch = bottom_blob.c;

    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    const signed char* weights = _kernel;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out0 = top_blob.channel(p);
        out0.fill(0);

        // 9 int8 weights per (output channel, input channel) pair
        const signed char* k0 = (const signed char*)weights + p * inch * 9;

        for (int q = 0; q < inch; q++)
        {
            int* outptr = out0;

            const signed char* img = bottom_blob.channel(q);
            const signed char* r0 = img;
            const signed char* r1 = img + w;
            const signed char* r2 = img + w * 2;

            for (int i = 0; i < outh; i++)
            {
                for (int j = 0; j < outw; j++)
                {
                    // accumulate the 3x3 window anchored at the current column
                    const signed char* rows[3] = { r0, r1, r2 };
                    int sum = 0;
                    for (int ky = 0; ky < 3; ky++)
                    {
                        for (int kx = 0; kx < 3; kx++)
                        {
                            sum += (int)rows[ky][kx] * k0[ky * 3 + kx];
                        }
                    }
                    *outptr++ += sum;

                    r0++;
                    r1++;
                    r2++;
                }
                // skip the 2-column right border of the input row
                r0 += 2;
                r1 += 2;
                r2 += 2;
            }
            k0 += 9;
        }
    }
}
// Transform 3x3 int8 kernels into the winograd F(2,3) domain:
// U = G * g * G^T, producing 16 int16 values per (outch, inch) pair.
// ktm here is 2x the canonical F(2,3) G; the output transform in
// conv3x3s1_winograd23_int8_sse compensates with a >>2 shift.
static void conv3x3s1_winograd23_transform_kernel_int8_sse(const Mat& kernel, Mat& kernel_tm, int inch, int outch)
{
kernel_tm.create(4*4, inch, outch, 2ul);
// G
const short ktm[4][3] = {
{ 2, 0, 0},
{ 1, 1, 1},
{ 1, -1, 1},
{ 0, 0, 2}
};
// NOTE(review): this pragma has no num_threads() clause, unlike the
// other kernels in this file — no Option parameter is available here;
// confirm the default thread count is acceptable.
#pragma omp parallel for
for (int p = 0; p<outch; p++)
{
for (int q = 0; q<inch; q++)
{
const signed char* kernel0 = (const signed char*)kernel + p*inch * 9 + q * 9;
short* kernel_tm0 = kernel_tm.channel(p).row<short>(q);
// transform kernel
const signed char* k0 = kernel0;
const signed char* k1 = kernel0 + 3;
const signed char* k2 = kernel0 + 6;
// h = G * g (4x3 intermediate)
short tmp[4][3];
for (int i=0; i<4; i++)
{
tmp[i][0] = (short)k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = (short)k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = (short)k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// U = h * G^T (4x4 result, stored row-major)
for (int j=0; j<4; j++)
{
short* tmpp = &tmp[j][0];
for (int i=0; i<4; i++)
{
kernel_tm0[j*4 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
}
// Winograd F(2,3) 3x3 stride-1 int8 convolution.
// Pipeline: pad input -> input transform (B^T d B per 4x4 tile) ->
// per-tile elementwise dot with the transformed kernels -> output
// transform (A^T Y A per tile) -> crop the padding off the result.
// kernel_tm must come from conv3x3s1_winograd23_transform_kernel_int8_sse.
static void conv3x3s1_winograd23_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 2n+2, winograd F(2,3)
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 1) / 2 * 2;
outh = (outh + 1) / 2 * 2;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt.workspace_allocator, opt.num_threads);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 2 * 4;
int h_tm = outh / 2 * 4;
int nColBlocks = h_tm/4; // may be the block num in Feathercnn
int nRowBlocks = w_tm/4;
const int tiles = nColBlocks * nRowBlocks;
// 16 int16 values per 4x4 input tile, one row per tile
bottom_blob_tm.create(4*4, tiles, inch, 2u, opt.workspace_allocator);
// BT
// const float itm[4][4] = {
// {1.0f, 0.0f, -1.0f, 0.0f},
// {0.0f, 1.0f, 1.00f, 0.0f},
// {0.0f, -1.0f, 1.00f, 0.0f},
// {0.0f, -1.0f, 0.00f, 1.0f}
// };
#pragma omp parallel for num_threads(opt.num_threads)
for (int q=0; q<inch; q++)
{
const signed char* img = bottom_blob_bordered.channel(q);
short* out_tm0 = bottom_blob_tm.channel(q);
for (int j = 0; j < nColBlocks; j++)
{
// 4 input rows feeding this row of tiles; vertically adjacent
// tile rows overlap by 2 input rows (hence the j*2)
const signed char* r0 = img + w * j * 2;
const signed char* r1 = r0 + w;
const signed char* r2 = r1 + w;
const signed char* r3 = r2 + w;
for (int i = 0; i < nRowBlocks; i++)
{
short d0[4],d1[4],d2[4],d3[4];
short w0[4],w1[4],w2[4],w3[4];
short t0[4],t1[4],t2[4],t3[4];
// load
for (int n = 0; n < 4; n++)
{
d0[n] = r0[n];
d1[n] = r1[n];
d2[n] = r2[n];
d3[n] = r3[n];
}
// w = B_t * d
for (int n = 0; n < 4; n++)
{
w0[n] = d0[n] - d2[n];
w1[n] = d1[n] + d2[n];
w2[n] = d2[n] - d1[n];
w3[n] = d3[n] - d1[n];
}
// transpose d to d_t
{
t0[0]=w0[0]; t1[0]=w0[1]; t2[0]=w0[2]; t3[0]=w0[3];
t0[1]=w1[0]; t1[1]=w1[1]; t2[1]=w1[2]; t3[1]=w1[3];
t0[2]=w2[0]; t1[2]=w2[1]; t2[2]=w2[2]; t3[2]=w2[3];
t0[3]=w3[0]; t1[3]=w3[1]; t2[3]=w3[2]; t3[3]=w3[3];
}
// U = B_t * d_t
for (int n = 0; n < 4; n++)
{
d0[n] = t0[n] - t2[n];
d1[n] = t1[n] + t2[n];
d2[n] = t2[n] - t1[n];
d3[n] = t3[n] - t1[n];
}
// save to out_tm
for (int n = 0; n < 4; n++)
{
out_tm0[n ] = d0[n];
out_tm0[n+ 4] = d1[n];
out_tm0[n+ 8] = d2[n];
out_tm0[n+12] = d3[n];
}
// horizontally adjacent tiles overlap by 2 input columns
r0 += 2;
r1 += 2;
r2 += 2;
r3 += 2;
out_tm0 += 16;
}
}
}
}
// the padded input is no longer needed; release it before the dot phase
bottom_blob_bordered = Mat();
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 2 * 4;
int h_tm = outh / 2 * 4;
int nColBlocks = h_tm/4; // may be the block num in Feathercnn
int nRowBlocks = w_tm/4;
const int tiles = nColBlocks * nRowBlocks;
// int32 accumulators: 16 values per tile per output channel
top_blob_tm.create(16, tiles, outch, 4u, opt.workspace_allocator);
// process output channels 4 at a time; leftovers handled below
int nn_outch = outch >> 2;
int remain_outch_start = nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 4;
Mat out0_tm = top_blob_tm.channel(p);
Mat out1_tm = top_blob_tm.channel(p+1);
Mat out2_tm = top_blob_tm.channel(p+2);
Mat out3_tm = top_blob_tm.channel(p+3);
const Mat kernel0_tm = kernel_tm.channel(p);
const Mat kernel1_tm = kernel_tm.channel(p+1);
const Mat kernel2_tm = kernel_tm.channel(p+2);
const Mat kernel3_tm = kernel_tm.channel(p+3);
for (int i=0; i<tiles; i++)
{
int* output0_tm = out0_tm.row<int>(i);
int* output1_tm = out1_tm.row<int>(i);
int* output2_tm = out2_tm.row<int>(i);
int* output3_tm = out3_tm.row<int>(i);
int sum0[16] = {0};
int sum1[16] = {0};
int sum2[16] = {0};
int sum3[16] = {0};
// accumulate 4 input channels per iteration
int q = 0;
for (; q+3<inch; q+=4)
{
const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
const short* r1 = bottom_blob_tm.channel(q+1).row<short>(i);
const short* r2 = bottom_blob_tm.channel(q+2).row<short>(i);
const short* r3 = bottom_blob_tm.channel(q+3).row<short>(i);
const short* k0 = kernel0_tm.row<short>(q);
const short* k1 = kernel1_tm.row<short>(q);
const short* k2 = kernel2_tm.row<short>(q);
const short* k3 = kernel3_tm.row<short>(q);
// each k pointer steps forward 16 shorts to reach the next input
// channel's transformed weights, then rewinds by 48 for the next n
for (int n=0; n<16; n++)
{
sum0[n] += (int)r0[n] * k0[n];
k0 += 16;
sum0[n] += (int)r1[n] * k0[n];
k0 += 16;
sum0[n] += (int)r2[n] * k0[n];
k0 += 16;
sum0[n] += (int)r3[n] * k0[n];
k0 -= 16 * 3;
sum1[n] += (int)r0[n] * k1[n];
k1 += 16;
sum1[n] += (int)r1[n] * k1[n];
k1 += 16;
sum1[n] += (int)r2[n] * k1[n];
k1 += 16;
sum1[n] += (int)r3[n] * k1[n];
k1 -= 16 * 3;
sum2[n] += (int)r0[n] * k2[n];
k2 += 16;
sum2[n] += (int)r1[n] * k2[n];
k2 += 16;
sum2[n] += (int)r2[n] * k2[n];
k2 += 16;
sum2[n] += (int)r3[n] * k2[n];
k2 -= 16 * 3;
sum3[n] += (int)r0[n] * k3[n];
k3 += 16;
sum3[n] += (int)r1[n] * k3[n];
k3 += 16;
sum3[n] += (int)r2[n] * k3[n];
k3 += 16;
sum3[n] += (int)r3[n] * k3[n];
k3 -= 16 * 3;
}
}
// remaining input channels (inch not a multiple of 4)
for (; q<inch; q++)
{
const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
const short* k0 = kernel0_tm.row<short>(q);
const short* k1 = kernel1_tm.row<short>(q);
const short* k2 = kernel2_tm.row<short>(q);
const short* k3 = kernel3_tm.row<short>(q);
for (int n=0; n<16; n++)
{
sum0[n] += (int)r0[n] * k0[n];
sum1[n] += (int)r0[n] * k1[n];
sum2[n] += (int)r0[n] * k2[n];
sum3[n] += (int)r0[n] * k3[n];
}
}
for (int n=0; n<16; n++)
{
output0_tm[n] = sum0[n];
output1_tm[n] = sum1[n];
output2_tm[n] = sum2[n];
output3_tm[n] = sum3[n];
}
}
}
// leftover output channels, one at a time
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=remain_outch_start; p<outch; p++)
{
Mat out0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
for (int i=0; i<tiles; i++)
{
int* output0_tm = out0_tm.row<int>(i);
int sum0[16] = {0};
int q = 0;
for (; q+3<inch; q+=4)
{
const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
const short* r1 = bottom_blob_tm.channel(q+1).row<short>(i);
const short* r2 = bottom_blob_tm.channel(q+2).row<short>(i);
const short* r3 = bottom_blob_tm.channel(q+3).row<short>(i);
const short* k0 = kernel0_tm.row<short>(q);
const short* k1 = kernel0_tm.row<short>(q+1);
const short* k2 = kernel0_tm.row<short>(q+2);
const short* k3 = kernel0_tm.row<short>(q+3);
for (int n=0; n<16; n++)
{
sum0[n] += (int)r0[n] * k0[n];
sum0[n] += (int)r1[n] * k1[n];
sum0[n] += (int)r2[n] * k2[n];
sum0[n] += (int)r3[n] * k3[n];
}
}
for (; q<inch; q++)
{
const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
const short* k0 = kernel0_tm.row<short>(q);
for (int n=0; n<16; n++)
{
sum0[n] += (int)r0[n] * k0[n];
}
}
for (int n=0; n<16; n++)
{
output0_tm[n] = sum0[n];
}
}
}
}
// transformed input is no longer needed
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
{
// AT
// const float itm[2][4] = {
// {1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 1.0f}
// };
int w_tm = outw / 2 * 4;
int h_tm = outh / 2 * 4;
int nColBlocks = h_tm/4; // may be the block num in Feathercnn
int nRowBlocks = w_tm/4;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=0; p<outch; p++)
{
Mat out_tm = top_blob_tm.channel(p);
Mat out = top_blob_bordered.channel(p);
for (int j=0; j<nColBlocks; j++)
{
// each 4x4 tile produces a 2x2 output patch
int* outRow0 = out.row<int>(j*2);
int* outRow1 = out.row<int>(j*2+1);
for(int i=0; i<nRowBlocks; i++)
{
int* out_tile = out_tm.row<int>(j*nRowBlocks + i);
int s0[4],s1[4],s2[4],s3[4];
int w0[4],w1[4];
int d0[2],d1[2],d2[2],d3[2];
int o0[2],o1[2];
// load
for (int n = 0; n < 4; n++)
{
s0[n] = out_tile[n];
s1[n] = out_tile[n+ 4];
s2[n] = out_tile[n+ 8];
s3[n] = out_tile[n+12];
}
// w = A_T * W
for (int n = 0; n < 4; n++)
{
w0[n] = s0[n] + s1[n] + s2[n];
w1[n] = s1[n] - s2[n] + s3[n];
}
// transpose w to w_t
{
d0[0] = w0[0]; d0[1] = w1[0];
d1[0] = w0[1]; d1[1] = w1[1];
d2[0] = w0[2]; d2[1] = w1[2];
d3[0] = w0[3]; d3[1] = w1[3];
}
// Y = A_T * w_t
for (int n = 0; n < 2; n++)
{
o0[n] = d0[n] + d1[n] + d2[n];
o1[n] = d1[n] - d2[n] + d3[n];
}
// save to top blob tm,why right 2,because the G' = G*2
outRow0[0] = o0[0] >> 2;
outRow0[1] = o0[1] >> 2;
outRow1[0] = o1[0] >> 2;
outRow1[1] = o1[1] >> 2;
outRow0 += 2;
outRow1 += 2;
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt.blob_allocator, opt.num_threads);
}
// Naive 3x3 stride-2 int8 convolution (reference path, no SIMD).
// top_blob must be pre-sized to (outw, outh, outch); results accumulate
// over all input channels into int32.
static void conv3x3s2_int8_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt)
{
    const int w = bottom_blob.w;
    const int inch = bottom_blob.c;

    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    // after consuming 2*outw input columns in a row, skip the remainder of
    // that row plus one full row (the stride-2 vertical step)
    const int tailstep = w - 2 * outw + w;

    const signed char* weights = _kernel;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out0 = top_blob.channel(p);
        out0.fill(0);

        // 9 int8 weights per (output channel, input channel) pair
        const signed char* k0 = (const signed char*)weights + p * inch * 9;

        for (int q = 0; q < inch; q++)
        {
            int* outptr = out0;

            const signed char* img = bottom_blob.channel(q);
            const signed char* r0 = img;
            const signed char* r1 = img + w;
            const signed char* r2 = img + w * 2;

            for (int i = 0; i < outh; i++)
            {
                for (int j = 0; j < outw; j++)
                {
                    // accumulate the 3x3 window anchored at the current column
                    const signed char* rows[3] = { r0, r1, r2 };
                    int sum = 0;
                    for (int ky = 0; ky < 3; ky++)
                    {
                        for (int kx = 0; kx < 3; kx++)
                        {
                            sum += (int)rows[ky][kx] * k0[ky * 3 + kx];
                        }
                    }
                    *outptr++ += sum;

                    // horizontal stride of 2
                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                }
                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
            }
            k0 += 9;
        }
    }
}
|
Parallelizer.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_PARALLELIZER_H
#define EIGEN_PARALLELIZER_H
#if EIGEN_HAS_CXX11_ATOMIC
#include <atomic>
#endif
#include "../InternalHeaderCheck.h"
namespace Eigen {
namespace internal {
/** \internal Set (SetAction) or query (GetAction) Eigen's max thread count.
  * The setting lives in a function-local static; a stored value <= 0 means
  * "defer to omp_get_max_threads()" (or 1 when OpenMP is unavailable).
  * NOTE(review): the static is written without synchronization — concurrent
  * SetAction calls would race; presumably callers set it once up front. */
inline void manage_multi_threading(Action action, int* v)
{
static int m_maxThreads = -1;
EIGEN_UNUSED_VARIABLE(m_maxThreads)
if(action==SetAction)
{
eigen_internal_assert(v!=0);
m_maxThreads = *v;
}
else if(action==GetAction)
{
eigen_internal_assert(v!=0);
#ifdef EIGEN_HAS_OPENMP
if(m_maxThreads>0)
*v = m_maxThreads;
else
*v = omp_get_max_threads();
#else
// no OpenMP: Eigen runs single-threaded
*v = 1;
#endif
}
else
{
// only SetAction and GetAction are valid
eigen_internal_assert(false);
}
}
}
/** Must be called first when calling Eigen from multiple threads.
  * Runs the Get paths once so that the function-local statics inside
  * manage_multi_threading / manage_caching_sizes are initialized before
  * any concurrent use; the fetched values themselves are discarded. */
inline void initParallel()
{
int nbt;
internal::manage_multi_threading(GetAction, &nbt);
std::ptrdiff_t l1, l2, l3;
internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);
}
/** \returns the maximum number of threads reserved for Eigen's
  * parallel kernels (the user-set value, or OpenMP's default)
  * \sa setNbThreads */
inline int nbThreads()
{
  int count;
  internal::manage_multi_threading(GetAction, &count);
  return count;
}
/** Sets the max number of threads reserved for Eigen.
  * Passing v <= 0 restores the default (omp_get_max_threads()).
  * \sa nbThreads */
inline void setNbThreads(int v)
{
internal::manage_multi_threading(SetAction, &v);
}
namespace internal {
// Per-thread bookkeeping shared between the workers of a parallel GEMM:
// sync/users coordinate block packing, lhs_start/lhs_length describe the
// horizontal slice of the lhs assigned to a thread (set in parallelize_gemm).
template<typename Index> struct GemmParallelInfo
{
GemmParallelInfo() : sync(-1), users(0), lhs_start(0), lhs_length(0) {}
// volatile is not enough on all architectures (see bug 1572)
// to guarantee that when thread A says to thread B that it is
// done with packing a block, then all writes have been really
// carried out... C++11 memory model+atomic guarantees this.
#if EIGEN_HAS_CXX11_ATOMIC
std::atomic<Index> sync;
std::atomic<int> users;
#else
// pre-C++11 fallback: only correct on strongly-ordered targets (x86)
Index volatile sync;
int volatile users;
#endif
Index lhs_start;
Index lhs_length;
};
// Splits a GEMM of size rows x cols (depth-deep) across OpenMP threads and
// invokes func on each thread's share. Falls back to a single serial call
// when OpenMP is unavailable, when BLAS is used, or when atomics are missing
// on non-x86 targets (volatile alone is insufficient there — bug 1572).
template<bool Condition, typename Functor, typename Index>
void parallelize_gemm(const Functor& func, Index rows, Index cols, Index depth, bool transpose)
{
// TODO when EIGEN_USE_BLAS is defined,
// we should still enable OMP for other scalar types
// Without C++11, we have to disable GEMM's parallelization on
// non x86 architectures because there volatile is not enough for our purpose.
// See bug 1572.
#if (! defined(EIGEN_HAS_OPENMP)) || defined(EIGEN_USE_BLAS) || ((!EIGEN_HAS_CXX11_ATOMIC) && !(EIGEN_ARCH_i386_OR_x86_64))
// FIXME the transpose variable is only needed to properly split
// the matrix product when multithreading is enabled. This is a temporary
// fix to support row-major destination matrices. This whole
// parallelizer mechanism has to be redesigned anyway.
EIGEN_UNUSED_VARIABLE(depth);
EIGEN_UNUSED_VARIABLE(transpose);
func(0,rows, 0,cols);
#else
// Dynamically check whether we should enable or disable OpenMP.
// The conditions are:
// - the max number of threads we can create is greater than 1
// - we are not already in a parallel code
// - the sizes are large enough
// compute the maximal number of threads from the size of the product:
// This first heuristic takes into account that the product kernel is fully optimized when working with nr columns at once.
Index size = transpose ? rows : cols;
Index pb_max_threads = std::max<Index>(1,size / Functor::Traits::nr);
// compute the maximal number of threads from the total amount of work:
double work = static_cast<double>(rows) * static_cast<double>(cols) *
static_cast<double>(depth);
double kMinTaskSize = 50000; // FIXME improve this heuristic.
pb_max_threads = std::max<Index>(1, std::min<Index>(pb_max_threads, static_cast<Index>( work / kMinTaskSize ) ));
// compute the number of threads we are going to use
Index threads = std::min<Index>(nbThreads(), pb_max_threads);
// if multi-threading is explicitly disabled, not useful, or if we already are in a parallel session,
// then abort multi-threading
// FIXME omp_get_num_threads()>1 only works for openmp, what if the user does not use openmp?
if((!Condition) || (threads==1) || (omp_get_num_threads()>1))
return func(0,rows, 0,cols);
Eigen::initParallel();
func.initParallelSession(threads);
if(transpose)
std::swap(rows,cols);
// one GemmParallelInfo slot per thread, allocated on the stack when small
ei_declare_aligned_stack_constructed_variable(GemmParallelInfo<Index>,info,threads,0);
#pragma omp parallel num_threads(threads)
{
Index i = omp_get_thread_num();
// Note that the actual number of threads might be lower than the number of request ones.
Index actual_threads = omp_get_num_threads();
// blockCols rounded down to a multiple of 4, blockRows to a multiple of
// the kernel's mr; the last thread absorbs the remainder
Index blockCols = (cols / actual_threads) & ~Index(0x3);
Index blockRows = (rows / actual_threads);
blockRows = (blockRows/Functor::Traits::mr)*Functor::Traits::mr;
Index r0 = i*blockRows;
Index actualBlockRows = (i+1==actual_threads) ? rows-r0 : blockRows;
Index c0 = i*blockCols;
Index actualBlockCols = (i+1==actual_threads) ? cols-c0 : blockCols;
info[i].lhs_start = r0;
info[i].lhs_length = actualBlockRows;
if(transpose) func(c0, actualBlockCols, 0, rows, info);
else func(0, rows, c0, actualBlockCols, info);
}
#endif
}
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_PARALLELIZER_H
|
gemm.c | #include "gemm.h"
#include "utils.h"
#include "cuda.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
/* Binary-weight GEMM: for each (i,k), adds row k of B into row i of C when
 * A[i][k] is nonzero, otherwise subtracts it (weights are interpreted as
 * +1/-1). ALPHA is accepted for signature parity with gemm() but unused. */
void gemm_bin(int M, int N, int K, float ALPHA,
        char *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    for(int i = 0; i < M; ++i){
        float *c_row = C + i*ldc;
        for(int k = 0; k < K; ++k){
            float *b_row = B + k*ldb;
            if(A[i*lda + k]){
                for(int j = 0; j < N; ++j) c_row[j] += b_row[j];
            } else {
                for(int j = 0; j < N; ++j) c_row[j] -= b_row[j];
            }
        }
    }
}
/* Allocate a rows x cols matrix filled with uniform random floats in [0,1].
 * The caller owns the returned buffer and must free() it.
 * Fixes: the calloc result was used unchecked (undefined behavior on
 * allocation failure), and rows*cols could overflow int before being
 * widened; the product is now computed in size_t. */
float *random_matrix(int rows, int cols)
{
    float *m = (float*)calloc((size_t)rows*cols, sizeof(float));
    if(!m){
        fprintf(stderr, "random_matrix: allocation failed\n");
        exit(1);
    }
    int i;
    for(i = 0; i < rows*cols; ++i){
        m[i] = (float)rand()/RAND_MAX;
    }
    return m;
}
/* Benchmark 10 CPU GEMM calls on freshly generated random matrices and
 * print the elapsed CPU time.
 * Fix: the printed unit was wrong — (end-start)/CLOCKS_PER_SEC yields
 * seconds, not milliseconds, so the label now says "s" (matching
 * time_gpu_random_matrix, which uses the same formula). */
void time_random_matrix(int TA, int TB, int m, int k, int n)
{
    float *a;
    if(!TA) a = random_matrix(m,k);
    else a = random_matrix(k,m);
    int lda = (!TA)?k:m;
    float *b;
    if(!TB) b = random_matrix(k,n);
    else b = random_matrix(n,k);
    int ldb = (!TB)?n:k;

    float *c = random_matrix(m,n);
    int i;
    clock_t start = clock(), end;
    for(i = 0; i<10; ++i){
        gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
    }
    end = clock();
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC);
    free(a);
    free(b);
    free(c);
}
/* Public entry point: C = ALPHA*op(A)*op(B) + BETA*C, computed on the CPU.
 * TA/TB select whether A/B are transposed; lda/ldb/ldc are row strides. */
void gemm(int TA, int TB, int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float BETA,
float *C, int ldc)
{
gemm_cpu( TA, TB, M, N, K, ALPHA,A,lda, B, ldb,BETA,C,ldc);
}
/* C += ALPHA * A*B, no transposes (A is MxK, B is KxN, C is MxN).
 * Fixes: with the bare "omp parallel for", only the loop index i was
 * privatized — j and k were shared across threads, a data race; they are
 * now declared private. The deprecated `register` qualifier (removed in
 * C++17, a no-op on modern compilers) is dropped. */
void gemm_nn(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i,j,k;
    #pragma omp parallel for private(j, k)
    for(i = 0; i < M; ++i){
        for(k = 0; k < K; ++k){
            /* hoist the scalar so the inner loop is a pure axpy */
            float A_PART = ALPHA*A[i*lda+k];
            for(j = 0; j < N; ++j){
                C[i*ldc+j] += A_PART*B[k*ldb+j];
            }
        }
    }
}
/* C += ALPHA * A*B^T (A is MxK, B is NxK, C is MxN).
 * Fixes: j and k were shared across OpenMP threads (data race) — now
 * private; deprecated `register` dropped. */
void gemm_nt(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i,j,k;
    #pragma omp parallel for private(j, k)
    for(i = 0; i < M; ++i){
        for(j = 0; j < N; ++j){
            /* dot product of row i of A with row j of B */
            float sum = 0;
            for(k = 0; k < K; ++k){
                sum += ALPHA*A[i*lda+k]*B[j*ldb + k];
            }
            C[i*ldc+j] += sum;
        }
    }
}
/* C += ALPHA * A^T*B (A is KxM, B is KxN, C is MxN).
 * Fixes: j and k were shared across OpenMP threads (data race) — now
 * private; deprecated `register` dropped. */
void gemm_tn(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i,j,k;
    #pragma omp parallel for private(j, k)
    for(i = 0; i < M; ++i){
        for(k = 0; k < K; ++k){
            /* A is accessed transposed: element (k,i) */
            float A_PART = ALPHA*A[k*lda+i];
            for(j = 0; j < N; ++j){
                C[i*ldc+j] += A_PART*B[k*ldb+j];
            }
        }
    }
}
/* C += ALPHA * A^T*B^T (A is KxM, B is NxK, C is MxN).
 * Fixes: j and k were shared across OpenMP threads (data race) — now
 * private; deprecated `register` dropped. */
void gemm_tt(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i,j,k;
    #pragma omp parallel for private(j, k)
    for(i = 0; i < M; ++i){
        for(j = 0; j < N; ++j){
            /* dot product of column i of A with row j of B (both transposed) */
            float sum = 0;
            for(k = 0; k < K; ++k){
                sum += ALPHA*A[i+k*lda]*B[k+j*ldb];
            }
            C[i*ldc+j] += sum;
        }
    }
}
/* CPU GEMM dispatcher: C = ALPHA*op(A)*op(B) + BETA*C.
 * Scales C by BETA first, then dispatches on the transpose flags to the
 * four specialized kernels. */
void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float BETA,
        float *C, int ldc)
{
    /* C *= BETA before accumulation */
    int i, j;
    for(i = 0; i < M; ++i){
        for(j = 0; j < N; ++j){
            C[i*ldc + j] *= BETA;
        }
    }
    /* dispatch on the transposition flags */
    if(TA){
        if(TB) gemm_tt(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
        else   gemm_tn(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
    } else {
        if(TB) gemm_nt(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
        else   gemm_nn(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
    }
}
#ifdef GPU
#include <math.h>
/* GPU GEMM via cuBLAS. cuBLAS is column-major, so this computes
 * C^T = op(B)^T * op(A)^T by swapping the operand order and the TA/TB
 * roles, which yields the row-major C the caller expects.
 * NOTE(review): cublasSgemm returns cublasStatus_t, not cudaError_t —
 * the implicit conversion compiles but the two enums have different
 * meanings; confirm check_error() is intended to receive this value. */
void gemm_gpu(int TA, int TB, int M, int N, int K, float ALPHA,
float *A_gpu, int lda,
float *B_gpu, int ldb,
float BETA,
float *C_gpu, int ldc)
{
cublasHandle_t handle = blas_handle();
cudaError_t status = cublasSgemm(handle, (TB ? CUBLAS_OP_T : CUBLAS_OP_N),
(TA ? CUBLAS_OP_T : CUBLAS_OP_N), N, M, K, &ALPHA, B_gpu, ldb, A_gpu, lda, &BETA, C_gpu, ldc);
check_error(status);
}
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
/* Benchmark 32 GPU GEMM calls on freshly generated random operands and
 * print the elapsed process time measured with clock().  Matrix storage
 * (and leading dimension) follows the TA/TB transpose flags. */
void time_gpu_random_matrix(int TA, int TB, int m, int k, int n)
{
    float *a = TA ? random_matrix(k,m) : random_matrix(m,k);
    int lda = TA ? m : k;
    float *b = TB ? random_matrix(n,k) : random_matrix(k,n);
    int ldb = TB ? k : n;
    float *c = random_matrix(m,n);
    clock_t start = clock();
    int rep;
    for(rep = 0; rep < 32; ++rep){
        gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
    }
    clock_t end = clock();
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC);
    free(a);
    free(b);
    free(c);
}
/* Benchmark `iter` synchronized GPU GEMM calls and print both the elapsed
 * time and the achieved GFLOPS (flop count assumes 2K+2 operations per
 * output element).  Device buffers are allocated from host random data
 * and freed before returning. */
void time_gpu(int TA, int TB, int m, int k, int n)
{
    const int iter = 10;
    float *a = random_matrix(m,k);
    float *b = random_matrix(k,n);
    float *c = random_matrix(m,n);
    int lda = TA ? m : k;
    int ldb = TB ? k : n;
    /* device-side copies of the operands */
    float *a_cl = cuda_make_array(a, m*k);
    float *b_cl = cuda_make_array(b, k*n);
    float *c_cl = cuda_make_array(c, m*n);
    clock_t start = clock(), end;
    int rep;
    for(rep = 0; rep < iter; ++rep){
        gemm_gpu(TA,TB,m,n,k,1,a_cl,lda,b_cl,ldb,1,c_cl,n);
        /* block until the kernel finishes so clock() sees the real cost */
        cudaDeviceSynchronize();
    }
    double flop = ((double)m)*n*(2.*k + 2.)*iter;
    double gflop = flop/pow(10., 9);
    end = clock();
    double seconds = sec(end-start);
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s, %lf GFLOPS\n",m,k,k,n, TA, TB, seconds, gflop/seconds);
    cuda_free(a_cl);
    cuda_free(b_cl);
    cuda_free(c_cl);
    free(a);
    free(b);
    free(c);
}
/* Compare the GPU and CPU GEMM paths on identical random operands and
 * print the mean squared error between the two result matrices.
 * srand(0) makes the random inputs reproducible; both outputs are
 * zeroed before accumulation. */
void test_gpu_accuracy(int TA, int TB, int m, int k, int n)
{
    srand(0);
    float *a = TA ? random_matrix(k,m) : random_matrix(m,k);
    int lda = TA ? m : k;
    float *b = TB ? random_matrix(n,k) : random_matrix(k,n);
    int ldb = TB ? k : n;
    float *c = random_matrix(m,n);
    float *c_gpu = random_matrix(m,n);
    memset(c, 0, m*n*sizeof(float));
    memset(c_gpu, 0, m*n*sizeof(float));
    //pm(m,k,b);
    gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c_gpu,n);
    //printf("GPU\n");
    //pm(m, n, c_gpu);
    gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
    //printf("\n\nCPU\n");
    //pm(m, n, c);
    /* accumulate the sum of squared differences element by element */
    double sse = 0;
    int idx;
    for(idx = 0; idx < m*n; ++idx) {
        //printf("%f %f\n", c[idx], c_gpu[idx]);
        sse += pow(c[idx]-c_gpu[idx], 2);
    }
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %g SSE\n",m,k,k,n, TA, TB, sse/(m*n));
    free(a);
    free(b);
    free(c);
    free(c_gpu);
}
/* Runs a fixed set of convolution-shaped GEMM timing cases on the GPU.
 * Each table row is {TA, TB, m, k, n}.  A wider accuracy/timing sweep is
 * kept commented out below for reference. */
int test_gpu_blas()
{
    /*
    test_gpu_accuracy(0,0,10,576,75);
    test_gpu_accuracy(0,0,17,10,10);
    test_gpu_accuracy(1,0,17,10,10);
    test_gpu_accuracy(0,1,17,10,10);
    test_gpu_accuracy(1,1,17,10,10);
    test_gpu_accuracy(0,0,1000,10,100);
    test_gpu_accuracy(1,0,1000,10,100);
    test_gpu_accuracy(0,1,1000,10,100);
    test_gpu_accuracy(1,1,1000,10,100);
    test_gpu_accuracy(0,0,10,10,10);
    time_gpu(0,0,64,2916,363);
    time_gpu(0,0,64,2916,363);
    time_gpu(0,0,64,2916,363);
    time_gpu(0,0,192,729,1600);
    time_gpu(0,0,384,196,1728);
    time_gpu(0,0,256,196,3456);
    time_gpu(0,0,256,196,2304);
    time_gpu(0,0,128,4096,12544);
    time_gpu(0,0,128,4096,4096);
    */
    const int cases[8][5] = {
        {0,0,64,75,12544},
        {0,0,64,75,12544},
        {0,0,64,75,12544},
        {0,0,64,576,12544},
        {0,0,256,2304,784},
        {1,1,2304,256,784},
        {0,0,512,4608,196},
        {1,1,4608,512,196},
    };
    int t;
    for(t = 0; t < 8; ++t){
        time_gpu(cases[t][0], cases[t][1], cases[t][2], cases[t][3], cases[t][4]);
    }
    return 0;
}
#endif
|
convolution_3x3_pack1to4_bf16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv3x3s1_pack1to4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
#if __ARM_NEON && __aarch64__
Mat top_blob_fp32(outw, outh, opt.num_threads, (size_t)4u * 4 * 2, 4 * 2, opt.workspace_allocator);
#else
Mat top_blob_fp32(outw, outh, opt.num_threads, (size_t)4u * 4, 4, opt.workspace_allocator);
#endif
const float* bias = _bias;
int nn_outch = 0;
int remain_outch_start = 0;
#if __ARM_NEON && __aarch64__
nn_outch = outch >> 1;
remain_outch_start = nn_outch << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 2;
Mat out0 = top_blob_fp32.channel(get_omp_thread_num());
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f);
float32x4_t _bias1 = bias ? vld1q_f32((const float*)bias + (p + 1) * 4) : vdupq_n_f32(0.f);
{
float* ptr = (float*)out0;
for (int i = 0; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
vst1q_f32(ptr, _bias0);
vst1q_f32(ptr + 4, _bias0);
vst1q_f32(ptr + 8, _bias0);
vst1q_f32(ptr + 12, _bias0);
vst1q_f32(ptr + 16, _bias1);
vst1q_f32(ptr + 20, _bias1);
vst1q_f32(ptr + 24, _bias1);
vst1q_f32(ptr + 28, _bias1);
ptr += 32;
}
for (; j + 1 < outw; j += 2)
{
vst1q_f32(ptr, _bias0);
vst1q_f32(ptr + 4, _bias0);
vst1q_f32(ptr + 8, _bias1);
vst1q_f32(ptr + 12, _bias1);
ptr += 16;
}
for (; j < outw; j++)
{
vst1q_f32(ptr, _bias0);
vst1q_f32(ptr + 4, _bias1);
ptr += 8;
}
}
}
const unsigned short* k0 = kernel.channel(p);
const unsigned short* k1 = kernel.channel(p + 1);
int q = 0;
for (; q < inch - 1; q++)
{
float* outptr0 = out0;
const Mat img0 = bottom_blob.channel(q);
const unsigned short* r0 = img0.row<const unsigned short>(0);
const unsigned short* r1 = img0.row<const unsigned short>(1);
const unsigned short* r2 = img0.row<const unsigned short>(2);
float32x4_t _k00_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0), 16));
float32x4_t _k01_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 4), 16));
float32x4_t _k02_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 8), 16));
float32x4_t _k10_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 12), 16));
float32x4_t _k11_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 16), 16));
float32x4_t _k12_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 20), 16));
float32x4_t _k20_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 24), 16));
float32x4_t _k21_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 28), 16));
float32x4_t _k22_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 32), 16));
float32x4_t _k00_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1), 16));
float32x4_t _k01_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 4), 16));
float32x4_t _k02_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 8), 16));
float32x4_t _k10_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 12), 16));
float32x4_t _k11_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 16), 16));
float32x4_t _k12_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 20), 16));
float32x4_t _k20_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 24), 16));
float32x4_t _k21_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 28), 16));
float32x4_t _k22_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 32), 16));
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
asm volatile(
"prfm pldl1keep, [%1, #64] \n"
"ld1 {v0.4h}, [%1], #8 \n"
"ld1 {v1.s}[0], [%1] \n"
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0], #64 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%0] \n"
"fmla v24.4s, %8.4s, v0.s[0] \n"
"fmla v25.4s, %8.4s, v0.s[1] \n"
"fmla v26.4s, %8.4s, v0.s[2] \n"
"fmla v27.4s, %8.4s, v0.s[3] \n"
"fmla v28.4s, %17.4s, v0.s[0] \n"
"fmla v29.4s, %17.4s, v0.s[1] \n"
"fmla v30.4s, %17.4s, v0.s[2] \n"
"fmla v31.4s, %17.4s, v0.s[3] \n"
"prfm pldl1keep, [%2, #64] \n"
"ld1 {v2.4h}, [%2], #8 \n"
"ld1 {v3.s}[0], [%2] \n"
"fmla v24.4s, %9.4s, v0.s[1] \n"
"fmla v25.4s, %9.4s, v0.s[2] \n"
"fmla v26.4s, %9.4s, v0.s[3] \n"
"fmla v27.4s, %9.4s, v1.s[0] \n"
"fmla v28.4s, %18.4s, v0.s[1] \n"
"fmla v29.4s, %18.4s, v0.s[2] \n"
"fmla v30.4s, %18.4s, v0.s[3] \n"
"fmla v31.4s, %18.4s, v1.s[0] \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v24.4s, %10.4s, v0.s[2] \n"
"fmla v25.4s, %10.4s, v0.s[3] \n"
"fmla v26.4s, %10.4s, v1.s[0] \n"
"fmla v27.4s, %10.4s, v1.s[1] \n"
"fmla v28.4s, %19.4s, v0.s[2] \n"
"fmla v29.4s, %19.4s, v0.s[3] \n"
"fmla v30.4s, %19.4s, v1.s[0] \n"
"fmla v31.4s, %19.4s, v1.s[1] \n"
"fmla v24.4s, %11.4s, v2.s[0] \n"
"fmla v25.4s, %11.4s, v2.s[1] \n"
"fmla v26.4s, %11.4s, v2.s[2] \n"
"fmla v27.4s, %11.4s, v2.s[3] \n"
"fmla v28.4s, %20.4s, v2.s[0] \n"
"fmla v29.4s, %20.4s, v2.s[1] \n"
"fmla v30.4s, %20.4s, v2.s[2] \n"
"fmla v31.4s, %20.4s, v2.s[3] \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v0.4h}, [%3], #8 \n"
"ld1 {v1.s}[0], [%3] \n"
"fmla v24.4s, %12.4s, v2.s[1] \n"
"fmla v25.4s, %12.4s, v2.s[2] \n"
"fmla v26.4s, %12.4s, v2.s[3] \n"
"fmla v27.4s, %12.4s, v3.s[0] \n"
"fmla v28.4s, %21.4s, v2.s[1] \n"
"fmla v29.4s, %21.4s, v2.s[2] \n"
"fmla v30.4s, %21.4s, v2.s[3] \n"
"fmla v31.4s, %21.4s, v3.s[0] \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v24.4s, %13.4s, v2.s[2] \n"
"fmla v25.4s, %13.4s, v2.s[3] \n"
"fmla v26.4s, %13.4s, v3.s[0] \n"
"fmla v27.4s, %13.4s, v3.s[1] \n"
"fmla v28.4s, %22.4s, v2.s[2] \n"
"fmla v29.4s, %22.4s, v2.s[3] \n"
"fmla v30.4s, %22.4s, v3.s[0] \n"
"fmla v31.4s, %22.4s, v3.s[1] \n"
"fmla v24.4s, %14.4s, v0.s[0] \n"
"fmla v25.4s, %14.4s, v0.s[1] \n"
"fmla v26.4s, %14.4s, v0.s[2] \n"
"fmla v27.4s, %14.4s, v0.s[3] \n"
"fmla v28.4s, %23.4s, v0.s[0] \n"
"fmla v29.4s, %23.4s, v0.s[1] \n"
"fmla v30.4s, %23.4s, v0.s[2] \n"
"fmla v31.4s, %23.4s, v0.s[3] \n"
"fmla v24.4s, %15.4s, v0.s[1] \n"
"fmla v25.4s, %15.4s, v0.s[2] \n"
"fmla v26.4s, %15.4s, v0.s[3] \n"
"fmla v27.4s, %15.4s, v1.s[0] \n"
"fmla v28.4s, %24.4s, v0.s[1] \n"
"fmla v29.4s, %24.4s, v0.s[2] \n"
"fmla v30.4s, %24.4s, v0.s[3] \n"
"fmla v31.4s, %24.4s, v1.s[0] \n"
"sub %0, %0, #64 \n"
"fmla v24.4s, %16.4s, v0.s[2] \n"
"fmla v25.4s, %16.4s, v0.s[3] \n"
"fmla v26.4s, %16.4s, v1.s[0] \n"
"fmla v27.4s, %16.4s, v1.s[1] \n"
"fmla v28.4s, %25.4s, v0.s[2] \n"
"fmla v29.4s, %25.4s, v0.s[3] \n"
"fmla v30.4s, %25.4s, v1.s[0] \n"
"fmla v31.4s, %25.4s, v1.s[1] \n"
"st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0], #64 \n"
"st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00_0), // %8
"w"(_k01_0), // %9
"w"(_k02_0), // %10
"w"(_k10_0), // %11
"w"(_k11_0), // %12
"w"(_k12_0), // %13
"w"(_k20_0), // %14
"w"(_k21_0), // %15
"w"(_k22_0), // %16
"w"(_k00_1), // %17
"w"(_k01_1), // %18
"w"(_k02_1), // %19
"w"(_k10_1), // %20
"w"(_k11_1), // %21
"w"(_k12_1), // %22
"w"(_k20_1), // %23
"w"(_k21_1), // %24
"w"(_k22_1) // %25
: "memory", "v0", "v1", "v2", "v3", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; j + 1 < outw; j += 2)
{
asm volatile(
"prfm pldl1keep, [%1, #64] \n"
"ld1 {v0.4h}, [%1] \n"
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v24.4s, %8.4s, v0.s[0] \n"
"fmla v25.4s, %8.4s, v0.s[1] \n"
"fmla v26.4s, %17.4s, v0.s[0] \n"
"fmla v27.4s, %17.4s, v0.s[1] \n"
"prfm pldl1keep, [%2, #64] \n"
"ld1 {v1.4h}, [%2] \n"
"fmla v24.4s, %9.4s, v0.s[1] \n"
"fmla v25.4s, %9.4s, v0.s[2] \n"
"fmla v26.4s, %18.4s, v0.s[1] \n"
"fmla v27.4s, %18.4s, v0.s[2] \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v24.4s, %10.4s, v0.s[2] \n"
"fmla v25.4s, %10.4s, v0.s[3] \n"
"fmla v26.4s, %19.4s, v0.s[2] \n"
"fmla v27.4s, %19.4s, v0.s[3] \n"
"fmla v24.4s, %11.4s, v1.s[0] \n"
"fmla v25.4s, %11.4s, v1.s[1] \n"
"fmla v26.4s, %20.4s, v1.s[0] \n"
"fmla v27.4s, %20.4s, v1.s[1] \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v0.4h}, [%3] \n"
"fmla v24.4s, %12.4s, v1.s[1] \n"
"fmla v25.4s, %12.4s, v1.s[2] \n"
"fmla v26.4s, %21.4s, v1.s[1] \n"
"fmla v27.4s, %21.4s, v1.s[2] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v24.4s, %13.4s, v1.s[2] \n"
"fmla v25.4s, %13.4s, v1.s[3] \n"
"fmla v26.4s, %22.4s, v1.s[2] \n"
"fmla v27.4s, %22.4s, v1.s[3] \n"
"fmla v24.4s, %14.4s, v0.s[0] \n"
"fmla v25.4s, %14.4s, v0.s[1] \n"
"fmla v26.4s, %23.4s, v0.s[0] \n"
"fmla v27.4s, %23.4s, v0.s[1] \n"
"add %1, %1, #4 \n"
"fmla v24.4s, %15.4s, v0.s[1] \n"
"fmla v25.4s, %15.4s, v0.s[2] \n"
"fmla v26.4s, %24.4s, v0.s[1] \n"
"fmla v27.4s, %24.4s, v0.s[2] \n"
"add %2, %2, #4 \n"
"fmla v24.4s, %16.4s, v0.s[2] \n"
"fmla v25.4s, %16.4s, v0.s[3] \n"
"fmla v26.4s, %25.4s, v0.s[2] \n"
"fmla v27.4s, %25.4s, v0.s[3] \n"
"add %3, %3, #4 \n"
"st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00_0), // %8
"w"(_k01_0), // %9
"w"(_k02_0), // %10
"w"(_k10_0), // %11
"w"(_k11_0), // %12
"w"(_k12_0), // %13
"w"(_k20_0), // %14
"w"(_k21_0), // %15
"w"(_k22_0), // %16
"w"(_k00_1), // %17
"w"(_k01_1), // %18
"w"(_k02_1), // %19
"w"(_k10_1), // %20
"w"(_k11_1), // %21
"w"(_k12_1), // %22
"w"(_k20_1), // %23
"w"(_k21_1), // %24
"w"(_k22_1) // %25
: "memory", "v0", "v1", "v24", "v25", "v26", "v27");
}
for (; j < outw; j++)
{
float32x4_t _sum00 = vld1q_f32(outptr0);
float32x4_t _sum10 = vld1q_f32(outptr0 + 4);
float32x4_t _r0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r0), 16));
float32x4_t _r1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r1), 16));
float32x4_t _r2 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r2), 16));
_sum00 = vfmaq_laneq_f32(_sum00, _k00_0, _r0, 0);
_sum00 = vfmaq_laneq_f32(_sum00, _k01_0, _r0, 1);
_sum00 = vfmaq_laneq_f32(_sum00, _k02_0, _r0, 2);
_sum00 = vfmaq_laneq_f32(_sum00, _k10_0, _r1, 0);
_sum00 = vfmaq_laneq_f32(_sum00, _k11_0, _r1, 1);
_sum00 = vfmaq_laneq_f32(_sum00, _k12_0, _r1, 2);
_sum00 = vfmaq_laneq_f32(_sum00, _k20_0, _r2, 0);
_sum00 = vfmaq_laneq_f32(_sum00, _k21_0, _r2, 1);
_sum00 = vfmaq_laneq_f32(_sum00, _k22_0, _r2, 2);
_sum10 = vfmaq_laneq_f32(_sum10, _k00_1, _r0, 0);
_sum10 = vfmaq_laneq_f32(_sum10, _k01_1, _r0, 1);
_sum10 = vfmaq_laneq_f32(_sum10, _k02_1, _r0, 2);
_sum10 = vfmaq_laneq_f32(_sum10, _k10_1, _r1, 0);
_sum10 = vfmaq_laneq_f32(_sum10, _k11_1, _r1, 1);
_sum10 = vfmaq_laneq_f32(_sum10, _k12_1, _r1, 2);
_sum10 = vfmaq_laneq_f32(_sum10, _k20_1, _r2, 0);
_sum10 = vfmaq_laneq_f32(_sum10, _k21_1, _r2, 1);
_sum10 = vfmaq_laneq_f32(_sum10, _k22_1, _r2, 2);
vst1q_f32(outptr0, _sum00);
vst1q_f32(outptr0 + 4, _sum10);
r0 += 1;
r1 += 1;
r2 += 1;
outptr0 += 8;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
k0 += 9 * 4;
k1 += 9 * 4;
}
for (; q < inch; q++)
{
unsigned short* outptr0_bf16 = top_blob.channel(p);
unsigned short* outptr1_bf16 = top_blob.channel(p + 1);
const float* outptr0 = out0;
const Mat img0 = bottom_blob.channel(q);
const unsigned short* r0 = img0.row<const unsigned short>(0);
const unsigned short* r1 = img0.row<const unsigned short>(1);
const unsigned short* r2 = img0.row<const unsigned short>(2);
float32x4_t _k00_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0), 16));
float32x4_t _k01_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 4), 16));
float32x4_t _k02_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 8), 16));
float32x4_t _k10_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 12), 16));
float32x4_t _k11_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 16), 16));
float32x4_t _k12_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 20), 16));
float32x4_t _k20_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 24), 16));
float32x4_t _k21_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 28), 16));
float32x4_t _k22_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 32), 16));
float32x4_t _k00_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1), 16));
float32x4_t _k01_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 4), 16));
float32x4_t _k02_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 8), 16));
float32x4_t _k10_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 12), 16));
float32x4_t _k11_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 16), 16));
float32x4_t _k12_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 20), 16));
float32x4_t _k20_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 24), 16));
float32x4_t _k21_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 28), 16));
float32x4_t _k22_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 32), 16));
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
asm volatile(
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v0.4h}, [%3], #8 \n"
"ld1 {v1.s}[0], [%3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n"
"fmla v24.4s, %12.4s, v0.s[0] \n"
"fmla v25.4s, %12.4s, v0.s[1] \n"
"fmla v26.4s, %12.4s, v0.s[2] \n"
"fmla v27.4s, %12.4s, v0.s[3] \n"
"fmla v28.4s, %21.4s, v0.s[0] \n"
"fmla v29.4s, %21.4s, v0.s[1] \n"
"fmla v30.4s, %21.4s, v0.s[2] \n"
"fmla v31.4s, %21.4s, v0.s[3] \n"
"fmla v24.4s, %13.4s, v0.s[1] \n"
"fmla v25.4s, %13.4s, v0.s[2] \n"
"fmla v26.4s, %13.4s, v0.s[3] \n"
"fmla v27.4s, %13.4s, v1.s[0] \n"
"fmla v28.4s, %22.4s, v0.s[1] \n"
"fmla v29.4s, %22.4s, v0.s[2] \n"
"fmla v30.4s, %22.4s, v0.s[3] \n"
"fmla v31.4s, %22.4s, v1.s[0] \n"
"prfm pldl1keep, [%4, #64] \n"
"ld1 {v2.4h}, [%4], #8 \n"
"ld1 {v3.s}[0], [%4] \n"
"fmla v24.4s, %14.4s, v0.s[2] \n"
"fmla v25.4s, %14.4s, v0.s[3] \n"
"fmla v26.4s, %14.4s, v1.s[0] \n"
"fmla v27.4s, %14.4s, v1.s[1] \n"
"fmla v28.4s, %23.4s, v0.s[2] \n"
"fmla v29.4s, %23.4s, v0.s[3] \n"
"fmla v30.4s, %23.4s, v1.s[0] \n"
"fmla v31.4s, %23.4s, v1.s[1] \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v24.4s, %15.4s, v2.s[0] \n"
"fmla v25.4s, %15.4s, v2.s[1] \n"
"fmla v26.4s, %15.4s, v2.s[2] \n"
"fmla v27.4s, %15.4s, v2.s[3] \n"
"fmla v28.4s, %24.4s, v2.s[0] \n"
"fmla v29.4s, %24.4s, v2.s[1] \n"
"fmla v30.4s, %24.4s, v2.s[2] \n"
"fmla v31.4s, %24.4s, v2.s[3] \n"
"fmla v24.4s, %16.4s, v2.s[1] \n"
"fmla v25.4s, %16.4s, v2.s[2] \n"
"fmla v26.4s, %16.4s, v2.s[3] \n"
"fmla v27.4s, %16.4s, v3.s[0] \n"
"fmla v28.4s, %25.4s, v2.s[1] \n"
"fmla v29.4s, %25.4s, v2.s[2] \n"
"fmla v30.4s, %25.4s, v2.s[3] \n"
"fmla v31.4s, %25.4s, v3.s[0] \n"
"prfm pldl1keep, [%5, #64] \n"
"ld1 {v0.4h}, [%5], #8 \n"
"ld1 {v1.s}[0], [%5] \n"
"fmla v24.4s, %17.4s, v2.s[2] \n"
"fmla v25.4s, %17.4s, v2.s[3] \n"
"fmla v26.4s, %17.4s, v3.s[0] \n"
"fmla v27.4s, %17.4s, v3.s[1] \n"
"fmla v28.4s, %26.4s, v2.s[2] \n"
"fmla v29.4s, %26.4s, v2.s[3] \n"
"fmla v30.4s, %26.4s, v3.s[0] \n"
"fmla v31.4s, %26.4s, v3.s[1] \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v24.4s, %18.4s, v0.s[0] \n"
"fmla v25.4s, %18.4s, v0.s[1] \n"
"fmla v26.4s, %18.4s, v0.s[2] \n"
"fmla v27.4s, %18.4s, v0.s[3] \n"
"fmla v28.4s, %27.4s, v0.s[0] \n"
"fmla v29.4s, %27.4s, v0.s[1] \n"
"fmla v30.4s, %27.4s, v0.s[2] \n"
"fmla v31.4s, %27.4s, v0.s[3] \n"
"fmla v24.4s, %19.4s, v0.s[1] \n"
"fmla v25.4s, %19.4s, v0.s[2] \n"
"fmla v26.4s, %19.4s, v0.s[3] \n"
"fmla v27.4s, %19.4s, v1.s[0] \n"
"fmla v28.4s, %28.4s, v0.s[1] \n"
"fmla v29.4s, %28.4s, v0.s[2] \n"
"fmla v30.4s, %28.4s, v0.s[3] \n"
"fmla v31.4s, %28.4s, v1.s[0] \n"
"fmla v24.4s, %20.4s, v0.s[2] \n"
"fmla v25.4s, %20.4s, v0.s[3] \n"
"fmla v26.4s, %20.4s, v1.s[0] \n"
"fmla v27.4s, %20.4s, v1.s[1] \n"
"fmla v28.4s, %29.4s, v0.s[2] \n"
"fmla v29.4s, %29.4s, v0.s[3] \n"
"fmla v30.4s, %29.4s, v1.s[0] \n"
"fmla v31.4s, %29.4s, v1.s[1] \n"
"shrn v24.4h, v24.4s, #16 \n"
"shrn v25.4h, v25.4s, #16 \n"
"shrn v26.4h, v26.4s, #16 \n"
"shrn v27.4h, v27.4s, #16 \n"
"shrn v28.4h, v28.4s, #16 \n"
"shrn v29.4h, v29.4s, #16 \n"
"shrn v30.4h, v30.4s, #16 \n"
"shrn v31.4h, v31.4s, #16 \n"
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%0], #32 \n"
"st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr1_bf16), // %1
"=r"(outptr0), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2) // %5
: "0"(outptr0_bf16),
"1"(outptr1_bf16),
"2"(outptr0),
"3"(r0),
"4"(r1),
"5"(r2),
"w"(_k00_0), // %12
"w"(_k01_0), // %13
"w"(_k02_0), // %14
"w"(_k10_0), // %15
"w"(_k11_0), // %16
"w"(_k12_0), // %17
"w"(_k20_0), // %18
"w"(_k21_0), // %19
"w"(_k22_0), // %20
"w"(_k00_1), // %21
"w"(_k01_1), // %22
"w"(_k02_1), // %23
"w"(_k10_1), // %24
"w"(_k11_1), // %25
"w"(_k12_1), // %26
"w"(_k20_1), // %27
"w"(_k21_1), // %28
"w"(_k22_1) // %29
: "memory", "v0", "v1", "v2", "v3", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; j + 1 < outw; j += 2)
{
asm volatile(
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v0.4h}, [%3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v24.4s, %12.4s, v0.s[0] \n"
"fmla v25.4s, %12.4s, v0.s[1] \n"
"fmla v26.4s, %21.4s, v0.s[0] \n"
"fmla v27.4s, %21.4s, v0.s[1] \n"
"prfm pldl1keep, [%4, #64] \n"
"ld1 {v1.4h}, [%4] \n"
"fmla v24.4s, %13.4s, v0.s[1] \n"
"fmla v25.4s, %13.4s, v0.s[2] \n"
"fmla v26.4s, %22.4s, v0.s[1] \n"
"fmla v27.4s, %22.4s, v0.s[2] \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v24.4s, %14.4s, v0.s[2] \n"
"fmla v25.4s, %14.4s, v0.s[3] \n"
"fmla v26.4s, %23.4s, v0.s[2] \n"
"fmla v27.4s, %23.4s, v0.s[3] \n"
"fmla v24.4s, %15.4s, v1.s[0] \n"
"fmla v25.4s, %15.4s, v1.s[1] \n"
"fmla v26.4s, %24.4s, v1.s[0] \n"
"fmla v27.4s, %24.4s, v1.s[1] \n"
"prfm pldl1keep, [%5, #64] \n"
"ld1 {v0.4h}, [%5] \n"
"fmla v24.4s, %16.4s, v1.s[1] \n"
"fmla v25.4s, %16.4s, v1.s[2] \n"
"fmla v26.4s, %25.4s, v1.s[1] \n"
"fmla v27.4s, %25.4s, v1.s[2] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v24.4s, %17.4s, v1.s[2] \n"
"fmla v25.4s, %17.4s, v1.s[3] \n"
"fmla v26.4s, %26.4s, v1.s[2] \n"
"fmla v27.4s, %26.4s, v1.s[3] \n"
"fmla v24.4s, %18.4s, v0.s[0] \n"
"fmla v25.4s, %18.4s, v0.s[1] \n"
"fmla v26.4s, %27.4s, v0.s[0] \n"
"fmla v27.4s, %27.4s, v0.s[1] \n"
"fmla v24.4s, %19.4s, v0.s[1] \n"
"fmla v25.4s, %19.4s, v0.s[2] \n"
"fmla v26.4s, %28.4s, v0.s[1] \n"
"fmla v27.4s, %28.4s, v0.s[2] \n"
"add %3, %3, #4 \n"
"fmla v24.4s, %20.4s, v0.s[2] \n"
"fmla v25.4s, %20.4s, v0.s[3] \n"
"fmla v26.4s, %29.4s, v0.s[2] \n"
"fmla v27.4s, %29.4s, v0.s[3] \n"
"add %4, %4, #4 \n"
"shrn v24.4h, v24.4s, #16 \n"
"shrn v25.4h, v25.4s, #16 \n"
"shrn v26.4h, v26.4s, #16 \n"
"shrn v27.4h, v27.4s, #16 \n"
"add %5, %5, #4 \n"
"st1 {v24.4h, v25.4h}, [%0], #16 \n"
"st1 {v26.4h, v27.4h}, [%1], #16 \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr1_bf16), // %1
"=r"(outptr0), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2) // %5
: "0"(outptr0_bf16),
"1"(outptr1_bf16),
"2"(outptr0),
"3"(r0),
"4"(r1),
"5"(r2),
"w"(_k00_0), // %12
"w"(_k01_0), // %13
"w"(_k02_0), // %14
"w"(_k10_0), // %15
"w"(_k11_0), // %16
"w"(_k12_0), // %17
"w"(_k20_0), // %18
"w"(_k21_0), // %19
"w"(_k22_0), // %20
"w"(_k00_1), // %21
"w"(_k01_1), // %22
"w"(_k02_1), // %23
"w"(_k10_1), // %24
"w"(_k11_1), // %25
"w"(_k12_1), // %26
"w"(_k20_1), // %27
"w"(_k21_1), // %28
"w"(_k22_1) // %29
: "memory", "v0", "v1", "v24", "v25", "v26", "v27");
}
for (; j < outw; j++)
{
float32x4_t _sum00 = vld1q_f32(outptr0);
float32x4_t _sum10 = vld1q_f32(outptr0 + 4);
float32x4_t _r0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r0), 16));
float32x4_t _r1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r1), 16));
float32x4_t _r2 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r2), 16));
_sum00 = vfmaq_laneq_f32(_sum00, _k00_0, _r0, 0);
_sum00 = vfmaq_laneq_f32(_sum00, _k01_0, _r0, 1);
_sum00 = vfmaq_laneq_f32(_sum00, _k02_0, _r0, 2);
_sum00 = vfmaq_laneq_f32(_sum00, _k10_0, _r1, 0);
_sum00 = vfmaq_laneq_f32(_sum00, _k11_0, _r1, 1);
_sum00 = vfmaq_laneq_f32(_sum00, _k12_0, _r1, 2);
_sum00 = vfmaq_laneq_f32(_sum00, _k20_0, _r2, 0);
_sum00 = vfmaq_laneq_f32(_sum00, _k21_0, _r2, 1);
_sum00 = vfmaq_laneq_f32(_sum00, _k22_0, _r2, 2);
_sum10 = vfmaq_laneq_f32(_sum10, _k00_1, _r0, 0);
_sum10 = vfmaq_laneq_f32(_sum10, _k01_1, _r0, 1);
_sum10 = vfmaq_laneq_f32(_sum10, _k02_1, _r0, 2);
_sum10 = vfmaq_laneq_f32(_sum10, _k10_1, _r1, 0);
_sum10 = vfmaq_laneq_f32(_sum10, _k11_1, _r1, 1);
_sum10 = vfmaq_laneq_f32(_sum10, _k12_1, _r1, 2);
_sum10 = vfmaq_laneq_f32(_sum10, _k20_1, _r2, 0);
_sum10 = vfmaq_laneq_f32(_sum10, _k21_1, _r2, 1);
_sum10 = vfmaq_laneq_f32(_sum10, _k22_1, _r2, 2);
vst1_u16(outptr0_bf16, vshrn_n_u32(vreinterpretq_u32_f32(_sum00), 16));
vst1_u16(outptr1_bf16, vshrn_n_u32(vreinterpretq_u32_f32(_sum10), 16));
r0 += 1;
r1 += 1;
r2 += 1;
outptr0 += 8;
outptr0_bf16 += 4;
outptr1_bf16 += 4;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
k0 += 9 * 4;
k1 += 9 * 4;
}
}
#endif // __ARM_NEON && __aarch64__
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
Mat out0 = top_blob_fp32.channel(get_omp_thread_num());
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f);
out0.fill(_bias0);
const unsigned short* k0 = kernel.channel(p);
int q = 0;
for (; q < inch - 1; q++)
{
float* outptr0 = out0.row(0);
const Mat img0 = bottom_blob.channel(q);
const unsigned short* r0 = img0.row<unsigned short>(0);
const unsigned short* r1 = img0.row<unsigned short>(1);
const unsigned short* r2 = img0.row<unsigned short>(2);
float32x4_t _k00 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0), 16));
float32x4_t _k01 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 4), 16));
float32x4_t _k02 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 8), 16));
float32x4_t _k10 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 12), 16));
float32x4_t _k11 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 16), 16));
float32x4_t _k12 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 20), 16));
float32x4_t _k20 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 24), 16));
float32x4_t _k21 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 28), 16));
float32x4_t _k22 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 32), 16));
int i = 0;
for (; i < outh; i++)
{
int j = 0;
#if __aarch64__
for (; j + 7 < outw; j += 8)
{
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0], #64 \n"
// "prfm pldl1keep, [%0, #512] \n"
"ld1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%0] \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v0.4h, v1.4h}, [%1], #16 \n"
"ld1 {v2.s}[0], [%1] \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v24.4s, %8.4s, v0.s[0] \n"
"fmla v25.4s, %8.4s, v0.s[1] \n"
"fmla v26.4s, %8.4s, v0.s[2] \n"
"fmla v27.4s, %8.4s, v0.s[3] \n"
"fmla v28.4s, %8.4s, v1.s[0] \n"
"fmla v29.4s, %8.4s, v1.s[1] \n"
"fmla v30.4s, %8.4s, v1.s[2] \n"
"fmla v31.4s, %8.4s, v1.s[3] \n"
"fmla v24.4s, %9.4s, v0.s[1] \n"
"fmla v25.4s, %9.4s, v0.s[2] \n"
"fmla v26.4s, %9.4s, v0.s[3] \n"
"fmla v27.4s, %9.4s, v1.s[0] \n"
"fmla v28.4s, %9.4s, v1.s[1] \n"
"fmla v29.4s, %9.4s, v1.s[2] \n"
"fmla v30.4s, %9.4s, v1.s[3] \n"
"fmla v31.4s, %9.4s, v2.s[0] \n"
"fmla v24.4s, %10.4s, v0.s[2] \n"
"fmla v25.4s, %10.4s, v0.s[3] \n"
"fmla v26.4s, %10.4s, v1.s[0] \n"
"fmla v27.4s, %10.4s, v1.s[1] \n"
"fmla v28.4s, %10.4s, v1.s[2] \n"
"fmla v29.4s, %10.4s, v1.s[3] \n"
"fmla v30.4s, %10.4s, v2.s[0] \n"
"fmla v31.4s, %10.4s, v2.s[1] \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v4.4h, v5.4h}, [%2], #16 \n"
"ld1 {v2.s}[0], [%2] \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v24.4s, %11.4s, v4.s[0] \n"
"fmla v25.4s, %11.4s, v4.s[1] \n"
"fmla v26.4s, %11.4s, v4.s[2] \n"
"fmla v27.4s, %11.4s, v4.s[3] \n"
"fmla v28.4s, %11.4s, v5.s[0] \n"
"fmla v29.4s, %11.4s, v5.s[1] \n"
"fmla v30.4s, %11.4s, v5.s[2] \n"
"fmla v31.4s, %11.4s, v5.s[3] \n"
"fmla v24.4s, %12.4s, v4.s[1] \n"
"fmla v25.4s, %12.4s, v4.s[2] \n"
"fmla v26.4s, %12.4s, v4.s[3] \n"
"fmla v27.4s, %12.4s, v5.s[0] \n"
"fmla v28.4s, %12.4s, v5.s[1] \n"
"fmla v29.4s, %12.4s, v5.s[2] \n"
"fmla v30.4s, %12.4s, v5.s[3] \n"
"fmla v31.4s, %12.4s, v2.s[0] \n"
"fmla v24.4s, %13.4s, v4.s[2] \n"
"fmla v25.4s, %13.4s, v4.s[3] \n"
"fmla v26.4s, %13.4s, v5.s[0] \n"
"fmla v27.4s, %13.4s, v5.s[1] \n"
"fmla v28.4s, %13.4s, v5.s[2] \n"
"fmla v29.4s, %13.4s, v5.s[3] \n"
"fmla v30.4s, %13.4s, v2.s[0] \n"
"fmla v31.4s, %13.4s, v2.s[1] \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v0.4h, v1.4h}, [%3], #16 \n"
"ld1 {v2.s}[0], [%3] \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v24.4s, %14.4s, v0.s[0] \n"
"fmla v25.4s, %14.4s, v0.s[1] \n"
"fmla v26.4s, %14.4s, v0.s[2] \n"
"fmla v27.4s, %14.4s, v0.s[3] \n"
"fmla v28.4s, %14.4s, v1.s[0] \n"
"fmla v29.4s, %14.4s, v1.s[1] \n"
"fmla v30.4s, %14.4s, v1.s[2] \n"
"fmla v31.4s, %14.4s, v1.s[3] \n"
"fmla v24.4s, %15.4s, v0.s[1] \n"
"fmla v25.4s, %15.4s, v0.s[2] \n"
"fmla v26.4s, %15.4s, v0.s[3] \n"
"fmla v27.4s, %15.4s, v1.s[0] \n"
"fmla v28.4s, %15.4s, v1.s[1] \n"
"fmla v29.4s, %15.4s, v1.s[2] \n"
"fmla v30.4s, %15.4s, v1.s[3] \n"
"fmla v31.4s, %15.4s, v2.s[0] \n"
"sub %0, %0, #64 \n"
"fmla v24.4s, %16.4s, v0.s[2] \n"
"fmla v25.4s, %16.4s, v0.s[3] \n"
"fmla v26.4s, %16.4s, v1.s[0] \n"
"fmla v27.4s, %16.4s, v1.s[1] \n"
"fmla v28.4s, %16.4s, v1.s[2] \n"
"fmla v29.4s, %16.4s, v1.s[3] \n"
"fmla v30.4s, %16.4s, v2.s[0] \n"
"fmla v31.4s, %16.4s, v2.s[1] \n"
"st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0], #64 \n"
"st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22) // %16
: "memory", "v0", "v1", "v2", "v4", "v5", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
#endif // __aarch64__
for (; j + 3 < outw; j += 4)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%1, #64] \n"
"ld1 {v0.4h}, [%1], #8 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0] \n"
"shll v0.4s, v0.4h, #16 \n"
"ld1 {v1.s}[0], [%1] \n"
"fmla v24.4s, %8.4s, v0.s[0] \n"
"fmla v25.4s, %8.4s, v0.s[1] \n"
"fmla v26.4s, %8.4s, v0.s[2] \n"
"fmla v27.4s, %8.4s, v0.s[3] \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v24.4s, %9.4s, v0.s[1] \n"
"fmla v25.4s, %9.4s, v0.s[2] \n"
"prfm pldl1keep, [%2, #64] \n"
"ld1 {v2.4h}, [%2], #8 \n"
"fmla v26.4s, %9.4s, v0.s[3] \n"
"fmla v27.4s, %9.4s, v1.s[0] \n"
"ld1 {v3.s}[0], [%2] \n"
"fmla v24.4s, %10.4s, v0.s[2] \n"
"fmla v25.4s, %10.4s, v0.s[3] \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v26.4s, %10.4s, v1.s[0] \n"
"fmla v27.4s, %10.4s, v1.s[1] \n"
"fmla v24.4s, %11.4s, v2.s[0] \n"
"fmla v25.4s, %11.4s, v2.s[1] \n"
"fmla v26.4s, %11.4s, v2.s[2] \n"
"fmla v27.4s, %11.4s, v2.s[3] \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v24.4s, %12.4s, v2.s[1] \n"
"fmla v25.4s, %12.4s, v2.s[2] \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v0.4h}, [%3], #8 \n"
"fmla v26.4s, %12.4s, v2.s[3] \n"
"fmla v27.4s, %12.4s, v3.s[0] \n"
"ld1 {v1.s}[0], [%3] \n"
"fmla v24.4s, %13.4s, v2.s[2] \n"
"fmla v25.4s, %13.4s, v2.s[3] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v26.4s, %13.4s, v3.s[0] \n"
"fmla v27.4s, %13.4s, v3.s[1] \n"
"fmla v24.4s, %14.4s, v0.s[0] \n"
"fmla v25.4s, %14.4s, v0.s[1] \n"
"fmla v26.4s, %14.4s, v0.s[2] \n"
"fmla v27.4s, %14.4s, v0.s[3] \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v24.4s, %15.4s, v0.s[1] \n"
"fmla v25.4s, %15.4s, v0.s[2] \n"
"fmla v26.4s, %15.4s, v0.s[3] \n"
"fmla v27.4s, %15.4s, v1.s[0] \n"
"fmla v24.4s, %16.4s, v0.s[2] \n"
"fmla v25.4s, %16.4s, v0.s[3] \n"
"fmla v26.4s, %16.4s, v1.s[0] \n"
"fmla v27.4s, %16.4s, v1.s[1] \n"
"st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22) // %16
: "memory", "v0", "v1", "v2", "v3", "v24", "v25", "v26", "v27");
#else // __aarch64__
asm volatile(
"pld [%0, #512] \n"
"vldm %0, {d24-d31} \n"
"pld [%1, #64] \n"
"vld1.u16 {d1}, [%1]! \n"
"vld1.u32 {d2[0]}, [%1] \n"
"vshll.u16 q0, d1, #16 \n"
"vshll.u16 q1, d2, #16 \n"
"vmla.f32 q12, %q8, d0[0] \n"
"vmla.f32 q13, %q8, d0[1] \n"
"vmla.f32 q14, %q8, d1[0] \n"
"vmla.f32 q15, %q8, d1[1] \n"
"vmla.f32 q12, %q9, d0[1] \n"
"vmla.f32 q13, %q9, d1[0] \n"
"vmla.f32 q14, %q9, d1[1] \n"
"vmla.f32 q15, %q9, d2[0] \n"
"vmla.f32 q12, %q10, d1[0] \n"
"vmla.f32 q13, %q10, d1[1] \n"
"vmla.f32 q14, %q10, d2[0] \n"
"vmla.f32 q15, %q10, d2[1] \n"
"pld [%2, #64] \n"
"vld1.u16 {d5}, [%2]! \n"
"vld1.u32 {d3[0]}, [%2] \n"
"vshll.u16 q2, d5, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q12, %q11, d4[0] \n"
"vmla.f32 q13, %q11, d4[1] \n"
"vmla.f32 q14, %q11, d5[0] \n"
"vmla.f32 q15, %q11, d5[1] \n"
"vmla.f32 q12, %q12, d4[1] \n"
"vmla.f32 q13, %q12, d5[0] \n"
"vmla.f32 q14, %q12, d5[1] \n"
"vmla.f32 q15, %q12, d2[0] \n"
"vmla.f32 q12, %q13, d5[0] \n"
"vmla.f32 q13, %q13, d5[1] \n"
"vmla.f32 q14, %q13, d2[0] \n"
"vmla.f32 q15, %q13, d2[1] \n"
"pld [%3, #64] \n"
"vld1.u16 {d1}, [%3]! \n"
"vld1.u32 {d2[0]}, [%3] \n"
"vshll.u16 q0, d1, #16 \n"
"vshll.u16 q1, d2, #16 \n"
"vmla.f32 q12, %q14, d0[0] \n"
"vmla.f32 q13, %q14, d0[1] \n"
"vmla.f32 q14, %q14, d1[0] \n"
"vmla.f32 q15, %q14, d1[1] \n"
"vmla.f32 q12, %q15, d0[1] \n"
"vmla.f32 q13, %q15, d1[0] \n"
"vmla.f32 q14, %q15, d1[1] \n"
"vmla.f32 q15, %q15, d2[0] \n"
"vmla.f32 q12, %q16, d1[0] \n"
"vmla.f32 q13, %q16, d1[1] \n"
"vmla.f32 q14, %q16, d2[0] \n"
"vmla.f32 q15, %q16, d2[1] \n"
"vstm %0!, {d24-d31} \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22) // %16
: "memory", "q0", "q1", "q2", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; j + 1 < outw; j += 2)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%1, #64] \n"
"ld1 {v0.4h}, [%1] \n"
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v28.4s, v29.4s}, [%0] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmul v24.4s, %8.4s, v0.s[0] \n"
"fmul v25.4s, %8.4s, v0.s[1] \n"
"prfm pldl1keep, [%2, #64] \n"
"ld1 {v1.4h}, [%2] \n"
"fmul v26.4s, %9.4s, v0.s[1] \n"
"fmul v27.4s, %9.4s, v0.s[2] \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v28.4s, %10.4s, v0.s[2] \n"
"fmla v29.4s, %10.4s, v0.s[3] \n"
"fmla v24.4s, %11.4s, v1.s[0] \n"
"fmla v25.4s, %11.4s, v1.s[1] \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v0.4h}, [%3] \n"
"fmla v26.4s, %12.4s, v1.s[1] \n"
"fmla v27.4s, %12.4s, v1.s[2] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v28.4s, %13.4s, v1.s[2] \n"
"fmla v29.4s, %13.4s, v1.s[3] \n"
"fmla v24.4s, %14.4s, v0.s[0] \n"
"fmla v25.4s, %14.4s, v0.s[1] \n"
"fmla v26.4s, %15.4s, v0.s[1] \n"
"fmla v27.4s, %15.4s, v0.s[2] \n"
"fmla v28.4s, %16.4s, v0.s[2] \n"
"fmla v29.4s, %16.4s, v0.s[3] \n"
"add %1, %1, #4 \n"
"fadd v24.4s, v24.4s, v26.4s \n"
"fadd v25.4s, v25.4s, v27.4s \n"
"add %2, %2, #4 \n"
"fadd v28.4s, v28.4s, v24.4s \n"
"fadd v29.4s, v29.4s, v25.4s \n"
"add %3, %3, #4 \n"
"st1 {v28.4s, v29.4s}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22) // %16
: "memory", "v0", "v1", "v24", "v25", "v26", "v27", "v28", "v29");
#else // __aarch64__
asm volatile(
"pld [%1, #64] \n"
"vld1.u16 {d1}, [%1] \n"
"pld [%0, #256] \n"
"vld1.f32 {d24-d27}, [%0 :128] \n"
"vshll.u16 q0, d1, #16 \n"
"vmul.f32 q14, %q8, d0[0] \n"
"vmul.f32 q15, %q8, d0[1] \n"
"vmla.f32 q12, %q9, d0[1] \n"
"vmla.f32 q13, %q9, d1[0] \n"
"pld [%2, #64] \n"
"vld1.u16 {d3}, [%2] \n"
"vmla.f32 q14, %q10, d1[0] \n"
"vmla.f32 q15, %q10, d1[1] \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q12, %q11, d2[0] \n"
"vmla.f32 q13, %q11, d2[1] \n"
"vmla.f32 q14, %q12, d2[1] \n"
"vmla.f32 q15, %q12, d3[0] \n"
"pld [%3, #64] \n"
"vld1.u16 {d1}, [%3] \n"
"vmla.f32 q12, %q13, d3[0] \n"
"vmla.f32 q13, %q13, d3[1] \n"
"vshll.u16 q0, d1, #16 \n"
"vmla.f32 q14, %q14, d0[0] \n"
"vmla.f32 q15, %q14, d0[1] \n"
"vmla.f32 q12, %q15, d0[1] \n"
"vmla.f32 q13, %q15, d1[0] \n"
"add %1, %1, #4 \n"
"vmla.f32 q14, %q16, d1[0] \n"
"vmla.f32 q15, %q16, d1[1] \n"
"add %2, %2, #4 \n"
"vadd.f32 q12, q12, q14 \n"
"vadd.f32 q13, q13, q15 \n"
"add %3, %3, #4 \n"
"vst1.f32 {d24-d27}, [%0 :128]! \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22) // %16
: "memory", "q0", "q1", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; j < outw; j++)
{
float32x4_t _sum0 = vld1q_f32(outptr0);
float32x4_t _r0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r0), 16));
float32x4_t _r1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r1), 16));
float32x4_t _r2 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r2), 16));
#if __aarch64__
_sum0 = vfmaq_laneq_f32(_sum0, _k00, _r0, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k01, _r0, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k02, _r0, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _k10, _r1, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k11, _r1, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k12, _r1, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _k20, _r2, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k21, _r2, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k22, _r2, 2);
#else
_sum0 = vmlaq_lane_f32(_sum0, _k00, vget_low_f32(_r0), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k01, vget_low_f32(_r0), 1);
_sum0 = vmlaq_lane_f32(_sum0, _k02, vget_high_f32(_r0), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k10, vget_low_f32(_r1), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k11, vget_low_f32(_r1), 1);
_sum0 = vmlaq_lane_f32(_sum0, _k12, vget_high_f32(_r1), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k20, vget_low_f32(_r2), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k21, vget_low_f32(_r2), 1);
_sum0 = vmlaq_lane_f32(_sum0, _k22, vget_high_f32(_r2), 0);
#endif
vst1q_f32(outptr0, _sum0);
r0 += 1;
r1 += 1;
r2 += 1;
outptr0 += 4;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
k0 += 9 * 4;
}
for (; q < inch; q++)
{
unsigned short* outptr0_bf16 = top_blob.channel(p);
const float* outptr0 = out0.row(0);
const Mat img0 = bottom_blob.channel(q);
const unsigned short* r0 = img0.row<unsigned short>(0);
const unsigned short* r1 = img0.row<unsigned short>(1);
const unsigned short* r2 = img0.row<unsigned short>(2);
float32x4_t _k00 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0), 16));
float32x4_t _k01 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 4), 16));
float32x4_t _k02 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 8), 16));
float32x4_t _k10 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 12), 16));
float32x4_t _k11 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 16), 16));
float32x4_t _k12 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 20), 16));
float32x4_t _k20 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 24), 16));
float32x4_t _k21 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 28), 16));
float32x4_t _k22 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 32), 16));
int i = 0;
for (; i < outh; i++)
{
int j = 0;
#if __aarch64__
for (; j + 7 < outw; j += 8)
{
asm volatile(
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%1], #64 \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%1], #64 \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.4h, v1.4h}, [%2], #16 \n"
"ld1 {v2.s}[0], [%2] \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v24.4s, %10.4s, v0.s[0] \n"
"fmla v25.4s, %10.4s, v0.s[1] \n"
"fmla v26.4s, %10.4s, v0.s[2] \n"
"fmla v27.4s, %10.4s, v0.s[3] \n"
"fmla v28.4s, %10.4s, v1.s[0] \n"
"fmla v29.4s, %10.4s, v1.s[1] \n"
"fmla v30.4s, %10.4s, v1.s[2] \n"
"fmla v31.4s, %10.4s, v1.s[3] \n"
"fmla v24.4s, %11.4s, v0.s[1] \n"
"fmla v25.4s, %11.4s, v0.s[2] \n"
"fmla v26.4s, %11.4s, v0.s[3] \n"
"fmla v27.4s, %11.4s, v1.s[0] \n"
"fmla v28.4s, %11.4s, v1.s[1] \n"
"fmla v29.4s, %11.4s, v1.s[2] \n"
"fmla v30.4s, %11.4s, v1.s[3] \n"
"fmla v31.4s, %11.4s, v2.s[0] \n"
"fmla v24.4s, %12.4s, v0.s[2] \n"
"fmla v25.4s, %12.4s, v0.s[3] \n"
"fmla v26.4s, %12.4s, v1.s[0] \n"
"fmla v27.4s, %12.4s, v1.s[1] \n"
"fmla v28.4s, %12.4s, v1.s[2] \n"
"fmla v29.4s, %12.4s, v1.s[3] \n"
"fmla v30.4s, %12.4s, v2.s[0] \n"
"fmla v31.4s, %12.4s, v2.s[1] \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v4.4h, v5.4h}, [%3], #16 \n"
"ld1 {v2.s}[0], [%3] \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v24.4s, %13.4s, v4.s[0] \n"
"fmla v25.4s, %13.4s, v4.s[1] \n"
"fmla v26.4s, %13.4s, v4.s[2] \n"
"fmla v27.4s, %13.4s, v4.s[3] \n"
"fmla v28.4s, %13.4s, v5.s[0] \n"
"fmla v29.4s, %13.4s, v5.s[1] \n"
"fmla v30.4s, %13.4s, v5.s[2] \n"
"fmla v31.4s, %13.4s, v5.s[3] \n"
"fmla v24.4s, %14.4s, v4.s[1] \n"
"fmla v25.4s, %14.4s, v4.s[2] \n"
"fmla v26.4s, %14.4s, v4.s[3] \n"
"fmla v27.4s, %14.4s, v5.s[0] \n"
"fmla v28.4s, %14.4s, v5.s[1] \n"
"fmla v29.4s, %14.4s, v5.s[2] \n"
"fmla v30.4s, %14.4s, v5.s[3] \n"
"fmla v31.4s, %14.4s, v2.s[0] \n"
"fmla v24.4s, %15.4s, v4.s[2] \n"
"fmla v25.4s, %15.4s, v4.s[3] \n"
"fmla v26.4s, %15.4s, v5.s[0] \n"
"fmla v27.4s, %15.4s, v5.s[1] \n"
"fmla v28.4s, %15.4s, v5.s[2] \n"
"fmla v29.4s, %15.4s, v5.s[3] \n"
"fmla v30.4s, %15.4s, v2.s[0] \n"
"fmla v31.4s, %15.4s, v2.s[1] \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v0.4h, v1.4h}, [%4], #16 \n"
"ld1 {v2.s}[0], [%4] \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v24.4s, %16.4s, v0.s[0] \n"
"fmla v25.4s, %16.4s, v0.s[1] \n"
"fmla v26.4s, %16.4s, v0.s[2] \n"
"fmla v27.4s, %16.4s, v0.s[3] \n"
"fmla v28.4s, %16.4s, v1.s[0] \n"
"fmla v29.4s, %16.4s, v1.s[1] \n"
"fmla v30.4s, %16.4s, v1.s[2] \n"
"fmla v31.4s, %16.4s, v1.s[3] \n"
"fmla v24.4s, %17.4s, v0.s[1] \n"
"fmla v25.4s, %17.4s, v0.s[2] \n"
"fmla v26.4s, %17.4s, v0.s[3] \n"
"fmla v27.4s, %17.4s, v1.s[0] \n"
"fmla v28.4s, %17.4s, v1.s[1] \n"
"fmla v29.4s, %17.4s, v1.s[2] \n"
"fmla v30.4s, %17.4s, v1.s[3] \n"
"fmla v31.4s, %17.4s, v2.s[0] \n"
"fmla v24.4s, %18.4s, v0.s[2] \n"
"fmla v25.4s, %18.4s, v0.s[3] \n"
"fmla v26.4s, %18.4s, v1.s[0] \n"
"fmla v27.4s, %18.4s, v1.s[1] \n"
"fmla v28.4s, %18.4s, v1.s[2] \n"
"fmla v29.4s, %18.4s, v1.s[3] \n"
"fmla v30.4s, %18.4s, v2.s[0] \n"
"fmla v31.4s, %18.4s, v2.s[1] \n"
"shrn v24.4h, v24.4s, #16 \n"
"shrn v25.4h, v25.4s, #16 \n"
"shrn v26.4h, v26.4s, #16 \n"
"shrn v27.4h, v27.4s, #16 \n"
"shrn v28.4h, v28.4s, #16 \n"
"shrn v29.4h, v29.4s, #16 \n"
"shrn v30.4h, v30.4s, #16 \n"
"shrn v31.4h, v31.4s, #16 \n"
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%0], #32 \n"
"st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%0], #32 \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k00), // %10
"w"(_k01), // %11
"w"(_k02), // %12
"w"(_k10), // %13
"w"(_k11), // %14
"w"(_k12), // %15
"w"(_k20), // %16
"w"(_k21), // %17
"w"(_k22) // %18
: "memory", "v0", "v1", "v2", "v4", "v5", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
#endif // __aarch64__
for (; j + 3 < outw; j += 4)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%2, #64] \n"
"ld1 {v0.4h}, [%2], #8 \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%1], #64 \n"
"shll v0.4s, v0.4h, #16 \n"
"ld1 {v1.s}[0], [%2] \n"
"fmla v24.4s, %10.4s, v0.s[0] \n"
"fmla v25.4s, %10.4s, v0.s[1] \n"
"fmla v26.4s, %10.4s, v0.s[2] \n"
"fmla v27.4s, %10.4s, v0.s[3] \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v24.4s, %11.4s, v0.s[1] \n"
"fmla v25.4s, %11.4s, v0.s[2] \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v2.4h}, [%3], #8 \n"
"fmla v26.4s, %11.4s, v0.s[3] \n"
"fmla v27.4s, %11.4s, v1.s[0] \n"
"ld1 {v3.s}[0], [%3] \n"
"fmla v24.4s, %12.4s, v0.s[2] \n"
"fmla v25.4s, %12.4s, v0.s[3] \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v26.4s, %12.4s, v1.s[0] \n"
"fmla v27.4s, %12.4s, v1.s[1] \n"
"fmla v24.4s, %13.4s, v2.s[0] \n"
"fmla v25.4s, %13.4s, v2.s[1] \n"
"fmla v26.4s, %13.4s, v2.s[2] \n"
"fmla v27.4s, %13.4s, v2.s[3] \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v24.4s, %14.4s, v2.s[1] \n"
"fmla v25.4s, %14.4s, v2.s[2] \n"
"prfm pldl1keep, [%4, #64] \n"
"ld1 {v0.4h}, [%4], #8 \n"
"fmla v26.4s, %14.4s, v2.s[3] \n"
"fmla v27.4s, %14.4s, v3.s[0] \n"
"ld1 {v1.s}[0], [%4] \n"
"fmla v24.4s, %15.4s, v2.s[2] \n"
"fmla v25.4s, %15.4s, v2.s[3] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v26.4s, %15.4s, v3.s[0] \n"
"fmla v27.4s, %15.4s, v3.s[1] \n"
"fmla v24.4s, %16.4s, v0.s[0] \n"
"fmla v25.4s, %16.4s, v0.s[1] \n"
"fmla v26.4s, %16.4s, v0.s[2] \n"
"fmla v27.4s, %16.4s, v0.s[3] \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v24.4s, %17.4s, v0.s[1] \n"
"fmla v25.4s, %17.4s, v0.s[2] \n"
"fmla v26.4s, %17.4s, v0.s[3] \n"
"fmla v27.4s, %17.4s, v1.s[0] \n"
"fmla v24.4s, %18.4s, v0.s[2] \n"
"fmla v25.4s, %18.4s, v0.s[3] \n"
"fmla v26.4s, %18.4s, v1.s[0] \n"
"fmla v27.4s, %18.4s, v1.s[1] \n"
"shrn v24.4h, v24.4s, #16 \n"
"shrn v25.4h, v25.4s, #16 \n"
"shrn v26.4h, v26.4s, #16 \n"
"shrn v27.4h, v27.4s, #16 \n"
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%0], #32 \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k00), // %10
"w"(_k01), // %11
"w"(_k02), // %12
"w"(_k10), // %13
"w"(_k11), // %14
"w"(_k12), // %15
"w"(_k20), // %16
"w"(_k21), // %17
"w"(_k22) // %18
: "memory", "v0", "v1", "v2", "v3", "v24", "v25", "v26", "v27");
#else // __aarch64__
asm volatile(
"pld [%1, #512] \n"
"vldm %1!, {d24-d31} \n"
"pld [%2, #64] \n"
"vld1.u16 {d1}, [%2]! \n"
"vld1.u32 {d2[0]}, [%2] \n"
"vshll.u16 q0, d1, #16 \n"
"vshll.u16 q1, d2, #16 \n"
"vmla.f32 q12, %q10, d0[0] \n"
"vmla.f32 q13, %q10, d0[1] \n"
"vmla.f32 q14, %q10, d1[0] \n"
"vmla.f32 q15, %q10, d1[1] \n"
"vmla.f32 q12, %q11, d0[1] \n"
"vmla.f32 q13, %q11, d1[0] \n"
"vmla.f32 q14, %q11, d1[1] \n"
"vmla.f32 q15, %q11, d2[0] \n"
"vmla.f32 q12, %q12, d1[0] \n"
"vmla.f32 q13, %q12, d1[1] \n"
"vmla.f32 q14, %q12, d2[0] \n"
"vmla.f32 q15, %q12, d2[1] \n"
"pld [%3, #64] \n"
"vld1.u16 {d5}, [%3]! \n"
"vld1.u32 {d3[0]}, [%3] \n"
"vshll.u16 q2, d5, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q12, %q13, d4[0] \n"
"vmla.f32 q13, %q13, d4[1] \n"
"vmla.f32 q14, %q13, d5[0] \n"
"vmla.f32 q15, %q13, d5[1] \n"
"vmla.f32 q12, %q14, d4[1] \n"
"vmla.f32 q13, %q14, d5[0] \n"
"vmla.f32 q14, %q14, d5[1] \n"
"vmla.f32 q15, %q14, d2[0] \n"
"vmla.f32 q12, %q15, d5[0] \n"
"vmla.f32 q13, %q15, d5[1] \n"
"vmla.f32 q14, %q15, d2[0] \n"
"vmla.f32 q15, %q15, d2[1] \n"
"pld [%4, #64] \n"
"vld1.u16 {d1}, [%4]! \n"
"vld1.u32 {d2[0]}, [%4] \n"
"vshll.u16 q0, d1, #16 \n"
"vshll.u16 q1, d2, #16 \n"
"vmla.f32 q12, %q16, d0[0] \n"
"vmla.f32 q13, %q16, d0[1] \n"
"vmla.f32 q14, %q16, d1[0] \n"
"vmla.f32 q15, %q16, d1[1] \n"
"vmla.f32 q12, %q17, d0[1] \n"
"vmla.f32 q13, %q17, d1[0] \n"
"vmla.f32 q14, %q17, d1[1] \n"
"vmla.f32 q15, %q17, d2[0] \n"
"vmla.f32 q12, %q18, d1[0] \n"
"vmla.f32 q13, %q18, d1[1] \n"
"vmla.f32 q14, %q18, d2[0] \n"
"vmla.f32 q15, %q18, d2[1] \n"
"vshrn.s32 d24, q12, #16 \n"
"vshrn.s32 d25, q13, #16 \n"
"vshrn.s32 d26, q14, #16 \n"
"vshrn.s32 d27, q15, #16 \n"
"vst1.u16 {d24-d27}, [%0 :64]! \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k00), // %10
"w"(_k01), // %11
"w"(_k02), // %12
"w"(_k10), // %13
"w"(_k11), // %14
"w"(_k12), // %15
"w"(_k20), // %16
"w"(_k21), // %17
"w"(_k22) // %18
: "memory", "q0", "q1", "q2", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; j + 1 < outw; j += 2)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%2, #64] \n"
"ld1 {v0.4h}, [%2] \n"
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v28.4s, v29.4s}, [%1], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"fmul v24.4s, %10.4s, v0.s[0] \n"
"fmul v25.4s, %10.4s, v0.s[1] \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v1.4h}, [%3] \n"
"fmul v26.4s, %11.4s, v0.s[1] \n"
"fmul v27.4s, %11.4s, v0.s[2] \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v28.4s, %12.4s, v0.s[2] \n"
"fmla v29.4s, %12.4s, v0.s[3] \n"
"fmla v24.4s, %13.4s, v1.s[0] \n"
"fmla v25.4s, %13.4s, v1.s[1] \n"
"prfm pldl1keep, [%4, #64] \n"
"ld1 {v0.4h}, [%4] \n"
"fmla v26.4s, %14.4s, v1.s[1] \n"
"fmla v27.4s, %14.4s, v1.s[2] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v28.4s, %15.4s, v1.s[2] \n"
"fmla v29.4s, %15.4s, v1.s[3] \n"
"fmla v24.4s, %16.4s, v0.s[0] \n"
"fmla v25.4s, %16.4s, v0.s[1] \n"
"fmla v26.4s, %17.4s, v0.s[1] \n"
"fmla v27.4s, %17.4s, v0.s[2] \n"
"fmla v28.4s, %18.4s, v0.s[2] \n"
"fmla v29.4s, %18.4s, v0.s[3] \n"
"add %2, %2, #4 \n"
"fadd v24.4s, v24.4s, v26.4s \n"
"fadd v25.4s, v25.4s, v27.4s \n"
"add %3, %3, #4 \n"
"fadd v28.4s, v28.4s, v24.4s \n"
"fadd v29.4s, v29.4s, v25.4s \n"
"add %4, %4, #4 \n"
"shrn v28.4h, v28.4s, #16 \n"
"shrn v29.4h, v29.4s, #16 \n"
"st1 {v28.4h, v29.4h}, [%0], #16 \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k00), // %10
"w"(_k01), // %11
"w"(_k02), // %12
"w"(_k10), // %13
"w"(_k11), // %14
"w"(_k12), // %15
"w"(_k20), // %16
"w"(_k21), // %17
"w"(_k22) // %18
: "memory", "v0", "v1", "v24", "v25", "v26", "v27", "v28", "v29");
#else // __aarch64__
asm volatile(
"pld [%2, #64] \n"
"vld1.u16 {d1}, [%2] \n"
"pld [%1, #256] \n"
"vld1.f32 {d24-d27}, [%1 :128]! \n"
"vshll.u16 q0, d1, #16 \n"
"vmul.f32 q14, %q10, d0[0] \n"
"vmul.f32 q15, %q10, d0[1] \n"
"vmla.f32 q12, %q11, d0[1] \n"
"vmla.f32 q13, %q11, d1[0] \n"
"pld [%3, #64] \n"
"vld1.u16 {d3}, [%3] \n"
"vmla.f32 q14, %q12, d1[0] \n"
"vmla.f32 q15, %q12, d1[1] \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q12, %q13, d2[0] \n"
"vmla.f32 q13, %q13, d2[1] \n"
"vmla.f32 q14, %q14, d2[1] \n"
"vmla.f32 q15, %q14, d3[0] \n"
"pld [%4, #64] \n"
"vld1.u16 {d1}, [%4] \n"
"vmla.f32 q12, %q15, d3[0] \n"
"vmla.f32 q13, %q15, d3[1] \n"
"vshll.u16 q0, d1, #16 \n"
"vmla.f32 q14, %q16, d0[0] \n"
"vmla.f32 q15, %q16, d0[1] \n"
"vmla.f32 q12, %q17, d0[1] \n"
"vmla.f32 q13, %q17, d1[0] \n"
"add %2, %2, #4 \n"
"vmla.f32 q14, %q18, d1[0] \n"
"vmla.f32 q15, %q18, d1[1] \n"
"add %3, %3, #4 \n"
"vadd.f32 q12, q12, q14 \n"
"vadd.f32 q13, q13, q15 \n"
"add %4, %4, #4 \n"
"vshrn.s32 d24, q12, #16 \n"
"vshrn.s32 d25, q13, #16 \n"
"vst1.f32 {d24-d25}, [%0 :64]! \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k00), // %10
"w"(_k01), // %11
"w"(_k02), // %12
"w"(_k10), // %13
"w"(_k11), // %14
"w"(_k12), // %15
"w"(_k20), // %16
"w"(_k21), // %17
"w"(_k22) // %18
: "memory", "q0", "q1", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; j < outw; j++)
{
float32x4_t _sum0 = vld1q_f32(outptr0);
float32x4_t _r0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r0), 16));
float32x4_t _r1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r1), 16));
float32x4_t _r2 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r2), 16));
#if __aarch64__
_sum0 = vfmaq_laneq_f32(_sum0, _k00, _r0, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k01, _r0, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k02, _r0, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _k10, _r1, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k11, _r1, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k12, _r1, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _k20, _r2, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k21, _r2, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k22, _r2, 2);
#else
_sum0 = vmlaq_lane_f32(_sum0, _k00, vget_low_f32(_r0), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k01, vget_low_f32(_r0), 1);
_sum0 = vmlaq_lane_f32(_sum0, _k02, vget_high_f32(_r0), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k10, vget_low_f32(_r1), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k11, vget_low_f32(_r1), 1);
_sum0 = vmlaq_lane_f32(_sum0, _k12, vget_high_f32(_r1), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k20, vget_low_f32(_r2), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k21, vget_low_f32(_r2), 1);
_sum0 = vmlaq_lane_f32(_sum0, _k22, vget_high_f32(_r2), 0);
#endif
vst1_u16(outptr0_bf16, vshrn_n_u32(vreinterpretq_u32_f32(_sum0), 16));
r0 += 1;
r1 += 1;
r2 += 1;
outptr0 += 4;
outptr0_bf16 += 4;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
k0 += 9 * 4;
}
}
}
static void conv3x3s2_pack1to4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
#if __ARM_NEON && __aarch64__
Mat top_blob_fp32(outw, outh, opt.num_threads, (size_t)4u * 4 * 2, 4 * 2, opt.workspace_allocator);
#else
Mat top_blob_fp32(outw, outh, opt.num_threads, (size_t)4u * 4, 4, opt.workspace_allocator);
#endif
const int tailstep = w - 2 * outw + w;
const float* bias = _bias;
int nn_outch = 0;
int remain_outch_start = 0;
#if __ARM_NEON && __aarch64__
nn_outch = outch >> 1;
remain_outch_start = nn_outch << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 2;
Mat out0 = top_blob_fp32.channel(get_omp_thread_num());
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f);
float32x4_t _bias1 = bias ? vld1q_f32((const float*)bias + (p + 1) * 4) : vdupq_n_f32(0.f);
{
float* ptr = (float*)out0;
for (int i = 0; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
vst1q_f32(ptr, _bias0);
vst1q_f32(ptr + 4, _bias0);
vst1q_f32(ptr + 8, _bias0);
vst1q_f32(ptr + 12, _bias0);
vst1q_f32(ptr + 16, _bias1);
vst1q_f32(ptr + 20, _bias1);
vst1q_f32(ptr + 24, _bias1);
vst1q_f32(ptr + 28, _bias1);
ptr += 32;
}
for (; j + 1 < outw; j += 2)
{
vst1q_f32(ptr, _bias0);
vst1q_f32(ptr + 4, _bias0);
vst1q_f32(ptr + 8, _bias1);
vst1q_f32(ptr + 12, _bias1);
ptr += 16;
}
for (; j < outw; j++)
{
vst1q_f32(ptr, _bias0);
vst1q_f32(ptr + 4, _bias1);
ptr += 8;
}
}
}
const unsigned short* k0 = kernel.channel(p);
const unsigned short* k1 = kernel.channel(p + 1);
int q = 0;
for (; q < inch - 1; q++)
{
float* outptr0 = out0;
const Mat img0 = bottom_blob.channel(q);
const unsigned short* r0 = img0.row<const unsigned short>(0);
const unsigned short* r1 = img0.row<const unsigned short>(1);
const unsigned short* r2 = img0.row<const unsigned short>(2);
float32x4_t _k00_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0), 16));
float32x4_t _k01_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 4), 16));
float32x4_t _k02_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 8), 16));
float32x4_t _k10_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 12), 16));
float32x4_t _k11_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 16), 16));
float32x4_t _k12_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 20), 16));
float32x4_t _k20_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 24), 16));
float32x4_t _k21_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 28), 16));
float32x4_t _k22_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 32), 16));
float32x4_t _k00_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1), 16));
float32x4_t _k01_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 4), 16));
float32x4_t _k02_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 8), 16));
float32x4_t _k10_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 12), 16));
float32x4_t _k11_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 16), 16));
float32x4_t _k12_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 20), 16));
float32x4_t _k20_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 24), 16));
float32x4_t _k21_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 28), 16));
float32x4_t _k22_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 32), 16));
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
asm volatile(
// r0
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v0.4h, v1.4h}, [%1], #16 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%0], #64 \n" // sum0
"shll v0.4s, v0.4h, #16 \n"
// "prfm pldl1keep, [%0, #512] \n"
"ld1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%0] \n" // sum1
"shll v1.4s, v1.4h, #16 \n"
"fmla v6.4s, %8.4s, v0.s[0] \n"
"fmla v7.4s, %8.4s, v0.s[2] \n"
"fmla v8.4s, %8.4s, v1.s[0] \n"
"fmla v9.4s, %8.4s, v1.s[2] \n"
"fmla v10.4s, %17.4s, v0.s[0] \n"
"fmla v11.4s, %17.4s, v0.s[2] \n"
"fmla v12.4s, %17.4s, v1.s[0] \n"
"fmla v13.4s, %17.4s, v1.s[2] \n"
"ld1 {v4.h}[0], [%1] \n"
"fmla v6.4s, %9.4s, v0.s[1] \n"
"fmla v7.4s, %9.4s, v0.s[3] \n"
"fmla v8.4s, %9.4s, v1.s[1] \n"
"fmla v9.4s, %9.4s, v1.s[3] \n"
"fmla v10.4s, %18.4s, v0.s[1] \n"
"fmla v11.4s, %18.4s, v0.s[3] \n"
"fmla v12.4s, %18.4s, v1.s[1] \n"
"fmla v13.4s, %18.4s, v1.s[3] \n"
"shll v4.4s, v4.4h, #16 \n"
// r1
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v2.4h, v3.4h}, [%2], #16 \n"
"fmla v6.4s, %10.4s, v0.s[2] \n"
"fmla v7.4s, %10.4s, v1.s[0] \n"
"fmla v8.4s, %10.4s, v1.s[2] \n"
"fmla v9.4s, %10.4s, v4.s[0] \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v10.4s, %19.4s, v0.s[2] \n"
"fmla v11.4s, %19.4s, v1.s[0] \n"
"fmla v12.4s, %19.4s, v1.s[2] \n"
"fmla v13.4s, %19.4s, v4.s[0] \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v6.4s, %11.4s, v2.s[0] \n"
"fmla v7.4s, %11.4s, v2.s[2] \n"
"fmla v8.4s, %11.4s, v3.s[0] \n"
"fmla v9.4s, %11.4s, v3.s[2] \n"
"fmla v10.4s, %20.4s, v2.s[0] \n"
"fmla v11.4s, %20.4s, v2.s[2] \n"
"fmla v12.4s, %20.4s, v3.s[0] \n"
"fmla v13.4s, %20.4s, v3.s[2] \n"
"ld1 {v5.h}[0], [%2] \n"
"fmla v6.4s, %12.4s, v2.s[1] \n"
"fmla v7.4s, %12.4s, v2.s[3] \n"
"fmla v8.4s, %12.4s, v3.s[1] \n"
"fmla v9.4s, %12.4s, v3.s[3] \n"
"shll v5.4s, v5.4h, #16 \n"
"fmla v10.4s, %21.4s, v2.s[1] \n"
"fmla v11.4s, %21.4s, v2.s[3] \n"
"fmla v12.4s, %21.4s, v3.s[1] \n"
"fmla v13.4s, %21.4s, v3.s[3] \n"
// r2
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v0.4h, v1.4h}, [%3], #16 \n"
"fmla v6.4s, %13.4s, v2.s[2] \n"
"fmla v7.4s, %13.4s, v3.s[0] \n"
"fmla v8.4s, %13.4s, v3.s[2] \n"
"fmla v9.4s, %13.4s, v5.s[0] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v10.4s, %22.4s, v2.s[2] \n"
"fmla v11.4s, %22.4s, v3.s[0] \n"
"fmla v12.4s, %22.4s, v3.s[2] \n"
"fmla v13.4s, %22.4s, v5.s[0] \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v6.4s, %14.4s, v0.s[0] \n"
"fmla v7.4s, %14.4s, v0.s[2] \n"
"fmla v8.4s, %14.4s, v1.s[0] \n"
"fmla v9.4s, %14.4s, v1.s[2] \n"
"fmla v10.4s, %23.4s, v0.s[0] \n"
"fmla v11.4s, %23.4s, v0.s[2] \n"
"fmla v12.4s, %23.4s, v1.s[0] \n"
"fmla v13.4s, %23.4s, v1.s[2] \n"
"ld1 {v4.h}[0], [%3] \n"
"fmla v6.4s, %15.4s, v0.s[1] \n"
"fmla v7.4s, %15.4s, v0.s[3] \n"
"fmla v8.4s, %15.4s, v1.s[1] \n"
"fmla v9.4s, %15.4s, v1.s[3] \n"
"fmla v10.4s, %24.4s, v0.s[1] \n"
"fmla v11.4s, %24.4s, v0.s[3] \n"
"fmla v12.4s, %24.4s, v1.s[1] \n"
"fmla v13.4s, %24.4s, v1.s[3] \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v6.4s, %16.4s, v0.s[2] \n"
"fmla v7.4s, %16.4s, v1.s[0] \n"
"fmla v8.4s, %16.4s, v1.s[2] \n"
"fmla v9.4s, %16.4s, v4.s[0] \n"
"sub %0, %0, #64 \n"
"fmla v10.4s, %25.4s, v0.s[2] \n"
"fmla v11.4s, %25.4s, v1.s[0] \n"
"fmla v12.4s, %25.4s, v1.s[2] \n"
"fmla v13.4s, %25.4s, v4.s[0] \n"
"st1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%0], #64 \n"
"st1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00_0), // %8
"w"(_k01_0), // %9
"w"(_k02_0), // %10
"w"(_k10_0), // %11
"w"(_k11_0), // %12
"w"(_k12_0), // %13
"w"(_k20_0), // %14
"w"(_k21_0), // %15
"w"(_k22_0), // %16
"w"(_k00_1), // %17
"w"(_k01_1), // %18
"w"(_k02_1), // %19
"w"(_k10_1), // %20
"w"(_k11_1), // %21
"w"(_k12_1), // %22
"w"(_k20_1), // %23
"w"(_k21_1), // %24
"w"(_k22_1) // %25
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13");
}
for (; j + 1 < outw; j += 2)
{
asm volatile(
// r0
"prfm pldl1keep, [%1, #64] \n"
"ld1 {v0.4h}, [%1], #8 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%0] \n" // sum0 sum1
"shll v0.4s, v0.4h, #16 \n"
"fmla v10.4s, %8.4s, v0.s[0] \n"
"fmla v11.4s, %8.4s, v0.s[2] \n"
"fmla v12.4s, %17.4s, v0.s[0] \n"
"fmla v13.4s, %17.4s, v0.s[2] \n"
"ld1 {v1.h}[0], [%1] \n"
"fmla v10.4s, %9.4s, v0.s[1] \n"
"fmla v11.4s, %9.4s, v0.s[3] \n"
"fmla v12.4s, %18.4s, v0.s[1] \n"
"fmla v13.4s, %18.4s, v0.s[3] \n"
"shll v1.4s, v1.4h, #16 \n"
// r1
"prfm pldl1keep, [%2, #64] \n"
"ld1 {v2.4h}, [%2], #8 \n"
"fmla v10.4s, %10.4s, v0.s[2] \n"
"fmla v11.4s, %10.4s, v1.s[0] \n"
"fmla v12.4s, %19.4s, v0.s[2] \n"
"fmla v13.4s, %19.4s, v1.s[0] \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v10.4s, %11.4s, v2.s[0] \n"
"fmla v11.4s, %11.4s, v2.s[2] \n"
"fmla v12.4s, %20.4s, v2.s[0] \n"
"fmla v13.4s, %20.4s, v2.s[2] \n"
"ld1 {v3.h}[0], [%2] \n"
"fmla v10.4s, %12.4s, v2.s[1] \n"
"fmla v11.4s, %12.4s, v2.s[3] \n"
"fmla v12.4s, %21.4s, v2.s[1] \n"
"fmla v13.4s, %21.4s, v2.s[3] \n"
"shll v3.4s, v3.4h, #16 \n"
// r2
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v0.4h}, [%3], #8 \n"
"fmla v10.4s, %13.4s, v2.s[2] \n"
"fmla v11.4s, %13.4s, v3.s[0] \n"
"fmla v12.4s, %22.4s, v2.s[2] \n"
"fmla v13.4s, %22.4s, v3.s[0] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v10.4s, %14.4s, v0.s[0] \n"
"fmla v11.4s, %14.4s, v0.s[2] \n"
"fmla v12.4s, %23.4s, v0.s[0] \n"
"fmla v13.4s, %23.4s, v0.s[2] \n"
"ld1 {v1.h}[0], [%3] \n"
"fmla v10.4s, %15.4s, v0.s[1] \n"
"fmla v11.4s, %15.4s, v0.s[3] \n"
"fmla v12.4s, %24.4s, v0.s[1] \n"
"fmla v13.4s, %24.4s, v0.s[3] \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v10.4s, %16.4s, v0.s[2] \n"
"fmla v11.4s, %16.4s, v1.s[0] \n"
"fmla v12.4s, %25.4s, v0.s[2] \n"
"fmla v13.4s, %25.4s, v1.s[0] \n"
"st1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00_0), // %8
"w"(_k01_0), // %9
"w"(_k02_0), // %10
"w"(_k10_0), // %11
"w"(_k11_0), // %12
"w"(_k12_0), // %13
"w"(_k20_0), // %14
"w"(_k21_0), // %15
"w"(_k22_0), // %16
"w"(_k00_1), // %17
"w"(_k01_1), // %18
"w"(_k02_1), // %19
"w"(_k10_1), // %20
"w"(_k11_1), // %21
"w"(_k12_1), // %22
"w"(_k20_1), // %23
"w"(_k21_1), // %24
"w"(_k22_1) // %25
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13");
}
for (; j < outw; j++)
{
float32x4_t _sum0 = vld1q_f32(outptr0);
float32x4_t _sum1 = vld1q_f32(outptr0 + 4);
float32x4_t _r0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r0), 16));
float32x4_t _r1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r1), 16));
float32x4_t _r2 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r2), 16));
_sum0 = vfmaq_laneq_f32(_sum0, _k00_0, _r0, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k01_0, _r0, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k02_0, _r0, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _k10_0, _r1, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k11_0, _r1, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k12_0, _r1, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _k20_0, _r2, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k21_0, _r2, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k22_0, _r2, 2);
_sum1 = vfmaq_laneq_f32(_sum1, _k00_1, _r0, 0);
_sum1 = vfmaq_laneq_f32(_sum1, _k01_1, _r0, 1);
_sum1 = vfmaq_laneq_f32(_sum1, _k02_1, _r0, 2);
_sum1 = vfmaq_laneq_f32(_sum1, _k10_1, _r1, 0);
_sum1 = vfmaq_laneq_f32(_sum1, _k11_1, _r1, 1);
_sum1 = vfmaq_laneq_f32(_sum1, _k12_1, _r1, 2);
_sum1 = vfmaq_laneq_f32(_sum1, _k20_1, _r2, 0);
_sum1 = vfmaq_laneq_f32(_sum1, _k21_1, _r2, 1);
_sum1 = vfmaq_laneq_f32(_sum1, _k22_1, _r2, 2);
vst1q_f32(outptr0, _sum0);
vst1q_f32(outptr0 + 4, _sum1);
r0 += 2;
r1 += 2;
r2 += 2;
outptr0 += 8;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
k0 += 9 * 4;
k1 += 9 * 4;
}
for (; q < inch; q++)
{
unsigned short* outptr0_bf16 = top_blob.channel(p);
unsigned short* outptr1_bf16 = top_blob.channel(p + 1);
const float* outptr0 = out0;
const Mat img0 = bottom_blob.channel(q);
const unsigned short* r0 = img0.row<const unsigned short>(0);
const unsigned short* r1 = img0.row<const unsigned short>(1);
const unsigned short* r2 = img0.row<const unsigned short>(2);
float32x4_t _k00_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0), 16));
float32x4_t _k01_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 4), 16));
float32x4_t _k02_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 8), 16));
float32x4_t _k10_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 12), 16));
float32x4_t _k11_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 16), 16));
float32x4_t _k12_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 20), 16));
float32x4_t _k20_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 24), 16));
float32x4_t _k21_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 28), 16));
float32x4_t _k22_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 32), 16));
float32x4_t _k00_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1), 16));
float32x4_t _k01_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 4), 16));
float32x4_t _k02_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 8), 16));
float32x4_t _k10_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 12), 16));
float32x4_t _k11_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 16), 16));
float32x4_t _k12_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 20), 16));
float32x4_t _k20_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 24), 16));
float32x4_t _k21_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 28), 16));
float32x4_t _k22_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 32), 16));
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
asm volatile(
// r0
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v0.4h, v1.4h}, [%3], #16 \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%2], #64 \n" // sum0
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%2], #64 \n" // sum1
"fmla v6.4s, %12.4s, v0.s[0] \n"
"fmla v7.4s, %12.4s, v0.s[2] \n"
"fmla v8.4s, %12.4s, v1.s[0] \n"
"fmla v9.4s, %12.4s, v1.s[2] \n"
"fmla v10.4s, %21.4s, v0.s[0] \n"
"fmla v11.4s, %21.4s, v0.s[2] \n"
"fmla v12.4s, %21.4s, v1.s[0] \n"
"fmla v13.4s, %21.4s, v1.s[2] \n"
"ld1 {v4.h}[0], [%3] \n"
"fmla v6.4s, %13.4s, v0.s[1] \n"
"fmla v7.4s, %13.4s, v0.s[3] \n"
"fmla v8.4s, %13.4s, v1.s[1] \n"
"fmla v9.4s, %13.4s, v1.s[3] \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v10.4s, %22.4s, v0.s[1] \n"
"fmla v11.4s, %22.4s, v0.s[3] \n"
"fmla v12.4s, %22.4s, v1.s[1] \n"
"fmla v13.4s, %22.4s, v1.s[3] \n"
// r1
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v2.4h, v3.4h}, [%4], #16 \n"
"fmla v6.4s, %14.4s, v0.s[2] \n"
"fmla v7.4s, %14.4s, v1.s[0] \n"
"fmla v8.4s, %14.4s, v1.s[2] \n"
"fmla v9.4s, %14.4s, v4.s[0] \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v10.4s, %23.4s, v0.s[2] \n"
"fmla v11.4s, %23.4s, v1.s[0] \n"
"fmla v12.4s, %23.4s, v1.s[2] \n"
"fmla v13.4s, %23.4s, v4.s[0] \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v6.4s, %15.4s, v2.s[0] \n"
"fmla v7.4s, %15.4s, v2.s[2] \n"
"fmla v8.4s, %15.4s, v3.s[0] \n"
"fmla v9.4s, %15.4s, v3.s[2] \n"
"fmla v10.4s, %24.4s, v2.s[0] \n"
"fmla v11.4s, %24.4s, v2.s[2] \n"
"fmla v12.4s, %24.4s, v3.s[0] \n"
"fmla v13.4s, %24.4s, v3.s[2] \n"
"ld1 {v5.h}[0], [%4] \n"
"fmla v6.4s, %16.4s, v2.s[1] \n"
"fmla v7.4s, %16.4s, v2.s[3] \n"
"fmla v8.4s, %16.4s, v3.s[1] \n"
"fmla v9.4s, %16.4s, v3.s[3] \n"
"shll v5.4s, v5.4h, #16 \n"
"fmla v10.4s, %25.4s, v2.s[1] \n"
"fmla v11.4s, %25.4s, v2.s[3] \n"
"fmla v12.4s, %25.4s, v3.s[1] \n"
"fmla v13.4s, %25.4s, v3.s[3] \n"
// r2
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4h, v1.4h}, [%5], #16 \n"
"fmla v6.4s, %17.4s, v2.s[2] \n"
"fmla v7.4s, %17.4s, v3.s[0] \n"
"fmla v8.4s, %17.4s, v3.s[2] \n"
"fmla v9.4s, %17.4s, v5.s[0] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v10.4s, %26.4s, v2.s[2] \n"
"fmla v11.4s, %26.4s, v3.s[0] \n"
"fmla v12.4s, %26.4s, v3.s[2] \n"
"fmla v13.4s, %26.4s, v5.s[0] \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v6.4s, %18.4s, v0.s[0] \n"
"fmla v7.4s, %18.4s, v0.s[2] \n"
"fmla v8.4s, %18.4s, v1.s[0] \n"
"fmla v9.4s, %18.4s, v1.s[2] \n"
"fmla v10.4s, %27.4s, v0.s[0] \n"
"fmla v11.4s, %27.4s, v0.s[2] \n"
"fmla v12.4s, %27.4s, v1.s[0] \n"
"fmla v13.4s, %27.4s, v1.s[2] \n"
"ld1 {v4.h}[0], [%5] \n"
"fmla v6.4s, %19.4s, v0.s[1] \n"
"fmla v7.4s, %19.4s, v0.s[3] \n"
"fmla v8.4s, %19.4s, v1.s[1] \n"
"fmla v9.4s, %19.4s, v1.s[3] \n"
"fmla v10.4s, %28.4s, v0.s[1] \n"
"fmla v11.4s, %28.4s, v0.s[3] \n"
"fmla v12.4s, %28.4s, v1.s[1] \n"
"fmla v13.4s, %28.4s, v1.s[3] \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v6.4s, %20.4s, v0.s[2] \n"
"fmla v7.4s, %20.4s, v1.s[0] \n"
"fmla v8.4s, %20.4s, v1.s[2] \n"
"fmla v9.4s, %20.4s, v4.s[0] \n"
"fmla v10.4s, %29.4s, v0.s[2] \n"
"fmla v11.4s, %29.4s, v1.s[0] \n"
"fmla v12.4s, %29.4s, v1.s[2] \n"
"fmla v13.4s, %29.4s, v4.s[0] \n"
"shrn v6.4h, v6.4s, #16 \n"
"shrn v7.4h, v7.4s, #16 \n"
"shrn v8.4h, v8.4s, #16 \n"
"shrn v9.4h, v9.4s, #16 \n"
"shrn v10.4h, v10.4s, #16 \n"
"shrn v11.4h, v11.4s, #16 \n"
"st1 {v6.4h, v7.4h, v8.4h, v9.4h}, [%0], #32 \n"
"shrn v12.4h, v12.4s, #16 \n"
"shrn v13.4h, v13.4s, #16 \n"
"st1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%1], #32 \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr1_bf16), // %1
"=r"(outptr0), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2) // %5
: "0"(outptr0_bf16),
"1"(outptr1_bf16),
"2"(outptr0),
"3"(r0),
"4"(r1),
"5"(r2),
"w"(_k00_0), // %12
"w"(_k01_0), // %13
"w"(_k02_0), // %14
"w"(_k10_0), // %15
"w"(_k11_0), // %16
"w"(_k12_0), // %17
"w"(_k20_0), // %18
"w"(_k21_0), // %19
"w"(_k22_0), // %20
"w"(_k00_1), // %21
"w"(_k01_1), // %22
"w"(_k02_1), // %23
"w"(_k10_1), // %24
"w"(_k11_1), // %25
"w"(_k12_1), // %26
"w"(_k20_1), // %27
"w"(_k21_1), // %28
"w"(_k22_1) // %29
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13");
}
for (; j + 1 < outw; j += 2)
{
asm volatile(
// r0
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v0.4h}, [%3], #8 \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%2], #64 \n" // sum0 sum1
"shll v0.4s, v0.4h, #16 \n"
"fmla v10.4s, %12.4s, v0.s[0] \n"
"fmla v11.4s, %12.4s, v0.s[2] \n"
"fmla v12.4s, %21.4s, v0.s[0] \n"
"fmla v13.4s, %21.4s, v0.s[2] \n"
"ld1 {v1.h}[0], [%3] \n"
"fmla v10.4s, %13.4s, v0.s[1] \n"
"fmla v11.4s, %13.4s, v0.s[3] \n"
"fmla v12.4s, %22.4s, v0.s[1] \n"
"fmla v13.4s, %22.4s, v0.s[3] \n"
"shll v1.4s, v1.4h, #16 \n"
// r1
"prfm pldl1keep, [%4, #64] \n"
"ld1 {v2.4h}, [%4], #8 \n"
"fmla v10.4s, %14.4s, v0.s[2] \n"
"fmla v11.4s, %14.4s, v1.s[0] \n"
"fmla v12.4s, %23.4s, v0.s[2] \n"
"fmla v13.4s, %23.4s, v1.s[0] \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v10.4s, %15.4s, v2.s[0] \n"
"fmla v11.4s, %15.4s, v2.s[2] \n"
"fmla v12.4s, %24.4s, v2.s[0] \n"
"fmla v13.4s, %24.4s, v2.s[2] \n"
"ld1 {v3.h}[0], [%4] \n"
"fmla v10.4s, %16.4s, v2.s[1] \n"
"fmla v11.4s, %16.4s, v2.s[3] \n"
"fmla v12.4s, %25.4s, v2.s[1] \n"
"fmla v13.4s, %25.4s, v2.s[3] \n"
"shll v3.4s, v3.4h, #16 \n"
// r2
"prfm pldl1keep, [%5, #64] \n"
"ld1 {v0.4h}, [%5], #8 \n"
"fmla v10.4s, %17.4s, v2.s[2] \n"
"fmla v11.4s, %17.4s, v3.s[0] \n"
"fmla v12.4s, %26.4s, v2.s[2] \n"
"fmla v13.4s, %26.4s, v3.s[0] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v10.4s, %18.4s, v0.s[0] \n"
"fmla v11.4s, %18.4s, v0.s[2] \n"
"fmla v12.4s, %27.4s, v0.s[0] \n"
"fmla v13.4s, %27.4s, v0.s[2] \n"
"ld1 {v1.h}[0], [%5] \n"
"fmla v10.4s, %19.4s, v0.s[1] \n"
"fmla v11.4s, %19.4s, v0.s[3] \n"
"fmla v12.4s, %28.4s, v0.s[1] \n"
"fmla v13.4s, %28.4s, v0.s[3] \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v10.4s, %20.4s, v0.s[2] \n"
"fmla v11.4s, %20.4s, v1.s[0] \n"
"fmla v12.4s, %29.4s, v0.s[2] \n"
"fmla v13.4s, %29.4s, v1.s[0] \n"
"shrn v10.4h, v10.4s, #16 \n"
"shrn v11.4h, v11.4s, #16 \n"
"shrn v12.4h, v12.4s, #16 \n"
"shrn v13.4h, v13.4s, #16 \n"
"st1 {v10.4h, v11.4h}, [%0], #16 \n"
"st1 {v12.4h, v13.4h}, [%1], #16 \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr1_bf16), // %1
"=r"(outptr0), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2) // %5
: "0"(outptr0_bf16),
"1"(outptr1_bf16),
"2"(outptr0),
"3"(r0),
"4"(r1),
"5"(r2),
"w"(_k00_0), // %12
"w"(_k01_0), // %13
"w"(_k02_0), // %14
"w"(_k10_0), // %15
"w"(_k11_0), // %16
"w"(_k12_0), // %17
"w"(_k20_0), // %18
"w"(_k21_0), // %19
"w"(_k22_0), // %20
"w"(_k00_1), // %21
"w"(_k01_1), // %22
"w"(_k02_1), // %23
"w"(_k10_1), // %24
"w"(_k11_1), // %25
"w"(_k12_1), // %26
"w"(_k20_1), // %27
"w"(_k21_1), // %28
"w"(_k22_1) // %29
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13");
}
for (; j < outw; j++)
{
float32x4_t _sum0 = vld1q_f32(outptr0);
float32x4_t _sum1 = vld1q_f32(outptr0 + 4);
float32x4_t _r0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r0), 16));
float32x4_t _r1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r1), 16));
float32x4_t _r2 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r2), 16));
_sum0 = vfmaq_laneq_f32(_sum0, _k00_0, _r0, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k01_0, _r0, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k02_0, _r0, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _k10_0, _r1, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k11_0, _r1, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k12_0, _r1, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _k20_0, _r2, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k21_0, _r2, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k22_0, _r2, 2);
_sum1 = vfmaq_laneq_f32(_sum1, _k00_1, _r0, 0);
_sum1 = vfmaq_laneq_f32(_sum1, _k01_1, _r0, 1);
_sum1 = vfmaq_laneq_f32(_sum1, _k02_1, _r0, 2);
_sum1 = vfmaq_laneq_f32(_sum1, _k10_1, _r1, 0);
_sum1 = vfmaq_laneq_f32(_sum1, _k11_1, _r1, 1);
_sum1 = vfmaq_laneq_f32(_sum1, _k12_1, _r1, 2);
_sum1 = vfmaq_laneq_f32(_sum1, _k20_1, _r2, 0);
_sum1 = vfmaq_laneq_f32(_sum1, _k21_1, _r2, 1);
_sum1 = vfmaq_laneq_f32(_sum1, _k22_1, _r2, 2);
vst1_u16(outptr0_bf16, vshrn_n_u32(vreinterpretq_u32_f32(_sum0), 16));
vst1_u16(outptr1_bf16, vshrn_n_u32(vreinterpretq_u32_f32(_sum1), 16));
r0 += 2;
r1 += 2;
r2 += 2;
outptr0 += 8;
outptr0_bf16 += 4;
outptr1_bf16 += 4;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
k0 += 9 * 4;
k1 += 9 * 4;
}
}
#endif // __ARM_NEON && __aarch64__
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
Mat out0 = top_blob_fp32.channel(get_omp_thread_num());
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f);
out0.fill(_bias0);
const unsigned short* k0 = kernel.channel(p);
int q = 0;
for (; q < inch - 1; q++)
{
float* outptr0 = out0;
const Mat img0 = bottom_blob.channel(q);
const unsigned short* r0 = img0.row<const unsigned short>(0);
const unsigned short* r1 = img0.row<const unsigned short>(1);
const unsigned short* r2 = img0.row<const unsigned short>(2);
float32x4_t _k00 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0), 16));
float32x4_t _k01 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 4), 16));
float32x4_t _k02 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 8), 16));
float32x4_t _k10 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 12), 16));
float32x4_t _k11 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 16), 16));
float32x4_t _k12 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 20), 16));
float32x4_t _k20 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 24), 16));
float32x4_t _k21 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 28), 16));
float32x4_t _k22 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 32), 16));
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
#if __aarch64__
asm volatile(
// r0
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v0.4h, v1.4h}, [%1], #16 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%0] \n" // sum0
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v6.4s, %8.4s, v0.s[0] \n"
"fmla v7.4s, %8.4s, v0.s[2] \n"
"fmla v8.4s, %8.4s, v1.s[0] \n"
"fmla v9.4s, %8.4s, v1.s[2] \n"
"ld1 {v4.h}[0], [%1] \n"
"fmla v6.4s, %9.4s, v0.s[1] \n"
"fmla v7.4s, %9.4s, v0.s[3] \n"
"fmla v8.4s, %9.4s, v1.s[1] \n"
"fmla v9.4s, %9.4s, v1.s[3] \n"
"shll v4.4s, v4.4h, #16 \n"
// r1
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v2.4h, v3.4h}, [%2], #16 \n"
"fmla v6.4s, %10.4s, v0.s[2] \n"
"fmla v7.4s, %10.4s, v1.s[0] \n"
"fmla v8.4s, %10.4s, v1.s[2] \n"
"fmla v9.4s, %10.4s, v4.s[0] \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v6.4s, %11.4s, v2.s[0] \n"
"fmla v7.4s, %11.4s, v2.s[2] \n"
"fmla v8.4s, %11.4s, v3.s[0] \n"
"fmla v9.4s, %11.4s, v3.s[2] \n"
"ld1 {v5.h}[0], [%2] \n"
"fmla v6.4s, %12.4s, v2.s[1] \n"
"fmla v7.4s, %12.4s, v2.s[3] \n"
"fmla v8.4s, %12.4s, v3.s[1] \n"
"fmla v9.4s, %12.4s, v3.s[3] \n"
"shll v5.4s, v5.4h, #16 \n"
// r2
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v0.4h, v1.4h}, [%3], #16 \n"
"fmla v6.4s, %13.4s, v2.s[2] \n"
"fmla v7.4s, %13.4s, v3.s[0] \n"
"fmla v8.4s, %13.4s, v3.s[2] \n"
"fmla v9.4s, %13.4s, v5.s[0] \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v6.4s, %14.4s, v0.s[0] \n"
"fmla v7.4s, %14.4s, v0.s[2] \n"
"fmla v8.4s, %14.4s, v1.s[0] \n"
"fmla v9.4s, %14.4s, v1.s[2] \n"
"ld1 {v4.h}[0], [%3] \n"
"fmla v6.4s, %15.4s, v0.s[1] \n"
"fmla v7.4s, %15.4s, v0.s[3] \n"
"fmla v8.4s, %15.4s, v1.s[1] \n"
"fmla v9.4s, %15.4s, v1.s[3] \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v6.4s, %16.4s, v0.s[2] \n"
"fmla v7.4s, %16.4s, v1.s[0] \n"
"fmla v8.4s, %16.4s, v1.s[2] \n"
"fmla v9.4s, %16.4s, v4.s[0] \n"
"st1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22) // %16
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9");
#else // __aarch64__
asm volatile(
// r0
"pld [%1, #128] \n"
"vld1.u16 {d12-d13}, [%1]! \n"
"pld [%0, #512] \n"
"vldm %0, {d0-d7} \n" // sum0
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vld1.u16 {d12[0]}, [%1] \n"
"vmla.f32 q0, %q8, d8[0] \n"
"vmla.f32 q1, %q8, d9[0] \n"
"vmla.f32 q2, %q8, d10[0] \n"
"vmla.f32 q3, %q8, d11[0] \n"
"vmla.f32 q0, %q9, d8[1] \n"
"vmla.f32 q1, %q9, d9[1] \n"
"vshl.u32 d8, d12, #16 \n"
"vmla.f32 q2, %q9, d10[1] \n"
"vmla.f32 q3, %q9, d11[1] \n"
// r1
"pld [%2, #128] \n"
"vld1.u16 {d12-d13}, [%2]! \n"
"vmla.f32 q0, %q10, d9[0] \n"
"vmla.f32 q1, %q10, d10[0] \n"
"vmla.f32 q2, %q10, d11[0] \n"
"vmla.f32 q3, %q10, d8[0] \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vld1.u16 {d12[0]}, [%2] \n"
"vmla.f32 q0, %q11, d8[0] \n"
"vmla.f32 q1, %q11, d9[0] \n"
"vmla.f32 q2, %q11, d10[0] \n"
"vmla.f32 q3, %q11, d11[0] \n"
"vmla.f32 q0, %q12, d8[1] \n"
"vmla.f32 q1, %q12, d9[1] \n"
"vshl.u32 d8, d12, #16 \n"
"vmla.f32 q2, %q12, d10[1] \n"
"vmla.f32 q3, %q12, d11[1] \n"
// r2
"pld [%3, #128] \n"
"vld1.u16 {d12-d13}, [%3]! \n"
"vmla.f32 q0, %q13, d9[0] \n"
"vmla.f32 q1, %q13, d10[0] \n"
"vmla.f32 q2, %q13, d11[0] \n"
"vmla.f32 q3, %q13, d8[0] \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vld1.u16 {d12[0]}, [%3] \n"
"vmla.f32 q0, %q14, d8[0] \n"
"vmla.f32 q1, %q14, d9[0] \n"
"vmla.f32 q2, %q14, d10[0] \n"
"vmla.f32 q3, %q14, d11[0] \n"
"vmla.f32 q0, %q15, d8[1] \n"
"vmla.f32 q1, %q15, d9[1] \n"
"vshl.u32 d8, d12, #16 \n"
"vmla.f32 q2, %q15, d10[1] \n"
"vmla.f32 q3, %q15, d11[1] \n"
"vmla.f32 q0, %q16, d9[0] \n"
"vmla.f32 q1, %q16, d10[0] \n"
"vmla.f32 q2, %q16, d11[0] \n"
"vmla.f32 q3, %q16, d8[0] \n"
"vstm %0!, {d0-d7} \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22) // %16
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6");
#endif // __aarch64__
}
for (; j + 1 < outw; j += 2)
{
#if __aarch64__
asm volatile(
// r0
"prfm pldl1keep, [%1, #64] \n"
"ld1 {v0.4h}, [%1], #8 \n"
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v8.4s, v9.4s}, [%0] \n" // sum0
"shll v0.4s, v0.4h, #16 \n"
"fmul v6.4s, %8.4s, v0.s[0] \n"
"fmul v7.4s, %8.4s, v0.s[2] \n"
"ld1 {v1.h}[0], [%1] \n"
"fmla v8.4s, %9.4s, v0.s[1] \n"
"fmla v9.4s, %9.4s, v0.s[3] \n"
"shll v1.4s, v1.4h, #16 \n"
// r1
"prfm pldl1keep, [%2, #64] \n"
"ld1 {v2.4h}, [%2], #8 \n"
"fmla v6.4s, %10.4s, v0.s[2] \n"
"fmla v7.4s, %10.4s, v1.s[0] \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v8.4s, %11.4s, v2.s[0] \n"
"fmla v9.4s, %11.4s, v2.s[2] \n"
"ld1 {v3.h}[0], [%2] \n"
"fmla v6.4s, %12.4s, v2.s[1] \n"
"fmla v7.4s, %12.4s, v2.s[3] \n"
"shll v3.4s, v3.4h, #16 \n"
// r2
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v0.4h}, [%3], #8 \n"
"fmla v8.4s, %13.4s, v2.s[2] \n"
"fmla v9.4s, %13.4s, v3.s[0] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v6.4s, %14.4s, v0.s[0] \n"
"fmla v7.4s, %14.4s, v0.s[2] \n"
"ld1 {v1.h}[0], [%3] \n"
"fmla v8.4s, %15.4s, v0.s[1] \n"
"fmla v9.4s, %15.4s, v0.s[3] \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v6.4s, %16.4s, v0.s[2] \n"
"fmla v7.4s, %16.4s, v1.s[0] \n"
"fadd v8.4s, v8.4s, v6.4s \n"
"fadd v9.4s, v9.4s, v7.4s \n"
"st1 {v8.4s, v9.4s}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22) // %16
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9");
#else // __aarch64__
asm volatile(
// r0
"pld [%1, #64] \n"
"vld1.u16 {d9}, [%1]! \n"
"pld [%0, #256] \n"
"vld1.f32 {d4-d7}, [%0] \n" // sum0
"vshll.u16 q4, d9, #16 \n"
"vmul.f32 q0, %q8, d8[0] \n"
"vmul.f32 q1, %q8, d9[0] \n"
"vld1.u16 {d11[]}, [%1] \n"
"vmla.f32 q2, %q9, d8[1] \n"
"vmla.f32 q3, %q9, d9[1] \n"
"vshll.u16 q5, d11, #16 \n"
// r1
"pld [%2, #64] \n"
"vld1.u16 {d13}, [%2]! \n"
"vmla.f32 q0, %q10, d9[0] \n"
"vmla.f32 q1, %q10, d10[0] \n"
"vshll.u16 q6, d13, #16 \n"
"vmla.f32 q2, %q11, d12[0] \n"
"vmla.f32 q3, %q11, d13[0] \n"
"vld1.u16 {d9[]}, [%2] \n"
"vmla.f32 q0, %q12, d12[1] \n"
"vmla.f32 q1, %q12, d13[1] \n"
"vshll.u16 q4, d9, #16 \n"
// r2
"pld [%3, #64] \n"
"vld1.u16 {d11}, [%3]! \n"
"vmla.f32 q2, %q13, d13[0] \n"
"vmla.f32 q3, %q13, d8[0] \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q0, %q14, d10[0] \n"
"vmla.f32 q1, %q14, d11[0] \n"
"vld1.u16 {d13[]}, [%3] \n"
"vmla.f32 q2, %q15, d10[1] \n"
"vmla.f32 q3, %q15, d11[1] \n"
"vshll.u16 q6, d13, #16 \n"
"vmla.f32 q0, %q16, d11[0] \n"
"vmla.f32 q1, %q16, d12[0] \n"
"vadd.f32 q2, q2, q0 \n"
"vadd.f32 q3, q3, q1 \n"
"vst1.f32 {d4-d7}, [%0]! \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22) // %16
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6");
#endif // __aarch64__
}
for (; j < outw; j++)
{
float32x4_t _sum0 = vld1q_f32(outptr0);
float32x4_t _r0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r0), 16));
float32x4_t _r1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r1), 16));
float32x4_t _r2 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r2), 16));
#if __aarch64__
_sum0 = vfmaq_laneq_f32(_sum0, _k00, _r0, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k01, _r0, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k02, _r0, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _k10, _r1, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k11, _r1, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k12, _r1, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _k20, _r2, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k21, _r2, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k22, _r2, 2);
#else
_sum0 = vmlaq_lane_f32(_sum0, _k00, vget_low_f32(_r0), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k01, vget_low_f32(_r0), 1);
_sum0 = vmlaq_lane_f32(_sum0, _k02, vget_high_f32(_r0), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k10, vget_low_f32(_r1), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k11, vget_low_f32(_r1), 1);
_sum0 = vmlaq_lane_f32(_sum0, _k12, vget_high_f32(_r1), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k20, vget_low_f32(_r2), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k21, vget_low_f32(_r2), 1);
_sum0 = vmlaq_lane_f32(_sum0, _k22, vget_high_f32(_r2), 0);
#endif
vst1q_f32(outptr0, _sum0);
r0 += 2;
r1 += 2;
r2 += 2;
outptr0 += 4;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
k0 += 9 * 4;
}
for (; q < inch; q++)
{
unsigned short* outptr0_bf16 = top_blob.channel(p);
const float* outptr0 = out0;
const Mat img0 = bottom_blob.channel(q);
const unsigned short* r0 = img0.row<const unsigned short>(0);
const unsigned short* r1 = img0.row<const unsigned short>(1);
const unsigned short* r2 = img0.row<const unsigned short>(2);
float32x4_t _k00 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0), 16));
float32x4_t _k01 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 4), 16));
float32x4_t _k02 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 8), 16));
float32x4_t _k10 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 12), 16));
float32x4_t _k11 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 16), 16));
float32x4_t _k12 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 20), 16));
float32x4_t _k20 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 24), 16));
float32x4_t _k21 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 28), 16));
float32x4_t _k22 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 32), 16));
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
#if __aarch64__
asm volatile(
// r0
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.4h, v1.4h}, [%2], #16 \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%1], #64 \n" // sum0
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v6.4s, %10.4s, v0.s[0] \n"
"fmla v7.4s, %10.4s, v0.s[2] \n"
"fmla v8.4s, %10.4s, v1.s[0] \n"
"fmla v9.4s, %10.4s, v1.s[2] \n"
"ld1 {v4.h}[0], [%2] \n"
"fmla v6.4s, %11.4s, v0.s[1] \n"
"fmla v7.4s, %11.4s, v0.s[3] \n"
"fmla v8.4s, %11.4s, v1.s[1] \n"
"fmla v9.4s, %11.4s, v1.s[3] \n"
"shll v4.4s, v4.4h, #16 \n"
// r1
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v2.4h, v3.4h}, [%3], #16 \n"
"fmla v6.4s, %12.4s, v0.s[2] \n"
"fmla v7.4s, %12.4s, v1.s[0] \n"
"fmla v8.4s, %12.4s, v1.s[2] \n"
"fmla v9.4s, %12.4s, v4.s[0] \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v6.4s, %13.4s, v2.s[0] \n"
"fmla v7.4s, %13.4s, v2.s[2] \n"
"fmla v8.4s, %13.4s, v3.s[0] \n"
"fmla v9.4s, %13.4s, v3.s[2] \n"
"ld1 {v5.h}[0], [%3] \n"
"fmla v6.4s, %14.4s, v2.s[1] \n"
"fmla v7.4s, %14.4s, v2.s[3] \n"
"fmla v8.4s, %14.4s, v3.s[1] \n"
"fmla v9.4s, %14.4s, v3.s[3] \n"
"shll v5.4s, v5.4h, #16 \n"
// r2
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v0.4h, v1.4h}, [%4], #16 \n"
"fmla v6.4s, %15.4s, v2.s[2] \n"
"fmla v7.4s, %15.4s, v3.s[0] \n"
"fmla v8.4s, %15.4s, v3.s[2] \n"
"fmla v9.4s, %15.4s, v5.s[0] \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v6.4s, %16.4s, v0.s[0] \n"
"fmla v7.4s, %16.4s, v0.s[2] \n"
"fmla v8.4s, %16.4s, v1.s[0] \n"
"fmla v9.4s, %16.4s, v1.s[2] \n"
"ld1 {v4.h}[0], [%4] \n"
"fmla v6.4s, %17.4s, v0.s[1] \n"
"fmla v7.4s, %17.4s, v0.s[3] \n"
"fmla v8.4s, %17.4s, v1.s[1] \n"
"fmla v9.4s, %17.4s, v1.s[3] \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v6.4s, %18.4s, v0.s[2] \n"
"fmla v7.4s, %18.4s, v1.s[0] \n"
"fmla v8.4s, %18.4s, v1.s[2] \n"
"fmla v9.4s, %18.4s, v4.s[0] \n"
"shrn v6.4h, v6.4s, #16 \n"
"shrn v7.4h, v7.4s, #16 \n"
"shrn v8.4h, v8.4s, #16 \n"
"shrn v9.4h, v9.4s, #16 \n"
"st1 {v6.4h, v7.4h, v8.4h, v9.4h}, [%0], #32 \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k00), // %10
"w"(_k01), // %11
"w"(_k02), // %12
"w"(_k10), // %13
"w"(_k11), // %14
"w"(_k12), // %15
"w"(_k20), // %16
"w"(_k21), // %17
"w"(_k22) // %18
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9");
#else // __aarch64__
asm volatile(
// r0
"pld [%2, #128] \n"
"vld1.u16 {d12-d13}, [%2]! \n"
"pld [%1, #512] \n"
"vldm %1!, {d0-d7} \n" // sum0
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vld1.u16 {d12[0]}, [%2] \n"
"vmla.f32 q0, %q10, d8[0] \n"
"vmla.f32 q1, %q10, d9[0] \n"
"vmla.f32 q2, %q10, d10[0] \n"
"vmla.f32 q3, %q10, d11[0] \n"
"vmla.f32 q0, %q11, d8[1] \n"
"vmla.f32 q1, %q11, d9[1] \n"
"vshl.u32 d8, d12, #16 \n"
"vmla.f32 q2, %q11, d10[1] \n"
"vmla.f32 q3, %q11, d11[1] \n"
// r1
"pld [%3, #128] \n"
"vld1.u16 {d12-d13}, [%3]! \n"
"vmla.f32 q0, %q12, d9[0] \n"
"vmla.f32 q1, %q12, d10[0] \n"
"vmla.f32 q2, %q12, d11[0] \n"
"vmla.f32 q3, %q12, d8[0] \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vld1.u16 {d12[0]}, [%3] \n"
"vmla.f32 q0, %q13, d8[0] \n"
"vmla.f32 q1, %q13, d9[0] \n"
"vmla.f32 q2, %q13, d10[0] \n"
"vmla.f32 q3, %q13, d11[0] \n"
"vmla.f32 q0, %q14, d8[1] \n"
"vmla.f32 q1, %q14, d9[1] \n"
"vshl.u32 d8, d12, #16 \n"
"vmla.f32 q2, %q14, d10[1] \n"
"vmla.f32 q3, %q14, d11[1] \n"
// r2
"pld [%4, #128] \n"
"vld1.u16 {d12-d13}, [%4]! \n"
"vmla.f32 q0, %q15, d9[0] \n"
"vmla.f32 q1, %q15, d10[0] \n"
"vmla.f32 q2, %q15, d11[0] \n"
"vmla.f32 q3, %q15, d8[0] \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vld1.u16 {d12[0]}, [%4] \n"
"vmla.f32 q0, %q16, d8[0] \n"
"vmla.f32 q1, %q16, d9[0] \n"
"vmla.f32 q2, %q16, d10[0] \n"
"vmla.f32 q3, %q16, d11[0] \n"
"vmla.f32 q0, %q17, d8[1] \n"
"vmla.f32 q1, %q17, d9[1] \n"
"vshl.u32 d8, d12, #16 \n"
"vmla.f32 q2, %q17, d10[1] \n"
"vmla.f32 q3, %q17, d11[1] \n"
"vmla.f32 q0, %q18, d9[0] \n"
"vmla.f32 q1, %q18, d10[0] \n"
"vmla.f32 q2, %q18, d11[0] \n"
"vmla.f32 q3, %q18, d8[0] \n"
"vshrn.u32 d0, q0, #16 \n"
"vshrn.u32 d1, q1, #16 \n"
"vshrn.u32 d2, q2, #16 \n"
"vshrn.u32 d3, q3, #16 \n"
"vst1.u16 {d0-d3}, [%0 :64]! \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k00), // %10
"w"(_k01), // %11
"w"(_k02), // %12
"w"(_k10), // %13
"w"(_k11), // %14
"w"(_k12), // %15
"w"(_k20), // %16
"w"(_k21), // %17
"w"(_k22) // %18
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6");
#endif // __aarch64__
}
for (; j + 1 < outw; j += 2)
{
#if __aarch64__
asm volatile(
// r0
"prfm pldl1keep, [%2, #64] \n"
"ld1 {v0.4h}, [%2], #8 \n"
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v8.4s, v9.4s}, [%1], #32 \n" // sum0
"shll v0.4s, v0.4h, #16 \n"
"fmul v6.4s, %10.4s, v0.s[0] \n"
"fmul v7.4s, %10.4s, v0.s[2] \n"
"ld1 {v1.h}[0], [%2] \n"
"fmla v8.4s, %11.4s, v0.s[1] \n"
"fmla v9.4s, %11.4s, v0.s[3] \n"
"shll v1.4s, v1.4h, #16 \n"
// r1
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v2.4h}, [%3], #8 \n"
"fmla v6.4s, %12.4s, v0.s[2] \n"
"fmla v7.4s, %12.4s, v1.s[0] \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v8.4s, %13.4s, v2.s[0] \n"
"fmla v9.4s, %13.4s, v2.s[2] \n"
"ld1 {v3.h}[0], [%3] \n"
"fmla v6.4s, %14.4s, v2.s[1] \n"
"fmla v7.4s, %14.4s, v2.s[3] \n"
"shll v3.4s, v3.4h, #16 \n"
// r2
"prfm pldl1keep, [%4, #64] \n"
"ld1 {v0.4h}, [%4], #8 \n"
"fmla v8.4s, %15.4s, v2.s[2] \n"
"fmla v9.4s, %15.4s, v3.s[0] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v6.4s, %16.4s, v0.s[0] \n"
"fmla v7.4s, %16.4s, v0.s[2] \n"
"ld1 {v1.h}[0], [%4] \n"
"fmla v8.4s, %17.4s, v0.s[1] \n"
"fmla v9.4s, %17.4s, v0.s[3] \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v6.4s, %18.4s, v0.s[2] \n"
"fmla v7.4s, %18.4s, v1.s[0] \n"
"fadd v8.4s, v8.4s, v6.4s \n"
"fadd v9.4s, v9.4s, v7.4s \n"
"shrn v8.4h, v8.4s, #16 \n"
"shrn v9.4h, v9.4s, #16 \n"
"st1 {v8.4h, v9.4h}, [%0], #16 \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k00), // %10
"w"(_k01), // %11
"w"(_k02), // %12
"w"(_k10), // %13
"w"(_k11), // %14
"w"(_k12), // %15
"w"(_k20), // %16
"w"(_k21), // %17
"w"(_k22) // %18
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9");
#else // __aarch64__
asm volatile(
// r0
"pld [%2, #64] \n"
"vld1.u16 {d9}, [%2]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d4-d7}, [%1]! \n" // sum0
"vshll.u16 q4, d9, #16 \n"
"vmul.f32 q0, %q10, d8[0] \n"
"vmul.f32 q1, %q10, d9[0] \n"
"vld1.u16 {d11[]}, [%2] \n"
"vmla.f32 q2, %q11, d8[1] \n"
"vmla.f32 q3, %q11, d9[1] \n"
"vshll.u16 q5, d11, #16 \n"
// r1
"pld [%3, #64] \n"
"vld1.u16 {d13}, [%3]! \n"
"vmla.f32 q0, %q12, d9[0] \n"
"vmla.f32 q1, %q12, d10[0] \n"
"vshll.u16 q6, d13, #16 \n"
"vmla.f32 q2, %q13, d12[0] \n"
"vmla.f32 q3, %q13, d13[0] \n"
"vld1.u16 {d9[]}, [%3] \n"
"vmla.f32 q0, %q14, d12[1] \n"
"vmla.f32 q1, %q14, d13[1] \n"
"vshll.u16 q4, d9, #16 \n"
// r2
"pld [%4, #64] \n"
"vld1.u16 {d11}, [%4]! \n"
"vmla.f32 q2, %q15, d13[0] \n"
"vmla.f32 q3, %q15, d8[0] \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q0, %q16, d10[0] \n"
"vmla.f32 q1, %q16, d11[0] \n"
"vld1.u16 {d13[]}, [%4] \n"
"vmla.f32 q2, %q17, d10[1] \n"
"vmla.f32 q3, %q17, d11[1] \n"
"vshll.u16 q6, d13, #16 \n"
"vmla.f32 q0, %q18, d11[0] \n"
"vmla.f32 q1, %q18, d12[0] \n"
"vadd.f32 q2, q2, q0 \n"
"vadd.f32 q3, q3, q1 \n"
"vshrn.u32 d2, q2, #16 \n"
"vshrn.u32 d3, q3, #16 \n"
"vst1.u16 {d2-d3}, [%0 :64]! \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k00), // %10
"w"(_k01), // %11
"w"(_k02), // %12
"w"(_k10), // %13
"w"(_k11), // %14
"w"(_k12), // %15
"w"(_k20), // %16
"w"(_k21), // %17
"w"(_k22) // %18
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6");
#endif // __aarch64__
}
for (; j < outw; j++)
{
float32x4_t _sum0 = vld1q_f32(outptr0);
float32x4_t _r0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r0), 16));
float32x4_t _r1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r1), 16));
float32x4_t _r2 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r2), 16));
#if __aarch64__
_sum0 = vfmaq_laneq_f32(_sum0, _k00, _r0, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k01, _r0, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k02, _r0, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _k10, _r1, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k11, _r1, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k12, _r1, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _k20, _r2, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k21, _r2, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k22, _r2, 2);
#else
_sum0 = vmlaq_lane_f32(_sum0, _k00, vget_low_f32(_r0), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k01, vget_low_f32(_r0), 1);
_sum0 = vmlaq_lane_f32(_sum0, _k02, vget_high_f32(_r0), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k10, vget_low_f32(_r1), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k11, vget_low_f32(_r1), 1);
_sum0 = vmlaq_lane_f32(_sum0, _k12, vget_high_f32(_r1), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k20, vget_low_f32(_r2), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k21, vget_low_f32(_r2), 1);
_sum0 = vmlaq_lane_f32(_sum0, _k22, vget_high_f32(_r2), 0);
#endif
vst1_u16(outptr0_bf16, vshrn_n_u32(vreinterpretq_u32_f32(_sum0), 16));
r0 += 2;
r1 += 2;
r2 += 2;
outptr0 += 4;
outptr0_bf16 += 4;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
k0 += 9 * 4;
}
}
}
|
GB_unaryop__ainv_int64_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_int64_int64
// op(A') function: GB_tran__ainv_int64_int64
// C type: int64_t
// A type: int64_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = -aij
// entry type of the input matrix A
#define GB_ATYPE \
int64_t
// entry type of the output matrix C
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
// Cx [p]: the p-th entry of the output array
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
int64_t z = (int64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// GB_unop__ainv_int64_int64: Cx = -Ax, entry by entry (additive inverse).
// Cx and Ax must each hold anz entries; the work is split statically
// across nthreads OpenMP threads.  Returns GrB_NO_VALUE when this
// operator/type combination was compiled out (GB_DISABLE).
GrB_Info GB_unop__ainv_int64_int64
(
    int64_t *restrict Cx,
    const int64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // each entry is independent, so a static schedule balances the work
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t k = 0 ; k < anz ; k++)
    {
        // Cx [k] = -((int64_t) Ax [k]), via the GB_* macros above
        GB_CAST_OP (k, k) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// GB_tran__ainv_int64_int64: C = op (A'), i.e. transpose A and apply
// cij = -aij (see the header comments above).  The transpose loop itself
// lives in the template file "GB_unaryop_transpose.c", specialized through
// the GB_* macros defined earlier in this (auto-generated) file.
GrB_Info GB_tran__ainv_int64_int64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
// operator/type combination compiled out; caller uses the generic kernel
return (GrB_NO_VALUE) ;
#else
// phase 2 of 2 of the transpose template (phase 1 counted row entries)
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
omp1.c | /*Example to show outlining vs. inlining*/
#ifdef _OPENMP
#include <omp.h>
#endif
void do_sth(int a, int b, int c)
{
}
/* Each thread gets a firstprivate copy of 'a' (initialized from the value
 * here) and a private, uninitialized copy of 'c'; 'b' is shared.  The
 * variables were previously read uninitialized (firstprivate copy of 'a',
 * and all three passed to do_sth), which is undefined behavior, so they
 * are now initialized before the parallel region. */
int main(void)
{
    int a = 0, b = 0, c = 0;
    #pragma omp parallel private(c) firstprivate(a)
    do_sth(a, b, c);
    return 0;
}
|
_hypre_struct_mv.h |
/*** DO NOT EDIT THIS FILE DIRECTLY (use 'headers' to generate) ***/
#ifndef hypre_STRUCT_MV_HEADER
#define hypre_STRUCT_MV_HEADER
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "HYPRE_struct_mv.h"
#include "_hypre_utilities.h"
#if defined(HYPRE_USING_RAJA)
/******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Header info for the BoxLoop
*
*****************************************************************************/
/*--------------------------------------------------------------------------
* BoxLoop macros:
*--------------------------------------------------------------------------*/
#ifndef HYPRE_NEWBOXLOOP_HEADER
#define HYPRE_NEWBOXLOOP_HEADER
extern "C++" {
#include <RAJA/RAJA.hpp>
}
using namespace RAJA;
typedef struct hypre_Boxloop_struct
{
/* loop extent per dimension (up to 3) */
HYPRE_Int lsize0,lsize1,lsize2;
/* per-dimension stride applied to the loop index */
HYPRE_Int strides0,strides1,strides2;
/* loop start relative to the data-box origin (start - imin; see
   zypre_BoxLoopDataDeclareK) */
HYPRE_Int bstart0,bstart1,bstart2;
/* data-box extent per dimension (imax - imin) */
HYPRE_Int bsize0,bsize1,bsize2;
} hypre_Boxloop;
#if defined(HYPRE_USING_CUDA) /* RAJA with CUDA, running on device */
#define BLOCKSIZE 256
#define hypre_RAJA_DEVICE RAJA_DEVICE
#define hypre_raja_exec_policy cuda_exec<BLOCKSIZE>
/* #define hypre_raja_reduce_policy cuda_reduce_atomic<BLOCKSIZE> */
#define hypre_raja_reduce_policy cuda_reduce<BLOCKSIZE>
#define hypre_fence()
/*
#define hypre_fence() \
cudaError err = cudaGetLastError();\
if ( cudaSuccess != err ) {\
printf("\n ERROR zypre_newBoxLoop: %s in %s(%d) function %s\n",cudaGetErrorString(err),__FILE__,__LINE__,__FUNCTION__); \
}\
hypre_CheckErrorDevice(cudaDeviceSynchronize());
*/
#elif defined(HYPRE_USING_DEVICE_OPENMP) /* RAJA with OpenMP (>4.5), running on device */
//TODO
#elif defined(HYPRE_USING_OPENMP) /* RAJA with OpenMP, running on host (CPU) */
#define hypre_RAJA_DEVICE
#define hypre_raja_exec_policy omp_for_exec
#define hypre_raja_reduce_policy omp_reduce
#define hypre_fence()
#else /* RAJA, running on host (CPU) */
#define hypre_RAJA_DEVICE
#define hypre_raja_exec_policy seq_exec
#define hypre_raja_reduce_policy seq_reduce
#define hypre_fence()
#endif /* #if defined(HYPRE_USING_CUDA) */
/* Compute hypre__i, the linear data offset of the current (up to 3-D)
 * loop index local_idx within box k.  NOTE: the final line must not end
 * with a line continuation — with blank lines removed, a trailing '\'
 * here absorbs the next #define into this macro. */
#define zypre_BoxLoopIncK(k,box,hypre__i) \
HYPRE_Int hypre_boxD##k = 1; \
HYPRE_Int hypre__i = 0; \
hypre__i += (hypre_IndexD(local_idx, 0)*box.strides0 + box.bstart0) * hypre_boxD##k; \
hypre_boxD##k *= hypre_max(0, box.bsize0 + 1); \
hypre__i += (hypre_IndexD(local_idx, 1)*box.strides1 + box.bstart1) * hypre_boxD##k; \
hypre_boxD##k *= hypre_max(0, box.bsize1 + 1); \
hypre__i += (hypre_IndexD(local_idx, 2)*box.strides2 + box.bstart2) * hypre_boxD##k; \
hypre_boxD##k *= hypre_max(0, box.bsize2 + 1);
#define zypre_newBoxLoopInit(ndim,loop_size) \
HYPRE_Int hypre__tot = 1; \
for (HYPRE_Int d = 0;d < ndim;d ++) \
hypre__tot *= loop_size[d];
#define zypre_newBoxLoopDeclare(box) \
hypre_Index local_idx; \
HYPRE_Int idx_local = idx; \
hypre_IndexD(local_idx, 0) = idx_local % box.lsize0; \
idx_local = idx_local / box.lsize0; \
hypre_IndexD(local_idx, 1) = idx_local % box.lsize1; \
idx_local = idx_local / box.lsize1; \
hypre_IndexD(local_idx, 2) = idx_local % box.lsize2;
#define zypre_BoxLoopDataDeclareK(k,ndim,loop_size,dbox,start,stride) \
hypre_Boxloop databox##k; \
databox##k.lsize0 = loop_size[0]; \
databox##k.strides0 = stride[0]; \
databox##k.bstart0 = start[0] - dbox->imin[0]; \
databox##k.bsize0 = dbox->imax[0]-dbox->imin[0]; \
if (ndim > 1) \
{ \
databox##k.lsize1 = loop_size[1]; \
databox##k.strides1 = stride[1]; \
databox##k.bstart1 = start[1] - dbox->imin[1]; \
databox##k.bsize1 = dbox->imax[1]-dbox->imin[1]; \
} \
else \
{ \
databox##k.lsize1 = 1; \
databox##k.strides1 = 0; \
databox##k.bstart1 = 0; \
databox##k.bsize1 = 0; \
} \
if (ndim == 3) \
{ \
databox##k.lsize2 = loop_size[2]; \
databox##k.strides2 = stride[2]; \
databox##k.bstart2 = start[2] - dbox->imin[2]; \
databox##k.bsize2 = dbox->imax[2]-dbox->imin[2]; \
} \
else \
{ \
databox##k.lsize2 = 1; \
databox##k.strides2 = 0; \
databox##k.bstart2 = 0; \
databox##k.bsize2 = 0; \
}
#define zypre_newBoxLoop0Begin(ndim, loop_size) \
{ \
zypre_newBoxLoopInit(ndim,loop_size); \
forall< hypre_raja_exec_policy >(RangeSegment(0, hypre__tot), [=] hypre_RAJA_DEVICE (HYPRE_Int idx) \
{
#define zypre_newBoxLoop0End() \
}); \
hypre_fence(); \
}
#define zypre_newBoxLoop1Begin(ndim, loop_size, \
dbox1, start1, stride1, i1) \
{ \
zypre_newBoxLoopInit(ndim,loop_size); \
zypre_BoxLoopDataDeclareK(1,ndim,loop_size,dbox1,start1,stride1); \
forall< hypre_raja_exec_policy >(RangeSegment(0, hypre__tot), [=] hypre_RAJA_DEVICE (HYPRE_Int idx) \
{ \
zypre_newBoxLoopDeclare(databox1); \
zypre_BoxLoopIncK(1,databox1,i1);
#define zypre_newBoxLoop1End(i1) \
}); \
hypre_fence(); \
}
#define zypre_newBoxLoop2Begin(ndim, loop_size, \
dbox1, start1, stride1, i1, \
dbox2, start2, stride2, i2) \
{ \
zypre_newBoxLoopInit(ndim,loop_size); \
zypre_BoxLoopDataDeclareK(1,ndim,loop_size,dbox1,start1,stride1); \
zypre_BoxLoopDataDeclareK(2,ndim,loop_size,dbox2,start2,stride2); \
forall< hypre_raja_exec_policy >(RangeSegment(0, hypre__tot), [=] hypre_RAJA_DEVICE (HYPRE_Int idx) \
{ \
zypre_newBoxLoopDeclare(databox1); \
zypre_BoxLoopIncK(1,databox1,i1); \
zypre_BoxLoopIncK(2,databox2,i2);
#define zypre_newBoxLoop2End(i1, i2) \
}); \
hypre_fence(); \
}
#define zypre_newBoxLoop3Begin(ndim, loop_size, \
dbox1, start1, stride1, i1, \
dbox2, start2, stride2, i2, \
dbox3, start3, stride3, i3) \
{ \
zypre_newBoxLoopInit(ndim,loop_size); \
zypre_BoxLoopDataDeclareK(1,ndim,loop_size,dbox1,start1,stride1); \
zypre_BoxLoopDataDeclareK(2,ndim,loop_size,dbox2,start2,stride2); \
zypre_BoxLoopDataDeclareK(3,ndim,loop_size,dbox3,start3,stride3); \
forall< hypre_raja_exec_policy >(RangeSegment(0, hypre__tot), [=] hypre_RAJA_DEVICE (HYPRE_Int idx) \
{ \
zypre_newBoxLoopDeclare(databox1); \
zypre_BoxLoopIncK(1,databox1,i1); \
zypre_BoxLoopIncK(2,databox2,i2); \
zypre_BoxLoopIncK(3,databox3,i3);
#define zypre_newBoxLoop3End(i1, i2, i3) \
}); \
hypre_fence(); \
}
#define zypre_newBoxLoop4Begin(ndim, loop_size, \
dbox1, start1, stride1, i1, \
dbox2, start2, stride2, i2, \
dbox3, start3, stride3, i3, \
dbox4, start4, stride4, i4) \
{ \
zypre_newBoxLoopInit(ndim,loop_size); \
zypre_BoxLoopDataDeclareK(1,ndim,loop_size,dbox1,start1,stride1); \
zypre_BoxLoopDataDeclareK(2,ndim,loop_size,dbox2,start2,stride2); \
zypre_BoxLoopDataDeclareK(3,ndim,loop_size,dbox3,start3,stride3); \
zypre_BoxLoopDataDeclareK(4,ndim,loop_size,dbox4,start4,stride4); \
forall< hypre_raja_exec_policy >(RangeSegment(0, hypre__tot), [=] hypre_RAJA_DEVICE (HYPRE_Int idx) \
{ \
zypre_newBoxLoopDeclare(databox1); \
zypre_BoxLoopIncK(1,databox1,i1); \
zypre_BoxLoopIncK(2,databox2,i2); \
zypre_BoxLoopIncK(3,databox3,i3); \
zypre_BoxLoopIncK(4,databox4,i4);
#define zypre_newBoxLoop4End(i1, i2, i3, i4) \
}); \
hypre_fence(); \
}
#define zypre_BasicBoxLoopDataDeclareK(k,ndim,loop_size,stride) \
hypre_Boxloop databox##k; \
databox##k.lsize0 = loop_size[0]; \
databox##k.strides0 = stride[0]; \
databox##k.bstart0 = 0; \
databox##k.bsize0 = 0; \
if (ndim > 1) \
{ \
databox##k.lsize1 = loop_size[1]; \
databox##k.strides1 = stride[1]; \
databox##k.bstart1 = 0; \
databox##k.bsize1 = 0; \
} \
else \
{ \
databox##k.lsize1 = 1; \
databox##k.strides1 = 0; \
databox##k.bstart1 = 0; \
databox##k.bsize1 = 0; \
} \
if (ndim == 3) \
{ \
databox##k.lsize2 = loop_size[2]; \
databox##k.strides2 = stride[2]; \
databox##k.bstart2 = 0; \
databox##k.bsize2 = 0; \
} \
else \
{ \
databox##k.lsize2 = 1; \
databox##k.strides2 = 0; \
databox##k.bstart2 = 0; \
databox##k.bsize2 = 0; \
}
/* Open a RAJA forall over two "basic" boxes (strides only, no data box);
 * deliberately left open — the matching *End macro closes the lambda.
 * The last line must not carry a line continuation, or the following
 * #define is swallowed into this macro. */
#define zypre_newBasicBoxLoop2Begin(ndim, loop_size, \
stride1, i1, \
stride2, i2) \
{ \
zypre_newBoxLoopInit(ndim,loop_size); \
zypre_BasicBoxLoopDataDeclareK(1,ndim,loop_size,stride1); \
zypre_BasicBoxLoopDataDeclareK(2,ndim,loop_size,stride2); \
forall< hypre_raja_exec_policy >(RangeSegment(0, hypre__tot), [=] hypre_RAJA_DEVICE (HYPRE_Int idx) \
{ \
zypre_newBoxLoopDeclare(databox1); \
zypre_BoxLoopIncK(1,databox1,i1); \
zypre_BoxLoopIncK(2,databox2,i2);
#define hypre_LoopBegin(size,idx) \
{ \
forall< hypre_raja_exec_policy >(RangeSegment(0, size), [=] hypre_RAJA_DEVICE (HYPRE_Int idx) \
{
#define hypre_LoopEnd() \
}); \
hypre_fence(); \
}
#define zypre_newBoxLoopSetOneBlock()
#define hypre_newBoxLoopGetIndex(index) \
index[0] = hypre_IndexD(local_idx, 0); \
index[1] = hypre_IndexD(local_idx, 1); \
index[2] = hypre_IndexD(local_idx, 2);
#define hypre_BoxLoopGetIndex zypre_BoxLoopGetIndex
#define hypre_BoxLoopSetOneBlock zypre_newBoxLoopSetOneBlock
#define hypre_BoxLoopBlock() 0
#define hypre_BoxLoop0Begin zypre_newBoxLoop0Begin
#define hypre_BoxLoop0For zypre_newBoxLoop0For
#define hypre_BoxLoop0End zypre_newBoxLoop0End
#define hypre_BoxLoop1Begin zypre_newBoxLoop1Begin
#define hypre_BoxLoop1For zypre_newBoxLoop1For
#define hypre_BoxLoop1End zypre_newBoxLoop1End
#define hypre_BoxLoop2Begin zypre_newBoxLoop2Begin
#define hypre_BoxLoop2For zypre_newBoxLoop2For
#define hypre_BoxLoop2End zypre_newBoxLoop2End
#define hypre_BoxLoop3Begin zypre_newBoxLoop3Begin
#define hypre_BoxLoop3For zypre_newBoxLoop3For
#define hypre_BoxLoop3End zypre_newBoxLoop3End
#define hypre_BoxLoop4Begin zypre_newBoxLoop4Begin
#define hypre_BoxLoop4For zypre_newBoxLoop4For
#define hypre_BoxLoop4End zypre_newBoxLoop4End
#define hypre_newBoxLoopInit zypre_newBoxLoopInit
#define hypre_BasicBoxLoop2Begin zypre_newBasicBoxLoop2Begin
/* Reduction */
#define hypre_BoxLoop1ReductionBegin(ndim, loop_size, dbox1, start1, stride1, i1, reducesum) \
hypre_BoxLoop1Begin(ndim, loop_size, dbox1, start1, stride1, i1)
#define hypre_BoxLoop1ReductionEnd(i1, reducesum) \
hypre_BoxLoop1End(i1)
#define hypre_BoxLoop2ReductionBegin(ndim, loop_size, dbox1, start1, stride1, i1, \
dbox2, start2, stride2, i2, reducesum) \
hypre_BoxLoop2Begin(ndim, loop_size, dbox1, start1, stride1, i1, \
dbox2, start2, stride2, i2)
#define hypre_BoxLoop2ReductionEnd(i1, i2, reducesum) \
hypre_BoxLoop2End(i1, i2)
#endif
#elif defined(HYPRE_USING_KOKKOS)
/******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Header info for the BoxLoop
*
*****************************************************************************/
/*--------------------------------------------------------------------------
* BoxLoop macros:
*--------------------------------------------------------------------------*/
#ifndef HYPRE_NEWBOXLOOP_HEADER
#define HYPRE_NEWBOXLOOP_HEADER
extern "C++" {
#include <Kokkos_Core.hpp>
using namespace Kokkos;
}
#if defined( KOKKOS_HAVE_MPI )
#include <mpi.h>
#endif
typedef struct hypre_Boxloop_struct
{
/* loop extent per dimension (up to 3) */
HYPRE_Int lsize0,lsize1,lsize2;
/* per-dimension stride applied to the loop index */
HYPRE_Int strides0,strides1,strides2;
/* loop start relative to the data-box origin (start - imin; see
   hypre_BoxLoopDataDeclareK) */
HYPRE_Int bstart0,bstart1,bstart2;
/* data-box extent per dimension (imax - imin) */
HYPRE_Int bsize0,bsize1,bsize2;
} hypre_Boxloop;
#define hypre_fence()
/*
#define hypre_fence() \
cudaError err = cudaGetLastError(); \
if ( cudaSuccess != err ) { \
printf("\n ERROR hypre_newBoxLoop: %s in %s(%d) function %s\n", cudaGetErrorString(err),__FILE__,__LINE__,__FUNCTION__); \
} \
hypre_CheckErrorDevice(cudaDeviceSynchronize());
*/
#define hypre_newBoxLoopInit(ndim,loop_size) \
HYPRE_Int hypre__tot = 1; \
for (HYPRE_Int d = 0;d < ndim;d ++) \
hypre__tot *= loop_size[d];
/* Compute hypre__i, the linear data offset of the current (up to 3-D)
 * loop index local_idx within box k (Kokkos variant).  The final line
 * must not end with a line continuation — with blank lines removed, a
 * trailing '\' absorbs the next #define into this macro. */
#define hypre_BoxLoopIncK(k,box,hypre__i) \
HYPRE_Int hypre_boxD##k = 1; \
HYPRE_Int hypre__i = 0; \
hypre__i += (hypre_IndexD(local_idx, 0)*box.strides0 + box.bstart0) * hypre_boxD##k; \
hypre_boxD##k *= hypre_max(0, box.bsize0 + 1); \
hypre__i += (hypre_IndexD(local_idx, 1)*box.strides1 + box.bstart1) * hypre_boxD##k; \
hypre_boxD##k *= hypre_max(0, box.bsize1 + 1); \
hypre__i += (hypre_IndexD(local_idx, 2)*box.strides2 + box.bstart2) * hypre_boxD##k; \
hypre_boxD##k *= hypre_max(0, box.bsize2 + 1);
#define hypre_newBoxLoopDeclare(box) \
hypre_Index local_idx; \
HYPRE_Int idx_local = idx; \
hypre_IndexD(local_idx, 0) = idx_local % box.lsize0; \
idx_local = idx_local / box.lsize0; \
hypre_IndexD(local_idx, 1) = idx_local % box.lsize1; \
idx_local = idx_local / box.lsize1; \
hypre_IndexD(local_idx, 2) = idx_local % box.lsize2;
#define hypre_BoxLoopDataDeclareK(k,ndim,loop_size,dbox,start,stride) \
hypre_Boxloop databox##k; \
databox##k.lsize0 = loop_size[0]; \
databox##k.strides0 = stride[0]; \
databox##k.bstart0 = start[0] - dbox->imin[0]; \
databox##k.bsize0 = dbox->imax[0]-dbox->imin[0]; \
if (ndim > 1) \
{ \
databox##k.lsize1 = loop_size[1]; \
databox##k.strides1 = stride[1]; \
databox##k.bstart1 = start[1] - dbox->imin[1]; \
databox##k.bsize1 = dbox->imax[1]-dbox->imin[1]; \
} \
else \
{ \
databox##k.lsize1 = 1; \
databox##k.strides1 = 0; \
databox##k.bstart1 = 0; \
databox##k.bsize1 = 0; \
} \
if (ndim == 3) \
{ \
databox##k.lsize2 = loop_size[2]; \
databox##k.strides2 = stride[2]; \
databox##k.bstart2 = start[2] - dbox->imin[2]; \
databox##k.bsize2 = dbox->imax[2]-dbox->imin[2]; \
} \
else \
{ \
databox##k.lsize2 = 1; \
databox##k.strides2 = 0; \
databox##k.bstart2 = 0; \
databox##k.bsize2 = 0; \
}
#define hypre_newBoxLoop0Begin(ndim, loop_size) \
{ \
hypre_newBoxLoopInit(ndim,loop_size); \
Kokkos::parallel_for (hypre__tot, KOKKOS_LAMBDA (HYPRE_Int idx) \
{
#define hypre_newBoxLoop0End(i1) \
}); \
hypre_fence(); \
}
#define hypre_newBoxLoop1Begin(ndim, loop_size, \
dbox1, start1, stride1, i1) \
{ \
hypre_newBoxLoopInit(ndim,loop_size) \
hypre_BoxLoopDataDeclareK(1,ndim,loop_size,dbox1,start1,stride1); \
Kokkos::parallel_for (hypre__tot, KOKKOS_LAMBDA (HYPRE_Int idx) \
{ \
hypre_newBoxLoopDeclare(databox1); \
hypre_BoxLoopIncK(1,databox1,i1);
#define hypre_newBoxLoop1End(i1) \
}); \
hypre_fence(); \
}
#define hypre_newBoxLoop2Begin(ndim, loop_size, \
dbox1, start1, stride1, i1, \
dbox2, start2, stride2, i2) \
{ \
hypre_newBoxLoopInit(ndim,loop_size); \
hypre_BoxLoopDataDeclareK(1,ndim,loop_size,dbox1,start1,stride1); \
hypre_BoxLoopDataDeclareK(2,ndim,loop_size,dbox2,start2,stride2); \
Kokkos::parallel_for (hypre__tot, KOKKOS_LAMBDA (HYPRE_Int idx) \
{ \
hypre_newBoxLoopDeclare(databox1) \
hypre_BoxLoopIncK(1,databox1,i1); \
hypre_BoxLoopIncK(2,databox2,i2);
#define hypre_newBoxLoop2End(i1, i2) \
}); \
hypre_fence(); \
}
#define hypre_newBoxLoop3Begin(ndim, loop_size, \
dbox1, start1, stride1, i1, \
dbox2, start2, stride2, i2, \
dbox3, start3, stride3, i3) \
{ \
hypre_newBoxLoopInit(ndim,loop_size); \
hypre_BoxLoopDataDeclareK(1,ndim,loop_size,dbox1,start1,stride1); \
hypre_BoxLoopDataDeclareK(2,ndim,loop_size,dbox2,start2,stride2); \
hypre_BoxLoopDataDeclareK(3,ndim,loop_size,dbox3,start3,stride3); \
Kokkos::parallel_for (hypre__tot, KOKKOS_LAMBDA (HYPRE_Int idx) \
{ \
hypre_newBoxLoopDeclare(databox1); \
hypre_BoxLoopIncK(1,databox1,i1); \
hypre_BoxLoopIncK(2,databox2,i2); \
hypre_BoxLoopIncK(3,databox3,i3);
#define hypre_newBoxLoop3End(i1, i2, i3) \
}); \
hypre_fence(); \
}
#define hypre_newBoxLoop4Begin(ndim, loop_size, \
dbox1, start1, stride1, i1, \
dbox2, start2, stride2, i2, \
dbox3, start3, stride3, i3, \
dbox4, start4, stride4, i4) \
{ \
hypre_newBoxLoopInit(ndim,loop_size); \
hypre_BoxLoopDataDeclareK(1,ndim,loop_size,dbox1,start1,stride1); \
hypre_BoxLoopDataDeclareK(2,ndim,loop_size,dbox2,start2,stride2); \
hypre_BoxLoopDataDeclareK(3,ndim,loop_size,dbox3,start3,stride3); \
hypre_BoxLoopDataDeclareK(4,ndim,loop_size,dbox4,start4,stride4); \
Kokkos::parallel_for (hypre__tot, KOKKOS_LAMBDA (HYPRE_Int idx) \
{ \
hypre_newBoxLoopDeclare(databox1); \
hypre_BoxLoopIncK(1,databox1,i1); \
hypre_BoxLoopIncK(2,databox2,i2); \
hypre_BoxLoopIncK(3,databox3,i3); \
hypre_BoxLoopIncK(4,databox4,i4);
#define hypre_newBoxLoop4End(i1, i2, i3, i4) \
}); \
hypre_fence(); \
}
#define hypre_BasicBoxLoopDataDeclareK(k,ndim,loop_size,stride) \
hypre_Boxloop databox##k; \
databox##k.lsize0 = loop_size[0]; \
databox##k.strides0 = stride[0]; \
databox##k.bstart0 = 0; \
databox##k.bsize0 = 0; \
if (ndim > 1) \
{ \
databox##k.lsize1 = loop_size[1]; \
databox##k.strides1 = stride[1]; \
databox##k.bstart1 = 0; \
databox##k.bsize1 = 0; \
} \
else \
{ \
databox##k.lsize1 = 1; \
databox##k.strides1 = 0; \
databox##k.bstart1 = 0; \
databox##k.bsize1 = 0; \
} \
if (ndim == 3) \
{ \
databox##k.lsize2 = loop_size[2]; \
databox##k.strides2 = stride[2]; \
databox##k.bstart2 = 0; \
databox##k.bsize2 = 0; \
} \
else \
{ \
databox##k.lsize2 = 1; \
databox##k.strides2 = 0; \
databox##k.bstart2 = 0; \
databox##k.bsize2 = 0; \
}
/* Open a Kokkos parallel_for over two "basic" boxes (strides only);
 * deliberately left open — the user's loop body follows and a matching
 * *End macro closes the lambda.  The last line must not carry a line
 * continuation, or the following #define is swallowed into this macro. */
#define hypre_newBasicBoxLoop2Begin(ndim, loop_size, \
stride1, i1, \
stride2, i2) \
{ \
hypre_newBoxLoopInit(ndim,loop_size); \
hypre_BasicBoxLoopDataDeclareK(1,ndim,loop_size,stride1); \
hypre_BasicBoxLoopDataDeclareK(2,ndim,loop_size,stride2); \
Kokkos::parallel_for (hypre__tot, KOKKOS_LAMBDA (HYPRE_Int idx) \
{ \
hypre_newBoxLoopDeclare(databox1); \
hypre_BoxLoopIncK(1,databox1,i1); \
hypre_BoxLoopIncK(2,databox2,i2);
/* Open a Kokkos parallel_reduce over one box; the running value of
 * HYPRE_BOX_REDUCTION is saved, the reduction restarts at zero, and the
 * matching *End macro adds the saved value back.  The last line must not
 * carry a line continuation, or the following #define is swallowed. */
#define hypre_BoxLoop1ReductionBegin(ndim, loop_size, \
dbox1, start1, stride1, i1, \
HYPRE_BOX_REDUCTION) \
{ \
HYPRE_Real __hypre_sum_tmp = HYPRE_BOX_REDUCTION; \
HYPRE_BOX_REDUCTION = 0.0; \
hypre_newBoxLoopInit(ndim,loop_size); \
hypre_BoxLoopDataDeclareK(1,ndim,loop_size,dbox1,start1,stride1); \
Kokkos::parallel_reduce (hypre__tot, KOKKOS_LAMBDA (HYPRE_Int idx, \
HYPRE_Real &HYPRE_BOX_REDUCTION) \
{ \
hypre_newBoxLoopDeclare(databox1); \
hypre_BoxLoopIncK(1,databox1,i1);
#define hypre_BoxLoop1ReductionEnd(i1, HYPRE_BOX_REDUCTION) \
}, HYPRE_BOX_REDUCTION); \
hypre_fence(); \
HYPRE_BOX_REDUCTION += __hypre_sum_tmp; \
}
/* Open a Kokkos parallel_reduce over two boxes; same save/zero/restore
 * protocol as the 1-box variant above.  The last line must not carry a
 * line continuation, or the following #define is swallowed. */
#define hypre_BoxLoop2ReductionBegin(ndim, loop_size, \
dbox1, start1, stride1, i1, \
dbox2, start2, stride2, i2, \
HYPRE_BOX_REDUCTION) \
{ \
HYPRE_Real __hypre_sum_tmp = HYPRE_BOX_REDUCTION; \
HYPRE_BOX_REDUCTION = 0.0; \
hypre_newBoxLoopInit(ndim,loop_size); \
hypre_BoxLoopDataDeclareK(1,ndim,loop_size,dbox1,start1,stride1); \
hypre_BoxLoopDataDeclareK(2,ndim,loop_size,dbox2,start2,stride2); \
Kokkos::parallel_reduce (hypre__tot, KOKKOS_LAMBDA (HYPRE_Int idx, \
HYPRE_Real &HYPRE_BOX_REDUCTION) \
{ \
hypre_newBoxLoopDeclare(databox1); \
hypre_BoxLoopIncK(1,databox1,i1); \
hypre_BoxLoopIncK(2,databox2,i2);
#define hypre_BoxLoop2ReductionEnd(i1, i2, HYPRE_BOX_REDUCTION) \
}, HYPRE_BOX_REDUCTION); \
hypre_fence(); \
HYPRE_BOX_REDUCTION += __hypre_sum_tmp; \
}
#define hypre_LoopBegin(size,idx) \
{ \
Kokkos::parallel_for(size, KOKKOS_LAMBDA (HYPRE_Int idx) \
{
#define hypre_LoopEnd() \
}); \
hypre_fence(); \
}
/*
extern "C++"
{
struct ColumnSums
{
typedef HYPRE_Real value_type[];
typedef View<HYPRE_Real**>::size_type size_type;
size_type value_count;
View<HYPRE_Real**> X_;
ColumnSums(const View<HYPRE_Real**>& X):value_count(X.dimension_1()),X_(X){}
KOKKOS_INLINE_FUNCTION void
operator()(const size_type i,value_type sum) const
{
for (size_type j = 0;j < value_count;j++)
{
sum[j] += X_(i,j);
}
}
KOKKOS_INLINE_FUNCTION void
join (volatile value_type dst,volatile value_type src) const
{
for (size_type j= 0;j < value_count;j++)
{
dst[j] +=src[j];
}
}
KOKKOS_INLINE_FUNCTION void init(value_type sum) const
{
for (size_type j= 0;j < value_count;j++)
{
sum[j] += 0.0;
}
}
};
}
*/
#define hypre_newBoxLoopSetOneBlock()
#define hypre_newBoxLoopGetIndex(index)\
index[0] = hypre_IndexD(local_idx, 0); index[1] = hypre_IndexD(local_idx, 1); index[2] = hypre_IndexD(local_idx, 2);
#define hypre_BoxLoopGetIndex zypre_BoxLoopGetIndex
#define hypre_BoxLoopSetOneBlock hypre_newBoxLoopSetOneBlock
#define hypre_BoxLoopBlock() 0
#define hypre_BoxLoop0Begin hypre_newBoxLoop0Begin
#define hypre_BoxLoop0For hypre_newBoxLoop0For
#define hypre_BoxLoop0End hypre_newBoxLoop0End
#define hypre_BoxLoop1Begin hypre_newBoxLoop1Begin
#define hypre_BoxLoop1For hypre_newBoxLoop1For
#define hypre_BoxLoop1End hypre_newBoxLoop1End
#define hypre_BoxLoop2Begin hypre_newBoxLoop2Begin
#define hypre_BoxLoop2For hypre_newBoxLoop2For
#define hypre_BoxLoop2End hypre_newBoxLoop2End
#define hypre_BoxLoop3Begin hypre_newBoxLoop3Begin
#define hypre_BoxLoop3For hypre_newBoxLoop3For
#define hypre_BoxLoop3End hypre_newBoxLoop3End
#define hypre_BoxLoop4Begin hypre_newBoxLoop4Begin
#define hypre_BoxLoop4For hypre_newBoxLoop4For
#define hypre_BoxLoop4End hypre_newBoxLoop4End
#define hypre_BasicBoxLoop2Begin hypre_newBasicBoxLoop2Begin
#endif
#elif defined(HYPRE_USING_CUDA)
/******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Header info for the BoxLoop
*
*****************************************************************************/
/*--------------------------------------------------------------------------
* BoxLoop macros:
*--------------------------------------------------------------------------*/
#ifndef HYPRE_NEWBOXLOOP_HEADER
#define HYPRE_NEWBOXLOOP_HEADER
#include <cuda.h>
#include <cuda_runtime.h>
#ifdef HYPRE_USING_OPENMP
#include <omp.h>
#endif
#define HYPRE_LAMBDA [=] __host__ __device__
#define BLOCKSIZE 512
typedef struct hypre_Boxloop_struct
{
/* loop extent per dimension (up to 3) */
HYPRE_Int lsize0,lsize1,lsize2;
/* per-dimension stride applied to the loop index */
HYPRE_Int strides0,strides1,strides2;
/* loop start relative to the data-box origin (start - imin; see
   hypre_BoxLoopDataDeclareK) */
HYPRE_Int bstart0,bstart1,bstart2;
/* data-box extent per dimension (imax - imin) */
HYPRE_Int bsize0,bsize1,bsize2;
} hypre_Boxloop;
#if 1
#define hypre_fence()
/*printf("\n hypre_newBoxLoop in %s(%d) function %s\n",__FILE__,__LINE__,__FUNCTION__);*/
#else
#define hypre_fence() \
{ \
cudaError err = cudaGetLastError(); \
if ( cudaSuccess != err ) \
{ \
printf("\n ERROR hypre_newBoxLoop: %s in %s(%d) function %s\n",cudaGetErrorString(err),__FILE__,__LINE__,__FUNCTION__); \
/* HYPRE_Int *p = NULL; *p = 1; */ \
} \
hypre_CheckErrorDevice(cudaDeviceSynchronize()); \
}
#endif
/* #define hypre_reduce_policy cuda_reduce<BLOCKSIZE> */
extern "C++" {
/* CUDA kernel: run loop_body(idx) once for each idx in [0, length).
 * One thread handles one index; threads whose global id falls past
 * 'length' do nothing. */
template <typename LOOP_BODY>
__global__ void forall_kernel(LOOP_BODY loop_body, HYPRE_Int length)
{
/* global thread index across the 1-D grid */
HYPRE_Int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < length)
{
loop_body(idx);
}
}
/* Dispatch a box-loop body over [0, length) according to 'policy':
 * host -> (optionally OpenMP-parallel) CPU loop; device -> CUDA launch
 * of forall_kernel with BLOCKSIZE threads per block. */
template<typename LOOP_BODY>
void BoxLoopforall(HYPRE_Int policy, HYPRE_Int length, LOOP_BODY loop_body)
{
if (policy == HYPRE_MEMORY_HOST)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
for (HYPRE_Int idx = 0; idx < length; idx++)
{
loop_body(idx);
}
}
else if (policy == HYPRE_MEMORY_DEVICE)
{
/* enough blocks to cover 'length' (round up) */
HYPRE_Int gridSize = (length + BLOCKSIZE - 1) / BLOCKSIZE;
const dim3 gDim(gridSize), bDim(BLOCKSIZE);
HYPRE_CUDA_LAUNCH( forall_kernel, gDim, bDim, loop_body, length );
}
else if (policy == 2)
{
/* NOTE(review): policy 2 is intentionally a no-op here — confirm
 * against the policy definitions elsewhere in hypre. */
}
}
/* CUDA kernel: invoke the reduction loop body once per thread, passing
 * its global thread id, the total number of threads in the grid, and the
 * problem length (the body is expected to stride over the range itself;
 * see the Reduction*Begin macros below). */
template <typename LOOP_BODY>
__global__ void reductionforall_kernel(LOOP_BODY ReductionLoop,
HYPRE_Int length)
{
ReductionLoop(blockDim.x*blockIdx.x+threadIdx.x, blockDim.x*gridDim.x, length);
}
/* Dispatch a reduction box-loop over [0, length).  Device policy launches
 * reductionforall_kernel with the grid capped at 1024 blocks; the host
 * branch is empty here (presumably handled elsewhere — TODO confirm).
 * No-op for empty ranges. */
template<typename LOOP_BODY>
void ReductionBoxLoopforall(HYPRE_Int policy, HYPRE_Int length, LOOP_BODY ReductionLoop)
{
if (length <= 0)
{
return;
}
if (policy == HYPRE_MEMORY_HOST)
{
}
else if (policy == HYPRE_MEMORY_DEVICE)
{
/* round up to cover 'length', then cap the grid at 1024 blocks */
HYPRE_Int gridSize = (length + BLOCKSIZE - 1) / BLOCKSIZE;
gridSize = hypre_min(gridSize, 1024);
/*
hypre_printf("length= %d, blocksize = %d, gridsize = %d\n",
length, BLOCKSIZE, gridSize);
*/
const dim3 gDim(gridSize), bDim(BLOCKSIZE);
HYPRE_CUDA_LAUNCH( reductionforall_kernel, gDim, bDim, ReductionLoop, length );
}
}
}
#define hypre_BoxLoopIncK(k,box,hypre__i) \
HYPRE_Int hypre_boxD##k = 1; \
HYPRE_Int hypre__i = 0; \
hypre__i += (hypre_IndexD(local_idx, 0)*box.strides0 + box.bstart0) * hypre_boxD##k; \
hypre_boxD##k *= hypre_max(0, box.bsize0 + 1); \
hypre__i += (hypre_IndexD(local_idx, 1)*box.strides1 + box.bstart1) * hypre_boxD##k; \
hypre_boxD##k *= hypre_max(0, box.bsize1 + 1); \
hypre__i += (hypre_IndexD(local_idx, 2)*box.strides2 + box.bstart2) * hypre_boxD##k; \
hypre_boxD##k *= hypre_max(0, box.bsize2 + 1);
#define hypre_newBoxLoopInit(ndim,loop_size) \
HYPRE_Int hypre__tot = 1; \
for (HYPRE_Int hypre_d = 0;hypre_d < ndim;hypre_d ++) \
hypre__tot *= loop_size[hypre_d];
/* Compute hypre__tot, the total iteration count over the first ndim
 * entries of loop_size.  The final line must not end with a line
 * continuation — with blank lines removed, a trailing '\' absorbs the
 * next #define into this macro. */
#define hypre_BasicBoxLoopInit(ndim,loop_size) \
HYPRE_Int hypre__tot = 1; \
for (HYPRE_Int hypre_d = 0;hypre_d < ndim;hypre_d ++) \
hypre__tot *= loop_size[hypre_d];
/* Decompose the flat index 'idx' into the 3-D local_idx using the box's
 * loop sizes (mixed-radix decode).  The final line must not end with a
 * line continuation, or the following #define is absorbed into this
 * macro. */
#define hypre_newBoxLoopDeclare(box) \
hypre_Index local_idx; \
HYPRE_Int idx_local = idx; \
hypre_IndexD(local_idx, 0) = idx_local % box.lsize0; \
idx_local = idx_local / box.lsize0; \
hypre_IndexD(local_idx, 1) = idx_local % box.lsize1; \
idx_local = idx_local / box.lsize1; \
hypre_IndexD(local_idx, 2) = idx_local % box.lsize2;
#define hypre_newBoxLoop0Begin(ndim, loop_size) \
{ \
hypre_newBoxLoopInit(ndim,loop_size); \
BoxLoopforall(hypre_exec_policy,hypre__tot,HYPRE_LAMBDA (HYPRE_Int idx) \
{
#define hypre_newBoxLoop0End() \
}); \
hypre_fence(); \
}
#define hypre_BoxLoopDataDeclareK(k,ndim,loop_size,dbox,start,stride) \
hypre_Boxloop databox##k; \
databox##k.lsize0 = loop_size[0]; \
databox##k.strides0 = stride[0]; \
databox##k.bstart0 = start[0] - dbox->imin[0]; \
databox##k.bsize0 = dbox->imax[0]-dbox->imin[0]; \
if (ndim > 1) \
{ \
databox##k.lsize1 = loop_size[1]; \
databox##k.strides1 = stride[1]; \
databox##k.bstart1 = start[1] - dbox->imin[1]; \
databox##k.bsize1 = dbox->imax[1]-dbox->imin[1]; \
} \
else \
{ \
databox##k.lsize1 = 1; \
databox##k.strides1 = 0; \
databox##k.bstart1 = 0; \
databox##k.bsize1 = 0; \
} \
if (ndim == 3) \
{ \
databox##k.lsize2 = loop_size[2]; \
databox##k.strides2 = stride[2]; \
databox##k.bstart2 = start[2] - dbox->imin[2]; \
databox##k.bsize2 = dbox->imax[2]-dbox->imin[2]; \
} \
else \
{ \
databox##k.lsize2 = 1; \
databox##k.strides2 = 0; \
databox##k.bstart2 = 0; \
databox##k.bsize2 = 0; \
}
#define hypre_newBoxLoop1Begin(ndim, loop_size, \
dbox1, start1, stride1, i1) \
{ \
hypre_newBoxLoopInit(ndim,loop_size); \
hypre_BoxLoopDataDeclareK(1,ndim,loop_size,dbox1,start1,stride1); \
BoxLoopforall(hypre_exec_policy,hypre__tot,HYPRE_LAMBDA (HYPRE_Int idx) \
{ \
hypre_newBoxLoopDeclare(databox1); \
hypre_BoxLoopIncK(1,databox1,i1);
#define hypre_newBoxLoop1End(i1) \
}); \
hypre_fence(); \
}
#define hypre_newBoxLoop2Begin(ndim, loop_size, \
dbox1, start1, stride1, i1, \
dbox2, start2, stride2, i2) \
{ \
hypre_newBoxLoopInit(ndim,loop_size); \
hypre_BoxLoopDataDeclareK(1,ndim,loop_size,dbox1,start1,stride1); \
hypre_BoxLoopDataDeclareK(2,ndim,loop_size,dbox2,start2,stride2); \
BoxLoopforall(hypre_exec_policy,hypre__tot,HYPRE_LAMBDA (HYPRE_Int idx) \
{ \
hypre_newBoxLoopDeclare(databox1); \
hypre_BoxLoopIncK(1,databox1,i1); \
hypre_BoxLoopIncK(2,databox2,i2);
#define hypre_newBoxLoop2End(i1, i2) \
}); \
hypre_fence(); \
}
#define hypre_newBoxLoop3Begin(ndim, loop_size, \
dbox1, start1, stride1, i1, \
dbox2, start2, stride2, i2, \
dbox3, start3, stride3, i3) \
{ \
hypre_newBoxLoopInit(ndim,loop_size); \
hypre_BoxLoopDataDeclareK(1,ndim,loop_size,dbox1,start1,stride1); \
hypre_BoxLoopDataDeclareK(2,ndim,loop_size,dbox2,start2,stride2); \
hypre_BoxLoopDataDeclareK(3,ndim,loop_size,dbox3,start3,stride3); \
BoxLoopforall(hypre_exec_policy,hypre__tot,HYPRE_LAMBDA (HYPRE_Int idx) \
{ \
hypre_newBoxLoopDeclare(databox1); \
hypre_BoxLoopIncK(1,databox1,i1); \
hypre_BoxLoopIncK(2,databox2,i2); \
hypre_BoxLoopIncK(3,databox3,i3);
#define hypre_newBoxLoop3End(i1, i2,i3) \
}); \
hypre_fence(); \
}
#define hypre_newBoxLoop4Begin(ndim, loop_size, \
dbox1, start1, stride1, i1, \
dbox2, start2, stride2, i2, \
dbox3, start3, stride3, i3, \
dbox4, start4, stride4, i4) \
{ \
hypre_newBoxLoopInit(ndim,loop_size); \
hypre_BoxLoopDataDeclareK(1,ndim,loop_size,dbox1,start1,stride1); \
hypre_BoxLoopDataDeclareK(2,ndim,loop_size,dbox2,start2,stride2); \
hypre_BoxLoopDataDeclareK(3,ndim,loop_size,dbox3,start3,stride3); \
hypre_BoxLoopDataDeclareK(4,ndim,loop_size,dbox4,start4,stride4); \
BoxLoopforall(hypre_exec_policy,hypre__tot,HYPRE_LAMBDA (HYPRE_Int idx) \
{ \
hypre_newBoxLoopDeclare(databox1); \
hypre_BoxLoopIncK(1,databox1,i1); \
hypre_BoxLoopIncK(2,databox2,i2); \
hypre_BoxLoopIncK(3,databox3,i3); \
hypre_BoxLoopIncK(4,databox4,i4);
#define hypre_newBoxLoop4End(i1, i2, i3, i4) \
}); \
hypre_fence(); \
}
#define zypre_BasicBoxLoopDataDeclareK(k,ndim,loop_size,stride) \
hypre_Boxloop databox##k; \
databox##k.lsize0 = loop_size[0]; \
databox##k.strides0 = stride[0]; \
databox##k.bstart0 = 0; \
databox##k.bsize0 = 0; \
if (ndim > 1) \
{ \
databox##k.lsize1 = loop_size[1]; \
databox##k.strides1 = stride[1]; \
databox##k.bstart1 = 0; \
databox##k.bsize1 = 0; \
} \
else \
{ \
databox##k.lsize1 = 1; \
databox##k.strides1 = 0; \
databox##k.bstart1 = 0; \
databox##k.bsize1 = 0; \
} \
if (ndim == 3) \
{ \
databox##k.lsize2 = loop_size[2]; \
databox##k.strides2 = stride[2]; \
databox##k.bstart2 = 0; \
databox##k.bsize2 = 0; \
} \
else \
{ \
databox##k.lsize2 = 1; \
databox##k.strides2 = 0; \
databox##k.bstart2 = 0; \
databox##k.bsize2 = 0; \
}
/* Open a CUDA/host BoxLoopforall over one "basic" box (strides only);
 * deliberately left open — the user's loop body follows and a matching
 * *End macro closes the lambda.  The last line must not carry a line
 * continuation, or the following #define is swallowed into this macro. */
#define zypre_newBasicBoxLoop1Begin(ndim, loop_size, \
stride1, i1) \
{ \
hypre_BasicBoxLoopInit(ndim,loop_size); \
zypre_BasicBoxLoopDataDeclareK(1,ndim,loop_size,stride1); \
BoxLoopforall(hypre_exec_policy,hypre__tot,HYPRE_LAMBDA (HYPRE_Int idx) \
{ \
hypre_newBoxLoopDeclare(databox1); \
hypre_BoxLoopIncK(1,databox1,i1);
/* Open a CUDA/host BoxLoopforall over two "basic" boxes (strides only);
 * deliberately left open — a matching *End macro closes the lambda.
 * The last line must not carry a line continuation, or the following
 * #define is swallowed into this macro. */
#define zypre_newBasicBoxLoop2Begin(ndim, loop_size, \
stride1, i1, \
stride2, i2) \
{ \
hypre_BasicBoxLoopInit(ndim,loop_size); \
zypre_BasicBoxLoopDataDeclareK(1,ndim,loop_size,stride1); \
zypre_BasicBoxLoopDataDeclareK(2,ndim,loop_size,stride2); \
BoxLoopforall(hypre_exec_policy,hypre__tot,HYPRE_LAMBDA (HYPRE_Int idx) \
{ \
hypre_newBoxLoopDeclare(databox1); \
hypre_BoxLoopIncK(1,databox1,i1); \
hypre_BoxLoopIncK(2,databox2,i2);
/* Flat 1-D device loop over [0, size) with loop variable idx; body follows
 * at the expansion site and is closed by hypre_LoopEnd(). */
#define hypre_LoopBegin(size,idx) \
{ \
BoxLoopforall(hypre_exec_policy,size,HYPRE_LAMBDA (HYPRE_Int idx) \
{
/* Close hypre_LoopBegin: end the lambda/forall and synchronize. */
#define hypre_LoopEnd() \
}); \
hypre_fence(); \
}
/* Copy the 3-D local index (computed by hypre_newBoxLoopDeclare) into
 * index[0..2]. */
#define hypre_newBoxLoopGetIndex(index) \
index[0] = hypre_IndexD(local_idx, 0); index[1] = hypre_IndexD(local_idx, 1); index[2] = hypre_IndexD(local_idx, 2);
/* Public BoxLoop API names, bound to the device ("new"/zypre) variants
 * defined above in this configuration. */
#define hypre_BoxLoopGetIndex zypre_BoxLoopGetIndex
#define hypre_BoxLoopSetOneBlock() ;
#define hypre_BoxLoopBlock() 0
#define hypre_BoxLoop0Begin hypre_newBoxLoop0Begin
#define hypre_BoxLoop0For hypre_newBoxLoop0For
#define hypre_BoxLoop0End hypre_newBoxLoop0End
#define hypre_BoxLoop1Begin hypre_newBoxLoop1Begin
#define hypre_BoxLoop1For hypre_newBoxLoop1For
#define hypre_BoxLoop1End hypre_newBoxLoop1End
#define hypre_BoxLoop2Begin hypre_newBoxLoop2Begin
#define hypre_BoxLoop2For hypre_newBoxLoop2For
#define hypre_BoxLoop2End hypre_newBoxLoop2End
#define hypre_BoxLoop3Begin hypre_newBoxLoop3Begin
#define hypre_BoxLoop3For hypre_newBoxLoop3For
#define hypre_BoxLoop3End hypre_newBoxLoop3End
#define hypre_BoxLoop4Begin hypre_newBoxLoop4Begin
#define hypre_BoxLoop4For hypre_newBoxLoop4For
#define hypre_BoxLoop4End hypre_newBoxLoop4End
#define hypre_BasicBoxLoop1Begin zypre_newBasicBoxLoop1Begin
#define hypre_BasicBoxLoop2Begin zypre_newBasicBoxLoop2Begin
/* Reduction BoxLoop1: grid-sized reduction over one box index.  The lambda
 * strides idx by nthreads; the partial results are combined per block via
 * reducesum.BlockReduce() at the end.  nblocks is capped at 1024. */
#define hypre_BoxLoop1ReductionBegin(ndim, loop_size, \
dbox1, start1, stride1, i1, \
reducesum) \
{ \
hypre_newBoxLoopInit(ndim,loop_size); \
hypre_BoxLoopDataDeclareK(1,ndim,loop_size,dbox1,start1,stride1); \
reducesum.nblocks = hypre_min( (hypre__tot+BLOCKSIZE-1)/BLOCKSIZE, 1024 ); \
ReductionBoxLoopforall(hypre_exec_policy, hypre__tot, \
HYPRE_LAMBDA (HYPRE_Int tid, HYPRE_Int nthreads, \
HYPRE_Int len) \
{ \
for (HYPRE_Int idx = tid; \
idx < len; \
idx += nthreads) \
{ \
hypre_newBoxLoopDeclare(databox1); \
hypre_BoxLoopIncK(1,databox1,i1);
/* Close hypre_BoxLoop1ReductionBegin: end the strided for, reduce the
 * per-thread partials within the block, end the lambda, synchronize. */
#define hypre_BoxLoop1ReductionEnd(i1, reducesum) \
} \
reducesum.BlockReduce(); \
}); \
hypre_fence(); \
}
/* Reduction BoxLoop2: same scheme as BoxLoop1Reduction, over two boxes. */
#define hypre_BoxLoop2ReductionBegin(ndim, loop_size, \
dbox1, start1, stride1, i1, \
dbox2, start2, stride2, i2, \
reducesum) \
{ \
hypre_newBoxLoopInit(ndim,loop_size); \
hypre_BoxLoopDataDeclareK(1,ndim,loop_size,dbox1,start1,stride1); \
hypre_BoxLoopDataDeclareK(2,ndim,loop_size,dbox2,start2,stride2); \
reducesum.nblocks = hypre_min( (hypre__tot+BLOCKSIZE-1)/BLOCKSIZE, 1024 ); \
ReductionBoxLoopforall(hypre_exec_policy, hypre__tot, \
HYPRE_LAMBDA (HYPRE_Int tid, HYPRE_Int nthreads, \
HYPRE_Int len) \
{ \
for (HYPRE_Int idx = tid; \
idx < len; \
idx += nthreads) \
{ \
hypre_newBoxLoopDeclare(databox1); \
hypre_BoxLoopIncK(1,databox1,i1); \
hypre_BoxLoopIncK(2,databox2,i2);
/* Close hypre_BoxLoop2ReductionBegin (mirrors the 1-box End). */
#define hypre_BoxLoop2ReductionEnd(i1, i2, reducesum) \
} \
reducesum.BlockReduce(); \
}); \
hypre_fence(); \
}
#endif
#elif defined(HYPRE_USING_DEVICE_OPENMP)
/******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Header info for the BoxLoop
*
*****************************************************************************/
/*--------------------------------------------------------------------------
* BoxLoop macros:
*--------------------------------------------------------------------------*/
#ifndef HYPRE_NEWBOXLOOP_HEADER
#define HYPRE_NEWBOXLOOP_HEADER
#include "omp.h"
/* Token-pasting helpers: the X-variants expand their arguments first, then
 * paste with '_' separators (standard two-level concatenation idiom). */
#define HYPRE_CONCAT2(x, y) x ## _ ## y
#define HYPRE_XCONCAT2(x, y) HYPRE_CONCAT2(x, y)
#define HYPRE_CONCAT3(x, y, z) x ## _ ## y ## _ ## z
#define HYPRE_XCONCAT3(x, y, z) HYPRE_CONCAT3(x, y, z)
/* if use OMP 4.5 default team size and number of teams */
#define AUTO_OMP_TEAM
#ifndef AUTO_OMP_TEAM
/* omp team size (aka. gpu block size) */
#define hypre_gpu_block_size 512
/* the max number of omp teams */
#define hypre_max_num_blocks 1000000
#endif
/* Optional per-loop entry trace; disabled by default. */
//#define HYPRE_BOXLOOP_ENTRY_PRINT hypre_printf("%s %s %d\n", __FILE__, __func__, __LINE__);
#define HYPRE_BOXLOOP_ENTRY_PRINT
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - -
BOX LOOPS [TEAM DISTRIBUTE VERSION]
!!! NOTE: THIS CODE ONLY WORKS FOR DIM <= 3 !!!
* - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
/*
#define hypre_BoxLoop0For()
#define hypre_BoxLoop1For(i1)
#define hypre_BoxLoop2For(i1, i2)
#define hypre_BoxLoop3For(i1, i2, i3)
#define hypre_BoxLoop4For(i1, i2, i3, i4)
*/
/* Public BoxLoop API names bound to the OpenMP-4.5 target-distribute
 * variants; all End macros share one closer. */
#define hypre_BoxLoopGetIndex zypre_BoxLoopGetIndex
#define hypre_BoxLoopSetOneBlock() ;
#define hypre_BoxLoopBlock() 0
#define hypre_BoxLoop0Begin zypre_omp4_dist_BoxLoop0Begin
#define hypre_BoxLoop0End zypre_omp4_dist_BoxLoopEnd
#define hypre_BoxLoop1Begin zypre_omp4_dist_BoxLoop1Begin
#define hypre_BoxLoop1End zypre_omp4_dist_BoxLoopEnd
#define hypre_BasicBoxLoop2Begin zypre_omp4_dist_BoxLoop2_v2_Begin
#define hypre_BoxLoop2Begin zypre_omp4_dist_BoxLoop2Begin
#define hypre_BoxLoop2End zypre_omp4_dist_BoxLoopEnd
#define hypre_BoxLoop3Begin zypre_omp4_dist_BoxLoop3Begin
#if 0
#define hypre_BoxLoop3_SAME_STRIDE_Begin zypre_omp4_dist_BoxLoop3_SAME_STRIDE_Begin
#endif
#define hypre_BoxLoop3End zypre_omp4_dist_BoxLoopEnd
#define hypre_BoxLoop4Begin zypre_omp4_dist_BoxLoop4Begin
#define hypre_BoxLoop4End zypre_omp4_dist_BoxLoopEnd
#define hypre_LoopBegin zypre_LoopBegin
#define hypre_LoopEnd zypre_omp4_dist_BoxLoopEnd
/* Look for more in struct_ls/red_black_gs.h" */
/* Common closer for every omp4 Begin macro above: closes the target-for
 * body and the outer host scope.  Variadic so any End(i1,...) call works. */
#define zypre_omp4_dist_BoxLoopEnd(...) \
}\
/*cudaDeviceSynchronize();*/ \
}
/* Empty by default; redefined at use sites that need an OpenMP reduction
 * clause appended to the target pragma. */
#define HYPRE_BOX_REDUCTION
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - -
* host code: declare variables used in the box loop
* - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
/* Host code: declare hypre__ndim / hypre__loop_size_{0,1,2} and compute
 * hypre__tot, the total trip count, as the product of the used dims.
 * Loop sizes for dims >= ndim stay uninitialized; all device-side readers
 * guard on hypre__ndim before touching them. */
#define zypre_omp4_BoxLoopDeclareInit_0(ndim, loop_size) \
HYPRE_Int hypre__ndim = ndim, hypre__tot = 1; \
/* HYPRE_Int hypre__thread; */ \
/* loop size */ \
HYPRE_Int hypre__loop_size_0, hypre__loop_size_1, hypre__loop_size_2; \
if (hypre__ndim > 0) { hypre__loop_size_0 = loop_size[0]; hypre__tot *= hypre__loop_size_0; } \
if (hypre__ndim > 1) { hypre__loop_size_1 = loop_size[1]; hypre__tot *= hypre__loop_size_1; } \
if (hypre__ndim > 2) { hypre__loop_size_2 = loop_size[2]; hypre__tot *= hypre__loop_size_2; }
#ifdef AUTO_OMP_TEAM
#define TEAM_CLAUSE
#define zypre_omp4_BoxLoopDeclareInit(ndim, loop_size) zypre_omp4_BoxLoopDeclareInit_0(ndim, loop_size)
#else
/* Explicit team sizing: cap teams at hypre_max_num_blocks. */
#define TEAM_CLAUSE num_teams(num_blocks) thread_limit(block_size)
#define zypre_omp4_BoxLoopDeclareInit(ndim, loop_size) zypre_omp4_BoxLoopDeclareInit_0(ndim, loop_size) \
/* GPU block numbers and dimensions */ \
HYPRE_Int block_size = hypre_gpu_block_size; \
HYPRE_Int num_blocks = hypre_min(hypre_max_num_blocks, (hypre__tot + hypre_gpu_block_size - 1) / hypre_gpu_block_size);
#endif
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - -
* host code: declare and initialize variables for box k
* - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
/* Host code: for box k and dimension j, cache stride, (start - imin) and
 * (imax - imin + 1) into flat scalars named via token pasting, so the
 * device loop never dereferences dboxk. */
#define zypre_omp4_BoxKDeclareInitBody(j, k, startk, dboxk, stridek) \
HYPRE_XCONCAT3(hypre__stride,j,k) = stridek[j]; \
/* precompute some entities used in the parallel for loop */ \
HYPRE_XCONCAT3(hypre__box_start_imin,j,k) = startk[j] - dboxk->imin[j]; \
HYPRE_XCONCAT3(hypre__box_imax_imin,j,k) = dboxk->imax[j] - dboxk->imin[j] + 1;
/* Declare the per-box scalars for all 3 dims, then fill the used ones.
 * The final line deliberately has no trailing backslash: the previous
 * revision's stray continuation spliced the following source line (a
 * comment) into this macro's replacement list, making the definition
 * depend on what happens to follow it in the file. */
#define zypre_omp4_BoxKDeclareInit(k, startk, dboxk, stridek)\
/* start - imin */ \
HYPRE_Int HYPRE_XCONCAT3(hypre__box_start_imin,0,k), HYPRE_XCONCAT3(hypre__box_start_imin,1,k), HYPRE_XCONCAT3(hypre__box_start_imin,2,k); \
/* imax - imin + 1 */ \
HYPRE_Int HYPRE_XCONCAT3(hypre__box_imax_imin,0,k), HYPRE_XCONCAT3(hypre__box_imax_imin,1,k), HYPRE_XCONCAT3(hypre__box_imax_imin,2,k); \
/* stride */ \
HYPRE_Int HYPRE_XCONCAT3(hypre__stride,0,k), HYPRE_XCONCAT3(hypre__stride,1,k), HYPRE_XCONCAT3(hypre__stride,2,k); \
/*if (hypre__ndim > 0)*/ { zypre_omp4_BoxKDeclareInitBody(0, k, startk, dboxk, stridek) } \
if (hypre__ndim > 1) { zypre_omp4_BoxKDeclareInitBody(1, k, startk, dboxk, stridek) } \
if (hypre__ndim > 2) { zypre_omp4_BoxKDeclareInitBody(2, k, startk, dboxk, stridek) }
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - -
* map clause
* - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
/* Per-arity map clauses for the target pragmas; empty by default and
 * redefinable at use sites. */
#define MAP_CLAUSE0
#define MAP_CLAUSE1
#define MAP_CLAUSE2
#define MAP_CLAUSE3
#define MAP_CLAUSE4
/* Offload only when globally enabled and there is work to do; otherwise the
 * target region falls back to the host. */
#define IF_CLAUSE if (hypre__global_offload && hypre__tot > 0)
//#define IF_CLAUSE
/* is_device_ptr clause: only meaningful when hypre manages device memory
 * itself (HYPRE_DEVICE_OPENMP_ALLOC). */
#if defined(HYPRE_DEVICE_OPENMP_ALLOC)
#define IS_DEVICE_CLAUSE DEVICE_VAR
#else
#define IS_DEVICE_CLAUSE
#endif
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - -
* device code for BoxLoop 1, set i1
* - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
/* Device code: decode the flat thread id hypre__thread into per-box linear
 * offsets.  For each dimension j the Body macro peels the j-th coordinate
 * out of hypre__J (mixed-radix decode by the loop sizes) and accumulates
 * i<k> += coord_offset * running_box_extent. */
#define zypre_omp4_BoxLoopSet1Body(j, i1) \
/* coord in dimension j */ \
hypre__i = hypre__J % HYPRE_XCONCAT2(hypre__loop_size,j); \
/* once */ \
hypre__i_1 = hypre__i * HYPRE_XCONCAT3(hypre__stride,j,1) + HYPRE_XCONCAT3(hypre__box_start_imin,j,1);\
/* once */ \
i1 += hypre__i_1 * hypre__I_1; \
/* once */ \
hypre__I_1 *= HYPRE_XCONCAT3(hypre__box_imax_imin,j,1); \
/* */ \
hypre__J /= HYPRE_XCONCAT2(hypre__loop_size,j); \
/* !!! special for BoxLoop1: save the 3-D id */ \
/* HYPRE_XCONCAT2(hypre__id,j) = hypre__i; */
/* Declare and compute i1 (and idx, the raw flat id) for one box. */
#define zypre_omp4_BoxLoopSet1(i1) \
HYPRE_Int hypre__I_1, hypre__i, hypre__i_1, hypre__J, i1, idx; \
/* HYPRE_Int hypre__id_0, hypre__id_1, hypre__id_2; */ \
hypre__I_1 = 1; idx = hypre__J = hypre__thread; i1 = 0; \
/*if (hypre__ndim > 0)*/ { zypre_omp4_BoxLoopSet1Body(0, i1) } \
if (hypre__ndim > 1) { zypre_omp4_BoxLoopSet1Body(1, i1) } \
if (hypre__ndim > 2) { zypre_omp4_BoxLoopSet1Body(2, i1) }
/* Same decode for two boxes: each dimension contributes to i1 and i2 with
 * its own stride/start/extent. */
#define zypre_omp4_BoxLoopSet2Body(j, i1, i2) \
/* */ \
hypre__i = hypre__J % HYPRE_XCONCAT2(hypre__loop_size,j); \
/* twice */ \
hypre__i_1 = hypre__i * HYPRE_XCONCAT3(hypre__stride,j,1) + HYPRE_XCONCAT3(hypre__box_start_imin,j,1);\
hypre__i_2 = hypre__i * HYPRE_XCONCAT3(hypre__stride,j,2) + HYPRE_XCONCAT3(hypre__box_start_imin,j,2);\
/* twice */ \
i1 += hypre__i_1 * hypre__I_1; \
i2 += hypre__i_2 * hypre__I_2; \
/* twice */ \
hypre__I_1 *= HYPRE_XCONCAT3(hypre__box_imax_imin,j,1); \
hypre__I_2 *= HYPRE_XCONCAT3(hypre__box_imax_imin,j,2); \
/* */ \
hypre__J /= HYPRE_XCONCAT2(hypre__loop_size,j);
#define zypre_omp4_BoxLoopSet2(i1, i2) \
HYPRE_Int hypre__I_1, hypre__I_2, hypre__i, hypre__i_1, hypre__i_2, hypre__J, i1, i2; \
hypre__I_1 = hypre__I_2 = 1; hypre__J = hypre__thread; i1 = i2 = 0; \
/*if (hypre__ndim > 0)*/ { zypre_omp4_BoxLoopSet2Body(0, i1, i2) } \
if (hypre__ndim > 1) { zypre_omp4_BoxLoopSet2Body(1, i1, i2) } \
if (hypre__ndim > 2) { zypre_omp4_BoxLoopSet2Body(2, i1, i2) }
/* Same decode for three boxes (i1, i2, i3). */
#define zypre_omp4_BoxLoopSet3Body(j, i1, i2, i3) \
/* */ \
hypre__i = hypre__J % HYPRE_XCONCAT2(hypre__loop_size,j); \
/* 3 times */ \
hypre__i_1 = hypre__i * HYPRE_XCONCAT3(hypre__stride,j,1) + HYPRE_XCONCAT3(hypre__box_start_imin,j,1);\
hypre__i_2 = hypre__i * HYPRE_XCONCAT3(hypre__stride,j,2) + HYPRE_XCONCAT3(hypre__box_start_imin,j,2);\
hypre__i_3 = hypre__i * HYPRE_XCONCAT3(hypre__stride,j,3) + HYPRE_XCONCAT3(hypre__box_start_imin,j,3);\
/* 3 times */ \
i1 += hypre__i_1 * hypre__I_1; \
i2 += hypre__i_2 * hypre__I_2; \
i3 += hypre__i_3 * hypre__I_3; \
/* 3 times */ \
hypre__I_1 *= HYPRE_XCONCAT3(hypre__box_imax_imin,j,1); \
hypre__I_2 *= HYPRE_XCONCAT3(hypre__box_imax_imin,j,2); \
hypre__I_3 *= HYPRE_XCONCAT3(hypre__box_imax_imin,j,3); \
/* */ \
hypre__J /= HYPRE_XCONCAT2(hypre__loop_size,j);
#define zypre_omp4_BoxLoopSet3(i1, i2, i3) \
HYPRE_Int hypre__I_1, hypre__I_2, hypre__I_3, hypre__i, hypre__i_1, hypre__i_2, hypre__i_3, hypre__J, i1, i2, i3; \
hypre__I_1 = hypre__I_2 = hypre__I_3 = 1; hypre__J = hypre__thread; i1 = i2 = i3 = 0; \
/*if (hypre__ndim > 0)*/ { zypre_omp4_BoxLoopSet3Body(0, i1, i2, i3) } \
if (hypre__ndim > 1) { zypre_omp4_BoxLoopSet3Body(1, i1, i2, i3) } \
if (hypre__ndim > 2) { zypre_omp4_BoxLoopSet3Body(2, i1, i2, i3) }
#if 0
/* - - - - - special Box 3: XXX */
#define zypre_omp4_BoxLoopSet3_SAME_STRIDE_Body(j, i1, i2, i3) \
/* */ \
hypre__i = (hypre__J % HYPRE_XCONCAT2(hypre__loop_size,j)) * HYPRE_XCONCAT3(hypre__stride,j,1); \
/* 3 times */ \
hypre__i_1 = hypre__i + HYPRE_XCONCAT3(hypre__box_start_imin,j,1);\
hypre__i_2 = hypre__i + HYPRE_XCONCAT3(hypre__box_start_imin,j,2);\
hypre__i_3 = hypre__i + HYPRE_XCONCAT3(hypre__box_start_imin,j,3);\
/* 3 times */ \
i1 += hypre__i_1 * hypre__I_1; \
i2 += hypre__i_2 * hypre__I_2; \
i3 += hypre__i_3 * hypre__I_3; \
/* 3 times */ \
hypre__I_1 *= HYPRE_XCONCAT3(hypre__box_imax_imin,j,1); \
hypre__I_2 *= HYPRE_XCONCAT3(hypre__box_imax_imin,j,2); \
hypre__I_3 *= HYPRE_XCONCAT3(hypre__box_imax_imin,j,3); \
/* */ \
hypre__J /= HYPRE_XCONCAT2(hypre__loop_size,j);
#define zypre_omp4_BoxLoopSet3_SAME_STRIDE(i1, i2, o2, i3) \
HYPRE_Int hypre__I_1, hypre__I_2, hypre__I_3, hypre__i, hypre__i_1, hypre__i_2, hypre__i_3, hypre__J; \
hypre__I_1 = hypre__I_2 = hypre__I_3 = 1; hypre__J = hypre__thread; i1 = i3 = 0; i2 = o2;\
/*if (hypre__ndim > 0)*/ { zypre_omp4_BoxLoopSet3_SAME_STRIDE_Body(0, i1, i2, i3) } \
if (hypre__ndim > 1) { zypre_omp4_BoxLoopSet3_SAME_STRIDE_Body(1, i1, i2, i3) } \
if (hypre__ndim > 2) { zypre_omp4_BoxLoopSet3_SAME_STRIDE_Body(2, i1, i2, i3) }
#endif
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - -
* device code for BoxLoop 4, set i1, i2, i3, i4
* - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
/* Device code: decode hypre__thread into four per-box linear offsets
 * (i1..i4); same mixed-radix scheme as the 1/2/3-box variants. */
#define zypre_omp4_BoxLoopSet4Body(j, i1, i2, i3, i4) \
/* */ \
hypre__i = hypre__J % HYPRE_XCONCAT2(hypre__loop_size,j); \
/* 4 times */ \
hypre__i_1 = hypre__i * HYPRE_XCONCAT3(hypre__stride,j,1) + HYPRE_XCONCAT3(hypre__box_start_imin,j,1);\
hypre__i_2 = hypre__i * HYPRE_XCONCAT3(hypre__stride,j,2) + HYPRE_XCONCAT3(hypre__box_start_imin,j,2);\
hypre__i_3 = hypre__i * HYPRE_XCONCAT3(hypre__stride,j,3) + HYPRE_XCONCAT3(hypre__box_start_imin,j,3);\
hypre__i_4 = hypre__i * HYPRE_XCONCAT3(hypre__stride,j,4) + HYPRE_XCONCAT3(hypre__box_start_imin,j,4);\
/* 4 times */ \
i1 += hypre__i_1 * hypre__I_1; \
i2 += hypre__i_2 * hypre__I_2; \
i3 += hypre__i_3 * hypre__I_3; \
i4 += hypre__i_4 * hypre__I_4; \
/* 4 times */ \
hypre__I_1 *= HYPRE_XCONCAT3(hypre__box_imax_imin,j,1); \
hypre__I_2 *= HYPRE_XCONCAT3(hypre__box_imax_imin,j,2); \
hypre__I_3 *= HYPRE_XCONCAT3(hypre__box_imax_imin,j,3); \
hypre__I_4 *= HYPRE_XCONCAT3(hypre__box_imax_imin,j,4); \
/* */ \
hypre__J /= HYPRE_XCONCAT2(hypre__loop_size,j);
#define zypre_omp4_BoxLoopSet4(i1, i2, i3, i4) \
HYPRE_Int hypre__I_1, hypre__I_2, hypre__I_3, hypre__I_4, hypre__i, hypre__i_1, hypre__i_2, hypre__i_3, hypre__i_4, hypre__J, i1, i2, i3, i4; \
hypre__I_1 = hypre__I_2 = hypre__I_3 = hypre__I_4 = 1; hypre__J = hypre__thread; i1 = i2 = i3 = i4 = 0; \
/*if (hypre__ndim > 0)*/ { zypre_omp4_BoxLoopSet4Body(0, i1, i2, i3, i4) } \
if (hypre__ndim > 1) { zypre_omp4_BoxLoopSet4Body(1, i1, i2, i3, i4) } \
if (hypre__ndim > 2) { zypre_omp4_BoxLoopSet4Body(2, i1, i2, i3, i4) }
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - -
* BoxLoop 0
* - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
/* BoxLoop over 0 boxes: open a target-distribute loop over hypre__tot with
 * no index decoding; the body follows at the expansion site and is closed
 * by zypre_omp4_dist_BoxLoopEnd.  Unlike the 1-4 box variants there is no
 * Set line after the opening brace, so the final line must not carry a
 * continuation: the previous revision's trailing backslash spliced the
 * following source line into this macro's replacement list. */
#define zypre_omp4_dist_BoxLoop0Begin(ndim, loop_size) \
{\
/* host code: */ \
HYPRE_BOXLOOP_ENTRY_PRINT \
zypre_omp4_BoxLoopDeclareInit(ndim, loop_size) \
/* device code: */ \
_Pragma (HYPRE_XSTR(omp target teams distribute parallel for IF_CLAUSE MAP_CLAUSE0 IS_DEVICE_CLAUSE TEAM_CLAUSE)) \
for (HYPRE_Int hypre__thread = 0; hypre__thread < hypre__tot; hypre__thread++) \
{
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - -
* BoxLoop 1
* - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
/* BoxLoop over 1 box: host side precomputes the per-box scalars, device
 * side runs a flat target-distribute loop and decodes i1 per iteration.
 * The macro intentionally ends inside two open braces (closed by the
 * matching End macro at the expansion site). */
#define zypre_omp4_dist_BoxLoop1Begin(ndim, loop_size, dbox1, start1, stride1, i1) \
{\
/* host code: */ \
HYPRE_BOXLOOP_ENTRY_PRINT \
zypre_omp4_BoxLoopDeclareInit(ndim, loop_size) \
zypre_omp4_BoxKDeclareInit(1, start1, dbox1, stride1) \
/* device code: */ \
_Pragma (HYPRE_XSTR(omp target teams distribute parallel for IF_CLAUSE MAP_CLAUSE1 IS_DEVICE_CLAUSE HYPRE_BOX_REDUCTION TEAM_CLAUSE)) \
for (HYPRE_Int hypre__thread = 0; hypre__thread < hypre__tot; hypre__thread++) \
{\
zypre_omp4_BoxLoopSet1(i1)
/* BoxLoop over 2 boxes (i1, i2); same structure as the 1-box variant. */
#define zypre_omp4_dist_BoxLoop2Begin(ndim, loop_size, dbox1, start1, stride1, i1, dbox2, start2, stride2, i2) \
{\
/* host code: */ \
HYPRE_BOXLOOP_ENTRY_PRINT \
zypre_omp4_BoxLoopDeclareInit(ndim, loop_size) \
zypre_omp4_BoxKDeclareInit(1, start1, dbox1, stride1) \
zypre_omp4_BoxKDeclareInit(2, start2, dbox2, stride2) \
/* device code: */ \
_Pragma (HYPRE_XSTR(omp target teams distribute parallel for IF_CLAUSE MAP_CLAUSE2 IS_DEVICE_CLAUSE HYPRE_BOX_REDUCTION TEAM_CLAUSE)) \
for (HYPRE_Int hypre__thread = 0; hypre__thread < hypre__tot; hypre__thread++) \
{\
zypre_omp4_BoxLoopSet2(i1, i2)
/* BoxLoop over 3 boxes (i1, i2, i3).  NOTE(review): this pragma omits
 * HYPRE_BOX_REDUCTION unlike BoxLoop1/2 — presumably no 3-box reductions
 * exist; confirm against callers. */
#define zypre_omp4_dist_BoxLoop3Begin(ndim, loop_size, \
dbox1, start1, stride1, i1, \
dbox2, start2, stride2, i2, \
dbox3, start3, stride3, i3) \
{\
/* host code: */ \
HYPRE_BOXLOOP_ENTRY_PRINT \
zypre_omp4_BoxLoopDeclareInit(ndim, loop_size) \
zypre_omp4_BoxKDeclareInit(1, start1, dbox1, stride1) \
zypre_omp4_BoxKDeclareInit(2, start2, dbox2, stride2) \
zypre_omp4_BoxKDeclareInit(3, start3, dbox3, stride3) \
/* device code: */ \
_Pragma (HYPRE_XSTR(omp target teams distribute parallel for IF_CLAUSE MAP_CLAUSE3 IS_DEVICE_CLAUSE TEAM_CLAUSE)) \
for (HYPRE_Int hypre__thread = 0; hypre__thread < hypre__tot; hypre__thread++) \
{\
zypre_omp4_BoxLoopSet3(i1, i2, i3)
#if 0
#define zypre_omp4_dist_BoxLoop3_SAME_STRIDE_Begin(ndim, loop_size, \
dbox1, start1, stride1, i1, \
dbox2, start2, stride2, i2, o2, \
dbox3, start3, stride3, i3) \
{\
/* host code: */ \
zypre_omp4_BoxLoopDeclareInit(ndim, loop_size) \
zypre_omp4_BoxKDeclareInit(1, start1, dbox1, stride1) \
zypre_omp4_BoxKDeclareInit(2, start2, dbox2, stride2) \
zypre_omp4_BoxKDeclareInit(3, start3, dbox3, stride3) \
/* device code: */ \
_Pragma (HYPRE_XSTR(omp target teams distribute parallel for IF_CLAUSE MAP_CLAUSE3 IS_DEVICE_CLAUSE TEAM_CLAUSE)) \
for (HYPRE_Int hypre__thread = 0; hypre__thread < hypre__tot; hypre__thread++) \
{\
zypre_omp4_BoxLoopSet3_SAME_STRIDE(i1, i2, o2, i3)
#endif
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - -
* BoxLoop 4
* - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
/* BoxLoop over 4 boxes (i1..i4); same structure as the 1-3 box variants,
 * closed by zypre_omp4_dist_BoxLoopEnd at the expansion site. */
#define zypre_omp4_dist_BoxLoop4Begin(ndim, loop_size, \
dbox1, start1, stride1, i1, \
dbox2, start2, stride2, i2, \
dbox3, start3, stride3, i3, \
dbox4, start4, stride4, i4) \
{\
/* host code: */ \
HYPRE_BOXLOOP_ENTRY_PRINT \
zypre_omp4_BoxLoopDeclareInit(ndim, loop_size) \
zypre_omp4_BoxKDeclareInit(1, start1, dbox1, stride1) \
zypre_omp4_BoxKDeclareInit(2, start2, dbox2, stride2) \
zypre_omp4_BoxKDeclareInit(3, start3, dbox3, stride3) \
zypre_omp4_BoxKDeclareInit(4, start4, dbox4, stride4) \
/* device code: */ \
_Pragma (HYPRE_XSTR(omp target teams distribute parallel for IF_CLAUSE MAP_CLAUSE4 IS_DEVICE_CLAUSE TEAM_CLAUSE)) \
for (HYPRE_Int hypre__thread = 0; hypre__thread < hypre__tot; hypre__thread++) \
{\
zypre_omp4_BoxLoopSet4(i1, i2, i3, i4)
#if 0
/* no longer needed, use the above BoxLoop's for reductions */
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - -
* BoxLoop 1 reduction
* - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
#define zypre_omp4_dist_Red_BoxLoop1Begin(ndim, loop_size, dbox1, start1, stride1, i1, xsum) \
{\
/* host code: */ \
zypre_omp4_BoxLoopDeclareInit(ndim, loop_size) \
zypre_omp4_BoxKDeclareInit(1, start1, dbox1, stride1) \
/* device code: */ \
_Pragma (HYPRE_XSTR(omp target teams distribute parallel for IF_CLAUSE MAP_CLAUSE1 map(tofrom: xsum) reduction(+:xsum) TEAM_CLAUSE)) \
for (HYPRE_Int hypre__thread = 0; hypre__thread < hypre__tot; hypre__thread++) \
{\
zypre_omp4_BoxLoopSet1(i1)
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - -
* BoxLoop 2 reduction
* - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
#define zypre_omp4_dist_Red_BoxLoop2Begin(ndim, loop_size, dbox1, start1, stride1, i1, dbox2, start2, stride2, i2, xsum) \
{\
/* host code: */ \
zypre_omp4_BoxLoopDeclareInit(ndim, loop_size) \
zypre_omp4_BoxKDeclareInit(1, start1, dbox1, stride1) \
zypre_omp4_BoxKDeclareInit(2, start2, dbox2, stride2) \
/* device code: */ \
_Pragma (HYPRE_XSTR(omp target teams distribute parallel for IF_CLAUSE MAP_CLAUSE2 map(tofrom: xsum) reduction(+:xsum) TEAM_CLAUSE)) \
for (HYPRE_Int hypre__thread = 0; hypre__thread < hypre__tot; hypre__thread++) \
{\
zypre_omp4_BoxLoopSet2(i1, i2)
#endif
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - -
* v2
* host code: declare and initialize variables for box k
* - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
/* v2 host code: for a "basic" box k only the strides are needed (no dbox,
 * no start offset).  The final line deliberately carries no trailing
 * backslash: the previous revision's stray continuation spliced the
 * following source line (a comment) into this macro's replacement list. */
#define zypre_omp4_BoxKDeclareInit_v2(k, stridek)\
/* stridek[0,1,2] */ \
HYPRE_Int HYPRE_XCONCAT3(hypre__stride,0,k), HYPRE_XCONCAT3(hypre__stride,1,k), HYPRE_XCONCAT3(hypre__stride,2,k); \
/*if (hypre__ndim > 0)*/ { HYPRE_XCONCAT3(hypre__stride,0,k) = stridek[0]; } \
if (hypre__ndim > 1) { HYPRE_XCONCAT3(hypre__stride,1,k) = stridek[1]; } \
if (hypre__ndim > 2) { HYPRE_XCONCAT3(hypre__stride,2,k) = stridek[2]; }
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - -
* v2
* device code for BoxLoop 1, set i1
* - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
/* v2 device code for one box: stride-only decode of hypre__thread — each
 * dimension contributes coord * stride directly (no box extents). */
#define zypre_omp4_BoxLoopSet1Body_v2(j, i1) \
i1 += ( hypre__J % HYPRE_XCONCAT2(hypre__loop_size,j) ) * HYPRE_XCONCAT3(hypre__stride,j,1);\
hypre__J /= HYPRE_XCONCAT2(hypre__loop_size,j);
/* Declare and compute i1 and idx (raw flat id) for a basic 1-box loop. */
#define zypre_omp4_BoxLoopSet1_v2(i1, idx) \
HYPRE_Int hypre__J, i1, idx; \
idx = hypre__J = hypre__thread; i1 = 0; \
/*if (hypre__ndim > 0)*/ { zypre_omp4_BoxLoopSet1Body_v2(0, i1) } \
if (hypre__ndim > 1) { zypre_omp4_BoxLoopSet1Body_v2(1, i1) } \
if (hypre__ndim > 2) { zypre_omp4_BoxLoopSet1Body_v2(2, i1) }
/* v2 Basic BoxLoop over one box: open a target-distribute loop, then
 * decode i1/idx per iteration; closed by the shared End macro. */
#define zypre_omp4_dist_BoxLoop1_v2_Begin(ndim, loop_size, stride1, i1, idx) \
{\
/* host code: */ \
HYPRE_BOXLOOP_ENTRY_PRINT \
zypre_omp4_BoxLoopDeclareInit(ndim, loop_size) \
zypre_omp4_BoxKDeclareInit_v2(1, stride1) \
/* device code: */ \
_Pragma (HYPRE_XSTR(omp target teams distribute parallel for IF_CLAUSE MAP_CLAUSE1 IS_DEVICE_CLAUSE TEAM_CLAUSE)) \
for (HYPRE_Int hypre__thread = 0; hypre__thread < hypre__tot; hypre__thread++) \
{\
zypre_omp4_BoxLoopSet1_v2(i1, idx)
/* v2 device code for two boxes: shared coordinate, per-box strides. */
#define zypre_omp4_BoxLoopSet2Body_v2(j, i1, i2) \
hypre__i = hypre__J % HYPRE_XCONCAT2(hypre__loop_size,j); \
/* twice */ \
i1 += hypre__i * HYPRE_XCONCAT3(hypre__stride,j,1); \
i2 += hypre__i * HYPRE_XCONCAT3(hypre__stride,j,2); \
hypre__J /= HYPRE_XCONCAT2(hypre__loop_size,j);
#define zypre_omp4_BoxLoopSet2_v2(i1, i2) \
HYPRE_Int hypre__i, hypre__J, i1, i2; \
hypre__J = hypre__thread; i1 = i2 = 0; \
/*if (hypre__ndim > 0)*/ { zypre_omp4_BoxLoopSet2Body_v2(0, i1, i2) } \
if (hypre__ndim > 1) { zypre_omp4_BoxLoopSet2Body_v2(1, i1, i2) } \
if (hypre__ndim > 2) { zypre_omp4_BoxLoopSet2Body_v2(2, i1, i2) }
/* v2 Basic BoxLoop over two boxes; bound to hypre_BasicBoxLoop2Begin. */
#define zypre_omp4_dist_BoxLoop2_v2_Begin(ndim, loop_size, stride1, i1, stride2, i2) \
{ \
/* host code: */ \
HYPRE_BOXLOOP_ENTRY_PRINT \
zypre_omp4_BoxLoopDeclareInit(ndim, loop_size) \
zypre_omp4_BoxKDeclareInit_v2(1, stride1) \
zypre_omp4_BoxKDeclareInit_v2(2, stride2) \
/* device code: */ \
_Pragma (HYPRE_XSTR(omp target teams distribute parallel for IF_CLAUSE MAP_CLAUSE2 IS_DEVICE_CLAUSE TEAM_CLAUSE)) \
for (HYPRE_Int hypre__thread = 0; hypre__thread < hypre__tot; hypre__thread++) \
{ \
zypre_omp4_BoxLoopSet2_v2(i1, i2)
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - -
* Basic Loop
* - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
/* Flat 1-D target-distribute loop over [0, size); body follows at the
 * expansion site and is closed by hypre_LoopEnd (zypre_omp4_dist_BoxLoopEnd). */
#define zypre_LoopBegin(size, idx) \
{ \
/* host code: */ \
/* HYPRE_Int idx = 0; */\
HYPRE_Int hypre__tot = size; \
HYPRE_BOXLOOP_ENTRY_PRINT \
/* device code: */ \
_Pragma (HYPRE_XSTR(omp target teams distribute parallel for IF_CLAUSE MAP_CLAUSE2 IS_DEVICE_CLAUSE TEAM_CLAUSE)) \
for (HYPRE_Int idx = 0; idx < hypre__tot; idx++) \
{
#if 0
#define hypre_LoopBegin0(size, idx) \
{ \
HYPRE_Int idx, hypre__size = size; \
for (idx = 0; idx < hypre__size; idx++) \
{
#define hypre_newBoxLoopGetIndex(index) \
index[0] = hypre__id_0; \
index[1] = hypre__id_1; \
index[2] = hypre__id_2;
#endif
/* Reduction BoxLoops in the device-OpenMP backend are plain BoxLoops: the
 * reducesum argument is accepted but ignored (HYPRE_BOX_REDUCTION on the
 * pragma handles the reduction clause where needed). */
#define hypre_BoxLoop1ReductionBegin(ndim, loop_size, dbox1, start1, stride1, i1, reducesum) \
hypre_BoxLoop1Begin(ndim, loop_size, dbox1, start1, stride1, i1)
#define hypre_BoxLoop1ReductionEnd(i1, reducesum) \
hypre_BoxLoop1End(i1)
#define hypre_BoxLoop2ReductionBegin(ndim, loop_size, dbox1, start1, stride1, i1, \
dbox2, start2, stride2, i2, reducesum) \
hypre_BoxLoop2Begin(ndim, loop_size, dbox1, start1, stride1, i1, \
dbox2, start2, stride2, i2)
#define hypre_BoxLoop2ReductionEnd(i1, i2, reducesum) \
hypre_BoxLoop2End(i1, i2)
#endif
#else
/******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Header info for the BoxLoop
*
*****************************************************************************/
/*--------------------------------------------------------------------------
* BoxLoop macros:
*--------------------------------------------------------------------------*/
#ifndef HYPRE_NEWBOXLOOP_HEADER
#define HYPRE_NEWBOXLOOP_HEADER
#ifdef HYPRE_USING_OPENMP
/* Host-OpenMP backend: OMP1 expands to the parallel-for pragma used by the
 * block-partitioned BoxLoops below; empty when OpenMP is off. */
#define HYPRE_BOX_REDUCTION
#ifdef WIN32
#define Pragma(x) __pragma(HYPRE_XSTR(x))
#else
#define Pragma(x) _Pragma(HYPRE_XSTR(x))
#endif
#define OMP1 Pragma(omp parallel for private(HYPRE_BOX_PRIVATE) HYPRE_BOX_REDUCTION HYPRE_SMP_SCHEDULE)
#else
#define OMP1
#endif
/* Per-box loop metadata used by the "basic" loops: sizes, strides, and
 * block start/size for each of the three dimensions. */
typedef struct hypre_Boxloop_struct
{
HYPRE_Int lsize0,lsize1,lsize2;
HYPRE_Int strides0,strides1,strides2;
HYPRE_Int bstart0,bstart1,bstart2;
HYPRE_Int bsize0,bsize1,bsize2;
} hypre_Boxloop;
/* Host BoxLoop over 0 boxes: outer OMP1-parallel loop over blocks, inner
 * J/I loops walk each block; body follows at the expansion site. */
#define zypre_newBoxLoop0Begin(ndim, loop_size) \
{ \
zypre_BoxLoopDeclare(); \
zypre_BoxLoopInit(ndim, loop_size); \
OMP1 \
for (hypre__block = 0; hypre__block < hypre__num_blocks; hypre__block++) \
{ \
zypre_BoxLoopSet(); \
for (hypre__J = 0; hypre__J < hypre__JN; hypre__J++) \
{ \
for (hypre__I = 0; hypre__I < hypre__IN; hypre__I++) \
{
#define zypre_newBoxLoop0End() \
} \
zypre_BoxLoopInc1(); \
zypre_BoxLoopInc2(); \
} \
} \
}
/* Host BoxLoop over 1 box: i1 is redeclared inside the parallel region so
 * each thread carries its own copy; the End macro applies the inner and
 * dimension increments. */
#define zypre_newBoxLoop1Begin(ndim, loop_size, \
dbox1, start1, stride1, i1) \
{ \
HYPRE_Int i1; \
zypre_BoxLoopDeclare(); \
zypre_BoxLoopDeclareK(1); \
zypre_BoxLoopInit(ndim, loop_size); \
zypre_BoxLoopInitK(1, dbox1, start1, stride1, i1); \
OMP1 \
for (hypre__block = 0; hypre__block < hypre__num_blocks; hypre__block++) \
{ \
HYPRE_Int i1; \
zypre_BoxLoopSet(); \
zypre_BoxLoopSetK(1, i1); \
for (hypre__J = 0; hypre__J < hypre__JN; hypre__J++) \
{ \
for (hypre__I = 0; hypre__I < hypre__IN; hypre__I++) \
{
#define zypre_newBoxLoop1End(i1) \
i1 += hypre__i0inc1; \
} \
zypre_BoxLoopInc1(); \
i1 += hypre__ikinc1[hypre__d]; \
zypre_BoxLoopInc2(); \
} \
} \
}
/* Host BoxLoop over 2 boxes; structure parallels the 1-box variant with a
 * second index i2 carried through the increments. */
#define zypre_newBoxLoop2Begin(ndim, loop_size, \
dbox1, start1, stride1, i1, \
dbox2, start2, stride2, i2) \
{ \
HYPRE_Int i1, i2; \
zypre_BoxLoopDeclare(); \
zypre_BoxLoopDeclareK(1); \
zypre_BoxLoopDeclareK(2); \
zypre_BoxLoopInit(ndim, loop_size); \
zypre_BoxLoopInitK(1, dbox1, start1, stride1, i1); \
zypre_BoxLoopInitK(2, dbox2, start2, stride2, i2); \
OMP1 \
for (hypre__block = 0; hypre__block < hypre__num_blocks; hypre__block++) \
{ \
HYPRE_Int i1, i2; \
zypre_BoxLoopSet(); \
zypre_BoxLoopSetK(1, i1); \
zypre_BoxLoopSetK(2, i2); \
for (hypre__J = 0; hypre__J < hypre__JN; hypre__J++) \
{ \
for (hypre__I = 0; hypre__I < hypre__IN; hypre__I++) \
{
#define zypre_newBoxLoop2End(i1, i2) \
i1 += hypre__i0inc1; \
i2 += hypre__i0inc2; \
} \
zypre_BoxLoopInc1(); \
i1 += hypre__ikinc1[hypre__d]; \
i2 += hypre__ikinc2[hypre__d]; \
zypre_BoxLoopInc2(); \
} \
} \
}
/* Host BoxLoop over 3 boxes. */
#define zypre_newBoxLoop3Begin(ndim, loop_size, \
dbox1, start1, stride1, i1, \
dbox2, start2, stride2, i2, \
dbox3, start3, stride3, i3) \
{ \
HYPRE_Int i1, i2, i3; \
zypre_BoxLoopDeclare(); \
zypre_BoxLoopDeclareK(1); \
zypre_BoxLoopDeclareK(2); \
zypre_BoxLoopDeclareK(3); \
zypre_BoxLoopInit(ndim, loop_size); \
zypre_BoxLoopInitK(1, dbox1, start1, stride1, i1); \
zypre_BoxLoopInitK(2, dbox2, start2, stride2, i2); \
zypre_BoxLoopInitK(3, dbox3, start3, stride3, i3); \
OMP1 \
for (hypre__block = 0; hypre__block < hypre__num_blocks; hypre__block++) \
{ \
HYPRE_Int i1, i2, i3; \
zypre_BoxLoopSet(); \
zypre_BoxLoopSetK(1, i1); \
zypre_BoxLoopSetK(2, i2); \
zypre_BoxLoopSetK(3, i3); \
for (hypre__J = 0; hypre__J < hypre__JN; hypre__J++) \
{ \
for (hypre__I = 0; hypre__I < hypre__IN; hypre__I++) \
{
#define zypre_newBoxLoop3End(i1, i2, i3) \
i1 += hypre__i0inc1; \
i2 += hypre__i0inc2; \
i3 += hypre__i0inc3; \
} \
zypre_BoxLoopInc1(); \
i1 += hypre__ikinc1[hypre__d]; \
i2 += hypre__ikinc2[hypre__d]; \
i3 += hypre__ikinc3[hypre__d]; \
zypre_BoxLoopInc2(); \
} \
} \
}
/* Host BoxLoop over 4 boxes; same block-partitioned scheme as 1-3 boxes. */
#define zypre_newBoxLoop4Begin(ndim, loop_size, \
dbox1, start1, stride1, i1, \
dbox2, start2, stride2, i2, \
dbox3, start3, stride3, i3, \
dbox4, start4, stride4, i4) \
{ \
HYPRE_Int i1, i2, i3, i4; \
zypre_BoxLoopDeclare(); \
zypre_BoxLoopDeclareK(1); \
zypre_BoxLoopDeclareK(2); \
zypre_BoxLoopDeclareK(3); \
zypre_BoxLoopDeclareK(4); \
zypre_BoxLoopInit(ndim, loop_size); \
zypre_BoxLoopInitK(1, dbox1, start1, stride1, i1); \
zypre_BoxLoopInitK(2, dbox2, start2, stride2, i2); \
zypre_BoxLoopInitK(3, dbox3, start3, stride3, i3); \
zypre_BoxLoopInitK(4, dbox4, start4, stride4, i4); \
OMP1 \
for (hypre__block = 0; hypre__block < hypre__num_blocks; hypre__block++) \
{ \
HYPRE_Int i1, i2, i3, i4; \
zypre_BoxLoopSet(); \
zypre_BoxLoopSetK(1, i1); \
zypre_BoxLoopSetK(2, i2); \
zypre_BoxLoopSetK(3, i3); \
zypre_BoxLoopSetK(4, i4); \
for (hypre__J = 0; hypre__J < hypre__JN; hypre__J++) \
{ \
for (hypre__I = 0; hypre__I < hypre__IN; hypre__I++) \
{
#define zypre_newBoxLoop4End(i1, i2, i3, i4) \
i1 += hypre__i0inc1; \
i2 += hypre__i0inc2; \
i3 += hypre__i0inc3; \
i4 += hypre__i0inc4; \
} \
zypre_BoxLoopInc1(); \
i1 += hypre__ikinc1[hypre__d]; \
i2 += hypre__ikinc2[hypre__d]; \
i3 += hypre__ikinc3[hypre__d]; \
i4 += hypre__ikinc4[hypre__d]; \
zypre_BoxLoopInc2(); \
} \
} \
}
/* Host "basic" BoxLoop over 2 stride-only boxes (no dbox); i1/i2 exist only
 * inside the parallel region.  Closed by zypre_newBoxLoop2End. */
#define zypre_newBasicBoxLoop2Begin(ndim, loop_size, \
stride1, i1, \
stride2, i2) \
{ \
zypre_BoxLoopDeclare(); \
zypre_BoxLoopDeclareK(1); \
zypre_BoxLoopDeclareK(2); \
zypre_BoxLoopInit(ndim, loop_size); \
zypre_BasicBoxLoopInitK(1, stride1); \
zypre_BasicBoxLoopInitK(2, stride2); \
OMP1 \
for (hypre__block = 0; hypre__block < hypre__num_blocks; hypre__block++) \
{ \
HYPRE_Int i1, i2; \
zypre_BoxLoopSet(); \
zypre_BoxLoopSetK(1, i1); \
zypre_BoxLoopSetK(2, i2); \
for (hypre__J = 0; hypre__J < hypre__JN; hypre__J++) \
{ \
for (hypre__I = 0; hypre__I < hypre__IN; hypre__I++) \
{
/* Plain serial 1-D loop over [0, size); closed by hypre_LoopEnd(). */
#define hypre_LoopBegin(size,idx) \
{ \
HYPRE_Int idx; \
for (idx = 0;idx < size;idx ++) \
{
#define hypre_LoopEnd() \
} \
}
/* Public BoxLoop API names bound to the host (zypre_new*) variants. */
#define hypre_newBoxLoopGetIndex zypre_BoxLoopGetIndex
#define hypre_BoxLoopGetIndex zypre_BoxLoopGetIndex
#define hypre_BoxLoopSetOneBlock zypre_BoxLoopSetOneBlock
#define hypre_BoxLoopBlock zypre_BoxLoopBlock
#define hypre_BoxLoop0Begin zypre_newBoxLoop0Begin
#define hypre_BoxLoop0End zypre_newBoxLoop0End
#define hypre_BoxLoop1Begin zypre_newBoxLoop1Begin
#define hypre_BoxLoop1End zypre_newBoxLoop1End
#define hypre_BoxLoop2Begin zypre_newBoxLoop2Begin
#define hypre_BoxLoop2End zypre_newBoxLoop2End
#define hypre_BoxLoop3Begin zypre_newBoxLoop3Begin
#define hypre_BoxLoop3End zypre_newBoxLoop3End
#define hypre_BoxLoop4Begin zypre_newBoxLoop4Begin
#define hypre_BoxLoop4End zypre_newBoxLoop4End
#define hypre_BasicBoxLoop2Begin zypre_newBasicBoxLoop2Begin
/* On the host backend the Reduction BoxLoops are plain BoxLoops; reducesum
 * is accepted but unused (OMP1's HYPRE_BOX_REDUCTION does the work). */
#define hypre_BoxLoop1ReductionBegin(ndim, loop_size, dbox1, start1, stride1, i1, reducesum) \
hypre_BoxLoop1Begin(ndim, loop_size, dbox1, start1, stride1, i1)
#define hypre_BoxLoop1ReductionEnd(i1, reducesum) \
hypre_BoxLoop1End(i1)
#define hypre_BoxLoop2ReductionBegin(ndim, loop_size, dbox1, start1, stride1, i1, \
dbox2, start2, stride2, i2, reducesum) \
hypre_BoxLoop2Begin(ndim, loop_size, dbox1, start1, stride1, i1, \
dbox2, start2, stride2, i2)
#define hypre_BoxLoop2ReductionEnd(i1, i2, reducesum) \
hypre_BoxLoop2End(i1, i2)
#endif
#endif
#ifdef __cplusplus
extern "C" {
#endif
/******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Header info for the Box structures
*
*****************************************************************************/
#ifndef hypre_BOX_HEADER
#define hypre_BOX_HEADER
#ifndef HYPRE_MAXDIM
/* Compile-time cap on the number of spatial dimensions; sizes hypre_Index
 * and the fixed-length arrays in the loop and box-manager structures. */
#define HYPRE_MAXDIM 3
#endif
/*--------------------------------------------------------------------------
* hypre_Index:
* This is used to define indices in index space, or dimension
* sizes of boxes.
*
* The spatial dimensions x, y, and z may be specified by the
* integers 0, 1, and 2, respectively (see the hypre_IndexD macro below).
* This simplifies the code in the hypre_Box class by reducing code
* replication.
*--------------------------------------------------------------------------*/
/* A point (or extent) in index space: one HYPRE_Int per dimension. */
typedef HYPRE_Int hypre_Index[HYPRE_MAXDIM];
/* Pointer form of hypre_Index (arrays decay to this when passed around). */
typedef HYPRE_Int *hypre_IndexRef;
/*--------------------------------------------------------------------------
* hypre_Box:
*--------------------------------------------------------------------------*/
/* Axis-aligned box given by INCLUSIVE min/max corner indices (size along a
 * dimension is imax - imin + 1; see hypre_BoxSizeD below). */
typedef struct hypre_Box_struct
{
hypre_Index imin; /* min bounding indices */
hypre_Index imax; /* max bounding indices */
HYPRE_Int ndim; /* number of dimensions */
} hypre_Box;
/*--------------------------------------------------------------------------
* hypre_BoxArray:
* An array of boxes.
* Since size can be zero, need to store ndim separately.
*--------------------------------------------------------------------------*/
typedef struct hypre_BoxArray_struct
{
hypre_Box *boxes; /* Array of boxes */
HYPRE_Int size; /* Size of box array */
HYPRE_Int alloc_size; /* Size of currently alloced space */
HYPRE_Int ndim; /* number of dimensions */
} hypre_BoxArray;
/* Extra slots added when a box array grows — presumably amortizes
 * reallocation (TODO confirm in the BoxArray resize routine). */
#define hypre_BoxArrayExcess 10
/*--------------------------------------------------------------------------
* hypre_BoxArrayArray:
* An array of box arrays.
* Since size can be zero, need to store ndim separately.
*--------------------------------------------------------------------------*/
/* An array of pointers to box arrays; ndim is stored here too because
 * size may be zero. */
typedef struct hypre_BoxArrayArray_struct
{
hypre_BoxArray **box_arrays; /* Array of pointers to box arrays */
HYPRE_Int size; /* Size of box array array */
HYPRE_Int ndim; /* number of dimensions */
} hypre_BoxArrayArray;
/*--------------------------------------------------------------------------
* Accessor macros: hypre_Index
*--------------------------------------------------------------------------*/
/* Component d of an index (d in [0, ndim)). */
#define hypre_IndexD(index, d) (index[d])
/* Avoid using these macros */
#define hypre_IndexX(index) hypre_IndexD(index, 0)
#define hypre_IndexY(index) hypre_IndexD(index, 1)
#define hypre_IndexZ(index) hypre_IndexD(index, 2)
/*--------------------------------------------------------------------------
 * Member functions: hypre_Index
 *--------------------------------------------------------------------------*/
/*----- Avoid using these Index macros -----*/
/* Sets only the first three components (comma expression; dims beyond 2,
 * if any, are left untouched). */
#define hypre_SetIndex3(index, ix, iy, iz) \
( hypre_IndexD(index, 0) = ix,\
hypre_IndexD(index, 1) = iy,\
hypre_IndexD(index, 2) = iz )
/* Delegates to hypre_SetIndex (defined elsewhere) to zero all components. */
#define hypre_ClearIndex(index) hypre_SetIndex(index, 0)
/*--------------------------------------------------------------------------
 * Accessor macros: hypre_Box
 *--------------------------------------------------------------------------*/
#define hypre_BoxIMin(box) ((box) -> imin)
#define hypre_BoxIMax(box) ((box) -> imax)
#define hypre_BoxNDim(box) ((box) -> ndim)
#define hypre_BoxIMinD(box, d) (hypre_IndexD(hypre_BoxIMin(box), d))
#define hypre_BoxIMaxD(box, d) (hypre_IndexD(hypre_BoxIMax(box), d))
/* Extent along dimension d (inclusive bounds), clamped to 0 for empty
 * boxes where imax < imin. */
#define hypre_BoxSizeD(box, d) \
hypre_max(0, (hypre_BoxIMaxD(box, d) - hypre_BoxIMinD(box, d) + 1))
#define hypre_IndexDInBox(index, d, box) \
( hypre_IndexD(index, d) >= hypre_BoxIMinD(box, d) && \
hypre_IndexD(index, d) <= hypre_BoxIMaxD(box, d) )
/* The first hypre_CCBoxIndexRank is better style because it is similar to
 * hypre_BoxIndexRank. The second one sometimes avoids compiler warnings.
 * All three expand to the constant 0 (constant-coefficient case). */
#define hypre_CCBoxIndexRank(box, index) 0
#define hypre_CCBoxIndexRank_noargs() 0
#define hypre_CCBoxOffsetDistance(box, index) 0
/*----- Avoid using these Box macros -----*/
#define hypre_BoxSizeX(box) hypre_BoxSizeD(box, 0)
#define hypre_BoxSizeY(box) hypre_BoxSizeD(box, 1)
#define hypre_BoxSizeZ(box) hypre_BoxSizeD(box, 2)
/*--------------------------------------------------------------------------
* Accessor macros: hypre_BoxArray
*--------------------------------------------------------------------------*/
#define hypre_BoxArrayBoxes(box_array) ((box_array) -> boxes)
/* Pointer to the i-th box (boxes are stored by value in the array). */
#define hypre_BoxArrayBox(box_array, i) &((box_array) -> boxes[(i)])
#define hypre_BoxArraySize(box_array) ((box_array) -> size)
#define hypre_BoxArrayAllocSize(box_array) ((box_array) -> alloc_size)
#define hypre_BoxArrayNDim(box_array) ((box_array) -> ndim)
/*--------------------------------------------------------------------------
 * Accessor macros: hypre_BoxArrayArray
 *--------------------------------------------------------------------------*/
#define hypre_BoxArrayArrayBoxArrays(box_array_array) \
((box_array_array) -> box_arrays)
#define hypre_BoxArrayArrayBoxArray(box_array_array, i) \
((box_array_array) -> box_arrays[(i)])
#define hypre_BoxArrayArraySize(box_array_array) \
((box_array_array) -> size)
#define hypre_BoxArrayArrayNDim(box_array_array) \
((box_array_array) -> ndim)
/*--------------------------------------------------------------------------
 * Looping macros: iterate i over the boxes / box arrays.  The caller
 * declares i and supplies the loop body.
 *--------------------------------------------------------------------------*/
#define hypre_ForBoxI(i, box_array) \
for (i = 0; i < hypre_BoxArraySize(box_array); i++)
#define hypre_ForBoxArrayI(i, box_array_array) \
for (i = 0; i < hypre_BoxArrayArraySize(box_array_array); i++)
/*--------------------------------------------------------------------------
* BoxLoop macros:
*--------------------------------------------------------------------------*/
/* Serial BoxLoops: identical structure to the threaded loops above but
 * forced to a single block via hypre_BoxLoopSetOneBlock() and with no
 * OMP pragma, so the body runs on the calling thread only. */
/* 0-index serial loop: iterates the box without tracking any data index. */
#define hypre_SerialBoxLoop0Begin(ndim, loop_size)\
{\
zypre_BoxLoopDeclare();\
zypre_BoxLoopInit(ndim, loop_size);\
hypre_BoxLoopSetOneBlock();\
for (hypre__block = 0; hypre__block < hypre__num_blocks; hypre__block++)\
{\
zypre_BoxLoopSet();\
for (hypre__J = 0; hypre__J < hypre__JN; hypre__J++)\
{\
for (hypre__I = 0; hypre__I < hypre__IN; hypre__I++)\
{
#define hypre_SerialBoxLoop0End()\
}\
zypre_BoxLoopInc1();\
zypre_BoxLoopInc2();\
}\
}\
}
/* 1-index serial loop: i1 walks dbox1's data linearly per iteration. */
#define hypre_SerialBoxLoop1Begin(ndim, loop_size,\
dbox1, start1, stride1, i1)\
{\
HYPRE_Int i1;\
zypre_BoxLoopDeclare();\
zypre_BoxLoopDeclareK(1);\
zypre_BoxLoopInit(ndim, loop_size);\
zypre_BoxLoopInitK(1, dbox1, start1, stride1, i1);\
zypre_BoxLoopSetOneBlock();\
for (hypre__block = 0; hypre__block < hypre__num_blocks; hypre__block++)\
{\
zypre_BoxLoopSet();\
zypre_BoxLoopSetK(1, i1);\
for (hypre__J = 0; hypre__J < hypre__JN; hypre__J++)\
{\
for (hypre__I = 0; hypre__I < hypre__IN; hypre__I++)\
{
#define hypre_SerialBoxLoop1End(i1)\
i1 += hypre__i0inc1;\
}\
zypre_BoxLoopInc1();\
i1 += hypre__ikinc1[hypre__d];\
zypre_BoxLoopInc2();\
}\
}\
}
/* 2-index serial loop: i1/i2 track two (possibly differently strided)
 * data boxes in lockstep. */
#define hypre_SerialBoxLoop2Begin(ndim, loop_size,\
dbox1, start1, stride1, i1,\
dbox2, start2, stride2, i2)\
{\
HYPRE_Int i1,i2;\
zypre_BoxLoopDeclare();\
zypre_BoxLoopDeclareK(1);\
zypre_BoxLoopDeclareK(2);\
zypre_BoxLoopInit(ndim, loop_size);\
zypre_BoxLoopInitK(1, dbox1, start1, stride1, i1);\
zypre_BoxLoopInitK(2, dbox2, start2, stride2, i2);\
zypre_BoxLoopSetOneBlock();\
for (hypre__block = 0; hypre__block < hypre__num_blocks; hypre__block++)\
{\
zypre_BoxLoopSet();\
zypre_BoxLoopSetK(1, i1);\
zypre_BoxLoopSetK(2, i2);\
for (hypre__J = 0; hypre__J < hypre__JN; hypre__J++)\
{\
for (hypre__I = 0; hypre__I < hypre__IN; hypre__I++)\
{
#define hypre_SerialBoxLoop2End(i1, i2)\
i1 += hypre__i0inc1;\
i2 += hypre__i0inc2;\
}\
zypre_BoxLoopInc1();\
i1 += hypre__ikinc1[hypre__d];\
i2 += hypre__ikinc2[hypre__d];\
zypre_BoxLoopInc2();\
}\
}\
}
/* Comma list of the per-iteration bookkeeping variables — presumably for
 * use in OpenMP private() clauses (TODO confirm against the OMP1 macro
 * definition, which is not in this file). */
#define ZYPRE_BOX_PRIVATE hypre__IN,hypre__JN,hypre__I,hypre__J,hypre__d,hypre__i
#define HYPRE_BOX_PRIVATE ZYPRE_BOX_PRIVATE
/* Declares the bookkeeping shared by all BoxLoop variants: the flattened
 * outer iteration count (tot) and its per-block div/mod split, block and
 * dimension cursors, inner (I/IN) and outer (J/JN) counters, and the
 * multi-index i[] / extents n[] with one sentinel slot past HYPRE_MAXDIM. */
#define zypre_BoxLoopDeclare() \
HYPRE_Int hypre__tot, hypre__div, hypre__mod;\
HYPRE_Int hypre__block, hypre__num_blocks;\
HYPRE_Int hypre__d, hypre__ndim;\
HYPRE_Int hypre__I, hypre__J, hypre__IN, hypre__JN;\
HYPRE_Int hypre__i[HYPRE_MAXDIM+1], hypre__n[HYPRE_MAXDIM+1]
/* Declares per-box (k-th data box) bookkeeping: starting linear index,
 * dim-0 increment, per-dimension strides (sk) and carry increments (ikinc). */
#define zypre_BoxLoopDeclareK(k) \
HYPRE_Int hypre__ikstart##k, hypre__i0inc##k;\
HYPRE_Int hypre__sk##k[HYPRE_MAXDIM], hypre__ikinc##k[HYPRE_MAXDIM+1]
/* Copies loop_size into n[], flattens dimensions 1..ndim-1 into hypre__tot
 * (the outer iteration count), plants the sentinel n[ndim] = 2 that
 * terminates zypre_BoxLoopInc1's scan, and splits the outer iterations
 * across hypre_NumThreads() blocks (div/mod; num_blocks is capped at tot,
 * and the division is guarded against num_blocks == 0). */
#define zypre_BoxLoopInit(ndim, loop_size) \
hypre__ndim = ndim;\
hypre__n[0] = loop_size[0];\
hypre__tot = 1;\
for (hypre__d = 1; hypre__d < hypre__ndim; hypre__d++)\
{\
hypre__n[hypre__d] = loop_size[hypre__d];\
hypre__tot *= hypre__n[hypre__d];\
}\
hypre__n[hypre__ndim] = 2;\
hypre__num_blocks = hypre_NumThreads();\
if (hypre__tot < hypre__num_blocks)\
{\
hypre__num_blocks = hypre__tot;\
}\
if (hypre__num_blocks > 0)\
{\
hypre__div = hypre__tot / hypre__num_blocks;\
hypre__mod = hypre__tot % hypre__num_blocks;\
}
/* Precomputes box k's traversal in dboxk's linearized data layout:
 * sk[d] is the linear step for one unit in dimension d (stride times the
 * product of lower box sizes, accumulated in ik which is borrowed as a
 * scratch running product); ikinc[d] is the net jump applied when
 * dimension d carries (its step minus the full extent just traversed in
 * dimension d-1); ikinc[ndim] = 0 makes the exhausted-loop carry a no-op;
 * ikstart is the linear rank of startk within dboxk. */
#define zypre_BoxLoopInitK(k, dboxk, startk, stridek, ik) \
hypre__sk##k[0] = stridek[0];\
hypre__ikinc##k[0] = 0;\
ik = hypre_BoxSizeD(dboxk, 0); /* temporarily use ik */\
for (hypre__d = 1; hypre__d < hypre__ndim; hypre__d++)\
{\
hypre__sk##k[hypre__d] = ik*stridek[hypre__d];\
hypre__ikinc##k[hypre__d] = hypre__ikinc##k[hypre__d-1] +\
hypre__sk##k[hypre__d] - hypre__n[hypre__d-1]*hypre__sk##k[hypre__d-1];\
ik *= hypre_BoxSizeD(dboxk, hypre__d);\
}\
hypre__i0inc##k = hypre__sk##k[0];\
hypre__ikinc##k[hypre__ndim] = 0;\
hypre__ikstart##k = hypre_BoxIndexRank(dboxk, startk)
/* Computes the current block's slice of the iteration space: IN is the
 * inner (dim-0) trip count, JN this block's share of the flattened outer
 * iterations (div plus one extra for the first 'mod' blocks), and the
 * block's starting outer iteration J is decomposed into the starting
 * multi-index hypre__i[1..ndim-1].  Single-block case takes the whole
 * space with a zero multi-index. */
#define zypre_BoxLoopSet() \
hypre__IN = hypre__n[0];\
if (hypre__num_blocks > 1)/* in case user sets num_blocks to 1 */\
{\
hypre__JN = hypre__div + ((hypre__mod > hypre__block) ? 1 : 0);\
hypre__J = hypre__block * hypre__div + hypre_min(hypre__mod, hypre__block);\
for (hypre__d = 1; hypre__d < hypre__ndim; hypre__d++)\
{\
hypre__i[hypre__d] = hypre__J % hypre__n[hypre__d];\
hypre__J /= hypre__n[hypre__d];\
}\
}\
else\
{\
hypre__JN = hypre__tot;\
for (hypre__d = 1; hypre__d < hypre__ndim; hypre__d++)\
{\
hypre__i[hypre__d] = 0;\
}\
}\
hypre__i[hypre__ndim] = 0
/* Box k's starting linear index for this block: ikstart plus the block's
 * starting multi-index dotted with the per-dimension strides. */
#define zypre_BoxLoopSetK(k, ik) \
ik = hypre__ikstart##k;\
for (hypre__d = 1; hypre__d < hypre__ndim; hypre__d++)\
{\
ik += hypre__i[hypre__d]*hypre__sk##k[hypre__d];\
}
/* Advances the dimension cursor d to the lowest dimension (>= 1) not yet
 * at its last index: (i[d]+2) > n[d] is equivalent to i[d] == n[d]-1.
 * The sentinel n[ndim] = 2 with i[ndim] = 0 guarantees the scan stops at
 * d = ndim when all dimensions are exhausted (where ikinc[ndim] = 0 makes
 * the subsequent index jumps no-ops). */
#define zypre_BoxLoopInc1() \
hypre__d = 1;\
while ((hypre__i[hypre__d]+2) > hypre__n[hypre__d])\
{\
hypre__d++;\
}
/* Carries the multi-index: increment dimension d, zero all lower dims. */
#define zypre_BoxLoopInc2() \
hypre__i[hypre__d]++;\
while (hypre__d > 1)\
{\
hypre__d--;\
hypre__i[hypre__d] = 0;\
}
/* This returns the loop index (of type hypre_Index) for the current iteration,
 * where the numbering starts at 0. It works even when threading is turned on,
 * as long as 'index' is declared to be private. */
#define zypre_BoxLoopGetIndex(index) \
index[0] = hypre__I;\
for (hypre__d = 1; hypre__d < hypre__ndim; hypre__d++)\
{\
index[hypre__d] = hypre__i[hypre__d];\
}
/* Use this before the For macros below to force only one block */
#define zypre_BoxLoopSetOneBlock() hypre__num_blocks = 1
/* Use this to get the block iteration inside a BoxLoop */
#define zypre_BoxLoopBlock() hypre__block
/* FIXME: Remove !!! */
/*-----------------------------------*/
/* Deprecated Begin/For/End triple (see FIXME above): Begin opens a scope
 * and initializes, For opens the block and J/I loops, End closes all of
 * them.  0-index variant: no data box is tracked. */
#define zypre_BoxLoop0Begin(ndim, loop_size)\
{\
zypre_BoxLoopDeclare();\
zypre_BoxLoopInit(ndim, loop_size);
#define zypre_BoxLoop0For()\
for (hypre__block = 0; hypre__block < hypre__num_blocks; hypre__block++)\
{\
zypre_BoxLoopSet();\
for (hypre__J = 0; hypre__J < hypre__JN; hypre__J++)\
{\
for (hypre__I = 0; hypre__I < hypre__IN; hypre__I++)\
{
#define zypre_BoxLoop0End()\
}\
zypre_BoxLoopInc1();\
zypre_BoxLoopInc2();\
}\
}\
}
/*-----------------------------------*/
/* 1-index Begin/For/End triple: i1 walks dbox1's linearized data.  Note
 * i1 is declared both at Begin scope (used as scratch by InitK) and again
 * privately inside each block by For. */
#define zypre_BoxLoop1Begin(ndim, loop_size,\
dbox1, start1, stride1, i1)\
{\
HYPRE_Int i1;\
zypre_BoxLoopDeclare();\
zypre_BoxLoopDeclareK(1);\
zypre_BoxLoopInit(ndim, loop_size);\
zypre_BoxLoopInitK(1, dbox1, start1, stride1, i1);
#define zypre_BoxLoop1For(i1)\
for (hypre__block = 0; hypre__block < hypre__num_blocks; hypre__block++)\
{\
HYPRE_Int i1;\
zypre_BoxLoopSet();\
zypre_BoxLoopSetK(1, i1);\
for (hypre__J = 0; hypre__J < hypre__JN; hypre__J++)\
{\
for (hypre__I = 0; hypre__I < hypre__IN; hypre__I++)\
{
#define zypre_BoxLoop1End(i1)\
i1 += hypre__i0inc1;\
}\
zypre_BoxLoopInc1();\
i1 += hypre__ikinc1[hypre__d];\
zypre_BoxLoopInc2();\
}\
}\
}
/*-----------------------------------*/
/* 2-index Begin/For/End triple: i1/i2 track two data boxes in lockstep. */
#define zypre_BoxLoop2Begin(ndim, loop_size,\
dbox1, start1, stride1, i1,\
dbox2, start2, stride2, i2)\
{\
HYPRE_Int i1,i2;\
zypre_BoxLoopDeclare();\
zypre_BoxLoopDeclareK(1);\
zypre_BoxLoopDeclareK(2);\
zypre_BoxLoopInit(ndim, loop_size);\
zypre_BoxLoopInitK(1, dbox1, start1, stride1, i1);\
zypre_BoxLoopInitK(2, dbox2, start2, stride2, i2);
#define zypre_BoxLoop2For(i1, i2)\
for (hypre__block = 0; hypre__block < hypre__num_blocks; hypre__block++)\
{\
HYPRE_Int i1,i2;\
zypre_BoxLoopSet();\
zypre_BoxLoopSetK(1, i1);\
zypre_BoxLoopSetK(2, i2);\
for (hypre__J = 0; hypre__J < hypre__JN; hypre__J++)\
{\
for (hypre__I = 0; hypre__I < hypre__IN; hypre__I++)\
{
#define zypre_BoxLoop2End(i1, i2)\
i1 += hypre__i0inc1;\
i2 += hypre__i0inc2;\
}\
zypre_BoxLoopInc1();\
i1 += hypre__ikinc1[hypre__d];\
i2 += hypre__ikinc2[hypre__d];\
zypre_BoxLoopInc2();\
}\
}\
}
/*-----------------------------------*/
/* 3-index Begin/For/End triple: i1/i2/i3 track three data boxes. */
#define zypre_BoxLoop3Begin(ndim, loop_size,\
dbox1, start1, stride1, i1,\
dbox2, start2, stride2, i2,\
dbox3, start3, stride3, i3)\
{\
HYPRE_Int i1,i2,i3;\
zypre_BoxLoopDeclare();\
zypre_BoxLoopDeclareK(1);\
zypre_BoxLoopDeclareK(2);\
zypre_BoxLoopDeclareK(3);\
zypre_BoxLoopInit(ndim, loop_size);\
zypre_BoxLoopInitK(1, dbox1, start1, stride1, i1);\
zypre_BoxLoopInitK(2, dbox2, start2, stride2, i2);\
zypre_BoxLoopInitK(3, dbox3, start3, stride3, i3);
#define zypre_BoxLoop3For(i1, i2, i3)\
for (hypre__block = 0; hypre__block < hypre__num_blocks; hypre__block++)\
{\
HYPRE_Int i1,i2,i3;\
zypre_BoxLoopSet();\
zypre_BoxLoopSetK(1, i1);\
zypre_BoxLoopSetK(2, i2);\
zypre_BoxLoopSetK(3, i3);\
for (hypre__J = 0; hypre__J < hypre__JN; hypre__J++)\
{\
for (hypre__I = 0; hypre__I < hypre__IN; hypre__I++)\
{
#define zypre_BoxLoop3End(i1, i2, i3)\
i1 += hypre__i0inc1;\
i2 += hypre__i0inc2;\
i3 += hypre__i0inc3;\
}\
zypre_BoxLoopInc1();\
i1 += hypre__ikinc1[hypre__d];\
i2 += hypre__ikinc2[hypre__d];\
i3 += hypre__ikinc3[hypre__d];\
zypre_BoxLoopInc2();\
}\
}\
}
/*-----------------------------------*/
/* 4-index Begin/For/End triple: i1..i4 track four data boxes. */
#define zypre_BoxLoop4Begin(ndim, loop_size,\
dbox1, start1, stride1, i1,\
dbox2, start2, stride2, i2,\
dbox3, start3, stride3, i3,\
dbox4, start4, stride4, i4)\
{\
HYPRE_Int i1,i2,i3,i4;\
zypre_BoxLoopDeclare();\
zypre_BoxLoopDeclareK(1);\
zypre_BoxLoopDeclareK(2);\
zypre_BoxLoopDeclareK(3);\
zypre_BoxLoopDeclareK(4);\
zypre_BoxLoopInit(ndim, loop_size);\
zypre_BoxLoopInitK(1, dbox1, start1, stride1, i1);\
zypre_BoxLoopInitK(2, dbox2, start2, stride2, i2);\
zypre_BoxLoopInitK(3, dbox3, start3, stride3, i3);\
zypre_BoxLoopInitK(4, dbox4, start4, stride4, i4);
#define zypre_BoxLoop4For(i1, i2, i3, i4)\
for (hypre__block = 0; hypre__block < hypre__num_blocks; hypre__block++)\
{\
HYPRE_Int i1,i2,i3,i4;\
zypre_BoxLoopSet();\
zypre_BoxLoopSetK(1, i1);\
zypre_BoxLoopSetK(2, i2);\
zypre_BoxLoopSetK(3, i3);\
zypre_BoxLoopSetK(4, i4);\
for (hypre__J = 0; hypre__J < hypre__JN; hypre__J++)\
{\
for (hypre__I = 0; hypre__I < hypre__IN; hypre__I++)\
{
#define zypre_BoxLoop4End(i1, i2, i3, i4)\
i1 += hypre__i0inc1;\
i2 += hypre__i0inc2;\
i3 += hypre__i0inc3;\
i4 += hypre__i0inc4;\
}\
zypre_BoxLoopInc1();\
i1 += hypre__ikinc1[hypre__d];\
i2 += hypre__ikinc2[hypre__d];\
i3 += hypre__ikinc3[hypre__d];\
i4 += hypre__ikinc4[hypre__d];\
zypre_BoxLoopInc2();\
}\
}\
}
/*-----------------------------------*/
/* "Basic" InitK: like zypre_BoxLoopInitK but with no data box — strides
 * are used directly (no box-size products) and the start index is 0. */
#define zypre_BasicBoxLoopInitK(k, stridek) \
hypre__sk##k[0] = stridek[0];\
hypre__ikinc##k[0] = 0;\
for (hypre__d = 1; hypre__d < hypre__ndim; hypre__d++)\
{\
hypre__sk##k[hypre__d] = stridek[hypre__d];\
hypre__ikinc##k[hypre__d] = hypre__ikinc##k[hypre__d-1] +\
hypre__sk##k[hypre__d] - hypre__n[hypre__d-1]*hypre__sk##k[hypre__d-1];\
}\
hypre__i0inc##k = hypre__sk##k[0];\
hypre__ikinc##k[hypre__ndim] = 0;\
hypre__ikstart##k = 0
/* Sequential 2-index basic loop Begin (this configuration's counterpart
 * to the OMP variant earlier in the file): declares and initializes only;
 * pair it with zypre_BoxLoop2For / zypre_BoxLoop2End. */
#define zypre_BasicBoxLoop2Begin(ndim, loop_size,\
stride1, i1,\
stride2, i2)\
{\
zypre_BoxLoopDeclare();\
zypre_BoxLoopDeclareK(1);\
zypre_BoxLoopDeclareK(2);\
zypre_BoxLoopInit(ndim, loop_size);\
zypre_BasicBoxLoopInitK(1, stride1);\
zypre_BasicBoxLoopInitK(2, stride2);
/*-----------------------------------*/
#endif
/*--------------------------------------------------------------------------
* NOTES - Keep these for reference here and elsewhere in the code
*--------------------------------------------------------------------------*/
#if 0
#define hypre_BoxLoop2Begin(loop_size,
dbox1, start1, stride1, i1,
dbox2, start2, stride2, i2)
{
/* init hypre__i1start */
HYPRE_Int hypre__i1start = hypre_BoxIndexRank(dbox1, start1);
HYPRE_Int hypre__i2start = hypre_BoxIndexRank(dbox2, start2);
/* declare and set hypre__s1 */
hypre_BoxLoopDeclareS(dbox1, stride1, hypre__sx1, hypre__sy1, hypre__sz1);
hypre_BoxLoopDeclareS(dbox2, stride2, hypre__sx2, hypre__sy2, hypre__sz2);
/* declare and set hypre__n, hypre__m, hypre__dir, hypre__max,
* hypre__div, hypre__mod, hypre__block, hypre__num_blocks */
hypre_BoxLoopDeclareN(loop_size);
#define hypre_BoxLoop2For(i, j, k, i1, i2)
for (hypre__block = 0; hypre__block < hypre__num_blocks; hypre__block++)
{
/* set i and hypre__n */
hypre_BoxLoopSet(i, j, k);
/* set i1 */
i1 = hypre__i1start + i*hypre__sx1 + j*hypre__sy1 + k*hypre__sz1;
i2 = hypre__i2start + i*hypre__sx2 + j*hypre__sy2 + k*hypre__sz2;
for (k = 0; k < hypre__nz; k++)
{
for (j = 0; j < hypre__ny; j++)
{
for (i = 0; i < hypre__nx; i++)
{
#define hypre_BoxLoop2End(i1, i2)
i1 += hypre__sx1;
i2 += hypre__sx2;
}
i1 += hypre__sy1 - hypre__nx*hypre__sx1;
i2 += hypre__sy2 - hypre__nx*hypre__sx2;
}
i1 += hypre__sz1 - hypre__ny*hypre__sy1;
i2 += hypre__sz2 - hypre__ny*hypre__sy2;
}
}
}
/*----------------------------------------
* Idea 2: Simple version of Idea 3 below
*----------------------------------------*/
N = 1;
for (d = 0; d < ndim; d++)
{
N *= n[d];
i[d] = 0;
n[d] -= 2; /* this produces a simpler comparison below */
}
i[ndim] = 0;
n[ndim] = 0;
for (I = 0; I < N; I++)
{
/* loop body */
for (d = 0; i[d] > n[d]; d++)
{
i[d] = 0;
}
i[d]++;
i1 += s1[d]; /* NOTE: These are different from hypre__sx1, etc. above */
i2 += s2[d]; /* The lengths of i, n, and s must be (ndim+1) */
}
/*----------------------------------------
* Idea 3: Approach used in the box loops
*----------------------------------------*/
N = 1;
for (d = 1; d < ndim; d++)
{
N *= n[d];
i[d] = 0;
n[d] -= 2; /* this produces a simpler comparison below */
}
i[ndim] = 0;
n[ndim] = 0;
for (J = 0; J < N; J++)
{
for (I = 0; I < n[0]; I++)
{
/* loop body */
i1 += s1[0];
i2 += s2[0];
}
for (d = 1; i[d] > n[d]; d++)
{
i[d] = 0;
}
i[d]++;
i1 += s1[d]; /* NOTE: These are different from hypre__sx1, etc. above */
i2 += s2[d]; /* The lengths of i, n, and s must be (ndim+1) */
}
#endif
/******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Header info for the struct assumed partition
*
*****************************************************************************/
#ifndef hypre_ASSUMED_PART_HEADER
#define hypre_ASSUMED_PART_HEADER
/* Assumed partition of a struct grid: a global description (identical on
 * all ranks) of regions, their processor assignments, and per-region
 * division counts, plus this rank's own portion. */
typedef struct
{
/* the entries will be the same for all procs */
HYPRE_Int ndim; /* number of dimensions */
hypre_BoxArray *regions; /* areas of the grid with boxes */
HYPRE_Int num_regions; /* how many regions */
HYPRE_Int *proc_partitions; /* proc ids assigned to each region
(this is size num_regions +1) */
hypre_Index *divisions; /* number of proc divisions in each
direction for each region */
/* these entries are specific to each proc */
hypre_BoxArray *my_partition; /* my portion of grid (at most 2) */
hypre_BoxArray *my_partition_boxes; /* boxes in my portion */
HYPRE_Int *my_partition_proc_ids;
HYPRE_Int *my_partition_boxnums;
HYPRE_Int my_partition_ids_size;
HYPRE_Int my_partition_ids_alloc;
HYPRE_Int my_partition_num_distinct_procs;
} hypre_StructAssumedPart;
/*Accessor macros */
#define hypre_StructAssumedPartNDim(apart) ((apart)->ndim)
#define hypre_StructAssumedPartRegions(apart) ((apart)->regions)
#define hypre_StructAssumedPartNumRegions(apart) ((apart)->num_regions)
#define hypre_StructAssumedPartDivisions(apart) ((apart)->divisions)
#define hypre_StructAssumedPartDivision(apart, i) ((apart)->divisions[i])
#define hypre_StructAssumedPartProcPartitions(apart) ((apart)->proc_partitions)
#define hypre_StructAssumedPartProcPartition(apart, i) ((apart)->proc_partitions[i])
#define hypre_StructAssumedPartMyPartition(apart) ((apart)->my_partition)
#define hypre_StructAssumedPartMyPartitionBoxes(apart) ((apart)->my_partition_boxes)
#define hypre_StructAssumedPartMyPartitionProcIds(apart) ((apart)->my_partition_proc_ids)
#define hypre_StructAssumedPartMyPartitionIdsSize(apart) ((apart)->my_partition_ids_size)
#define hypre_StructAssumedPartMyPartitionIdsAlloc(apart) ((apart)->my_partition_ids_alloc)
#define hypre_StructAssumedPartMyPartitionNumDistinctProcs(apart) ((apart)->my_partition_num_distinct_procs)
#define hypre_StructAssumedPartMyPartitionBoxnums(apart) ((apart)->my_partition_boxnums)
#define hypre_StructAssumedPartMyPartitionProcId(apart, i) ((apart)->my_partition_proc_ids[i])
#define hypre_StructAssumedPartMyPartitionBoxnum(apart, i) ((apart)->my_partition_boxnums[i])
#endif
/******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#ifndef hypre_BOX_MANAGER_HEADER
#define hypre_BOX_MANAGER_HEADER
/*--------------------------------------------------------------------------
* BoxManEntry
*--------------------------------------------------------------------------*/
/* One entry in a box manager: a box (imin/imax) keyed by the two-part id
 * (proc, id), stored as a node in a singly-linked list (next). */
typedef struct hypre_BoxManEntry_struct
{
hypre_Index imin; /* Extents of box */
hypre_Index imax;
HYPRE_Int ndim; /* Number of dimensions */
HYPRE_Int proc; /* This is a two-part unique id: (proc, id) */
HYPRE_Int id;
HYPRE_Int num_ghost[2*HYPRE_MAXDIM]; /* ghost sizes, lower/upper per dim */
HYPRE_Int position; /* This indicates the location of the entry in the the
* box manager entries array and is used for pairing with
* the info object (populated in addentry) */
void *boxman; /* The owning manager (populated in addentry) */
struct hypre_BoxManEntry_struct *next;
} hypre_BoxManEntry;
/*---------------------------------------------------------------------------
* Box Manager: organizes arbitrary information in a spatial way
*----------------------------------------------------------------------------*/
typedef struct
{
MPI_Comm comm; /* MPI communicator for this manager */
HYPRE_Int max_nentries; /* storage allocated for entries */
HYPRE_Int is_gather_called; /* Boolean to indicate whether
GatherEntries function has been
called (prior to assemble) - may not
want this (can tell by the size of
gather_regions array) */
hypre_BoxArray *gather_regions; /* This is where we collect boxes input
by calls to BoxManGatherEntries - to
be gathered in the assemble. These
are then deleted after the assemble */
HYPRE_Int all_global_known; /* Boolean to say that every processor
already has all of the global data
for this manager (this could be
accessed by a coarsening routine,
for example) */
HYPRE_Int is_entries_sort; /* Boolean to say that entries were
added in sorted order (id, proc)
(this could be accessed by a
coarsening routine, for example) */
HYPRE_Int entry_info_size; /* In bytes, the (max) size of the info
object for the entries */
HYPRE_Int is_assembled; /* Flag to indicate if the box manager has
been assembled (used to control whether
or not functions can be used prior to
assemble) */
/* Storing the entries */
HYPRE_Int nentries; /* Number of entries stored */
hypre_BoxManEntry *entries; /* Actual box manager entries - sorted by
(proc, id) at the end of the assemble) */
HYPRE_Int *procs_sort; /* The sorted procs corresponding to entries */
HYPRE_Int *ids_sort; /* Sorted ids corresponding to the entries */
HYPRE_Int num_procs_sort; /* Number of distinct procs in entries */
HYPRE_Int *procs_sort_offsets; /* Offsets for procs into the
entry_sort array */
HYPRE_Int first_local; /* Position of local infomation in entries */
HYPRE_Int local_proc_offset; /* Position of local information in
offsets */
/* Here is the table that organizes the entries spatially (by index) */
hypre_BoxManEntry **index_table; /* This points into 'entries' array and
corresponds to the index arays */
HYPRE_Int *indexes[HYPRE_MAXDIM]; /* Indexes (ordered) for imin and
imax of each box in the entries
array */
HYPRE_Int size[HYPRE_MAXDIM]; /* How many indexes in each
direction */
HYPRE_Int last_index[HYPRE_MAXDIM]; /* Last index used in the
indexes map */
HYPRE_Int num_my_entries; /* Num entries with proc_id = myid */
HYPRE_Int *my_ids; /* Array of ids corresponding to my entries */
hypre_BoxManEntry **my_entries; /* Points into entries that are mine and
corresponds to my_ids array. This is
destroyed in the assemble. */
void *info_objects; /* Array of info objects (each of size
entry_info_size), managed byte-wise */
hypre_StructAssumedPart *assumed_partition; /* The assumed partition object.
For now this is only used
during the assemble (where it
is created). */
HYPRE_Int ndim; /* Problem dimension (known in the grid) */
hypre_Box *bounding_box; /* Bounding box from associated grid */
HYPRE_Int next_id; /* Counter to indicate the next id that would be
unique (regardless of proc id) */
/* Ghost stuff */
HYPRE_Int num_ghost[2*HYPRE_MAXDIM]; /* lower/upper ghost sizes per dim */
} hypre_BoxManager;
/*--------------------------------------------------------------------------
* Accessor macros: hypre_BoxMan
*--------------------------------------------------------------------------*/
/* One accessor per hypre_BoxManager member (see struct above). */
#define hypre_BoxManComm(manager) ((manager) -> comm)
#define hypre_BoxManMaxNEntries(manager) ((manager) -> max_nentries)
#define hypre_BoxManIsGatherCalled(manager) ((manager) -> is_gather_called)
#define hypre_BoxManIsEntriesSort(manager) ((manager) -> is_entries_sort)
#define hypre_BoxManGatherRegions(manager) ((manager) -> gather_regions)
#define hypre_BoxManAllGlobalKnown(manager) ((manager) -> all_global_known)
#define hypre_BoxManEntryInfoSize(manager) ((manager) -> entry_info_size)
#define hypre_BoxManNEntries(manager) ((manager) -> nentries)
#define hypre_BoxManEntries(manager) ((manager) -> entries)
#define hypre_BoxManInfoObjects(manager) ((manager) -> info_objects)
#define hypre_BoxManIsAssembled(manager) ((manager) -> is_assembled)
#define hypre_BoxManProcsSort(manager) ((manager) -> procs_sort)
#define hypre_BoxManIdsSort(manager) ((manager) -> ids_sort)
#define hypre_BoxManNumProcsSort(manager) ((manager) -> num_procs_sort)
#define hypre_BoxManProcsSortOffsets(manager) ((manager) -> procs_sort_offsets)
#define hypre_BoxManLocalProcOffset(manager) ((manager) -> local_proc_offset)
#define hypre_BoxManFirstLocal(manager) ((manager) -> first_local)
#define hypre_BoxManIndexTable(manager) ((manager) -> index_table)
#define hypre_BoxManIndexes(manager) ((manager) -> indexes)
#define hypre_BoxManSize(manager) ((manager) -> size)
#define hypre_BoxManLastIndex(manager) ((manager) -> last_index)
#define hypre_BoxManNumMyEntries(manager) ((manager) -> num_my_entries)
#define hypre_BoxManMyIds(manager) ((manager) -> my_ids)
#define hypre_BoxManMyEntries(manager) ((manager) -> my_entries)
#define hypre_BoxManAssumedPartition(manager) ((manager) -> assumed_partition)
#define hypre_BoxManNDim(manager) ((manager) -> ndim)
#define hypre_BoxManBoundingBox(manager) ((manager) -> bounding_box)
#define hypre_BoxManNextId(manager) ((manager) -> next_id)
#define hypre_BoxManNumGhost(manager) ((manager) -> num_ghost)
#define hypre_BoxManIndexesD(manager, d) hypre_BoxManIndexes(manager)[d]
#define hypre_BoxManSizeD(manager, d) hypre_BoxManSize(manager)[d]
#define hypre_BoxManLastIndexD(manager, d) hypre_BoxManLastIndex(manager)[d]
/* Byte-offset lookup of the i-th info object in the byte-wise array. */
#define hypre_BoxManInfoObject(manager, i) \
(void *) ((char *)hypre_BoxManInfoObjects(manager) + i* hypre_BoxManEntryInfoSize(manager))
/*--------------------------------------------------------------------------
 * Accessor macros: hypre_BoxManEntry
 *--------------------------------------------------------------------------*/
#define hypre_BoxManEntryIMin(entry) ((entry) -> imin)
#define hypre_BoxManEntryIMax(entry) ((entry) -> imax)
#define hypre_BoxManEntryNDim(entry) ((entry) -> ndim)
#define hypre_BoxManEntryProc(entry) ((entry) -> proc)
#define hypre_BoxManEntryId(entry) ((entry) -> id)
#define hypre_BoxManEntryPosition(entry) ((entry) -> position)
#define hypre_BoxManEntryNumGhost(entry) ((entry) -> num_ghost)
#define hypre_BoxManEntryNext(entry) ((entry) -> next)
#define hypre_BoxManEntryBoxMan(entry) ((entry) -> boxman)
#endif
/******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Header info for the hypre_StructGrid structures
*
*****************************************************************************/
#ifndef hypre_STRUCT_GRID_HEADER
#define hypre_STRUCT_GRID_HEADER
/*--------------------------------------------------------------------------
* hypre_StructGrid:
*--------------------------------------------------------------------------*/
typedef struct hypre_StructGrid_struct
{
MPI_Comm comm; /* MPI communicator for this grid */
HYPRE_Int ndim; /* Number of grid dimensions */
hypre_BoxArray *boxes; /* Array of boxes in this process */
HYPRE_Int *ids; /* Unique IDs for boxes */
hypre_Index max_distance; /* Neighborhood size - in each dimension*/
hypre_Box *bounding_box; /* Bounding box around grid */
HYPRE_Int local_size; /* Number of grid points locally */
HYPRE_BigInt global_size; /* Total number of grid points */
hypre_Index periodic; /* Indicates if grid is periodic */
HYPRE_Int num_periods; /* number of box set periods */
hypre_Index *pshifts; /* shifts of periodicity */
HYPRE_Int ref_count; /* reference count for shared grids */
HYPRE_Int ghlocal_size; /* Number of vars in box including ghosts */
HYPRE_Int num_ghost[2*HYPRE_MAXDIM]; /* ghost layer size */
hypre_BoxManager *boxman; /* box manager for this grid */
#if defined(HYPRE_USING_CUDA)
HYPRE_Int data_location; /* where grid data lives in CUDA builds —
presumably host vs. device; confirm
against the CUDA memory code */
#endif
} hypre_StructGrid;
/*--------------------------------------------------------------------------
* Accessor macros: hypre_StructGrid
*--------------------------------------------------------------------------*/
/* Accessor macros: hypre_StructGrid (plain field access; arguments are
   parenthesized, so pointer-valued expressions expand correctly) */
#define hypre_StructGridComm(grid) ((grid) -> comm)
#define hypre_StructGridNDim(grid) ((grid) -> ndim)
#define hypre_StructGridBoxes(grid) ((grid) -> boxes)
#define hypre_StructGridIDs(grid) ((grid) -> ids)
#define hypre_StructGridMaxDistance(grid) ((grid) -> max_distance)
#define hypre_StructGridBoundingBox(grid) ((grid) -> bounding_box)
#define hypre_StructGridLocalSize(grid) ((grid) -> local_size)
#define hypre_StructGridGlobalSize(grid) ((grid) -> global_size)
#define hypre_StructGridPeriodic(grid) ((grid) -> periodic)
#define hypre_StructGridNumPeriods(grid) ((grid) -> num_periods)
#define hypre_StructGridPShifts(grid) ((grid) -> pshifts)
#define hypre_StructGridPShift(grid, i) ((grid) -> pshifts[i])
#define hypre_StructGridRefCount(grid) ((grid) -> ref_count)
#define hypre_StructGridGhlocalSize(grid) ((grid) -> ghlocal_size)
#define hypre_StructGridNumGhost(grid) ((grid) -> num_ghost)
#define hypre_StructGridBoxMan(grid) ((grid) -> boxman)
/* i-th local box of the grid */
#define hypre_StructGridBox(grid, i) \
(hypre_BoxArrayBox(hypre_StructGridBoxes(grid), i))
#define hypre_StructGridNumBoxes(grid) \
(hypre_BoxArraySize(hypre_StructGridBoxes(grid)))
/* NOTE(review): expands to hypre_StructGridNeighbors(), which is not defined
   in this header -- appears vestigial; verify before using this macro. */
#define hypre_StructGridIDPeriod(grid) \
hypre_BoxNeighborsIDPeriod(hypre_StructGridNeighbors(grid))
#if defined(HYPRE_USING_CUDA)
#define hypre_StructGridDataLocation(grid) ((grid) -> data_location)
#endif
/*--------------------------------------------------------------------------
 * Looping macros:
 *--------------------------------------------------------------------------*/
/* Iterate index variable i over the grid's local boxes */
#define hypre_ForStructGridBoxI(i, grid) \
hypre_ForBoxI(i, hypre_StructGridBoxes(grid))
#endif
/******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Header info for hypre_StructStencil data structures
*
*****************************************************************************/
#ifndef hypre_STRUCT_STENCIL_HEADER
#define hypre_STRUCT_STENCIL_HEADER
/*--------------------------------------------------------------------------
* hypre_StructStencil
*--------------------------------------------------------------------------*/
/* A stencil: a set of `size` offsets (`shape`) in `ndim` dimensions.
   Shared via ref_count rather than deep-copied. */
typedef struct hypre_StructStencil_struct
{
   hypre_Index *shape; /* Description of a stencil's shape */
   HYPRE_Int size; /* Number of stencil coefficients */
   HYPRE_Int ndim; /* Number of dimensions */
   HYPRE_Int ref_count; /* reference count for shared ownership */
} hypre_StructStencil;
/*--------------------------------------------------------------------------
* Accessor functions for the hypre_StructStencil structure
*--------------------------------------------------------------------------*/
/* Accessor macros: hypre_StructStencil (plain field access) */
#define hypre_StructStencilShape(stencil) ((stencil) -> shape)
#define hypre_StructStencilSize(stencil) ((stencil) -> size)
#define hypre_StructStencilNDim(stencil) ((stencil) -> ndim)
#define hypre_StructStencilRefCount(stencil) ((stencil) -> ref_count)
/* i-th stencil offset (a hypre_Index) */
#define hypre_StructStencilElement(stencil, i) hypre_StructStencilShape(stencil)[i]
#endif
/******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#ifndef hypre_COMMUNICATION_HEADER
#define hypre_COMMUNICATION_HEADER
/*--------------------------------------------------------------------------
* hypre_CommInfo:
*
* For "reverse" communication, the following are not needed (may be NULL)
* send_rboxnums, send_rboxes, send_transforms
*
* For "forward" communication, the following are not needed (may be NULL)
* recv_rboxnums, recv_rboxes, recv_transforms
*
*--------------------------------------------------------------------------*/
/* Geometric description of a communication pattern: which boxes are sent to /
   received from which processes, plus optional index-space transforms.
   See the forward/reverse notes above for which members may be NULL. */
typedef struct hypre_CommInfo_struct
{
   HYPRE_Int ndim;
   hypre_BoxArrayArray *send_boxes;
   hypre_Index send_stride;
   HYPRE_Int **send_processes;
   HYPRE_Int **send_rboxnums;
   hypre_BoxArrayArray *send_rboxes; /* send_boxes, some with periodic shift */
   hypre_BoxArrayArray *recv_boxes;
   hypre_Index recv_stride;
   HYPRE_Int **recv_processes;
   HYPRE_Int **recv_rboxnums;
   hypre_BoxArrayArray *recv_rboxes; /* recv_boxes, some with periodic shift */
   HYPRE_Int num_transforms; /* may be 0 = identity transform */
   hypre_Index *coords; /* may be NULL = identity transform */
   hypre_Index *dirs; /* may be NULL = identity transform */
   HYPRE_Int **send_transforms; /* may be NULL = identity transform */
   HYPRE_Int **recv_transforms; /* may be NULL = identity transform */
   HYPRE_Int boxes_match; /* true (>0) if each send box has a
                           * matching box on the recv processor */
} hypre_CommInfo;
/*--------------------------------------------------------------------------
* hypre_CommEntryType:
*--------------------------------------------------------------------------*/
/* One contiguous strided chunk to pack/unpack for a message. */
typedef struct hypre_CommEntryType_struct
{
   HYPRE_Int offset; /* offset for the data */
   HYPRE_Int dim; /* dimension of the communication */
   HYPRE_Int length_array[HYPRE_MAXDIM]; /* last dim has length num_values */
   HYPRE_Int stride_array[HYPRE_MAXDIM+1];
   HYPRE_Int *order; /* order of last dim values */
} hypre_CommEntryType;
/*--------------------------------------------------------------------------
* hypre_CommType:
*--------------------------------------------------------------------------*/
/* All communication with a single process: one message of `num_entries`
   packed chunks, `bufsize` doubles in total. */
typedef struct hypre_CommType_struct
{
   HYPRE_Int proc;
   HYPRE_Int bufsize; /* message buffer size (in doubles) */
   HYPRE_Int num_entries;
   hypre_CommEntryType *entries;
   /* this is only needed until first send buffer prefix is packed */
   HYPRE_Int *rem_boxnums; /* entry remote box numbers */
   hypre_Box *rem_boxes; /* entry remote boxes */
} hypre_CommType;
/*--------------------------------------------------------------------------
* hypre_CommPkg:
* Structure containing information for doing communications
*--------------------------------------------------------------------------*/
/* Complete plan for one exchange: per-process send/recv types, local
   copy types, and bulk storage freed through the pointers at the bottom. */
typedef struct hypre_CommPkg_struct
{
   MPI_Comm comm;
   HYPRE_Int first_comm; /* is this the first communication? */
   HYPRE_Int ndim;
   HYPRE_Int num_values;
   hypre_Index send_stride;
   hypre_Index recv_stride;
   HYPRE_Int send_bufsize; /* total send buffer size (in doubles) */
   HYPRE_Int recv_bufsize; /* total recv buffer size (in doubles) */
   HYPRE_Int num_sends;
   HYPRE_Int num_recvs;
   hypre_CommType *send_types; /* one per destination process */
   hypre_CommType *recv_types; /* one per source process */
   hypre_CommType *copy_from_type; /* local (same-process) copies */
   hypre_CommType *copy_to_type;
   /* these pointers are just to help free up memory for send/from types */
   hypre_CommEntryType *entries;
   HYPRE_Int *rem_boxnums;
   hypre_Box *rem_boxes;
   HYPRE_Int num_orders;
   HYPRE_Int **orders; /* num_orders x num_values */
   HYPRE_Int *recv_data_offsets; /* offsets into recv data (by box) */
   hypre_BoxArray *recv_data_space; /* recv data dimensions (by box) */
   hypre_Index identity_coord;
   hypre_Index identity_dir;
   HYPRE_Int *identity_order;
} hypre_CommPkg;
/*--------------------------------------------------------------------------
* CommHandle:
*--------------------------------------------------------------------------*/
/* State of one in-flight exchange, returned by the initiating call and
   consumed by the matching finalize call. */
typedef struct hypre_CommHandle_struct
{
   hypre_CommPkg *comm_pkg; /* the plan this exchange follows */
   HYPRE_Complex *send_data;
   HYPRE_Complex *recv_data;
   HYPRE_Int num_requests; /* length of requests[] and status[] */
   hypre_MPI_Request *requests;
   hypre_MPI_Status *status;
   HYPRE_Complex **send_buffers; /* one packed buffer per send */
   HYPRE_Complex **recv_buffers; /* one packed buffer per recv */
   HYPRE_Complex **send_buffers_data; /* device-side counterparts -- presumably
                                         for GPU builds; confirm in comm code */
   HYPRE_Complex **recv_buffers_data;
   /* set = 0, add = 1 */
   HYPRE_Int action;
} hypre_CommHandle;
/*--------------------------------------------------------------------------
 * Accessor macros: hypre_CommInfo
*--------------------------------------------------------------------------*/
#define hypre_CommInfoNDim(info) (info -> ndim)
#define hypre_CommInfoSendBoxes(info) (info -> send_boxes)
#define hypre_CommInfoSendStride(info) (info -> send_stride)
#define hypre_CommInfoSendProcesses(info) (info -> send_processes)
#define hypre_CommInfoSendRBoxnums(info) (info -> send_rboxnums)
#define hypre_CommInfoSendRBoxes(info) (info -> send_rboxes)
#define hypre_CommInfoRecvBoxes(info) (info -> recv_boxes)
#define hypre_CommInfoRecvStride(info) (info -> recv_stride)
#define hypre_CommInfoRecvProcesses(info) (info -> recv_processes)
#define hypre_CommInfoRecvRBoxnums(info) (info -> recv_rboxnums)
#define hypre_CommInfoRecvRBoxes(info) (info -> recv_rboxes)
#define hypre_CommInfoNumTransforms(info) (info -> num_transforms)
#define hypre_CommInfoCoords(info) (info -> coords)
#define hypre_CommInfoDirs(info) (info -> dirs)
#define hypre_CommInfoSendTransforms(info) (info -> send_transforms)
#define hypre_CommInfoRecvTransforms(info) (info -> recv_transforms)
#define hypre_CommInfoBoxesMatch(info) (info -> boxes_match)
/*--------------------------------------------------------------------------
* Accessor macros: hypre_CommEntryType
*--------------------------------------------------------------------------*/
#define hypre_CommEntryTypeOffset(entry) (entry -> offset)
#define hypre_CommEntryTypeDim(entry) (entry -> dim)
#define hypre_CommEntryTypeLengthArray(entry) (entry -> length_array)
#define hypre_CommEntryTypeStrideArray(entry) (entry -> stride_array)
#define hypre_CommEntryTypeOrder(entry) (entry -> order)
/*--------------------------------------------------------------------------
* Accessor macros: hypre_CommType
*--------------------------------------------------------------------------*/
#define hypre_CommTypeProc(type) (type -> proc)
#define hypre_CommTypeBufsize(type) (type -> bufsize)
#define hypre_CommTypeNumEntries(type) (type -> num_entries)
#define hypre_CommTypeEntries(type) (type -> entries)
#define hypre_CommTypeEntry(type, i) &(type -> entries[i])
#define hypre_CommTypeRemBoxnums(type) (type -> rem_boxnums)
#define hypre_CommTypeRemBoxnum(type, i) (type -> rem_boxnums[i])
#define hypre_CommTypeRemBoxes(type) (type -> rem_boxes)
#define hypre_CommTypeRemBox(type, i) &(type -> rem_boxes[i])
/*--------------------------------------------------------------------------
* Accessor macros: hypre_CommPkg
*--------------------------------------------------------------------------*/
#define hypre_CommPkgComm(comm_pkg) (comm_pkg -> comm)
#define hypre_CommPkgFirstComm(comm_pkg) (comm_pkg -> first_comm)
#define hypre_CommPkgNDim(comm_pkg) (comm_pkg -> ndim)
#define hypre_CommPkgNumValues(comm_pkg) (comm_pkg -> num_values)
#define hypre_CommPkgSendStride(comm_pkg) (comm_pkg -> send_stride)
#define hypre_CommPkgRecvStride(comm_pkg) (comm_pkg -> recv_stride)
#define hypre_CommPkgSendBufsize(comm_pkg) (comm_pkg -> send_bufsize)
#define hypre_CommPkgRecvBufsize(comm_pkg) (comm_pkg -> recv_bufsize)
#define hypre_CommPkgNumSends(comm_pkg) (comm_pkg -> num_sends)
#define hypre_CommPkgNumRecvs(comm_pkg) (comm_pkg -> num_recvs)
#define hypre_CommPkgSendTypes(comm_pkg) (comm_pkg -> send_types)
#define hypre_CommPkgSendType(comm_pkg, i) &(comm_pkg -> send_types[i])
#define hypre_CommPkgRecvTypes(comm_pkg) (comm_pkg -> recv_types)
#define hypre_CommPkgRecvType(comm_pkg, i) &(comm_pkg -> recv_types[i])
#define hypre_CommPkgCopyFromType(comm_pkg) (comm_pkg -> copy_from_type)
#define hypre_CommPkgCopyToType(comm_pkg) (comm_pkg -> copy_to_type)
#define hypre_CommPkgEntries(comm_pkg) (comm_pkg -> entries)
#define hypre_CommPkgRemBoxnums(comm_pkg) (comm_pkg -> rem_boxnums)
#define hypre_CommPkgRemBoxes(comm_pkg) (comm_pkg -> rem_boxes)
#define hypre_CommPkgNumOrders(comm_pkg) (comm_pkg -> num_orders)
#define hypre_CommPkgOrders(comm_pkg) (comm_pkg -> orders)
#define hypre_CommPkgRecvDataOffsets(comm_pkg) (comm_pkg -> recv_data_offsets)
#define hypre_CommPkgRecvDataSpace(comm_pkg) (comm_pkg -> recv_data_space)
#define hypre_CommPkgIdentityCoord(comm_pkg) (comm_pkg -> identity_coord)
#define hypre_CommPkgIdentityDir(comm_pkg) (comm_pkg -> identity_dir)
#define hypre_CommPkgIdentityOrder(comm_pkg) (comm_pkg -> identity_order)
/*--------------------------------------------------------------------------
* Accessor macros: hypre_CommHandle
*--------------------------------------------------------------------------*/
#define hypre_CommHandleCommPkg(comm_handle) (comm_handle -> comm_pkg)
#define hypre_CommHandleSendData(comm_handle) (comm_handle -> send_data)
#define hypre_CommHandleRecvData(comm_handle) (comm_handle -> recv_data)
#define hypre_CommHandleNumRequests(comm_handle) (comm_handle -> num_requests)
#define hypre_CommHandleRequests(comm_handle) (comm_handle -> requests)
#define hypre_CommHandleStatus(comm_handle) (comm_handle -> status)
#define hypre_CommHandleSendBuffers(comm_handle) (comm_handle -> send_buffers)
#define hypre_CommHandleRecvBuffers(comm_handle) (comm_handle -> recv_buffers)
#define hypre_CommHandleAction(comm_handle) (comm_handle -> action)
#define hypre_CommHandleSendBuffersDevice(comm_handle) (comm_handle -> send_buffers_data)
#define hypre_CommHandleRecvBuffersDevice(comm_handle) (comm_handle -> recv_buffers_data)
#endif
/******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Header info for computation
*
*****************************************************************************/
#ifndef hypre_COMPUTATION_HEADER
#define hypre_COMPUTATION_HEADER
/*--------------------------------------------------------------------------
* hypre_ComputeInfo:
*--------------------------------------------------------------------------*/
/* Description of a stencil computation: the communication it needs plus the
   boxes partitioned by data dependence. "indt"/"dept" presumably mean
   independent-of / dependent-on communicated ghost data -- confirm against
   the compute routines. */
typedef struct hypre_ComputeInfo_struct
{
   hypre_CommInfo *comm_info;
   hypre_BoxArrayArray *indt_boxes;
   hypre_BoxArrayArray *dept_boxes;
   hypre_Index stride;
} hypre_ComputeInfo;
/*--------------------------------------------------------------------------
* hypre_ComputePkg:
* Structure containing information for doing computations.
*--------------------------------------------------------------------------*/
/* Instantiated compute plan: a hypre_ComputeInfo bound to a grid, a data
   space, and a concrete communication package. */
typedef struct hypre_ComputePkg_struct
{
   hypre_CommPkg *comm_pkg; /* ghost-exchange plan for this computation */
   hypre_BoxArrayArray *indt_boxes;
   hypre_BoxArrayArray *dept_boxes;
   hypre_Index stride;
   hypre_StructGrid *grid;
   hypre_BoxArray *data_space;
   HYPRE_Int num_values;
} hypre_ComputePkg;
/*--------------------------------------------------------------------------
* Accessor macros: hypre_ComputeInfo
*--------------------------------------------------------------------------*/
#define hypre_ComputeInfoCommInfo(info) (info -> comm_info)
#define hypre_ComputeInfoIndtBoxes(info) (info -> indt_boxes)
#define hypre_ComputeInfoDeptBoxes(info) (info -> dept_boxes)
#define hypre_ComputeInfoStride(info) (info -> stride)
/*--------------------------------------------------------------------------
* Accessor macros: hypre_ComputePkg
*--------------------------------------------------------------------------*/
#define hypre_ComputePkgCommPkg(compute_pkg) (compute_pkg -> comm_pkg)
#define hypre_ComputePkgIndtBoxes(compute_pkg) (compute_pkg -> indt_boxes)
#define hypre_ComputePkgDeptBoxes(compute_pkg) (compute_pkg -> dept_boxes)
#define hypre_ComputePkgStride(compute_pkg) (compute_pkg -> stride)
#define hypre_ComputePkgGrid(compute_pkg) (compute_pkg -> grid)
#define hypre_ComputePkgDataSpace(compute_pkg) (compute_pkg -> data_space)
#define hypre_ComputePkgNumValues(compute_pkg) (compute_pkg -> num_values)
#endif
/******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Header info for the hypre_StructMatrix structures
*
*****************************************************************************/
#ifndef hypre_STRUCT_MATRIX_HEADER
#define hypre_STRUCT_MATRIX_HEADER
#include <assert.h>
/*--------------------------------------------------------------------------
* hypre_StructMatrix:
*--------------------------------------------------------------------------*/
/* Structured (stencil-based) matrix: per-box data indexed through
   data_indices, split into variable and constant-coefficient storage.
   Shared via ref_count rather than deep-copied. */
typedef struct hypre_StructMatrix_struct
{
   MPI_Comm comm;
   hypre_StructGrid *grid;
   hypre_StructStencil *user_stencil; /* stencil as supplied by the user --
                                         presumably before symmetry expansion
                                         into `stencil`; confirm */
   hypre_StructStencil *stencil; /* stencil actually used for storage */
   HYPRE_Int num_values; /* Number of "stored" coefficients */
   hypre_BoxArray *data_space;
   HYPRE_Complex *data; /* Pointer to variable matrix data */
   HYPRE_Complex *data_const; /* Pointer to constant matrix data */
   HYPRE_Complex **stencil_data; /* Pointer for each stencil */
   HYPRE_Int data_alloced; /* Boolean used for freeing data */
   HYPRE_Int data_size; /* Size of variable matrix data */
   HYPRE_Int data_const_size; /* Size of constant matrix data */
   HYPRE_Int **data_indices; /* num-boxes by stencil-size array
                                of indices into the data array.
                                data_indices[b][s] is the starting
                                index of matrix data corresponding
                                to box b and stencil coefficient s */
   HYPRE_Int constant_coefficient; /* normally 0; set to 1 for
                                      constant coefficient matrices
                                      or 2 for constant coefficient
                                      with variable diagonal */
   HYPRE_Int symmetric; /* Is the matrix symmetric */
   HYPRE_Int *symm_elements; /* Which elements are "symmetric" */
   HYPRE_Int num_ghost[2*HYPRE_MAXDIM]; /* Num ghost layers in each direction */
   HYPRE_BigInt global_size; /* Total number of nonzero coeffs */
   hypre_CommPkg *comm_pkg; /* Info on how to update ghost data */
   HYPRE_Int ref_count; /* reference count for shared ownership */
} hypre_StructMatrix;
/*--------------------------------------------------------------------------
* Accessor macros: hypre_StructMatrix
*--------------------------------------------------------------------------*/
/* Accessor macros: hypre_StructMatrix (plain field access; arguments are
   parenthesized, so pointer-valued expressions expand correctly) */
#define hypre_StructMatrixComm(matrix) ((matrix) -> comm)
#define hypre_StructMatrixGrid(matrix) ((matrix) -> grid)
#define hypre_StructMatrixUserStencil(matrix) ((matrix) -> user_stencil)
#define hypre_StructMatrixStencil(matrix) ((matrix) -> stencil)
#define hypre_StructMatrixNumValues(matrix) ((matrix) -> num_values)
#define hypre_StructMatrixDataSpace(matrix) ((matrix) -> data_space)
#define hypre_StructMatrixData(matrix) ((matrix) -> data)
#define hypre_StructMatrixDataConst(matrix) ((matrix) -> data_const)
#define hypre_StructMatrixStencilData(matrix) ((matrix) -> stencil_data)
#define hypre_StructMatrixDataAlloced(matrix) ((matrix) -> data_alloced)
#define hypre_StructMatrixDataSize(matrix) ((matrix) -> data_size)
#define hypre_StructMatrixDataConstSize(matrix) ((matrix) -> data_const_size)
#define hypre_StructMatrixDataIndices(matrix) ((matrix) -> data_indices)
#define hypre_StructMatrixConstantCoefficient(matrix) ((matrix) -> constant_coefficient)
#define hypre_StructMatrixSymmetric(matrix) ((matrix) -> symmetric)
#define hypre_StructMatrixSymmElements(matrix) ((matrix) -> symm_elements)
#define hypre_StructMatrixNumGhost(matrix) ((matrix) -> num_ghost)
#define hypre_StructMatrixGlobalSize(matrix) ((matrix) -> global_size)
#define hypre_StructMatrixCommPkg(matrix) ((matrix) -> comm_pkg)
#define hypre_StructMatrixRefCount(matrix) ((matrix) -> ref_count)
/* dimensionality, taken from the underlying grid */
#define hypre_StructMatrixNDim(matrix) \
hypre_StructGridNDim(hypre_StructMatrixGrid(matrix))
/* b-th data-space box */
#define hypre_StructMatrixBox(matrix, b) \
hypre_BoxArrayBox(hypre_StructMatrixDataSpace(matrix), b)
/* start of the data for box b, stencil coefficient s */
#define hypre_StructMatrixBoxData(matrix, b, s) \
(hypre_StructMatrixStencilData(matrix)[s] + hypre_StructMatrixDataIndices(matrix)[b][s])
/* pointer (not value) to the coefficient at `index` in box b, entry s */
#define hypre_StructMatrixBoxDataValue(matrix, b, s, index) \
(hypre_StructMatrixBoxData(matrix, b, s) + \
hypre_BoxIndexRank(hypre_StructMatrixBox(matrix, b), index))
/* constant-coefficient variant of the above */
#define hypre_CCStructMatrixBoxDataValue(matrix, b, s, index) \
(hypre_StructMatrixBoxData(matrix, b, s) + \
hypre_CCBoxIndexRank(hypre_StructMatrixBox(matrix, b), index))
#endif
/******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Header info for the hypre_StructVector structures
*
*****************************************************************************/
#ifndef hypre_STRUCT_VECTOR_HEADER
#define hypre_STRUCT_VECTOR_HEADER
/*--------------------------------------------------------------------------
* hypre_StructVector:
*--------------------------------------------------------------------------*/
/* Structured vector: one contiguous data array with per-box starting
   offsets in data_indices. Shared via ref_count rather than deep-copied. */
typedef struct hypre_StructVector_struct
{
   MPI_Comm comm;
   hypre_StructGrid *grid;
   hypre_BoxArray *data_space;
   HYPRE_Complex *data; /* Pointer to vector data on device*/
   HYPRE_Int data_alloced; /* Boolean used for freeing data */
   HYPRE_Int data_size; /* Size of vector data */
   HYPRE_Int *data_indices; /* num-boxes array of indices into
                               the data array. data_indices[b]
                               is the starting index of vector
                               data corresponding to box b. */
   HYPRE_Int num_ghost[2*HYPRE_MAXDIM]; /* Num ghost layers in each
                                         * direction */
   HYPRE_Int bghost_not_clear; /* Are boundary ghosts clear? */
   HYPRE_BigInt global_size; /* Total number coefficients */
   HYPRE_Int ref_count; /* reference count for shared ownership */
} hypre_StructVector;
/*--------------------------------------------------------------------------
* Accessor macros: hypre_StructVector
*--------------------------------------------------------------------------*/
/* Accessor macros: hypre_StructVector (plain field access; arguments are
   parenthesized, so pointer-valued expressions expand correctly) */
#define hypre_StructVectorComm(vector) ((vector) -> comm)
#define hypre_StructVectorGrid(vector) ((vector) -> grid)
#define hypre_StructVectorDataSpace(vector) ((vector) -> data_space)
#define hypre_StructVectorData(vector) ((vector) -> data)
#define hypre_StructVectorDataAlloced(vector) ((vector) -> data_alloced)
#define hypre_StructVectorDataSize(vector) ((vector) -> data_size)
#define hypre_StructVectorDataIndices(vector) ((vector) -> data_indices)
#define hypre_StructVectorNumGhost(vector) ((vector) -> num_ghost)
#define hypre_StructVectorBGhostNotClear(vector)((vector) -> bghost_not_clear)
#define hypre_StructVectorGlobalSize(vector) ((vector) -> global_size)
#define hypre_StructVectorRefCount(vector) ((vector) -> ref_count)
/* dimensionality, taken from the underlying grid */
#define hypre_StructVectorNDim(vector) \
hypre_StructGridNDim(hypre_StructVectorGrid(vector))
/* b-th data-space box */
#define hypre_StructVectorBox(vector, b) \
hypre_BoxArrayBox(hypre_StructVectorDataSpace(vector), b)
/* start of the data for box b */
#define hypre_StructVectorBoxData(vector, b) \
(hypre_StructVectorData(vector) + hypre_StructVectorDataIndices(vector)[b])
/* pointer (not value) to the coefficient at `index` in box b */
#define hypre_StructVectorBoxDataValue(vector, b, index) \
(hypre_StructVectorBoxData(vector, b) + \
hypre_BoxIndexRank(hypre_StructVectorBox(vector, b), index))
#endif
/******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/* assumed_part.c */
HYPRE_Int hypre_APSubdivideRegion ( hypre_Box *region , HYPRE_Int dim , HYPRE_Int level , hypre_BoxArray *box_array , HYPRE_Int *num_new_boxes );
HYPRE_Int hypre_APFindMyBoxesInRegions ( hypre_BoxArray *region_array , hypre_BoxArray *my_box_array , HYPRE_Int **p_count_array , HYPRE_Real **p_vol_array );
HYPRE_Int hypre_APGetAllBoxesInRegions ( hypre_BoxArray *region_array , hypre_BoxArray *my_box_array , HYPRE_Int **p_count_array , HYPRE_Real **p_vol_array , MPI_Comm comm );
HYPRE_Int hypre_APShrinkRegions ( hypre_BoxArray *region_array , hypre_BoxArray *my_box_array , MPI_Comm comm );
HYPRE_Int hypre_APPruneRegions ( hypre_BoxArray *region_array , HYPRE_Int **p_count_array , HYPRE_Real **p_vol_array );
HYPRE_Int hypre_APRefineRegionsByVol ( hypre_BoxArray *region_array , HYPRE_Real *vol_array , HYPRE_Int max_regions , HYPRE_Real gamma , HYPRE_Int dim , HYPRE_Int *return_code , MPI_Comm comm );
HYPRE_Int hypre_StructAssumedPartitionCreate ( HYPRE_Int dim , hypre_Box *bounding_box , HYPRE_Real global_boxes_size , HYPRE_Int global_num_boxes , hypre_BoxArray *local_boxes , HYPRE_Int *local_boxnums , HYPRE_Int max_regions , HYPRE_Int max_refinements , HYPRE_Real gamma , MPI_Comm comm , hypre_StructAssumedPart **p_assumed_partition );
HYPRE_Int hypre_StructAssumedPartitionDestroy ( hypre_StructAssumedPart *assumed_part );
HYPRE_Int hypre_APFillResponseStructAssumedPart ( void *p_recv_contact_buf , HYPRE_Int contact_size , HYPRE_Int contact_proc , void *ro , MPI_Comm comm , void **p_send_response_buf , HYPRE_Int *response_message_size );
HYPRE_Int hypre_StructAssumedPartitionGetRegionsFromProc ( hypre_StructAssumedPart *assumed_part , HYPRE_Int proc_id , hypre_BoxArray *assumed_regions );
HYPRE_Int hypre_StructAssumedPartitionGetProcsFromBox ( hypre_StructAssumedPart *assumed_part , hypre_Box *box , HYPRE_Int *num_proc_array , HYPRE_Int *size_alloc_proc_array , HYPRE_Int **p_proc_array );
/* box_algebra.c */
HYPRE_Int hypre_IntersectBoxes ( hypre_Box *box1 , hypre_Box *box2 , hypre_Box *ibox );
HYPRE_Int hypre_SubtractBoxes ( hypre_Box *box1 , hypre_Box *box2 , hypre_BoxArray *box_array );
HYPRE_Int hypre_SubtractBoxArrays ( hypre_BoxArray *box_array1 , hypre_BoxArray *box_array2 , hypre_BoxArray *tmp_box_array );
HYPRE_Int hypre_UnionBoxes ( hypre_BoxArray *boxes );
HYPRE_Int hypre_MinUnionBoxes ( hypre_BoxArray *boxes );
/* box_boundary.c */
HYPRE_Int hypre_BoxBoundaryIntersect ( hypre_Box *box , hypre_StructGrid *grid , HYPRE_Int d , HYPRE_Int dir , hypre_BoxArray *boundary );
HYPRE_Int hypre_BoxBoundaryG ( hypre_Box *box , hypre_StructGrid *g , hypre_BoxArray *boundary );
HYPRE_Int hypre_BoxBoundaryDG ( hypre_Box *box , hypre_StructGrid *g , hypre_BoxArray *boundarym , hypre_BoxArray *boundaryp , HYPRE_Int d );
HYPRE_Int hypre_GeneralBoxBoundaryIntersect( hypre_Box *box, hypre_StructGrid *grid, hypre_Index stencil_element, hypre_BoxArray *boundary );
/* box.c */
HYPRE_Int hypre_SetIndex ( hypre_Index index , HYPRE_Int val );
HYPRE_Int hypre_CopyIndex( hypre_Index in_index , hypre_Index out_index );
HYPRE_Int hypre_CopyToCleanIndex( hypre_Index in_index , HYPRE_Int ndim , hypre_Index out_index );
HYPRE_Int hypre_IndexEqual ( hypre_Index index , HYPRE_Int val , HYPRE_Int ndim );
HYPRE_Int hypre_IndexMin( hypre_Index index , HYPRE_Int ndim );
HYPRE_Int hypre_IndexMax( hypre_Index index , HYPRE_Int ndim );
HYPRE_Int hypre_AddIndexes ( hypre_Index index1 , hypre_Index index2 , HYPRE_Int ndim , hypre_Index result );
HYPRE_Int hypre_SubtractIndexes ( hypre_Index index1 , hypre_Index index2 , HYPRE_Int ndim , hypre_Index result );
HYPRE_Int hypre_IndexesEqual ( hypre_Index index1 , hypre_Index index2 , HYPRE_Int ndim );
hypre_Box *hypre_BoxCreate ( HYPRE_Int ndim );
HYPRE_Int hypre_BoxDestroy ( hypre_Box *box );
HYPRE_Int hypre_BoxInit( hypre_Box *box , HYPRE_Int ndim );
HYPRE_Int hypre_BoxSetExtents ( hypre_Box *box , hypre_Index imin , hypre_Index imax );
HYPRE_Int hypre_CopyBox( hypre_Box *box1 , hypre_Box *box2 );
hypre_Box *hypre_BoxDuplicate ( hypre_Box *box );
HYPRE_Int hypre_BoxVolume( hypre_Box *box );
HYPRE_Real hypre_doubleBoxVolume( hypre_Box *box );
HYPRE_Int hypre_IndexInBox ( hypre_Index index , hypre_Box *box );
HYPRE_Int hypre_BoxGetSize ( hypre_Box *box , hypre_Index size );
HYPRE_Int hypre_BoxGetStrideSize ( hypre_Box *box , hypre_Index stride , hypre_Index size );
HYPRE_Int hypre_BoxGetStrideVolume ( hypre_Box *box , hypre_Index stride , HYPRE_Int *volume_ptr );
HYPRE_Int hypre_BoxIndexRank( hypre_Box *box , hypre_Index index );
HYPRE_Int hypre_BoxRankIndex( hypre_Box *box , HYPRE_Int rank , hypre_Index index );
HYPRE_Int hypre_BoxOffsetDistance( hypre_Box *box , hypre_Index index );
HYPRE_Int hypre_BoxShiftPos( hypre_Box *box , hypre_Index shift );
HYPRE_Int hypre_BoxShiftNeg( hypre_Box *box , hypre_Index shift );
HYPRE_Int hypre_BoxGrowByIndex( hypre_Box *box , hypre_Index index );
HYPRE_Int hypre_BoxGrowByValue( hypre_Box *box , HYPRE_Int val );
HYPRE_Int hypre_BoxGrowByArray ( hypre_Box *box , HYPRE_Int *array );
hypre_BoxArray *hypre_BoxArrayCreate ( HYPRE_Int size , HYPRE_Int ndim );
HYPRE_Int hypre_BoxArrayDestroy ( hypre_BoxArray *box_array );
HYPRE_Int hypre_BoxArraySetSize ( hypre_BoxArray *box_array , HYPRE_Int size );
hypre_BoxArray *hypre_BoxArrayDuplicate ( hypre_BoxArray *box_array );
HYPRE_Int hypre_AppendBox ( hypre_Box *box , hypre_BoxArray *box_array );
HYPRE_Int hypre_DeleteBox ( hypre_BoxArray *box_array , HYPRE_Int index );
HYPRE_Int hypre_DeleteMultipleBoxes ( hypre_BoxArray *box_array , HYPRE_Int *indices , HYPRE_Int num );
HYPRE_Int hypre_AppendBoxArray ( hypre_BoxArray *box_array_0 , hypre_BoxArray *box_array_1 );
hypre_BoxArrayArray *hypre_BoxArrayArrayCreate ( HYPRE_Int size , HYPRE_Int ndim );
HYPRE_Int hypre_BoxArrayArrayDestroy ( hypre_BoxArrayArray *box_array_array );
hypre_BoxArrayArray *hypre_BoxArrayArrayDuplicate ( hypre_BoxArrayArray *box_array_array );
/* box_manager.c */
HYPRE_Int hypre_BoxManEntryGetInfo ( hypre_BoxManEntry *entry , void **info_ptr );
HYPRE_Int hypre_BoxManEntryGetExtents ( hypre_BoxManEntry *entry , hypre_Index imin , hypre_Index imax );
HYPRE_Int hypre_BoxManEntryCopy ( hypre_BoxManEntry *fromentry , hypre_BoxManEntry *toentry );
HYPRE_Int hypre_BoxManSetAllGlobalKnown ( hypre_BoxManager *manager , HYPRE_Int known );
HYPRE_Int hypre_BoxManGetAllGlobalKnown ( hypre_BoxManager *manager , HYPRE_Int *known );
HYPRE_Int hypre_BoxManSetIsEntriesSort ( hypre_BoxManager *manager , HYPRE_Int is_sort );
HYPRE_Int hypre_BoxManGetIsEntriesSort ( hypre_BoxManager *manager , HYPRE_Int *is_sort );
HYPRE_Int hypre_BoxManGetGlobalIsGatherCalled ( hypre_BoxManager *manager , MPI_Comm comm , HYPRE_Int *is_gather );
HYPRE_Int hypre_BoxManGetAssumedPartition ( hypre_BoxManager *manager , hypre_StructAssumedPart **assumed_partition );
HYPRE_Int hypre_BoxManSetAssumedPartition ( hypre_BoxManager *manager , hypre_StructAssumedPart *assumed_partition );
HYPRE_Int hypre_BoxManSetBoundingBox ( hypre_BoxManager *manager , hypre_Box *bounding_box );
HYPRE_Int hypre_BoxManSetNumGhost ( hypre_BoxManager *manager , HYPRE_Int *num_ghost );
HYPRE_Int hypre_BoxManDeleteMultipleEntriesAndInfo ( hypre_BoxManager *manager , HYPRE_Int *indices , HYPRE_Int num );
HYPRE_Int hypre_BoxManCreate ( HYPRE_Int max_nentries , HYPRE_Int info_size , HYPRE_Int dim , hypre_Box *bounding_box , MPI_Comm comm , hypre_BoxManager **manager_ptr );
HYPRE_Int hypre_BoxManIncSize ( hypre_BoxManager *manager , HYPRE_Int inc_size );
HYPRE_Int hypre_BoxManDestroy ( hypre_BoxManager *manager );
HYPRE_Int hypre_BoxManAddEntry ( hypre_BoxManager *manager , hypre_Index imin , hypre_Index imax , HYPRE_Int proc_id , HYPRE_Int box_id , void *info );
HYPRE_Int hypre_BoxManGetEntry ( hypre_BoxManager *manager , HYPRE_Int proc , HYPRE_Int id , hypre_BoxManEntry **entry_ptr );
HYPRE_Int hypre_BoxManGetAllEntries ( hypre_BoxManager *manager , HYPRE_Int *num_entries , hypre_BoxManEntry **entries );
HYPRE_Int hypre_BoxManGetAllEntriesBoxes ( hypre_BoxManager *manager , hypre_BoxArray *boxes );
HYPRE_Int hypre_BoxManGetLocalEntriesBoxes ( hypre_BoxManager *manager , hypre_BoxArray *boxes );
HYPRE_Int hypre_BoxManGetAllEntriesBoxesProc ( hypre_BoxManager *manager , hypre_BoxArray *boxes , HYPRE_Int **procs_ptr );
HYPRE_Int hypre_BoxManGatherEntries ( hypre_BoxManager *manager , hypre_Index imin , hypre_Index imax );
HYPRE_Int hypre_BoxManAssemble ( hypre_BoxManager *manager );
HYPRE_Int hypre_BoxManIntersect ( hypre_BoxManager *manager , hypre_Index ilower , hypre_Index iupper , hypre_BoxManEntry ***entries_ptr , HYPRE_Int *nentries_ptr );
HYPRE_Int hypre_FillResponseBoxManAssemble1 ( void *p_recv_contact_buf , HYPRE_Int contact_size , HYPRE_Int contact_proc , void *ro , MPI_Comm comm , void **p_send_response_buf , HYPRE_Int *response_message_size );
HYPRE_Int hypre_FillResponseBoxManAssemble2 ( void *p_recv_contact_buf , HYPRE_Int contact_size , HYPRE_Int contact_proc , void *ro , MPI_Comm comm , void **p_send_response_buf , HYPRE_Int *response_message_size );
/* communication_info.c */
HYPRE_Int hypre_CommInfoCreate ( hypre_BoxArrayArray *send_boxes , hypre_BoxArrayArray *recv_boxes , HYPRE_Int **send_procs , HYPRE_Int **recv_procs , HYPRE_Int **send_rboxnums , HYPRE_Int **recv_rboxnums , hypre_BoxArrayArray *send_rboxes , hypre_BoxArrayArray *recv_rboxes , HYPRE_Int boxes_match , hypre_CommInfo **comm_info_ptr );
HYPRE_Int hypre_CommInfoSetTransforms ( hypre_CommInfo *comm_info , HYPRE_Int num_transforms , hypre_Index *coords , hypre_Index *dirs , HYPRE_Int **send_transforms , HYPRE_Int **recv_transforms );
HYPRE_Int hypre_CommInfoGetTransforms ( hypre_CommInfo *comm_info , HYPRE_Int *num_transforms , hypre_Index **coords , hypre_Index **dirs );
HYPRE_Int hypre_CommInfoProjectSend ( hypre_CommInfo *comm_info , hypre_Index index , hypre_Index stride );
HYPRE_Int hypre_CommInfoProjectRecv ( hypre_CommInfo *comm_info , hypre_Index index , hypre_Index stride );
HYPRE_Int hypre_CommInfoDestroy ( hypre_CommInfo *comm_info );
HYPRE_Int hypre_CreateCommInfoFromStencil ( hypre_StructGrid *grid , hypre_StructStencil *stencil , hypre_CommInfo **comm_info_ptr );
HYPRE_Int hypre_CreateCommInfoFromNumGhost ( hypre_StructGrid *grid , HYPRE_Int *num_ghost , hypre_CommInfo **comm_info_ptr );
HYPRE_Int hypre_CreateCommInfoFromGrids ( hypre_StructGrid *from_grid , hypre_StructGrid *to_grid , hypre_CommInfo **comm_info_ptr );
/* computation.c */
HYPRE_Int hypre_ComputeInfoCreate ( hypre_CommInfo *comm_info , hypre_BoxArrayArray *indt_boxes , hypre_BoxArrayArray *dept_boxes , hypre_ComputeInfo **compute_info_ptr );
HYPRE_Int hypre_ComputeInfoProjectSend ( hypre_ComputeInfo *compute_info , hypre_Index index , hypre_Index stride );
HYPRE_Int hypre_ComputeInfoProjectRecv ( hypre_ComputeInfo *compute_info , hypre_Index index , hypre_Index stride );
HYPRE_Int hypre_ComputeInfoProjectComp ( hypre_ComputeInfo *compute_info , hypre_Index index , hypre_Index stride );
HYPRE_Int hypre_ComputeInfoDestroy ( hypre_ComputeInfo *compute_info );
HYPRE_Int hypre_CreateComputeInfo ( hypre_StructGrid *grid , hypre_StructStencil *stencil , hypre_ComputeInfo **compute_info_ptr );
HYPRE_Int hypre_ComputePkgCreate ( hypre_ComputeInfo *compute_info , hypre_BoxArray *data_space , HYPRE_Int num_values , hypre_StructGrid *grid , hypre_ComputePkg **compute_pkg_ptr );
HYPRE_Int hypre_ComputePkgDestroy ( hypre_ComputePkg *compute_pkg );
HYPRE_Int hypre_InitializeIndtComputations ( hypre_ComputePkg *compute_pkg , HYPRE_Complex *data , hypre_CommHandle **comm_handle_ptr );
HYPRE_Int hypre_FinalizeIndtComputations ( hypre_CommHandle *comm_handle );
/* HYPRE_struct_grid.c */
HYPRE_Int HYPRE_StructGridCreate ( MPI_Comm comm , HYPRE_Int dim , HYPRE_StructGrid *grid );
HYPRE_Int HYPRE_StructGridDestroy ( HYPRE_StructGrid grid );
HYPRE_Int HYPRE_StructGridSetExtents ( HYPRE_StructGrid grid , HYPRE_Int *ilower , HYPRE_Int *iupper );
HYPRE_Int HYPRE_StructGridSetPeriodic ( HYPRE_StructGrid grid , HYPRE_Int *periodic );
HYPRE_Int HYPRE_StructGridAssemble ( HYPRE_StructGrid grid );
HYPRE_Int HYPRE_StructGridSetNumGhost ( HYPRE_StructGrid grid , HYPRE_Int *num_ghost );
/* HYPRE_struct_matrix.c */
HYPRE_Int HYPRE_StructMatrixCreate ( MPI_Comm comm , HYPRE_StructGrid grid , HYPRE_StructStencil stencil , HYPRE_StructMatrix *matrix );
HYPRE_Int HYPRE_StructMatrixDestroy ( HYPRE_StructMatrix matrix );
HYPRE_Int HYPRE_StructMatrixInitialize ( HYPRE_StructMatrix matrix );
HYPRE_Int HYPRE_StructMatrixSetValues ( HYPRE_StructMatrix matrix , HYPRE_Int *grid_index , HYPRE_Int num_stencil_indices , HYPRE_Int *stencil_indices , HYPRE_Complex *values );
HYPRE_Int HYPRE_StructMatrixGetValues ( HYPRE_StructMatrix matrix , HYPRE_Int *grid_index , HYPRE_Int num_stencil_indices , HYPRE_Int *stencil_indices , HYPRE_Complex *values );
HYPRE_Int HYPRE_StructMatrixSetBoxValues ( HYPRE_StructMatrix matrix , HYPRE_Int *ilower , HYPRE_Int *iupper , HYPRE_Int num_stencil_indices , HYPRE_Int *stencil_indices , HYPRE_Complex *values );
HYPRE_Int HYPRE_StructMatrixGetBoxValues ( HYPRE_StructMatrix matrix , HYPRE_Int *ilower , HYPRE_Int *iupper , HYPRE_Int num_stencil_indices , HYPRE_Int *stencil_indices , HYPRE_Complex *values );
HYPRE_Int HYPRE_StructMatrixSetConstantValues ( HYPRE_StructMatrix matrix , HYPRE_Int num_stencil_indices , HYPRE_Int *stencil_indices , HYPRE_Complex *values );
HYPRE_Int HYPRE_StructMatrixAddToValues ( HYPRE_StructMatrix matrix , HYPRE_Int *grid_index , HYPRE_Int num_stencil_indices , HYPRE_Int *stencil_indices , HYPRE_Complex *values );
HYPRE_Int HYPRE_StructMatrixAddToBoxValues ( HYPRE_StructMatrix matrix , HYPRE_Int *ilower , HYPRE_Int *iupper , HYPRE_Int num_stencil_indices , HYPRE_Int *stencil_indices , HYPRE_Complex *values );
HYPRE_Int HYPRE_StructMatrixAddToConstantValues ( HYPRE_StructMatrix matrix , HYPRE_Int num_stencil_indices , HYPRE_Int *stencil_indices , HYPRE_Complex *values );
HYPRE_Int HYPRE_StructMatrixAssemble ( HYPRE_StructMatrix matrix );
HYPRE_Int HYPRE_StructMatrixSetNumGhost ( HYPRE_StructMatrix matrix , HYPRE_Int *num_ghost );
HYPRE_Int HYPRE_StructMatrixGetGrid ( HYPRE_StructMatrix matrix , HYPRE_StructGrid *grid );
HYPRE_Int HYPRE_StructMatrixSetSymmetric ( HYPRE_StructMatrix matrix , HYPRE_Int symmetric );
HYPRE_Int HYPRE_StructMatrixSetConstantEntries ( HYPRE_StructMatrix matrix , HYPRE_Int nentries , HYPRE_Int *entries );
HYPRE_Int HYPRE_StructMatrixPrint ( const char *filename , HYPRE_StructMatrix matrix , HYPRE_Int all );
HYPRE_Int HYPRE_StructMatrixMatvec ( HYPRE_Complex alpha , HYPRE_StructMatrix A , HYPRE_StructVector x , HYPRE_Complex beta , HYPRE_StructVector y );
HYPRE_Int HYPRE_StructMatrixClearBoundary( HYPRE_StructMatrix matrix );
/* HYPRE_struct_stencil.c */
HYPRE_Int HYPRE_StructStencilCreate ( HYPRE_Int dim , HYPRE_Int size , HYPRE_StructStencil *stencil );
HYPRE_Int HYPRE_StructStencilSetElement ( HYPRE_StructStencil stencil , HYPRE_Int element_index , HYPRE_Int *offset );
HYPRE_Int HYPRE_StructStencilDestroy ( HYPRE_StructStencil stencil );
/* HYPRE_struct_vector.c */
HYPRE_Int HYPRE_StructVectorCreate ( MPI_Comm comm , HYPRE_StructGrid grid , HYPRE_StructVector *vector );
HYPRE_Int HYPRE_StructVectorDestroy ( HYPRE_StructVector struct_vector );
HYPRE_Int HYPRE_StructVectorInitialize ( HYPRE_StructVector vector );
HYPRE_Int HYPRE_StructVectorSetValues ( HYPRE_StructVector vector , HYPRE_Int *grid_index , HYPRE_Complex values );
HYPRE_Int HYPRE_StructVectorSetBoxValues ( HYPRE_StructVector vector , HYPRE_Int *ilower , HYPRE_Int *iupper , HYPRE_Complex *values );
HYPRE_Int HYPRE_StructVectorAddToValues ( HYPRE_StructVector vector , HYPRE_Int *grid_index , HYPRE_Complex values );
HYPRE_Int HYPRE_StructVectorAddToBoxValues ( HYPRE_StructVector vector , HYPRE_Int *ilower , HYPRE_Int *iupper , HYPRE_Complex *values );
HYPRE_Int HYPRE_StructVectorScaleValues ( HYPRE_StructVector vector , HYPRE_Complex factor );
HYPRE_Int HYPRE_StructVectorGetValues ( HYPRE_StructVector vector , HYPRE_Int *grid_index , HYPRE_Complex *values );
HYPRE_Int HYPRE_StructVectorGetBoxValues ( HYPRE_StructVector vector , HYPRE_Int *ilower , HYPRE_Int *iupper , HYPRE_Complex *values );
HYPRE_Int HYPRE_StructVectorAssemble ( HYPRE_StructVector vector );
HYPRE_Int HYPRE_StructVectorPrint ( const char *filename , HYPRE_StructVector vector , HYPRE_Int all );
HYPRE_Int HYPRE_StructVectorSetNumGhost ( HYPRE_StructVector vector , HYPRE_Int *num_ghost );
HYPRE_Int HYPRE_StructVectorCopy ( HYPRE_StructVector x , HYPRE_StructVector y );
HYPRE_Int HYPRE_StructVectorSetConstantValues ( HYPRE_StructVector vector , HYPRE_Complex values );
HYPRE_Int HYPRE_StructVectorGetMigrateCommPkg ( HYPRE_StructVector from_vector , HYPRE_StructVector to_vector , HYPRE_CommPkg *comm_pkg );
HYPRE_Int HYPRE_StructVectorMigrate ( HYPRE_CommPkg comm_pkg , HYPRE_StructVector from_vector , HYPRE_StructVector to_vector );
HYPRE_Int HYPRE_CommPkgDestroy ( HYPRE_CommPkg comm_pkg );
/* project.c */
HYPRE_Int hypre_ProjectBox ( hypre_Box *box , hypre_Index index , hypre_Index stride );
HYPRE_Int hypre_ProjectBoxArray ( hypre_BoxArray *box_array , hypre_Index index , hypre_Index stride );
HYPRE_Int hypre_ProjectBoxArrayArray ( hypre_BoxArrayArray *box_array_array , hypre_Index index , hypre_Index stride );
/* struct_axpy.c */
HYPRE_Int hypre_StructAxpy ( HYPRE_Complex alpha , hypre_StructVector *x , hypre_StructVector *y );
/* struct_communication.c */
HYPRE_Int hypre_CommPkgCreate ( hypre_CommInfo *comm_info , hypre_BoxArray *send_data_space , hypre_BoxArray *recv_data_space , HYPRE_Int num_values , HYPRE_Int **orders , HYPRE_Int reverse , MPI_Comm comm , hypre_CommPkg **comm_pkg_ptr );
HYPRE_Int hypre_CommTypeSetEntries ( hypre_CommType *comm_type , HYPRE_Int *boxnums , hypre_Box *boxes , hypre_Index stride , hypre_Index coord , hypre_Index dir , HYPRE_Int *order , hypre_BoxArray *data_space , HYPRE_Int *data_offsets );
HYPRE_Int hypre_CommTypeSetEntry ( hypre_Box *box , hypre_Index stride , hypre_Index coord , hypre_Index dir , HYPRE_Int *order , hypre_Box *data_box , HYPRE_Int data_box_offset , hypre_CommEntryType *comm_entry );
HYPRE_Int hypre_InitializeCommunication ( hypre_CommPkg *comm_pkg , HYPRE_Complex *send_data , HYPRE_Complex *recv_data , HYPRE_Int action , HYPRE_Int tag , hypre_CommHandle **comm_handle_ptr );
HYPRE_Int hypre_FinalizeCommunication ( hypre_CommHandle *comm_handle );
HYPRE_Int hypre_ExchangeLocalData ( hypre_CommPkg *comm_pkg , HYPRE_Complex *send_data , HYPRE_Complex *recv_data , HYPRE_Int action );
HYPRE_Int hypre_CommPkgDestroy ( hypre_CommPkg *comm_pkg );
/* struct_copy.c */
HYPRE_Int hypre_StructCopy ( hypre_StructVector *x , hypre_StructVector *y );
HYPRE_Int hypre_StructPartialCopy ( hypre_StructVector *x , hypre_StructVector *y , hypre_BoxArrayArray *array_boxes );
/* struct_grid.c */
HYPRE_Int hypre_StructGridCreate ( MPI_Comm comm , HYPRE_Int dim , hypre_StructGrid **grid_ptr );
HYPRE_Int hypre_StructGridRef ( hypre_StructGrid *grid , hypre_StructGrid **grid_ref );
HYPRE_Int hypre_StructGridDestroy ( hypre_StructGrid *grid );
HYPRE_Int hypre_StructGridSetPeriodic ( hypre_StructGrid *grid , hypre_Index periodic );
HYPRE_Int hypre_StructGridSetExtents ( hypre_StructGrid *grid , hypre_Index ilower , hypre_Index iupper );
HYPRE_Int hypre_StructGridSetBoxes ( hypre_StructGrid *grid , hypre_BoxArray *boxes );
HYPRE_Int hypre_StructGridSetBoundingBox ( hypre_StructGrid *grid , hypre_Box *new_bb );
HYPRE_Int hypre_StructGridSetIDs ( hypre_StructGrid *grid , HYPRE_Int *ids );
HYPRE_Int hypre_StructGridSetBoxManager ( hypre_StructGrid *grid , hypre_BoxManager *boxman );
HYPRE_Int hypre_StructGridSetMaxDistance ( hypre_StructGrid *grid , hypre_Index dist );
HYPRE_Int hypre_StructGridAssemble ( hypre_StructGrid *grid );
HYPRE_Int hypre_GatherAllBoxes ( MPI_Comm comm , hypre_BoxArray *boxes , HYPRE_Int dim , hypre_BoxArray **all_boxes_ptr , HYPRE_Int **all_procs_ptr , HYPRE_Int *first_local_ptr );
HYPRE_Int hypre_ComputeBoxnums ( hypre_BoxArray *boxes , HYPRE_Int *procs , HYPRE_Int **boxnums_ptr );
HYPRE_Int hypre_StructGridPrint ( FILE *file , hypre_StructGrid *grid );
HYPRE_Int hypre_StructGridRead ( MPI_Comm comm , FILE *file , hypre_StructGrid **grid_ptr );
HYPRE_Int hypre_StructGridSetNumGhost ( hypre_StructGrid *grid , HYPRE_Int *num_ghost );
#if defined(HYPRE_USING_CUDA)
HYPRE_Int hypre_StructGridGetMaxBoxSize(hypre_StructGrid *grid);
HYPRE_Int hypre_StructGridSetDataLocation( HYPRE_StructGrid grid, HYPRE_Int data_location );
#endif
/* struct_innerprod.c */
HYPRE_Real hypre_StructInnerProd ( hypre_StructVector *x , hypre_StructVector *y );
/* struct_io.c */
HYPRE_Int hypre_PrintBoxArrayData ( FILE *file , hypre_BoxArray *box_array , hypre_BoxArray *data_space , HYPRE_Int num_values , HYPRE_Int dim , HYPRE_Complex *data );
HYPRE_Int hypre_PrintCCVDBoxArrayData ( FILE *file , hypre_BoxArray *box_array , hypre_BoxArray *data_space , HYPRE_Int num_values , HYPRE_Int center_rank , HYPRE_Int stencil_size , HYPRE_Int *symm_elements , HYPRE_Int dim , HYPRE_Complex *data );
HYPRE_Int hypre_PrintCCBoxArrayData ( FILE *file , hypre_BoxArray *box_array , hypre_BoxArray *data_space , HYPRE_Int num_values , HYPRE_Complex *data );
HYPRE_Int hypre_ReadBoxArrayData ( FILE *file , hypre_BoxArray *box_array , hypre_BoxArray *data_space , HYPRE_Int num_values , HYPRE_Int dim , HYPRE_Complex *data );
HYPRE_Int hypre_ReadBoxArrayData_CC ( FILE *file , hypre_BoxArray *box_array , hypre_BoxArray *data_space , HYPRE_Int stencil_size , HYPRE_Int real_stencil_size , HYPRE_Int constant_coefficient , HYPRE_Int dim , HYPRE_Complex *data );
/* struct_matrix.c */
HYPRE_Complex *hypre_StructMatrixExtractPointerByIndex ( hypre_StructMatrix *matrix , HYPRE_Int b , hypre_Index index );
hypre_StructMatrix *hypre_StructMatrixCreate ( MPI_Comm comm , hypre_StructGrid *grid , hypre_StructStencil *user_stencil );
hypre_StructMatrix *hypre_StructMatrixRef ( hypre_StructMatrix *matrix );
HYPRE_Int hypre_StructMatrixDestroy ( hypre_StructMatrix *matrix );
HYPRE_Int hypre_StructMatrixInitializeShell ( hypre_StructMatrix *matrix );
HYPRE_Int hypre_StructMatrixInitializeData ( hypre_StructMatrix *matrix , HYPRE_Complex *data ,HYPRE_Complex *data_const);
HYPRE_Int hypre_StructMatrixInitialize ( hypre_StructMatrix *matrix );
HYPRE_Int hypre_StructMatrixSetValues ( hypre_StructMatrix *matrix , hypre_Index grid_index , HYPRE_Int num_stencil_indices , HYPRE_Int *stencil_indices , HYPRE_Complex *values , HYPRE_Int action , HYPRE_Int boxnum , HYPRE_Int outside );
HYPRE_Int hypre_StructMatrixSetBoxValues ( hypre_StructMatrix *matrix , hypre_Box *set_box , hypre_Box *value_box , HYPRE_Int num_stencil_indices , HYPRE_Int *stencil_indices , HYPRE_Complex *values , HYPRE_Int action , HYPRE_Int boxnum , HYPRE_Int outside );
HYPRE_Int hypre_StructMatrixSetConstantValues ( hypre_StructMatrix *matrix , HYPRE_Int num_stencil_indices , HYPRE_Int *stencil_indices , HYPRE_Complex *values , HYPRE_Int action );
HYPRE_Int hypre_StructMatrixClearValues ( hypre_StructMatrix *matrix , hypre_Index grid_index , HYPRE_Int num_stencil_indices , HYPRE_Int *stencil_indices , HYPRE_Int boxnum , HYPRE_Int outside );
HYPRE_Int hypre_StructMatrixClearBoxValues ( hypre_StructMatrix *matrix , hypre_Box *clear_box , HYPRE_Int num_stencil_indices , HYPRE_Int *stencil_indices , HYPRE_Int boxnum , HYPRE_Int outside );
HYPRE_Int hypre_StructMatrixAssemble ( hypre_StructMatrix *matrix );
HYPRE_Int hypre_StructMatrixSetNumGhost ( hypre_StructMatrix *matrix , HYPRE_Int *num_ghost );
HYPRE_Int hypre_StructMatrixSetConstantCoefficient ( hypre_StructMatrix *matrix , HYPRE_Int constant_coefficient );
HYPRE_Int hypre_StructMatrixSetConstantEntries ( hypre_StructMatrix *matrix , HYPRE_Int nentries , HYPRE_Int *entries );
HYPRE_Int hypre_StructMatrixClearGhostValues ( hypre_StructMatrix *matrix );
HYPRE_Int hypre_StructMatrixPrint ( const char *filename , hypre_StructMatrix *matrix , HYPRE_Int all );
HYPRE_Int hypre_StructMatrixMigrate ( hypre_StructMatrix *from_matrix , hypre_StructMatrix *to_matrix );
hypre_StructMatrix *hypre_StructMatrixRead ( MPI_Comm comm , const char *filename , HYPRE_Int *num_ghost );
HYPRE_Int hypre_StructMatrixClearBoundary( hypre_StructMatrix *matrix);
/* struct_matrix_mask.c */
hypre_StructMatrix *hypre_StructMatrixCreateMask ( hypre_StructMatrix *matrix , HYPRE_Int num_stencil_indices , HYPRE_Int *stencil_indices );
/* struct_matvec.c */
void *hypre_StructMatvecCreate ( void );
HYPRE_Int hypre_StructMatvecSetup ( void *matvec_vdata , hypre_StructMatrix *A , hypre_StructVector *x );
HYPRE_Int hypre_StructMatvecCompute ( void *matvec_vdata , HYPRE_Complex alpha , hypre_StructMatrix *A , hypre_StructVector *x , HYPRE_Complex beta , hypre_StructVector *y );
HYPRE_Int hypre_StructMatvecCC0 ( HYPRE_Complex alpha , hypre_StructMatrix *A , hypre_StructVector *x , hypre_StructVector *y , hypre_BoxArrayArray *compute_box_aa , hypre_IndexRef stride );
HYPRE_Int hypre_StructMatvecCC1 ( HYPRE_Complex alpha , hypre_StructMatrix *A , hypre_StructVector *x , hypre_StructVector *y , hypre_BoxArrayArray *compute_box_aa , hypre_IndexRef stride );
HYPRE_Int hypre_StructMatvecCC2 ( HYPRE_Complex alpha , hypre_StructMatrix *A , hypre_StructVector *x , hypre_StructVector *y , hypre_BoxArrayArray *compute_box_aa , hypre_IndexRef stride );
HYPRE_Int hypre_StructMatvecDestroy ( void *matvec_vdata );
HYPRE_Int hypre_StructMatvec ( HYPRE_Complex alpha , hypre_StructMatrix *A , hypre_StructVector *x , HYPRE_Complex beta , hypre_StructVector *y );
/* struct_scale.c */
HYPRE_Int hypre_StructScale ( HYPRE_Complex alpha , hypre_StructVector *y );
/* struct_stencil.c */
hypre_StructStencil *hypre_StructStencilCreate ( HYPRE_Int dim , HYPRE_Int size , hypre_Index *shape );
hypre_StructStencil *hypre_StructStencilRef ( hypre_StructStencil *stencil );
HYPRE_Int hypre_StructStencilDestroy ( hypre_StructStencil *stencil );
HYPRE_Int hypre_StructStencilElementRank ( hypre_StructStencil *stencil , hypre_Index stencil_element );
HYPRE_Int hypre_StructStencilSymmetrize ( hypre_StructStencil *stencil , hypre_StructStencil **symm_stencil_ptr , HYPRE_Int **symm_elements_ptr );
/* struct_vector.c */
hypre_StructVector *hypre_StructVectorCreate ( MPI_Comm comm , hypre_StructGrid *grid );
hypre_StructVector *hypre_StructVectorRef ( hypre_StructVector *vector );
HYPRE_Int hypre_StructVectorDestroy ( hypre_StructVector *vector );
HYPRE_Int hypre_StructVectorInitializeShell ( hypre_StructVector *vector );
HYPRE_Int hypre_StructVectorInitializeData ( hypre_StructVector *vector , HYPRE_Complex *data);
HYPRE_Int hypre_StructVectorInitialize ( hypre_StructVector *vector );
HYPRE_Int hypre_StructVectorSetValues ( hypre_StructVector *vector , hypre_Index grid_index , HYPRE_Complex *values , HYPRE_Int action , HYPRE_Int boxnum , HYPRE_Int outside );
HYPRE_Int hypre_StructVectorSetBoxValues ( hypre_StructVector *vector , hypre_Box *set_box , hypre_Box *value_box , HYPRE_Complex *values , HYPRE_Int action , HYPRE_Int boxnum , HYPRE_Int outside );
HYPRE_Int hypre_StructVectorClearValues ( hypre_StructVector *vector , hypre_Index grid_index , HYPRE_Int boxnum , HYPRE_Int outside );
HYPRE_Int hypre_StructVectorClearBoxValues ( hypre_StructVector *vector , hypre_Box *clear_box , HYPRE_Int boxnum , HYPRE_Int outside );
HYPRE_Int hypre_StructVectorClearAllValues ( hypre_StructVector *vector );
HYPRE_Int hypre_StructVectorSetNumGhost ( hypre_StructVector *vector , HYPRE_Int *num_ghost );
HYPRE_Int hypre_StructVectorSetDataSize(hypre_StructVector *vector , HYPRE_Int *data_size, HYPRE_Int *data_host_size);
HYPRE_Int hypre_StructVectorAssemble ( hypre_StructVector *vector );
HYPRE_Int hypre_StructVectorCopy ( hypre_StructVector *x , hypre_StructVector *y );
HYPRE_Int hypre_StructVectorSetConstantValues ( hypre_StructVector *vector , HYPRE_Complex values );
HYPRE_Int hypre_StructVectorSetFunctionValues ( hypre_StructVector *vector , HYPRE_Complex (*fcn )());
HYPRE_Int hypre_StructVectorClearGhostValues ( hypre_StructVector *vector );
HYPRE_Int hypre_StructVectorClearBoundGhostValues ( hypre_StructVector *vector , HYPRE_Int force );
HYPRE_Int hypre_StructVectorScaleValues ( hypre_StructVector *vector , HYPRE_Complex factor );
hypre_CommPkg *hypre_StructVectorGetMigrateCommPkg ( hypre_StructVector *from_vector , hypre_StructVector *to_vector );
HYPRE_Int hypre_StructVectorMigrate ( hypre_CommPkg *comm_pkg , hypre_StructVector *from_vector , hypre_StructVector *to_vector );
HYPRE_Int hypre_StructVectorPrint ( const char *filename , hypre_StructVector *vector , HYPRE_Int all );
hypre_StructVector *hypre_StructVectorRead ( MPI_Comm comm , const char *filename , HYPRE_Int *num_ghost );
hypre_StructVector *hypre_StructVectorClone ( hypre_StructVector *vector );
#ifdef __cplusplus
}
#endif
#endif
|
Parallelizer.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_PARALLELIZER_H
#define EIGEN_PARALLELIZER_H
#if EIGEN_HAS_CXX11_ATOMIC
#include <atomic>
#endif
namespace Eigen {
namespace internal {
/** \internal Backing store for Eigen's global "max threads" setting.
  * SetAction records *v as the user-requested thread cap; GetAction writes
  * the effective cap into *v: the recorded value if positive, otherwise the
  * OpenMP default (or 1 when OpenMP is disabled). */
inline void manage_multi_threading(Action action, int* v)
{
  // -1 means "never set by the user"; static so the value persists across calls.
  static EIGEN_UNUSED int m_maxThreads = -1;
  switch(action)
  {
    case SetAction:
      eigen_internal_assert(v!=0);
      m_maxThreads = *v;
      break;
    case GetAction:
      eigen_internal_assert(v!=0);
#ifdef EIGEN_HAS_OPENMP
      *v = (m_maxThreads > 0) ? m_maxThreads : omp_get_max_threads();
#else
      *v = 1;
#endif
      break;
    default:
      eigen_internal_assert(false);
  }
}
}
/** Must be called first when using Eigen from multiple threads.
  *
  * Queries the thread-count and cache-size settings once so that their
  * internal static state is initialized before any concurrent use. */
inline void initParallel()
{
  int thread_count = 0;
  internal::manage_multi_threading(GetAction, &thread_count);
  std::ptrdiff_t l1_size, l2_size, l3_size;
  internal::manage_caching_sizes(GetAction, &l1_size, &l2_size, &l3_size);
}
/** \returns the max number of threads reserved for Eigen
  * \sa setNbThreads */
inline int nbThreads()
{
  // Delegates to the shared setting; GetAction fills in the effective count.
  int count = 0;
  internal::manage_multi_threading(GetAction, &count);
  return count;
}
/** Sets the max number of threads reserved for Eigen
  * \sa nbThreads */
inline void setNbThreads(int v)
{
  // SetAction only reads through the pointer, so a local copy is safe.
  int requested = v;
  internal::manage_multi_threading(SetAction, &requested);
}
namespace internal {
// Per-thread synchronization/partition record used by parallelize_gemm.
template<typename Index> struct GemmParallelInfo
{
  // sync starts "unset" (-1), users at 0; both are written by multiple
  // threads (see the packing comment below).
  GemmParallelInfo() : sync(-1), users(0), lhs_start(0), lhs_length(0) {}
  // volatile is not enough on all architectures (see bug 1572)
  // to guarantee that when thread A says to thread B that it is
  // done with packing a block, then all writes have been really
  // carried out... C++11 memory model+atomic guarantees this.
#if EIGEN_HAS_CXX11_ATOMIC
  // Preferred path: real atomics with the C++11 memory model.
  std::atomic<Index> sync;
  std::atomic<int> users;
#else
  // Pre-C++11 fallback: volatile only; parallelize_gemm disables the OpenMP
  // path on non-x86 targets in this configuration (see its #if guard).
  Index volatile sync;
  int volatile users;
#endif
  // Slice of the lhs handled by one thread -- NOTE(review): presumably the
  // row range [lhs_start, lhs_start+lhs_length); confirm in parallelize_gemm.
  Index lhs_start;
  Index lhs_length;
};
/** Runs the GEMM functor `func` over the rows x cols destination, either
  * sequentially or split across OpenMP threads. `depth` only feeds the
  * work-size heuristic; `transpose` indicates a row-major destination, in
  * which case the roles of rows and cols are swapped for the split. */
template<bool Condition, typename Functor, typename Index>
void parallelize_gemm(const Functor& func, Index rows, Index cols, Index depth, bool transpose)
{
// TODO when EIGEN_USE_BLAS is defined,
// we should still enable OMP for other scalar types
// Without C++11, we have to disable GEMM's parallelization on
// non x86 architectures because there volatile is not enough for our purpose.
// See bug 1572.
#if (! defined(EIGEN_HAS_OPENMP)) || defined(EIGEN_USE_BLAS) || ((!EIGEN_HAS_CXX11_ATOMIC) && !(EIGEN_ARCH_i386_OR_x86_64))
// FIXME the transpose variable is only needed to properly split
// the matrix product when multithreading is enabled. This is a temporary
// fix to support row-major destination matrices. This whole
// parallelizer mechanism has to be redesigned anyway.
EIGEN_UNUSED_VARIABLE(depth);
EIGEN_UNUSED_VARIABLE(transpose);
func(0,rows, 0,cols);
#else
// Dynamically check whether we should enable or disable OpenMP.
// The conditions are:
// - the max number of threads we can create is greater than 1
// - we are not already in a parallel code
// - the sizes are large enough
// compute the maximal number of threads from the size of the product:
// This first heuristic takes into account that the product kernel is fully optimized when working with nr columns at once.
Index size = transpose ? rows : cols;
Index pb_max_threads = std::max<Index>(1,size / Functor::Traits::nr);
// compute the maximal number of threads from the total amount of work:
double work = static_cast<double>(rows) * static_cast<double>(cols) *
static_cast<double>(depth);
double kMinTaskSize = 50000; // FIXME improve this heuristic.
pb_max_threads = std::max<Index>(1, std::min<Index>(pb_max_threads, work / kMinTaskSize));
// compute the number of threads we are going to use
Index threads = std::min<Index>(nbThreads(), pb_max_threads);
// if multi-threading is explicitly disabled, not useful, or if we already are in a parallel session,
// then abort multi-threading
// FIXME omp_get_num_threads()>1 only works for openmp, what if the user does not use openmp?
if((!Condition) || (threads==1) || (omp_get_num_threads()>1))
return func(0,rows, 0,cols);
Eigen::initParallel();
func.initParallelSession(threads);
if(transpose)
std::swap(rows,cols);
ei_declare_aligned_stack_constructed_variable(GemmParallelInfo<Index>,info,threads,0);
#pragma omp parallel num_threads(threads)
{
Index i = omp_get_thread_num();
// Note that the actual number of threads might be lower than the number of requested ones.
Index actual_threads = omp_get_num_threads();
// Column blocks are rounded down to a multiple of 4; row blocks to a
// multiple of the kernel's mr register-blocking. The last thread takes
// whatever remains.
Index blockCols = (cols / actual_threads) & ~Index(0x3);
Index blockRows = (rows / actual_threads);
blockRows = (blockRows/Functor::Traits::mr)*Functor::Traits::mr;
Index r0 = i*blockRows;
Index actualBlockRows = (i+1==actual_threads) ? rows-r0 : blockRows;
Index c0 = i*blockCols;
Index actualBlockCols = (i+1==actual_threads) ? cols-c0 : blockCols;
info[i].lhs_start = r0;
info[i].lhs_length = actualBlockRows;
if(transpose) func(c0, actualBlockCols, 0, rows, info);
else func(0, rows, c0, actualBlockCols, info);
}
#endif
}
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_PARALLELIZER_H
|
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute RESULT = X - Y for two `struct timeval` values.
 *
 * NOTE: Y is normalized in place (its fields may be modified) so the
 * final subtraction cannot produce a negative tv_usec.
 *
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
	/* Borrow from the seconds field when x has fewer microseconds. */
	if (x->tv_usec < y->tv_usec) {
		int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
		y->tv_sec += borrow;
		y->tv_usec -= 1000000 * borrow;
	}
	/* Carry into the seconds field when the usec gap exceeds one second. */
	if (x->tv_usec - y->tv_usec > 1000000) {
		int carry = (x->tv_usec - y->tv_usec) / 1000000;
		y->tv_sec -= carry;
		y->tv_usec += 1000000 * carry;
	}
	/* tv_usec is now guaranteed non-negative. */
	result->tv_sec = x->tv_sec - y->tv_sec;
	result->tv_usec = x->tv_usec - y->tv_usec;
	return x->tv_sec < y->tv_sec;
}
/* Driver for the tiled order-1 3D 7-point variable-coefficient stencil:
 * allocates the grids, runs the PLUTO/CLooG-tiled sweep TESTS times and
 * reports per-run and best wall-clock times.
 *
 * Usage: prog Nx Ny Nz Nt   (interior sizes; a +2 halo is added here)
 *
 * Fixes vs. the original generated code:
 *  - Nx/Ny/Nz/Nt were read uninitialized when arguments were missing (UB);
 *    all four are now required, with a usage message otherwise.
 *  - Both time buffers A[0]/A[1] are initialized over the FULL index range:
 *    the stencil reads boundary plane 0 of A and reads A[1] on the first
 *    odd time step, all of which were uninitialized before (UB).
 *  - tile_size and the top-level A/coef pointer arrays are freed.
 */
int main(int argc, char *argv[])
{
  int i, j, k, m, test;
  int Nx, Ny, Nz, Nt;

  if (argc < 5) {
    fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
    return 1;
  }
  Nx = atoi(argv[1]) + 2;
  Ny = atoi(argv[2]) + 2;
  Nz = atoi(argv[3]) + 2;
  Nt = atoi(argv[4]);

  /* A[t%2][z][y][x]: the two time levels of the stencil. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for (m = 0; m < 2; m++) {
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for (i = 0; i < Nz; i++) {
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for (j = 0; j < Ny; j++)
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }
  /* coef[p][z][y][x]: one coefficient field per stencil point (center + 6). */
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for (m = 0; m < 7; m++) {
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for (i = 0; i < Nz; i++) {
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for (j = 0; j < Ny; j++)
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  /* tile size information, including extra element to decide the list length */
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  /* The list is modified here before source-to-source transformations */
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 8;
  tile_size[3] = 1024;
  tile_size[4] = -1;

  /* for timekeeping */
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;
  const int BASE = 1024;

  /* Deterministic random initialization of both time buffers and all
   * coefficient fields, boundaries included. */
  srand(42);
  for (m = 0; m < 2; m++)
    for (i = 0; i < Nz; i++)
      for (j = 0; j < Ny; j++)
        for (k = 0; k < Nx; k++)
          A[m][i][j][k] = 1.0 * (rand() % BASE);
  for (m = 0; m < 7; m++)
    for (i = 0; i < Nz; i++)
      for (j = 0; j < Ny; j++)
        for (k = 0; k < Nx; k++)
          coef[m][i][j][k] = 1.0 * (rand() % BASE);

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for (test = 0; test < TESTS; test++) {
    gettimeofday(&start, 0);
    // tiled execution - Addition: 6 && Multiplication: 2
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lbp, ubp;
    int lbv, ubv;
    /* Start of CLooG code */
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,4);t1++) {
        lbp=max(ceild(t1,2),ceild(8*t1-Nt+3,8));
        ubp=min(floord(Nt+Nz-4,8),floord(4*t1+Nz+1,8));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(t1-1,2)),ceild(8*t2-Nz-4,8));t3<=min(min(min(floord(Nt+Ny-4,8),floord(4*t1+Ny+5,8)),floord(8*t2+Ny+4,8)),floord(8*t1-8*t2+Nz+Ny+3,8));t3++) {
            for (t4=max(max(max(0,ceild(t1-255,256)),ceild(8*t2-Nz-1020,1024)),ceild(8*t3-Ny-1020,1024));t4<=min(min(min(min(floord(Nt+Nx-4,1024),floord(4*t1+Nx+5,1024)),floord(8*t2+Nx+4,1024)),floord(8*t3+Nx+4,1024)),floord(8*t1-8*t2+Nz+Nx+3,1024));t4++) {
              for (t5=max(max(max(max(max(0,4*t1),8*t1-8*t2+1),8*t2-Nz+2),8*t3-Ny+2),1024*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,4*t1+7),8*t2+6),8*t3+6),1024*t4+1022),8*t1-8*t2+Nz+5);t5++) {
                for (t6=max(max(8*t2,t5+1),-8*t1+8*t2+2*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) {
                    lbv=max(1024*t4,t5+1);
                    ubv=min(1024*t4+1023,t5+Nx-2);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);
    (void) timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  /* Free everything, including the top-level pointer arrays and tile_size,
   * which the original leaked. */
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for (m = 0; m < 7; m++) {
    for (i = 0; i < Nz; i++) {
      for (j = 0; j < Ny; j++)
        free(coef[m][i][j]);
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);
  return 0;
}
|
irbuilder_unroll_unroll_partial_factor.c | // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs
// RUN: %clang_cc1 -fopenmp-enable-irbuilder -verify -fopenmp -fopenmp-version=51 -x c -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
// CHECK-LABEL: define {{.*}}@unroll_partial_factor_for(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[A_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[B_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[C_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[D_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[I:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[AGG_CAPTURED:.+]] = alloca %struct.anon, align 8
// CHECK-NEXT: %[[AGG_CAPTURED1:.+]] = alloca %struct.anon.0, align 4
// CHECK-NEXT: %[[DOTCOUNT_ADDR:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_LASTITER:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_LOWERBOUND:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_UPPERBOUND:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_STRIDE:.+]] = alloca i32, align 4
// CHECK-NEXT: store float* %[[A:.+]], float** %[[A_ADDR]], align 8
// CHECK-NEXT: store float* %[[B:.+]], float** %[[B_ADDR]], align 8
// CHECK-NEXT: store float* %[[C:.+]], float** %[[C_ADDR]], align 8
// CHECK-NEXT: store float* %[[D:.+]], float** %[[D_ADDR]], align 8
// CHECK-NEXT: store i32 0, i32* %[[I]], align 4
// CHECK-NEXT: %[[TMP0:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[AGG_CAPTURED]], i32 0, i32 0
// CHECK-NEXT: store i32* %[[I]], i32** %[[TMP0]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[AGG_CAPTURED1]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: store i32 %[[TMP2]], i32* %[[TMP1]], align 4
// CHECK-NEXT: call void @__captured_stmt(i32* %[[DOTCOUNT_ADDR]], %struct.anon* %[[AGG_CAPTURED]])
// CHECK-NEXT: %[[DOTCOUNT:.+]] = load i32, i32* %[[DOTCOUNT_ADDR]], align 4
// CHECK-NEXT: br label %[[OMP_LOOP_PREHEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_PREHEADER]]:
// CHECK-NEXT: %[[TMP3:.+]] = udiv i32 %[[DOTCOUNT]], 2
// CHECK-NEXT: %[[TMP4:.+]] = urem i32 %[[DOTCOUNT]], 2
// CHECK-NEXT: %[[TMP5:.+]] = icmp ne i32 %[[TMP4]], 0
// CHECK-NEXT: %[[TMP6:.+]] = zext i1 %[[TMP5]] to i32
// CHECK-NEXT: %[[OMP_FLOOR0_TRIPCOUNT:.+]] = add nuw i32 %[[TMP3]], %[[TMP6]]
// CHECK-NEXT: br label %[[OMP_FLOOR0_PREHEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_PREHEADER]]:
// CHECK-NEXT: store i32 0, i32* %[[P_LOWERBOUND]], align 4
// CHECK-NEXT: %[[TMP7:.+]] = sub i32 %[[OMP_FLOOR0_TRIPCOUNT]], 1
// CHECK-NEXT: store i32 %[[TMP7]], i32* %[[P_UPPERBOUND]], align 4
// CHECK-NEXT: store i32 1, i32* %[[P_STRIDE]], align 4
// CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1)
// CHECK-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]], i32 34, i32* %[[P_LASTITER]], i32* %[[P_LOWERBOUND]], i32* %[[P_UPPERBOUND]], i32* %[[P_STRIDE]], i32 1, i32 0)
// CHECK-NEXT: %[[TMP8:.+]] = load i32, i32* %[[P_LOWERBOUND]], align 4
// CHECK-NEXT: %[[TMP9:.+]] = load i32, i32* %[[P_UPPERBOUND]], align 4
// CHECK-NEXT: %[[TMP10:.+]] = sub i32 %[[TMP9]], %[[TMP8]]
// CHECK-NEXT: %[[TMP11:.+]] = add i32 %[[TMP10]], 1
// CHECK-NEXT: br label %[[OMP_FLOOR0_HEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_HEADER]]:
// CHECK-NEXT: %[[OMP_FLOOR0_IV:.+]] = phi i32 [ 0, %[[OMP_FLOOR0_PREHEADER]] ], [ %[[OMP_FLOOR0_NEXT:.+]], %[[OMP_FLOOR0_INC:.+]] ]
// CHECK-NEXT: br label %[[OMP_FLOOR0_COND:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_COND]]:
// CHECK-NEXT: %[[OMP_FLOOR0_CMP:.+]] = icmp ult i32 %[[OMP_FLOOR0_IV]], %[[TMP11]]
// CHECK-NEXT: br i1 %[[OMP_FLOOR0_CMP]], label %[[OMP_FLOOR0_BODY:.+]], label %[[OMP_FLOOR0_EXIT:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_BODY]]:
// CHECK-NEXT: %[[TMP12:.+]] = add i32 %[[OMP_FLOOR0_IV]], %[[TMP8]]
// CHECK-NEXT: %[[TMP13:.+]] = icmp eq i32 %[[TMP12]], %[[OMP_FLOOR0_TRIPCOUNT]]
// CHECK-NEXT: %[[TMP14:.+]] = select i1 %[[TMP13]], i32 %[[TMP4]], i32 2
// CHECK-NEXT: br label %[[OMP_TILE0_PREHEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_PREHEADER]]:
// CHECK-NEXT: br label %[[OMP_TILE0_HEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_HEADER]]:
// CHECK-NEXT: %[[OMP_TILE0_IV:.+]] = phi i32 [ 0, %[[OMP_TILE0_PREHEADER]] ], [ %[[OMP_TILE0_NEXT:.+]], %[[OMP_TILE0_INC:.+]] ]
// CHECK-NEXT: br label %[[OMP_TILE0_COND:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_COND]]:
// CHECK-NEXT: %[[OMP_TILE0_CMP:.+]] = icmp ult i32 %[[OMP_TILE0_IV]], %[[TMP14]]
// CHECK-NEXT: br i1 %[[OMP_TILE0_CMP]], label %[[OMP_TILE0_BODY:.+]], label %[[OMP_TILE0_EXIT:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_BODY]]:
// CHECK-NEXT: %[[TMP15:.+]] = mul nuw i32 2, %[[TMP12]]
// CHECK-NEXT: %[[TMP16:.+]] = add nuw i32 %[[TMP15]], %[[OMP_TILE0_IV]]
// CHECK-NEXT: br label %[[OMP_LOOP_BODY:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_BODY]]:
// CHECK-NEXT: call void @__captured_stmt.1(i32* %[[I]], i32 %[[TMP16]], %struct.anon.0* %[[AGG_CAPTURED1]])
// CHECK-NEXT: %[[TMP17:.+]] = load float*, float** %[[B_ADDR]], align 8
// CHECK-NEXT: %[[TMP18:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM:.+]] = sext i32 %[[TMP18]] to i64
// CHECK-NEXT: %[[ARRAYIDX:.+]] = getelementptr inbounds float, float* %[[TMP17]], i64 %[[IDXPROM]]
// CHECK-NEXT: %[[TMP19:.+]] = load float, float* %[[ARRAYIDX]], align 4
// CHECK-NEXT: %[[TMP20:.+]] = load float*, float** %[[C_ADDR]], align 8
// CHECK-NEXT: %[[TMP21:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM2:.+]] = sext i32 %[[TMP21]] to i64
// CHECK-NEXT: %[[ARRAYIDX3:.+]] = getelementptr inbounds float, float* %[[TMP20]], i64 %[[IDXPROM2]]
// CHECK-NEXT: %[[TMP22:.+]] = load float, float* %[[ARRAYIDX3]], align 4
// CHECK-NEXT: %[[MUL:.+]] = fmul float %[[TMP19]], %[[TMP22]]
// CHECK-NEXT: %[[TMP23:.+]] = load float*, float** %[[D_ADDR]], align 8
// CHECK-NEXT: %[[TMP24:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM4:.+]] = sext i32 %[[TMP24]] to i64
// CHECK-NEXT: %[[ARRAYIDX5:.+]] = getelementptr inbounds float, float* %[[TMP23]], i64 %[[IDXPROM4]]
// CHECK-NEXT: %[[TMP25:.+]] = load float, float* %[[ARRAYIDX5]], align 4
// CHECK-NEXT: %[[MUL6:.+]] = fmul float %[[MUL]], %[[TMP25]]
// CHECK-NEXT: %[[TMP26:.+]] = load float*, float** %[[A_ADDR]], align 8
// CHECK-NEXT: %[[TMP27:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM7:.+]] = sext i32 %[[TMP27]] to i64
// CHECK-NEXT: %[[ARRAYIDX8:.+]] = getelementptr inbounds float, float* %[[TMP26]], i64 %[[IDXPROM7]]
// CHECK-NEXT: store float %[[MUL6]], float* %[[ARRAYIDX8]], align 4
// CHECK-NEXT: br label %[[OMP_TILE0_INC]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_INC]]:
// CHECK-NEXT: %[[OMP_TILE0_NEXT]] = add nuw i32 %[[OMP_TILE0_IV]], 1
// CHECK-NEXT: br label %[[OMP_TILE0_HEADER]], !llvm.loop ![[LOOP3:[0-9]+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_EXIT]]:
// CHECK-NEXT: br label %[[OMP_TILE0_AFTER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_AFTER]]:
// CHECK-NEXT: br label %[[OMP_FLOOR0_INC]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_INC]]:
// CHECK-NEXT: %[[OMP_FLOOR0_NEXT]] = add nuw i32 %[[OMP_FLOOR0_IV]], 1
// CHECK-NEXT: br label %[[OMP_FLOOR0_HEADER]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_EXIT]]:
// CHECK-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]])
// CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM9:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1)
// CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @2, i32 %[[OMP_GLOBAL_THREAD_NUM9]])
// CHECK-NEXT: br label %[[OMP_FLOOR0_AFTER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_AFTER]]:
// CHECK-NEXT: br label %[[OMP_LOOP_AFTER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_AFTER]]:
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// Exercises '#pragma omp unroll partial(2)' nested under '#pragma omp for':
// the IRBuilder lowers the partial unroll as a tiled loop (omp_floor0 /
// omp_tile0 above) before worksharing. The CHECK lines are autogenerated
// by update_cc_test_checks.py — do not rename locals or reformat the body.
void unroll_partial_factor_for(float *a, float *b, float *c, float *d) {
#pragma omp for
#pragma omp unroll partial(2)
for (int i = 0; i < 2; i++) {
a[i] = b[i] * c[i] * d[i];
}
}
#endif // HEADER
// CHECK-LABEL: define {{.*}}@__captured_stmt(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[DISTANCE_ADDR:.+]] = alloca i32*, align 8
// CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon*, align 8
// CHECK-NEXT: %[[DOTSTART:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[DOTSTOP:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[DOTSTEP:.+]] = alloca i32, align 4
// CHECK-NEXT: store i32* %[[DISTANCE:.+]], i32** %[[DISTANCE_ADDR]], align 8
// CHECK-NEXT: store %struct.anon* %[[__CONTEXT:.+]], %struct.anon** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon*, %struct.anon** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[TMP0]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32*, i32** %[[TMP1]], align 8
// CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[TMP2]], align 4
// CHECK-NEXT: store i32 %[[TMP3]], i32* %[[DOTSTART]], align 4
// CHECK-NEXT: store i32 2, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: store i32 1, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[TMP4:.+]] = load i32, i32* %[[DOTSTART]], align 4
// CHECK-NEXT: %[[TMP5:.+]] = load i32, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: %[[CMP:.+]] = icmp slt i32 %[[TMP4]], %[[TMP5]]
// CHECK-NEXT: br i1 %[[CMP]], label %[[COND_TRUE:.+]], label %[[COND_FALSE:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_TRUE]]:
// CHECK-NEXT: %[[TMP6:.+]] = load i32, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: %[[TMP7:.+]] = load i32, i32* %[[DOTSTART]], align 4
// CHECK-NEXT: %[[SUB:.+]] = sub nsw i32 %[[TMP6]], %[[TMP7]]
// CHECK-NEXT: %[[TMP8:.+]] = load i32, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[SUB1:.+]] = sub i32 %[[TMP8]], 1
// CHECK-NEXT: %[[ADD:.+]] = add i32 %[[SUB]], %[[SUB1]]
// CHECK-NEXT: %[[TMP9:.+]] = load i32, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[DIV:.+]] = udiv i32 %[[ADD]], %[[TMP9]]
// CHECK-NEXT: br label %[[COND_END:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_FALSE]]:
// CHECK-NEXT: br label %[[COND_END]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_END]]:
// CHECK-NEXT: %[[COND:.+]] = phi i32 [ %[[DIV]], %[[COND_TRUE]] ], [ 0, %[[COND_FALSE]] ]
// CHECK-NEXT: %[[TMP10:.+]] = load i32*, i32** %[[DISTANCE_ADDR]], align 8
// CHECK-NEXT: store i32 %[[COND]], i32* %[[TMP10]], align 4
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK-LABEL: define {{.*}}@__captured_stmt.1(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[LOOPVAR_ADDR:.+]] = alloca i32*, align 8
// CHECK-NEXT: %[[LOGICAL_ADDR:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon.0*, align 8
// CHECK-NEXT: store i32* %[[LOOPVAR:.+]], i32** %[[LOOPVAR_ADDR]], align 8
// CHECK-NEXT: store i32 %[[LOGICAL:.+]], i32* %[[LOGICAL_ADDR]], align 4
// CHECK-NEXT: store %struct.anon.0* %[[__CONTEXT:.+]], %struct.anon.0** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon.0*, %struct.anon.0** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[TMP0]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[TMP1]], align 4
// CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[LOGICAL_ADDR]], align 4
// CHECK-NEXT: %[[MUL:.+]] = mul i32 1, %[[TMP3]]
// CHECK-NEXT: %[[ADD:.+]] = add i32 %[[TMP2]], %[[MUL]]
// CHECK-NEXT: %[[TMP4:.+]] = load i32*, i32** %[[LOOPVAR_ADDR]], align 8
// CHECK-NEXT: store i32 %[[ADD]], i32* %[[TMP4]], align 4
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK: ![[META0:[0-9]+]] = !{i32 1, !"wchar_size", i32 4}
// CHECK: ![[META1:[0-9]+]] = !{i32 7, !"openmp", i32 51}
// CHECK: ![[META2:[0-9]+]] =
// CHECK: ![[LOOP3]] = distinct !{![[LOOP3]], ![[LOOPPROP4:[0-9]+]], ![[LOOPPROP5:[0-9]+]]}
// CHECK: ![[LOOPPROP4]] = !{!"llvm.loop.unroll.enable"}
// CHECK: ![[LOOPPROP5]] = !{!"llvm.loop.unroll.count", i32 2}
|
androidfde_fmt_plug.c | /* androidfde.c
*
* hashkill - a hash cracking tool
* Copyright (C) 2010 Milen Rangelov <gat3way@gat3way.eu>
*
* Modified for JtR and made stuff more generic
* This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_fde;
#elif FMT_REGISTERS_H
john_register_one(&fmt_fde);
#else
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "os.h"
#include <stdint.h>
#include <stdlib.h>
#include <sys/types.h>
#include <string.h>
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 1
#endif
#endif
#include "arch.h"
#include "johnswap.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "memory.h"
#include "pbkdf2_hmac_sha1.h"
#include "aes.h"
#include "sha2.h"
#include "memdbg.h"
#define FORMAT_TAG "$fde$"
#define TAG_LENGTH (sizeof(FORMAT_TAG)-1)
#define FORMAT_LABEL "fde"
#define FORMAT_NAME "Android FDE"
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME "PBKDF2-SHA1 " SHA1_ALGORITHM_NAME " SHA256/AES"
#else
#define ALGORITHM_NAME "PBKDF2-SHA1 SHA256/AES 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT ""
#define PLAINTEXT_LENGTH 64
#define BENCHMARK_LENGTH -1
#define BINARY_SIZE 0
#define BINARY_ALIGN 1
#define SALT_ALIGN 8
#define SALT_SIZE sizeof(struct custom_salt)
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
static struct fmt_tests fde_tests[] = {
{"$fde$16$04b36d4290b56e0fcca9778b74719ab8$16$b45f0f051f13f84872d1ef1abe0ada59$0f61d28f7466c0435040cc845a67e6734500de15df3ba6f48d2534ca2a7b8f910d7547357e8f1ec7364bab41383f5df9b5fb43fcd4a1e06189ce3c6ba77ec908b066e73a508e201c941fb409e9abdc051c3c052a735b01e56be61efa635e82cbceab18db1ba645b93f7befb83155852f0004a7c7d6800e9fa5f0d3c133dd2496f92110c3cdcfb16dcf57df8de830969e18514a34d4917de14597da19f9f7dc81eca2d7d461c91e0a8aeac06bafe89866d24f2b4991b4295b6277d0ff4ad97f1fa58e20f8a24e2062f84c318eb36cfbb4671117bc3522afcf7737353589cae0dce0d7c3341f457af654543758f3f005bd4d68fa2b35777cb2ea5f8f69c4debcfb1d8b2a601320e4f8621dc6e99434007388bdc0ceebc722f9ed44cbce3914bf144db332276e719f6b48108cde55916d861d19dc8c03ac76a2dad322457073111e441488228f13649073aa3aadfab51dadf89a0827acba284154a9e18d926facef43852a0733660a1fbcca8e81d2f41efd9f645a61f9395b75fc7ad446885d304808d511f2ba2e7c6138588c4292aee4ef6f2537bb00c7b015cee4a91d2defa87b67abc1315e71f0489e271673b36412377219e93aba6af3cfd504bf3f6bc24f2b6148536339d91ddd2f013314544650c1c11e7317028a7014909d0c850f78692e476c4f57da586fe26786504130aba22ba5261b989aeb47483d8cb9d5052120a4e5690b5b0cd009aadaadc351db7b6a230ebc1fa771651cb64d78daa56b7a6c6808db3b688afee9b7edaa617d8cb16ac7290465987bd443ea41ce38aa14e0c88874fb2707394b83679de82134efe351b4d021c63b2992a8314b2e93908906400628a7f753c9a4d85e917a207561b7840ce121800fab4026508d1b00fe8e7e756573743e11380f76f6bb7c0e528cb98875e6ad88bff51236601e6942964e37ffe0316b1a1f7bc0d84334fa024bf03c261bd06a07c01f099ad23fb9a1d8c98447463b8988cb33f3e1fb7d7a7c547f9a6d51cf7b75649d3c8cb5bf93be79eba1a961659b5fe928a1c7e80aca857825c6bc11493cb230e66126ef7b7284abe0823b5735bb1dfe844029f175c63442ca774784b775ecf02e48d029ac0f236813be91aca66905640666b89bd08118e3c18c75764bc49d00d1fe53ee92ccaa487852c613cba91f637b6de06dcaa1953a7cfb5333df573273a67f0157b63fbbf48c48f16c423caefaf29cdb5d34b19ac0f57b972b9e5ff1bc5cf25bdcdf8d29fb75865c4501458f19bfd64c844fd52a27feec97dc31ba922aea75706404d853071707d0c6001c59664676be6426ca5c7efbfc09ffa9acac91441f9175fd3148fb046c31a49
d7c7ad10bf3c4b413dd148666b72b5a533f600cb02d7623270e5d1ad33355dd318d06aa8b3d7517cb7d5be40d222a026380cfbf5b79014e7631d677b07bcd805d9ea7103cf1d057bf883b29fb99b064c4e3cb4271596a74895c1c3f7c7c49d2be54b1435af4440ecd019dde11cee14a320712c9275bef339a15d3a18d9f38918d7af0a50a35199980429d74d4cc2a16dea619619a7c19827f4f78d3ebaf13340abf6717cec6bff8399b067fb17f11cdb1f9909c51253f7466ee769546d1d96319bcc1b04a6b1f8d8068f96b959d507c9004d75717792733fadb7a94a2d5db514a61cbd90eef89d1ace5a3138120168d62f1ebef5efbbd4e7f7e987834db81fe8c4877f3edcc71c61e96b20ca26c5a91e28fa11e484c1dcbfd5a0461065fe52f042ee9a09687d800c90a0a792f3dbe257965247f8eecd122b9b234b734454fa1477212a0295a347ae44463de4de405bf4fd91cde400b63d7fced6d7ccd20d79a4899139a79085f8742c3dfe7fbadca56c4e8aa95ce7841ad9675659349f6671d047efa0951feb9c61381f5f9e39182c1ec0a3ebd2ef5e036312c6ed6a0e59777813229ffdac771788e609c7d9f96848f63b428789c55e85c509068df8d5a0a7fc066be8c76205860d86d6c5bb7c2bc85a922a2ad86e6a791fe238420eedd1cf7ac770dd8316ca30c9577441a34873cdf0c5dc2103457a93fa0dd42da5eb2d6f82e9ff47b4bb6cd1d3fcba5645caace577a89c7bd70ff432f8dae113a7877a41a41043dac4c0d21860ad8198a1b9640d979322a20d4b90caa77a5d2b31c5bd06e", "strongpassword"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *cracked;
static int max_cracked;
static struct custom_salt {
int loaded;
unsigned char *cipherbuf;
int keysize;
int iterations; // NOTE, not used. Hard coded to 2000 for FDE from droid <= 4.3 (PBKDF2-sha1)
int saltlen;
unsigned char data[512 * 3];
unsigned char salt[16];
unsigned char mkey[64];
unsigned char iv[16];
} *cur_salt;
/* One-time format setup: sizes and allocates the per-candidate buffers.
 * Under OpenMP, min/max keys-per-crypt are scaled by the thread count
 * (times OMP_SCALE) so crypt_all() has enough candidates to split. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
max_cracked = self->params.max_keys_per_crypt;
/* zero-initialized candidate-password and per-candidate result arrays */
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
cracked = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*cracked));
}
/* Tear-down counterpart of init(): releases the per-candidate buffers. */
static void done(void)
{
	MEM_FREE(saved_key);
	MEM_FREE(cracked);
}
/* Syntactic validation of one ciphertext line.
 * Expected layout: $fde$<saltlen>$<salt hex>$<keysize>$<mkey hex>$<data hex>
 * where saltlen <= 16, keysize <= 64 and data is exactly 512*3 bytes.
 * Returns 1 if well-formed, 0 otherwise. */
static int valid(char *ciphertext, struct fmt_main *self)
{
char *ctcopy, *keeptr;
int saltlen, keysize, extra;
char *p;
if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH) != 0)
return 0;
/* strtokm mutates its input, so work on a copy */
ctcopy = strdup(ciphertext);
keeptr = ctcopy;
ctcopy += TAG_LENGTH;
if ((p = strtokm(ctcopy, "$")) == NULL)
goto err;
if (!isdec(p))
goto err;
saltlen = atoi(p);
if (saltlen > 16) /* saltlen must fit cs.salt[16] */
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* salt */
goto err;
if (hexlenl(p, &extra) != saltlen * 2 || extra)
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* keysize */
goto err;
if (!isdec(p))
goto err;
keysize = atoi(p);
if (keysize > 64) /* must fit cs.mkey[64] */
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* key */
goto err;
if (hexlenl(p, &extra) != keysize * 2 || extra)
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* data */
goto err;
if (hexlenl(p, &extra) != 512 * 3 * 2 || extra)
goto err;
MEM_FREE(keeptr);
return 1;
err:
MEM_FREE(keeptr);
return 0;
}
/* Decode an already-valid()ated ciphertext into a struct custom_salt:
 * hex fields are unpacked into salt, mkey and the 3*512-byte data area.
 * NOTE(review): returns a pointer to a function-static buffer, so this is
 * not reentrant — presumably the caller copies SALT_SIZE bytes out before
 * the next call (standard JtR convention); verify against the core. */
static void *get_salt(char *ciphertext)
{
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
char *p;
// int res;
int i;
static struct custom_salt cs;
memset(&cs, 0, sizeof(cs));
ctcopy += TAG_LENGTH;
p = strtokm(ctcopy, "$");
cs.saltlen = atoi(p);
p = strtokm(NULL, "$");
/* hex-decode the salt (atoi16 maps a hex digit to its value) */
for (i = 0; i < cs.saltlen; i++) {
cs.salt[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
p += 2;
}
p = strtokm(NULL, "$");
cs.keysize = atoi(p);
p = strtokm(NULL, "$");
/* hex-decode the encrypted master key */
for (i = 0; i < cs.keysize; i++) {
cs.mkey[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
p += 2;
}
p = strtokm(NULL, "$");
/* hex-decode the first 3 disk sectors used for plaintext recognition */
for (i = 0; i < 512 * 3; i++) {
cs.data[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
p += 2;
}
MEM_FREE(keeptr);
return (void *)&cs;
}
/* Select the salt used by subsequent crypt_all() calls. */
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
// Not reference implementation - this is modified for use by androidfde!
/* AES-CBC decrypt `size` bytes of src into dst with an ESSIV-style IV:
 * the IV is AES-256_Encrypt(SHA256(key), sector), where `sector` is the
 * starting sector number. NOTE(review): the sector number is copied into
 * the IV block in host byte order — presumably matches the target's
 * (little-endian) dm-crypt layout; confirm if porting. */
static void AES_cbc_essiv(unsigned char *src, unsigned char *dst, unsigned char *key, int startsector,int size)
{
AES_KEY aeskey;
unsigned char essiv[16];
unsigned char essivhash[32];
SHA256_CTX ctx;
unsigned char sectorbuf[16];
unsigned char zeroiv[16];
/* ESSIV "salt" = SHA-256 of the volume key */
SHA256_Init(&ctx);
SHA256_Update(&ctx, key, cur_salt->keysize);
SHA256_Final(essivhash, &ctx);
memset(sectorbuf,0,16);
memset(zeroiv,0,16);
memset(essiv,0,16);
memcpy(sectorbuf,&startsector,4);
/* IV = AES-256_Enc(hash, sector-number block) */
AES_set_encrypt_key(essivhash, 256, &aeskey);
AES_cbc_encrypt(sectorbuf, essiv, 16, &aeskey, zeroiv, AES_ENCRYPT);
/* decrypt the payload with the volume key and the derived IV */
AES_set_decrypt_key(key, cur_salt->keysize*8, &aeskey);
AES_cbc_encrypt(src, dst, size, &aeskey, essiv, AES_DECRYPT);
}
// cracked[index] = hash_plugin_check_hash(saved_key[index]);
/* Test one candidate password (or one SIMD batch starting at `index`):
 * 1. PBKDF2-HMAC-SHA1 (2000 iterations) over the salt derives
 *    keysize bytes of key material plus 16 bytes of IV;
 * 2. that key+IV AES-CBC-decrypts the stored master key;
 * 3. the master key ESSIV-decrypts sector 0 and the data at offset 1024;
 * 4. a hit is declared if the plaintext looks like a FAT boot sector
 *    ("MSDOS5.0" OEM string) or a plausible ext2/3/4 superblock.
 * Results are written into cracked[index .. index+batch-1]. */
void hash_plugin_check_hash(int index)
{
unsigned char keycandidate2[255];
unsigned char decrypted1[512]; // FAT
unsigned char decrypted2[512]; // ext3/4
AES_KEY aeskey;
uint16_t v2,v3,v4;
uint32_t v1,v5;
int j = 0;
#ifdef SIMD_COEF_32
/* SIMD path: derive key material for a whole SSE group at once */
unsigned char *keycandidate, Keycandidate[SSE_GROUP_SZ_SHA1][255];
int lens[SSE_GROUP_SZ_SHA1], i;
unsigned char *pin[SSE_GROUP_SZ_SHA1];
union {
uint32_t *pout[SSE_GROUP_SZ_SHA1];
unsigned char *poutc;
} x;
for (i = 0; i < SSE_GROUP_SZ_SHA1; ++i) {
lens[i] = strlen(saved_key[index+i]);
pin[i] = (unsigned char*)saved_key[index+i];
x.pout[i] = (uint32_t*)(Keycandidate[i]);
}
pbkdf2_sha1_sse((const unsigned char **)pin, lens, cur_salt->salt, 16,
2000, &(x.poutc), cur_salt->keysize + 16, 0);
#else
/* scalar path: one candidate, keysize bytes of key + 16 bytes of IV */
unsigned char keycandidate[255];
char *password = saved_key[index];
pbkdf2_sha1((const uint8_t*)password, strlen(password), (const uint8_t*)(cur_salt->salt),
16, 2000, keycandidate, cur_salt->keysize + 16, 0);
#endif
j = 0;
#ifdef SIMD_COEF_32
for (; j < SSE_GROUP_SZ_SHA1; ++j) {
keycandidate = Keycandidate[j];
#endif
/* decrypt the master key with the derived key (IV = trailing 16 bytes) */
AES_set_decrypt_key(keycandidate, cur_salt->keysize*8, &aeskey);
AES_cbc_encrypt(cur_salt->mkey, keycandidate2, 16, &aeskey, keycandidate+16, AES_DECRYPT);
/* decrypt the start of sector 0 and of sector 2 (offset 1024) */
AES_cbc_essiv(cur_salt->data, decrypted1, keycandidate2,0,32);
AES_cbc_essiv(cur_salt->data + 1024, decrypted2, keycandidate2,2,128);
// Check for FAT
if (!memcmp(decrypted1 + 3, "MSDOS5.0", 8))
cracked[index+j] = 1;
else {
// Check for extfs: sanity-check a handful of superblock fields that
// must be tiny on a real filesystem
memcpy(&v1,decrypted2+72,4);
memcpy(&v2,decrypted2+0x3a,2);
memcpy(&v3,decrypted2+0x3c,2);
memcpy(&v4,decrypted2+0x4c,2);
memcpy(&v5,decrypted2+0x48,4);
#if !ARCH_LITTLE_ENDIAN
/* on-disk values are little-endian; swap on BE hosts */
v1 = JOHNSWAP(v1);
v2 = JOHNSWAP(v2);
v3 = JOHNSWAP(v3);
v4 = JOHNSWAP(v4);
v5 = JOHNSWAP(v5);
#endif
if ((v1<5)&&(v2<4)&&(v3<5)&&(v4<2)&&(v5<5))
cracked[index+j] = 1;
}
#ifdef SIMD_COEF_32
}
#endif
}
/* Test every buffered candidate against the current salt.  Results are
 * recorded in cracked[]; candidates are processed in groups of
 * MAX_KEYS_PER_CRYPT, optionally across OpenMP threads. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
    const int count = *pcount;
    int index;
    memset(cracked, 0, sizeof(cracked[0]) * max_cracked);
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
        hash_plugin_check_hash(index);
    return count;
}
/* Return nonzero if any candidate in the batch cracked this salt. */
static int cmp_all(void *binary, int count)
{
    int i;
    for (i = 0; i < count; i++) {
        if (cracked[i])
            return 1;
    }
    return 0;
}
/* Did candidate `index` crack the current salt? */
static int cmp_one(void *binary, int index)
{
    return cracked[index] != 0;
}
/* Full verification already happened in crypt_all(); nothing more to do. */
static int cmp_exact(char *source, int index)
{
    (void)source;
    (void)index;
    return 1;
}
/* Store plaintext candidate `key` into slot `index`, truncating to the
 * per-slot buffer size (strnzcpy always NUL-terminates). */
static void fde_set_key(char *key, int index)
{
    strnzcpy(saved_key[index], key, sizeof(*saved_key));
}
/* Return the stored plaintext candidate for slot `index`. */
static char *get_key(int index)
{
    return saved_key[index];
}
/* John the Ripper format descriptor for Android full-disk encryption.
 * First brace group: static format parameters; second: method table. */
struct fmt_main fmt_fde = {
    {
        FORMAT_LABEL,
        FORMAT_NAME,
        ALGORITHM_NAME,
        BENCHMARK_COMMENT,
        BENCHMARK_LENGTH,
        0,                      /* min plaintext length */
        PLAINTEXT_LENGTH,
        BINARY_SIZE,
        BINARY_ALIGN,
        SALT_SIZE,
        SALT_ALIGN,
        MIN_KEYS_PER_CRYPT,
        MAX_KEYS_PER_CRYPT,
        FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT,
        { NULL },               /* no tunable cost names */
        { FORMAT_TAG },
        fde_tests
    }, {
        init,
        done,
        fmt_default_reset,
        fmt_default_prepare,
        valid,
        fmt_default_split,
        fmt_default_binary,     /* no binary stored; comparison is via cracked[] */
        get_salt,
        { NULL },               /* no tunable cost functions */
        fmt_default_source,
        {
            fmt_default_binary_hash
        },
        fmt_default_salt_hash,
        NULL,                   /* no salt_compare */
        set_salt,
        fde_set_key,
        get_key,
        fmt_default_clear_keys,
        crypt_all,
        {
            fmt_default_get_hash
        },
        cmp_all,
        cmp_one,
        cmp_exact
    }
};
#endif /* plugin stanza */
|
test.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision: 2.4 $
***********************************************************************EHEADER*/
#include "headers.h"
/*
 * Truncate the AMG interpolation operator P: drop small entries
 * (relative tolerance trunc_factor) and/or cap each row at max_elmts,
 * compressing the CSR arrays in place over each thread's row range.
 *
 * NOTE(review): in this chunk the dropping/compression loops appear to
 * have been elided (see the commented-out fragment mid-function), so
 * several locals (num_lost*, cnt*, P_diag_j_new, num_lost_global, ...)
 * are unused or read uninitialized here -- verify against the full file.
 */
int
hypre_BoomerAMGInterpTruncation( hypre_ParCSRMatrix *P,
                 double trunc_factor,
                 int max_elmts)
{
    /* CSR views of the on-process (diag) and off-process (offd) parts. */
    hypre_CSRMatrix *P_diag = hypre_ParCSRMatrixDiag(P);
    int *P_diag_i = hypre_CSRMatrixI(P_diag);
    int *P_diag_j = hypre_CSRMatrixJ(P_diag);
    double *P_diag_data = hypre_CSRMatrixData(P_diag);
    int *P_diag_j_new;
    double *P_diag_data_new;
    hypre_CSRMatrix *P_offd = hypre_ParCSRMatrixOffd(P);
    int *P_offd_i = hypre_CSRMatrixI(P_offd);
    int *P_offd_j = hypre_CSRMatrixJ(P_offd);
    double *P_offd_data = hypre_CSRMatrixData(P_offd);
    int *P_offd_j_new;
    double *P_offd_data_new;
    int n_fine = hypre_CSRMatrixNumRows(P_diag);
    int num_cols = hypre_CSRMatrixNumCols(P_diag);
    int i, j, start_j;
    int ierr = 0;
    double max_coef;
    /* In-place compression cursors for diag and offd. */
    int next_open = 0;
    int now_checking = 0;
    int next_open_offd = 0;
    int now_checking_offd = 0;
    int num_lost = 0;
    int num_lost_offd = 0;
    int num_lost_global = 0;
    int num_lost_global_offd = 0;
    int P_diag_size;
    int P_offd_size;
    int num_elmts;
    int cnt, cnt_diag, cnt_offd;
    double row_sum;
    double scale;
    /* Threading variables. Entry i of num_lost_(offd_)per_thread holds the
     * number of dropped entries over thread i's row range. Cum_lost_per_thread
     * will temporarily store the cumulative number of dropped entries up to
     * each thread. */
    int my_thread_num, num_threadsID, start, stop;
    int * max_num_threads = hypre_CTAlloc(int, 1);
    int * cum_lost_per_thread;
    int * num_lost_per_thread;
    int * num_lost_offd_per_thread;
    /* Initialize threading variables */
    max_num_threads[0] = hypre_NumThreads();
    cum_lost_per_thread = hypre_CTAlloc(int, max_num_threads[0]);
    num_lost_per_thread = hypre_CTAlloc(int, max_num_threads[0]);
    num_lost_offd_per_thread = hypre_CTAlloc(int, max_num_threads[0]);
    for(i=0; i < max_num_threads[0]; i++)
    {
        num_lost_per_thread[i] = 0;
        num_lost_offd_per_thread[i] = 0;
    }
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,my_thread_num,num_threadsID,max_coef,j,start_j,row_sum,scale,num_lost,now_checking,next_open,num_lost_offd,now_checking_offd,next_open_offd,start,stop,cnt_diag,cnt_offd,num_elmts,cnt)
#endif
    {
        my_thread_num = hypre_GetThreadNum();
        num_threadsID = hypre_NumActiveThreads();
        /* Compute each thread's range of rows to truncate and compress. Note,
         * that i, j and data are all compressed as entries are dropped, but
         * that the compression only occurs locally over each thread's row
         * range. P_diag_i is only made globally consistent at the end of this
         * routine. During the dropping phases, P_diag_i[stop] will point to
         * the start of the next thread's row range. */
        /* my row range */
        start = (n_fine/num_threadsID)*my_thread_num;
        if (my_thread_num == num_threadsID-1)
        { stop = n_fine; }
        else
        { stop = (n_fine/num_threadsID)*(my_thread_num+1); }
        /*
         * Truncate based on truncation tolerance
         */
        /*P_diag_i[n_fine] -= num_lost;
        P_offd_i[n_fine] -= num_lost_offd;
        } */
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
        /*
         * Synchronize and create new diag data structures
         */
        if (num_lost_global)
        {
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
            /* update P_diag_i with number of dropped entries by all lower ranked
             * threads */
            if(my_thread_num > 0)
            {
                for(i=start; i<stop; i++)
                {
                    P_diag_i[i] -= cum_lost_per_thread[my_thread_num-1];
                }
            }
            if(my_thread_num == 0)
            {
                /* Thread 0 swaps in the compacted j/data arrays. */
                hypre_TFree(P_diag_j);
                hypre_TFree(P_diag_data);
                hypre_CSRMatrixJ(P_diag) = P_diag_j_new;
                hypre_CSRMatrixData(P_diag) = P_diag_data_new;
                hypre_CSRMatrixNumNonzeros(P_diag) = P_diag_size;
            }
        }
    } /* end parallel region */
    hypre_TFree(max_num_threads);
    hypre_TFree(cum_lost_per_thread);
    hypre_TFree(num_lost_per_thread);
    hypre_TFree(num_lost_offd_per_thread);
    return ierr;
}
/*
 * Quicksort v[left..right] into decreasing order of |w|, applying the
 * same permutation to the companion array w.  swap2() exchanges the
 * matching entries of both arrays.
 */
void hypre_qsort2abs( int *v,
                      double *w,
                      int left,
                      int right )
{
    int scan, boundary;

    /* Zero- or one-element range: already sorted. */
    if (left >= right)
        return;

    /* Use the middle element as pivot; park it at the front. */
    swap2(v, w, left, (left+right)/2);
    boundary = left;
    for (scan = left+1; scan <= right; scan++)
    {
        /* Elements with larger magnitude move before the boundary. */
        if (fabs(w[scan]) > fabs(w[left]))
            swap2(v, w, ++boundary, scan);
    }
    /* Put the pivot at its final position and recurse on both halves. */
    swap2(v, w, left, boundary);
    hypre_qsort2abs(v, w, left, boundary-1);
    hypre_qsort2abs(v, w, boundary+1, right);
}
|
strassen-task-dep.c | /**********************************************************************************************/
/* This program is part of the Barcelona OpenMP Tasks Suite */
/* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */
/* Copyright (C) 2009 Universitat Politecnica de Catalunya */
/* */
/**********************************************************************************************/
/*
* Copyright (c) 1996 Massachusetts Institute of Technology
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to use, copy, modify, and distribute the Software without
* restriction, provided the Software, including any modified copies made
* under this license, is not distributed for a fee, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE MASSACHUSETTS INSTITUTE OF TECHNOLOGY BE LIABLE
* FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* /WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* Except as contained in this notice, the name of the Massachusetts
* Institute of Technology shall not be used in advertising or otherwise
* to promote the sale, use or other dealings in this Software without
* prior written authorization from the Massachusetts Institute of
* Technology.
*
*/
#include <stdlib.h>
#include "strassen.h"
/*****************************************************************************
**
** OptimizedStrassenMultiply
**
** For large matrices A, B, and C of size MatrixSize * MatrixSize this
** function performs the operation C = A x B efficiently.
**
** INPUT:
** C = (*C WRITE) Address of top left element of matrix C.
** A = (*A IS READ ONLY) Address of top left element of matrix A.
** B = (*B IS READ ONLY) Address of top left element of matrix B.
** MatrixSize = Size of matrices (for n*n matrix, MatrixSize = n)
** RowWidthA = Number of elements in memory between A[x,y] and A[x,y+1]
** RowWidthB = Number of elements in memory between B[x,y] and B[x,y+1]
** RowWidthC = Number of elements in memory between C[x,y] and C[x,y+1]
**
** OUTPUT:
** C = (*C WRITE) Matrix C contains A x B. (Initial value of *C undefined.)
**
*****************************************************************************/
/*
 * Strassen-Winograd matrix multiply C = A x B for MatrixSize x MatrixSize
 * matrices, recursing on quadrants.  Above cutoff_depth the seven
 * recursive products and the add/sub passes run as OpenMP tasks linked by
 * depend clauses; at or below it the same steps run serially.  Below
 * cutoff_size the classical divide-and-conquer multiply is used instead.
 *
 * NOTE(review): the depend clauses name the pointer *variables*
 * (S1, M2, ...), so OpenMP orders tasks by those variables' addresses,
 * not the arrays they point to.  That works here because every task in
 * this invocation uses the same locals -- confirm against the OpenMP
 * spec if this is ever refactored.
 */
static void OptimizedStrassenMultiply_par(double *C, double *A, double *B,
    unsigned MatrixSize, unsigned RowWidthC, unsigned RowWidthA,
    unsigned RowWidthB, unsigned int Depth, unsigned int cutoff_depth,
    unsigned cutoff_size)
{
    unsigned QuadrantSize = MatrixSize >> 1; /* MatixSize / 2 */
    unsigned QuadrantSizeInBytes = sizeof(double) * QuadrantSize * QuadrantSize;
    unsigned Column, Row;
    /************************************************************************
    ** For each matrix A, B, and C, we'll want pointers to each quandrant
    ** in the matrix. These quandrants will be addressed as follows:
    **  --        --
    ** | A    A12 |
    ** |          |
    ** | A21  A22 |
    **  --        --
    ************************************************************************/
    double /* *A, *B, *C, */ *A12, *B12, *C12,
        *A21, *B21, *C21, *A22, *B22, *C22;
    double *S1,*S2,*S3,*S4,*S5,*S6,*S7,*S8,*M2,*M5,*T1sMULT;
#define T2sMULT C22
#define NumberOfVariables 11
    char *Heap;
    void *StartHeap;
    /* Base case: fall back to the classical algorithm. */
    if (MatrixSize <= cutoff_size) {
        MultiplyByDivideAndConquer(C, A, B, MatrixSize, RowWidthC, RowWidthA, RowWidthB, 0);
        return;
    }
    /* Initialize quandrant matrices */
    A12 = A + QuadrantSize;
    B12 = B + QuadrantSize;
    C12 = C + QuadrantSize;
    A21 = A + (RowWidthA * QuadrantSize);
    B21 = B + (RowWidthB * QuadrantSize);
    C21 = C + (RowWidthC * QuadrantSize);
    A22 = A21 + QuadrantSize;
    B22 = B21 + QuadrantSize;
    C22 = C21 + QuadrantSize;
    /* Allocate Heap Space Here */
    /* NOTE(review): malloc return value is not checked. */
    Heap = (char*)malloc(QuadrantSizeInBytes * NumberOfVariables);
    StartHeap = Heap;
    /* Distribute the heap space over the variables: eight S temporaries,
     * two product buffers (M2, M5) and one T1 buffer. */
    S1 = (double*) Heap; Heap += QuadrantSizeInBytes;
    S2 = (double*) Heap; Heap += QuadrantSizeInBytes;
    S3 = (double*) Heap; Heap += QuadrantSizeInBytes;
    S4 = (double*) Heap; Heap += QuadrantSizeInBytes;
    S5 = (double*) Heap; Heap += QuadrantSizeInBytes;
    S6 = (double*) Heap; Heap += QuadrantSizeInBytes;
    S7 = (double*) Heap; Heap += QuadrantSizeInBytes;
    S8 = (double*) Heap; Heap += QuadrantSizeInBytes;
    M2 = (double*) Heap; Heap += QuadrantSizeInBytes;
    M5 = (double*) Heap; Heap += QuadrantSizeInBytes;
    T1sMULT = (double*) Heap; Heap += QuadrantSizeInBytes;
    if (Depth < cutoff_depth)
    {
        /* Task-parallel path: S1..S8 combination matrices, then the
         * seven recursive products, then the quadrant merge passes. */
        #pragma omp task depend(in: A21, A22) depend(out: S1) private(Row, Column)
        for (Row = 0; Row < QuadrantSize; Row++)
            for (Column = 0; Column < QuadrantSize; Column++)
                S1[Row * QuadrantSize + Column] = A21[RowWidthA * Row + Column] + A22[RowWidthA * Row + Column];
        #pragma omp task depend(in: S1, A) depend(out: S2) private(Row, Column)
        for (Row = 0; Row < QuadrantSize; Row++)
            for (Column = 0; Column < QuadrantSize; Column++)
                S2[Row * QuadrantSize + Column] = S1[Row * QuadrantSize + Column] - A[RowWidthA * Row + Column];
        #pragma omp task depend(in: A12, S2) depend(out: S4) private(Row, Column)
        for (Row = 0; Row < QuadrantSize; Row++)
            for (Column = 0; Column < QuadrantSize; Column++)
                S4[Row * QuadrantSize + Column] = A12[Row * RowWidthA + Column] - S2[QuadrantSize * Row + Column];
        #pragma omp task depend(in: B12, B) depend(out: S5) private(Row, Column)
        for (Row = 0; Row < QuadrantSize; Row++)
            for (Column = 0; Column < QuadrantSize; Column++)
                S5[Row * QuadrantSize + Column] = B12[Row * RowWidthB + Column] - B[Row * RowWidthB + Column];
        #pragma omp task depend(in: B22, S5) depend(out: S6) private(Row, Column)
        for (Row = 0; Row < QuadrantSize; Row++)
            for (Column = 0; Column < QuadrantSize; Column++)
                S6[Row * QuadrantSize + Column] = B22[Row * RowWidthB + Column] - S5[Row * QuadrantSize + Column];
        #pragma omp task depend(in: S6, B21) depend(out: S8) private(Row, Column)
        for (Row = 0; Row < QuadrantSize; Row++)
            for (Column = 0; Column < QuadrantSize; Column++)
                S8[Row * QuadrantSize + Column] = S6[Row * QuadrantSize + Column] - B21[Row * RowWidthB + Column];
        #pragma omp task depend(in: A, A21) depend(out: S3) private(Row, Column)
        for (Row = 0; Row < QuadrantSize; Row++)
            for (Column = 0; Column < QuadrantSize; Column++)
                S3[Row * QuadrantSize + Column] = A[RowWidthA * Row + Column] - A21[RowWidthA * Row + Column];
        #pragma omp task depend(in: B22, B12) depend(out: S7) private(Row, Column)
        for (Row = 0; Row < QuadrantSize; Row++)
            for (Column = 0; Column < QuadrantSize; Column++)
                S7[Row * QuadrantSize + Column] = B22[Row * RowWidthB + Column] - B12[Row * RowWidthB + Column];
        /* M2 = A x B */
        #pragma omp task depend(in: A, B) depend(out: M2)
        OptimizedStrassenMultiply_par(M2, A, B, QuadrantSize, QuadrantSize, RowWidthA, RowWidthB, Depth+1, cutoff_depth, cutoff_size);
        /* M5 = S1 * S5 */
        #pragma omp task untied depend(in: S1, S5) depend(out: M5)
        OptimizedStrassenMultiply_par(M5, S1, S5, QuadrantSize, QuadrantSize, QuadrantSize, QuadrantSize, Depth+1, cutoff_depth, cutoff_size);
        /* Step 1 of T1 = S2 x S6 + M2 */
        #pragma omp task untied depend(in: S2, S6) depend(out: T1sMULT)
        OptimizedStrassenMultiply_par(T1sMULT, S2, S6, QuadrantSize, QuadrantSize, QuadrantSize, QuadrantSize, Depth+1, cutoff_depth, cutoff_size);
        /* Step 1 of T2 = T1 + S3 x S7 */
        #pragma omp task untied depend(in: S3, S7) depend(out: C22)
        OptimizedStrassenMultiply_par(C22, S3, S7, QuadrantSize, RowWidthC /*FIXME*/, QuadrantSize, QuadrantSize, Depth+1, cutoff_depth, cutoff_size);
        /* Step 1 of C = M2 + A12 * B21 */
        #pragma omp task untied depend(in: A12, B21) depend(out: C)
        OptimizedStrassenMultiply_par(C, A12, B21, QuadrantSize, RowWidthC, RowWidthA, RowWidthB, Depth+1, cutoff_depth, cutoff_size);
        /* Step 1 of C12 = S4 x B22 + T1 + M5 */
        #pragma omp task untied depend(in: S4, B22) depend(out: C12)
        OptimizedStrassenMultiply_par(C12, S4, B22, QuadrantSize, RowWidthC, QuadrantSize, RowWidthB, Depth+1, cutoff_depth, cutoff_size);
        /* Step 1 of C21 = T2 - A22 * S8 */
        #pragma omp task untied depend(in: A22, S8) depend(out: C21)
        OptimizedStrassenMultiply_par(C21, A22, S8, QuadrantSize, RowWidthC, RowWidthA, QuadrantSize, Depth+1, cutoff_depth, cutoff_size);
        /* Merge passes: fold M2/M5/T1 into the four C quadrants. */
        #pragma omp task depend(inout: C) depend(in: M2) private(Row, Column)
        for (Row = 0; Row < QuadrantSize; Row++)
            for (Column = 0; Column < QuadrantSize; Column += 1)
                C[RowWidthC * Row + Column] += M2[Row * QuadrantSize + Column];
        #pragma omp task depend(inout: C12) depend(in: M5, T1sMULT, M2) private(Row, Column)
        for (Row = 0; Row < QuadrantSize; Row++)
            for (Column = 0; Column < QuadrantSize; Column += 1)
                C12[RowWidthC * Row + Column] += M5[Row * QuadrantSize + Column] + T1sMULT[Row * QuadrantSize + Column] + M2[Row * QuadrantSize + Column];
        #pragma omp task depend(inout: C21) depend(in: C22, T1sMULT, M2) private(Row, Column)
        for (Row = 0; Row < QuadrantSize; Row++)
            for (Column = 0; Column < QuadrantSize; Column += 1)
                C21[RowWidthC * Row + Column] = -C21[RowWidthC * Row + Column] + C22[RowWidthC * Row + Column] + T1sMULT[Row * QuadrantSize + Column] + M2[Row * QuadrantSize + Column];
        #pragma omp task depend(inout: C22) depend(in: M5, T1sMULT, M2) private(Row, Column)
        for (Row = 0; Row < QuadrantSize; Row++)
            for (Column = 0; Column < QuadrantSize; Column += 1)
                C22[RowWidthC * Row + Column] += M5[Row * QuadrantSize + Column] + T1sMULT[Row * QuadrantSize + Column] + M2[Row * QuadrantSize + Column];
        /* All tasks must finish before the heap is freed below. */
        #pragma omp taskwait
    }
    else
    {
        /* Serial path: identical arithmetic, fused loops. */
        for (Row = 0; Row < QuadrantSize; Row++)
            for (Column = 0; Column < QuadrantSize; Column++) {
                S1[Row * QuadrantSize + Column] = A21[RowWidthA * Row + Column] + A22[RowWidthA * Row + Column];
                S2[Row * QuadrantSize + Column] = S1[Row * QuadrantSize + Column] - A[RowWidthA * Row + Column];
                S4[Row * QuadrantSize + Column] = A12[Row * RowWidthA + Column] - S2[QuadrantSize * Row + Column];
                S5[Row * QuadrantSize + Column] = B12[Row * RowWidthB + Column] - B[Row * RowWidthB + Column];
                S6[Row * QuadrantSize + Column] = B22[Row * RowWidthB + Column] - S5[Row * QuadrantSize + Column];
                S8[Row * QuadrantSize + Column] = S6[Row * QuadrantSize + Column] - B21[Row * RowWidthB + Column];
                S3[Row * QuadrantSize + Column] = A[RowWidthA * Row + Column] - A21[RowWidthA * Row + Column];
                S7[Row * QuadrantSize + Column] = B22[Row * RowWidthB + Column] - B12[Row * RowWidthB + Column];
            }
        /* M2 = A x B */
        OptimizedStrassenMultiply_par(M2, A, B, QuadrantSize, QuadrantSize, RowWidthA, RowWidthB, Depth+1, cutoff_depth, cutoff_size);
        /* M5 = S1 * S5 */
        OptimizedStrassenMultiply_par(M5, S1, S5, QuadrantSize, QuadrantSize, QuadrantSize, QuadrantSize, Depth+1, cutoff_depth, cutoff_size);
        /* Step 1 of T1 = S2 x S6 + M2 */
        OptimizedStrassenMultiply_par(T1sMULT, S2, S6, QuadrantSize, QuadrantSize, QuadrantSize, QuadrantSize, Depth+1, cutoff_depth, cutoff_size);
        /* Step 1 of T2 = T1 + S3 x S7 */
        OptimizedStrassenMultiply_par(C22, S3, S7, QuadrantSize, RowWidthC /*FIXME*/, QuadrantSize, QuadrantSize, Depth+1, cutoff_depth, cutoff_size);
        /* Step 1 of C = M2 + A12 * B21 */
        OptimizedStrassenMultiply_par(C, A12, B21, QuadrantSize, RowWidthC, RowWidthA, RowWidthB, Depth+1, cutoff_depth, cutoff_size);
        /* Step 1 of C12 = S4 x B22 + T1 + M5 */
        OptimizedStrassenMultiply_par(C12, S4, B22, QuadrantSize, RowWidthC, QuadrantSize, RowWidthB, Depth+1, cutoff_depth, cutoff_size);
        /* Step 1 of C21 = T2 - A22 * S8 */
        OptimizedStrassenMultiply_par(C21, A22, S8, QuadrantSize, RowWidthC, RowWidthA, QuadrantSize, Depth+1, cutoff_depth, cutoff_size);
        for (Row = 0; Row < QuadrantSize; Row++) {
            for (Column = 0; Column < QuadrantSize; Column += 1) {
                C[RowWidthC * Row + Column] += M2[Row * QuadrantSize + Column];
                C12[RowWidthC * Row + Column] += M5[Row * QuadrantSize + Column] + T1sMULT[Row * QuadrantSize + Column] + M2[Row * QuadrantSize + Column];
                C21[RowWidthC * Row + Column] = -C21[RowWidthC * Row + Column] + C22[RowWidthC * Row + Column] + T1sMULT[Row * QuadrantSize + Column] + M2[Row * QuadrantSize + Column];
                C22[RowWidthC * Row + Column] += M5[Row * QuadrantSize + Column] + T1sMULT[Row * QuadrantSize + Column] + M2[Row * QuadrantSize + Column];
            }
        }
    }
    free(StartHeap);
}
/* Entry point: C = A x B for n x n matrices.  Spawns the OpenMP team;
 * the master thread seeds the recursive task decomposition. */
void strassen_main_par(double *A, double *B, double *C, int n, unsigned int cutoff_size, unsigned int cutoff_depth)
{
    #pragma omp parallel
    {
        #pragma omp master
        {
            OptimizedStrassenMultiply_par(C, A, B, n, n, n, n, 1, cutoff_depth, cutoff_size);
        }
    }
}
|
valid.yolo10.src.h | #pragma once
#include "ukr.h"
#include "omp.h"
#include "transpose.h"
#include "gen_ukr_A6B2gemm_1_512_17_17_1024_1_1.h"
#include "gen_ukr_A1B2gemm_1_512_17_17_1024_1_1.h"
/*
 * Auto-generated convolution-as-GEMM driver (1x512x17x17 -> 1024 case).
 * Phase 1: each thread transposes its share of oriB into the packed
 * layout in B (16-wide filter panels).  Phase 2: a generated tiled loop
 * nest invokes the 6x2v (full) or 1x2v (remainder) micro-kernels.
 * Tile sizes Tc1/Txy3/Tf2 and the uN* dims come from the included
 * generated headers.
 */
void testrun(float* A ,float*B, float*C, float*oriB ){
    int tid = omp_get_thread_num();
    int Nx = 17;
    int Ny = 17;
    int Nh = 1;
    long long Astrides[6] = {0,1,2,3,4,5};
    int b1 = 0;
    /* Pack B: transpose 8x8 blocks into 16-element filter panels;
     * work is striped over threads by the (tid%1)/(tid/1) offsets. */
    for (int fpck = (tid%1)*16; fpck < uNf; fpck+=1*16){
        for(int cwh = (tid/1)*8; cwh < uNc*uNw*uNh/8*8; cwh+=8*1){
            transpose8x8_avx(oriB+ (fpck+0)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 0, uNc*uNw*uNh, 16);
            transpose8x8_avx(oriB+ (fpck+8)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 8, uNc*uNw*uNh, 16);
        }
    }
    /* All packing must complete before any thread starts the GEMM. */
    #pragma omp barrier// begin push button generated block
    /* Generated tiling hierarchy over xy (spatial), f (filters), c (channels). */
    for(int xy5=0;xy5<289+0;xy5+=289)
    {
        for(int f5=0;f5<512+0;f5+=512)
        {
            for(int c5=0;c5<1024+0;c5+=1024)
            {
                for(int xy4=xy5;xy4<min(289, 289+xy5);xy4+=289)
                {
                    for(int f4=f5;f4<min(512, 512+f5);f4+=512)
                    {
                        for(int c4=c5;c4<min(1024, 1024+c5);c4+=1024)
                        {
                            for(int c3=c4;c3<min(1024, 1024+c4);c3+=Tc1)
                            {
                                for(int xy3=xy4;xy3<min(289, 289+xy4);xy3+=Txy3)
                                {
                                    for(int f3=f4;f3<min(512, 512+f4);f3+=Tf2)
                                    {
                                        for(int xy2=xy3;xy2<min(289, Txy3+xy3);xy2+=6)
                                        {
                                            for(int f2=f3;f2<min(512, Tf2+f3);f2+=16)
                                            {
                                                for(int c2=c3;c2<min(1024, Tc1+c3);c2+=Tc1)
                                                {
                                                    for(int c1=c2;c1<min(1024, Tc1+c2);c1+=Tc1)
                                                    {
                                                        for(int xy1=xy2;xy1<min(289, 6+xy2);xy1+=6)
                                                        {
                                                            for(int f1=f2;f1<min(512, 16+f2);f1+=16)
                                                            {
                                                                /* Decompose linear indices into x/y and packed offsets. */
                                                                int ctile=min(Tc1, 1024-c1);
                                                                int x1=xy1/17;
                                                                int y1=xy1%17/1;
                                                                int c1_1=c1/1;
                                                                int c1_2=c1%1/1;
                                                                int kf1_1=f1/16;
                                                                int kf1_2=f1%16/1;
                                                                int of1_1=f1/1;
                                                                int of1_2=f1%1/1;
                                                                int offsetA=0+b1*295936+c1_1*289+1*x1*17+1*y1*1+c1_2*1;
                                                                int offsetB=0+kf1_1*16384+c1*16+0*16+0*16+kf1_2*1;
                                                                int offsetC=0+b1*147968+of1_1*289+x1*17+y1*1+of1_2*1;
                                                                if(17-y1>=6){
                                                                    /* Full 6-row micro-tile fits inside this image row. */
                                                                    cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
                                                                }
                                                                else if(17*17-xy1>=6){
                                                                    /* Micro-tile wraps to the next row: adjust row strides.
                                                                     * NOTE(review): the +=0/-=0 adjustments are no-ops as
                                                                     * generated for this shape -- presumably the generator
                                                                     * emits nonzero deltas for other shapes; confirm. */
                                                                    for(int sti=17-y1;sti<6;sti+=1)
                                                                    {
                                                                        Astrides[sti]+=0;
                                                                    }
                                                                    cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
                                                                    for(int sti=17-y1;sti<6;sti+=1)
                                                                    {
                                                                        Astrides[sti]-=0;
                                                                    }
                                                                }
                                                                else{
                                                                    /* Tail of the image: single-row micro-kernel. */
                                                                    cnn_ukr_float_scatter_1x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
                                                                }
                                                            }
                                                        }
                                                    }
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }
// end push button generated block
}
terrain.c | #include "blocko.h"
float hmap[TILESW][TILESD];
float hmap2[TILESW][TILESD];
int tscootx, tscootz, tchunk_scootx, tchunk_scootz;
/*
 * Recursive midpoint-displacement heightmap generation over the
 * rectangle [x0..x2] x [z0..z2] of hmap.  A value of 0 marks an unset
 * cell; corners/edges already set by an enclosing call are kept, so
 * adjacent regions share edges seamlessly.
 * NOTE(review): `seed` is presumably consumed by the RANDI/RANDF macros
 * for deterministic per-region noise -- confirm against blocko.h.
 */
void gen_hmap(int x0, int x2, int z0, int z2)
{
    unsigned seed = SEED4(x0, x2, z0, z2);
    // pick corners if they aren't set
    if (hmap[x0][z0] == 0) hmap[x0][z0] = RANDI(64, 127);
    if (hmap[x0][z2] == 0) hmap[x0][z2] = RANDI(64, 127);
    if (hmap[x2][z0] == 0) hmap[x2][z0] = RANDI(64, 127);
    if (hmap[x2][z2] == 0) hmap[x2][z2] = RANDI(64, 127);
    /* Midpoints and displacement amplitude scaled to region size. */
    int x1 = (x0 + x2) / 2;
    int z1 = (z0 + z2) / 2;
    int w = (x2 - x0) / 4;
    int d = (z2 - z0) / 4;
    w = w ? w : 1;
    d = d ? d : 1;
    float d2 = d / 2.f;
    /* No random jitter for tiny regions (keeps the final surface smooth). */
    float r = w > 2 ? 1.f : 0.f;
    // edges middles
    if (!hmap[x0][z1])
        hmap[x0][z1] = (hmap[x0][z0] + hmap[x0][z2]) / 2.f + r * RANDF(-d2, d2);
    if (!hmap[x2][z1])
        hmap[x2][z1] = (hmap[x2][z0] + hmap[x2][z2]) / 2.f + r * RANDF(-d2, d2);
    if (!hmap[x1][z0])
        hmap[x1][z0] = (hmap[x0][z0] + hmap[x2][z0]) / 2.f + r * RANDF(-d2, d2);
    if (!hmap[x1][z2])
        hmap[x1][z2] = (hmap[x0][z2] + hmap[x2][z2]) / 2.f + r * RANDF(-d2, d2);
    // middle middle
    hmap[x1][z1] = (hmap[x0][z1] + hmap[x2][z1] + hmap[x1][z0] + hmap[x1][z2]) / 4.f + r * RANDF(-d, d);
    // recurse if there are any unfilled spots
    if(x1 - x0 > 1 || x2 - x1 > 1 || z1 - z0 > 1 || z2 - z1 > 1)
    {
        gen_hmap(x0, x1, z0, z1);
        gen_hmap(x0, x1, z1, z2);
        gen_hmap(x1, x2, z0, z1);
        gen_hmap(x1, x2, z1, z2);
    }
}
/*
 * Post-process the raw heightmap hmap into hmap2: box-blur with a
 * noise-chosen radius, then shape beaches, ocean floors and "swoosh"
 * ridges with several noise octaves.  Values are clamped to TILESH-1.
 */
void smooth_hmap()
{
    for (int x = 0; x < TILESW; x++) for (int z = 0; z < TILESD; z++)
    {
        /* Blur radius 1..3 varies smoothly across the map. */
        float p365 = noise(x, 0, -z, 365);
        int radius = p365 < 0.0f ? 3 :
                     p365 < 0.2f ? 2 : 1;
        int x0 = x - radius;
        int x1 = x + radius + 1;
        int z0 = z - radius;
        int z1 = z + radius + 1;
        CLAMP(x0, 0, TILESW-1);
        CLAMP(x1, 0, TILESW-1);
        CLAMP(z0, 0, TILESD-1);
        CLAMP(z1, 0, TILESD-1);
        /* Integer box average of the neighborhood (truncates floats). */
        int sum = 0, n = 0;
        for (int i = x0; i < x1; i++) for (int j = z0; j < z1; j++)
        {
            sum += hmap[i][j];
            n++;
        }
        int res = sum / n;
        float p800 = noise(x, 0, z, 800);
        float p777 = noise(z, 0, x, 777);
        float p301 = noise(x, 0, z, 301);
        float p204 = noise(x, 0, z, 204);
        float p33 = noise(x, 0, z, 32 * (1.1 + p301));
        float swoosh = p33 > 0.3 ? (10 - 30 * (p33 - 0.3)) : 0;
        /* Noise-driven beach altitude in [90,100]. */
        float times = (p204 * 20.f) + 30.f;
        float plus = (-p204 * 40.f) + 60.f;
        CLAMP(times, 20.f, 40.f);
        CLAMP(plus, 40.f, 80.f);
        int beach_ht = (1.f - p777) * times + plus;
        CLAMP(beach_ht, 90, 100);
        if (res > beach_ht) // beaches
        {
            /* Flatten terrain just above the beach line. */
            if (res > beach_ht + 21) res -= 18;
            else res = ((res - beach_ht) / 7) + beach_ht;
        }
        /* Large-scale p800 regions blend toward sea level (102),
         * with swoosh ridges poking up to 101. */
        float s = (1 + p204) * 0.2;
        if (p800 > 0.0 + s)
        {
            float t = (p800 - 0.0 - s) * 10;
            CLAMP(t, 0.f, 1.f);
            res = lerp(t, res, 102);
            if (res == 102 && swoosh) res = 101;
        }
        hmap2[x][z] = res < TILESH - 1 ? res : TILESH - 1;
    }
}
/* Build the world heightmap: run midpoint displacement on an 8x8 grid
 * of sub-rectangles (shared edges keep them seamless), then smooth the
 * result into hmap2. */
void create_hmap()
{
    // generate in pieces
    for (int gx = 0; gx < 8; gx++)
    {
        for (int gz = 0; gz < 8; gz++)
        {
            int x0 = gx * TILESW / 8;
            int x1 = (gx + 1) * TILESW / 8;
            int z0 = gz * TILESD / 8;
            int z1 = (gz + 1) * TILESD / 8;
            CLAMP(x1, 0, TILESW-1);
            CLAMP(z1, 0, TILESD-1);
            gen_hmap(x0, x1, z0, z1);
        }
    }
    smooth_hmap();
}
/*
 * Generate terrain for the tile region [xlo,xhi) x [zlo,zhi): per-column
 * material selection from the heightmap plus 3D noise (caves, plateaus,
 * ore, water), then Bezier cave carving, water containment, trees, and
 * initial sunlight.  Columns already generated are skipped, so chunks
 * may safely overlap by one tile.
 *
 * Fix: the Bezier cave-point clip tested y against TILESD (map depth in
 * tiles) instead of TILESH (map height); y is the vertical coordinate
 * throughout this function (TT_(x, y, z), loops bounded by TILESH).
 */
void gen_chunk(int xlo, int xhi, int zlo, int zhi)
{
    CLAMP(xlo, 0, TILESW-1);
    CLAMP(xhi, 0, TILESW-1);
    CLAMP(zlo, 0, TILESD-1);
    CLAMP(zhi, 0, TILESD-1);
    static char column_already_generated[TILESW][TILESD];
    int x;
    /* Pass 1: fill each (x,z) column with materials, bottom to top. */
    #pragma omp parallel for
    for (x = xlo; x < xhi; x++) for (int z = zlo; z < zhi; z++)
    {
        if (x == xlo && z == zlo)
            omp_threads = omp_get_num_threads();
        if (column_already_generated[x][z])
            continue;
        column_already_generated[x][z] = true;
        float p1080 = noise(x, 0, -z, 1080);
        float p530 = noise(z, 0, x, 530);
        float p630 = noise(-z, 0, x, 629);
        float p200 = noise(x, 0, z, 200);
        float p80 = noise(x, 0, z, 80);
        float p15 = noise(z, 0, -x, 15);
        //float p5 = noise(-x, 0, z, 5);
        /* Flatten highlands toward altitude 100 in p200 regions. */
        if (p200 > 0.2f)
        {
            float flatten = (p200 - 0.2f) * 80;
            CLAMP(flatten, 1, 12);
            hmap2[x][z] -= 100;
            hmap2[x][z] /= flatten;
            hmap2[x][z] += 100;
        }
        int solid_depth = 0;
        int slicey_bit = false;
        int plateau_bit = false;
        int mode = p1080 > 0 ? 1 : 10;
        for (int y = 0; y < TILESH; y++)
        {
            /* Bedrock floor at the bottom of the world. */
            if (y == TILESH - 1) { TT_(x, y, z) = HARD; continue; }
            float p300 = noise(x, y, z, 300);
            float p32 = noise(x, y*mode, z, 16 + 16 * (1.1 + p300));
            float plat = p32 > 0.3 ? (10 - 30 * (p32 * p32 * p32 - 0.3)) : 0;
            float p90 = noise(x, y, z, 90);
            float p91 = noise(x+1000, y+1000, z+1000, 91);
            float p42 = noise(x, y*(p300 + 1), z, 42);
            float p9 = noise(x, y*0.05, z, 9);
            float p2 = noise(-z, y, x, 2);
            if (p300 + fabsf(p80) * 0.25 + p15 * 0.125 < -0.5) { plat = -plat; }
            else if (p300 < 0.5) { plat = 0; }
            int cave = (p90 < -0.24 || p91 < -0.24) && (p42 > 0.5 && p9 < 0.4);
            if (y > hmap2[x][z] - ((p80 + 1) * 20) && p90 > 0.4 && p91 > 0.4 && p42 > 0.01 && p42 < 0.09 && p300 > 0.3)
                slicey_bit = true;
            int platted = y < hmap2[x][z] + plat * (mode * 0.125f + 0.875f);
            if ((cave || platted) && !plateau_bit)
            {
                unsigned seed = SEED2(x, z);
                if (!slicey_bit || RANDP(5))
                {
                    int type = (y > 100 && hmap2[x][z] > 99) ? WATR : OPEN; //only allow water below low heightmap
                    TT_(x, y, z) = type;
                    solid_depth = 0;
                    slicey_bit = false;
                    goto out;
                }
            }
            else
            {
                if (mode == 10 && plat && !cave && y < hmap2[x][z])
                    plateau_bit = true;
                slicey_bit = false;
            }
            /* Solid block: pick material by depth below the surface. */
            solid_depth++;
            float p16 = noise(x, y, z, 16);
            int slv = 76 + p530 * 20;
            int dlv = 86 + p630 * 20;
            int ore = p2 > 0.4f ? ORE : OREH;
            int ston = p42 > 0.4f && p9 < -0.3f ? ore : STON;
            if (slicey_bit) TT_(x, y, z) = p9 > 0.4f ? HARD : SAND;
            else if (solid_depth > 14 + 5 * p9) TT_(x, y, z) = GRAN;
            else if (y < slv - 5 * p16) TT_(x, y, z) = ston;
            else if (y < dlv - 5 * p16) TT_(x, y, z) = p80 > (-solid_depth * 0.1f) ? DIRT : OPEN; // erosion
            else if (y < 100 - 5 * p16) TT_(x, y, z) = solid_depth == 1 ? GRAS : DIRT;
            else if (y < 120 ) TT_(x, y, z) = solid_depth < 4 + 5 * p9 ? SAND : ston;
            else TT_(x, y, z) = HARD;
            out: ;
        }
    }
    // find nearby bezier curvy caves
    #define REGW (CHUNKW*16)
    #define REGD (CHUNKD*16)
    // find region ,-- have to add 1 bc we're overdrawing chunks
    // lower bound /
    int rxlo = (int)((xlo+1) / REGW) * REGW;
    int rzlo = (int)((zlo+1) / REGD) * REGD;
    unsigned seed = SEED2(rxlo, rzlo);
    // find region center
    int rxcenter = rxlo + REGW/2;
    int rzcenter = rzlo + REGD/2;
    struct point PC = (struct point){rxcenter, TILESH - RANDI(1, 25), rzcenter};
    struct point P0;
    struct point P1;
    struct point P2;
    struct point P3 = PC;
    int nr_caves = RANDI(0, 100);
    // cave system stretchiness
    int sx = RANDI(10, 60);
    int sy = RANDI(10, 60);
    int sz = RANDI(10, 60);
    #define MAX_CAVE_POINTS 10000
    #define QCAVE(x,y,z,radius_sq) ((struct qcave){x, y, z, radius_sq})
    struct qcave cave_points[MAX_CAVE_POINTS];
    int cave_p_len = 0;
    /* Sample cubic Bezier curves through the region; keep the points
     * that land inside this chunk, each with a wandering radius. */
    for (int i = 0; i < nr_caves; i++)
    {
        // random walk from center of region, or end of last curve
        P0 = RANDP(33) ? PC : P3;
        P1 = (struct point){P0.x + RANDI(-sx, sx), P0.y + RANDI(-sy, sy), P0.z + RANDI(-sz, sz)};
        P2 = (struct point){P1.x + RANDI(-sx, sx), P1.y + RANDI(-sy, sy), P1.z + RANDI(-sz, sz)};
        P3 = (struct point){P2.x + RANDI(-sx, sx), P2.y + RANDI(-sy, sy), P2.z + RANDI(-sz, sz)};
        float root_radius = 0.f, delta = 0.f;
        for (float t = 0.f; t <= 1.f; t += 0.001f)
        {
            if (cave_p_len >= MAX_CAVE_POINTS) break;
            if (root_radius == 0.f || RANDP(0.002f))
            {
                root_radius = RAND01;
                delta = RANDF(-0.001f, 0.001f);
            }
            root_radius += delta;
            float radius_sq = root_radius * root_radius * root_radius * root_radius * 50.f;
            CLAMP(radius_sq, 1.f, 50.f);
            float s = 1.f - t;
            int x = (int)(s*s*s*P0.x + 3.f*t*s*s*P1.x + 3.f*t*t*s*P2.x + t*t*t*P3.x);
            int y = (int)(s*s*s*P0.y + 3.f*t*s*s*P1.y + 3.f*t*t*s*P2.y + t*t*t*P3.y);
            int z = (int)(s*s*s*P0.z + 3.f*t*s*s*P1.z + 3.f*t*t*s*P2.z + t*t*t*P3.z);
            // TODO: don't store duplicate cave points?
            /* y is the vertical coordinate: clip against TILESH, not TILESD. */
            if (x >= xlo && x <= xhi && y >= 0 && y <= TILESH - 1 && z >= zlo && z <= zhi)
                cave_points[cave_p_len++] = QCAVE(x, y, z, radius_sq);
        }
    }
    // carve caves
    #pragma omp parallel for
    for (x = xlo; x < xhi; x++) for (int z = zlo; z < zhi; z++) for (int y = 0; y < TILESH-2; y++)
        for (int i = 0; i < cave_p_len; i++)
        {
            int dist_sq = DIST_SQ(cave_points[i].x - x, cave_points[i].y - y, cave_points[i].z - z);
            if (dist_sq <= cave_points[i].radius_sq)
            {
                TT_(x, y, z) = OPEN;
                break;
            }
        }
    // correcting pass over middle, contain floating water
    #pragma omp parallel for
    for (x = xlo+1; x < xhi-1; x++) for (int z = zlo+1; z < zhi-1; z++) for (int y = 100; y < TILESH-2; y++)
    {
        /* Water adjacent to carved-open air would float; wall it off. */
        if (TT_(x, y, z) == WATR)
        {
            if (TT_(x  , y  , z-1) == OPEN ||
                TT_(x  , y  , z+1) == OPEN ||
                TT_(x-1, y  , z  ) == OPEN ||
                TT_(x+1, y  , z  ) == OPEN ||
                TT_(x  , y+1, z  ) == OPEN)
                TT_(x, y, z) = WOOD;
        }
    }
    // trees?
    float p191 = noise(zlo, 0, xlo, 191);
    seed = SEED2(xlo, zlo);
    if (p191 > 0.2f) while (RANDP(95))
    {
        char leaves = RANDBOOL ? RLEF : YLEF;
        float radius = RANDF(1.f, 4.f);
        int x = xlo + CHUNKW/2 + RANDI(-5, 5);
        int z = zlo + CHUNKD/2 + RANDI(-5, 5);
        /* Drop a trunk onto the first grass/dirt surface, then sphere of leaves. */
        for (int y = 10; y < TILESH-2; y++)
        {
            if (TT_(x, y, z) == OPEN)
                continue;
            if (TT_(x, y, z) != GRAS && TT_(x, y, z) != DIRT)
                break;
            int yy = y;
            for (; yy >= y - RANDI(3, 8); yy--)
                TT_(x, yy, z) = WOOD;
            int ymax = yy + RANDI(2, 4);
            for (int i = x-3; i <= x+3; i++) for (int j = yy-3; j <= ymax; j++) for (int k = z-3; k <= z+3; k++)
            {
                float dist = (i-x) * (i-x) + (j-yy) * (j-yy) + (k-z) * (k-z);
                if (TT_(i, j, k) == OPEN && dist < radius * radius)
                    TT_(i, j, k) = leaves;
            }
            break;
        }
    }
    // cleanup gndheight and set initial lighting
    #pragma omp parallel for
    for (x = xlo+1; x < xhi-1; x++) for (int z = zlo+1; z < zhi-1; z++)
    {
        int above_ground = true;
        int light_level = 15;
        int wet = false;
        for (int y = 0; y < TILESH-1; y++)
        {
            /* First opaque block from the top defines ground height;
             * sunlight stops and a lighting update is queued just above. */
            if (above_ground && IS_OPAQUE(x, y, z))
            {
                TGNDH_(x, z) = y;
                above_ground = false;
                if (y)
                {
                    TSUN_(x, y-1, z) = 0;
                    sun_enqueue(x, y-1, z, 0, light_level);
                }
                light_level = 0;
            }
            /* Water flows downward into open cells; each water tile dims light. */
            if (wet && TT_(x, y, z) == OPEN)
                TT_(x, y, z) = WATR;
            if (wet && IS_SOLID(x, y, z))
                wet = false;
            if (TT_(x, y, z) == WATR)
            {
                wet = true;
                if (light_level) light_level--;
                if (light_level) light_level--;
            }
            TSUN_(x, y, z) = light_level;
        }
    }
    recalc_corner_lighting(xlo, xhi, zlo, zhi);
}
// update terrain worker thread(s) copies of scoot vars
void terrain_apply_scoot()
{
#pragma omp critical
{
tscootx = future_scootx * CHUNKW;
tscootz = future_scootz * CHUNKD;
tchunk_scootx = future_scootx;
tchunk_scootz = future_scootz;
}
}
// on its own thread, loops forever building chunks when needed
void chunk_builder()
{ for(;;) {
terrain_apply_scoot();
int best_x = 0, best_z = 0;
int px = (player[0].pos.x / BS + CHUNKW2) / CHUNKW;
int pz = (player[0].pos.z / BS + CHUNKD2) / CHUNKD;
CLAMP(px, 0, VAOW-1);
CLAMP(pz, 0, VAOD-1);
// find nearest ungenerated chunk
int best_dist = 99999999;
for (int x = 0; x < VAOW; x++) for (int z = 0; z < VAOD; z++)
{
if (TAGEN_(x, z)) continue;
int dist_sq = (x - px) * (x - px) + (z - pz) * (z - pz);
if (dist_sq < best_dist)
{
best_dist = dist_sq;
best_x = x;
best_z = z;
}
}
if (best_dist == 99999999)
{
SDL_Delay(1);
continue;
}
int xlo = best_x * CHUNKW;
int zlo = best_z * CHUNKD;
int xhi = xlo + CHUNKW;
int zhi = zlo + CHUNKD;
int ticks_before = SDL_GetTicks();
gen_chunk(xlo-1, xhi+1, zlo-1, zhi+1);
nr_chunks_generated++;
chunk_gen_ticks += SDL_GetTicks() - ticks_before;
TAGEN_(best_x, best_z) = true;
#pragma omp critical
{
just_generated[just_gen_len].x = best_x;
just_generated[just_gen_len].z = best_z;
just_gen_len++;
}
} }
|
pr64734.c | /* PR middle-end/64734 */
#include <stdlib.h>
/* Runs a target region against device copies created by the caller's
   enclosing `target data` region, then copies results back to the host.
   NOTE(review): this is a compiler testcase (PR middle-end/64734); the
   exact map clauses and control flow are what the test exercises, so the
   code is deliberately left byte-for-byte unchanged.  */
void
foo (int *x, int *y)
{
#pragma omp target map (alloc:x[0]) map (alloc:y[0:8])
  {
    int i;
    /* Check y[] holds the values the caller mapped in.  */
    for (i = 0; i < 8; i++)
      if (y[i] != 2 + i)
	break;
    if (i != 8 || *x != 1)
      *x = 6;	/* mismatch marker */
    else
      {
	*x = 8;
	for (i = 0; i < 8; i++)
	  y[i] = 9 + i;
      }
  }
  /* Bring the device-side writes back to the host copies.  */
#pragma omp target update from (y[0:8]) from (x[0])
}
/* Maps local (stack) host data to the device with an empty region body;
   exercises target data mapping/teardown of automatic variables.  */
void
bar (void)
{
  int x = 1, y[32] = { 0 };
#pragma omp target data map (to:y[0:32]) map (to:x)
  ;
}
int
main ()
{
  int x = 1, y[8] = { 2, 3, 4, 5, 6, 7, 8, 9 }, i;
  /* Mapping-only region: create device copies, do nothing, discard.  */
#pragma omp target data map (to:y[0:8]) map (to:x)
  ;
#pragma omp target data map (to:y[0:8]) map (to:x)
  {
    /* target update from inside an enclosing data region.  */
#pragma omp target update from (y[0:8]) from (x)
  }
  /* foo's target region reuses the device copies created here; its final
     target update must make foo's writes visible below.  */
#pragma omp target data map (to:y[0:8]) map (to:x)
  foo (&x, &y[0]);
  if (x != 8)
    abort ();
  for (i = 0; i < 8; i++)
    if (y[i] != 9 + i)
      abort ();
  return 0;
}
|
laplace2d.c | #include <math.h>
#include <stdio.h>
#include <string.h>
#include "timer.h"
#define NN 4096
#define NM 4096
double A[NN][NM];
double Anew[NN][NM];
/*
 * Jacobi relaxation on an NN x NM grid, offloaded with OpenACC.
 * Iterates until the largest per-cell update falls below tol or
 * iter_max sweeps have run.  Returns 0 on completion (the original
 * fell off the end of main).
 */
int main(int argc, char** argv)
{
    const int n = NN;
    const int m = NM;
    const int iter_max = 1000;
    const double tol = 1.0e-6;
    double error = 1.0;
    memset(A, 0, n * m * sizeof(double));
    memset(Anew, 0, n * m * sizeof(double));
    /* Dirichlet boundary: first column pinned at 1.0 in both buffers. */
    for (int j = 0; j < n; j++)
    {
        A[j][0] = 1.0;
        Anew[j][0] = 1.0;
    }
    printf("Jacobi relaxation Calculation: %d x %d mesh\n", n, m);
    StartTimer();
    int iter = 0;
    /* Keep A resident on the device for the whole solve; Anew is
       device-only scratch (never copied back). */
    #pragma acc data copy(A), create(Anew)
    while ( error > tol && iter < iter_max )
    {
        error = 0.0;
        /* Stencil sweep: 4-point average into Anew, tracking max delta. */
        #pragma acc kernels
        for( int j = 1; j < n-1; j++)
        {
            for( int i = 1; i < m-1; i++ )
            {
                Anew[j][i] = 0.25 * ( A[j][i+1] + A[j][i-1]
                                    + A[j-1][i] + A[j+1][i]);
                error = fmax( error, fabs(Anew[j][i] - A[j][i]));
            }
        }
        /* Copy interior of Anew back into A for the next sweep. */
        #pragma acc kernels
        for( int j = 1; j < n-1; j++)
        {
            for( int i = 1; i < m-1; i++ )
            {
                A[j][i] = Anew[j][i];
            }
        }
        if(iter % 100 == 0) printf("%5d, %0.6f\n", iter, error);
        iter++;
    }
    double runtime = GetTimer();  /* milliseconds, per timer.h convention -- confirm */
    printf(" total: %f s\n", runtime / 1000);
    return 0;
}
|
mxnet_op.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file mxnet_op.h
* \brief
* \author Junyuan Xie
*/
#ifndef MXNET_OPERATOR_MXNET_OP_H_
#define MXNET_OPERATOR_MXNET_OP_H_
#include <dmlc/omp.h>
#include <mxnet/base.h>
#include <mxnet/engine.h>
#include <mxnet/op_attr_types.h>
#include <algorithm>
#include "./operator_tune.h"
#include "../engine/openmp.h"
#ifdef __CUDACC__
#include "../common/cuda_utils.h"
#endif // __CUDACC__
namespace mxnet {
namespace op {
namespace mxnet_op {
using namespace mshadow;
#ifdef __CUDA_ARCH__
// On the device, keep PI in constant memory so all threads share one copy.
__constant__ const float PI = 3.14159265358979323846;
#else
const float PI = 3.14159265358979323846;
using std::isnan;
#endif
/*!
 * \brief Number of threads the given device type would use for a problem
 * of size N.  Specialized for cpu below (and for gpu under __CUDACC__).
 */
template<typename xpu>
int get_num_threads(const int N);
#ifdef __CUDACC__
// Grid-stride loop: thread handles i, i + blockDim.x*gridDim.x, ... so any
// launch configuration covers all n elements.
#define CUDA_KERNEL_LOOP(i, n) \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
      i < (n); \
      i += blockDim.x * gridDim.x)
/*!
 * \brief Query the properties of the currently active CUDA device.
 */
inline cudaDeviceProp cuda_get_device_prop() {
  int device;
  CUDA_CALL(cudaGetDevice(&device));
  cudaDeviceProp deviceProp;
  CUDA_CALL(cudaGetDeviceProperties(&deviceProp, device));
  return deviceProp;
}
/*!
 * \brief Get the number of blocks for cuda kernel given N
 */
inline int cuda_get_num_blocks(const int N) {
  using namespace mshadow::cuda;
  // ceil(N / kBaseThreadNum), capped at the maximum grid dimension
  return std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
}
template<>
inline int get_num_threads<gpu>(const int N) {
  using namespace mshadow::cuda;
  // total launched threads = blocks * threads-per-block
  return kBaseThreadNum * cuda_get_num_blocks(N);
}
#endif  // __CUDACC__
template<>
inline int get_num_threads<cpu>(const int N) {
  // NOTE(review): N is ignored on CPU -- the OMP pool size is the answer
  // regardless of problem size.
  return omp_get_max_threads();
}
/*! \brief operator request type switch */
// Dispatches a runtime OpReqType to the body as a compile-time constant
// named `ReqType`.  kWriteInplace is folded into kWriteTo; kNullOp (and
// any unknown value) executes nothing.
#define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...)  \
  switch (req) {                          \
    case kNullOp:                         \
      break;                              \
    case kWriteInplace:                   \
    case kWriteTo:                        \
      {                                   \
        const OpReqType ReqType = kWriteTo; \
        {__VA_ARGS__}                     \
      }                                   \
      break;                              \
    case kAddTo:                          \
      {                                   \
        const OpReqType ReqType = kAddTo; \
        {__VA_ARGS__}                     \
      }                                   \
      break;                              \
    default:                              \
      break;                              \
  }
/*!
 * \brief Dispatch a runtime ndim (1..5) to a compile-time constant `ndim`
 * visible in the macro body.  NDim == 0 is a silent no-op; anything above
 * 5 aborts.  Fix: the fatal message previously printed "ndim=6too large "
 * (missing separator space).
 */
#define MXNET_NDIM_SWITCH(NDim, ndim, ...)         \
  if (NDim == 0) {                                 \
  } else if (NDim == 1) {                          \
    const int ndim = 1;                            \
    {__VA_ARGS__}                                  \
  } else if (NDim == 2) {                          \
    const int ndim = 2;                            \
    {__VA_ARGS__}                                  \
  } else if (NDim == 3) {                          \
    const int ndim = 3;                            \
    {__VA_ARGS__}                                  \
  } else if (NDim == 4) {                          \
    const int ndim = 4;                            \
    {__VA_ARGS__}                                  \
  } else if (NDim == 5) {                          \
    const int ndim = 5;                            \
    {__VA_ARGS__}                                  \
  } else {                                         \
    LOG(FATAL) << "ndim=" << NDim << " too large"; \
  }
/*!
 * \brief assign the val to out according
 * to request in Kernel::Launch
 * \param out the data to be assigned
 * \param req the assignment request
 * \param val the value to be assigned to out
 * \tparam OType output type
 * \tparam VType value type
 * \note kWriteTo/kWriteInplace overwrite, kAddTo accumulates, and
 *       kNullOp (or any unknown req) leaves `out` untouched.
 */
#define KERNEL_ASSIGN(out, req, val)  \
  {                                   \
    switch (req) {                    \
      case kNullOp:                   \
        break;                        \
      case kWriteTo:                  \
      case kWriteInplace:             \
        (out) = (val);                \
        break;                        \
      case kAddTo:                    \
        (out) += (val);               \
        break;                        \
      default:                        \
        break;                        \
    }                                 \
  }
/* \brief Flatten a multi-dimensional coordinate into a row-major linear
 * index.  A coordinate along a dimension with shape[dim] <= coord[dim]
 * contributes zero (the same clamp the original bool-multiply performed). */
template<int ndim>
MSHADOW_XINLINE int ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) {
  int flat = 0;
  #pragma unroll
  for (int dim = 0; dim < ndim; ++dim) {
    const int c = (shape[dim] > coord[dim]) ? coord[dim] : 0;
    flat = flat * shape[dim] + c;
  }
  return flat;
}
/* Convert a flattened row-major index back into per-dimension coordinates. */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> unravel(const int idx, const Shape<ndim>& shape) {
  Shape<ndim> coord;
  int rem = idx;
  #pragma unroll
  for (int dim = ndim - 1; dim >= 0; --dim) {
    const int quot = rem / shape[dim];
    coord[dim] = rem - quot * shape[dim];  // rem % shape[dim]
    rem = quot;
  }
  return coord;
}
/* Inner product of a coordinate vector with a stride vector. */
template<int ndim>
MSHADOW_XINLINE int dot(const Shape<ndim>& coord, const Shape<ndim>& stride) {
  int sum = 0;
  #pragma unroll
  for (int dim = 0; dim < ndim; ++dim)
    sum += coord[dim] * stride[dim];
  return sum;
}
/* Fused unravel(idx, shape) followed by dot(coord, stride), without
 * materializing the intermediate coordinate vector. */
template<int ndim>
MSHADOW_XINLINE int unravel_dot(const int idx, const Shape<ndim>& shape,
                                const Shape<ndim>& stride) {
  int sum = 0;
  int rem = idx;
  #pragma unroll
  for (int dim = ndim - 1; dim >= 0; --dim) {
    const int quot = rem / shape[dim];
    sum += (rem - quot * shape[dim]) * stride[dim];  // (rem % shape[dim]) * stride
    rem = quot;
  }
  return sum;
}
/* Row-major strides for `shape`.  Dimensions of extent <= 1 get stride 0,
 * so broadcast axes never advance the linear index. */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) {
  Shape<ndim> stride;
  index_t acc = 1;
  #pragma unroll
  for (int dim = ndim - 1; dim >= 0; --dim) {
    stride[dim] = (shape[dim] > 1) ? acc : 0;
    acc *= shape[dim];
  }
  return stride;
}
/* Increment coordinates and modify index */
// Advance `coord` to the next row-major position and update the flattened
// index `idx` incrementally (cheaper than re-raveling).  The loop ripples
// carries through dimensions that overflowed their extent.
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
                         index_t* idx, const Shape<ndim>& stride) {
  ++(*coord)[ndim-1];
  *idx += stride[ndim-1];
  #pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
    // carry: remove this dim's accumulated steps, add one outer-dim step
    *idx = *idx + stride[i-1] - shape[i] * stride[i];
  }
}
/* Increment coordinates and modify index */
// Same row-major advance as the overload above, but maintains two
// flattened indices (e.g. input and output tensors with different
// broadcast strides) in lock-step.
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
                         index_t* idx1, const Shape<ndim>& stride1,
                         index_t* idx2, const Shape<ndim>& stride2) {
  ++(*coord)[ndim-1];
  *idx1 += stride1[ndim-1];
  *idx2 += stride2[ndim-1];
  #pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
    // carry applied independently to each index with its own strides
    *idx1 = *idx1 + stride1[i-1] - shape[i] * stride1[i];
    *idx2 = *idx2 + stride2[i-1] - shape[i] * stride2[i];
  }
}
/*!
 * \brief Simple copy data from one blob to another
 * \param to Destination blob
 * \param from Source blob
 * Blobs must hold the same element count and live on the same device;
 * when dtypes differ, the source is cast element-wise to the
 * destination's dtype.
 */
template <typename xpu>
MSHADOW_CINLINE void copy(mshadow::Stream<xpu> *s, const TBlob& to, const TBlob& from) {
  CHECK_EQ(from.Size(), to.Size());
  CHECK_EQ(from.dev_mask(), to.dev_mask());
  MSHADOW_TYPE_SWITCH(to.type_flag_, DType, {
    if (to.type_flag_ == from.type_flag_) {
      mshadow::Copy(to.FlatTo1D<xpu, DType>(), from.FlatTo1D<xpu, DType>(), s);
    } else {
      // dtype mismatch: element-wise cast through an mshadow expression
      MSHADOW_TYPE_SWITCH(from.type_flag_, SrcDType, {
        to.FlatTo1D<xpu, DType>(s) = mshadow::expr::tcast<DType>(from.FlatTo1D<xpu, SrcDType>(s));
      })
    }
  })
}
/*! \brief Binary op backward gradient OP wrapper */
template<typename GRAD_OP>
struct backward_grad {
  /* \brief Backward calc with grad
   * \param a - output grad
   * \param args... - data to grad calculation op (what this is -- input, output, etc. -- varies)
   * \return input grad: chain rule, output grad times GRAD_OP's local gradient
   */
  template<typename DType, typename ...Args>
  MSHADOW_XINLINE static DType Map(DType a, Args... args) {
    return DType(a * GRAD_OP::Map(args...));
  }
};
/*! \brief Select assignment operation based upon the req value
 * Also useful for mapping mshadow Compute (F<OP>) to Kernel<OP>::Launch
 * \tparam OP  elementwise op providing static Map() overloads
 * \tparam req compile-time OpReqType; picks write vs accumulate via KERNEL_ASSIGN
 */
template<typename OP, int req>
struct op_with_req {
  typedef OP Operation;
  /*! \brief input is one tensor */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out, const DType *in) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i]));
  }
  /*! \brief inputs are two tensors */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out, const DType *lhs, const DType *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }
  /*! \brief input is tensor and a scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out, const DType *in, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
  }
  /*! \brief No inputs (ie fill to constant value) */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out) {
    KERNEL_ASSIGN(out[i], req, OP::Map());
  }
  /*! \brief input is single scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(value));
  }
  /*! \brief inputs are two tensors and a scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out,
                                  const DType *input_1, const DType *input_2, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], value));
  }
  /*! \brief inputs are three tensors (ie backward grad with binary grad function) */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out,
                                  const DType *input_1,
                                  const DType *input_2,
                                  const DType *input_3) {
    KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], input_3[i]));
  }
};
// Primary template; specialized for cpu below and for gpu under __CUDACC__.
template<typename OP, typename xpu>
struct Kernel;
/*!
 * \brief CPU Kernel launcher
 * \tparam OP Operator to launch
 */
template<typename OP>
struct Kernel<OP, cpu> {
  /*!
   * \brief Launch a generic CPU kernel.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param dest Destination pointer (used to infer DType)
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  template<typename ...Args>
  inline static void Launch(mshadow::Stream<cpu> *, const int N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    if (omp_threads < 2) {
      // Pool too small to amortize fork/join: run serially.
      for (int i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (int i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
  }
  /*!
   * \brief Launch CPU kernel which has OMP tuning data available.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam PRIMITIVE_OP The primitive operation to use for tuning
   * \tparam DType Data type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param dest Destination pointer (used to infer DType)
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  template<typename PRIMITIVE_OP, typename DType, typename ...Args>
  static void LaunchTuned(mshadow::Stream<cpu> *, const int N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    // PRIMITIVE_OP's measured tuning data decides whether OMP pays off for
    // this N; the actual work is still done by OP::Map.
    if (omp_threads < 2 || !tuned_op<PRIMITIVE_OP, DType>::UseOMP(
      static_cast<size_t>(N), static_cast<size_t>(omp_threads))) {
      for (int i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (int i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
  }
  /*!
   * \brief Launch custom-tuned kernel where each thread is set to
   * operate on a contiguous partition
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the UseOMP() and OP::Map() functions
   */
  template<typename ...Args>
  inline static void LaunchEx(mshadow::Stream<cpu> *s, const int N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    if (omp_threads < 2) {
      OP::Map(0, N, args...);
    } else {
      // Split [0, N) into omp_threads contiguous chunks; the last chunk
      // may be short, hence the clamp in the Map call.
      const int length = (N + omp_threads - 1) / omp_threads;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; i += length) {
        OP::Map(i, i + length > N ? N - i : length, args...);
      }
    }
#else
    OP::Map(0, N, args...);
#endif
  }
};
#ifdef __CUDACC__
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel(int N, Args... args) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
OP::Map(i, args...);
}
}
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel_ex(int N, Args... args) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
OP::Map(i, 1, args...);
}
}
template<typename OP>
struct Kernel<OP, gpu> {
/*! \brief Launch GPU kernel */
template<typename ...Args>
inline static void Launch(mshadow::Stream<gpu> *s, int N, Args... args) {
using namespace mshadow::cuda;
int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
mxnet_generic_kernel<OP, Args...>
<<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
N, args...);
}
template<typename ...Args>
inline static void LaunchEx(mshadow::Stream<gpu> *s, const int N, Args... args) {
using namespace mshadow::cuda;
int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
mxnet_generic_kernel_ex<OP, Args...>
<<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
N, args...);
}
};
#endif // __CUDACC__
/*!
 * \brief Wrap Kernel<OP, xpu>::Launch* with some special-case helpers
 */
template<typename OP, typename xpu>
struct KernelWrapper {
  /*!
   * \brief Launch 'mshadow_op-type' op (i.e. DType (*)( ... ) { return <operation> })
   * Tuning data is looked up via the op's inner Operation type.
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param s Stream object pointer (unused)
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  template<typename DType, typename ...Args>
  MSHADOW_CINLINE static void LaunchMShadowOpEx(mshadow::Stream<xpu> *s,
                                                const int N,
                                                DType *dest,
                                                Args... args) {
    mxnet::op::mxnet_op::Kernel<OP, xpu>::template LaunchTuned<
      typename OP::Operation, DType>(s, N, dest, args...);
  }
  /*!
   * \brief Launch 'mxnet_op-type' op (i.e. void (*)(int N, DType *out, ... ))
   * Tuning data is looked up via OP itself.
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param s Stream object pointer (unused)
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  template<typename DType, typename ...Args>
  MSHADOW_CINLINE static void LaunchMXNetOpEx(mshadow::Stream<xpu> *s,
                                              const int N,
                                              DType *dest,
                                              Args... args) {
    mxnet::op::mxnet_op::Kernel<OP, xpu>::template LaunchTuned<OP, DType>(s, N, dest, args...);
  }
};
/*!
 * \brief Fill kernel writing the compile-time constant `val`.
 * \tparam val Scalar immediate
 */
template<int val>
struct set_to_int {
  // mshadow_op form: return the constant (composed via op_with_req<>)
  MSHADOW_XINLINE static int Map() {
    return val;
  }
  // mxnet_op form: write the constant at index i (Kernel<>::Launch)
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out) {
    out[i] = DType(val);
  }
};
/*!
 * \brief Special-case kernel shortcut for setting to zero and one
 */
using set_zero = set_to_int<0>;
using set_one = set_to_int<1>;
// Register OMP tuning forwarders for the two fill kernels.
_MXNET_TUNABLE_MXNET_OP_FWD(set_zero);  // _ prefix denotes "already in mxnet_op namespace"
_MXNET_TUNABLE_MXNET_OP_FWD(set_one);
} // namespace mxnet_op
/*!
 * \brief Tuning specializations for the simple ops in <mshadow/base.h>
 * Basically, this overrides mxnet::op::mxnet_op::Kernel<OP, cpu>::Launch() and
 * redirects to mxnet::op::mxnet_op::KernelWrapper<OP, cpu>::Launch????OpEx(),
 * which eventually leads back to mxnet::op::mxnet_op::Kernel<OP, cpu>::LaunchTuned()
 */
// One registration per basic mshadow elementwise op (forward and backward).
MXNET_TUNABLE_MSHADOW_OP_FWD_AND_BWD(mshadow::op::identity)
MXNET_TUNABLE_MSHADOW_OP_FWD_AND_BWD(mshadow::op::plus)
MXNET_TUNABLE_MSHADOW_OP_FWD_AND_BWD(mshadow::op::minus)
MXNET_TUNABLE_MSHADOW_OP_FWD_AND_BWD(mshadow::op::mul)
MXNET_TUNABLE_MSHADOW_OP_FWD_AND_BWD(mshadow::op::div)
MXNET_TUNABLE_MSHADOW_OP_FWD_AND_BWD(mshadow::op::right)
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_MXNET_OP_H_
|
loop_fusion_par.c | void compute(double **a, double **b, double **c, double **d, int N, int num_threads) {
  // Row-parallel fused update of a and d from b and c.  The inner loop stops
  // at N-2 so c[i][j+1]/b[i][j+1] stay in bounds; the last column is peeled.
  #pragma omp parallel for num_threads(num_threads)
  for (int i = 0; i < N; i++) {
    for (int j = 0 ; j < N-1; j++) {
      a[i][j] = 2 * c[i][j];
      d[i][j] = 2 * c[i][j+1] * b[i][j+1] + c[i][j] ;
    }
    a[i][N-1] = 2 * c[i][N-1];
    // NOTE(review): the peeled last column omits the "+ c[i][N-1]" term the
    // loop body adds -- looks intentional (no j+1 neighbor exists) but confirm.
    d[i][N-1] = 2 * c[i][N-1] * b[i][N-1];
  }
}
|
QuicksortKernels.h | /*
Copyright (c) 2014-2019, Intel Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef QUICKSORT_KNL_H
#define QUICKSORT_KNL_H
#include "Quicksort.h"
#pragma omp declare target
/// One combining step of a "plus" prescan: *a takes *b's old value and
/// *b takes the sum of both old values.
template <typename T>
void plus_prescan( T *a, T *b) {
  const T old_a = *a;
  const T old_b = *b;
  *a = old_b;
  *b = old_b + old_a;
}
/// bitonic_sort: sort 2*LOCAL_THREADCOUNT elements
/// Cooperative in-place bitonic sort over sh_data; `localid` selects this
/// thread's lane.  NOTE(review): correctness depends on every thread of the
/// team reaching each "#pragma omp barrier" in lock-step -- confirm the
/// enclosing region launches exactly LQSORT_LOCAL_WORKGROUP_SIZE threads.
template <typename T>
void bitonic_sort( T* sh_data, const uint localid)
{
  // Build phase: merge bitonic subsequences of doubling length.
  for (uint ulevel = 1; ulevel < LQSORT_LOCAL_WORKGROUP_SIZE; ulevel <<= 1) {
    for (uint j = ulevel; j > 0; j >>= 1) {
      uint pos = 2*localid - (localid & (j - 1));
      uint direction = localid & ulevel;  // alternates sort direction per block
      uint av = sh_data[pos], bv = sh_data[pos + j];
      const bool sortThem = av > bv;
      const uint greater = Select(bv, av, sortThem);
      const uint lesser = Select(av, bv, sortThem);
      sh_data[pos] = Select(lesser, greater, direction);
      sh_data[pos + j] = Select(greater, lesser, direction);
      #pragma omp barrier
    }
  }
  // Final phase: single ascending merge over the full array.
  for (uint j = LQSORT_LOCAL_WORKGROUP_SIZE; j > 0; j >>= 1) {
    uint pos = 2*localid - (localid & (j - 1));
    uint av = sh_data[pos], bv = sh_data[pos + j];
    const bool sortThem = av > bv;
    sh_data[pos] = Select(av, bv, sortThem);
    sh_data[pos + j] = Select(bv, av, sortThem);
    #pragma omp barrier
  }
}
// Cooperatively sort the [start, end) range of data_in into data_out.
// Ranges shorter than SORT_THRESHOLD are staged in `temp`, padded with
// UINT_MAX sentinels so the power-of-two bitonic network still applies;
// a single element is copied by thread 0 alone.
template <typename T>
void sort_threshold( T* data_in,
                     T* data_out,
                     uint start,
                     uint end,
                     T* temp,
                     uint localid)
{
  uint tsum = end - start;
  if (tsum == SORT_THRESHOLD) {
    // Exactly one full tile: sort in place, then copy out cooperatively.
    bitonic_sort(data_in+start, localid);
    for (uint i = localid; i < SORT_THRESHOLD; i += LQSORT_LOCAL_WORKGROUP_SIZE) {
      data_out[start + i] = data_in[start + i];
    }
  } else if (tsum > 1) {
    // Partial tile: stage into temp with sentinel padding.
    for (uint i = localid; i < SORT_THRESHOLD; i += LQSORT_LOCAL_WORKGROUP_SIZE) {
      if (i < tsum) {
        temp[i] = data_in[start + i];
      } else {
        temp[i] = UINT_MAX;
      }
    }
    #pragma omp barrier
    bitonic_sort(temp, localid);
    // Only the real (non-sentinel) elements are written back.
    for (uint i = localid; i < tsum; i += LQSORT_LOCAL_WORKGROUP_SIZE) {
      data_out[start + i] = temp[i];
    }
  } else if (tsum == 1 && localid == 0) {
    data_out[start] = data_in[start];
  }
}
#pragma omp end declare target
// record to push start of the sequence, end of the sequence and direction of sorting on internal stack
typedef struct workstack_record {
  uint start;
  uint end;
  uint direction;
} workstack_record;
// Push a [START, END) range with flipped sort direction onto the
// work stack.  Only thread 0 writes; NOTE(review): no barrier here, so
// callers appear responsible for synchronizing before others read -- confirm.
#define PUSH(START, END)  if (localid == 0) { \
                            ++workstack_pointer; \
                            workstack_record wr = { (START), (END), direction ^ 1 }; \
                            workstack[workstack_pointer] = wr; \
                          }
#endif
|
critical_flet.h | /*++
Copyright (c) 2011 Microsoft Corporation
Module Name:
    critical_flet.h
Abstract:
Version of flet using "omp critical" directive.
Warning: it uses omp critical section "critical_flet"
Author:
Leonardo de Moura (leonardo) 2011-05-12
Revision History:
--*/
#ifndef _CRITICAL_FLET_H_
#define _CRITICAL_FLET_H_
// RAII guard that temporarily sets `ref` to `new_value` and restores the
// value captured at construction when destroyed.  Both writes are
// serialized through the named OpenMP critical section "critical_flet",
// which is shared process-wide (see file header warning).
template<typename T>
class critical_flet {
    T & m_ref;        // variable being temporarily overridden
    T m_old_value;    // value saved at construction, restored in the dtor
public:
    critical_flet(T & ref, const T & new_value):
        m_ref(ref),
        m_old_value(ref) {
        #pragma omp critical (critical_flet)
        {
            m_ref = new_value;
        }
    }
    ~critical_flet() {
        #pragma omp critical (critical_flet)
        {
            m_ref = m_old_value;
        }
    }
};
#endif
|
elect_energy_omp.c | /* --- File elect_energy_omp.c --- */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
/*
 * Electrostatic energy of a cubic lattice of point charges:
 *   E = sum_{i<j} q_i * q_j / r_ij
 * parallelized over the outer particle loop with an OpenMP reduction.
 *
 * Fixes vs. the original:
 *  - removed the n_charges x n_charges matrix M, which was allocated
 *    (~186 GB for size=60: 216000 rows of 216000 floats) but never used;
 *  - time_total widened to double (nanosecond totals exceed float's
 *    24-bit mantissa, corrupting the printed time);
 *  - buffers are freed and main returns 0.
 */
int main(int argc, char **argv) {
  struct timespec ts_start, ts_end;
  int size = 60;
  int n_charges = size*size*size;
  float scale=0.5;
  float *charge, *x, *y, *z;
  int i,j,k;
  double time_total;
  charge=malloc(n_charges*sizeof(float));
  x=malloc(n_charges*sizeof(float));
  y=malloc(n_charges*sizeof(float));
  z=malloc(n_charges*sizeof(float));
  /* initialize x,y,z coordinates and charges on a regular grid */
  int n=0;
  for (i=0; i<size; i++)
    for (j=0; j<size; j++)
      for (k=0; k<size; k++) {
        x[n]=i*scale;
        y[n]=j*scale;
        z[n]=k*scale;
        charge[n]=0.33;
        n++;
      }
  clock_gettime(CLOCK_MONOTONIC, &ts_start);
  /* Calculate electrostatic energy: sum of charge[i]*charge[j]/dist[i,j]
     over unordered pairs (j starts at i+1, so dist is never zero). */
  float dx, dy, dz, dist;
  double Energy=0.0f;
  #pragma omp parallel for private(j,dx,dy,dz,dist) reduction(+:Energy) schedule(static,50)
  for (i = 0; i < n_charges; i++) {
    for (j = i+1; j < n_charges; j++) {
      dx = x[i]-x[j];
      dy = y[i]-y[j];
      dz = z[i]-z[j];
      dist=sqrt(dx*dx + dy*dy + dz*dz);
      Energy += charge[i]*charge[j]/dist;
    }
  }
  clock_gettime(CLOCK_MONOTONIC, &ts_end);
  time_total = (ts_end.tv_sec - ts_start.tv_sec)*1000000000.0 + (ts_end.tv_nsec - ts_start.tv_nsec);
  printf("\nTotal time is %f ms, Energy is %f\n", time_total/1000000, Energy);
  free(charge);
  free(x);
  free(y);
  free(z);
  return 0;
}
|
gather.c | #include "../../shared.h"
#include "hale.h"
#include <float.h>
#include <math.h>
#include <stdio.h>
// Gathers all of the subcell quantities on the mesh
void gather_subcell_mass_and_energy(
const int ncells, const int nnodes, double* cell_centroids_x,
double* cell_centroids_y, double* cell_centroids_z,
int* cells_to_nodes_offsets, const double* nodes_x, const double* nodes_y,
const double* nodes_z, const double* cell_volume, double* energy,
double* density, double* velocity_x, double* velocity_y, double* velocity_z,
double* ke_mass, double* cell_mass, double* subcell_mass,
double* subcell_volume, double* subcell_ie_mass, double* subcell_ke_mass,
double* subcell_centroids_x, double* subcell_centroids_y,
double* subcell_centroids_z, int* faces_to_cells0, int* faces_to_cells1,
int* cells_to_faces_offsets, int* cells_to_faces, int* cells_to_nodes,
int* nodes_to_cells_offsets, int* nodes_to_cells, double* initial_mass,
double* initial_ie_mass, double* initial_ke_mass);
// Gathers the momentum into the subcells
void gather_subcell_momentum(
const int nnodes, const double* nodal_volumes, const double* nodal_mass,
int* nodes_to_cells, const double* nodes_x, const double* nodes_y,
const double* nodes_z, double* velocity_x, double* velocity_y,
double* velocity_z, double* subcell_volume, double* cell_centroids_x,
double* cell_centroids_y, double* cell_centroids_z,
double* subcell_momentum_x, double* subcell_momentum_y,
double* subcell_momentum_z, double* subcell_centroids_x,
double* subcell_centroids_y, double* subcell_centroids_z,
int* nodes_to_cells_offsets, int* cells_to_nodes_offsets,
int* cells_to_nodes, int* nodes_to_nodes_offsets, int* nodes_to_nodes,
vec_t* initial_momentum);
// gathers all of the subcell quantities on the mesh
// Driver for the gathering stage of the remap: (1) compute cell/subcell
// volumes and centroids, (2) gather subcell mass plus internal/kinetic
// energy, (3) gather subcell momentum.  The initial_* outputs capture
// conserved totals -- presumably for later conservation checks; confirm
// against the remap driver.
void gather_subcell_quantities(UnstructuredMesh* umesh, HaleData* hale_data,
                               vec_t* initial_momentum, double* initial_mass,
                               double* initial_ie_mass,
                               double* initial_ke_mass) {
  /*
   * GATHERING STAGE OF THE REMAP
   */
  // Calculates the cell volume, subcell volume and the subcell centroids
  calc_volumes_centroids(
      umesh->ncells, umesh->nnodes, hale_data->nnodes_by_subcell,
      umesh->cells_to_nodes_offsets, umesh->cells_to_nodes,
      hale_data->subcells_to_faces_offsets, hale_data->subcells_to_faces,
      umesh->faces_to_nodes, umesh->faces_to_nodes_offsets,
      umesh->faces_cclockwise_cell, umesh->nodes_x0, umesh->nodes_y0,
      umesh->nodes_z0, hale_data->subcell_centroids_x,
      hale_data->subcell_centroids_y, hale_data->subcell_centroids_z,
      hale_data->subcell_volume, hale_data->cell_volume,
      hale_data->nodal_volumes, umesh->nodes_to_cells_offsets, umesh->nodes_to_cells);
  // Gathers all of the subcell quantities on the mesh
  gather_subcell_mass_and_energy(
      umesh->ncells, umesh->nnodes, umesh->cell_centroids_x,
      umesh->cell_centroids_y, umesh->cell_centroids_z, umesh->cells_to_nodes_offsets,
      umesh->nodes_x0, umesh->nodes_y0, umesh->nodes_z0, hale_data->cell_volume,
      hale_data->energy0, hale_data->density0, hale_data->velocity_x0,
      hale_data->velocity_y0, hale_data->velocity_z0, hale_data->ke_mass,
      hale_data->cell_mass, hale_data->subcell_mass, hale_data->subcell_volume,
      hale_data->subcell_ie_mass, hale_data->subcell_ke_mass,
      hale_data->subcell_centroids_x, hale_data->subcell_centroids_y,
      hale_data->subcell_centroids_z, umesh->faces_to_cells0,
      umesh->faces_to_cells1, umesh->cells_to_faces_offsets,
      umesh->cells_to_faces, umesh->cells_to_nodes, umesh->nodes_to_cells_offsets,
      umesh->nodes_to_cells, initial_mass, initial_ie_mass, initial_ke_mass);
  // Gathers the momentum into the subcells
  gather_subcell_momentum(
      umesh->nnodes, hale_data->nodal_volumes, hale_data->nodal_mass,
      umesh->nodes_to_cells, umesh->nodes_x0, umesh->nodes_y0, umesh->nodes_z0,
      hale_data->velocity_x0, hale_data->velocity_y0, hale_data->velocity_z0,
      hale_data->subcell_volume, umesh->cell_centroids_x,
      umesh->cell_centroids_y, umesh->cell_centroids_z,
      hale_data->subcell_momentum_x, hale_data->subcell_momentum_y,
      hale_data->subcell_momentum_z, hale_data->subcell_centroids_x,
      hale_data->subcell_centroids_y, hale_data->subcell_centroids_z,
      umesh->nodes_to_cells_offsets, umesh->cells_to_nodes_offsets, umesh->cells_to_nodes,
      umesh->nodes_to_nodes_offsets, umesh->nodes_to_nodes, initial_momentum);
}
// Gathers all of the subcell quantities on the mesh
void gather_subcell_mass_and_energy(
const int ncells, const int nnodes, double* cell_centroids_x,
double* cell_centroids_y, double* cell_centroids_z,
int* cells_to_nodes_offsets, const double* nodes_x, const double* nodes_y,
const double* nodes_z, const double* cell_volume, double* energy,
double* density, double* velocity_x, double* velocity_y, double* velocity_z,
double* ke_mass, double* cell_mass, double* subcell_mass,
double* subcell_volume, double* subcell_ie_mass, double* subcell_ke_mass,
double* subcell_centroids_x, double* subcell_centroids_y,
double* subcell_centroids_z, int* faces_to_cells0, int* faces_to_cells1,
int* cells_to_faces_offsets, int* cells_to_faces, int* cells_to_nodes,
int* nodes_to_cells_offsets, int* nodes_to_cells, double* initial_mass,
double* initial_ie_mass, double* initial_ke_mass) {
double total_mass = 0.0;
double total_ie_mass = 0.0;
double total_ke_mass = 0.0;
// We first have to determine the cell centered kinetic energy
#pragma omp parallel for reduction(+ : total_ke_mass)
for (int cc = 0; cc < ncells; ++cc) {
const int cell_to_nodes_off = cells_to_nodes_offsets[(cc)];
const int nnodes_by_cell =
cells_to_nodes_offsets[(cc + 1)] - cell_to_nodes_off;
ke_mass[(cc)] = 0.0;
// Subcells are ordered with the nodes on a face
for (int nn = 0; nn < nnodes_by_cell; ++nn) {
const int node_index = cells_to_nodes[(cell_to_nodes_off + nn)];
const int subcell_index = cell_to_nodes_off + nn;
ke_mass[(cc)] += subcell_mass[(subcell_index)] * 0.5 *
(velocity_x[(node_index)] * velocity_x[(node_index)] +
velocity_y[(node_index)] * velocity_y[(node_index)] +
velocity_z[(node_index)] * velocity_z[(node_index)]);
}
total_ke_mass += ke_mass[(cc)];
}
double total_ie_in_subcells = 0.0;
double total_ke_in_subcells = 0.0;
// Calculate the sub-cell internal and kinetic energies
#pragma omp parallel for reduction(+ : total_mass, total_ie_mass, \
total_ie_in_subcells, total_ke_in_subcells)
for (int cc = 0; cc < ncells; ++cc) {
// Calculating the volume dist necessary for the least squares
// regression
const int cell_to_faces_off = cells_to_faces_offsets[(cc)];
const int nfaces_by_cell =
cells_to_faces_offsets[(cc + 1)] - cell_to_faces_off;
const int cell_to_nodes_off = cells_to_nodes_offsets[(cc)];
const int nnodes_by_cell =
cells_to_nodes_offsets[(cc + 1)] - cell_to_nodes_off;
const double cell_ie = density[(cc)] * energy[(cc)];
const double cell_ke = ke_mass[(cc)] / cell_volume[(cc)];
vec_t cell_c = {0.0, 0.0, 0.0};
calc_centroid(nnodes_by_cell, nodes_x, nodes_y, nodes_z, cells_to_nodes,
cell_to_nodes_off, &cell_c);
vec_t ie_rhs = {0.0, 0.0, 0.0};
vec_t ke_rhs = {0.0, 0.0, 0.0};
vec_t coeff[3] = {{0.0, 0.0, 0.0}};
total_mass += cell_mass[(cc)];
total_ie_mass += cell_mass[(cc)] * energy[(cc)];
// Determine the weighted volume dist for neighbouring cells
double gmax_ie = -DBL_MAX;
double gmin_ie = DBL_MAX;
double gmax_ke = -DBL_MAX;
double gmin_ke = DBL_MAX;
for (int ff = 0; ff < nfaces_by_cell; ++ff) {
const int face_index = cells_to_faces[(cell_to_faces_off + ff)];
const int neighbour_index = (faces_to_cells0[(face_index)] == cc)
? faces_to_cells1[(face_index)]
: faces_to_cells0[(face_index)];
// Check if boundary face
if (neighbour_index == -1) {
continue;
}
vec_t dist = {cell_centroids_x[(neighbour_index)] - cell_c.x,
cell_centroids_y[(neighbour_index)] - cell_c.y,
cell_centroids_z[(neighbour_index)] - cell_c.z};
// Store the neighbouring cell's contribution to the coefficients
double neighbour_vol = cell_volume[(neighbour_index)];
coeff[0].x += 2.0 * (dist.x * dist.x) / (neighbour_vol * neighbour_vol);
coeff[0].y += 2.0 * (dist.x * dist.y) / (neighbour_vol * neighbour_vol);
coeff[0].z += 2.0 * (dist.x * dist.z) / (neighbour_vol * neighbour_vol);
coeff[1].x += 2.0 * (dist.y * dist.x) / (neighbour_vol * neighbour_vol);
coeff[1].y += 2.0 * (dist.y * dist.y) / (neighbour_vol * neighbour_vol);
coeff[1].z += 2.0 * (dist.y * dist.z) / (neighbour_vol * neighbour_vol);
coeff[2].x += 2.0 * (dist.z * dist.x) / (neighbour_vol * neighbour_vol);
coeff[2].y += 2.0 * (dist.z * dist.y) / (neighbour_vol * neighbour_vol);
coeff[2].z += 2.0 * (dist.z * dist.z) / (neighbour_vol * neighbour_vol);
const double neighbour_ie =
density[(neighbour_index)] * energy[(neighbour_index)];
const double neighbour_ke = ke_mass[(neighbour_index)] / neighbour_vol;
gmax_ie = max(gmax_ie, neighbour_ie);
gmin_ie = min(gmin_ie, neighbour_ie);
gmax_ke = max(gmax_ke, neighbour_ke);
gmin_ke = min(gmin_ke, neighbour_ke);
// Prepare the RHS, which includes energy differential
const double die = (neighbour_ie - cell_ie);
const double dke = (neighbour_ke - cell_ke);
ie_rhs.x += 2.0 * (dist.x * die) / neighbour_vol;
ie_rhs.y += 2.0 * (dist.y * die) / neighbour_vol;
ie_rhs.z += 2.0 * (dist.z * die) / neighbour_vol;
ke_rhs.x += 2.0 * (dist.x * dke) / neighbour_vol;
ke_rhs.y += 2.0 * (dist.y * dke) / neighbour_vol;
ke_rhs.z += 2.0 * (dist.z * dke) / neighbour_vol;
}
// Determine the inverse of the coefficient matrix
vec_t inv[3];
calc_3x3_inverse(&coeff, &inv);
// Solve for the internal and kinetic energy gradients
vec_t grad_ie = {
inv[0].x * ie_rhs.x + inv[0].y * ie_rhs.y + inv[0].z * ie_rhs.z,
inv[1].x * ie_rhs.x + inv[1].y * ie_rhs.y + inv[1].z * ie_rhs.z,
inv[2].x * ie_rhs.x + inv[2].y * ie_rhs.y + inv[2].z * ie_rhs.z};
vec_t grad_ke = {
inv[0].x * ke_rhs.x + inv[0].y * ke_rhs.y + inv[0].z * ke_rhs.z,
inv[1].x * ke_rhs.x + inv[1].y * ke_rhs.y + inv[1].z * ke_rhs.z,
inv[2].x * ke_rhs.x + inv[2].y * ke_rhs.y + inv[2].z * ke_rhs.z};
// Calculate the limiter for the gradient
double limiter = 1.0;
for (int nn = 0; nn < nnodes_by_cell; ++nn) {
const int node_index = cells_to_nodes[(cell_to_nodes_off + nn)];
limiter = min(limiter, calc_cell_limiter(cell_ie, gmax_ie, gmin_ie,
&grad_ie, nodes_x[(node_index)],
nodes_y[(node_index)],
nodes_z[(node_index)], &cell_c));
}
// This stops extrema from worsening as part of the gather. Is it
// conservative?
grad_ie.x *= limiter;
grad_ie.y *= limiter;
grad_ie.z *= limiter;
// Calculate the limiter for the gradient
limiter = 1.0;
for (int nn = 0; nn < nnodes_by_cell; ++nn) {
const int node_index = cells_to_nodes[(cell_to_nodes_off + nn)];
limiter = min(limiter, calc_cell_limiter(cell_ke, gmax_ke, gmin_ke,
&grad_ke, nodes_x[(node_index)],
nodes_y[(node_index)],
nodes_z[(node_index)], &cell_c));
}
// This stops extrema from worsening as part of the gather. Is it
// conservative?
grad_ie.x *= limiter;
grad_ie.y *= limiter;
grad_ie.z *= limiter;
// Subcells are ordered with the nodes on a face
for (int nn = 0; nn < nnodes_by_cell; ++nn) {
const int subcell_index = cell_to_nodes_off + nn;
// Calculate the center of mass distance
const double dx = subcell_centroids_x[(subcell_index)] - cell_c.x;
const double dy = subcell_centroids_y[(subcell_index)] - cell_c.y;
const double dz = subcell_centroids_z[(subcell_index)] - cell_c.z;
// Subcell internal and kinetic energy from linear function at cell
subcell_ie_mass[(subcell_index)] =
subcell_volume[(subcell_index)] *
(cell_ie + grad_ie.x * dx + grad_ie.y * dy + grad_ie.z * dz);
subcell_ke_mass[(subcell_index)] =
subcell_volume[(subcell_index)] *
(cell_ke + grad_ke.x * dx + grad_ke.y * dy + grad_ke.z * dz);
total_ie_in_subcells += subcell_ie_mass[(subcell_index)];
total_ke_in_subcells += subcell_ke_mass[(subcell_index)];
if (subcell_ie_mass[(subcell_index)] < -EPS ||
subcell_ke_mass[(subcell_index)] < -EPS) {
printf("Negative energy mass %d %.12f %.12f\n", subcell_index,
subcell_ie_mass[(subcell_index)],
subcell_ke_mass[(subcell_index)]);
}
}
}
*initial_mass = total_mass;
*initial_ie_mass = total_ie_in_subcells;
*initial_ke_mass = total_ke_in_subcells;
printf("Total Energy in Cells %.12f\n", total_ie_mass + total_ke_mass);
printf("Total Energy in Subcells %.12f\n",
total_ie_in_subcells + total_ke_in_subcells);
printf("Difference %.12f\n\n",
(total_ie_mass + total_ke_mass) -
(total_ie_in_subcells + total_ke_in_subcells));
}
// Gathers the momentum into the subcells
//
// For every node: (1) fold the node's momentum (nodal_mass * velocity) into
// the global node totals, (2) fit a linear momentum-density field over the
// node's neighbouring nodes by a volume-weighted least-squares regression,
// (3) limit the fitted gradients (via calc_cell_limiter) so the
// reconstruction introduces no new extrema, and (4) evaluate the limited
// field at the centroid of each subcell touching the node to assign the
// per-subcell momentum.
//
// Outputs: subcell_momentum_{x,y,z} are overwritten for every subcell
// attached to a node; *initial_momentum receives the summed subcell
// momentum. A conservation summary (node totals vs subcell totals) is
// printed to stdout.
void gather_subcell_momentum(
    const int nnodes, const double* nodal_volumes, const double* nodal_mass,
    int* nodes_to_cells, const double* nodes_x, const double* nodes_y,
    const double* nodes_z, double* velocity_x, double* velocity_y,
    double* velocity_z, double* subcell_volume, double* cell_centroids_x,
    double* cell_centroids_y, double* cell_centroids_z,
    double* subcell_momentum_x, double* subcell_momentum_y,
    double* subcell_momentum_z, double* subcell_centroids_x,
    double* subcell_centroids_y, double* subcell_centroids_z,
    int* nodes_to_cells_offsets, int* cells_to_nodes_offsets,
    int* cells_to_nodes, int* nodes_to_nodes_offsets, int* nodes_to_nodes,
    vec_t* initial_momentum) {

  // Scalar accumulators so the OpenMP reduction clause can sum them.
  double initial_momentum_x = 0.0;
  double initial_momentum_y = 0.0;
  double initial_momentum_z = 0.0;
  double total_subcell_vx = 0.0;
  double total_subcell_vy = 0.0;
  double total_subcell_vz = 0.0;
#pragma omp parallel for reduction(+ : initial_momentum_x, initial_momentum_y, \
                                   initial_momentum_z, total_subcell_vx, \
                                   total_subcell_vy, total_subcell_vz)
  for (int nn = 0; nn < nnodes; ++nn) {
    // Calculate the gradient for the nodal momentum
    vec_t rhsx = {0.0, 0.0, 0.0};
    vec_t rhsy = {0.0, 0.0, 0.0};
    vec_t rhsz = {0.0, 0.0, 0.0};
    vec_t coeff[3] = {{0.0, 0.0, 0.0}};
    // Track the neighbourhood min/max momentum density per component, used
    // later to limit the reconstructed gradients.
    vec_t gmin = {DBL_MAX, DBL_MAX, DBL_MAX};
    vec_t gmax = {-DBL_MAX, -DBL_MAX, -DBL_MAX};
    vec_t node = {nodes_x[(nn)], nodes_y[(nn)], nodes_z[(nn)]};
    // Momentum density at this node (mass per volume, times velocity).
    const double nodal_density = nodal_mass[(nn)] / nodal_volumes[(nn)];
    vec_t node_mom_density = {nodal_density * velocity_x[(nn)],
                              nodal_density * velocity_y[(nn)],
                              nodal_density * velocity_z[(nn)]};
    initial_momentum_x += nodal_mass[(nn)] * velocity_x[(nn)];
    initial_momentum_y += nodal_mass[(nn)] * velocity_y[(nn)];
    initial_momentum_z += nodal_mass[(nn)] * velocity_z[(nn)];
    const int node_to_nodes_off = nodes_to_nodes_offsets[(nn)];
    const int nnodes_by_node =
        nodes_to_nodes_offsets[(nn + 1)] - node_to_nodes_off;
    for (int nn2 = 0; nn2 < nnodes_by_node; ++nn2) {
      const int neighbour_index = nodes_to_nodes[(node_to_nodes_off + nn2)];
      // -1 marks an empty/boundary entry in the adjacency list.
      if (neighbour_index == -1) {
        continue;
      }
      // Calculate the center of mass distance
      vec_t i = {nodes_x[(neighbour_index)] - node.x,
                 nodes_y[(neighbour_index)] - node.y,
                 nodes_z[(neighbour_index)] - node.z};
      // Store the neighbouring cell's contribution to the coefficients
      // (weights are 1/volume^2, matching the 1/volume weighting of the RHS
      // below).
      double neighbour_vol = nodal_volumes[(neighbour_index)];
      coeff[0].x += 2.0 * (i.x * i.x) / (neighbour_vol * neighbour_vol);
      coeff[0].y += 2.0 * (i.x * i.y) / (neighbour_vol * neighbour_vol);
      coeff[0].z += 2.0 * (i.x * i.z) / (neighbour_vol * neighbour_vol);
      coeff[1].x += 2.0 * (i.y * i.x) / (neighbour_vol * neighbour_vol);
      coeff[1].y += 2.0 * (i.y * i.y) / (neighbour_vol * neighbour_vol);
      coeff[1].z += 2.0 * (i.y * i.z) / (neighbour_vol * neighbour_vol);
      coeff[2].x += 2.0 * (i.z * i.x) / (neighbour_vol * neighbour_vol);
      coeff[2].y += 2.0 * (i.z * i.y) / (neighbour_vol * neighbour_vol);
      coeff[2].z += 2.0 * (i.z * i.z) / (neighbour_vol * neighbour_vol);
      const double neighbour_nodal_density =
          nodal_mass[(neighbour_index)] / nodal_volumes[(neighbour_index)];
      vec_t neighbour_mom_density = {
          neighbour_nodal_density * velocity_x[(neighbour_index)],
          neighbour_nodal_density * velocity_y[(neighbour_index)],
          neighbour_nodal_density * velocity_z[(neighbour_index)]};
      gmax.x = max(gmax.x, neighbour_mom_density.x);
      gmin.x = min(gmin.x, neighbour_mom_density.x);
      gmax.y = max(gmax.y, neighbour_mom_density.y);
      gmin.y = min(gmin.y, neighbour_mom_density.y);
      gmax.z = max(gmax.z, neighbour_mom_density.z);
      gmin.z = min(gmin.z, neighbour_mom_density.z);
      vec_t dv = {(neighbour_mom_density.x - node_mom_density.x),
                  (neighbour_mom_density.y - node_mom_density.y),
                  (neighbour_mom_density.z - node_mom_density.z)};
      rhsx.x += 2.0 * i.x * dv.x / neighbour_vol;
      rhsx.y += 2.0 * i.y * dv.x / neighbour_vol;
      rhsx.z += 2.0 * i.z * dv.x / neighbour_vol;
      rhsy.x += 2.0 * i.x * dv.y / neighbour_vol;
      rhsy.y += 2.0 * i.y * dv.y / neighbour_vol;
      rhsy.z += 2.0 * i.z * dv.y / neighbour_vol;
      rhsz.x += 2.0 * i.x * dv.z / neighbour_vol;
      rhsz.y += 2.0 * i.y * dv.z / neighbour_vol;
      rhsz.z += 2.0 * i.z * dv.z / neighbour_vol;
    }
    // Determine the inverse of the coefficient matrix
    vec_t inv[3];
    calc_3x3_inverse(&coeff, &inv);
    // Solve for the velocity density gradients
    vec_t grad_vx = {inv[0].x * rhsx.x + inv[0].y * rhsx.y + inv[0].z * rhsx.z,
                     inv[1].x * rhsx.x + inv[1].y * rhsx.y + inv[1].z * rhsx.z,
                     inv[2].x * rhsx.x + inv[2].y * rhsx.y + inv[2].z * rhsx.z};
    vec_t grad_vy = {inv[0].x * rhsy.x + inv[0].y * rhsy.y + inv[0].z * rhsy.z,
                     inv[1].x * rhsy.x + inv[1].y * rhsy.y + inv[1].z * rhsy.z,
                     inv[2].x * rhsy.x + inv[2].y * rhsy.y + inv[2].z * rhsy.z};
    vec_t grad_vz = {inv[0].x * rhsz.x + inv[0].y * rhsz.y + inv[0].z * rhsz.z,
                     inv[1].x * rhsz.x + inv[1].y * rhsz.y + inv[1].z * rhsz.z,
                     inv[2].x * rhsz.x + inv[2].y * rhsz.y + inv[2].z * rhsz.z};
    // Limit the gradients
    // Each component gets its own limiter, evaluated at every adjoining
    // cell centroid against the neighbourhood min/max captured above.
    double vx_limiter = 1.0;
    double vy_limiter = 1.0;
    double vz_limiter = 1.0;
    const int node_to_cells_off = nodes_to_cells_offsets[(nn)];
    const int ncells_by_node =
        nodes_to_cells_offsets[(nn + 1)] - node_to_cells_off;
    for (int cc = 0; cc < ncells_by_node; ++cc) {
      const int cell_index = nodes_to_cells[(node_to_cells_off + cc)];
      vec_t cell_c = {cell_centroids_x[(cell_index)],
                      cell_centroids_y[(cell_index)],
                      cell_centroids_z[(cell_index)]};
      vx_limiter =
          min(vx_limiter,
              calc_cell_limiter(node_mom_density.x, gmax.x, gmin.x, &grad_vx,
                                cell_c.x, cell_c.y, cell_c.z, &node));
      vy_limiter =
          min(vy_limiter,
              calc_cell_limiter(node_mom_density.y, gmax.y, gmin.y, &grad_vy,
                                cell_c.x, cell_c.y, cell_c.z, &node));
      vz_limiter =
          min(vz_limiter,
              calc_cell_limiter(node_mom_density.z, gmax.z, gmin.z, &grad_vz,
                                cell_c.x, cell_c.y, cell_c.z, &node));
    }
    // This stops extrema from worsening as part of the gather. Is it
    // conservative?
    grad_vx.x *= vx_limiter;
    grad_vx.y *= vx_limiter;
    grad_vx.z *= vx_limiter;
    grad_vy.x *= vy_limiter;
    grad_vy.y *= vy_limiter;
    grad_vy.z *= vy_limiter;
    grad_vz.x *= vz_limiter;
    grad_vz.y *= vz_limiter;
    grad_vz.z *= vz_limiter;
    for (int cc = 0; cc < ncells_by_node; ++cc) {
      const int cell_index = nodes_to_cells[(node_to_cells_off + cc)];
      const int cell_to_nodes_off = cells_to_nodes_offsets[(cell_index)];
      const int nnodes_by_cell =
          cells_to_nodes_offsets[(cell_index + 1)] - cell_to_nodes_off;
      // Determine the position of the node in the cell
      // NOTE(review): assumes nn is always present in the cell's node list
      // (nodes_to_cells and cells_to_nodes are mutually consistent); if not,
      // subcell_index would run past the cell's node range.
      int nn2;
      for (nn2 = 0; nn2 < nnodes_by_cell; ++nn2) {
        if (cells_to_nodes[(cell_to_nodes_off + nn2)] == nn) {
          break;
        }
      }
      const int subcell_index = cell_to_nodes_off + nn2;
      const double vol = subcell_volume[(subcell_index)];
      const double dx = subcell_centroids_x[(subcell_index)] - nodes_x[(nn)];
      const double dy = subcell_centroids_y[(subcell_index)] - nodes_y[(nn)];
      const double dz = subcell_centroids_z[(subcell_index)] - nodes_z[(nn)];
      // Evaluate the limited linear field at the subcell centroid.
      subcell_momentum_x[(subcell_index)] =
          vol * (node_mom_density.x + grad_vx.x * dx + grad_vx.y * dy +
                 grad_vx.z * dz);
      subcell_momentum_y[(subcell_index)] =
          vol * (node_mom_density.y + grad_vy.x * dx + grad_vy.y * dy +
                 grad_vy.z * dz);
      subcell_momentum_z[(subcell_index)] =
          vol * (node_mom_density.z + grad_vz.x * dx + grad_vz.y * dy +
                 grad_vz.z * dz);
      total_subcell_vx += subcell_momentum_x[(subcell_index)];
      total_subcell_vy += subcell_momentum_y[(subcell_index)];
      total_subcell_vz += subcell_momentum_z[(subcell_index)];
    }
  }

  initial_momentum->x = total_subcell_vx;
  initial_momentum->y = total_subcell_vy;
  initial_momentum->z = total_subcell_vz;

  // Conservation report: node-based totals vs the gathered subcell totals.
  printf("Total Momentum in Cells (%.12f,%.12f,%.12f)\n", initial_momentum_x,
         initial_momentum_y, initial_momentum_z);
  printf("Total Momentum in Subcells (%.12f,%.12f,%.12f)\n", total_subcell_vx,
         total_subcell_vy, total_subcell_vz);
  printf("Difference (%.12f,%.12f,%.12f)\n\n",
         initial_momentum_x - total_subcell_vx,
         initial_momentum_y - total_subcell_vy,
         initial_momentum_z - total_subcell_vz);
}
|
eta.c | #include<stdio.h>
#include "gdal.h"
#include<omp.h>
#include "cpl_string.h"
/* mod16A2H MODLAND_QC bits [0-1]
* 0 -> class 0: ETA produced, Good quality (main algorithm with or without saturation)
* 1 -> class 1: ETA produced, Other Quality (back-up algorithm or fill values)
*/
#define NODATA 32767
#define Null 1000000000
/* Extract the MODLAND_QC flag (bit 0) from a MOD16A2H QA pixel value.
 * Returns 0 for class 0 (ETA produced, good quality / main algorithm) and
 * 1 for class 1 (other quality: back-up algorithm or fill values). */
int mod16A2Ha(int pixel) {
	const int modland_qc = pixel & 0x01;
	return modland_qc;
}
/* Print the command-line help banner to stdout.
 * The banner text is kept in a single table so the layout is easy to scan
 * and extend; each entry already carries its own newline. */
void usage()
{
	static const char *banner[] = {
		"-----------------------------------------\n",
		"--Modis Processing chain--OpenMP code----\n",
		"-----------------------------------------\n",
		"./eta inETA inETA_QA\n",
		"\toutETA\n",
		"\t[Offset Scale]\n",
		"-----------------------------------------\n",
		"inETA\t\tModis MOD16A2H ETA 1000m\n",
		"inETA_QA\t\tModis MOD16A2H FparLai_QC\n",
		"outETA\tQA corrected ETA output [-]\n",
		"Offset\t Optional offset (DN2ETA)\n",
		"Scale\t Optional scale (DN2ETA)\n",
		"-----------------------------------------\n",
	};
	size_t line;
	for (line = 0; line < sizeof(banner) / sizeof(banner[0]); line++)
		fputs(banner[line], stdout);
	return;
}
/* QA-correct a MOD16A2H ETA raster.
 *
 * Usage: ./eta inETA inETA_QA outETA [Offset Scale]
 * Reads the ETA band and its QA band, masks every pixel whose MODLAND_QC
 * bit flags "other quality" to NODATA, optionally applies the linear
 * DN->ETA transform (offset + DN*scale), and writes a tiled, DEFLATE-
 * compressed copy of the input with the corrected band.
 *
 * Returns EXIT_SUCCESS, 1 on missing arguments, or exits with 1 on I/O or
 * allocation failure.
 */
int main( int argc, char *argv[] )
{
	if( argc < 4 ) {
		usage();
		return 1;
	}
	char *inB2 = argv[1]; //ETA
	char *inB3 = argv[2]; //ETA_QA
	char *etaF = argv[3]; // Corrected ETA
	float offset=Null, scale=Null;
	/* The DN->ETA transform needs both optional arguments; testing argc is
	 * clearer (and safer) than probing argv[4]/argv[5] for NULL. */
	if( argc > 5 ){
		offset = atof(argv[4]); // Optional Offset (offset+DN*scale)
		scale = atof(argv[5]); // Optional scale (offset+DN*scale)
	}
	GDALAllRegister();
	GDALDatasetH hD2 = GDALOpen(inB2,GA_ReadOnly);//ETA
	GDALDatasetH hD3 = GDALOpen(inB3,GA_ReadOnly);//ETA_QA
	if(hD2==NULL||hD3==NULL){
		printf("One or more input files ");
		printf("could not be loaded\n");
		exit(1);
	}
	/* Output is a tiled, DEFLATE-compressed copy of the ETA dataset. */
	GDALDriverH hDr2 = GDALGetDatasetDriver(hD2);
	char **options = NULL;
	options = CSLSetNameValue( options, "TILED", "YES" );
	options = CSLSetNameValue( options, "COMPRESS", "DEFLATE" );
	options = CSLSetNameValue( options, "PREDICTOR", "2" );
	GDALDatasetH hDOut = GDALCreateCopy(hDr2,etaF,hD2,FALSE,options,NULL,NULL);
	CSLDestroy(options); /* options list is copied by GDAL; free ours */
	GDALRasterBandH hBOut = GDALGetRasterBand(hDOut,1);
	GDALSetRasterNoDataValue(hBOut, NODATA);
	GDALRasterBandH hB2 = GDALGetRasterBand(hD2,1);//ETA
	GDALRasterBandH hB3 = GDALGetRasterBand(hD3,1);//ETA_QA
	int nX = GDALGetRasterBandXSize(hB2);
	int nY = GDALGetRasterBandYSize(hB2);
	int N=nX*nY;
	float *l2 = (float *) malloc(sizeof(float)*N);
	float *l3 = (float *) malloc(sizeof(float)*N);
	float *lOut = (float *) malloc(sizeof(float)*N);
	if(l2==NULL||l3==NULL||lOut==NULL){
		printf("Memory allocation failed\n");
		exit(1);
	}
	int rc, qa;
	//ETA 1Km
	int err = 0;
	err=GDALRasterIO(hB2,GF_Read,0,0,nX,nY,l2,nX,nY,GDT_Float32,0,0);
	//ETA_QA 1Km
	err=GDALRasterIO(hB3,GF_Read,0,0,nX,nY,l3,nX,nY,GDT_Float32,0,0);
	/* Pixel loop: mask bad-QA pixels, otherwise apply the optional DN->ETA
	 * transform (or pass the DN straight through).
	 * BUGFIX: the original assigned NODATA and then unconditionally
	 * overwrote it on the next statement, so QA masking never happened;
	 * the branches are now a proper if/else chain. */
#pragma omp parallel for default(none) \
	private (rc, qa) shared (N, l2, l3, lOut, offset, scale)
	for(rc=0;rc<N;rc++){
		qa=mod16A2Ha((int)l3[rc]);
		if( qa != 0 )
			lOut[rc] = NODATA;
		else if( offset!=Null && scale!=Null )
			lOut[rc] = offset + l2[rc] * scale;
		else
			lOut[rc] = l2[rc];
	}
	/* (An orphaned "#pragma omp barrier" stood here; a barrier outside a
	 * parallel region is a no-op, and the parallel-for above already joins
	 * all threads before continuing, so it was removed.) */
	err=GDALRasterIO(hBOut,GF_Write,0,0,nX,nY,lOut,nX,nY,GDT_Float32,0,0);
	err=err+1; /* silence unused-result warnings, as in the original */
	if( l2 != NULL ) free( l2 );
	if( l3 != NULL ) free( l3 );
	if( lOut != NULL ) free( lOut ); /* BUGFIX: lOut was leaked */
	GDALClose(hD2);
	GDALClose(hD3);
	GDALClose(hDOut);
	return(EXIT_SUCCESS);
}
|
ssh_ng_fmt_plug.c | /* Fast cracker for SSH RSA / DSA key files. Hacked together during October
* of 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>.
*
* Support for cracking new openssh key format (bcrypt pbkdf) was added by
* m3g9tr0n (Spiros Fraganastasis) and Dhiru Kholia in September of 2014. This
* is dedicated to Raquel :-)
*
* Ideas borrowed from SSH2 protocol library, http://pypi.python.org/pypi/ssh
* Copyright (C) 2011 Jeff Forcier <jeff@bitprophet.org>
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted. */
#include "arch.h"
#if !AC_BUILT || HAVE_BIO_NEW
#if FMT_EXTERNS_H
extern struct fmt_main fmt_sshng;
#elif FMT_REGISTERS_H
john_register_one(&fmt_sshng);
#else
#include <string.h>
#include "aes.h"
#include <openssl/des.h>
#include <assert.h>
#include <ctype.h>
#include <errno.h>
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 512 // Tuned K8-dual HT
#endif
#endif
#include "jumbo.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "stdint.h"
#include "md5.h"
#include "memdbg.h"
#include "asn1.h"
#define FORMAT_LABEL "SSH-ng"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "RSA/DSA/EC/OPENSSH (SSH private keys) 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1001
#define PLAINTEXT_LENGTH 32 // XXX
#define BINARY_SIZE 0
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN 1
#define SALT_ALIGN sizeof(int)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
// openssl asn1parse -in test_dsa.key; openssl asn1parse -in test_rsa.key
#define SAFETY_FACTOR 16 // enough to verify the initial ASN.1 structure (SEQUENCE, INTEGER, Big INTEGER) of RSA, and DSA keys?
#define N 8192
static struct fmt_tests sshng_tests[] = {
{"$sshng$1$16$570F498F6FF732775EE38648130F600D$1200$1777f12047d4ebab06d052d52946e5e0e73b41d5077b20e1ffe1c97ef9459b8c6844fecc24fdf63314c8889398fa140026339c85336278600e299c0f4c236648ca684f0c122e66d3e860e19eab8b46a564eb101def1c6a38f2f1800040c6b59a66e7b86e145e180f8a126e46544be1e17dd32e4e72f735c9e6b0ca4bbbb32ccf34ba0a7827858b0be32f9e53f13466e2ac78c3fecdf2a51cd7871286a3a91f9c71ae9e857a74bcc06071af6f60d827f7e13ccf6c1be722246c0796f509744c2b1b1452315ea6f86a1c8765d1f0c1d795349b4ea1ba229318b392fe505292cd0c6b4e3e9b2acc13b96943d92fa5635e05b7795989906274b0fb1894102d07facdd8f2122299960e1490823d62bbd5bf6d6c92ed26e68cc2edc93fbffec557a5d187fffe085ded9408ac63293851a684ca10d6e9a4ee9b5c552c827caee1f1c41870fe2d0e79bc4a0b85478fa82a58f947d345122c8ac7c80ba2ae8452b093dda70e2a4329fce70af9cf98e19477a622083664d1e62393a01b20371fc5be9390059f1c4af75d5448a2fbe1aaa46701c696afec927c67d15c046036531d9252faa08bbf9ea0e019ea574e6af94edd7ec17c83c0f87e34c7456e19bc53b2de04dafa83267694c1f61d038e0fc5f8f1b8ce573da470e6db6d38c0e8f7141ad9e9609ea408e3823271e987766039d484bc88f23f2f2a1175636ece950c7d82f43726287fef37da945ec6ad6adc04cb59f66087f68a3e84e8cc39c578bcbce3aaf67f1325d3d20dbd5872cc88ab72fc0bda05bf969eca08f8cafb306424a1597ba5d612e155b4723c2c1bee9a8e3d195be3b798ea417008a2340a919e23ac899ea4dbc4ef05af2cf6b12293eeb293584b37d3f8465e36a62d65b21f68725603e11dc14acf4e3855e25980387a34a34919fdd49844ed888e37199bb26df1bbbc303e895615fcbb0aa9ddc8a2aa685da942a1e68dc3a355d27236f74220d404d25e0ac64ae9203bb04296b4d67481a4f516fd22e47092073c9c44fa098670d736c5c509e55d6b40d3bf346ea5bb0007e32e9d8290c2633621fd84c2f5f428a5649ff3a16d00fec21381543202f2ee12078ddea8a371935f2ffa15aafa644e111a29c1c4703bf8e9cf1397356e296c5484558b96639b9cf3703aabff0cf42864dab91b1e09c6439159bc95374da7a5d416402286390e76cb766cd94e7a002596e8862b8d7e46c1fc6f7bdd0b93c73d2dc3cf58ea31bc549086209f450bb7460d5e9ba0d0f7b80337651f45bf83bef1783c3a15631c82428bfe167dc0692402d7f15144fff01ad8596970439ce8a2df0107c85a23ef93edd19f62de499ab58ada581886494c3e52dd5ec53c191f6d62729729a2
52c2c0d8024950d1637cfd7c61a4fe64ce41cde76fe00fa2607af66a44d3b4b8836820f40c03669f08b4e986f4d03c09e3c026a910f83be623d7f68ff80d81662f020f433f7a896e10134a278cd9a8517d3bcd77c5287f7d41bc52d2f8db79b5f8f9ed6d6f45a482b13cb91ecdef43ebe38f5ad71836185ae6faf1dd11c50cc1759e4834fcab2b3523d4224a32d2eaba224a2c950dac7524afc74f02f17b511f3b22d577a6928d40909bed64f6ed27096dff591a8fbee3f32733fd2b36c0c4708a5224f165af000d93832e211ae52465f680e7a4fd66bb5eb210c4402eb58f6ebfde", "strongpassword"},
{"$sshng$0$8$DAA422E8A5A8EFB7$608$fa7b2c1c699697dd487261a213a0dd088a86bc03f4e2db8b87ad302e3581bdd8ed17d0a3ced3e7179ef17beea9064ee862017f472de293d655f6b1cd7115e27c328cf5caf1b5896952590cd82d123fcf6c5da3b43f5435c829ebb595300c828e04d57c7ade57efe006305b32fe79afd0d14cadba681b4dc3a69b25a1e71ddbd353465217c311d11721f1cba05d1226ff0e7d261156f0837753bcaaddfec383591f61470a4318cf679046d43490a1eef33014a90865917ccaa16f986724b8ee421d990327a46410362b4992406af41a88e3c5e5bbb7707ba08517e7ac8295ad0b934c38968f05fd372f1ee29e24eddcbbacba5b3e1b7150e51ba4e17b4f54319630e2d5372adc46e4de437f64b3d11670eb25fc94c7e9bd0579806bbf16c6cfe529a4bc0d3918ca4777f8418e789163660d9bbe0aa297857ee4922dffe310e6967fba2ee2e06707d9bbd9c8601bad7ccfdcb8a948074de511be7d588b7b71d4b5f0b1e19020b54efc4d626b2e4d85c0a40682517128b9ecc29f882996f4f6b655bb1986e293cb5271fe98c61d8b2e6e8338fee42f22674fc8b2da475663ba19644e7de76927cd9e333b533ad7617cc7a9f19dc7c00c240ed92c2fb1aaf6495bd16ab9fae4650567ad8b175d02f9e6a9737362168035670017fd9ad87cf4e916f47baa5efe0d04939295fba608f83fa811b946d12afe77836dc6d0d398824a355926ce5848dace776c7a7ab7109be495894bc98a2cf04107368d5d8777a1d0ef19782ebb1527b564ac0f5d4ac91e81f435cc21f5905b9753ee1a79913306957589943da161a6f5dc3082b80930553769ce11d82d9cb12d8a12bb4e56eb3f1200eb", "television"},
{"$sshng$1$16$A0B8FCAB2B655BA3D04B2020B89A142E$1200$a4dbb4c526a6bea3aeca26e6d89f0c19ebdfb9154ce4cdb4bbfc3420ffce58dd6ae1077dba9753a001b22b07e4248bb2d4a3c4bf2cbae43f8c29f55c3c36c656aa7262fd2f429a8f7fbc443c175687b20c78ed3e409a03fb7b0afa22ef2fad63d580ce00e31948171787a8861e34d4c966d0c54d299585df757a76a278c28e899b7b16fe74d38ad9f6220a2ebbd2b427a3f29436feb2e000a3f1b26a3906eb84314a8f8dc211aeab0e5d5c776b54a59c3630a96de506fdfcf7a6991bae4e90ef2f6f99d5a92c78eddc1f7bd75a94bc472c32ef82b56261889a60fbaeee0b145c4aa785bff8b854b8c61fde3f018e10e9b4de6fbf5aa7ff30d985a8b8da1e55459855cd96076d0de5ff31a593ca7ff4badb07886808c624ceaf955569138c57fd9006877d8a174bce3347b72490d5181d83a20500dc49e8160d075659b568820ac2788a50aba0488a598c6d90821026c9a6213f279b8773eb3c5b60a73e48199ed7cba66595e7f219c4d0f5e231219619ffbd3d7bd1dad4ada8bf8d9ddbd5319ff47922e6858946778daf0e6b47973db77f56dcc356691ccc652ccd53d9f9895c896d99cf0c498e5a8d712f2e8a159a80e8a3e68b812650f0ddb0e1300438b914f4c28d232c443768bccaeb204212494782003343a5cf6d455b95efc94c8d95544db32c0539d0e1fc0288b5ecfcbc4bb7b6278a54093a56ec0ad5928c113aa96a114d7fd3aec173759f5c081f1d0a2f0922433ff17911901c0f0f940b1f345d161d91ecd4456e9b8458a14e0fcbaf2b750201c10cff3c8f387004b99be515f45c00200efea4e36d83524a760c20518d902e38d6121bef29b479edbf44be4c51730c3bbc86dd6abc40b67470e12b8235cb1317b6dae34d99248f3a8f98a77d848360c01a645f76c3abc3f66af0d1f0f7bbb77930b3f85430062fb1a82c5aff1350bdba049a8bc7bcc33e61fd3e8484b9e6d51ea121337b7553284cd1222a2469e1c7158f13ff63307530243af25b4b36d19ba0604212ebcb42b450c475e238c2b9f021088b16aacfb6e564eef86860fd077f90de471fc26621360609e526444e7556bb8d6de703271a4ba8dec254305cd1163f90a32d8966f599903de0e4b62e3a8db15753fb099d164d9bd44c05f163fd96ef73382c779214c8ec93498f2f5fa31a74ad6ac3136a37c6f6c27b1dd7b93c1e292f2ef0d569581f45cb0747ee5a2fcba5781cdc96b9b2f07bdbaf7ff4e0432873072112fd17792c91548393cd58a7eb8b126f17ee107f9670567c0ab6e6b9a2997054d968feb29f479fb8b7888138971a14228bad1854d9804f1bea77014b7f0d1037444178d66d2db19b660cf5e84726b2f730662a1df93abc54ae52
1d3d1691fb4fa48b087ead9dfccf4e6367d9a25f48a019a6affbec84c20ae7b10c2a169cfa07a4d26c5035c02d3b7d01681bf56bf568ab1f740c86ee6f43b8b440eea1f1139a89fa5bc653164426856e3a5e22ff5fed05ba7a054f6d4609eb142ef113a24f05b92ba72c40cd9bde09d8125d462fd30bab15cb47130fa30730b26c0d399d14b9cb42ec56df024bb9bbcd18ab4d279ccf82b2c1fee8fdbade8bd506791a6fd51349b24cdc36ec4d88e6dd43a85b92a71458908271d298681f54aa567262fc70260cc15d7f5559abd7e7ee4d2c7c727bf5036c469b690ece969480240c", "Olympics"},
{"$sshng$1$16$ABF86CF7849BBC5C661A69F1F7B4C87A$1200$4941cb1e3d19fd7173e7b97faf6484d803fe6737628241e7e848b4d02ef63c91a99aba940c116157f96e7c8e91550df51df80324a5630244ae83fcc04304ca9f7a4d9621cab45a6d513afc06b2364c222a7907729e3562f676fb94d7a3cfb2551de2766e9d67c035fecde455fd741a67521d0f71673d7c364ac314850314b31b6899198544c0f2ab788ebd435cd291ae8c12576837f784ab7cd8a9bc24bea3823034154df1b3d9b6a064131a9eb81b3fd298f128458cfce450305602e087ea9957e29117942ee7a2fd8a980da55c364f781feeb1bf355ee0de54ce946d85930f786d6f6175254d5a4370ddc5c88ef13e23429b0d9d01f413f08ce5253141d84c8368308890c69b65be93a415923f98bc898f9cb4abafcbcddf2785f698e091d75eea0a90216ca47d0459cb2b8d95a8989f604a0c7bc8dc690791c0c73e6f7a2628ea7ebd8e1a39ae583c91668dca7806f226ab361f07bfd35f7130aefc83071b865cc004f963ef80a750008e920f1854321949d6143ffc33b60b58015d5f32c820006b0a91aa08755fd859c845d3a75d89350d9c12e7df32b9bcd188681b0981ac4713505c4b516ee4d1073ea715b68d0c10ce3f562f0b5b5383a6bd53008ec0e8927d78d8fd21d760e67da700db638f3835cfd523046ee0f2fffed05c3bd902b66765629f428bc2808e300fbe2064af9ab125ac4195f3b5756e09059cc391127c8efba8e50eaeb7e0a4d98561ce9540fa6b9b6333beb1efed5751e7acc1aaf4f0ff975e548a21b08a2ab49d4e6bf2336e60eb8684debe5d611769cee17c38e02d2057284d7948fdbe459092a0e4471107f55562aceb1045f0f1cefb3d89e549422f27618137c48dce1f149f6c8748d4a1eff89eed9140ec598def8d38457a239ee167af6d60ae995261d9cb47ce2d4d25b1520f8b75408b45265cf14d3892dcb53732fa4151312f4f6c8d46a54d07c23b4b253003489a28d544fa903eb0a72a3ae914dafed5218ce8d745b23bde33c9e346db79051e763866fba38f123b32c110b4168c3baf2ace735d0fcf5ccf7c2a29d67d4831c0cf3472ab8b197ed953056c42d7cc91646ca12a7bebb23fa4fb063217b7b7c9fec7688788798424acc32b3c704a91bee6a63ca5a2186df80e225f96679568c936c9a47b5615858211c72441a9ff4dc265ba98f346984bf92969af9bd035f93a47ddf8beef9ba84eacc1f76ee4bd1eb242dc9fb2949d287f685369d1122865926270f8bc83d7118801e77e48fd2dd4b996231564d1649c4636b734e483067c1181d1edc6dd424f517cd3ea3fe1ab904cda78b7b7d6c856a82c7e1c6ba3e9fb93da1dfeaf4e3eff86b4541ab38f526f509b915f787d6abd4a4c
7174dfcb18f36ba72fa69b61a060b2785b3d3e8d28e9f6aa1a32aca3948ee48188a7ee24b160f3a6bc98297bd852d0759080cecd85dbc91bf4404705948c6a169e140a2479cdf5b840c3d6f99ea4e09b76730b4d33300f6a963c90cb0e07833a4bf314d72d81ae8ed5cf5ca4bcb6f35acb0c7d8298b70a5b61f87b13c3b1d02b56fe42c5465ad57dd4041b9b36943188acb8742052669b95fd98f3d18351f8748e9eb0f47d11a4d6ca2ec0348ef7d24e9f80c1dc239b513ed7867f25903875a1e9a983c5c8475b8de1f7f70423f1f472fca1e99a52b14105c4a47edb657eb87d0353", "extuitive"},
{"$sshng$1$16$925FA0A2EF7283A2F69C6CE69121D43C$1200$0498402851fd405114a860a1fdc760752bc8b7f44c77b2ef6a6d46ed3cee48d963bf34b905124c18823bc69819bbec29edebf4e697afffec2c35e79b993ff28b92d0355758b9c4ea00fb1f4bd48732059643ca2144b9c35de734d8db395076cb7c0468f6cfbabb1646345f907af82bf1598733d7aaa5496c55e662075d6bdb47cb941160fd1106570303d009bdc89fa3ecc07c84c3f91238a51db8ecc09f8e6b6c1395ce57970cbf2a3ef1341ddcb404e95832f0535a30b17048554b3341502619c48685db4706855ce62a86b3953f1219d4dae10243265d01264fa6408006188a40683e5de4952cb6796cd2593e9365065f51ff21b23b8bc075445226092b988114962ed5f4b97128cc69eca7a3d1169d2d83a632a5cc51290527bc848c7dd3d76554b28bb2bea0626f4fd27f3b9610e827e8211c60879d77ea1593d80908618b55081048bc2baef6848c410372b9a69358feb95c23d747f81b59577c601d55337b7c737d77bd742a115681a778c3d8e513a3ccd25cf833a32c73bf04476131b2bb498fac9496597163766b5f466b2478a564736c245cf0a0bf4b33be13eb2360dacbf8573b342f336d0341229654cd140674b18e35c04f917a9668306b4c93285825bdc8494c209d103212ea1deac7839db28acfb50fabc5c2b5057333ecbcb685adef5e962a526a02fd44f40a5af9c27d4211af129ad47b5fbc1d5f9f01e5ad1c53f728ead66a45cb8e6a9c1237aeb02374225ef2b63bc3ea6b2b1ab6136f90236ed5de5f88c6edde8ea75db8cf9aed8030537731dfe3ee855ab501f0235aeb05c8b2e3f4668ca0ab230cc8764863bf3ea71bbce2763556a14cdc5e09b0fa8e9ce6948d377b087fe04d1a5ae2ca61350514376cf447119fad0ea158b16b86be8f43742fb9934d3c1e8cc46497c191d1703a85e0b8b102b27595471687c5d1335a2290214fd46d9568d4b2845b88f116d5c2b3e3766030beb3d71157ff0c4fabd13aa173795db5b88d059ec79bf50c22f3119411b4279d1c7c0e88a7b01fa47e52553913b0ceee272500fedfa28483a849c186ce31b2134945dcaa84c13f7e474d59b0a0f5f768a8ec4cd58c8499b3ba3e1880fa7764ea9e424b29e5f6ea93671bce2985ea0d51efc2774f023c65e99be3db57c4a83e3c2f59fee62f60fa8c7eb66ff870f05cffd7ea208520a0640fe86f619944b389cfe695567ebc82829407273ac67130d3b09c8ff172a77a8ef56b18aac65d5607ef9f6ee791c0ec5b6447bd847b5d6a5411142a110700d5bb04424111ddfee27139ebad931da60de1e8bfc87f2b53b8720435d3dbb358445fc3493ada42192783741f72b5934d6a399e1ea16291fad9f38e49f23e3ad730
3d4d1e5677b9a81aff8dfca7abb33455e4e7858a9de656e4239c22ac2e91b558bcc25b356be583487ffc24459873febd2becae6056544d56fe670342347048a8abca019d2203794fd8652d31899f094d67aa304d1e607460efbdf05b3b407de54fc9e33d1879fe577091036b77e43e382f1acbbc12cb3bc83f25a4791265741e018b4825beb0a6901db19ee58a3c378df4ffeb4c9def7e730a08546d3f698f5ca4f98c81deb2982729ab95167ecaa1d6320b12d48f4de2fc9891b8e117c88a6f5bff046b1ea8cab4b0af8a488dfa6353ccaa3125e959322bd0ad4662ad15cffb86f3", "C0Ld.FUS10N"},
/* DSA test vectors */
{"$sshng$0$8$78DAEB836ED0A646$448$95d5a4abd38c957a969a322aa6936798d3c8523e6e553d762e4068b130294db89b4e67b790825bd6e0de1b60528557d8faf0ce4d413d92818f0cbb315b5b7902df845722032bc6883b4b87b5e5cce406c15f6d0b2d45916d156a661b0cc6a421dc7dd794788df9085a59c6f87c5baed7c6bc4a48a64c5a439d9b9f7e808397fce1fc1ed789e0114cb03cd392bf660541041c1f964476044d39dd71eb240231f4111494b3fbe85a35f2bbe32d93927aedecf959e786a51be450ade61e746b8eae6174016e8dabf59a358a518c3445c93b4824e61c065664f24b3e773643c0e47996b7c348cefe63407303cbb37e672905bb0a4fd51e4cfd920563863987f96f9fa2098d0ed5c9244f21ba4df28d9826fd8e0f525af349f7b54f0c83bee8de8e1d3702a6edc0a396af85b8805d3ac4a0b01f053d0454856fa3a450f199637ae0333670483a454769b5bcbb5a6329d07c0ad6ac847f11e32ccb835650fb9404880c1ad19548cfb57107d43cc8610b9869165a8b116867b118f97ef74f09ab285114512f599d066d46dae846a1b04787f3e30410b234e5fc098e8a39419a2dbdb5a25c709b13fd31eb2d0e6994e11df0e32ff45b1a3c95c153ce606912a8dc966daf", "television"},
#ifdef DEBUG
/* this key is SUPER slow, now that OMP_SCALE has been increased. */
/* it would be nice to get one of these with rounds set to 2, */
/* instead of the rounds=64 of this hash (pass_gen.pl update) */
/* new ssh key format */
{"$sshng$2$16$cc2c3c68c39e0ba6289ed36cb92c3a73$1334$6f70656e7373682d6b65792d7631000000000a6165733235362d636263000000066263727970740000001800000010cc2c3c68c39e0ba6289ed36cb92c3a73000000400000000100000117000000077373682d727361000000030100010000010100af9bf6a900464f154916fac3d80476e0ee739ff7f25a96b562ff9f4262db1972992947dfa89da47f9fa5f4d9e54a2d103ce63779746888c298693663310f054af1c1dc90f62b22f630703726631c03ff217c29a32fd9f9bc178aabe9666c37c2c2bf4a2b4c528efe51e755053216d41e860ef996b549184cd15bd17641128690d2946a76261954edfee942bbefbb182df320d3da7f46a5fcddc15b5ecbf9b1b822cbc9ef978e8b639e8eab2e3b1229d429da4f6bdc27af2f2aab0e187a6cce91b95a8ac6f5602773d0014f1e8124a89e43e502bebb4d21f6a148e208e2d591391d1aede6a0a6d499a3de9996474310dd9d3233e3f05e9d0e85aba44715e838bd000003d08168da8d056f904faf9d80b22c08141e8b068a3af64ace3b5ffbad24b884cd37ae7ad89546031ab834d612b44266b95263a5c38f0d628d704caf70944629ad66d3cef974ec4faaaeb7d7df67f1321bb606ec6e14060c0de1a63a5732ca89b94ae765cb0671a4a1a76b42c06c220546bbf0f8a88471c0bf4200a0cbe0d346be67f688dcf76a3666f7c4447b3ced2d0c9a2fa50abc6ca222ddd70aeb82d65f8fefa313b3db76c5a03478bebc9e0942e17c07ae11d1fbe1b0b380ca2506a26aaf5cdb8668af186d1bc293844bd9c2cc8bb40530387f9a5e11770484593af69384fc003beb82beffa00c1b23f7d6a9bd8f6153cb7abd9531008df384a3455d7cdd7020df4dc507f34e697ad437f01989271b17b93045265f20e6fd02f63ac1e13ec85f8224bc60dd91e15dcfa2ec4f6986e3b37ea6bd571ca18089402f80c121323eb774708cc6ab470e05a53428b65dede47ded97c4f5941be44f6290d5ccdd9bea95b06190efee6c64d874798b6045c5d553a1f68c95f143d0a6893877796fff452851d64ac73c007b91dd6058a5c31165003d9d66b4a1a40c2f82e5c3be6820b109addc0f088c84576e30c7202da3304636de4035f3ca8b032885aa2bedb4d1e134c1615139fb6ed7fa924c2e8abdfcd75da029e910ee8a9d4af594e2a9732115237b6ba3c24f8dfd4bed0a7cb4d96e114bff30e9c68226ae04de6fee2340b41c49cd08982a3f21169853366882a4af43e256cb0d09c88856c46f2ad8a7bcc3896efe5f4f104ef9b595cd08b4b76d6ac074f4fa4a488f508c6106603cb4ca65af819d2222a086ddd16a63021627f337ab9d86b33150808313bfe7368737bf38e7dee410cf08f2effe
f780d161e2cb734135bba36fe2ee3319cda95242b89b50673c88eb3dfa331e987e3fbde92cec7e019990d97b11c71d5b04b8ec451549abc9ed195a080aefb1d77eff476f9de4315fca5bf6386438869a8d59a5f0badda70b337bb9bdcff966229d631286d3c5b97c41f3ef5daa6ef4416577815214733e8602ef7f8abc3a19ee58f48b10c8ab1d5c76f01febdb29b36910d615d4022849ec117f02b6ae898cc0ff67e61df43284d3ff739ab4c34fe2854797ae0b66e0ba234e236daba6eb9172e9e1f4a0f5283ae9b336059d2ab2c7145e0a4de4b5bed3baf87c90ad4d47b94eb1c01b07510191f06b9eaf014e225b2bce46d5a7080c6d1daf64460836d7630c157e44afc9483a777d76fcafbfc2c4f299211c0465f0151f13707f815700944ad6a17e23e63dd0eecb5cdb5284ad92dd853e0ce136bc77633fef514e6aadeb61e7fe885fe399076cbd5464a6d17efa1e116853e80cf08adea7e550b0d27e6a96d835069674fd7bcc$64", "12345"},
#endif
// EC private key
{"$sshng$3$16$00B535FBA963402F20C12648A59D7258$128$dfa09369ff38f33c9789d33760d16fdd47730311b41b51a0c7b1dd1dec850c5c2ff523710af12839f25a709f0076cdd3e3643fab2ea1d17c6fae52a797b55e752b71a1fdd46d5bd889b51ddc2a01922340e5be914a67dabf666aff1c88275bd8ec3529e26386279adeb480446ab869dc27c160bd8fe469d5f993b90aaffef8ce", "password123"},
{NULL}
};
/* Per-candidate plaintext buffers and crack-status flags (allocated in init()). */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *cracked;

/* Decoded fields of the hash currently being attacked (set via set_salt()). */
static struct custom_salt {
	unsigned char salt[16]; /* IV / KDF salt; actual length in sl (max 16) */
	unsigned char ct[N];    /* encrypted private-key blob; actual length in ctl */
	int cipher;             /* 0=3DES-CBC, 1=AES-128-CBC, 2=openssh-key-v1 (bcrypt KDF), 3=EC key w/ AES-128 */
	int ctl;                /* ciphertext length in bytes */
	int sl;                 /* salt length in bytes */
	int rounds;             /* bcrypt_pbkdf rounds (cipher == 2 only) */
} *cur_salt;
/*
 * Format initializer: allocate per-candidate buffers.  Under OpenMP the
 * min/max keys-per-crypt are scaled by the thread count (times OMP_SCALE,
 * both declared elsewhere in this file) before sizing the allocations.
 */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	cracked = mem_calloc(self->params.max_keys_per_crypt,
	                     sizeof(*cracked));
}
/* Tear-down hook: release the buffers allocated by init(). */
static void done(void)
{
	/* The two allocations are independent; free in allocation order. */
	MEM_FREE(saved_key);
	MEM_FREE(cracked);
}
/*
 * Canonicalize a ciphertext line so differently-cased copies of the same
 * hash compare equal (FMT_SPLIT_UNIFIES_CASE): lower-case a bounded copy.
 */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[sizeof(struct custom_salt) + 100];

	strnzcpy(out, ciphertext, sizeof(out));
	strlwr(out);
	return out;
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *ctcopy, *keeptr, *p;
int len, cipher;
if (strncmp(ciphertext, "$sshng$", 7) != 0)
return 0;
ctcopy = strdup(ciphertext);
keeptr = ctcopy;
ctcopy += 7;
if ((p = strtokm(ctcopy, "$")) == NULL) /* cipher */
goto err;
if (!isdec(p))
goto err;
cipher = atoi(p);
if ((p = strtokm(NULL, "$")) == NULL) /* salt len */
goto err;
if (!isdec(p))
goto err;
len = atoi(p);
if (len > 16)
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* salt */
goto err;
if (hexlen(p) != len * 2)
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* ciphertext length */
goto err;
if (!isdec(p))
goto err;
len = atoi(p);
if ((p = strtokm(NULL, "$")) == NULL) /* ciphertext */
goto err;
if (hexlen(p) / 2 != len)
goto err;
if (cipher == 2) {
if ((p = strtokm(NULL, "$")) == NULL) /* rounds */
goto err;
if (!isdec(p))
goto err;
}
if (cipher != 0 && cipher != 1 && cipher != 2 && cipher != 3) {
fprintf(stderr, "[ssh-ng] cipher value of %d is not supported!\n", cipher);
}
MEM_FREE(keeptr);
return 1;
err:
MEM_FREE(keeptr);
return 0;
}
/*
 * Unpack a "$sshng$..." line into a static custom_salt and return it.
 * The declared salt/ciphertext lengths are clamped to the fixed-size
 * destination buffers so a hostile or malformed line cannot overflow
 * cs.salt/cs.ct even if valid() was bypassed or relaxed.
 */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	char *p;
	int i;
	static struct custom_salt cs;

	memset(&cs, 0, sizeof(struct custom_salt));
	ctcopy += 7; /* skip over "$sshng$" */
	p = strtokm(ctcopy, "$");
	cs.cipher = atoi(p);
	p = strtokm(NULL, "$");
	cs.sl = atoi(p);
	if (cs.sl < 0 || cs.sl > (int)sizeof(cs.salt))
		cs.sl = 0; /* malformed: refuse to write out of bounds */
	p = strtokm(NULL, "$");
	for (i = 0; i < cs.sl; i++)
		cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "$");
	cs.ctl = atoi(p);
	if (cs.ctl < 0 || cs.ctl > (int)sizeof(cs.ct))
		cs.ctl = 0; /* malformed: refuse to write out of bounds */
	p = strtokm(NULL, "$");
	for (i = 0; i < cs.ctl; i++)
		cs.ct[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	if (cs.cipher == 2) {
		p = strtokm(NULL, "$");
		cs.rounds = atoi(p);
	}
	MEM_FREE(keeptr);
	return (void *)&cs;
}
/* Make `salt` current for subsequent crypt_all()/cmp_exact() calls. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
#if 0
/*
 * (Compiled out via the surrounding #if 0.)  Iterated MD5 key derivation:
 * each round hashes (previous digest, if any) || password || salt[0..7]
 * and emits up to 16 key bytes, until `nbytes` of key material exist.
 * Superseded by the unrolled generate16key_bytes()/generate24key_bytes().
 */
static void generate_key_bytes(int nbytes, unsigned char *password, unsigned char *key)
{
	unsigned char digest[16] = {0};
	int keyidx = 0;
	int digest_inited = 0;
	int size = 0;
	int i = 0;

	while (nbytes > 0) {
		MD5_CTX ctx;
		MD5_Init(&ctx);
		/* chain the previous round's digest into this round */
		if (digest_inited) {
			MD5_Update(&ctx, digest, 16);
		}
		MD5_Update(&ctx, password, strlen((const char*)password));
		/* use first 8 bytes of salt */
		MD5_Update(&ctx, cur_salt->salt, 8);
		MD5_Final(digest, &ctx);
		digest_inited = 1;
		if (nbytes > 16)
			size = 16;
		else
			size = nbytes;
		/* copy part of digest to keydata */
		for(i = 0; i < size; i++)
			key[keyidx++] = digest[i];
		nbytes -= size;
	}
}
#endif
/* Derive 16 bytes of key material: MD5(password || salt[0..7]). */
static inline void generate16key_bytes(unsigned char *password,
                                       unsigned char *key)
{
	MD5_CTX md;
	const size_t plen = strlen((const char*)password);

	MD5_Init(&md);
	MD5_Update(&md, password, plen);
	MD5_Update(&md, cur_salt->salt, 8); /* only the first 8 salt bytes */
	MD5_Final(key, &md);                /* the digest is the key */
}
/*
 * Derive 24 bytes of key material with two chained MD5 rounds:
 *   key[0..15]  = MD5(password || salt[0..7])
 *   key[16..23] = first 8 bytes of MD5(key[0..15] || password || salt[0..7])
 */
static inline void generate24key_bytes(unsigned char *password,
                                       unsigned char *key)
{
	MD5_CTX md;
	unsigned char round2[16];
	const int plen = strlen((const char*)password);

	/* First round fills key[0..15]. */
	MD5_Init(&md);
	MD5_Update(&md, password, plen);
	MD5_Update(&md, cur_salt->salt, 8); /* only the first 8 salt bytes */
	MD5_Final(key, &md);

	/* Second round chains the previous digest in front. */
	MD5_Init(&md);
	MD5_Update(&md, key, 16);
	MD5_Update(&md, password, plen);
	MD5_Update(&md, cur_salt->salt, 8);
	MD5_Final(round2, &md);
	memcpy(key + 16, round2, 8); /* 8 more bytes of keydata */
}
/*
 * Plausibility check on the final decrypted block of a new-format
 * (openssh-key-v1) private key, whose plaintext is padded with the byte
 * sequence 01 02 03 ...  Returns 0 when the tail looks like such a run,
 * -1 otherwise.  Heuristic only — false positives are possible (the
 * format sets FMT_NOT_EXACT); cmp_exact() re-verifies.
 */
static inline int check_padding_only(unsigned char *out, int length)
{
	int pad;
	int i;
	// check padding
	pad = out[length - 1];
	if(pad > 16 || length < 16)
		return -1;
	if (pad < 4) { // XXX is this possible? if yes, will killing this result in too many false positives?
		return -1;
	}
	/* NOTE(review): the loop runs from index length-1 down to pad+1, i.e. it
	 * assumes the increasing run covers nearly the whole block — confirm this
	 * matches the openssh padding layout for all pad values. */
	for(i = length - 1; i > pad; i--) // check for 0102030405060708090a like sequence
		if(out[i] - 1 != out[i - 1])
			return -1;
	return 0; // valid padding!
}
/*
 * Verify PKCS#7 padding and the leading BER structure of a decrypted EC
 * private key.  Returns 0 on success, -1 on any mismatch.
 * strict_mode is currently unused in this variant (kept for signature
 * parity with check_padding_and_structure()).
 */
static inline int check_padding_and_structure_EC(unsigned char *out, int length, int strict_mode)
{
	struct asn1_hdr hdr;
	const uint8_t *pos, *end;

	// First check padding
	if (check_pkcs_pad(out, length, 16) < 0)
		return -1;

	/* check BER decoding, EC private key file contains:
	 *
	 * SEQUENCE, INTEGER (length 1), OCTET STRING, cont, OBJECT, cont, BIT STRING
	 *
	 * $ ssh-keygen -t ecdsa -f unencrypted_ecdsa_sample.key # don't use a password for testing
	 * $ openssl asn1parse -in unencrypted_ecdsa_sample.key # see the underlying structure
	 */

	// SEQUENCE
	if (asn1_get_next(out, length, &hdr) < 0 ||
	    hdr.class != ASN1_CLASS_UNIVERSAL ||
	    hdr.tag != ASN1_TAG_SEQUENCE) {
		goto bad;
	}
	pos = hdr.payload;
	end = pos + hdr.length;

	// version Version (Version ::= INTEGER)
	if (asn1_get_next(pos, end - pos, &hdr) < 0 ||
	    hdr.class != ASN1_CLASS_UNIVERSAL ||
	    hdr.tag != ASN1_TAG_INTEGER) {
		goto bad;
	}
	pos = hdr.payload + hdr.length;
	if (hdr.length != 1)
		goto bad;

	// OCTET STRING
	if (asn1_get_next(pos, end - pos, &hdr) < 0 ||
	    hdr.class != ASN1_CLASS_UNIVERSAL ||
	    hdr.tag != ASN1_TAG_OCTETSTRING) {
		goto bad;
	}
	pos = hdr.payload + hdr.length;
	if (hdr.length < 8) // "secp112r1" curve uses 112 bit prime field, rest are bigger
		goto bad;

	// XXX add more structure checks!

	return 0;

bad:
	return -1;
}
/*
 * Verify PKCS#7 padding and the leading BER structure of a decrypted
 * RSA/DSA private key.  Returns 0 on success, -1 otherwise.
 * strict_mode (used by cmp_exact()) walks two additional INTEGER fields
 * to cut down on false positives from the quick check.
 */
static inline int check_padding_and_structure(unsigned char *out, int length, int strict_mode)
{
	struct asn1_hdr hdr;
	const uint8_t *pos, *end;

	// First check padding
	if (check_pkcs_pad(out, length, 16) < 0)
		return -1;

	/* check BER decoding, private key file contains:
	 *
	 * RSAPrivateKey = { version = 0, n, e, d, p, q, d mod p-1, d mod q-1, q**-1 mod p }
	 * DSAPrivateKey = { version = 0, p, q, g, y, x }
	 *
	 * openssl asn1parse -in test_rsa.key # this shows the structure nicely!
	 */

	// SEQUENCE
	if (asn1_get_next(out, length, &hdr) < 0 ||
	    hdr.class != ASN1_CLASS_UNIVERSAL ||
	    hdr.tag != ASN1_TAG_SEQUENCE) {
		goto bad;
	}
	pos = hdr.payload;
	end = pos + hdr.length;

	// version Version (Version ::= INTEGER)
	if (asn1_get_next(pos, end - pos, &hdr) < 0 ||
	    hdr.class != ASN1_CLASS_UNIVERSAL ||
	    hdr.tag != ASN1_TAG_INTEGER) {
		goto bad;
	}
	pos = hdr.payload + hdr.length;

	// INTEGER (big one)
	if (asn1_get_next(pos, end - pos, &hdr) < 0 ||
	    hdr.class != ASN1_CLASS_UNIVERSAL ||
	    hdr.tag != ASN1_TAG_INTEGER) {
		goto bad;
	}
	pos = hdr.payload + hdr.length;
	/* NOTE: now this integer has to be big, is this always true?
	 * RSA (as used in ssh) uses big prime numbers, so this check should be OK */
	if (hdr.length < 64) {
		goto bad;
	}

	if (strict_mode) {
		// INTEGER (small one)
		if (asn1_get_next(pos, end - pos, &hdr) < 0 ||
		    hdr.class != ASN1_CLASS_UNIVERSAL ||
		    hdr.tag != ASN1_TAG_INTEGER) {
			goto bad;
		}
		pos = hdr.payload + hdr.length;

		// INTEGER (big one again)
		if (asn1_get_next(pos, end - pos, &hdr) < 0 ||
		    hdr.class != ASN1_CLASS_UNIVERSAL ||
		    hdr.tag != ASN1_TAG_INTEGER) {
			goto bad;
		}
		pos = hdr.payload + hdr.length;
		if (hdr.length < 32) {
			goto bad;
		}
	}
	return 0;

bad:
	return -1;
}
int bcrypt_pbkdf(const char *pass, size_t passlen, const uint8_t *salt, size_t saltlen,
uint8_t *key, size_t keylen, unsigned int rounds);
/*
 * Decrypt the current salt's ciphertext with a key derived from `password`,
 * dispatching on cur_salt->cipher:
 *   0 - 3DES-CBC, key from generate24key_bytes()
 *   1 - AES-128-CBC, key from generate16key_bytes()
 *   2 - openssh-key-v1: bcrypt_pbkdf -> AES-256-CBC (key || IV)
 *   3 - EC key: AES-128-CBC, always decrypted in full
 * When full_decrypt is 0 only the leading SAFETY_FACTOR bytes plus the
 * trailing block(s) are decrypted — enough for the padding/structure checks
 * and much faster for large keys.
 */
static void common_crypt_code(char *password, unsigned char *out, int full_decrypt)
{
	if (cur_salt->cipher == 0) {
		unsigned char key[24] = {0};
		DES_cblock key1, key2, key3;
		DES_cblock ivec;
		DES_key_schedule ks1, ks2, ks3;

		generate24key_bytes((unsigned char*)password, key);
		memset(out, 0, SAFETY_FACTOR);
		memcpy(key1, key, 8);
		memcpy(key2, key + 8, 8);
		memcpy(key3, key + 16, 8);
		DES_set_key((DES_cblock *) key1, &ks1);
		DES_set_key((DES_cblock *) key2, &ks2);
		DES_set_key((DES_cblock *) key3, &ks3);
		memcpy(ivec, cur_salt->salt, 8); /* first 8 salt bytes are the IV */
		if (full_decrypt) {
			DES_ede3_cbc_encrypt(cur_salt->ct, out, cur_salt->ctl, &ks1, &ks2, &ks3, &ivec, DES_DECRYPT);
		} else {
			/* decrypt head for the structure check, then the last 32
			 * bytes for the padding check */
			DES_ede3_cbc_encrypt(cur_salt->ct, out, SAFETY_FACTOR, &ks1, &ks2, &ks3, &ivec, DES_DECRYPT);
			DES_ede3_cbc_encrypt(cur_salt->ct + cur_salt->ctl - 32, out + cur_salt->ctl - 32, 32, &ks1, &ks2, &ks3, &ivec, DES_DECRYPT);
		}
	} else if (cur_salt->cipher == 1) {
		unsigned char key[16] = {0};
		AES_KEY akey;
		unsigned char iv[16];

		memcpy(iv, cur_salt->salt, 16); /* 16-byte salt doubles as the IV */
		memset(out, 0, SAFETY_FACTOR);
		memset(out + cur_salt->ctl - 32, 0, 32);
		generate16key_bytes((unsigned char*)password, key);
		AES_set_decrypt_key(key, 128, &akey);
		if (full_decrypt) {
			AES_cbc_encrypt(cur_salt->ct, out, cur_salt->ctl, &akey, iv, AES_DECRYPT);
		} else {
			AES_cbc_encrypt(cur_salt->ct, out, SAFETY_FACTOR, &akey, iv, AES_DECRYPT); // are starting SAFETY_FACTOR bytes enough?
			// decrypting 1 blocks (16 bytes) is enough for correct padding check
		}
		/* decrypt the last block (again, in the full case) using the
		 * preceding ciphertext block as the CBC IV */
		memcpy(iv, cur_salt->ct + cur_salt->ctl - 32, 16);
		AES_cbc_encrypt(cur_salt->ct + cur_salt->ctl - 16, out + cur_salt->ctl - 16, 16, &akey, iv, AES_DECRYPT);
	} else if (cur_salt->cipher == 2) { /* new ssh key format handling */
		unsigned char key[32+16] = {0};
		AES_KEY akey;
		unsigned char iv[16];

		// derive (key length + iv length) bytes
		bcrypt_pbkdf(password, strlen((const char*)password), cur_salt->salt, 16, key, 32 + 16, cur_salt->rounds);
		AES_set_decrypt_key(key, 256, &akey);
		memcpy(iv, key + 32, 16); /* IV is the last 16 derived bytes */
		AES_cbc_encrypt(cur_salt->ct + cur_salt->ctl - 32, out, 32, &akey, iv, AES_DECRYPT); // decrypt 2 blocks
	} else if (cur_salt->cipher == 3) { // EC keys with AES-128
		unsigned char key[16] = {0};
		AES_KEY akey;
		unsigned char iv[16];

		memcpy(iv, cur_salt->salt, 16);
		memset(out, 0, N);
		generate16key_bytes((unsigned char*)password, key);
		AES_set_decrypt_key(key, 128, &akey);
		AES_cbc_encrypt(cur_salt->ct, out, cur_salt->ctl, &akey, iv, AES_DECRYPT); // full decrypt
	}
}
/*
 * Test every queued candidate against the current salt, recording results
 * in cracked[].  Only a partial decrypt is done here; cmp_exact() performs
 * the full decryption for apparent hits.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;

	/* Bug fix: the `for` statement must live OUTSIDE the #ifdef.  With the
	 * loop under #ifdef _OPENMP, non-OpenMP builds executed the body exactly
	 * once and only candidate 0 was ever tested. */
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		unsigned char out[N];

		common_crypt_code(saved_key[index], out, 0); // don't do full decryption (except for EC keys)
		if (cur_salt->cipher == 0 || cur_salt->cipher == 1) {
			if (check_padding_and_structure(out, cur_salt->ctl, 0) == 0)
				cracked[index] = 1;
			else
				cracked[index] = 0;
		} else if (cur_salt->cipher == 2) { // new ssh key format handling
			if (check_padding_only(out + 16, 16) == 0) /* always check the last block (16 bytes) */
				cracked[index] = 1;
			else
				cracked[index] = 0;
		} else if (cur_salt->cipher == 3) { // EC keys
			if (check_padding_and_structure_EC(out, cur_salt->ctl, 0) == 0)
				cracked[index] = 1;
			else
				cracked[index] = 0;
		}
	}
	return count;
}
/* Return 1 as soon as any candidate in this batch cracked the salt. */
static int cmp_all(void *binary, int count)
{
	int i = 0;

	while (i < count) {
		if (cracked[i])
			return 1;
		i++;
	}
	return 0;
}
/* Per-candidate result: nonzero iff crypt_all() flagged this index. */
static int cmp_one(void *binary, int index)
{
	return cracked[index];
}
/*
 * Final verification for an apparent hit: perform a full decryption and a
 * strict structure check to weed out false positives from the quick pass.
 * EC keys (cipher 3) were already fully decrypted and checked in crypt_all.
 */
static int cmp_exact(char *source, int index)
{
	unsigned char out[N];

	common_crypt_code(saved_key[index], out, 1); // do full decryption!
	switch (cur_salt->cipher) {
	case 0:
	case 1:
		return check_padding_and_structure(out, cur_salt->ctl, 1) == 0;
	case 2: /* new ssh key format handling */
		/* always check the last block (16 bytes) */
		return check_padding_only(out + 16, 16) == 0;
	case 3: /* EC keys */
		return 1;
	default:
		return 0;
	}
}
static void sshng_set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
/* Return the stored (possibly truncated) candidate for this index. */
static char *get_key(int index)
{
	return saved_key[index];
}
/* John the Ripper format descriptor: parameters + method table for ssh-ng. */
struct fmt_main fmt_sshng = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		/* FMT_NOT_EXACT: the partial-decrypt padding checks can yield
		 * false positives, so matches are re-verified by cmp_exact(). */
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_NOT_EXACT | FMT_SPLIT_UNIFIES_CASE,
		{ NULL },
		sshng_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		fmt_default_binary,   /* no binary: results live in cracked[] */
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		sshng_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
#endif /* HAVE_BIO_NEW */
|
example.c | int g1 = 10;
/* Fixture: increments global g1 behind a label (no goto targets "test"
 * inside this function; the label itself appears to be the test subject). */
void foo() {
	test: g1++;
}
/* Fixture: conditional self-recursion with an unused local. */
void bar(int x) {
	int y = 10;  /* unused */
	if (x < 10) {
		/* NOTE(review): bar(x++) passes the OLD value of x (post-increment),
		 * so the recursion never makes progress for x < 10 — effectively
		 * infinite recursion.  Presumably intentional in this fixture;
		 * real code would use bar(x + 1). */
		bar(x++);
	}
}
/* Fixture: overwrites its parameter and declares an unused local;
 * the function has no observable effect. */
void foobar(int x) {
	x = 10;
	int yz = 11;
}
/* Fixture main: exercises OpenMP atomics/barriers, labels, and loop forms.
 * The exact statement shapes look deliberate (analysis-tool input), so the
 * code is left untouched and only annotated. */
int main() {
	int x = 10;
	int z = 11;
	/* NOTE(review): "atomic update" on a plain assignment (z = 0) is an
	 * atomic WRITE, not an update-form expression — confirm the tool under
	 * test accepts this clause. */
#pragma omp parallel
	{
#pragma omp atomic update
		z = 0;
#pragma omp barrier
		/* NOTE(review): barriers in divergent branches are only safe if all
		 * threads take the same branch; x is 10 here, so every thread takes
		 * the else branch. */
		if (x < 9) {
#pragma omp atomic update
			z++;
#pragma omp barrier
		} else {
#pragma omp barrier
#pragma omp atomic update
			z++;
		}
	}
	z = 11;
#pragma omp parallel
	{
#pragma omp atomic update
		z++;
#pragma omp barrier
		/* x is written by every thread here (data race only mitigated by
		 * the atomic clause). */
#pragma omp atomic update
		x++;
	}
	int thisVar = 14;
	test: x++;
	while (x > 8) {
		l2: x--;
		if (x == 1) {
			/* NOTE(review): "3 + x++ + thisVar" modifies and reads x without
			 * sequencing — undefined behavior in C; likely a deliberate
			 * fixture case. */
			thisStmt: x = 3 + x++ + thisVar;
			foo();
			int z = 10;   /* shadows the outer z */
			l1: x = 10;
			bar(z);
		}
		bar(10);
	}
	do {
		x--;
		x = x + 2;
		x = x - g1;
		thisVar++;
	} while (x > 0);
	while (thisVar++ < 5) {
		if (thisVar == 10) {
			continue;
		}
	}
}
|
channel.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC H H AAA N N N N EEEEE L %
% C H H A A NN N NN N E L %
% C HHHHH AAAAA N N N N N N EEE L %
% C H H A A N NN N NN E L %
% CCCC H H A A N N N N EEEEE LLLLL %
% %
% %
% MagickCore Image Channel Methods %
% %
% Software Design %
% Cristy %
% December 2003 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/channel.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/image.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h a n n e l F x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ChannelFxImage() applies a channel expression to the specified image. The
% expression consists of one or more channels, either mnemonic or numeric (e.g.
% red, 1), separated by actions as follows:
%
% <=> exchange two channels (e.g. red<=>blue)
% => copy one channel to another channel (e.g. red=>green)
% = assign a constant value to a channel (e.g. red=50%)
% , write new image channels in the specified order (e.g. red, green)
% | add a new output image for the next set of channel operations
% ; move to the next input image for the source of channel data
%
% For example, to create 3 grayscale images from the red, green, and blue
% channels of an image, use:
%
% -channel-fx "red; green; blue"
%
% A channel without an operation symbol implies separate (i.e, semicolon).
%
% The format of the ChannelFxImage method is:
%
% Image *ChannelFxImage(const Image *image,const char *expression,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o expression: A channel expression.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Channel-expression operations parsed by ChannelFxImage() (see the method
 * comment above: "red", "red=50%", "red<=>blue", "red=>green"). */
typedef enum
{
  ExtractChannelOp,   /* bare channel name: copy into the output image */
  AssignChannelOp,    /* '=' with a constant: set channel to a value */
  ExchangeChannelOp,  /* '<=>': swap two channels */
  TransferChannelOp   /* '=>': copy one channel to another */
} ChannelFx;
/*
 * ChannelImage() applies one channel operation to every pixel:
 * for AssignChannelOp it writes the constant `pixel` into
 * destination_channel; otherwise it copies source_channel of source_image
 * into destination_channel of destination_image.  Rows are processed in
 * parallel; returns MagickTrue unless a cache access fails.
 */
static MagickBooleanType ChannelImage(Image *destination_image,
  const PixelChannel destination_channel,const ChannelFx channel_op,
  const Image *source_image,const PixelChannel source_channel,
  const Quantum pixel,ExceptionInfo *exception)
{
  CacheView
    *source_view,
    *destination_view;

  MagickBooleanType
    status;

  size_t
    height,
    width;

  ssize_t
    y;

  status=MagickTrue;
  source_view=AcquireVirtualCacheView(source_image,exception);
  destination_view=AcquireAuthenticCacheView(destination_image,exception);
  /* operate on the region common to both images */
  height=MagickMin(source_image->rows,destination_image->rows);
  width=MagickMin(source_image->columns,destination_image->columns);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source_image,source_image,height,1)
#endif
  for (y=0; y < (ssize_t) height; y++)
  {
    PixelTrait
      destination_traits,
      source_traits;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip (can't break in OMP) */
    p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
      exception);
    q=GetCacheViewAuthenticPixels(destination_view,0,y,
      destination_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    destination_traits=GetPixelChannelTraits(destination_image,
      destination_channel);
    source_traits=GetPixelChannelTraits(source_image,source_channel);
    if ((destination_traits == UndefinedPixelTrait) ||
        (source_traits == UndefinedPixelTrait))
      continue;  /* channel not present in one of the images */
    for (x=0; x < (ssize_t) width; x++)
    {
      if (channel_op == AssignChannelOp)
        SetPixelChannel(destination_image,destination_channel,pixel,q);
      else
        SetPixelChannel(destination_image,destination_channel,
          GetPixelChannel(source_image,source_channel,p),q);
      p+=GetPixelChannels(source_image);
      q+=GetPixelChannels(destination_image);
    }
    if (SyncCacheViewAuthenticPixels(destination_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  destination_view=DestroyCacheView(destination_view);
  source_view=DestroyCacheView(source_view);
  return(status);
}
MagickExport Image *ChannelFxImage(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
#define ChannelFxImageTag "ChannelFx/Image"

  ChannelFx
    channel_op;

  ChannelType
    channel_mask;

  char
    token[MagickPathExtent];

  const char
    *p;

  const Image
    *source_image;

  double
    pixel;

  Image
    *destination_image;

  MagickBooleanType
    status;

  PixelChannel
    source_channel,
    destination_channel;

  ssize_t
    channels;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  source_image=image;
  destination_image=CloneImage(source_image,0,0,MagickTrue,exception);
  if (destination_image == (Image *) NULL)
    return((Image *) NULL);
  /* a NULL expression is a no-op: return the clone unchanged */
  if (expression == (const char *) NULL)
    return(destination_image);
  status=SetImageStorageClass(destination_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      destination_image=GetLastImageInList(destination_image);
      return((Image *) NULL);
    }
  destination_channel=RedPixelChannel;
  channel_mask=UndefinedChannel;
  pixel=0.0;
  p=(char *) expression;
  (void) GetNextToken(p,&p,MagickPathExtent,token);
  channel_op=ExtractChannelOp;
  /* token-at-a-time parse of the channel expression (see method comment) */
  for (channels=0; *token != '\0'; )
  {
    ssize_t
      i;

    /*
      Interpret channel expression.
    */
    switch (*token)
    {
      case ',':
      {
        /* ',' separates channels written to the same output image */
        (void) GetNextToken(p,&p,MagickPathExtent,token);
        break;
      }
      case '|':
      {
        /* '|' advances to the next input image (wrapping to the first) */
        if (GetNextImageInList(source_image) != (Image *) NULL)
          source_image=GetNextImageInList(source_image);
        else
          source_image=GetFirstImageInList(source_image);
        (void) GetNextToken(p,&p,MagickPathExtent,token);
        break;
      }
      case ';':
      {
        /* ';' finalizes the current output image and starts a new one */
        Image
          *canvas;

        (void) SetPixelChannelMask(destination_image,channel_mask);
        if ((channel_op == ExtractChannelOp) && (channels == 1))
          {
            /* a single extracted channel becomes a grayscale image */
            (void) SetPixelMetaChannels(destination_image,0,exception);
            (void) SetImageColorspace(destination_image,GRAYColorspace,
              exception);
          }
        canvas=CloneImage(source_image,0,0,MagickTrue,exception);
        if (canvas == (Image *) NULL)
          {
            destination_image=DestroyImageList(destination_image);
            return(destination_image);
          }
        AppendImageToList(&destination_image,canvas);
        destination_image=GetLastImageInList(destination_image);
        status=SetImageStorageClass(destination_image,DirectClass,exception);
        if (status == MagickFalse)
          {
            destination_image=GetLastImageInList(destination_image);
            return((Image *) NULL);
          }
        (void) GetNextToken(p,&p,MagickPathExtent,token);
        channels=0;
        destination_channel=RedPixelChannel;
        channel_mask=UndefinedChannel;
        break;
      }
      default:
        break;
    }
    /* parse the (source) channel name or number */
    i=ParsePixelChannelOption(token);
    if (i < 0)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "UnrecognizedChannelType","`%s'",token);
        destination_image=DestroyImageList(destination_image);
        return(destination_image);
      }
    source_channel=(PixelChannel) i;
    channel_op=ExtractChannelOp;
    (void) GetNextToken(p,&p,MagickPathExtent,token);
    /* '<', '=', '>' combine into the operators <=>, =, => */
    if (*token == '<')
      {
        channel_op=ExchangeChannelOp;
        (void) GetNextToken(p,&p,MagickPathExtent,token);
      }
    if (*token == '=')
      {
        if (channel_op != ExchangeChannelOp)
          channel_op=AssignChannelOp;
        (void) GetNextToken(p,&p,MagickPathExtent,token);
      }
    if (*token == '>')
      {
        if (channel_op != ExchangeChannelOp)
          channel_op=TransferChannelOp;
        (void) GetNextToken(p,&p,MagickPathExtent,token);
      }
    switch (channel_op)
    {
      case AssignChannelOp:
      case ExchangeChannelOp:
      case TransferChannelOp:
      {
        if (channel_op == AssignChannelOp)
          pixel=StringToDoubleInterval(token,(double) QuantumRange+1.0);
        else
          {
            /* parse the destination channel of <=> or => */
            i=ParsePixelChannelOption(token);
            if (i < 0)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  OptionError,"UnrecognizedChannelType","`%s'",token);
                destination_image=DestroyImageList(destination_image);
                return(destination_image);
              }
          }
        destination_channel=(PixelChannel) i;
        if (i >= (ssize_t) GetPixelChannels(destination_image))
          (void) SetPixelMetaChannels(destination_image,(size_t) (
            destination_channel-GetPixelChannels(destination_image)+1),
            exception);
        if (image->colorspace != UndefinedColorspace)
          switch (destination_channel)
          {
            case RedPixelChannel:
            case GreenPixelChannel:
            case BluePixelChannel:
            case BlackPixelChannel:
            case IndexPixelChannel:
              break;
            case AlphaPixelChannel:
            {
              destination_image->alpha_trait=BlendPixelTrait;
              break;
            }
            case CompositeMaskPixelChannel:
            {
              destination_image->channels=(ChannelType)
                (destination_image->channels | CompositeMaskChannel);
              break;
            }
            case ReadMaskPixelChannel:
            {
              destination_image->channels=(ChannelType)
                (destination_image->channels | ReadMaskChannel);
              break;
            }
            case WriteMaskPixelChannel:
            {
              destination_image->channels=(ChannelType)
                (destination_image->channels | WriteMaskChannel);
              break;
            }
            case MetaPixelChannel:
            default:
            {
              (void) SetPixelMetaChannels(destination_image,(size_t) (
                destination_channel-GetPixelChannels(destination_image)+1),
                exception);
              break;
            }
          }
        channel_mask=(ChannelType) (channel_mask | ParseChannelOption(token));
        if (((channels >= 1) || (destination_channel >= 1)) &&
            (IsGrayColorspace(destination_image->colorspace) != MagickFalse))
          (void) SetImageColorspace(destination_image,sRGBColorspace,exception);
        (void) GetNextToken(p,&p,MagickPathExtent,token);
        break;
      }
      default:
        break;
    }
    /* perform the parsed operation */
    status=ChannelImage(destination_image,destination_channel,channel_op,
      source_image,source_channel,ClampToQuantum(pixel),exception);
    if (status == MagickFalse)
      {
        destination_image=DestroyImageList(destination_image);
        break;
      }
    channels++;
    if (channel_op == ExchangeChannelOp)
      {
        /* second half of the swap: copy back in the other direction */
        status=ChannelImage(destination_image,source_channel,channel_op,
          source_image,destination_channel,ClampToQuantum(pixel),exception);
        if (status == MagickFalse)
          {
            destination_image=DestroyImageList(destination_image);
            break;
          }
        channels++;
      }
    switch (channel_op)
    {
      case ExtractChannelOp:
      {
        /* bare extraction fills destination channels in order */
        channel_mask=(ChannelType) (channel_mask |
          (1UL << destination_channel));
        destination_channel=(PixelChannel) (destination_channel+1);
        break;
      }
      default:
        break;
    }
    status=SetImageProgress(source_image,ChannelFxImageTag,p-expression,
      strlen(expression));
    if (status == MagickFalse)
      break;
  }
  (void) SetPixelChannelMask(destination_image,channel_mask);
  if ((channel_op == ExtractChannelOp) && (channels == 1))
    {
      (void) SetPixelMetaChannels(destination_image,0,exception);
      (void) SetImageColorspace(destination_image,GRAYColorspace,exception);
    }
  return(GetFirstImageInList(destination_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m b i n e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CombineImages() combines one or more images into a single image. The
% grayscale value of the pixels of each image in the sequence is assigned in
% order to the specified channels of the combined image. The typical
% ordering would be image 1 => Red, 2 => Green, 3 => Blue, etc.
%
% The format of the CombineImages method is:
%
% Image *CombineImages(const Image *images,const ColorspaceType colorspace,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o colorspace: the image colorspace.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CombineImages(const Image *image,
  const ColorspaceType colorspace,ExceptionInfo *exception)
{
#define CombineImageTag "Combine/Image"

  CacheView
    *combine_view;

  Image
    *combine_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Ensure the image are the same size.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  combine_image=CloneImage(image,0,0,MagickTrue,exception);
  if (combine_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(combine_image,DirectClass,exception) == MagickFalse)
    {
      combine_image=DestroyImage(combine_image);
      return((Image *) NULL);
    }
  /* pick the output colorspace: explicit request, else RGB for linear
     gamma, else sRGB */
  if (colorspace != UndefinedColorspace)
    (void) SetImageColorspace(combine_image,colorspace,exception);
  else
    if (fabs(image->gamma-1.0) <= MagickEpsilon)
      (void) SetImageColorspace(combine_image,RGBColorspace,exception);
    else
      (void) SetImageColorspace(combine_image,sRGBColorspace,exception);
  /* extra input images beyond the colorspace's channel count feed alpha */
  switch (combine_image->colorspace)
  {
    case UndefinedColorspace:
    case sRGBColorspace:
    {
      if (GetImageListLength(image) > 3)
        combine_image->alpha_trait=BlendPixelTrait;
      break;
    }
    case LinearGRAYColorspace:
    case GRAYColorspace:
    {
      if (GetImageListLength(image) > 1)
        combine_image->alpha_trait=BlendPixelTrait;
      break;
    }
    case CMYKColorspace:
    {
      if (GetImageListLength(image) > 4)
        combine_image->alpha_trait=BlendPixelTrait;
      break;
    }
    default:
      break;
  }
  /*
    Combine images: channel i of each output row is filled from the
    grayscale values of input image i (list order).
  */
  status=MagickTrue;
  progress=0;
  combine_view=AcquireAuthenticCacheView(combine_image,exception);
  for (y=0; y < (ssize_t) combine_image->rows; y++)
  {
    CacheView
      *image_view;

    const Image
      *next;

    Quantum
      *pixels;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      i;

    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(combine_view,0,y,combine_image->columns,
      1,exception);
    if (pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    next=image;
    for (i=0; i < (ssize_t) GetPixelChannels(combine_image); i++)
    {
      register ssize_t
        x;

      PixelChannel channel = GetPixelChannelChannel(combine_image,i);
      PixelTrait traits = GetPixelChannelTraits(combine_image,channel);
      if (traits == UndefinedPixelTrait)
        continue;
      if (next == (Image *) NULL)
        continue;  /* fewer input images than output channels */
      image_view=AcquireVirtualCacheView(next,exception);
      p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
      if (p == (const Quantum *) NULL)
        continue;
      q=pixels;
      for (x=0; x < (ssize_t) combine_image->columns; x++)
      {
        if (x < (ssize_t) next->columns)
          {
            q[i]=GetPixelGray(next,p);
            p+=GetPixelChannels(next);
          }
        q+=GetPixelChannels(combine_image);
      }
      image_view=DestroyCacheView(image_view);
      next=GetNextImageInList(next);
    }
    if (SyncCacheViewAuthenticPixels(combine_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,CombineImageTag,progress,
          combine_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  combine_view=DestroyCacheView(combine_view);
  if (status == MagickFalse)
    combine_image=DestroyImage(combine_image);
  return(combine_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e A l p h a C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageAlphaChannel() returns MagickFalse if the image alpha channel is
% not activated. That is, the image is RGB rather than RGBA or CMYK rather
% than CMYKA.
%
% The format of the GetImageAlphaChannel method is:
%
% MagickBooleanType GetImageAlphaChannel(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/* Report whether the image carries an active alpha channel (RGBA/CMYKA
 * rather than plain RGB/CMYK). */
MagickExport MagickBooleanType GetImageAlphaChannel(const Image *image)
{
  assert(image != (const Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (image->alpha_trait == UndefinedPixelTrait)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p a r a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SeparateImage() separates a channel from the image and returns it as a
% grayscale image.
%
% The format of the SeparateImage method is:
%
% Image *SeparateImage(const Image *image,const ChannelType channel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the image channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SeparateImage(const Image *image,
  const ChannelType channel_type,ExceptionInfo *exception)
{
#define GetChannelBit(mask,bit) (((size_t) (mask) >> (size_t) (bit)) & 0x01)
#define SeparateImageTag "Separate/Image"

  CacheView
    *image_view,
    *separate_view;

  Image
    *separate_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Initialize separate image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  separate_image=CloneImage(image,0,0,MagickTrue,exception);
  if (separate_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(separate_image,DirectClass,exception) == MagickFalse)
    {
      separate_image=DestroyImage(separate_image);
      return((Image *) NULL);
    }
  /* result is a grayscale image without alpha */
  separate_image->alpha_trait=UndefinedPixelTrait;
  (void) SetImageColorspace(separate_image,GRAYColorspace,exception);
  separate_image->gamma=image->gamma;
  /*
    Separate image: copy the selected channel(s) into the gray channel.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  separate_view=AcquireAuthenticCacheView(separate_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(separate_view,0,y,separate_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      SetPixelChannel(separate_image,GrayPixelChannel,(Quantum) 0,q);
      /* if several channels are selected, the last one visited wins */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (GetChannelBit(channel_type,channel) == 0))
          continue;
        SetPixelChannel(separate_image,GrayPixelChannel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(separate_image);
    }
    if (SyncCacheViewAuthenticPixels(separate_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SeparateImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  separate_view=DestroyCacheView(separate_view);
  image_view=DestroyCacheView(image_view);
  (void) SetImageChannelMask(separate_image,DefaultChannels);
  if (status == MagickFalse)
    separate_image=DestroyImage(separate_image);
  return(separate_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p a r a t e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SeparateImages() returns a separate grayscale image for each channel
% specified.
%
% The format of the SeparateImages method is:
%
% Image *SeparateImages(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SeparateImages(const Image *image,ExceptionInfo *exception)
{
  Image
    *channel_image,
    *image_list;

  register ssize_t
    n;

  /*
    Produce one grayscale image per channel that carries the update trait;
    fall back to a single UndefinedChannel separation when none qualifies.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image_list=NewImageList();
  for (n=0; n < (ssize_t) GetPixelChannels(image); n++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,n);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if (traits == UndefinedPixelTrait)
      continue;
    if ((traits & UpdatePixelTrait) == 0)
      continue;
    channel_image=SeparateImage(image,(ChannelType) (1UL << channel),
      exception);
    if (channel_image != (Image *) NULL)
      AppendImageToList(&image_list,channel_image);
  }
  if (image_list == (Image *) NULL)
    image_list=SeparateImage(image,UndefinedChannel,exception);
  return(image_list);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e A l p h a C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageAlphaChannel() activates, deactivates, resets, or sets the alpha
% channel.
%
% The format of the SetImageAlphaChannel method is:
%
% MagickBooleanType SetImageAlphaChannel(Image *image,
% const AlphaChannelOption alpha_type,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o alpha_type: The alpha channel type: ActivateAlphaChannel,
% AssociateAlphaChannel, CopyAlphaChannel, DeactivateAlphaChannel,
% DisassociateAlphaChannel, ExtractAlphaChannel, OffAlphaChannel,
% OnAlphaChannel, OpaqueAlphaChannel, SetAlphaChannel, ShapeAlphaChannel,
% and TransparentAlphaChannel.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void FlattenPixelInfo(const Image *image,const PixelInfo *p,
  const double alpha,const Quantum *q,const double beta,
  Quantum *composite)
{
  double
    Da,
    gamma,
    Sa;

  register ssize_t
    i;

  /*
    Compose pixel p over pixel q ("over" operator) with the given alphas.
    Sa and Da are the source and destination alphas normalized to [0,1];
    gamma is the reciprocal of the composite alpha, used to un-premultiply
    the blended channel values.  The result is written into composite[],
    which callers typically alias to q.
  */
  Sa=QuantumScale*alpha;
  Da=QuantumScale*beta;  /* fixed: was spliced to the previous statement with a comma operator */
  gamma=Sa*(-Da)+Sa+Da;  /* composite alpha: Sa+Da-Sa*Da */
  gamma=PerceptibleReciprocal(gamma);
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,i);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if (traits == UndefinedPixelTrait)
      continue;
    switch (channel)
    {
      case RedPixelChannel:
      {
        composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,
          (double) p->red,alpha));
        break;
      }
      case GreenPixelChannel:
      {
        composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,
          (double) p->green,alpha));
        break;
      }
      case BluePixelChannel:
      {
        composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,
          (double) p->blue,alpha));
        break;
      }
      case BlackPixelChannel:
      {
        composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,
          (double) p->black,alpha));
        break;
      }
      case AlphaPixelChannel:
      {
        /* The composite alpha itself, scaled back to quantum range. */
        composite[i]=ClampToQuantum(QuantumRange*(Sa*(-Da)+Sa+Da));
        break;
      }
      default:
        break;
    }
  }
}
MagickExport MagickBooleanType SetImageAlphaChannel(Image *image,
  const AlphaChannelOption alpha_type,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  status=MagickTrue;
  switch (alpha_type)
  {
    case ActivateAlphaChannel:
    {
      /* Enable the existing alpha data without touching pixel values. */
      image->alpha_trait=BlendPixelTrait;
      break;
    }
    case AssociateAlphaChannel:
    {
      /*
        Associate alpha: premultiply every updatable color channel by the
        normalized alpha value of its pixel.
      */
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            gamma;

          register ssize_t
            i;

          gamma=QuantumScale*GetPixelAlpha(image,q);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (channel == AlphaPixelChannel)
              continue;
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[i]=ClampToQuantum(gamma*q[i]);
          }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      /* CopyPixelTrait marks the channel as present but not blended. */
      image->alpha_trait=CopyPixelTrait;
      return(status);
    }
    case BackgroundAlphaChannel:
    {
      /*
        Set transparent pixels to background color.
      */
      if (image->alpha_trait == UndefinedPixelTrait)
        break;
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          /* Only fully transparent pixels are replaced; the pixel stays
             transparent, only its color is changed. */
          if (GetPixelAlpha(image,q) == TransparentAlpha)
            {
              SetPixelViaPixelInfo(image,&image->background_color,q);
              SetPixelChannel(image,AlphaPixelChannel,TransparentAlpha,q);
            }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      return(status);
    }
    case CopyAlphaChannel:
    {
      /* Derive alpha from the image intensity via a self-composite. */
      image->alpha_trait=UpdatePixelTrait;
      status=CompositeImage(image,image,IntensityCompositeOp,MagickTrue,0,0,
        exception);
      break;
    }
    case DeactivateAlphaChannel:
    {
      /* Keep alpha data around but stop blending with it. */
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlpha(image,OpaqueAlpha,exception);
      image->alpha_trait=CopyPixelTrait;
      break;
    }
    case DisassociateAlphaChannel:
    {
      /*
        Disassociate alpha: divide each updatable color channel by the
        normalized alpha value (inverse of AssociateAlphaChannel).
      */
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image->alpha_trait=BlendPixelTrait;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            gamma,
            Sa;

          register ssize_t
            i;

          Sa=QuantumScale*GetPixelAlpha(image,q);
          /* PerceptibleReciprocal() guards the divide against Sa == 0. */
          gamma=PerceptibleReciprocal(Sa);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (channel == AlphaPixelChannel)
              continue;
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[i]=ClampToQuantum(gamma*q[i]);
          }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      image->alpha_trait=UndefinedPixelTrait;
      return(status);
    }
    case DiscreteAlphaChannel:
    {
      /* Enable alpha but treat it as an independent channel (no blending). */
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlpha(image,OpaqueAlpha,exception);
      image->alpha_trait=UpdatePixelTrait;
      break;
    }
    case ExtractAlphaChannel:
    {
      /* Copy alpha into the color channels, then disable alpha. */
      status=CompositeImage(image,image,AlphaCompositeOp,MagickTrue,0,0,
        exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case OffAlphaChannel:
    {
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case OnAlphaChannel:
    {
      /* Enable alpha, initializing it to opaque if absent. */
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlpha(image,OpaqueAlpha,exception);
      image->alpha_trait=BlendPixelTrait;
      break;
    }
    case OpaqueAlphaChannel:
    {
      status=SetImageAlpha(image,OpaqueAlpha,exception);
      break;
    }
    case RemoveAlphaChannel:
    {
      /*
        Remove transparency by flattening each pixel over the background
        color.
      */
      if (image->alpha_trait == UndefinedPixelTrait)
        break;
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          FlattenPixelInfo(image,&image->background_color,
            image->background_color.alpha,q,(double) GetPixelAlpha(image,q),q);
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      image->alpha_trait=image->background_color.alpha_trait;
      break;
    }
    case SetAlphaChannel:
    {
      /* Initialize alpha to opaque only if the image has none. */
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlpha(image,OpaqueAlpha,exception);
      break;
    }
    case ShapeAlphaChannel:
    {
      PixelInfo
        background;

      /*
        Remove transparency: shape the image by writing the background
        color everywhere, with alpha taken from each pixel's intensity.
      */
      ConformPixelInfo(image,&image->background_color,&background,exception);
      background.alpha_trait=BlendPixelTrait;
      image->alpha_trait=BlendPixelTrait;
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        PixelInfo
          pixel;

        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        pixel=background;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          pixel.alpha=GetPixelIntensity(image,q);
          SetPixelViaPixelInfo(image,&pixel,q);
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      break;
    }
    case TransparentAlphaChannel:
    {
      status=SetImageAlpha(image,TransparentAlpha,exception);
      break;
    }
    case UndefinedAlphaChannel:
      break;
  }
  if (status == MagickFalse)
    return(status);
  (void) SetPixelChannelMask(image,image->channel_mask);
  return(SyncImagePixelCache(image,exception));
}
|
cache_wkld.c | /*
* Code to simulate a cache-intensive workload for lab assignment in [A2] Task Mapping on Soft Heterogeneous Systems.
 * Workload consists of a simple parallel initialization routine.
* Implementation is not optimized! Only meant to be used in conjunction with lab assignment.
 * The performance of the workload is sensitive to the cache size. In a task mapping scenario,
* this workload should be mapped to the set of cores with largest cache.
*
* @author: Apan Qasem <apan@txstate.edu>
* @date: 04/02/20
*
* @update: 03/12/21
*/
#include<stdlib.h>
#include<stdio.h>
#include<sys/time.h>
#include <omp.h>
#define ELEMENTS_TO_VERIFY 1
#define REPS 50
/* timer function */
double get_time_in_seconds() {
struct timeval tp;
struct timezone tzp;
int i;
i = gettimeofday(&tp,&tzp);
return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
}
int main(int argc, char *argv[]) {
  int **a;

  /*
   * Both N and t are required.  The original test (argc < 2) let a missing
   * thread count fall through to an out-of-bounds read of argv[2].
   */
  if (argc < 3) {
    printf("usage: \n");
    printf("  ./cache_wkld N t\n");
    printf("  N = input size\n");
    printf("  t = number of OpenMP threads\n");
    exit(0);
  }

  /* atoll, not atoi: N is declared long long and atoi would truncate. */
  long long N = atoll(argv[1]);
  unsigned threads = (unsigned) atoi(argv[2]);
  if (N <= 0) {
    printf("N must be a positive integer\n");
    exit(0);
  }
  omp_set_num_threads(threads);

  /* Allocate an N x N matrix as an array of row pointers. */
  a = (int **) malloc(sizeof(int *) * N);
  int i, j, k;
  for (i = 0; i < N; i++)
    a[i] = (int *) malloc(sizeof(int) * N);

  double start_time, end_time;
  start_time = get_time_in_seconds();

  /* REPS independent sweeps over the (N-1)x(N-1) submatrix are distributed
     across threads; every iteration writes the same constant, so the
     concurrent stores are benign. */
#pragma omp parallel for private(j,i)
  for (k = 0; k < REPS; k++) {
    for (j = 1; j < N; j++)
      for (i = 1; i < N; i++)
        a[i][j] = 17;
  }
  end_time = get_time_in_seconds();
  fprintf(stdout, "\033[1;33m[wk2] compute time = %.3f s\n\033[0m", end_time - start_time);

#ifdef VERIFY
  /* The init loops start at index 1, so row/column 0 are never written;
     verify against a[1][i+1] instead of the uninitialized a[0][i]
     (requires N >= 2). */
  fprintf(stdout, "Verification: ");
  for (int i = 0; i < ELEMENTS_TO_VERIFY; i++)
    fprintf(stdout, "%d\n", a[1][i + 1]);
#endif

  /* Release the matrix (the original leaked it). */
  for (i = 0; i < N; i++)
    free(a[i]);
  free(a);
  return 0;
}
|
compress.c |
/*-------------------------------------------------------------*/
/*--- Compression machinery (not incl block sorting) ---*/
/*--- compress.c ---*/
/*-------------------------------------------------------------*/
/* ------------------------------------------------------------------
This file is part of bzip2/libbzip2, a program and library for
lossless, block-sorting data compression.
bzip2/libbzip2 version 1.0.6 of 6 September 2010
Copyright (C) 1996-2010 Julian Seward <jseward@bzip.org>
Please read the WARNING, DISCLAIMER and PATENTS sections in the
README file.
This program is released under the terms of the license contained
in the file LICENSE.
------------------------------------------------------------------ */
/* CHANGES
0.9.0 -- original version.
0.9.0a/b -- no changes in this file.
0.9.0c -- changed setting of nGroups in sendMTFValues()
so as to do a bit better on small files
*/
#include "bzlib_private.h"
#include<omp.h>
#include<pthread.h>
#include<semaphore.h>
/*---------------------------------------------------*/
/*--- Bit stream I/O ---*/
/*---------------------------------------------------*/
/*---------------------------------------------------*/
void BZ2_bsInitWrite ( EState* s )
{
   /* Reset the bit-stream accumulator to an empty state before writing. */
   s->bsBuff = 0;
   s->bsLive = 0;
}
/*---------------------------------------------------*/
static
void bsFinishWrite ( EState* s )
{
   /* Drain every remaining bit to zbits[], zero-padding the final byte;
      bytes leave the 32-bit accumulator most-significant first. */
   for (; s->bsLive > 0; s->bsLive -= 8) {
      s->zbits[s->numZ++] = (UChar)(s->bsBuff >> 24);
      s->bsBuff <<= 8;
   }
}
/*---------------------------------------------------*/
/* Flush completed bytes out of the bit accumulator so there is room to
   OR in more bits; the nz argument is unused by the macro body. */
#define bsNEEDW(nz)                           \
{                                             \
   while (s->bsLive >= 8) {                   \
      s->zbits[s->numZ]                       \
         = (UChar)(s->bsBuff >> 24);          \
      s->numZ++;                              \
      s->bsBuff <<= 8;                        \
      s->bsLive -= 8;                         \
   }                                          \
}
/*---------------------------------------------------*/
static
__inline__
void bsW ( EState* s, Int32 n, UInt32 v )
{
   /* Append the n low-order bits of v to the output stream, most
      significant bit first.  bsNEEDW drains completed bytes first, leaving
      bsLive <= 7, so the shift below stays in range for the field widths
      used by this file. */
   bsNEEDW ( n );
   s->bsBuff |= (v << (32 - s->bsLive - n));
   s->bsLive += n;
}
/*---------------------------------------------------*/
static
void bsPutUInt32 ( EState* s, UInt32 u )
{
   /* Emit a 32-bit value as four bytes, big-endian (high byte first). */
   Int32 shift;
   for (shift = 24; shift >= 0; shift -= 8)
      bsW ( s, 8, (u >> shift) & 0xffL );
}
/*---------------------------------------------------*/
static
void bsPutUChar ( EState* s, UChar c )
{
   /* Emit a single byte (8 bits) to the output bit stream. */
   bsW( s, 8, (UInt32)c );
}
/*---------------------------------------------------*/
/*--- The back end proper ---*/
/*---------------------------------------------------*/
/*---------------------------------------------------*/
static
void makeMaps_e ( EState* s )
{
   /* Build the dense symbol map: unseqToSeq[] assigns each byte value
      that occurs in the block a compact code in [0, nInUse), preserving
      byte order; nInUse counts the distinct values seen. */
   Int32 sym;
   Int32 next = 0;
   for (sym = 0; sym < 256; sym++) {
      if (!s->inUse[sym]) continue;
      s->unseqToSeq[sym] = next;
      next++;
   }
   s->nInUse = next;
}
/*---------------------------------------------------*/
static
void generateMTFValues ( EState* s )
{
   UChar   yy[256];
   Int32   i, j;
   Int32   zPend;
   Int32   wr;
   Int32   EOB;

   /*
      After sorting (eg, here),
        s->arr1 [ 0 .. s->nblock-1 ] holds sorted order,
      and
        ((UChar*)s->arr2) [ 0 .. s->nblock-1 ]
      holds the original block data.

      The first thing to do is generate the MTF values,
      and put them in
        ((UInt16*)s->arr1) [ 0 .. s->nblock-1 ].
      Because there are strictly fewer or equal MTF values
      than block values, ptr values in this area are overwritten
      with MTF values only when they are no longer needed.

      The final compressed bitstream is generated into the
      area starting at
        (UChar*) (&((UChar*)s->arr2)[s->nblock])

      These storage aliases are set up in bzCompressInit(),
      except for the last one, which is arranged in
      compressBlock().
   */
   UInt32* ptr   = s->ptr;
   UChar* block  = s->block;
   UInt16* mtfv  = s->mtfv;

   makeMaps_e ( s );
   EOB = s->nInUse+1;

   /* Clear symbol frequencies; sendMTFValues() consumes them later. */
   for (i = 0; i <= EOB; i++) s->mtfFreq[i] = 0;

   wr = 0;
   zPend = 0;
   /* yy[] is the move-to-front list, initially the identity ordering. */
   for (i = 0; i < s->nInUse; i++) yy[i] = (UChar) i;

   for (i = 0; i < s->nblock; i++) {
      UChar ll_i;
      AssertD ( wr <= i, "generateMTFValues(1)" );
      /* The BWT output byte at sorted position i is the byte preceding
         the rotation start, wrapping around the block. */
      j = ptr[i]-1; if (j < 0) j += s->nblock;
      ll_i = s->unseqToSeq[block[j]];
      AssertD ( ll_i < s->nInUse, "generateMTFValues(2a)" );

      if (yy[0] == ll_i) {
         /* Symbol already at the front: extend the pending zero run. */
         zPend++;
      } else {

         if (zPend > 0) {
            /* Flush the zero run, encoded in bijective base 2 with the
               RUNA/RUNB symbols. */
            zPend--;
            while (True) {
               if (zPend & 1) {
                  mtfv[wr] = BZ_RUNB; wr++;
                  s->mtfFreq[BZ_RUNB]++;
               } else {
                  mtfv[wr] = BZ_RUNA; wr++;
                  s->mtfFreq[BZ_RUNA]++;
               }
               if (zPend < 2) break;
               zPend = (zPend - 2) / 2;
            };
            zPend = 0;
         }
         {
            /* Move ll_i to the front of yy[], shifting the symbols it
               passes up one slot; the distance moved (offset by 1, since
               value 0 is reserved for runs) is the emitted MTF value. */
            register UChar  rtmp;
            register UChar* ryy_j;
            register UChar  rll_i;
            rtmp  = yy[1];
            yy[1] = yy[0];
            ryy_j = &(yy[1]);
            rll_i = ll_i;
            while ( rll_i != rtmp ) {
               register UChar rtmp2;
               ryy_j++;
               rtmp2  = rtmp;
               rtmp   = *ryy_j;
               *ryy_j = rtmp2;
            };
            yy[0] = rtmp;
            j = ryy_j - &(yy[0]);
            mtfv[wr] = j+1; wr++; s->mtfFreq[j+1]++;
         }

      }
   }

   if (zPend > 0) {
      /* Flush any zero run pending at the end of the block. */
      zPend--;
      while (True) {
         if (zPend & 1) {
            mtfv[wr] = BZ_RUNB; wr++;
            s->mtfFreq[BZ_RUNB]++;
         } else {
            mtfv[wr] = BZ_RUNA; wr++;
            s->mtfFreq[BZ_RUNA]++;
         }
         if (zPend < 2) break;
         zPend = (zPend - 2) / 2;
      };
      zPend = 0;
   }

   /* Terminate with the end-of-block symbol. */
   mtfv[wr] = EOB; wr++; s->mtfFreq[EOB]++;

   s->nMTF = wr;
}
/*---------------------------------------------------*/
#define BZ_LESSER_ICOST 0
#define BZ_GREATER_ICOST 15
static
void sendMTFValues ( EState* s )
{
   /* Huffman-code the MTF values: iteratively refine up to six coding
      tables, pick a table per 50-symbol group (the selectors), then emit
      the symbol map, selectors, code lengths and coded data. */
   Int32 v, t, i, j, gs, ge, totc, bt, bc, iter;
   Int32 nSelectors, alphaSize, minLen, maxLen, selCtr;
   Int32 nGroups, nBytes;

   /*--
   UChar  len [BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
   is a global since the decoder also needs it.

   Int32  code[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
   Int32  rfreq[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
   are also globals only used in this proc.
   Made global to keep stack frame size small.
   --*/

   UInt16 cost[BZ_N_GROUPS];
   Int32  fave[BZ_N_GROUPS];

   UInt16* mtfv = s->mtfv;

   if (s->verbosity >= 3)
      VPrintf3( "      %d in block, %d after MTF & 1-2 coding, "
                "%d+2 syms in use\n",
                s->nblock, s->nMTF, s->nInUse );

   /* Start every table with maximal (unfavorable) code lengths. */
   alphaSize = s->nInUse+2;
   for (t = 0; t < BZ_N_GROUPS; t++)
      for (v = 0; v < alphaSize; v++)
         s->len[t][v] = BZ_GREATER_ICOST;

   /*--- Decide how many coding tables to use ---*/
   AssertH ( s->nMTF > 0, 3001 );
   if (s->nMTF < 200)  nGroups = 2; else
   if (s->nMTF < 600)  nGroups = 3; else
   if (s->nMTF < 1200) nGroups = 4; else
   if (s->nMTF < 2400) nGroups = 5; else
                       nGroups = 6;

   /*--- Generate an initial set of coding tables ---*/
   {
      /* Partition the alphabet into nGroups contiguous ranges of roughly
         equal total frequency; each table starts out cheap for its own
         range and expensive elsewhere. */
      Int32 nPart, remF, tFreq, aFreq;

      nPart = nGroups;
      remF  = s->nMTF;
      gs = 0;
      while (nPart > 0) {
         tFreq = remF / nPart;
         ge = gs-1;
         aFreq = 0;
         while (aFreq < tFreq && ge < alphaSize-1) {
            ge++;
            aFreq += s->mtfFreq[ge];
         }

         if (ge > gs
             && nPart != nGroups && nPart != 1
             && ((nGroups-nPart) % 2 == 1)) {
            aFreq -= s->mtfFreq[ge];
            ge--;
         }

         if (s->verbosity >= 3)
            VPrintf5( "      initial group %d, [%d .. %d], "
                      "has %d syms (%4.1f%%)\n",
                      nPart, gs, ge, aFreq,
                      (100.0 * (float)aFreq) / (float)(s->nMTF) );

         for (v = 0; v < alphaSize; v++)
            if (v >= gs && v <= ge)
               s->len[nPart-1][v] = BZ_LESSER_ICOST; else
               s->len[nPart-1][v] = BZ_GREATER_ICOST;

         nPart--;
         gs = ge+1;
         remF -= aFreq;
      }
   }

   /*---
      Iterate up to BZ_N_ITERS times to improve the tables.
   ---*/
   for (iter = 0; iter < BZ_N_ITERS; iter++) {

      for (t = 0; t < nGroups; t++) fave[t] = 0;

      for (t = 0; t < nGroups; t++)
         for (v = 0; v < alphaSize; v++)
            s->rfreq[t][v] = 0;

      /*---
        Set up an auxiliary length table which is used to fast-track
        the common case (nGroups == 6).
      ---*/
      if (nGroups == 6) {
         for (v = 0; v < alphaSize; v++) {
            s->len_pack[v][0] = (s->len[1][v] << 16) | s->len[0][v];
            s->len_pack[v][1] = (s->len[3][v] << 16) | s->len[2][v];
            s->len_pack[v][2] = (s->len[5][v] << 16) | s->len[4][v];
         }
      }

      nSelectors = 0;
      totc = 0;
      gs = 0;
      while (True) {

         /*--- Set group start & end marks. --*/
         if (gs >= s->nMTF) break;
         ge = gs + BZ_G_SIZE - 1;
         if (ge >= s->nMTF) ge = s->nMTF-1;

         /*--
            Calculate the cost of this group as coded
            by each of the coding tables.
         --*/
         for (t = 0; t < nGroups; t++) cost[t] = 0;

         if (nGroups == 6 && 50 == ge-gs+1) {
            /*--- fast track the common case ---*/
            register UInt32 cost01, cost23, cost45;
            register UInt16 icv;
            cost01 = cost23 = cost45 = 0;

#           define BZ_ITER(nn)                \
               icv = mtfv[gs+(nn)];           \
               cost01 += s->len_pack[icv][0]; \
               cost23 += s->len_pack[icv][1]; \
               cost45 += s->len_pack[icv][2]; \

            BZ_ITER(0);  BZ_ITER(1);  BZ_ITER(2);  BZ_ITER(3);  BZ_ITER(4);
            BZ_ITER(5);  BZ_ITER(6);  BZ_ITER(7);  BZ_ITER(8);  BZ_ITER(9);
            BZ_ITER(10); BZ_ITER(11); BZ_ITER(12); BZ_ITER(13); BZ_ITER(14);
            BZ_ITER(15); BZ_ITER(16); BZ_ITER(17); BZ_ITER(18); BZ_ITER(19);
            BZ_ITER(20); BZ_ITER(21); BZ_ITER(22); BZ_ITER(23); BZ_ITER(24);
            BZ_ITER(25); BZ_ITER(26); BZ_ITER(27); BZ_ITER(28); BZ_ITER(29);
            BZ_ITER(30); BZ_ITER(31); BZ_ITER(32); BZ_ITER(33); BZ_ITER(34);
            BZ_ITER(35); BZ_ITER(36); BZ_ITER(37); BZ_ITER(38); BZ_ITER(39);
            BZ_ITER(40); BZ_ITER(41); BZ_ITER(42); BZ_ITER(43); BZ_ITER(44);
            BZ_ITER(45); BZ_ITER(46); BZ_ITER(47); BZ_ITER(48); BZ_ITER(49);

#           undef BZ_ITER

            cost[0] = cost01 & 0xffff; cost[1] = cost01 >> 16;
            cost[2] = cost23 & 0xffff; cost[3] = cost23 >> 16;
            cost[4] = cost45 & 0xffff; cost[5] = cost45 >> 16;

         } else {
            /*--- slow version which correctly handles all situations ---*/
            for (i = gs; i <= ge; i++) {
               UInt16 icv = mtfv[i];
               for (t = 0; t < nGroups; t++) cost[t] += s->len[t][icv];
            }
         }

         /*--
            Find the coding table which is best for this group,
            and record its identity in the selector table.
         --*/
         bc = 999999999; bt = -1;
         for (t = 0; t < nGroups; t++)
            if (cost[t] < bc) { bc = cost[t]; bt = t; };
         totc += bc;
         fave[bt]++;
         s->selector[nSelectors] = bt;
         nSelectors++;

         /*--
            Increment the symbol frequencies for the selected table.
          --*/
         if (nGroups == 6 && 50 == ge-gs+1) {
            /*--- fast track the common case ---*/

#           define BZ_ITUR(nn) s->rfreq[bt][ mtfv[gs+(nn)] ]++

            BZ_ITUR(0);  BZ_ITUR(1);  BZ_ITUR(2);  BZ_ITUR(3);  BZ_ITUR(4);
            BZ_ITUR(5);  BZ_ITUR(6);  BZ_ITUR(7);  BZ_ITUR(8);  BZ_ITUR(9);
            BZ_ITUR(10); BZ_ITUR(11); BZ_ITUR(12); BZ_ITUR(13); BZ_ITUR(14);
            BZ_ITUR(15); BZ_ITUR(16); BZ_ITUR(17); BZ_ITUR(18); BZ_ITUR(19);
            BZ_ITUR(20); BZ_ITUR(21); BZ_ITUR(22); BZ_ITUR(23); BZ_ITUR(24);
            BZ_ITUR(25); BZ_ITUR(26); BZ_ITUR(27); BZ_ITUR(28); BZ_ITUR(29);
            BZ_ITUR(30); BZ_ITUR(31); BZ_ITUR(32); BZ_ITUR(33); BZ_ITUR(34);
            BZ_ITUR(35); BZ_ITUR(36); BZ_ITUR(37); BZ_ITUR(38); BZ_ITUR(39);
            BZ_ITUR(40); BZ_ITUR(41); BZ_ITUR(42); BZ_ITUR(43); BZ_ITUR(44);
            BZ_ITUR(45); BZ_ITUR(46); BZ_ITUR(47); BZ_ITUR(48); BZ_ITUR(49);

#           undef BZ_ITUR

         } else {
            /*--- slow version which correctly handles all situations ---*/
            for (i = gs; i <= ge; i++)
               s->rfreq[bt][ mtfv[i] ]++;
         }

         gs = ge+1;
      }
      if (s->verbosity >= 3) {
         VPrintf2 ( "      pass %d: size is %d, grp uses are ",
                   iter+1, totc/8 );
         for (t = 0; t < nGroups; t++)
            VPrintf1 ( "%d ", fave[t] );
         VPrintf0 ( "\n" );
      }

      /*--
        Recompute the tables based on the accumulated frequencies.
      --*/
      /* maxLen was changed from 20 to 17 in bzip2-1.0.3.  See
         comment in huffman.c for details. */
      for (t = 0; t < nGroups; t++)
         BZ2_hbMakeCodeLengths ( &(s->len[t][0]), &(s->rfreq[t][0]),
                                 alphaSize, 17 /*20*/ );
   }

   AssertH( nGroups < 8, 3002 );
   AssertH( /* nSelectors < 32768 && */
            nSelectors <= BZ_MAX_SELECTORS,
            3003 );

   /*--- Compute MTF values for the selectors. ---*/
   {
      UChar pos[BZ_N_GROUPS], ll_i, tmp2, tmp;
      for (i = 0; i < nGroups; i++) pos[i] = i;
      for (i = 0; i < nSelectors; i++) {
         ll_i = s->selector[i];
         j = 0;
         tmp = pos[j];
         while ( ll_i != tmp ) {
            j++;
            tmp2 = tmp;
            tmp = pos[j];
            pos[j] = tmp2;
         };
         pos[0] = tmp;
         s->selectorMtf[i] = j;
      }
   };

   /*--- Assign actual codes for the tables. --*/
   for (t = 0; t < nGroups; t++) {
      minLen = 32;
      maxLen = 0;
      for (i = 0; i < alphaSize; i++) {
         if (s->len[t][i] > maxLen) maxLen = s->len[t][i];
         if (s->len[t][i] < minLen) minLen = s->len[t][i];
      }
      AssertH ( !(maxLen > 17 /*20*/ ), 3004 );
      AssertH ( !(minLen < 1),          3005 );
      BZ2_hbAssignCodes ( &(s->code[t][0]), &(s->len[t][0]),
                          minLen, maxLen, alphaSize );
   }

   /*--- Transmit the mapping table. ---*/
   {
      /* Two-level bitmap: 16 bits say which 16-value ranges are occupied,
         then 16 bits for each occupied range. */
      Bool inUse16[16];
      for (i = 0; i < 16; i++) {
          inUse16[i] = False;
          for (j = 0; j < 16; j++)
             if (s->inUse[i * 16 + j]) inUse16[i] = True;
      }

      nBytes = s->numZ;
      for (i = 0; i < 16; i++)
         if (inUse16[i]) bsW(s,1,1); else bsW(s,1,0);

      for (i = 0; i < 16; i++)
         if (inUse16[i])
            for (j = 0; j < 16; j++) {
               if (s->inUse[i * 16 + j]) bsW(s,1,1); else bsW(s,1,0);
            }

      if (s->verbosity >= 3)
         VPrintf1( "      bytes: mapping %d, ", s->numZ-nBytes );
   }

   /*--- Now the selectors. ---*/
   nBytes = s->numZ;
   /* NOTE(review): the field widths below were widened from the standard
      bzip2 stream (3 -> 5 bits for nGroups, 15 -> 17 bits for nSelectors);
      the output is NOT decodable by stock bzip2 — confirm the matching
      decoder change exists. */
   bsW ( s, 5 /* 3 */, nGroups ); // changed @aditya
   bsW ( s, 17 /* 15 */, nSelectors ); // changed @aditya
   for (i = 0; i < nSelectors; i++) {
      for (j = 0; j < s->selectorMtf[i]; j++) bsW(s,1,1);
      bsW(s,1,0);
   }
   if (s->verbosity >= 3)
      VPrintf1( "selectors %d, ", s->numZ-nBytes );

   /*--- Now the coding tables. ---*/
   nBytes = s->numZ;

   for (t = 0; t < nGroups; t++) {
      /* Delta-encode each length sequence: 10 = increment, 11 = decrement,
         0 = emit current length and advance to the next symbol. */
      Int32 curr = s->len[t][0];
      bsW ( s, 5, curr );
      for (i = 0; i < alphaSize; i++) {
         while (curr < s->len[t][i]) { bsW(s,2,2); curr++; /* 10 */ };
         while (curr > s->len[t][i]) { bsW(s,2,3); curr--; /* 11 */ };
         bsW ( s, 1, 0 );
      }
   }

   if (s->verbosity >= 3)
      VPrintf1 ( "code lengths %d, ", s->numZ-nBytes );

   /*--- And finally, the block data proper ---*/
   nBytes = s->numZ;
   selCtr = 0;
   gs = 0;
   while (True) {
      if (gs >= s->nMTF) break;
      ge = gs + BZ_G_SIZE - 1;
      if (ge >= s->nMTF) ge = s->nMTF-1;
      AssertH ( s->selector[selCtr] < nGroups, 3006 );

      if (nGroups == 6 && 50 == ge-gs+1) {
            /*--- fast track the common case ---*/
            UInt16 mtfv_i;
            UChar* s_len_sel_selCtr
               = &(s->len[s->selector[selCtr]][0]);
            Int32* s_code_sel_selCtr
               = &(s->code[s->selector[selCtr]][0]);

#           define BZ_ITAH(nn)                      \
               mtfv_i = mtfv[gs+(nn)];              \
               bsW ( s,                             \
                     s_len_sel_selCtr[mtfv_i],      \
                     s_code_sel_selCtr[mtfv_i] )

            BZ_ITAH(0);  BZ_ITAH(1);  BZ_ITAH(2);  BZ_ITAH(3);  BZ_ITAH(4);
            BZ_ITAH(5);  BZ_ITAH(6);  BZ_ITAH(7);  BZ_ITAH(8);  BZ_ITAH(9);
            BZ_ITAH(10); BZ_ITAH(11); BZ_ITAH(12); BZ_ITAH(13); BZ_ITAH(14);
            BZ_ITAH(15); BZ_ITAH(16); BZ_ITAH(17); BZ_ITAH(18); BZ_ITAH(19);
            BZ_ITAH(20); BZ_ITAH(21); BZ_ITAH(22); BZ_ITAH(23); BZ_ITAH(24);
            BZ_ITAH(25); BZ_ITAH(26); BZ_ITAH(27); BZ_ITAH(28); BZ_ITAH(29);
            BZ_ITAH(30); BZ_ITAH(31); BZ_ITAH(32); BZ_ITAH(33); BZ_ITAH(34);
            BZ_ITAH(35); BZ_ITAH(36); BZ_ITAH(37); BZ_ITAH(38); BZ_ITAH(39);
            BZ_ITAH(40); BZ_ITAH(41); BZ_ITAH(42); BZ_ITAH(43); BZ_ITAH(44);
            BZ_ITAH(45); BZ_ITAH(46); BZ_ITAH(47); BZ_ITAH(48); BZ_ITAH(49);

#           undef BZ_ITAH

      } else {
         /*--- slow version which correctly handles all situations ---*/
         for (i = gs; i <= ge; i++) {
            bsW ( s,
                  s->len  [s->selector[selCtr]] [mtfv[i]],
                  s->code [s->selector[selCtr]] [mtfv[i]] );
         }
      }

      gs = ge+1;
      selCtr++;
   }
   AssertH( selCtr == nSelectors, 3007 );

   if (s->verbosity >= 3)
      VPrintf1( "codes %d\n", s->numZ-nBytes );
}
/*
   Merge the two pre-sorted suffix-index arrays produced by the GPU block
   sort into a single rotation order in s->arr1, and record in s->origPtr
   the position of rotation 0 (needed by the BWT header).

   h_first_sort_index / h_second_sort_index hold block positions already
   sorted within their own class; h_first_sort_rank gives the rank of
   positions covered by the first sort.  Ties on the leading byte are
   broken by comparing one or two following bytes and then precomputed
   ranks, branching on (indexFirst % 3) — this looks like a DC3/skew-style
   merge of mod-3 position classes.  NOTE(review): the exact class split
   is inferred from the index arithmetic; confirm against gpuBlockSort.
*/
void merge_two_sort_arrays ( EState *s )
{
  unsigned char *block = (unsigned char*) s->arr2;
  unsigned int *h_first_sort_rank = (unsigned int*) s->arr1_first_sort_rank;
  unsigned int *h_first_sort_index = (unsigned int*) s->arr1_first_sort;
  unsigned int *h_second_sort_index = (unsigned int*) s->arr1_second_sort;
  unsigned int *order = (unsigned int*) s->arr1;

  /* stores position of index 0 in BWT transform */
  s->origPtr = -1;

  int originalLength = s->nblock;
  int firstSortLength = s->first_sort_length;
  int secondSortLength = originalLength - firstSortLength;

  int countOrderArr = 0;
  int countFirstArr = 0;
  int countSecondArr = 0;
  int indexFirst;
  int indexSecond;

  /* Standard two-way merge: take the smaller head suffix each round until
     one input array is exhausted. */
  for(countOrderArr = 0; countOrderArr < originalLength && countFirstArr < firstSortLength && countSecondArr < secondSortLength; countOrderArr++) {
    indexFirst = h_first_sort_index[countFirstArr];
    indexSecond = h_second_sort_index[countSecondArr];
    /* Fast path: leading bytes differ, no tie-break needed. */
    if(block[indexFirst] != block[indexSecond]) {
      block[indexFirst] < block[indexSecond] ? order[countOrderArr] = h_first_sort_index[countFirstArr++] : order[countOrderArr] = h_second_sort_index[countSecondArr++];
      continue;
    }
    if(indexFirst == originalLength - 1) {
      /* First suffix wraps around the end of the block; compare the
         wrapped byte(s) and fall back to ranks. */
      if(block[0] == block[indexSecond + 1]) {
        if(indexSecond == originalLength - 2) {
          if(block[1] == block[0]) {
            h_first_sort_rank[2] < h_first_sort_rank[1] ? order[countOrderArr] = h_first_sort_index[countFirstArr++] : order[countOrderArr] = h_second_sort_index[countSecondArr++];
          } else {
            block[1] < block[0] ? order[countOrderArr] = h_first_sort_index[countFirstArr++] : order[countOrderArr] = h_second_sort_index[countSecondArr++];
          }
        } else {
          h_first_sort_rank[1] < h_first_sort_rank[indexSecond+2] ? order[countOrderArr] = h_first_sort_index[countFirstArr++] : order[countOrderArr] = h_second_sort_index[countSecondArr++];
        }
      } else {
        block[0] < block[indexSecond+1] ? order[countOrderArr] = h_first_sort_index[countFirstArr++] : order[countOrderArr] = h_second_sort_index[countSecondArr++];
      }
    } else if(indexFirst % 3 == 1) {
      /* mod-1 position: ranks of the next positions decide immediately. */
      (h_first_sort_rank[indexFirst + 1] < h_first_sort_rank[indexSecond + 1]) ? order[countOrderArr] = h_first_sort_index[countFirstArr++] : order[countOrderArr] = h_second_sort_index[countSecondArr++];
    } else if(indexFirst % 3 == 2) {
      /* mod-2 position: compare one more byte, then ranks two ahead;
         scan forward while either side sits on a rank-less mod-0 slot. */
      if(block[indexFirst+1] == block[indexSecond+1]) {
        if(indexFirst + 2 == originalLength || indexSecond+2 == originalLength) {
          int itrIndexFirst = (indexFirst+2) % originalLength;
          int itrIndexSecond = (indexSecond+2) % originalLength;
          int foundDifference = 0;
          while(itrIndexFirst % 3 == 0 || itrIndexSecond % 3 == 0) {
            if(block[itrIndexFirst] != block[itrIndexSecond]) {
              block[itrIndexFirst] < block[itrIndexSecond] ? order[countOrderArr] = h_first_sort_index[countFirstArr++] : order[countOrderArr] = h_second_sort_index[countSecondArr++];
              foundDifference = 1;
              break;
            }
            itrIndexFirst = (itrIndexFirst+1)%originalLength;
            itrIndexSecond = (itrIndexSecond+1)%originalLength;
          }
          if(foundDifference == 1) {
            continue;
          }
          h_first_sort_rank[itrIndexFirst] < h_first_sort_rank[itrIndexSecond] ? order[countOrderArr] = h_first_sort_index[countFirstArr++] : order[countOrderArr] = h_second_sort_index[countSecondArr++];
        } else {
          (h_first_sort_rank[indexFirst+2] < h_first_sort_rank[indexSecond+2]) ? order[countOrderArr] = h_first_sort_index[countFirstArr++] : order[countOrderArr] = h_second_sort_index[countSecondArr++];
        }
      } else {
        (block[indexFirst+1] < block[indexSecond+1]) ? order[countOrderArr] = h_first_sort_index[countFirstArr++] : order[countOrderArr] = h_second_sort_index[countSecondArr++];
      }
    }
    /* Track where rotation 0 lands in the merged order. */
    if(order[countOrderArr] == 0) {
      s->origPtr = countOrderArr;
    }
  }

  /* Copy whichever input array still has entries. */
  while(countFirstArr < firstSortLength) {
    order[countOrderArr] = h_first_sort_index[countFirstArr];
    if(order[countOrderArr] == 0) {
      s->origPtr = countOrderArr;
    }
    countFirstArr++;
    countOrderArr++;
  }
  while(countSecondArr < secondSortLength) {
    order[countOrderArr] = h_second_sort_index[countSecondArr];
    if(order[countOrderArr] == 0) {
      s->origPtr = countOrderArr;
    }
    countSecondArr++;
    countOrderArr++;
  }

  AssertH( s->origPtr != -1, 1003 );
  return;
}
/* Prepare one block for compression on the GPU path: finalise the block CRC,
   fold it into the stream-wide combined CRC, and run the GPU block sort.
   Returns True; an empty block is a no-op beyond CRC initialisation. */
Bool blocksort_wrapper( EState *s ) {
   BZ_INITIALISE_CRC( s->blockCRC );
   if (s->nblock <= 0)
      return True; /* nothing to sort for an empty block */
   BZ_FINALISE_CRC ( s->blockCRC );
   /* Fold this block's CRC into the running stream CRC (rotate-left by 1). */
   s->combinedCRC = (s->combinedCRC << 1) | (s->combinedCRC >> 31);
   s->combinedCRC ^= s->blockCRC;
   if (s->blockNo > 1)
      s->numZ = 0;
   if (s->verbosity >= 2)
      VPrintf4( "    block %d: crc = 0x%08x, "
                "combined CRC = 0x%08x, size = %d\n",
                s->blockNo, s->blockCRC, s->combinedCRC, s->nblock );
   /* GPU sorts a prefix of the rotations; the remainder is merged later by
      merge_two_sort_arrays (the origPtr scan moved there as well). */
   s->first_sort_length = gpuBlockSort( (UChar*) s->arr2, s->arr1,
      s->arr1_first_sort, s->arr1_second_sort, s->arr1_first_sort_rank,
      s->nblock, &(s->sortingDepth));
   return True;
}
/* Emit the compressed bitstream for one sorted block: stream header (first
   block only), block header + CRC, MTF and Huffman coding, and the stream
   trailer when is_last_block is set. Always returns True. */
Bool mtf_huff_wrapper( EState* s, Bool is_last_block ) {
/* zbits points just past the block data inside arr2; the bit writer emits there. */
s->zbits = (UChar*) (&((UChar*)s->arr2)[s->nblock]);
#ifdef PRINT_DEBUG
printf("BZ2_compressBlock %d\n",s->blockNo);
#endif
// start with a fresh buffer with every block
BZ2_bsInitWrite( s );
/*-- If this is the first block, create the stream header. --*/
if (s->blockNo == 1) {
#ifdef PRINT_DEBUG
printf("This is the first block\n");
#endif
/* "BZh" magic followed by the block-size digit ('1'..'9'). */
bsPutUChar ( s, BZ_HDR_B );
bsPutUChar ( s, BZ_HDR_Z );
bsPutUChar ( s, BZ_HDR_h );
bsPutUChar ( s, (UChar)(BZ_HDR_0 + s->blockSize100k) );
}
if (s->nblock > 0) {
/* Block magic 0x314159265359 (pi), six bytes. */
bsPutUChar ( s, 0x31 ); bsPutUChar ( s, 0x41 );
bsPutUChar ( s, 0x59 ); bsPutUChar ( s, 0x26 );
bsPutUChar ( s, 0x53 ); bsPutUChar ( s, 0x59 );
/*-- Now the block's CRC, so it is in a known place. --*/
bsPutUInt32 ( s, s->blockCRC );
/*--
Now a single bit indicating (non-)randomisation.
As of version 0.9.5, we use a better sorting algorithm
which makes randomisation unnecessary. So always set
the randomised bit to 'no'. Of course, the decoder
still needs to be able to handle randomised blocks
so as to maintain backwards compatibility with
older versions of bzip2.
--*/
bsW(s,1,0);
/* origPtr: index of the original string in the sorted rotation order. */
bsW ( s, 24, s->origPtr );
generateMTFValues ( s );
sendMTFValues ( s );
}
/*-- If this is the last block, add the stream trailer. --*/
if (is_last_block) {
#ifdef PRINT_DEBUG
printf("This is the last block\n");
#endif
/* End-of-stream magic 0x177245385090 (sqrt(pi)) plus combined CRC. */
bsPutUChar ( s, 0x17 ); bsPutUChar ( s, 0x72 );
bsPutUChar ( s, 0x45 ); bsPutUChar ( s, 0x38 );
bsPutUChar ( s, 0x50 ); bsPutUChar ( s, 0x90 );
bsPutUInt32 ( s, s->combinedCRC );
if (s->verbosity >= 2)
VPrintf1( "    final combined CRC = 0x%08x\n   ", s->combinedCRC );
bsFinishWrite ( s );
}
return True;
}
/* CPU-only variant of block compression: finalise CRCs, run the classic
   BZ2_blockSort, then emit the full bitstream for the block (header, MTF,
   Huffman, optional stream trailer). Always returns True. */
Bool BZ2_compressBlock_only_CPU ( EState* s, Bool is_last_block ) {
if (s->nblock > 0) {
BZ_FINALISE_CRC ( s->blockCRC );
/* Fold this block's CRC into the running stream CRC (rotate-left by 1). */
s->combinedCRC = (s->combinedCRC << 1) | (s->combinedCRC >> 31);
s->combinedCRC ^= s->blockCRC;
if (s->blockNo > 1) s->numZ = 0;
if (s->verbosity >= 2)
VPrintf4( "    block %d: crc = 0x%08x, "
"combined CRC = 0x%08x, size = %d\n",
s->blockNo, s->blockCRC, s->combinedCRC, s->nblock );
/* Classic CPU BWT sort (sets s->origPtr). */
BZ2_blockSort ( s );
}
/* zbits points just past the block data inside arr2; the bit writer emits there. */
s->zbits = (UChar*) (&((UChar*)s->arr2)[s->nblock]);
/*-- If this is the first block, create the stream header. --*/
BZ2_bsInitWrite ( s );
if (s->blockNo == 1) {
/* "BZh" magic followed by the block-size digit ('1'..'9'). */
bsPutUChar ( s, BZ_HDR_B );
bsPutUChar ( s, BZ_HDR_Z );
bsPutUChar ( s, BZ_HDR_h );
bsPutUChar ( s, (UChar)(BZ_HDR_0 + s->blockSize100k) );
}
if (s->nblock > 0) {
/* Block magic 0x314159265359 (pi), six bytes. */
bsPutUChar ( s, 0x31 ); bsPutUChar ( s, 0x41 );
bsPutUChar ( s, 0x59 ); bsPutUChar ( s, 0x26 );
bsPutUChar ( s, 0x53 ); bsPutUChar ( s, 0x59 );
/*-- Now the block's CRC, so it is in a known place. --*/
bsPutUInt32 ( s, s->blockCRC );
/*--
Now a single bit indicating (non-)randomisation.
As of version 0.9.5, we use a better sorting algorithm
which makes randomisation unnecessary. So always set
the randomised bit to 'no'. Of course, the decoder
still needs to be able to handle randomised blocks
so as to maintain backwards compatibility with
older versions of bzip2.
--*/
bsW(s,1,0);
bsW ( s, 24, s->origPtr );
generateMTFValues ( s );
sendMTFValues ( s );
}
/*-- If this is the last block, add the stream trailer. --*/
if (is_last_block) {
/* End-of-stream magic 0x177245385090 (sqrt(pi)) plus combined CRC. */
bsPutUChar ( s, 0x17 ); bsPutUChar ( s, 0x72 );
bsPutUChar ( s, 0x45 ); bsPutUChar ( s, 0x38 );
bsPutUChar ( s, 0x50 ); bsPutUChar ( s, 0x90 );
bsPutUInt32 ( s, s->combinedCRC );
if (s->verbosity >= 2)
VPrintf1( "    final combined CRC = 0x%08x\n   ", s->combinedCRC );
bsFinishWrite ( s );
}
return True;
}
/*---------------------------------------------------*/
/* Compress every queued block in 'strm' with a hybrid GPU/CPU OpenMP pipeline:
 *   thread 0       : GPU block-sort producer (claims blocks via the shared
 *                    counter, pushes sorted indices onto done_block_sort_queue)
 *   thread 1       : consumer — merges the two GPU sort arrays and runs
 *                    MTF + Huffman on each GPU-sorted block
 *   threads >= 2   : independent CPU-only compression of any unclaimed blocks
 * Blocks are claimed through 'atomic_used_count' under an omp critical.
 * NOTE(review): done_block_sort / done_block_sort_queue / finalGPUBlocks are
 * read by thread 1 in a busy-wait without omp atomic/flush — this relies on
 * the compiler not caching those loads; confirm or add explicit flushes. */
void BZ2_compressBlocks ( bz_stream* strm)
{
gpuSetDevice(0);
struct timespec t1, t2;
clock_gettime(CLOCK_MONOTONIC, &t1);
/* state[0] is sorted eagerly by thread 0, so claiming starts at index 1. */
Int32 atomic_used_count = 1;
Int32 done_block_sort = 0;
Int32 done_block_sort_queue[BZ_MAX_STATE_COUNT];
Int32 done_huff_mtf = 0;
Int32 finalGPUBlocks = BZ_MAX_STATE_COUNT;
printf("Block size : %d\n",((EState *)strm->state[0])->nblockMAX + 19);
printf("numThreads read %d\n",((EState*)strm->state[0])->numThreads);
Int32 num_threads = ((EState*)strm->state[0])->numThreads/*+2*/;
/* Two extra threads beyond the configured CPU workers: GPU feeder + merger.
   NOTE(review): the reported counts below (num_threads+1 / num_threads-2)
   do not match the num_threads+2 team size — confirm intended values. */
omp_set_num_threads(num_threads+2);
printf("Number of additional CPU threads %d\n",num_threads+1);
#pragma omp parallel shared(done_block_sort, done_block_sort_queue, done_huff_mtf, finalGPUBlocks)
{
int threadID = omp_get_thread_num();
Int32 count;
Int32 currentHuffMtf = -1;
EState *s;
Bool is_last_block = False;
if(threadID == 0) {
/* GPU producer: sort block 0 first, then claim further blocks. */
struct timespec thread1_T1, thread1_T2;
clock_gettime(CLOCK_MONOTONIC, &thread1_T1);
blocksort_wrapper((EState*) strm->state[0]);
done_block_sort_queue[done_block_sort] = 0;
done_block_sort++;
while(true) {
#pragma omp critical
{
count = atomic_used_count;
atomic_used_count++;
}
if(count > strm->state_fill_count) {
/* No more blocks; record how many the GPU path handled. */
finalGPUBlocks = done_block_sort;
break;
}
s = (EState *)strm->state[count];
if(blocksort_wrapper( s )) {
done_block_sort_queue[done_block_sort] = count;
done_block_sort++;
} else {
printf("blocksort_wrapper failed for blockNo %d\n",count);
exit(2);
}
}
clock_gettime(CLOCK_MONOTONIC, &thread1_T2);
double thread1_diff = thread1_T2.tv_sec - thread1_T1.tv_sec + ((thread1_T2.tv_nsec - thread1_T1.tv_nsec)/1000000000.0);
printf("[Time thread1] %lf\n",thread1_diff);
}
if(threadID == 1) {
/* Consumer: merge + MTF/Huffman each GPU-sorted block, in queue order. */
struct timespec thread2_T1, thread2_T2;
clock_gettime(CLOCK_MONOTONIC, &thread2_T1);
double thread2_work = 0.0;
struct timespec thread2_work1, thread2_work2;
while(true) {
if(done_block_sort > done_huff_mtf) {
clock_gettime(CLOCK_MONOTONIC, &thread2_work1);
currentHuffMtf = done_block_sort_queue[done_huff_mtf];
done_huff_mtf++;
s = (EState *)strm->state[currentHuffMtf];
merge_two_sort_arrays ( s );
if(currentHuffMtf == strm->state_fill_count) {
is_last_block = True;
}
if( !mtf_huff_wrapper( s, is_last_block ) ) {
printf("mtf_huff_wrapper failed for blockNo %d\n",currentHuffMtf);
exit(2);
}
clock_gettime(CLOCK_MONOTONIC, &thread2_work2);
thread2_work+= thread2_work2.tv_sec - thread2_work1.tv_sec + ((thread2_work2.tv_nsec - thread2_work1.tv_nsec)/1000000000.0);
}
if(done_huff_mtf == finalGPUBlocks) {
break;
}
}
clock_gettime(CLOCK_MONOTONIC, &thread2_T2);
double thread2_diff = thread2_T2.tv_sec - thread2_T1.tv_sec + ((thread2_T2.tv_nsec - thread2_T1.tv_nsec)/1000000000.0);
printf("[Time thread2 work] %lf\n",thread2_work);
printf("[Time thread2] %lf\n",thread2_diff);
}
if(threadID >= 2) {
/* CPU workers: compress whole blocks independently of the GPU path. */
while(true) {
#pragma omp critical
{
count = atomic_used_count;
atomic_used_count++;
}
if(count > strm->state_fill_count) {
break;
}
s = (EState *)strm->state[count];
/* BUGFIX: was '(Bool) count == strm->state_fill_count' — the cast bound
   to 'count' alone, truncating block indices >= 256 before the
   comparison; parenthesise so the comparison result is passed. */
if(!BZ2_compressBlock_only_CPU( s, (Bool) (count == strm->state_fill_count))) {
printf("BZ2_compressBlock_only_CPU failed at blockNo %d\n",count);
exit(2);
}
}
}
}
printf("Number of CPU threads %d\n",num_threads - 2);
printf("Out of the total %d blocks GPU did %d\n",strm->state_fill_count+1,finalGPUBlocks);
clock_gettime(CLOCK_MONOTONIC, &t2);
double t = t2.tv_sec - t1.tv_sec + ((t2.tv_nsec - t1.tv_nsec)/1000000000.0);
printf("total compression time (with overlap) %lf\n",t);
}
/* Global Variables for PThreads Producer-Consumer pipeline */
bz_stream* global_strm;                    /* stream whose blocks are being compressed */
sem_t mutex, full, empty;                  /* buffer lock / filled-slot count / free-slot count */
int buff[BZ_MAX_STATE_COUNT];              /* queue of sorted block indices (filled from index 1) */
int producerCount = 0, consumerCount = 0;  /* last slot written / last slot read in buff */
/* Producer thread: BWT-sorts every block of global_strm in order and
 * publishes each block index into the shared queue 'buff' for consume().
 * Collects per-block timing and sorting-depth statistics (the last block is
 * excluded from the averages, matching the original accounting).
 * Returns NULL (the value is discarded by pthread_join). */
void *produce(void *arg) {
(void) arg; /* unused */
int i;
struct timespec t1, t2;
double thread1_work = 0.0;
int maxSortingDepth = 0;
int avgSortingDepth = 0;
for(i = 0; i <= global_strm->state_fill_count; i++) {
clock_gettime(CLOCK_MONOTONIC, &t1);
EState *s = (EState*)global_strm->state[i];
if( !blocksort_wrapper( s )) {
printf("[ERROR] blocksort_wrapper_failed\n");
exit(1);
}
clock_gettime(CLOCK_MONOTONIC, &t2);
if(i!=global_strm->state_fill_count) {
thread1_work += t2.tv_sec - t1.tv_sec + ((t2.tv_nsec - t1.tv_nsec)/1000000000.0);
if(s->sortingDepth > maxSortingDepth) maxSortingDepth = s->sortingDepth;
avgSortingDepth += s->sortingDepth;
}
/* Classic bounded-buffer handoff: wait for a free slot, then publish. */
sem_wait(&empty);
sem_wait(&mutex);
buff[++producerCount] = i;
sem_post(&mutex);
sem_post(&full);
}
int divFactor = global_strm->state_fill_count;
/* BUGFIX: guard against division by zero when only one block exists. */
if (divFactor < 1) divFactor = 1;
printf("[Time BWT] %lf\n",thread1_work);
printf("[Average Time BWT] %lf\n",thread1_work/divFactor);
printf("[Max Sorting Depth] %d\n",maxSortingDepth);
printf("[Average Sorting Depth] %d\n",(int)((1.0*avgSortingDepth)/divFactor));
/* BUGFIX: a pthread start routine must return a value; falling off the end
   left the thread's exit value indeterminate. */
return NULL;
}
/* Consumer thread: pops sorted block indices from 'buff', merges the two GPU
 * sort arrays, and runs MTF + Huffman coding on each block. Flags the block
 * whose index equals state_fill_count as the last one so the stream trailer
 * is emitted. Returns NULL (discarded by pthread_join). */
void *consume(void *arg) {
(void) arg; /* unused */
int item, i;
bool is_last_block = false;
struct timespec t1, t2;
struct timespec tmerge;
double tmergeTotal = 0.0;
double thread2_work = 0.0;
for(i = 0; i <= global_strm->state_fill_count; i++) {
/* Bounded-buffer take: wait for a filled slot, then consume it. */
sem_wait(&full);
sem_wait(&mutex);
item = buff[++consumerCount];
sem_post(&mutex);
sem_post(&empty);
clock_gettime(CLOCK_MONOTONIC, &t1);
EState *s = (EState*)global_strm->state[item];
merge_two_sort_arrays( s );
clock_gettime(CLOCK_MONOTONIC, &tmerge);
if(item == global_strm->state_fill_count) {
is_last_block = true;
} else {
/* Last block excluded from the merge-time average, as originally. */
tmergeTotal += tmerge.tv_sec - t1.tv_sec + ((tmerge.tv_nsec - t1.tv_nsec)/1000000000.0);
}
if(!mtf_huff_wrapper( s, is_last_block )) {
printf("[ERROR] mtf_huff_wrapper\n");
exit(1);
}
clock_gettime(CLOCK_MONOTONIC, &t2);
thread2_work += t2.tv_sec - t1.tv_sec + ((t2.tv_nsec - t1.tv_nsec)/1000000000.0);
}
int divFactor = global_strm->state_fill_count;
/* BUGFIX: guard against division by zero when only one block exists. */
if (divFactor < 1) divFactor = 1;
printf("[Total Time Merge] %lf\n",tmergeTotal);
printf("[Average Time Merge] %lf\n",tmergeTotal/divFactor);
printf("[Time Merge, MTF+HUFF] %lf\n",thread2_work);
/* BUGFIX: a pthread start routine must return a value; falling off the end
   left the thread's exit value indeterminate. */
return NULL;
}
/* Compress all queued blocks with a two-thread pthreads producer/consumer
 * pipeline: produce() performs the (GPU) block sorts, consume() merges and
 * entropy-codes. Communication goes through the global semaphores and 'buff'. */
void BZ2_compressBlocks_pthreads ( bz_stream* strm)
{
printf("[BZ2_compressBlocks_pthreads] Total Blocks : %d, Block Size %d\n", strm->state_fill_count, ((EState*)strm->state[0])->nblockMAX);
gpuSetDevice(0);
global_strm = strm;
/* BUGFIX: reset the shared queue cursors so a second invocation in the same
   process does not index past previously used slots. */
producerCount = 0;
consumerCount = 0;
struct timespec t1, t2;
clock_gettime(CLOCK_MONOTONIC, &t1);
pthread_t tid1, tid2;
sem_init(&mutex, 0, 1);
sem_init(&full, 0, 0);
/* BUGFIX: a buffer capacity of 0 (single-block input, state_fill_count == 0)
   made the producer block forever on 'empty' while the consumer blocked on
   'full' — guarantee at least one free slot. */
sem_init(&empty, 0, strm->state_fill_count > 0 ? strm->state_fill_count : 1);
pthread_create(&tid1, NULL, produce, NULL);
pthread_create(&tid2, NULL, consume, NULL);
pthread_join(tid1, NULL);
pthread_join(tid2, NULL);
/* Release semaphore resources (previously leaked). */
sem_destroy(&mutex);
sem_destroy(&full);
sem_destroy(&empty);
clock_gettime(CLOCK_MONOTONIC, &t2);
double t = t2.tv_sec - t1.tv_sec + ((t2.tv_nsec - t1.tv_nsec)/1000000000.0);
printf("total compression time (with pthreads overlap) %lf\n",t);
}
/* Sequential reference path: for each queued block, run the GPU sort, merge
   the two sort arrays, locate origPtr, and emit the full bitstream — all on
   the calling thread, with no pipeline overlap. Used for timing comparison. */
void BZ2_compressBlocks_without_overlap (bz_stream* strm) {
struct timespec t1, t2;
clock_gettime(CLOCK_MONOTONIC, &t1);
UInt32 count;
EState *s;
Bool is_last_block = False;
for(count = 0; count <= strm->state_fill_count; count++) {
s = (EState *)strm->state[count];
BZ_INITIALISE_CRC( s->blockCRC );
if(count == strm->state_fill_count) {
is_last_block = True;
}
if (s->nblock > 0) {
BZ_FINALISE_CRC ( s->blockCRC );
/* Fold this block's CRC into the running stream CRC (rotate-left by 1). */
s->combinedCRC = (s->combinedCRC << 1) | (s->combinedCRC >> 31);
s->combinedCRC ^= s->blockCRC;
if (s->blockNo > 1) s->numZ = 0;
if (s->verbosity >= 2)
VPrintf4( "    block %d: crc = 0x%08x, "
"combined CRC = 0x%08x, size = %d\n",
s->blockNo, s->blockCRC, s->combinedCRC, s->nblock );
/* GPU sorts a prefix of the rotations; the remainder is merged below. */
s->first_sort_length = gpuBlockSort( (UChar*) s->arr2, s->arr1, s->arr1_first_sort, s->arr1_second_sort, s->arr1_first_sort_rank, s->nblock, &(s->sortingDepth));
merge_two_sort_arrays( s );
/* Find origPtr: the position of rotation 0 in the sorted order. */
UInt32* ptr = s->ptr;
s->origPtr = -1;
Int32 i;
for (i = 0; i < s->nblock; i++)
if (ptr[i] == 0)
{ s->origPtr = i; break; };
AssertH( s->origPtr != -1, 1003 );
}
/* zbits points just past the block data inside arr2; bit writer emits there. */
s->zbits = (UChar*) (&((UChar*)s->arr2)[s->nblock]);
#ifdef PRINT_DEBUG
printf("BZ2_compressBlock %d\n",s->blockNo);
#endif
// start with a fresh buffer with every block
BZ2_bsInitWrite( s );
/*-- If this is the first block, create the stream header. --*/
if (s->blockNo == 1) {
#ifdef PRINT_DEBUG
printf("This is the first block\n");
#endif
//BZ2_bsInitWrite ( s );
/* "BZh" magic followed by the block-size digit ('1'..'9'). */
bsPutUChar ( s, BZ_HDR_B );
bsPutUChar ( s, BZ_HDR_Z );
bsPutUChar ( s, BZ_HDR_h );
bsPutUChar ( s, (UChar)(BZ_HDR_0 + s->blockSize100k) );
}
if (s->nblock > 0) {
/* Block magic 0x314159265359 (pi), six bytes. */
bsPutUChar ( s, 0x31 ); bsPutUChar ( s, 0x41 );
bsPutUChar ( s, 0x59 ); bsPutUChar ( s, 0x26 );
bsPutUChar ( s, 0x53 ); bsPutUChar ( s, 0x59 );
/*-- Now the block's CRC, so it is in a known place. --*/
bsPutUInt32 ( s, s->blockCRC );
/*--
Now a single bit indicating (non-)randomisation.
As of version 0.9.5, we use a better sorting algorithm
which makes randomisation unnecessary. So always set
the randomised bit to 'no'. Of course, the decoder
still needs to be able to handle randomised blocks
so as to maintain backwards compatibility with
older versions of bzip2.
--*/
bsW(s,1,0);
bsW ( s, 24, s->origPtr );
generateMTFValues ( s );
sendMTFValues ( s );
}
/*-- If this is the last block, add the stream trailer. --*/
if (is_last_block) {
#ifdef PRINT_DEBUG
printf("This is the last block\n");
#endif
/* End-of-stream magic 0x177245385090 (sqrt(pi)) plus combined CRC. */
bsPutUChar ( s, 0x17 ); bsPutUChar ( s, 0x72 );
bsPutUChar ( s, 0x45 ); bsPutUChar ( s, 0x38 );
bsPutUChar ( s, 0x50 ); bsPutUChar ( s, 0x90 );
bsPutUInt32 ( s, s->combinedCRC );
if (s->verbosity >= 2)
VPrintf1( "    final combined CRC = 0x%08x\n   ", s->combinedCRC );
bsFinishWrite ( s );
}
}
clock_gettime(CLOCK_MONOTONIC, &t2);
double t = t2.tv_sec - t1.tv_sec + ((t2.tv_nsec - t1.tv_nsec)/1000000000.0);
printf("total compression time (without overlap) %lf\n",t);
}
/*-------------------------------------------------------------*/
/*--- end compress.c ---*/
/*-------------------------------------------------------------*/
|
Quaternion.h | // libdas: DENG asset handling management library
// licence: Apache, see LICENCE file
// file: Quaternion.h - Quaternion structure implementation
// author: Karl-Mihkel Ott
#ifndef QUATERNION_H
#define QUATERNION_H
// NOTE(review): this guard only pulls in the SSE headers when USE_SIMD is
// NOT already defined, yet the code below always uses intrinsics — if the
// build defines USE_SIMD beforehand the includes are skipped and compilation
// fails. Looks like an inverted/include-once guard; confirm intent.
#ifndef USE_SIMD
#define USE_SIMD
#include <xmmintrin.h>
#include <smmintrin.h>
#endif
namespace Libdas {
/// Fast 3D vector cross product using SIMD instructions
/// SIMD 3D cross product of the low three lanes of _vec1 and _vec2.
/// Uses the two-shuffle identity: cross(a, b) = a.yzx * b.zxy - (a.yzx * b).yzx,
/// so the result lanes are (a.y*b.z - a.z*b.y, a.z*b.x - a.x*b.z, a.x*b.y - a.y*b.x).
inline const __m128 FastCross(const __m128 &_vec1, const __m128 &_vec2) {
    const __m128 a_yzx = _mm_shuffle_ps(_vec1, _vec1, _MM_SHUFFLE(3, 0, 2, 1));
    const __m128 b_zxy = _mm_shuffle_ps(_vec2, _vec2, _MM_SHUFFLE(3, 1, 0, 2));
    const __m128 lhs  = _mm_mul_ps(a_yzx, b_zxy);         // (ay*bz, az*bx, ax*by, aw*bw)
    const __m128 prod = _mm_mul_ps(a_yzx, _vec2);         // (ay*bx, az*by, ax*bz, aw*bw)
    const __m128 rhs  = _mm_shuffle_ps(prod, prod, _MM_SHUFFLE(3, 0, 2, 1));
    return _mm_sub_ps(lhs, rhs);
}
inline const float FastDot(const __m128 &_vec1, const __m128 &_vec2) {
__m128 mul_res, shuf_reg, sums_reg;
mul_res = _mm_mul_ps(_vec1, _vec2);
shuf_reg = _mm_movehdup_ps(mul_res);
sums_reg = _mm_add_ps(mul_res, shuf_reg);
shuf_reg = _mm_movehl_ps(shuf_reg, sums_reg);
sums_reg = _mm_add_ss(sums_reg, shuf_reg);
return _mm_cvtss_f32(sums_reg);
}
/// Quaternion structure for Libdas
struct Quaternion {
    float x, y, z, w;

    Quaternion(float _x, float _y, float _z, float _w) : x(_x), y(_y), z(_z), w(_w) {}
    Quaternion() : x(0), y(0), z(0), w(0) {}
    /// Construct from an array laid out as {x, y, z, w}
    Quaternion(const float *_a) : x(_a[0]), y(_a[1]), z(_a[2]), w(_a[3]) {}

    ////////////////////////////////////
    // ***** Operator overloads ***** //
    ////////////////////////////////////

    /**
     * Calculate Grassman product of two quaternions:
     *   vector part = pw*qv + qw*pv + pv x qv
     *   scalar part = pw*qw - pv . qv
     */
    inline Quaternion operator*(const Quaternion &_q) {
        const __m128 pw_vec = _mm_set_ps1(w);
        const __m128 qw_vec = _mm_set_ps1(_q.w);
        const __m128 p_vec = _mm_set_ps(0, z, y, x);
        const __m128 q_vec = _mm_set_ps(0, _q.z, _q.y, _q.x);
        const __m128 vec3 = _mm_add_ps(_mm_add_ps(_mm_mul_ps(pw_vec, q_vec), _mm_mul_ps(qw_vec, p_vec)), FastCross(p_vec, q_vec));
        float scalar = w * _q.w - FastDot(p_vec, q_vec);
        Quaternion out;
        // The store writes x, y, z and clobbers w; w is patched right after.
        _mm_storeu_ps(&out.x, vec3);
        out.w = scalar;
        return out;
    }

    /// Component-wise scale by a scalar constant
    inline Quaternion operator*(const float _c) {
        Quaternion out;
        const __m128 c = _mm_set_ps1(_c);
        const __m128 vec = _mm_set_ps(w, z, y, x);
        _mm_storeu_ps(&out.x, _mm_mul_ps(vec, c));
        return out;
    }

    /// Component-wise division by a scalar constant (multiplies by 1/_c)
    inline Quaternion operator/(const float _c) {
        Quaternion out;
        const __m128 c = _mm_set_ps1(1 / _c);
        const __m128 vec = _mm_set_ps(w, z, y, x);
        _mm_storeu_ps(&out.x, _mm_mul_ps(vec, c));
        return out;
    }

    /**
     * Calculate magnitude of quaternion: sqrt(x^2 + y^2 + z^2 + w^2)
     */
    inline float Magnitude() {
        const float *ptr = &x;
        float sum = 0.0f;
        // BUGFIX: '#pragma omp simd' must immediately precede the for loop it
        // applies to; it was previously placed before the declaration of
        // 'sum', which is ill-formed. The reduction is now declared explicitly.
        #pragma omp simd reduction(+:sum)
        for(int i = 0; i < 4; i++)
            sum += ptr[i] * ptr[i];
        return sqrtf(sum);
    }

#ifdef VECTOR_H
    /// Rotate a 4-vector by this quaternion: q * v * q^-1
    inline Vector4<float> operator*(const Vector4<float> &_vec) {
        Quaternion vq = Quaternion(&_vec.first);
        vq.w = 0;
        Quaternion q = *this * vq * this->Inverse();
        return Vector4<float>(q.x, q.y, q.z, q.w);
    }
#endif

#ifdef MATRIX_H
    // not using simd here for now
    /// Expand the quaternion into an equivalent 3x3 rotation matrix
    Matrix3<float> ExpandToMatrix3() {
        float dxx = 2 * x * x, dyy = 2 * y * y, dzz = 2 * z * z;
        float dxy = 2 * x * y, dxz = 2 * x * z, dxw = 2 * x * w;
        float dyz = 2 * y * z, dyw = 2 * y * w;
        float dzw = 2 * z * w;
        return Matrix3<float> {
            {1 - dyy - dzz, dxy + dzw, dxz - dyw},
            {dxy - dzw, 1 - dxx - dzz, dyz + dxw},
            {dxz + dyw, dyz - dxw, 1 - dxx - dyy}
        };
    }

    /// Expand the quaternion into an equivalent 4x4 homogeneous rotation matrix
    Matrix4<float> ExpandToMatrix4() {
        float dxx = 2 * x * x, dyy = 2 * y * y, dzz = 2 * z * z;
        float dxy = 2 * x * y, dxz = 2 * x * z, dxw = 2 * x * w;
        float dyz = 2 * y * z, dyw = 2 * y * w;
        float dzw = 2 * z * w;
        return Matrix4<float> {
            {1 - dyy - dzz, dxy + dzw, dxz - dyw, 0},
            {dxy - dzw, 1 - dxx - dzz, dyz + dxw, 0},
            {dxz + dyw, dyz - dxw, 1 - dxx - dyy, 0},
            {0, 0, 0, 1}
        };
    }

    /// Recover a quaternion from a rotation matrix, branching on the largest
    /// diagonal element for numerical stability (Shepperd's method).
    static Quaternion MatrixToQuaternion(const Libdas::Matrix4<float> &_mat) {
        Quaternion q;
        float diag_sum = _mat.row1.first + _mat.row2.second + _mat.row3.third;
        // diagonal sum is positive
        if(diag_sum > 0) {
            q.w = sqrtf(1.0f + diag_sum) / 2;
            float w4 = 4 * q.w;
            q.x = (_mat.row3.second - _mat.row2.third) / w4;
            q.y = (_mat.row1.third - _mat.row3.first) / w4;
            q.z = (_mat.row2.first - _mat.row1.second) / w4;
        }
        // diagonal sum is negative
        else if(_mat.row1.first > _mat.row2.second && _mat.row1.first > _mat.row3.third) {
            float s = sqrtf(1.0f + _mat.row1.first - _mat.row2.second - _mat.row3.third) * 2;
            q.w = (_mat.row3.second - _mat.row2.third ) / s;
            q.x = s / 4;
            q.y = (_mat.row1.second + _mat.row2.first) / s;
            q.z = (_mat.row1.third + _mat.row3.first) / s;
        }
        else if(_mat.row2.second > _mat.row3.third) {
            float s = sqrtf(1.0f + _mat.row2.second - _mat.row1.first - _mat.row3.third) * 2;
            q.w = (_mat.row1.third - _mat.row3.first) / s;
            q.x = (_mat.row1.second + _mat.row2.first) / s;
            q.y = s / 4;
            q.z = (_mat.row2.third + _mat.row3.second) /s;
        }
        else {
            float s = sqrtf(1.0f + _mat.row3.third - _mat.row1.first - _mat.row2.second) * 2;
            q.w = (_mat.row2.first - _mat.row1.second) / s;
            q.x = (_mat.row1.third + _mat.row3.first) / s;
            q.y = (_mat.row2.third + _mat.row3.second) / s;
            q.z = s / 4;
        }
        return q;
    }
#endif

    /**
     * Calculate the conjugate of quaternion
     */
    inline Quaternion Conjugate() {
        return Quaternion(-x, -y, -z, w);
    }

    /**
     * Calculate the inverse of quaternion using formula: q* / |q|
     */
    inline Quaternion Inverse() {
        return Conjugate() / Magnitude();
    }
};
}
#endif
|
transform.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% TTTTT RRRR AAA N N SSSSS FFFFF OOO RRRR M M %
% T R R A A NN N SS F O O R R MM MM %
% T RRRR AAAAA N N N SSS FFF O O RRRR M M M %
% T R R A A N NN SS F O O R R M M %
% T R R A A N N SSSSS F OOO R R M M %
% %
% %
% MagickCore Image Transform Methods %
% %
% Software Design %
% John Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/attribute.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/draw.h"
#include "magick/effect.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/geometry.h"
#include "magick/image.h"
#include "magick/memory_.h"
#include "magick/layer.h"
#include "magick/list.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/pixel-private.h"
#include "magick/resource_.h"
#include "magick/resize.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/transform.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ChopImage() removes a region of an image and collapses the image to occupy
% the removed portion.
%
% The format of the ChopImage method is:
%
% Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o chop_info: Define the region of the image to chop.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
ExceptionInfo *exception)
{
#define ChopImageTag "Chop/Image"
CacheView
*chop_view,
*image_view;
Image
*chop_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
extent;
ssize_t
y;
/*
Check chop geometry.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
assert(chop_info != (RectangleInfo *) NULL);
if (((chop_info->x+(ssize_t) chop_info->width) < 0) ||
((chop_info->y+(ssize_t) chop_info->height) < 0) ||
(chop_info->x > (ssize_t) image->columns) ||
(chop_info->y > (ssize_t) image->rows))
ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
/* Clamp the chop rectangle so it lies fully inside the image bounds. */
extent=(*chop_info);
if ((extent.x+(ssize_t) extent.width) > (ssize_t) image->columns)
extent.width=(size_t) ((ssize_t) image->columns-extent.x);
if ((extent.y+(ssize_t) extent.height) > (ssize_t) image->rows)
extent.height=(size_t) ((ssize_t) image->rows-extent.y);
if (extent.x < 0)
{
extent.width-=(size_t) (-extent.x);
extent.x=0;
}
if (extent.y < 0)
{
extent.height-=(size_t) (-extent.y);
extent.y=0;
}
/* Result image shrinks by the chopped width and height. */
chop_image=CloneImage(image,image->columns-extent.width,image->rows-
extent.height,MagickTrue,exception);
if (chop_image == (Image *) NULL)
return((Image *) NULL);
/*
Extract chop image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireCacheView(image);
chop_view=AcquireCacheView(chop_image);
/* Pass 1: copy the rows above the chop region, skipping the chopped columns. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status) omp_throttle(1)
#endif
for (y=0; y < (ssize_t) extent.y; y++)
{
register const PixelPacket
*restrict p;
register IndexPacket
*restrict chop_indexes,
*restrict indexes;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(chop_view,0,y,chop_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
chop_indexes=GetCacheViewAuthenticIndexQueue(chop_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
/* Copy only pixels outside the chopped column range. */
if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
{
*q=(*p);
if (indexes != (IndexPacket *) NULL)
{
if (chop_indexes != (IndexPacket *) NULL)
*chop_indexes++=indexes[x];
}
q++;
}
p++;
}
if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_ChopImage)
#endif
proceed=SetImageProgress(image,ChopImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
/*
Extract chop image.
*/
/* Pass 2: copy the rows below the chop region into the rows starting at
   extent.y of the destination, again skipping the chopped columns. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status) omp_throttle(1)
#endif
for (y=0; y < (ssize_t) (image->rows-(extent.y+extent.height)); y++)
{
register const PixelPacket
*restrict p;
register IndexPacket
*restrict chop_indexes,
*restrict indexes;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,extent.y+extent.height+y,
image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(chop_view,0,extent.y+y,chop_image->columns,
1,exception);
if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
chop_indexes=GetCacheViewAuthenticIndexQueue(chop_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
{
*q=(*p);
if (indexes != (IndexPacket *) NULL)
{
if (chop_indexes != (IndexPacket *) NULL)
*chop_indexes++=indexes[x];
}
q++;
}
p++;
}
if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_ChopImage)
#endif
proceed=SetImageProgress(image,ChopImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
chop_view=DestroyCacheView(chop_view);
image_view=DestroyCacheView(image_view);
chop_image->type=image->type;
return(chop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n s o l i d a t e C M Y K I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConsolidateCMYKImages() consolidates separate C, M, Y, and K planes into a
% single image.
%
% The format of the ConsolidateCMYKImages method is:
%
% Image *ConsolidateCMYKImages(const Image *images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image sequence.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConsolidateCMYKImages(const Image *images,
ExceptionInfo *exception)
{
CacheView
*cmyk_view,
*image_view;
Image
*cmyk_image,
*cmyk_images;
register ssize_t
i;
ssize_t
y;
/*
Consolidate separate C, M, Y, and K planes into a single image.
*/
assert(images != (Image *) NULL);
assert(images->signature == MagickSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
cmyk_images=NewImageList();
/* Consume the input list four images at a time: C, M, Y, then K plane. */
for (i=0; i < (ssize_t) GetImageListLength(images); i+=4)
{
cmyk_image=CloneImage(images,images->columns,images->rows,MagickTrue,
exception);
if (cmyk_image == (Image *) NULL)
break;
if (SetImageStorageClass(cmyk_image,DirectClass) == MagickFalse)
break;
(void) SetImageColorspace(cmyk_image,CMYKColorspace);
/* Plane 1: inverted intensity of the first image -> red channel. */
image_view=AcquireCacheView(images);
cmyk_view=AcquireCacheView(cmyk_image);
for (y=0; y < (ssize_t) images->rows; y++)
{
register const PixelPacket
*restrict p;
register ssize_t
x;
register PixelPacket
*restrict q;
p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
q=QueueCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
break;
for (x=0; x < (ssize_t) images->columns; x++)
{
q->red=(Quantum) (QuantumRange-PixelIntensityToQuantum(p));
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
break;
}
cmyk_view=DestroyCacheView(cmyk_view);
image_view=DestroyCacheView(image_view);
images=GetNextImageInList(images);
if (images == (Image *) NULL)
break;
/* Plane 2: inverted intensity of the second image -> green channel. */
image_view=AcquireCacheView(images);
cmyk_view=AcquireCacheView(cmyk_image);
for (y=0; y < (ssize_t) images->rows; y++)
{
register const PixelPacket
*restrict p;
register ssize_t
x;
register PixelPacket
*restrict q;
p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
q=GetCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
break;
for (x=0; x < (ssize_t) images->columns; x++)
{
q->green=(Quantum) (QuantumRange-PixelIntensityToQuantum(p));
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
break;
}
cmyk_view=DestroyCacheView(cmyk_view);
image_view=DestroyCacheView(image_view);
images=GetNextImageInList(images);
if (images == (Image *) NULL)
break;
/* Plane 3: inverted intensity of the third image -> blue channel. */
image_view=AcquireCacheView(images);
cmyk_view=AcquireCacheView(cmyk_image);
for (y=0; y < (ssize_t) images->rows; y++)
{
register const PixelPacket
*restrict p;
register ssize_t
x;
register PixelPacket
*restrict q;
p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
q=GetCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
break;
for (x=0; x < (ssize_t) images->columns; x++)
{
q->blue=(Quantum) (QuantumRange-PixelIntensityToQuantum(p));
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
break;
}
cmyk_view=DestroyCacheView(cmyk_view);
image_view=DestroyCacheView(image_view);
images=GetNextImageInList(images);
if (images == (Image *) NULL)
break;
/* Plane 4: inverted intensity of the fourth image -> index (black) channel. */
image_view=AcquireCacheView(images);
cmyk_view=AcquireCacheView(cmyk_image);
for (y=0; y < (ssize_t) images->rows; y++)
{
register const PixelPacket
*restrict p;
register IndexPacket
*restrict indexes;
register ssize_t
x;
register PixelPacket
*restrict q;
p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
q=GetCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
break;
indexes=GetCacheViewAuthenticIndexQueue(cmyk_view);
for (x=0; x < (ssize_t) images->columns; x++)
{
indexes[x]=(IndexPacket) (QuantumRange-PixelIntensityToQuantum(p));
p++;
}
if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
break;
}
cmyk_view=DestroyCacheView(cmyk_view);
image_view=DestroyCacheView(image_view);
AppendImageToList(&cmyk_images,cmyk_image);
images=GetNextImageInList(images);
if (images == (Image *) NULL)
break;
}
return(cmyk_images);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C r o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropImage() extracts a region of the image starting at the offset defined
% by geometry. Region must be fully defined, and no special handling of
% geometry flags is performed.
%
% The format of the CropImage method is:
%
% Image *CropImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to crop with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CropImage(const Image *image,const RectangleInfo *geometry,
  ExceptionInfo *exception)
{
#define CropImageTag "Crop/Image"

  CacheView
    *crop_view,
    *image_view;

  Image
    *crop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    bounding_box,
    page;

  ssize_t
    y;

  /*
    Check crop geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    The virtual canvas (page) defaults to the actual pixel region when the
    image carries no page geometry.
  */
  bounding_box=image->page;
  if ((bounding_box.width == 0) || (bounding_box.height == 0))
    {
      bounding_box.width=image->columns;
      bounding_box.height=image->rows;
    }
  page=(*geometry);
  if (page.width == 0)
    page.width=bounding_box.width;
  if (page.height == 0)
    page.height=bounding_box.height;
  if (((bounding_box.x-page.x) >= (ssize_t) page.width) ||
      ((bounding_box.y-page.y) >= (ssize_t) page.height) ||
      ((page.x-bounding_box.x) > (ssize_t) image->columns) ||
      ((page.y-bounding_box.y) > (ssize_t) image->rows))
    {
      /*
        Crop is not within virtual canvas, return 1 pixel transparent image.
      */
      (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
        "GeometryDoesNotContainImage","`%s'",image->filename);
      crop_image=CloneImage(image,1,1,MagickTrue,exception);
      if (crop_image == (Image *) NULL)
        return((Image *) NULL);
      crop_image->background_color.opacity=(Quantum) TransparentOpacity;
      (void) SetImageBackgroundColor(crop_image);
      crop_image->page=bounding_box;
      /*
        Place the 1x1 placeholder just outside the canvas at (-1,-1).
      */
      crop_image->page.x=(-1);
      crop_image->page.y=(-1);
      if (crop_image->dispose == BackgroundDispose)
        crop_image->dispose=NoneDispose;
      return(crop_image);
    }
  /*
    Clip the requested region against the real pixel data: translate the
    request from virtual-canvas coordinates into image coordinates and clamp
    negative offsets to zero, shrinking the width/height accordingly.
  */
  if ((page.x < 0) && (bounding_box.x >= 0))
    {
      page.width+=page.x-bounding_box.x;
      page.x=0;
    }
  else
    {
      page.width-=bounding_box.x-page.x;
      page.x-=bounding_box.x;
      if (page.x < 0)
        page.x=0;
    }
  if ((page.y < 0) && (bounding_box.y >= 0))
    {
      page.height+=page.y-bounding_box.y;
      page.y=0;
    }
  else
    {
      page.height-=bounding_box.y-page.y;
      page.y-=bounding_box.y;
      if (page.y < 0)
        page.y=0;
    }
  /*
    Never read past the right/bottom image edge, and never grow beyond the
    caller's requested width/height.
  */
  if ((size_t) (page.x+page.width) > image->columns)
    page.width=image->columns-page.x;
  if ((geometry->width != 0) && (page.width > geometry->width))
    page.width=geometry->width;
  if ((size_t) (page.y+page.height) > image->rows)
    page.height=image->rows-page.y;
  if ((geometry->height != 0) && (page.height > geometry->height))
    page.height=geometry->height;
  bounding_box.x+=page.x;
  bounding_box.y+=page.y;
  if ((page.width == 0) || (page.height == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
        "GeometryDoesNotContainImage","`%s'",image->filename);
      return((Image *) NULL);
    }
  /*
    Initialize crop image attributes.
  */
  crop_image=CloneImage(image,page.width,page.height,MagickTrue,exception);
  if (crop_image == (Image *) NULL)
    return((Image *) NULL);
  crop_image->page.width=image->page.width;
  crop_image->page.height=image->page.height;
  if (((ssize_t) (bounding_box.x+bounding_box.width) > (ssize_t) image->page.width) ||
      ((ssize_t) (bounding_box.y+bounding_box.height) > (ssize_t) image->page.height))
    {
      crop_image->page.width=bounding_box.width;
      crop_image->page.height=bounding_box.height;
    }
  crop_image->page.x=bounding_box.x;
  crop_image->page.y=bounding_box.y;
  /*
    Crop image.  Rows are independent, so the copy loop is parallelized; the
    shared status flag makes every remaining iteration a no-op once any row
    fails.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireCacheView(image);
  crop_view=AcquireCacheView(crop_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status) omp_throttle(1)
#endif
  for (y=0; y < (ssize_t) crop_image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict crop_indexes;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,page.x,page.y+y,crop_image->columns,
      1,exception);
    q=QueueCacheViewAuthenticPixels(crop_view,0,y,crop_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    crop_indexes=GetCacheViewAuthenticIndexQueue(crop_view);
    (void) CopyMagickMemory(q,p,(size_t) crop_image->columns*sizeof(*p));
    if ((indexes != (IndexPacket *) NULL) &&
        (crop_indexes != (IndexPacket *) NULL))
      (void) CopyMagickMemory(crop_indexes,indexes,(size_t) crop_image->columns*
        sizeof(*crop_indexes));
    if (SyncCacheViewAuthenticPixels(crop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* NOTE(review): progress counts crop_image->rows iterations but is
           reported against image->rows — confirm whether that is intended. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_CropImage)
#endif
        proceed=SetImageProgress(image,CropImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  crop_view=DestroyCacheView(crop_view);
  image_view=DestroyCacheView(image_view);
  crop_image->type=image->type;
  if (status == MagickFalse)
    crop_image=DestroyImage(crop_image);
  return(crop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C r o p I m a g e T o T i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CropImageToTiles() will crop a single image into a possible list of tiles.
% This may include a single sub-region of the image. This basically applies
% all the normal geometry flags for Crop.
%
%      Image *CropImageToTiles(const Image *image,const char *crop_geometry,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%    o image: the image to crop; the resulting tile list is returned by the
%      function.
%
% o crop_geometry: A crop geometry string.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Round a real value to the nearest integer (ties round away from zero).
*/
static inline ssize_t MagickRound(MagickRealType x)
{
  return((ssize_t) (x >= 0.0 ? x+0.5 : x-0.5));
}
MagickExport Image *CropImageToTiles(const Image *image,
  const char *crop_geometry, ExceptionInfo *exception)
{
  Image
    *next,
    *crop_image;

  MagickStatusType
    flags;

  RectangleInfo
    geometry;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  crop_image=NewImageList();
  next=NewImageList();
  /*
    Parse the geometry string; the returned flags decide which of the three
    cropping modes below applies.
  */
  flags=ParseGravityGeometry(image,crop_geometry,&geometry,exception);
  if ((flags & AreaValue) != 0)
    {
      /*
      MagickBooleanType
        proceed;

      MagickProgressMonitor
        progress_monitor;

      MagickOffsetType
        i;

      MagickSizeType
        number_images;
      */
      size_t
        height,
        width;

      PointInfo
        delta,
        offset;

      RectangleInfo
        crop;

      /*
        Crop into NxM tiles (@ flag)
      */
      width=image->columns;
      height=image->rows;
      if (geometry.width == 0)
        geometry.width=1;
      if (geometry.height == 0)
        geometry.height=1;
      /*
        Without AspectValue (!) the x/y offsets shrink the tiled area;
        with it they grow it.
      */
      if ((flags & AspectValue) == 0)
        {
          width-=(geometry.x < 0 ? -1 : 1)*geometry.x;
          height-=(geometry.y < 0 ? -1 : 1)*geometry.y;
        }
      else
        {
          width+=(geometry.x < 0 ? -1 : 1)*geometry.x;
          height+=(geometry.y < 0 ? -1 : 1)*geometry.y;
        }
      /*
        Fractional tile size; each tile edge is rounded independently so the
        tiles exactly cover the area.
      */
      delta.x=(double) width/geometry.width;
      delta.y=(double) height/geometry.height;
      /*proceed=MagickTrue;
      i=0;
      number_images=geometry.width*geometry.height;*/
      for (offset.y=0; offset.y < (double) height; )
      {
        if ((flags & AspectValue) == 0)
          {
            crop.y=(ssize_t) MagickRound((MagickRealType)
              (offset.y-(geometry.y > 0 ? 0 : geometry.y)));
            offset.y+=delta.y;   /* increment now to find width */
            crop.height=(size_t) MagickRound((MagickRealType)
              (offset.y+(geometry.y < 0 ? 0 : geometry.y)));
          }
        else
          {
            crop.y=(ssize_t) MagickRound((MagickRealType)
              (offset.y-(geometry.y > 0 ? geometry.y : 0)));
            offset.y+=delta.y;   /* increment now to find width */
            crop.height=(size_t) MagickRound((MagickRealType)
              (offset.y+(geometry.y < 0 ? geometry.y : 0)));
          }
        crop.height-=crop.y;
        crop.y+=image->page.y;
        for (offset.x=0; offset.x < (double) width; )
        {
          /*progress_monitor=SetImageProgressMonitor(image,
            (MagickProgressMonitor) NULL,image->client_data);*/
          if ((flags & AspectValue) == 0)
            {
              crop.x=(ssize_t) MagickRound((MagickRealType)
                (offset.x-(geometry.x > 0 ? 0 : geometry.x)));
              offset.x+=+delta.x;  /* increment now to find height*/
              crop.width=(size_t) MagickRound((MagickRealType)
                (offset.x+(geometry.x < 0 ? 0 : geometry.x)));
            }
          else
            {
              crop.x=(ssize_t) MagickRound((MagickRealType) (offset.x-
                (geometry.x > 0 ? geometry.x : 0)));
              offset.x+=+delta.x;  /* increment now to find height */
              crop.width=(size_t) MagickRound((MagickRealType)
                (offset.x+(geometry.x < 0 ? geometry.x : 0)));
            }
          crop.width-=crop.x;
          crop.x+=image->page.x;
          next=CropImage(image,&crop,exception);
          /*(void) SetImageProgressMonitor(image,progress_monitor,
            image->client_data);
          proceed=SetImageProgress(image,CropImageTag,i++,number_images);
          if (proceed == MagickFalse)
            break;
          */
          if (next == (Image *) NULL)
            break;
          /*(void) SetImageProgressMonitor(next,progress_monitor,
            next->client_data);*/
          AppendImageToList(&crop_image,next);
        }
        if (next == (Image *) NULL)
          break;
        /*if (proceed == MagickFalse)
          break;*/
      }
      return(crop_image);
    }
  if (((geometry.width == 0) && (geometry.height == 0)) ||
      ((flags & XValue) != 0) || ((flags & YValue) != 0))
    {
      /*
        Crop a single region at +X+Y.
      */
      crop_image=CropImage(image,&geometry,exception);
      if ((crop_image != (Image *) NULL) && ((flags & AspectValue) != 0))
        {
          crop_image->page.width=geometry.width;
          crop_image->page.height=geometry.height;
          crop_image->page.x-=geometry.x;
          crop_image->page.y-=geometry.y;
        }
      return(crop_image);
    }
  if ((image->columns > geometry.width) ||
      (image->rows > geometry.height))
    {
      /*
      MagickBooleanType
        proceed;

      MagickProgressMonitor
        progress_monitor;

      MagickOffsetType
        i;

      MagickSizeType
        number_images;
      */
      size_t
        height,
        width;

      ssize_t
        x,
        y;

      RectangleInfo
        page;

      /*
        Crop into tiles of fixed size WxH.
      */
      width=geometry.width;
      if (width == 0)
        width=image->page.width;
      height=geometry.height;
      if (height == 0)
        height=image->page.height;
      page=image->page;
      if (page.width == 0)
        page.width=image->columns;
      if (image->page.height == 0)
        page.height=image->rows;
      next=NewImageList();
      /*proceed=MagickTrue;
      i=0;
      number_images=0;
      for (y=0; y < (ssize_t) page.height; y+=(ssize_t) height)
        for (x=0; x < (ssize_t) page.width; x+=(ssize_t) width)
          number_images++;
      */
      for (y=0; y < (ssize_t) page.height; y+=(ssize_t) height)
      {
        for (x=0; x < (ssize_t) page.width; x+=(ssize_t) width)
        {
          /*progress_monitor=SetImageProgressMonitor(image,
            (MagickProgressMonitor) NULL,image->client_data);*/
          geometry.width=width;
          geometry.height=height;
          geometry.x=x;
          geometry.y=y;
          next=CropImage(image,&geometry,exception);
          /*(void) SetImageProgressMonitor(image,progress_monitor,
            image->client_data);
          proceed=SetImageProgress(image,CropImageTag,i++,number_images);
          if (proceed == MagickFalse)
            break;
          */
          if (next == (Image *) NULL)
            break;
          /*(void) SetImageProgressMonitor(next,progress_monitor,
            next->client_data);*/
          AppendImageToList(&crop_image,next);
        }
        if (next == (Image *) NULL)
          break;
        /*if (proceed == MagickFalse)
          break;*/
      }
      return(crop_image);
    }
  /*
    Action of crop results in no change in image!
    This is not an error so return a clone of the image!
  */
  return(CloneImage(image,0,0,MagickTrue,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E x c e r p t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ExcerptImage() returns an excerpt of the image as defined by the geometry.
%
% The format of the ExcerptImage method is:
%
% Image *ExcerptImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to extend with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExcerptImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define ExcerptImageTag "Excerpt/Image"

  CacheView
    *excerpt_view,
    *image_view;

  Image
    *excerpt_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Allocate excerpt image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  excerpt_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
    exception);
  if (excerpt_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Excerpt each row.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireCacheView(image);
  excerpt_view=AcquireCacheView(excerpt_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) excerpt_image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict excerpt_indexes;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,geometry->x,geometry->y+y,
      geometry->width,1,exception);
    q=GetCacheViewAuthenticPixels(excerpt_view,0,y,excerpt_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) CopyMagickMemory(q,p,(size_t) excerpt_image->columns*sizeof(*q));
    /*
      The source row was fetched through the virtual pixel interface, so its
      colormap/black-channel indexes must come from the *virtual* index queue;
      the authentic queue is only valid after an authentic get/queue on the
      same view (using it here read a stale/unrelated queue).
    */
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    if (indexes != (const IndexPacket *) NULL)
      {
        excerpt_indexes=GetCacheViewAuthenticIndexQueue(excerpt_view);
        if (excerpt_indexes != (IndexPacket *) NULL)
          (void) CopyMagickMemory(excerpt_indexes,indexes,(size_t)
            excerpt_image->columns*sizeof(*excerpt_indexes));
      }
    if (SyncCacheViewAuthenticPixels(excerpt_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_ExcerptImage)
#endif
        proceed=SetImageProgress(image,ExcerptImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  excerpt_view=DestroyCacheView(excerpt_view);
  image_view=DestroyCacheView(image_view);
  excerpt_image->type=image->type;
  if (status == MagickFalse)
    excerpt_image=DestroyImage(excerpt_image);
  return(excerpt_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E x t e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExtentImage() extends the image as defined by the geometry, gravity, and
% image background color. Set the (x,y) offset of the geometry to move the
% original image relative to the extended image.
%
% The format of the ExtentImage method is:
%
% Image *ExtentImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to extend with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExtentImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
  Image
    *canvas;

  /*
    Allocate the extended canvas: clone the source at the requested geometry,
    flood it with the background color, then composite the original image at
    the negated geometry offset.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  canvas=CloneImage(image,geometry->width,geometry->height,MagickTrue,
    exception);
  if (canvas == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(canvas,DirectClass) == MagickFalse)
    {
      /*
        Promotion to DirectClass failed; propagate the error and bail out.
      */
      InheritException(exception,&canvas->exception);
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  /*
    A non-opaque background implies the result needs an alpha channel.
  */
  if (canvas->background_color.opacity != OpaqueOpacity)
    canvas->matte=MagickTrue;
  (void) SetImageBackgroundColor(canvas);
  (void) CompositeImage(canvas,image->compose,image,-geometry->x,
    -geometry->y);
  return(canvas);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l i p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FlipImage() creates a vertical mirror image by reflecting the pixels
% around the central x-axis.
%
% The format of the FlipImage method is:
%
% Image *FlipImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlipImage(const Image *image,ExceptionInfo *exception)
{
#define FlipImageTag "Flip/Image"

  CacheView
    *flip_view,
    *image_view;

  Image
    *flip_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  flip_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (flip_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Flip image: copy each source row y to destination row (rows-y-1).
  */
  status=MagickTrue;
  progress=0;
  page=image->page;
  image_view=AcquireCacheView(image);
  flip_view=AcquireCacheView(flip_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status) omp_throttle(1)
#endif
  for (y=0; y < (ssize_t) flip_image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict flip_indexes;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(flip_view,0,(ssize_t) (flip_image->rows-y-
      1),flip_image->columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) CopyMagickMemory(q,p,(size_t) image->columns*sizeof(*q));
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    if (indexes != (const IndexPacket *) NULL)
      {
        /*
          Mirror the colormap/black-channel indexes row alongside the pixels.
        */
        flip_indexes=GetCacheViewAuthenticIndexQueue(flip_view);
        if (flip_indexes != (IndexPacket *) NULL)
          (void) CopyMagickMemory(flip_indexes,indexes,(size_t) image->columns*
            sizeof(*flip_indexes));
      }
    if (SyncCacheViewAuthenticPixels(flip_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_FlipImage)
#endif
        proceed=SetImageProgress(image,FlipImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  flip_view=DestroyCacheView(flip_view);
  image_view=DestroyCacheView(image_view);
  flip_image->type=image->type;
  /*
    Mirror the page offset vertically as well so the virtual canvas follows.
  */
  if (page.height != 0)
    page.y=(ssize_t) (page.height-flip_image->rows-page.y);
  flip_image->page=page;
  if (status == MagickFalse)
    flip_image=DestroyImage(flip_image);
  return(flip_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FlopImage() creates a horizontal mirror image by reflecting the pixels
% around the central y-axis.
%
% The format of the FlopImage method is:
%
% Image *FlopImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlopImage(const Image *image,ExceptionInfo *exception)
{
#define FlopImageTag "Flop/Image"

  CacheView
    *flop_view,
    *image_view;

  Image
    *flop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  flop_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (flop_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Flop each row: pixels are written right-to-left into the destination.
  */
  status=MagickTrue;
  progress=0;
  page=image->page;
  image_view=AcquireCacheView(image);
  flop_view=AcquireCacheView(flop_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status) omp_throttle(1)
#endif
  for (y=0; y < (ssize_t) flop_image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict flop_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(flop_view,0,y,flop_image->columns,1,
      exception);
    if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Start q one past the end of the row; each (*--q)=(*p++) stores the
      next source pixel at the mirrored column.
    */
    q+=flop_image->columns;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    flop_indexes=GetCacheViewAuthenticIndexQueue(flop_view);
    for (x=0; x < (ssize_t) flop_image->columns; x++)
    {
      (*--q)=(*p++);
      if ((indexes != (const IndexPacket *) NULL) &&
          (flop_indexes != (IndexPacket *) NULL))
        flop_indexes[flop_image->columns-x-1]=indexes[x];
    }
    if (SyncCacheViewAuthenticPixels(flop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_FlopImage)
#endif
        proceed=SetImageProgress(image,FlopImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  flop_view=DestroyCacheView(flop_view);
  image_view=DestroyCacheView(image_view);
  flop_image->type=image->type;
  /*
    Mirror the page offset horizontally so the virtual canvas follows.
  */
  if (page.width != 0)
    page.x=(ssize_t) (page.width-flop_image->columns-page.x);
  flop_image->page=page;
  if (status == MagickFalse)
    flop_image=DestroyImage(flop_image);
  return(flop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RollImage() offsets an image as defined by x_offset and y_offset.
%
% The format of the RollImage method is:
%
% Image *RollImage(const Image *image,const ssize_t x_offset,
% const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x_offset: the number of columns to roll in the horizontal direction.
%
% o y_offset: the number of rows to roll in the vertical direction.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Copy a columns-by-rows block of pixels (and colormap/black-channel indexes,
  when present) from (sx,sy) in source to (dx,dy) in destination.  Returns
  MagickTrue on success, MagickFalse if any scanline could not be read,
  written, or synced.
*/
static inline MagickBooleanType CopyImageRegion(Image *destination,
  const Image *source,const size_t columns,const size_t rows,
  const ssize_t sx,const ssize_t sy,const ssize_t dx,const ssize_t dy,
  ExceptionInfo *exception)
{
  CacheView
    *source_view,
    *destination_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  status=MagickTrue;
  source_view=AcquireCacheView(source);
  destination_view=AcquireCacheView(destination);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickBooleanType
      sync;

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict destination_indexes;

    register PixelPacket
      *restrict q;

    /*
      Transfer scanline.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(source_view,sx,sy+y,columns,1,exception);
    q=GetCacheViewAuthenticPixels(destination_view,dx,dy+y,columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source_view);
    (void) CopyMagickMemory(q,p,(size_t) columns*sizeof(*p));
    if (indexes != (IndexPacket *) NULL)
      {
        destination_indexes=GetCacheViewAuthenticIndexQueue(destination_view);
        if (destination_indexes != (IndexPacket *) NULL)
          (void) CopyMagickMemory(destination_indexes,indexes,(size_t)
            columns*sizeof(*indexes));
      }
    sync=SyncCacheViewAuthenticPixels(destination_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
  }
  destination_view=DestroyCacheView(destination_view);
  source_view=DestroyCacheView(source_view);
  return(status);
}
MagickExport Image *RollImage(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define RollImageTag "Roll/Image"

  Image
    *roll_image;

  MagickStatusType
    status;

  RectangleInfo
    offset;

  /*
    Initialize roll image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  roll_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (roll_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Normalize the offsets into [0,columns) and [0,rows) so negative or
    oversized rolls wrap around.
  */
  offset.x=x_offset;
  offset.y=y_offset;
  while (offset.x < 0)
    offset.x+=(ssize_t) image->columns;
  while (offset.x >= (ssize_t) image->columns)
    offset.x-=(ssize_t) image->columns;
  while (offset.y < 0)
    offset.y+=(ssize_t) image->rows;
  while (offset.y >= (ssize_t) image->rows)
    offset.y-=(ssize_t) image->rows;
  /*
    Roll image: copy the four wrapped quadrants into place.  Accumulate
    status with &= so that a single failed quadrant copy marks the whole
    operation as failed (the previous |= reported success if ANY quadrant
    succeeded, returning a partially-filled image).
  */
  status=CopyImageRegion(roll_image,image,(size_t) offset.x,
    (size_t) offset.y,(ssize_t) image->columns-offset.x,(ssize_t) image->rows-
    offset.y,0,0,exception);
  (void) SetImageProgress(image,RollImageTag,0,3);
  status&=CopyImageRegion(roll_image,image,image->columns-offset.x,
    (size_t) offset.y,0,(ssize_t) image->rows-offset.y,offset.x,0,
    exception);
  (void) SetImageProgress(image,RollImageTag,1,3);
  status&=CopyImageRegion(roll_image,image,(size_t) offset.x,image->rows-
    offset.y,(ssize_t) image->columns-offset.x,0,0,offset.y,exception);
  (void) SetImageProgress(image,RollImageTag,2,3);
  status&=CopyImageRegion(roll_image,image,image->columns-offset.x,image->rows-
    offset.y,0,0,offset.x,offset.y,exception);
  (void) SetImageProgress(image,RollImageTag,3,3);
  roll_image->type=image->type;
  if (status == MagickFalse)
    roll_image=DestroyImage(roll_image);
  return(roll_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShaveImage() shaves pixels from the image edges. It allocates the memory
% necessary for the new Image structure and returns a pointer to the new
% image.
%
% The format of the ShaveImage method is:
%
% Image *ShaveImage(const Image *image,const RectangleInfo *shave_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o shave_image: Method ShaveImage returns a pointer to the shaved
% image. A null image is returned if there is a memory shortage or
% if the image width or height is zero.
%
% o image: the image.
%
% o shave_info: Specifies a pointer to a RectangleInfo which defines the
% region of the image to crop.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShaveImage(const Image *image,
  const RectangleInfo *shave_info,ExceptionInfo *exception)
{
  Image
    *result;

  RectangleInfo
    crop;

  /*
    Shave shave_info->width columns from each side and shave_info->height
    rows from top and bottom by delegating to CropImage, then shrink the
    virtual canvas (page) by the same amounts.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (((2*shave_info->width) >= image->columns) ||
      ((2*shave_info->height) >= image->rows))
    ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
  SetGeometry(image,&crop);
  crop.width-=2*shave_info->width;
  crop.height-=2*shave_info->height;
  crop.x=(ssize_t) shave_info->width+image->page.x;
  crop.y=(ssize_t) shave_info->height+image->page.y;
  result=CropImage(image,&crop,exception);
  if (result == (Image *) NULL)
    return((Image *) NULL);
  result->page.width-=2*shave_info->width;
  result->page.height-=2*shave_info->height;
  result->page.x-=(ssize_t) shave_info->width;
  result->page.y-=(ssize_t) shave_info->height;
  return(result);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p l i c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SpliceImage() splices a solid color into the image as defined by the
% geometry.
%
% The format of the SpliceImage method is:
%
% Image *SpliceImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to splice with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SpliceImage(const Image *image,
const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define SpliceImageTag "Splice/Image"
CacheView
*image_view,
*splice_view;
Image
*splice_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
splice_geometry;
ssize_t
y;
/*
Allocate splice image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(geometry != (const RectangleInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
splice_geometry=(*geometry);
splice_image=CloneImage(image,image->columns+splice_geometry.width,
image->rows+splice_geometry.height,MagickTrue,exception);
if (splice_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(splice_image,DirectClass) == MagickFalse)
{
InheritException(exception,&splice_image->exception);
splice_image=DestroyImage(splice_image);
return((Image *) NULL);
}
(void) SetImageBackgroundColor(splice_image);
/*
Respect image geometry.
*/
switch (image->gravity)
{
default:
case UndefinedGravity:
case NorthWestGravity:
break;
case NorthGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width/2;
break;
}
case NorthEastGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width;
break;
}
case WestGravity:
{
splice_geometry.y+=(ssize_t) splice_geometry.width/2;
break;
}
case StaticGravity:
case CenterGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width/2;
splice_geometry.y+=(ssize_t) splice_geometry.height/2;
break;
}
case EastGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width;
splice_geometry.y+=(ssize_t) splice_geometry.height/2;
break;
}
case SouthWestGravity:
{
splice_geometry.y+=(ssize_t) splice_geometry.height;
break;
}
case SouthGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width/2;
splice_geometry.y+=(ssize_t) splice_geometry.height;
break;
}
case SouthEastGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width;
splice_geometry.y+=(ssize_t) splice_geometry.height;
break;
}
}
/*
Splice image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireCacheView(image);
splice_view=AcquireCacheView(splice_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (y=0; y < (ssize_t) splice_geometry.y; y++)
{
register const PixelPacket
*restrict p;
register IndexPacket
*restrict indexes,
*restrict splice_indexes;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
splice_indexes=GetCacheViewAuthenticIndexQueue(splice_view);
for (x=0; x < splice_geometry.x; x++)
{
SetRedPixelComponent(q,GetRedPixelComponent(p));
SetGreenPixelComponent(q,GetGreenPixelComponent(p));
SetBluePixelComponent(q,GetBluePixelComponent(p));
SetOpacityPixelComponent(q,OpaqueOpacity);
if (image->matte != MagickFalse)
SetOpacityPixelComponent(q,GetOpacityPixelComponent(p));
if (image->colorspace == CMYKColorspace)
splice_indexes[x]=(*indexes++);
p++;
q++;
}
for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++)
q++;
for ( ; x < (ssize_t) splice_image->columns; x++)
{
SetRedPixelComponent(q,GetRedPixelComponent(p));
SetGreenPixelComponent(q,GetGreenPixelComponent(p));
SetBluePixelComponent(q,GetBluePixelComponent(p));
SetOpacityPixelComponent(q,OpaqueOpacity);
if (image->matte != MagickFalse)
SetOpacityPixelComponent(q,GetOpacityPixelComponent(p));
if (image->colorspace == CMYKColorspace)
splice_indexes[x]=(*indexes++);
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_TransposeImage)
#endif
proceed=SetImageProgress(image,SpliceImageTag,progress++,
splice_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (y=(ssize_t) (splice_geometry.y+splice_geometry.height);
y < (ssize_t) splice_image->rows; y++)
{
register const PixelPacket
*restrict p;
register IndexPacket
*restrict indexes,
*restrict splice_indexes;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y-(ssize_t) splice_geometry.height,
image->columns,1,exception);
if ((y < 0) || (y >= (ssize_t) splice_image->rows))
continue;
q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
splice_indexes=GetCacheViewAuthenticIndexQueue(splice_view);
for (x=0; x < splice_geometry.x; x++)
{
SetRedPixelComponent(q,GetRedPixelComponent(p));
SetGreenPixelComponent(q,GetGreenPixelComponent(p));
SetBluePixelComponent(q,GetBluePixelComponent(p));
SetOpacityPixelComponent(q,OpaqueOpacity);
if (image->matte != MagickFalse)
SetOpacityPixelComponent(q,GetOpacityPixelComponent(p));
if (image->colorspace == CMYKColorspace)
splice_indexes[x]=(*indexes++);
p++;
q++;
}
for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++)
q++;
for ( ; x < (ssize_t) splice_image->columns; x++)
{
SetRedPixelComponent(q,GetRedPixelComponent(p));
SetGreenPixelComponent(q,GetGreenPixelComponent(p));
SetBluePixelComponent(q,GetBluePixelComponent(p));
SetOpacityPixelComponent(q,OpaqueOpacity);
if (image->matte != MagickFalse)
SetOpacityPixelComponent(q,GetOpacityPixelComponent(p));
if (image->colorspace == CMYKColorspace)
splice_indexes[x]=(*indexes++);
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_TransposeImage)
#endif
proceed=SetImageProgress(image,SpliceImageTag,progress++,
splice_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
splice_view=DestroyCacheView(splice_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
splice_image=DestroyImage(splice_image);
return(splice_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformImage() is a convenience method that behaves like ResizeImage() or
% CropImage() but accepts scaling and/or cropping information as a region
% geometry specification. If the operation fails, the original image handle
% is left as is.
%
% This should only be used for single images.
%
% The format of the TransformImage method is:
%
% MagickBooleanType TransformImage(Image **image,const char *crop_geometry,
% const char *image_geometry)
%
% A description of each parameter follows:
%
% o image: the image.  The transformed image is returned as this parameter.
%
% o crop_geometry: A crop geometry string. This geometry defines a
% subregion of the image to crop.
%
% o image_geometry: An image geometry string. This geometry defines the
% final size of the image.
%
*/
/*
DANGER: This function destroys what it assumes to be a single image list.
If the input image is part of a larger list, all other images in that list
will be simply 'lost', not destroyed.
Also if the crop generates a list of images only the first image is resized.
And finally if the crop succeeds and the resize failed, you will get a
cropped image, as well as a 'false' or 'failed' report.
This function should probably be deprecated in favor of direct calls
to CropImageToTiles() or ResizeImage(), as appropriate.
*/
MagickExport MagickBooleanType TransformImage(Image **image,
const char *crop_geometry,const char *image_geometry)
{
/*
Crop and/or resize *image in place.  Whenever a replacement image is
produced, the previous handle is destroyed and *image is updated to
point at the replacement, so callers must not retain the old pointer.
*/
Image
*resize_image,
*transform_image;
MagickStatusType
flags;
RectangleInfo
geometry;
assert(image != (Image **) NULL);
assert((*image)->signature == MagickSignature);
if ((*image)->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
transform_image=(*image);
if (crop_geometry != (const char *) NULL)
{
Image
*crop_image;
/*
Crop image to a user specified size.  CropImageToTiles() may produce a
list of images; only the first image in that list is carried forward.
NOTE(review): on crop failure the original *image is replaced by a
clone without the original being destroyed (possible leak), and if the
clone itself fails *image becomes NULL yet is dereferenced below when
image_geometry is non-NULL -- confirm intended ownership semantics.
*/
crop_image=CropImageToTiles(*image,crop_geometry,&(*image)->exception);
if (crop_image == (Image *) NULL)
transform_image=CloneImage(*image,0,0,MagickTrue,&(*image)->exception);
else
{
transform_image=DestroyImage(transform_image);
transform_image=GetFirstImageInList(crop_image);
}
*image=transform_image;
}
if (image_geometry == (const char *) NULL)
return(MagickTrue);
/*
Scale image to a user specified size.
*/
flags=ParseRegionGeometry(transform_image,image_geometry,&geometry,
&(*image)->exception);
(void) flags;  /* flags is not consulted; cast silences unused warnings */
if ((transform_image->columns == geometry.width) &&
(transform_image->rows == geometry.height))
return(MagickTrue);  /* already the requested size: nothing to do */
resize_image=ResizeImage(transform_image,geometry.width,geometry.height,
transform_image->filter,transform_image->blur,&(*image)->exception);
if (resize_image == (Image *) NULL)
return(MagickFalse);  /* any earlier crop result is still in *image */
transform_image=DestroyImage(transform_image);
transform_image=resize_image;
*image=transform_image;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f o r m I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformImages() calls TransformImage() on each image of a sequence.
%
% The format of the TransformImages method is:
%
% MagickBooleanType TransformImages(Image **images,
% const char *crop_geometry,const char *image_geometry)
%
% A description of each parameter follows:
%
% o images: the image sequence.  The transformed images are returned as
% this parameter.
%
% o crop_geometry: A crop geometry string. This geometry defines a
% subregion of the image to crop.
%
% o image_geometry: An image geometry string. This geometry defines the
% final size of the image.
%
*/
MagickExport MagickBooleanType TransformImages(Image **images,
  const char *crop_geometry,const char *image_geometry)
{
  Image
    *image,
    **image_list,
    *transform_images;

  MagickStatusType
    status;

  register ssize_t
    i;

  /*
    Apply TransformImage() to each image of the sequence and rebuild the
    list from the results.  *images is replaced by the new list; the flat
    pointer array obtained from ImageListToArray() is released before
    returning.  Returns MagickFalse if the array cannot be allocated or
    if any per-image transform fails.
  */
  assert(images != (Image **) NULL);
  assert((*images)->signature == MagickSignature);
  if ((*images)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      (*images)->filename);
  image_list=ImageListToArray(*images,&(*images)->exception);
  if (image_list == (Image **) NULL)
    return(MagickFalse);
  status=MagickTrue;
  transform_images=NewImageList();
  for (i=0; image_list[i] != (Image *) NULL; i++)
  {
    image=image_list[i];
    /*
      Bug fix: accumulate with &= rather than |=.  With status initialized
      to MagickTrue, "status|=" could never become zero, so the function
      always reported success even when a transform failed.
    */
    status&=TransformImage(&image,crop_geometry,image_geometry);
    AppendImageToList(&transform_images,image);
  }
  *images=transform_images;
  image_list=(Image **) RelinquishMagickMemory(image_list);
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s p o s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransposeImage() creates a horizontal mirror image by reflecting the pixels
% around the central y-axis while rotating them by 90 degrees.
%
% The format of the TransposeImage method is:
%
% Image *TransposeImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TransposeImage(const Image *image,ExceptionInfo *exception)
{
#define TransposeImageTag "Transpose/Image"
/*
Build a new image of size rows x columns in which source row y is
written as destination column (rows-y-1); pixel rows are copied whole
with CopyMagickMemory.  Returns NULL on allocation or pixel-access
failure (errors reported through exception).
*/
CacheView
*image_view,
*transpose_view;
Image
*transpose_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
page;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
/* Destination dimensions are swapped: rows x columns. */
transpose_image=CloneImage(image,image->rows,image->columns,MagickTrue,
exception);
if (transpose_image == (Image *) NULL)
return((Image *) NULL);
/*
Transpose image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireCacheView(image);
transpose_view=AcquireCacheView(transpose_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*restrict p;
register IndexPacket
*restrict transpose_indexes,
*restrict indexes;
register PixelPacket
*restrict q;
/* Once any iteration fails, remaining iterations bail out quickly. */
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-y-1,
image->columns,1,exception);
/* Destination region is one column wide and transpose_image->rows tall. */
q=QueueCacheViewAuthenticPixels(transpose_view,(ssize_t) (image->rows-y-1),
0,1,transpose_image->rows,exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
(void) CopyMagickMemory(q,p,(size_t) image->columns*sizeof(*q));
indexes=GetCacheViewAuthenticIndexQueue(image_view);
if (indexes != (IndexPacket *) NULL)
{
/* Mirror the index channel (colormap/CMYK) alongside the pixel data. */
transpose_indexes=GetCacheViewAuthenticIndexQueue(transpose_view);
if (transpose_indexes != (IndexPacket *) NULL)
(void) CopyMagickMemory(transpose_indexes,indexes,(size_t)
image->columns*sizeof(*transpose_indexes));
}
if (SyncCacheViewAuthenticPixels(transpose_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_TransposeImage)
#endif
proceed=SetImageProgress(image,TransposeImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
transpose_view=DestroyCacheView(transpose_view);
image_view=DestroyCacheView(image_view);
transpose_image->type=image->type;
/* Swap the virtual canvas (page) geometry to match the rotated image. */
page=transpose_image->page;
Swap(page.width,page.height);
Swap(page.x,page.y);
transpose_image->page=page;
if (status == MagickFalse)
transpose_image=DestroyImage(transpose_image);
return(transpose_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s v e r s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransverseImage() creates a vertical mirror image by reflecting the pixels
% around the central x-axis while rotating them by 270 degrees.
%
% The format of the TransverseImage method is:
%
% Image *TransverseImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TransverseImage(const Image *image,ExceptionInfo *exception)
{
#define TransverseImageTag "Transverse/Image"
/*
Build a new image of size rows x columns in which source row y is
written, pixel-reversed, into destination column (rows-y-1).  Returns
NULL on allocation or pixel-access failure (errors reported through
exception).
*/
CacheView
*image_view,
*transverse_view;
Image
*transverse_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
page;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
/* Destination dimensions are swapped: rows x columns. */
transverse_image=CloneImage(image,image->rows,image->columns,MagickTrue,
exception);
if (transverse_image == (Image *) NULL)
return((Image *) NULL);
/*
Transverse image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireCacheView(image);
transverse_view=AcquireCacheView(transverse_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register const PixelPacket
*restrict p;
register IndexPacket
*restrict transverse_indexes,
*restrict indexes;
register ssize_t
x;
register PixelPacket
*restrict q;
/* Once any iteration fails, remaining iterations bail out quickly. */
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
/* Destination region is one column wide and transverse_image->rows tall. */
q=QueueCacheViewAuthenticPixels(transverse_view,(ssize_t) (image->rows-y-
1),0,1,transverse_image->rows,exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
/* Copy the row into the column in reverse order (pixel mirror). */
q+=image->columns;
for (x=0; x < (ssize_t) image->columns; x++)
*--q=(*p++);
indexes=GetCacheViewAuthenticIndexQueue(image_view);
if (indexes != (IndexPacket *) NULL)
{
/* Mirror the index channel (colormap/CMYK) in the same reversed order. */
transverse_indexes=GetCacheViewAuthenticIndexQueue(transverse_view);
if (transverse_indexes != (IndexPacket *) NULL)
for (x=0; x < (ssize_t) image->columns; x++)
transverse_indexes[image->columns-x-1]=indexes[x];
}
sync=SyncCacheViewAuthenticPixels(transverse_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_TransverseImage)
#endif
proceed=SetImageProgress(image,TransverseImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
transverse_view=DestroyCacheView(transverse_view);
image_view=DestroyCacheView(image_view);
transverse_image->type=image->type;
/* Swap the page geometry and re-anchor the offsets within the canvas. */
page=transverse_image->page;
Swap(page.width,page.height);
Swap(page.x,page.y);
if (page.width != 0)
page.x=(ssize_t) (page.width-transverse_image->columns-page.x);
if (page.height != 0)
page.y=(ssize_t) (page.height-transverse_image->rows-page.y);
transverse_image->page=page;
if (status == MagickFalse)
transverse_image=DestroyImage(transverse_image);
return(transverse_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r i m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TrimImage() trims pixels from the image edges. It allocates the memory
% necessary for the new Image structure and returns a pointer to the new
% image.
%
% The format of the TrimImage method is:
%
% Image *TrimImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TrimImage(const Image *image,ExceptionInfo *exception)
{
  Image
    *blank_image;

  RectangleInfo
    bounds;

  /*
    Crop the image down to the bounding box of its non-border content.
    When the bounding box is degenerate (the whole image matches the
    border), return a 1x1 transparent placeholder instead of failing.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  bounds=GetImageBoundingBox(image,exception);
  if ((bounds.width != 0) && (bounds.height != 0))
    {
      /*
        Non-degenerate box: translate into page coordinates and crop.
      */
      bounds.x+=image->page.x;
      bounds.y+=image->page.y;
      return(CropImage(image,&bounds,exception));
    }
  /*
    Nothing to keep: hand back a single transparent pixel whose page
    offsets are set to -1 to mark it as a trimmed-away placeholder.
  */
  blank_image=CloneImage(image,1,1,MagickTrue,exception);
  if (blank_image == (Image *) NULL)
    return((Image *) NULL);
  blank_image->background_color.opacity=(Quantum) TransparentOpacity;
  (void) SetImageBackgroundColor(blank_image);
  blank_image->page=image->page;
  blank_image->page.x=(-1);
  blank_image->page.y=(-1);
  return(blank_image);
}
|
ast-dump-openmp-declare-variant-extensions.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s | FileCheck %s
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s -x c++| FileCheck %s
// expected-no-diagnostics
int picked1() { return 0; }
int picked2() { return 0; }
int picked3();
int picked4();
int picked5() { return 0; }
int picked6() { return 0; }
int picked7() { return 0; }
int not_picked1() { return 1; }
int not_picked2() { return 2; }
int not_picked3();
int not_picked4();
int not_picked5();
int not_picked6();
#pragma omp declare variant(picked1) match(implementation={extension(match_any)}, device={kind(cpu, gpu)})
int base1() { return 3; }
#pragma omp declare variant(picked2) match(implementation={extension(match_none)}, device={kind(gpu, fpga)})
int base2() { return 4; }
#pragma omp declare variant(picked3) match(implementation={vendor(pgi), extension(match_any)}, device={kind(cpu, gpu)})
int base3() { return 5; }
#pragma omp declare variant(picked4) match(user={condition(0)}, implementation={extension(match_none)}, device={kind(gpu, fpga)})
int base4() { return 6; }
#pragma omp declare variant(picked5) match(user={condition(1)}, implementation={extension(match_all)}, device={kind(cpu)})
int base5() { return 7; }
#pragma omp declare variant(not_picked1) match(implementation={extension(match_any)}, device={kind(gpu, fpga)})
int base6() { return 0; }
#pragma omp declare variant(not_picked2) match(implementation={extension(match_none)}, device={kind(gpu, cpu)})
int base7() { return 0; }
#pragma omp declare variant(not_picked3) match(implementation={vendor(llvm), extension(match_any)}, device={kind(fpga, gpu)})
int base8() { return 0; }
#pragma omp declare variant(not_picked4) match(user={condition(1)}, implementation={extension(match_none)}, device={kind(gpu, fpga)})
int base9() { return 0; }
#pragma omp declare variant(not_picked5) match(user={condition(1)}, implementation={extension(match_all)}, device={kind(cpu, gpu)})
int base10() { return 0; }
#pragma omp declare variant(not_picked6) match(implementation={extension(match_any)})
int base11() { return 0; }
#pragma omp declare variant(picked6) match(implementation={extension(match_all)})
int base12() { return 8; }
#pragma omp declare variant(picked7) match(implementation={extension(match_none)})
int base13() { return 9; }
#pragma omp begin declare variant match(implementation={extension(match_any)}, device={kind(cpu, gpu)})
int overloaded1() { return 0; }
#pragma omp end declare variant
int overloaded2() { return 1; }
#pragma omp begin declare variant match(implementation={extension(match_none)}, device={kind(fpga, gpu)})
int overloaded2() { return 0; }
#pragma omp end declare variant
#pragma omp begin declare variant match(implementation={extension(match_none)}, device={kind(cpu)})
NOT PARSED
#pragma omp end declare variant
int picked3() { return 0; }
int picked4() { return 0; }
int not_picked3() { return 10; }
int not_picked4() { return 11; }
int not_picked5() { return 12; }
int not_picked6() { return 13; }
int test() {
// Should return 0.
return base1() + base2() + base3() + base4() + base5() + base6() + base7() +
base8() + base9() + base10() + base11() + base12() + base13() +
overloaded1() + overloaded2();
}
// 1) All "picked" versions are called but none of the "non_picked" ones is.
// 2) The overloaded functions that return 0 are called.
// CHECK: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, col:27> col:5 referenced picked1 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_1:0x[a-z0-9]*]] <col:15, col:27>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_2:0x[a-z0-9]*]] <col:17, col:24>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_3:0x[a-z0-9]*]] <col:24> 'int' 0
// CHECK-NEXT: |-FunctionDecl [[ADDR_4:0x[a-z0-9]*]] <line:6:1, col:27> col:5 referenced picked2 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_5:0x[a-z0-9]*]] <col:15, col:27>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_6:0x[a-z0-9]*]] <col:17, col:24>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_7:0x[a-z0-9]*]] <col:24> 'int' 0
// CHECK-NEXT: |-FunctionDecl [[ADDR_8:0x[a-z0-9]*]] <line:7:1, col:13> col:5 referenced picked3 'int ({{.*}})'
// CHECK-NEXT: |-FunctionDecl [[ADDR_9:0x[a-z0-9]*]] <line:8:1, col:13> col:5 referenced picked4 'int ({{.*}})'
// CHECK-NEXT: |-FunctionDecl [[ADDR_10:0x[a-z0-9]*]] <line:9:1, col:27> col:5 referenced picked5 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_11:0x[a-z0-9]*]] <col:15, col:27>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_12:0x[a-z0-9]*]] <col:17, col:24>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_13:0x[a-z0-9]*]] <col:24> 'int' 0
// CHECK-NEXT: |-FunctionDecl [[ADDR_14:0x[a-z0-9]*]] <line:10:1, col:27> col:5 referenced picked6 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_15:0x[a-z0-9]*]] <col:15, col:27>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_16:0x[a-z0-9]*]] <col:17, col:24>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_17:0x[a-z0-9]*]] <col:24> 'int' 0
// CHECK-NEXT: |-FunctionDecl [[ADDR_18:0x[a-z0-9]*]] <line:11:1, col:27> col:5 referenced picked7 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_19:0x[a-z0-9]*]] <col:15, col:27>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_20:0x[a-z0-9]*]] <col:17, col:24>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_21:0x[a-z0-9]*]] <col:24> 'int' 0
// CHECK-NEXT: |-FunctionDecl [[ADDR_22:0x[a-z0-9]*]] <line:12:1, col:31> col:5 referenced not_picked1 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_23:0x[a-z0-9]*]] <col:19, col:31>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_24:0x[a-z0-9]*]] <col:21, col:28>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_25:0x[a-z0-9]*]] <col:28> 'int' 1
// CHECK-NEXT: |-FunctionDecl [[ADDR_26:0x[a-z0-9]*]] <line:13:1, col:31> col:5 referenced not_picked2 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_27:0x[a-z0-9]*]] <col:19, col:31>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_28:0x[a-z0-9]*]] <col:21, col:28>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_29:0x[a-z0-9]*]] <col:28> 'int' 2
// CHECK-NEXT: |-FunctionDecl [[ADDR_30:0x[a-z0-9]*]] <line:14:1, col:17> col:5 referenced not_picked3 'int ({{.*}})'
// CHECK-NEXT: |-FunctionDecl [[ADDR_31:0x[a-z0-9]*]] <line:15:1, col:17> col:5 referenced not_picked4 'int ({{.*}})'
// CHECK-NEXT: |-FunctionDecl [[ADDR_32:0x[a-z0-9]*]] <line:16:1, col:17> col:5 referenced not_picked5 'int ({{.*}})'
// CHECK-NEXT: |-FunctionDecl [[ADDR_33:0x[a-z0-9]*]] <line:17:1, col:17> col:5 referenced not_picked6 'int ({{.*}})'
// CHECK-NEXT: |-FunctionDecl [[ADDR_34:0x[a-z0-9]*]] <line:20:1, col:25> col:5 used base1 'int ({{.*}})'
// CHECK-NEXT: | |-CompoundStmt [[ADDR_35:0x[a-z0-9]*]] <col:13, col:25>
// CHECK-NEXT: | | `-ReturnStmt [[ADDR_36:0x[a-z0-9]*]] <col:15, col:22>
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_37:0x[a-z0-9]*]] <col:22> 'int' 3
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_38:0x[a-z0-9]*]] <line:19:1, col:107> Implicit implementation={extension(match_any)}, device={kind(cpu, gpu)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_39:0x[a-z0-9]*]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'picked1' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: |-FunctionDecl [[ADDR_40:0x[a-z0-9]*]] <line:23:1, col:25> col:5 used base2 'int ({{.*}})'
// CHECK-NEXT: | |-CompoundStmt [[ADDR_41:0x[a-z0-9]*]] <col:13, col:25>
// CHECK-NEXT: | | `-ReturnStmt [[ADDR_42:0x[a-z0-9]*]] <col:15, col:22>
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_43:0x[a-z0-9]*]] <col:22> 'int' 4
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_44:0x[a-z0-9]*]] <line:22:1, col:109> Implicit implementation={extension(match_none)}, device={kind(gpu, fpga)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_45:0x[a-z0-9]*]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_4]] 'picked2' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: |-FunctionDecl [[ADDR_46:0x[a-z0-9]*]] <line:26:1, col:25> col:5 used base3 'int ({{.*}})'
// CHECK-NEXT: | |-CompoundStmt [[ADDR_47:0x[a-z0-9]*]] <col:13, col:25>
// CHECK-NEXT: | | `-ReturnStmt [[ADDR_48:0x[a-z0-9]*]] <col:15, col:22>
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_49:0x[a-z0-9]*]] <col:22> 'int' 5
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_50:0x[a-z0-9]*]] <line:25:1, col:120> Implicit implementation={vendor(pgi), extension(match_any)}, device={kind(cpu, gpu)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_51:0x[a-z0-9]*]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_8]] 'picked3' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: |-FunctionDecl [[ADDR_52:0x[a-z0-9]*]] <line:29:1, col:25> col:5 used base4 'int ({{.*}})'
// CHECK-NEXT: | |-CompoundStmt [[ADDR_53:0x[a-z0-9]*]] <col:13, col:25>
// CHECK-NEXT: | | `-ReturnStmt [[ADDR_54:0x[a-z0-9]*]] <col:15, col:22>
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_55:0x[a-z0-9]*]] <col:22> 'int' 6
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_56:0x[a-z0-9]*]] <line:28:1, col:130> Implicit user={condition(0)}, implementation={extension(match_none)}, device={kind(gpu, fpga)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_57:0x[a-z0-9]*]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_9]] 'picked4' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: |-FunctionDecl [[ADDR_58:0x[a-z0-9]*]] <line:32:1, col:25> col:5 used base5 'int ({{.*}})'
// CHECK-NEXT: | |-CompoundStmt [[ADDR_59:0x[a-z0-9]*]] <col:13, col:25>
// CHECK-NEXT: | | `-ReturnStmt [[ADDR_60:0x[a-z0-9]*]] <col:15, col:22>
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_61:0x[a-z0-9]*]] <col:22> 'int' 7
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_62:0x[a-z0-9]*]] <line:31:1, col:123> Implicit user={condition(1)}, implementation={extension(match_all)}, device={kind(cpu)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_63:0x[a-z0-9]*]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_10]] 'picked5' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: |-FunctionDecl [[ADDR_64:0x[a-z0-9]*]] <line:35:1, col:25> col:5 used base6 'int ({{.*}})'
// CHECK-NEXT: | |-CompoundStmt [[ADDR_65:0x[a-z0-9]*]] <col:13, col:25>
// CHECK-NEXT: | | `-ReturnStmt [[ADDR_66:0x[a-z0-9]*]] <col:15, col:22>
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_67:0x[a-z0-9]*]] <col:22> 'int' 0
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_68:0x[a-z0-9]*]] <line:34:1, col:112> Implicit implementation={extension(match_any)}, device={kind(gpu, fpga)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_69:0x[a-z0-9]*]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_22]] 'not_picked1' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: |-FunctionDecl [[ADDR_70:0x[a-z0-9]*]] <line:38:1, col:25> col:5 used base7 'int ({{.*}})'
// CHECK-NEXT: | |-CompoundStmt [[ADDR_71:0x[a-z0-9]*]] <col:13, col:25>
// CHECK-NEXT: | | `-ReturnStmt [[ADDR_72:0x[a-z0-9]*]] <col:15, col:22>
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_73:0x[a-z0-9]*]] <col:22> 'int' 0
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_74:0x[a-z0-9]*]] <line:37:1, col:112> Implicit implementation={extension(match_none)}, device={kind(gpu, cpu)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_75:0x[a-z0-9]*]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_26]] 'not_picked2' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: |-FunctionDecl [[ADDR_76:0x[a-z0-9]*]] <line:41:1, col:25> col:5 used base8 'int ({{.*}})'
// CHECK-NEXT: | |-CompoundStmt [[ADDR_77:0x[a-z0-9]*]] <col:13, col:25>
// CHECK-NEXT: | | `-ReturnStmt [[ADDR_78:0x[a-z0-9]*]] <col:15, col:22>
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_79:0x[a-z0-9]*]] <col:22> 'int' 0
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_80:0x[a-z0-9]*]] <line:40:1, col:126> Implicit implementation={vendor(llvm), extension(match_any)}, device={kind(fpga, gpu)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_81:0x[a-z0-9]*]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_30]] 'not_picked3' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: |-FunctionDecl [[ADDR_82:0x[a-z0-9]*]] <line:44:1, col:25> col:5 used base9 'int ({{.*}})'
// CHECK-NEXT: | |-CompoundStmt [[ADDR_83:0x[a-z0-9]*]] <col:13, col:25>
// CHECK-NEXT: | | `-ReturnStmt [[ADDR_84:0x[a-z0-9]*]] <col:15, col:22>
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_85:0x[a-z0-9]*]] <col:22> 'int' 0
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_86:0x[a-z0-9]*]] <line:43:1, col:134> Implicit user={condition(1)}, implementation={extension(match_none)}, device={kind(gpu, fpga)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_87:0x[a-z0-9]*]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_31]] 'not_picked4' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: |-FunctionDecl [[ADDR_88:0x[a-z0-9]*]] <line:47:1, col:26> col:5 used base10 'int ({{.*}})'
// CHECK-NEXT: | |-CompoundStmt [[ADDR_89:0x[a-z0-9]*]] <col:14, col:26>
// CHECK-NEXT: | | `-ReturnStmt [[ADDR_90:0x[a-z0-9]*]] <col:16, col:23>
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_91:0x[a-z0-9]*]] <col:23> 'int' 0
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_92:0x[a-z0-9]*]] <line:46:1, col:132> Implicit user={condition(1)}, implementation={extension(match_all)}, device={kind(cpu, gpu)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_93:0x[a-z0-9]*]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_32]] 'not_picked5' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: |-FunctionDecl [[ADDR_94:0x[a-z0-9]*]] <line:50:1, col:26> col:5 used base11 'int ({{.*}})'
// CHECK-NEXT: | |-CompoundStmt [[ADDR_95:0x[a-z0-9]*]] <col:14, col:26>
// CHECK-NEXT: | | `-ReturnStmt [[ADDR_96:0x[a-z0-9]*]] <col:16, col:23>
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_97:0x[a-z0-9]*]] <col:23> 'int' 0
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_98:0x[a-z0-9]*]] <line:49:1, col:86> Implicit implementation={extension(match_any)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_99:0x[a-z0-9]*]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_33]] 'not_picked6' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: |-FunctionDecl [[ADDR_100:0x[a-z0-9]*]] <line:53:1, col:26> col:5 used base12 'int ({{.*}})'
// CHECK-NEXT: | |-CompoundStmt [[ADDR_101:0x[a-z0-9]*]] <col:14, col:26>
// CHECK-NEXT: | | `-ReturnStmt [[ADDR_102:0x[a-z0-9]*]] <col:16, col:23>
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_103:0x[a-z0-9]*]] <col:23> 'int' 8
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_104:0x[a-z0-9]*]] <line:52:1, col:82> Implicit implementation={extension(match_all)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_105:0x[a-z0-9]*]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_14]] 'picked6' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: |-FunctionDecl [[ADDR_106:0x[a-z0-9]*]] <line:56:1, col:26> col:5 used base13 'int ({{.*}})'
// CHECK-NEXT: | |-CompoundStmt [[ADDR_107:0x[a-z0-9]*]] <col:14, col:26>
// CHECK-NEXT: | | `-ReturnStmt [[ADDR_108:0x[a-z0-9]*]] <col:16, col:23>
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_109:0x[a-z0-9]*]] <col:23> 'int' 9
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_110:0x[a-z0-9]*]] <line:55:1, col:83> Implicit implementation={extension(match_none)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_111:0x[a-z0-9]*]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_18]] 'picked7' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: |-FunctionDecl [[ADDR_112:0x[a-z0-9]*]] <line:59:1, col:17> col:5 implicit used overloaded1 'int ({{.*}})'
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_113:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={extension(match_any)}, device={kind(cpu, gpu)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_114:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' {{.*}}Function [[ADDR_115:0x[a-z0-9]*]] 'overloaded1[implementation={extension(match_any)}, device={kind(cpu, gpu)}]' 'int ({{.*}})'
// CHECK-NEXT: |-FunctionDecl [[ADDR_115]] <col:1, col:31> col:1 overloaded1[implementation={extension(match_any)}, device={kind(cpu, gpu)}] 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_116:0x[a-z0-9]*]] <col:19, col:31>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_117:0x[a-z0-9]*]] <col:21, col:28>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_118:0x[a-z0-9]*]] <col:28> 'int' 0
// CHECK-NEXT: |-FunctionDecl [[ADDR_119:0x[a-z0-9]*]] <line:62:1, col:31> col:5 used overloaded2 'int ({{.*}})'
// CHECK-NEXT: | |-CompoundStmt [[ADDR_120:0x[a-z0-9]*]] <col:19, col:31>
// CHECK-NEXT: | | `-ReturnStmt [[ADDR_121:0x[a-z0-9]*]] <col:21, col:28>
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_122:0x[a-z0-9]*]] <col:28> 'int' 1
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_123:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={extension(match_none)}, device={kind(fpga, gpu)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_124:0x[a-z0-9]*]] <line:64:1> 'int ({{.*}})' {{.*}}Function [[ADDR_125:0x[a-z0-9]*]] 'overloaded2[implementation={extension(match_none)}, device={kind(fpga, gpu)}]' 'int ({{.*}})'
// CHECK-NEXT: |-FunctionDecl [[ADDR_125]] <col:1, col:31> col:1 overloaded2[implementation={extension(match_none)}, device={kind(fpga, gpu)}] 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_126:0x[a-z0-9]*]] <col:19, col:31>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_127:0x[a-z0-9]*]] <col:21, col:28>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_128:0x[a-z0-9]*]] <col:28> 'int' 0
// CHECK-NEXT: |-FunctionDecl [[ADDR_129:0x[a-z0-9]*]] prev [[ADDR_8]] <line:72:1, col:27> col:5 picked3 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_130:0x[a-z0-9]*]] <col:15, col:27>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_131:0x[a-z0-9]*]] <col:17, col:24>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_132:0x[a-z0-9]*]] <col:24> 'int' 0
// CHECK-NEXT: |-FunctionDecl [[ADDR_133:0x[a-z0-9]*]] prev [[ADDR_9]] <line:73:1, col:27> col:5 picked4 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_134:0x[a-z0-9]*]] <col:15, col:27>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_135:0x[a-z0-9]*]] <col:17, col:24>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_136:0x[a-z0-9]*]] <col:24> 'int' 0
// CHECK-NEXT: |-FunctionDecl [[ADDR_137:0x[a-z0-9]*]] prev [[ADDR_30]] <line:74:1, col:32> col:5 not_picked3 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_138:0x[a-z0-9]*]] <col:19, col:32>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_139:0x[a-z0-9]*]] <col:21, col:28>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_140:0x[a-z0-9]*]] <col:28> 'int' 10
// CHECK-NEXT: |-FunctionDecl [[ADDR_141:0x[a-z0-9]*]] prev [[ADDR_31]] <line:75:1, col:32> col:5 not_picked4 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_142:0x[a-z0-9]*]] <col:19, col:32>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_143:0x[a-z0-9]*]] <col:21, col:28>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_144:0x[a-z0-9]*]] <col:28> 'int' 11
// CHECK-NEXT: |-FunctionDecl [[ADDR_145:0x[a-z0-9]*]] prev [[ADDR_32]] <line:76:1, col:32> col:5 not_picked5 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_146:0x[a-z0-9]*]] <col:19, col:32>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_147:0x[a-z0-9]*]] <col:21, col:28>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_148:0x[a-z0-9]*]] <col:28> 'int' 12
// CHECK-NEXT: |-FunctionDecl [[ADDR_149:0x[a-z0-9]*]] prev [[ADDR_33]] <line:77:1, col:32> col:5 not_picked6 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_150:0x[a-z0-9]*]] <col:19, col:32>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_151:0x[a-z0-9]*]] <col:21, col:28>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_152:0x[a-z0-9]*]] <col:28> 'int' 13
// CHECK-NEXT: `-FunctionDecl [[ADDR_153:0x[a-z0-9]*]] <line:79:1, line:84:1> line:79:5 test 'int ({{.*}})'
// CHECK-NEXT: `-CompoundStmt [[ADDR_154:0x[a-z0-9]*]] <col:12, line:84:1>
// CHECK-NEXT: `-ReturnStmt [[ADDR_155:0x[a-z0-9]*]] <line:81:3, line:83:38>
// CHECK-NEXT: `-BinaryOperator [[ADDR_156:0x[a-z0-9]*]] <line:81:10, line:83:38> 'int' '+'
// CHECK-NEXT: |-BinaryOperator [[ADDR_157:0x[a-z0-9]*]] <line:81:10, line:83:22> 'int' '+'
// CHECK-NEXT: | |-BinaryOperator [[ADDR_158:0x[a-z0-9]*]] <line:81:10, line:82:70> 'int' '+'
// CHECK-NEXT: | | |-BinaryOperator [[ADDR_159:0x[a-z0-9]*]] <line:81:10, line:82:59> 'int' '+'
// CHECK-NEXT: | | | |-BinaryOperator [[ADDR_160:0x[a-z0-9]*]] <line:81:10, line:82:48> 'int' '+'
// CHECK-NEXT: | | | | |-BinaryOperator [[ADDR_161:0x[a-z0-9]*]] <line:81:10, line:82:37> 'int' '+'
// CHECK-NEXT: | | | | | |-BinaryOperator [[ADDR_162:0x[a-z0-9]*]] <line:81:10, line:82:26> 'int' '+'
// CHECK-NEXT: | | | | | | |-BinaryOperator [[ADDR_163:0x[a-z0-9]*]] <line:81:10, line:82:16> 'int' '+'
// CHECK-NEXT: | | | | | | | |-BinaryOperator [[ADDR_164:0x[a-z0-9]*]] <line:81:10, col:76> 'int' '+'
// CHECK-NEXT: | | | | | | | | |-BinaryOperator [[ADDR_165:0x[a-z0-9]*]] <col:10, col:66> 'int' '+'
// CHECK-NEXT: | | | | | | | | | |-BinaryOperator [[ADDR_166:0x[a-z0-9]*]] <col:10, col:56> 'int' '+'
// CHECK-NEXT: | | | | | | | | | | |-BinaryOperator [[ADDR_167:0x[a-z0-9]*]] <col:10, col:46> 'int' '+'
// CHECK-NEXT: | | | | | | | | | | | |-BinaryOperator [[ADDR_168:0x[a-z0-9]*]] <col:10, col:36> 'int' '+'
// CHECK-NEXT: | | | | | | | | | | | | |-BinaryOperator [[ADDR_169:0x[a-z0-9]*]] <col:10, col:26> 'int' '+'
// CHECK-NEXT: | | | | | | | | | | | | | |-PseudoObjectExpr [[ADDR_170:0x[a-z0-9]*]] <col:10, col:16> 'int'
// CHECK-NEXT: | | | | | | | | | | | | | | |-CallExpr [[ADDR_171:0x[a-z0-9]*]] <col:10, col:16> 'int'
// CHECK-NEXT: | | | | | | | | | | | | | | | `-ImplicitCastExpr [[ADDR_172:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | | | | | | | | | | | `-DeclRefExpr [[ADDR_173:0x[a-z0-9]*]] <col:10> 'int ({{.*}})' {{.*}}Function [[ADDR_34]] 'base1' 'int ({{.*}})'
// CHECK-NEXT: | | | | | | | | | | | | | | `-CallExpr [[ADDR_174:0x[a-z0-9]*]] <line:19:29, line:81:16> 'int'
// CHECK-NEXT: | | | | | | | | | | | | | | `-ImplicitCastExpr [[ADDR_175:0x[a-z0-9]*]] <line:19:29> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | | | | | | | | | | `-DeclRefExpr [[ADDR_39]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'picked1' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: | | | | | | | | | | | | | `-PseudoObjectExpr [[ADDR_176:0x[a-z0-9]*]] <line:81:20, col:26> 'int'
// CHECK-NEXT: | | | | | | | | | | | | | |-CallExpr [[ADDR_177:0x[a-z0-9]*]] <col:20, col:26> 'int'
// CHECK-NEXT: | | | | | | | | | | | | | | `-ImplicitCastExpr [[ADDR_178:0x[a-z0-9]*]] <col:20> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | | | | | | | | | | `-DeclRefExpr [[ADDR_179:0x[a-z0-9]*]] <col:20> 'int ({{.*}})' {{.*}}Function [[ADDR_40]] 'base2' 'int ({{.*}})'
// CHECK-NEXT: | | | | | | | | | | | | | `-CallExpr [[ADDR_180:0x[a-z0-9]*]] <line:22:29, line:81:26> 'int'
// CHECK-NEXT: | | | | | | | | | | | | | `-ImplicitCastExpr [[ADDR_181:0x[a-z0-9]*]] <line:22:29> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | | | | | | | | | `-DeclRefExpr [[ADDR_45]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_4]] 'picked2' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: | | | | | | | | | | | | `-PseudoObjectExpr [[ADDR_182:0x[a-z0-9]*]] <line:81:30, col:36> 'int'
// CHECK-NEXT: | | | | | | | | | | | | |-CallExpr [[ADDR_183:0x[a-z0-9]*]] <col:30, col:36> 'int'
// CHECK-NEXT: | | | | | | | | | | | | | `-ImplicitCastExpr [[ADDR_184:0x[a-z0-9]*]] <col:30> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | | | | | | | | | `-DeclRefExpr [[ADDR_185:0x[a-z0-9]*]] <col:30> 'int ({{.*}})' {{.*}}Function [[ADDR_46]] 'base3' 'int ({{.*}})'
// CHECK-NEXT: | | | | | | | | | | | | `-CallExpr [[ADDR_186:0x[a-z0-9]*]] <line:25:29, line:81:36> 'int'
// CHECK-NEXT: | | | | | | | | | | | | `-ImplicitCastExpr [[ADDR_187:0x[a-z0-9]*]] <line:25:29> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | | | | | | | | `-DeclRefExpr [[ADDR_51]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_8]] 'picked3' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: | | | | | | | | | | | `-PseudoObjectExpr [[ADDR_188:0x[a-z0-9]*]] <line:81:40, col:46> 'int'
// CHECK-NEXT: | | | | | | | | | | | |-CallExpr [[ADDR_189:0x[a-z0-9]*]] <col:40, col:46> 'int'
// CHECK-NEXT: | | | | | | | | | | | | `-ImplicitCastExpr [[ADDR_190:0x[a-z0-9]*]] <col:40> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | | | | | | | | `-DeclRefExpr [[ADDR_191:0x[a-z0-9]*]] <col:40> 'int ({{.*}})' {{.*}}Function [[ADDR_52]] 'base4' 'int ({{.*}})'
// CHECK-NEXT: | | | | | | | | | | | `-CallExpr [[ADDR_192:0x[a-z0-9]*]] <line:28:29, line:81:46> 'int'
// CHECK-NEXT: | | | | | | | | | | | `-ImplicitCastExpr [[ADDR_193:0x[a-z0-9]*]] <line:28:29> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr [[ADDR_57]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_9]] 'picked4' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: | | | | | | | | | | `-PseudoObjectExpr [[ADDR_194:0x[a-z0-9]*]] <line:81:50, col:56> 'int'
// CHECK-NEXT: | | | | | | | | | | |-CallExpr [[ADDR_195:0x[a-z0-9]*]] <col:50, col:56> 'int'
// CHECK-NEXT: | | | | | | | | | | | `-ImplicitCastExpr [[ADDR_196:0x[a-z0-9]*]] <col:50> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr [[ADDR_197:0x[a-z0-9]*]] <col:50> 'int ({{.*}})' {{.*}}Function [[ADDR_58]] 'base5' 'int ({{.*}})'
// CHECK-NEXT: | | | | | | | | | | `-CallExpr [[ADDR_198:0x[a-z0-9]*]] <line:31:29, line:81:56> 'int'
// CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr [[ADDR_199:0x[a-z0-9]*]] <line:31:29> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr [[ADDR_63]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_10]] 'picked5' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: | | | | | | | | | `-CallExpr [[ADDR_200:0x[a-z0-9]*]] <line:81:60, col:66> 'int'
// CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr [[ADDR_201:0x[a-z0-9]*]] <col:60> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr [[ADDR_202:0x[a-z0-9]*]] <col:60> 'int ({{.*}})' {{.*}}Function [[ADDR_64]] 'base6' 'int ({{.*}})'
// CHECK-NEXT: | | | | | | | | `-CallExpr [[ADDR_203:0x[a-z0-9]*]] <col:70, col:76> 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr [[ADDR_204:0x[a-z0-9]*]] <col:70> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr [[ADDR_205:0x[a-z0-9]*]] <col:70> 'int ({{.*}})' {{.*}}Function [[ADDR_70]] 'base7' 'int ({{.*}})'
// CHECK-NEXT: | | | | | | | `-PseudoObjectExpr [[ADDR_206:0x[a-z0-9]*]] <line:82:10, col:16> 'int'
// CHECK-NEXT: | | | | | | | |-CallExpr [[ADDR_207:0x[a-z0-9]*]] <col:10, col:16> 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr [[ADDR_208:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr [[ADDR_209:0x[a-z0-9]*]] <col:10> 'int ({{.*}})' {{.*}}Function [[ADDR_76]] 'base8' 'int ({{.*}})'
// CHECK-NEXT: | | | | | | | `-CallExpr [[ADDR_210:0x[a-z0-9]*]] <line:40:29, line:82:16> 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr [[ADDR_211:0x[a-z0-9]*]] <line:40:29> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr [[ADDR_81]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_30]] 'not_picked3' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: | | | | | | `-CallExpr [[ADDR_212:0x[a-z0-9]*]] <line:82:20, col:26> 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr [[ADDR_213:0x[a-z0-9]*]] <col:20> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | | `-DeclRefExpr [[ADDR_214:0x[a-z0-9]*]] <col:20> 'int ({{.*}})' {{.*}}Function [[ADDR_82]] 'base9' 'int ({{.*}})'
// CHECK-NEXT: | | | | | `-CallExpr [[ADDR_215:0x[a-z0-9]*]] <col:30, col:37> 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr [[ADDR_216:0x[a-z0-9]*]] <col:30> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | `-DeclRefExpr [[ADDR_217:0x[a-z0-9]*]] <col:30> 'int ({{.*}})' {{.*}}Function [[ADDR_88]] 'base10' 'int ({{.*}})'
// CHECK-NEXT: | | | | `-CallExpr [[ADDR_218:0x[a-z0-9]*]] <col:41, col:48> 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr [[ADDR_219:0x[a-z0-9]*]] <col:41> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | `-DeclRefExpr [[ADDR_220:0x[a-z0-9]*]] <col:41> 'int ({{.*}})' {{.*}}Function [[ADDR_94]] 'base11' 'int ({{.*}})'
// CHECK-NEXT: | | | `-PseudoObjectExpr [[ADDR_221:0x[a-z0-9]*]] <col:52, col:59> 'int'
// CHECK-NEXT: | | | |-CallExpr [[ADDR_222:0x[a-z0-9]*]] <col:52, col:59> 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr [[ADDR_223:0x[a-z0-9]*]] <col:52> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | `-DeclRefExpr [[ADDR_224:0x[a-z0-9]*]] <col:52> 'int ({{.*}})' {{.*}}Function [[ADDR_100]] 'base12' 'int ({{.*}})'
// CHECK-NEXT: | | | `-CallExpr [[ADDR_225:0x[a-z0-9]*]] <line:52:29, line:82:59> 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr [[ADDR_226:0x[a-z0-9]*]] <line:52:29> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | `-DeclRefExpr [[ADDR_105]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_14]] 'picked6' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: | | `-PseudoObjectExpr [[ADDR_227:0x[a-z0-9]*]] <line:82:63, col:70> 'int'
// CHECK-NEXT: | | |-CallExpr [[ADDR_228:0x[a-z0-9]*]] <col:63, col:70> 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr [[ADDR_229:0x[a-z0-9]*]] <col:63> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | `-DeclRefExpr [[ADDR_230:0x[a-z0-9]*]] <col:63> 'int ({{.*}})' {{.*}}Function [[ADDR_106]] 'base13' 'int ({{.*}})'
// CHECK-NEXT: | | `-CallExpr [[ADDR_231:0x[a-z0-9]*]] <line:55:29, line:82:70> 'int'
// CHECK-NEXT: | | `-ImplicitCastExpr [[ADDR_232:0x[a-z0-9]*]] <line:55:29> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | `-DeclRefExpr [[ADDR_111]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_18]] 'picked7' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: | `-PseudoObjectExpr [[ADDR_233:0x[a-z0-9]*]] <line:83:10, col:22> 'int'
// CHECK-NEXT: | |-CallExpr [[ADDR_234:0x[a-z0-9]*]] <col:10, col:22> 'int'
// CHECK-NEXT: | | `-ImplicitCastExpr [[ADDR_235:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | `-DeclRefExpr [[ADDR_236:0x[a-z0-9]*]] <col:10> 'int ({{.*}})' {{.*}}Function [[ADDR_112]] 'overloaded1' 'int ({{.*}})'
// CHECK-NEXT: | `-CallExpr [[ADDR_237:0x[a-z0-9]*]] <line:59:1, line:83:22> 'int'
// CHECK-NEXT: | `-ImplicitCastExpr [[ADDR_238:0x[a-z0-9]*]] <line:59:1> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_114]] <col:1> 'int ({{.*}})' {{.*}}Function [[ADDR_115]] 'overloaded1[implementation={extension(match_any)}, device={kind(cpu, gpu)}]' 'int ({{.*}})'
// CHECK-NEXT: `-PseudoObjectExpr [[ADDR_239:0x[a-z0-9]*]] <line:83:26, col:38> 'int'
// CHECK-NEXT: |-CallExpr [[ADDR_240:0x[a-z0-9]*]] <col:26, col:38> 'int'
// CHECK-NEXT: | `-ImplicitCastExpr [[ADDR_241:0x[a-z0-9]*]] <col:26> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_242:0x[a-z0-9]*]] <col:26> 'int ({{.*}})' {{.*}}Function [[ADDR_119]] 'overloaded2' 'int ({{.*}})'
// CHECK-NEXT: `-CallExpr [[ADDR_243:0x[a-z0-9]*]] <line:64:1, line:83:38> 'int'
// CHECK-NEXT: `-ImplicitCastExpr [[ADDR_244:0x[a-z0-9]*]] <line:64:1> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: `-DeclRefExpr [[ADDR_124]] <col:1> 'int ({{.*}})' {{.*}}Function [[ADDR_125]] 'overloaded2[implementation={extension(match_none)}, device={kind(fpga, gpu)}]' 'int ({{.*}})'
|
shape.h | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
/*
* shape.h
*
* Created on: Dec 28, 2015
* Author: agibsonccc
*/
#ifndef SHAPE_H_
#define SHAPE_H_
#include <cstring>
#include <cstdio>
#include "../dll.h"
#include "../nd4jmalloc.h"
#include "../templatemath.h"
#include "../helpers/logger.h"
#include "../pointercast.h"
#include "../cnpy/cnpy.h"
#include <op_boilerplate.h>
#define MAX_DIMENSION 0x7fffffff
#define MAX_NUM_THREADS 1024
#define MAX_RANK 32
#define MAX_SHAPEINFOLENGTH 2*MAX_RANK+4
#define MAX_COORD 3
#define PREALLOC_SIZE 33554432
#ifdef __CUDACC__
#include <cuda.h>
#include <cuda_runtime.h>
#include <helpers/sharedmem.h>
#endif
// INLINEDEF historically differed between CUDA and host compilation, but both
// branches of the old "#ifdef __CUDACC__" expanded to the same token, so a
// single unconditional definition is equivalent and clearer.
#define INLINEDEF inline
#include "../pairwise_util.h"
#include <stdint.h>
#include <array/ArrayOptions.h>
typedef unsigned int uint;
namespace shape {
/**
* Shape information approximating
* the information on an ndarray
*/
struct ND4J_EXPORT ShapeInformation {
_CUDA_HD ShapeInformation(Nd4jLong *shape_ = nullptr, Nd4jLong *stride_ = nullptr, char order_ = 0, int rank_ = 0, int offset_ = 0, int elementWiseStride_ = 0)
: shape(shape_), stride(stride_), order(order_), rank(rank_), offset(offset_), elementWiseStride(elementWiseStride_)
{}
Nd4jLong *shape;
Nd4jLong *stride;
char order;
int rank;
int offset;
int elementWiseStride;
};
/**
* Indexing information
* for bounds checking
*/
// Per-thread/block index bookkeeping used for bounds checking.
// (Field meanings below are inferred from the names -- verify at use sites.)
struct ND4J_EXPORT CurrentIndexing {
int numElementsPerThread;   // elements each thread is responsible for
int blockStartingIndex;     // index at which the current block begins
int startingThreadIndex;    // first thread index of the processed range
int endingThreadIndex;      // last thread index of the processed range
};
// Shape equality for two shapes given as explicit (rank, extents) pairs.
ND4J_EXPORT _CUDA_HD bool shapeEquals(const int shape1Rank, const Nd4jLong *shape1, const int shape2Rank, const Nd4jLong *shape2);
// Return a standalone copy of the shape buffer (name suggests it detaches the
// buffer from shared/workspace storage -- confirm against the implementation).
ND4J_EXPORT _CUDA_HD Nd4jLong* detachShape(Nd4jLong *originalShape);
// Return a copy of the given shape buffer.
ND4J_EXPORT _CUDA_HD Nd4jLong* copyShape(Nd4jLong *originalShape);
// Shape equality for two full shape-info buffers.
ND4J_EXPORT _CUDA_HD bool shapeEquals(const Nd4jLong *shapeInfo1, const Nd4jLong *shapeInfo2);
// Stride equality, given as (rank, strides) pairs or full shape-info buffers.
ND4J_EXPORT _CUDA_HD bool strideEquals(int shape1Rank,Nd4jLong *shape1,int shape2Rank,Nd4jLong *shape2);
ND4J_EXPORT _CUDA_HD bool strideEquals(Nd4jLong *shapeInfo1,Nd4jLong *shapeInfo2);
ND4J_EXPORT _CUDA_HD bool strideEquals(Nd4jLong *stride1,int rank1,Nd4jLong *stride2,int rank2);
// "Soft"/"strict" comparison variants for shape-info buffers; the exact
// criteria (whether order/ews/type participate) live in the implementation.
ND4J_EXPORT _CUDA_HD bool equalsSoft(const Nd4jLong *shapeA, const Nd4jLong *shapeB);
ND4J_EXPORT _CUDA_HD bool equalsTypesAndShapesSoft(const Nd4jLong *shapeA, const Nd4jLong *shapeB);
ND4J_EXPORT _CUDA_HD bool equalsStrict(const Nd4jLong *shapeA, const Nd4jLong *shapeB);
// True when both buffers produce the same element offsets (inferred from the
// name -- confirm in the implementation).
ND4J_EXPORT _CUDA_HD bool haveSameOffsets(const Nd4jLong *shapeA, const Nd4jLong *shapeB);
// Extent of dimension `dim` of a shape-info buffer.
ND4J_EXPORT _CUDA_HD int sizeAt(const Nd4jLong *shape, const int dim);
// Fill `length` entries of `buffer` with `value`.
template <typename T>
ND4J_EXPORT _CUDA_HD void fill(T* buffer, T value, Nd4jLong length);
// Allocation-tracing hook; `id` presumably identifies the call site.
ND4J_EXPORT _CUDA_HD void traceNew(int id);
// TAD helpers: map a linear index to a TAD index, and compute the number of
// elements in one TAD for the given dimensions.
ND4J_EXPORT _CUDA_HD int tadIndexForLinear(int linearIndex, int tadLength);
ND4J_EXPORT _CUDA_HD int tadLength(Nd4jLong *shapeInfo, int *dimension, int dimensionLength);
// Reshape feasibility check, and the C-order reshape itself; reshapeC writes
// the resulting shape-info into newShapeInfo and reports success.
ND4J_EXPORT _CUDA_HD bool canReshape(const int oldRank, Nd4jLong* oldShape, const int newRank, Nd4jLong* newShape, bool isFOrder);
ND4J_EXPORT _CUDA_HD bool reshapeC(const int oldRank, const Nd4jLong* oldShapeInfo, const int newRank, const Nd4jLong* newShape, Nd4jLong* newShapeInfo);
/**
* Get the shape info buffer
* for the given rank and shape.
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBuffer(int rank, nd4j::DataType dtype, Nd4jLong *shape);
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBuffer(int rank, nd4j::DataType dtype, Nd4jLong *shape, Nd4jLong *buffer);
/**
* Get the shape info buffer
* for the given rank and shape.
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBufferFortran(int rank, nd4j::DataType dtype, Nd4jLong *shape);
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBufferFortran(int rank, nd4j::DataType dtype, Nd4jLong *shape, Nd4jLong *output);
//ND4J_EXPORT _CUDA_HD void doPermuteShapeBuffer(Nd4jLong *shapeBuffer, int* rearrange, Nd4jLong *tmpBuffer);
ND4J_EXPORT _CUDA_HD void doPermuteShapeBuffer(int rank, Nd4jLong *shapeBuffer, int *rearrange, Nd4jLong *tmpBuffer);
#ifdef __CUDACC__
template <typename T>
__device__ ND4J_EXPORT Nd4jLong *cuMalloc(Nd4jLong *buffer, long size, UnifiedSharedMemory *manager);
__device__ ND4J_EXPORT Nd4jLong *cuMalloc(Nd4jLong *buffer, long size);
#endif
/**
* Computes the standard packed array strides for a given shape,
* in Fortran (column-major) order.
*
* @param shape the shape of the array
* @param rank the rank of the shape
* @return the strides for an array of n dimensions
*/
ND4J_EXPORT _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank);
// Overload writing into the caller-provided buffer `ret` instead of allocating.
ND4J_EXPORT _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank, Nd4jLong* ret);
/**
* Computes the standard packed array strides for a given shape,
* in C (row-major) order.
*
* @param shape the shape of the array
* @param rank the rank of the shape
* @return the strides for an array of n dimensions
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank);
// Overload writing into the caller-provided buffer `ret` instead of allocating.
ND4J_EXPORT _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank, Nd4jLong* ret);
// Recompute the stride section of a shape buffer for the given order.
ND4J_EXPORT _CUDA_HD void updateStrides(Nd4jLong *shape, const char order);
ND4J_EXPORT _CUDA_HD void updateStrides(const int rank, const Nd4jLong *shapeOnly, Nd4jLong *stridesOnly, const char order);
// check whether input dimensions are permuted; a not-permuted dimensions order has to be 0,...,rank-1
template <typename T>
ND4J_EXPORT _CUDA_HD bool isDimPermuted(const T* dimensions, const int dimSize);
/**
* Computes the standard packed array strides for a given shape.
*
* @param shape the shape of a matrix:
* @param startNum the start number for the strides
* @return the strides for a matrix of n dimensions
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* calcStridesFortran(Nd4jLong *shape, int rank, int startNum);
ND4J_EXPORT _CUDA_HD Nd4jLong* calcStridesFortran(Nd4jLong *shape, int rank, int startNum, Nd4jLong* ret);
/**
* Computes the standard packed array strides for a given shape.
*
* @param shape the shape of a matrix:
* @param startNum the start number for the strides
* @return the strides for a matrix of n dimensions
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank, int startNum);
ND4J_EXPORT _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank, int startNum, Nd4jLong* ret);
/**
* @param toCopy the shape to copy
* @return a copy of the original struct
*/
ND4J_EXPORT _CUDA_HD ShapeInformation *shapeCopy( ShapeInformation *toCopy);
ND4J_EXPORT _CUDA_HD bool strideDescendingCAscendingF(const Nd4jLong *shapeBuffer);
ND4J_EXPORT _CUDA_HD bool isStrideSimple(const Nd4jLong* shapeInfo);
/**
* copy-paste of the java hasDefaultStridesForShape function:
* checks whether the array is not permuted and has contiguous elements in memory
*/
ND4J_EXPORT _CUDA_HD bool areStridesDefault(const Nd4jLong* shapeInfo);
/**
* Compute the element wise stride
* for a given shape/stride configuration
* @param rank the rank of the shape/stride
* @param shape the shape
* @param stride the stride
* @param isFOrder 0 or 1 for whether the array is f
* ordered or not
* @return 0 if there is no element wise stride the
* element wise stride of reshape(1,length) otherwise
*/
ND4J_EXPORT _CUDA_HD int computeElementWiseStride(int rank, Nd4jLong *shape, Nd4jLong *stride, int isFOrder);
/**
* Compute the element wise stride
* for a given shape/stride configuration
* @param rank the rank of the shape/stride
* @param shape the shape
* @param stride the stride
* @param isFOrder 0 or 1 for whether the array is f
* ordered or not
* @return 0 if there is no element wise stride the
* element wise stride of reshape(1,length) otherwise
*/
ND4J_EXPORT _CUDA_HD int computeElementWiseStride(int rank, Nd4jLong *shape, Nd4jLong *stride, int isFOrder, Nd4jLong *dimension, int dimensionLength);
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeInfoOnlyShapeAndStride(Nd4jLong *shapeInfo, Nd4jLong *dimension, int dimensionLength,bool reverseCopyStride);
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeInfoOnlyShapeAndStride(Nd4jLong *shapeInfo, Nd4jLong *dimension, int dimensionLength,bool reverseCopyStride, Nd4jLong *buffer);
/**
*
* @param length
* @param shape
* @param rearrange
* @return
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *doPermuteSwap(int length, Nd4jLong *shape, int* rearrange);
/**
* In place permute swap
* @param length
* @param shape
* @param rearrange
*/
ND4J_EXPORT _CUDA_HD void doPermuteSwap(int length, Nd4jLong **shape, int* rearrange);
ND4J_EXPORT _CUDA_HD Nd4jLong *permuteShapeBuffer(Nd4jLong *shapeBuffer, int* rearrange);
ND4J_EXPORT _CUDA_HD void permuteShapeBufferInPlace(Nd4jLong *shapeBuffer, int* rearrange, Nd4jLong *out);
ND4J_EXPORT _CUDA_HD void doPermuteShapeInfo(Nd4jLong *shapeBuffer, const int *rearrange);
ND4J_EXPORT _CUDA_HD void doPermuteShapeInfo(Nd4jLong *shapeBuffer, const Nd4jLong *rearrange);
ND4J_EXPORT _CUDA_HD void doPermuteShapeBuffer(Nd4jLong *shapeBuffer, int* rearrange);
ND4J_EXPORT _CUDA_HD void doPermuteShapeBuffer(int rank,Nd4jLong *shapeBuffer, int* rearrange);
/**
* Rearrange the permute indexes
* according to which dimensions are specified.
*
* For example, dimension is implicitly:
* 0,1,2
*
* If you want to do a reduce along dimensions 0 and 1,
* you need to permute the indexes to be:
* 2,0,1
*
* which will give us the ability to iterate along an element
* wise stride.
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* createPermuteIndexes(int originalRank, int *dimension,int dimensionLength);
ND4J_EXPORT _CUDA_HD Nd4jLong* computeResultShape(Nd4jLong *originalShapeBuffer, int *dimension,int dimensionLength);
/**
* This method does inplace transpose of given shapeBuffer
*
* @param shapeBuffer
*/
ND4J_EXPORT _CUDA_HD void transposeInplace(Nd4jLong *shapeBuffer);
/**
* Get the ordering for the device
* @param length
* @param shape
* @param stride
* @param elementStride
* @return
*/
ND4J_EXPORT _CUDA_HD char getOrder(int length, Nd4jLong *shape, Nd4jLong *stride, int elementStride);
/**
* Ensure that every value in the re arrange
* array is unique
* @param arr
* @param shape
* @param arrLength
* @param shapeLength
* @return
*/
template <typename T>
ND4J_EXPORT _CUDA_HD int checkArrangeArray(T *arr, int arrLength, int shapeLength);
/**
* Permute the shape information
* @param info the shape information to permute
* @param rearrange the order to re arrange
* @param rank the rank of the rearrange array
*/
ND4J_EXPORT _CUDA_HD void permute(ShapeInformation **info, int *rearrange, int rank);
/**
* Returns whether the
* given shape is a vector or not
* @param shape the shape of the array
* @param rank the rank of the shape
*/
ND4J_EXPORT _CUDA_HD int isVector(Nd4jLong *shape, int rank);
/**
* When 1 dimension is the whole length of the
* array
*/
ND4J_EXPORT _CUDA_HD int oneDimEqualToLength(Nd4jLong *shape, int rank);
ND4J_EXPORT _CUDA_HD int oneDimEqualToLength(Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD int isVector(const Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD bool isLikeVector(Nd4jLong *shapeInfo, int& posOfNonUnityDim);
ND4J_EXPORT _CUDA_HD bool isCommonVector(const Nd4jLong *shapeInfo, int& posOfNonUnityDim);
ND4J_EXPORT _CUDA_HD bool isRowVector(const Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD bool isColumnVector(Nd4jLong *shapeInfo);
/**
* Returns whether the
* given shape is a matrix or not
* @param shape the shape of the array
* @param rank the rank of the shape
*/
ND4J_EXPORT _CUDA_HD int isMatrix(Nd4jLong *shape, int rank);
// NOTE(review): declared with INLINEDEF while every sibling declaration uses
// ND4J_EXPORT -- confirm this divergence is intentional.
INLINEDEF _CUDA_HD int isMatrix(Nd4jLong *shapeInfo);
/**
* Returns the shape portion of an information
* buffer
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeOf(Nd4jLong *buffer);
/**
* Return a copy of a buffer.
* This buffer allocates memory
* that must be freed elsewhere.
*/
template <typename T>
ND4J_EXPORT _CUDA_HD T* copyOf(Nd4jLong length, T *toCopy);
template <typename T>
ND4J_EXPORT _CUDA_HD T* copyOf(Nd4jLong length, T *toCopy, T *ret);
/**
* Return a copy of a buffer.
* This buffer allocates memory
* that must be freed elsewhere.
*/
template <typename T>
ND4J_EXPORT _CUDA_HD void copyTo(Nd4jLong length, T *from, T *to);
/**
* Return a copy of a buffer.
* This buffer allocates memory
* that must be freed elsewhere.
*/
ND4J_EXPORT _CUDA_HD void copyTo(int length, Nd4jLong *from, Nd4jLong *to, Nd4jLong *indexes);
/**
* Permute the given strides
* in the given rearrange order
* @param toPermute the buffer to permute
* @param shapeRank the length of the buffer to permute
* @param rearrange the rearrange order (must be 0 based indexes
* and all must be filled in)
* @return the rearranged array
*/
//ND4J_EXPORT _CUDA_HD Nd4jLong *permutedStrides(Nd4jLong *toPermute, int shapeRank, Nd4jLong *rearrange);
/**
* Return the slice (shape + 1 in pointer arithmetic)
* @param shape the shape to take the slice of
* @return the shape array - the first entry
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *slice(Nd4jLong *shape);
ND4J_EXPORT _CUDA_HD int slices(Nd4jLong *shapeBuffer);
ND4J_EXPORT _CUDA_HD Nd4jLong *sliceOfShapeBuffer(Nd4jLong sliceIdx, Nd4jLong *shapeBuffer);
/**
* Returns the length of the
* shape information buffer:
* rank * 2 + 4
* (consistent with MAX_SHAPEINFOLENGTH = 2*MAX_RANK+4 above; the previous
* comment said "rank * 2 + 3", contradicting its own @return line)
* @param rank the rank to get the shape
* info length for
* @return rank * 2 + 4
*/
ND4J_EXPORT _CUDA_HD int shapeInfoLength(int rank);
ND4J_EXPORT _CUDA_HD int shapeInfoLength(Nd4jLong* shapeInfo);
ND4J_EXPORT _CUDA_HD int shapeInfoLength(const Nd4jLong* shapeInfo);
// Same length expressed in bytes (presumably length * sizeof(Nd4jLong) --
// verify in the implementation). The exact duplicate declaration of the
// const-pointer overload that used to follow has been removed.
ND4J_EXPORT _CUDA_HD size_t shapeInfoByteLength(int rank);
ND4J_EXPORT _CUDA_HD size_t shapeInfoByteLength(const Nd4jLong* shapeInfo);
/**
* Returns the rank portion of
* an information buffer
*/
ND4J_EXPORT _CUDA_HD int rank(const Nd4jLong *buffer);
ND4J_EXPORT _CUDA_HD int rank(const int *buffer);
ND4J_EXPORT _CUDA_HD int rank(const unsigned int *buffer);
// returns pointer on elementWiseStride
ND4J_EXPORT _CUDA_HD Nd4jLong* ews(Nd4jLong* shapeInfo);
/**
* Converts a raw int buffer of the layout:
* rank
* shape
* stride
* offset
* elementWiseStride
*
* where shape and stride are both straight int pointers
*/
ND4J_EXPORT _CUDA_HD ShapeInformation *infoFromBuffer(Nd4jLong *buffer);
/**
* Returns the stride portion of an information
* buffer
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *stride(Nd4jLong *buffer);
/**
* Compute the length of the given shape
*/
ND4J_EXPORT _CUDA_HD bool isEmpty(const Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD Nd4jLong length(const Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD Nd4jLong length(std::initializer_list<int>& shape);
ND4J_EXPORT _CUDA_HD Nd4jLong length(std::initializer_list<Nd4jLong>& shape);
/***
* Returns the offset portion of an information buffer
*/
ND4J_EXPORT _CUDA_HD Nd4jLong offset(Nd4jLong *buffer);
ND4J_EXPORT _CUDA_HD Nd4jLong& extra(Nd4jLong *buffer);
/**
* Returns the ordering
* for this shape information buffer
*/
ND4J_EXPORT _CUDA_HD char order(const Nd4jLong *buffer);
/**
* Returns the type
*/
ND4J_EXPORT _CUDA_HD Nd4jLong type(const Nd4jLong* shapeInfo);
/**
* Returns the element wise stride for this information
* buffer
*/
ND4J_EXPORT _CUDA_HD Nd4jLong elementWiseStride(const Nd4jLong *buffer);
/**
* Returns the element wise stride for this information
* buffer
* relative to a dimension and ordering for a reduction index
*/
ND4J_EXPORT _CUDA_HD Nd4jLong reductionIndexElementWiseStride(Nd4jLong *buffer, int *dimension, int dimensionLength);
/**
* Returns whether
* the given shape info buffer
* represents a scalar shape
*/
ND4J_EXPORT _CUDA_HD int isScalar(Nd4jLong *info);
/**
* Returns whether
* the given shape information
* represents a scalar
* shape or not
*/
ND4J_EXPORT _CUDA_HD int isScalar(volatile ShapeInformation *info);
/**
* Return a copy of this array with the
* given index omitted
*
* @param data the data to copy
* @param indexes the index of the item to remove
* @param dataLength the length of the data array
* @param indexesLength the length of the data array
* @return the new array with the omitted
*
* item
*/
template <typename T1, typename T2>
ND4J_EXPORT _CUDA_HD void removeIndex(T1 *data, T2 *indexes, Nd4jLong dataLength, Nd4jLong indexesLength, T1 *out);
/**
* Return a copy of this array with the
* given index omitted
*
* @param data the data to copy
* @param indexes the index of the item to remove
* @param dataLength the length of the data array
* @param indexesLength the length of the data array
* @return the new array with the omitted
*
* item
*/
template <typename T1, typename T2>
ND4J_EXPORT _CUDA_HD T1* removeIndex(T1 *data, T2 *indexes, Nd4jLong dataLength, Nd4jLong indexesLength);
/**
* Iterate over a given set of indexes
* the begin and end indexes are 0 based.
* 1 padding is automatically assumed for the ending.
*
* For example if you want to iterate over 0 to 4
* it will go to 4 rather than 3.
*
* indexes should be the indexes to exclude
* indexes length should be the length of indexes
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* everyIndexBut(Nd4jLong *indexes,int indexesLength,int begin,int end);
/**
* Computes the offset for accessing
* a global element given the shape information
* and the offset to be read.
*/
//#ifdef __CUDACC__
// __device__
//#endif
// ND4J_EXPORT int tadOffset(shape::ShapeInformation *xInfo, int offset);
/**
* Returns a shape
* forces the given length to be 2.
* @param shape the shape to modify
* @param dimension the dimension (row or column)
* for the shape to be returned as
* @return the new shape
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* ensureVectorShape(Nd4jLong *shape);
ND4J_EXPORT _CUDA_HD Nd4jLong* createScalarShapeInfo();
ND4J_EXPORT _CUDA_HD Nd4jLong* createScalarShapeInfo(Nd4jLong *ret);
/**
* Generate an int buffer
* up to the given length
* at the specified increment
*
*/
template <typename T>
ND4J_EXPORT _CUDA_HD T* range(int from, int to, int increment);
/**
* Range between from and to with an
* increment of 1
*/
template <typename T>
ND4J_EXPORT _CUDA_HD T* range(int from, int to);
/**
* Keep the given indexes
* in the data
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *keep(volatile Nd4jLong *data, int* index, int indexLength, int dataLength);
/**
* Generate reverse copy of the data
* @param data
* @param length
* @return
*/
template <typename T>
ND4J_EXPORT _CUDA_HD T* reverseCopy(T *data, Nd4jLong length);
template <typename T>
ND4J_EXPORT _CUDA_HD void reverseCopyTo(T *from, T *to, Nd4jLong length);
template <typename T>
ND4J_EXPORT _CUDA_HD void reverseCopyTo(T *from, T *to, Nd4jLong *indexes, Nd4jLong length);
template <typename T1, typename T2>
ND4J_EXPORT _CUDA_H void convertT(T1 *from, T2 *to, Nd4jLong length);
/**
*
* @param arr1
* @param arr1Length
* @param arr2
* @param arr2Length
* @return
*/
template <typename T>
ND4J_EXPORT _CUDA_HD T* concat(T* arr1, Nd4jLong arr1Length, T* arr2, Nd4jLong arr2Length);
/**
*
* @param numArrays
* @param numTotalElements
* @param arr
* @param lengths
* @return
*/
template <typename T>
ND4J_EXPORT _CUDA_HD T* concat(int numArrays, int numTotalElements, Nd4jLong **arr, Nd4jLong *lengths);
/**
* Get the length per slice of the
* given shape and the dimension
* @param rank the rank of the shape
* @param shape the shape of to get
* the length per slice for
* @param dimension the dimension to
* get the length per slice for
* @param dimensionLength the length of the dimension array
* @return the length per slice of the given shape
* along the given dimension
*/
ND4J_EXPORT _CUDA_HD Nd4jLong lengthPerSlice(int rank, Nd4jLong *shape, int *dimension, int dimensionLength);
/**
* calculates the offset for a tensor
* @param index
* @param arr
* @param tensorShape
* @return
*/
ND4J_EXPORT _CUDA_HD Nd4jLong sliceOffsetForTensor(int rank,
int index,
Nd4jLong *shape,
Nd4jLong *tensorShape,
int tensorShapeLength,
int *dimension,
int dimensionLength);
/**
* calculates the offset for a tensor
* @param index
* @param arr
* @param tensorShape
* @return
*/
ND4J_EXPORT _CUDA_HD Nd4jLong sliceOffsetForTensor(int index,int tensorLength,int lengthPerSlice2);
/**
* Computes the tensor along dimension
* offset
* @param index the index to get the offset for the tad for
* @param rank the rank of the shapes and strides
* @param info the shape information to use for tad
* @param dimension the dimensions to use for computing the tensor along dimensions
*/
// ND4J_EXPORT _CUDA_HD int offset(int index,
// int rank,
// shape::ShapeInformation *info,
// Nd4jLong *dimension,
// int dimensionLength);
/**
* Computes the number
* of tensors along
* a given dimension
*/
ND4J_EXPORT _CUDA_HD Nd4jLong tensorsAlongDimension(int rank,
volatile int length,
volatile Nd4jLong *shape,
int *dimension,
int dimensionLength);
/**
* Computes the number
* of tensors along
* a given dimension
*/
ND4J_EXPORT _CUDA_HD Nd4jLong tensorsAlongDimension(Nd4jLong *shapeInfo, int *dimension, int dimensionLength);
/**
* Returns the tensor along dimension
* for the given block index
* @param blockSize
* @param blockIdx
* @param i
* @return
*/
ND4J_EXPORT _CUDA_HD int tadForBlockIndex(int blockSize, int blockIdx, int i);
/**
* Computes the number of tads per block
*
*/
ND4J_EXPORT _CUDA_HD int tadsPerBlock(int blockSize, int tads);
// ND4J_EXPORT _CUDA_HD Nd4jLong *tadShapeInfo(int index, Nd4jLong *xShapeInfo, Nd4jLong *dimension,
// int dimensionLength);
/**
* Returns a shape buffer
* for the shape information metadata.
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *toShapeBuffer( ShapeInformation *info);
ND4J_EXPORT _CUDA_HD Nd4jLong *toShapeBuffer( ShapeInformation *info, Nd4jLong* ret);
/**
* Returns the number of elements per thread
*/
//#ifdef __CUDACC__
// __device__
//#endif
// int numElementsPerThread(int N);
/**
* Returns the block starting index
*/
//#ifdef __CUDACC__
// __device__
//#endif
// int blockStartingIndex(int N);
/**
* Returns the thread starting index
*/
//#ifdef __CUDACC__
// __device__
//#endif
// int threadStartingIndex(int N, int stride, int offset);
/**
* Returns the thread ending index
*/
//#ifdef __CUDACC__
// __device__
//#endif
// int threadEndingIndex(int N, int stride, int offset);
/**
* Returns indexing information
* for the current kernel invocation
*/
//#ifdef __CUDACC__
// __device__
//#endif
// CurrentIndexing *currentIndex(int N, int offset, int stride);
/** Given an linear index, element wise stride
* and the length of each tad
* map a linear index to a tad
* @param i the index to map
* @param the element wise stride for the tads
* @param numElementsPerTad the number of elements
* per tad
*/
ND4J_EXPORT _CUDA_HD int tadIndex(int i, int elementWiseStride, int numElementsPerTad);
/**
* Map a tad to a
* reduction index.
* @param tadIndexForOriginal the original tad index for the
* split up problem (eg: split is dimension 3 mapping to a 2,3 problem)
* @param tadsForReduced the number of tads for the shrunk down problem (eg: 2,3)
* @param tadsForOriginal the number of tads for the smaller problem (eg: 3)
*/
ND4J_EXPORT _CUDA_HD int reductionIndexForTad(int tadIndexForOriginal, int tadsForReduced,
int tadsForOriginal);
/**
* Computes the number of tads
* per reduce index for the
* reduction tad.
*/
ND4J_EXPORT _CUDA_HD int tadsPerReduceIndex(int tadsForReduce, int tadsForOriginal);
/**
* Maps a linear index to a reduction index
* @param i the linear index to map
* @param elementWiseStride the element wise stride
* for the multiple problem
* @param tadNum the number of tads for the shrunken problem
* @param originalTadNum the tad number for the reduced version of the problem
*/
ND4J_EXPORT _CUDA_HD int reductionIndexForLinear(int i, int elementWiseStride, int numElementsPerTad,
int tadNum, int originalTadNum);
/**
* Returns the prod of the data
* up to the given length
*/
ND4J_EXPORT _CUDA_HD int prod(Nd4jLong *data, int length);
ND4J_EXPORT _CUDA_HD Nd4jLong prodLong(const Nd4jLong *data, int length);
/**
* Returns the rear most left over item not present in
* the dimension array. This assumes that the dimension array is sorted.
*
* For example, given a dimension array of:
* 0,2
*
* and
*
* 12,4,2,1 in data
*
* You end up with 1 (data[3])
* since the first item won't match
* the last item of the dimension array
*/
// ND4J_EXPORT _CUDA_HD int rearMostLeftOverItem(Nd4jLong *data,int length,Nd4jLong *dimension,int dimensionLength);
/**
* Get an offset for retrieval
* from a data buffer
* based on the given
* shape stride and given indices
* @param baseOffset the offset to start from
* @param shape the shape of the array
* @param stride the stride of the array
* @param indices the indices to iterate over
* @return the double at the specified index
*/
ND4J_EXPORT _CUDA_HD Nd4jLong getOffset(Nd4jLong baseOffset, const Nd4jLong *shape, const Nd4jLong *stride, const Nd4jLong *indices,int rank);
ND4J_EXPORT _CUDA_HD Nd4jLong* createShapeInfo(Nd4jLong *shape, Nd4jLong *stride, int rank);
ND4J_EXPORT _CUDA_HD Nd4jLong* createShapeInfo(Nd4jLong *shape, Nd4jLong *stride, int rank, Nd4jLong *buffer);
/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
* @param numIndices the number of total indices (typically the product of the shape)
* @return the mapped indexes along each dimension
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* ind2sub(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices);
ND4J_EXPORT _CUDA_HD Nd4jLong *ind2sub(int rank, Nd4jLong *shape, Nd4jLong index);
/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
* @param numIndices the number of total indices (typically the product of the shape)
* @return the mapped indexes along each dimension
*/
ND4J_EXPORT _CUDA_HD void ind2sub(int rank,Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices,Nd4jLong *out);
/**
* Convert a linear index to
* the equivalent nd index.
* Infers the number of indices from the specified shape.
*
* @param shape the shape of the dimensions
* @param index the index to map
* @return the mapped indexes along each dimension
*/
ND4J_EXPORT _CUDA_HD void ind2sub(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong *out);
/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
* @param numIndices the number of total indices (typically the product of the shape)
* @return the mapped indexes along each dimension
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* ind2subC(const int rank, const Nd4jLong *shape, Nd4jLong index);
/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
* @param numIndices the number of total indices (typically the product of the shape)
* @return the mapped indexes along each dimension
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* ind2subC(const int rank, const Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices);
/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
* @param numIndices the number of total indices (typically the product of the shape)
* @return the mapped indexes along each dimension
*/
ND4J_EXPORT _CUDA_HD void ind2subC(const int rank, const Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices, Nd4jLong *out);
/**
* Convert a linear index to
* the equivalent nd index.
* Infers the number of indices from the specified shape.
*
* @param shape the shape of the dimensions
* @param index the index to map
* @return the mapped indexes along each dimension
*/
ND4J_EXPORT _CUDA_HD void ind2subC(const int rank, const Nd4jLong *shape, Nd4jLong index, Nd4jLong *out);
/**
* Convert the given index (such as 1,1)
* to a linear index
* @param shape the shape of the indexes to convert
* @param indices the index to convert
* @return the linear index given the shape
* and indices
*/
ND4J_EXPORT _CUDA_HD Nd4jLong sub2Ind(const int rank, const Nd4jLong *shape, const Nd4jLong *indices);
/**
* increment n-dimensional array by one iteration by changing coord appropriately
* for example we have array with shape {2, 3}:
* - if input coord = {0,1}, then output coord = {0,2}
* - if input coord = {0,2}, then output coord = {1,0}
* so the aim is to produce following subsequence of coord: {0,0}, {0,1}, {0,2}, {1,0}, {1,1}, {1,2}
*/
/* calculates an array buffer offset for given "index" using following formula: offset = coord_0*stride_0 + coord_1*stride_1 + ... + coord_{rank-1}*stride_{rank-1}
* arrLen - array length
*/
ND4J_EXPORT _CUDA_HD uint getIndexOffset(uint index, const uint *shapeInfo, uint arrLen);
ND4J_EXPORT _CUDA_HD Nd4jLong getIndexOffset(Nd4jLong index, const Nd4jLong *shapeInfo, Nd4jLong arrLen);
ND4J_EXPORT _CUDA_HD Nd4jLong getIndexOrderOffset(Nd4jLong index, const Nd4jLong *shapeInfo, Nd4jLong arrLen, const char order);
ND4J_EXPORT _CUDA_HD Nd4jLong indexOffset(Nd4jLong index, const Nd4jLong* lShapeInfo, const uint* uShapeInfo, Nd4jLong arrLen, const bool useUnsigned);
/**
* Compute the real linear indices for the given shape and stride
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *computeIndices(int rank, Nd4jLong *shape, Nd4jLong *stride);
/**
* Compute the real linear indices for the
* given shape buffer. Shape,stride and rank are derived
* from the buffer
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *computeIndices( Nd4jLong *shapeBuffer);
/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
* @param numIndices the number of total indices (typically the product of the shape)
* @return the mapped indexes along each dimension
*/
ND4J_EXPORT _CUDA_HD void ind2subOrder(Nd4jLong *shapeInfo, Nd4jLong index, Nd4jLong numIndices,Nd4jLong *out);
/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
* @param numIndices the number of total indices (typically the product of the shape)
* @return the mapped indexes along each dimension
*/
ND4J_EXPORT _CUDA_HD void ind2subOrder(Nd4jLong *shapeInfo, Nd4jLong index,Nd4jLong *out);
ND4J_EXPORT _CUDA_HD void printShapeInfo(Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD void printShapeInfoLinear(const Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD void printShapeInfoLinear(const char *msg, const Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD void printShapeInfoLinear(const char *msg, int rank, const Nd4jLong *shape, const Nd4jLong *strides);
ND4J_EXPORT _CUDA_HD void printIntArray(const Nd4jLong *arr, const int length);
ND4J_EXPORT _CUDA_HD void printIntArray(const int *arr, const int length);
ND4J_EXPORT _CUDA_HD void printArray(float *arr,int length);
template<typename T>
ND4J_EXPORT _CUDA_HD void printArray(T *arr,int length, const char *message);
ND4J_EXPORT _CUDA_HD Nd4jLong* shapeBufferOfNpy(int rank, unsigned int *shape,bool fortranOrder);
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBufferOfNpy(cnpy::NpyArray arr);
// ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBufferOfNpyBuffer(char *buffer);
// this function checks the consistence of dimensions with array rank (negative dimensions, too large dimensions, too big number of dimensions)
// also sort input array of dimensions, this operation is also necessary for creating TAD object
ND4J_EXPORT _CUDA_H void checkDimensions(const int rank, std::vector<int>& dimensions);
// function calculates linear index of array min, min is sub-array of max, index to be returned is min-array's index and corresponds to maxIdx of max array
// dimsToExclude - should be sorted in increasing order
ND4J_EXPORT _CUDA_HD Nd4jLong subArrayIndex(const Nd4jLong maxIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude = nullptr, const int dimsLen = -1);
// function calculates absolute offset of min array, min is sub-array of max, offset to be returned corresponds to maxIdx of max array
// dimsToExclude - should be sorted in increasing order
ND4J_EXPORT _CUDA_HD Nd4jLong subArrayOffset(const Nd4jLong maxIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude = nullptr, const int dimsLen = -1);
// max array is outer for min array, min array is sub-array of max array
// function calculates the coordinates of min array (and saves them into minIdxs) given coordinates of max array (already stored in maxIdxs)
// dimsToExclude - should be sorted in increasing order
// dimsLen - length of dimsToExclude, if not set (= -1), then it is calculated as maxRank - minRank
ND4J_EXPORT _CUDA_HD void maxIndToMinInd(Nd4jLong* maxIdxs, Nd4jLong* minIdxs, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude = nullptr, const int dimsLen = -1);
// calculate indexes of max-array, these output indexes correspond to one minIdx index of min-array which is sub-array of max-array
// dimsToExclude - should be sorted in increasing order
ND4J_EXPORT _CUDA_HD int outerArrayIndexes(Nd4jLong* maxIdxs, const Nd4jLong minIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude = nullptr);
// calculate offsets of max-array, these output offsets correspond to one minIdx index of min-array which is sub-array of max-array
// dimsToExclude - should be sorted in increasing order
ND4J_EXPORT _CUDA_HD int outerArrayOffsets(Nd4jLong* maxOffsets, const Nd4jLong minIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude = nullptr);
// calculates offsets for numOfSubArrs sub-arrays; shape in this context means the dimensions excluded from the outer array
// rank is equal to size of shape
ND4J_EXPORT void calcSubArrOffsets(const Nd4jLong numOfSubArrs, const int rank, const Nd4jLong* shape, const Nd4jLong* strides, Nd4jLong* subArrOffsets);
ND4J_EXPORT _CUDA_HD void shapeOldScalar(nd4j::DataType dtype, Nd4jLong* const buffer, const char order);
// calculate element-wise stride
// if array is scalar or unit length vector then ews = 1
// if array is common vector then ews = stride of non-unity dimension
// if strides are normal set ews = 1, otherwise ews = 0
ND4J_EXPORT _CUDA_HD void calcEws(Nd4jLong* shapeInfo, Nd4jLong len);
//END HEADERS
//BEGIN IMPLEMENTATIONS
#ifdef __CUDACC__
// Device-side scratch allocator for coordinate buffers.
// Small requests are served with device-side `new` (caller is responsible
// for freeing); larger requests are sliced out of the preallocated global
// `buffer` per thread, falling back to device malloc when the slice would
// run past PREALLOC_SIZE.
// NOTE(review): `manager` is currently unused — the shared-memory path is
// commented out below; confirm before removing the parameter.
template <typename T>
__device__ INLINEDEF Nd4jLong *cuMalloc(Nd4jLong *buffer, long size, UnifiedSharedMemory *manager) {
// if we go for 3 dimensions coord space or below - just use shared memory for that
if (size <= MAX_COORD * 4) {
Nd4jLong *ptr = new Nd4jLong[size / 4];//manager->getSharedCoordBuffer() + (threadIdx.x * MAX_COORD);
return ptr;
} else {
// otherwise go to preallocated global memory :(
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// slice would overrun the preallocated region: fall back to device malloc
if (tid * size > PREALLOC_SIZE - size) {
return (Nd4jLong *) malloc(size);
} else {
// hand out this thread's slice of the preallocated buffer
Nd4jLong *ret = buffer;
ret += (tid * size);
return ret;
}
}
}
#endif
#ifdef __CUDACC__
/**
 * BEWARE: THIS METHOD DOES NOT CHECK ALLOCATION BOUNDARIES
 *
 * Returns this thread's slice of the caller-provided buffer
 * (offset by threadIdx.x * size); nothing is allocated.
 */
__device__ INLINEDEF Nd4jLong *cuMalloc(Nd4jLong *buffer, long size) {
Nd4jLong *ret = buffer;
ret += (threadIdx.x * size);
return ret;
}
#endif
/**
 * Length of a tad (tensor along dimension) given
 * the shape information and the dimensions the tad spans.
 *
 * For a single dimension this is just that dimension's extent;
 * for several dimensions it is the product of their extents.
 */
INLINEDEF _CUDA_HD int tadLength(Nd4jLong *shapeInfo, int *dimension, int dimensionLength) {
    if (dimensionLength == 1)
        return shape::shapeOf(shapeInfo)[dimension[0]];

    int result = 1;
    for (int axis = 0; axis < shape::rank(shapeInfo); axis++) {
        for (int d = 0; d < dimensionLength; d++) {
            if (axis == dimension[d])
                result *= shape::shapeOf(shapeInfo)[dimension[d]];
        }
    }
    return result;
}
/**
 * Tad element wise stride: given the inner most (last sorted) dimension,
 * the element wise stride of the tad (disregarding order) is that
 * dimension's stride — i.e. the reduction-index element wise stride.
 *
 * Example, c order shape/stride:
 *   shape:  2,2,3,2
 *   stride: 12,6,2,1
 * The tad element wise stride for dimension 3 is 1; for dimension 0 it
 * will be 12; for {2,3} it is 1 (the multi dimensional {2,3} case is
 * equivalent to the singular 3 case).
 *
 * This applies to the dimension(s) that ultimately end up removed.
 * Note: this may not preserve the ordering of the tad, but may be used
 * for reductions.
 */
INLINEDEF _CUDA_HD int tadElementWiseStride(Nd4jLong *shapeInfo, int *dimension, int dimensionLength) {
    return reductionIndexElementWiseStride(shapeInfo, dimension, dimensionLength);
}
// Two shapes are equal iff they have the same rank and identical extents.
INLINEDEF _CUDA_HD bool shapeEquals(const int shape1Rank, const Nd4jLong *shape1, const int shape2Rank, const Nd4jLong *shape2) {
    // rank mismatch rules out equality immediately
    if (shape1Rank != shape2Rank)
        return false;
    for (int d = 0; d < shape1Rank; ++d)
        if (shape1[d] != shape2[d])
            return false;
    return true;
}
// Shape-info convenience overload: compares the shape sections of two buffers.
INLINEDEF _CUDA_HD bool shapeEquals(const Nd4jLong *shapeInfo1, const Nd4jLong *shapeInfo2) {
    // const_cast needed only because the legacy shapeOf signature is non-const
    auto s1 = shape::shapeOf(const_cast<Nd4jLong*>(shapeInfo1));
    auto s2 = shape::shapeOf(const_cast<Nd4jLong*>(shapeInfo2));
    return shape::shapeEquals(shape::rank(shapeInfo1), s1, shape::rank(shapeInfo2), s2);
}
// Element-wise equality of two stride arrays of possibly differing rank.
INLINEDEF _CUDA_HD bool strideEquals(int shape1Rank, Nd4jLong *shape1, int shape2Rank, Nd4jLong *shape2) {
    if (shape1Rank != shape2Rank)
        return false;
    int d = 0;
    while (d < shape1Rank) {
        if (shape1[d] != shape2[d])
            return false;
        ++d;
    }
    return true;
}
// Shape-info convenience overload: compares the stride sections of two buffers.
INLINEDEF _CUDA_HD bool strideEquals(Nd4jLong *shapeInfo1, Nd4jLong *shapeInfo2) {
    return shape::strideEquals(shape::rank(shapeInfo1), shape::stride(shapeInfo1),
                               shape::rank(shapeInfo2), shape::stride(shapeInfo2));
}
// Raw-array overload: strides are equal iff ranks match and entries match.
INLINEDEF _CUDA_HD bool strideEquals(Nd4jLong *stride1, int rank1, Nd4jLong *stride2, int rank2) {
    if (rank1 != rank2)
        return false;
    for (int d = 0; d < rank1; ++d)
        if (stride1[d] != stride2[d])
            return false;
    return true;
}
// Computes the shape buffer produced by reducing originalShapeBuffer along
// the given dimensions. The result always has rank >= 2: rank-1 results are
// promoted to row/column vectors and full reductions become a 1x1 buffer.
// Returns a newly allocated shape info buffer (caller owns it).
INLINEDEF _CUDA_HD Nd4jLong *computeResultShape(Nd4jLong *originalShapeBuffer, int* dimension,int dimensionLength) {
Nd4jLong *retShape;
int retShapeLength;
// 2147483647 (INT32_MAX) is presumably the "reduce all dimensions" sentinel
// (MAX_DIMENSION) -- TODO confirm against the dimension constants
if(dimensionLength == 1 && dimension[0] == 2147483647) {
retShape = new Nd4jLong[2];
retShape[0] = 1;
retShape[1] = 1;
retShapeLength = 2;
}
else {
// drop the reduced dimensions from the original shape
// NOTE(review): dataLength is passed as shapeInfoLength(rank) rather than
// rank -- verify this is what removeIndex expects
retShape = shape::removeIndex<Nd4jLong, int>(shape::shapeOf(originalShapeBuffer), dimension, shape::shapeInfoLength(shape::rank(originalShapeBuffer)), dimensionLength);
retShapeLength = shape::rank(originalShapeBuffer) - dimensionLength;
}
//ensure vector is proper shape
// rank-1 results become a row vector when dimension 0 was reduced,
// otherwise a column vector
if (retShapeLength == 1) {
if (dimension[0] == 0) {
auto newRetShape = new Nd4jLong[2]{1, retShape[0]};
delete[] retShape;
retShape = newRetShape;
retShapeLength = 2;
}
else {
auto newRetShape = new Nd4jLong[2]{retShape[0], 1};
delete[] retShape;
retShape = newRetShape;
retShapeLength = 2;
}
} else if (retShapeLength == 0) {
// every dimension reduced: represent the scalar result as 1x1
auto newRetShape = new Nd4jLong[2]{1, 1};
delete[] retShape;
retShape = newRetShape;
retShapeLength = 2;
}
// build the final shape info buffer, preserving the original data type
auto ret = shape::shapeBuffer(retShapeLength, nd4j::ArrayOptions::dataType(originalShapeBuffer), retShape);
delete[] retShape;
return ret;
}
/**
 * Writes into `buffer` a shape info buffer whose shape and stride sections
 * are the entries of `shapeInfo` selected by `dimension`, and whose order
 * byte is copied from `shapeInfo`. Returns `buffer`.
 *
 * @param shapeInfo         source shape info buffer
 * @param dimension         dimensions to keep (indices into the source shape)
 * @param dimensionLength   number of entries in `dimension`
 * @param reverseCopyStride when true, strides are copied in reverse order
 * @param buffer            destination, at least shapeInfoLength(rank) entries
 */
INLINEDEF _CUDA_HD Nd4jLong *shapeInfoOnlyShapeAndStride(Nd4jLong *shapeInfo, Nd4jLong *dimension, int dimensionLength, bool reverseCopyStride, Nd4jLong *buffer) {
    Nd4jLong *theShape = shape::shapeOf(shapeInfo);
    Nd4jLong *theStride = shape::stride(shapeInfo);
    // a single dimension still produces a rank-2 (vector) result
    int rank = dimensionLength == 1 ? 2 : dimensionLength;

    Nd4jLong *ret = buffer;
    // set the rank
    ret[0] = rank;

    Nd4jLong *retShape = shape::shapeOf(ret);
    Nd4jLong *retStride = shape::stride(ret);
    int len = rank;

    if (dimensionLength == 1) {
        if (shape::isMatrix(theShape, shape::rank(shapeInfo))) {
            // column-vector result {extent, 1}. The previous code had two
            // byte-identical branches on dimension[0] == 0; collapsed here.
            retShape[0] = theShape[dimension[0]];
            retShape[1] = 1;
            retStride[0] = theStride[dimension[0]];
            retStride[1] = 1;
        }
        else {
            // non-matrix input: row-vector result {1, extent}
            retShape[0] = 1;
            retShape[1] = theShape[dimension[0]];
            retStride[0] = 1;
            retStride[1] = theStride[dimension[0]];
        }
    }
    else {
        // gather the selected dimensions, optionally reversing stride order
        Nd4jLong *newIndexes = dimension;
        if (reverseCopyStride)
            shape::reverseCopyTo(theStride, retStride, newIndexes, len);
        else
            shape::copyTo(len, theStride, retStride, newIndexes);
        shape::copyTo(len, theShape, retShape, newIndexes);
    }

    // last slot of a shape info buffer carries the ordering character
    ret[shape::shapeInfoLength(rank) - 1] = shape::order(shapeInfo);
    return ret;
}
// Allocating overload: creates a buffer of the right size and delegates.
// The returned buffer is owned by the caller.
INLINEDEF _CUDA_HD Nd4jLong *shapeInfoOnlyShapeAndStride(Nd4jLong *shapeInfo, Nd4jLong *dimension, int dimensionLength, bool reverseCopyStride) {
    const int rank = dimensionLength == 1 ? 2 : dimensionLength;

    traceNew(4);
    auto ret = new Nd4jLong[shape::shapeInfoLength(rank)];
    return shapeInfoOnlyShapeAndStride(shapeInfo, dimension, dimensionLength, reverseCopyStride, ret);
}
// Allocating overload: builds a shape info buffer from shape + stride.
// The returned buffer is owned by the caller.
INLINEDEF _CUDA_HD Nd4jLong * createShapeInfo(Nd4jLong *shape, Nd4jLong *stride, int rank) {
    traceNew(5);
    auto buffer = new Nd4jLong[shape::shapeInfoLength(rank)];
    return createShapeInfo(shape, stride, rank, buffer);
}
// Fills `buffer` with [rank, shape..., stride..., ...] and returns it.
// Only the rank, shape and stride sections are written.
INLINEDEF _CUDA_HD Nd4jLong * createShapeInfo(Nd4jLong *shape, Nd4jLong *stride, int rank, Nd4jLong *buffer) {
    buffer[0] = rank;
    Nd4jLong *shapeDest = shape::shapeOf(buffer);
    Nd4jLong *strideDest = shape::stride(buffer);
    for (int d = 0; d < rank; ++d) {
        shapeDest[d] = shape[d];
        strideDest[d] = stride[d];
    }
    return buffer;
}
/**
 * Computes the standard packed (Fortran/column-major) strides for a shape.
 *
 * @param shape the shape of the array
 * @param rank the rank of the shape
 * @param startNum the start number for the strides (usually 1)
 * @return a newly allocated stride array (2 entries for vectors,
 *         otherwise `rank` entries); caller owns it
 */
INLINEDEF _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank, int startNum) {
    // vectors always get the canonical {1, 1} strides
    if (isVector(shape, rank)) {
        traceNew(5);
        Nd4jLong *ret = new Nd4jLong[2];
        for (int i = 0; i < 2; i++)
            ret[i] = 1;
        return ret;
    }

    int dimensions = rank;

    traceNew(6);
    Nd4jLong *stride = new Nd4jLong[dimensions];
    // accumulate in Nd4jLong: the running product can exceed INT_MAX
    // for large arrays (strides are stored as Nd4jLong anyway)
    Nd4jLong st = startNum;
    for (int j = 0; j < rank; j++) {
        stride[j] = st;
        st *= shape[j];
    }
    return stride;
}
/**
 * Computes the standard packed (Fortran/column-major) strides for a shape
 * into a caller-provided buffer.
 *
 * @param ret output buffer; needs 2 entries for vectors, else `rank`
 * @return `ret`
 */
INLINEDEF _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank, int startNum, Nd4jLong *ret) {
    // vectors always get the canonical {1, 1} strides
    if (isVector(shape, rank)) {
        for (int i = 0; i < 2; i++)
            ret[i] = 1;
        return ret;
    }

    // accumulate in Nd4jLong: the running product can exceed INT_MAX
    // for large arrays (strides are stored as Nd4jLong anyway)
    Nd4jLong st = startNum;
    for (int j = 0; j < rank; j++) {
        ret[j] = st;
        st *= shape[j];
    }
    return ret;
}
/**
 * Computes the standard packed (C/row-major) strides for a given shape.
 *
 * @param shape the shape of the array
 * @param rank the rank of the shape
 * @param startNum the start number for the strides (usually 1)
 * @return a newly allocated stride array of `rank` entries; caller owns it
 */
INLINEDEF _CUDA_HD Nd4jLong * calcStrides(Nd4jLong *shape, int rank, int startNum) {
    traceNew(7);
    Nd4jLong *stride = new Nd4jLong[rank];

    if (rank == 1) {
        stride[0] = 1;
        return stride;
    }

    // accumulate in Nd4jLong: the running product can exceed INT_MAX
    // for large arrays (strides are stored as Nd4jLong anyway)
    Nd4jLong st = startNum;
    for (int j = rank - 1; j >= 0; j--) {
        stride[j] = st;
        st *= shape[j];
    }

    return stride;
}
/**
 * Computes the standard packed (C/row-major) strides for a given shape
 * into a caller-provided buffer of `rank` entries.
 *
 * @return `ret`
 */
INLINEDEF _CUDA_HD Nd4jLong * calcStrides(Nd4jLong *shape, int rank, int startNum, Nd4jLong* ret) {
    if (rank == 1) {
        ret[0] = 1;
        return ret;
    }

    // accumulate in Nd4jLong: the running product can exceed INT_MAX
    // for large arrays (strides are stored as Nd4jLong anyway)
    Nd4jLong st = startNum;
    for (int j = rank - 1; j >= 0; j--) {
        ret[j] = st;
        st *= shape[j];
    }

    return ret;
}
/**
 * Fortran (column-major) strides with the default start value of 1.
 * @return a newly allocated stride array; caller owns it
 */
INLINEDEF _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank) {
    return calcStridesFortran(shape, rank, 1);
}
// Fortran (column-major) strides with the default start value of 1,
// written into the caller-provided buffer.
INLINEDEF _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank, Nd4jLong* ret) {
    return calcStridesFortran(shape, rank, 1, ret);
}
/**
 * C (row-major) strides with the default start value of 1.
 * @return a newly allocated stride array; caller owns it
 */
INLINEDEF _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank) {
    return calcStrides(shape, rank, 1);
}
// C (row-major) strides with the default start value of 1,
// written into the caller-provided buffer.
INLINEDEF _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank, Nd4jLong* ret) {
    return calcStrides(shape, rank, 1, ret);
}
//////////////////////////////////////////////////////////////////////
// Recomputes the stride section of a shape info buffer in place for the
// given order ('c' or 'f') and resets the two trailing slots.
// Buffer layout used here: [rank, shape[0..rank), stride[0..rank), ..., ews, order].
INLINEDEF _CUDA_HD void updateStrides(Nd4jLong *shapeInfo, const char order) {
int rank = shapeInfo[0];
int doubleRank = 2*rank;
if (rank > 0) {
if (order == 'c') {
// c order: last stride is 1, walk backwards multiplying by the shape entry
shapeInfo[doubleRank] = 1; // set unity as last stride for c order
for (int j = 1; j < rank; ++j) {
shapeInfo[doubleRank - j] = shapeInfo[doubleRank - j + 1] * shapeInfo[rank + 1 - j];
}
} else {
// f order: first stride is 1, walk forwards multiplying by the shape entry
shapeInfo[rank + 1] = 1; // set unity as first stride for f order
for (int j = rank + 1; j < doubleRank; ++j) {
shapeInfo[j + 1] = shapeInfo[j] * shapeInfo[j - rank];
}
}
}
// set last 2 elements in shapeInfo
// element wise stride slot is reset to 1; the order byte is stored last
shapeInfo[doubleRank + 2] = 1;
shapeInfo[doubleRank + 3] = (int)order;
}
//////////////////////////////////////////////////////////////////////
// Fills stridesOnly with the packed strides for shapeOnly in the given
// order ('c' or 'f'). No-op for rank <= 0.
INLINEDEF _CUDA_HD void updateStrides(const int rank, const Nd4jLong *shapeOnly, Nd4jLong *stridesOnly, const char order) {
    if (rank <= 0)
        return;

    if (order == 'c') {
        // c order: unity stride at the last position, then walk backwards
        stridesOnly[rank - 1] = 1;
        for (int j = rank - 2; j >= 0; --j)
            stridesOnly[j] = stridesOnly[j + 1] * shapeOnly[j + 1];
    }
    else {
        // f order: unity stride at the first position, then walk forwards
        stridesOnly[0] = 1;
        for (int j = 1; j < rank; ++j)
            stridesOnly[j] = stridesOnly[j - 1] * shapeOnly[j - 1];
    }
}
// Checks whether the input dimensions are permuted; a non-permuted
// dimension list must be strictly ascending (0,...,rank-1 order).
template <typename T>
INLINEDEF _CUDA_HD bool isDimPermuted(const T* dimensions, const Nd4jLong dimSize) {
    // any descending adjacent pair means the list is out of order
    for (Nd4jLong i = 0; i + 1 < dimSize; ++i) {
        if (dimensions[i] > dimensions[i + 1])
            return true;
    }
    return false;
}
/**
 * Deep copy of a ShapeInformation struct.
 *
 * @param toCopy the shape information to copy
 * @return a new struct with freshly allocated shape/stride arrays;
 *         the caller owns the struct and both arrays
 */
INLINEDEF _CUDA_HD ShapeInformation *shapeCopy( ShapeInformation *toCopy) {
    auto copy = new ShapeInformation;

    traceNew(8);
    copy->shape = new Nd4jLong[toCopy->rank];
    memcpy(copy->shape, toCopy->shape, toCopy->rank * sizeof(Nd4jLong));

    traceNew(9);
    copy->stride = new Nd4jLong[toCopy->rank];
    memcpy(copy->stride, toCopy->stride, toCopy->rank * sizeof(Nd4jLong));

    // scalar fields are copied verbatim
    copy->order = toCopy->order;
    copy->rank = toCopy->rank;
    copy->offset = toCopy->offset;
    copy->elementWiseStride = toCopy->elementWiseStride;
    return copy;
}
// Attempts to compute a single element wise stride with which the whole
// array can be iterated as if flattened to shape {1, prod(shape)}.
// Returns 0 when the shape/stride combination is not contiguous enough
// for that (the classic reshape-without-copy compatibility check --
// presumably adapted from numpy's _attempt_nocopy_reshape; verify).
INLINEDEF _CUDA_HD int computeElementWiseStride(int rank, Nd4jLong *shape, Nd4jLong *stride, int isFOrder) {
// scalars are trivially contiguous
if (rank == 0)
return 1;
// vectors: the stride of the last dimension already iterates every element
if(shape::isVector(shape,rank)) {
return stride[rank - 1];
}
else {
int oldnd;
Nd4jLong *oldDims = shape::copyOf(rank, shape);
Nd4jLong *oldStrides = shape::copyOf(rank, stride);
int np, op, last_stride;
int oldStart, oldStop, ok, newStart, newStop, nk;
traceNew(10);
auto newStrides = new Nd4jLong[rank];
oldnd = 0;
//set the shape to be 1 x length
int newShapeRank = 2;
auto newShape = new Nd4jLong[newShapeRank];
newShape[0] = 1;
newShape[1] = shape::prodLong(shape, rank);
/*
* Remove axes with dimension 1 from the old array. They have no effect
* but would need special cases since their strides do not matter.
*/
for (oldStart = 0; oldStart < rank; oldStart++) {
if (shape[oldStart] != 1) {
oldDims[oldnd] = shape[oldStart];
oldStrides[oldnd] = stride[oldStart];
oldnd++;
}
}
// total element counts of the new and old shapes must agree
np = 1;
for (newStart = 0; newStart < newShapeRank; newStart++) {
np *= newShape[newStart];
}
op = 1;
for (oldStart = 0; oldStart < oldnd; oldStart++) {
op *= oldDims[oldStart];
}
if (np != op) {
/* different total sizes; no hope */
delete[] newStrides;
delete[] newShape;
delete[] oldStrides;
delete[] oldDims;
return 0;
}
if (np == 0) {
/* the current code does not handle 0-sized arrays, so give up */
delete[] newStrides;
delete[] newShape;
delete[] oldStrides;
delete[] oldDims;
return 0;
}
/* oldStart to oldStop and newStart to newStop give the axis ranges currently worked with */
oldStart = 0;
oldStop = 1;
newStart = 0;
newStop = 1;
// greedily match runs of old axes against runs of new axes with equal
// element counts, checking each run for contiguity
while (newStart < newShapeRank && oldStart < oldnd) {
np = newShape[newStart];
op = oldDims[oldStart];
while (np != op) {
if (np < op) {
/* Misses trailing 1s, these are handled later */
np *= newShape[newStop++];
} else {
op *= oldDims[oldStop++];
}
}
/* Check whether the original axes can be combined */
for (ok = oldStart; ok < oldStop - 1; ok++) {
if (isFOrder) {
if (oldStrides[ok + 1] != oldDims[ok] * oldStrides[ok]) {
/* not contiguous enough */
delete[] newStrides;
delete[] newShape;
delete[] oldStrides;
delete[] oldDims;
return 0;
}
} else {
/* C order */
if (oldStrides[ok] != oldDims[ok + 1] * oldStrides[ok + 1]) {
/* not contiguous enough */
delete[] newStrides;
delete[] newShape;
delete[] oldStrides;
delete[] oldDims;
return 0;
}
}
}
/* Calculate new strides for all axes currently worked with */
if (isFOrder) {
newStrides[newStart] = oldStrides[oldStart];
for (nk = newStart + 1; nk < newStop; nk++) {
newStrides[nk] = newStrides[nk - 1] * newShape[nk - 1];
}
} else {
/* C order */
newStrides[newStop - 1] = oldStrides[oldStop - 1];
for (nk = newStop - 1; nk > newStart; nk--) {
newStrides[nk - 1] = newStrides[nk] * newShape[nk];
}
}
newStart = newStop++;
oldStart = oldStop++;
}
/*
* Set strides corresponding to trailing 1s of the new shape.
*/
if (newStart >= 1) {
last_stride = newStrides[newStart - 1];
} else {
last_stride = stride[rank - 1];
}
if (isFOrder) {
if (newStart >= 1)
last_stride *= newShape[newStart - 1];
}
for (nk = newStart; nk < newShapeRank; nk++) {
newStrides[nk] = last_stride;
}
//returns the last element of the new stride array
int ret = last_stride;
delete[] newStrides;
delete[] newShape;
delete[] oldStrides;
delete[] oldDims;
return ret;
}
}
/**
 * Element-wise stride for iterating along the given dimensions.
 * Only the single-dimension case is resolvable here; any other
 * dimension count yields 0 (no uniform stride).
 */
INLINEDEF _CUDA_HD int computeElementWiseStride(int rank, Nd4jLong *shape, Nd4jLong *stride, int isFOrder,
                                                Nd4jLong *dimension, int dimensionLength) {
    // a single target dimension maps directly onto that dimension's stride
    if (dimensionLength != 1)
        return 0;
    return stride[dimension[0]];
}
/**
 * Build a c-order shape-info buffer for the given rank and shape,
 * stamping the requested data type into it. Heap-allocated; caller owns it.
 */
INLINEDEF _CUDA_HD Nd4jLong *shapeBuffer(int rank, nd4j::DataType dtype, Nd4jLong *shape) {
    Nd4jLong *strides = shape::calcStrides(shape, rank);
    traceNew(11);
    auto info = new shape::ShapeInformation();
    info->shape = shape;
    info->stride = strides;
    info->offset = 0;
    info->rank = rank;
    info->elementWiseStride = shape::computeElementWiseStride(rank, shape, strides, 0);
    info->order = 'c';
    auto result = shape::toShapeBuffer(info);
    delete[] strides;
    delete info;
    nd4j::ArrayOptions::setDataType(result, dtype);
    return result;
}
/**
 * Variant that writes into a caller-supplied buffer instead of allocating.
 * Historically used only for the 2D SoftMax path.
 */
INLINEDEF _CUDA_HD Nd4jLong *shapeBuffer(int rank, nd4j::DataType dtype, Nd4jLong *shape, Nd4jLong *buffer) {
    Nd4jLong strides[MAX_RANK];
    shape::calcStrides(shape, rank, strides);
    shape::ShapeInformation info;
    info.shape = shape;
    info.stride = strides;
    info.offset = 0;
    info.rank = rank;
    info.elementWiseStride = shape::computeElementWiseStride(rank, shape, strides, 0);
    info.order = 'c';
    shape::toShapeBuffer(&info, buffer);
    nd4j::ArrayOptions::setDataType(buffer, dtype);
    return buffer;
}
/**
 * Build an f-order shape-info buffer for the given rank and shape,
 * stamping the requested data type into it. Heap-allocated; caller owns it.
 */
INLINEDEF _CUDA_HD Nd4jLong *shapeBufferFortran(int rank, nd4j::DataType dtype, Nd4jLong *shape) {
    auto strides = shape::calcStridesFortran(shape, rank);
    traceNew(12);
    auto info = new shape::ShapeInformation();
    info->shape = shape;
    info->stride = strides;
    info->offset = 0;
    info->rank = rank;
    info->elementWiseStride = shape::computeElementWiseStride(rank, shape, strides, 0);
    info->order = 'f';
    auto result = shape::toShapeBuffer(info);
    delete[] strides;
    delete info;
    nd4j::ArrayOptions::setDataType(result, dtype);
    return result;
}
// Variant writing into a caller-supplied output buffer instead of allocating.
INLINEDEF _CUDA_HD Nd4jLong *shapeBufferFortran(int rank, nd4j::DataType dtype, Nd4jLong *shape, Nd4jLong *output) {
    Nd4jLong strides[MAX_RANK];
    shape::calcStridesFortran(shape, rank, strides);
    shape::ShapeInformation info;
    info.shape = shape;
    info.stride = strides;
    info.offset = 0;
    info.rank = rank;
    info.elementWiseStride = shape::computeElementWiseStride(rank, shape, strides, 0);
    info.order = 'f';
    shape::toShapeBuffer(&info, output);
    nd4j::ArrayOptions::setDataType(output, dtype);
    return output;
}
/**
 * Compute the linear buffer offset of every element of the array
 * described by shape/stride, enumerated in c-order.
 * Returns a heap array of length prod(shape); caller owns it.
 */
INLINEDEF _CUDA_HD Nd4jLong *computeIndices(int rank, Nd4jLong *shape, Nd4jLong *stride) {
    Nd4jLong length = shape::prodLong(shape, rank);
    traceNew(13);
    Nd4jLong *ret = new Nd4jLong[length];
    // Nd4jLong counter: the old int counter overflowed for arrays with
    // more than 2^31 elements while length is Nd4jLong.
    for (Nd4jLong i = 0; i < length; i++) {
        Nd4jLong *idx = shape::ind2sub(rank, shape, i);
        ret[i] = shape::getOffset(0, shape, stride, idx, rank);
        delete[] idx;
    }
    return ret;
}
/**
 * Convenience overload taking a full shape-info buffer.
 */
INLINEDEF _CUDA_HD Nd4jLong *computeIndices(Nd4jLong *shapeBuffer) {
    return computeIndices(shape::rank(shapeBuffer), shape::shapeOf(shapeBuffer), shape::stride(shapeBuffer));
}
/**
 * Convert nd coordinates to a linear (c-order) index.
 * @param rank number of dimensions
 * @param shape extents of each dimension
 * @param indices the per-dimension coordinates
 * @return the linear index
 */
INLINEDEF _CUDA_HD Nd4jLong sub2Ind(const int rank, const Nd4jLong *shape, const Nd4jLong *indices) {
    Nd4jLong linear = indices[rank - 1];
    Nd4jLong weight = 1;
    // accumulate from the innermost axis outward
    for (int d = rank - 2; d >= 0; --d) {
        weight *= shape[d + 1];
        linear += weight * indices[d];
    }
    return linear;
}
/**
 * Fill the first `length` entries of buffer with `value`.
 * Bug fix: the loop counter was an int while length is Nd4jLong, which
 * overflows (UB) for buffers with more than 2^31 elements.
 */
template <typename T>
INLINEDEF _CUDA_HD void fill(T* buffer, T value, Nd4jLong length) {
    PRAGMA_OMP_SIMD
    for (Nd4jLong e = 0; e < length; e++)
        buffer[e] = value;
}
/**
 * Linear index -> nd coordinates (allocating variant).
 * @param shape the shape of the dimensions
 * @param index the linear index to map
 * @param numIndices total index count (typically prod of shape)
 * @return heap array of `rank` coordinates; caller owns it
 */
INLINEDEF _CUDA_HD Nd4jLong* ind2sub(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices) {
    auto coords = new Nd4jLong[rank];
    ind2sub(rank, shape, index, numIndices, coords);
    return coords;
}
/**
 * Same, inferring the total index count from the shape.
 */
INLINEDEF _CUDA_HD Nd4jLong* ind2sub(int rank, Nd4jLong *shape, Nd4jLong index) {
    return ind2sub(rank, shape, index, shape::prodLong(shape, rank));
}
/**
 * Linear index -> nd coordinates, walking from the last axis down,
 * writing the result into `ret`.
 * Bug fix: the divisor was declared `int`, silently truncating
 * numIndices for arrays with more than 2^31 elements; it is now Nd4jLong.
 * @param shape the shape of the dimensions
 * @param index the linear index to map
 * @param numIndices total index count (typically prod of shape)
 * @param ret output array of `rank` coordinates
 */
INLINEDEF _CUDA_HD void ind2sub(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices, Nd4jLong *ret) {
    Nd4jLong denom = numIndices;
    for (int i = rank - 1; i >= 0; i--) {
        denom /= shape[i];
        ret[i] = index / denom;
        index %= denom;
    }
}
/**
 * Linear index -> nd coordinates, inferring the total index count
 * from the shape and writing the result into `out`.
 */
INLINEDEF _CUDA_HD void ind2sub(int rank,Nd4jLong *shape, Nd4jLong index, Nd4jLong *out) {
    const Nd4jLong total = shape::prodLong(shape, rank);
    ind2sub(rank, shape, index, total, out);
}
/**
 * Linear index -> nd coordinates, c-order (allocating variant).
 * @return heap array of `rank` coordinates; caller owns it
 */
INLINEDEF _CUDA_HD Nd4jLong * ind2subC(const int rank, const Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices) {
    auto coords = new Nd4jLong[rank];
    ind2subC(rank, shape, index, numIndices, coords);
    return coords;
}
/**
 * Same, inferring the total index count from the shape.
 */
INLINEDEF _CUDA_HD Nd4jLong *ind2subC(const int rank, const Nd4jLong *shape, Nd4jLong index) {
    return ind2subC(rank, shape, index, shape::prodLong(shape, rank));
}
/**
 * Linear index -> nd coordinates in c-order, writing into `ret`.
 * @param arrLen total index count (typically prod of shape)
 */
INLINEDEF _CUDA_HD void ind2subC(const int rank, const Nd4jLong *shape, Nd4jLong index, Nd4jLong arrLen, Nd4jLong *ret) {
    // peel one dimension per iteration, from the outermost axis inward
    for (int d = 0; d < rank; d++) {
        arrLen /= shape[d];
        if (arrLen > 0) {
            ret[d] = index / arrLen;
            index %= arrLen;
        } else {
            // degenerate (zero-sized) remainder: coordinate pinned to 0
            ret[d] = 0;
        }
    }
}
/**
 * Linear index -> nd coordinates in c-order, inferring the total
 * index count from the shape and writing the result into `out`.
 */
INLINEDEF _CUDA_HD void ind2subC(const int rank, const Nd4jLong *shape, Nd4jLong index, Nd4jLong *out) {
    const Nd4jLong total = shape::prodLong(shape, rank);
    ind2subC(rank, shape, index, total, out);
}
//////////////////////////////////////////////////////////////////////
// Map a linear index to a buffer offset using the shape-info layout.
INLINEDEF _CUDA_HD Nd4jLong getIndexOffset(Nd4jLong index, const Nd4jLong *shapeInfo, Nd4jLong arrLen) {
    const auto rank = shapeInfo[0];
    const Nd4jLong ews = shapeInfo[rank + rank + 2];
    // fast path: dense c-ordered buffers map linearly through ews
    if (ews > 0 && order(shapeInfo) == 'c')
        return ews == 1 ? index : ews * index;
    // general path: decompose the index axis by axis, accumulating strides
    Nd4jLong offset = 0;
    for (int i = 1; i <= rank; ++i) {
        arrLen /= shapeInfo[i];
        if (arrLen > 0 && shapeInfo[i] > 1) {
            offset += (index / arrLen) * shapeInfo[i + rank];
            index %= arrLen;
        }
    }
    return offset;
}
// Unsigned variant of the above (99 is the ASCII code of 'c').
INLINEDEF _CUDA_HD uint getIndexOffset(uint index, const uint *shapeInfo, uint arrLen) {
    const uint rank = shapeInfo[0];
    const uint ews = shapeInfo[rank + rank + 2];
    if (ews > 0 && shapeInfo[rank + rank + 3] == 99)
        return ews == 1 ? index : ews * index;
    uint offset = 0;
    for (uint i = 1; i <= rank; ++i) {
        arrLen /= shapeInfo[i];
        if (arrLen > 0 && shapeInfo[i] > 1) {
            offset += (index / arrLen) * shapeInfo[i + rank];
            index %= arrLen;
        }
    }
    return offset;
}
// Dispatch between the unsigned and the Nd4jLong implementations.
INLINEDEF _CUDA_HD Nd4jLong indexOffset(Nd4jLong index, const Nd4jLong* lShapeInfo, const uint* uShapeInfo, Nd4jLong arrLen, const bool useUnsigned) {
    if (useUnsigned)
        return getIndexOffset(static_cast<uint>(index), uShapeInfo, static_cast<uint>(arrLen));
    return getIndexOffset(index, lShapeInfo, arrLen);
}
//////////////////////////////////////////////////////////////////////
// Map a linear index to a buffer offset for an explicitly chosen order.
INLINEDEF _CUDA_HD Nd4jLong getIndexOrderOffset(Nd4jLong index, const Nd4jLong *shapeInfo, Nd4jLong arrLen, const char order) {
    const auto rank = *shapeInfo;
    Nd4jLong offset = 0;
    if (order == 'c') {
        // c-order: peel dimensions starting from the first axis
        for (int i = 1; i <= rank; ++i) {
            arrLen /= shapeInfo[i];
            if (arrLen > 0 && shapeInfo[i] > 1) {
                offset += (index / arrLen) * shapeInfo[i + rank];
                index %= arrLen;
            }
        }
    } else {
        // f-order: peel dimensions starting from the last axis
        for (int i = rank; i >= 1; --i) {
            arrLen /= shapeInfo[i];
            if (arrLen > 0 && shapeInfo[i] > 1) {
                offset += (index / arrLen) * shapeInfo[i + rank];
                index %= arrLen;
            }
        }
    }
    return offset;
}
/**
 * Order-aware linear index -> nd coordinates: chooses the f-order or
 * c-order decomposition based on the buffer's order flag.
 * @param index the linear index to map
 * @param numIndices total index count
 * @param out output array of coordinates
 */
INLINEDEF _CUDA_HD void ind2subOrder(Nd4jLong *shapeInfo, Nd4jLong index, Nd4jLong numIndices, Nd4jLong *out) {
    const int r = shape::rank(shapeInfo);
    Nd4jLong *shp = shape::shapeOf(shapeInfo);
    if (shape::order(shapeInfo) == 'f')
        shape::ind2sub(r, shp, index, numIndices, out);
    else
        shape::ind2subC(r, shp, index, numIndices, out);
}
/**
 * Same, inferring the index count from the buffer's length.
 */
INLINEDEF _CUDA_HD void ind2subOrder(Nd4jLong *shapeInfo, Nd4jLong index, Nd4jLong *out) {
    ind2subOrder(shapeInfo, index, shape::length(shapeInfo), out);
}
/**
 * Allocate and return `shape` permuted by `rearrange`
 * (result[i] = shape[rearrange[i]]). Caller owns the result.
 * @param length number of entries
 * @param shape values to permute
 * @param rearrange permutation indexes
 */
INLINEDEF _CUDA_HD Nd4jLong *doPermuteSwap(int length, Nd4jLong *shape, int *rearrange) {
    traceNew(16);
    auto permuted = new Nd4jLong[length];
    for (int i = 0; i < length; i++)
        permuted[i] = shape[rearrange[i]];
    return permuted;
}
/**
 * In-place permutation of a length-`length` array via *shape
 * according to `rearrange` (element i receives old element rearrange[i]).
 * No-op for rank 1, degenerate (all-ones) shapes, and the identity
 * permutation. Fix: removed the unreachable `else if (length == 1)`
 * branch that followed the rank-2 fast path — rank 1 already returns
 * at the top of the function.
 */
INLINEDEF _CUDA_HD void doPermuteSwap(int length, Nd4jLong **shape, int *rearrange) {
    // rank-1 and total-size-< 2 shapes need no permute
    if (length == 1)
        return;
    if (shape::prodLong(*shape, length) < 2)
        return;
    // consecutive ascending indexes: already in order, nothing to do
    bool inOrder = true;
    for (int i = 0; i < length - 1; i++)
        inOrder = inOrder && rearrange[i] + 1 == rearrange[i + 1];
    if (inOrder)
        return;
    Nd4jLong *shapeDeref = *shape;
    // rank 2 at this point can only be the swap of the two entries
    if (length == 2) {
        auto tmp = shapeDeref[0];
        shapeDeref[0] = shapeDeref[1];
        shapeDeref[1] = tmp;
        return;
    }
    // general case: copy out, then scatter back through rearrange
    auto temp = new Nd4jLong[length];
    memcpy(temp, shapeDeref, sizeof(Nd4jLong) * length);
    for (int i = 0; i < length; i++)
        shapeDeref[i] = temp[rearrange[i]];
    delete[] temp;
}
// Permute shapeBuffer into `out`, copying the buffer over first when the
// two pointers differ.
INLINEDEF _CUDA_HD void permuteShapeBufferInPlace(Nd4jLong *shapeBuffer, int *rearrange, Nd4jLong *out) {
    if (shapeBuffer != out) {
        const auto len = shape::shapeInfoLength(shape::rank(shapeBuffer));
        memcpy(out, shapeBuffer, sizeof(Nd4jLong) * len);
    }
    doPermuteShapeBuffer(shape::rank(shapeBuffer), shapeBuffer, rearrange, out);
}
// Allocate and return a permuted copy of shapeBuffer; caller owns it.
INLINEDEF _CUDA_HD Nd4jLong *permuteShapeBuffer(Nd4jLong *shapeBuffer, int* rearrange) {
    const auto len = shape::shapeInfoLength(shape::rank(shapeBuffer));
    Nd4jLong *dup = shape::copyOf(len, shapeBuffer);
    doPermuteShapeBuffer(dup, rearrange);
    return dup;
}
// Permute shape and stride of shapeInfo in place according to rearrange,
// reset the ews slot to 0 and recompute the order flag. No-op for
// degenerate shapes and identity permutations; an invalid rearrange is
// reported and leaves the buffer untouched.
INLINEDEF _CUDA_HD void doPermuteShapeInfo(Nd4jLong *shapeInfo, const Nd4jLong *rearrange) {
    const int rank = shape::rank(shapeInfo);
    // shapes like {1}, {1,1}, ... have nothing to permute
    if (prodLong(shape::shapeOf(shapeInfo), rank) < 2)
        return;
    // identity permutation {0,1,2,...} is a no-op
    bool identity = true;
    for (int d = 0; d < rank; ++d)
        if (rearrange[d] != d) {
            identity = false;
            break;
        }
    if (identity)
        return;
    // validate the permutation indexes
    for (int d = 0; d < rank; ++d)
        if (rearrange[d] < 0 || rearrange[d] >= rank) {
            printf("shape::doPermuteShapeInfo function failed: rearrange indexes are incorrect !\n");
            return;
        }
    // scatter shape and stride through a scratch copy
    auto scratch = new Nd4jLong[shape::shapeInfoLength(rank)];
    memcpy(scratch, shapeInfo, sizeof(Nd4jLong) * shape::shapeInfoLength(rank));
    for (int d = 0; d < rank; ++d) {
        shapeInfo[d + 1] = scratch[rearrange[d] + 1];
        shapeInfo[d + 1 + rank] = scratch[rearrange[d] + 1 + rank];
    }
    shapeInfo[2 * rank + 2] = 0; // ews is no longer valid after a permute
    shapeInfo[2 * rank + 3] = shape::getOrder(rank, shape::shapeOf(shapeInfo), shape::stride(shapeInfo), 1);
    delete[] scratch;
}
// Same as above for an int rearrange array.
INLINEDEF _CUDA_HD void doPermuteShapeInfo(Nd4jLong *shapeInfo, const int* rearrange) {
    const int rank = shape::rank(shapeInfo);
    if (prodLong(shape::shapeOf(shapeInfo), rank) < 2)
        return;
    bool identity = true;
    for (int d = 0; d < rank; ++d)
        if (rearrange[d] != d) {
            identity = false;
            break;
        }
    if (identity)
        return;
    for (int d = 0; d < rank; ++d)
        if (rearrange[d] < 0 || rearrange[d] >= rank) {
            printf("shape::doPermuteShapeInfo function failed: rearrange indexes are incorrect !\n");
            return;
        }
    auto scratch = new Nd4jLong[shape::shapeInfoLength(rank)];
    memcpy(scratch, shapeInfo, sizeof(Nd4jLong) * shape::shapeInfoLength(rank));
    for (int d = 0; d < rank; ++d) {
        shapeInfo[d + 1] = scratch[rearrange[d] + 1];
        shapeInfo[d + 1 + rank] = scratch[rearrange[d] + 1 + rank];
    }
    shapeInfo[shapeInfoLength(rank) - 2] = 0; // ews
    shapeInfo[shape::shapeInfoLength(rank) - 1] = shape::getOrder(rank, shape::shapeOf(shapeInfo), shape::stride(shapeInfo), 1);
    delete[] scratch;
}
// Permute a shape buffer in place: swap the shape and stride entries,
// zero the ews slot and recompute the order flag. Scalars are left alone.
INLINEDEF _CUDA_HD void doPermuteShapeBuffer(Nd4jLong *shapeBuffer,int *rearrange) {
    if (shape::isScalar(shapeBuffer))
        return;
    const int r = shape::rank(shapeBuffer);
    Nd4jLong *shp = shape::shapeOf(shapeBuffer);
    Nd4jLong *strd = shape::stride(shapeBuffer);
    shape::doPermuteSwap(r, &shp, rearrange);
    shape::doPermuteSwap(r, &strd, rearrange);
    shapeBuffer[shapeInfoLength(r) - 2] = 0;
    shapeBuffer[shape::shapeInfoLength(r) - 1] = shape::getOrder(r, shp, strd, 1);
    // doPermuteShapeInfo(shapeBuffer, rearrange); // possible fix of integer overflow issue when strides are too large
}
/*
INLINEDEF _CUDA_HD void doPermuteShapeBuffer(Nd4jLong *shapeBuffer, int *rearrange, Nd4jLong *tmpBuffer) {
auto shapeRef = shapeBuffer;
//rank of the rearrange array == rank of shape buffer
int rearrageRank = shape::rank(shapeRef);
auto shape = shape::shapeOf(shapeRef);
auto stride = shape::stride(shapeRef);
shape::copyOf(rearrageRank,rearrange, tmpBuffer);
shape::doPermuteSwap(rearrageRank,&shape, tmpBuffer);
shape::copyOf(rearrageRank,rearrange, tmpBuffer);
shape::doPermuteSwap(rearrageRank,&stride,tmpBuffer);
shapeRef[shapeInfoLength(rearrageRank) - 2] = 0;
shapeRef[shape::shapeInfoLength(rearrageRank) - 1] = shape::getOrder(rearrageRank,shape,stride,1);
}
*/
// Rank-explicit variant of the in-place shape-buffer permute. Hands
// doPermuteSwap defensive copies of rearrange (historical behavior).
INLINEDEF _CUDA_HD void doPermuteShapeBuffer(int rank,Nd4jLong *shapeBuffer, int *rearrange) {
    Nd4jLong *shp = shape::shapeOf(shapeBuffer);
    Nd4jLong *strd = shape::stride(shapeBuffer);
    auto shapePerm = shape::copyOf(rank, rearrange);
    shape::doPermuteSwap(rank, &shp, shapePerm);
    delete[] shapePerm;
    auto stridePerm = shape::copyOf(rank, rearrange);
    shape::doPermuteSwap(rank, &strd, stridePerm);
    delete[] stridePerm;
    shapeBuffer[shape::shapeInfoLength(rank) - 1] = shape::getOrder(rank, shp, strd, 1);
    shapeBuffer[shape::shapeInfoLength(rank) - 2] = 0;
}
// Permute `shapeBuffer` using a caller-supplied scratch/output buffer.
// Permutes the shape and stride portions in place, then resets ews to 0
// and recomputes the order flag.
// NOTE(review): copyOf here copies only `rank` longs of shapeBuffer into
// tmpBuffer, not the full shapeInfoLength(rank) words — looks suspicious;
// confirm callers only rely on the shape portion of tmpBuffer.
INLINEDEF _CUDA_HD void doPermuteShapeBuffer(int rank, Nd4jLong *shapeBuffer, int *rearrange, Nd4jLong *tmpBuffer) {
Nd4jLong *shapeRef = shapeBuffer;
//rank of the rearrange array == rank of shape buffer
int rearrageRank = rank;
auto shape = shape::shapeOf(shapeRef);
auto stride = shape::stride(shapeRef);
if(shapeBuffer != tmpBuffer)
shape::copyOf(rearrageRank,shapeBuffer, tmpBuffer);
shape::doPermuteSwap(rearrageRank,&shape,rearrange);
shape::doPermuteSwap(rearrageRank,&stride,rearrange);
// ews slot (length - 2) is invalidated; order slot (length - 1) refreshed
shapeRef[shapeInfoLength(rank) - 2] = 0;
shapeRef[shape::shapeInfoLength(rank) - 1] = shape::getOrder(rank,shape,stride,1);
}
/**
 * Build a permutation of length `originalRank`:
 * the first (originalRank - dimensionLength) entries are i + dimensionLength,
 * the remaining entries count up from 0. Caller owns the result.
 */
INLINEDEF _CUDA_HD Nd4jLong *createPermuteIndexes(int originalRank, int *dimension,int dimensionLength) {
    const int delta = originalRank - dimensionLength;
    traceNew(17);
    auto perm = new Nd4jLong[originalRank];
    for (int i = 0; i < originalRank; i++)
        perm[i] = (i < delta) ? i + dimensionLength : i - delta;
    return perm;
}
/**
 * Determine the ordering flag for a shape/stride pair:
 * 'a' when both c- and fortran-contiguous, 'f' when only
 * fortran-contiguous, 'c' otherwise.
 * @param length number of dimensions
 * @param elementStride base stride used for the fortran check
 */
INLINEDEF _CUDA_HD char getOrder(int length, Nd4jLong *shape, Nd4jLong *stride, int elementStride) {
    // c-contiguity: strides must match a row-major layout built right-to-left
    int cContiguous = 1;
    int expected = 1;
    for (int i = length - 1; i >= 0; --i) {
        int dim = shape[i];
        if (stride[i] != expected) {
            cContiguous = 0;
            break;
        }
        if (dim == 0)
            break; // zero-sized dimension: trivially contiguous
        expected *= dim;
    }
    // fortran-contiguity: strides must match a column-major layout left-to-right
    int isFortran = 1;
    expected = elementStride;
    for (int i = 0; i < length; ++i) {
        int dim = shape[i];
        if (stride[i] != expected)
            isFortran = 0;
        if (dim == 0)
            break;
        expected *= dim;
    }
    if (isFortran && cContiguous)
        return 'a';
    if (isFortran)
        return 'f';
    return 'c';
}
/**
 * Validate a permutation array: it must match the shape length, contain
 * only indexes in [0, arrLength) and hold no duplicates.
 * @return 1 when valid, -1 otherwise
 */
template <typename T>
INLINEDEF _CUDA_HD int checkArrangeArray(T *arr, int arrLength, int shapeLength) {
    if (arrLength != shapeLength)
        return -1;
    // range check
    for (int i = 0; i < arrLength; i++)
        if (arr[i] < 0 || arr[i] >= arrLength)
            return -1;
    // uniqueness check (quadratic, but arrLength is a rank and thus tiny)
    for (int i = 0; i < arrLength; i++)
        for (int j = i + 1; j < arrLength; j++)
            if (arr[i] == arr[j])
                return -1;
    return 1;
}
// Debug hook invoked before heap allocations throughout this file; `id`
// identifies the call site. Intentionally a no-op — uncomment the
// printf/fflush lines below to trace allocations.
INLINEDEF _CUDA_HD void traceNew(int id) {
//printf("new happened: [%i]\n", id);
#ifndef __CUDACC__
//fflush(stdout);
#endif
}
/**
 * Permute the shape and stride of a ShapeInformation in place and
 * recompute its order flag.
 * @param info the shape information to permute
 * @param rearrange the permutation (checked for range/duplicates)
 * @param rank the rank of the rearrange array
 */
INLINEDEF _CUDA_HD void permute(ShapeInformation **info, int *rearrange, int rank) {
    ShapeInformation *target = *info;
    checkArrangeArray(rearrange, rank, rank);
    shape::doPermuteSwap(rank, &target->shape, rearrange);
    shape::doPermuteSwap(rank, &target->stride, rearrange);
    target->order = getOrder(rank, target->shape, target->stride, target->elementWiseStride);
}
/**
 * Returns whether the given shape is a vector:
 * rank 1 always, rank 2 when either dimension is 1, otherwise 0.
 * @param shape the shape of the array
 * @param rank the rank of the shape
 */
INLINEDEF _CUDA_HD int isVector(Nd4jLong *shape, int rank) {
    if (rank == 1)
        return 1;
    if (rank == 2)
        return (shape[0] == 1 || shape[1] == 1) ? 1 : 0;
    // rank 0 and rank > 2 are never vectors
    return 0;
}
// "Like a vector": rank greater than 2 but exactly one dimension larger
// than 1. posOfNonUnityDim receives the (0-based) index of that dimension.
INLINEDEF _CUDA_HD bool isLikeVector(Nd4jLong *shapeInfo, int& posOfNonUnityDim) {
    int nonUnity = 0;
    const auto rank = shapeInfo[0];
    for (int d = 1; d <= rank; ++d) {
        if (shapeInfo[d] != 1) {
            ++nonUnity;
            posOfNonUnityDim = d - 1;
        }
    }
    return nonUnity == 1 && rank > 2;
}
// Common vector: a single-element array, or exactly one non-unity
// dimension (any rank). posOfNonUnityDim receives that dimension's index.
INLINEDEF _CUDA_HD bool isCommonVector(const Nd4jLong *shapeInfo, int& posOfNonUnityDim) {
    if (rank(shapeInfo) > 0 && length(shapeInfo) == 1)
        return true;
    int nonUnity = 0;
    for (int d = 1; d <= shapeInfo[0]; ++d) {
        if (shapeInfo[d] != 1) {
            ++nonUnity;
            posOfNonUnityDim = d - 1;
        }
    }
    return nonUnity == 1;
}
// Heap-duplicate a shape-info buffer; caller owns the copy.
INLINEDEF _CUDA_H Nd4jLong* detachShape(Nd4jLong *originalShape) {
    auto dup = new Nd4jLong[shape::shapeInfoLength(originalShape)];
    memcpy(dup, originalShape, shape::shapeInfoByteLength(originalShape));
    return dup;
}
// Identical to detachShape: heap-duplicate a shape-info buffer.
INLINEDEF _CUDA_H Nd4jLong* copyShape(Nd4jLong *originalShape) {
    auto dup = new Nd4jLong[shape::shapeInfoLength(originalShape)];
    memcpy(dup, originalShape, shape::shapeInfoByteLength(originalShape));
    return dup;
}
// Vector check operating on a full shape-info buffer.
INLINEDEF _CUDA_HD int isVector(const Nd4jLong *shapeInfo) {
    return isVector(shape::shapeOf(const_cast<Nd4jLong*>(shapeInfo)), shape::rank(shapeInfo));
}
// A row vector is a vector whose first dimension equals 1.
INLINEDEF _CUDA_HD bool isRowVector(const Nd4jLong *shapeInfo) {
    if (shape::isVector(shapeInfo) != 1)
        return false;
    return shape::shapeOf(const_cast<Nd4jLong*>(shapeInfo))[0] == 1;
}
// A column vector is a vector whose first dimension is not 1.
INLINEDEF _CUDA_HD bool isColumnVector(Nd4jLong *shapeInfo) {
    if (shape::isVector(shapeInfo) != 1)
        return false;
    return shape::shapeOf(shapeInfo)[0] != 1;
}
/**
 * Returns 1 iff some dimension equals the total element count, i.e. the
 * array is effectively one-dimensional along that axis.
 * Fix: shape::prod(shape, rank) was recomputed on every loop iteration
 * (O(rank^2)); it is loop-invariant and is now hoisted out.
 */
INLINEDEF _CUDA_HD int oneDimEqualToLength(Nd4jLong *shape, int rank) {
    const auto total = shape::prod(shape, rank);
    for (int i = 0; i < rank; i++) {
        if (shape[i] == total)
            return 1;
    }
    return 0;
}
// Shape-info buffer convenience wrapper.
INLINEDEF _CUDA_HD int oneDimEqualToLength(Nd4jLong *shapeInfo) {
    return oneDimEqualToLength(shape::shapeOf(shapeInfo), shape::rank(shapeInfo));
}
/**
 * Returns 1 iff the shape describes a true matrix: rank 2 with both
 * dimensions larger than 1.
 * Fix: the old code read shape[1] even for rank 0/1 inputs (out of
 * bounds); any rank other than 2 now short-circuits to 0, which also
 * matches the semantic definition (rank-1 arrays are vectors, not
 * matrices).
 */
INLINEDEF _CUDA_HD int isMatrix(Nd4jLong *shape, int rank) {
    if (rank != 2)
        return 0;
    if (shape[0] == 1 || shape[1] == 1)
        return 0;
    return 1;
}
// Shape-info buffer convenience wrapper.
INLINEDEF _CUDA_HD int isMatrix(Nd4jLong *shapeInfo) {
    return isMatrix(shape::shapeOf(shapeInfo), shape::rank(shapeInfo));
}
/**
 * Returns the shape portion of an information buffer
 * (it starts right after the leading rank word).
 */
INLINEDEF _CUDA_HD Nd4jLong *shapeOf(Nd4jLong *buffer) {
    return &buffer[1];
}
/**
 * Heap-allocate and return a copy of `toCopy` (length elements).
 * Caller owns the returned buffer.
 */
template <typename T>
INLINEDEF _CUDA_HD T *copyOf(Nd4jLong length, T *toCopy) {
    traceNew(18);
    auto dup = new T[length];
    return copyOf(length, toCopy, dup);
}
/** Copy `length` elements of `toCopy` into `ret` and return `ret`. */
template <typename T>
INLINEDEF _CUDA_HD T* copyOf(Nd4jLong length, T *toCopy, T *ret) {
    memcpy(ret, toCopy, sizeof(T) * length);
    return ret;
}
/** Copy `length` elements from `from` into `to`. */
template <typename T>
INLINEDEF _CUDA_HD void copyTo(Nd4jLong length, T *from, T *to) {
    memcpy(to, from, sizeof(T) * length);
}
/** Gathering copy: to[i] = from[indexes[i]] for i in [0, length). */
INLINEDEF _CUDA_HD void copyTo(int length, Nd4jLong *from, Nd4jLong *to, Nd4jLong *indexes) {
    for (int i = 0; i < length; i++)
        to[i] = from[indexes[i]];
}
/**
* Permute the given strides
* in the given rearrange order
* @param toPermute the buffer to permute
* @param shapeRank the length of the buffer to permute
* @param rearrange the rearrange order (must be 0 based indexes
* and all must be filled in)
* @return the rearranged array
*/
/*
INLINEDEF _CUDA_HD Nd4jLong *permutedStrides(Nd4jLong *toPermute, int shapeRank, int *rearrange) {
Nd4jLong *strideCopy = copyOf(shapeRank, toPermute);
checkArrangeArray(rearrange, shapeRank, shapeRank);
Nd4jLong *newStride = doPermuteSwap(shapeRank, strideCopy, rearrange);
delete[] strideCopy;
return newStride;
}
*/
/**
 * Return the slice view of a shape array: the same array with the
 * leading dimension dropped (pointer arithmetic only).
 */
INLINEDEF _CUDA_HD Nd4jLong *slice(Nd4jLong *shape) {
    return &shape[1];
}
// Number of slices of a buffer = extent of its first dimension.
INLINEDEF _CUDA_HD int slices(Nd4jLong *shapeBuffer) {
    return static_cast<int>(shape::shapeOf(shapeBuffer)[0]);
}
// Build the shape-info buffer describing slice `sliceIdx` of the array
// described by `shapeBuffer` (the array with its first dimension fixed).
// Returns a newly allocated buffer the caller owns. Vectors and matrices
// are special-cased; the result rank is clamped to a minimum of 2.
INLINEDEF _CUDA_HD Nd4jLong *sliceOfShapeBuffer(Nd4jLong sliceIdx, Nd4jLong *shapeBuffer) {
int rank = shape::rank(shapeBuffer);
// result rank is one less than the input, but never below 2
int newRank = rank - 1;
if(newRank < 2)
newRank = 2;
Nd4jLong *newShapeBuffer = new Nd4jLong[shape::shapeInfoLength(newRank)];
newShapeBuffer[0] = newRank;
Nd4jLong *currShape = shape::shapeOf(shapeBuffer);
Nd4jLong *currStride = shape::stride(shapeBuffer);
//initialize new shape and stride by taking the shape and stride + 1
//and adding to the shape information
//a slice is always just taking the existing shape and cutting the first index off
//of the shape and stride
Nd4jLong *newShape = shape::shapeOf(newShapeBuffer);
Nd4jLong *newStride = shape::stride(newShapeBuffer);
if(shape::isVector(shapeBuffer)) {
// shadows the outer currShape with the same value
Nd4jLong *currShape = shape::shapeOf(shapeBuffer);
//row vector: slice index 0 is a valid index, just copy the whole thing
if(currShape[0] == 1) {
if(sliceIdx == 0) {
memcpy(newShapeBuffer,shapeBuffer,shape::shapeInfoByteLength(shape::rank(shapeBuffer)));
return newShapeBuffer;
}
// NOTE(review): a row vector with sliceIdx != 0 falls through to the
// generic path below — confirm that is intended for such indexes.
}
//column vector: this will be a scalar
else {
delete[] newShapeBuffer;
Nd4jLong *scalar = shape::createScalarShapeInfo();
int offset = shape::offset(shapeBuffer);
// element offset stored in the scalar's offset slot (length - 3)
scalar[shape::shapeInfoLength(2) - 3] = offset + sliceIdx;
return scalar;
}
}
else if(shape::isMatrix(shapeBuffer)) {
// a slice of a matrix is its (1 x cols) row
newShape[0] = 1;
newShape[1] = currShape[1];
newStride[0] = 1;
newStride[1] = currStride[1];
}
else {
// general case: drop the leading dimension from shape and stride
for(int i = 0; i < newRank; i++) {
newShape[i] = currShape[i + 1];
newStride[i] = currStride[i + 1];
}
}
// compute the slice's element offset within the original buffer
auto indices = new Nd4jLong[rank];
memset((void *) indices,0,rank * sizeof(Nd4jLong));
indices[0] = sliceIdx;
Nd4jLong offset = shape::getOffset(0,newShape,newStride,indices,rank);
// fill the trailing slots: offset (len-3), ews (len-2), order (len-1)
newShapeBuffer[shape::shapeInfoLength(newRank) - 3] = offset;
if(shape::isMatrix(shapeBuffer)) {
newShapeBuffer[shape::shapeInfoLength(newRank) - 2] = currStride[1];
}
else {
newShapeBuffer[shape::shapeInfoLength(newRank) - 2] = shape::elementWiseStride(shapeBuffer);
}
newShapeBuffer[shape::shapeInfoLength(newRank) - 1] = shape::getOrder(newRank,newShape,newStride,1);
delete[] indices;
return newShapeBuffer;
}
/**
 * Length (in longs) of a shape-info buffer for the given rank:
 * rank word + shape[rank] + stride[rank] + offset/extra + ews + order.
 */
INLINEDEF _CUDA_HD int shapeInfoLength(int rank) {
    return 2 * rank + 4;
}
INLINEDEF _CUDA_HD int shapeInfoLength(Nd4jLong* shape) {
    return shapeInfoLength(static_cast<int>(shape[0]));
}
INLINEDEF _CUDA_HD int shapeInfoLength(const Nd4jLong* shape) {
    return shapeInfoLength(static_cast<int>(shape[0]));
}
// Size in bytes of a shape-info buffer for the given rank.
INLINEDEF _CUDA_HD size_t shapeInfoByteLength(int rank) {
    return static_cast<size_t>(2 * rank + 4) * sizeof(Nd4jLong);
}
INLINEDEF _CUDA_HD size_t shapeInfoByteLength(const Nd4jLong* shapeInfo) {
    return shapeInfoByteLength(static_cast<int>(shapeInfo[0]));
}
/**
 * Returns the rank portion of an information buffer (its first word).
 */
INLINEDEF _CUDA_HD int rank(const Nd4jLong *buffer) {
    return static_cast<int>(*buffer);
}
INLINEDEF _CUDA_HD int rank(const int *buffer) {
    return *buffer;
}
INLINEDEF _CUDA_HD int rank(const unsigned int *buffer) {
    return static_cast<int>(*buffer);
}
// Pointer to the element-wise-stride slot of a shape-info buffer.
INLINEDEF _CUDA_HD Nd4jLong* ews(Nd4jLong* shapeInfo) {
    const auto rank = shapeInfo[0];
    return &shapeInfo[2 * rank + 2];
}
/**
 * Converts a raw shape-info buffer laid out as
 *   rank | shape[rank] | stride[rank] | offset | elementWiseStride | order
 * into a heap-allocated ShapeInformation. Caller owns the result;
 * its shape/stride pointers alias `buffer` (no copies are made).
 * Fix: the stride field was assigned twice via a redundant local;
 * it is now set exactly once.
 */
INLINEDEF _CUDA_HD ShapeInformation *infoFromBuffer(Nd4jLong *buffer) {
    traceNew(19);
    auto info = new ShapeInformation;
    auto length = shapeInfoLength(rank(buffer));
    auto rank = buffer[0];
    info->rank = rank;
    // shape starts right after the rank word, stride right after shape
    info->shape = buffer + 1;
    info->stride = buffer + 1 + rank;
    info->offset = buffer[length - 3];
    info->elementWiseStride = buffer[length - 2];
    info->order = (char) buffer[length - 1];
    return info;
}
/**
 * Returns the stride portion of an information buffer
 * (located immediately after the shape portion).
 */
INLINEDEF _CUDA_HD Nd4jLong *stride(Nd4jLong *buffer) {
    return buffer + 1 + rank(buffer);
}
// True when the ARRAY_EMPTY flag is set in the buffer's extra word.
INLINEDEF _CUDA_HD bool isEmpty(const Nd4jLong *shapeInfo) {
    const auto flags = shape::extra(const_cast<Nd4jLong*>(shapeInfo));
    return (flags & ARRAY_EMPTY) == ARRAY_EMPTY;
}
/**
 * Compute the number of elements described by a shape-info buffer.
 * Rank 0 means a scalar (1 element) unless the empty flag is set.
 */
INLINEDEF _CUDA_HD Nd4jLong length(const Nd4jLong *shapeInfo) {
    const int rank = shape::rank(shapeInfo);
    if (rank == 0)
        return isEmpty(shapeInfo) ? 0L : 1L;
    if (rank == 1)
        return shapeInfo[1];
    return shape::prodLong(shape::shapeOf(const_cast<Nd4jLong*>(shapeInfo)), rank);
}
// Product of an initializer-list shape.
INLINEDEF _CUDA_HD Nd4jLong length(std::initializer_list<int>& shape) {
    Nd4jLong total = 1;
    for (auto dim : shape)
        total *= dim;
    return total;
}
// Product of an initializer-list shape (Nd4jLong variant).
INLINEDEF _CUDA_HD Nd4jLong length(std::initializer_list<Nd4jLong>& shape) {
    Nd4jLong total = 1;
    for (auto dim : shape)
        total *= dim;
    return total;
}
/***
 * Returns the offset portion of an information buffer
 * (third word from the end).
 */
INLINEDEF _CUDA_HD Nd4jLong offset(Nd4jLong *buffer) {
    const int idx = shape::shapeInfoLength(shape::rank(buffer)) - 3;
    return buffer[idx];
}
// Mutable reference to the extra/flags word; note it occupies the same
// slot (length - 3) as the offset field above.
INLINEDEF _CUDA_HD Nd4jLong& extra(Nd4jLong *buffer) {
    const int idx = shape::shapeInfoLength(shape::rank(buffer)) - 3;
    return buffer[idx];
}
/**
 * Returns the ordering flag ('c'/'f'/'a') stored in the last word of
 * the shape-info buffer.
 */
INLINEDEF _CUDA_HD char order(const Nd4jLong *buffer) {
    const auto rank = buffer[0];
    return static_cast<char>(buffer[2 * rank + 4 - 1]);
}
/**
 * Returns the type word (stored immediately after shape and stride).
 */
INLINEDEF _CUDA_HD Nd4jLong type(const Nd4jLong *shapeInfo) {
    const auto rank = shapeInfo[0];
    return shapeInfo[2 * rank + 1];
}
/**
 * Returns the element-wise stride (second-to-last word) of the buffer.
 */
INLINEDEF _CUDA_HD Nd4jLong elementWiseStride(const Nd4jLong *buffer) {
    const int len = shapeInfoLength(static_cast<int>(buffer[0]));
    return buffer[len - 2];
}
/**
 * Element-wise stride used when iterating a reduction index.
 * For f-order buffers the stride of the first reduced dimension is used,
 * for c-order the stride of the last one. With multiple reduced
 * dimensions, a degenerate (size-1) last reduced dimension collapses the
 * stride to 1.
 * @param buffer shape-info buffer
 * @param dimension the reduced dimensions
 * @param dimensionLength number of reduced dimensions
 */
INLINEDEF _CUDA_HD Nd4jLong reductionIndexElementWiseStride(Nd4jLong* buffer, int* dimension, int dimensionLength) {
    const bool fOrder = shape::order(buffer) == 'f';
    // f-order iterates along the first reduced dim, c-order along the last
    const int pick = fOrder ? dimension[0] : dimension[dimensionLength - 1];
    if (dimensionLength > 1) {
        // when the last reduced dimension has extent 1, iterate with stride 1
        if (shape::shapeOf(buffer)[dimension[dimensionLength - 1]] == 1)
            return 1;
        return shape::stride(buffer)[pick];
    }
    return shape::stride(buffer)[pick];
}
/**
* Returns whether
* the given shape info buffer
* represents a scalar shape
*/
INLINEDEF _CUDA_HD int isScalar(Nd4jLong *info) {
    // scalar: rank 0, or a rank-1/rank-2 shape whose every dimension equals 1
    const int rank = shape::rank(info);
    switch (rank) {
        case 0:
            return 1;
        case 1:
            return shape::shapeOf(info)[0] == 1;
        case 2:
            return shape::shapeOf(info)[0] == 1 && shape::shapeOf(info)[1] == 1;
        default:
            return 0;
    }
}
/**
* Returns whether
* the given shape information
* represents a scalar
* shape or not
*/
INLINEDEF _CUDA_HD int isScalar(volatile ShapeInformation *info) {
    // Struct-based twin of isScalar(Nd4jLong*): scalar means rank 0, or
    // rank 1/2 with all dimensions equal to 1.
    const int rank = info->rank;
    if (rank > 2)
        return 0;
    // rank-0 shape info describes a true scalar; previously this overload
    // disagreed with isScalar(Nd4jLong*) by falling through and returning 0.
    if (rank == 0)
        return 1;
    if (rank == 1)
        return info->shape[0] == 1;
    if (rank == 2)
        return info->shape[0] == 1 && info->shape[1] == 1;
    return 0;
}
/**
* Return a copy of this array with the
* given index omitted
*
* @param data the data to copy
* @param indexes the index of the item to remove
* @param dataLength the length of the data array
* @param indexesLength the length of the data array
* @return the new array with the omitted
*
* item
*/
template <typename T1, typename T2>
INLINEDEF _CUDA_HD void removeIndex(T1* data, T2 *indexes, Nd4jLong dataLength, Nd4jLong indexesLength, T1 *ret) {
    // Copies every element of `data` whose position is NOT listed in `indexes`
    // into `ret`, preserving order; stops after dataLength - indexesLength items.
    const int keepCount = dataLength - indexesLength;
    int written = 0;
    for (int i = 0; i < dataLength; i++) {
        if (written >= keepCount)
            break;
        bool skip = false;
        for (int j = 0; j < indexesLength; j++) {
            if (indexes[j] == i) {
                skip = true;
                break;
            }
        }
        if (!skip)
            ret[written++] = data[i];
    }
}
/**
* Return a copy of this array with the
* given index omitted
*
* @param data the data to copy
* @param indexes the index of the item to remove
* @param dataLength the length of the data array
* @param indexesLength the length of the data array
* @return the new array with the omitted
*
* item
*/
template <typename T1, typename T2>
INLINEDEF _CUDA_HD T1* removeIndex(T1 *data, T2 *indexes, Nd4jLong dataLength, Nd4jLong indexesLength) {
    // Allocating variant: returns a caller-owned, zero-initialized array of
    // dataLength - indexesLength elements filled by the in-place overload.
    auto lengthOfArr = dataLength - indexesLength;
    if(lengthOfArr < 0) {
        printf("Remove index call created a <= 0 length array. This was likely not intended.");
        // allocating a negative-length array is undefined behaviour; the old
        // code warned but then allocated anyway — bail out instead
        return nullptr;
    }
    auto ret = new T1[lengthOfArr];
    memset(ret, 0, sizeof(T1) * lengthOfArr);
    removeIndex<T1, T2>(data, indexes, dataLength, indexesLength, ret);
    return ret;
}
INLINEDEF _CUDA_HD Nd4jLong* everyIndexBut(Nd4jLong *indexes, int indexesLength, int begin, int end) {
    // Returns the indices in [begin, end) that are NOT present in `indexes`.
    // Allocation keeps the historical contract len = end - indexesLength
    // (which assumes begin == 0 and every index lies inside the range); the
    // retIdx < len loop guard prevents the buffer over-run that occurred when
    // entries of `indexes` fell outside [begin, end).
    int len = end - indexesLength;
    traceNew(20);
    auto ret = new Nd4jLong[len];
    int retIdx = 0;
    //not here that we do 0 based indexing for end - this assumes things like:
    //0 to 4 are specified
    for (int i = begin; i < end && retIdx < len; i++) {
        bool found = false;
        for (int j = 0; j < indexesLength; j++) {
            if (indexes[j] == i) {
                found = true;
                break;
            }
        }
        if (!found)
            ret[retIdx++] = i;
    }
    return ret;
}
/**
* Computes the offset for accessing
* a global element given the shape information
* and the offset to be read.
*/
#ifdef __CUDACC__
INLINEDEF __device__ int tadOffset(ShapeInformation *xInfo, int offset) {
    // device-only: each thread advances from the base offset by its thread
    // index times the element-wise stride of the TAD
    return offset + threadIdx.x * xInfo->elementWiseStride;
}
#endif
/**
* Returns a shape
* forces the given length to be 2.
* @param shape the shape to modify
* @param dimension the dimension (row or column)
* for the shape to be returned as
* @return the new shape
*/
INLINEDEF _CUDA_HD Nd4jLong *ensureVectorShape(Nd4jLong *shape, int dimension) {
    // Promotes a 1-d shape to rank 2: dimension 0 yields a row vector {1, n},
    // anything else a column vector {n, 1}. Caller owns the returned array.
    traceNew(21);
    Nd4jLong *result = new Nd4jLong[2];
    const bool rowVector = (dimension == 0);
    result[0] = rowVector ? 1 : shape[0];
    result[1] = rowVector ? shape[0] : 1;
    return result;
}
/**
* Returns a shape
* forces the given length to be 2.
* @param shape the shape to modify
* @param dimension the dimension (row or column)
* for the shape to be returned as
* @return the new shape
*/
INLINEDEF _CUDA_HD Nd4jLong *ensureVectorShape(Nd4jLong *shape) {
    // convenience overload: defaults to a row vector (dimension 0);
    // caller owns the returned 2-element array
    return ensureVectorShape(shape, 0);
}
/**
* This method does STRICT comparison for two shape buffers
*
* @param shape
* @return
*/
INLINEDEF _CUDA_HD bool equalsStrict(const Nd4jLong *shapeA, const Nd4jLong *shapeB) {
    // strict equality: rank plus every remaining slot of the shape-info
    // buffers (shape, strides, type, ews, order) must match
    if (shapeA[0] != shapeB[0])
        return false;
    if (shapeA[0] == 0)
        return true;
    const int len = shape::shapeInfoLength(shapeA[0]);
    bool same = true;
    for (int i = 1; same && i < len; i++)
        same = (shapeA[i] == shapeB[i]);
    return same;
}
INLINEDEF _CUDA_HD bool haveSameOffsets(const Nd4jLong *shapeA, const Nd4jLong *shapeB) {
    // like equalsStrict, but the data-type slot (length - 3) is ignored,
    // since it does not influence element offsets
    if (shapeA[0] != shapeB[0])
        return false;
    if (shapeA[0] == 0)
        return true;
    const int len = shape::shapeInfoLength(shapeA[0]);
    const int typePos = len - 3; // type position, neglect it
    for (int i = 1; i < len; i++) {
        if (i == typePos)
            continue;
        if (shapeA[i] != shapeB[i])
            return false;
    }
    return true;
}
INLINEDEF _CUDA_HD int sizeAt(const Nd4jLong *shape, const int dim) {
    // size of one dimension; negative dim counts from the end, python-style
    const int axis = (dim >= 0) ? dim : rank(shape) + dim;
    return shape[1 + axis];
}
/**
* This method does SOFT comparison for two shape buffers, we compare only rank & shapes
*
* @param shape
* @return
*/
INLINEDEF _CUDA_HD bool equalsSoft(const Nd4jLong *shapeA, const Nd4jLong *shapeB) {
    // soft equality: only rank and dimensions matter; strides, ews, type
    // and order are ignored
    if (shapeA[0] != shapeB[0])
        return false;
    if (shapeA[0] == 0)
        return true;
    const auto nDims = shapeA[0];
    for (int i = 1; i <= nDims; i++)
        if (shapeA[i] != shapeB[i])
            return false;
    return true;
}
INLINEDEF _CUDA_HD bool equalsTypesAndShapesSoft(const Nd4jLong *shapeA, const Nd4jLong *shapeB) {
    // soft shape equality (rank + dims) plus a matching data-type slot —
    // the same length-3 position that haveSameOffsets deliberately skips
    return equalsSoft(shapeA, shapeB) && shapeA[shapeInfoLength(shapeA) - 3] == shapeB[shapeInfoLength(shapeB) - 3];
}
/**
* Generate an int buffer
* up to the given length
* at the specified increment
*
*/
template <typename T>
INLINEDEF _CUDA_HD T* range(int from, int to, int increment) {
    // Builds a caller-owned array stepping from `from` toward `to`.
    // NOTE(review): retLength uses truncating division, so any remainder
    // silently drops trailing values; the descending branch starts at
    // from - 1 while the ascending branch starts at from — presumably
    // intentional half-open semantics, but confirm against callers.
    int diff = nd4j::math::nd4j_abs<int>(from - to);
    int retLength = diff / increment;
    T *ret;
    traceNew(22);
    // allocate at least one element so a degenerate range still returns a buffer
    if(diff / increment < 1)
        ret = new T[1];
    else
        ret = new T[diff / increment];
    if (from < to) {
        // ascending: from, from + increment, ... capped at retLength values
        int count = 0;
        for (int i = from; i < to; i += increment) {
            if (count >= retLength)
                break;
            ret[count++] = i;
        }
    } else if (from > to) {
        // descending: from - 1, from - 1 - increment, ...
        int count = 0;
        for (int i = from - 1; i >= to; i -= increment) {
            if (count >= retLength)
                break;
            ret[count++] = i;
        }
    }
    // from == to: buffer of length 1 is returned uninitialized
    return ret;
}
/**
* Generate a range
* beginning at from and ending at to
* incrementing by 1
* @param from the start
* @param to the end
* @return the int array starting at from and ending at to
*/
template <typename T>
INLINEDEF _CUDA_HD T* range(int from, int to) {
    // step-1 convenience overload; caller owns the returned array
    return range<T>(from, to, 1);
}
/**
* Keep the given indexes in the data
* @param data
* @param index
* @param indexLength
* @param dataLength
* @return
*/
INLINEDEF _CUDA_HD Nd4jLong *keep(volatile Nd4jLong *data, int* index, int indexLength, int dataLength) {
    // Inverse of removeIndex: copies only the positions listed in `index`,
    // preserving order. Caller owns the returned indexLength-element array.
    traceNew(23);
    Nd4jLong *ret = new Nd4jLong[indexLength];
    int written = 0;
    for (int i = 0; i < dataLength; i++) {
        bool wanted = false;
        for (int j = 0; j < indexLength; j++) {
            if (index[j] == i) {
                wanted = true;
                break;
            }
        }
        if (wanted)
            ret[written++] = data[i];
    }
    return ret;
}
/**
* Generate a reverse
* copy of the data
*/
template <typename T>
INLINEDEF _CUDA_HD T* reverseCopy(T *data, Nd4jLong length) {
    // Returns a freshly allocated reversed copy of `data`, or nullptr for
    // an empty input. Caller owns the result.
    if (length < 1)
        return nullptr;
    traceNew(24);
    T *out = new T[length];
    // mirror both halves; the middle element (odd length) maps onto itself
    for (Nd4jLong i = 0; i <= length / 2; i++) {
        out[i] = data[length - i - 1];
        out[length - i - 1] = data[i];
    }
    return out;
}
template <typename T>
INLINEDEF _CUDA_HD void reverseCopyTo(T *from, T *to, Nd4jLong length) {
    // Mirrors `from` into `to`; the temporary keeps this correct even when
    // `to` aliases `from` (in-place reversal).
    if (length < 1)
        return;
    for (Nd4jLong i = 0; i <= length / 2; i++) {
        const T head = from[i];
        to[i] = from[length - i - 1];
        to[length - i - 1] = head;
    }
}
template <typename T>
INLINEDEF _CUDA_HD void reverseCopyTo(T *from, T *to, Nd4jLong *indexes, Nd4jLong length) {
    // Indexed variant: gathers from `from` through `indexes` and writes the
    // reversed sequence into `to`; the temporary tolerates from/to aliasing.
    if (length < 1)
        return;
    for (Nd4jLong i = 0; i <= length / 2; i++) {
        const T head = from[indexes[i]];
        to[i] = from[indexes[length - i - 1]];
        to[length - i - 1] = head;
    }
}
/**
*
* @param arr1
* @param arr1Length
* @param arr2
* @param arr2Length
* @return
*/
template <typename T>
INLINEDEF _CUDA_HD T* concat(T* arr1, Nd4jLong arr1Length, T* arr2, Nd4jLong arr2Length) {
    // Joins two buffers into one caller-owned array of
    // arr1Length + arr2Length elements.
    traceNew(25);
    const Nd4jLong total = arr1Length + arr2Length;
    T *joined = new T[total];
    std::memcpy(joined, arr1, arr1Length * sizeof(T));
    std::memcpy(joined + arr1Length, arr2, arr2Length * sizeof(T));
    return joined;
}
/**
*
* @param numArrays
* @param numTotalElements
* @param arr
* @param lengths
* @return
*/
template <typename T>
INLINEDEF _CUDA_HD T *concat(Nd4jLong numArrays, Nd4jLong numTotalElements, T **arr, Nd4jLong *lengths) {
    // Flattens numArrays buffers (individual sizes in `lengths`) into one
    // caller-owned buffer of numTotalElements elements, in order.
    T* out = new T[numTotalElements];
    Nd4jLong pos = 0;
    for (Nd4jLong a = 0; a < numArrays; a++)
        for (Nd4jLong e = 0; e < lengths[a]; e++)
            out[pos++] = arr[a][e];
    return out;
}
/**
* Get the length per slice of the
* given shape and the dimension
* @param rank the rank of the shape
* @param shape the shape of to get
* the length per slice for
* @param dimension the dimension to
* get the length per slice for
* @param dimensionLength the length of the dimension array
* @return the length per slice of the given shape
* along the given dimension
*/
INLINEDEF _CUDA_HD Nd4jLong lengthPerSlice(int rank, Nd4jLong *shape, int* dimension, int dimensionLength) {
    // Number of elements in one slice of `shape` taken along `dimension`.
    if(shape::isVector(shape,rank)) {
        //return total length for row vectors
        // NOTE(review): when the shape IS a vector but this inner condition
        // fails, control falls through to the removeIndex path below (the
        // else-if is only skipped for vectors) — confirm this is intended.
        if(dimensionLength == 1 && shape[0] == 1) {
            return shape::prod(shape,rank);
        }
    }
    else if(rank == dimensionLength)
        // reducing over every axis -> a single slice covering the whole array
        return shape::prod(shape,rank);
    // rank of the residual shape after removing the reduced dimensions
    int absSelta = nd4j::math::nd4j_abs<int>(rank - dimensionLength);
    traceNew(27);
    auto ret2 = shape::removeIndex<Nd4jLong>(shape, dimension, rank, dimensionLength);
    auto ret = prodLong(ret2, absSelta);
    delete[] ret2;
    return ret;
}
/**
* calculates the offset for a tensor
* @param index
* @param arr
* @param tensorShape
* @return
*/
INLINEDEF _CUDA_HD Nd4jLong sliceOffsetForTensor(int rank, int index, Nd4jLong *shape, Nd4jLong *tensorShape, int tensorShapeLength, int* dimension, int dimensionLength) {
    // offset = index * tensorLength / lengthPerSlice, with a non-positive
    // slice length treated as offset 0
    const auto tensorLength = prodLong(tensorShape, tensorShapeLength);
    const auto perSlice = lengthPerSlice(rank, shape, dimension, dimensionLength);
    if (perSlice <= 0)
        return 0;
    return index * tensorLength / perSlice;
}
/**
* calculates the offset for a tensor
* @param index
* @param arr
* @param tensorShape
* @return
*/
INLINEDEF _CUDA_HD Nd4jLong sliceOffsetForTensor(int index, int tensorLength, int lengthPerSlice2) {
    // Precomputed-length variant of sliceOffsetForTensor. Guards against a
    // non-positive slice length (division by zero) the same way the
    // rank-aware overload does, returning offset 0.
    if (lengthPerSlice2 <= 0)
        return 0;
    Nd4jLong offset = index * tensorLength / lengthPerSlice2;
    return offset;
}
#ifdef __CUDACC__
/**
* Computes the offset for accessing
* a global element given the shape information
* and the offset to be read.
*/
INLINEDEF _CUDA_D int tadOffset(Nd4jLong *xInfo, int offset) {
    // device-only: same as the ShapeInformation overload, but reads the
    // element-wise stride out of a raw shape-info buffer
    return offset + threadIdx.x * elementWiseStride(xInfo);
}
#endif
/**
* Computes the number
* of tensors along
* a given dimension
*/
INLINEDEF _CUDA_HD Nd4jLong tensorsAlongDimension(volatile int rank, volatile int length,
                                                  volatile Nd4jLong *shape, int *dimension, int dimensionLength) {
    // number of tads = total element count / elements per tad
    Nd4jLong *tadShape = shape::keep(shape, dimension, dimensionLength, rank);
    const Nd4jLong elementsPerTad = shape::prodLong(tadShape, dimensionLength);
    delete[] tadShape;
    return length / elementsPerTad;
}
/**
* Computes the number
* of tensors along
* a given dimension
*/
INLINEDEF _CUDA_HD Nd4jLong tensorsAlongDimension(Nd4jLong *shapeInfo, int *dimension, int dimensionLength) {
    // shape-info driven variant of the raw-shape overload above
    Nd4jLong *tadShape = shape::keep(shape::shapeOf(shapeInfo), dimension, dimensionLength, rank(shapeInfo));
    const Nd4jLong elementsPerTad = shape::prodLong(tadShape, dimensionLength);
    delete[] tadShape;
    return shape::length(shapeInfo) / elementsPerTad;
}
/**
* Get an offset for retrieval
* from a data buffer
* based on the given
* shape stride and given indices
* @param baseOffset the offset to start from
* @param shape the shape of the array
* @param stride the stride of the array
* @param indices the indices to iterate over
* @return the double at the specified index
*/
INLINEDEF _CUDA_HD Nd4jLong getOffset(Nd4jLong baseOffset, const Nd4jLong *shape, const Nd4jLong *stride, const Nd4jLong *indices, int rank) {
    // Dense offset = baseOffset + sum(indices[i] * stride[i]) over all
    // non-singleton axes. An out-of-range index is reported on stdout and
    // yields the error sentinel -1, which callers must check for.
    Nd4jLong offset = baseOffset;
    for(int i = 0; i < rank; i++) {
        // singleton axes (shape[i] == 1) accept any index and contribute nothing
        if(indices[i] >= shape[i] && shape[i] != 1) {
#ifdef __CUDA_ARCH__
            printf("D: Index %i [%lld] must not be >= shape[%lld].\n", i,indices[i],shape[i]);
#else
            printf("H: Index %i [%lld] must not be >= shape[%lld].\n", i, (long long) indices[i], (long long) shape[i]);
#endif
#ifdef __CUDA_ARCH__
            //if (threadIdx.x == 0 && blockIdx.x == 0)
            //    printShapeInfoLinear("getOffsetFailed", rank, shape, stride);
#endif
            return -1;
        }
        if(shape[i] != 1) {
            offset += indices[i] * stride[i];
        }
    }
    return offset;
}
/**
* Returns the tensor along dimension
* for the given block index
* @param blockSize
* @param blockIdx
* @param i
* @return
*/
INLINEDEF _CUDA_HD int tadForBlockIndex(int blockSize, int blockIdx, int i) {
    // tads are distributed round-robin over blocks: the i-th tad owned by
    // this block is blockIdx + i * blockSize
    return blockIdx + i * blockSize;
}
/**
* Computes the number of tads per block
*
*/
INLINEDEF _CUDA_HD int tadsPerBlock(int blockSize, int tads) {
    // ceiling division implemented via double-precision ceil
    const double ratio = static_cast<double>(tads) / blockSize;
    return nd4j::math::nd4j_ceil<double, int>(ratio);
}
/**
* Returns a shape buffer
* for the shape information metadata.
*/
INLINEDEF _CUDA_HD Nd4jLong *toShapeBuffer( ShapeInformation *info) {
    // Serializes a ShapeInformation struct into a caller-owned flat buffer:
    // [rank, shape..., stride..., offset, elementWiseStride, order]
    traceNew(29);
    const int nDims = info->rank;
    auto ret = new Nd4jLong[shapeInfoLength(nDims)];
    ret[0] = nDims;
    int pos = 1;
    for (int i = 0; i < nDims; i++)
        ret[pos++] = info->shape[i];
    for (int i = 0; i < nDims; i++)
        ret[pos++] = info->stride[i];
    ret[pos++] = info->offset;
    ret[pos++] = info->elementWiseStride;
    ret[pos] = info->order;
    return ret;
}
INLINEDEF _CUDA_HD Nd4jLong *toShapeBuffer( ShapeInformation *info, Nd4jLong* ret) {
    // Serializes into a caller-provided buffer. Rank 0 gets the canonical
    // empty-shape descriptor {0, 0, 1, 99}.
    const int nDims = info->rank;
    ret[0] = nDims;
    if (nDims == 0) {
        ret[1] = 0;
        ret[2] = 1;
        ret[3] = 99;
        return ret;
    }
    int pos = 1;
    for (int i = 0; i < nDims; i++)
        ret[pos++] = info->shape[i];
    for (int i = 0; i < nDims; i++)
        ret[pos++] = info->stride[i];
    ret[pos++] = info->offset;
    ret[pos++] = info->elementWiseStride;
    ret[pos++] = info->order;
    return ret;
}
INLINEDEF _CUDA_HD void printIntArray(const Nd4jLong *arr, const int length) {
    // dumps each value space-padded, then a trailing newline
    int i = 0;
    while (i < length) {
        printf(" %lld ", (long long) arr[i]);
        ++i;
    }
    printf("\n");
}
INLINEDEF _CUDA_HD void printIntArray(const int *arr, const int length) {
    // int variant: dumps each value space-padded, then a trailing newline
    int i = 0;
    while (i < length) {
        printf(" %i ", arr[i]);
        ++i;
    }
    printf("\n");
}
INLINEDEF _CUDA_HD void printShapeInfo(Nd4jLong *shapeInfo) {
    // human-readable dump: rank, dims, strides and order
    const int nDims = shape::rank(shapeInfo);
    Nd4jLong *dims = shape::shapeOf(shapeInfo);
    Nd4jLong *strd = shape::stride(shapeInfo);
    printf("Rank %d\n", nDims);
    printf("Shape:\n");
    for (int i = 0; i < nDims; i++)
        printf(" %lld ", (long long) dims[i]);
    printf("\n");
    printf("Stride:\n");
    for (int i = 0; i < nDims; i++)
        printf(" %lld ", (long long) strd[i]);
    printf("\n");
    printf("Order %c\n", shape::order(shapeInfo));
}
INLINEDEF _CUDA_HD void printShapeInfoLinear(const Nd4jLong *shapeInfo) {
    // prints the raw shape-info buffer as a comma-separated list
    const int lim = shape::shapeInfoLength(shape::rank(shapeInfo));
    printf("ShapeInfo: [");
    for (int i = 0; i < lim; i++) {
        printf("%lld", (long long) shapeInfo[i]);
        if (i + 1 < lim)
            printf(", ");
    }
    printf("]\n");
#ifndef __CUDA_ARCH__
    fflush(stdout);
#endif
}
INLINEDEF _CUDA_HD void printShapeInfoLinear(const char *msg, int rank, const Nd4jLong *shape, const Nd4jLong *strides) {
    // prints "<msg> : [dims..., strides...]"; note every dim gets a trailing
    // comma while strides place a separator only between entries
    printf("%s : [", msg);
    for (int i = 0; i < rank; i++)
        printf("%lld, ", (long long) shape[i]);
    for (int i = 0; i < rank; i++) {
        printf("%lld", (long long) strides[i]);
        if (i + 1 < rank)
            printf(", ");
    }
    printf("]\n");
#ifndef __CUDA_ARCH__
    fflush(stdout);
#endif
}
INLINEDEF _CUDA_HD void printShapeInfoLinear(const char *msg, const Nd4jLong *shapeInfo) {
    // labelled variant of printShapeInfoLinear(shapeInfo)
    // NOTE(review): this overload guards the flush with __CUDACC__ while its
    // siblings use __CUDA_ARCH__ — confirm which is intended
    const int lim = shape::shapeInfoLength(shape::rank(shapeInfo));
    printf("%s : [", msg);
    for (int i = 0; i < lim; i++) {
        printf("%lld", (long long) shapeInfo[i]);
        if (i + 1 < lim)
            printf(", ");
    }
    printf("]\n");
#ifndef __CUDACC__
    fflush(stdout);
#endif
}
template <typename T>
INLINEDEF _CUDA_HD void printArray(void *varr, int length, const char * message) {
    // prints `length` elements of varr (reinterpreted as T*) as floats,
    // prefixed with `message` (or "Array" when message is null)
    auto values = reinterpret_cast<T*>(varr);
    if (message != nullptr)
        printf("%s: [", message);
    else
        printf("Array: [");
    for (int i = 0; i < length; ++i) {
        printf("%f", (float) values[i]);
        if (i + 1 < length)
            printf(", ");
    }
    printf("]\n");
#ifndef __CUDACC__
    fflush(stdout);
#endif
}
INLINEDEF _CUDA_HD void printArray(float *arr, int length) {
    // unlabelled float dump; no flush, unlike the template overload
    printf("Array: [");
    for (int i = 0; i < length; ++i) {
        printf("%f", arr[i]);
        if (i + 1 < length)
            printf(", ");
    }
    printf("]\n");
}
/**
* Given an linear index, element wise stride
* and the length of each tad
* map a linear index to a tad
* @param i the index to map
* @param the element wise stride for the tads
* @param numElementsPerTad the number of elements
* per tad
*/
INLINEDEF _CUDA_HD int tadIndex(int i, int elementWiseStride, int numElementsPerTad) {
    // each tad spans numElementsPerTad * elementWiseStride linear slots
    const int tadSpan = numElementsPerTad * elementWiseStride;
    return i / tadSpan;
}
/**
* Map a tad to a
* reduction index.
* @param tadIndexForOriginal the original tad index for the
* split up problem (eg: split is dimension 3 mapping to a 2,3 problem)
* @param tadsForReduced the number of tads for the shrunk down problem (eg: 2,3)
* @param tadsForOriginal the number of tads for the smaller problem (eg: 3)
*/
INLINEDEF _CUDA_HD int reductionIndexForTad(int tadIndexForOriginal, int tadsForReduced,
                                            int tadsForOriginal) {
    // tad 0 always maps to reduction index 0
    if (tadIndexForOriginal == 0)
        return 0;
    // group original tads evenly across the reduced tads
    const int tadsPerReduce = tadsForOriginal / tadsForReduced;
    return tadIndexForOriginal / tadsPerReduce;
}
INLINEDEF _CUDA_HD void transposeInplace(Nd4jLong *shapeBuffer) {
    // Reverses the shape and stride arrays in place and flips the order flag
    // (99 == 'c' <-> 102 == 'f'). The swap temporaries are Nd4jLong: the
    // previous `int tmp` silently truncated 64-bit dimensions and strides.
    int rank = shape::rank(shapeBuffer);
    Nd4jLong *shape = shape::shapeOf(shapeBuffer);
    Nd4jLong *strides = shape::stride(shapeBuffer);
    // swap shape
    for (int e = 0; e < rank / 2; e++) {
        int idx1 = rank - e - 1;
        int idx2 = e;
        Nd4jLong tmp = shape[idx2];
        shape[idx2] = shape[idx1];
        shape[idx1] = tmp;
    }
    // swap strides
    for (int e = 0; e < rank / 2; e++) {
        int idx1 = rank - e - 1;
        int idx2 = e;
        Nd4jLong tmp = strides[idx2];
        strides[idx2] = strides[idx1];
        strides[idx1] = tmp;
    }
    // toggle the order character stored in the last slot
    if (shape::order(shapeBuffer) == 'c')
        shapeBuffer[shape::shapeInfoLength(shapeBuffer) - 1] = 102;
    else
        shapeBuffer[shape::shapeInfoLength(shapeBuffer) - 1] = 99;
}
/**
* Tad index for linear
* @param linearIndex
* @param tadLength
* @return
*/
INLINEDEF _CUDA_HD int tadIndexForLinear(int linearIndex, int tadLength) {
    // NOTE(review): despite the name, this computes linearIndex % tadLength,
    // i.e. the position INSIDE a tad rather than which tad — confirm callers
    // expect the modulo
    return linearIndex % tadLength;
}
/**
* Computes the number of tads
* per reduce index for the
* reduction tad.
*/
INLINEDEF _CUDA_HD int tadsPerReduceIndex(int tadsForReduce, int tadsForOriginal) {
    // how many original tads collapse into one reduce tad (integer division)
    return tadsForOriginal / tadsForReduce;
}
/**
* Maps a linear index to a reduction index
* @param i the linear index to map
* @param elementWiseStride the element wise stride
* for the multiple problem
* @param tadNum the number of tads for the shrunken problem
* @param originalTadNum the tad number for the reduced version of the problem
*/
INLINEDEF _CUDA_HD int reductionIndexForLinear(int i, int elementWiseStride, int numElementsPerTad,
                                               int tadNum, int originalTadNum) {
    // two-step mapping: linear index -> owning tad -> reduction index
    const int owningTad = tadIndex(i, elementWiseStride, numElementsPerTad);
    return reductionIndexForTad(owningTad, tadNum, originalTadNum);
}
INLINEDEF _CUDA_HD Nd4jLong* createScalarShapeInfo() {
    // Builds a rank-1, single-element shape buffer via a temporary
    // ShapeInformation struct; caller owns the returned buffer.
    traceNew(30);
    auto dims = new Nd4jLong[1];
    auto strd = new Nd4jLong[1];
    dims[0] = 1;
    strd[0] = 1;
    auto info = new ShapeInformation();
    info->rank = 1;
    info->offset = 0;
    info->stride = strd;
    info->shape = dims;
    info->elementWiseStride = 1;
    info->order = 99; // 'c'
    Nd4jLong *ret = shape::toShapeBuffer(info);
    // toShapeBuffer copied everything, so the scratch structures can go
    delete info;
    delete[] dims;
    delete[] strd;
    return ret;
}
INLINEDEF _CUDA_HD Nd4jLong* createScalarShapeInfo(Nd4jLong *ret) {
    // Fills a caller-provided buffer with the legacy rank-2 {1,1} scalar
    // descriptor: shape {1,1}, strides {1,1}, offset 0, ews 1, order 99 ('c').
    const Nd4jLong vals[8] = {2, 1, 1, 1, 1, 0, 1, 99};
    for (int i = 0; i < 8; i++)
        ret[i] = vals[i];
    return ret;
}
}
/**
* Returns the prod of the data
* up to the given length
*/
INLINEDEF _CUDA_HD int prod(Nd4jLong *data, int length) {
    // product of the first `length` entries, accumulated in int
    // (prodLong below is the 64-bit variant)
    int result = 1;
    for (int i = 0; i < length; i++)
        result *= data[i];
    return result;
}
/**
* Returns the prod of the data
* up to the given length
*/
INLINEDEF _CUDA_HD Nd4jLong prodLong(const Nd4jLong *data, int length) {
    // 64-bit product of the first `length` entries
    Nd4jLong result = 1;
    for (int i = 0; i < length; i++)
        result *= data[i];
    return result;
}
INLINEDEF _CUDA_HD int rearMostLeftOverItem(Nd4jLong *data, Nd4jLong *dimension, int dimensionLength) {
    Nd4jLong *stride = shape::stride(data);
    int rank = shape::rank(data);
    /**
     * Needs to find an algorithm such that:
     * looping backwards will find the highest dimension left
     * that isn't included in the dimension index list.
     *
     * This can also be thought of as the last item of the first index
     * of the difference between the full list of indices and
     * the dimension indices.
     *
     * We should avoid excessive object creation by only looping backwards.
     *
     * Note: the previous implementation branched on shape::order(data), but
     * the 'f' and 'c' branches were byte-for-byte identical, so they are
     * collapsed into a single loop here (behavior unchanged).
     */
    int dimIdx = dimensionLength - 1;
    for (int i = rank - 1; i >= 0; i--) {
        if (dimension[dimIdx--] != i) {
            int ret = stride[i];
            return ret;
        }
    }
    // corner case: every dimension matched — fall back to the first stride
    int ret = stride[0];
    return ret;
}
#ifdef __CUDACC__
__device__ INLINEDEF void sweepShapeInfoBuffer(Nd4jLong *shapeInfoBuffer, Nd4jLong *targetBuffer) {
    // we read first element, to find out length of our shapeInfoBuffer
    int rank = shapeInfoBuffer[0];
    int len = shape::shapeInfoLength(rank);
    // block-cooperative copy: threads stride by blockDim.x; callers
    // presumably need a __syncthreads() before reading targetBuffer —
    // confirm at the call sites
    for (int i = threadIdx.x; i < len; i += blockDim.x)
        targetBuffer[i] = shapeInfoBuffer[i];
}
#endif
INLINEDEF _CUDA_HD Nd4jLong *shapeBufferOfNpy(cnpy::NpyArray arr) {
    // convenience overload: unpacks rank, dims and storage order from a
    // parsed npy header and forwards to the raw-parameter overload
    return shape::shapeBufferOfNpy(arr.shape.size(), (unsigned int*) arr.shape.data(), arr.fortranOrder);
}
// INLINEDEF _CUDA_HD Nd4jLong *shapeBufferOfNpyBuffer(char *buffer) {
// unsigned Nd4jLong *shape;
// unsigned int ndims, wordSize;
// bool fortranOrder;
// cnpy::parseNpyHeaderStr(std::string(buffer),wordSize,shape,ndims,fortranOrder);
// Nd4jLong * ret = shape::shapeBufferOfNpy(ndims,shape,fortranOrder);
// delete[] shape;
// return ret;
// }
INLINEDEF _CUDA_HD Nd4jLong *shapeBufferOfNpy(int rank, unsigned int* shape, bool fortranOrder) {
    // Builds a FLOAT32 shape-info buffer from 32-bit npy dimensions.
    // The dims are widened to Nd4jLong BEFORE being handed to the builders:
    // the previous fortran branch reinterpret-cast `unsigned int*` to
    // `Nd4jLong*`, which read two adjacent 32-bit dims as one 64-bit value —
    // undefined behaviour and wrong shapes for rank >= 2.
    Nd4jLong *widened = new Nd4jLong[rank];
    for (int i = 0; i < rank; i++) {
        widened[i] = shape[i];
    }
    Nd4jLong *shapeBufferRet = fortranOrder
            ? shape::shapeBufferFortran(rank, nd4j::FLOAT32, widened)
            : shape::shapeBuffer(rank, nd4j::FLOAT32, widened);
    delete[] widened;
    return shapeBufferRet;
}
INLINEDEF _CUDA_HD bool strideDescendingCAscendingF(const Nd4jLong *shapeBuffer) {
    // True when strides are strictly descending for 'c' order or strictly
    // ascending for 'f' order; unknown orders are reported and rejected.
    const int nDims = shape::rank(shapeBuffer);
    Nd4jLong *strides = shape::stride(const_cast<Nd4jLong*>(shapeBuffer));
    const char ord = shape::order(shapeBuffer);
    // trivial case: a unit-stride row vector qualifies for either order
    if (shape::isRowVector(shapeBuffer) && strides[0] == 1 && strides[1] == 1)
        return true;
    if (ord == 'c') {
        for (int i = 1; i < nDims; i++)
            if (strides[i - 1] <= strides[i])
                return false;
        return true;
    }
    if (ord == 'f') {
        for (int i = 1; i < nDims; i++)
            if (strides[i - 1] >= strides[i])
                return false;
        return true;
    }
    printf("Unknown order for array!\n");
    return false;
}
INLINEDEF _CUDA_HD bool isStrideSimple(const Nd4jLong* shapeInfo) {
    // "simple" = c order with a positive element-wise stride, i.e. the
    // buffer can be walked linearly without per-axis stride arithmetic
    return (order(shapeInfo) == 'c') && (elementWiseStride(shapeInfo) > 0);
}
//////////////////////////////////////////////////////////////////////////
// copy-past from java hasDefaultStridesForShape function
INLINEDEF _CUDA_HD bool areStridesDefault(const Nd4jLong* shapeInfo) {
    // copy-past from java hasDefaultStridesForShape function
    const int nDims = shape::rank(shapeInfo);
    if (nDims == 0)
        return true;
    if (!strideDescendingCAscendingF(shapeInfo))
        return false;
    // rebuild the default strides for this shape/order on the stack and
    // compare slot by slot; stride slots occupy indices [rank+1, 2*rank]
    Nd4jLong defaultShapeInfo[MAX_SHAPEINFOLENGTH];
    memcpy(defaultShapeInfo, shapeInfo, shape::shapeInfoByteLength(shapeInfo));
    shape::updateStrides(defaultShapeInfo, shape::order(shapeInfo));
    for (int i = nDims + 1; i <= 2 * nDims; ++i)
        if (defaultShapeInfo[i] != shapeInfo[i])
            return false;
    return true;
}
// INLINEDEF _CUDA_H bool reshapeC(const int oldRank, Nd4jLong* oldShape, const int newRank, Nd4jLong* newShapeOf, bool isFOrder, Nd4jLong* target) {
// int oldnd;
// Nd4jLong* olddims = shape::copyOf(oldRank, shape::shapeOf(oldShape));
// Nd4jLong* oldstrides = shape::copyOf(oldRank, shape::stride(oldShape));
// int np, op, last_stride;
// int oi, oj, ok, ni, nj, nk;
// Nd4jLong* newStrides = new Nd4jLong[newRank];
// oldnd = 0;
// /*
// * Remove axes with dimension 1 from the old array. They have no effect
// * but would need special cases since their strides do not matter.
// */
// for (oi = 0; oi < oldRank; oi++) {
// if (shape::shapeOf(oldShape)[oi] != 1) {
// olddims[oldnd] = shape::shapeOf(oldShape)[oi];
// oldstrides[oldnd] = shape::stride(oldShape)[oi];
// oldnd++;
// }
// }
// np = 1;
// for (ni = 0; ni < newRank; ni++) {
// np *= newShapeOf[ni];
// }
// op = 1;
// for (oi = 0; oi < oldnd; oi++) {
// op *= olddims[oi];
// }
// if (np != op) {
// /* different total sizes; no hope */
// delete[] olddims;
// delete[] oldstrides;
// delete[] newStrides;
// return false;
// }
// if (np == 0) {
// /* the current code does not handle 0-sized arrays, so give up */
// delete[] olddims;
// delete[] oldstrides;
// delete[] newStrides;
// return false;
// }
// /* oi to oj and ni to nj give the axis ranges currently worked with */
// oi = 0;
// oj = 1;
// ni = 0;
// nj = 1;
// while (ni < newRank && oi < oldnd) {
// np = newShapeOf[ni];
// op = olddims[oi];
// while (np != op) {
// if (np < op) {
// /* Misses trailing 1s, these are handled later */
// np *= newShapeOf[nj++];
// } else {
// op *= olddims[oj++];
// }
// }
// /* Check whether the original axes can be combined */
// for (ok = oi; ok < oj - 1; ok++) {
// if (isFOrder) {
// if (oldstrides[ok + 1] != olddims[ok] * oldstrides[ok]) {
// /* not contiguous enough */
// delete[] olddims;
// delete[] oldstrides;
// delete[] newStrides;
// return false;
// }
// } else {
// /* C order */
// if (oldstrides[ok] != olddims[ok + 1] * oldstrides[ok + 1]) {
// /* not contiguous enough */
// delete[] olddims;
// delete[] oldstrides;
// delete[] newStrides;
// return false;
// }
// }
// }
// /* Calculate new strides for all axes currently worked with */
// if (isFOrder) {
// newStrides[ni] = oldstrides[oi];
// for (nk = ni + 1; nk < nj; nk++) {
// newStrides[nk] = newStrides[nk - 1] * newShapeOf[nk - 1];
// }
// } else {
// /* C order */
// newStrides[nj - 1] = oldstrides[oj - 1];
// for (nk = nj - 1; nk > ni; nk--) {
// newStrides[nk - 1] = newStrides[nk] * newShapeOf[nk];
// }
// }
// ni = nj++;
// oi = oj++;
// }
// if (ni >= 1) {
// last_stride = newStrides[ni - 1];
// } else {
// last_stride = shape::elementWiseStride(oldShape);
// }
// if (isFOrder && ni >= 1) {
// last_stride *= newShapeOf[ni - 1];
// }
// for (nk = ni; nk < newRank; nk++) {
// newStrides[nk] = last_stride;
// }
// target[0] = newRank;
// int cnt = 1;
// for (int e = 0; e < newRank; e++)
// target[cnt++] = newShapeOf[e];
// for (int e = 0; e < newRank; e++)
// target[cnt++] = newStrides[e];
// target[shape::shapeInfoLength(newRank) - 3] = 0;
// target[shape::shapeInfoLength(newRank) - 2] = 0;
// target[shape::shapeInfoLength(newRank) - 1] = isFOrder ? 102 : 99;
// nd4j::ArrayOptions::setDataType(target, nd4j::ArrayOptions::dataType(oldShape));
// delete[] olddims;
// delete[] oldstrides;
// delete[] newStrides;
// return true;
// }
// INLINEDEF _CUDA_H bool reshapeC(const int oldRank, const Nd4jLong* oldShapeInfo, const int newRank, const Nd4jLong* newShape, const bool isFOrder, Nd4jLong* newShapeInfo) {
// // PLEASE NOTE !: reshaping not-permuted (ews=1) array in f order (except insertion/elimination of unities) will definitely cause allocation of new buffer for array elements
// // also this function takes into account identical shapes automatically, namely in that case oldShapeInfo is completely copied to newShapeInfo
// const int newOrder = isFOrder ? 102 : 99;
// const int oldOrder = oldShapeInfo[2 * oldRank + 3];
// newShapeInfo[0] = newRank;
// memcpy(newShapeInfo + 1, newShape, newRank * sizeof(Nd4jLong));
// Nd4jLong* newStrides = shape::stride(newShapeInfo);
// const Nd4jLong* oldShape = shape::shapeOf(const_cast<Nd4jLong*>(oldShapeInfo));
// const Nd4jLong* oldStrides = shape::stride(const_cast<Nd4jLong*>(oldShapeInfo));
// int oldStart(0), oldStop(1), newStart(0), newStop(1), newDim, oldDim;
// while (newStart < newRank && oldStart < oldRank) {
// newDim = newShape[newStart];
// oldDim = oldShape[oldStart];
// while (newDim != oldDim)
// if (newDim < oldDim) newDim *= newShape[newStop++];
// else oldDim *= oldShape[oldStop++];
// // ------ Check whether the original axes can be combined ------ //
// for (int i = oldStart; i < oldStop - 1; i++) {
// if(oldShape[i] == 1) { // ignore strides like {...,1,1,...}
// if(oldOrder == 102) ++oldStart;
// continue;
// }
// if(oldOrder == 102 && oldStrides[i + 1] != oldShape[i] * oldStrides[i])
// return false; // not contiguous enough
// if(oldOrder == 99 && oldStrides[i] != oldShape[i + 1] * oldStrides[i + 1])
// return false; // not contiguous enough
// }
// // ------ Calculate new strides for all axes currently worked with ------ //
// if(isFOrder) {
// newStrides[newStart] = oldStrides[oldStart];
// for (int i = newStart + 1; i < newStop; ++i)
// newStrides[i] = newStrides[i - 1] * newShape[i - 1];
// }
// else {
// newStrides[newStop - 1] = oldStrides[oldStop - 1];
// for (int i = newStop - 1; i > newStart; --i)
// newStrides[i - 1] = newStrides[i] * newShape[i];
// }
// newStart = newStop++;
// oldStart = oldStop++;
// }
// newShapeInfo[2 * newRank + 3] = shape::order(oldShapeInfo); // order
// newShapeInfo[2 * newRank + 2] = shape::elementWiseStride(oldShapeInfo); // ews
// newShapeInfo[2 * newRank + 1] = shape::type(oldShapeInfo); // type
// return true;
// }
//////////////////////////////////////////////////////////////////////
INLINEDEF _CUDA_H bool reshapeC(const int oldRank, const Nd4jLong* oldShapeInfo, const int newRank, const Nd4jLong* newShape, Nd4jLong* newShapeInfo) {
    // Attempts a view-style c-order reshape (no element copy): writes the new
    // shape-info into newShapeInfo and returns false when the old strides are
    // not contiguous enough to express the new shape as a view.
    // PLEASE NOTE !: reshaping not-permuted (ews=1) array in f order (except insertion/elimination of unities) will definitely cause allocation of new buffer for array elements
    // also this function takes into account identical shapes automatically, namely in that case oldShapeInfo is completely copied to newShapeInfo
    newShapeInfo[0] = newRank;
    memcpy(newShapeInfo + 1, newShape, newRank * sizeof(Nd4jLong));
    Nd4jLong* newStrides = shape::stride(newShapeInfo);
    const Nd4jLong* oldShape = shape::shapeOf(const_cast<Nd4jLong*>(oldShapeInfo));
    const Nd4jLong* oldStrides = shape::stride(const_cast<Nd4jLong*>(oldShapeInfo));
    // [oldStart, oldStop) and [newStart, newStop) are the axis runs being matched
    int oldStart(0), oldStop(1), newStart(0), newStop(1), newDim, oldDim;
    while (newStart < newRank && oldStart < oldRank) {
        newDim = newShape[newStart];
        oldDim = oldShape[oldStart];
        // grow whichever run covers fewer elements until both cover the same count
        while (newDim != oldDim)
            if (newDim < oldDim) newDim *= newShape[newStop++];
            else oldDim *= oldShape[oldStop++];
        // ------ Check whether the original axes can be combined ------ //
        for (int i = oldStart; i < oldStop - 1; i++)
            if(oldShape[i] != 1 && oldStrides[i] != oldShape[i + 1] * oldStrides[i + 1]) // oldShape[i] != 1 ---> ignore strides like {...,1,1,...}
                return false; // not contiguous enough
        // derive c-order strides for the new axes in this run, innermost first
        newStrides[newStop - 1] = oldStrides[oldStop - 1];
        for (int i = newStop - 1; i > newStart; --i)
            newStrides[i - 1] = newStrides[i] * newShape[i];
        newStart = newStop++;
        oldStart = oldStop++;
    }
    // carry the trailing metadata slots over from the old buffer
    newShapeInfo[2 * newRank + 3] = shape::order(oldShapeInfo);             // order
    newShapeInfo[2 * newRank + 2] = shape::elementWiseStride(oldShapeInfo); // ews
    newShapeInfo[2 * newRank + 1] = shape::type(oldShapeInfo);              // type
    return true;
}
INLINEDEF _CUDA_H bool canReshape(const int oldRank, Nd4jLong* oldShape, const int newRank, Nd4jLong* newShapeOf, bool isFOrder) {
    // Checks whether an array described by oldShape (shapeInfo) can be reshaped
    // to newShapeOf without copying the underlying buffer. isFOrder selects
    // column-major stride rules for the contiguity test and trial strides.
    Nd4jLong* squeezedDims    = shape::copyOf(oldRank, shape::shapeOf(oldShape));
    Nd4jLong* squeezedStrides = shape::copyOf(oldRank, shape::stride(oldShape));
    auto trialStrides         = new Nd4jLong[newRank];

    // single exit point: release the scratch buffers and forward the verdict
    auto finish = [&](bool verdict) {
        delete[] squeezedDims;
        delete[] squeezedStrides;
        delete[] trialStrides;
        return verdict;
    };

    // Drop unit axes from the old shape: they carry no data and their strides
    // must not influence the contiguity test below.
    int squeezedRank = 0;
    for (int k = 0; k < oldRank; ++k) {
        if (shape::shapeOf(oldShape)[k] != 1) {
            squeezedDims[squeezedRank]    = shape::shapeOf(oldShape)[k];
            squeezedStrides[squeezedRank] = shape::stride(oldShape)[k];
            ++squeezedRank;
        }
    }

    // Total element counts must agree, and 0-sized arrays are not handled.
    int newLen = 1;
    for (int k = 0; k < newRank; ++k)
        newLen *= newShapeOf[k];
    int oldLen = 1;
    for (int k = 0; k < squeezedRank; ++k)
        oldLen *= squeezedDims[k];
    if (newLen != oldLen || newLen == 0)
        return finish(false);

    // Greedily match groups of old axes against groups of new axes covering
    // the same number of elements: [oldLo,oldHi) vs [newLo,newHi).
    int oldLo = 0, oldHi = 1, newLo = 0, newHi = 1;
    while (newLo < newRank && oldLo < squeezedRank) {
        int newProd = newShapeOf[newLo];
        int oldProd = squeezedDims[oldLo];
        while (newProd != oldProd) {
            if (newProd < oldProd)
                newProd *= newShapeOf[newHi++];   /* trailing 1s handled later */
            else
                oldProd *= squeezedDims[oldHi++];
        }

        // the old axes inside the group must be contiguous w.r.t. each other
        for (int k = oldLo; k < oldHi - 1; ++k) {
            const bool contiguous = isFOrder
                ? squeezedStrides[k + 1] == squeezedDims[k] * squeezedStrides[k]
                : squeezedStrides[k] == squeezedDims[k + 1] * squeezedStrides[k + 1];
            if (!contiguous)
                return finish(false);             /* not contiguous enough */
        }

        // strides the new axes of this group would receive (computed only for
        // parity with the original implementation; the values are discarded)
        if (isFOrder) {
            trialStrides[newLo] = squeezedStrides[oldLo];
            for (int k = newLo + 1; k < newHi; ++k)
                trialStrides[k] = trialStrides[k - 1] * newShapeOf[k - 1];
        } else {
            trialStrides[newHi - 1] = squeezedStrides[oldHi - 1];
            for (int k = newHi - 1; k > newLo; --k)
                trialStrides[k - 1] = trialStrides[k] * newShapeOf[k];
        }

        newLo = newHi++;
        oldLo = oldHi++;
    }

    return finish(true);
}
// this function checks the consistence of dimensions with array rank (negative dimensions, too large dimensions, too big number of dimensions)
// also it sorts input array of dimensions, this operation is also necessary for creating TAD object
INLINEDEF _CUDA_H void checkDimensions(const int rank, std::vector<int>& dimensions) {
    // Validates a list of axes against an array rank and normalizes it in place:
    // negative axes are wrapped (dim += rank), then the list is sorted and
    // de-duplicated (required downstream for TAD creation).
    // Throws std::runtime_error for an empty list, too many axes, or axes that
    // remain outside [0, rank-1] after wrapping.
    if (dimensions.empty())
        throw std::runtime_error("shape::checkDimensions method: array of dimensions is empty!");
    // wrap negative axes:  -dim  ->  rank - |dim|
    for (size_t k = 0; k < dimensions.size(); ++k)
        if (dimensions[k] < 0)
            dimensions[k] += rank;
    // sorted, duplicate-free order is what TAD construction expects
    if (dimensions.size() > 1) {
        std::sort(dimensions.begin(), dimensions.end());
        dimensions.erase(std::unique(dimensions.begin(), dimensions.end()), dimensions.end());
    }
    if ((int) dimensions.size() > rank)
        throw std::runtime_error("shape::checkDimensions method: number of input dimensions is too big ( > rank of array)!");
    // after wrapping, every axis must land inside [0, rank-1]
    if (dimensions[0] < 0 || dimensions.back() > (rank - 1))
        throw std::runtime_error("shape::checkDimensions method: the negative dimension is still present in input array after transform or the too big dimension is present ( > rank of array) !");
}
// max array is outer for min array, min array is sub-array of max array
// function calculates the coordinates of min array (and saves them into minIdxs) given coordinates of max array (already stored in maxIdxs)
INLINEDEF _CUDA_HD void maxIndToMinInd(Nd4jLong* maxIdxs, Nd4jLong* minIdxs, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude, int dimsLen) {
// Projects per-axis coordinates of a "max" (outer) array element (maxIdxs)
// onto the "min" (sub) array, writing the result into minIdxs.
// dimsToExclude lists the max axes absent from the min array (nullptr means
// the leading dimsLen axes); dimsLen == -1 means "use maxRank - minRank".
// A coordinate that reaches or exceeds the min dimension is wrapped modulo
// that dimension (broadcast-style); minShapeInfo[i + 1] is the i-th dimension.
const auto maxRank = shape::rank(maxShapeInfo);
const auto minRank = shape::rank(minShapeInfo);
// if(minRank >= maxRank)
// throw std::runtime_error("shape::maxIndToMinInd method: rank of min array should be smaller then rank of max array!");
if(dimsLen == -1)
dimsLen = maxRank - minRank; // if size is not given (= -1) then it is equal to ranks difference
if(maxRank == minRank) {
// equal ranks: min and max axes line up one-to-one
if(dimsToExclude == nullptr) { // --> means dimsToExclude == {0,1,2,...,dimsLen-1}
for (int i = 0; i < maxRank; ++i) {
if(i < dimsLen)
minIdxs[i] = maxIdxs[i];
else {
// wrap the coordinate into the min array's extent
if(maxIdxs[i] > minShapeInfo[i + 1])
minIdxs[i] = maxIdxs[i] % minShapeInfo[i + 1];
else if(maxIdxs[i] == minShapeInfo[i + 1])
minIdxs[i] = 0;
else
minIdxs[i] = maxIdxs[i];
}
}
}
else {
// explicit excluded axes: copy those verbatim, wrap the rest
for (int i = 0, dim = 0; i < maxRank; ++i) {
if(dim < dimsLen && dimsToExclude[dim] == i) {
minIdxs[i] = maxIdxs[i];
++dim;
continue;
}
if(maxIdxs[i] > minShapeInfo[i + 1])
minIdxs[i] = maxIdxs[i] % minShapeInfo[i + 1];
else if(maxIdxs[i] == minShapeInfo[i + 1])
minIdxs[i] = 0;
else
minIdxs[i] = maxIdxs[i];
}
}
}
else {
// maxRank > minRank: skip the excluded max axes while walking both arrays
if(dimsToExclude == nullptr) { // --> means dimsToExclude == {0,1,2,...,dimsLen-1}
for (int i = 0; i < minRank; ++i) {
// min axis i corresponds to max axis i + dimsLen
if(maxIdxs[i + dimsLen] > minShapeInfo[i + 1])
minIdxs[i] = maxIdxs[i + dimsLen] % minShapeInfo[i + 1];
else if(maxIdxs[i + dimsLen] == minShapeInfo[i + 1])
minIdxs[i] = 0;
else
minIdxs[i] = maxIdxs[i + dimsLen];
}
}
else {
// minI tracks the min axis, maxI the max axis, dim the exclusion list
for (int minI = 0, maxI = 0, dim = 0; maxI < maxRank; ++maxI) {
if(dim < dimsLen && dimsToExclude[dim] == maxI) {
++dim;
continue;
}
if(maxIdxs[maxI] == minShapeInfo[minI + 1])
minIdxs[minI] = 0;
else if(maxIdxs[maxI] > minShapeInfo[minI + 1])
minIdxs[minI] = maxIdxs[maxI] % minShapeInfo[minI + 1];
else
minIdxs[minI] = maxIdxs[maxI];
++minI;
}
}
}
}
//////////////////////////////////////////////////////////////////////
INLINEDEF _CUDA_HD Nd4jLong subArrayIndex(const Nd4jLong maxIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude, const int dimsLen) {
    // Maps a linear index of the "max" (outer) array onto the linear index of
    // the corresponding element of the "min" (sub) array: unravel maxIdx into
    // per-axis coordinates, project them via maxIndToMinInd, then flatten again.
    Nd4jLong maxCoords[MAX_RANK];
    Nd4jLong minCoords[MAX_RANK];
    const auto maxRank = shape::rank(maxShapeInfo);
    auto* maxShapePtr = const_cast<Nd4jLong*>(maxShapeInfo) + 1;
    if (shape::order(maxShapeInfo) != 'c')
        shape::ind2sub(maxRank, maxShapePtr, const_cast<Nd4jLong&>(maxIdx), maxCoords);
    else
        shape::ind2subC(maxRank, maxShapePtr, const_cast<Nd4jLong&>(maxIdx), maxCoords);
    maxIndToMinInd(maxCoords, minCoords, maxShapeInfo, minShapeInfo, dimsToExclude, dimsLen);
    return sub2Ind(shape::rank(minShapeInfo), minShapeInfo + 1, minCoords);
}
//////////////////////////////////////////////////////////////////////
INLINEDEF _CUDA_HD Nd4jLong subArrayOffset(const Nd4jLong maxIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude, const int dimsLen) {
    // Maps a linear index of the "max" (outer) array onto the buffer offset of
    // the corresponding "min" (sub) array element: unravel maxIdx, project the
    // coordinates via maxIndToMinInd, then apply the min array's strides.
    Nd4jLong maxCoords[MAX_RANK];
    Nd4jLong minCoords[MAX_RANK];
    const auto maxRank = shape::rank(maxShapeInfo);
    const auto minRank = shape::rank(minShapeInfo);
    auto* maxShapePtr = const_cast<Nd4jLong*>(maxShapeInfo) + 1;
    if (shape::order(maxShapeInfo) != 'c')
        shape::ind2sub(maxRank, maxShapePtr, const_cast<Nd4jLong&>(maxIdx), maxCoords);
    else
        shape::ind2subC(maxRank, maxShapePtr, const_cast<Nd4jLong&>(maxIdx), maxCoords);
    maxIndToMinInd(maxCoords, minCoords, maxShapeInfo, minShapeInfo, dimsToExclude, dimsLen);
    return getOffset(0, minShapeInfo + 1, minShapeInfo + minRank + 1, minCoords, minRank);
}
//////////////////////////////////////////////////////////////////////
INLINEDEF _CUDA_HD int outerArrayOffsets(Nd4jLong* maxOffsets, const Nd4jLong minIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude) {
// Writes into maxOffsets the buffer offsets of every "max" (outer) array
// element that projects onto the "min" (sub) array element with linear index
// minIdx, and returns how many offsets were produced. dimsToExclude lists the
// max axes missing from the min array (nullptr means the leading diff axes).
// NOTE(review): indices/increment share one MAX_RANK buffer split in half,
// which presumes rankMax <= MAX_RANK/2 (see the commented-out check) -
// confirm at call sites.
const auto rankMin = shape::rank(minShapeInfo);
const auto rankMax = shape::rank(maxShapeInfo);
// if(rankMin >= rankMax)
// throw std::runtime_error("shape::subArrayIndex method: rank of min array should be smaller then rank of max array!");
// if(rankMax > MAX_RANK/2)
// throw std::runtime_error("shape::subArrayIndex method: rank of max array should be <= MAX_RANK/2 !");
const auto diff = rankMax - rankMin; // the size of dimsToExclude is equal to diff
Nd4jLong buffer[MAX_RANK];
Nd4jLong* indices = buffer;
Nd4jLong* increment = buffer + MAX_RANK/2;
int N, minI, maxI;
// calculate min per-dim-indices which corresponds to absolute minIdx index
if(order(minShapeInfo) == 'c')
shape::ind2subC(rankMin, minShapeInfo + 1, minIdx, indices);
else
shape::ind2sub(rankMin, const_cast<Nd4jLong*>(minShapeInfo) + 1, minIdx, indices);
// transform storage indices to contain per-dim max indices, purpose - memory saving
// fill increment array as well
// increment[axis] == 0                -> max/min dims match, coordinate fixed;
// increment[axis] == min dim          -> broadcast axis, step by min extent;
// increment[axis] == 1                -> excluded axis, enumerate full range.
if(dimsToExclude == nullptr) { // means dimsToExclude == {0,1,2,...,diff-1}
for(minI = rankMin - 1, maxI = rankMax-1; maxI >= diff; --maxI, --minI) {
increment[maxI] = (maxShapeInfo[maxI+1] == minShapeInfo[minI+1]) ? 0 : minShapeInfo[minI+1];
indices[maxI] = indices[minI];
}
for(maxI = 0; maxI < diff; ++maxI) {
increment[maxI] = 1;
indices[maxI] = 0;
}
}
else {
for(N = diff-1, minI = rankMin - 1, maxI = rankMax - 1; maxI >= 0; --maxI) {
if(N >= 0 && dimsToExclude[N] == maxI) {
increment[maxI] = 1;
indices[maxI] = 0;
--N;
}
else {
increment[maxI] = (maxShapeInfo[maxI+1] == minShapeInfo[minI+1]) ? 0 : minShapeInfo[minI+1];
indices[maxI] = indices[minI--];
}
}
}
maxI = rankMax-1;
N = 0;
int step;
maxOffsets[N++] = shape::getOffset(0, maxShapeInfo + 1, maxShapeInfo + rankMax + 1, indices, rankMax);
// nested loops - producing of absolute indices for max array
// odometer walk over all steppable axes; when a non-steppable axis
// (increment == 0, not the innermost) is visited, `step` deliberately keeps
// the value set by the previous iteration.
while(maxI >= 0) {
if(increment[maxI] != 0) {
indices[maxI] += increment[maxI];
if(indices[maxI] >= maxShapeInfo[maxI+1]) {
indices[maxI] %= increment[maxI]; // restore initial value of indices[maxI]
step = -1;
}
else {
maxOffsets[N++] = shape::getOffset(0, maxShapeInfo + 1, maxShapeInfo + rankMax + 1, indices, rankMax);
step = rankMax - 1 - maxI;
}
}
else if(maxI == rankMax - 1)
step = -1;
maxI += step;
}
return N;
}
//////////////////////////////////////////////////////////////////////
INLINEDEF _CUDA_HD int outerArrayIndexes(Nd4jLong* maxIdxs, const Nd4jLong minIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude) {
// Same enumeration as outerArrayOffsets, but emits linear indices (sub2Ind)
// instead of buffer offsets: writes into maxIdxs the linear index of every
// "max" array element that projects onto the "min" element minIdx, returning
// the count. dimsToExclude: max axes missing from the min array (nullptr
// means the leading diff axes).
// NOTE(review): indices/increment share one MAX_RANK buffer split in half,
// which presumes rankMax <= MAX_RANK/2 (see the commented-out check) -
// confirm at call sites.
const auto rankMin = shape::rank(minShapeInfo);
const auto rankMax = shape::rank(maxShapeInfo);
// if(rankMin >= rankMax)
// throw std::runtime_error("shape::subArrayIndex method: rank of min array should be smaller then rank of max array!");
// if(rankMax > MAX_RANK/2)
// throw std::runtime_error("shape::subArrayIndex method: rank of max array should be <= MAX_RANK/2 !");
const auto diff = rankMax - rankMin; // the size of dimsToExclude is equal to diff
Nd4jLong buffer[MAX_RANK];
Nd4jLong* indices = buffer;
Nd4jLong* increment = buffer + MAX_RANK/2;
int N, minI, maxI;
// calculate min per-dim-indices which corresponds to absolute minIdx index
if(order(minShapeInfo) == 'c')
shape::ind2subC(rankMin, minShapeInfo + 1, minIdx, indices);
else
shape::ind2sub(rankMin, const_cast<Nd4jLong*>(minShapeInfo) + 1, minIdx, indices);
// transform storage indices to contain per-dim max indices, purpose - memory saving
// fill increment array as well
// increment[axis] == 0       -> max/min dims match, coordinate stays fixed;
// increment[axis] == min dim -> broadcast axis, step by the min extent;
// increment[axis] == 1       -> excluded axis, enumerate its full range.
if(dimsToExclude == nullptr) { // means dimsToExclude == {0,1,2,...,diff-1}
for(minI = rankMin - 1, maxI = rankMax-1; maxI >= diff; --maxI, --minI) {
increment[maxI] = (maxShapeInfo[maxI+1] == minShapeInfo[minI+1]) ? 0 : minShapeInfo[minI+1];
indices[maxI] = indices[minI];
}
for(maxI = 0; maxI < diff; ++maxI) {
increment[maxI] = 1;
indices[maxI] = 0;
}
}
else {
for(N = diff-1, minI = rankMin - 1, maxI = rankMax - 1; maxI >= 0; --maxI) {
if(N >= 0 && dimsToExclude[N] == maxI) {
increment[maxI] = 1;
indices[maxI] = 0;
--N;
}
else {
increment[maxI] = (maxShapeInfo[maxI+1] == minShapeInfo[minI+1]) ? 0 : minShapeInfo[minI+1];
indices[maxI] = indices[minI--];
}
}
}
maxI = rankMax-1;
N = 0;
int step;
maxIdxs[N++] = sub2Ind(rankMax, maxShapeInfo + 1, indices);
// nested loops - producing of absolute indices for max array
// odometer walk over all steppable axes; when a non-steppable axis
// (increment == 0, not the innermost) is visited, `step` deliberately keeps
// the value set by the previous iteration.
while(maxI >= 0) {
if(increment[maxI] != 0) {
indices[maxI] += increment[maxI];
if(indices[maxI] >= maxShapeInfo[maxI+1]) {
indices[maxI] %= increment[maxI]; // restore initial value of indices[maxI]
step = -1;
}
else {
maxIdxs[N++] = sub2Ind(rankMax, maxShapeInfo + 1, indices);
step = rankMax - 1 - maxI;
}
}
else if(maxI == rankMax - 1)
step = -1;
maxI += step;
}
return N;
}
INLINEDEF _CUDA_HD void shapeOldScalar(nd4j::DataType dataType, Nd4jLong* const buffer, const char order) {
    // Fills `buffer` with the legacy rank-2 {1,1} scalar shape descriptor:
    // [rank, shape(2), strides(2), <extra>, ews, order]; the buffer must hold
    // at least 8 Nd4jLong values. buffer[5] is not written here - presumably
    // ArrayOptions::setDataType stores the type flags there; confirm against
    // ArrayOptions before relying on it.
    buffer[0] = 2;          // rank of the legacy scalar descriptor
    for (int e = 1; e <= 4; ++e)
        buffer[e] = 1;      // shape {1,1} and strides {1,1}
    buffer[6] = 1;          // element-wise stride
    buffer[7] = (int)order;
    nd4j::ArrayOptions::setDataType(buffer, dataType);
}
template <typename T1, typename T2>
INLINEDEF _CUDA_H void convertT(T1 *from, T2 *to, Nd4jLong length) {
    // Element-wise cast of `length` items from buffer `from` (type T1) into
    // buffer `to` (type T2); both buffers must hold at least `length` items.
    Nd4jLong idx = 0;
    while (idx < length) {
        to[idx] = (T2) from[idx];
        ++idx;
    }
};
//////////////////////////////////////////////////////////////////////
INLINEDEF void calcSubArrOffsets(const Nd4jLong numOfSubArrs, const int rank, const Nd4jLong* shape, const Nd4jLong* strides, Nd4jLong* subArrOffsets) {
    // Computes the buffer offset of each of the numOfSubArrs sub-arrays whose
    // positions are enumerated in C order over `shape`, using `strides`.
    // subArrOffsets must hold numOfSubArrs elements; unit dimensions never
    // contribute to an offset and are skipped.
    //
    // Fix vs. previous revision: expressions such as
    //     subArrOffsets[i++] = subArrOffsets[i-1] + ...
    //     currOffset[j--] = idx[j] = 0
    // read and modify the same index variable inside one statement, which is
    // undefined behaviour before C++17; they are now split into explicitly
    // sequenced statements (preserving the C++17 evaluation order).
    // set offset for first sub-array, it is equal to zero always
    subArrOffsets[0] = 0;
    // choose whether to parallelize or not
    if(numOfSubArrs > 1024 /*Environment::getInstance()->elementwiseThreshold()*/) {
#pragma omp parallel // PRAGMA_OMP_PARALLEL_ARGS(private(indexes))
        {
            // per-thread scratch for the unraveled C-order coordinates
            Nd4jLong* indexes = new Nd4jLong[rank];
#pragma omp for simd schedule(guided) // PRAGMA_OMP_PARALLEL_FOR
            for (Nd4jLong i = 1; i < numOfSubArrs; ++i) {
                shape::ind2subC(rank, shape, i, indexes);
                subArrOffsets[i] = 0;
                for (int j = 0; j < rank; ++j)
                    if(shape[j] != 1)
                        subArrOffsets[i] += indexes[j] * strides[j];
            }
            delete []indexes;
        }
    }
    else {
        Nd4jLong rankMinusOne = rank - 1;
        Nd4jLong i = 1, j = rankMinusOne;
        Nd4jLong* idx = new Nd4jLong[rank];
        Nd4jLong* currOffset = new Nd4jLong[rank];
        memset(idx, 0, sizeof(Nd4jLong) * rank);
        memset(currOffset, 0, sizeof(Nd4jLong) * rank);
        // nested loops - calculation of sub-array offsets (subArrOffsets)
        while(j >= 0) {
            if(shape[j] == 1) { --j; continue; } // ignore dimensions equal to unity
            if(j == rankMinusOne) { // last dimension: consecutive offsets differ by strides[j]
                for(idx[j] = 1; idx[j] < shape[j]; ++idx[j]) {
                    subArrOffsets[i] = subArrOffsets[i - 1] + strides[j];
                    ++i;
                }
                --j;
            }
            else if(idx[j] < shape[j] - 1) {
                currOffset[j] += strides[j];
                subArrOffsets[i] = j ? currOffset[j] + currOffset[j - 1] : currOffset[j];
                ++i;
                ++idx[j];
                j = rankMinusOne;
            }
            else {
                // axis j exhausted: reset its counter/offset, carry to next-outer axis
                idx[j] = 0;
                currOffset[j] = 0;
                --j;
            }
        }
        delete []idx;
        delete []currOffset;
    }
}
//////////////////////////////////////////////////////////////////////
INLINEDEF void _CUDA_HD calcEws(Nd4jLong* shapeInfo, Nd4jLong len) {
    // Computes the element-wise stride (ews) field of shapeInfo in place:
    // 1 when the buffer is traversed densely in its own order, the single
    // non-unit stride for effectively-1D arrays, and 0 when no single stride
    // describes the layout. Pass len = -1 to derive the length from shapeInfo.
    const int rank           = shape::rank(shapeInfo);
    const Nd4jLong* shape    = shape::shapeOf(shapeInfo);
    const Nd4jLong* strides  = shape::stride(shapeInfo);
    const char order         = shape::order(shapeInfo);
    Nd4jLong* ews            = shape::ews(shapeInfo);
    if (len == -1)               // calculate array length if it is not already set
        len = shape::length(shapeInfo);
    // empty, scalar or single-element array is trivially dense
    if (len <= 1) {
        *ews = 1;
        return;
    }
    // arrays with one non-unit axis behave like plain vectors
    int nonUnityDim(0);
    if (shape::isCommonVector(shapeInfo, nonUnityDim)) {
        *ews = strides[nonUnityDim];
        return;
    }
    // the innermost axis (last for 'c', first otherwise) must have stride 1
    const bool innerDense = (order == 'c')
        ? (shape[rank - 1] == 1 || strides[rank - 1] == 1)
        : (shape[0] == 1 || strides[0] == 1);
    if (!innerDense) {
        *ews = 0;
        return;
    }
    // walk the remaining axes, comparing each stride against the dense value
    Nd4jLong expected = 1;
    if (order == 'c') {
        for (int d = rank - 2; d >= 0; --d) {
            expected *= shape[d + 1];
            if (shape[d] != 1 && strides[d] != expected) {
                *ews = 0;
                return;
            }
        }
    }
    else {
        for (int d = 1; d < rank; ++d) {
            expected *= shape[d - 1];
            if (shape[d] != 1 && strides[d] != expected) {
                *ews = 0;
                return;
            }
        }
    }
    *ews = 1;
}
}
#endif /* SHAPE_H_ */
|
3_2_2.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define N 10000000
/* Elapsed wall-clock time between two timespec samples, in milliseconds. */
double cal_time(struct timespec *t_end, struct timespec *t_start)
{
    const double sec_ms  = (t_end->tv_sec - t_start->tv_sec) * 1000.0;
    const double nsec_ms = (t_end->tv_nsec - t_start->tv_nsec) / 1000000.0;
    return sec_ms + nsec_ms;
}
/* Swaps two N-element arrays sequentially and in parallel, timing both and
 * verifying the results agree.
 * Fixes vs. previous revision: the four malloc() results were used unchecked,
 * and none of the buffers was ever freed. */
int main()
{
    struct timespec t_start, t_end;
    int i;
    int temp;
    int *A, *B, *AA, *BB;
    /* (A,B) are swapped sequentially, (AA,BB) in parallel; compared at the end */
    A = (int *)malloc(N * sizeof(int));
    B = (int *)malloc(N * sizeof(int));
    AA = (int *)malloc(N * sizeof(int));
    BB = (int *)malloc(N * sizeof(int));
    if (A == NULL || B == NULL || AA == NULL || BB == NULL)
    {
        /* bail out instead of dereferencing a failed allocation */
        fprintf(stderr, "Out of memory\n");
        free(A);
        free(B);
        free(AA);
        free(BB);
        return 1;
    }
    for (i = 0; i < N; i++)
    {
        A[i] = rand() % 256;
        B[i] = rand() % 256;
        AA[i] = A[i];
        BB[i] = B[i];
    }
    /* sequential element-wise swap */
    clock_gettime(CLOCK_REALTIME, &t_start);
    for (i = 0; i < N; i++)
    {
        temp = A[i];
        A[i] = B[i];
        B[i] = temp;
    }
    clock_gettime(CLOCK_REALTIME, &t_end);
    printf("Sequential time: %lf ms\n", cal_time(&t_end, &t_start));
    /* parallel swap: temp is declared inside the loop body so every thread
     * gets its own private copy */
    clock_gettime(CLOCK_REALTIME, &t_start);
#pragma omp parallel for
    for (i = 0; i < N; i++)
    {
        int temp;
        temp = AA[i];
        AA[i] = BB[i];
        BB[i] = temp;
    }
    clock_gettime(CLOCK_REALTIME, &t_end);
    printf("Parallel time: %lf ms\n", cal_time(&t_end, &t_start));
    /* verify both versions produced identical arrays */
    for (i = 0; i < N; i++)
    {
        if (A[i] != AA[i] || B[i] != BB[i])
            break;
    }
    if (i == N)
        printf("Test pass!!!\n");
    else
        printf("Test failure\n");
    /* previously leaked */
    free(A);
    free(B);
    free(AA);
    free(BB);
    return 0;
}
|
GB_unaryop__minv_fp32_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_fp32_int64
// op(A') function: GB_tran__minv_fp32_int64
// C type: float
// A type: int64_t
// cast: float cij = (float) aij
// unaryop: cij = (1.0F)/aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = (1.0F)/x ;
// casting
#define GB_CASTING(z, x) \
float z = (float) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_FP32 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (1.0F) / ((float) Ax [p]) for p = 0..anz-1 (see GB_OP/GB_CASTING
// macros above). Returns GrB_NO_VALUE when this kernel is compiled out via
// GB_DISABLE. This file is auto-generated; do not hand-edit the logic.
GrB_Info GB_unop__minv_fp32_int64
(
float *restrict Cx,
const int64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// each entry is independent, so a static schedule over anz entries suffices
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = minv (cast (A')): the actual transpose loop lives in the shared
// template GB_unaryop_transpose.c, specialized through the GB_* macros
// defined above in this file. Returns GrB_NO_VALUE when compiled out via
// GB_DISABLE. This file is auto-generated; do not hand-edit the logic.
GrB_Info GB_tran__minv_fp32_int64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
DRB045-doall1-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
/*
Simplest one dimension array computation
*/
int a[100];
int main()
{
// Two embarrassingly parallel loops over the global a[100]: initialization,
// then an element-wise increment. Every iteration touches only a[i], so the
// kernel is race-free by construction (DataRaceBench "no data race" case);
// keep the loop structure intact - it is what the benchmark exercises.
int i;
#pragma omp parallel for private(i)
for (i=0;i<100;i++)
a[i]=i;
#pragma omp parallel for private(i)
for (i=0;i<100;i++)
a[i]=a[i]+1;
// sequential read-back of the results
for (i=0;i<100;i++)
printf("%d\n",a[i]);
return 0;
}
|
scalaf.c | /*
Código secuencial para la simulación de flujos de lava volcánica
-Sergio Augusto Gélvez Cortés
*/
#include "scalaf.h"
#include "math.h"
#include "string.h"
#include <getopt.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <omp.h>
#define NUMBER_OF_TRHEADS 2
// Función que calcula la viscosidad a partir de la temperatura,
// tomada del artículo de Miyamoto y Sasaki
double visc(double temperature)
{
return pow(10, (20.0 * (exp(-0.001835 * (temperature - 273.0)))));
}
// Función que calcula la tensión cortante a partir de la temperatura,
// tomada del artículo de Miyamoto y Sasaki
double yield(double temperature)
{
return pow(10, (11.67 - 0.0089 * (temperature - 273.0)));
}
// Función para colocar los cráteres en la matriz,
// toma los datos de un punto en 2D y revisa que estén en el rango correcto.
/* Marks crater (vent) cells in the terrain matrix A (totalRows x totalColumns,
 * row-major). P holds totalCraters points (x = row, y = column); points that
 * fall outside the grid are ignored. Returns the number of craters placed.
 *
 * Fixes vs. previous revision:
 *  - craterRow/craterColumn were declared outside the OpenMP loop and thus
 *    shared by all threads (data race); they are now loop-local (private).
 *  - the function was declared int but had no return statement (undefined
 *    behaviour if the caller reads the result). */
int placeCraters(mapCell *A, const point2D *P, int totalRows, int totalColumns,
                 int totalCraters)
{
    int placed = 0;
#pragma omp parallel for reduction(+ : placed)
    for (int i = 0; i < totalCraters; ++i)
    {
        const int craterRow = P[i].x;
        const int craterColumn = P[i].y;
        if ((craterRow > -1) && (craterRow < totalRows) &&
            (craterColumn > -1) && (craterColumn < totalColumns))
        {
            A[craterRow * totalColumns + craterColumn].isVent = 1;
            placed += 1;
        }
    }
    return placed;
}
// Función para leer puntos en 2D desde un archivo de texto plano.
// X para Filas, Y para Columnas.
/* Reads up to numberOfCraters "row,column" pairs from the plain-text file at
 * `path` into craterPositions (x = row, y = column).
 * Returns 1 on success, 0 when the file cannot be opened.
 *
 * Fix vs. previous revision: a blank or malformed line made strtok() return
 * NULL and atol(NULL) crash; such lines are now skipped. */
int readCratersPositionFile(char *path, int numberOfCraters,
                            point2D *craterPositions)
{
    FILE *cratersPositionFile;
    char lineBuffer[1000];
    char *token;
    printf("\nLeyendo posición de los crátetes...");
    if ((cratersPositionFile = fopen(path, "r")) != NULL)
    {
        int i = 0;
        while ((fgets(lineBuffer, sizeof(lineBuffer), cratersPositionFile) != NULL) &&
               (i < numberOfCraters))
        {
            token = strtok(lineBuffer, ",");
            if (token == NULL)
                continue; /* empty line: no row field */
            long row = atol(token);
            token = strtok(NULL, ",");
            if (token == NULL)
                continue; /* malformed line: column field missing */
            craterPositions[i].x = row;
            craterPositions[i].y = atol(token);
            i += 1;
        }
        printf("\n\t-Cráteres creados correctamente.");
        fclose(cratersPositionFile);
        return 1;
    }
    else
    {
        printf("\nERROR: Archivo con posición de los cráteres no encontrado!");
        return 0;
    }
}
// Función para inicializar los valores en las celdas del terreno,
// las altitudes del terreno se leen desde un archivo de texto plano
// y los demás valores en la celda son asignados por defecto.
/* Loads the terrain altitude grid (maxRows x maxColumns comma-separated
 * values, one row per line) from `path` into `map`, initializing every other
 * cell field to its default (no lava, 273 K). Returns 1 on success, 0 when
 * the file cannot be opened.
 *
 * Fixes vs. previous revision:
 *  - fgets() read at most 1000 bytes although lineBuffer holds 8192, silently
 *    splitting long terrain rows; it now uses the whole buffer.
 *  - rows beyond maxRows in an oversized file were written past the end of
 *    `map`; they are now ignored. */
int readTerrainFile(char *path, int maxRows, int maxColumns, mapCell *map)
{
    FILE *mapAltitudesFile;
    char lineBuffer[8192];
    char *token;
    int i, j;
    double altitudeValue;
    if ((mapAltitudesFile = fopen(path, "r")) != NULL)
    {
        printf("\nLeyendo mapa de alturas del terreno...");
        i = 0;
        /* one text line per terrain row; tokens are the column altitudes */
        while (fgets(lineBuffer, sizeof(lineBuffer), mapAltitudesFile) != NULL)
        {
            j = 0;
            token = strtok(lineBuffer, ",");
            while (token)
            {
                if (i < maxRows && j < maxColumns)
                {
                    altitudeValue = atof(token);
                    map[i * maxColumns + j].altitude = altitudeValue;
                    map[i * maxColumns + j].thickness = 0;
                    map[i * maxColumns + j].isVent = 0;
                    map[i * maxColumns + j].temperature = 273.0;
                    map[i * maxColumns + j].yield = 0.0;
                    map[i * maxColumns + j].viscosity = 0.0;
                    map[i * maxColumns + j].exits = 0;
                    j += 1;
                }
                token = strtok(NULL, ",");
            }
            /* advance to the next row only once a full row was parsed */
            if (j >= maxColumns)
            {
                i += 1;
            }
        }
        printf("\n\t- Terreno inicializado correctamente.");
        fclose(mapAltitudesFile);
        return 1;
    }
    else
    {
        printf("\nERROR: Mapa de alturas no encontrado!");
        return 0;
    }
}
/* La función prefunción "agranda" la matriz, colocando una fila y una
* columna al principio y al final (la matriz tiene dimensiones (MAX_ROWS
* +2)*(MAX_COLS+2). De antemano me disculpo por la cantidad de veces que
* imprimo la matriz. */
/* Initializes one sentinel border cell: impassably high terrain (100 km
 * altitude) with no lava, so flows can never leave the grid. */
static void initBorderCell(mapCell *cell)
{
    cell->altitude = 100000;
    cell->thickness = 0;
    cell->temperature = 0;
    cell->isVent = 0;
    cell->yield = 0;
    cell->viscosity = 0;
    cell->exits = 0;
    cell->inboundV = 0;
    cell->outboundV = 0;
    cell->inboundQ = 0;
}

/* Copies the filas x columnas matrix A into C - which must have room for
 * (filas+2) x (columnas+2) cells - surrounding it with a one-cell ring of
 * sentinel border cells (see initBorderCell). Interior cells keep their
 * altitude/thickness/temperature/isVent and get their flow state reset.
 *
 * Fixes vs. previous revision: the temporary matrix B was malloc'd and never
 * freed (leaking (filas+2)*(columnas+2)*sizeof(mapCell) bytes per call), and
 * the malloc result was used unchecked. The duplicated border-cell field
 * assignments were factored into initBorderCell. */
void preFuncion(int filas, int columnas, const mapCell *A, mapCell *C)
{
    int i, j;
    printf("\nAgregando filas y columnas extras en los bordes...\n");
    mapCell *B = (mapCell *)malloc((filas + 2) * (columnas + 2) * sizeof(mapCell));
    if (B == NULL)
    {
        printf("\nERROR: sin memoria para agrandar la matriz!\n");
        return;
    }
    for (i = 0; i < filas + 2; ++i)
    {
        for (j = 0; j < columnas + 2; ++j)
        {
            mapCell *cell = &B[(columnas + 2) * i + j];
            if (i == 0 || i == (filas + 1) || j == 0 || j == (columnas + 1))
            {
                /* ring of sentinel cells around the original matrix */
                initBorderCell(cell);
            }
            else
            {
                /* interior cell at (i,j) maps to A at (i-1, j-1) */
                const mapCell *src = &A[columnas * (i - 1) + (j - 1)];
                cell->altitude = src->altitude;
                cell->thickness = src->thickness;
                cell->temperature = src->temperature;
                cell->isVent = src->isVent;
                cell->yield = 0;
                cell->viscosity = 0;
                cell->exits = 0;
                cell->inboundV = 0;
                cell->outboundV = 0;
                cell->inboundQ = 0;
            }
        }
    }
    /* hand the enlarged matrix back through the out-parameter */
    memcpy(C, B, (filas + 2) * (columnas + 2) * sizeof(mapCell));
    free(B); /* was leaked before */
    printf("Salir de la función agrandar...\n");
}
/* La función postfunción "reduce" la matriz, eliminando una fila y una
columna al principio y al final (la matriz tiene dimensiones (MAX_ROWS
+2)*(MAX_COLS+2) y el resultado MAX_ROWS*MAX_COLS. */
/* Strips the one-cell sentinel border from the filas x columnas matrix A,
 * writing the (filas-2) x (columnas-2) interior into C. Only the fields the
 * simulation consumes downstream are copied (inboundQ is intentionally not).
 *
 * Fixes vs. previous revision: the temporary matrix B was malloc'd and never
 * freed (memory leak on every call), and the malloc result was unchecked. */
void postFuncion(int filas, int columnas, const mapCell *A, mapCell *C)
{
    int i, j;
    printf("eliminando filas y columnas extras de la matriz...\n");
    int n_columnas = columnas - 2;
    int n_filas = filas - 2;
    mapCell *B = (mapCell *)malloc(n_filas * n_columnas * sizeof(mapCell));
    if (B == NULL)
    {
        printf("ERROR: sin memoria para reducir la matriz!\n");
        return;
    }
    int f = 0;
    /* walk the interior rows/columns only, skipping the sentinel ring */
    for (i = 1; i < filas - 1; i++)
    {
        int c = 0;
        for (j = 1; j < columnas - 1; j++)
        {
            B[f * n_columnas + c].altitude = A[i * columnas + j].altitude;
            B[f * n_columnas + c].thickness = A[i * columnas + j].thickness;
            B[f * n_columnas + c].temperature = A[i * columnas + j].temperature;
            B[f * n_columnas + c].isVent = A[i * columnas + j].isVent;
            B[f * n_columnas + c].yield = A[i * columnas + j].yield;
            B[f * n_columnas + c].viscosity = A[i * columnas + j].viscosity;
            B[f * n_columnas + c].exits = A[i * columnas + j].exits;
            B[f * n_columnas + c].inboundV = A[i * columnas + j].inboundV;
            B[f * n_columnas + c].outboundV = A[i * columnas + j].outboundV;
            c += 1;
        }
        f += 1;
    }
    /* hand the reduced matrix back through the out-parameter */
    memcpy(C, B, n_filas * n_columnas * sizeof(mapCell));
    free(B); /* was leaked before */
}
// declaración de la variable global que guarda las condiciones iniciales
// no es muy recomendable, pero no hay tanto tiempo.
initialConditions c0;
// Esta es la función donde se calcula todo.
// en la versión CUDA es sustituida.
// One simulation step of the lava cellular automaton.
//
// The matrix A is assumed to already carry the halo padding (filas/columnas
// include the extra border rows/columns). The step runs four passes:
//   1. refresh viscosity/yield from temperature and zero the flux accumulators
//   2. count, per cell, how many neighbours it can flow into ("exits")
//   3. compute the volume/heat fluxes between neighbours (plus vent input)
//   4. consolidate fluxes into new thickness and temperature values
// The final state is copied into C.
//
// Parameters:
//   filas, columnas - padded grid dimensions
//   A               - working grid (modified in place)
//   C               - output grid, receives a copy of the final state of A
//
// Fixes vs. the previous version: the OpenMP loops used function-scope
// shared temporaries (j, Href, Aref, deltaQ, ...), which is a data race;
// all per-iteration state is now loop-local, and the cross-cell
// "exits" increment is done atomically.
void FuncionPrincipal(int filas, int columnas, mapCell *A, mapCell *C)
{
  double cArea = c0.cellWidth * c0.cellWidth;

  // Pass 1: initialise per-cell rheology and flux accumulators.
  // Each cell is independent, so the loop parallelises safely.
  #pragma omp parallel for
  for (int i = 1; i < filas - 1; i++)
  {
    for (int j = 1; j < columnas - 1; j++)
    {
      A[i * columnas + j].viscosity = visc(A[i * columnas + j].temperature);
      A[i * columnas + j].yield = yield(A[i * columnas + j].temperature);
      A[i * columnas + j].inboundV = 0.0;
      A[i * columnas + j].inboundQ = 0.0;
      A[i * columnas + j].outboundV = 0.0;
      A[i * columnas + j].exits = 0;
    }
  }

  // Pass 2: for every cell pair (cell, neighbour), decide whether the
  // neighbour can shed lava towards the cell; if so, count one exit on the
  // neighbour. Neighbouring cells are visited by different threads, so the
  // increment must be atomic.
  #pragma omp parallel for
  for (int i = 1; i < filas - 1; i++)
  {
    for (int j = 1; j < columnas - 1; j++)
    {
      double Href = A[i * columnas + j].thickness;
      double Aref = A[i * columnas + j].altitude;
      for (int l = -1; l < 2; l++)
      {
        for (int m = -1; m < 2; m++)
        {
          if (m == 0 && l == 0)
            continue;
          int ni = i + l;
          int nj = j + m;
          double Acomp = A[ni * columnas + nj].altitude;
          double Hcomp = A[ni * columnas + nj].thickness;
          // Critical thickness below which the yield stress holds the lava
          // in place (Bingham-style criterion along the slope).
          double Hcrit =
              fabs((A[ni * columnas + nj].yield *
                    sqrt((Acomp - Aref) * (Acomp - Aref) +
                         c0.cellWidth * c0.cellWidth)) /
                   (density * gravity * ((Acomp - Aref) - (Hcomp - Href))));
          if ((Hcomp > Hcrit) && (Hcrit > 1e-8) &&
              (fabs(Hcomp + Acomp) > fabs(Href + Aref)))
          {
            // neighbour's free surface is higher: it gains one outflow path
            #pragma omp atomic
            A[ni * columnas + nj].exits += 1;
          }
        }
      }
    }
  }

  // Pass 3: compute the fluxes. States are only committed in pass 4, as the
  // cellular-automaton model requires. This pass writes to neighbouring
  // cells (outboundV) as well as the current one, so it stays sequential.
  for (int i = 1; i < filas - 1; i++)
  {
    for (int j = 1; j < columnas - 1; j++)
    {
      // Vents inject fresh lava (and its heat) at the eruption rate.
      if (A[i * columnas + j].isVent == 1)
      {
        double ventV = (c0.eruptionRate) * c0.deltat;
        A[i * columnas + j].inboundV += ventV;
        A[i * columnas + j].inboundQ +=
            (ventV * c0.eruptionTemperature) * heatCapacity * density;
      }
      double Href = A[i * columnas + j].thickness;
      double Aref = A[i * columnas + j].altitude;
      for (int l = -1; l < 2; l++)
      {
        for (int m = -1; m < 2; m++)
        {
          if (m == 0 && l == 0)
            continue;
          int ni = i + l;
          int nj = j + m;
          double Acomp = A[ni * columnas + nj].altitude;
          double Hcomp = A[ni * columnas + nj].thickness;
          double deltaH = Hcomp - Href;
          // Same critical-thickness criterion as pass 2.
          double Hcrit = fabs((A[ni * columnas + nj].yield *
                               sqrt((Acomp - Aref) * (Acomp - Aref) +
                                    c0.cellWidth * c0.cellWidth)) /
                              (density * gravity * ((Acomp - Aref) - deltaH)));
          if ((Hcomp > Hcrit) && (Hcrit > 1e-8))
          {
            double h_hc = Hcomp / Hcrit;
            double deltaV = 0.0;
            if ((fabs(Hcomp + Acomp) > fabs(Href + Aref)) &&
                (A[ni * columnas + nj].exits > 0))
            {
              // Volume shed from the neighbour towards this cell, split
              // evenly over the neighbour's exits.
              deltaV = (1.0 / A[ni * columnas + nj].exits) *
                       ((A[ni * columnas + nj].yield * Hcrit * Hcrit *
                         c0.cellWidth) /
                        (3 * A[ni * columnas + nj].viscosity)) *
                       (h_hc * h_hc * h_hc - 1.5 * h_hc * h_hc + 0.5) *
                       (c0.deltat);
              // Cap the transfer so at most half the height difference
              // moves per step (keeps the scheme stable).
              double maxV =
                  (deltaH * cArea) / (2 * A[ni * columnas + nj].exits);
              if (maxV < deltaV)
                deltaV = maxV;
            }
            // Book the volume on both sides, plus an estimate of the heat
            // carried (source-cell temperature times transferred volume).
            A[ni * columnas + nj].outboundV += deltaV;
            A[i * columnas + j].inboundV += deltaV;
            A[i * columnas + j].inboundQ +=
                deltaV * A[ni * columnas + nj].temperature * density *
                heatCapacity;
          }
        }
      }
    }
  }

  // Pass 4: consolidate fluxes into the new thickness and temperature.
  // Each cell only reads/writes its own accumulators, so this parallelises
  // once the temporaries are loop-local.
  #pragma omp parallel for
  for (int i = 1; i < filas - 1; i++)
  {
    for (int j = 1; j < columnas - 1; j++)
    {
      double thickness_0 = A[i * columnas + j].thickness;
      double temperature_0 = A[i * columnas + j].temperature;
      double Q_base = thickness_0 * temperature_0 * density * heatCapacity;
      double deltaQ_rad = 0.0;
      // Negligible thickness radiates no meaningful heat.
      if (A[i * columnas + j].thickness > 1e-4)
      {
        double T = A[i * columnas + j].temperature;
        // Stefan-Boltzmann radiative loss over the cell area.
        deltaQ_rad =
            (-1.0) * SBConst * (cArea)*emisivity * c0.deltat * (T * T * T * T);
      }
      A[i * columnas + j].thickness = thickness_0 +
                                      (A[i * columnas + j].inboundV / (cArea)) -
                                      (A[i * columnas + j].outboundV / (cArea));
      double deltaQ_flu_in = A[i * columnas + j].inboundQ;
      double deltaQ_flu_out = A[i * columnas + j].outboundV * temperature_0 *
                              density * heatCapacity;
      double deltaQ = Q_base + (deltaQ_flu_in - deltaQ_flu_out) + deltaQ_rad;
      if (A[i * columnas + j].thickness > 1e-8)
      {
        A[i * columnas + j].temperature =
            deltaQ /
            (density * heatCapacity * cArea * A[i * columnas + j].thickness);
      }
      else
      {
        // Effectively empty cell: reset to ambient temperature.
        A[i * columnas + j].temperature = 273.0;
      }
    }
  }
  memcpy(C, A, filas * columnas * sizeof(mapCell));
}
// Generates the two files GNUplot needs to render one frame and invokes
// gnuplot on them:
//   1. "<path><secuencia>"      - data file: x y z temperature per cell
//   2. "<path><secuencia>_enca" - gnuplot command script
// producing "<path><secuencia>.png". z is the surface height, i.e. base
// altitude plus lava-layer thickness.
//
// Parameters:
//   secuencia    - frame number (appended to the file name, used as title)
//   path         - base path/name for all output files
//   filas, columnas, matriz - grid dimensions and data
//   cifrasSignif - significant figures (not implemented yet)
//   w, x0, y0    - cell width and origin of the plotted coordinates
// Returns 0 on success, 1 on any failure (empty path, file write error).
// Previously this was declared int but fell off the end without returning.
int prepararVisualizacionGNUPlot(int secuencia, char *path, int filas,
                                 int columnas, mapCell *matriz,
                                 int cifrasSignif, double w, double x0,
                                 double y0)
{
  FILE *datosAltitud;
  FILE *encabezado;
  int i, j, flag;
  double xcoord, ycoord, zcoord, temp;
  long int cont;
  char newpath[500], tsec[20], pathenca[550], command[700], f_path[1024];
  (void)cifrasSignif; // TODO: honour the requested significant figures
  if (strlen(path) == 0)
    return 1; // nothing to write without a base path
  strcpy(newpath, path);
  sprintf(tsec, "%d", secuencia);
  strcat(newpath, tsec);
  printf("Escribiendo archivo: %s\n", newpath);
  datosAltitud = fopen(newpath, "w");
  if (datosAltitud != NULL)
  {
    printf("Guardando en archivo...\n");
    cont = 0;
    for (j = 0; j < columnas; ++j)
    {
      for (i = 0; i < filas; ++i)
      {
        xcoord = x0 + i * w;
        ycoord = y0 + j * w;
        // surface height = base altitude + lava thickness
        zcoord = matriz[columnas * i + j].thickness +
                 matriz[columnas * i + j].altitude;
        temp = matriz[columnas * i + j].temperature;
        fprintf(datosAltitud, "%6.3lf %6.3lf %6.3lf %6.3lf\n", xcoord, ycoord,
                zcoord, temp);
        cont += 1;
      }
      // blank line between scan lines (required by pm3d / isolines)
      fprintf(datosAltitud, "\n");
    }
    printf("Se escribieron %ld elementos.\n\n", cont);
    fclose(datosAltitud);
  }
  else
  {
    printf("***\nError al intentar escribir el archivo de datos.\n***\n\n");
    return 1;
  }
  strcpy(pathenca, newpath);
  strcat(pathenca, "_enca");
  printf("Guardando archivo de encabezado %s \n", pathenca);
  encabezado = fopen(pathenca, "w");
  if (encabezado != NULL)
  {
    // png terminal; switch to "set term postscript eps enhanced color"
    // for eps output if ever needed.
    fprintf(encabezado, "set terminal png size 1366,720 enhanced\n\n");
    fprintf(encabezado, "set output '%s.png'\n", newpath);
    fprintf(encabezado, "set xrange [0:20]\n");
    fprintf(encabezado, "set yrange [0:20]\n");
    fprintf(encabezado, "set zrange [0:20]\n\n");
    fprintf(encabezado, "set colorbox\n");
    fprintf(encabezado, "set pm3d\n");
    fprintf(encabezado, "set title \"%d\"\n\n", secuencia);
    fprintf(encabezado, "splot \"%s\" using 1:2:3:4 with pm3d\n", newpath);
    fprintf(encabezado, "reset\n");
    fclose(encabezado);
  }
  else
  {
    printf(
        "\n***\nError al intentar escribir el archivo de ecabezado.\n***\n");
    return 1;
  }
  // Run gnuplot on the (space-escaped) script to produce the image.
  limpiarPath(pathenca, f_path);
  strcpy(command, "gnuplot ");
  strcat(command, f_path);
  printf("Ejecutando comando: %s\n", command);
  flag = system(command);
  printf("Mensaje del comando %d\n", flag);
  return 0;
}
// Variant of prepararVisualizacionGNUPlot that skips the halo rows/columns
// of the padded matrix: only cells [1, filas-2] x [1, columnas-2] are
// written, with coordinates shifted so the visible grid starts at (x0, y0).
//
// Returns 0 on success, 1 on any failure (empty path, file write error).
// Fixes vs. the previous version: the function now actually returns a
// value, the yrange is sized by columns (the y axis follows j) instead of
// rows, and a stray argument to a format string was removed.
int prepararVisualizacionGNUPlot_2(int secuencia, char *path, int filas,
                                   int columnas, mapCell *matriz,
                                   int cifrasSignif, double w, double x0,
                                   double y0)
{
  FILE *datosAltitud;
  FILE *encabezado;
  int i, j, flag;
  double xcoord, ycoord, zcoord, temp;
  long int cont;
  char newpath[500], tsec[20], pathenca[550], command[700], f_path[1024];
  (void)cifrasSignif; // TODO: honour the requested significant figures
  if (strlen(path) == 0)
    return 1; // nothing to write without a base path
  strcpy(newpath, path);
  sprintf(tsec, "%d", secuencia);
  strcat(newpath, tsec);
  printf("Escribiendo archivo: %s\n", newpath);
  datosAltitud = fopen(newpath, "w");
  if (datosAltitud != NULL)
  {
    printf("Guardando en archivo...\n");
    cont = 0;
    // Skip the halo: iterate the interior cells only, shifting indices by
    // one so the output coordinates start at (x0, y0).
    for (j = 1; j < columnas - 1; ++j)
    {
      for (i = 1; i < filas - 1; ++i)
      {
        xcoord = x0 + (i - 1) * w;
        ycoord = y0 + (j - 1) * w;
        // surface height = base altitude + lava thickness
        zcoord = matriz[columnas * i + j].thickness +
                 matriz[columnas * i + j].altitude;
        temp = matriz[columnas * i + j].temperature;
        fprintf(datosAltitud, "%6.3lf %6.3lf %6.8lf %6.8lf\n", xcoord, ycoord,
                zcoord, temp);
        cont += 1;
      }
      // blank line between scan lines (required by pm3d / isolines)
      fprintf(datosAltitud, "\n");
    }
    printf("Se escribieron %ld elementos.\n\n", cont);
    fclose(datosAltitud);
  }
  else
  {
    printf("***\nError al intentar escribir el archivo de datos.\n***\n\n");
    return 1;
  }
  strcpy(pathenca, newpath);
  strcat(pathenca, "_enca");
  printf("Guardando archivo de encabezado %s \n", pathenca);
  encabezado = fopen(pathenca, "w");
  if (encabezado != NULL)
  {
    // png terminal; switch to "set term postscript eps enhanced color"
    // for eps output if ever needed.
    fprintf(encabezado, "set terminal png size 1366,720 enhanced\n\n");
    fprintf(encabezado, "set output '%s.png'\n", newpath);
    fprintf(encabezado, "set grid\n");
    // Ranges match the un-padded grid: x follows i (rows), y follows j
    // (columns).
    fprintf(encabezado, "set xrange [0:%d]\n", filas - 2);
    fprintf(encabezado, "set yrange [0:%d]\n", columnas - 2);
    fprintf(encabezado, "set zrange [0:3]\n\n");
    fprintf(encabezado, "set colorbox\n");
    fprintf(encabezado, "set pm3d\n");
    fprintf(encabezado, "set title \"%d\"\n\n", secuencia);
    fprintf(encabezado, "splot \"%s\" using 1:2:3:4 with pm3d\n", newpath);
    fprintf(encabezado, "reset\n");
    fclose(encabezado);
  }
  else
  {
    printf(
        "\n***\nError al intentar escribir el archivo de ecabezado.\n***\n");
    return 1;
  }
  // Run gnuplot on the (space-escaped) script to produce the image.
  limpiarPath(pathenca, f_path);
  strcpy(command, "gnuplot ");
  strcat(command, f_path);
  printf("Ejecutando comando: %s\n", command);
  flag = system(command);
  printf("Mensaje del comando %d\n", flag);
  return 0;
}
/*
* Función para obtener el path actual, y de esta manera generar
* las imagenes en el directorio actual.
*/
/*
 * Copies the current working directory into `path` (so output images can
 * be generated in the directory the program was launched from).
 * Returns 0 on success, 1 if getcwd() fails (an error is printed).
 */
int obtenerPath(char path[])
{
  char buffer[1024];
  if (getcwd(buffer, sizeof(buffer)) == NULL)
  {
    perror("getcwd() error");
    return 1;
  }
  fprintf(stdout, "Current working dir: %s\n", buffer);
  strcpy(path, buffer);
  return 0;
}
// función para colocar \ donde sean necesarios y de esta manera
// poder usar paths y nombres de archivos con espacios.
// Escapes every space in `path` with a backslash so the result can be used
// as a single shell argument (e.g. for the gnuplot system() call), writing
// the escaped string into `spath`. The caller must provide a buffer large
// enough for the worst case (every character a space doubles the length).
// Returns the length of the escaped string.
//
// Fixes vs. the previous version: the loop is order-dependent (the output
// index advances a variable amount per character), so the old
// "#pragma omp parallel for" was a data race that scrambled the output;
// the function also never returned despite being declared int.
int limpiarPath(char path[], char spath[])
{
  int out = 0;
  int lg = (int)strlen(path);
  for (int i = 0; i < lg; ++i)
  {
    if (path[i] == ' ')
    {
      spath[out++] = '\\';
      spath[out++] = ' ';
    }
    else
    {
      spath[out++] = path[i];
    }
  }
  spath[out] = '\0';
  return out;
}
// Generates a video from the still frames produced by the visualization
// functions. Not implemented yet; the intended command is roughly:
//   mencoder mf://*.png -mf w=1366:h=720:fps=5:type=png \
//            -ovc copy -oac copy -o <nombreArchivo>.avi
// Returns 0 (previously the function fell off the end without returning,
// which is undefined behaviour for callers reading the result).
int generarAnimacionGNUPlot(char nombreArchivo[], int secuencia)
{
  (void)nombreArchivo;
  (void)secuencia;
  return 0; // TODO: build and run the mencoder command
}
// Acá va la función main.
// Entry point: parses the command-line options, loads the terrain and
// crater files, runs the simulation for c0.timeSteps steps (writing a
// GNUplot frame every 5 steps) and reports the elapsed wall-clock time.
//
// Fixes vs. the previous version: the time-step loop was marked
// "#pragma omp parallel for" although each step consumes the state produced
// by the previous one (resultCalc is copied back into resultPoint), so it
// must run sequentially; the final printf had a typo and an extra argument;
// the allocated matrices are now freed.
int main(int argc, char *argv[])
{
  double itime, ftime, exec_time;
  itime = omp_get_wtime();
  int i, flag = 0;
  mapCell *testPoint, *resultPoint, *resultCalc, *resultPoint2;
  point2D *crateres = NULL;
  int puntosCrater = 0;
  char s_path[1024];
  char a_path[1024];
  char etiqueta[1024];
  char path[1024];
  int option;
  while ((option = getopt(argc, argv, "t:v:w:s:a:r:c:p:e:n:")) != -1)
  {
    switch (option)
    {
    case 't': // eruption temperature
      c0.eruptionTemperature = atof(optarg);
      printf("%s", optarg);
      break;
    case 'v': // eruption rate
      c0.eruptionRate = atof(optarg);
      break;
    case 'w': // width of the (square) cells
      c0.cellWidth = atof(optarg);
      break;
    case 's': // crater-location file
      strcpy(s_path, optarg);
      break;
    case 'a': // altitude (terrain) file
      strcpy(a_path, optarg);
      break;
    case 'r': // number of rows
      c0.maxRows = atol(optarg);
      break;
    case 'c': // number of columns
      c0.maxColumns = atol(optarg);
      break;
    case 'p': // number of craters
      puntosCrater = atol(optarg);
      break;
    case 'e': // base label for the output files of this run
      strcpy(etiqueta, optarg);
      break;
    case 'n': // number of time steps
      c0.timeSteps = atol(optarg);
      break;
    }
  }
  // dt is hard-coded for now; make it configurable later.
  c0.deltat = time_delta;
  // Allocate the working matrices: the raw terrain plus the halo-padded
  // (+2 per dimension) versions the simulation kernel operates on.
  testPoint = (mapCell *)malloc(c0.maxRows * c0.maxColumns * sizeof(mapCell));
  resultPoint = (mapCell *)malloc((c0.maxRows + 2) * (c0.maxColumns + 2) *
                                  sizeof(mapCell));
  resultCalc = (mapCell *)malloc((c0.maxRows + 2) * (c0.maxColumns + 2) *
                                 sizeof(mapCell));
  resultPoint2 =
      (mapCell *)malloc(c0.maxRows * c0.maxColumns * sizeof(mapCell));
  // Load the height map.
  if (readTerrainFile(a_path, c0.maxRows, c0.maxColumns, testPoint))
  {
    // Load crater positions and place them on the terrain.
    crateres = (point2D *)malloc(puntosCrater * sizeof(point2D));
    if (readCratersPositionFile(s_path, puntosCrater, crateres))
    {
      placeCraters(testPoint, crateres, c0.maxRows, c0.maxColumns,
                   puntosCrater);
      preFuncion(c0.maxRows, c0.maxColumns, testPoint, resultPoint);
      // NOTE: this loop is inherently sequential — step i reads the state
      // written by step i-1 — so it must NOT be parallelized.
      for (i = 0; i < c0.timeSteps; i++)
      {
        printf("\n\nPaso de Tiempo %d: \n\n", i);
        FuncionPrincipal(c0.maxRows + 2, c0.maxColumns + 2, resultPoint,
                         resultCalc);
        flag = obtenerPath(path);
        strcat(path, "/");
        strcat(path, etiqueta);
        strcat(path, "_");
        if (!(flag))
        {
          // Emit a visualization frame every 5 steps.
          if (i % 5 == 0)
          {
            prepararVisualizacionGNUPlot_2(i, path, c0.maxRows + 2,
                                           c0.maxColumns + 2, resultCalc, 3,
                                           c0.cellWidth, 0, 0);
          }
        }
        else
        {
          printf("Problemas con el path\n");
        }
        // Commit this step's result as the next step's input.
        memcpy(resultPoint, resultCalc,
               (c0.maxRows + 2) * (c0.maxColumns + 2) * sizeof(mapCell));
      }
      postFuncion(c0.maxRows + 2, c0.maxColumns + 2, resultCalc, resultPoint2);
    }
  }
  // Release all working buffers (free(NULL) is a no-op for crateres when
  // the terrain file failed to load).
  free(crateres);
  free(testPoint);
  free(resultPoint);
  free(resultCalc);
  free(resultPoint2);
  ftime = omp_get_wtime();
  exec_time = ftime - itime;
  printf("\n\nTime taken is %f\n", exec_time);
  return 0;
}
|
ASTMatchers.h | //===- ASTMatchers.h - Structural query framework ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements matchers to be used together with the MatchFinder to
// match AST nodes.
//
// Matchers are created by generator functions, which can be combined in
// a functional in-language DSL to express queries over the C++ AST.
//
// For example, to match a class with a certain name, one would call:
// cxxRecordDecl(hasName("MyClass"))
// which returns a matcher that can be used to find all AST nodes that declare
// a class named 'MyClass'.
//
// For more complicated match expressions we're often interested in accessing
// multiple parts of the matched AST nodes once a match is found. In that case,
// use the id(...) matcher around the match expressions that match the nodes
// you want to access.
//
// For example, when we're interested in child classes of a certain class, we
// would write:
// cxxRecordDecl(hasName("MyClass"), has(id("child", recordDecl())))
// When the match is found via the MatchFinder, a user provided callback will
// be called with a BoundNodes instance that contains a mapping from the
// strings that we provided for the id(...) calls to the nodes that were
// matched.
// In the given example, each time our matcher finds a match we get a callback
// where "child" is bound to the RecordDecl node of the matching child
// class declaration.
//
// See ASTMatchersInternal.h for a more in-depth explanation of the
// implementation details of the matcher framework.
//
// See ASTMatchFinder.h for how to use the generated matchers to run over
// an AST.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
#define LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTTypeTraits.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/ASTMatchers/ASTMatchersInternal.h"
#include "clang/ASTMatchers/ASTMatchersMacros.h"
#include "clang/Basic/AttrKinds.h"
#include "clang/Basic/ExceptionSpecificationType.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TypeTraits.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Regex.h"
#include <cassert>
#include <cstddef>
#include <iterator>
#include <limits>
#include <string>
#include <utility>
#include <vector>
namespace clang {
namespace ast_matchers {
/// Maps string IDs to AST nodes matched by parts of a matcher.
///
/// The bound nodes are generated by calling \c bind("id") on the node matchers
/// of the nodes we want to access later.
///
/// The instances of BoundNodes are created by \c MatchFinder when the user's
/// callbacks are executed every time a match is found.
class BoundNodes {
public:
/// Returns the AST node bound to \c ID.
///
/// Returns NULL if there was no node bound to \c ID or if there is a node but
/// it cannot be converted to the specified type.
template <typename T>
const T *getNodeAs(StringRef ID) const {
return MyBoundNodes.getNodeAs<T>(ID);
}
/// Type of mapping from binding identifiers to bound nodes. This type
/// is an associative container with a key type of \c std::string and a value
/// type of \c clang::ast_type_traits::DynTypedNode
using IDToNodeMap = internal::BoundNodesMap::IDToNodeMap;
/// Retrieve mapping from binding identifiers to bound nodes.
const IDToNodeMap &getMap() const {
return MyBoundNodes.getMap();
}
private:
friend class internal::BoundNodesTreeBuilder;
/// Create BoundNodes from a pre-filled map of bindings.
BoundNodes(internal::BoundNodesMap &MyBoundNodes)
: MyBoundNodes(MyBoundNodes) {}
internal::BoundNodesMap MyBoundNodes;
};
/// If the provided matcher matches a node, binds the node to \c ID.
///
/// FIXME: Do we want to support this now that we have bind()?
template <typename T>
internal::Matcher<T> id(StringRef ID,
                        const internal::BindableMatcher<T> &InnerMatcher) {
  // Equivalent to calling bind() directly on the inner matcher.
  auto BoundMatcher = InnerMatcher.bind(ID);
  return BoundMatcher;
}
/// Types of matchers for the top-level classes in the AST class
/// hierarchy.
/// @{
using DeclarationMatcher = internal::Matcher<Decl>;
using StatementMatcher = internal::Matcher<Stmt>;
using TypeMatcher = internal::Matcher<QualType>;
using TypeLocMatcher = internal::Matcher<TypeLoc>;
using NestedNameSpecifierMatcher = internal::Matcher<NestedNameSpecifier>;
using NestedNameSpecifierLocMatcher = internal::Matcher<NestedNameSpecifierLoc>;
using CXXCtorInitializerMatcher = internal::Matcher<CXXCtorInitializer>;
/// @}
/// Matches any node.
///
/// Useful when another matcher requires a child matcher, but there's no
/// additional constraint. This will often be used with an explicit conversion
/// to an \c internal::Matcher<> type such as \c TypeMatcher.
///
/// Example: \c DeclarationMatcher(anything()) matches all declarations, e.g.,
/// \code
/// "int* p" and "void f()" in
/// int* p;
/// void f();
/// \endcode
///
/// Usable as: Any Matcher
inline internal::TrueMatcher anything() { return {}; }
/// Matches the top declaration context.
///
/// Given
/// \code
/// int X;
/// namespace NS {
/// int Y;
/// } // namespace NS
/// \endcode
/// decl(hasDeclContext(translationUnitDecl()))
/// matches "int X", but not "int Y".
extern const internal::VariadicDynCastAllOfMatcher<Decl, TranslationUnitDecl>
translationUnitDecl;
/// Matches typedef declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typedefDecl()
/// matches "typedef int X", but not "using Y = int"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefDecl>
typedefDecl;
/// Matches typedef name declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typedefNameDecl()
/// matches "typedef int X" and "using Y = int"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefNameDecl>
typedefNameDecl;
/// Matches type alias declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typeAliasDecl()
/// matches "using Y = int", but not "typedef int X"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasDecl>
typeAliasDecl;
/// Matches type alias template declarations.
///
/// typeAliasTemplateDecl() matches
/// \code
/// template <typename T>
/// using Y = X<T>;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasTemplateDecl>
typeAliasTemplateDecl;
/// Matches AST nodes that were expanded within the main-file.
///
/// Example matches X but not Y
/// (matcher = cxxRecordDecl(isExpansionInMainFile())
/// \code
/// #include <Y.h>
/// class X {};
/// \endcode
/// Y.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER(isExpansionInMainFile,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) {
  // Resolve macro expansions first, then test against the main file.
  const SourceManager &SM = Finder->getASTContext().getSourceManager();
  return SM.isInMainFile(SM.getExpansionLoc(Node.getBeginLoc()));
}
/// Matches AST nodes that were expanded within system-header-files.
///
/// Example matches Y but not X
/// (matcher = cxxRecordDecl(isExpansionInSystemHeader())
/// \code
/// #include <SystemHeader.h>
/// class X {};
/// \endcode
/// SystemHeader.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER(isExpansionInSystemHeader,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) {
  const SourceManager &SM = Finder->getASTContext().getSourceManager();
  SourceLocation ExpansionLoc = SM.getExpansionLoc(Node.getBeginLoc());
  // A node without a valid location cannot come from a system header.
  if (ExpansionLoc.isInvalid())
    return false;
  return SM.isInSystemHeader(ExpansionLoc);
}
/// Matches AST nodes that were expanded within files whose name is
/// partially matching a given regex.
///
/// Example matches Y but not X
/// (matcher = cxxRecordDecl(isExpansionInFileMatching("AST.*"))
/// \code
/// #include "ASTMatcher.h"
/// class X {};
/// \endcode
/// ASTMatcher.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER_P(isExpansionInFileMatching,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc),
                          std::string, RegExp) {
  const SourceManager &SM = Finder->getASTContext().getSourceManager();
  SourceLocation ExpansionLoc = SM.getExpansionLoc(Node.getBeginLoc());
  if (ExpansionLoc.isInvalid())
    return false;
  // Resolve the expansion location to a concrete file; built-ins and
  // command-line definitions have no file entry and never match.
  const auto *Entry = SM.getFileEntryForID(SM.getFileID(ExpansionLoc));
  if (!Entry)
    return false;
  // NOTE(review): the regex is recompiled on every candidate node.
  return llvm::Regex(RegExp).match(Entry->getName());
}
/// Matches declarations.
///
/// Examples matches \c X, \c C, and the friend declaration inside \c C;
/// \code
/// void X();
/// class C {
/// friend X;
/// };
/// \endcode
extern const internal::VariadicAllOfMatcher<Decl> decl;
/// Matches a declaration of a linkage specification.
///
/// Given
/// \code
/// extern "C" {}
/// \endcode
/// linkageSpecDecl()
/// matches "extern "C" {}"
extern const internal::VariadicDynCastAllOfMatcher<Decl, LinkageSpecDecl>
linkageSpecDecl;
/// Matches a declaration of anything that could have a name.
///
/// Example matches \c X, \c S, the anonymous union type, \c i, and \c U;
/// \code
/// typedef int X;
/// struct S {
/// union {
/// int i;
/// } U;
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamedDecl> namedDecl;
/// Matches a declaration of label.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// labelDecl()
/// matches 'FOO:'
extern const internal::VariadicDynCastAllOfMatcher<Decl, LabelDecl> labelDecl;
/// Matches a declaration of a namespace.
///
/// Given
/// \code
/// namespace {}
/// namespace test {}
/// \endcode
/// namespaceDecl()
/// matches "namespace {}" and "namespace test {}"
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceDecl>
namespaceDecl;
/// Matches a declaration of a namespace alias.
///
/// Given
/// \code
/// namespace test {}
/// namespace alias = ::test;
/// \endcode
/// namespaceAliasDecl()
/// matches "namespace alias" but not "namespace test"
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceAliasDecl>
namespaceAliasDecl;
/// Matches class, struct, and union declarations.
///
/// Example matches \c X, \c Z, \c U, and \c S
/// \code
/// class X;
/// template<class T> class Z {};
/// struct S {};
/// union U {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, RecordDecl> recordDecl;
/// Matches C++ class declarations.
///
/// Example matches \c X, \c Z
/// \code
/// class X;
/// template<class T> class Z {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXRecordDecl>
cxxRecordDecl;
/// Matches C++ class template declarations.
///
/// Example matches \c Z
/// \code
/// template<class T> class Z {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ClassTemplateDecl>
classTemplateDecl;
/// Matches C++ class template specializations.
///
/// Given
/// \code
/// template<typename T> class A {};
/// template<> class A<double> {};
/// A<int> a;
/// \endcode
/// classTemplateSpecializationDecl()
/// matches the specializations \c A<int> and \c A<double>
extern const internal::VariadicDynCastAllOfMatcher<
Decl, ClassTemplateSpecializationDecl>
classTemplateSpecializationDecl;
/// Matches C++ class template partial specializations.
///
/// Given
/// \code
/// template<class T1, class T2, int I>
/// class A {};
///
/// template<class T, int I>
/// class A<T, T*, I> {};
///
/// template<>
/// class A<int, int, 1> {};
/// \endcode
/// classTemplatePartialSpecializationDecl()
/// matches the specialization \c A<T,T*,I> but not \c A<int,int,1>
extern const internal::VariadicDynCastAllOfMatcher<
Decl, ClassTemplatePartialSpecializationDecl>
classTemplatePartialSpecializationDecl;
/// Matches declarator declarations (field, variable, function
/// and non-type template parameter declarations).
///
/// Given
/// \code
/// class X { int y; };
/// \endcode
/// declaratorDecl()
/// matches \c int y.
extern const internal::VariadicDynCastAllOfMatcher<Decl, DeclaratorDecl>
declaratorDecl;
/// Matches parameter variable declarations.
///
/// Given
/// \code
/// void f(int x);
/// \endcode
/// parmVarDecl()
/// matches \c int x.
extern const internal::VariadicDynCastAllOfMatcher<Decl, ParmVarDecl>
parmVarDecl;
/// Matches C++ access specifier declarations.
///
/// Given
/// \code
/// class C {
/// public:
/// int a;
/// };
/// \endcode
/// accessSpecDecl()
/// matches 'public:'
extern const internal::VariadicDynCastAllOfMatcher<Decl, AccessSpecDecl>
accessSpecDecl;
/// Matches constructor initializers.
///
/// Examples matches \c i(42).
/// \code
/// class C {
/// C() : i(42) {}
/// int i;
/// };
/// \endcode
extern const internal::VariadicAllOfMatcher<CXXCtorInitializer>
cxxCtorInitializer;
/// Matches template arguments.
///
/// Given
/// \code
/// template <typename T> struct C {};
/// C<int> c;
/// \endcode
/// templateArgument()
/// matches 'int' in C<int>.
extern const internal::VariadicAllOfMatcher<TemplateArgument> templateArgument;
/// Matches template name.
///
/// Given
/// \code
/// template <typename T> class X { };
/// X<int> xi;
/// \endcode
/// templateName()
/// matches 'X' in X<int>.
extern const internal::VariadicAllOfMatcher<TemplateName> templateName;
/// Matches non-type template parameter declarations.
///
/// Given
/// \code
/// template <typename T, int N> struct C {};
/// \endcode
/// nonTypeTemplateParmDecl()
/// matches 'N', but not 'T'.
extern const internal::VariadicDynCastAllOfMatcher<Decl,
NonTypeTemplateParmDecl>
nonTypeTemplateParmDecl;
/// Matches template type parameter declarations.
///
/// Given
/// \code
/// template <typename T, int N> struct C {};
/// \endcode
/// templateTypeParmDecl()
/// matches 'T', but not 'N'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, TemplateTypeParmDecl>
templateTypeParmDecl;
/// Matches public C++ declarations.
///
/// Given
/// \code
/// class C {
/// public: int a;
/// protected: int b;
/// private: int c;
/// };
/// \endcode
/// fieldDecl(isPublic())
/// matches 'int a;'
/// The declaration's access specifier must be exactly 'public'.
AST_MATCHER(Decl, isPublic) { return Node.getAccess() == AS_public; }
/// Matches protected C++ declarations.
///
/// Given
/// \code
/// class C {
/// public: int a;
/// protected: int b;
/// private: int c;
/// };
/// \endcode
/// fieldDecl(isProtected())
/// matches 'int b;'
/// The declaration's access specifier must be exactly 'protected'.
AST_MATCHER(Decl, isProtected) { return Node.getAccess() == AS_protected; }
/// Matches private C++ declarations.
///
/// Given
/// \code
/// class C {
/// public: int a;
/// protected: int b;
/// private: int c;
/// };
/// \endcode
/// fieldDecl(isPrivate())
/// matches 'int c;'
/// The declaration's access specifier must be exactly 'private'.
AST_MATCHER(Decl, isPrivate) { return Node.getAccess() == AS_private; }
/// Matches non-static data members that are bit-fields.
///
/// Given
/// \code
/// class C {
/// int a : 2;
/// int b;
/// };
/// \endcode
/// fieldDecl(isBitField())
/// matches 'int a;' but not 'int b;'.
AST_MATCHER(FieldDecl, isBitField) { return Node.isBitField(); }
/// Matches non-static data members that are bit-fields of the specified
/// bit width.
///
/// Given
/// \code
/// class C {
/// int a : 2;
/// int b : 4;
/// int c : 2;
/// };
/// \endcode
/// fieldDecl(hasBitWidth(2))
/// matches 'int a;' and 'int c;' but not 'int b;'.
AST_MATCHER_P(FieldDecl, hasBitWidth, unsigned, Width) {
  // Only bit-fields have a width to compare against.
  if (!Node.isBitField())
    return false;
  return Node.getBitWidthValue(Finder->getASTContext()) == Width;
}
/// Matches non-static data members that have an in-class initializer.
///
/// Given
/// \code
/// class C {
/// int a = 2;
/// int b = 3;
/// int c;
/// };
/// \endcode
/// fieldDecl(hasInClassInitializer(integerLiteral(equals(2))))
/// matches 'int a;' but not 'int b;'.
/// fieldDecl(hasInClassInitializer(anything()))
/// matches 'int a;' and 'int b;' but not 'int c;'.
AST_MATCHER_P(FieldDecl, hasInClassInitializer, internal::Matcher<Expr>,
              InnerMatcher) {
  // Fields without an in-class initializer never match.
  if (const Expr *Init = Node.getInClassInitializer())
    return InnerMatcher.matches(*Init, Finder, Builder);
  return false;
}
/// Determines whether the function is "main", which is the entry point
/// into an executable program.
AST_MATCHER(FunctionDecl, isMain) { return Node.isMain(); }
/// Matches the specialized template of a specialization declaration.
///
/// Given
/// \code
/// template<typename T> class A {}; #1
/// template<> class A<int> {}; #2
/// \endcode
/// classTemplateSpecializationDecl(hasSpecializedTemplate(classTemplateDecl()))
/// matches '#2' with classTemplateDecl() matching the class template
/// declaration of 'A' at #1.
AST_MATCHER_P(ClassTemplateSpecializationDecl, hasSpecializedTemplate,
              internal::Matcher<ClassTemplateDecl>, InnerMatcher) {
  // Delegate to InnerMatcher on the primary template this specialization
  // was instantiated from, if one is recorded.
  if (const ClassTemplateDecl *Specialized = Node.getSpecializedTemplate())
    return InnerMatcher.matches(*Specialized, Finder, Builder);
  return false;
}
/// Matches a declaration that has been implicitly added
/// by the compiler (eg. implicit default/copy constructors).
AST_MATCHER(Decl, isImplicit) { return Node.isImplicit(); }
/// Matches classTemplateSpecializations, templateSpecializationType and
/// functionDecl that have at least one TemplateArgument matching the given
/// InnerMatcher.
///
/// Given
/// \code
/// template<typename T> class A {};
/// template<> class A<double> {};
/// A<int> a;
///
/// template<typename T> f() {};
/// void func() { f<int>(); };
/// \endcode
///
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToType(asString("int"))))
/// matches the specialization \c A<int>
///
/// functionDecl(hasAnyTemplateArgument(refersToType(asString("int"))))
/// matches the specialization \c f<int>
AST_POLYMORPHIC_MATCHER_P(
    hasAnyTemplateArgument,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType,
                                    FunctionDecl),
    internal::Matcher<TemplateArgument>, InnerMatcher) {
  // Succeed as soon as any argument of the specialization satisfies
  // InnerMatcher.
  ArrayRef<TemplateArgument> Args =
      internal::getTemplateSpecializationArgs(Node);
  return matchesFirstInRange(InnerMatcher, Args.begin(), Args.end(), Finder,
                             Builder);
}
/// Matches expressions that match InnerMatcher after any implicit AST
/// nodes are stripped off.
///
/// Parentheses and explicit casts are not discarded.
/// Given
/// \code
/// class C {};
/// C a = C();
/// C b;
/// C c = b;
/// \endcode
/// The matchers
/// \code
/// varDecl(hasInitializer(ignoringImplicit(cxxConstructExpr())))
/// \endcode
/// would match the declarations for a, b, and c.
/// While
/// \code
/// varDecl(hasInitializer(cxxConstructExpr()))
/// \endcode
/// only match the declarations for b and c.
AST_MATCHER_P(Expr, ignoringImplicit, internal::Matcher<Expr>,
              InnerMatcher) {
  // Strip implicit nodes (casts, materializations, ...) before matching.
  const Expr *Stripped = Node.IgnoreImplicit();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches expressions that match InnerMatcher after any implicit casts
/// are stripped off.
///
/// Parentheses and explicit casts are not discarded.
/// Given
/// \code
/// int arr[5];
/// int a = 0;
/// char b = 0;
/// const int c = a;
/// int *d = arr;
/// long e = (long) 0l;
/// \endcode
/// The matchers
/// \code
/// varDecl(hasInitializer(ignoringImpCasts(integerLiteral())))
/// varDecl(hasInitializer(ignoringImpCasts(declRefExpr())))
/// \endcode
/// would match the declarations for a, b, c, and d, but not e.
/// While
/// \code
/// varDecl(hasInitializer(integerLiteral()))
/// varDecl(hasInitializer(declRefExpr()))
/// \endcode
/// only match the declarations for b, c, and d.
AST_MATCHER_P(Expr, ignoringImpCasts,
              internal::Matcher<Expr>, InnerMatcher) {
  // Strip implicit casts only; parens and explicit casts are kept.
  const Expr *Stripped = Node.IgnoreImpCasts();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches expressions that match InnerMatcher after parentheses and
/// casts are stripped off.
///
/// Implicit and non-C Style casts are also discarded.
/// Given
/// \code
/// int a = 0;
/// char b = (0);
/// void* c = reinterpret_cast<char*>(0);
/// char d = char(0);
/// \endcode
/// The matcher
/// varDecl(hasInitializer(ignoringParenCasts(integerLiteral())))
/// would match the declarations for a, b, c, and d.
/// while
/// varDecl(hasInitializer(integerLiteral()))
/// only match the declaration for a.
AST_MATCHER_P(Expr, ignoringParenCasts, internal::Matcher<Expr>, InnerMatcher) {
  // Strip parentheses and all casts (implicit and explicit) before matching.
  const Expr *Stripped = Node.IgnoreParenCasts();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches expressions that match InnerMatcher after implicit casts and
/// parentheses are stripped off.
///
/// Explicit casts are not discarded.
/// Given
/// \code
/// int arr[5];
/// int a = 0;
/// char b = (0);
/// const int c = a;
/// int *d = (arr);
/// long e = ((long) 0l);
/// \endcode
/// The matchers
/// varDecl(hasInitializer(ignoringParenImpCasts(integerLiteral())))
/// varDecl(hasInitializer(ignoringParenImpCasts(declRefExpr())))
/// would match the declarations for a, b, c, and d, but not e.
/// while
/// varDecl(hasInitializer(integerLiteral()))
/// varDecl(hasInitializer(declRefExpr()))
/// would only match the declaration for a.
AST_MATCHER_P(Expr, ignoringParenImpCasts,
              internal::Matcher<Expr>, InnerMatcher) {
  // Strip parentheses and implicit casts; explicit casts are kept.
  const Expr *Stripped = Node.IgnoreParenImpCasts();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches types that match InnerMatcher after any parens are stripped.
///
/// Given
/// \code
/// void (*fp)(void);
/// \endcode
/// The matcher
/// \code
/// varDecl(hasType(pointerType(pointee(ignoringParens(functionType())))))
/// \endcode
/// would match the declaration for fp.
AST_MATCHER_P_OVERLOAD(QualType, ignoringParens, internal::Matcher<QualType>,
                       InnerMatcher, 0) {
  // Match against the type with any ParenType sugar removed.
  const QualType Stripped = Node.IgnoreParens();
  return InnerMatcher.matches(Stripped, Finder, Builder);
}
/// Overload \c ignoringParens for \c Expr.
///
/// Given
/// \code
/// const char* str = ("my-string");
/// \endcode
/// The matcher
/// \code
/// implicitCastExpr(hasSourceExpression(ignoringParens(stringLiteral())))
/// \endcode
/// would match the implicit cast resulting from the assignment.
AST_MATCHER_P_OVERLOAD(Expr, ignoringParens, internal::Matcher<Expr>,
                       InnerMatcher, 1) {
  // Match against the expression with surrounding parentheses removed.
  return InnerMatcher.matches(*Node.IgnoreParens(), Finder, Builder);
}
/// Matches expressions that are instantiation-dependent even if it is
/// neither type- nor value-dependent.
///
/// In the following example, the expression sizeof(sizeof(T() + T()))
/// is instantiation-dependent (since it involves a template parameter T),
/// but is neither type- nor value-dependent, since the type of the inner
/// sizeof is known (std::size_t) and therefore the size of the outer
/// sizeof is known.
/// \code
/// template<typename T>
///   void f(T x, T y) { sizeof(sizeof(T() + T())); }
/// \endcode
/// expr(isInstantiationDependent()) matches sizeof(sizeof(T() + T()))
AST_MATCHER(Expr, isInstantiationDependent) {
  // Instantiation-dependence is broader than type-/value-dependence: it
  // also covers expressions that merely mention a template parameter.
  return Node.isInstantiationDependent();
}
/// Matches expressions that are type-dependent because the template type
/// is not yet instantiated.
///
/// For example, the expressions "x" and "x + y" are type-dependent in
/// the following code, but "y" is not type-dependent:
/// \code
/// template<typename T>
/// void add(T x, int y) {
/// x + y;
/// }
/// \endcode
/// expr(isTypeDependent()) matches x + y
AST_MATCHER(Expr, isTypeDependent) {
  return Node.isTypeDependent();
}
/// Matches expression that are value-dependent because they contain a
/// non-type template parameter.
///
/// For example, the array bound of "Chars" in the following example is
/// value-dependent.
/// \code
/// template<int Size> int f() { return Size; }
/// \endcode
/// expr(isValueDependent()) matches return Size
AST_MATCHER(Expr, isValueDependent) {
  return Node.isValueDependent();
}
/// Matches classTemplateSpecializations, templateSpecializationType and
/// functionDecl where the n'th TemplateArgument matches the given InnerMatcher.
///
/// Given
/// \code
/// template<typename T, typename U> class A {};
/// A<bool, int> b;
/// A<int, bool> c;
///
/// template<typename T> void f() {}
/// void func() { f<int>(); };
/// \endcode
/// classTemplateSpecializationDecl(hasTemplateArgument(
/// 1, refersToType(asString("int"))))
/// matches the specialization \c A<bool, int>
///
/// functionDecl(hasTemplateArgument(0, refersToType(asString("int"))))
/// matches the specialization \c f<int>
AST_POLYMORPHIC_MATCHER_P2(
    hasTemplateArgument,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType,
                                    FunctionDecl),
    unsigned, N, internal::Matcher<TemplateArgument>, InnerMatcher) {
  ArrayRef<TemplateArgument> Args =
      internal::getTemplateSpecializationArgs(Node);
  // An out-of-range index never matches.
  return N < Args.size() && InnerMatcher.matches(Args[N], Finder, Builder);
}
/// Matches if the number of template arguments equals \p N.
///
/// Given
/// \code
/// template<typename T> struct C {};
/// C<int> c;
/// \endcode
/// classTemplateSpecializationDecl(templateArgumentCountIs(1))
/// matches C<int>.
AST_POLYMORPHIC_MATCHER_P(
    templateArgumentCountIs,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType),
    unsigned, N) {
  // Compare the specialization's argument count against the expected N.
  ArrayRef<TemplateArgument> Args =
      internal::getTemplateSpecializationArgs(Node);
  return Args.size() == N;
}
/// Matches a TemplateArgument that refers to a certain type.
///
/// Given
/// \code
/// struct X {};
/// template<typename T> struct A {};
/// A<X> a;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToType(class(hasName("X")))))
/// matches the specialization \c A<X>
AST_MATCHER_P(TemplateArgument, refersToType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Only type template arguments can match.
  return Node.getKind() == TemplateArgument::Type &&
         InnerMatcher.matches(Node.getAsType(), Finder, Builder);
}
/// Matches a TemplateArgument that refers to a certain template.
///
/// Given
/// \code
/// template<template <typename> class S> class X {};
/// template<typename T> class Y {};
/// X<Y> xi;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToTemplate(templateName())))
/// matches the specialization \c X<Y>
AST_MATCHER_P(TemplateArgument, refersToTemplate,
              internal::Matcher<TemplateName>, InnerMatcher) {
  // Only template template arguments can match.
  return Node.getKind() == TemplateArgument::Template &&
         InnerMatcher.matches(Node.getAsTemplate(), Finder, Builder);
}
/// Matches a canonical TemplateArgument that refers to a certain
/// declaration.
///
/// Given
/// \code
/// struct B { int next; };
/// template<int(B::*next_ptr)> struct A {};
/// A<&B::next> a;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToDeclaration(fieldDecl(hasName("next")))))
/// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching
/// \c B::next
AST_MATCHER_P(TemplateArgument, refersToDeclaration,
              internal::Matcher<Decl>, InnerMatcher) {
  // Only declaration template arguments can match.
  if (Node.getKind() != TemplateArgument::Declaration)
    return false;
  return InnerMatcher.matches(*Node.getAsDecl(), Finder, Builder);
}
/// Matches a sugar TemplateArgument that refers to a certain expression.
///
/// Given
/// \code
/// struct B { int next; };
/// template<int(B::*next_ptr)> struct A {};
/// A<&B::next> a;
/// \endcode
/// templateSpecializationType(hasAnyTemplateArgument(
/// isExpr(hasDescendant(declRefExpr(to(fieldDecl(hasName("next"))))))))
/// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching
/// \c B::next
AST_MATCHER_P(TemplateArgument, isExpr, internal::Matcher<Expr>, InnerMatcher) {
  // Only expression template arguments can match.
  if (Node.getKind() != TemplateArgument::Expression)
    return false;
  return InnerMatcher.matches(*Node.getAsExpr(), Finder, Builder);
}
/// Matches a TemplateArgument that is an integral value.
///
/// Given
/// \code
/// template<int T> struct C {};
/// C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
/// hasAnyTemplateArgument(isIntegral()))
/// matches the implicit instantiation of C in C<42>
/// with isIntegral() matching 42.
AST_MATCHER(TemplateArgument, isIntegral) {
  // True for arguments bound to an integral constant (e.g. the 42 in C<42>).
  return Node.getKind() == TemplateArgument::Integral;
}
/// Matches a TemplateArgument that refers to an integral type.
///
/// Given
/// \code
/// template<int T> struct C {};
/// C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
/// hasAnyTemplateArgument(refersToIntegralType(asString("int"))))
/// matches the implicit instantiation of C in C<42>.
AST_MATCHER_P(TemplateArgument, refersToIntegralType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Only integral template arguments carry an integral type to match.
  return Node.getKind() == TemplateArgument::Integral &&
         InnerMatcher.matches(Node.getIntegralType(), Finder, Builder);
}
/// Matches a TemplateArgument of integral type with a given value.
///
/// Note that 'Value' is a string as the template argument's value is
/// an arbitrary precision integer. 'Value' must be equal to the canonical
/// representation of that integral value in base 10.
///
/// Given
/// \code
/// template<int T> struct C {};
/// C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
/// hasAnyTemplateArgument(equalsIntegralValue("42")))
/// matches the implicit instantiation of C in C<42>.
AST_MATCHER_P(TemplateArgument, equalsIntegralValue,
              std::string, Value) {
  // Compare the canonical base-10 rendering of the argument against Value.
  return Node.getKind() == TemplateArgument::Integral &&
         Node.getAsIntegral().toString(10) == Value;
}
/// Matches an Objective-C autorelease pool statement.
///
/// Given
/// \code
/// @autoreleasepool {
/// int x = 0;
/// }
/// \endcode
/// autoreleasePoolStmt(stmt()) matches the declaration of "x"
/// inside the autorelease pool.
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
ObjCAutoreleasePoolStmt> autoreleasePoolStmt;
/// Matches any value declaration.
///
/// Example matches A, B, C and F
/// \code
/// enum X { A, B, C };
/// void F();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ValueDecl> valueDecl;
/// Matches C++ constructor declarations.
///
/// Example matches Foo::Foo() and Foo::Foo(int)
/// \code
/// class Foo {
/// public:
/// Foo();
/// Foo(int);
/// int DoSomething();
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConstructorDecl>
cxxConstructorDecl;
/// Matches explicit C++ destructor declarations.
///
/// Example matches Foo::~Foo()
/// \code
/// class Foo {
/// public:
/// virtual ~Foo();
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDestructorDecl>
cxxDestructorDecl;
/// Matches enum declarations.
///
/// Example matches X
/// \code
/// enum X {
/// A, B, C
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumDecl> enumDecl;
/// Matches enum constants.
///
/// Example matches A, B, C
/// \code
/// enum X {
/// A, B, C
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumConstantDecl>
enumConstantDecl;
/// Matches method declarations.
///
/// Example matches y
/// \code
/// class X { void y(); };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXMethodDecl>
cxxMethodDecl;
/// Matches conversion operator declarations.
///
/// Example matches the operator.
/// \code
/// class X { operator int() const; };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConversionDecl>
cxxConversionDecl;
/// Matches user-defined and implicitly generated deduction guide.
///
/// Example matches the deduction guide.
/// \code
/// template<typename T>
/// class X { X(int) };
/// X(int) -> X<int>;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDeductionGuideDecl>
cxxDeductionGuideDecl;
/// Matches variable declarations.
///
/// Note: this does not match declarations of member variables, which are
/// "field" declarations in Clang parlance.
///
/// Example matches a
/// \code
/// int a;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, VarDecl> varDecl;
/// Matches field declarations.
///
/// Given
/// \code
/// class X { int m; };
/// \endcode
/// fieldDecl()
/// matches 'm'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, FieldDecl> fieldDecl;
/// Matches indirect field declarations.
///
/// Given
/// \code
/// struct X { struct { int a; }; };
/// \endcode
/// indirectFieldDecl()
/// matches 'a'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, IndirectFieldDecl>
indirectFieldDecl;
/// Matches function declarations.
///
/// Example matches f
/// \code
/// void f();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionDecl>
functionDecl;
/// Matches C++ function template declarations.
///
/// Example matches f
/// \code
/// template<class T> void f(T t) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionTemplateDecl>
functionTemplateDecl;
/// Matches friend declarations.
///
/// Given
/// \code
/// class X { friend void foo(); };
/// \endcode
/// friendDecl()
/// matches 'friend void foo()'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, FriendDecl> friendDecl;
/// Matches statements.
///
/// Given
/// \code
/// { ++a; }
/// \endcode
/// stmt()
/// matches both the compound statement '{ ++a; }' and '++a'.
extern const internal::VariadicAllOfMatcher<Stmt> stmt;
/// Matches declaration statements.
///
/// Given
/// \code
/// int a;
/// \endcode
/// declStmt()
/// matches 'int a'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclStmt> declStmt;
/// Matches member expressions.
///
/// Given
/// \code
/// class Y {
/// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
/// int a; static int b;
/// };
/// \endcode
/// memberExpr()
/// matches this->x, x, y.x, a, this->b
extern const internal::VariadicDynCastAllOfMatcher<Stmt, MemberExpr> memberExpr;
/// Matches unresolved member expressions.
///
/// Given
/// \code
/// struct X {
/// template <class T> void f();
/// void g();
/// };
/// template <class T> void h() { X x; x.f<T>(); x.g(); }
/// \endcode
/// unresolvedMemberExpr()
/// matches x.f<T>
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedMemberExpr>
unresolvedMemberExpr;
/// Matches member expressions where the actual member referenced could not be
/// resolved because the base expression or the member name was dependent.
///
/// Given
/// \code
/// template <class T> void f() { T t; t.g(); }
/// \endcode
/// cxxDependentScopeMemberExpr()
/// matches t.g
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXDependentScopeMemberExpr>
cxxDependentScopeMemberExpr;
/// Matches call expressions.
///
/// Example matches x.y() and y()
/// \code
/// X x;
/// x.y();
/// y();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CallExpr> callExpr;
/// Matches call expressions which were resolved using ADL.
///
/// Example matches y(x) but not y(42) or NS::y(x).
/// \code
/// namespace NS {
/// struct X {};
/// void y(X);
/// }
///
/// void y(...);
///
/// void test() {
/// NS::X x;
/// y(x); // Matches
/// NS::y(x); // Doesn't match
/// y(42); // Doesn't match
/// using NS::y;
/// y(x); // Found by both unqualified lookup and ADL, doesn't match
/// }
/// \endcode
AST_MATCHER(CallExpr, usesADL) {
  return Node.usesADL();
}
/// Matches lambda expressions.
///
/// Example matches [&](){return 5;}
/// \code
/// [&](){return 5;}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, LambdaExpr> lambdaExpr;
/// Matches member call expressions.
///
/// Example matches x.y()
/// \code
/// X x;
/// x.y();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXMemberCallExpr>
cxxMemberCallExpr;
/// Matches ObjectiveC Message invocation expressions.
///
/// The innermost message send invokes the "alloc" class method on the
/// NSString class, while the outermost message send invokes the
/// "initWithString" instance method on the object returned from
/// NSString's "alloc". This matcher should match both message sends.
/// \code
/// [[NSString alloc] initWithString:@"Hello"]
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCMessageExpr>
objcMessageExpr;
/// Matches Objective-C interface declarations.
///
/// Example matches Foo
/// \code
/// @interface Foo
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCInterfaceDecl>
objcInterfaceDecl;
/// Matches Objective-C implementation declarations.
///
/// Example matches Foo
/// \code
/// @implementation Foo
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCImplementationDecl>
objcImplementationDecl;
/// Matches Objective-C protocol declarations.
///
/// Example matches FooDelegate
/// \code
/// @protocol FooDelegate
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCProtocolDecl>
objcProtocolDecl;
/// Matches Objective-C category declarations.
///
/// Example matches Foo (Additions)
/// \code
/// @interface Foo (Additions)
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryDecl>
objcCategoryDecl;
/// Matches Objective-C category definitions.
///
/// Example matches Foo (Additions)
/// \code
/// @implementation Foo (Additions)
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryImplDecl>
objcCategoryImplDecl;
/// Matches Objective-C method declarations.
///
/// Example matches both declaration and definition of -[Foo method]
/// \code
/// @interface Foo
/// - (void)method;
/// @end
///
/// @implementation Foo
/// - (void)method {}
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCMethodDecl>
objcMethodDecl;
/// Matches block declarations.
///
/// Example matches the declaration of the nameless block printing an input
/// integer.
///
/// \code
/// myFunc(^(int p) {
/// printf("%d", p);
/// })
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, BlockDecl>
blockDecl;
/// Matches Objective-C instance variable declarations.
///
/// Example matches _enabled
/// \code
/// @implementation Foo {
/// BOOL _enabled;
/// }
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCIvarDecl>
objcIvarDecl;
/// Matches Objective-C property declarations.
///
/// Example matches enabled
/// \code
/// @interface Foo
/// @property BOOL enabled;
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCPropertyDecl>
objcPropertyDecl;
/// Matches Objective-C \@throw statements.
///
/// Example matches \@throw
/// \code
/// @throw obj;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtThrowStmt>
objcThrowStmt;
/// Matches Objective-C @try statements.
///
/// Example matches @try
/// \code
/// @try {}
/// @catch (...) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtTryStmt>
objcTryStmt;
/// Matches Objective-C @catch statements.
///
/// Example matches @catch
/// \code
/// @try {}
/// @catch (...) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtCatchStmt>
objcCatchStmt;
/// Matches Objective-C @finally statements.
///
/// Example matches @finally
/// \code
/// @try {}
/// @finally {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtFinallyStmt>
objcFinallyStmt;
/// Matches expressions that introduce cleanups to be run at the end
/// of the sub-expression's evaluation.
///
/// Example matches std::string()
/// \code
/// const std::string str = std::string();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExprWithCleanups>
exprWithCleanups;
/// Matches init list expressions.
///
/// Given
/// \code
/// int a[] = { 1, 2 };
/// struct B { int x, y; };
/// B b = { 5, 6 };
/// \endcode
/// initListExpr()
/// matches "{ 1, 2 }" and "{ 5, 6 }"
extern const internal::VariadicDynCastAllOfMatcher<Stmt, InitListExpr>
initListExpr;
/// Matches the syntactic form of init list expressions
/// (if the expression has one).
AST_MATCHER_P(InitListExpr, hasSyntacticForm,
              internal::Matcher<Expr>, InnerMatcher) {
  // Only the semantic form of an InitListExpr keeps a link back to its
  // syntactic form; without one the matcher cannot succeed.
  if (const Expr *Syntactic = Node.getSyntacticForm())
    return InnerMatcher.matches(*Syntactic, Finder, Builder);
  return false;
}
/// Matches C++ initializer list expressions.
///
/// Given
/// \code
/// std::vector<int> a({ 1, 2, 3 });
/// std::vector<int> b = { 4, 5 };
/// int c[] = { 6, 7 };
/// std::pair<int, int> d = { 8, 9 };
/// \endcode
/// cxxStdInitializerListExpr()
/// matches "{ 1, 2, 3 }" and "{ 4, 5 }"
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXStdInitializerListExpr>
cxxStdInitializerListExpr;
/// Matches implicit initializers of init list expressions.
///
/// Given
/// \code
/// point ptarray[10] = { [2].y = 1.0, [2].x = 2.0, [0].x = 1.0 };
/// \endcode
/// implicitValueInitExpr()
/// matches "[0].y" (implicitly)
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitValueInitExpr>
implicitValueInitExpr;
/// Matches paren list expressions.
/// ParenListExprs don't have a predefined type and are used for late parsing.
/// In the final AST, they can be met in template declarations.
///
/// Given
/// \code
/// template<typename T> class X {
/// void f() {
/// X x(*this);
/// int a = 0, b = 1; int i = (a, b);
/// }
/// };
/// \endcode
/// parenListExpr() matches "*this" but NOT matches (a, b) because (a, b)
/// has a predefined type and is a ParenExpr, not a ParenListExpr.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenListExpr>
parenListExpr;
/// Matches substitutions of non-type template parameters.
///
/// Given
/// \code
/// template <int N>
/// struct A { static const int n = N; };
/// struct B : public A<42> {};
/// \endcode
/// substNonTypeTemplateParmExpr()
/// matches "N" in the right-hand side of "static const int n = N;"
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
SubstNonTypeTemplateParmExpr>
substNonTypeTemplateParmExpr;
/// Matches using declarations.
///
/// Given
/// \code
/// namespace X { int x; }
/// using X::x;
/// \endcode
/// usingDecl()
/// matches \code using X::x \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDecl> usingDecl;
/// Matches using namespace declarations.
///
/// Given
/// \code
/// namespace X { int x; }
/// using namespace X;
/// \endcode
/// usingDirectiveDecl()
/// matches \code using namespace X \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDirectiveDecl>
usingDirectiveDecl;
/// Matches reference to a name that can be looked up during parsing
/// but could not be resolved to a specific declaration.
///
/// Given
/// \code
/// template<typename T>
/// T foo() { T a; return a; }
/// template<typename T>
/// void bar() {
/// foo<T>();
/// }
/// \endcode
/// unresolvedLookupExpr()
/// matches \code foo<T>() \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedLookupExpr>
unresolvedLookupExpr;
/// Matches unresolved using value declarations.
///
/// Given
/// \code
/// template<typename X>
/// class C : private X {
/// using X::x;
/// };
/// \endcode
/// unresolvedUsingValueDecl()
/// matches \code using X::x \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl,
UnresolvedUsingValueDecl>
unresolvedUsingValueDecl;
/// Matches unresolved using value declarations that involve the
/// typename.
///
/// Given
/// \code
/// template <typename T>
/// struct Base { typedef T Foo; };
///
/// template<typename T>
/// struct S : private Base<T> {
/// using typename Base<T>::Foo;
/// };
/// \endcode
/// unresolvedUsingTypenameDecl()
/// matches \code using Base<T>::Foo \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl,
UnresolvedUsingTypenameDecl>
unresolvedUsingTypenameDecl;
/// Matches a constant expression wrapper.
///
/// Example matches the constant in the case statement:
/// (matcher = constantExpr())
/// \code
/// switch (a) {
/// case 37: break;
/// }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConstantExpr>
constantExpr;
/// Matches parentheses used in expressions.
///
/// Example matches (foo() + 1)
/// \code
/// int foo() { return 1; }
/// int a = (foo() + 1);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenExpr> parenExpr;
/// Matches constructor call expressions (including implicit ones).
///
/// Example matches string(ptr, n) and ptr within arguments of f
/// (matcher = cxxConstructExpr())
/// \code
/// void f(const string &a, const string &b);
/// char *ptr;
/// int n;
/// f(string(ptr, n), ptr);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstructExpr>
cxxConstructExpr;
/// Matches unresolved constructor call expressions.
///
/// Example matches T(t) in return statement of f
/// (matcher = cxxUnresolvedConstructExpr())
/// \code
/// template <typename T>
/// void f(const T& t) { return T(t); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXUnresolvedConstructExpr>
cxxUnresolvedConstructExpr;
/// Matches implicit and explicit this expressions.
///
/// Example matches the implicit this expression in "return i".
/// (matcher = cxxThisExpr())
/// \code
/// struct foo {
/// int i;
/// int f() { return i; }
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThisExpr>
cxxThisExpr;
/// Matches nodes where temporaries are created.
///
/// Example matches FunctionTakesString(GetStringByValue())
/// (matcher = cxxBindTemporaryExpr())
/// \code
/// FunctionTakesString(GetStringByValue());
/// FunctionTakesStringByPointer(GetStringPointer());
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBindTemporaryExpr>
cxxBindTemporaryExpr;
/// Matches nodes where temporaries are materialized.
///
/// Example: Given
/// \code
/// struct T {void func();};
/// T f();
/// void g(T);
/// \endcode
/// materializeTemporaryExpr() matches 'f()' in these statements
/// \code
/// T u(f());
/// g(f());
/// f().func();
/// \endcode
/// but does not match
/// \code
/// f();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
MaterializeTemporaryExpr>
materializeTemporaryExpr;
/// Matches new expressions.
///
/// Given
/// \code
/// new X;
/// \endcode
/// cxxNewExpr()
/// matches 'new X'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNewExpr> cxxNewExpr;
/// Matches delete expressions.
///
/// Given
/// \code
/// delete X;
/// \endcode
/// cxxDeleteExpr()
/// matches 'delete X'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDeleteExpr>
cxxDeleteExpr;
/// Matches array subscript expressions.
///
/// Given
/// \code
/// int i = a[1];
/// \endcode
/// arraySubscriptExpr()
/// matches "a[1]"
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ArraySubscriptExpr>
arraySubscriptExpr;
/// Matches the value of a default argument at the call site.
///
/// Example matches the CXXDefaultArgExpr placeholder inserted for the
/// default value of the second parameter in the call expression f(42)
/// (matcher = cxxDefaultArgExpr())
/// \code
/// void f(int x, int y = 0);
/// f(42);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDefaultArgExpr>
cxxDefaultArgExpr;
/// Matches overloaded operator calls.
///
/// Note that if an operator isn't overloaded, it won't match. Instead, use
/// binaryOperator matcher.
/// Currently it does not match operators such as new delete.
/// FIXME: figure out why these do not match?
///
/// Example matches both operator<<((o << b), c) and operator<<(o, b)
/// (matcher = cxxOperatorCallExpr())
/// \code
/// ostream &operator<< (ostream &out, int i) { };
/// ostream &o; int b = 1, c = 1;
/// o << b << c;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXOperatorCallExpr>
cxxOperatorCallExpr;
/// Matches expressions.
///
/// Example matches x()
/// \code
/// void f() { x(); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, Expr> expr;
/// Matches expressions that refer to declarations.
///
/// Example matches x in if (x)
/// \code
/// bool x;
/// if (x) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclRefExpr>
declRefExpr;
/// Matches a reference to an ObjCIvar.
///
/// Example: matches "a" in "init" method:
/// \code
/// @implementation A {
/// NSString *a;
/// }
/// - (void) init {
/// a = @"hello";
/// }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCIvarRefExpr>
objcIvarRefExpr;
/// Matches a reference to a block.
///
/// Example: matches "^{}":
/// \code
/// void f() { ^{}(); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BlockExpr> blockExpr;
/// Matches if statements.
///
/// Example matches 'if (x) {}'
/// \code
/// if (x) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, IfStmt> ifStmt;
/// Matches for statements.
///
/// Example matches 'for (;;) {}'
/// \code
/// for (;;) {}
/// int i[] = {1, 2, 3}; for (auto a : i);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ForStmt> forStmt;
/// Matches the increment statement of a for loop.
///
/// Example:
///     forStmt(hasIncrement(unaryOperator(hasOperatorName("++"))))
/// matches '++x' in
/// \code
///     for (x; x < N; ++x) { }
/// \endcode
AST_MATCHER_P(ForStmt, hasIncrement, internal::Matcher<Stmt>,
              InnerMatcher) {
  // The increment clause is optional, e.g. 'for (;;)'.
  if (const Stmt *Inc = Node.getInc())
    return InnerMatcher.matches(*Inc, Finder, Builder);
  return false;
}
/// Matches the initialization statement of a for loop.
///
/// Example:
///     forStmt(hasLoopInit(declStmt()))
/// matches 'int x = 0' in
/// \code
///     for (int x = 0; x < N; ++x) { }
/// \endcode
AST_MATCHER_P(ForStmt, hasLoopInit, internal::Matcher<Stmt>,
              InnerMatcher) {
  // The init clause is optional, e.g. 'for (; i < N; ++i)'.
  if (const Stmt *Init = Node.getInit())
    return InnerMatcher.matches(*Init, Finder, Builder);
  return false;
}
/// Matches range-based for statements.
///
/// cxxForRangeStmt() matches 'for (auto a : i)'
/// \code
/// int i[] = {1, 2, 3}; for (auto a : i);
/// for(int j = 0; j < 5; ++j);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXForRangeStmt>
cxxForRangeStmt;
/// Matches the loop variable of a range-based for loop.
///
/// Example:
///     cxxForRangeStmt(hasLoopVariable(anything()))
/// matches 'int x' in
/// \code
///     for (int x : a) { }
/// \endcode
AST_MATCHER_P(CXXForRangeStmt, hasLoopVariable, internal::Matcher<VarDecl>,
              InnerMatcher) {
  // getLoopVariable() can be null for dependent/invalid loops.
  if (const VarDecl *LoopVar = Node.getLoopVariable())
    return InnerMatcher.matches(*LoopVar, Finder, Builder);
  return false;
}
/// Matches the range initialization expression of a range-based for loop.
///
/// Example:
///     cxxForRangeStmt(hasRangeInit(anything()))
/// matches 'a' in
/// \code
///     for (int x : a) { }
/// \endcode
AST_MATCHER_P(CXXForRangeStmt, hasRangeInit, internal::Matcher<Expr>,
              InnerMatcher) {
  // getRangeInit() can be null for dependent/invalid loops.
  if (const Expr *RangeInit = Node.getRangeInit())
    return InnerMatcher.matches(*RangeInit, Finder, Builder);
  return false;
}
/// Matches while statements.
///
/// Given
/// \code
/// while (true) {}
/// \endcode
/// whileStmt()
/// matches 'while (true) {}'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, WhileStmt> whileStmt;
/// Matches do statements.
///
/// Given
/// \code
/// do {} while (true);
/// \endcode
/// doStmt()
/// matches 'do {} while(true)'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DoStmt> doStmt;
/// Matches break statements.
///
/// Given
/// \code
/// while (true) { break; }
/// \endcode
/// breakStmt()
/// matches 'break'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BreakStmt> breakStmt;
/// Matches continue statements.
///
/// Given
/// \code
/// while (true) { continue; }
/// \endcode
/// continueStmt()
/// matches 'continue'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ContinueStmt>
continueStmt;
/// Matches return statements.
///
/// Given
/// \code
/// return 1;
/// \endcode
/// returnStmt()
/// matches 'return 1'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ReturnStmt> returnStmt;
/// Matches goto statements.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// gotoStmt()
/// matches 'goto FOO'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GotoStmt> gotoStmt;
/// Matches label statements.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// labelStmt()
/// matches 'FOO:'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, LabelStmt> labelStmt;
/// Matches address of label statements (GNU extension).
///
/// Given
/// \code
/// FOO: bar();
/// void *ptr = &&FOO;
/// goto *ptr;
/// \endcode
/// addrLabelExpr()
/// matches '&&FOO'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AddrLabelExpr>
addrLabelExpr;
/// Matches switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// switchStmt()
/// matches 'switch(a)'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchStmt> switchStmt;
/// Matches case and default statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// switchCase()
/// matches 'case 42:' and 'default:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchCase> switchCase;
/// Matches case statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// caseStmt()
/// matches 'case 42:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CaseStmt> caseStmt;
/// Matches default statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// defaultStmt()
/// matches 'default:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DefaultStmt>
defaultStmt;
/// Matches compound statements.
///
/// Example matches '{}' and '{{}}' in 'for (;;) {{}}'
/// \code
/// for (;;) {{}}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundStmt>
compoundStmt;
/// Matches catch statements.
///
/// \code
/// try {} catch(int i) {}
/// \endcode
/// cxxCatchStmt()
/// matches 'catch(int i)'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXCatchStmt>
cxxCatchStmt;
/// Matches try statements.
///
/// \code
/// try {} catch(int i) {}
/// \endcode
/// cxxTryStmt()
/// matches 'try {}'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTryStmt> cxxTryStmt;
/// Matches throw expressions.
///
/// \code
/// try { throw 5; } catch(int i) {}
/// \endcode
/// cxxThrowExpr()
/// matches 'throw 5'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThrowExpr>
cxxThrowExpr;
/// Matches null statements.
///
/// \code
/// foo();;
/// \endcode
/// nullStmt()
/// matches the second ';'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, NullStmt> nullStmt;
/// Matches asm statements.
///
/// \code
/// int i = 100;
/// __asm("mov al, 2");
/// \endcode
/// asmStmt()
/// matches '__asm("mov al, 2")'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AsmStmt> asmStmt;
/// Matches bool literals.
///
/// Example matches true
/// \code
/// true
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBoolLiteralExpr>
cxxBoolLiteral;
/// Matches string literals (also matches wide string literals).
///
/// Example matches "abcd", L"abcd"
/// \code
/// char *s = "abcd";
/// wchar_t *ws = L"abcd";
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, StringLiteral>
stringLiteral;
/// Matches character literals (also matches wchar_t).
///
/// Not matching Hex-encoded chars (e.g. 0x1234, which is a IntegerLiteral),
/// though.
///
/// Example matches 'a', L'a'
/// \code
/// char ch = 'a';
/// wchar_t chw = L'a';
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CharacterLiteral>
characterLiteral;
/// Matches integer literals of all sizes / encodings, e.g.
/// 1, 1L, 0x1 and 1U.
///
/// Does not match character-encoded integers such as L'a'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, IntegerLiteral>
integerLiteral;
/// Matches float literals of all sizes / encodings, e.g.
/// 1.0, 1.0f, 1.0L and 1e10.
///
/// Does not match implicit conversions such as
/// \code
/// float a = 10;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, FloatingLiteral>
floatLiteral;
/// Matches imaginary literals, which are based on integer and floating
/// point literals e.g.: 1i, 1.0i
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImaginaryLiteral>
imaginaryLiteral;
/// Matches user defined literal operator call.
///
/// Example match: "foo"_suffix
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UserDefinedLiteral>
userDefinedLiteral;
/// Matches compound (i.e. non-scalar) literals
///
/// Example match: {1}, (1, 2)
/// \code
/// int array[4] = {1};
/// vector int myvec = (vector int)(1, 2);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundLiteralExpr>
compoundLiteralExpr;
/// Matches nullptr literal.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNullPtrLiteralExpr>
cxxNullPtrLiteralExpr;
/// Matches GNU __builtin_choose_expr.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ChooseExpr>
chooseExpr;
/// Matches GNU __null expression.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GNUNullExpr>
gnuNullExpr;
/// Matches atomic builtins.
/// Example matches __atomic_load_n(ptr, 1)
/// \code
/// void foo() { int *ptr; __atomic_load_n(ptr, 1); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AtomicExpr> atomicExpr;
/// Matches statement expression (GNU extension).
///
/// Example match: ({ int X = 4; X; })
/// \code
/// int C = ({ int X = 4; X; });
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, StmtExpr> stmtExpr;
/// Matches binary operator expressions.
///
/// Example matches a || b
/// \code
/// !(a || b)
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryOperator>
binaryOperator;
/// Matches unary operator expressions.
///
/// Example matches !a
/// \code
/// !a || b
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryOperator>
unaryOperator;
/// Matches conditional operator expressions.
///
/// Example matches a ? b : c
/// \code
/// (a ? b : c) + 42
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConditionalOperator>
conditionalOperator;
/// Matches binary conditional operator expressions (GNU extension).
///
/// Example matches a ?: b
/// \code
/// (a ?: b) + 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
BinaryConditionalOperator>
binaryConditionalOperator;
/// Matches opaque value expressions. They are used as helpers
/// to reference another expressions and can be met
/// in BinaryConditionalOperators, for example.
///
/// Example matches 'a'
/// \code
/// (a ?: c) + 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, OpaqueValueExpr>
opaqueValueExpr;
/// Matches a C++ static_assert declaration.
///
/// Example:
///   staticAssertDecl()
/// matches
/// static_assert(sizeof(S) == sizeof(int))
/// in
/// \code
/// struct S {
/// int x;
/// };
/// static_assert(sizeof(S) == sizeof(int));
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, StaticAssertDecl>
staticAssertDecl;
/// Matches a reinterpret_cast expression.
///
/// Either the source expression or the destination type can be matched
/// using has(), but hasDestinationType() is more specific and can be
/// more readable.
///
/// Example matches reinterpret_cast<char*>(&p) in
/// \code
/// void* p = reinterpret_cast<char*>(&p);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXReinterpretCastExpr>
cxxReinterpretCastExpr;
/// Matches a C++ static_cast expression.
///
/// \see hasDestinationType
/// \see reinterpretCast
///
/// Example:
/// cxxStaticCastExpr()
/// matches
/// static_cast<long>(8)
/// in
/// \code
/// long eight(static_cast<long>(8));
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStaticCastExpr>
cxxStaticCastExpr;
/// Matches a dynamic_cast expression.
///
/// Example:
/// cxxDynamicCastExpr()
/// matches
/// dynamic_cast<D*>(&b);
/// in
/// \code
/// struct B { virtual ~B() {} }; struct D : B {};
/// B b;
/// D* p = dynamic_cast<D*>(&b);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDynamicCastExpr>
cxxDynamicCastExpr;
/// Matches a const_cast expression.
///
/// Example: Matches const_cast<int*>(&r) in
/// \code
/// int n = 42;
/// const int &r(n);
/// int* p = const_cast<int*>(&r);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstCastExpr>
cxxConstCastExpr;
/// Matches a C-style cast expression.
///
/// Example: Matches (int) 2.2f in
/// \code
/// int i = (int) 2.2f;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CStyleCastExpr>
cStyleCastExpr;
/// Matches explicit cast expressions.
///
/// Matches any cast expression written in user code, whether it be a
/// C-style cast, a functional-style cast, or a keyword cast.
///
/// Does not match implicit conversions.
///
/// Note: the name "explicitCast" is chosen to match Clang's terminology, as
/// Clang uses the term "cast" to apply to implicit conversions as well as to
/// actual cast expressions.
///
/// \see hasDestinationType.
///
/// Example: matches all five of the casts in
/// \code
/// int((int)(reinterpret_cast<int>(static_cast<int>(const_cast<int>(42)))))
/// \endcode
/// but does not match the implicit conversion in
/// \code
/// long ell = 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExplicitCastExpr>
explicitCastExpr;
/// Matches the implicit cast nodes of Clang's AST.
///
/// This matches many different places, including function call return value
/// eliding, as well as any type conversions.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitCastExpr>
implicitCastExpr;
/// Matches any cast nodes of Clang's AST.
///
/// Example: castExpr() matches each of the following:
/// \code
/// (int) 3;
/// const_cast<Expr *>(SubExpr);
/// char c = 0;
/// \endcode
/// but does not match
/// \code
/// int i = (0);
/// int k = 0;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CastExpr> castExpr;
/// Matches functional cast expressions
///
/// Example: Matches Foo(bar);
/// \code
/// Foo f = bar;
/// Foo g = (Foo) bar;
/// Foo h = Foo(bar);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXFunctionalCastExpr>
cxxFunctionalCastExpr;
/// Matches functional cast expressions having N != 1 arguments
///
/// Example: Matches Foo(bar, bar)
/// \code
/// Foo h = Foo(bar, bar);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTemporaryObjectExpr>
cxxTemporaryObjectExpr;
/// Matches predefined identifier expressions [C99 6.4.2.2].
///
/// Example: Matches __func__
/// \code
/// printf("%s", __func__);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, PredefinedExpr>
predefinedExpr;
/// Matches C99 designated initializer expressions [C99 6.7.8].
///
/// Example: Matches { [2].y = 1.0, [0].x = 1.0 }
/// \code
/// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DesignatedInitExpr>
designatedInitExpr;
/// Matches designated initializer expressions that contain
/// a specific number of designators.
///
/// Example: Given
/// \code
///   point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 };
///   point ptarray2[10] = { [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 };
/// \endcode
/// designatorCountIs(2)
///   matches '{ [2].y = 1.0, [0].x = 1.0 }',
///   but not '{ [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }'.
AST_MATCHER_P(DesignatedInitExpr, designatorCountIs, unsigned, N) {
  // size() reports the number of designators in this initializer.
  return N == Node.size();
}
/// Matches \c QualTypes in the clang AST.
extern const internal::VariadicAllOfMatcher<QualType> qualType;
/// Matches \c Types in the clang AST.
extern const internal::VariadicAllOfMatcher<Type> type;
/// Matches \c TypeLocs in the clang AST.
extern const internal::VariadicAllOfMatcher<TypeLoc> typeLoc;
/// Matches if any of the given matchers matches.
///
/// Unlike \c anyOf, \c eachOf will generate a match result for each
/// matching submatcher.
///
/// For example, in:
/// \code
/// class A { int a; int b; };
/// \endcode
/// The matcher:
/// \code
/// cxxRecordDecl(eachOf(has(fieldDecl(hasName("a")).bind("v")),
/// has(fieldDecl(hasName("b")).bind("v"))))
/// \endcode
/// will generate two results binding "v", the first of which binds
/// the field declaration of \c a, the second the field declaration of
/// \c b.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
eachOf;
/// Matches if any of the given matchers matches.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
anyOf;
/// Matches if all given matchers match.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
allOf;
/// Matches sizeof (C99), alignof (C++11) and vec_step (OpenCL)
///
/// Given
/// \code
/// Foo x = bar;
/// int y = sizeof(x) + alignof(x);
/// \endcode
/// unaryExprOrTypeTraitExpr()
/// matches \c sizeof(x) and \c alignof(x)
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
UnaryExprOrTypeTraitExpr>
unaryExprOrTypeTraitExpr;
/// Matches unary expressions that have a specific type of argument.
///
/// Given
/// \code
///   int a, c; float b; int s = sizeof(a) + sizeof(b) + alignof(c);
/// \endcode
/// unaryExprOrTypeTraitExpr(hasArgumentOfType(asString("int")))
///   matches \c sizeof(a) and \c alignof(c)
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, hasArgumentOfType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Match against the type the trait is applied to.
  return InnerMatcher.matches(Node.getTypeOfArgument(), Finder, Builder);
}
/// Matches unary expressions of a certain kind.
///
/// Given
/// \code
///   int x;
///   int s = sizeof(x) + alignof(x)
/// \endcode
/// unaryExprOrTypeTraitExpr(ofKind(UETT_SizeOf))
///   matches \c sizeof(x)
///
/// If the matcher is used from clang-query, the UnaryExprOrTypeTrait
/// parameter should be passed as a quoted string, e.g. ofKind("UETT_SizeOf").
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, ofKind, UnaryExprOrTypeTrait, Kind) {
  return Kind == Node.getKind();
}
/// Same as unaryExprOrTypeTraitExpr, but only matching
/// alignof.
inline internal::Matcher<Stmt> alignOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  // Accept both standard alignof and the "preferred" alignment variant.
  return stmt(unaryExprOrTypeTraitExpr(
      allOf(anyOf(ofKind(UETT_AlignOf), ofKind(UETT_PreferredAlignOf)),
            InnerMatcher)));
}
/// Same as unaryExprOrTypeTraitExpr, but only matching
/// sizeof.
inline internal::Matcher<Stmt> sizeOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  return stmt(unaryExprOrTypeTraitExpr(allOf(ofKind(UETT_SizeOf),
                                             InnerMatcher)));
}
/// Matches NamedDecl nodes that have the specified name.
///
/// Supports specifying enclosing namespaces or classes by prefixing the name
/// with '<enclosing>::'.
/// Does not match typedefs of an underlying type with the given name.
///
/// Example matches X (Name == "X")
/// \code
///   class X;
/// \endcode
///
/// Example matches X (Name is one of "::a::b::X", "a::b::X", "b::X", "X")
/// \code
///   namespace a { namespace b { class X; } }
/// \endcode
inline internal::Matcher<NamedDecl> hasName(const std::string &Name) {
  // HasNameMatcher accepts a list of names; wrap the single name in a
  // one-element list.
  auto *Impl = new internal::HasNameMatcher({Name});
  return internal::Matcher<NamedDecl>(Impl);
}
/// Matches NamedDecl nodes that have any of the specified names.
///
/// This matcher is only provided as a performance optimization of hasName.
/// \code
/// hasAnyName(a, b, c)
/// \endcode
/// is equivalent to, but faster than
/// \code
/// anyOf(hasName(a), hasName(b), hasName(c))
/// \endcode
extern const internal::VariadicFunction<internal::Matcher<NamedDecl>, StringRef,
internal::hasAnyNameFunc>
hasAnyName;
/// Matches NamedDecl nodes whose fully qualified names contain
/// a substring matched by the given RegExp.
///
/// Supports specifying enclosing namespaces or classes by
/// prefixing the name with '<enclosing>::'. Does not match typedefs
/// of an underlying type with the given name.
///
/// Example matches X (regexp == "::X")
/// \code
///   class X;
/// \endcode
///
/// Example matches X (regexp is one of "::X", "^foo::.*X", among others)
/// \code
///   namespace foo { namespace bar { class X; } }
/// \endcode
AST_MATCHER_P(NamedDecl, matchesName, std::string, RegExp) {
  assert(!RegExp.empty());
  // Prefix with "::" so patterns can anchor on the fully qualified name.
  const std::string FullName = "::" + Node.getQualifiedNameAsString();
  return llvm::Regex(RegExp).match(FullName);
}
/// Matches overloaded operator names.
///
/// Matches overloaded operator names specified in strings without the
/// "operator" prefix: e.g. "<<".
///
/// Given:
/// \code
///   class A { int operator*(); };
///   const A &operator<<(const A &a, const A &b);
///   A a;
///   a << a;   // <-- This matches
/// \endcode
///
/// \c cxxOperatorCallExpr(hasOverloadedOperatorName("<<"))) matches the
/// specified line and
/// \c cxxRecordDecl(hasMethod(hasOverloadedOperatorName("*")))
/// matches the declaration of \c A.
///
/// Usable as: Matcher<CXXOperatorCallExpr>, Matcher<FunctionDecl>
inline internal::PolymorphicMatcherWithParam1<
    internal::HasOverloadedOperatorNameMatcher, StringRef,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>
hasOverloadedOperatorName(StringRef Name) {
  // Name the (verbose) result type once instead of spelling it out inline.
  using ResultType = internal::PolymorphicMatcherWithParam1<
      internal::HasOverloadedOperatorNameMatcher, StringRef,
      AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>;
  return ResultType(Name);
}
/// Matches C++ classes that are directly or indirectly derived from
/// a class matching \c Base.
///
/// Note that a class is not considered to be derived from itself.
///
/// Example matches Y, Z, C (Base == hasName("X"))
/// \code
///   class X;
///   class Y : public X {};  // directly derived
///   class Z : public Y {};  // indirectly derived
///   typedef X A;
///   typedef A B;
///   class C : public B {};  // derived from a typedef of X
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("X")):
/// \code
///   class Foo;
///   typedef Foo X;
///   class Bar : public Foo {};  // derived from a type that X is a typedef of
/// \endcode
AST_MATCHER_P(CXXRecordDecl, isDerivedFrom,
              internal::Matcher<NamedDecl>, Base) {
  // Directly=false: consider the entire inheritance chain, not just
  // immediate base classes.
  const bool Directly = false;
  return Finder->classIsDerivedFrom(&Node, Base, Builder, Directly);
}
/// Overloaded method as shortcut for \c isDerivedFrom(hasName(...)).
AST_MATCHER_P_OVERLOAD(CXXRecordDecl, isDerivedFrom, std::string, BaseName, 1) {
  // An empty string cannot name a base class; fail fast.
  return !BaseName.empty() &&
         isDerivedFrom(hasName(BaseName)).matches(Node, Finder, Builder);
}
/// Similar to \c isDerivedFrom(), but also matches classes that directly
/// match \c Base.
AST_MATCHER_P_OVERLOAD(CXXRecordDecl, isSameOrDerivedFrom,
                       internal::Matcher<NamedDecl>, Base, 0) {
  // The class matches if it is Base itself or anything derived from it.
  const Matcher<CXXRecordDecl> SelfOrDerived(anyOf(Base, isDerivedFrom(Base)));
  return SelfOrDerived.matches(Node, Finder, Builder);
}
/// Overloaded method as shortcut for
/// \c isSameOrDerivedFrom(hasName(...)).
AST_MATCHER_P_OVERLOAD(CXXRecordDecl, isSameOrDerivedFrom, std::string,
                       BaseName, 1) {
  // An empty string cannot name a class; fail fast.
  return !BaseName.empty() &&
         isSameOrDerivedFrom(hasName(BaseName)).matches(Node, Finder, Builder);
}
/// Matches C++ classes that are directly derived from a class matching \c Base.
///
/// Note that a class is not considered to be derived from itself.
///
/// Example matches Y, C (Base == hasName("X"))
/// \code
/// class X;
/// class Y : public X {}; // directly derived
/// class Z : public Y {}; // indirectly derived
/// typedef X A;
/// typedef A B;
/// class C : public B {}; // derived from a typedef of X
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("X")):
/// \code
/// class Foo;
/// typedef Foo X;
/// class Bar : public Foo {}; // derived from a type that X is a typedef of
/// \endcode
AST_MATCHER_P_OVERLOAD(CXXRecordDecl, isDirectlyDerivedFrom,
                       internal::Matcher<NamedDecl>, Base, 0) {
  // Same walk as isDerivedFrom, but restricted to direct bases.
  return Finder->classIsDerivedFrom(&Node, Base, Builder, /*Directly=*/true);
}
/// Overloaded method as shortcut for \c isDirectlyDerivedFrom(hasName(...)).
AST_MATCHER_P_OVERLOAD(CXXRecordDecl, isDirectlyDerivedFrom, std::string,
                       BaseName, 1) {
  // An empty name can never identify a base class, so reject it up front.
  return !BaseName.empty() &&
         isDirectlyDerivedFrom(hasName(BaseName))
             .matches(Node, Finder, Builder);
}
/// Matches the first method of a class or struct that satisfies \c
/// InnerMatcher.
///
/// Given:
/// \code
/// class A { void func(); };
/// class B { void member(); };
/// \endcode
///
/// \c cxxRecordDecl(hasMethod(hasName("func"))) matches the declaration of
/// \c A but not \c B.
AST_MATCHER_P(CXXRecordDecl, hasMethod, internal::Matcher<CXXMethodDecl>,
              InnerMatcher) {
  // Succeeds on the first method of the record that satisfies InnerMatcher.
  return matchesFirstInPointerRange(InnerMatcher, Node.method_begin(),
                                    Node.method_end(), Finder, Builder);
}
/// Matches the generated class of lambda expressions.
///
/// Given:
/// \code
/// auto x = []{};
/// \endcode
///
/// \c cxxRecordDecl(isLambda()) matches the implicit class declaration of
/// \c decltype(x)
AST_MATCHER(CXXRecordDecl, isLambda) {
  // True for the compiler-generated closure class of a lambda expression.
  return Node.isLambda();
}
/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y
/// (matcher = cxxRecordDecl(has(cxxRecordDecl(hasName("X")))
/// \code
/// class X {}; // Matches X, because X::X is a class of name X inside X.
/// class Y { class X {}; };
/// class Z { class Y { class X {}; }; }; // Does not match Z.
/// \endcode
///
/// ChildT must be an AST base type.
///
/// Usable as: Any Matcher
/// Note that has is direct matcher, so it also matches things like implicit
/// casts and paren casts. If you are matching with expr then you should
/// probably consider using ignoringParenImpCasts like:
/// has(ignoringParenImpCasts(expr())).
extern const internal::ArgumentAdaptingMatcherFunc<internal::HasMatcher> has;
/// Matches AST nodes that have descendant AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y, Z
/// (matcher = cxxRecordDecl(hasDescendant(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {}; // Matches X, because X::X is a class of name X inside X.
/// class Y { class X {}; };
/// class Z { class Y { class X {}; }; };
/// \endcode
///
/// DescendantT must be an AST base type.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::HasDescendantMatcher>
hasDescendant;
/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y, Y::X, Z::Y, Z::Y::X
/// (matcher = cxxRecordDecl(forEach(cxxRecordDecl(hasName("X")))
/// \code
/// class X {};
/// class Y { class X {}; }; // Matches Y, because Y::X is a class of name X
/// // inside Y.
/// class Z { class Y { class X {}; }; }; // Does not match Z.
/// \endcode
///
/// ChildT must be an AST base type.
///
/// As opposed to 'has', 'forEach' will cause a match for each result that
/// matches instead of only on the first one.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<internal::ForEachMatcher>
forEach;
/// Matches AST nodes that have descendant AST nodes that match the
/// provided matcher.
///
/// Example matches X, A, A::X, B, B::C, B::C::X
/// (matcher = cxxRecordDecl(forEachDescendant(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {};
/// class A { class X {}; }; // Matches A, because A::X is a class of name
/// // X inside A.
/// class B { class C { class X {}; }; };
/// \endcode
///
/// DescendantT must be an AST base type.
///
/// As opposed to 'hasDescendant', 'forEachDescendant' will cause a match for
/// each result that matches instead of only on the first one.
///
/// Note: Recursively combined ForEachDescendant can cause many matches:
/// cxxRecordDecl(forEachDescendant(cxxRecordDecl(
/// forEachDescendant(cxxRecordDecl())
/// )))
/// will match 10 times (plus injected class name matches) on:
/// \code
/// class A { class B { class C { class D { class E {}; }; }; }; };
/// \endcode
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::ForEachDescendantMatcher>
forEachDescendant;
/// Matches if the node or any descendant matches.
///
/// Generates results for each match.
///
/// For example, in:
/// \code
/// class A { class B {}; class C {}; };
/// \endcode
/// The matcher:
/// \code
/// cxxRecordDecl(hasName("::A"),
/// findAll(cxxRecordDecl(isDefinition()).bind("m")))
/// \endcode
/// will generate results for \c A, \c B and \c C.
///
/// Usable as: Any Matcher
template <typename T>
internal::Matcher<T> findAll(const internal::Matcher<T> &Matcher) {
  // eachOf yields a result for the node itself and one per matching
  // descendant, which is exactly the "find all" semantics documented above.
  return eachOf(Matcher, forEachDescendant(Matcher));
}
/// Matches AST nodes that have a parent that matches the provided
/// matcher.
///
/// Given
/// \code
/// void f() { for (;;) { int x = 42; if (true) { int x = 43; } } }
/// \endcode
/// \c compoundStmt(hasParent(ifStmt())) matches "{ int x = 43; }".
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::HasParentMatcher,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>>
hasParent;
/// Matches AST nodes that have an ancestor that matches the provided
/// matcher.
///
/// Given
/// \code
/// void f() { if (true) { int x = 42; } }
/// void g() { for (;;) { int x = 43; } }
/// \endcode
/// \c expr(integerLiteral(hasAncestor(ifStmt()))) matches \c 42, but not 43.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::HasAncestorMatcher,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>>
hasAncestor;
/// Matches if the provided matcher does not match.
///
/// Example matches Y (matcher = cxxRecordDecl(unless(hasName("X"))))
/// \code
/// class X {};
/// class Y {};
/// \endcode
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<1, 1> unless;
/// Matches a node if the declaration associated with that node
/// matches the given matcher.
///
/// The associated declaration is:
/// - for type nodes, the declaration of the underlying type
/// - for CallExpr, the declaration of the callee
/// - for MemberExpr, the declaration of the referenced member
/// - for CXXConstructExpr, the declaration of the constructor
/// - for CXXNewExpr, the declaration of the operator new
/// - for ObjCIvarExpr, the declaration of the ivar
///
/// For type nodes, hasDeclaration will generally match the declaration of the
/// sugared type. Given
/// \code
/// class X {};
/// typedef X Y;
/// Y y;
/// \endcode
/// in varDecl(hasType(hasDeclaration(decl()))) the decl will match the
/// typedefDecl. A common use case is to match the underlying, desugared type.
/// This can be achieved by using the hasUnqualifiedDesugaredType matcher:
/// \code
/// varDecl(hasType(hasUnqualifiedDesugaredType(
/// recordType(hasDeclaration(decl())))))
/// \endcode
/// In this matcher, the decl will match the CXXRecordDecl of class X.
///
/// Usable as: Matcher<AddrLabelExpr>, Matcher<CallExpr>,
/// Matcher<CXXConstructExpr>, Matcher<CXXNewExpr>, Matcher<DeclRefExpr>,
/// Matcher<EnumType>, Matcher<InjectedClassNameType>, Matcher<LabelStmt>,
/// Matcher<MemberExpr>, Matcher<QualType>, Matcher<RecordType>,
/// Matcher<TagType>, Matcher<TemplateSpecializationType>,
/// Matcher<TemplateTypeParmType>, Matcher<TypedefType>,
/// Matcher<UnresolvedUsingType>
inline internal::PolymorphicMatcherWithParam1<
    internal::HasDeclarationMatcher, internal::Matcher<Decl>,
    void(internal::HasDeclarationSupportedTypes)>
hasDeclaration(const internal::Matcher<Decl> &InnerMatcher) {
  // The polymorphic wrapper restricts use of this matcher to the node kinds
  // listed in HasDeclarationSupportedTypes.
  return internal::PolymorphicMatcherWithParam1<
      internal::HasDeclarationMatcher, internal::Matcher<Decl>,
      void(internal::HasDeclarationSupportedTypes)>(InnerMatcher);
}
/// Matches a \c NamedDecl whose underlying declaration matches the given
/// matcher.
///
/// Given
/// \code
/// namespace N { template<class T> void f(T t); }
/// template <class T> void g() { using N::f; f(T()); }
/// \endcode
/// \c unresolvedLookupExpr(hasAnyDeclaration(
/// namedDecl(hasUnderlyingDecl(hasName("::N::f")))))
/// matches the use of \c f in \c g() .
AST_MATCHER_P(NamedDecl, hasUnderlyingDecl, internal::Matcher<NamedDecl>,
              InnerMatcher) {
  // Match against the underlying declaration, when one exists.
  if (const NamedDecl *Underlying = Node.getUnderlyingDecl())
    return InnerMatcher.matches(*Underlying, Finder, Builder);
  return false;
}
/// Matches on the implicit object argument of a member call expression, after
/// stripping off any parentheses or implicit casts.
///
/// Given
/// \code
/// class Y { public: void m(); };
/// Y g();
/// class X : public Y {};
/// void z(Y y, X x) { y.m(); (g()).m(); x.m(); }
/// \endcode
/// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("Y")))))
/// matches `y.m()` and `(g()).m()`.
/// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("X")))))
/// matches `x.m()`.
/// cxxMemberCallExpr(on(callExpr()))
/// matches `(g()).m()`.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, on, internal::Matcher<Expr>,
              InnerMatcher) {
  // Null-check the implicit object argument *before* stripping parentheses
  // and implicit casts. The previous code dereferenced the possibly-null
  // pointer first and then performed a null check that could no longer fail.
  const Expr *ExprNode = Node.getImplicitObjectArgument();
  return (ExprNode != nullptr &&
          InnerMatcher.matches(*ExprNode->IgnoreParenImpCasts(), Finder,
                               Builder));
}
/// Matches on the receiver of an ObjectiveC Message expression.
///
/// Example
/// matcher = objCMessageExpr(hasReceiverType(asString("UIWebView *")));
/// matches the [webView ...] message invocation.
/// \code
/// NSString *webViewJavaScript = ...
/// UIWebView *webView = ...
///   [webView stringByEvaluatingJavaScriptFromString:webViewJavaScript];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, hasReceiverType, internal::Matcher<QualType>,
              InnerMatcher) {
  // Run the inner matcher on the receiver's type (a QualType).
  const QualType ReceiverType = Node.getReceiverType();
  return InnerMatcher.matches(ReceiverType, Finder, Builder);
}
/// Returns true when the Objective-C method declaration is a class method.
///
/// Example
/// matcher = objcMethodDecl(isClassMethod())
/// matches
/// \code
/// @interface I + (void)foo; @end
/// \endcode
/// but not
/// \code
/// @interface I - (void)bar; @end
/// \endcode
AST_MATCHER(ObjCMethodDecl, isClassMethod) {
  // Delegates directly to the AST node's own predicate.
  return Node.isClassMethod();
}
/// Returns true when the Objective-C method declaration is an instance method.
///
/// Example
/// matcher = objcMethodDecl(isInstanceMethod())
/// matches
/// \code
/// @interface I - (void)bar; @end
/// \endcode
/// but not
/// \code
/// @interface I + (void)foo; @end
/// \endcode
AST_MATCHER(ObjCMethodDecl, isInstanceMethod) {
  // Delegates directly to the AST node's own predicate.
  return Node.isInstanceMethod();
}
/// Returns true when the Objective-C message is sent to a class.
///
/// Example
/// matcher = objcMessageExpr(isClassMessage())
/// matches
/// \code
/// [NSString stringWithFormat:@"format"];
/// \endcode
/// but not
/// \code
/// NSString *x = @"hello";
/// [x containsString:@"h"];
/// \endcode
AST_MATCHER(ObjCMessageExpr, isClassMessage) {
  // Delegates directly to the AST node's own predicate.
  return Node.isClassMessage();
}
/// Returns true when the Objective-C message is sent to an instance.
///
/// Example
/// matcher = objcMessageExpr(isInstanceMessage())
/// matches
/// \code
/// NSString *x = @"hello";
/// [x containsString:@"h"];
/// \endcode
/// but not
/// \code
/// [NSString stringWithFormat:@"format"];
/// \endcode
AST_MATCHER(ObjCMessageExpr, isInstanceMessage) {
  // Delegates directly to the AST node's own predicate.
  return Node.isInstanceMessage();
}
/// Matches if the Objective-C message is sent to an instance,
/// and the inner matcher matches on that instance.
///
/// For example the method call in
/// \code
/// NSString *x = @"hello";
/// [x containsString:@"h"];
/// \endcode
/// is matched by
/// objcMessageExpr(hasReceiver(declRefExpr(to(varDecl(hasName("x"))))))
AST_MATCHER_P(ObjCMessageExpr, hasReceiver, internal::Matcher<Expr>,
              InnerMatcher) {
  // Only messages with an instance receiver can match; parens and implicit
  // casts around the receiver are stripped before matching.
  const Expr *Receiver = Node.getInstanceReceiver();
  if (Receiver == nullptr)
    return false;
  return InnerMatcher.matches(*Receiver->IgnoreParenImpCasts(), Finder,
                              Builder);
}
/// Matches when BaseName == Selector.getAsString()
///
/// matcher = objCMessageExpr(hasSelector("loadHTMLString:baseURL:"));
/// matches the outer message expr in the code below, but NOT the message
/// invocation for self.bodyView.
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, hasSelector, std::string, BaseName) {
  // Exact string comparison against the selector's printed form.
  return Node.getSelector().getAsString() == BaseName;
}
/// Matches when at least one of the supplied string equals to the
/// Selector.getAsString()
///
/// matcher = objCMessageExpr(hasSelector("methodA:", "methodB:"));
/// matches both of the expressions below:
/// \code
/// [myObj methodA:argA];
/// [myObj methodB:argB];
/// \endcode
extern const internal::VariadicFunction<internal::Matcher<ObjCMessageExpr>,
StringRef,
internal::hasAnySelectorFunc>
hasAnySelector;
/// Matches ObjC selectors whose name contains
/// a substring matched by the given RegExp.
/// matcher = objCMessageExpr(matchesSelector("loadHTMLString\:baseURL?"));
/// matches the outer message expr in the code below, but NOT the message
/// invocation for self.bodyView.
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, matchesSelector, std::string, RegExp) {
  assert(!RegExp.empty());
  std::string SelectorString = Node.getSelector().getAsString();
  // NOTE(review): the llvm::Regex is recompiled for every candidate node;
  // consider caching the compiled regex if this shows up in profiles.
  llvm::Regex RE(RegExp);
  return RE.match(SelectorString);
}
/// Matches when the selector is the empty selector
///
/// Matches only when the selector of the objCMessageExpr is NULL. This may
/// represent an error condition in the tree!
AST_MATCHER(ObjCMessageExpr, hasNullSelector) {
  // A null selector usually indicates a malformed AST (see doc above).
  return Node.getSelector().isNull();
}
/// Matches when the selector is a Unary Selector
///
///     matcher = objCMessageExpr(hasUnarySelector());
/// matches self.bodyView in the code below, but NOT the outer message
/// invocation of "loadHTMLString:baseURL:".
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER(ObjCMessageExpr, hasUnarySelector) {
  // Delegates directly to the selector's own predicate.
  return Node.getSelector().isUnarySelector();
}
/// Matches when the selector is a keyword selector
///
/// objCMessageExpr(hasKeywordSelector()) matches the generated setFrame
/// message expression in
///
/// \code
/// UIWebView *webView = ...;
/// CGRect bodyFrame = webView.frame;
/// bodyFrame.size.height = self.bodyContentHeight;
/// webView.frame = bodyFrame;
/// // ^---- matches here
/// \endcode
AST_MATCHER(ObjCMessageExpr, hasKeywordSelector) {
  // Delegates directly to the selector's own predicate.
  return Node.getSelector().isKeywordSelector();
}
/// Matches when the selector has the specified number of arguments
///
/// matcher = objCMessageExpr(numSelectorArgs(0));
/// matches self.bodyView in the code below
///
/// matcher = objCMessageExpr(numSelectorArgs(2));
/// matches the invocation of "loadHTMLString:baseURL:" but not that
/// of self.bodyView
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, numSelectorArgs, unsigned, N) {
  // Counts the arguments encoded in the selector itself.
  return Node.getSelector().getNumArgs() == N;
}
/// Matches if the call expression's callee expression matches.
///
/// Given
/// \code
/// class Y { void x() { this->x(); x(); Y y; y.x(); } };
/// void f() { f(); }
/// \endcode
/// callExpr(callee(expr()))
/// matches this->x(), x(), y.x(), f()
/// with callee(...)
/// matching this->x, x, y.x, f respectively
///
/// Note: Callee cannot take the more general internal::Matcher<Expr>
/// because this introduces ambiguous overloads with calls to Callee taking a
/// internal::Matcher<Decl>, as the matcher hierarchy is purely
/// implemented in terms of implicit casts.
AST_MATCHER_P(CallExpr, callee, internal::Matcher<Stmt>,
              InnerMatcher) {
  // Match against the call's callee expression, when present.
  if (const Expr *Callee = Node.getCallee())
    return InnerMatcher.matches(*Callee, Finder, Builder);
  return false;
}
/// Matches if the call expression's callee's declaration matches the
/// given matcher.
///
/// Example matches y.x() (matcher = callExpr(callee(
/// cxxMethodDecl(hasName("x")))))
/// \code
/// class Y { public: void x(); };
/// void z() { Y y; y.x(); }
/// \endcode
AST_MATCHER_P_OVERLOAD(CallExpr, callee, internal::Matcher<Decl>, InnerMatcher,
                       1) {
  // Convenience overload: resolve the callee's declaration via
  // hasDeclaration and match the Decl matcher against it.
  return callExpr(hasDeclaration(InnerMatcher)).matches(Node, Finder, Builder);
}
/// Matches if the expression's or declaration's type matches a type
/// matcher.
///
/// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X")))))
/// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X")))))
/// and U (matcher = typedefDecl(hasType(asString("int")))
/// and friend class X (matcher = friendDecl(hasType("X"))
/// \code
/// class X {};
/// void y(X &x) { x; X z; }
/// typedef int U;
/// class Y { friend class X; };
/// \endcode
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    hasType,
    AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, TypedefNameDecl,
                                    ValueDecl),
    internal::Matcher<QualType>, InnerMatcher, 0) {
  // getUnderlyingType extracts the QualType appropriate to each supported
  // node kind; a null QualType never matches.
  QualType QT = internal::getUnderlyingType(Node);
  if (!QT.isNull())
    return InnerMatcher.matches(QT, Finder, Builder);
  return false;
}
/// Overloaded to match the declaration of the expression's or value
/// declaration's type.
///
/// In case of a value declaration (for example a variable declaration),
/// this resolves one layer of indirection. For example, in the value
/// declaration "X x;", cxxRecordDecl(hasName("X")) matches the declaration of
/// X, while varDecl(hasType(cxxRecordDecl(hasName("X")))) matches the
/// declaration of x.
///
/// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X")))))
/// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X")))))
/// and friend class X (matcher = friendDecl(hasType("X"))
/// \code
/// class X {};
/// void y(X &x) { x; X z; }
/// class Y { friend class X; };
/// \endcode
///
/// Usable as: Matcher<Expr>, Matcher<ValueDecl>
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    hasType, AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, ValueDecl),
    internal::Matcher<Decl>, InnerMatcher, 1) {
  // Resolve the node's type, then match the Decl matcher against that
  // type's declaration; a null QualType never matches.
  QualType QT = internal::getUnderlyingType(Node);
  if (!QT.isNull())
    return qualType(hasDeclaration(InnerMatcher)).matches(QT, Finder, Builder);
  return false;
}
/// Matches if the type location of the declarator decl's type matches
/// the inner matcher.
///
/// Given
/// \code
/// int x;
/// \endcode
/// declaratorDecl(hasTypeLoc(loc(asString("int"))))
/// matches int x
AST_MATCHER_P(DeclaratorDecl, hasTypeLoc, internal::Matcher<TypeLoc>, Inner) {
  // Implicit declarations (e.g. implicit destructors) carry no
  // TypeSourceInfo, so there is no TypeLoc to match against.
  const auto *TSI = Node.getTypeSourceInfo();
  return TSI != nullptr && Inner.matches(TSI->getTypeLoc(), Finder, Builder);
}
/// Matches if the matched type is represented by the given string.
///
/// Given
/// \code
/// class Y { public: void x(); };
/// void z() { Y* y; y->x(); }
/// \endcode
/// cxxMemberCallExpr(on(hasType(asString("class Y *"))))
/// matches y->x()
AST_MATCHER_P(QualType, asString, std::string, Name) {
  // Exact comparison against the type's printed representation.
  return Name == Node.getAsString();
}
/// Matches if the matched type is a pointer type and the pointee type
/// matches the specified matcher.
///
/// Example matches y->x()
/// (matcher = cxxMemberCallExpr(on(hasType(pointsTo
/// cxxRecordDecl(hasName("Y")))))))
/// \code
/// class Y { public: void x(); };
/// void z() { Y *y; y->x(); }
/// \endcode
AST_MATCHER_P(QualType, pointsTo, internal::Matcher<QualType>, InnerMatcher) {
  // Null and non-pointer QualTypes never match.
  if (Node.isNull() || !Node->isAnyPointerType())
    return false;
  return InnerMatcher.matches(Node->getPointeeType(), Finder, Builder);
}
/// Overloaded to match the pointee type's declaration.
AST_MATCHER_P_OVERLOAD(QualType, pointsTo, internal::Matcher<Decl>,
                       InnerMatcher, 1) {
  // Convenience overload: wrap the Decl matcher via hasDeclaration.
  return pointsTo(qualType(hasDeclaration(InnerMatcher)))
      .matches(Node, Finder, Builder);
}
/// Matches if the matched type matches the unqualified desugared
/// type of the matched node.
///
/// For example, in:
/// \code
/// class A {};
/// using B = A;
/// \endcode
/// The matcher type(hasUnqualifiedDesugaredType(recordType())) matches
/// both B and A.
AST_MATCHER_P(Type, hasUnqualifiedDesugaredType, internal::Matcher<Type>,
              InnerMatcher) {
  // Strips sugar (e.g. typedefs) and qualifiers before matching.
  return InnerMatcher.matches(*Node.getUnqualifiedDesugaredType(), Finder,
                              Builder);
}
/// Matches if the matched type is a reference type and the referenced
/// type matches the specified matcher.
///
/// Example matches X &x and const X &y
/// (matcher = varDecl(hasType(references(cxxRecordDecl(hasName("X"))))))
/// \code
/// class X {
/// void a(X b) {
/// X &x = b;
/// const X &y = b;
/// }
/// };
/// \endcode
AST_MATCHER_P(QualType, references, internal::Matcher<QualType>,
              InnerMatcher) {
  // Null and non-reference QualTypes never match.
  if (Node.isNull() || !Node->isReferenceType())
    return false;
  return InnerMatcher.matches(Node->getPointeeType(), Finder, Builder);
}
/// Matches QualTypes whose canonical type matches InnerMatcher.
///
/// Given:
/// \code
/// typedef int &int_ref;
/// int a;
/// int_ref b = a;
/// \endcode
///
/// \c varDecl(hasType(qualType(referenceType()))) will not match the
/// declaration of b but \c
/// varDecl(hasType(qualType(hasCanonicalType(referenceType())))) does.
AST_MATCHER_P(QualType, hasCanonicalType, internal::Matcher<QualType>,
              InnerMatcher) {
  // A null QualType has no canonical type to inspect.
  return !Node.isNull() &&
         InnerMatcher.matches(Node.getCanonicalType(), Finder, Builder);
}
/// Overloaded to match the referenced type's declaration.
AST_MATCHER_P_OVERLOAD(QualType, references, internal::Matcher<Decl>,
                       InnerMatcher, 1) {
  // Convenience overload: wrap the Decl matcher via hasDeclaration.
  return references(qualType(hasDeclaration(InnerMatcher)))
      .matches(Node, Finder, Builder);
}
/// Matches on the implicit object argument of a member call expression. Unlike
/// `on`, matches the argument directly without stripping away anything.
///
/// Given
/// \code
/// class Y { public: void m(); };
/// Y g();
/// class X : public Y { void g(); };
/// void z(Y y, X x) { y.m(); x.m(); x.g(); (g()).m(); }
/// \endcode
/// cxxMemberCallExpr(onImplicitObjectArgument(hasType(
/// cxxRecordDecl(hasName("Y")))))
/// matches `y.m()`, `x.m()` and (g()).m(), but not `x.g()`.
/// cxxMemberCallExpr(on(callExpr()))
/// does not match `(g()).m()`, because the parens are not ignored.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, onImplicitObjectArgument,
              internal::Matcher<Expr>, InnerMatcher) {
  // Unlike `on`, the argument is matched as-is — no paren or implicit-cast
  // stripping.
  const Expr *ExprNode = Node.getImplicitObjectArgument();
  return (ExprNode != nullptr &&
          InnerMatcher.matches(*ExprNode, Finder, Builder));
}
/// Matches if the type of the expression's implicit object argument either
/// matches the InnerMatcher, or is a pointer to a type that matches the
/// InnerMatcher.
///
/// Given
/// \code
/// class Y { public: void m(); };
/// class X : public Y { void g(); };
/// void z() { Y y; y.m(); Y *p; p->m(); X x; x.m(); x.g(); }
/// \endcode
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
/// cxxRecordDecl(hasName("Y")))))
/// matches `y.m()`, `p->m()` and `x.m()`.
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
/// cxxRecordDecl(hasName("X")))))
/// matches `x.g()`.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<QualType>, InnerMatcher, 0) {
  // Accept either the type itself or a pointer to it, so both `x.m()` and
  // `p->m()` forms are covered.
  return onImplicitObjectArgument(
      anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))))
      .matches(Node, Finder, Builder);
}
/// Overloaded to match the type's declaration.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<Decl>, InnerMatcher, 1) {
  // Decl-matcher overload; same structure as the QualType overload.
  return onImplicitObjectArgument(
      anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))))
      .matches(Node, Finder, Builder);
}
/// Matches a DeclRefExpr that refers to a declaration that matches the
/// specified matcher.
///
/// Example matches x in if(x)
/// (matcher = declRefExpr(to(varDecl(hasName("x")))))
/// \code
/// bool x;
/// if (x) {}
/// \endcode
AST_MATCHER_P(DeclRefExpr, to, internal::Matcher<Decl>,
              InnerMatcher) {
  // Match against the referenced declaration, when present.
  if (const Decl *Referenced = Node.getDecl())
    return InnerMatcher.matches(*Referenced, Finder, Builder);
  return false;
}
/// Matches a \c DeclRefExpr that refers to a declaration through a
/// specific using shadow declaration.
///
/// Given
/// \code
/// namespace a { void f() {} }
/// using a::f;
/// void g() {
/// f(); // Matches this ..
/// a::f(); // .. but not this.
/// }
/// \endcode
/// declRefExpr(throughUsingDecl(anything()))
/// matches \c f()
AST_MATCHER_P(DeclRefExpr, throughUsingDecl,
              internal::Matcher<UsingShadowDecl>, InnerMatcher) {
  // Only references found through a using-shadow declaration can match.
  const auto *Shadow = dyn_cast<UsingShadowDecl>(Node.getFoundDecl());
  return Shadow != nullptr && InnerMatcher.matches(*Shadow, Finder, Builder);
}
/// Matches an \c OverloadExpr if any of the declarations in the set of
/// overloads matches the given matcher.
///
/// Given
/// \code
/// template <typename T> void foo(T);
/// template <typename T> void bar(T);
/// template <typename T> void baz(T t) {
/// foo(t);
/// bar(t);
/// }
/// \endcode
/// unresolvedLookupExpr(hasAnyDeclaration(
/// functionTemplateDecl(hasName("foo"))))
/// matches \c foo in \c foo(t); but not \c bar in \c bar(t);
AST_MATCHER_P(OverloadExpr, hasAnyDeclaration, internal::Matcher<Decl>,
              InnerMatcher) {
  // Succeeds on the first declaration in the overload set that matches.
  return matchesFirstInPointerRange(InnerMatcher, Node.decls_begin(),
                                    Node.decls_end(), Finder, Builder);
}
/// Matches the Decl of a DeclStmt which has a single declaration.
///
/// Given
/// \code
/// int a, b;
/// int c;
/// \endcode
/// declStmt(hasSingleDecl(anything()))
/// matches 'int c;' but not 'int a, b;'.
AST_MATCHER_P(DeclStmt, hasSingleDecl, internal::Matcher<Decl>, InnerMatcher) {
  // Multi-declaration statements never match.
  if (!Node.isSingleDecl())
    return false;
  return InnerMatcher.matches(*Node.getSingleDecl(), Finder, Builder);
}
/// Matches a variable declaration that has an initializer expression
/// that matches the given matcher.
///
/// Example matches x (matcher = varDecl(hasInitializer(callExpr())))
/// \code
/// bool y() { return true; }
/// bool x = y();
/// \endcode
AST_MATCHER_P(VarDecl, hasInitializer, internal::Matcher<Expr>, InnerMatcher) {
  // Match only when an initializer expression is present.
  if (const Expr *Init = Node.getAnyInitializer())
    return InnerMatcher.matches(*Init, Finder, Builder);
  return false;
}
/// \brief Matches a static variable with local scope.
///
/// Example matches y (matcher = varDecl(isStaticLocal()))
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// static int z;
/// \endcode
AST_MATCHER(VarDecl, isStaticLocal) {
  // Delegates directly to the AST node's own predicate.
  return Node.isStaticLocal();
}
/// Matches a variable declaration that has function scope and is a
/// non-static local variable.
///
/// Example matches x (matcher = varDecl(hasLocalStorage())
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// int z;
/// \endcode
AST_MATCHER(VarDecl, hasLocalStorage) {
  // Delegates directly to the AST node's own predicate.
  return Node.hasLocalStorage();
}
/// Matches a variable declaration that does not have local storage.
///
/// Example matches y and z (matcher = varDecl(hasGlobalStorage())
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// int z;
/// \endcode
AST_MATCHER(VarDecl, hasGlobalStorage) {
  // Delegates directly to the AST node's own predicate.
  return Node.hasGlobalStorage();
}
/// Matches a variable declaration that has automatic storage duration.
///
/// Example matches x, but not y, z, or a.
/// (matcher = varDecl(hasAutomaticStorageDuration())
/// \code
/// void f() {
/// int x;
/// static int y;
/// thread_local int z;
/// }
/// int a;
/// \endcode
AST_MATCHER(VarDecl, hasAutomaticStorageDuration) {
  // Compares the declaration's storage duration against SD_Automatic.
  return Node.getStorageDuration() == SD_Automatic;
}
/// Matches a variable declaration that has static storage duration.
/// It includes the variable declared at namespace scope and those declared
/// with "static" and "extern" storage class specifiers.
///
/// \code
/// void f() {
/// int x;
/// static int y;
/// thread_local int z;
/// }
/// int a;
/// static int b;
/// extern int c;
/// varDecl(hasStaticStorageDuration())
/// matches the function declaration y, a, b and c.
/// \endcode
AST_MATCHER(VarDecl, hasStaticStorageDuration) {
  // Compares the declaration's storage duration against SD_Static.
  return Node.getStorageDuration() == SD_Static;
}
/// Matches a variable declaration that has thread storage duration.
///
/// Example matches z, but not x, y, or a.
/// (matcher = varDecl(hasThreadStorageDuration())
/// \code
/// void f() {
/// int x;
/// static int y;
/// thread_local int z;
/// }
/// int a;
/// \endcode
AST_MATCHER(VarDecl, hasThreadStorageDuration) {
  // Compares the declaration's storage duration against SD_Thread.
  return Node.getStorageDuration() == SD_Thread;
}
/// Matches a variable declaration that is an exception variable from
/// a C++ catch block, or an Objective-C \@catch statement.
///
/// Example matches x (matcher = varDecl(isExceptionVariable())
/// \code
/// void f(int y) {
/// try {
/// } catch (int x) {
/// }
/// }
/// \endcode
AST_MATCHER(VarDecl, isExceptionVariable) {
  // Delegates directly to the AST node's own predicate.
  return Node.isExceptionVariable();
}
/// Checks that a call expression or a constructor call expression has
/// a specific number of arguments (including absent default arguments).
///
/// Example matches f(0, 0) (matcher = callExpr(argumentCountIs(2)))
/// \code
/// void f(int x, int y);
/// f(0, 0);
/// \endcode
AST_POLYMORPHIC_MATCHER_P(argumentCountIs,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                          CXXConstructExpr,
                                                          ObjCMessageExpr),
                          unsigned, N) {
  // The count includes arguments filled in from defaults (see doc above).
  return Node.getNumArgs() == N;
}
/// Matches the n'th argument of a call expression or a constructor
/// call expression.
///
/// Example matches y in x(y)
/// (matcher = callExpr(hasArgument(0, declRefExpr())))
/// \code
/// void x(int) { int y; x(y); }
/// \endcode
AST_POLYMORPHIC_MATCHER_P2(hasArgument,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr,
                                                           ObjCMessageExpr),
                           unsigned, N, internal::Matcher<Expr>, InnerMatcher) {
  // Out-of-range N simply fails to match; parens and implicit casts around
  // the argument are stripped before matching.
  return (N < Node.getNumArgs() &&
          InnerMatcher.matches(
              *Node.getArg(N)->IgnoreParenImpCasts(), Finder, Builder));
}
/// Matches the n'th item of an initializer list expression.
///
/// Example matches y.
/// (matcher = initListExpr(hasInit(0, expr())))
/// \code
///   int x{y};
/// \endcode
AST_MATCHER_P2(InitListExpr, hasInit, unsigned, N,
               ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
  // Out-of-range N simply fails to match.
  return N < Node.getNumInits() &&
         InnerMatcher.matches(*Node.getInit(N), Finder, Builder);
}
/// Matches declaration statements that contain a specific number of
/// declarations.
///
/// Example: Given
/// \code
/// int a, b;
/// int c;
/// int d = 2, e;
/// \endcode
/// declCountIs(2)
/// matches 'int a, b;' and 'int d = 2, e;', but not 'int c;'.
AST_MATCHER_P(DeclStmt, declCountIs, unsigned, N) {
  // std::distance returns a ptrdiff_t; convert N with a named cast rather
  // than a C-style cast so the signed comparison is explicit and greppable.
  return std::distance(Node.decl_begin(), Node.decl_end()) ==
         static_cast<ptrdiff_t>(N);
}
/// Matches the n'th declaration of a declaration statement.
///
/// Note that this does not work for global declarations because the AST
/// breaks up multiple-declaration DeclStmt's into multiple single-declaration
/// DeclStmt's.
/// Example: Given non-global declarations
/// \code
/// int a, b = 0;
/// int c;
/// int d = 2, e;
/// \endcode
/// declStmt(containsDeclaration(
/// 0, varDecl(hasInitializer(anything()))))
/// matches only 'int d = 2, e;', and
/// declStmt(containsDeclaration(1, varDecl()))
/// \code
/// matches 'int a, b = 0' as well as 'int d = 2, e;'
/// but 'int c;' is not matched.
/// \endcode
AST_MATCHER_P2(DeclStmt, containsDeclaration, unsigned, N,
               internal::Matcher<Decl>, InnerMatcher) {
  // Bounds-check first, then advance to the N'th declaration in the
  // statement and run the inner matcher on it.
  const unsigned NumDecls = std::distance(Node.decl_begin(), Node.decl_end());
  if (N >= NumDecls)
    return false;
  DeclStmt::const_decl_iterator Iterator = Node.decl_begin();
  std::advance(Iterator, N);
  return InnerMatcher.matches(**Iterator, Finder, Builder);
}
/// Matches a C++ catch statement that has a catch-all handler.
///
/// Given
/// \code
///   try {
///     // ...
///   } catch (int) {
///     // ...
///   } catch (...) {
///     // ...
///   }
/// \endcode
/// cxxCatchStmt(isCatchAll()) matches catch(...) but not catch(int).
AST_MATCHER(CXXCatchStmt, isCatchAll) {
  // A catch-all handler is represented in the AST by the absence of an
  // exception declaration.
  return Node.getExceptionDecl() == nullptr;
}
/// Matches a constructor initializer.
///
/// Given
/// \code
///   struct Foo {
///     Foo() : foo_(1) { }
///     int foo_;
///   };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(
///   hasAnyConstructorInitializer(anything())
/// )))
///   record matches Foo, hasAnyConstructorInitializer matches foo_(1)
AST_MATCHER_P(CXXConstructorDecl, hasAnyConstructorInitializer,
              internal::Matcher<CXXCtorInitializer>, InnerMatcher) {
  // Stops at the first initializer accepted by the inner matcher.
  return matchesFirstInPointerRange(InnerMatcher, Node.init_begin(),
                                    Node.init_end(), Finder, Builder);
}
/// Matches the field declaration of a constructor initializer.
///
/// Given
/// \code
///   struct Foo {
///     Foo() : foo_(1) { }
///     int foo_;
///   };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer(
///     forField(hasName("foo_"))))))
///   matches Foo
/// with forField matching foo_
AST_MATCHER_P(CXXCtorInitializer, forField,
              internal::Matcher<FieldDecl>, InnerMatcher) {
  // Base initializers have no member field; those never match.
  if (const FieldDecl *Member = Node.getAnyMember())
    return InnerMatcher.matches(*Member, Finder, Builder);
  return false;
}
/// Matches the initializer expression of a constructor initializer.
///
/// Given
/// \code
///   struct Foo {
///     Foo() : foo_(1) { }
///     int foo_;
///   };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer(
///     withInitializer(integerLiteral(equals(1)))))))
///   matches Foo
/// with withInitializer matching (1)
AST_MATCHER_P(CXXCtorInitializer, withInitializer,
              internal::Matcher<Expr>, InnerMatcher) {
  // An initializer without an init expression never matches.
  if (const Expr *Init = Node.getInit())
    return InnerMatcher.matches(*Init, Finder, Builder);
  return false;
}
/// Matches a constructor initializer if it is explicitly written in
/// code (as opposed to implicitly added by the compiler).
///
/// Given
/// \code
///   struct Foo {
///     Foo() { }
///     Foo(int) : foo_("A") { }
///     string foo_;
///   };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isWritten()))
///   will match Foo(int), but not Foo()
AST_MATCHER(CXXCtorInitializer, isWritten) {
  return Node.isWritten();
}
/// Matches a constructor initializer if it is initializing a base, as
/// opposed to a member.
///
/// Given
/// \code
///   struct B {};
///   struct D : B {
///     int I;
///     D(int i) : I(i) {}
///   };
///   struct E : B {
///     E() : B() {}
///   };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isBaseInitializer()))
///   will match E(), but not match D(int).
AST_MATCHER(CXXCtorInitializer, isBaseInitializer) {
  return Node.isBaseInitializer();
}
/// Matches a constructor initializer if it is initializing a member, as
/// opposed to a base.
///
/// Given
/// \code
///   struct B {};
///   struct D : B {
///     int I;
///     D(int i) : I(i) {}
///   };
///   struct E : B {
///     E() : B() {}
///   };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isMemberInitializer()))
///   will match D(int), but not match E().
AST_MATCHER(CXXCtorInitializer, isMemberInitializer) {
  // Note: isMemberInitializer() is the complement of isBaseInitializer()
  // for ordinary (non-delegating) initializers.
  return Node.isMemberInitializer();
}
/// Matches any argument of a call expression or a constructor call
/// expression, or an ObjC-message-send expression.
///
/// Given
/// \code
///   void x(int, int, int) { int y; x(1, y, 42); }
/// \endcode
/// callExpr(hasAnyArgument(declRefExpr()))
///   matches x(1, y, 42)
/// with hasAnyArgument(...)
///   matching y
///
/// For ObjectiveC, given
/// \code
///   @interface I - (void) f:(int) y; @end
///   void foo(I *i) { [i f:12]; }
/// \endcode
/// objcMessageExpr(hasAnyArgument(integerLiteral(equals(12))))
///   matches [i f:12]
AST_POLYMORPHIC_MATCHER_P(hasAnyArgument,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(
                              CallExpr, CXXConstructExpr,
                              CXXUnresolvedConstructExpr, ObjCMessageExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  for (const Expr *Arg : Node.arguments()) {
    // Match into a scratch builder so a failed argument leaves *Builder
    // untouched; commit the bindings only for the first match.
    BoundNodesTreeBuilder Result(*Builder);
    if (InnerMatcher.matches(*Arg, Finder, &Result)) {
      *Builder = std::move(Result);
      return true;
    }
  }
  return false;
}
/// Matches a constructor call expression which uses list initialization.
AST_MATCHER(CXXConstructExpr, isListInitialization) {
  return Node.isListInitialization();
}
/// Matches a constructor call expression which requires
/// zero initialization.
///
/// Given
/// \code
/// void foo() {
///   struct point { double x; double y; };
///   point pt[2] = { { 1.0, 2.0 } };
/// }
/// \endcode
/// initListExpr(has(cxxConstructExpr(requiresZeroInitialization())))
/// will match the implicit array filler for pt[1].
AST_MATCHER(CXXConstructExpr, requiresZeroInitialization) {
  return Node.requiresZeroInitialization();
}
/// Matches the n'th parameter of a function or an ObjC method
/// declaration or a block.
///
/// Given
/// \code
///   class X { void f(int x) {} };
/// \endcode
/// cxxMethodDecl(hasParameter(0, hasType(varDecl())))
///   matches f(int x) {}
/// with hasParameter(...)
///   matching int x
///
/// For ObjectiveC, given
/// \code
///   @interface I - (void) f:(int) y; @end
/// \endcode
///
/// the matcher objcMethodDecl(hasParameter(0, hasName("y")))
/// matches the declaration of method f with hasParameter
/// matching y.
AST_POLYMORPHIC_MATCHER_P2(hasParameter,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                           ObjCMethodDecl,
                                                           BlockDecl),
                           unsigned, N, internal::Matcher<ParmVarDecl>,
                           InnerMatcher) {
  // Guard against out-of-range indices, then delegate to the inner matcher.
  if (N >= Node.parameters().size())
    return false;
  return InnerMatcher.matches(*Node.parameters()[N], Finder, Builder);
}
/// Matches all arguments and their respective ParmVarDecl.
///
/// Given
/// \code
///   void f(int i);
///   int y;
///   f(y);
/// \endcode
/// callExpr(
///   forEachArgumentWithParam(
///     declRefExpr(to(varDecl(hasName("y")))),
///     parmVarDecl(hasType(isInteger()))
/// ))
///   matches f(y);
/// with declRefExpr(...)
///   matching int y
/// and parmVarDecl(...)
///   matching int i
AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParam,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr),
                           internal::Matcher<Expr>, ArgMatcher,
                           internal::Matcher<ParmVarDecl>, ParamMatcher) {
  // Accumulates bindings of every (argument, parameter) pair that matched;
  // installed into *Builder at the end so each pair yields its own result.
  BoundNodesTreeBuilder Result;
  // The first argument of an overloaded member operator is the implicit object
  // argument of the method which should not be matched against a parameter, so
  // we skip over it here.
  BoundNodesTreeBuilder Matches;
  unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl()))
                          .matches(Node, Finder, &Matches)
                      ? 1
                      : 0;
  int ParamIndex = 0;
  bool Matched = false;
  for (; ArgIndex < Node.getNumArgs(); ++ArgIndex) {
    BoundNodesTreeBuilder ArgMatches(*Builder);
    if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()),
                           Finder, &ArgMatches)) {
      BoundNodesTreeBuilder ParamMatches(ArgMatches);
      // Re-match the whole call/construct expression to reach the parameter
      // declaration that corresponds to this argument position.
      if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl(
                         hasParameter(ParamIndex, ParamMatcher)))),
                     callExpr(callee(functionDecl(
                         hasParameter(ParamIndex, ParamMatcher))))))
              .matches(Node, Finder, &ParamMatches)) {
        Result.addMatch(ParamMatches);
        Matched = true;
      }
    }
    ++ParamIndex;
  }
  *Builder = std::move(Result);
  return Matched;
}
/// Matches any parameter of a function or an ObjC method declaration or a
/// block.
///
/// Does not match the 'this' parameter of a method.
///
/// Given
/// \code
///   class X { void f(int x, int y, int z) {} };
/// \endcode
/// cxxMethodDecl(hasAnyParameter(hasName("y")))
///   matches f(int x, int y, int z) {}
/// with hasAnyParameter(...)
///   matching int y
///
/// For ObjectiveC, given
/// \code
///   @interface I - (void) f:(int) y; @end
/// \endcode
///
/// the matcher objcMethodDecl(hasAnyParameter(hasName("y")))
/// matches the declaration of method f with hasParameter
/// matching y.
///
/// For blocks, given
/// \code
///   b = ^(int y) { printf("%d", y) };
/// \endcode
///
/// the matcher blockDecl(hasAnyParameter(hasName("y")))
/// matches the declaration of the block b with hasParameter
/// matching y.
AST_POLYMORPHIC_MATCHER_P(hasAnyParameter,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                          ObjCMethodDecl,
                                                          BlockDecl),
                          internal::Matcher<ParmVarDecl>,
                          InnerMatcher) {
  // Stops at the first parameter accepted by the inner matcher.
  return matchesFirstInPointerRange(InnerMatcher, Node.param_begin(),
                                    Node.param_end(), Finder, Builder);
}
/// Matches \c FunctionDecls and \c FunctionProtoTypes that have a
/// specific parameter count.
///
/// Given
/// \code
///   void f(int i) {}
///   void g(int i, int j) {}
///   void h(int i, int j);
///   void j(int i);
///   void k(int x, int y, int z, ...);
/// \endcode
/// functionDecl(parameterCountIs(2))
///   matches \c g and \c h
/// functionProtoType(parameterCountIs(2))
///   matches \c g and \c h
/// functionProtoType(parameterCountIs(3))
///   matches \c k
AST_POLYMORPHIC_MATCHER_P(parameterCountIs,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                          FunctionProtoType),
                          unsigned, N) {
  // Note: the variadic ellipsis is not counted as a parameter.
  return Node.getNumParams() == N;
}
/// Matches \c FunctionDecls that have a noreturn attribute.
///
/// Given
/// \code
///   void nope();
///   [[noreturn]] void a();
///   __attribute__((noreturn)) void b();
///   struct c { [[noreturn]] c(); };
/// \endcode
/// functionDecl(isNoReturn())
///   matches all of those except
/// \code
///   void nope();
/// \endcode
AST_MATCHER(FunctionDecl, isNoReturn) { return Node.isNoReturn(); }
/// Matches the return type of a function declaration.
///
/// Given:
/// \code
///   class X { int f() { return 1; } };
/// \endcode
/// cxxMethodDecl(returns(asString("int")))
///   matches int f() { return 1; }
AST_MATCHER_P(FunctionDecl, returns,
              internal::Matcher<QualType>, InnerMatcher) {
  return InnerMatcher.matches(Node.getReturnType(), Finder, Builder);
}
/// Matches extern "C" function or variable declarations.
///
/// Given:
/// \code
///   extern "C" void f() {}
///   extern "C" { void g() {} }
///   void h() {}
///   extern "C" int x = 1;
///   extern "C" int y = 2;
///   int z = 3;
/// \endcode
/// functionDecl(isExternC())
///   matches the declaration of f and g, but not the declaration of h.
/// varDecl(isExternC())
///   matches the declaration of x and y, but not the declaration of z.
AST_POLYMORPHIC_MATCHER(isExternC, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                                   VarDecl)) {
  return Node.isExternC();
}
/// Matches variable/function declarations that have "static" storage
/// class specifier ("static" keyword) written in the source.
///
/// Given:
/// \code
///   static void f() {}
///   static int i = 0;
///   extern int j;
///   int k;
/// \endcode
/// functionDecl(isStaticStorageClass())
///   matches the function declaration f.
/// varDecl(isStaticStorageClass())
///   matches the variable declaration i.
AST_POLYMORPHIC_MATCHER(isStaticStorageClass,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        VarDecl)) {
  // Checks the written storage class, not effective linkage.
  return Node.getStorageClass() == SC_Static;
}
/// Matches deleted function declarations.
///
/// Given:
/// \code
///   void Func();
///   void DeletedFunc() = delete;
/// \endcode
/// functionDecl(isDeleted())
///   matches the declaration of DeletedFunc, but not Func.
AST_MATCHER(FunctionDecl, isDeleted) {
  return Node.isDeleted();
}
/// Matches defaulted function declarations.
///
/// Given:
/// \code
///   class A { ~A(); };
///   class B { ~B() = default; };
/// \endcode
/// functionDecl(isDefaulted())
///   matches the declaration of ~B, but not ~A.
AST_MATCHER(FunctionDecl, isDefaulted) {
  return Node.isDefaulted();
}
/// Matches functions that have a dynamic exception specification.
///
/// Given:
/// \code
///   void f();
///   void g() noexcept;
///   void h() noexcept(true);
///   void i() noexcept(false);
///   void j() throw();
///   void k() throw(int);
///   void l() throw(...);
/// \endcode
/// functionDecl(hasDynamicExceptionSpec()) and
/// functionProtoType(hasDynamicExceptionSpec())
///   match the declarations of j, k, and l, but not f, g, h, or i.
AST_POLYMORPHIC_MATCHER(hasDynamicExceptionSpec,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        FunctionProtoType)) {
  // Without a prototype there is no exception specification to inspect.
  const FunctionProtoType *Proto = internal::getFunctionProtoType(Node);
  if (!Proto)
    return false;
  return Proto->hasDynamicExceptionSpec();
}
/// Matches functions that have a non-throwing exception specification.
///
/// Given:
/// \code
///   void f();
///   void g() noexcept;
///   void h() throw();
///   void i() throw(int);
///   void j() noexcept(false);
/// \endcode
/// functionDecl(isNoThrow()) and functionProtoType(isNoThrow())
///   match the declarations of g, and h, but not f, i or j.
AST_POLYMORPHIC_MATCHER(isNoThrow,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        FunctionProtoType)) {
  const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node);
  // If the function does not have a prototype, then it is assumed to be a
  // throwing function (as it would if the function did not have any exception
  // specification).
  if (!FnTy)
    return false;
  // Assume the best for any unresolved exception specification.
  if (isUnresolvedExceptionSpec(FnTy->getExceptionSpecType()))
    return true;
  return FnTy->isNothrow();
}
/// Matches constexpr variable and function declarations,
///        and if constexpr.
///
/// Given:
/// \code
///   constexpr int foo = 42;
///   constexpr int bar();
///   void baz() { if constexpr(1 > 0) {} }
/// \endcode
/// varDecl(isConstexpr())
///   matches the declaration of foo.
/// functionDecl(isConstexpr())
///   matches the declaration of bar.
/// ifStmt(isConstexpr())
///   matches the if statement in baz.
AST_POLYMORPHIC_MATCHER(isConstexpr,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(VarDecl,
                                                        FunctionDecl,
                                                        IfStmt)) {
  return Node.isConstexpr();
}
/// Matches the condition expression of an if statement, for loop,
/// switch statement or conditional operator.
///
/// Example matches true (matcher = hasCondition(cxxBoolLiteral(equals(true))))
/// \code
///   if (true) {}
/// \endcode
AST_POLYMORPHIC_MATCHER_P(
    hasCondition,
    AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, ForStmt, WhileStmt, DoStmt,
                                    SwitchStmt, AbstractConditionalOperator),
    internal::Matcher<Expr>, InnerMatcher) {
  // A for statement may legitimately have no condition at all.
  if (const Expr *Cond = Node.getCond())
    return InnerMatcher.matches(*Cond, Finder, Builder);
  return false;
}
/// Matches the then-statement of an if statement.
///
/// Examples matches the if statement
///   (matcher = ifStmt(hasThen(cxxBoolLiteral(equals(true)))))
/// \code
///   if (false) true; else false;
/// \endcode
AST_MATCHER_P(IfStmt, hasThen, internal::Matcher<Stmt>, InnerMatcher) {
  if (const Stmt *ThenBranch = Node.getThen())
    return InnerMatcher.matches(*ThenBranch, Finder, Builder);
  return false;
}
/// Matches the else-statement of an if statement.
///
/// Examples matches the if statement
///   (matcher = ifStmt(hasElse(cxxBoolLiteral(equals(true)))))
/// \code
///   if (false) false; else true;
/// \endcode
AST_MATCHER_P(IfStmt, hasElse, internal::Matcher<Stmt>, InnerMatcher) {
  // An if statement without an else branch never matches.
  if (const Stmt *ElseBranch = Node.getElse())
    return InnerMatcher.matches(*ElseBranch, Finder, Builder);
  return false;
}
/// Matches if a node equals a previously bound node.
///
/// Matches a node if it equals the node previously bound to \p ID.
///
/// Given
/// \code
///   class X { int a; int b; };
/// \endcode
/// cxxRecordDecl(
///     has(fieldDecl(hasName("a"), hasType(type().bind("t")))),
///     has(fieldDecl(hasName("b"), hasType(type(equalsBoundNode("t"))))))
///   matches the class \c X, as \c a and \c b have the same type.
///
/// Note that when multiple matches are involved via \c forEach* matchers,
/// \c equalsBoundNodes acts as a filter.
/// For example:
/// compoundStmt(
///     forEachDescendant(varDecl().bind("d")),
///     forEachDescendant(declRefExpr(to(decl(equalsBoundNode("d"))))))
/// will trigger a match for each combination of variable declaration
/// and reference to that variable declaration within a compound statement.
AST_POLYMORPHIC_MATCHER_P(equalsBoundNode,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(Stmt, Decl, Type,
                                                          QualType),
                          std::string, ID) {
  // FIXME: Figure out whether it makes sense to allow this
  // on any other node types.
  // For *Loc it probably does not make sense, as those seem
  // unique. For NestedNameSpecifier it might make sense, as
  // those also have pointer identity, but I'm not sure whether
  // they're ever reused.
  internal::NotEqualsBoundNodePredicate Predicate;
  Predicate.ID = ID;
  Predicate.Node = ast_type_traits::DynTypedNode::create(Node);
  // Rather than returning a plain bool, drop all binding sets whose node for
  // ID is NOT equal to this node; the match fails if none remain.
  return Builder->removeBindings(Predicate);
}
/// Matches the condition variable statement in an if statement.
///
/// Given
/// \code
///   if (A* a = GetAPointer()) {}
/// \endcode
/// hasConditionVariableStatement(...)
///   matches 'A* a = GetAPointer()'.
AST_MATCHER_P(IfStmt, hasConditionVariableStatement,
              internal::Matcher<DeclStmt>, InnerMatcher) {
  // Only ifs declaring a variable in the condition have this statement.
  const DeclStmt* const DeclarationStatement =
      Node.getConditionVariableDeclStmt();
  return DeclarationStatement != nullptr &&
         InnerMatcher.matches(*DeclarationStatement, Finder, Builder);
}
/// Matches the index expression of an array subscript expression.
///
/// Given
/// \code
///   int i[5];
///   void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpr(hasIndex(integerLiteral()))
///   matches \c i[1] with the \c integerLiteral() matching \c 1
AST_MATCHER_P(ArraySubscriptExpr, hasIndex,
              internal::Matcher<Expr>, InnerMatcher) {
  const Expr *IndexExpr = Node.getIdx();
  return IndexExpr != nullptr &&
         InnerMatcher.matches(*IndexExpr, Finder, Builder);
}
/// Matches the base expression of an array subscript expression.
///
/// Given
/// \code
///   int i[5];
///   void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpr(hasBase(implicitCastExpr(
///     hasSourceExpression(declRefExpr()))))
///   matches \c i[1] with the \c declRefExpr() matching \c i
AST_MATCHER_P(ArraySubscriptExpr, hasBase,
              internal::Matcher<Expr>, InnerMatcher) {
  const Expr *BaseExpr = Node.getBase();
  return BaseExpr != nullptr &&
         InnerMatcher.matches(*BaseExpr, Finder, Builder);
}
/// Matches a 'for', 'while', 'do while' statement or a function
/// definition that has a given body.
///
/// Given
/// \code
///   for (;;) {}
/// \endcode
/// hasBody(compoundStmt())
///   matches 'for (;;) {}'
/// with compoundStmt()
///   matching '{}'
AST_POLYMORPHIC_MATCHER_P(hasBody,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(DoStmt, ForStmt,
                                                          WhileStmt,
                                                          CXXForRangeStmt,
                                                          FunctionDecl),
                          internal::Matcher<Stmt>, InnerMatcher) {
  // GetBodyMatcher dispatches on the node type to fetch the right body.
  const Stmt *const Statement = internal::GetBodyMatcher<NodeType>::get(Node);
  return (Statement != nullptr &&
          InnerMatcher.matches(*Statement, Finder, Builder));
}
/// Matches compound statements where at least one substatement matches
/// a given matcher. Also matches StmtExprs that have CompoundStmt as children.
///
/// Given
/// \code
///   { {}; 1+2; }
/// \endcode
/// hasAnySubstatement(compoundStmt())
///   matches '{ {}; 1+2; }'
/// with compoundStmt()
///   matching '{}'
AST_POLYMORPHIC_MATCHER_P(hasAnySubstatement,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(CompoundStmt,
                                                          StmtExpr),
                          internal::Matcher<Stmt>, InnerMatcher) {
  const CompoundStmt *CS = CompoundStmtMatcher<NodeType>::get(Node);
  return CS && matchesFirstInPointerRange(InnerMatcher, CS->body_begin(),
                                          CS->body_end(), Finder, Builder);
}
/// Checks that a compound statement contains a specific number of
/// child statements.
///
/// Example: Given
/// \code
///   { for (;;) {} }
/// \endcode
/// compoundStmt(statementCountIs(0))
///   matches '{}'
///   but does not match the outer compound statement.
AST_MATCHER_P(CompoundStmt, statementCountIs, unsigned, N) {
  return Node.size() == N;
}
/// Matches literals that are equal to the given value of type ValueT.
///
/// Given
/// \code
///   f('\0', false, 3.14, 42);
/// \endcode
/// characterLiteral(equals(0))
///   matches '\0'
/// cxxBoolLiteral(equals(false)) and cxxBoolLiteral(equals(0))
///   match false
/// floatLiteral(equals(3.14)) and floatLiteral(equals(314e-2))
///   match 3.14
/// integerLiteral(equals(42))
///   matches 42
///
/// Note that you cannot directly match a negative numeric literal because the
/// minus sign is not part of the literal: It is a unary operator whose operand
/// is the positive numeric literal. Instead, you must use a unaryOperator()
/// matcher to match the minus sign:
///
/// unaryOperator(hasOperatorName("-"),
///               hasUnaryOperand(integerLiteral(equals(13))))
///
/// Usable as: Matcher<CharacterLiteral>, Matcher<CXXBoolLiteralExpr>,
///            Matcher<FloatingLiteral>, Matcher<IntegerLiteral>
template <typename ValueT>
internal::PolymorphicMatcherWithParam1<internal::ValueEqualsMatcher, ValueT>
equals(const ValueT &Value) {
  return internal::PolymorphicMatcherWithParam1<
    internal::ValueEqualsMatcher,
    ValueT>(Value);
}
// Overloads for bool, unsigned and double literals; the trailing integer on
// each AST_POLYMORPHIC_MATCHER_P_OVERLOAD disambiguates the overload set for
// the dynamic matcher registry.
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
                                                                   CXXBoolLiteralExpr,
                                                                   IntegerLiteral),
                                   bool, Value, 0) {
  return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
      .matchesNode(Node);
}
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
                                                                   CXXBoolLiteralExpr,
                                                                   IntegerLiteral),
                                   unsigned, Value, 1) {
  return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
      .matchesNode(Node);
}
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
                                                                   CXXBoolLiteralExpr,
                                                                   FloatingLiteral,
                                                                   IntegerLiteral),
                                   double, Value, 2) {
  return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
      .matchesNode(Node);
}
/// Matches the operator Name of operator expressions (binary or
/// unary).
///
/// Example matches a || b (matcher = binaryOperator(hasOperatorName("||")))
/// \code
///   !(a || b)
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasOperatorName,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                          UnaryOperator),
                          std::string, Name) {
  // Compares against the spelling, e.g. "||", "-", "+=".
  return Name == Node.getOpcodeStr(Node.getOpcode());
}
/// Matches all kinds of assignment operators.
///
/// Example 1: matches a += b (matcher = binaryOperator(isAssignmentOperator()))
/// \code
///   if (a == b)
///     a += b;
/// \endcode
///
/// Example 2: matches s1 = s2
///            (matcher = cxxOperatorCallExpr(isAssignmentOperator()))
/// \code
///   struct S { S& operator=(const S&); };
///   void x() { S s1, s2; s1 = s2; }
/// \endcode
AST_POLYMORPHIC_MATCHER(isAssignmentOperator,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                        CXXOperatorCallExpr)) {
  return Node.isAssignmentOp();
}
/// Matches the left hand side of binary operator expressions.
///
/// Example matches a (matcher = binaryOperator(hasLHS()))
/// \code
///   a || b
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasLHS,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                          ArraySubscriptExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr *Lhs = Node.getLHS())
    return InnerMatcher.matches(*Lhs, Finder, Builder);
  return false;
}
/// Matches the right hand side of binary operator expressions.
///
/// Example matches b (matcher = binaryOperator(hasRHS()))
/// \code
///   a || b
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasRHS,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                          ArraySubscriptExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr *Rhs = Node.getRHS())
    return InnerMatcher.matches(*Rhs, Finder, Builder);
  return false;
}
/// Matches if either the left hand side or the right hand side of a
/// binary operator matches.
// Implemented as a plain inline function (not a macro) because it is just a
// composition of hasLHS and hasRHS.
inline internal::Matcher<BinaryOperator> hasEitherOperand(
    const internal::Matcher<Expr> &InnerMatcher) {
  return anyOf(hasLHS(InnerMatcher), hasRHS(InnerMatcher));
}
/// Matches if the operand of a unary operator matches.
///
/// Example matches true (matcher = hasUnaryOperand(
///                                   cxxBoolLiteral(equals(true))))
/// \code
///   !true
/// \endcode
AST_MATCHER_P(UnaryOperator, hasUnaryOperand,
              internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr *Operand = Node.getSubExpr())
    return InnerMatcher.matches(*Operand, Finder, Builder);
  return false;
}
/// Matches if the cast's source expression
/// or opaque value's source expression matches the given matcher.
///
/// Example 1: matches "a string"
/// (matcher = castExpr(hasSourceExpression(cxxConstructExpr())))
/// \code
/// class URL { URL(string); };
/// URL url = "a string";
/// \endcode
///
/// Example 2: matches 'b' (matcher =
/// opaqueValueExpr(hasSourceExpression(implicitCastExpr(declRefExpr())))
/// \code
/// int a = b ?: 1;
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasSourceExpression,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(CastExpr,
                                                          OpaqueValueExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  // GetSourceExpressionMatcher dispatches on the node type (getSubExpr vs
  // getSourceExpr).
  const Expr *const SubExpression =
      internal::GetSourceExpressionMatcher<NodeType>::get(Node);
  return (SubExpression != nullptr &&
          InnerMatcher.matches(*SubExpression, Finder, Builder));
}
/// Matches casts that has a given cast kind.
///
/// Example: matches the implicit cast around \c 0
/// (matcher = castExpr(hasCastKind(CK_NullToPointer)))
/// \code
///   int *p = 0;
/// \endcode
///
/// If the matcher is used from clang-query, CastKind parameter
/// should be passed as a quoted string. e.g., ofKind("CK_NullToPointer").
AST_MATCHER_P(CastExpr, hasCastKind, CastKind, Kind) {
  return Node.getCastKind() == Kind;
}
/// Matches casts whose destination type matches a given matcher.
///
/// (Note: Clang's AST refers to other conversions as "casts" too, and calls
/// actual casts "explicit" casts.)
AST_MATCHER_P(ExplicitCastExpr, hasDestinationType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Uses the type as written in the source, not the semantic type.
  const QualType NodeType = Node.getTypeAsWritten();
  return InnerMatcher.matches(NodeType, Finder, Builder);
}
/// Matches implicit casts whose destination type matches a given
/// matcher.
///
/// FIXME: Unit test this matcher
AST_MATCHER_P(ImplicitCastExpr, hasImplicitDestinationType,
              internal::Matcher<QualType>, InnerMatcher) {
  return InnerMatcher.matches(Node.getType(), Finder, Builder);
}
/// Matches RecordDecl object that are spelled with "struct."
///
/// Example matches S, but not C or U.
/// \code
///   struct S {};
///   class C {};
///   union U {};
/// \endcode
AST_MATCHER(RecordDecl, isStruct) {
  return Node.isStruct();
}
/// Matches RecordDecl object that are spelled with "union."
///
/// Example matches U, but not C or S.
/// \code
///   struct S {};
///   class C {};
///   union U {};
/// \endcode
AST_MATCHER(RecordDecl, isUnion) {
  return Node.isUnion();
}
/// Matches RecordDecl object that are spelled with "class."
///
/// Example matches C, but not S or U.
/// \code
///   struct S {};
///   class C {};
///   union U {};
/// \endcode
AST_MATCHER(RecordDecl, isClass) {
  // Distinguishes by the keyword used, not by member accessibility.
  return Node.isClass();
}
/// Matches the true branch expression of a conditional operator.
///
/// Example 1 (conditional ternary operator): matches a
/// \code
///   condition ? a : b
/// \endcode
///
/// Example 2 (conditional binary operator): matches opaqueValueExpr(condition)
/// \code
///   condition ?: b
/// \endcode
AST_MATCHER_P(AbstractConditionalOperator, hasTrueExpression,
              internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr *TrueBranch = Node.getTrueExpr())
    return InnerMatcher.matches(*TrueBranch, Finder, Builder);
  return false;
}
/// Matches the false branch expression of a conditional operator
/// (binary or ternary).
///
/// Example matches b
/// \code
///   condition ? a : b
///   condition ?: b
/// \endcode
AST_MATCHER_P(AbstractConditionalOperator, hasFalseExpression,
              internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr *FalseBranch = Node.getFalseExpr())
    return InnerMatcher.matches(*FalseBranch, Finder, Builder);
  return false;
}
/// Matches if a declaration has a body attached.
///
/// Example matches A, va, fa
/// \code
///   class A {};
///   class B;  // Doesn't match, as it has no body.
///   int va;
///   extern int vb;  // Doesn't match, as it doesn't define the variable.
///   void fa() {}
///   void fb();  // Doesn't match, as it has no body.
///   @interface X
///   - (void)ma; // Doesn't match, interface is declaration.
///   @end
///   @implementation X
///   - (void)ma {}
///   @end
/// \endcode
///
/// Usable as: Matcher<TagDecl>, Matcher<VarDecl>, Matcher<FunctionDecl>,
///   Matcher<ObjCMethodDecl>
AST_POLYMORPHIC_MATCHER(isDefinition,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(TagDecl, VarDecl,
                                                        ObjCMethodDecl,
                                                        FunctionDecl)) {
  // Checks this specific redeclaration, not whether any definition exists.
  return Node.isThisDeclarationADefinition();
}
/// Matches if a function declaration is variadic.
///
/// Example matches f, but not g or h. The function i will not match, even when
/// compiled in C mode.
/// \code
///   void f(...);
///   void g(int);
///   template <typename... Ts> void h(Ts...);
///   void i();
/// \endcode
AST_MATCHER(FunctionDecl, isVariadic) {
  // C-style ellipsis variadics only; parameter packs do not count.
  return Node.isVariadic();
}
/// Matches the class declaration that the given method declaration
/// belongs to.
///
/// FIXME: Generalize this for other kinds of declarations.
/// FIXME: What other kind of declarations would we need to generalize
/// this to?
///
/// Example matches A() in the last line
///     (matcher = cxxConstructExpr(hasDeclaration(cxxMethodDecl(
///         ofClass(hasName("A"))))))
/// \code
///   class A {
///    public:
///     A();
///   };
///   A a = A();
/// \endcode
AST_MATCHER_P(CXXMethodDecl, ofClass,
              internal::Matcher<CXXRecordDecl>, InnerMatcher) {
  if (const CXXRecordDecl *Owner = Node.getParent())
    return InnerMatcher.matches(*Owner, Finder, Builder);
  return false;
}
/// Matches each method overridden by the given method. This matcher may
/// produce multiple matches.
///
/// Given
/// \code
///   class A { virtual void f(); };
///   class B : public A { void f(); };
///   class C : public B { void f(); };
/// \endcode
/// cxxMethodDecl(ofClass(hasName("C")),
///               forEachOverridden(cxxMethodDecl().bind("b"))).bind("d")
///   matches once, with "b" binding "A::f" and "d" binding "C::f" (Note
///   that B::f is not overridden by C::f).
///
/// The check can produce multiple matches in case of multiple inheritance, e.g.
/// \code
///   class A1 { virtual void f(); };
///   class A2 { virtual void f(); };
///   class C : public A1, public A2 { void f(); };
/// \endcode
/// cxxMethodDecl(ofClass(hasName("C")),
///               forEachOverridden(cxxMethodDecl().bind("b"))).bind("d")
///   matches twice, once with "b" binding "A1::f" and "d" binding "C::f", and
///   once with "b" binding "A2::f" and "d" binding "C::f".
AST_MATCHER_P(CXXMethodDecl, forEachOverridden,
              internal::Matcher<CXXMethodDecl>, InnerMatcher) {
  // Accumulate one binding set per matched overridden method, so each
  // produces its own match result.
  BoundNodesTreeBuilder Result;
  bool Matched = false;
  for (const auto *Overridden : Node.overridden_methods()) {
    // Branch off the current bindings so failures don't pollute *Builder.
    BoundNodesTreeBuilder OverriddenBuilder(*Builder);
    const bool OverriddenMatched =
        InnerMatcher.matches(*Overridden, Finder, &OverriddenBuilder);
    if (OverriddenMatched) {
      Matched = true;
      Result.addMatch(OverriddenBuilder);
    }
  }
  *Builder = std::move(Result);
  return Matched;
}
/// Matches if the given method declaration is virtual.
///
/// Given
/// \code
///   class A {
///    public:
///     virtual void x();
///   };
/// \endcode
///   matches A::x
AST_MATCHER(CXXMethodDecl, isVirtual) {
  // True for implicitly virtual overrides as well as explicit 'virtual'.
  return Node.isVirtual();
}
/// Matches if the given method declaration has an explicit "virtual".
///
/// Given
/// \code
///   class A {
///    public:
///     virtual void x();
///   };
///   class B : public A {
///    public:
///     void x();
///   };
/// \endcode
///   matches A::x but not B::x
AST_MATCHER(CXXMethodDecl, isVirtualAsWritten) {
  return Node.isVirtualAsWritten();
}
/// Matches if the given method or class declaration is final.
///
/// Given:
/// \code
///   class A final {};
///
///   struct B {
///     virtual void f();
///   };
///
///   struct C : B {
///     void f() final;
///   };
/// \endcode
/// matches A and C::f, but not B, C, or B::f
AST_POLYMORPHIC_MATCHER(isFinal,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl,
                                                        CXXMethodDecl)) {
  // 'final' is modeled as an implicit attribute; 'template' is required
  // because NodeType is a dependent type here.
  return Node.template hasAttr<FinalAttr>();
}
/// Matches if the given method declaration is pure.
///
/// Given
/// \code
///   class A {
///    public:
///     virtual void x() = 0;
///   };
/// \endcode
///   matches A::x
AST_MATCHER(CXXMethodDecl, isPure) {
  return Node.isPure();
}
/// Matches if the given method declaration is const.
///
/// Given
/// \code
/// struct A {
/// void foo() const;
/// void bar();
/// };
/// \endcode
///
/// cxxMethodDecl(isConst()) matches A::foo() but not A::bar()
AST_MATCHER(CXXMethodDecl, isConst) {
return Node.isConst();
}
/// Matches if the given method declaration declares a copy assignment
/// operator.
///
/// Given
/// \code
/// struct A {
/// A &operator=(const A &);
/// A &operator=(A &&);
/// };
/// \endcode
///
/// cxxMethodDecl(isCopyAssignmentOperator()) matches the first method but not
/// the second one.
AST_MATCHER(CXXMethodDecl, isCopyAssignmentOperator) {
return Node.isCopyAssignmentOperator();
}
/// Matches if the given method declaration declares a move assignment
/// operator.
///
/// Given
/// \code
/// struct A {
/// A &operator=(const A &);
/// A &operator=(A &&);
/// };
/// \endcode
///
/// cxxMethodDecl(isMoveAssignmentOperator()) matches the second method but not
/// the first one.
AST_MATCHER(CXXMethodDecl, isMoveAssignmentOperator) {
  // Delegates classification of copy vs. move assignment to the AST node.
  return Node.isMoveAssignmentOperator();
}
/// Matches if the given method declaration overrides another method.
///
/// Given
/// \code
/// class A {
/// public:
/// virtual void x();
/// };
/// class B : public A {
/// public:
/// virtual void x();
/// };
/// \endcode
/// matches B::x
AST_MATCHER(CXXMethodDecl, isOverride) {
  // A method overrides if it has at least one resolved overridden method, or
  // carries an explicit 'override' attribute (which also covers cases where
  // overridden methods are not recorded on the node).
  return Node.size_overridden_methods() > 0 || Node.hasAttr<OverrideAttr>();
}
/// Matches method declarations that are user-provided.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &) = default; // #2
/// S(S &&) = delete; // #3
/// };
/// \endcode
/// cxxConstructorDecl(isUserProvided()) will match #1, but not #2 or #3.
AST_MATCHER(CXXMethodDecl, isUserProvided) {
  // "User-provided" means user-declared and neither defaulted nor deleted on
  // its first declaration.
  return Node.isUserProvided();
}
/// Matches member expressions that are called with '->' as opposed
/// to '.'.
///
/// Member calls on the implicit this pointer match as called with '->'.
///
/// Given
/// \code
/// class Y {
/// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
/// template <class T> void f() { this->f<T>(); f<T>(); }
/// int a;
/// static int b;
/// };
/// template <class T>
/// class Z {
/// void x() { this->m; }
/// };
/// \endcode
/// memberExpr(isArrow())
/// matches this->x, x, y.x, a, this->b
/// cxxDependentScopeMemberExpr(isArrow())
/// matches this->m
/// unresolvedMemberExpr(isArrow())
/// matches this->f<T>, f<T>
AST_POLYMORPHIC_MATCHER(
    isArrow, AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
                                             CXXDependentScopeMemberExpr)) {
  // All three supported member-expression nodes expose isArrow(), which is
  // true for '->' accesses (including implicit 'this->' accesses).
  return Node.isArrow();
}
/// Matches QualType nodes that are of integer type.
///
/// Given
/// \code
/// void a(int);
/// void b(long);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isInteger())))
/// matches "a(int)", "b(long)", but not "c(double)".
AST_MATCHER(QualType, isInteger) {
  // QualType::operator-> forwards to the underlying Type.
  return Node->isIntegerType();
}
/// Matches QualType nodes that are of unsigned integer type.
///
/// Given
/// \code
/// void a(int);
/// void b(unsigned long);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isUnsignedInteger())))
/// matches "b(unsigned long)", but not "a(int)" and "c(double)".
AST_MATCHER(QualType, isUnsignedInteger) {
  // QualType::operator-> forwards to the underlying Type.
  return Node->isUnsignedIntegerType();
}
/// Matches QualType nodes that are of signed integer type.
///
/// Given
/// \code
/// void a(int);
/// void b(unsigned long);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isSignedInteger())))
/// matches "a(int)", but not "b(unsigned long)" and "c(double)".
AST_MATCHER(QualType, isSignedInteger) {
  // QualType::operator-> forwards to the underlying Type.
  return Node->isSignedIntegerType();
}
/// Matches QualType nodes that are of character type.
///
/// Given
/// \code
/// void a(char);
/// void b(wchar_t);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isAnyCharacter())))
/// matches "a(char)", "b(wchar_t)", but not "c(double)".
AST_MATCHER(QualType, isAnyCharacter) {
  // Covers all character types (e.g. char and wchar_t, per the doc example).
  return Node->isAnyCharacterType();
}
/// Matches QualType nodes that are of any pointer type; this includes
/// the Objective-C object pointer type, which is different despite being
/// syntactically similar.
///
/// Given
/// \code
/// int *i = nullptr;
///
/// @interface Foo
/// @end
/// Foo *f;
///
/// int j;
/// \endcode
/// varDecl(hasType(isAnyPointer()))
/// matches "int *i" and "Foo *f", but not "int j".
AST_MATCHER(QualType, isAnyPointer) {
  // Includes Objective-C object pointers in addition to ordinary pointers.
  return Node->isAnyPointerType();
}
/// Matches QualType nodes that are const-qualified, i.e., that
/// include "top-level" const.
///
/// Given
/// \code
/// void a(int);
/// void b(int const);
/// void c(const int);
/// void d(const int*);
/// void e(int const) {};
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isConstQualified())))
/// matches "void b(int const)", "void c(const int)" and
/// "void e(int const) {}". It does not match d as there
/// is no top-level const on the parameter type "const int *".
AST_MATCHER(QualType, isConstQualified) {
  // Checks the top-level const qualifier only; const behind a pointer or
  // reference does not count (see the doc example for 'd').
  return Node.isConstQualified();
}
/// Matches QualType nodes that are volatile-qualified, i.e., that
/// include "top-level" volatile.
///
/// Given
/// \code
/// void a(int);
/// void b(int volatile);
/// void c(volatile int);
/// void d(volatile int*);
/// void e(int volatile) {};
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isVolatileQualified())))
/// matches "void b(int volatile)", "void c(volatile int)" and
/// "void e(int volatile) {}". It does not match d as there
/// is no top-level volatile on the parameter type "volatile int *".
AST_MATCHER(QualType, isVolatileQualified) {
  // Checks the top-level volatile qualifier only; volatile behind a pointer
  // does not count (see the doc example for 'd').
  return Node.isVolatileQualified();
}
/// Matches QualType nodes that have local CV-qualifiers attached to
/// the node, not hidden within a typedef.
///
/// Given
/// \code
/// typedef const int const_int;
/// const_int i;
/// int *const j;
/// int *volatile k;
/// int m;
/// \endcode
/// \c varDecl(hasType(hasLocalQualifiers())) matches only \c j and \c k.
/// \c i is const-qualified but the qualifier is not local.
AST_MATCHER(QualType, hasLocalQualifiers) {
  // "Local" qualifiers are attached directly to this QualType node, not
  // hidden inside a typedef's underlying type.
  return Node.hasLocalQualifiers();
}
/// Matches a member expression where the member is matched by a
/// given matcher.
///
/// Given
/// \code
/// struct { int first, second; } first, second;
/// int i(second.first);
/// int j(first.second);
/// \endcode
/// memberExpr(member(hasName("first")))
/// matches second.first
/// but not first.second (because the member name there is "second").
AST_MATCHER_P(MemberExpr, member,
              internal::Matcher<ValueDecl>, InnerMatcher) {
  // Runs the inner matcher on the declaration the member expression refers
  // to (field, method, etc.).
  return InnerMatcher.matches(*Node.getMemberDecl(), Finder, Builder);
}
/// Matches a member expression where the object expression is matched by a
/// given matcher. Implicit object expressions are included; that is, it matches
/// use of implicit `this`.
///
/// Given
/// \code
/// struct X {
/// int m;
/// int f(X x) { x.m; return m; }
/// };
/// \endcode
/// memberExpr(hasObjectExpression(hasType(cxxRecordDecl(hasName("X")))))
/// matches `x.m`, but not `m`; however,
/// memberExpr(hasObjectExpression(hasType(pointsTo(
/// cxxRecordDecl(hasName("X"))))))
/// matches `m` (aka. `this->m`), but not `x.m`.
AST_POLYMORPHIC_MATCHER_P(
    hasObjectExpression,
    AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
                                    CXXDependentScopeMemberExpr),
    internal::Matcher<Expr>, InnerMatcher) {
  // For the dependent member-expression variants an implicit access has no
  // base expression to match against, so reject those up front.
  const auto *Unresolved = dyn_cast<UnresolvedMemberExpr>(&Node);
  if (Unresolved && Unresolved->isImplicitAccess())
    return false;
  const auto *Dependent = dyn_cast<CXXDependentScopeMemberExpr>(&Node);
  if (Dependent && Dependent->isImplicitAccess())
    return false;
  // Otherwise run the inner matcher on the object (base) expression.
  return InnerMatcher.matches(*Node.getBase(), Finder, Builder);
}
/// Matches any using shadow declaration.
///
/// Given
/// \code
/// namespace X { void b(); }
/// using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasName("b"))))
/// matches \code using X::b \endcode
AST_MATCHER_P(UsingDecl, hasAnyUsingShadowDecl,
              internal::Matcher<UsingShadowDecl>, InnerMatcher) {
  // Succeeds if any of the using-declaration's shadow declarations matches.
  return matchesFirstInPointerRange(InnerMatcher, Node.shadow_begin(),
                                    Node.shadow_end(), Finder, Builder);
}
/// Matches a using shadow declaration where the target declaration is
/// matched by the given matcher.
///
/// Given
/// \code
/// namespace X { int a; void b(); }
/// using X::a;
/// using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasTargetDecl(functionDecl())))
/// matches \code using X::b \endcode
/// but not \code using X::a \endcode
AST_MATCHER_P(UsingShadowDecl, hasTargetDecl,
              internal::Matcher<NamedDecl>, InnerMatcher) {
  // Runs the inner matcher on the declaration the shadow decl refers to.
  return InnerMatcher.matches(*Node.getTargetDecl(), Finder, Builder);
}
/// Matches template instantiations of function, class, or static
/// member variable template instantiations.
///
/// Given
/// \code
/// template <typename T> class X {}; class A {}; X<A> x;
/// \endcode
/// or
/// \code
/// template <typename T> class X {}; class A {}; template class X<A>;
/// \endcode
/// or
/// \code
/// template <typename T> class X {}; class A {}; extern template class X<A>;
/// \endcode
/// cxxRecordDecl(hasName("::X"), isTemplateInstantiation())
/// matches the template instantiation of X<A>.
///
/// But given
/// \code
/// template <typename T> class X {}; class A {};
/// template <> class X<A> {}; X<A> x;
/// \endcode
/// cxxRecordDecl(hasName("::X"), isTemplateInstantiation())
/// does not match, as X<A> is an explicit template specialization.
///
/// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
AST_POLYMORPHIC_MATCHER(isTemplateInstantiation,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl,
                                                        CXXRecordDecl)) {
  // Query the specialization kind once instead of three times. Implicit
  // instantiations and explicit instantiation definitions/declarations all
  // count; explicit specializations (TSK_ExplicitSpecialization) do not.
  const TemplateSpecializationKind Kind = Node.getTemplateSpecializationKind();
  return Kind == TSK_ImplicitInstantiation ||
         Kind == TSK_ExplicitInstantiationDefinition ||
         Kind == TSK_ExplicitInstantiationDeclaration;
}
/// Matches declarations that are template instantiations or are inside
/// template instantiations.
///
/// Given
/// \code
/// template<typename T> void A(T t) { T i; }
/// A(0);
/// A(0U);
/// \endcode
/// functionDecl(isInstantiated())
/// matches 'A(int) {...};' and 'A(unsigned) {...}'.
AST_MATCHER_FUNCTION(internal::Matcher<Decl>, isInstantiated) {
  // A declaration qualifies either by being a class/function template
  // instantiation itself, or by being nested inside one.
  auto Instantiation = decl(anyOf(cxxRecordDecl(isTemplateInstantiation()),
                                  functionDecl(isTemplateInstantiation())));
  return decl(anyOf(Instantiation, hasAncestor(Instantiation)));
}
/// Matches statements inside of a template instantiation.
///
/// Given
/// \code
/// int j;
/// template<typename T> void A(T t) { T i; j += 42;}
/// A(0);
/// A(0U);
/// \endcode
/// declStmt(isInTemplateInstantiation())
/// matches 'int i;' and 'unsigned i'.
/// unless(stmt(isInTemplateInstantiation()))
/// will NOT match j += 42; as it's shared between the template definition and
/// instantiation.
AST_MATCHER_FUNCTION(internal::Matcher<Stmt>, isInTemplateInstantiation) {
  // A statement is inside an instantiation if some ancestor declaration is a
  // class or function template instantiation.
  auto Instantiation = decl(anyOf(cxxRecordDecl(isTemplateInstantiation()),
                                  functionDecl(isTemplateInstantiation())));
  return stmt(hasAncestor(Instantiation));
}
/// Matches explicit template specializations of function, class, or
/// static member variable template instantiations.
///
/// Given
/// \code
/// template<typename T> void A(T t) { }
/// template<> void A(int N) { }
/// \endcode
/// functionDecl(isExplicitTemplateSpecialization())
/// matches the specialization A<int>().
///
/// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
AST_POLYMORPHIC_MATCHER(isExplicitTemplateSpecialization,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl,
                                                        CXXRecordDecl)) {
  // The exact complement of the instantiation kinds accepted by
  // isTemplateInstantiation(): only explicit specializations match.
  return (Node.getTemplateSpecializationKind() == TSK_ExplicitSpecialization);
}
/// Matches \c TypeLocs for which the given inner
/// QualType-matcher matches.
AST_MATCHER_FUNCTION_P_OVERLOAD(internal::BindableMatcher<TypeLoc>, loc,
                                internal::Matcher<QualType>, InnerMatcher, 0) {
  // Adapts a QualType matcher so it can be applied to TypeLoc nodes by
  // wrapping it in a TypeLocTypeMatcher.
  return internal::BindableMatcher<TypeLoc>(
      new internal::TypeLocTypeMatcher(InnerMatcher));
}
/// Matches type \c bool.
///
/// Given
/// \code
/// struct S { bool func(); };
/// \endcode
/// functionDecl(returns(booleanType()))
/// matches "bool func();"
AST_MATCHER(Type, booleanType) {
  // Matches the builtin bool type.
  return Node.isBooleanType();
}
/// Matches type \c void.
///
/// Given
/// \code
/// struct S { void func(); };
/// \endcode
/// functionDecl(returns(voidType()))
/// matches "void func();"
AST_MATCHER(Type, voidType) {
  // Matches the builtin void type.
  return Node.isVoidType();
}
// Convenience alias used by the extern type-matcher declarations below:
// a variadic matcher that dyn-casts Type nodes to the given subclass.
template <typename NodeType>
using AstTypeMatcher = internal::VariadicDynCastAllOfMatcher<Type, NodeType>;
/// Matches builtin Types.
///
/// Given
/// \code
/// struct A {};
/// A a;
/// int b;
/// float c;
/// bool d;
/// \endcode
/// builtinType()
/// matches "int b", "float c" and "bool d"
extern const AstTypeMatcher<BuiltinType> builtinType;
/// Matches all kinds of arrays.
///
/// Given
/// \code
/// int a[] = { 2, 3 };
/// int b[4];
/// void f() { int c[a[0]]; }
/// \endcode
/// arrayType()
/// matches "int a[]", "int b[4]" and "int c[a[0]]";
extern const AstTypeMatcher<ArrayType> arrayType;
/// Matches C99 complex types.
///
/// Given
/// \code
/// _Complex float f;
/// \endcode
/// complexType()
/// matches "_Complex float f"
extern const AstTypeMatcher<ComplexType> complexType;
/// Matches any real floating-point type (float, double, long double).
///
/// Given
/// \code
/// int i;
/// float f;
/// \endcode
/// realFloatingPointType()
/// matches "float f" but not "int i"
AST_MATCHER(Type, realFloatingPointType) {
  // "Real" floating point excludes complex types (see complexType() above).
  return Node.isRealFloatingType();
}
/// Matches arrays and C99 complex types that have a specific element
/// type.
///
/// Given
/// \code
/// struct A {};
/// A a[7];
/// int b[7];
/// \endcode
/// arrayType(hasElementType(builtinType()))
/// matches "int b[7]"
///
/// Usable as: Matcher<ArrayType>, Matcher<ComplexType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasElementType, getElement,
AST_POLYMORPHIC_SUPPORTED_TYPES(ArrayType,
ComplexType));
/// Matches C arrays with a specified constant size.
///
/// Given
/// \code
/// void() {
/// int a[2];
/// int b[] = { 2, 3 };
/// int c[b[0]];
/// }
/// \endcode
/// constantArrayType()
/// matches "int a[2]"
extern const AstTypeMatcher<ConstantArrayType> constantArrayType;
/// Matches nodes that have the specified size.
///
/// Given
/// \code
/// int a[42];
/// int b[2 * 21];
/// int c[41], d[43];
/// char *s = "abcd";
/// wchar_t *ws = L"abcd";
/// char *w = "a";
/// \endcode
/// constantArrayType(hasSize(42))
/// matches "int a[42]" and "int b[2 * 21]"
/// stringLiteral(hasSize(4))
/// matches "abcd", L"abcd"
AST_POLYMORPHIC_MATCHER_P(hasSize,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(ConstantArrayType,
                                                          StringLiteral),
                          unsigned, N) {
  // HasSizeMatcher is specialized per node type so that "size" means array
  // element count for ConstantArrayType and length for StringLiteral.
  return internal::HasSizeMatcher<NodeType>::hasSize(Node, N);
}
/// Matches C++ arrays whose size is a value-dependent expression.
///
/// Given
/// \code
/// template<typename T, int Size>
/// class array {
/// T data[Size];
/// };
/// \endcode
/// dependentSizedArrayType
/// matches "T data[Size]"
extern const AstTypeMatcher<DependentSizedArrayType> dependentSizedArrayType;
/// Matches C arrays with unspecified size.
///
/// Given
/// \code
/// int a[] = { 2, 3 };
/// int b[42];
/// void f(int c[]) { int d[a[0]]; };
/// \endcode
/// incompleteArrayType()
/// matches "int a[]" and "int c[]"
extern const AstTypeMatcher<IncompleteArrayType> incompleteArrayType;
/// Matches C arrays with a specified size that is not an
/// integer-constant-expression.
///
/// Given
/// \code
/// void f() {
/// int a[] = { 2, 3 }
/// int b[42];
/// int c[a[0]];
/// }
/// \endcode
/// variableArrayType()
/// matches "int c[a[0]]"
extern const AstTypeMatcher<VariableArrayType> variableArrayType;
/// Matches \c VariableArrayType nodes that have a specific size
/// expression.
///
/// Given
/// \code
/// void f(int b) {
/// int a[b];
/// }
/// \endcode
/// variableArrayType(hasSizeExpr(ignoringImpCasts(declRefExpr(to(
/// varDecl(hasName("b")))))))
/// matches "int a[b]"
AST_MATCHER_P(VariableArrayType, hasSizeExpr,
              internal::Matcher<Expr>, InnerMatcher) {
  // Runs the inner matcher on the VLA's size expression.
  return InnerMatcher.matches(*Node.getSizeExpr(), Finder, Builder);
}
/// Matches atomic types.
///
/// Given
/// \code
/// _Atomic(int) i;
/// \endcode
/// atomicType()
/// matches "_Atomic(int) i"
extern const AstTypeMatcher<AtomicType> atomicType;
/// Matches atomic types with a specific value type.
///
/// Given
/// \code
/// _Atomic(int) i;
/// _Atomic(float) f;
/// \endcode
/// atomicType(hasValueType(isInteger()))
/// matches "_Atomic(int) i"
///
/// Usable as: Matcher<AtomicType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasValueType, getValue,
AST_POLYMORPHIC_SUPPORTED_TYPES(AtomicType));
/// Matches types nodes representing C++11 auto types.
///
/// Given:
/// \code
/// auto n = 4;
/// int v[] = { 2, 3 }
/// for (auto i : v) { }
/// \endcode
/// autoType()
/// matches "auto n" and "auto i"
extern const AstTypeMatcher<AutoType> autoType;
/// Matches types nodes representing C++11 decltype(<expr>) types.
///
/// Given:
/// \code
/// short i = 1;
/// int j = 42;
/// decltype(i + j) result = i + j;
/// \endcode
/// decltypeType()
/// matches "decltype(i + j)"
extern const AstTypeMatcher<DecltypeType> decltypeType;
/// Matches \c AutoType nodes where the deduced type is a specific type.
///
/// Note: There is no \c TypeLoc for the deduced type and thus no
/// \c getDeducedLoc() matcher.
///
/// Given
/// \code
/// auto a = 1;
/// auto b = 2.0;
/// \endcode
/// autoType(hasDeducedType(isInteger()))
/// matches "auto a"
///
/// Usable as: Matcher<AutoType>
AST_TYPE_TRAVERSE_MATCHER(hasDeducedType, getDeducedType,
AST_POLYMORPHIC_SUPPORTED_TYPES(AutoType));
/// Matches \c DecltypeType nodes to find out the underlying type.
///
/// Given
/// \code
/// decltype(1) a = 1;
/// decltype(2.0) b = 2.0;
/// \endcode
/// decltypeType(hasUnderlyingType(isInteger()))
/// matches the type of "a"
///
/// Usable as: Matcher<DecltypeType>
AST_TYPE_TRAVERSE_MATCHER(hasUnderlyingType, getUnderlyingType,
AST_POLYMORPHIC_SUPPORTED_TYPES(DecltypeType));
/// Matches \c FunctionType nodes.
///
/// Given
/// \code
/// int (*f)(int);
/// void g();
/// \endcode
/// functionType()
/// matches "int (*f)(int)" and the type of "g".
extern const AstTypeMatcher<FunctionType> functionType;
/// Matches \c FunctionProtoType nodes.
///
/// Given
/// \code
/// int (*f)(int);
/// void g();
/// \endcode
/// functionProtoType()
/// matches "int (*f)(int)" and the type of "g" in C++ mode.
/// In C mode, "g" is not matched because it does not contain a prototype.
extern const AstTypeMatcher<FunctionProtoType> functionProtoType;
/// Matches \c ParenType nodes.
///
/// Given
/// \code
/// int (*ptr_to_array)[4];
/// int *array_of_ptrs[4];
/// \endcode
///
/// \c varDecl(hasType(pointsTo(parenType()))) matches \c ptr_to_array but not
/// \c array_of_ptrs.
extern const AstTypeMatcher<ParenType> parenType;
/// Matches \c ParenType nodes where the inner type is a specific type.
///
/// Given
/// \code
/// int (*ptr_to_array)[4];
/// int (*ptr_to_func)(int);
/// \endcode
///
/// \c varDecl(hasType(pointsTo(parenType(innerType(functionType()))))) matches
/// \c ptr_to_func but not \c ptr_to_array.
///
/// Usable as: Matcher<ParenType>
AST_TYPE_TRAVERSE_MATCHER(innerType, getInnerType,
AST_POLYMORPHIC_SUPPORTED_TYPES(ParenType));
/// Matches block pointer types, i.e. types syntactically represented as
/// "void (^)(int)".
///
/// The \c pointee is always required to be a \c FunctionType.
extern const AstTypeMatcher<BlockPointerType> blockPointerType;
/// Matches member pointer types.
/// Given
/// \code
/// struct A { int i; }
/// A::* ptr = A::i;
/// \endcode
/// memberPointerType()
/// matches "A::* ptr"
extern const AstTypeMatcher<MemberPointerType> memberPointerType;
/// Matches pointer types, but does not match Objective-C object pointer
/// types.
///
/// Given
/// \code
/// int *a;
/// int &b = *a;
/// int c = 5;
///
/// @interface Foo
/// @end
/// Foo *f;
/// \endcode
/// pointerType()
/// matches "int *a", but does not match "Foo *f".
extern const AstTypeMatcher<PointerType> pointerType;
/// Matches an Objective-C object pointer type, which is different from
/// a pointer type, despite being syntactically similar.
///
/// Given
/// \code
/// int *a;
///
/// @interface Foo
/// @end
/// Foo *f;
/// \endcode
/// pointerType()
/// matches "Foo *f", but does not match "int *a".
extern const AstTypeMatcher<ObjCObjectPointerType> objcObjectPointerType;
/// Matches both lvalue and rvalue reference types.
///
/// Given
/// \code
/// int *a;
/// int &b = *a;
/// int &&c = 1;
/// auto &d = b;
/// auto &&e = c;
/// auto &&f = 2;
/// int g = 5;
/// \endcode
///
/// \c referenceType() matches the types of \c b, \c c, \c d, \c e, and \c f.
extern const AstTypeMatcher<ReferenceType> referenceType;
/// Matches lvalue reference types.
///
/// Given:
/// \code
/// int *a;
/// int &b = *a;
/// int &&c = 1;
/// auto &d = b;
/// auto &&e = c;
/// auto &&f = 2;
/// int g = 5;
/// \endcode
///
/// \c lValueReferenceType() matches the types of \c b, \c d, and \c e. \c e is
/// matched since the type is deduced as int& by reference collapsing rules.
extern const AstTypeMatcher<LValueReferenceType> lValueReferenceType;
/// Matches rvalue reference types.
///
/// Given:
/// \code
/// int *a;
/// int &b = *a;
/// int &&c = 1;
/// auto &d = b;
/// auto &&e = c;
/// auto &&f = 2;
/// int g = 5;
/// \endcode
///
/// \c rValueReferenceType() matches the types of \c c and \c f. \c e is not
/// matched as it is deduced to int& by reference collapsing rules.
extern const AstTypeMatcher<RValueReferenceType> rValueReferenceType;
/// Narrows PointerType (and similar) matchers to those where the
/// \c pointee matches a given matcher.
///
/// Given
/// \code
/// int *a;
/// int const *b;
/// float const *f;
/// \endcode
/// pointerType(pointee(isConstQualified(), isInteger()))
/// matches "int const *b"
///
/// Usable as: Matcher<BlockPointerType>, Matcher<MemberPointerType>,
/// Matcher<PointerType>, Matcher<ReferenceType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(
pointee, getPointee,
AST_POLYMORPHIC_SUPPORTED_TYPES(BlockPointerType, MemberPointerType,
PointerType, ReferenceType));
/// Matches typedef types.
///
/// Given
/// \code
/// typedef int X;
/// \endcode
/// typedefType()
/// matches "typedef int X"
extern const AstTypeMatcher<TypedefType> typedefType;
/// Matches enum types.
///
/// Given
/// \code
/// enum C { Green };
/// enum class S { Red };
///
/// C c;
/// S s;
/// \endcode
///
/// \c enumType() matches the type of the variable declarations of both \c c and
/// \c s.
extern const AstTypeMatcher<EnumType> enumType;
/// Matches template specialization types.
///
/// Given
/// \code
/// template <typename T>
/// class C { };
///
/// template class C<int>; // A
/// C<char> var; // B
/// \endcode
///
/// \c templateSpecializationType() matches the type of the explicit
/// instantiation in \c A and the type of the variable declaration in \c B.
extern const AstTypeMatcher<TemplateSpecializationType>
templateSpecializationType;
/// Matches types nodes representing unary type transformations.
///
/// Given:
/// \code
/// typedef __underlying_type(T) type;
/// \endcode
/// unaryTransformType()
/// matches "__underlying_type(T)"
extern const AstTypeMatcher<UnaryTransformType> unaryTransformType;
/// Matches record types (e.g. structs, classes).
///
/// Given
/// \code
/// class C {};
/// struct S {};
///
/// C c;
/// S s;
/// \endcode
///
/// \c recordType() matches the type of the variable declarations of both \c c
/// and \c s.
extern const AstTypeMatcher<RecordType> recordType;
/// Matches tag types (record and enum types).
///
/// Given
/// \code
/// enum E {};
/// class C {};
///
/// E e;
/// C c;
/// \endcode
///
/// \c tagType() matches the type of the variable declarations of both \c e
/// and \c c.
extern const AstTypeMatcher<TagType> tagType;
/// Matches types specified with an elaborated type keyword or with a
/// qualified name.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// class C {};
///
/// class C c;
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType() matches the type of the variable declarations of both
/// \c c and \c d.
extern const AstTypeMatcher<ElaboratedType> elaboratedType;
/// Matches ElaboratedTypes whose qualifier, a NestedNameSpecifier,
/// matches \c InnerMatcher if the qualifier exists.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType(hasQualifier(hasPrefix(specifiesNamespace(hasName("N"))))
/// matches the type of the variable declaration of \c d.
AST_MATCHER_P(ElaboratedType, hasQualifier,
              internal::Matcher<NestedNameSpecifier>, InnerMatcher) {
  // An elaborated type without a qualifier (e.g. just 'class C') never
  // matches; otherwise defer to the inner matcher.
  const NestedNameSpecifier *Qualifier = Node.getQualifier();
  if (!Qualifier)
    return false;
  return InnerMatcher.matches(*Qualifier, Finder, Builder);
}
/// Matches ElaboratedTypes whose named type matches \c InnerMatcher.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType(namesType(recordType(
/// hasDeclaration(namedDecl(hasName("D")))))) matches the type of the variable
/// declaration of \c d.
AST_MATCHER_P(ElaboratedType, namesType, internal::Matcher<QualType>,
              InnerMatcher) {
  // Runs the inner matcher on the type the elaborated type refers to.
  return InnerMatcher.matches(Node.getNamedType(), Finder, Builder);
}
/// Matches types that represent the result of substituting a type for a
/// template type parameter.
///
/// Given
/// \code
/// template <typename T>
/// void F(T t) {
/// int i = 1 + t;
/// }
/// \endcode
///
/// \c substTemplateTypeParmType() matches the type of 't' but not '1'
extern const AstTypeMatcher<SubstTemplateTypeParmType>
substTemplateTypeParmType;
/// Matches template type parameter substitutions that have a replacement
/// type that matches the provided matcher.
///
/// Given
/// \code
/// template <typename T>
/// double F(T t);
/// int i;
/// double j = F(i);
/// \endcode
///
/// \c substTemplateTypeParmType(hasReplacementType(type())) matches int
AST_TYPE_TRAVERSE_MATCHER(
hasReplacementType, getReplacementType,
AST_POLYMORPHIC_SUPPORTED_TYPES(SubstTemplateTypeParmType));
/// Matches template type parameter types.
///
/// Example matches T, but not int.
/// (matcher = templateTypeParmType())
/// \code
/// template <typename T> void f(int i);
/// \endcode
extern const AstTypeMatcher<TemplateTypeParmType> templateTypeParmType;
/// Matches injected class name types.
///
/// Example matches S s, but not S<T> s.
/// (matcher = parmVarDecl(hasType(injectedClassNameType())))
/// \code
/// template <typename T> struct S {
/// void f(S s);
/// void g(S<T> s);
/// };
/// \endcode
extern const AstTypeMatcher<InjectedClassNameType> injectedClassNameType;
/// Matches decayed type
/// Example matches i[] in declaration of f.
/// (matcher = valueDecl(hasType(decayedType(hasDecayedType(pointerType())))))
/// Example matches i[1].
/// (matcher = expr(hasType(decayedType(hasDecayedType(pointerType())))))
/// \code
/// void f(int i[]) {
/// i[1] = 0;
/// }
/// \endcode
extern const AstTypeMatcher<DecayedType> decayedType;
/// Matches the decayed type, whose decayed type matches \c InnerMatcher
AST_MATCHER_P(DecayedType, hasDecayedType, internal::Matcher<QualType>,
              InnerType) {
  // Runs the inner matcher on the type the original type decayed to
  // (e.g. the pointer type an array parameter decays into).
  return InnerType.matches(Node.getDecayedType(), Finder, Builder);
}
/// Matches declarations whose declaration context, interpreted as a
/// Decl, matches \c InnerMatcher.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// \endcode
///
/// \c cxxRecordDecl(hasDeclContext(namedDecl(hasName("M")))) matches the
/// declaration of \c class \c D.
AST_MATCHER_P(Decl, hasDeclContext, internal::Matcher<Decl>, InnerMatcher) {
  // Declarations without a context (e.g. the translation unit itself) never
  // match; otherwise reinterpret the context as a Decl and defer inward.
  const DeclContext *Context = Node.getDeclContext();
  if (Context == nullptr)
    return false;
  return InnerMatcher.matches(*Decl::castFromDeclContext(Context), Finder,
                              Builder);
}
/// Matches nested name specifiers.
///
/// Given
/// \code
/// namespace ns {
/// struct A { static void f(); };
/// void A::f() {}
/// void g() { A::f(); }
/// }
/// ns::A a;
/// \endcode
/// nestedNameSpecifier()
/// matches "ns::" and both "A::"
extern const internal::VariadicAllOfMatcher<NestedNameSpecifier>
nestedNameSpecifier;
/// Same as \c nestedNameSpecifier but matches \c NestedNameSpecifierLoc.
extern const internal::VariadicAllOfMatcher<NestedNameSpecifierLoc>
nestedNameSpecifierLoc;
/// Matches \c NestedNameSpecifierLocs for which the given inner
/// NestedNameSpecifier-matcher matches.
AST_MATCHER_FUNCTION_P_OVERLOAD(
    internal::BindableMatcher<NestedNameSpecifierLoc>, loc,
    internal::Matcher<NestedNameSpecifier>, InnerMatcher, 1) {
  // Adapts a NestedNameSpecifier matcher so it can be applied to
  // NestedNameSpecifierLoc nodes via the generic LocMatcher wrapper.
  return internal::BindableMatcher<NestedNameSpecifierLoc>(
      new internal::LocMatcher<NestedNameSpecifierLoc, NestedNameSpecifier>(
          InnerMatcher));
}
/// Matches nested name specifiers that specify a type matching the
/// given \c QualType matcher without qualifiers.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifier(specifiesType(
/// hasDeclaration(cxxRecordDecl(hasName("A")))
/// ))
/// matches "A::"
AST_MATCHER_P(NestedNameSpecifier, specifiesType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Only type specifiers qualify (namespace/global specifiers have no type).
  // The matched QualType is built without qualifiers, per the matcher's doc.
  const auto *SpecifiedType = Node.getAsType();
  if (SpecifiedType == nullptr)
    return false;
  return InnerMatcher.matches(QualType(SpecifiedType, 0), Finder, Builder);
}
/// Matches nested name specifier locs that specify a type matching the
/// given \c TypeLoc.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifierLoc(specifiesTypeLoc(loc(type(
/// hasDeclaration(cxxRecordDecl(hasName("A")))))))
/// matches "A::"
AST_MATCHER_P(NestedNameSpecifierLoc, specifiesTypeLoc,
              internal::Matcher<TypeLoc>, InnerMatcher) {
  // Invalid (null) locs never match.
  if (!Node)
    return false;
  // Only specifiers that name a type have a TypeLoc to match against.
  if (!Node.getNestedNameSpecifier()->getAsType())
    return false;
  return InnerMatcher.matches(Node.getTypeLoc(), Finder, Builder);
}
/// Matches on the prefix of a \c NestedNameSpecifier.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifier(hasPrefix(specifiesType(asString("struct A")))) and
/// matches "A::"
AST_MATCHER_P_OVERLOAD(NestedNameSpecifier, hasPrefix,
                       internal::Matcher<NestedNameSpecifier>, InnerMatcher,
                       0) {
  // The outermost specifier (e.g. "A::" in "A::B::C") has no prefix and
  // therefore never matches.
  const NestedNameSpecifier *NextNode = Node.getPrefix();
  if (!NextNode)
    return false;
  return InnerMatcher.matches(*NextNode, Finder, Builder);
}
/// Matches on the prefix of a \c NestedNameSpecifierLoc.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifierLoc(hasPrefix(loc(specifiesType(asString("struct A")))))
/// matches "A::"
AST_MATCHER_P_OVERLOAD(NestedNameSpecifierLoc, hasPrefix,
                       internal::Matcher<NestedNameSpecifierLoc>, InnerMatcher,
                       1) {
  // NestedNameSpecifierLoc is a value type; an invalid (null) prefix loc
  // converts to false, meaning the outermost specifier never matches.
  NestedNameSpecifierLoc NextNode = Node.getPrefix();
  if (!NextNode)
    return false;
  return InnerMatcher.matches(NextNode, Finder, Builder);
}
/// Matches nested name specifiers that specify a namespace matching the
/// given namespace matcher.
///
/// Given
/// \code
/// namespace ns { struct A {}; }
/// ns::A a;
/// \endcode
/// nestedNameSpecifier(specifiesNamespace(hasName("ns")))
/// matches "ns::"
AST_MATCHER_P(NestedNameSpecifier, specifiesNamespace,
              internal::Matcher<NamespaceDecl>, InnerMatcher) {
  // Only namespace specifiers qualify (type/global specifiers return null).
  const auto *Namespace = Node.getAsNamespace();
  if (Namespace == nullptr)
    return false;
  return InnerMatcher.matches(*Namespace, Finder, Builder);
}
/// Overloads for the \c equalsNode matcher.
/// FIXME: Implement for other node types.
/// @{
/// Matches if a node equals another node.
///
/// \c Decl has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Decl, equalsNode, const Decl*, Other, 0) {
  // Pointer-identity comparison; Decl nodes are unique within an AST.
  return &Node == Other;
}
/// Matches if a node equals another node.
///
/// \c Stmt has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Stmt, equalsNode, const Stmt*, Other, 1) {
  // Pointer-identity comparison; Stmt nodes are unique within an AST.
  return &Node == Other;
}
/// Matches if a node equals another node.
///
/// \c Type has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Type, equalsNode, const Type*, Other, 2) {
  // Pointer-identity comparison; Type nodes are uniqued within an AST.
  return &Node == Other;
}
/// @}
/// Matches each case or default statement belonging to the given switch
/// statement. This matcher may produce multiple matches.
///
/// Given
/// \code
/// switch (1) { case 1: case 2: default: switch (2) { case 3: case 4: ; } }
/// \endcode
/// switchStmt(forEachSwitchCase(caseStmt().bind("c"))).bind("s")
/// matches four times, with "c" binding each of "case 1:", "case 2:",
/// "case 3:" and "case 4:", and "s" respectively binding "switch (1)",
/// "switch (1)", "switch (2)" and "switch (2)".
AST_MATCHER_P(SwitchStmt, forEachSwitchCase, internal::Matcher<SwitchCase>,
              InnerMatcher) {
  // FIXME: getSwitchCaseList() does not necessarily guarantee a stable
  // iteration order. We should use the more general iterating matchers once
  // they are capable of expressing this matcher (for example, it should ignore
  // case statements belonging to nested switch statements).
  BoundNodesTreeBuilder Aggregated;
  bool AnyMatched = false;
  // Walk the intrusive linked list of case/default labels, collecting the
  // bindings of every label the inner matcher accepts.
  for (const SwitchCase *Case = Node.getSwitchCaseList(); Case != nullptr;
       Case = Case->getNextSwitchCase()) {
    BoundNodesTreeBuilder CaseBindings(*Builder);
    if (InnerMatcher.matches(*Case, Finder, &CaseBindings)) {
      AnyMatched = true;
      Aggregated.addMatch(CaseBindings);
    }
  }
  // Replace the caller's bindings with the union of all successful matches.
  *Builder = std::move(Aggregated);
  return AnyMatched;
}
/// Matches each initializer in a constructor's member-initializer list;
/// a single constructor may therefore produce several matches.
///
/// Given
/// \code
/// class A { A() : i(42), j(42) {} int i; int j; };
/// \endcode
/// cxxConstructorDecl(forEachConstructorInitializer(
/// forField(decl().bind("x"))
/// ))
/// will trigger two matches, binding for 'i' and 'j' respectively.
AST_MATCHER_P(CXXConstructorDecl, forEachConstructorInitializer,
              internal::Matcher<CXXCtorInitializer>, InnerMatcher) {
  BoundNodesTreeBuilder Matches;
  bool FoundAny = false;
  for (const auto *Init : Node.inits()) {
    // Match against a private copy of the bindings and keep only the copies
    // for the initializers the inner matcher accepted.
    BoundNodesTreeBuilder InitBindings(*Builder);
    if (InnerMatcher.matches(*Init, Finder, &InitBindings)) {
      FoundAny = true;
      Matches.addMatch(InitBindings);
    }
  }
  *Builder = std::move(Matches);
  return FoundAny;
}
/// Matches constructor declarations that are copy constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isCopyConstructor()) will match #2, but not #1 or #3.
AST_MATCHER(CXXConstructorDecl, isCopyConstructor) {
// Defers to the AST node's own classification of the constructor.
return Node.isCopyConstructor();
}
/// Matches constructor declarations that are move constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isMoveConstructor()) will match #3, but not #1 or #2.
AST_MATCHER(CXXConstructorDecl, isMoveConstructor) {
// Defers to the AST node's own classification of the constructor.
return Node.isMoveConstructor();
}
/// Matches constructor declarations that are default constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isDefaultConstructor()) will match #1, but not #2 or #3.
AST_MATCHER(CXXConstructorDecl, isDefaultConstructor) {
// Defers to the AST node's own classification of the constructor.
return Node.isDefaultConstructor();
}
/// Matches constructors that delegate to another constructor.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(int) {} // #2
/// S(S &&) : S() {} // #3
/// };
/// S::S() : S(0) {} // #4
/// \endcode
/// cxxConstructorDecl(isDelegatingConstructor()) will match #3 and #4, but not
/// #1 or #2.
AST_MATCHER(CXXConstructorDecl, isDelegatingConstructor) {
// Defers to the AST node's own classification of the constructor.
return Node.isDelegatingConstructor();
}
/// Matches constructor, conversion function, and deduction guide declarations
/// that have an explicit specifier if this explicit specifier is resolved to
/// true.
///
/// Given
/// \code
/// template<bool b>
/// struct S {
/// S(int); // #1
/// explicit S(double); // #2
/// operator int(); // #3
/// explicit operator bool(); // #4
/// explicit(false) S(bool) // # 7
/// explicit(true) S(char) // # 8
/// explicit(b) S(S) // # 9
/// };
/// S(int) -> S<true> // #5
/// explicit S(double) -> S<false> // #6
/// \endcode
/// cxxConstructorDecl(isExplicit()) will match #2 and #8, but not #1, #7 or #9.
/// cxxConversionDecl(isExplicit()) will match #4, but not #3.
/// cxxDeductionGuideDecl(isExplicit()) will match #6, but not #5.
AST_POLYMORPHIC_MATCHER(isExplicit, AST_POLYMORPHIC_SUPPORTED_TYPES(
CXXConstructorDecl, CXXConversionDecl,
CXXDeductionGuideDecl)) {
// isExplicit() is only true when the explicit specifier resolves to true,
// so explicit(false) and dependent explicit(b) do not match (see doc above).
return Node.isExplicit();
}
/// Matches the expression of an explicit specifier when one is present in the
/// given declaration.
///
/// Given
/// \code
/// template<bool b>
/// struct S {
/// S(int); // #1
/// explicit S(double); // #2
/// operator int(); // #3
/// explicit operator bool(); // #4
/// explicit(false) S(bool) // # 7
/// explicit(true) S(char) // # 8
/// explicit(b) S(S) // # 9
/// };
/// S(int) -> S<true> // #5
/// explicit S(double) -> S<false> // #6
/// \endcode
/// cxxConstructorDecl(hasExplicitSpecifier(constantExpr())) will match #7, #8 and #9, but not #1 or #2.
/// cxxConversionDecl(hasExplicitSpecifier(constantExpr())) will not match #3 or #4.
/// cxxDeductionGuideDecl(hasExplicitSpecifier(constantExpr())) will not match #5 or #6.
AST_MATCHER_P(FunctionDecl, hasExplicitSpecifier, internal::Matcher<Expr>,
              InnerMatcher) {
  // Only explicit(expr) forms carry an expression; a plain "explicit" (and
  // the absence of any specifier) yields a null expression and never matches.
  const Expr *SpecExpr = ExplicitSpecifier::getFromDecl(&Node).getExpr();
  return SpecExpr != nullptr &&
         InnerMatcher.matches(*SpecExpr, Finder, Builder);
}
/// Matches function and namespace declarations carrying the inline keyword.
///
/// Given
/// \code
/// inline void f();
/// void g();
/// namespace n {
/// inline namespace m {}
/// }
/// \endcode
/// functionDecl(isInline()) will match ::f().
/// namespaceDecl(isInline()) will match n::m.
AST_POLYMORPHIC_MATCHER(isInline,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(NamespaceDecl,
                                                        FunctionDecl)) {
  // The two supported node types expose "inline" through different member
  // functions, so dispatch on the dynamic type of the node.  The types are
  // disjoint, so the order of the checks does not matter.
  if (const auto *NSD = dyn_cast<NamespaceDecl>(&Node))
    return NSD->isInline();
  if (const auto *FD = dyn_cast<FunctionDecl>(&Node))
    return FD->isInlineSpecified();
  llvm_unreachable("Not a valid polymorphic type");
}
/// Matches anonymous namespace declarations.
///
/// Given
/// \code
/// namespace n {
/// namespace {} // #1
/// }
/// \endcode
/// namespaceDecl(isAnonymous()) will match #1 but not ::n.
AST_MATCHER(NamespaceDecl, isAnonymous) {
// True only for unnamed "namespace { ... }" declarations.
return Node.isAnonymousNamespace();
}
/// Matches declarations in the namespace `std`, but not in nested namespaces.
///
/// Given
/// \code
/// class vector {};
/// namespace foo {
/// class vector {};
/// namespace std {
/// class vector {};
/// }
/// }
/// namespace std {
/// inline namespace __1 {
/// class vector {}; // #1
/// namespace experimental {
/// class vector {};
/// }
/// }
/// }
/// \endcode
/// cxxRecordDecl(hasName("vector"), isInStdNamespace()) will match only #1.
// Per the example above, declarations inside inline namespaces of std
// (e.g. std::__1) still count as being directly in std.
AST_MATCHER(Decl, isInStdNamespace) { return Node.isInStdNamespace(); }
/// If the given case statement does not use the GNU case range
/// extension, matches the constant given in the statement.
///
/// Given
/// \code
/// switch (1) { case 1: case 1+1: case 3 ... 4: ; }
/// \endcode
/// caseStmt(hasCaseConstant(integerLiteral()))
/// matches "case 1:"
AST_MATCHER_P(CaseStmt, hasCaseConstant, internal::Matcher<Expr>,
              InnerMatcher) {
  // A case using the GNU range extension ("case 3 ... 4:") carries an RHS
  // expression; such statements never match.  Otherwise, match the LHS.
  return Node.getRHS() == nullptr &&
         InnerMatcher.matches(*Node.getLHS(), Finder, Builder);
}
/// Matches a declaration that carries an attribute of the given kind.
///
/// Given
/// \code
/// __attribute__((device)) void f() { ... }
/// \endcode
/// decl(hasAttr(clang::attr::CUDADevice)) matches the function declaration of
/// f. If the matcher is used from clang-query, attr::Kind parameter should be
/// passed as a quoted string. e.g., hasAttr("attr::CUDADevice").
AST_MATCHER_P(Decl, hasAttr, attr::Kind, AttrKind) {
  // Linear scan of the declaration's attribute list for the requested kind.
  for (const auto *A : Node.attrs())
    if (A->getKind() == AttrKind)
      return true;
  return false;
}
/// Matches the return value expression of a return statement
///
/// Given
/// \code
/// return a + b;
/// \endcode
/// hasReturnValue(binaryOperator())
/// matches 'return a + b'
/// with binaryOperator()
/// matching 'a + b'
AST_MATCHER_P(ReturnStmt, hasReturnValue, internal::Matcher<Expr>,
              InnerMatcher) {
  // A bare "return;" has no value expression and therefore never matches.
  const Expr *RetValue = Node.getRetValue();
  return RetValue != nullptr &&
         InnerMatcher.matches(*RetValue, Finder, Builder);
}
/// Matches CUDA kernel call expression.
///
/// Example matches,
/// \code
/// kernel<<<i,j>>>();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CUDAKernelCallExpr>
cudaKernelCallExpr;
/// Matches expressions that resolve to a null pointer constant, such as
/// GNU's __null, C++11's nullptr, or C's NULL macro.
///
/// Given:
/// \code
/// void *v1 = NULL;
/// void *v2 = nullptr;
/// void *v3 = __null; // GNU extension
/// char *cp = (char *)0;
/// int *ip = 0;
/// int i = 0;
/// \endcode
/// expr(nullPointerConstant())
/// matches the initializer for v1, v2, v3, cp, and ip. Does not match the
/// initializer for i.
AST_MATCHER_FUNCTION(internal::Matcher<Expr>, nullPointerConstant) {
// The integerLiteral alternative only fires for a literal 0 whose parent
// expression has pointer type, so "int i = 0;" is excluded.
return anyOf(
gnuNullExpr(), cxxNullPtrLiteralExpr(),
integerLiteral(equals(0), hasParent(expr(hasType(pointerType())))));
}
/// Matches declaration of the function the statement belongs to
///
/// Given:
/// \code
/// F& operator=(const F& o) {
/// std::copy_if(o.begin(), o.end(), begin(), [](V v) { return v > 0; });
/// return *this;
/// }
/// \endcode
/// returnStmt(forFunction(hasName("operator=")))
/// matches 'return *this'
/// but does not match 'return v > 0'
AST_MATCHER_P(Stmt, forFunction, internal::Matcher<FunctionDecl>,
              InnerMatcher) {
  // Walk up the parent chain until a FunctionDecl or a LambdaExpr is found.
  // Statements inside a lambda belong to the lambda's call operator rather
  // than to the surrounding function.
  const auto &Parents = Finder->getASTContext().getParents(Node);
  llvm::SmallVector<ast_type_traits::DynTypedNode, 8> Stack(Parents.begin(),
                                                            Parents.end());
  while (!Stack.empty()) {
    // Copy the node out before popping: the previous code bound a reference
    // to Stack.back(), called pop_back(), and then pushed new elements into
    // the same vector while that reference was live -- the push_back writes
    // into (or reallocates away from) the referenced slot.
    ast_type_traits::DynTypedNode CurNode = Stack.pop_back_val();
    if (const auto *FuncDeclNode = CurNode.get<FunctionDecl>()) {
      if (InnerMatcher.matches(*FuncDeclNode, Finder, Builder))
        return true;
    } else if (const auto *LambdaExprNode = CurNode.get<LambdaExpr>()) {
      if (InnerMatcher.matches(*LambdaExprNode->getCallOperator(), Finder,
                               Builder))
        return true;
    } else {
      // Neither a function nor a lambda: keep climbing through all parents.
      for (const auto &Parent : Finder->getASTContext().getParents(CurNode))
        Stack.push_back(Parent);
    }
  }
  return false;
}
/// Matches a declaration that has external formal linkage.
///
/// Example matches only z (matcher = varDecl(hasExternalFormalLinkage()))
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// int z;
/// \endcode
///
/// Example matches f() because it has external formal linkage despite being
/// unique to the translation unit as though it has internal likage
/// (matcher = functionDecl(hasExternalFormalLinkage()))
///
/// \code
/// namespace {
/// void f() {}
/// }
/// \endcode
AST_MATCHER(NamedDecl, hasExternalFormalLinkage) {
// Formal linkage only: per the example above, a function in an anonymous
// namespace still reports external formal linkage.
return Node.hasExternalFormalLinkage();
}
/// Matches a declaration that has default arguments.
///
/// Example matches y (matcher = parmVarDecl(hasDefaultArgument()))
/// \code
/// void x(int val) {}
/// void y(int val = 0) {}
/// \endcode
AST_MATCHER(ParmVarDecl, hasDefaultArgument) {
// True when the parameter declaration carries a default argument expression.
return Node.hasDefaultArg();
}
/// Matches array new expressions.
///
/// Given:
/// \code
/// MyClass *p1 = new MyClass[10];
/// \endcode
/// cxxNewExpr(isArray())
/// matches the expression 'new MyClass[10]'.
AST_MATCHER(CXXNewExpr, isArray) {
// Distinguishes "new T[n]" from scalar "new T".
return Node.isArray();
}
/// Matches array new expressions with a given array size.
///
/// Given:
/// \code
/// MyClass *p1 = new MyClass[10];
/// \endcode
/// cxxNewExpr(hasArraySize(integerLiteral(equals(10))))
/// matches the expression 'new MyClass[10]'.
AST_MATCHER_P(CXXNewExpr, hasArraySize, internal::Matcher<Expr>, InnerMatcher) {
// NOTE(review): getArraySize() appears to be optional-like and its contained
// expression pointer may itself be null -- both levels are checked before
// the double dereference; confirm against the CXXNewExpr API in use.
return Node.isArray() && *Node.getArraySize() &&
InnerMatcher.matches(**Node.getArraySize(), Finder, Builder);
}
/// Matches a class declaration that is defined.
///
/// Example matches x (matcher = cxxRecordDecl(hasDefinition()))
/// \code
/// class x {};
/// class y;
/// \endcode
AST_MATCHER(CXXRecordDecl, hasDefinition) {
// True when a definition exists anywhere in the AST, not just on this decl.
return Node.hasDefinition();
}
/// Matches C++11 scoped enum declaration.
///
/// Example matches Y (matcher = enumDecl(isScoped()))
/// \code
/// enum X {};
/// enum class Y {};
/// \endcode
AST_MATCHER(EnumDecl, isScoped) {
// Matches "enum class"/"enum struct" declarations.
return Node.isScoped();
}
/// Matches a function declared with a trailing return type.
///
/// Example matches Y (matcher = functionDecl(hasTrailingReturn()))
/// \code
/// int X() {}
/// auto Y() -> int {}
/// \endcode
AST_MATCHER(FunctionDecl, hasTrailingReturn) {
// Only prototype function types record trailing-return information;
// any other function type yields false.
if (const auto *F = Node.getType()->getAs<FunctionProtoType>())
return F->hasTrailingReturn();
return false;
}
/// Matches expressions that match InnerMatcher that are possibly wrapped in an
/// elidable constructor.
///
/// In C++17 copy elidable constructors are no longer being
/// generated in the AST as it is not permitted by the standard. They are
/// however part of the AST in C++14 and earlier. Therefore, to write a matcher
/// that works in all language modes, the matcher has to skip elidable
/// constructor AST nodes if they appear in the AST. This matcher can be used to
/// skip those elidable constructors.
///
/// Given
///
/// \code
/// struct H {};
/// H G();
/// void f() {
/// H D = G();
/// }
/// \endcode
///
/// ``varDecl(hasInitializer(any(
/// ignoringElidableConstructorCall(callExpr()),
/// exprWithCleanups(ignoringElidableConstructorCall(callExpr()))))``
/// matches ``H D = G()``
AST_MATCHER_P(Expr, ignoringElidableConstructorCall,
              ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
  // An elidable copy/move construction wraps its source expression in a
  // MaterializeTemporaryExpr; in that case match against the materialized
  // temporary instead of the constructor call itself.
  if (const auto *CtorExpr = dyn_cast<CXXConstructExpr>(&Node))
    if (CtorExpr->isElidable())
      if (const auto *MaterializeTemp =
              dyn_cast<MaterializeTemporaryExpr>(CtorExpr->getArg(0)))
        return InnerMatcher.matches(*MaterializeTemp->GetTemporaryExpr(),
                                    Finder, Builder);
  // Not an elidable constructor: match the node directly.
  return InnerMatcher.matches(Node, Finder, Builder);
}
//----------------------------------------------------------------------------//
// OpenMP handling.
//----------------------------------------------------------------------------//
/// Matches any ``#pragma omp`` executable directive.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp taskyield
/// \endcode
///
/// ``ompExecutableDirective()`` matches ``omp parallel``,
/// ``omp parallel default(none)`` and ``omp taskyield``.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, OMPExecutableDirective>
ompExecutableDirective;
/// Matches standalone OpenMP directives,
/// i.e., directives that can't have a structured block.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// {}
/// #pragma omp taskyield
/// \endcode
///
/// ``ompExecutableDirective(isStandaloneDirective()))`` matches
/// ``omp taskyield``.
AST_MATCHER(OMPExecutableDirective, isStandaloneDirective) {
// Defers to the directive's own classification.
return Node.isStandaloneDirective();
}
/// Matches the Stmt AST node that is marked as being the structured-block
/// of an OpenMP executable directive.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// {}
/// \endcode
///
/// ``stmt(isOMPStructuredBlock()))`` matches ``{}``.
// The statement itself records whether it is a structured block.
AST_MATCHER(Stmt, isOMPStructuredBlock) { return Node.isOMPStructuredBlock(); }
/// Matches the structured-block of the OpenMP executable directive
///
/// Prerequisite: the executable directive must not be standalone directive.
/// If it is, it will never match.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// ;
/// #pragma omp parallel
/// {}
/// \endcode
///
/// ``ompExecutableDirective(hasStructuredBlock(nullStmt()))`` will match ``;``
AST_MATCHER_P(OMPExecutableDirective, hasStructuredBlock,
internal::Matcher<Stmt>, InnerMatcher) {
// Guard first: getStructuredBlock() must not be dereferenced for
// standalone directives.
if (Node.isStandaloneDirective())
return false; // Standalone directives have no structured blocks.
return InnerMatcher.matches(*Node.getStructuredBlock(), Finder, Builder);
}
/// Matches any clause in an OpenMP directive.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// \endcode
///
/// ``ompExecutableDirective(hasAnyClause(anything()))`` matches
/// ``omp parallel default(none)``.
AST_MATCHER_P(OMPExecutableDirective, hasAnyClause,
internal::Matcher<OMPClause>, InnerMatcher) {
// Delegates to a helper that tries each clause in order and succeeds on
// the first one the inner matcher accepts.
ArrayRef<OMPClause *> Clauses = Node.clauses();
return matchesFirstInPointerRange(InnerMatcher, Clauses.begin(),
Clauses.end(), Finder, Builder);
}
/// Matches OpenMP ``default`` clause.
///
/// Given
///
/// \code
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// #pragma omp parallel
/// \endcode
///
/// ``ompDefaultClause()`` matches ``default(none)`` and ``default(shared)``.
extern const internal::VariadicDynCastAllOfMatcher<OMPClause, OMPDefaultClause>
ompDefaultClause;
/// Matches if the OpenMP ``default`` clause has ``none`` kind specified.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// \endcode
///
/// ``ompDefaultClause(isNoneKind())`` matches only ``default(none)``.
AST_MATCHER(OMPDefaultClause, isNoneKind) {
// Compares the clause's kind against the OMPC_DEFAULT_none enumerator.
return Node.getDefaultKind() == OMPC_DEFAULT_none;
}
/// Matches if the OpenMP ``default`` clause has ``shared`` kind specified.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// \endcode
///
/// ``ompDefaultClause(isSharedKind())`` matches only ``default(shared)``.
AST_MATCHER(OMPDefaultClause, isSharedKind) {
// Compares the clause's kind against the OMPC_DEFAULT_shared enumerator.
return Node.getDefaultKind() == OMPC_DEFAULT_shared;
}
/// Matches if the OpenMP directive is allowed to contain the specified OpenMP
/// clause kind.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel for
/// #pragma omp for
/// \endcode
///
/// ``ompExecutableDirective(isAllowedToContainClauseKind(OMPC_default))``
/// matches ``omp parallel`` and ``omp parallel for``.
///
/// If the matcher is used from clang-query, ``OpenMPClauseKind`` parameter
/// should be passed as a quoted string. e.g.,
/// ``isAllowedToContainClauseKind("OMPC_default").``
AST_MATCHER_P(OMPExecutableDirective, isAllowedToContainClauseKind,
OpenMPClauseKind, CKind) {
// Pure capability check on the directive kind; does not inspect which
// clauses are actually present on this directive.
return isAllowedClauseForDirective(Node.getDirectiveKind(), CKind);
}
//----------------------------------------------------------------------------//
// End OpenMP handling.
//----------------------------------------------------------------------------//
} // namespace ast_matchers
} // namespace clang
#endif // LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
|
counters2omp.c |
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <sys/types.h>
#include <memory.h>
#include <malloc.h>
#include <papi.h>
#include <omp.h>
#define SIZE 1000
/* Multiplies two SIZE x SIZE random matrices with an OpenMP-parallel loop
 * while counting L1/L2 data-cache misses via PAPI.  Prints both miss counts
 * and exits 0 on success, 1 on any PAPI failure. */
int main(int argc, char **argv) {
  float matrixa[SIZE][SIZE], matrixb[SIZE][SIZE], mresult[SIZE][SIZE];
  int i, j, k;
  int events[2] = {PAPI_L1_DCM, PAPI_L2_DCM}, ret;
  long long values[2];

  /* Require at least two hardware counters before starting. */
  if (PAPI_num_counters() < 2) {
    fprintf(stderr, "No hardware counters here, or PAPI not supported.\n");
    exit(1);
  }
  if ((ret = PAPI_start_counters(events, 2)) != PAPI_OK) {
    fprintf(stderr, "PAPI failed to start counters: %s\n", PAPI_strerror(ret));
    exit(1);
  }

  /* Initialize the Matrix arrays.
   * The original flattened loop indexed matrixa[0][i] for i up to
   * SIZE*SIZE-1, which reads/writes past the end of the first row and is
   * undefined behavior in C.  Proper 2-D indexing below visits the elements
   * in the same row-major order, so each element receives the same rand()
   * value as before. */
  for (i = 0; i < SIZE; i++) {
    for (j = 0; j < SIZE; j++) {
      mresult[i][j] = 0.0;
      matrixa[i][j] = matrixb[i][j] = rand() * (float)1.1;
    }
  }

  /* Matrix-Matrix multiply.
   * The original code parallelized the k (inner-product) loop, so every
   * thread updated the same mresult[i][j] entries concurrently -- a data
   * race producing wrong results.  Parallelizing over i instead gives each
   * thread its own disjoint set of output rows, and the k-accumulation per
   * element keeps its original (ascending-k) order. */
  #pragma omp parallel for private (j,k) schedule (static)
  for (i = 0; i < SIZE; i++) {
    for (j = 0; j < SIZE; j++) {
      for (k = 0; k < SIZE; k++) {
        mresult[i][j] = mresult[i][j] + matrixa[i][k] * matrixb[k][j];
      }
    }
  }

  if ((ret = PAPI_read_counters(values, 2)) != PAPI_OK) {
    fprintf(stderr, "PAPI failed to read counters: %s\n", PAPI_strerror(ret));
    exit(1);
  }
  printf("Total fallas caché nivel 2 = %lld\n",values[1]);
  printf("Total fallas caché nivel 1 = %lld\n",values[0]);
  exit(0);
}
|
smd_nfw_omp.c | #include <omp.h>
#include <stdio.h>
#include <math.h>
int main()
{
//-------------------------------------------------------------------------
// SURFACE MASS DENSITY: SIGMA & DELTA-SIGMA
//
// by Jes Ford... August 2013
//
// Function for calculating the surface mass density of a lens
// (for kappa), and differential surface mass density (for gamma)
// according to the NFW profile. Optionally takes miscentering
// into account if the parameter sig_center is given; if not given
// in call to function, then code assumes no miscentering effects.
//
// This program uses parallel processing via openMP
//
// Compile (vn90) with:
// "gcc -o smd_nfw_omp smd_nfw_omp.c -fopenmp -lm"
//
// This program requires 2 input files:
//
// smd_in1.dat ... single column of {r}
// smd_in2.dat ... 4 columns of {rs, delta_c, rho_crit, sig_center}
//
// and gives 2 output files:
// sigma.dat ........ #Rbins columns by #lenses rows [(Msun/pc^2)]
// deltasigma.dat ... #Rbins columns by #lenses rows [(Msun/pc^2)]
//
//
// INPUT PARAMETERS FROM FILES:
// r ............ radii of interest [Mpc]
// rs ........... scale radius of cluster [Mpc]
// delta_c ...... concentration parameter (~characteristic overdensity)
// rho_crit ..... critical energy density [Msun/pc^3]
// (depends on lens z and cosmology)
// sig_center ... Gaussian spread of miscentering [Mpc]
//
// All input parameters are arrays of length = # lenses, except for
// r, which has length = # radial bins.
//
//-------------------------------------------------------------------------
// CHECK FILE LENGTHS AND READ IN ALL DATA
//open files of input {R_Mpc, r_scale, delta_c, rho_crit, sig_center}
// NOTE(review): fopen results are never checked; if either input file is
// missing, the fgets/fscanf calls below dereference NULL. Confirm the input
// files are guaranteed to exist before this program runs.
FILE *d1,*d2;
d1=fopen("smd_in1.dat", "r");
d2=fopen("smd_in2.dat", "r");
//find number of lines in each file and assign to number of objects
long int lines=0;
char check[100];
while((fgets(check, sizeof check, d1)) != NULL) lines++;
long int nbins=lines;
//printf("\nNumber of R bins: %d\n", nbins);
lines=0;
while((fgets(check, sizeof check, d2)) != NULL) lines++;
//printf("%s\n", check);
long int nlens=lines;
//printf("Number of Clusters: %d\n", nlens);
//input data arrays
//double data[nlens][3];
// NOTE(review): these are variable-length arrays on the stack, sized by the
// input files; very large catalogs could overflow the stack.
double r[nbins];
double rs[nlens], delta_c[nlens], rho_crit[nlens], sig_center[nlens];
//close and reopen files
fclose(d1);
fclose(d2);
d1=fopen("smd_in1.dat", "r");
d2=fopen("smd_in2.dat", "r");
//loop indices
long int i, j, k, m, a, b;
//read in {R_Mpc, r_scale, delta_c, rho_crit, sig_center}
for(a=0; a<nbins; a++)
{
fscanf(d1, "%lf", &r[a]);
}
for(a=0; a<nlens; a++)
{
fscanf(d2, "%lf %lf %lf %lf", &rs[a], &delta_c[a], &rho_crit[a], &sig_center[a]);
}
//close files
fclose(d1);
fclose(d2);
//declare output arrays
double sigma_nfw[nlens][nbins];
double deltasigma_nfw[nlens][nbins];
double mean_inside_sigma_nfw[nlens][nbins];
//for "approx=0" variable comparisons
double verysmall = pow(10.,-8.);
long int c1,c2,c3;
// The branch below is selected from the FIRST lens's sig_center only:
// miscentering is treated as all-or-nothing for the whole catalog.
if((sig_center[0]) == 0.)
//------------------------------------------------------------------------
// IF ASSUMING NO MISCENTERING...
{
//printf("\nAssuming Perfect Centers...\n");
double f[nlens], g[nlens], h[nlens];
double x, bigF, part_g, firstpart1, firstpart2;
for(j=0; j<nbins; j++)
{
c1=0;
c2=0;
c3=0;
for(i=0; i<nlens; i++)
{
x = r[j]/rs[i]; //dimensionless scalar (ratio)
// Three regimes of the NFW expressions: x < 1, x > 1, and x ~ 1; the
// analytic forms are singular at x == 1, hence the "verysmall" guard.
if((1.-x) >= verysmall)
{
c1++;
bigF = log((1./x)+sqrt((1./(x*x))-1.))/sqrt(1.-(x*x)); //log should be ln
firstpart1 = ((4./(x*x))+(2./(x*x-1.)))/sqrt(1.-(x*x));
part_g = firstpart1*log((1.+sqrt((1.-x)/(1.+x)))/(1.-sqrt((1.-x)/(1.+x))));
f[i] = (1.-bigF)/(x*x-1.);
g[i] = part_g + (4./(x*x))*log(x/2.)-(2./(x*x-1.));
h[i] = (bigF + log(x/2.))/(x*x);
}
else if((x-1.) >= verysmall)
{
c2++;
bigF=acos(1./x)/sqrt(x*x-1.);
firstpart2=(8./(x*x*sqrt(x*x-1.)))+(4./pow((x*x-1.),1.5));
part_g=firstpart2*atan(sqrt((x-1.)/(1.+x)));
f[i] = (1.-bigF)/(x*x-1.);
g[i] = part_g + (4./(x*x))*log(x/2.)-(2./(x*x-1.));
h[i] = (bigF + log(x/2.))/(x*x);
}
else
{
c3++;
f[i] = 1./3.;
g[i] = (10./3.)+4.*log(0.5);
h[i] = 1. + log(0.5);
}
//if((i == 0)) printf("\nf[i]: %lf\n", f[i]);
sigma_nfw[i][j] = 2.*rs[i]*delta_c[i]*rho_crit[i]*f[i];
//options below are equivalent! However both formulations break
//down for miscentering, so calculation is different for that block.
//deltasigma_nfw[i][j] = rs[i]*delta_c[i]*rho_crit[i]*g[i];
mean_inside_sigma_nfw[i][j] = 4.*rs[i]*delta_c[i]*rho_crit[i]*h[i];
deltasigma_nfw[i][j] = mean_inside_sigma_nfw[i][j] - sigma_nfw[i][j];
//if((i == 0)) printf("\ni, j, sigma_nfw[i][j]: %ld, %ld, %lf\n", i,j,sigma_nfw[i][j]);
//if((i == 0)) printf("\nrs[i], delta_c[i], rho_crit[i]: %lf, %lf, %lf\n", rs[i],delta_c[i],rho_crit[i]);
}
}
//printf("\nc1,c2,c3: %d, %d, %d\n", c1,c2,c3);
//printf("\nsigma_nfw[i][j]: %lf\n", sigma_nfw[i][j]);
}
else
//------------------------------------------------------------------------
//IF YOU ARE TAKING MISCENTERING IN ACCOUNT...
{
//printf("\nMiscentered Cluster Calculations...\n");
double f, g, h; //f,g,h are just scalars in this section
double x, bigF, part_g, firstpart1, firstpart2;
long int numRp=20; //precision of integration over r < min(Rbins)
long int numRc=300; //precision of integration over R_centoff
long int numTh=100; //precision of integration over theta
double maxsig=0.; //max miscentering sigma_centoff
for(i=0; i<nlens; i++)
if((sig_center[i]) > maxsig) maxsig=sig_center[i];
//printf("\nmaxsig[k]: %lf\n", maxsig);
//Rp is a composite of the numRp linear bins interior to
// min(r), and midpoints of r bins (the actual measurement bins)
double Rp[nbins+numRp-1];
double deltaRp[nbins+numRp-1];
for(j=0; j<(nbins+numRp-1); j++)
{
if(j<numRp)
{
deltaRp[j]=r[0]/numRp;
Rp[j]=(j+0.5)*deltaRp[j];
}
else if(j>=numRp)
{
deltaRp[j]=r[(j-numRp+1)]-r[(j-numRp)];
Rp[j]=(r[(j-numRp)]+r[(j-numRp+1)])/2.;
}
//printf("\nRp[j]: %lf\n", Rp[j]);
//printf("\ndeltaRp[j]: %lf\n", deltaRp[j]);
}
//R_c = R_centoff array spanning range of possible miscenterings
// (PofRc exponential drops to essentially zero, when Rc ~ 4*sig_center)
double Rc[numRc];
double PofRc[numRc][nlens]; //P(R_centoff) probability of miscentering offsets
for(k=0; k<numRc; k++)
{
Rc[k] = 4.*k*maxsig/numRc;
//printf("\nRc[k]: %lf\n", Rc[k]);
for(i=0; i<nlens; i++)
PofRc[k][i]=(Rc[k]/(sig_center[i]*sig_center[i]))*exp(-0.5*pow((Rc[k]/sig_center[i]),2.));
}
double theta[numTh];
for(m=0; m<numTh; m++)
theta[m] = 2.*M_PI*(m+1.)/numTh;
double dRc,dtheta;
dRc = (Rc[numRc-1]-Rc[0])/(numRc-1.); //differential spacing
dtheta=(theta[numTh-1]-theta[0])/(numTh-1.); //differential angle
//divide into nt=8 threads and parallel process...
long int nt=8; //number of threads ????????????????
long int tid; //thread id number: 0 to (nt-1)
double sigmaof_rgivenRc=0.;
double sigma_smoothed[nlens][nbins][nt];
double sigma_smoothed_Rp[nlens][nbins+numRp-1][nt];
double mean_inside_sigma_smoothed[nlens][nbins];
//Force all array values to zero
//(NECESSARY since they are ADDED TO below)
for(i=0; i<nlens; i++)
{
for(j=0; j<nbins; j++)
{
mean_inside_sigma_smoothed[i][j]=0.;
for(tid=0; tid<nt; tid++) sigma_smoothed[i][j][tid]=0.;
}
for(j=0; j<(nbins+numRp-1); j++)
for(tid=0; tid<nt; tid++) sigma_smoothed_Rp[i][j][tid]=0.;
}
//printf("\nBeginning PARALLEL processing...\n");
// BUGFIX: the scratch scalar f is assigned and read inside the parallel
// region but was missing from the private() clause, making it shared
// between threads -- a data race that could silently corrupt every
// accumulated sigma value. It is now private like x and bigF. (g and h
// are not touched inside the region.) Each thread writes only its own
// rows i and its own [tid] slice of the accumulation arrays, so with f
// private the loop is race-free.
#pragma omp parallel for private(tid, i, j, k, m, x, bigF, f, sigmaof_rgivenRc) num_threads(nt) schedule(dynamic)
for(i=0; i<nlens; i++) //lens loop, threads share execution of this
{
tid=omp_get_thread_num();
//----------------------------------------
// SIGMA CALCULATION
for(j=0; j<nbins; j++) //R bins loop
{
//if(i==0) printf("\nj = %d", j);
for(k=0; k<numRc; k++) //R_centoff loop
{
sigmaof_rgivenRc=0.;
for(m=0; m<numTh; m++) //theta loop
{
// x = r_offset/r_scale [note: instead of sqrt(abs(...)) doing 4th-root of square]
// THIS x is for sigma (different than below)
x = pow(pow((r[j]*r[j] + (Rc[k]*Rc[k]) - 2.*r[j]*Rc[k]*cos(theta[m])),2.),0.25)/rs[i];
if((1.-x) >= verysmall)
{
bigF = log((1./x)+sqrt((1./(x*x))-1.))/sqrt(1.-(x*x)); //log should be ln
f = (1.-bigF)/(x*x-1.);
}
else if((x-1.) >= verysmall)
{
bigF=acos(1./x)/sqrt(x*x-1.);
f = (1.-bigF)/(x*x-1.);
}
else f = 1./3.;
//CONVOLUTION: INTEGRAL OVER THETA
//EQ 7 (Johnston et al. 2007)
sigmaof_rgivenRc += (2.*rs[i]*delta_c[i])*(rho_crit[i]*f*(dtheta/2./M_PI));
//#pragma omp flush
} //end m loop
//INTEGRAL OVER R_centoff
//EQ 9 (Johnston et al. 2007, with his Rs -> my Rc)
sigma_smoothed[i][j][tid] += sigmaof_rgivenRc*PofRc[k][i]*dRc;
//if((i == 349) && (k == numRc-1) && (j == 0)) printf("\nsigma_smoothed[i][j][tid]: %lf\n", sigma_smoothed[i][j][tid]);
} //end k loop
} //end j loop
//----------------------------------------
// MEAN-SIGMA(<r) CALCULATION
for(j=0; j<(nbins+numRp-1); j++) //Rp (extended R) bins loop
{
//if(i==0) printf("\njp = %d", jp);
for(k=0; k<numRc; k++) //R_centoff loop
{
sigmaof_rgivenRc=0.;
for(m=0; m<numTh; m++) //theta loop
{
//#pragma omp flush
// x = r_offset/r_scale [note: instead of sqrt(abs(...)) doing 4th-root of square]
// THIS x is for calculating deltasigma from mean(sigma(<r))
x = pow(pow((Rp[j]*Rp[j] + (Rc[k]*Rc[k]) - 2.*Rp[j]*Rc[k]*cos(theta[m])),2.),0.25)/rs[i];
if((1.-x) >= verysmall)
{
bigF = log((1./x)+sqrt((1./(x*x))-1.))/sqrt(1.-(x*x)); //log should be ln
f = (1.-bigF)/(x*x-1.);
}
else if((x-1.) >= verysmall)
{
bigF=acos(1./x)/sqrt(x*x-1.);
f = (1.-bigF)/(x*x-1.);
}
else f = 1./3.;
//CONVOLUTION: INTEGRAL OVER THETA
//EQ 7 (Johnston et al. 2007)
sigmaof_rgivenRc += (2.*rs[i]*delta_c[i])*(rho_crit[i]*f*(dtheta/2./M_PI));
} //end m loop
//INTEGRAL OVER R_centoff
//EQ 9 (Johnston et al. 2007, with his Rs -> my Rc)
sigma_smoothed_Rp[i][j][tid] += sigmaof_rgivenRc*PofRc[k][i]*dRc;
} //end k loop
} //end j loop
//----------------------------------------
//printf("\ni,tid,sigma_smoothed[349][0][tid]: %ld %ld %lf \n", i, tid, sigma_smoothed[349][0][tid]);
} //end i loop
//printf("\nFinished PARALLEL processing.\n");
//for(tid=0; tid<nt; tid++) printf("\nsigma_smoothed (i=349,j=0): %lf", sigma_smoothed[349][0][tid]);
//for(tid=0; tid<nt; tid++) printf("\nsigma_smoothed_Rp (i=349,j=0): %lf", sigma_smoothed_Rp[349][0][tid]);
//COMBINE THREADS
for(i=0; i<nlens; i++)
{
for(j=0; j<nbins; j++)
{
sigma_nfw[i][j] = 0.;
deltasigma_nfw[i][j] = 0.;
mean_inside_sigma_nfw[i][j] = 0.;
for(tid=0; tid<nt; tid++)
{
//FINAL output (smoothed sigma)
sigma_nfw[i][j] += sigma_smoothed[i][j][tid];
//INTEGRAL OVER INSIDE (r<R)
//EQ 8 (George et al. 2012)
for(k=0; k<(j+numRp); k++)
mean_inside_sigma_smoothed[i][j] += (sigma_smoothed_Rp[i][k][tid]*Rp[k]*deltaRp[k])*(2./(r[j]*r[j]));
}
//FINAL output (smoothed deltasigma)
deltasigma_nfw[i][j] = mean_inside_sigma_smoothed[i][j] - sigma_nfw[i][j];
} //end j loop
} //end i loop
} //END MISCENTERING OPTION
//---------------------------------------------------------------------
// PRINT OUTPUT
//create and open files for writing
FILE *out1, *out2;
out1=fopen("sigma.dat", "w");
out2=fopen("deltasigma.dat", "w");
//print output data
for(a=0; a<nlens; a++)
{
for(b=0; b<nbins; b++)
{
fprintf(out1,"%lf ", sigma_nfw[a][b]);
fprintf(out2,"%lf ", deltasigma_nfw[a][b]);
}
fprintf(out1,"\n");
fprintf(out2,"\n");
}
//close files
fclose(out1);
fclose(out2);
//the end
//printf("\nsmd_nfw_omp FINISHED.\n");
return 0;
}
|
dfwavelet.c | /*
* Copyright 2013-2015 The Regents of the University of California.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
* 2013 Frank Ong <frankong@berkeley.edu>
* 2013 Martin Uecker, Pat Virtue, and Mark Murphy
*
*
* Ong F, Uecker M, Tariq U, Hsiao A, Alley MT, Vasanawala SS, Lustig M.
* Robust 4D Flow Denoising using Divergence-free Wavelet Transform,
* Magn Reson Med 2015; 73: 828-842.
*/
#define _GNU_SOURCE
#include <math.h>
#include <string.h>
#include <assert.h>
#include <complex.h>
#include "num/multind.h"
#include "misc/misc.h"
#include "dfwavelet.h"
#include "dfwavelet_impl.h"
#ifdef USE_CUDA
#include "dfwavelet_kernels.h"
#endif
#define str_eq(s1,s2) (!strcmp ((s1),(s2)))
/******** Header *********/
// Forward/inverse divergence-free wavelet transform and thresholding
// (CPU implementations).
static void dffwt3_cpu(struct dfwavelet_plan_s* plan, data_t* out_wcdf1,data_t* out_wcdf2,data_t* out_wcn, data_t* in_vx,data_t* in_vy,data_t* in_vz);
static void dfiwt3_cpu(struct dfwavelet_plan_s* plan, data_t* out_vx,data_t* out_vy,data_t* out_vz, data_t* in_wcdf1,data_t* in_wcdf2,data_t* in_wcn);
static void dfsoftthresh_cpu(struct dfwavelet_plan_s* plan,scalar_t dfthresh, scalar_t nthresh, data_t* out_wcdf1,data_t* out_wcdf2,data_t* out_wcn);
static void dfwavthresh3_cpu(struct dfwavelet_plan_s* plan,scalar_t dfthresh, scalar_t nthresh,data_t* out_vx,data_t* out_vy,data_t* out_vz,data_t* in_vx,data_t* in_vy,data_t* in_vz);
// Linear-combination helpers over wavelet coefficient buffers
// (non-static: also used outside this file).
void dflincomb_cpu(struct dfwavelet_plan_s* plan,data_t* wc1,data_t* wc2,data_t* wc3);
void dfunlincomb_cpu(struct dfwavelet_plan_s* plan,data_t* wc1,data_t* wc2,data_t* wc3);
// Single-component wavelet transforms and (random-)shift helpers.
static void fwt3_cpu(struct dfwavelet_plan_s* plan, data_t* out, data_t* in,int dir);
static void iwt3_cpu(struct dfwavelet_plan_s* plan, data_t* out, data_t* in,int dir);
static void circshift_cpu(struct dfwavelet_plan_s* plan, data_t *data);
static void circunshift_cpu(struct dfwavelet_plan_s* plan, data_t *data);
// Separable 3-D convolution with down-/up-sampling, plus scaling.
static void conv_down_3d(data_t *out, data_t *in, int size1, int skip1, int size2, int skip2, int size3, int skip3, scalar_t *filter, int filterLen);
static void conv_up_3d(data_t *out, data_t *in, int size1, int skip1, int size2, int skip2, int size3, int skip3, scalar_t *filter, int filterLen);
static void mult(data_t* in,scalar_t scalar,int maxInd);
// Plan-construction helpers used by prepare_dfwavelet_plan().
static void create_numLevels(struct dfwavelet_plan_s* plan);
static void create_wavelet_sizes(struct dfwavelet_plan_s* plan);
static void create_wavelet_filters(struct dfwavelet_plan_s* plan);
static void get_noise_amp (struct dfwavelet_plan_s* plan);
/* Allocate and initialize a dfwavelet plan.
 *
 * numdims: number of image dimensions (all per-dimension arrays below have
 *          this length); the 3D transform code indexes dims 0..2.
 * imSize:  image size per dimension
 * minSize: minimal allowed coarse band size per dimension (bounds numLevels)
 * res:     per-dimension voxel resolution, used to scale components in
 *          dffwt3_cpu/dfiwt3_cpu
 * use_gpu: 0 = CPU; 1/2 = GPU paths (see dfwavelet_forward)
 *
 * Returns a plan that must be released with dfwavelet_free().
 */
struct dfwavelet_plan_s* prepare_dfwavelet_plan(int numdims, long* imSize, long* minSize, data_t* res, int use_gpu)
{
	struct dfwavelet_plan_s* plan = (struct dfwavelet_plan_s*) malloc(sizeof(struct dfwavelet_plan_s));

	plan->use_gpu = use_gpu;
	plan->numdims = numdims;
	plan->imSize = (long*) malloc(sizeof(long)*numdims);
	plan->minSize = (long*) malloc(sizeof(long)*numdims);
	plan->res = (data_t*) malloc(sizeof(data_t)*numdims);
	plan->percentZero = -1;
	plan->noiseAmp = NULL;

	/* FIX: these fields were previously left uninitialized by malloc().
	 * circshift_cpu() reads plan->randshift and dfrand_lim() reads
	 * plan->state, which was undefined behavior.  Default to random
	 * shifting disabled and a deterministic PRNG seed; callers that want
	 * cycle spinning can enable randshift after plan creation. */
	plan->randshift = 0;
	plan->state = 0;

	// Copy sizes and count total number of pixels
	plan->numPixel = 1;
	int i;
	for (i = 0; i < numdims; i++)
	{
		plan->imSize[i] = imSize[i];
		plan->numPixel *= imSize[i];
		plan->minSize[i] = minSize[i];
		plan->res[i] = res[i];
	}

	/* Order matters: filters define filterLen, which create_numLevels and
	 * create_wavelet_sizes use to derive the band sizes. */
	create_wavelet_filters(plan);
	create_numLevels(plan);
	create_wavelet_sizes(plan);

	plan->randShift = (int*) malloc(sizeof(int)*plan->numdims);
	memset(plan->randShift, 0, sizeof(int)*plan->numdims);

	/* Precompute per-subband noise amplification factors used by
	 * dfsoftthresh_cpu. */
	get_noise_amp(plan);

	return plan;
}
/* Forward divergence-free wavelet transform of the velocity field
 * (in_vx, in_vy, in_vz), dispatched to the CPU or GPU implementation
 * according to plan->use_gpu (0 = CPU, 1 = GPU, 2 = GPU with host data). */
void dfwavelet_forward(struct dfwavelet_plan_s* plan, data_t* out_wcdf1, data_t* out_wcdf2, data_t* out_wcn, data_t* in_vx, data_t* in_vy, data_t* in_vz)
{
	switch (plan->use_gpu) {

	case 0:
		dffwt3_cpu(plan, out_wcdf1, out_wcdf2, out_wcn, in_vx, in_vy, in_vz);
		break;
#ifdef USE_CUDA
	case 1:
		dffwt3_gpu(plan, out_wcdf1, out_wcdf2, out_wcn, in_vx, in_vy, in_vz);
		break;

	case 2:
		dffwt3_gpuHost(plan, out_wcdf1, out_wcdf2, out_wcn, in_vx, in_vy, in_vz);
		break;
#endif
	}
}
/* Inverse divergence-free wavelet transform: reconstruct the velocity field
 * from coefficients, dispatched per plan->use_gpu (see dfwavelet_forward). */
void dfwavelet_inverse(struct dfwavelet_plan_s* plan, data_t* out_vx, data_t* out_vy, data_t* out_vz, data_t* in_wcdf1, data_t* in_wcdf2, data_t* in_wcn)
{
	switch (plan->use_gpu) {

	case 0:
		dfiwt3_cpu(plan, out_vx, out_vy, out_vz, in_wcdf1, in_wcdf2, in_wcn);
		break;
#ifdef USE_CUDA
	case 1:
		dfiwt3_gpu(plan, out_vx, out_vy, out_vz, in_wcdf1, in_wcdf2, in_wcn);
		break;

	case 2:
		dfiwt3_gpuHost(plan, out_vx, out_vy, out_vz, in_wcdf1, in_wcdf2, in_wcn);
		break;
#endif
	}
}
/* Soft-threshold wavelet coefficients in place: dfthresh applies to the
 * divergence-free parts (wcdf1, wcdf2), nthresh to the non-div-free part
 * (wcn).  Dispatched per plan->use_gpu (see dfwavelet_forward). */
void dfsoft_thresh(struct dfwavelet_plan_s* plan, scalar_t dfthresh, scalar_t nthresh, data_t* wcdf1, data_t* wcdf2, data_t* wcn)
{
	switch (plan->use_gpu) {

	case 0:
		dfsoftthresh_cpu(plan, dfthresh, nthresh, wcdf1, wcdf2, wcn);
		break;
#ifdef USE_CUDA
	case 1:
		dfsoftthresh_gpu(plan, dfthresh, nthresh, wcdf1, wcdf2, wcn);
		break;

	case 2:
		dfsoftthresh_gpuHost(plan, dfthresh, nthresh, wcdf1, wcdf2, wcn);
		break;
#endif
	}
}
/* One-shot denoising: forward transform, soft-threshold, inverse transform.
 * Dispatched per plan->use_gpu (see dfwavelet_forward). */
void dfwavelet_thresh(struct dfwavelet_plan_s* plan, scalar_t dfthresh, scalar_t nthresh, data_t* out_vx, data_t* out_vy, data_t* out_vz, data_t* in_vx, data_t* in_vy, data_t* in_vz)
{
	switch (plan->use_gpu) {

	case 0:
		dfwavthresh3_cpu(plan, dfthresh, nthresh, out_vx, out_vy, out_vz, in_vx, in_vy, in_vz);
		break;
#ifdef USE_CUDA
	case 1:
		dfwavthresh3_gpu(plan, dfthresh, nthresh, out_vx, out_vy, out_vz, in_vx, in_vy, in_vz);
		break;

	case 2:
		dfwavthresh3_gpuHost(plan, dfthresh, nthresh, out_vx, out_vy, out_vz, in_vx, in_vy, in_vz);
		break;
#endif
	}
}
/* Draw a uniformly distributed integer in [0, limit] using the reentrant
 * rand_r() with caller-owned state.
 *
 * Rejection sampling (retry while the draw exceeds limit) avoids the modulo
 * bias that rand_r(state) % (limit+1) would introduce.
 *
 * FIX: if limit+1 > RAND_MAX the divisor RAND_MAX/(limit+1) is 0, which was
 * a division by zero; clamp it to 1 (every draw is then already in range).
 */
static int dfrand_lim(unsigned int* state, int limit) {
	int divisor = RAND_MAX / (limit + 1);

	if (divisor < 1)
		divisor = 1;

	int retval = 0;

	do {
		retval = rand_r(state) / divisor;
	} while (retval > limit);

	return retval;
}
/* Draw a fresh random circular shift for every dimension, each uniform in
 * [0, 2^numLevels], using the plan's own PRNG state (cycle spinning). */
void dfwavelet_new_randshift (struct dfwavelet_plan_s* plan) {
	const int maxShift = 1 << plan->numLevels;

	for (int d = 0; d < plan->numdims; d++)
		plan->randShift[d] = dfrand_lim(&plan->state, maxShift);
}
/* Reset all per-dimension circular shifts to zero (disables the effect of a
 * previous dfwavelet_new_randshift until a new one is drawn). */
void dfwavelet_clear_randshift (struct dfwavelet_plan_s* plan) {
	for (int d = 0; d < plan->numdims; d++)
		plan->randShift[d] = 0;
}
/* Release all memory owned by the plan (counterpart of
 * prepare_dfwavelet_plan).  Note lod0/lod1 each own one contiguous buffer
 * that hid*/lor*/hir* alias into, so only lod0/lod1 are freed. */
void dfwavelet_free(struct dfwavelet_plan_s* plan)
{
	free(plan->imSize);
	free(plan->minSize);
	free(plan->res);

	free(plan->lod0);
	free(plan->lod1);

	free(plan->waveSizes);
	free(plan->randShift);

	/* free(NULL) is a no-op, so no guard is needed if the noise
	 * amplification table was never computed. */
	free(plan->noiseAmp);

	free(plan);
}
////////////// Private Functions //////////////
/* CPU forward transform of the three velocity components.
 * Each component is wavelet-transformed with its own direction-specific
 * filter pair (dir = 0/1/2), scaled by the inverse voxel resolution, and
 * the three coefficient arrays are then mixed in place into the
 * divergence-free decomposition by dflincomb_cpu. */
void dffwt3_cpu(struct dfwavelet_plan_s* plan, data_t* out_wcdf1,data_t* out_wcdf2,data_t* out_wcn, data_t* in_vx,data_t* in_vy,data_t* in_vz)
{
	fwt3_cpu(plan, out_wcdf1, in_vx, 0);
	fwt3_cpu(plan, out_wcdf2, in_vy, 1);
	fwt3_cpu(plan, out_wcn, in_vz, 2);

	/* Compensate anisotropic resolution before mixing the components. */
	data_t* wc[3] = { out_wcdf1, out_wcdf2, out_wcn };

	for (int d = 0; d < 3; d++)
		mult(wc[d], 1 / plan->res[d], plan->numCoeff);

	dflincomb_cpu(plan, out_wcdf1, out_wcdf2, out_wcn);
}
/* CPU inverse transform: undo the divergence-free mixing, restore the
 * resolution scaling, then inverse-wavelet-transform each component with
 * its direction-specific filter pair.  Note the coefficient inputs are
 * modified in place. */
void dfiwt3_cpu(struct dfwavelet_plan_s* plan, data_t* out_vx,data_t* out_vy,data_t* out_vz, data_t* in_wcdf1,data_t* in_wcdf2,data_t* in_wcn)
{
	dfunlincomb_cpu(plan, in_wcdf1, in_wcdf2, in_wcn);

	/* Re-apply the per-dimension resolution removed in dffwt3_cpu. */
	data_t* wc[3] = { in_wcdf1, in_wcdf2, in_wcn };

	for (int d = 0; d < 3; d++)
		mult(wc[d], plan->res[d], plan->numCoeff);

	iwt3_cpu(plan, out_vx, in_wcdf1, 0);
	iwt3_cpu(plan, out_vy, in_wcdf2, 1);
	iwt3_cpu(plan, out_vz, in_wcn, 2);
}
/* Complex soft-thresholding of every detail subband, in place.
 *
 * dfthresh is applied to the two divergence-free coefficient arrays
 * (wcdf1, wcdf2), nthresh to the non-div-free array (wcn).  Each of the
 * 7 subbands per level per array gets its own threshold, scaled by the
 * precomputed noise amplification plan->noiseAmp[naInd] (see get_noise_amp).
 * The coarse (low-pass) block is not thresholded.
 */
void dfsoftthresh_cpu(struct dfwavelet_plan_s* plan,scalar_t dfthresh, scalar_t nthresh, data_t* wcdf1,data_t* wcdf2,data_t* wcn)
{
	/* Coefficient layout: coarse block first (waveSizes[0..2]), then for
	 * each level l = 1 (coarsest) .. numLevels (finest) a group of 7
	 * detail subbands of blockSize = prod(waveSizes[3*l .. 3*l+2]).
	 * Advance the pointers past everything, then walk back per level. */
	data_t* HxLyLz1 = wcdf1 + plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2];
	data_t* HxLyLz2 = wcdf2 + plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2];
	data_t* HxLyLz3 = wcn + plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2];
	int l;
	for (l = 1; l <= plan->numLevels; ++l){
		HxLyLz1 += 7*plan->waveSizes[0 + 3*l]*plan->waveSizes[1 + 3*l]*plan->waveSizes[2 + 3*l];
		HxLyLz2 += 7*plan->waveSizes[0 + 3*l]*plan->waveSizes[1 + 3*l]*plan->waveSizes[2 + 3*l];
		HxLyLz3 += 7*plan->waveSizes[0 + 3*l]*plan->waveSizes[1 + 3*l]*plan->waveSizes[2 + 3*l];
	}
	int dxNext = plan->waveSizes[0 + 3*plan->numLevels];
	int dyNext = plan->waveSizes[1 + 3*plan->numLevels];
	int dzNext = plan->waveSizes[2 + 3*plan->numLevels];
	int blockSize = dxNext*dyNext*dzNext;
	/* naInd walks plan->noiseAmp in the same finest-to-coarsest, 21-bands-
	 * per-level order used when the table was built in get_noise_amp. */
	int naInd = 0;
	for (l = plan->numLevels; l >= 1; --l)
	{
		dxNext = plan->waveSizes[0 + 3*l];
		dyNext = plan->waveSizes[1 + 3*l];
		dzNext = plan->waveSizes[2 + 3*l];
		blockSize = dxNext*dyNext*dzNext;
		/* Step back to the start of this level's 7 subbands. */
		HxLyLz1 = HxLyLz1 - 7*blockSize;
		HxLyLz2 = HxLyLz2 - 7*blockSize;
		HxLyLz3 = HxLyLz3 - 7*blockSize;
		int bandInd;
		/* 21 subbands per level: 0-6 from wcdf1, 7-13 from wcdf2,
		 * 14-20 from wcn (the latter use nthresh). */
		for (bandInd=0; bandInd<7*3;bandInd++)
		{
			data_t *subband;
			scalar_t lambda;
			if (bandInd<7)
			{
				subband = HxLyLz1 + bandInd*blockSize;
				lambda = dfthresh * plan->noiseAmp[naInd];
			} else if (bandInd<14)
			{
				subband = HxLyLz2 + (bandInd-7)*blockSize;
				lambda = dfthresh * plan->noiseAmp[naInd];
			} else
			{
				subband = HxLyLz3 + (bandInd-14)*blockSize;
				lambda = nthresh * plan->noiseAmp[naInd];
			}
			// SoftThresh
			/* eps avoids division by zero for (near-)zero coefficients. */
			float const eps = 1.1921e-7f;
#pragma omp parallel for
			for(int i = 0; i < blockSize; i++)
			{
				scalar_t norm = cabs(subband[i]);
				/* max(norm - lambda, 0) written branch-free via |.| */
				scalar_t red = norm - lambda;
				red = 0.5f*(red + fabs(red));
				/* Shrink magnitude, keep complex phase. */
				red = red / (norm + eps);
				subband[i] = red * subband[i];
			}
			naInd++;
		}
	}
}
/* CPU denoising pipeline: forward divergence-free wavelet transform,
 * soft-threshold, inverse transform, using three scratch coefficient
 * buffers of plan->numCoeff entries each. */
void dfwavthresh3_cpu(struct dfwavelet_plan_s* plan,scalar_t dfthresh, scalar_t nthresh,data_t* out_vx,data_t* out_vy,data_t* out_vz,data_t* in_vx,data_t* in_vy,data_t* in_vz)
{
	data_t* tmp[3];

	for (int d = 0; d < 3; d++)
		tmp[d] = (data_t*) malloc(sizeof(data_t) * plan->numCoeff);

	dffwt3_cpu(plan, tmp[0], tmp[1], tmp[2], in_vx, in_vy, in_vz);
	dfsoftthresh_cpu(plan, dfthresh, nthresh, tmp[0], tmp[1], tmp[2]);
	dfiwt3_cpu(plan, out_vx, out_vy, out_vz, tmp[0], tmp[1], tmp[2]);

	for (int d = 0; d < 3; d++)
		free(tmp[d]);
}
/* In-place linear recombination of the three per-component coefficient
 * arrays (wc1 = x, wc2 = y, wc3 = z component) into divergence-free
 * (wc1, wc2) and non-divergence-free (wc3) coefficients.
 * Exact inverse of dfunlincomb_cpu.  The coarse block is left untouched;
 * each level's 7 detail subbands are remixed pointwise, then a second pass
 * adds finite-difference corrections from neighboring coefficients.
 * Subband naming: H/L = high/low-pass along x, y, z; trailing digit =
 * source/destination array (1/2/3).
 */
void dflincomb_cpu(struct dfwavelet_plan_s* plan,data_t* wc1,data_t* wc2,data_t* wc3)
{
	/* Advance past the coarse block and all levels' detail blocks
	 * (same layout walk as dfsoftthresh_cpu), then step back per level. */
	data_t* HxLyLz1 = wc1 + plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2];
	data_t* HxLyLz2 = wc2 + plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2];
	data_t* HxLyLz3 = wc3 + plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2];
	int l;
	for (l = 1; l <= plan->numLevels; ++l){
		HxLyLz1 += 7*plan->waveSizes[0 + 3*l]*plan->waveSizes[1 + 3*l]*plan->waveSizes[2 + 3*l];
		HxLyLz2 += 7*plan->waveSizes[0 + 3*l]*plan->waveSizes[1 + 3*l]*plan->waveSizes[2 + 3*l];
		HxLyLz3 += 7*plan->waveSizes[0 + 3*l]*plan->waveSizes[1 + 3*l]*plan->waveSizes[2 + 3*l];
	}
	int dxNext = plan->waveSizes[0 + 3*plan->numLevels];
	int dyNext = plan->waveSizes[1 + 3*plan->numLevels];
	int dzNext = plan->waveSizes[2 + 3*plan->numLevels];
	int blockSize = dxNext*dyNext*dzNext;
	int i,j,k;
	for (l = plan->numLevels; l >= 1; --l)
	{
		dxNext = plan->waveSizes[0 + 3*l];
		dyNext = plan->waveSizes[1 + 3*l];
		dzNext = plan->waveSizes[2 + 3*l];
		blockSize = dxNext*dyNext*dzNext;
		HxLyLz1 = HxLyLz1 - 7*blockSize;
		HxLyLz2 = HxLyLz2 - 7*blockSize;
		HxLyLz3 = HxLyLz3 - 7*blockSize;
		/* The 7 subbands of each array are stored contiguously in the
		 * fixed order HxLyLz, LxHyLz, HxHyLz, LxLyHz, HxLyHz, LxHyHz,
		 * HxHyHz. */
		data_t* LxHyLz1 = HxLyLz1 + blockSize;
		data_t* HxHyLz1 = LxHyLz1 + blockSize;
		data_t* LxLyHz1 = HxHyLz1 + blockSize;
		data_t* HxLyHz1 = LxLyHz1 + blockSize;
		data_t* LxHyHz1 = HxLyHz1 + blockSize;
		data_t* HxHyHz1 = LxHyHz1 + blockSize;
		data_t* LxHyLz2 = HxLyLz2 + blockSize;
		data_t* HxHyLz2 = LxHyLz2 + blockSize;
		data_t* LxLyHz2 = HxHyLz2 + blockSize;
		data_t* HxLyHz2 = LxLyHz2 + blockSize;
		data_t* LxHyHz2 = HxLyHz2 + blockSize;
		data_t* HxHyHz2 = LxHyHz2 + blockSize;
		data_t* LxHyLz3 = HxLyLz3 + blockSize;
		data_t* HxHyLz3 = LxHyLz3 + blockSize;
		data_t* LxLyHz3 = HxHyLz3 + blockSize;
		data_t* HxLyHz3 = LxLyHz3 + blockSize;
		data_t* LxHyHz3 = HxLyHz3 + blockSize;
		data_t* HxHyHz3 = LxHyHz3 + blockSize;
		/* Pass 1: pointwise remixing.  All 21 values at a given index are
		 * read into locals first, so the in-place writes are safe. */
#pragma omp parallel for private(i,j,k)
		for (k=0;k<dzNext;k++)
			for (j=0;j<dyNext;j++)
				for (i=0;i<dxNext;i++)
				{
					int ind = i+j*dxNext+k*dxNext*dyNext;
					/* wc{x,y,z}ABC = component {x,y,z}, subband with
					 * high-pass flags A=x, B=y, C=z. */
					data_t wcx100 = HxLyLz1[ind];
					data_t wcy100 = HxLyLz2[ind];
					data_t wcz100 = HxLyLz3[ind];
					data_t wcx010 = LxHyLz1[ind];
					data_t wcy010 = LxHyLz2[ind];
					data_t wcz010 = LxHyLz3[ind];
					data_t wcx001 = LxLyHz1[ind];
					data_t wcy001 = LxLyHz2[ind];
					data_t wcz001 = LxLyHz3[ind];
					data_t wcx110 = HxHyLz1[ind];
					data_t wcy110 = HxHyLz2[ind];
					data_t wcz110 = HxHyLz3[ind];
					data_t wcx101 = HxLyHz1[ind];
					data_t wcy101 = HxLyHz2[ind];
					data_t wcz101 = HxLyHz3[ind];
					data_t wcx011 = LxHyHz1[ind];
					data_t wcy011 = LxHyHz2[ind];
					data_t wcz011 = LxHyHz3[ind];
					data_t wcx111 = HxHyHz1[ind];
					data_t wcy111 = HxHyHz2[ind];
					data_t wcz111 = HxHyHz3[ind];
					/* Single-high-pass bands: pure permutation across
					 * the three arrays. */
					HxLyLz1[ind] = wcy100;
					LxHyLz1[ind] = wcx010;
					LxLyHz1[ind] = wcy001;
					HxLyLz2[ind] = wcz100;
					LxHyLz2[ind] = wcz010;
					LxLyHz2[ind] = wcx001;
					HxLyLz3[ind] = wcx100;
					LxHyLz3[ind] = wcy010;
					LxLyHz3[ind] = wcz001;
					/* Double-high-pass bands: differences go to array 1,
					 * sums to array 3. */
					HxHyLz1[ind] = 0.5*(wcx110-wcy110);
					HxLyHz1[ind] = 0.5*(wcz101-wcx101);
					LxHyHz1[ind] = 0.5*(wcy011-wcz011);
					HxHyLz2[ind] = wcz110;
					HxLyHz2[ind] = wcy101;
					LxHyHz2[ind] = wcx011;
					HxHyLz3[ind] = 0.5*(wcx110+wcy110);
					HxLyHz3[ind] = 0.5*(wcz101+wcx101);
					LxHyHz3[ind] = 0.5*(wcy011+wcz011);
					/* Triple-high-pass band: 3-way linear combination. */
					HxHyHz1[ind] = 1/3.*(-2*wcx111+wcy111+wcz111);
					HxHyHz2[ind] = 1/3.*(-wcx111+2*wcy111-wcz111);
					HxHyHz3[ind] = 1/3.*(wcx111+wcy111+wcz111);
				}
		/* NOTE(review): this orphaned barrier is outside any parallel
		 * region; the parallel-for above already ends with an implicit
		 * barrier, so this is a no-op. */
#pragma omp barrier
		/* Pass 2: add backward-difference corrections into array 3, using
		 * the neighbor at index-1 along x/y/z (clamped to 0 at the
		 * boundary).  Reads only array-1/-2 values written in pass 1. */
#pragma omp parallel for private(i,j,k)
		for (k=0;k<dzNext;k++)
			for (j=0;j<dyNext;j++)
				for (i=0;i<dxNext;i++)
				{
					int ind = i+j*dxNext+k*dxNext*dyNext;
					int indxs = ind-1;
					int indys = ind-dxNext;
					int indzs = ind-dxNext*dyNext;
					if (i==0)
						indxs = 0;
					if (j==0)
						indys = 0;
					if (k==0)
						indzs = 0;
					data_t wcy100 = HxLyLz1[ind];
					data_t wcy100s = HxLyLz1[indys];
					data_t wcz100 = HxLyLz2[ind];
					data_t wcz100s = HxLyLz2[indzs];
					data_t wcx010 = LxHyLz1[ind];
					data_t wcx010s = LxHyLz1[indxs];
					data_t wcz010 = LxHyLz2[ind];
					data_t wcz010s = LxHyLz2[indzs];
					data_t wcx001 = LxLyHz2[ind];
					data_t wcx001s = LxLyHz2[indxs];
					data_t wcy001 = LxLyHz1[ind];
					data_t wcy001s = LxLyHz1[indys];
					data_t wcz110 = HxHyLz2[ind];
					data_t wcz110s = HxHyLz2[indzs];
					data_t wcy101 = HxLyHz2[ind];
					data_t wcy101s = HxLyHz2[indys];
					data_t wcx011 = LxHyHz2[ind];
					data_t wcx011s = LxHyHz2[indxs];
					HxLyLz3[ind] = HxLyLz3[ind]+0.25*(wcy100-wcy100s+wcz100-wcz100s);
					LxHyLz3[ind] = LxHyLz3[ind]+0.25*(wcx010-wcx010s+wcz010-wcz010s);
					LxLyHz3[ind] = LxLyHz3[ind]+0.25*(wcx001-wcx001s+wcy001-wcy001s);
					HxHyLz3[ind] = HxHyLz3[ind] + 0.125*(wcz110-wcz110s);
					HxLyHz3[ind] = HxLyHz3[ind] + 0.125*(wcy101-wcy101s);
					LxHyHz3[ind] = LxHyHz3[ind] + 0.125*(wcx011-wcx011s);
				}
	}
}
/* In-place inverse of dflincomb_cpu: convert divergence-free (wc1, wc2)
 * and non-div-free (wc3) coefficients back into the per-component
 * arrays.  The two passes are in the opposite order of dflincomb_cpu:
 * first the finite-difference corrections are subtracted, then the
 * pointwise mixing is inverted.  Wait — note the order here is mixing
 * first, difference subtraction second, mirroring the read/write sets so
 * the composition with dflincomb_cpu is the identity.
 * df1_/df2_/n_ prefixes = values read from arrays 1/2/3 respectively.
 */
void dfunlincomb_cpu(struct dfwavelet_plan_s* plan,data_t* wc1,data_t* wc2,data_t* wc3)
{
	/* Same coefficient-layout walk as dflincomb_cpu. */
	data_t* HxLyLz1 = wc1 + plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2];
	data_t* HxLyLz2 = wc2 + plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2];
	data_t* HxLyLz3 = wc3 + plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2];
	int l;
	for (l = 1; l <= plan->numLevels; ++l){
		HxLyLz1 += 7*plan->waveSizes[0 + 3*l]*plan->waveSizes[1 + 3*l]*plan->waveSizes[2 + 3*l];
		HxLyLz2 += 7*plan->waveSizes[0 + 3*l]*plan->waveSizes[1 + 3*l]*plan->waveSizes[2 + 3*l];
		HxLyLz3 += 7*plan->waveSizes[0 + 3*l]*plan->waveSizes[1 + 3*l]*plan->waveSizes[2 + 3*l];
	}
	int dxNext = plan->waveSizes[0 + 3*plan->numLevels];
	int dyNext = plan->waveSizes[1 + 3*plan->numLevels];
	int dzNext = plan->waveSizes[2 + 3*plan->numLevels];
	int blockSize = dxNext*dyNext*dzNext;
	int i,j,k;
	for (l = plan->numLevels; l >= 1; --l)
	{
		dxNext = plan->waveSizes[0 + 3*l];
		dyNext = plan->waveSizes[1 + 3*l];
		dzNext = plan->waveSizes[2 + 3*l];
		blockSize = dxNext*dyNext*dzNext;
		HxLyLz1 = HxLyLz1 - 7*blockSize;
		HxLyLz2 = HxLyLz2 - 7*blockSize;
		HxLyLz3 = HxLyLz3 - 7*blockSize;
		/* Subband pointers, same fixed ordering as dflincomb_cpu. */
		data_t* LxHyLz1 = HxLyLz1 + blockSize;
		data_t* HxHyLz1 = LxHyLz1 + blockSize;
		data_t* LxLyHz1 = HxHyLz1 + blockSize;
		data_t* HxLyHz1 = LxLyHz1 + blockSize;
		data_t* LxHyHz1 = HxLyHz1 + blockSize;
		data_t* HxHyHz1 = LxHyHz1 + blockSize;
		data_t* LxHyLz2 = HxLyLz2 + blockSize;
		data_t* HxHyLz2 = LxHyLz2 + blockSize;
		data_t* LxLyHz2 = HxHyLz2 + blockSize;
		data_t* HxLyHz2 = LxLyHz2 + blockSize;
		data_t* LxHyHz2 = HxLyHz2 + blockSize;
		data_t* HxHyHz2 = LxHyHz2 + blockSize;
		data_t* LxHyLz3 = HxLyLz3 + blockSize;
		data_t* HxHyLz3 = LxHyLz3 + blockSize;
		data_t* LxLyHz3 = HxHyLz3 + blockSize;
		data_t* HxLyHz3 = LxLyHz3 + blockSize;
		data_t* LxHyHz3 = HxLyHz3 + blockSize;
		data_t* HxHyHz3 = LxHyHz3 + blockSize;
		/* Pass 1: invert the pointwise mixing.  All values are read into
		 * locals before any write, so the in-place update is safe. */
#pragma omp parallel for private(i,j,k)
		for (k=0;k<dzNext;k++)
			for (j=0;j<dyNext;j++)
				for (i=0;i<dxNext;i++)
				{
					int ind = i+j*dxNext+k*dxNext*dyNext;
					data_t df1_100 = HxLyLz1[ind];
					data_t df2_100 = HxLyLz2[ind];
					data_t n_100 = HxLyLz3[ind];
					data_t df1_010 = LxHyLz1[ind];
					data_t df2_010 = LxHyLz2[ind];
					data_t n_010 = LxHyLz3[ind];
					data_t df1_001 = LxLyHz1[ind];
					data_t df2_001 = LxLyHz2[ind];
					data_t n_001 = LxLyHz3[ind];
					data_t df1_110 = HxHyLz1[ind];
					data_t df2_110 = HxHyLz2[ind];
					data_t n_110 = HxHyLz3[ind];
					data_t df1_101 = HxLyHz1[ind];
					data_t df2_101 = HxLyHz2[ind];
					data_t n_101 = HxLyHz3[ind];
					data_t df1_011 = LxHyHz1[ind];
					data_t df2_011 = LxHyHz2[ind];
					data_t n_011 = LxHyHz3[ind];
					data_t df1_111 = HxHyHz1[ind];
					data_t df2_111 = HxHyHz2[ind];
					data_t n_111 = HxHyHz3[ind];
					/* Single-high-pass bands: undo the permutation. */
					HxLyLz2[ind] = df1_100;
					LxHyLz1[ind] = df1_010;
					LxLyHz2[ind] = df1_001;
					HxLyLz3[ind] = df2_100;
					LxHyLz3[ind] = df2_010;
					LxLyHz1[ind] = df2_001;
					HxLyLz1[ind] = n_100;
					LxHyLz2[ind] = n_010;
					LxLyHz3[ind] = n_001;
					/* Double-high-pass bands: sums/differences invert the
					 * 0.5*(a±b) combinations of dflincomb_cpu. */
					HxHyLz3[ind] = df2_110;
					HxLyHz2[ind] = df2_101;
					LxHyHz1[ind] = df2_011;
					HxHyLz1[ind] = (df1_110+n_110);
					HxLyHz3[ind] = (df1_101+n_101);
					LxHyHz2[ind] = (df1_011+n_011);
					HxHyLz2[ind] = (-df1_110+n_110);
					HxLyHz1[ind] = (-df1_101+n_101);
					LxHyHz3[ind] = (-df1_011+n_011);
					/* Triple-high-pass band: inverse of the 3x3 mixing. */
					HxHyHz1[ind] = (-df1_111+n_111);
					HxHyHz2[ind] = (df2_111+n_111);
					HxHyHz3[ind] = df1_111-df2_111+n_111;
				}
		/* NOTE(review): orphaned barrier outside a parallel region —
		 * no-op; the parallel-for has an implicit ending barrier. */
#pragma omp barrier
		/* Pass 2: subtract the backward-difference corrections that
		 * dflincomb_cpu added (same neighbor indices, clamped at 0). */
#pragma omp parallel for private(i,j,k)
		for (k=0;k<dzNext;k++)
			for (j=0;j<dyNext;j++)
				for (i=0;i<dxNext;i++)
				{
					int ind = i+j*dxNext+k*dxNext*dyNext;
					int indxs = ind-1;
					int indys = ind-dxNext;
					int indzs = ind-dxNext*dyNext;
					if (i==0)
						indxs = 0;
					if (j==0)
						indys = 0;
					if (k==0)
						indzs = 0;
					data_t df1_100 = HxLyLz2[ind];
					data_t df1_100s = HxLyLz2[indys];
					data_t df2_100 = HxLyLz3[ind];
					data_t df2_100s = HxLyLz3[indzs];
					data_t df1_010 = LxHyLz1[ind];
					data_t df1_010s = LxHyLz1[indxs];
					data_t df2_010 = LxHyLz3[ind];
					data_t df2_010s = LxHyLz3[indzs];
					data_t df2_001 = LxLyHz1[ind];
					data_t df2_001s = LxLyHz1[indxs];
					data_t df1_001 = LxLyHz2[ind];
					data_t df1_001s = LxLyHz2[indys];
					data_t df2_110 = HxHyLz3[ind];
					data_t df2_110s = HxHyLz3[indzs];
					data_t df2_101 = HxLyHz2[ind];
					data_t df2_101s = HxLyHz2[indys];
					data_t df2_011 = LxHyHz1[ind];
					data_t df2_011s = LxHyHz1[indxs];
					HxLyLz1[ind] = HxLyLz1[ind]-0.25*(df1_100-df1_100s+df2_100-df2_100s);
					LxHyLz2[ind] = LxHyLz2[ind]-0.25*(df1_010-df1_010s+df2_010-df2_010s);
					LxLyHz3[ind] = LxLyHz3[ind]-0.25*(df2_001-df2_001s+df1_001-df1_001s);
					HxHyLz1[ind] = HxHyLz1[ind] - 0.125*(df2_110-df2_110s);
					HxLyHz3[ind] = HxLyHz3[ind] - 0.125*(df2_101-df2_101s);
					LxHyHz2[ind] = LxHyHz2[ind] - 0.125*(df2_011-df2_011s);
					HxHyLz2[ind] = HxHyLz2[ind] - 0.125*(df2_110-df2_110s);
					HxLyHz1[ind] = HxLyHz1[ind] - 0.125*(df2_101-df2_101s);
					LxHyHz3[ind] = LxHyHz3[ind] - 0.125*(df2_011-df2_011s);
				}
	}
}
/* 3D multi-level forward wavelet transform of one component.
 *
 * coeff layout on output: coarse LxLyLz block first, then for each level
 * l = 1 (coarsest) .. numLevels (finest) the 7 detail subbands.
 * dir (0/1/2) selects which axis uses the second filter pair
 * (plan->lod1/hid1) instead of the default (plan->lod0/hid0) — see
 * create_wavelet_filters.
 * The input image is circularly shifted first (cycle spinning) and
 * restored before returning, so inImage is unchanged on exit.
 * NOTE(review): assumes a 3D plan — imSize[0..2] are read directly.
 */
void fwt3_cpu(struct dfwavelet_plan_s* plan, data_t* coeff, data_t* inImage,int dir)
{
	circshift_cpu(plan,inImage);
	/* Keep the original pointer: inImage is redirected to a scratch
	 * buffer after the first level, but the shift must be undone on the
	 * caller's array. */
	data_t* origInImage = inImage;
	/* Advance HxLyLz past all coefficient storage, then step back per
	 * level (finest level is written first, at the end of coeff). */
	data_t* HxLyLz = coeff + plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2];
	int l;
	for (l = 1; l <= plan->numLevels; ++l){
		HxLyLz += 7*plan->waveSizes[0 + 3*l]*plan->waveSizes[1 + 3*l]*plan->waveSizes[2 + 3*l];
	}
	int dx = plan->imSize[0];
	int dy = plan->imSize[1];
	int dz = plan->imSize[2];
	int dxNext = plan->waveSizes[0 + 3*plan->numLevels];
	int dyNext = plan->waveSizes[1 + 3*plan->numLevels];
	int dzNext = plan->waveSizes[2 + 3*plan->numLevels];
	int blockSize = dxNext*dyNext*dzNext;
	/* Scratch buffers sized for the finest (largest) level; reused as the
	 * bands shrink at coarser levels. */
	data_t* LxLyLz = (data_t*) malloc(sizeof(data_t)*blockSize);
	data_t* tempz = (data_t*) malloc(sizeof(data_t)*dx*dy*dzNext);
	data_t* tempyz = (data_t*) malloc(sizeof(data_t)*dx*dyNext*dzNext);
	data_t* tempxyz = (data_t*) malloc(sizeof(data_t)*blockSize);
	// Assign Filters
	/* Default: lod0/hid0 on every axis; the axis picked by dir uses the
	 * lod1/hid1 pair. */
	scalar_t *lodx,*lody,*lodz,*hidx,*hidy,*hidz;
	lodx = plan->lod0;
	lody = plan->lod0;
	lodz = plan->lod0;
	hidx = plan->hid0;
	hidy = plan->hid0;
	hidz = plan->hid0;
	if (dir==0)
	{
		lodx = plan->lod1;
		hidx = plan->hid1;
	}
	if (dir==1)
	{
		lody = plan->lod1;
		hidy = plan->hid1;
	}
	if (dir==2)
	{
		lodz = plan->lod1;
		hidz = plan->hid1;
	}
	for (l = plan->numLevels; l >= 1; --l)
	{
		dxNext = plan->waveSizes[0 + 3*l];
		dyNext = plan->waveSizes[1 + 3*l];
		dzNext = plan->waveSizes[2 + 3*l];
		blockSize = dxNext*dyNext*dzNext;
		/* Step back to this level's 7 output subbands. */
		HxLyLz = HxLyLz - 7*blockSize;
		data_t* LxHyLz = HxLyLz + blockSize;
		data_t* HxHyLz = LxHyLz + blockSize;
		data_t* LxLyHz = HxHyLz + blockSize;
		data_t* HxLyHz = LxLyHz + blockSize;
		data_t* LxHyHz = HxLyHz + blockSize;
		data_t* HxHyHz = LxHyHz + blockSize;
		int dxy = dx*dy;
		int newdz = (dz + plan->filterLen-1) / 2;
		int newdy = (dy + plan->filterLen-1) / 2;
		int newdxy = dx*newdy;
		/* Separable filtering: filter+downsample along z, then y, then x,
		 * producing each of the 8 subbands (LxLyLz kept for next level). */
		// Lz
		conv_down_3d(tempz, inImage, dz, dxy, dx, 1, dy, dx, lodz,plan->filterLen);
		// LyLz
		conv_down_3d(tempyz, tempz, dy, dx, dx, 1, newdz, dxy, lody,plan->filterLen);
		conv_down_3d(LxLyLz, tempyz, dx, 1, newdy, dx, newdz, newdxy, lodx,plan->filterLen);
		conv_down_3d(HxLyLz, tempyz, dx, 1, newdy, dx, newdz, newdxy, hidx,plan->filterLen);
		// HyLz
		conv_down_3d(tempyz, tempz, dy, dx, dx, 1, newdz, dxy, hidy,plan->filterLen);
		conv_down_3d(LxHyLz, tempyz, dx, 1, newdy, dx, newdz, newdxy, lodx,plan->filterLen);
		conv_down_3d(HxHyLz, tempyz, dx, 1, newdy, dx, newdz, newdxy, hidx,plan->filterLen);
		// Hz
		conv_down_3d(tempz, inImage, dz, dxy, dx, 1, dy, dx, hidz,plan->filterLen);
		// LyHz
		conv_down_3d(tempyz, tempz, dy, dx, dx, 1, newdz, dxy, lody,plan->filterLen);
		conv_down_3d(LxLyHz, tempyz, dx, 1, newdy, dx, newdz, newdxy, lodx,plan->filterLen);
		conv_down_3d(HxLyHz, tempyz, dx, 1, newdy, dx, newdz, newdxy, hidx,plan->filterLen);
		// HyHz
		conv_down_3d(tempyz, tempz, dy, dx, dx, 1, newdz, dxy, hidy,plan->filterLen);
		conv_down_3d(LxHyHz, tempyz, dx, 1, newdy, dx, newdz, newdxy, lodx,plan->filterLen);
		conv_down_3d(HxHyHz, tempyz, dx, 1, newdy, dx, newdz, newdxy, hidx,plan->filterLen);
		/* The coarse band becomes the input of the next (coarser) level. */
		memcpy(tempxyz, LxLyLz, blockSize*sizeof(data_t));
		inImage = tempxyz;
		dx = dxNext;
		dy = dyNext;
		dz = dzNext;
	}
	// Final LxLyLz
	memcpy(coeff, inImage, plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2]*sizeof(data_t));
	free(LxLyLz);
	free(tempz);
	free(tempyz);
	free(tempxyz);
	/* Restore the caller's input image to its unshifted state. */
	circunshift_cpu(plan,origInImage);
}
/* 3D multi-level inverse wavelet transform of one component.
 *
 * Reconstructs outImage from coeff (layout as produced by fwt3_cpu),
 * processing levels coarse-to-fine.  dir selects which axis uses the
 * lor1/hir1 reconstruction filter pair.  The final image is circularly
 * unshifted, undoing the shift applied in fwt3_cpu.
 */
void iwt3_cpu(struct dfwavelet_plan_s* plan, data_t* outImage, data_t* coeff,int dir)
{
	// Workspace dimensions
	/* Full upsampled-convolution output size at the finest level; the
	 * *2 buffers cover the second-finest level's y/z extents. */
	int dxWork = plan->waveSizes[0 + 3*plan->numLevels]*2-1 + plan->filterLen-1;
	int dyWork = plan->waveSizes[1 + 3*plan->numLevels]*2-1 + plan->filterLen-1;
	int dzWork = plan->waveSizes[2 + 3*plan->numLevels]*2-1 + plan->filterLen-1;
	int dyWork2 = plan->waveSizes[1 + 3*(plan->numLevels-1)]*2-1 + plan->filterLen-1;
	int dzWork2 = plan->waveSizes[2 + 3*(plan->numLevels-1)]*2-1 + plan->filterLen-1;
	// Workspace
	data_t* tempyz = (data_t*) malloc(sizeof(data_t)*dxWork*dyWork2*dzWork2);
	data_t* tempz = (data_t*) malloc(sizeof(data_t)*dxWork*dyWork*dzWork2);
	data_t* tempFull = (data_t*) malloc(sizeof(data_t)*dxWork*dyWork*dzWork);
	int dx = plan->waveSizes[0];
	int dy = plan->waveSizes[1];
	int dz = plan->waveSizes[2];
	// Assign Filters
	/* Default reconstruction pair lor0/hir0 on every axis; the axis
	 * picked by dir uses lor1/hir1 (mirrors fwt3_cpu). */
	scalar_t *lorx,*lory,*lorz,*hirx,*hiry,*hirz;
	lorx = plan->lor0;
	lory = plan->lor0;
	lorz = plan->lor0;
	hirx = plan->hir0;
	hiry = plan->hir0;
	hirz = plan->hir0;
	if (dir==0)
	{
		lorx = plan->lor1;
		hirx = plan->hir1;
	}
	if (dir==1)
	{
		lory = plan->lor1;
		hiry = plan->hir1;
	}
	if (dir==2)
	{
		lorz = plan->lor1;
		hirz = plan->hir1;
	}
	/* Seed the reconstruction with the coarse block; outImage doubles as
	 * the running LxLyLz band between levels. */
	memcpy(outImage, coeff, dx*dy*dz*sizeof(data_t));
	data_t* HxLyLz = coeff + dx*dy*dz;
	int level;
	for (level = 1; level < plan->numLevels+1; ++level)
	{
		dx = plan->waveSizes[0 + 3*level];
		dy = plan->waveSizes[1 + 3*level];
		dz = plan->waveSizes[2 + 3*level];
		int blockSize = dx*dy*dz;
		data_t* LxHyLz = HxLyLz + blockSize;
		data_t* HxHyLz = LxHyLz + blockSize;
		data_t* LxLyHz = HxHyLz + blockSize;
		data_t* HxLyHz = LxLyHz + blockSize;
		data_t* LxHyHz = HxLyHz + blockSize;
		data_t* HxHyHz = LxHyHz + blockSize;
		data_t* LxLyLz = outImage;
		int newdx = 2*dx-1 + plan->filterLen-1;
		int newdy = 2*dy-1 + plan->filterLen-1;
		int newdz = 2*dz-1 + plan->filterLen-1;
		int dxy = dx*dy;
		int newdxy = newdx*dy;
		int newnewdxy = newdx*newdy;
		/* conv_up_3d accumulates (+=), so every workspace region is
		 * zeroed before the subbands are summed into it. */
		memset(tempFull, 0, newnewdxy*newdz*sizeof(data_t));
		memset(tempz, 0, newnewdxy*dz*sizeof(data_t));
		memset(tempyz, 0, newdxy*dz*sizeof(data_t));
		/* Lz branch: upsample along x, then y; combined into tempz. */
		conv_up_3d(tempyz, LxLyLz, dx, 1, dy, dx, dz, dxy, lorx,plan->filterLen);
		conv_up_3d(tempyz, HxLyLz, dx, 1, dy, dx, dz, dxy, hirx,plan->filterLen);
		conv_up_3d(tempz, tempyz, dy, newdx, newdx, 1, dz, newdxy, lory,plan->filterLen);
		memset(tempyz, 0, newdxy*dz*sizeof(data_t));
		conv_up_3d(tempyz, LxHyLz, dx, 1, dy, dx, dz, dxy, lorx,plan->filterLen);
		conv_up_3d(tempyz, HxHyLz, dx, 1, dy, dx, dz, dxy, hirx,plan->filterLen);
		conv_up_3d(tempz, tempyz, dy, newdx, newdx, 1, dz, newdxy, hiry,plan->filterLen);
		conv_up_3d(tempFull, tempz, dz, newnewdxy, newdx, 1, newdy, newdx, lorz,plan->filterLen);
		/* Hz branch: same x/y recombination, then high-pass along z. */
		memset(tempz, 0, newnewdxy*dz*sizeof(data_t));
		memset(tempyz, 0, newdxy*dz*sizeof(data_t));
		conv_up_3d(tempyz, LxLyHz, dx, 1, dy, dx, dz, dxy, lorx,plan->filterLen);
		conv_up_3d(tempyz, HxLyHz, dx, 1, dy, dx, dz, dxy, hirx,plan->filterLen);
		conv_up_3d(tempz, tempyz, dy, newdx, newdx, 1, dz, newdxy, lory,plan->filterLen);
		memset(tempyz, 0, newdxy*dz*sizeof(data_t));
		conv_up_3d(tempyz, LxHyHz, dx, 1, dy, dx, dz, dxy, lorx,plan->filterLen);
		conv_up_3d(tempyz, HxHyHz, dx, 1, dy, dx, dz, dxy, hirx,plan->filterLen);
		conv_up_3d(tempz, tempyz, dy, newdx, newdx, 1, dz, newdxy, hiry,plan->filterLen);
		conv_up_3d(tempFull, tempz, dz, newnewdxy, newdx, 1, newdy, newdx, hirz,plan->filterLen);
		// Crop center of workspace
		/* Next-level target size; for the last level this reads
		 * waveSizes at index numLevels+1, which holds imSize (see
		 * create_wavelet_sizes). */
		int dxNext = plan->waveSizes[0+3*(level+1)];
		int dyNext = plan->waveSizes[1+3*(level+1)];
		int dzNext = plan->waveSizes[2+3*(level+1)];
		int dxyNext = dxNext*dyNext;
		dxWork = (2*dx-1 + plan->filterLen-1);
		dyWork = (2*dy-1 + plan->filterLen-1);
		dzWork = (2*dz-1 + plan->filterLen-1);
		int dxyWork = dxWork*dyWork;
		int xOffset = (int) ((dxWork - dxNext) / 2.0);
		int yOffset = (int) ((dyWork - dyNext) / 2.0);
		int zOffset = (int) ((dzWork - dzNext) / 2.0);
		int k,j;
		for (k = 0; k < dzNext; ++k){
			for (j = 0; j < dyNext; ++j){
				memcpy(outImage+j*dxNext + k*dxyNext, tempFull+xOffset + (yOffset+j)*dxWork + (zOffset+k)*dxyWork, dxNext*sizeof(data_t));
			}
		}
		/* Advance to the next (finer) level's 7 subbands. */
		HxLyLz += 7*blockSize;
	}
	free(tempyz);
	free(tempz);
	free(tempFull);
	/* Undo the circular shift applied during the forward transform. */
	circunshift_cpu(plan,outImage);
}
/* Circularly shift the image in place by plan->randShift (cycle spinning).
 *
 * If plan->randshift is set, a fresh random shift is drawn first.
 * Element at linear index `index` moves to `index + shift-offset`, wrapped
 * modulo the total pixel count (the double-modulo keeps the result
 * non-negative).  Inverted by circunshift_cpu with the same randShift.
 * Only 2D and 3D plans are handled; other numdims leave data untouched.
 *
 * FIX: removed the orphaned `#pragma omp barrier` that followed the
 * parallel loops — it was outside any parallel region (a no-op at best);
 * each `parallel for` already ends with an implicit barrier.
 */
void circshift_cpu(struct dfwavelet_plan_s* plan, data_t *data) {

	if (plan->randshift)
		dfwavelet_new_randshift(plan);

	// Return if no shifts
	int zeroShift = 1;
	int i;
	for (i = 0; i < plan->numdims; i++)
	{
		zeroShift &= (plan->randShift[i]==0);
	}

	if (zeroShift) {
		return;
	}

	// Shift out-of-place from a snapshot of the data
	data_t* dataCopy = malloc(sizeof(data_t)*plan->numPixel);
	memcpy(dataCopy, data, plan->numPixel*sizeof(data_t));

	if (plan->numdims==2)
	{
		int dx,dy,r0,r1,j,index,indexShifted;
		dx = plan->imSize[0];
		dy = plan->imSize[1];
		r0 = plan->randShift[0];
		r1 = plan->randShift[1];
#pragma omp parallel for private(index, j, i,indexShifted)
		for(j = 0; j < dy; j++) {
			for(i = 0; i < dx; i++) {
				index = i+j*dx;
				indexShifted = (((i+r0) + (j+r1)*dx)%(dx*dy)+dx*dy)%(dx*dy);
				data[indexShifted] = dataCopy[index];
			}
		}
	}

	if (plan->numdims==3)
	{
		int dx,dy,dz,r0,r1,r2,k,j,index,indexShifted;
		dx = plan->imSize[0];
		dy = plan->imSize[1];
		dz = plan->imSize[2];
		r0 = plan->randShift[0];
		r1 = plan->randShift[1];
		r2 = plan->randShift[2];
#pragma omp parallel for private(index, k, j, i,indexShifted)
		for (k = 0; k < dz; k++) {
			for(j = 0; j < dy; j++) {
				for(i = 0; i < dx; i++) {
					index = i+j*dx+k*dx*dy;
					indexShifted = ((i+r0 + (j+r1)*dx + (k+r2)*dx*dy)%(dx*dy*dz)+(dx*dy*dz))%(dx*dy*dz);
					data[indexShifted] = dataCopy[index];
				}
			}
		}
	}

	free(dataCopy);
}
/* Undo circshift_cpu: move each element back from its shifted position
 * using the currently stored plan->randShift (reads data[shifted] into
 * data[index], the exact inverse of the write direction in circshift_cpu).
 * Does NOT draw a new random shift.  Only 2D and 3D plans are handled. */
void circunshift_cpu(struct dfwavelet_plan_s* plan, data_t *data) {
	// Return if no shifts
	int zeroShift = 1;
	int i;
	for (i = 0; i< plan->numdims; i++)
	{
		zeroShift &= (plan->randShift[i]==0);
	}
	if(zeroShift) {
		return;
	}
	// Copy data
	/* Unshift out-of-place from a snapshot of the data. */
	data_t* dataCopy = malloc(sizeof(data_t)*plan->numPixel);
	memcpy(dataCopy, data, plan->numPixel*sizeof(data_t));
	if (plan->numdims==2)
	{
		int dx,dy,r0,r1,j,i,index,indexShifted;
		dx = plan->imSize[0];
		dy = plan->imSize[1];
		r0 = plan->randShift[0];
		r1 = plan->randShift[1];
#pragma omp parallel for private(index, j, i,indexShifted)
		for(j = 0; j < dy; j++) {
			for(i = 0; i < dx; i++) {
				index = i+j*dx;
				/* Same shifted-index formula as circshift_cpu, but used
				 * as the read position instead of the write position. */
				indexShifted = (((i+r0) + (j+r1)*dx)%(dx*dy)+dx*dy)%(dx*dy);
				data[index] = dataCopy[indexShifted];
			}
		}
	}
	if (plan->numdims==3)
	{
		int dx,dy,dz,r0,r1,r2,k,j,i,index,indexShifted;
		dx = plan->imSize[0];
		dy = plan->imSize[1];
		dz = plan->imSize[2];
		r0 = plan->randShift[0];
		r1 = plan->randShift[1];
		r2 = plan->randShift[2];
#pragma omp parallel for private(index, k, j, i,indexShifted)
		for (k = 0; k < dz; k++) {
			for(j = 0; j < dy; j++) {
				for(i = 0; i < dx; i++) {
					index = i+j*dx+k*dx*dy;
					indexShifted = ((i+r0 + (j+r1)*dx + (k+r2)*dx*dy)%(dx*dy*dz)+(dx*dy*dz))%(dx*dy*dz);
					data[index] = dataCopy[indexShifted];
				}
			}
		}
	}
	free(dataCopy);
}
/********** Helper Function *********/
/* 1D filter-and-downsample-by-2 applied along dimension 1 of a 3D array.
 *
 * in is addressed as in[i3*skip3 + i2*skip2 + i1*skip1]; dimension 1 has
 * size1 samples at stride skip1, dimensions 2/3 are simply iterated over.
 * The output has outSize1 = (size1 + filterLen-1)/2 samples along
 * dimension 1; boundaries are handled by symmetric reflection.  The filter
 * is applied time-reversed (filter[filterLen-1-k]), i.e. as a convolution.
 */
void conv_down_3d(data_t *out, data_t *in,
		int size1, int skip1, int size2, int skip2, int size3, int skip3,
		scalar_t *filter, int filterLen)
{
	int outSize1 = (size1 + filterLen-1) / 2;
	// Adjust out skip 2 and 3 if needed
	/* If a skip spans dimension 1 (skip > skip1), the output stride
	 * shrinks proportionally because dimension 1 itself shrank. */
	int outSkip2;
	if(skip2 > skip1) {
		outSkip2 = outSize1*skip2/size1;
	}
	else {
		outSkip2 = skip2;
	}
	int outSkip3;
	if(skip3 > skip1) {
		outSkip3 = outSize1*skip3/size1;
	}
	else {
		outSkip3 = skip3;
	}
	int i32;
	/* Flatten dims 2 and 3 into one parallel loop. */
#pragma omp parallel for
	for (i32 = 0; i32 < size2*size3; ++i32)
	{
		int i2 = i32 % size2;
		int i3 = i32 / size2;
		int i1;
		for (i1 = 0; i1 < outSize1; ++i1)
		{
			out[i3*outSkip3 + i2*outSkip2 + i1*skip1] = 0.0f;
			int k;
			for (k = 0; k < filterLen; ++k)
			{
				/* Input tap for output sample i1 (downsampling by 2);
				 * reflect indices that fall outside [0, size1). */
				int out_i1 = 2*i1+1 - (filterLen-1) + k;
				if (out_i1 < 0) out_i1 = -out_i1-1;
				if (out_i1 >= size1) out_i1 = size1-1 - (out_i1-size1);
				out[i3*outSkip3 + i2*outSkip2 + i1*skip1] += in[i3*skip3 + i2*skip2 + out_i1*skip1] * filter[filterLen-1-k];
			}
		}
	}
}
/* 1D upsample-by-2-and-filter applied along dimension 1 of a 3D array
 * (the transpose of conv_down_3d).
 *
 * The output has outSize1 = 2*size1-1 + filterLen-1 samples along
 * dimension 1.  IMPORTANT: results are ACCUMULATED into out (+=); the
 * caller must zero the destination first (see the memsets in iwt3_cpu).
 * Input taps outside [0, size1) are simply skipped (zero padding).
 */
void conv_up_3d(data_t *out, data_t *in,
		int size1, int skip1, int size2, int skip2, int size3, int skip3,
		scalar_t *filter, int filterLen)
{
	int outSize1 = 2*size1-1 + filterLen-1;
	// Adjust out skip 2 and 3 if needed
	/* Same stride adjustment as conv_down_3d, but dimension 1 grows. */
	int outSkip2;
	if(skip2 > skip1) {
		outSkip2 = outSize1*skip2/size1;
	}
	else {
		outSkip2 = skip2;
	}
	int outSkip3;
	if(skip3 > skip1) {
		outSkip3 = outSize1*skip3/size1;
	}
	else {
		outSkip3 = skip3;
	}
	int i32;
	/* Flatten dims 2 and 3 into one parallel loop. */
#pragma omp parallel for
	for (i32 = 0; i32 < size2*size3; ++i32)
	{
		int i2 = i32 % size2;
		int i3 = i32 / size2;
		int i1;
		for (i1 = 0; i1 < outSize1; ++i1) {
			int k;
			/* Only every other tap lands on an (upsampled) input sample;
			 * the starting parity aligns k with those taps. */
			for (k = (i1 - (filterLen-1)) & 1; k < filterLen; k += 2){
				int in_i1 = (i1 - (filterLen-1) + k) >> 1;
				if (in_i1 >= 0 && in_i1 < size1)
					out[i3*outSkip3 + i2*outSkip2 + i1*skip1] += in[i3*skip3 + i2*skip2 + in_i1*skip1] * filter[filterLen-1-k];
			}
		}
	}
}
/* Scale the first numMax entries of in by scale, in place. */
void mult(data_t* in,scalar_t scale,int numMax)
{
	data_t* p = in;
	data_t* const end = in + numMax;

	while (p != end) {
		*p *= scale;
		++p;
	}
}
/* Determine the number of decomposition levels.
 * For each dimension, count how many (size + filterLen - 1)/2 halvings it
 * takes for the band size to drop to minSize or below, minus one; the plan
 * uses the minimum of these counts over all dimensions. */
void create_numLevels(struct dfwavelet_plan_s* plan)
{
	plan->numLevels = 10000000;	/* effectively +infinity before the min */

	for (int d = 0; d < plan->numdims; d++)
	{
		int levels = -1;
		int size = plan->imSize[d];

		while (size > plan->minSize[d])
		{
			levels++;
			size = (size + plan->filterLen - 1) / 2;
		}

		if (levels < plan->numLevels)
			plan->numLevels = levels;
	}
}
/* Compute per-level band sizes and the total coefficient count.
 *
 * waveSizes has numdims*(numLevels+2) entries, indexed
 * waveSizes[d + numdims*l]:
 *   l = numLevels+1 : the full image size,
 *   l = numLevels..1: each level's band size,
 *                     (previous + filterLen - 1)/2,
 *   l = 0           : copy of level 1 (the coarse low-pass block).
 * Also sets numCoeff (coarse block + numSubband detail bands per level,
 * numSubband = 2^numdims - 1) and numCoarse (size of the level-1 band).
 */
void create_wavelet_sizes(struct dfwavelet_plan_s* plan)
{
	int numdims = plan->numdims;
	int filterLen = plan->filterLen;
	int numLevels = plan->numLevels;
	int numSubCoef;
	plan->waveSizes = (long*) malloc(sizeof(long)*numdims*(numLevels+2));
	// Get number of subband per level, (3 for 2d, 7 for 3d)
	// Set the last bandSize to be imSize
	int d,l;
	int numSubband = 1;
	for (d = 0; d<numdims; d++)
	{
		plan->waveSizes[d + numdims*(numLevels+1)] = plan->imSize[d];
		numSubband <<= 1;
	}
	/* 2^numdims bands per split, minus the low-pass band kept for the
	 * next level. */
	numSubband--;
	// Get numCoeff and waveSizes
	// Each bandSize[l] is (bandSize[l+1] + filterLen - 1)/2
	plan->numCoeff = 0;
	for (l = plan->numLevels; l >= 1; --l) {
		numSubCoef = 1;
		for (d = 0; d < numdims; d++)
		{
			plan->waveSizes[d + numdims*l] = (plan->waveSizes[d + numdims*(l+1)] + filterLen - 1) / 2;
			numSubCoef *= plan->waveSizes[d + numdims*l];
		}
		plan->numCoeff += numSubband*numSubCoef;
		if (l==1)
			plan->numCoarse = numSubCoef;
	}
	/* Level 0 mirrors level 1: the coarse block has the same size as the
	 * coarsest detail bands. */
	numSubCoef = 1;
	for (d = 0; d < numdims; d++)
	{
		plan->waveSizes[d] = plan->waveSizes[numdims+d];
		numSubCoef *= plan->waveSizes[d];
	}
	plan->numCoeff += numSubCoef;
}
/* All filter coefficients are obtained from http://wavelets.pybytes.com/ */
/* Set up the two biorthogonal filter banks used by the transform.
 *
 * Each bank is one contiguous buffer of 4 filters of length filterLen, in
 * the order: decomposition low-pass (lod), decomposition high-pass (hid),
 * reconstruction low-pass (lor), reconstruction high-pass (hir).  The
 * hid/lor/hir pointers alias into the lod buffer, so dfwavelet_free only
 * frees lod0 and lod1.  Bank 0 holds the CDF 2.2 filters, bank 1 the
 * CDF 3.1 filters (used on the axis selected by `dir` in fwt3/iwt3). */
void create_wavelet_filters(struct dfwavelet_plan_s* plan)
{
	int filterLen = 0;
	scalar_t* filter1, *filter2;
	filterLen = 6;
	// CDF 2.2 and CDF 3.1 Wavelet
	scalar_t cdf22[] = {
		0.0,-0.17677669529663689,0.35355339059327379,1.0606601717798214,0.35355339059327379,-0.17677669529663689,
		0.0,0.35355339059327379,-0.70710678118654757,0.35355339059327379,0.0,0.0,
		0.0,0.35355339059327379,0.70710678118654757,0.35355339059327379,0.0,0.0,
		0.0,0.17677669529663689,0.35355339059327379,-1.0606601717798214,0.35355339059327379,0.17677669529663689
	};
	scalar_t cdf31[] = {
		0.0,-0.35355339059327379,1.0606601717798214,1.0606601717798214,-0.35355339059327379,0.0 ,
		0.0,-0.17677669529663689,0.53033008588991071,-0.53033008588991071,0.17677669529663689,0.0,
		0.0,0.17677669529663689,0.53033008588991071,0.53033008588991071,0.17677669529663689,0.0,
		0.0,-0.35355339059327379,-1.0606601717798214,1.0606601717798214,0.35355339059327379,0.0
	};
	filter1 = cdf22;
	filter2 = cdf31;
	// Allocate filters contiguously (for convenience)
	plan->filterLen = filterLen;
	plan->lod0 = (scalar_t*) malloc(sizeof(scalar_t) * 4 * filterLen);
	memcpy(plan->lod0, filter1, 4*filterLen*sizeof(scalar_t));
	plan->lod1 = (scalar_t*) malloc(sizeof(scalar_t) * 4 * filterLen);
	memcpy(plan->lod1, filter2, 4*filterLen*sizeof(scalar_t));
	/* Aliases into the contiguous buffers — not separate allocations. */
	plan->hid0 = plan->lod0 + 1*filterLen;
	plan->lor0 = plan->lod0 + 2*filterLen;
	plan->hir0 = plan->lod0 + 3*filterLen;
	plan->hid1 = plan->lod1 + 1*filterLen;
	plan->lor1 = plan->lod1 + 2*filterLen;
	plan->hir1 = plan->lod1 + 3*filterLen;
}
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
static data_t drand() /* uniform distribution, (0..1] */
{
	/* Shift by one so the result is never exactly 0 (safe as a log()
	 * argument in random_normal). */
	double numerator = rand() + 1.0;
	double denominator = RAND_MAX + 1.0;

	return numerator / denominator;
}
static void random_normal(data_t* in,int length)
/* normal distribution, centered on 0, std dev 1 */
{
	/* Box-Muller transform: radius from one uniform draw, angle from a
	 * second (drand() is in (0..1], so log() never sees 0). */
	for (int k = 0; k < length; k++) {

		double radius = sqrt(-2 * log(drand()));
		double angle = 2 * M_PI * drand();

		in[k] = radius * cos(angle);
	}
}
/* Computes and caches per-subband noise amplification factors in
 * plan->noiseAmp: unit-variance Gaussian noise is pushed through the forward
 * divergence-free wavelet transform, and the standard deviation of each of
 * the 3*7 detail subbands per level is recorded.  No-op if already cached. */
void get_noise_amp(struct dfwavelet_plan_s* plan)
{
  if (plan->noiseAmp==NULL)
  {
    // Generate Gaussian w/ mean=0, std=1 data
    data_t* vx,*vy,*vz;
    data_t* wcdf1,*wcdf2,*wcn;
    vx = (data_t*) malloc(sizeof(data_t)*plan->numPixel);
    vy = (data_t*) malloc(sizeof(data_t)*plan->numPixel);
    vz = (data_t*) malloc(sizeof(data_t)*plan->numPixel);
    random_normal(vx,plan->numPixel);
    random_normal(vy,plan->numPixel);
    random_normal(vz,plan->numPixel);
    wcdf1 = (data_t*) malloc(sizeof(data_t)*plan->numCoeff);
    wcdf2 = (data_t*) malloc(sizeof(data_t)*plan->numCoeff);
    wcn = (data_t*) malloc(sizeof(data_t)*plan->numCoeff);
    // Get Wavelet Coefficients
    // NOTE(review): use_gpu is temporarily forced from 1 to 2 around the
    // transform -- presumably selecting an alternate execution path for this
    // calibration; confirm against dfwavelet_forward's use_gpu handling.
    int temp_use_gpu = plan->use_gpu;
    if (plan->use_gpu==1)
      plan->use_gpu = 2;
    dfwavelet_forward(plan,wcdf1,wcdf2,wcn,vx,vy,vz);
    plan->use_gpu = temp_use_gpu;
    // Get Noise Amp for each subband
    // Position pointers just past the coarsest approximation block
    // (waveSizes[0..2] elements) ...
    data_t* HxLyLz1 = wcdf1 + plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2];
    data_t* HxLyLz2 = wcdf2 + plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2];
    data_t* HxLyLz3 = wcn + plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2];
    int l;
    // ... then advance past the 7 detail subbands of every level so the
    // pointers end one-past-the-last coefficient; the level loop below walks
    // them back down from finest storage position to coarsest.
    for (l = 1; l <= plan->numLevels; ++l){
      HxLyLz1 += 7*plan->waveSizes[0 + 3*l]*plan->waveSizes[1 + 3*l]*plan->waveSizes[2 + 3*l];
      HxLyLz2 += 7*plan->waveSizes[0 + 3*l]*plan->waveSizes[1 + 3*l]*plan->waveSizes[2 + 3*l];
      HxLyLz3 += 7*plan->waveSizes[0 + 3*l]*plan->waveSizes[1 + 3*l]*plan->waveSizes[2 + 3*l];
    }
    // one amplitude per subband: 7 subbands x 3 transforms x numLevels
    int numBand = 7*plan->numLevels*3;
    plan->noiseAmp = (scalar_t*) malloc(sizeof(scalar_t)*numBand);
    int naInd = 0;
    for (l = plan->numLevels; l >= 1; --l)
    {
      int dxNext = plan->waveSizes[0 + 3*l];
      int dyNext = plan->waveSizes[1 + 3*l];
      int dzNext = plan->waveSizes[2 + 3*l];
      int blockSize = dxNext*dyNext*dzNext;   // coefficients per subband at this level
      // step back over this level's 7 subbands in each of the 3 coefficient sets
      HxLyLz1 = HxLyLz1 - 7*blockSize;
      HxLyLz2 = HxLyLz2 - 7*blockSize;
      HxLyLz3 = HxLyLz3 - 7*blockSize;
      int bandInd;
      //#pragma omp parallel for private(bandInd)
      for (bandInd=0; bandInd<7*3;bandInd++)
      {
        // select the subband: 0-6 from wcdf1, 7-13 from wcdf2, 14-20 from wcn
        data_t *subband;
        if (bandInd<7)
        {
          subband = HxLyLz1 + bandInd*blockSize;
        } else if (bandInd<14)
        {
          subband = HxLyLz2 + (bandInd-7)*blockSize;
        } else
        {
          subband = HxLyLz3 + (bandInd-14)*blockSize;
        }
        // Welford's one-pass algorithm for mean and variance
        data_t sig = 0;
        data_t mean = 0;
        data_t mean_old;
        int i;
        for (i=0; i<blockSize; i++)
        {
          // NOTE(review): subband is data_t* but x is declared scalar_t --
          // confirm the two typedefs are compatible (no narrowing) here.
          scalar_t x = subband[i];
          mean_old = mean;
          mean = mean_old + (x-mean_old)/(i+1);
          sig = sig + (x - mean_old)*(x-mean);
        }
        // sample standard deviation of the subband = its noise amplification
        sig = sqrt(sig/(blockSize-1));
        plan->noiseAmp[naInd] = sig;
        naInd++;
      }
    }
    free(vx);
    free(vy);
    free(vz);
    free(wcdf1);
    free(wcdf2);
    free(wcn);
  }
}
|
concom.c | /**********************************************************************************************/
/* This program is part of the Barcelona OpenMP Tasks Suite */
/* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */
/* Copyright (C) 2009 Universitat Politecnica de Catalunya */
/* */
/* This program is free software; you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation; either version 2 of the License, or */
/* (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */
/* */
/* You should have received a copy of the GNU General Public License */
/* along with this program; if not, write to the Free Software */
/* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */
/**********************************************************************************************/
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <sys/time.h>
#include "app-desc.h"
#include "bots.h"
// bots_arg_size == Number of nodes
// bots_arg_size_1 == Maximum number of neighbors per node
// bots_arg_size_2 == Number of links in the entire graph
node *nodes;
int *visited, *components;
// Checks to see if two nodes can be linked
// Returns 1 if an edge N1--N2 may be added: the endpoints must be distinct,
// both must have room for another neighbor, and the edge must not already
// exist; returns 0 otherwise.
int linkable(int N1, int N2) {
  int k;
  if (N1 == N2) return (0);
  if (nodes[N1].n >= bots_arg_size_1 || nodes[N2].n >= bots_arg_size_1)
    return (0);
  for (k = 0; k < nodes[N1].n; k++) {
    if (nodes[N1].neighbor[k] == N2) return (0);
  }
  return (1);
}
// Allocates and creates a graph with random links between nodes
// also allocates visited and components vectors
// Allocates the global graph (nodes, visited, components) and populates it
// with bots_arg_size_2 random links; a link is only materialized when
// linkable() accepts the randomly drawn endpoint pair.
void initialize() {
  int idx, lnk;

  nodes = (node *) malloc(bots_arg_size * sizeof(node));
  visited = (int *) malloc(bots_arg_size * sizeof(int));
  components = (int *) malloc(bots_arg_size * sizeof(int));

  /* every node starts with an empty, fully pre-allocated adjacency list */
  for (idx = 0; idx < bots_arg_size; idx++) {
    nodes[idx].n = 0;
    nodes[idx].neighbor = (int *) malloc(bots_arg_size_1 * sizeof(int));
  }

  /* for each link, generate end nodes and link (rand() drawn twice, in order) */
  for (lnk = 0; lnk < bots_arg_size_2; lnk++)
  {
    double draw;
    int a, b;
    draw = rand() / (double) RAND_MAX;
    a = (int) ((bots_arg_size-1) * draw);
    draw = rand() / (double) RAND_MAX;
    b = (int) ((bots_arg_size-1) * draw);
    if (linkable(a, b)) {
      /* record the undirected edge on both endpoints */
      nodes[a].neighbor[nodes[a].n] = b;
      nodes[b].neighbor[nodes[b].n] = a;
      nodes[a].n += 1;
      nodes[b].n += 1;
    }
  }
}
// Writes the number of CCs
// Reports the component count for graph n; in verbose mode also prints the
// size of every component.
void write_outputs(int n, int cc) {
  int c;
  printf("Graph %d, Number of components %d\n", n, cc);
  if (!bots_verbose_mode) return;
  for (c = 0; c < cc; c++) {
    printf("Component %d Size: %d\n", c, components[c]);
  }
}
// Marks a node and all its neighbors as part of the CC
// Marks a node and all its neighbors as part of the CC
// (task-parallel traversal: each neighbor subtree becomes its own task, and
// the taskwait ensures the whole subtree is labeled before returning).
void CC_par (int i, int cc)
{
  int j, n;
  /* if node has not been visited */
  // NOTE(review): this check happens outside the critical section, so two
  // concurrent tasks can both observe visited[i]==0 and both increment
  // components[cc] for the same node -- the critical section serializes the
  // writes but does not re-check.  Confirm this over-count is acceptable
  // (or intended) for this benchmark.
  if (visited[i] == 0) {
    /* add node to current component */
    if (bots_verbose_mode) printf("Adding node %d to component %d\n", i, cc);
    #pragma omp critical
    {
      visited[i] = 1;
      components[cc]++;
    }
    /* add each neighbor's subtree to the current component */
    for (j = 0; j < nodes[i].n; j++)
    {
      // n is a function-scope local, so it is firstprivate in the task by
      // default; the explicit firstprivate(i,cc) pins the loop's i and cc.
      n = nodes[i].neighbor[j];
      #pragma omp task untied firstprivate (i,cc)
      {CC_par(n, cc);}
    }
    #pragma omp taskwait
  }
}
// Sequential counterpart of CC_par: claims node i for component cc and
// recurses into every neighbor, growing components[cc] as it goes.
void CC_seq (int i, int cc)
{
  int j;
  if (visited[i]) return;   /* already claimed by some component */
  if (bots_verbose_mode) printf("Adding node %d to component %d\n", i, cc);
  visited[i] = 1;
  components[cc]++;
  /* recurse into each neighbor's subtree */
  for (j = 0; j < nodes[i].n; j++)
  {
    CC_seq(nodes[i].neighbor[j], cc);
  }
}
// Resets the traversal state: no node visited, every component size zero.
void cc_init()
{
  int k;
  for (k = 0; k < bots_arg_size; k++)
  {
    visited[k] = 0;
    components[k] = 0;
  }
}
// Counts connected components in parallel: every node still unvisited when
// the scan reaches it seeds a new component, which is fully labeled by the
// task-parallel CC_par traversal before the count is advanced.
void cc_par(int *cc)
{
  int i;
  *cc = 0;
  /* for all nodes ... unvisited nodes start a new component */
  // One thread (single) drives the scan inside an untied task; the taskwait
  // after each seed task guarantees the component is completely marked
  // before the next unvisited node is sought.
  #pragma omp parallel
  #pragma omp single
  #pragma omp task untied
  for (i = 0; i < bots_arg_size; i++)
  {
    if (visited[i] == 0)
    {
      #pragma omp task untied firstprivate (i,cc)
      {CC_par(i, *cc);}
      #pragma omp taskwait
      (*cc)++;
    }
  }
}
// Counts connected components sequentially: every still-unvisited node seeds
// a new component, which is fully labeled by the recursive CC_seq traversal.
// On return, *cc holds the number of connected components.
void cc_seq(int *cc)
{
  int i;
  (*cc) = 0;
  /* for all nodes ... unvisited nodes start a new component */
  for (i = 0; i < bots_arg_size; i++)
  {
    if (visited[i] == 0)
    {
      // Bug fix: this sequential baseline previously invoked the parallel
      // variant CC_par (spawning OpenMP tasks and entering critical
      // sections), which left CC_seq dead code and made the "sequential"
      // reference not sequential.  Use the sequential traversal.
      CC_seq(i, *cc);
      (*cc)++;
    }
  }
}
// Verifies the parallel result against the sequential baseline; returns the
// BOTS success code when both component counts agree.
int cc_check(int ccs, int ccp)
{
  if (bots_verbose_mode)
    fprintf(stdout, "Sequential = %d CC, Parallel =%d CC\n", ccs, ccp);
  return (ccs == ccp) ? BOTS_RESULT_SUCCESSFUL : BOTS_RESULT_UNSUCCESSFUL;
}
|
normsq.c | // Squared-vector-norm (sum of squares) benchmark skeleton program
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "timing.h"
// Benchmark driver: repeatedly computes the squared Euclidean norm of f1,
// doubling the iteration count until at least `mintime` seconds of wallclock
// time are measured, then reports time per iteration and GFLOP/s.
// Usage: <prog> <size> [mintime]
int main(int argc, char** argv) {
    double wct_start, wct_end, cput_start, cput_end, runtime;
    int iter, size, i, j;
    double *f1, *f2;
    iter = 1000;
    double mintime = 4.0;   // minimum measured wallclock interval [s]
    if (argc != 2 && argc != 3) {
        printf("Usage: %s <size> [mintime]\n", argv[0]);
        exit(1);
    }
    if (argc == 3) {
        mintime = atof(argv[2]);
    }
    size = atoi(argv[1]);
    if (size <= 0) {   // guard against non-numeric / non-positive sizes
        fprintf(stderr, "invalid size: %s\n", argv[1]);
        exit(1);
    }
    f1 = malloc((size_t)size * sizeof(double));
    f2 = malloc((size_t)size * sizeof(double));
    if (f1 == NULL || f2 == NULL) {   // bug fix: mallocs were unchecked
        fprintf(stderr, "allocation of %d doubles failed\n", size);
        exit(1);
    }
    // non-trivial initialization so the reduction cannot be constant-folded
#pragma omp parallel for schedule(static)
    for (i = 0; i < size; i++) {
        f1[i] = sin((double) i * i);
        f2[i] = cos((double) 2 * i);
    }
    // time measurement (the redundant pre-loop timing() call whose result was
    // immediately overwritten has been removed, as were unused locals r,k,n)
    double sum = 0.0;
    while (1) {
        timing(&wct_start, &cput_start);
        for (j = 0; j < iter; j++) {
#pragma omp parallel for reduction(+:sum) schedule(static)
            for (i = 0; i < size; i++) {
                sum += f1[i]*f1[i];
            }
        }
        timing(&wct_end, &cput_end);
        // making sure mintime was spent, otherwise restart with 2*iter
        if (wct_end - wct_start > mintime) {
            break;
        }
        iter = iter * 2;
    }
    runtime = wct_end - wct_start;
    printf("size:\t%d\ttime/iter:\t%lf\tGFLOP/s:\t%lf\n", size, runtime/iter, ((double)iter) * size * 1e-9 / runtime);
    free(f1);   // bug fix: buffers were leaked
    free(f2);
    return 0;
}
|
hello2.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
// Spawns a team of 8 threads; every thread announces itself, and the master
// thread additionally reports the team size.
int main (int argc, char *argv[])
{
  omp_set_num_threads(8);
  /* variables declared inside the region are private per construction,
     replacing the explicit private(nthreads, tid) clause */
  #pragma omp parallel
  {
    int tid = omp_get_thread_num();
    printf("Hello World from thread = %d\n", tid);
    if (tid == 0)
    {
      /* master thread only */
      int nthreads = omp_get_num_threads();
      printf("Number of threads = %d\n", nthreads);
    }
  } /* implicit barrier: all threads rejoin the master here */
  return 0;
}
|
GB_memoryUsage.c | //------------------------------------------------------------------------------
// GB_memoryUsage: # of bytes used for a matrix
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#include "GB.h"
GrB_Info GB_memoryUsage // count # allocated blocks and their sizes
(
int64_t *nallocs, // # of allocated memory blocks
size_t *mem_deep, // # of bytes in blocks owned by this matrix
size_t *mem_shallow, // # of bytes in blocks owned by another matrix
const GrB_Matrix A // matrix to query
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (nallocs != NULL) ;
ASSERT (mem_deep != NULL) ;
ASSERT (mem_shallow != NULL) ;
//--------------------------------------------------------------------------
// count the allocated blocks and their sizes
//--------------------------------------------------------------------------
// a matrix contains 0 to 10 dynamically malloc'd blocks
(*nallocs) = 0 ;
(*mem_deep) = 0 ;
(*mem_shallow) = 0 ;
if (A == NULL)
{
#pragma omp flush
return (GrB_SUCCESS) ;
}
GB_Pending Pending = A->Pending ;
if (!A->static_header)
{
(*nallocs)++ ;
(*mem_deep) += A->header_size ;
}
if (A->p != NULL)
{
if (A->p_shallow)
{
(*mem_shallow) += A->p_size ;
}
else
{
(*nallocs)++ ;
(*mem_deep) += A->p_size ;
}
}
if (A->h != NULL)
{
if (A->h_shallow)
{
(*mem_shallow) += A->h_size ;
}
else
{
(*nallocs)++ ;
(*mem_deep) += A->h_size ;
}
}
if (A->b != NULL)
{
if (A->b_shallow)
{
(*mem_shallow) += A->b_size ;
}
else
{
(*nallocs)++ ;
(*mem_deep) += A->b_size ;
}
}
if (A->i != NULL)
{
if (A->i_shallow)
{
(*mem_shallow) += A->i_size ;
}
else
{
(*nallocs)++ ;
(*mem_deep) += A->i_size ;
}
}
if (A->x != NULL)
{
if (A->x_shallow)
{
(*mem_shallow) += A->x_size ;
}
else
{
(*nallocs)++ ;
(*mem_deep) += A->x_size ;
}
}
if (Pending != NULL)
{
(*nallocs)++ ;
(*mem_deep) += Pending->header_size ;
}
if (Pending != NULL && Pending->i != NULL)
{
(*nallocs)++ ;
(*mem_deep) += Pending->i_size ;
}
if (Pending != NULL && Pending->j != NULL)
{
(*nallocs)++ ;
(*mem_deep) += Pending->j_size ;
}
if (Pending != NULL && Pending->x != NULL)
{
(*nallocs)++ ;
(*mem_deep) += Pending->x_size ;
}
#pragma omp flush
return (GrB_SUCCESS) ;
}
|
GB_binop__bget_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bget_int64)
// A.*B function (eWiseMult): GB (_AemultB)
// A.*B function (eWiseMult): GB (_AemultB_02__bget_int64)
// A.*B function (eWiseMult): GB (_AemultB_03__bget_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bget_int64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((node))
// C+=B function (dense accum): GB (_Cdense_accumB__bget_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__bget_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bget_int64)
// C=scalar+B GB (_bind1st__bget_int64)
// C=scalar+B' GB (_bind1st_tran__bget_int64)
// C=A+scalar GB (_bind2nd__bget_int64)
// C=A'+scalar GB (_bind2nd_tran__bget_int64)
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = GB_BITGET (aij, bij, int64_t, 64)
// Type and operator plumbing consumed by the template files included below.

// type of matrix A
#define GB_ATYPE \
    int64_t

// type of matrix B (and the scalar b)
#define GB_BTYPE \
    int64_t

// type of matrix C
#define GB_CTYPE \
    int64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int64_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

// access entry p of C's value array
#define GB_CX(p) Cx [p]

// binary operator: z = GB_BITGET (x, y, int64_t, 64)
// NOTE(review): exact BITGET semantics are defined by GB_BITGET in GB.h.
#define GB_BINOP(z, x, y, i, j) \
    z = GB_BITGET (x, y, int64_t, 64) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BGET || GxB_NO_INT64 || GxB_NO_BGET_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Disabled by the generator (#if 0): no dense ewise3-accum kernel is emitted
// for this operator.
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the included template applies
// GB_BINOP elementwise using `nthreads` threads.
GrB_Info GB (_Cdense_ewise3_noaccum__bget_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse; B arrives pre-sliced into
// B_ntasks tasks (B_ek_slicing) for B_nthreads threads.
GrB_Info GB (_Cdense_accumB__bget_int64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b where C is dense and b is a scalar (passed type-erased via p_bwork).
GrB_Info GB (_Cdense_accumb__bget_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // fix: a second, unreachable "return (GrB_SUCCESS) ;" that followed the
    // block above (dead code after the inner return) has been removed
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled by the generator (#if 0): no colscale (C = A*D) kernel is emitted
// for this operator, hence the placeholder name "(none)".
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled by the generator (#if 0): no rowscale (C = D*B) kernel is emitted
// for this operator.
// NOTE(review): "GB ((node))" appears to be a generator typo for
// "GB ((none))" (compare the colscale stub above); harmless while disabled,
// but worth confirming in the code generator.
GrB_Info GB ((node))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd C = A+B (optionally masked by M); the work is pre-partitioned
// into C_ntasks tasks (TaskList) for C_nthreads threads, and the C_to_*
// arrays map C's vectors back to those of M, A, and B.
GrB_Info GB (_AaddB__bget_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces for slicing M, A, and B; released by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult C = A.*B or C<M> = A.*B (general case); work is pre-partitioned
// into C_ntasks tasks for C_nthreads threads.
GrB_Info GB (_AemultB_01__bget_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<#> = A.*B when A is sparse/hyper and B is bitmap/full; the
// flipxy flag selects whether the operator's arguments are swapped.
GrB_Info GB (_AemultB_02__bget_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x): GB_FLIPPED=1 makes the template swap the operands
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<M> = A.*B where M is sparse/hyper and both A and B are
// bitmap/full; M is pre-sliced into M_ntasks tasks for M_nthreads threads.
GrB_Info GB (_AemultB_03__bget_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult C=A.*B, C<M>=A.*B, or C<!M>=A.*B where the result C is bitmap.
GrB_Info GB (_AemultB_bitmap__bget_int64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = BITGET (x, Bx [p]) for every entry present in B (bind the scalar
// as the first operand); entries absent from the bitmap Bb are skipped.
GrB_Info GB (_bind1st__bget_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t x = (*((int64_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (GBB (Bb, p))
        {
            int64_t b_val = Bx [p] ;
            Cx [p] = GB_BITGET (x, b_val, int64_t, 64) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = BITGET (Ax [p], y) for every entry present in A (bind the scalar
// as the second operand); entries absent from the bitmap Ab are skipped.
GrB_Info GB (_bind2nd__bget_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t y = (*((int64_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (GBB (Ab, p))
        {
            int64_t a_val = Ax [p] ;
            Cx [p] = GB_BITGET (a_val, y, int64_t, 64) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    int64_t aij = Ax [pA] ;  \
    Cx [pC] = GB_BITGET (x, aij, int64_t, 64) ; \
}

// C = op (x, A'): transpose A and apply the operator with the scalar bound
// as the first operand.
GrB_Info GB (_bind1st_tran__bget_int64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows this function
    #undef GB_ATYPE
    #define GB_ATYPE \
    int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    int64_t aij = Ax [pA] ;  \
    Cx [pC] = GB_BITGET (aij, y, int64_t, 64) ; \
}

// C = op (A', y): transpose A and apply the operator with the scalar bound
// as the second operand.
GrB_Info GB (_bind2nd_tran__bget_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.