source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
omp-parallel-if.c | #include <omp.h>
extern void abort (void);
/* Helper returning the constant 10.  Its only purpose is to feed a
   runtime-evaluated expression into the OpenMP "if" and "num_threads"
   clauses exercised below.  */
int
foo (void)
{
  const int value = 10;
  return value;
}
/* Entry point: verifies that the OpenMP "if" and "num_threads" clauses
   on a parallel construct are honoured.  Each region stores the team
   size into A, which the serial code then checks:
     - if (foo () > 10) is false  -> region is serialized, 1 thread;
     - if (foo () == 10) is true with num_threads (3) -> 3 threads;
     - num_threads (foo ()) -> runtime-evaluated team size of 10.
   A is shared; every thread in a team writes the same value into it.
   Fix: declare the return type explicitly — implicit int on main ()
   is invalid since C99.  */
int
main (void)
{
  int A = 0;
#pragma omp parallel if (foo () > 10) shared (A)
  {
    A = omp_get_num_threads ();
  }
  if (A != 1)
    abort ();
#pragma omp parallel if (foo () == 10) num_threads (3) shared (A)
  {
    A = omp_get_num_threads ();
  }
  if (A != 3)
    abort ();
#pragma omp parallel if (foo () == 10) num_threads (foo ()) shared (A)
  {
    A = omp_get_num_threads ();
  }
  if (A != 10)
    abort ();
  return 0;
}
|
mixed_generic_criteria.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Jordi Cotela, Riccardo Rossi, Carlos Roig and Ruben Zorrilla
//
#ifndef KRATOS_MIXED_GENERIC_CRITERIA_H
#define KRATOS_MIXED_GENERIC_CRITERIA_H
// System includes
// External includes
// Project includes
#include "includes/define.h"
#include "includes/model_part.h"
#include "convergence_criteria.h"
// Application includes
namespace Kratos
{
///@addtogroup KratosCore
///@{
///@name Kratos Classes
///@{
/// Convergence criteria for mixed vector-scalar problems.
/**
This class implements a convergence control based on a nodal vector variable and
a nodal scalar variable. The error is evaluated separately for each of them, and
relative and absolute tolerances for both must be specified.
*/
template< class TSparseSpace, class TDenseSpace >
class MixedGenericCriteria : public ConvergenceCriteria< TSparseSpace, TDenseSpace >
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION(MixedGenericCriteria);
typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType;
typedef MixedGenericCriteria< TSparseSpace, TDenseSpace > ClassType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef std::vector<std::tuple<const VariableData*, TDataType, TDataType>> ConvergenceVariableListType;
typedef std::size_t KeyType;
///@}
///@name Life Cycle
///@{
/// Default constructor. Builds a criteria with an empty convergence variable list (mVariableSize = 0).
explicit MixedGenericCriteria()
: BaseType(),
mVariableSize(0)
{
}
/**
* @brief Default constructor. (with parameters)
* Delegates to the list-based constructor after parsing the "convergence_variables_list" settings entry.
* @param ThisParameters The configuration parameters
*/
explicit MixedGenericCriteria(Kratos::Parameters ThisParameters)
: MixedGenericCriteria(GenerateConvergenceVariableListFromParameters(ThisParameters))
{
}
/**
* @brief Construct a new Mixed Generic Criteria object
* Construct the mixed generic convergence criteria from a convergence variables list.
* The convergence variable list contains for each variable the variable itself as well as the corresponding relative and absolute tolerances.
* The const members are filled via immediately-invoked lambdas so they can be set in the member initializer list.
* Note the initialization order matters: mVariableSize is set first and is read by the lambdas that follow.
* @param rConvergenceVariablesList List containing tuples with the convergence variables to be checked. The tuples are set as <Variable, relative tolerance, absolute tolerance>
*/
MixedGenericCriteria(const ConvergenceVariableListType& rConvergenceVariablesList)
: BaseType()
// Number of convergence variables to be checked
, mVariableSize([&] (const ConvergenceVariableListType& rList) -> int {return rList.size();} (rConvergenceVariablesList))
// Pointers to the checked variables, in list order
, mVariableDataVector([&] (const ConvergenceVariableListType& rList) -> std::vector<const VariableData*> {
int i = 0;
std::vector<const VariableData*> aux_vect(mVariableSize);
for (const auto &r_tup : rList) {
aux_vect[i++] = std::get<0>(r_tup);
}
return aux_vect;
} (rConvergenceVariablesList))
// Relative tolerance for each variable, in list order
, mRatioToleranceVector([&] (const ConvergenceVariableListType& rList) -> std::vector<TDataType> {
int i = 0;
std::vector<TDataType> aux_vect(mVariableSize);
for (const auto &r_tup : rList) {
aux_vect[i++] = std::get<1>(r_tup);
}
return aux_vect;
} (rConvergenceVariablesList))
// Absolute tolerance for each variable, in list order
, mAbsToleranceVector([&] (const ConvergenceVariableListType& rList) -> std::vector<TDataType> {
int i = 0;
std::vector<TDataType> aux_vect(mVariableSize);
for (const auto &r_tup : rList) {
aux_vect[i++] = std::get<2>(r_tup);
}
return aux_vect;
} (rConvergenceVariablesList))
// Map from Kratos variable key to local (0-based, list order) index.
// Rejects duplicated variables and component variables (e.g. a single
// component of a vector): only the full vector variable may be checked.
, mLocalKeyMap([&] (const ConvergenceVariableListType& rList) -> std::unordered_map<KeyType, KeyType> {
KeyType local_key = 0;
std::unordered_map<KeyType, KeyType> aux_map;
for (const auto &r_tup : rList) {
const auto *p_var_data = std::get<0>(r_tup);
if (aux_map.find(p_var_data->Key()) != aux_map.end()) {
KRATOS_ERROR << "Convergence variable " << p_var_data->Name() << " is repeated. Check the input convergence variable list." << std::endl;
} else {
KRATOS_ERROR_IF(p_var_data->IsComponent()) << "Trying to check convergence with the " << p_var_data->Name() << " component variable. Use the corresponding vector one." << std::endl;
aux_map[p_var_data->Key()] = local_key++;
}
}
return aux_map;
} (rConvergenceVariablesList))
{}
/// Destructor.
~MixedGenericCriteria() override
{}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
* @brief Create method
* Factory-style creation: builds a new criteria instance from the given settings.
* @param ThisParameters The configuration parameters
* @return typename BaseType::Pointer Shared pointer to the newly created criteria
*/
typename BaseType::Pointer Create(Parameters ThisParameters) const override
{
return Kratos::make_shared<ClassType>(ThisParameters);
}
/// Compute relative and absoute error.
/**
* @param rModelPart Reference to the ModelPart containing the fluid problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
* @param A System matrix (unused)
* @param Dx Vector of results (variations on nodal variables)
* @param b RHS vector (residual)
* @return true if convergence is achieved, false otherwise
*/
bool PostCriteria(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
const TSystemMatrixType& A,
const TSystemVectorType& Dx,
const TSystemVectorType& b) override
{
    // Nothing was solved for: every DOF is constrained, so the (empty)
    // update is trivially converged.
    if (TSparseSpace::Size(Dx) == 0) {
        return true;
    }
    // Compute the per-variable ratio and absolute norms, report them and
    // check them against the user-defined tolerances.
    const auto convergence_norms = CalculateConvergenceNorms(rModelPart, rDofSet, Dx);
    OutputConvergenceStatus(convergence_norms);
    return CheckConvergence(convergence_norms);
}
/**
* @brief Returns the name of the class as used in the settings (snake_case format)
* @return The name of the class
*/
static std::string Name()
{
return "mixed_generic_criteria";
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
return "MixedGenericCriteria";
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << Info();
}
/// Print object's data.
void PrintData(std::ostream& rOStream) const override
{
rOStream << Info();
}
///@}
protected:
///@name Protected Static Member Variables
///@{
///@}
///@name Protected Member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
* @brief Get the Variable Size object
* Get the number of variables to be checked
* @return const int Number of variables to check
*/
int GetVariableSize() const
{
return mVariableSize;
}
/**
* @brief Get the Variable Data Vector object
* Get the member vector that stores pointers to the variables to check (copy, in list order)
* @return std::vector<const VariableData*> Vector containing pointers to the variables to check
*/
std::vector<const VariableData*> GetVariableDataVector() const
{
return mVariableDataVector;
}
/**
* @brief Get the Ratio Tolerance Vector object
* Get a copy of the member vector containing the ratio (relative) tolerances for each variable to check
* @return std::vector<TDataType> Vector containing the ratio tolerances
*/
std::vector<TDataType> GetRatioToleranceVector() const
{
return mRatioToleranceVector;
}
/**
* @brief Get the Abs Tolerance Vector object
* Get a copy of the member vector containing the absolute tolerances for each variable to check
* @return std::vector<TDataType> Vector containing the absolute tolerances
*/
std::vector<TDataType> GetAbsToleranceVector() const
{
return mAbsToleranceVector;
}
/**
* @brief Get the Local Key Map object
* Returns a mutable reference to the variable-key to local-index map
* @return std::unordered_map<KeyType, KeyType>& Reference to the local key map
*/
std::unordered_map<KeyType, KeyType>& GetLocalKeyMap()
{
return mLocalKeyMap;
}
/**
* @brief Calculate the convergence norms
* This method calculates the convergence norms for all the variables to be checked.
* Local (partition) norms are accumulated by GetNormValues and then summed across
* the data communicator, so the result is globally consistent in MPI runs.
* @param rModelPart Reference to the ModelPart containing the fluid problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
* @param rDx Vector of results (variations on nodal variables)
* @return std::tuple<std::vector<TDataType>, std::vector<TDataType>> Tuple containing the relative (ratio) and absolute convergence values
*/
std::tuple<std::vector<TDataType>, std::vector<TDataType>> CalculateConvergenceNorms(
const ModelPart& rModelPart,
const DofsArrayType& rDofSet,
const TSystemVectorType& rDx)
{
// Initialize the local accumulation arrays
std::vector<int> dofs_count(mVariableSize, 0);
std::vector<TDataType> solution_norms_vector(mVariableSize, 0.0);
std::vector<TDataType> increase_norms_vector(mVariableSize, 0.0);
// Accumulate the norm values
GetNormValues(rModelPart, rDofSet, rDx, dofs_count, solution_norms_vector, increase_norms_vector);
// Synchronize the norm values among MPI partitions
const auto& r_data_comm = rModelPart.GetCommunicator().GetDataCommunicator();
auto global_solution_norms_vector = r_data_comm.SumAll(solution_norms_vector);
auto global_increase_norms_vector = r_data_comm.SumAll(increase_norms_vector);
auto global_dofs_count = r_data_comm.SumAll(dofs_count);
// Check division by zero in global solution norms (e.g. an all-zero solution field)
const double zero_tol = 1.0e-12;
for(int i = 0; i < mVariableSize; i++) {
    if (global_solution_norms_vector[i] < zero_tol) {
        global_solution_norms_vector[i] = 1.0;
    }
}
// Calculate the norm values
std::vector<TDataType> var_ratio(mVariableSize, 0.0);
std::vector<TDataType> var_abs(mVariableSize, 0.0);
for(int i = 0; i < mVariableSize; i++) {
    var_ratio[i] = std::sqrt(global_increase_norms_vector[i] / global_solution_norms_vector[i]);
    // Guard against variables with no free DOFs: the original division by a
    // zero DOF count yielded inf/NaN and poisoned the convergence check.
    // With no free DOFs the correction norm is zero, so report 0 instead.
    var_abs[i] = global_dofs_count[i] > 0
        ? std::sqrt(global_increase_norms_vector[i]) / static_cast<TDataType>(global_dofs_count[i])
        : static_cast<TDataType>(0.0);
}
// Output the ratio and absolute norms as a tuple
return std::make_tuple(var_ratio, var_abs);
}
/**
* @brief Method to output the convergence status
* This method prints the convergence status to the screen for each one of the checked variables.
* Only prints when the echo level is greater than zero; the whole report is built in a
* string buffer first so it is emitted as a single KRATOS_INFO message.
* NOTE(review): assumes at least one convergence variable is set — std::max_element on an
* empty mVariableDataVector would dereference the end iterator; confirm callers never use
* an empty list with echo level > 0.
* @param rConvergenceNorms Tuple containing the relative and absolute convergence values
*/
virtual void OutputConvergenceStatus(
const std::tuple<std::vector<TDataType>,
std::vector<TDataType>>& rConvergenceNorms)
{
const auto& var_ratio = std::get<0>(rConvergenceNorms);
const auto& var_abs = std::get<1>(rConvergenceNorms);
if (this->GetEchoLevel() > 0) {
std::ostringstream stringbuf;
stringbuf << "CONVERGENCE CHECK:\n";
// Length of the longest variable name, used to align the report columns
const int max_length_var_name = (*std::max_element(mVariableDataVector.begin(), mVariableDataVector.end(), [](const VariableData* p_var_data_1, const VariableData* p_var_data_2){
return p_var_data_1->Name().length() < p_var_data_2->Name().length();
}))->Name().length();
for(int i = 0; i < mVariableSize; i++) {
const auto r_var_data = mVariableDataVector[i];
const int key_map = mLocalKeyMap[r_var_data->Key()];
// Pad with spaces so every variable row lines up
const std::string space_str(max_length_var_name-r_var_data->Name().length(), ' ');
stringbuf << " " << r_var_data->Name() << space_str <<" : ratio = " << var_ratio[key_map] << "; exp.ratio = " << mRatioToleranceVector[key_map] << " abs = " << var_abs[key_map] << " exp.abs = " << mAbsToleranceVector[key_map] << "\n";
}
KRATOS_INFO("") << stringbuf.str();
}
}
/**
* @brief Method to check convergence
* This method checks the convergence of the provided norms with the user-defined tolerances
* @param rConvergenceNorms Tuple containing the absolute and relative convergence values
* @return true Convergence is satisfied
* @return false Convergence is not satisfied
*/
bool CheckConvergence(
const std::tuple<std::vector<TDataType>,
std::vector<TDataType>>& rConvergenceNorms)
{
bool is_converged = true;
const auto& var_ratio = std::get<0>(rConvergenceNorms);
const auto& var_abs = std::get<1>(rConvergenceNorms);
for (int i = 0; i < mVariableSize; i++) {
const auto r_var_data = mVariableDataVector[i];
const int key_map = mLocalKeyMap[r_var_data->Key()];
is_converged &= var_ratio[key_map] <= mRatioToleranceVector[key_map] || var_abs[key_map] <= mAbsToleranceVector[key_map];
}
// Note that this check ensures that all the convergence variables fulfil either the relative or the absolute criterion
if (is_converged) {
KRATOS_INFO_IF("", this->GetEchoLevel() > 0) << "*** CONVERGENCE IS ACHIEVED ***" << std::endl;
return true;
} else {
return false;
}
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Private Static Member Variables
///@{
///@}
///@name Private Member Variables
///@{
const int mVariableSize;
const std::vector<const VariableData*> mVariableDataVector;
const std::vector<TDataType> mRatioToleranceVector;
const std::vector<TDataType> mAbsToleranceVector;
std::unordered_map<KeyType, KeyType> mLocalKeyMap;
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
/**
* @brief Get the Norm Values
* This function accumulates the solution and increment norm values in the provided arrays.
* Note that these arrays are assumed to be already initialized to zero.
* @param rModelPart Reference to the ModelPart containing the fluid problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
* @param rDx Vector of results (variations on nodal variables)
* @param rDofsCount Array containing the number of DOFs per variable
* @param rSolutionNormsVector Array containing the solution norms accumulated values for each variable checked
* @param rIncreaseNormsVector Array containing the correction norms accumulated values for each variable checked
*/
virtual void GetNormValues(
const ModelPart& rModelPart,
const DofsArrayType& rDofSet,
const TSystemVectorType& rDx,
std::vector<int>& rDofsCount,
std::vector<TDataType>& rSolutionNormsVector,
std::vector<TDataType>& rIncreaseNormsVector)
{
int n_dofs = rDofSet.size();
// Loop over Dofs
#pragma omp parallel
{
// Local thread variables
int dof_id;
TDataType dof_dx;
TDataType dof_value;
// Local reduction variables
std::vector<TDataType> var_solution_norm_reduction(mVariableSize);
std::vector<TDataType> var_correction_norm_reduction(mVariableSize);
std::vector<int> dofs_counter_reduction(mVariableSize);
for (int i = 0; i < mVariableSize; i++) {
var_solution_norm_reduction[i] = 0.0;
var_correction_norm_reduction[i] = 0.0;
dofs_counter_reduction[i] = 0;
}
#pragma omp for
for (int i = 0; i < n_dofs; i++) {
auto it_dof = rDofSet.begin() + i;
if (it_dof->IsFree()) {
dof_id = it_dof->EquationId();
dof_value = it_dof->GetSolutionStepValue(0);
dof_dx = TSparseSpace::GetValue(rDx, dof_id);
const auto &r_current_variable = it_dof->GetVariable();
int var_local_key = mLocalKeyMap[r_current_variable.IsComponent() ? r_current_variable.GetSourceVariable().Key() : r_current_variable.Key()];
var_solution_norm_reduction[var_local_key] += dof_value * dof_value;
var_correction_norm_reduction[var_local_key] += dof_dx * dof_dx;
dofs_counter_reduction[var_local_key]++;
}
}
#pragma omp critical
{
for (int i = 0; i < mVariableSize; i++) {
rDofsCount[i] += dofs_counter_reduction[i];
rSolutionNormsVector[i] += var_solution_norm_reduction[i];
rIncreaseNormsVector[i] += var_correction_norm_reduction[i];
}
}
}
}
/**
* @brief This method generates the list of variables from Parameters
* Builds <variable, relative tolerance, absolute tolerance> tuples from the
* "convergence_variables_list" settings entry. Entries without a "variable"
* field are skipped; missing tolerances default to 1.0e-4 (relative) and
* 1.0e-9 (absolute).
* NOTE(review): if the variable name is neither a registered double nor an
* array_1d<double,3> variable, the second KratosComponents::Get is expected
* to raise — confirm this is the intended error path.
* @param ThisParameters Input parameters
* @return List of variables considered as input
*/
static ConvergenceVariableListType GenerateConvergenceVariableListFromParameters(Kratos::Parameters ThisParameters)
{
// Iterate over variables
ConvergenceVariableListType aux_list;
if (!ThisParameters.Has("convergence_variables_list")) return aux_list;
Kratos::Parameters convergence_variables_list = ThisParameters["convergence_variables_list"];
for (auto param : convergence_variables_list) {
if (param.Has("variable")) {
const std::string& r_variable_name = param["variable"].GetString();
// Variable pointer: try scalar (double) variables first, then 3-component vector ones
const VariableData* p_variable = KratosComponents<Variable<double>>::Has(r_variable_name) ? dynamic_cast<const VariableData*>(&KratosComponents<Variable<double>>::Get(r_variable_name)) : dynamic_cast<const VariableData*>(&KratosComponents<Variable<array_1d<double, 3>>>::Get(r_variable_name));
// Tolerances
const double rel_tol = param.Has("relative_tolerance") ? param["relative_tolerance"].GetDouble() : 1.0e-4;
const double abs_tol = param.Has("absolute_tolerance") ? param["absolute_tolerance"].GetDouble() : 1.0e-9;
// Push back list
aux_list.push_back(std::make_tuple(p_variable, rel_tol, abs_tol));
}
}
return aux_list;
}
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
};
///@} // Kratos classes
///@} // Application group
}
#endif // KRATOS_MIXED_GENERIC_CRITERIA_H
|
tensor_cpu-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2014 by Contributors
* \file tensor_cpu-inl.h
* \brief implementation of CPU host code
* \author Bing Xu, Tianqi Chen
*/
#ifndef MSHADOW_TENSOR_CPU_INL_H_
#define MSHADOW_TENSOR_CPU_INL_H_
#include <cstring>
#include <functional>
#include <utility>
#include <vector>
#include "./base.h"
#include "./tensor.h"
#include "./packet-inl.h"
#include "./dot_engine-inl.h"
namespace mshadow {
// CPU backend specializations of the tensor engine hooks. The CPU engine
// keeps no global state, so init/shutdown/device selection are no-ops.
template<>
inline void InitTensorEngine<cpu>(int dev_id) {
}
template<>
inline void ShutdownTensorEngine<cpu>(void) {
}
template<>
inline void SetDevice<cpu>(int devid) {
}
// CPU streams carry no BLAS/DNN handles; the flags exist for interface
// parity with the GPU specialization and are ignored here.
template<>
inline Stream<cpu> *NewStream<cpu>(bool create_blas_handle,
bool create_dnn_handle,
int dev_id) {
return new Stream<cpu>();
}
template<>
inline void DeleteStream<cpu>(Stream<cpu> *stream) {
delete stream;
}
// Pretty-print a shape as a python-style tuple, e.g. "(2,3)".
template<int ndim>
inline std::ostream &operator<<(std::ostream &os, const Shape<ndim> &shape) { // NOLINT(*)
os << '(';
for (int i = 0; i < ndim; ++i) {
if (i != 0) os << ',';
os << shape[i];
}
// python style tuple: 1-D shapes get a trailing comma, e.g. "(5,)"
if (ndim == 1) os << ',';
os << ')';
return os;
}
// Device-dispatched helpers for host-side buffer allocation. The xpu tag
// selects pinned (gpu) or plain aligned (cpu) host memory.
template<typename xpu>
inline void *AllocHost_(size_t size);
template<typename xpu>
inline void FreeHost_(void * dptr);
#ifdef __CUDACC__
// gpu flavour: page-locked host memory, portable across CUDA contexts,
// enabling fast asynchronous host<->device transfers.
template<>
inline void *AllocHost_<gpu>(size_t size) {
void *dptr;
MSHADOW_CUDA_CALL(cudaMallocHost(&dptr, size, cudaHostAllocPortable));
return dptr;
}
template<>
inline void FreeHost_<gpu>(void *dptr) {
MSHADOW_CUDA_CALL(cudaFreeHost(dptr));
}
#endif
// cpu flavour: plain aligned allocation; pitch is unused for a 1-row request.
template<>
inline void *AllocHost_<cpu>(size_t size) {
size_t pitch;
return packet::AlignedMallocPitch(&pitch, size, 1);
}
template<>
inline void FreeHost_<cpu>(void *dptr) {
packet::AlignedFree(dptr);
}
// Allocate contiguous host-side storage for a tensor through the
// xpu-specific host allocator; stride_ is set so the tensor is contiguous.
template<typename xpu, int dim, typename DType>
inline void AllocHost(Tensor<cpu, dim, DType> *obj) {
obj->stride_ = obj->size(dim - 1);
CHECK_EQ(obj->CheckContiguous(), true) << "AllocHost";
void *dptr = AllocHost_<xpu>(obj->MSize() * sizeof(DType));
obj->dptr_ = reinterpret_cast<DType*>(dptr);
}
// Release host-side storage; fails loudly on double free and nulls the
// pointer so a repeated call is detectable.
template<typename xpu, int dim, typename DType>
inline void FreeHost(Tensor<cpu, dim, DType> *obj) {
if (obj->dptr_ == NULL) {
LOG(FATAL) << "FreeHost:: double free";
}
FreeHost_<xpu>(obj->dptr_);
obj->dptr_ = NULL;
}
// Allocate CPU tensor storage. With pad=true each row is padded up to the
// aligned pitch returned by AlignedMallocPitch (stride_ may exceed the
// lowest dimension); otherwise the tensor is allocated densely.
template<int dim, typename DType>
inline void AllocSpace(Tensor<cpu, dim, DType> *obj, bool pad) {
size_t pitch;
void *dptr;
if (pad) {
dptr = packet::AlignedMallocPitch
(&pitch, obj->size(dim - 1) * sizeof(DType), obj->shape_.FlatTo2D()[0]);
obj->stride_ = static_cast<index_t>(pitch / sizeof(DType));
} else {
obj->stride_ = obj->size(dim - 1);
dptr = packet::AlignedMallocPitch
(&pitch, obj->shape_.Size() * sizeof(DType), 1);
}
obj->dptr_ = reinterpret_cast<DType*>(dptr);
}
// Convenience factory: allocate a tensor of the given shape on Device,
// attach the stream and fill every element with initv.
template<typename Device, typename DType, int dim>
inline Tensor<Device, dim, DType>
NewTensor(const Shape<dim> &shape, DType initv, bool pad, Stream<Device> *stream_) {
Tensor<Device, dim, DType> obj(shape);
obj.stream_ = stream_;
AllocSpace(&obj, pad);
MapExp<sv::saveto>(&obj, expr::ScalarExp<DType>(initv));
return obj;
}
// Free storage allocated with AllocSpace and null the pointer.
// NOTE(review): unlike FreeHost, this does not guard against double free.
template<int dim, typename DType>
inline void FreeSpace(Tensor<cpu, dim, DType> *obj) {
packet::AlignedFree(obj->dptr_);
obj->dptr_ = NULL;
}
// CPU->CPU copy. Contiguous tensors are copied with one memcpy; otherwise
// the copy is done row by row on the flattened 2-D view to honour each
// tensor's stride. The stream parameter is unused on CPU.
template<int dim, typename DType>
inline void Copy(Tensor<cpu, dim, DType> _dst,
const Tensor<cpu, dim, DType> &_src,
Stream<cpu> *stream) {
CHECK_EQ(_dst.shape_, _src.shape_)
<< "Copy:shape mismatch:" << _dst.shape_ << " vs " << _src.shape_;
if (_dst.CheckContiguous() && _src.CheckContiguous()) {
memcpy(_dst.dptr_, _src.dptr_, sizeof(DType) * _dst.shape_.Size());
} else {
Tensor<cpu, 2, DType> dst = _dst.FlatTo2D();
Tensor<cpu, 2, DType> src = _src.FlatTo2D();
for (index_t y = 0; y < dst.size(0); ++y) {
memcpy(dst[y].dptr_, src[y].dptr_, sizeof(DType) * dst.size(1));
}
}
}
// Evaluate an expression plan element-wise into dst over its flattened 2-D
// shape. When compiled as host code the rows are distributed across OpenMP
// threads; each (y, x) element is independent.
template<typename Saver, typename R, int dim,
typename DType, typename E>
inline void MapPlan(TRValue<R, cpu, dim, DType> *dst,
const expr::Plan<E, DType> &plan) {
Shape<2> shape = expr::ShapeCheck<dim, R>::Check(dst->self()).FlatTo2D();
expr::Plan<R, DType> dplan = expr::MakePlan(dst->self());
#ifndef __CUDACC__
#pragma omp parallel for
#endif
for (openmp_index_t y = 0; y < shape[0]; ++y) {
for (index_t x = 0; x < shape[1]; ++x) {
// trust your compiler! -_- they will optimize it
Saver::template Save<DType>(dplan.REval(y, x), plan.Eval(y, x));
}
}
}
// code to handle SSE optimization
// Fallback engine: the expression cannot be packet (SIMD) vectorized, so it
// is evaluated through the scalar MapPlan path.
template<bool pass_check, typename Saver,
typename R, int dim,
typename DType, typename E, int etype>
struct MapExpCPUEngine {
inline static void Map(TRValue<R, cpu, dim, DType> *dst,
const expr::Exp<E, DType, etype> &exp) {
MapPlan<Saver>(dst, MakePlan(exp.self()));
}
};
// Packet-capable engine: if both the expression and the destination pass the
// runtime alignment check, evaluate through the SIMD packet plan, otherwise
// fall back to the scalar path.
template<typename SV, int dim, typename DType, typename E, int etype>
struct MapExpCPUEngine<true, SV, Tensor<cpu, dim, DType>,
dim, DType, E, etype> {
inline static void Map(Tensor<cpu, dim, DType> *dst,
const expr::Exp<E, DType, etype> &exp) {
if (expr::PacketAlignCheck<dim, E, MSHADOW_DEFAULT_PACKET>::Check(exp.self()) &&
expr::PacketAlignCheck<dim, Tensor<cpu, dim, DType>, MSHADOW_DEFAULT_PACKET>::Check(*dst)) {
expr::MapPacketPlan<SV>(dst->self(),
expr::MakePacketPlan<MSHADOW_DEFAULT_PACKET>(exp.self()));
} else {
MapPlan<SV>(dst, MakePlan(exp.self()));
}
}
};
// Assign the expression to dst (dst = Saver(dst, exp)) after compile-time
// type checks and a runtime shape check. An eshape with eshape[0] == 0 means
// the expression imposes no shape (e.g. a scalar) and matches any target.
template<typename Saver, typename R, int dim,
typename DType, typename E, int etype>
inline void MapExp(TRValue<R, cpu, dim, DType> *dst,
const expr::Exp<E, DType, etype> &exp) {
expr::TypeCheckPass<expr::TypeCheck<cpu, dim, DType, E>::kMapPass>
::Error_All_Tensor_in_Exp_Must_Have_Same_Type();
Shape<dim> eshape = expr::ShapeCheck<dim, E>::Check(exp.self());
Shape<dim> dshape = expr::ShapeCheck<dim, R>::Check(dst->self());
CHECK(eshape[0] == 0 || eshape == dshape)
<< "Assignment: Shape of Tensors are not consistent with target, "
<< "eshape: " << eshape << " dshape:" << dshape;
MapExpCPUEngine<expr::PacketCheck<E, MSHADOW_DEFAULT_PACKET>::kPass,
Saver, R, dim, DType, E, etype>
::Map(dst->ptrself(), exp);
}
// dst[x] = Saver(dst[x], scale * Reducer over y of exp(y, x)): reduce the
// expression over every dimension except the lowest one. Columns are
// independent, so the outer loop is parallelized with OpenMP on host builds.
template<typename Saver, typename Reducer,
typename R, typename DType, typename E, int etype>
inline void MapReduceKeepLowest(TRValue<R, cpu, 1, DType> *dst,
const expr::Exp<E, DType, etype> &exp,
DType scale) {
expr::TypeCheckPass<expr::TypeCheck<cpu, 1, DType, E>::kRedPass>
::Error_TypeCheck_Not_Pass_For_Reduce_Exp();
Shape<2> eshape = expr::ShapeCheck<expr::ExpInfo<E>::kDim, E>
::Check(exp.self()).FlatTo2D();
Shape<1> dshape = expr::ShapeCheck<1, R>::Check(dst->self());
CHECK_EQ(eshape[1], dshape[0]) << "MapReduceKeepLowest::reduction dimension do not match";
CHECK_NE(eshape[0], 0U) << "can not reduce over empty tensor";
// execution
expr::Plan<R, DType> dplan = MakePlan(dst->self());
expr::Plan<E, DType> splan = MakePlan(exp.self());
#ifndef __CUDACC__
#pragma omp parallel for
#endif
for (openmp_index_t x = 0; x < eshape[1]; ++x) {
// seed the reduction with the first row, then fold in the rest
DType res = splan.Eval(0, x);
for (index_t y = 1; y < eshape[0]; ++y) {
Reducer::Reduce(res, splan.Eval(y, x));
}
Saver::template Save<DType>(dplan.REval(0, x), res * scale);
}
}
// dst[c] = reduction keeping dimension dimkeep: the expression is viewed in
// the equivalent 4-D form (prod-before, kept, prod-after, lowest) and every
// axis except the kept one is reduced. The kept-axis loop is parallelized.
template<typename Saver, typename Reducer, int dimkeep,
typename R, typename DType, typename E, int etype>
inline void MapReduceKeepHighDim(TRValue<R, cpu, 1, DType> *dst,
const expr::Exp<E, DType, etype> &exp,
DType scale) {
expr::TypeCheckPass<expr::TypeCheck<cpu, dimkeep, DType, E>::kRedPass>
::Error_TypeCheck_Not_Pass_For_Reduce_Exp();
typedef Shape<expr::ExpInfo<E>::kDim> EShape;
EShape eshape = expr::ShapeCheck<expr::ExpInfo<E>::kDim, E>
::Check(exp.self());
Shape<1> dshape = expr::ShapeCheck<1, R>::Check(dst->self());
CHECK_EQ(eshape[dimkeep], dshape[0])
<< "MapReduceKeepHighDim::reduction dimension do not match";
// use equvalent form
Shape<4> pshape = Shape4(eshape.ProdShape(0, dimkeep),
eshape[dimkeep],
eshape.ProdShape(dimkeep + 1, EShape::kSubdim),
eshape[EShape::kSubdim]);
// execution
expr::Plan<R, DType> dplan = MakePlan(dst->self());
expr::Plan<E, DType> splan = MakePlan(exp.self());
#ifndef __CUDACC__
#pragma omp parallel for
#endif
for (openmp_index_t c = 0; c < pshape[1]; ++c) {
DType res; Reducer::SetInitValue(res);
for (index_t n = 0; n < pshape[0]; ++n) {
// partial reduction per outer slice, folded into the final result
DType tres; Reducer::SetInitValue(tres);
for (index_t y = 0; y < pshape[2]; ++y) {
for (index_t x = 0; x < pshape[3]; ++x) {
Reducer::Reduce(tres,
splan.Eval((n * pshape[1] + c) * pshape[2] + y, x));
}
}
Reducer::Reduce(res, tres);
}
Saver::template Save<DType>(dplan.REval(0, c), DType(res * scale));
}
}
// Numerically stable 1-D softmax: subtract the maximum before
// exponentiating so std::exp cannot overflow, then normalize by the sum.
// NOTE(review): all loops run over dst.size(0) — assumes energy has the
// same length as dst; confirm with callers.
template<typename DType>
inline void Softmax(Tensor<cpu, 1, DType> dst,
const Tensor<cpu, 1, DType> &energy) {
DType mmax = energy[0];
for (index_t x = 1; x < dst.size(0); ++x) {
if (mmax < energy[x]) mmax = energy[x];
}
DType sum = DType(0.0f);
for (index_t x = 0; x < dst.size(0); ++x) {
dst[x] = std::exp(energy[x] - mmax);
sum += dst[x];
}
for (index_t x = 0; x < dst.size(0); ++x) {
dst[x] /= sum;
}
}
// Gradient of softmax + cross-entropy w.r.t. the logits, one row per
// sample: dst[y] = src[y] - one_hot(label[y]). Rows are independent and
// processed in parallel.
template<typename DType>
inline void SoftmaxGrad(Tensor<cpu, 2, DType> dst,
const Tensor<cpu, 2, DType> &src,
const Tensor<cpu, 1, DType> &label) {
#pragma omp parallel for
for (openmp_index_t y = 0; y < dst.size(0); ++y) {
// NOTE(review): a negative label would wrap when stored in index_t — assumed non-negative
const index_t k = static_cast<int>(label[y]);
for (index_t x = 0; x < dst.size(1); ++x) {
if (x == k) {
dst[y][k] = src[y][k] - 1.0f;
} else {
dst[y][x] = src[y][x];
}
}
}
}
// Label-smoothing variant: the target distribution puts (1 - alpha) on the
// true class and alpha / (num_classes - 1) on every other class.
template<typename DType>
inline void SmoothSoftmaxGrad(Tensor<cpu, 2, DType> dst,
const Tensor<cpu, 2, DType> &src,
const Tensor<cpu, 1, DType> &label,
const float alpha) {
const float smooth_grad = (alpha / (dst.size(1) - 1));
#pragma omp parallel for
for (openmp_index_t y = 0; y < dst.size(0); ++y) {
const index_t k = static_cast<int>(label[y]);
for (index_t x = 0; x < dst.size(1); ++x) {
if (x == k) {
dst[y][k] = src[y][k] - 1.0f + alpha;
} else {
dst[y][x] = src[y][x] - smooth_grad;
}
}
}
}
// As SoftmaxGrad above, but rows whose label equals ignore_label receive a
// zero gradient (the sample is excluded from the loss).
template<typename DType>
inline void SoftmaxGrad(Tensor<cpu, 2, DType> dst,
const Tensor<cpu, 2, DType> &src,
const Tensor<cpu, 1, DType> &label,
const DType &ignore_label) {
#pragma omp parallel for
for (openmp_index_t y = 0; y < dst.size(0); ++y) {
const int k = static_cast<int>(label[y]);
for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) {
if (static_cast<int>(ignore_label) == k) {
dst[y][x] = 0.0f;
} else {
if (x == k) {
dst[y][k] = src[y][k] - 1.0f;
} else {
dst[y][x] = src[y][x];
}
}
}
}
}
// Label-smoothing + ignore_label variant: ignored rows get zero gradient,
// all other rows use the smoothed target distribution.
template<typename DType>
inline void SmoothSoftmaxGrad(Tensor<cpu, 2, DType> dst,
const Tensor<cpu, 2, DType> &src,
const Tensor<cpu, 1, DType> &label,
const DType &ignore_label,
const float alpha) {
const float smooth_grad = (alpha / (dst.size(1) - 1));
#pragma omp parallel for
for (openmp_index_t y = 0; y < dst.size(0); ++y) {
const int k = static_cast<int>(label[y]);
for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) {
if (static_cast<int>(ignore_label) == k) {
dst[y][x] = 0.0f;
} else {
if (x == k) {
dst[y][k] = src[y][k] - 1.0f + alpha;
} else {
dst[y][x] = src[y][x] - smooth_grad;
}
}
}
}
}
// 3-D variant of the softmax gradient: the class axis is dimension 1 and an
// extra trailing axis (e.g. spatial position) is dimension 2; labels are
// indexed as label[y][n]. The trailing axis is parallelized.
template<typename DType>
inline void SoftmaxGrad(Tensor<cpu, 3, DType> dst,
const Tensor<cpu, 3, DType> &src,
const Tensor<cpu, 2, DType> &label) {
#pragma omp parallel for
for (openmp_index_t n = 0; n < dst.size(2); ++n) {
for (index_t y = 0; y < dst.size(0); ++y) {
const int k = static_cast<int>(label[y][n]);
for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) {
if (x == k) {
dst[y][k][n] = src[y][k][n] - 1.0f;
} else {
dst[y][x][n] = src[y][x][n];
}
}
}
}
}
// 3-D label-smoothing variant (see the 2-D SmoothSoftmaxGrad above).
template<typename DType>
inline void SmoothSoftmaxGrad(Tensor<cpu, 3, DType> dst,
const Tensor<cpu, 3, DType> &src,
const Tensor<cpu, 2, DType> &label,
const float alpha) {
const float smooth_grad = (alpha / (dst.size(1) - 1));
#pragma omp parallel for
for (openmp_index_t n = 0; n < dst.size(2); ++n) {
for (index_t y = 0; y < dst.size(0); ++y) {
const int k = static_cast<int>(label[y][n]);
for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) {
if (x == k) {
dst[y][k][n] = src[y][k][n] - 1.0f + alpha;
} else {
dst[y][x][n] = src[y][x][n] - smooth_grad;
}
}
}
}
}
// 3-D variant with ignore_label: positions whose label matches receive a
// zero gradient over the whole class axis.
template<typename DType>
inline void SoftmaxGrad(Tensor<cpu, 3, DType> dst,
const Tensor<cpu, 3, DType> &src,
const Tensor<cpu, 2, DType> &label,
const DType &ignore_label) {
#pragma omp parallel for
for (openmp_index_t n = 0; n < dst.size(2); ++n) {
for (index_t y = 0; y < dst.size(0); ++y) {
const int k = static_cast<int>(label[y][n]);
if (k == static_cast<int>(ignore_label)) {
for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) {
dst[y][x][n] = DType(0.0f);
}
} else {
for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) {
if (x == k) {
dst[y][k][n] = src[y][k][n] - 1.0f;
} else {
dst[y][x][n] = src[y][x][n];
}
}
}
}
}
}
// 3-D label-smoothing + ignore_label variant.
template<typename DType>
inline void SmoothSoftmaxGrad(Tensor<cpu, 3, DType> dst,
const Tensor<cpu, 3, DType> &src,
const Tensor<cpu, 2, DType> &label,
const DType &ignore_label,
const float alpha) {
const float smooth_grad = (alpha / (dst.size(1) - 1));
#pragma omp parallel for
for (openmp_index_t n = 0; n < dst.size(2); ++n) {
for (index_t y = 0; y < dst.size(0); ++y) {
const int k = static_cast<int>(label[y][n]);
if (k == static_cast<int>(ignore_label)) {
for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) {
dst[y][x][n] = DType(0.0f);
}
} else {
for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) {
if (x == k) {
dst[y][k][n] = src[y][k][n] - 1.0f + alpha;
} else {
dst[y][x][n] = src[y][x][n] - smooth_grad;
}
}
}
}
}
}
// Row-wise softmax of a 2-D tensor: each row is handled by the stable 1-D
// kernel; rows are processed in parallel.
template<typename DType>
inline void Softmax(Tensor<cpu, 2, DType> dst,
const Tensor<cpu, 2, DType> &energy) {
CHECK_EQ(dst.shape_, energy.shape_) << "Softmax: shape mismatch";
#pragma omp parallel for
for (openmp_index_t y = 0; y < dst.size(0); ++y) {
Softmax(dst[y], energy[y]);
}
}
// 3-D softmax over the middle (class) axis for each (y, n) pair, using the
// same max-subtraction trick for numerical stability.
template<typename DType>
inline void Softmax(Tensor<cpu, 3, DType> dst,
const Tensor<cpu, 3, DType> &energy) {
CHECK_EQ(dst.shape_, energy.shape_) << "Softmax: shape mismatch";
#pragma omp parallel for
for (openmp_index_t y = 0; y < dst.size(0); ++y) {
for (index_t n = 0; n < dst.size(2); ++n) {
DType mmax = energy[y][0][n];
for (index_t x = 1; x < dst.size(1); ++x) {
if (mmax < energy[y][x][n]) mmax = energy[y][x][n];
}
DType sum = DType(0.0f);
for (index_t x = 0; x < dst.size(1); ++x) {
dst[y][x][n] = std::exp(energy[y][x][n] - mmax);
sum += dst[y][x][n];
}
for (index_t x = 0; x < dst.size(1); ++x) {
dst[y][x][n] /= sum;
}
}
}
}
// Gradient of a take/embedding lookup: accumulates src row y into dst row
// index[y] (uses +=, so dst must hold zeros or a prior gradient on entry).
// The `clip` template flag selects clamping of out-of-range indices to
// [0, K); otherwise indices are wrapped modulo K.
template<bool clip, typename IndexType, typename DType>
inline void AddTakeGrad(Tensor<cpu, 2, DType> dst,
                        const Tensor<cpu, 1, IndexType>& index,
                        const Tensor<cpu, 2, DType> &src) {
  const index_t K = dst.shape_[0];
  const index_t C = dst.shape_[1];
  for (index_t y = 0; y < index.size(0); ++y) {
    index_t j = index[y];
    if (clip) {
      // Clamp into [0, K). NOTE(review): if index_t is unsigned, negative
      // IndexType values wrap to large j and are clamped to K-1, not 0 --
      // confirm this matches the intended clip semantics.
      if (j <= 0) j = 0;
      else if (j >= K) j = K - 1;
    } else {
      // Wrap modulo K. NOTE(review): with an unsigned index_t, `j < 0` can
      // never be true, so negative inputs rely on unsigned wraparound before
      // the modulo -- confirm against the GPU implementation.
      j %= K;
      if (j < 0) j += K;
    }
    // Accumulate the whole source row into the selected destination row.
    for (index_t i = 0; i < C; ++i) {
      dst[j][i] += src[y][i];
    }
  }
}
// Large-batch variant of AddTakeGrad: `sorted` holds the destination row
// indices (presumably pre-sorted for locality) and `index` holds the
// matching permutation into src, so dst[sorted[y]] accumulates src[index[y]].
// Serial loop; accumulation order follows `sorted`.
template<typename IndexType, typename DType>
inline void AddTakeGradLargeBatch(Tensor<cpu, 2, DType> dst,
                                  const Tensor<cpu, 1, IndexType>& sorted,
                                  const Tensor<cpu, 1, IndexType>& index,
                                  const Tensor<cpu, 2, DType> &src) {
  for (index_t y = 0; y < sorted.size(0); ++y) {
    dst[sorted[y]] += src[index[y]];
  }
}
template<typename IndexType, typename DType>
inline void IndexFill(Tensor<cpu, 2, DType> dst,
                      const Tensor<cpu, 1, IndexType>& index,
                      const Tensor<cpu, 2, DType> &src) {
  // Scatter rows of src into dst: dst[index[row]] = src[row].
  // Overwrites (does not accumulate); later rows win on duplicate indices.
  const index_t num_rows = index.size(0);
  const index_t num_cols = src.size(1);
  for (index_t row = 0; row < num_rows; ++row) {
    for (index_t col = 0; col < num_cols; ++col) {
      dst[index[row]][col] = src[row][col];
    }
  }
}
// Stable in-place sort of `values` by `keys` (and of `keys` themselves),
// ascending or descending. Implemented by sorting an index permutation with
// std::stable_sort and writing both arrays back through it. Both tensors
// must be contiguous and of equal length.
template<typename KDType, typename VDType>
inline void SortByKey(Tensor<cpu, 1, KDType> keys, Tensor<cpu, 1, VDType> values,
                      bool is_ascend) {
  CHECK_EQ(keys.CheckContiguous(), true);
  CHECK_EQ(values.CheckContiguous(), true);
  CHECK_EQ(keys.size(0), values.size(0))
    << "The sizes of key/value are not equal! keys_size: " << keys.size(0)
    << "values_size: " << values.size(0);
  std::vector<size_t> idx(keys.size(0));
  std::vector<KDType> keys_vec(keys.size(0));
  std::vector<VDType> values_vec(values.size(0));
  // Use index_t for the copy loop (was `int`), matching the write-back loop
  // below and avoiding a signed/unsigned comparison against keys.size(0).
  for (index_t i = 0; i < keys.size(0); i++) {
    idx[i] = i;
    keys_vec[i] = keys[i];
    values_vec[i] = values[i];
  }
  // Sort the permutation, not the data, so keys and values stay paired.
  if (is_ascend) {
    std::stable_sort(idx.begin(), idx.end(),
                     [&keys_vec](size_t i1, size_t i2)
                     {return keys_vec[i1] < keys_vec[i2]; });
  } else {
    std::stable_sort(idx.begin(), idx.end(),
                     [&keys_vec](size_t i1, size_t i2)
                     {return keys_vec[i1] > keys_vec[i2]; });
  }
  // Apply the permutation back to both tensors.
  for (index_t i = 0; i < values.size(0); i++) {
    keys[i] = keys_vec[idx[i]];
    values[i] = values_vec[idx[i]];
  }
}
// Sorts `values` within each segment identified by `segments`, using two
// stable sorts: first order everything by value, then stably regroup by
// segment id -- the stability of the second sort preserves the per-segment
// value order established by the first.
template<typename Device, typename VDType, typename SDType>
inline void VectorizedSort(Tensor<Device, 1, VDType> values, Tensor<Device, 1, SDType> segments) {
  // We can sort each segments using two stable sorts
  SortByKey(values, segments, true);
  SortByKey(segments, values, true);
}
// blas related
// blas related
// Dot product of two equal-length 1-D tensors via the BLAS engine; the
// scalar result is written into dst, which must have exactly one element.
template<typename Device, typename DType>
inline void VectorDot(Tensor<Device, 1, DType> dst,
                      const Tensor<Device, 1, DType> &lhs,
                      const Tensor<Device, 1, DType> &rhs) {
  CHECK_EQ(lhs.size(0), rhs.size(0))
    << "VectorDot: Shape mismatch";
  CHECK_EQ(dst.size(0), 1U)
    << "VectorDot: expect dst to be scalar";
  // Bind the BLAS engine to lhs's stream before issuing the dot call.
  expr::BLASEngine<Device, DType>::SetStream(lhs.stream_);
  mshadow::expr::BLASEngine<Device, DType>::dot(
    lhs.stream_, lhs.size(0), lhs.dptr_, 1, rhs.dptr_, 1, dst.dptr_);
}
// Batched matrix multiply: dst = alpha * op(lhs) * op(rhs) + beta * dst for
// each of the batch_size matrix pairs, where op() optionally transposes per
// the template flags. All tensors must be contiguous; workspace must hold at
// least 3 * batch_size pointers (per-batch A/B/C pointer arrays for the
// batched BLAS call).
template<bool transpose_left, bool transpose_right, typename Device, typename DType>
inline void BatchGEMM(Tensor<Device, 3, DType> dst,
                      const Tensor<Device, 3, DType> &lhs,
                      const Tensor<Device, 3, DType> &rhs,
                      DType alpha,
                      DType beta,
                      Tensor<Device, 1, DType*> workspace) {
  index_t batch_size = dst.shape_[0];
  expr::BLASEngine<Device, DType>::SetStream(dst.stream_);
  // Effective (post-transpose) shapes used only for the checks below.
  Shape<3> sleft = transpose_left ? Shape3(lhs.shape_[0], lhs.shape_[2], lhs.shape_[1])
                                  : lhs.shape_;
  Shape<3> sright = transpose_right ? Shape3(rhs.shape_[0], rhs.shape_[2], rhs.shape_[1])
                                    : rhs.shape_;
  CHECK_EQ(dst.CheckContiguous(), true);
  CHECK_EQ(lhs.CheckContiguous(), true);
  CHECK_EQ(rhs.CheckContiguous(), true);
  CHECK(sleft[0] == batch_size && sright[0] == batch_size)
    << "BatchGEMM: batchsize must be equal."
    << "dst: " << dst.shape_ << "\n"
    << "lhs: " << sleft << "\n"
    << "rhs: " << sright << "\n";
  CHECK(dst.size(1) == sleft[1] && dst.size(2) == sright[2] && sleft[2] == sright[1])
    << "BatchGEMM: matrix shape mismatch"
    << "dst: " << dst.shape_ << "\n"
    << "lhs: " << sleft << "\n"
    << "rhs: " << sright << "\n";
  CHECK(workspace.size(0) >= 3 * batch_size)
    << "Workspace Size must be bigger than " << 3 * batch_size;
  CHECK_EQ(workspace.CheckContiguous(), true);
  // use column major argument to compatible with most BLAS
  // (row-major C = A*B is computed as column-major C^T = B^T * A^T, which is
  // why rhs/lhs and their transpose flags are swapped in the call below).
  expr::BLASEngine<Device, DType>::batched_gemm
      (dst.stream_,
       transpose_right, transpose_left,
       transpose_right ? rhs.size(1) : rhs.size(2),
       transpose_left ? lhs.size(2) : lhs.size(1),
       transpose_right ? rhs.size(2) : rhs.size(1),
       alpha,
       rhs.dptr_, rhs.stride_,
       lhs.dptr_, lhs.stride_,
       beta,
       dst.dptr_, dst.stride_, batch_size,
       workspace.dptr_);
}
} // namespace mshadow
#endif // MSHADOW_TENSOR_CPU_INL_H_
|
core_dtrtri.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_ztrtri.c, normal z -> d, Fri Sep 28 17:38:24 2018
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "core_lapack.h"
/***************************************************************************//**
*
* @ingroup core_trtri
*
* Computes the inverse of an upper or lower
* triangular matrix A.
*
*******************************************************************************
*
* @param[in] uplo
* = PlasmaUpper: Upper triangle of A is stored;
* = PlasmaLower: Lower triangle of A is stored.
*
* @param[in] diag
* = PlasmaNonUnit: A is non-unit triangular;
* = PlasmaUnit: A is unit triangular.
*
* @param[in] n
* The order of the matrix A. n >= 0.
*
* @param[in,out] A
* On entry, the triangular matrix A. If uplo = 'U', the
* leading n-by-n upper triangular part of the array A
* contains the upper triangular matrix, and the strictly
* lower triangular part of A is not referenced. If uplo =
* 'L', the leading n-by-n lower triangular part of the array
* A contains the lower triangular matrix, and the strictly
* upper triangular part of A is not referenced. If diag =
* 'U', the diagonal elements of A are also not referenced and
* are assumed to be 1. On exit, the (triangular) inverse of
* the original matrix.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,n).
*
* @retval PlasmaSuccess on successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
* @retval > 0 if i, A(i,i) is exactly zero. The triangular
* matrix is singular and its inverse can not be computed.
*
******************************************************************************/
/* Sequential kernel: inverts the triangular matrix A in place by delegating
 * to LAPACKE's dtrtri (column-major). Declared weak so tests/benchmarks can
 * override it at link time. Returns the LAPACK info code (see the doxygen
 * block above for its meaning). */
__attribute__((weak))
int plasma_core_dtrtri(plasma_enum_t uplo, plasma_enum_t diag,
                       int n,
                       double *A, int lda)
{
    return LAPACKE_dtrtri_work(LAPACK_COL_MAJOR,
                               lapack_const(uplo), lapack_const(diag),
                               n, A, lda);
}
/******************************************************************************/
/******************************************************************************/
/* OpenMP-task wrapper around plasma_core_dtrtri(): schedules the inversion
 * as a task with an inout dependence on the whole A tile so the runtime
 * orders it against other tasks touching A. On failure, the sequence is
 * marked failed with iinfo+info (iinfo is the caller-supplied offset that
 * locates this tile's diagonal within the global matrix). */
void plasma_core_omp_dtrtri(plasma_enum_t uplo, plasma_enum_t diag,
                            int n,
                            double *A, int lda,
                            int iinfo,
                            plasma_sequence_t *sequence, plasma_request_t *request)
{
    #pragma omp task depend(inout:A[0:lda*n])
    {
        /* Skip the work if an earlier task already failed the sequence. */
        if (sequence->status == PlasmaSuccess) {
            int info = plasma_core_dtrtri(uplo, diag,
                                          n, A, lda);
            if (info != 0)
                plasma_request_fail(sequence, request, iinfo+info);
        }
    }
}
|
mozilla_ng_fmt_plug.c | /*
* Cracker for Mozilla's key3.db's master password.
*
* All the real logic here is borrowed from Milen Rangelov's Hashkill project
* and from Deque's article.
*
* Thanks to Jim Fougeron for all the help!
*
* This software is Copyright (c) 2014, Sanju Kholia <sanju.kholia [at]
* gmail.com> and Dhiru Kholia <dhiru [at] openwall.com>, and it is hereby
* released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_mozilla;
#elif FMT_REGISTERS_H
john_register_one(&fmt_mozilla);
#else
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 2048 // XXX
#endif
#endif
#include <stdint.h>
#include <openssl/des.h>
#include "arch.h"
#include "md5.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "params.h"
#include "options.h"
#include "memdbg.h"
#include "sha.h"
#define FORMAT_LABEL "Mozilla"
#define FORMAT_NAME "Mozilla key3.db"
#define FORMAT_TAG "$mozilla$"
#define TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
#define ALGORITHM_NAME "SHA1 3DES 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 16
#define BINARY_ALIGN sizeof(uint32_t)
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN sizeof(int)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
static struct fmt_tests tests[] = {
{"$mozilla$*3*20*1*5199adfab24e85e3f308bacf692115f23dcd4f8f*11*2a864886f70d010c050103*16*9debdebd4596b278de029b2b2285ce2e*20*2c4d938ccb3f7f1551262185ccee947deae3b8ae", "12345678"},
{"$mozilla$*3*20*1*4f184f0d3c91cf52ee9190e65389b4d4c8fc66f2*11*2a864886f70d010c050103*16*590d1771368107d6be64844780707787*20*b8458c712ffcc2ff938409804cf3805e4bb7d722", "openwall"},
{"$mozilla$*3*20*1*897f35ff10348f0d3a7739dbf0abddc62e2e64c3*11*2a864886f70d010c050103*16*1851b917997b3119f82b8841a764db62*20*197958dd5e114281f59f9026ad8b7cfe3de7196a", "password"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *saved_len;
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];
static struct custom_salt {
SHA_CTX pctx;
int global_salt_length;
unsigned char global_salt[20];
int local_salt_length; // entry-salt (ES)
unsigned char local_salt[20];
} *cur_salt;
/* Format initialization: scales the per-crypt key count by the OpenMP thread
 * count (times OMP_SCALE for batching efficiency) and allocates the key,
 * length and hash-output buffers accordingly. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	saved_len = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_len));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_out));
}
/* Releases the buffers allocated in init(), in reverse allocation order. */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_len);
	MEM_FREE(saved_key);
}
/* Validates a "$mozilla$*..."-tagged ciphertext: checks the version is 3 and
 * that each '*'-separated field (lengths, hex blobs) is well-formed, with
 * every hex field's byte length matching its preceding length field (all
 * capped at 20 bytes). Works on a strdup'd copy because strtokm() mutates
 * its input. Returns 1 if parseable, 0 otherwise. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p, *keepptr;
	int res;
	if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		return 0;
	keepptr=strdup(ciphertext);
	p = &keepptr[TAG_LENGTH];
	if (*p != '*')
		goto err;
	++p;
	if ((p = strtokm(p, "*")) == NULL)	/* version */
		goto err;
	if (!isdec(p))
		goto err;
	res = atoi(p);
	if (res != 3)  /* we only know about this particular version */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* local_salt_length */
		goto err;
	if (!isdec(p))
		goto err;
	res = atoi(p);
	if (res > 20)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* nnLen (we ignore nnlen) */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* local_salt */
		goto err;
	if (strlen(p) /2 != res)	/* hex length must match declared byte count */
		goto err;
	if (!ishexlc(p))
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* oidDatalen */
		goto err;
	if (!isdec(p))
		goto err;
	res = atoi(p);
	if (res > 20)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* oidData */
		goto err;
	if (strlen(p) / 2 != res)
		goto err;
	if (!ishexlc(p))
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* password_check_length */
		goto err;
	if (!isdec(p))
		goto err;
	res = atoi(p);
	if (res > 20)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* password_check */
		goto err;
	if (strlen(p) / 2 != res)
		goto err;
	if (!ishexlc(p))
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* global_salt_length */
		goto err;
	if (!isdec(p))
		goto err;
	res = atoi(p);
	if (res > 20)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* global_salt */
		goto err;
	if (strlen(p) / 2 != res)
		goto err;
	if (!ishexlc(p))
		goto err;
	MEM_FREE(keepptr);
	return 1;
err:
	MEM_FREE(keepptr);
	return 0;
}
/* Extracts the salt fields from an already-validated ciphertext: decodes
 * local_salt (entry salt) and global_salt from hex and precomputes a partial
 * SHA-1 over the global salt so crypt_all() only has to append the password.
 * Returns a pointer to a static struct (standard JtR convention: the caller
 * copies it; presumably never called from multiple threads -- confirm). */
static void *get_salt(char *ciphertext)
{
	int i;
	static struct custom_salt cs;
	char *p, *q;
	memset(&cs, 0, SALT_SIZE);	// cs.local_salt needs to be zero padded to length 20
	p = ciphertext + TAG_LENGTH;
	q = strchr(p, '*'); // version
	p = q + 1;
	q = strchr(p, '*'); // local_salt_length
	p = q + 1;
	cs.local_salt_length = atoi(p);
	q = strchr(p, '*'); // nnLen
	p = q + 1;
	q = strchr(p, '*'); // local_salt
	p = q + 1;
	/* Decode the entry salt from hex. */
	for (i = 0; i < cs.local_salt_length; i++)
		cs.local_salt[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) |
			atoi16[ARCH_INDEX(p[2 * i + 1])];
	q = strchr(p, '*'); // oidLen (unused)
	p = q + 1;
	q = strchr(p, '*'); // oidData (unused)
	p = q + 1;
	q = strchr(p, '*'); // password_check_length
	p = q + 1;
	// Not stored in salt. This is the binary length
	q = strchr(p, '*'); // password_check
	p = q + 1;
	// Not stored in salt, this is the binary.
	q = strchr(p, '*'); // global_salt_length
	p = q + 1;
	cs.global_salt_length = atoi(p);
	q = strchr(p, '*'); // global_salt
	p = q + 1;
	/* Decode the global salt from hex. */
	for (i = 0; i < cs.global_salt_length; i++)
		cs.global_salt[i] = atoi16[ARCH_INDEX(p[i * 2])]
			* 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])];
	// Calculate partial sha1 data for password hashing
	SHA1_Init(&cs.pctx);
	SHA1_Update(&cs.pctx, cs.global_salt, cs.global_salt_length);
	return (void *)&cs;
}
/* Extracts the binary target: skips eight '*'-separated fields to reach the
 * password_check blob and hex-decodes its first BINARY_SIZE (16) bytes into
 * a static, ARCH_WORD-aligned buffer. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy;	/* forces alignment of c */
	} buf;
	unsigned char *out = buf.c;
	char *p, *q;
	int i;
	p = ciphertext + TAG_LENGTH;
	q = strchr(p, '*'); // version
	p = q + 1;
	q = strchr(p, '*'); // local_salt_length
	p = q + 1;
	q = strchr(p, '*'); // nnLen
	p = q + 1;
	q = strchr(p, '*'); // local_salt
	p = q + 1;
	q = strchr(p, '*'); // oidLen (unused)
	p = q + 1;
	q = strchr(p, '*'); // oidData (unused)
	p = q + 1;
	q = strchr(p, '*'); // password_check_length
	p = q + 1;
	q = strchr(p, '*'); // password_check
	p = q + 1;
	/* Hex-decode the password-check field. */
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] =
			(atoi16[ARCH_INDEX(*p)] << 4) |
			atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	return out;
}
/* Partial-hash accessors used by JtR's hash tables: each returns the first
 * 32 bits of the computed binary masked to the table size PH_MASK_n. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
/* Installs the salt for subsequent crypt_all() calls. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
// http://www.drh-consultancy.demon.co.uk/key3.html
/* Core key-derivation (see http://www.drh-consultancy.demon.co.uk/key3.html):
 * derives a 3DES key + IV from each candidate password via chained SHA-1 /
 * HMAC-SHA-1 steps and encrypts the "password-check" string; the result is
 * compared against the stored binary. Parallelized over candidates with
 * OpenMP; without _OPENMP the block body runs once with index == 0, which is
 * correct because MAX_KEYS_PER_CRYPT stays 1 in that build. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		SHA_CTX ctx, ctxi, ctxo;
		int i;
		union {
			unsigned char uc[64];
			uint32_t ui[64/4];	/* word view for the ipad->opad flip below */
		} pad;
		unsigned char buffer[20];
		unsigned char tk[20];
		unsigned char key[40];
		DES_cblock ivec;
		DES_key_schedule ks1, ks2, ks3;
		// HP = SHA1(global-salt||password)
		// Copy already calculated partial hash data
		memcpy(&ctx, &cur_salt->pctx, sizeof(SHA_CTX));
		SHA1_Update(&ctx, saved_key[index], saved_len[index]);
		SHA1_Final(buffer, &ctx);
		// CHP = SHA1(HP||entry-salt) // entry-salt (ES) is local_salt
		SHA1_Init(&ctx);
		SHA1_Update(&ctx, buffer, 20);
		SHA1_Update(&ctx, cur_salt->local_salt, cur_salt->local_salt_length);
		SHA1_Final(buffer, &ctx);
		// Step 0 for all hmac, store off the first half (the key is the same for all 3)
		// this will avoid having to setup the ipad/opad 2 times, and also avoids 4 SHA calls
		// reducing the hmac calls from 12 SHA limbs, down to 8 and ipad/opad loads from 3
		// down to 1.  It adds 4 CTX memcpy's, but that is a very fair trade off.
		SHA1_Init(&ctxi);
		SHA1_Init(&ctxo);
		memset(pad.uc, 0x36, 64);
		for (i = 0; i < 20; ++i)
			pad.uc[i] ^= buffer[i];
		SHA1_Update(&ctxi, pad.uc, 64);
		/* Convert ipad (0x36) into opad (0x5c) in one pass over words. */
		for (i = 0; i < 64/4; ++i)
			pad.ui[i] ^= 0x36363636^0x5c5c5c5c;
		SHA1_Update(&ctxo, pad.uc, 64);
		// k1 = HMAC(PES||ES)  // use CHP as the key, PES is ES which is zero padded to length 20
		// NOTE, memcpy ctxi/ctxo to harvest off the preloaded hmac key
		memcpy(&ctx, &ctxi, sizeof(ctx));
		SHA1_Update(&ctx, cur_salt->local_salt, 20);
		SHA1_Update(&ctx, cur_salt->local_salt, cur_salt->local_salt_length);
		SHA1_Final(buffer, &ctx);
		memcpy(&ctx, &ctxo, sizeof(ctx));
		SHA1_Update(&ctx, buffer, 20);
		SHA1_Final(key, &ctx);
		// tk = HMAC(PES) // use CHP as the key
		// NOTE, memcpy ctxi/ctxo to harvest off the preloaded hmac key
		memcpy(&ctx, &ctxi, sizeof(ctx));
		SHA1_Update(&ctx, cur_salt->local_salt, 20);
		SHA1_Final(buffer, &ctx);
		memcpy(&ctx, &ctxo, sizeof(ctx));
		SHA1_Update(&ctx, buffer, 20);
		SHA1_Final(tk, &ctx);
		// k2 = HMAC(tk||ES) // use CHP as the key
		// NOTE, ctxi and ctxo are no longer needed after this hmac, so we simply use them
		SHA1_Update(&ctxi, tk, 20);
		SHA1_Update(&ctxi, cur_salt->local_salt, cur_salt->local_salt_length);
		SHA1_Final(buffer, &ctxi);
		SHA1_Update(&ctxo, buffer, 20);
		SHA1_Final(key+20, &ctxo);
		// k = k1||k2 // encrypt "password-check" string using this key
		DES_set_key((DES_cblock *) key, &ks1);
		DES_set_key((DES_cblock *) (key+8), &ks2);
		DES_set_key((DES_cblock *) (key+16), &ks3);
		memcpy(ivec, key + 32, 8); // last 8 bytes!
		// PKCS#5 padding (standard block padding)
		DES_ede3_cbc_encrypt((unsigned char*)"password-check\x02\x02", (unsigned char*)crypt_out[index], 16, &ks1, &ks2, &ks3, &ivec, DES_ENCRYPT);
	}
	return count;
}
/* Quick scan: returns 1 if any computed hash's first 32 bits match the
 * binary's. Without _OPENMP only index 0 is checked, which is sufficient
 * because the non-OpenMP build keeps MAX_KEYS_PER_CRYPT at 1. */
static int cmp_all(void *binary, int count)
{
	int index = 0;
#ifdef _OPENMP
	for (; index < count; index++)
#endif
		if (((uint32_t*)binary)[0] == crypt_out[index][0])
			return 1;
	return 0;
}
/* Full 16-byte comparison for one candidate flagged by cmp_all(). */
static int cmp_one(void *binary, int index)
{
	return memcmp(binary, crypt_out[index], BINARY_SIZE) == 0;
}
/* The full binary is compared in cmp_one(), so nothing further to verify. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
static void mozilla_set_key(char *key, int index)
{
saved_len[index] = strlen(key);
strncpy(saved_key[index], key, sizeof(saved_key[0]));
}
/* Returns the stored plaintext candidate for reporting. */
static char *get_key(int index)
{
	return saved_key[index];
}
struct fmt_main fmt_mozilla = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
BINARY_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{ NULL },
{ FORMAT_TAG },
tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
set_salt,
mozilla_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif
|
lr_CPM_paralelo.c | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
/* Uniform random double in [0, M] (comment is outside the expansion). */
#define frand(M) (M*(((double)rand())/RAND_MAX))
/* Number of synthetic samples for the regression benchmark. */
#define N 2000000
double X[N];  /* feature values */
double Y[N];  /* target values */
/* Mean-squared-error cost for the linear model y = t0 + t1*x over nn
 * samples, divided by 2*nn (standard least-squares convention).
 * Residuals are accumulated in parallel via an OpenMP reduction. */
double cost (int nn, double vx[], double vy[], double t0, double t1)
{
	int idx;
	double resid, acc = 0.0;

	#pragma omp parallel for reduction (+:acc) private (idx,resid)
	for (idx = 0; idx < nn; idx++)
	{
		resid = t0 + t1*vx[idx] - vy[idx];
		acc += resid * resid;
	}
	acc /= 2*nn;
	return acc;
}
/* Batch gradient descent for the linear model y = t0 + t1*x.
 * Updates (t0, t1) with learning rate alpha/nn until the cost change
 * between iterations drops below `error`; writes the fitted parameters
 * back through the0/the1 and returns the iteration count. */
int gradientDescent (int nn, double vx[], double vy[], double alpha, double *the0, double *the1)
{
	int i;
	double val;
	double z0,z1;
	double c=0,ca;
	double t0=*the0, t1=*the1;
	double a_n = alpha/nn;
	int iter = 0;
	double error = 0.000009; // five decimal places
	do
	{
		/* Accumulate the two gradient components in parallel. */
		z0 = z1 = 0.0;
		#pragma omp parallel for reduction (+:z0,z1) private (i,val)
		for(i=0;i<nn;i++)
		{
			val = t0 + t1*vx[i] - vy[i];
			z0 += val;
			z1 += val * vx[i];
		}
		/* Simultaneous parameter update. */
		t0 -= z0 * a_n;
		t1 -= z1 * a_n;
		iter++;
		ca = c;	/* previous cost (0.0 on the first iteration) */
		c = cost(nn,vx,vy,t0,t1);
	}
	while (fabs(c - ca) > error);
	*the0 = t0;
	*the1 = t1;
	return(iter);
}
/* Generates N noisy quadratic-ish samples with a fixed seed, fits a line by
 * gradient descent and prints the parameters, cost and iteration count. */
int main()
{
	int i;
	double ct;
	double theta0=0, theta1=1;
	srand(1);	/* fixed seed for reproducible runs */
	for (i=0;i<N;i++)
	{
		X[i] = frand(13);
		Y[i] = frand(9) + ((1.66 + (frand(0.9))) * X[i]) * X[i] ;
	}
	//for (i=0;i<N;i++) printf("%g %g\n",X[i],Y[i]);
	i=gradientDescent (N, X, Y, 0.01, &theta0, &theta1);
	ct=cost(N,X,Y,theta0,theta1);
	printf ("(%d) Theta; %g, %g cost: %g\n",i,theta0,theta1,ct);
	return(0);
}
|
data.c | #include "data.h"
#include "utils.h"
#include "image.h"
#include "dark_cuda.h"
#include "box.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define NUMCHARS 37
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
/* Reads a text file line by line (via fgetl) and returns the lines as a
 * linked list of heap-allocated strings. Exits via file_error() if the file
 * cannot be opened. */
list *get_paths(char *filename)
{
    char *path;
    FILE *file = fopen(filename, "r");
    if(!file) file_error(filename);
    list *lines = make_list();
    while((path=fgetl(file))){
        list_insert(lines, path);
    }
    fclose(file);
    return lines;
}
/*
char **get_random_paths_indexes(char **paths, int n, int m, int *indexes)
{
char **random_paths = calloc(n, sizeof(char*));
int i;
pthread_mutex_lock(&mutex);
for(i = 0; i < n; ++i){
int index = random_gen()%m;
indexes[i] = index;
random_paths[i] = paths[index];
if(i == 0) printf("%s\n", paths[index]);
}
pthread_mutex_unlock(&mutex);
return random_paths;
}
*/
/* Picks n paths arranged as mini_batch parallel "time lines": each of the
 * mini_batch slots starts at a random index into paths and advances by a
 * random per-call speed (1..augment_speed) frames every round, wrapping
 * modulo m. Used for sequential (video-style) training batches. Returned
 * array is caller-freed; the strings are borrowed from `paths`.
 * Guarded by the global mutex since random_gen() state is shared. */
char **get_sequential_paths(char **paths, int n, int m, int mini_batch, int augment_speed)
{
    int speed = rand_int(1, augment_speed);
    if (speed < 1) speed = 1;
    char** sequentia_paths = (char**)calloc(n, sizeof(char*));
    int i;
    pthread_mutex_lock(&mutex);
    //printf("n = %d, mini_batch = %d \n", n, mini_batch);
    /* Random starting frame for each time line. */
    unsigned int *start_time_indexes = (unsigned int *)calloc(mini_batch, sizeof(unsigned int));
    for (i = 0; i < mini_batch; ++i) {
        start_time_indexes[i] = random_gen() % m;
        //printf(" start_time_indexes[i] = %u, ", start_time_indexes[i]);
    }
    for (i = 0; i < n; ++i) {
        do {
            int time_line_index = i % mini_batch;
            unsigned int index = start_time_indexes[time_line_index] % m;
            start_time_indexes[time_line_index] += speed;
            //int index = random_gen() % m;
            sequentia_paths[i] = paths[index];
            //if(i == 0) printf("%s\n", paths[index]);
            //printf(" index = %u - grp: %s \n", index, paths[index]);
            if (strlen(sequentia_paths[i]) <= 4) printf(" Very small path to the image: %s \n", sequentia_paths[i]);
        } while (strlen(sequentia_paths[i]) == 0);	/* retry on empty path */
    }
    free(start_time_indexes);
    pthread_mutex_unlock(&mutex);
    return sequentia_paths;
}
/* Samples n paths uniformly at random (with replacement) from the m entries
 * of `paths`. Returned array is caller-freed; the strings themselves are
 * borrowed. Guarded by the global mutex since random_gen() state is shared. */
char **get_random_paths(char **paths, int n, int m)
{
    char** random_paths = (char**)calloc(n, sizeof(char*));
    int i;
    pthread_mutex_lock(&mutex);
    //printf("n = %d \n", n);
    for(i = 0; i < n; ++i){
        do {
            int index = random_gen() % m;
            random_paths[i] = paths[index];
            //if(i == 0) printf("%s\n", paths[index]);
            //printf("grp: %s\n", paths[index]);
            if (strlen(random_paths[i]) <= 4) printf(" Very small path to the image: %s \n", random_paths[i]);
        } while (strlen(random_paths[i]) == 0);	/* retry on empty path */
    }
    pthread_mutex_unlock(&mutex);
    return random_paths;
}
/* Returns a new array of n paths with `find` replaced by `replace` in each.
 * Every returned string is a fresh heap copy (copy_string); the array and
 * strings are owned by the caller. */
char **find_replace_paths(char **paths, int n, char *find, char *replace)
{
    char** replace_paths = (char**)calloc(n, sizeof(char*));
    int i;
    for(i = 0; i < n; ++i){
        char replaced[4096];
        find_replace(paths[i], find, replace, replaced);
        replace_paths[i] = copy_string(replaced);
    }
    return replace_paths;
}
/* Loads n images resized to w x h, converts each to grayscale, and stores
 * each image's pixel buffer as one matrix row. X.cols is set from the last
 * image loaded -- presumably all images end up the same size after the
 * resize; confirm w/h are always nonzero here. */
matrix load_image_paths_gray(char **paths, int n, int w, int h)
{
    int i;
    matrix X;
    X.rows = n;
    X.vals = (float**)calloc(X.rows, sizeof(float*));
    X.cols = 0;
    for(i = 0; i < n; ++i){
        image im = load_image(paths[i], w, h, 3);
        image gray = grayscale_image(im);
        free_image(im);	/* drop the color version; keep only gray */
        im = gray;
        X.vals[i] = im.data;
        X.cols = im.h*im.w*im.c;
    }
    return X;
}
/* Loads n color images resized to w x h and flattens each pixel buffer into
 * one row of the returned matrix (the matrix takes ownership of the data). */
matrix load_image_paths(char **paths, int n, int w, int h)
{
    matrix X;
    int row;

    X.rows = n;
    X.vals = (float**)calloc(X.rows, sizeof(float*));
    X.cols = 0;
    for (row = 0; row < n; ++row) {
        image im = load_image_color(paths[row], w, h);
        X.vals[row] = im.data;
        X.cols = im.h*im.w*im.c;
    }
    return X;
}
/* Loads n images at native size and applies the training augmentations:
 * random crop/scale/rotation (random_augment_image), optional horizontal
 * flip, and HSV distortion. Each augmented image becomes one matrix row;
 * the matrix takes ownership of the pixel buffers. */
matrix load_image_augment_paths(char **paths, int n, int use_flip, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
    int i;
    matrix X;
    X.rows = n;
    X.vals = (float**)calloc(X.rows, sizeof(float*));
    X.cols = 0;
    for(i = 0; i < n; ++i){
        image im = load_image_color(paths[i], 0, 0);
        image crop = random_augment_image(im, angle, aspect, min, max, size);
        int flip = use_flip ? random_gen() % 2 : 0;
        if (flip)
            flip_image(crop);
        random_distort_image(crop, hue, saturation, exposure);

        /*
        show_image(im, "orig");
        show_image(crop, "crop");
        cvWaitKey(0);
        */
        free_image(im);	/* only the augmented crop is kept */
        X.vals[i] = crop.data;
        X.cols = crop.h*crop.w*crop.c;
    }
    return X;
}
extern int check_mistakes;
/* Parses a YOLO-style label file ("id x y w h" per line) into a heap array
 * of box_label, filling the derived left/right/top/bottom fields. *n gets
 * the box count. A missing file is tolerated: it is logged to bad.list and
 * an empty (but non-NULL, freeable) array is returned.
 * Fix: the array now grows geometrically instead of realloc'ing by one
 * element per box, which was O(n^2) worst case for large label files. */
box_label *read_boxes(char *filename, int *n)
{
    box_label* boxes = (box_label*)calloc(1, sizeof(box_label));
    FILE *file = fopen(filename, "r");
    if (!file) {
        printf("Can't open label file. (This can be normal only if you use MSCOCO): %s \n", filename);
        //file_error(filename);
        FILE* fw = fopen("bad.list", "a");
        fwrite(filename, sizeof(char), strlen(filename), fw);
        char *new_line = "\n";
        fwrite(new_line, sizeof(char), strlen(new_line), fw);
        fclose(fw);
        if (check_mistakes) getchar();
        *n = 0;
        return boxes;
    }
    float x, y, h, w;
    int id;
    int count = 0;
    int capacity = 1;   /* calloc above reserved room for one element */
    while(fscanf(file, "%d %f %f %f %f", &id, &x, &y, &w, &h) == 5){
        if (count == capacity) {
            capacity *= 2;  /* geometric growth: amortized O(1) per box */
            boxes = (box_label*)realloc(boxes, capacity * sizeof(box_label));
        }
        boxes[count].id = id;
        boxes[count].x = x;
        boxes[count].y = y;
        boxes[count].h = h;
        boxes[count].w = w;
        /* Precompute the box edges from center + size. */
        boxes[count].left   = x - w/2;
        boxes[count].right  = x + w/2;
        boxes[count].top    = y - h/2;
        boxes[count].bottom = y + h/2;
        ++count;
    }
    fclose(file);
    *n = count;
    return boxes;
}
/* Shuffles the boxes in place by swapping each element with one chosen
 * uniformly at random from the whole array. */
void randomize_boxes(box_label *b, int n)
{
    int i;
    for (i = 0; i < n; ++i) {
        int j = random_gen()%n;
        box_label tmp = b[i];
        b[i] = b[j];
        b[j] = tmp;
    }
}
/* Remaps box coordinates into the cropped/scaled image frame (scale sx/sy,
 * shift dx/dy), applying the horizontal flip if requested and clamping to
 * [0,1]. Boxes at exactly (0,0) or entirely outside the crop are marked
 * invalid by setting all fields to the 999999 sentinel (checked later by
 * fill_truth_detection). */
void correct_boxes(box_label *boxes, int n, float dx, float dy, float sx, float sy, int flip)
{
    int i;
    for(i = 0; i < n; ++i){
        if(boxes[i].x == 0 && boxes[i].y == 0) {
            /* (0,0) is treated as "no box" -- mark invalid. */
            boxes[i].x = 999999;
            boxes[i].y = 999999;
            boxes[i].w = 999999;
            boxes[i].h = 999999;
            continue;
        }
        if ((boxes[i].x + boxes[i].w / 2) < 0 || (boxes[i].y + boxes[i].h / 2) < 0 ||
            (boxes[i].x - boxes[i].w / 2) > 1 || (boxes[i].y - boxes[i].h / 2) > 1)
        {
            /* Box lies completely outside the unit square -- mark invalid. */
            boxes[i].x = 999999;
            boxes[i].y = 999999;
            boxes[i].w = 999999;
            boxes[i].h = 999999;
            continue;
        }
        /* Transform the edges into the new frame. */
        boxes[i].left   = boxes[i].left  * sx - dx;
        boxes[i].right  = boxes[i].right * sx - dx;
        boxes[i].top    = boxes[i].top   * sy - dy;
        boxes[i].bottom = boxes[i].bottom* sy - dy;
        if(flip){
            /* Mirror horizontally: left and right swap around x = 0.5. */
            float swap = boxes[i].left;
            boxes[i].left = 1. - boxes[i].right;
            boxes[i].right = 1. - swap;
        }
        boxes[i].left =  constrain(0, 1, boxes[i].left);
        boxes[i].right = constrain(0, 1, boxes[i].right);
        boxes[i].top =   constrain(0, 1, boxes[i].top);
        boxes[i].bottom =   constrain(0, 1, boxes[i].bottom);
        /* Rebuild center/size from the clamped edges. */
        boxes[i].x = (boxes[i].left+boxes[i].right)/2;
        boxes[i].y = (boxes[i].top+boxes[i].bottom)/2;
        boxes[i].w = (boxes[i].right - boxes[i].left);
        boxes[i].h = (boxes[i].bottom - boxes[i].top);
        boxes[i].w = constrain(0, 1, boxes[i].w);
        boxes[i].h = constrain(0, 1, boxes[i].h);
    }
}
/* Builds SWAG-style ground truth for one image: reads its label file,
 * shuffles and crop-corrects the boxes, then writes up to 30 boxes into
 * `truth` as consecutive records of (x, y, w, h, one-hot class) with stride
 * 4+classes. */
void fill_truth_swag(char *path, float *truth, int classes, int flip, float dx, float dy, float sx, float sy)
{
    char labelpath[4096];
    replace_image_to_label(path, labelpath);
    int count = 0;
    box_label *boxes = read_boxes(labelpath, &count);
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    float x,y,w,h;
    int id;
    int i;
    for (i = 0; i < count && i < 30; ++i) {
        x = boxes[i].x;
        y = boxes[i].y;
        w = boxes[i].w;
        h = boxes[i].h;
        id = boxes[i].id;
        /* NOTE(review): filters only strictly negative sizes; the sibling
         * fill_truth_region uses a .001 threshold -- confirm intended. */
        if (w < .0 || h < .0) continue;
        int index = (4+classes) * i;
        truth[index++] = x;
        truth[index++] = y;
        truth[index++] = w;
        truth[index++] = h;
        if (id < classes) truth[index+id] = 1;
    }
    free(boxes);
}
/* Builds region-layer ground truth on a num_boxes x num_boxes grid: each
 * box is assigned to the grid cell containing its center, with the record
 * layout (objectness, one-hot class, x-offset, y-offset, w, h) per cell.
 * Only the first box landing in a cell is kept. */
void fill_truth_region(char *path, float *truth, int classes, int num_boxes, int flip, float dx, float dy, float sx, float sy)
{
    char labelpath[4096];
    replace_image_to_label(path, labelpath);
    int count = 0;
    box_label *boxes = read_boxes(labelpath, &count);
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    float x,y,w,h;
    int id;
    int i;
    for (i = 0; i < count; ++i) {
        x = boxes[i].x;
        y = boxes[i].y;
        w = boxes[i].w;
        h = boxes[i].h;
        id = boxes[i].id;
        if (w < .001 || h < .001) continue;	/* skip degenerate boxes */
        /* Grid cell containing the box center; x/y become within-cell offsets. */
        int col = (int)(x*num_boxes);
        int row = (int)(y*num_boxes);
        x = x*num_boxes - col;
        y = y*num_boxes - row;
        int index = (col+row*num_boxes)*(5+classes);
        if (truth[index]) continue;	/* cell already has a box */
        truth[index++] = 1;
        if (id < classes) truth[index+id] = 1;
        index += classes;
        truth[index++] = x;
        truth[index++] = y;
        truth[index++] = w;
        truth[index++] = h;
    }
    free(boxes);
}
/* Builds detection ground truth: reads, shuffles and crop-corrects the
 * label boxes, validates each (class id range, minimum on-screen size,
 * coordinate sanity), and writes up to num_boxes surviving records of
 * (x, y, w, h, id) with stride 5 into `truth`. Returns the smallest box
 * side in network pixels (0 if none), used by the caller for loss scaling.
 * Fix: the shell-message buffer was char[256] while labelpath can be up to
 * 4096 bytes, so the sprintf calls could overflow it; the buffer is now
 * sized for the path and all formatting uses snprintf.
 * SECURITY NOTE: labelpath is interpolated into a system() command; a path
 * containing shell metacharacters would be executed. Paths come from the
 * training list -- confirm they are trusted. */
int fill_truth_detection(const char *path, int num_boxes, float *truth, int classes, int flip, float dx, float dy, float sx, float sy,
    int net_w, int net_h)
{
    char labelpath[4096];
    replace_image_to_label(path, labelpath);

    int count = 0;
    int i;
    box_label *boxes = read_boxes(labelpath, &count);
    int min_w_h = 0;
    float lowest_w = 1.F / net_w;
    float lowest_h = 1.F / net_h;
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    if (count > num_boxes) count = num_boxes;
    float x, y, w, h;
    int id;
    int sub = 0;    /* number of rejected boxes so far (compacts the output) */

    for (i = 0; i < count; ++i) {
        x = boxes[i].x;
        y = boxes[i].y;
        w = boxes[i].w;
        h = boxes[i].h;
        id = boxes[i].id;

        // not detect small objects
        //if ((w < 0.001F || h < 0.001F)) continue;
        // if truth (box for object) is smaller than 1x1 pix
        char buff[4096 + 256];  /* was 256: too small for a 4096-byte labelpath */
        if (id >= classes) {
            printf("\n Wrong annotation: class_id = %d. But class_id should be [from 0 to %d] \n", id, (classes-1));
            snprintf(buff, sizeof(buff), "echo %s \"Wrong annotation: class_id = %d. But class_id should be [from 0 to %d]\" >> bad_label.list", labelpath, id, (classes-1));
            system(buff);
            getchar();
            ++sub;
            continue;
        }
        if ((w < lowest_w || h < lowest_h)) {
            //sprintf(buff, "echo %s \"Very small object: w < lowest_w OR h < lowest_h\" >> bad_label.list", labelpath);
            //system(buff);
            ++sub;
            continue;
        }
        if (x == 999999 || y == 999999) {   /* correct_boxes() invalid sentinel */
            printf("\n Wrong annotation: x = 0, y = 0, < 0 or > 1 \n");
            snprintf(buff, sizeof(buff), "echo %s \"Wrong annotation: x = 0 or y = 0\" >> bad_label.list", labelpath);
            system(buff);
            ++sub;
            if (check_mistakes) getchar();
            continue;
        }
        if (x <= 0 || x > 1 || y <= 0 || y > 1) {
            printf("\n Wrong annotation: x = %f, y = %f \n", x, y);
            snprintf(buff, sizeof(buff), "echo %s \"Wrong annotation: x = %f, y = %f\" >> bad_label.list", labelpath, x, y);
            system(buff);
            ++sub;
            if (check_mistakes) getchar();
            continue;
        }
        if (w > 1) {
            printf("\n Wrong annotation: w = %f \n", w);
            snprintf(buff, sizeof(buff), "echo %s \"Wrong annotation: w = %f\" >> bad_label.list", labelpath, w);
            system(buff);
            w = 1;  /* clamp and keep the box */
            if (check_mistakes) getchar();
        }
        if (h > 1) {
            printf("\n Wrong annotation: h = %f \n", h);
            snprintf(buff, sizeof(buff), "echo %s \"Wrong annotation: h = %f\" >> bad_label.list", labelpath, h);
            system(buff);
            h = 1;  /* clamp and keep the box */
            if (check_mistakes) getchar();
        }
        if (x == 0) x += lowest_w;
        if (y == 0) y += lowest_h;

        truth[(i-sub)*5+0] = x;
        truth[(i-sub)*5+1] = y;
        truth[(i-sub)*5+2] = w;
        truth[(i-sub)*5+3] = h;
        truth[(i-sub)*5+4] = id;

        /* Track the smallest box dimension in network pixels. */
        if (min_w_h == 0) min_w_h = w*net_w;
        if (min_w_h > w*net_w) min_w_h = w*net_w;
        if (min_w_h > h*net_h) min_w_h = h*net_h;
    }
    free(boxes);
    return min_w_h;
}
/* Decodes n predicted characters from one-hot rows of width NUMCHARS (takes
 * the argmax of each row) and prints them as a line. */
void print_letters(float *pred, int n)
{
    int i;
    for(i = 0; i < n; ++i){
        int index = max_index(pred+i*NUMCHARS, NUMCHARS);
        printf("%c", int_to_alphanum(index));
    }
    printf("\n");
}
/* Encodes a captcha label from the file's basename: each character before
 * the extension becomes a one-hot row of width NUMCHARS in `truth`, and the
 * remaining positions (up to n) are marked with the trailing
 * "no character" class (index NUMCHARS-1).
 * Fix: strlen(begin) was re-evaluated in the loop condition on every
 * iteration (O(len^2) scanning); it is now hoisted out of the loop. */
void fill_truth_captcha(char *path, int n, float *truth)
{
    char *begin = strrchr(path, '/');
    ++begin;
    int i;
    int len = (int)strlen(begin);
    for(i = 0; i < len && i < n && begin[i] != '.'; ++i){
        int index = alphanum_to_int(begin[i]);
        if(index > 35) printf("Bad %c\n", begin[i]);
        truth[i*NUMCHARS+index] = 1;
    }
    /* Pad unused positions with the "no character" class. */
    for(;i < n; ++i){
        truth[i*NUMCHARS + NUMCHARS-1] = 1;
    }
}
data load_data_captcha(char **paths, int n, int m, int k, int w, int h)
{
if(m) paths = get_random_paths(paths, n, m);
data d = {0};
d.shallow = 0;
d.X = load_image_paths(paths, n, w, h);
d.y = make_matrix(n, k*NUMCHARS);
int i;
for(i = 0; i < n; ++i){
fill_truth_captcha(paths[i], k, d.y.vals[i]);
}
if(m) free(paths);
return d;
}
/* Loads an autoencoder-style captcha batch: the target y is the input X
 * itself. NOTE(review): d.y aliases d.X (shared row pointers) while
 * d.shallow == 0; free_data() appears to free both matrices -- confirm
 * callers avoid a double free here. X.cols is overridden to 17100
 * (presumably the fixed flattened image size for this task -- confirm). */
data load_data_captcha_encode(char **paths, int n, int m, int w, int h)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.X.cols = 17100;
    d.y = d.X;
    if(m) free(paths);
    return d;
}
/* One-hot classification truth from a file path: truth[i] is set to 1 for
 * every label whose string occurs as a substring of `path` (truth is zeroed
 * first). Exactly one label is expected to match; otherwise a diagnostic
 * listing the matching labels is printed. */
void fill_truth(char *path, char **labels, int k, float *truth)
{
    int i;
    int matches = 0;

    memset(truth, 0, k*sizeof(float));
    for (i = 0; i < k; ++i) {
        if (strstr(path, labels[i]) != NULL) {
            truth[i] = 1;
            ++matches;
        }
    }
    if (matches != 1) {
        printf("Too many or too few labels: %d, %s\n", matches, path);
        matches = 0;
        for (i = 0; i < k; ++i) {
            if (strstr(path, labels[i]) != NULL) {
                printf("\t label %d: %s \n", matches, labels[i]);
                matches++;
            }
        }
    }
}
/*
Adjust a one-hot truth vector for hierarchical (tree-structured) labels.
First every positive label is propagated upward: all ancestors of a set
label are also set to 1. Then, for every sibling group that contains no
positive label, all members are marked with SECRET_NUM (presumably a
"don't care"/masked sentinel used by the loss — confirm against the loss
implementation).
*/
void fill_hierarchy(float *truth, int k, tree *hierarchy)
{
    int j;
    // Pass 1: set all ancestors of every positive label.
    for(j = 0; j < k; ++j){
        if(truth[j]){
            int parent = hierarchy->parent[j];
            while(parent >= 0){
                truth[parent] = 1;
                parent = hierarchy->parent[parent];
            }
        }
    }
    int i;
    int count = 0;
    // Pass 2: walk the sibling groups; 'count' is the index of the first
    // member of the current group.
    for(j = 0; j < hierarchy->groups; ++j){
        //printf("%d\n", count);
        int mask = 1;   // stays 1 iff no member of this group is positive
        for(i = 0; i < hierarchy->group_size[j]; ++i){
            if(truth[count + i]){
                mask = 0;
                break;
            }
        }
        if (mask) {
            // No positive label in this group: mask the whole group.
            for(i = 0; i < hierarchy->group_size[j]; ++i){
                truth[count + i] = SECRET_NUM;
            }
        }
        count += hierarchy->group_size[j];
    }
}
/*
Build an n-by-k label matrix for the given image paths by substring-matching
against labels (see fill_truth). If labels is NULL the matrix stays zeroed.
An optional label hierarchy is applied to each row.
*/
matrix load_labels_paths(char **paths, int n, char **labels, int k, tree *hierarchy)
{
    matrix y = make_matrix(n, k);
    if (labels) {
        int row;
        for (row = 0; row < n; ++row) {
            fill_truth(paths[row], labels, k, y.vals[row]);
            if (hierarchy) fill_hierarchy(y.vals[row], k, hierarchy);
        }
    }
    return y;
}
/*
Build an n-by-k multi-label (tag) matrix. For each image path, the matching
.txt label file (derived by imgs->labels path rewriting, with labels2 as a
fallback directory) is read as a list of integer tag ids; ids < k set the
corresponding column to 1. Prints how many label files were found.
*/
matrix load_tags_paths(char **paths, int n, int k)
{
    matrix y = make_matrix(n, k);
    int opened = 0;
    int row;
    for (row = 0; row < n; ++row) {
        char label[4096];
        find_replace(paths[row], "imgs", "labels", label);
        find_replace(label, "_iconl.jpeg", ".txt", label);
        FILE *file = fopen(label, "r");
        if (!file) {
            // Fallback: try the secondary labels directory.
            find_replace(label, "labels", "labels2", label);
            file = fopen(label, "r");
        }
        if (!file) continue;
        ++opened;
        int tag;
        while (fscanf(file, "%d", &tag) == 1) {
            if (tag < k) y.vals[row][tag] = 1;
        }
        fclose(file);
    }
    printf("%d/%d\n", opened, n);
    return y;
}
/*
Read label strings from a file, one per line. Optionally reports the number
of labels through *size. The returned array (and its strings) is owned by
the caller; the temporary list structure is released here.
*/
char **get_labels_custom(char *filename, int *size)
{
    list *plist = get_paths(filename);
    char **labels = (char **)list_to_array(plist);
    if (size) *size = plist->size;
    free_list(plist);
    return labels;
}
/* Convenience wrapper around get_labels_custom() for callers that do not
need the number of labels read. */
char **get_labels(char *filename)
{
    return get_labels_custom(filename, NULL);
}
/*
Release a data record. A shallow record only owns its row-pointer arrays
(the row buffers belong to someone else); a deep record owns the full
matrices and is freed recursively.
*/
void free_data(data d)
{
    if (d.shallow) {
        free(d.X.vals);
        free(d.y.vals);
    } else {
        free_matrix(d.X);
        free_matrix(d.y);
    }
}
/*
Load n randomly-sampled, jitter-augmented training examples for region-based
detection (grid of size*size cells, 5+classes floats per cell).
Left byte-identical: the sequence of rand_uniform()/random_gen() calls is
part of the augmentation behavior and must not be reordered.
*/
data load_data_region(int n, char **paths, int m, int w, int h, int size, int classes, float jitter, float hue, float saturation, float exposure)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;
    // One flattened w*h*3 image per row; the row buffers are filled below.
    d.X.rows = n;
    d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;
    int k = size*size*(5+classes);
    d.y = make_matrix(n, k);
    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);
        int oh = orig.h;
        int ow = orig.w;
        // Random crop margins of up to +/- jitter of each dimension.
        int dw = (ow*jitter);
        int dh = (oh*jitter);
        int pleft = rand_uniform(-dw, dw);
        int pright = rand_uniform(-dw, dw);
        int ptop = rand_uniform(-dh, dh);
        int pbot = rand_uniform(-dh, dh);
        int swidth = ow - pleft - pright;
        int sheight = oh - ptop - pbot;
        // Crop scale relative to the original image (used to remap boxes).
        float sx = (float)swidth / ow;
        float sy = (float)sheight / oh;
        int flip = random_gen()%2;
        image cropped = crop_image(orig, pleft, ptop, swidth, sheight);
        // Normalized offset of the crop origin, in crop coordinates.
        float dx = ((float)pleft/ow)/sx;
        float dy = ((float)ptop /oh)/sy;
        image sized = resize_image(cropped, w, h);
        if(flip) flip_image(sized);
        random_distort_image(sized, hue, saturation, exposure);
        // The resized buffer is handed to d.X (not freed here).
        d.X.vals[i] = sized.data;
        fill_truth_region(random_paths[i], d.y.vals[i], classes, size, flip, dx, dy, 1./sx, 1./sy);
        free_image(orig);
        free_image(cropped);
    }
    free(random_paths);
    return d;
}
/*
Load n training pairs for image comparison. Each example concatenates two
images (w*h*6 floats) and builds a 2*classes target from their per-class IoU
label files; classes where neither or both images exceed 0.5 get SECRET_NUM
(masked).
Fix: the original dereferenced fp1/fp2 without checking fopen() success and
crashed on a missing label file; we now fail loudly via file_error(), in
line with the other loaders in this file.
*/
data load_data_compare(int n, char **paths, int m, int classes, int w, int h)
{
    if(m) paths = get_random_paths(paths, 2*n, m);
    int i,j;
    data d = {0};
    d.shallow = 0;
    d.X.rows = n;
    d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*6;       // two RGB images side by side
    int k = 2*(classes);
    d.y = make_matrix(n, k);
    for(i = 0; i < n; ++i){
        image im1 = load_image_color(paths[i*2], w, h);
        image im2 = load_image_color(paths[i*2+1], w, h);
        d.X.vals[i] = (float*)calloc(d.X.cols, sizeof(float));
        memcpy(d.X.vals[i], im1.data, h*w*3*sizeof(float));
        memcpy(d.X.vals[i] + h*w*3, im2.data, h*w*3*sizeof(float));
        int id;
        float iou;
        char imlabel1[4096];
        char imlabel2[4096];
        find_replace(paths[i*2], "imgs", "labels", imlabel1);
        find_replace(imlabel1, "jpg", "txt", imlabel1);
        FILE *fp1 = fopen(imlabel1, "r");
        if(!fp1) file_error(imlabel1);   // was dereferenced unchecked
        // Keep the best IoU per class for the first image (even slots).
        while(fscanf(fp1, "%d %f", &id, &iou) == 2){
            if (d.y.vals[i][2*id] < iou) d.y.vals[i][2*id] = iou;
        }
        find_replace(paths[i*2+1], "imgs", "labels", imlabel2);
        find_replace(imlabel2, "jpg", "txt", imlabel2);
        FILE *fp2 = fopen(imlabel2, "r");
        if(!fp2) file_error(imlabel2);   // was dereferenced unchecked
        // Best IoU per class for the second image (odd slots).
        while(fscanf(fp2, "%d %f", &id, &iou) == 2){
            if (d.y.vals[i][2*id + 1] < iou) d.y.vals[i][2*id + 1] = iou;
        }
        // Turn the IoU pair into a hard comparison target per class.
        for (j = 0; j < classes; ++j){
            if (d.y.vals[i][2*j] > .5 && d.y.vals[i][2*j+1] < .5){
                d.y.vals[i][2*j] = 1;
                d.y.vals[i][2*j+1] = 0;
            } else if (d.y.vals[i][2*j] < .5 && d.y.vals[i][2*j+1] > .5){
                d.y.vals[i][2*j] = 0;
                d.y.vals[i][2*j+1] = 1;
            } else {
                d.y.vals[i][2*j] = SECRET_NUM;
                d.y.vals[i][2*j+1] = SECRET_NUM;
            }
        }
        fclose(fp1);
        fclose(fp2);
        free_image(im1);
        free_image(im2);
    }
    if(m) free(paths);
    return d;
}
/*
Load ONE randomly-chosen, jitter-augmented example for SWAG-style training.
The output resolution equals the original image's (the crop is resized back
to w x h of the source). Truth holds up to 30 boxes of (4+classes) floats.
Left byte-identical: the random_gen()/rand_uniform() call sequence is part
of the behavior.
*/
data load_data_swag(char **paths, int n, int classes, float jitter)
{
    int index = random_gen()%n;
    char *random_path = paths[index];
    image orig = load_image_color(random_path, 0, 0);
    int h = orig.h;
    int w = orig.w;
    data d = {0};
    d.shallow = 0;
    d.w = w;
    d.h = h;
    d.X.rows = 1;
    d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;
    int k = (4+classes)*30;
    d.y = make_matrix(1, k);
    // Random crop margins of up to +/- jitter of each dimension.
    int dw = w*jitter;
    int dh = h*jitter;
    int pleft = rand_uniform(-dw, dw);
    int pright = rand_uniform(-dw, dw);
    int ptop = rand_uniform(-dh, dh);
    int pbot = rand_uniform(-dh, dh);
    int swidth = w - pleft - pright;
    int sheight = h - ptop - pbot;
    // Crop scale and normalized origin, used to remap the boxes.
    float sx = (float)swidth / w;
    float sy = (float)sheight / h;
    int flip = random_gen()%2;
    image cropped = crop_image(orig, pleft, ptop, swidth, sheight);
    float dx = ((float)pleft/w)/sx;
    float dy = ((float)ptop /h)/sy;
    image sized = resize_image(cropped, w, h);
    if(flip) flip_image(sized);
    // The resized buffer is handed to d.X (not freed here).
    d.X.vals[0] = sized.data;
    fill_truth_swag(random_path, d.y.vals[0], classes, flip, dx, dy, 1./sx, 1./sy);
    free_image(orig);
    free_image(cropped);
    return d;
}
/*
Append ground-truth boxes from old_truth after the boxes already present in
new_truth. Each box is 5 floats (x, y, w, h, id) and x == 0 terminates a
list. Copying stops when the boxes buffer is full or old_truth runs out.
Used to merge labels of two mixup-blended images.
*/
void blend_truth(float *new_truth, int boxes, float *old_truth)
{
    const int stride = 4 + 1;
    int used = 0;
    int t, f;
    // Count boxes already stored in new_truth (x == 0 marks the end).
    while (used < boxes && new_truth[used*stride] != 0) ++used;
    // Append old boxes into the remaining slots.
    for (t = used; t < boxes; ++t) {
        float *dst = new_truth + t*stride;
        float *src = old_truth + (t - used)*stride;
        if (src[0] == 0) break;
        for (f = 0; f < stride; ++f) dst[f] = src[f];
    }
    //printf("\n was %d bboxes, now %d bboxes \n", count_new_truth, t);
}
#ifdef OPENCV
#include "http_stream.h"
/*
Load n augmented detection examples via OpenCV. Supports jitter/crop, flip,
HSV distortion, optional background blur, letterboxing, mixup blending of
two image streams, and sequential ("track") sampling for video.
Left byte-identical: the exact order of random_* calls and the re-use of the
precomputed r1..r4/r_scale values across a tracked mini-batch is part of the
augmentation behavior.
*/
data load_data_detection(int n, char **paths, int m, int w, int h, int c, int boxes, int classes, int use_flip, int use_blur, int use_mixup,
    float jitter, float hue, float saturation, float exposure, int mini_batch, int track, int augment_speed, int letter_box, int show_imgs)
{
    const int random_index = random_gen();
    c = c ? c : 3;                       // default to RGB
    char **random_paths;
    char **mixup_random_paths = NULL;
    // "track" keeps frames of the same sequence together (video training).
    if (track) random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed);
    else random_paths = get_random_paths(paths, n, m);
    int mixup = use_mixup ? random_gen() % 2 : 0;   // coin-flip per batch
    //printf("\n mixup = %d \n", mixup);
    if (mixup) {
        // Second image stream that gets blended onto the first.
        if (track) mixup_random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed);
        else mixup_random_paths = get_random_paths(paths, n, m);
    }
    int i;
    data d = {0};
    d.shallow = 0;
    d.X.rows = n;
    d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*c;
    // Precomputed augmentation parameters, shared across a tracked sequence.
    float r1 = 0, r2 = 0, r3 = 0, r4 = 0, r_scale = 0;
    float dhue = 0, dsat = 0, dexp = 0, flip = 0, blur = 0;
    int augmentation_calculated = 0;
    d.y = make_matrix(n, 5*boxes);
    int i_mixup = 0;
    for (i_mixup = 0; i_mixup <= mixup; i_mixup++) {
        if (i_mixup) augmentation_calculated = 0; // recalculate augmentation for the 2nd sequence if(track==1)
        for (i = 0; i < n; ++i) {
            float *truth = (float*)calloc(5 * boxes, sizeof(float));
            const char *filename = (i_mixup) ? mixup_random_paths[i] : random_paths[i];
            int flag = (c >= 3);
            mat_cv *src;
            src = load_image_mat_cv(filename, flag);
            if (src == NULL) {
                // Unreadable image: skip (leaves this row's X NULL).
                if (check_mistakes) getchar();
                continue;
            }
            int oh = get_height_mat(src);
            int ow = get_width_mat(src);
            int dw = (ow*jitter);
            int dh = (oh*jitter);
            // Draw fresh augmentation params; when tracking, reuse them for
            // the whole mini-batch so a sequence is augmented consistently.
            if (!augmentation_calculated || !track)
            {
                augmentation_calculated = 1;
                r1 = random_float();
                r2 = random_float();
                r3 = random_float();
                r4 = random_float();
                r_scale = random_float();
                dhue = rand_uniform_strong(-hue, hue);
                dsat = rand_scale(saturation);
                dexp = rand_scale(exposure);
                flip = use_flip ? random_gen() % 2 : 0;
                //blur = rand_int(0, 1) ? (use_blur) : 0;
                int tmp_blur = rand_int(0, 2); // 0 - disable, 1 - blur background, 2 - blur the whole image
                if (tmp_blur == 2) blur = use_blur;
                else blur = tmp_blur;
            }
            int pleft = rand_precalc_random(-dw, dw, r1);
            int pright = rand_precalc_random(-dw, dw, r2);
            int ptop = rand_precalc_random(-dh, dh, r3);
            int pbot = rand_precalc_random(-dh, dh, r4);
            //printf("\n pleft = %d, pright = %d, ptop = %d, pbot = %d, ow = %d, oh = %d \n", pleft, pright, ptop, pbot, ow, oh);
            float scale = rand_precalc_random(.25, 2, r_scale); // unused currently
            if (letter_box)
            {
                // Widen the crop so the network aspect ratio is matched by
                // padding instead of stretching.
                float img_ar = (float)ow / (float)oh;
                float net_ar = (float)w / (float)h;
                float result_ar = img_ar / net_ar;
                //printf(" ow = %d, oh = %d, w = %d, h = %d, img_ar = %f, net_ar = %f, result_ar = %f \n", ow, oh, w, h, img_ar, net_ar, result_ar);
                if (result_ar > 1) // sheight - should be increased
                {
                    float oh_tmp = ow / net_ar;
                    float delta_h = (oh_tmp - oh)/2;
                    ptop = ptop - delta_h;
                    pbot = pbot - delta_h;
                    //printf(" result_ar = %f, oh_tmp = %f, delta_h = %d, ptop = %f, pbot = %f \n", result_ar, oh_tmp, delta_h, ptop, pbot);
                }
                else // swidth - should be increased
                {
                    float ow_tmp = oh * net_ar;
                    float delta_w = (ow_tmp - ow)/2;
                    pleft = pleft - delta_w;
                    pright = pright - delta_w;
                    //printf(" result_ar = %f, ow_tmp = %f, delta_w = %d, pleft = %f, pright = %f \n", result_ar, ow_tmp, delta_w, pleft, pright);
                }
            }
            int swidth = ow - pleft - pright;
            int sheight = oh - ptop - pbot;
            // Crop scale and normalized origin used to remap the boxes.
            float sx = (float)swidth / ow;
            float sy = (float)sheight / oh;
            float dx = ((float)pleft / ow) / sx;
            float dy = ((float)ptop / oh) / sy;
            int min_w_h = fill_truth_detection(filename, boxes, truth, classes, flip, dx, dy, 1. / sx, 1. / sy, w, h);
            if (min_w_h/4 < blur && blur > 1) blur = min_w_h / 4; // disable blur if one of the objects is too small
            image ai = image_data_augmentation(src, w, h, pleft, ptop, swidth, sheight, flip, dhue, dsat, dexp,
                blur, boxes, d.y.vals[i]);
            if (i_mixup) {
                // Blend this image 50/50 with the first pass' image and
                // append its boxes to the first pass' truth.
                image old_img = ai;
                old_img.data = d.X.vals[i];
                //show_image(ai, "new");
                //show_image(old_img, "old");
                //wait_until_press_key_cv();
                blend_images_cv(ai, 0.5, old_img, 0.5);
                blend_truth(truth, boxes, d.y.vals[i]);
                free_image(old_img);
            }
            d.X.vals[i] = ai.data;
            memcpy(d.y.vals[i], truth, 5*boxes * sizeof(float));
            if (show_imgs)// && i_mixup) // delete i_mixup
            {
                // Debug path: dump the augmented image with its boxes drawn.
                image tmp_ai = copy_image(ai);
                char buff[1000];
                sprintf(buff, "aug_%d_%d_%s_%d", random_index, i, basecfg((char*)filename), random_gen());
                int t;
                for (t = 0; t < boxes; ++t) {
                    box b = float_to_box_stride(d.y.vals[i] + t*(4 + 1), 1);
                    if (!b.x) break;
                    int left = (b.x - b.w / 2.)*ai.w;
                    int right = (b.x + b.w / 2.)*ai.w;
                    int top = (b.y - b.h / 2.)*ai.h;
                    int bot = (b.y + b.h / 2.)*ai.h;
                    draw_box_width(tmp_ai, left, top, right, bot, 1, 150, 100, 50); // 3 channels RGB
                }
                save_image(tmp_ai, buff);
                if (show_imgs == 1) {
                    //char buff_src[1000];
                    //sprintf(buff_src, "src_%d_%d_%s_%d", random_index, i, basecfg((char*)filename), random_gen());
                    //show_image_mat(src, buff_src);
                    show_image(tmp_ai, buff);
                    wait_until_press_key_cv();
                }
                printf("\nYou use flag -show_imgs, so will be saved aug_...jpg images. Click on window and press ESC button \n");
                free_image(tmp_ai);
            }
            release_mat(&src);
            free(truth);
        }
    }
    free(random_paths);
    if(mixup_random_paths) free(mixup_random_paths);
    return d;
}
#else // OPENCV
/*
In-place weighted blend of two equally-sized images:
new = alpha*new + beta*old. Parallelized across pixels with OpenMP.
*/
void blend_images(image new_img, float alpha, image old_img, float beta)
{
    const int total = new_img.w * new_img.h * new_img.c;
    int idx;
    #pragma omp parallel for
    for (idx = 0; idx < total; ++idx) {
        new_img.data[idx] = alpha * new_img.data[idx] + beta * old_img.data[idx];
    }
}
/*
Non-OpenCV fallback for load_data_detection: loads n augmented detection
examples (jitter/crop, flip, HSV distortion, letterboxing, mixup, sequential
"track" sampling). No blur support in this build.
Fix: r_scale was the only augmentation parameter left uninitialized (the
OpenCV variant initializes it to 0); although the first loop iteration always
assigns it before use today, initializing it removes the latent UB and keeps
the two variants consistent.
*/
data load_data_detection(int n, char **paths, int m, int w, int h, int c, int boxes, int classes, int use_flip, int use_blur, int use_mixup, float jitter,
    float hue, float saturation, float exposure, int mini_batch, int track, int augment_speed, int letter_box, int show_imgs)
{
    const int random_index = random_gen();
    c = c ? c : 3;                       // default to RGB
    char **random_paths;
    char **mixup_random_paths = NULL;
    // "track" keeps frames of the same sequence together (video training).
    if(track) random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed);
    else random_paths = get_random_paths(paths, n, m);
    int mixup = use_mixup ? random_gen() % 2 : 0;   // coin-flip per batch
    //printf("\n mixup = %d \n", mixup);
    if (mixup) {
        // Second image stream that gets blended onto the first.
        if (track) mixup_random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed);
        else mixup_random_paths = get_random_paths(paths, n, m);
    }
    int i;
    data d = { 0 };
    d.shallow = 0;
    d.X.rows = n;
    d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*c;
    // Precomputed augmentation parameters, shared across a tracked sequence.
    // r_scale is now zero-initialized like its siblings (see header comment).
    float r1 = 0, r2 = 0, r3 = 0, r4 = 0, r_scale = 0;
    float dhue = 0, dsat = 0, dexp = 0, flip = 0;
    int augmentation_calculated = 0;
    d.y = make_matrix(n, 5 * boxes);
    int i_mixup = 0;
    for (i_mixup = 0; i_mixup <= mixup; i_mixup++) {
        if (i_mixup) augmentation_calculated = 0;
        for (i = 0; i < n; ++i) {
            float *truth = (float*)calloc(5 * boxes, sizeof(float));
            char *filename = (i_mixup) ? mixup_random_paths[i] : random_paths[i];
            image orig = load_image(filename, 0, 0, c);
            int oh = orig.h;
            int ow = orig.w;
            int dw = (ow*jitter);
            int dh = (oh*jitter);
            // Draw fresh augmentation params; when tracking, reuse them for
            // the whole mini-batch so a sequence is augmented consistently.
            if (!augmentation_calculated || !track)
            {
                augmentation_calculated = 1;
                r1 = random_float();
                r2 = random_float();
                r3 = random_float();
                r4 = random_float();
                r_scale = random_float();
                dhue = rand_uniform_strong(-hue, hue);
                dsat = rand_scale(saturation);
                dexp = rand_scale(exposure);
                flip = use_flip ? random_gen() % 2 : 0;
            }
            int pleft = rand_precalc_random(-dw, dw, r1);
            int pright = rand_precalc_random(-dw, dw, r2);
            int ptop = rand_precalc_random(-dh, dh, r3);
            int pbot = rand_precalc_random(-dh, dh, r4);
            float scale = rand_precalc_random(.25, 2, r_scale); // unused currently
            if (letter_box)
            {
                // Widen the crop so the network aspect ratio is matched by
                // padding instead of stretching.
                float img_ar = (float)ow / (float)oh;
                float net_ar = (float)w / (float)h;
                float result_ar = img_ar / net_ar;
                //printf(" ow = %d, oh = %d, w = %d, h = %d, img_ar = %f, net_ar = %f, result_ar = %f \n", ow, oh, w, h, img_ar, net_ar, result_ar);
                if (result_ar > 1) // sheight - should be increased
                {
                    float oh_tmp = ow / net_ar;
                    float delta_h = (oh_tmp - oh) / 2;
                    ptop = ptop - delta_h;
                    pbot = pbot - delta_h;
                    //printf(" result_ar = %f, oh_tmp = %f, delta_h = %d, ptop = %f, pbot = %f \n", result_ar, oh_tmp, delta_h, ptop, pbot);
                }
                else // swidth - should be increased
                {
                    float ow_tmp = oh * net_ar;
                    float delta_w = (ow_tmp - ow) / 2;
                    pleft = pleft - delta_w;
                    pright = pright - delta_w;
                    //printf(" result_ar = %f, ow_tmp = %f, delta_w = %d, pleft = %f, pright = %f \n", result_ar, ow_tmp, delta_w, pleft, pright);
                }
            }
            int swidth = ow - pleft - pright;
            int sheight = oh - ptop - pbot;
            // Crop scale and normalized origin used to remap the boxes.
            float sx = (float)swidth / ow;
            float sy = (float)sheight / oh;
            image cropped = crop_image(orig, pleft, ptop, swidth, sheight);
            float dx = ((float)pleft / ow) / sx;
            float dy = ((float)ptop / oh) / sy;
            image sized = resize_image(cropped, w, h);
            if (flip) flip_image(sized);
            distort_image(sized, dhue, dsat, dexp);
            //random_distort_image(sized, hue, saturation, exposure);
            fill_truth_detection(filename, boxes, truth, classes, flip, dx, dy, 1. / sx, 1. / sy, w, h);
            if (i_mixup) {
                // Blend this image 50/50 with the first pass' image and
                // append its boxes to the first pass' truth.
                image old_img = sized;
                old_img.data = d.X.vals[i];
                //show_image(sized, "new");
                //show_image(old_img, "old");
                //wait_until_press_key_cv();
                blend_images(sized, 0.5, old_img, 0.5);
                blend_truth(truth, boxes, d.y.vals[i]);
                free_image(old_img);
            }
            d.X.vals[i] = sized.data;
            memcpy(d.y.vals[i], truth, 5 * boxes * sizeof(float));
            if (show_imgs)// && i_mixup)
            {
                // Debug path: dump the augmented image with its boxes drawn.
                char buff[1000];
                sprintf(buff, "aug_%d_%d_%s_%d", random_index, i, basecfg(filename), random_gen());
                int t;
                for (t = 0; t < boxes; ++t) {
                    box b = float_to_box_stride(d.y.vals[i] + t*(4 + 1), 1);
                    if (!b.x) break;
                    int left = (b.x - b.w / 2.)*sized.w;
                    int right = (b.x + b.w / 2.)*sized.w;
                    int top = (b.y - b.h / 2.)*sized.h;
                    int bot = (b.y + b.h / 2.)*sized.h;
                    draw_box_width(sized, left, top, right, bot, 1, 150, 100, 50); // 3 channels RGB
                }
                save_image(sized, buff);
                if (show_imgs == 1) {
                    show_image(sized, buff);
                    wait_until_press_key_cv();
                }
                printf("\nYou use flag -show_imgs, so will be saved aug_...jpg images. Press Enter: \n");
                //getchar();
            }
            free_image(orig);
            free_image(cropped);
            free(truth);
        }
    }
    free(random_paths);
    if (mixup_random_paths) free(mixup_random_paths);
    return d;
}
#endif // OPENCV
/*
pthread entry point: loads one chunk of data described by *ptr (a
heap-allocated load_args that is freed here) into *a.d, or *a.im/*a.resized
for the single-image types, dispatching on a.type.
*/
void *load_thread(void *ptr)
{
    //srand(time(0));
    //printf("Loading data: %d\n", random_gen());
    load_args a = *(struct load_args*)ptr;
    // Zero means "unset" for these augmentation factors; 1 is neutral.
    if (a.exposure == 0) a.exposure = 1;
    if (a.saturation == 0) a.saturation = 1;
    if (a.aspect == 0) a.aspect = 1;
    switch (a.type) {
        case OLD_CLASSIFICATION_DATA:
            *a.d = load_data_old(a.paths, a.n, a.m, a.labels, a.classes, a.w, a.h);
            break;
        case CLASSIFICATION_DATA:
            *a.d = load_data_augment(a.paths, a.n, a.m, a.labels, a.classes, a.hierarchy, a.flip, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
            break;
        case SUPER_DATA:
            *a.d = load_data_super(a.paths, a.n, a.m, a.w, a.h, a.scale);
            break;
        case WRITING_DATA:
            *a.d = load_data_writing(a.paths, a.n, a.m, a.w, a.h, a.out_w, a.out_h);
            break;
        case REGION_DATA:
            *a.d = load_data_region(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure);
            break;
        case DETECTION_DATA:
            *a.d = load_data_detection(a.n, a.paths, a.m, a.w, a.h, a.c, a.num_boxes, a.classes, a.flip, a.blur, a.mixup, a.jitter,
                a.hue, a.saturation, a.exposure, a.mini_batch, a.track, a.augment_speed, a.letter_box, a.show_imgs);
            break;
        case SWAG_DATA:
            *a.d = load_data_swag(a.paths, a.n, a.classes, a.jitter);
            break;
        case COMPARE_DATA:
            *a.d = load_data_compare(a.n, a.paths, a.m, a.classes, a.w, a.h);
            break;
        case IMAGE_DATA:
            *(a.im) = load_image(a.path, 0, 0, a.c);
            *(a.resized) = resize_image(*(a.im), a.w, a.h);
            break;
        case LETTERBOX_DATA:
            *(a.im) = load_image(a.path, 0, 0, a.c);
            *(a.resized) = letterbox_image(*(a.im), a.w, a.h);
            break;
        case TAG_DATA:
            *a.d = load_data_tag(a.paths, a.n, a.m, a.classes, a.flip, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
            break;
        default:
            break;
    }
    free(ptr);
    return 0;
}
/*
Spawn a single worker thread running load_thread(). The args struct is
copied to the heap; the worker owns and frees that copy.
*/
pthread_t load_data_in_thread(load_args args)
{
    struct load_args *copy = (load_args*)calloc(1, sizeof(struct load_args));
    pthread_t tid;
    *copy = args;
    if (pthread_create(&tid, 0, load_thread, copy)) error("Thread creation failed");
    return tid;
}
/*
Fan-out loader: splits a load request of args.n examples across args.threads
worker threads, joins them, and concatenates the partial results into
*args.d. Runs as a pthread itself (see load_data()). Frees the heap-allocated
args it receives.
*/
void *load_threads(void *ptr)
{
    //srand(time(0));
    int i;
    load_args args = *(load_args *)ptr;
    if (args.threads == 0) args.threads = 1;
    data *out = args.d;
    int total = args.n;
    free(ptr);
    data* buffers = (data*)calloc(args.threads, sizeof(data));
    pthread_t* threads = (pthread_t*)calloc(args.threads, sizeof(pthread_t));
    for(i = 0; i < args.threads; ++i){
        args.d = buffers + i;
        // Integer split that sums exactly to total across all threads.
        args.n = (i+1) * total/args.threads - i * total/args.threads;
        threads[i] = load_data_in_thread(args);
    }
    for(i = 0; i < args.threads; ++i){
        pthread_join(threads[i], 0);
    }
    *out = concat_datas(buffers, args.threads);
    out->shallow = 0;
    // The concatenated result owns the row buffers, so free the partial
    // results shallowly (row-pointer arrays only).
    for(i = 0; i < args.threads; ++i){
        buffers[i].shallow = 1;
        free_data(buffers[i]);
    }
    free(buffers);
    free(threads);
    return 0;
}
/*
Kick off an asynchronous multi-threaded load (see load_threads). The args
struct is copied to the heap; the spawned thread owns and frees the copy.
*/
pthread_t load_data(load_args args)
{
    struct load_args *copy = (load_args*)calloc(1, sizeof(struct load_args));
    pthread_t tid;
    *copy = args;
    if (pthread_create(&tid, 0, load_threads, copy)) error("Thread creation failed");
    return tid;
}
/*
Load image pairs for dense-prediction training: X is each ".png" image,
y is the matching grayscale "-label.png" image at out_w x out_h.
*/
data load_data_writing(char **paths, int n, int m, int w, int h, int out_w, int out_h)
{
    if (m) paths = get_random_paths(paths, n, m);
    char **label_paths = find_replace_paths(paths, n, ".png", "-label.png");
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.y = load_image_paths_gray(label_paths, n, out_w, out_h);
    if (m) free(paths);
    int idx;
    for (idx = 0; idx < n; ++idx) free(label_paths[idx]);
    free(label_paths);
    return d;
}
/*
Plain classification loader (no augmentation): images plus substring-matched
one-hot labels. With m nonzero, n paths are sampled at random.
*/
data load_data_old(char **paths, int n, int m, char **labels, int k, int w, int h)
{
    if (m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.y = load_labels_paths(paths, n, labels, k, 0);
    d.X = load_image_paths(paths, n, w, h);
    if (m) free(paths);
    return d;
}
/*
data load_data_study(char **paths, int n, int m, char **labels, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
data d = {0};
d.indexes = calloc(n, sizeof(int));
if(m) paths = get_random_paths_indexes(paths, n, m, d.indexes);
d.shallow = 0;
d.X = load_image_augment_paths(paths, n, flip, min, max, size, angle, aspect, hue, saturation, exposure);
d.y = load_labels_paths(paths, n, labels, k);
if(m) free(paths);
return d;
}
*/
/*
Load n examples for super-resolution training: y is a random (optionally
flipped) w*scale x h*scale crop of the source image, X is that crop
downsampled to w x h.
Left byte-identical: the random_crop_image()/random_gen() call order is part
of the behavior.
*/
data load_data_super(char **paths, int n, int m, int w, int h, int scale)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    int i;
    d.X.rows = n;
    d.X.vals = (float**)calloc(n, sizeof(float*));
    d.X.cols = w*h*3;
    d.y.rows = n;
    d.y.vals = (float**)calloc(n, sizeof(float*));
    d.y.cols = w*scale * h*scale * 3;
    for(i = 0; i < n; ++i){
        image im = load_image_color(paths[i], 0, 0);
        image crop = random_crop_image(im, w*scale, h*scale);
        int flip = random_gen()%2;
        if (flip) flip_image(crop);
        image resize = resize_image(crop, w, h);
        // Both buffers are handed to d (not freed here); only the original
        // full-size image is released.
        d.X.vals[i] = resize.data;
        d.y.vals[i] = crop.data;
        free_image(im);
    }
    if(m) free(paths);
    return d;
}
/*
Augmented classification loader: random crop/scale/rotate/HSV-distorted
images plus substring-matched labels (optionally expanded through a label
hierarchy). With m nonzero, n paths are sampled at random.
*/
data load_data_augment(char **paths, int n, int m, char **labels, int k, tree *hierarchy, int use_flip, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
    if (m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_augment_paths(paths, n, use_flip, min, max, size, angle, aspect, hue, saturation, exposure);
    d.y = load_labels_paths(paths, n, labels, k, hierarchy);
    if (m) free(paths);
    return d;
}
/*
Augmented multi-label (tag) loader: augmented images plus tag-file-based
targets (see load_tags_paths). Records the training size in d.w/d.h.
*/
data load_data_tag(char **paths, int n, int m, int k, int use_flip, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
    if (m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.w = size;
    d.h = size;
    d.X = load_image_augment_paths(paths, n, use_flip, min, max, size, angle, aspect, hue, saturation, exposure);
    d.y = load_tags_paths(paths, n, k);
    if (m) free(paths);
    return d;
}
/*
Stack two matrices vertically by copying their ROW POINTERS (shallow: the
result shares row buffers with m1 and m2). Assumes m1.cols == m2.cols.
*/
matrix concat_matrix(matrix m1, matrix m2)
{
    matrix out;
    int row;
    int dst = 0;
    out.cols = m1.cols;
    out.rows = m1.rows + m2.rows;
    out.vals = (float**)calloc(out.rows, sizeof(float*));
    for (row = 0; row < m1.rows; ++row) out.vals[dst++] = m1.vals[row];
    for (row = 0; row < m2.rows; ++row) out.vals[dst++] = m2.vals[row];
    return out;
}
/*
Concatenate two data records row-wise. The result is shallow: it shares the
underlying row buffers of d1 and d2 (see concat_matrix).
*/
data concat_data(data d1, data d2)
{
    data merged = {0};
    merged.shallow = 1;
    merged.X = concat_matrix(d1.X, d2.X);
    merged.y = concat_matrix(d1.y, d2.y);
    return merged;
}
/*
Fold n data records into one by repeated concatenation. Each intermediate
accumulator is shallow, so freeing it releases only its row-pointer arrays,
never the shared row buffers.
*/
data concat_datas(data *d, int n)
{
    data merged = {0};
    int idx;
    for (idx = 0; idx < n; ++idx) {
        data grown = concat_data(d[idx], merged);
        free_data(merged);
        merged = grown;
    }
    return merged;
}
/*
Load a CSV file as a classification dataset: column 'target' is popped out
of the feature matrix and one-hot encoded into k classes as y; the remaining
columns become X.
*/
data load_categorical_data_csv(char *filename, int target, int k)
{
    matrix X = csv_to_matrix(filename);
    float *truth_1d = pop_column(&X, target);
    float **truth = one_hot_encode(truth_1d, X.rows, k);
    data d = {0};
    d.shallow = 0;
    d.X = X;
    d.y.rows = X.rows;
    d.y.cols = k;
    d.y.vals = truth;
    free(truth_1d);
    return d;
}
/*
Load one CIFAR-10 binary batch file (10000 records of 1 label byte followed
by 3072 pixel bytes). Pixels are scaled to [0,1]; labels are one-hot.
Fix: the fread() return value was ignored, so a truncated or corrupt file
silently yielded uninitialized training data; a short read now aborts via
file_error(), consistent with the fopen check above it.
*/
data load_cifar10_data(char *filename)
{
    data d = {0};
    d.shallow = 0;
    long i,j;
    matrix X = make_matrix(10000, 3072);
    matrix y = make_matrix(10000, 10);
    d.X = X;
    d.y = y;
    FILE *fp = fopen(filename, "rb");
    if(!fp) file_error(filename);
    for(i = 0; i < 10000; ++i){
        unsigned char bytes[3073];
        // Abort on a short/failed read instead of consuming garbage.
        if(fread(bytes, 1, 3073, fp) != 3073) file_error(filename);
        int class_id = bytes[0];
        y.vals[i][class_id] = 1;
        for(j = 0; j < X.cols; ++j){
            X.vals[i][j] = (double)bytes[j+1];
        }
    }
    //translate_data_rows(d, -128);
    scale_data_rows(d, 1./255);
    //normalize_data_rows(d);
    fclose(fp);
    return d;
}
/*
Copy n uniformly-sampled rows (with replacement) of d into the contiguous
output buffers X and y, one row per draw.
*/
void get_random_batch(data d, int n, float *X, float *y)
{
    int row;
    for (row = 0; row < n; ++row) {
        const int pick = random_gen()%d.X.rows;
        memcpy(X + row*d.X.cols, d.X.vals[pick], d.X.cols*sizeof(float));
        memcpy(y + row*d.y.cols, d.y.vals[pick], d.y.cols*sizeof(float));
    }
}
/*
Copy n consecutive rows of d, starting at 'offset', into the contiguous
output buffers X and y. The caller must ensure offset+n <= d.X.rows.
*/
void get_next_batch(data d, int n, int offset, float *X, float *y)
{
    int row;
    for (row = 0; row < n; ++row) {
        const int src = offset + row;
        memcpy(X + row*d.X.cols, d.X.vals[src], d.X.cols*sizeof(float));
        memcpy(y + row*d.y.cols, d.y.vals[src], d.y.cols*sizeof(float));
    }
}
/*
In-place label smoothing: each target becomes
eps/num_classes + (1-eps)*target with eps = 0.1.
*/
void smooth_data(data d)
{
    const float scale = 1. / d.y.cols;
    const float eps = .1;
    int row, col;
    for (row = 0; row < d.y.rows; ++row) {
        for (col = 0; col < d.y.cols; ++col) {
            d.y.vals[row][col] = eps * scale + (1-eps) * d.y.vals[row][col];
        }
    }
}
/*
Load all five CIFAR-10 training batches (50000 examples) from the standard
data/cifar path, scale pixels to [0,1] and apply label smoothing.
Fix: the fread() return value was ignored, so a truncated batch file
silently yielded uninitialized training data; a short read now aborts via
file_error(), consistent with the fopen check above it.
*/
data load_all_cifar10()
{
    data d = {0};
    d.shallow = 0;
    int i,j,b;
    matrix X = make_matrix(50000, 3072);
    matrix y = make_matrix(50000, 10);
    d.X = X;
    d.y = y;
    for(b = 0; b < 5; ++b){
        char buff[256];
        sprintf(buff, "data/cifar/cifar-10-batches-bin/data_batch_%d.bin", b+1);
        FILE *fp = fopen(buff, "rb");
        if(!fp) file_error(buff);
        for(i = 0; i < 10000; ++i){
            unsigned char bytes[3073];
            // Abort on a short/failed read instead of consuming garbage.
            if(fread(bytes, 1, 3073, fp) != 3073) file_error(buff);
            int class_id = bytes[0];
            y.vals[i+b*10000][class_id] = 1;
            for(j = 0; j < X.cols; ++j){
                X.vals[i+b*10000][j] = (double)bytes[j+1];
            }
        }
        fclose(fp);
    }
    //normalize_data_rows(d);
    //translate_data_rows(d, -128);
    scale_data_rows(d, 1./255);
    smooth_data(d);
    return d;
}
/*
Load a Go move-prediction dataset: alternating lines of "row col" and a
361-character board string ('1' = own stone, '2' = opponent, else empty).
X is the board encoded as {1,-1,0}; y one-hot encodes the played move.
Fixes: (1) the board line from fgetl() could be NULL on a truncated file and
was dereferenced unconditionally; (2) sscanf() results were unchecked, so a
malformed move line produced garbage row/col and an out-of-bounds write into
y. Both cases now stop reading cleanly, keeping the records parsed so far.
*/
data load_go(char *filename)
{
    FILE *fp = fopen(filename, "rb");
    matrix X = make_matrix(3363059, 361);
    matrix y = make_matrix(3363059, 361);
    int row, col;
    if(!fp) file_error(filename);
    char *label;
    int count = 0;
    while((label = fgetl(fp))){
        int i;
        if(count == X.rows){
            // Grow geometrically if the file is larger than expected.
            X = resize_matrix(X, count*2);
            y = resize_matrix(y, count*2);
        }
        // Stop on a malformed move line or an out-of-range move.
        if(sscanf(label, "%d %d", &row, &col) != 2
            || row < 0 || row >= 19 || col < 0 || col >= 19){
            free(label);
            break;
        }
        char *board = fgetl(fp);
        if(!board){          // truncated file: move line without a board
            free(label);
            break;
        }
        int index = row*19 + col;
        y.vals[count][index] = 1;
        for(i = 0; i < 19*19; ++i){
            float val = 0;
            if(board[i] == '1') val = 1;
            else if(board[i] == '2') val = -1;
            X.vals[count][i] = val;
        }
        ++count;
        free(label);
        free(board);
    }
    // Shrink to the number of records actually read.
    X = resize_matrix(X, count);
    y = resize_matrix(y, count);
    data d = {0};
    d.shallow = 0;
    d.X = X;
    d.y = y;
    fclose(fp);
    return d;
}
/*
In-place Fisher-Yates shuffle of the dataset rows; X and y rows are swapped
together so examples stay aligned with their labels.
Fix: the swap index was drawn with 'random_gen()%i', i.e. from [0, i-1],
which can never leave element i in place and therefore does not produce
uniformly distributed permutations. Drawing from [0, i] ('% (i+1)') is the
standard unbiased Fisher-Yates step.
*/
void randomize_data(data d)
{
    int i;
    for(i = d.X.rows-1; i > 0; --i){
        int index = random_gen()%(i+1);   // uniform over [0, i]
        float *swap = d.X.vals[index];
        d.X.vals[index] = d.X.vals[i];
        d.X.vals[i] = swap;
        swap = d.y.vals[index];
        d.y.vals[index] = d.y.vals[i];
        d.y.vals[i] = swap;
    }
}
/* Multiply every feature of every row of d.X by s, in place. */
void scale_data_rows(data d, float s)
{
    int row = 0;
    while (row < d.X.rows) {
        scale_array(d.X.vals[row], d.X.cols, s);
        ++row;
    }
}
/* Add s to every feature of every row of d.X, in place. */
void translate_data_rows(data d, float s)
{
    int row = 0;
    while (row < d.X.rows) {
        translate_array(d.X.vals[row], d.X.cols, s);
        ++row;
    }
}
/* Normalize each row of d.X in place (see normalize_array). */
void normalize_data_rows(data d)
{
    int row = 0;
    while (row < d.X.rows) {
        normalize_array(d.X.vals[row], d.X.cols);
        ++row;
    }
}
/*
Return a shallow view of the part-th of 'total' contiguous slices of d.
The integer split guarantees the slices cover all rows exactly once.
No buffers are copied; the view shares d's row pointers.
*/
data get_data_part(data d, int part, int total)
{
    const int x_begin = d.X.rows * part / total;
    const int x_end   = d.X.rows * (part + 1) / total;
    const int y_begin = d.y.rows * part / total;
    const int y_end   = d.y.rows * (part + 1) / total;
    data p = {0};
    p.shallow = 1;
    p.X.rows = x_end - x_begin;
    p.y.rows = y_end - y_begin;
    p.X.cols = d.X.cols;
    p.y.cols = d.y.cols;
    p.X.vals = d.X.vals + x_begin;
    p.y.vals = d.y.vals + y_begin;
    return p;
}
/*
Return a shallow dataset of 'num' rows sampled uniformly WITH replacement
from d. Only fresh row-pointer arrays are allocated; the row buffers are
shared with d (free with shallow semantics).
*/
data get_random_data(data d, int num)
{
    data sample = {0};
    sample.shallow = 1;
    sample.X.rows = num;
    sample.y.rows = num;
    sample.X.cols = d.X.cols;
    sample.y.cols = d.y.cols;
    sample.X.vals = (float**)calloc(num, sizeof(float*));
    sample.y.vals = (float**)calloc(num, sizeof(float*));
    int row;
    for (row = 0; row < num; ++row) {
        const int pick = random_gen()%d.X.rows;
        sample.X.vals[row] = d.X.vals[pick];
        sample.y.vals[row] = d.y.vals[pick];
    }
    return sample;
}
/*
Split d into a train/test pair for cross-validation: the part-th of 'total'
contiguous slices becomes the test set, everything else the train set.
Returns a heap-allocated array of two shallow data records
([0] = train, [1] = test) that share d's row buffers.
Left byte-identical: the three copy loops depend on exact index arithmetic.
*/
data *split_data(data d, int part, int total)
{
    data* split = (data*)calloc(2, sizeof(data));
    int i;
    // Test slice is rows [start, end); train is everything outside it.
    int start = part*d.X.rows/total;
    int end = (part+1)*d.X.rows/total;
    data train;
    data test;
    train.shallow = test.shallow = 1;
    test.X.rows = test.y.rows = end-start;
    train.X.rows = train.y.rows = d.X.rows - (end-start);
    train.X.cols = test.X.cols = d.X.cols;
    train.y.cols = test.y.cols = d.y.cols;
    train.X.vals = (float**)calloc(train.X.rows, sizeof(float*));
    test.X.vals = (float**)calloc(test.X.rows, sizeof(float*));
    train.y.vals = (float**)calloc(train.y.rows, sizeof(float*));
    test.y.vals = (float**)calloc(test.y.rows, sizeof(float*));
    // Rows before the test slice go to train...
    for(i = 0; i < start; ++i){
        train.X.vals[i] = d.X.vals[i];
        train.y.vals[i] = d.y.vals[i];
    }
    // ...the slice itself to test...
    for(i = start; i < end; ++i){
        test.X.vals[i-start] = d.X.vals[i];
        test.y.vals[i-start] = d.y.vals[i];
    }
    // ...and the remainder to train, shifted left over the gap.
    for(i = end; i < d.X.rows; ++i){
        train.X.vals[i-(end-start)] = d.X.vals[i];
        train.y.vals[i-(end-start)] = d.y.vals[i];
    }
    split[0] = train;
    split[1] = test;
    return split;
}
|
target-11.c | /* { dg-require-effective-target offload_device_nonshared_as } */
#include <stdlib.h>
#include <assert.h>
#define N 32
/* Check 'always' map modifiers on array sections within a target data
   region where p[0:N] is alloc-mapped (device copy uninitialized). */
void test_array_section (int *p)
{
#pragma omp target data map(alloc: p[0:N])
{
int ok = 1;
/* Host-side writes: without 'always to' these would not reach the device. */
for (int i = 10; i < 10 + 4; i++)
p[i] = 997 * i;
/* 'always to' forces a host->device copy of p[10:4] even though p is
   already present from the enclosing data region. */
#pragma omp target map(always to:p[10:4]) map(tofrom: ok)
for (int i = 10; i < 10 + 4; i++)
if (p[i] != 997 * i)
ok = 0;
assert (ok);
/* 'always from' forces a device->host copy of p[7:9] at region end,
   overwriting the host values written above. */
#pragma omp target map(always from:p[7:9])
for (int i = 0; i < N; i++)
p[i] = i;
}
}
/* Check the 'always' map modifier on scalars inside a target data region:
   without 'always', inner target constructs reuse the device copies mapped
   by the data region and do no refresh; with 'always', data moves on every
   region entry/exit regardless of presence. */
int main ()
{
int aa = 0, bb = 0, cc = 0, dd = 0;
/* Device copies are created here: aa=0, bb=0; cc/dd are 'from' (device
   values uninitialized until written). */
#pragma omp target data map(tofrom: aa) map(to: bb) map(from: cc, dd)
{
int ok;
aa = bb = cc = 1;
/* Set dd on target to 0 for the further check. */
#pragma omp target map(always to: dd)
;
dd = 1;
#pragma omp target map(tofrom: aa) map(always to: bb) \
map(always from: cc) map(to: dd) map(from: ok)
{
/* bb is always to, aa and dd are not. */
ok = (aa == 0) && (bb == 1) && (dd == 0);
aa = bb = cc = dd = 2;
}
assert (ok);
assert (aa == 1);
assert (bb == 1);
assert (cc == 2); /* cc is always from. */
assert (dd == 1);
dd = 3;
#pragma omp target map(from: cc) map(always to: dd) map(from: ok)
{
ok = (dd == 3); /* dd is always to. */
cc = dd = 4;
}
assert (ok);
assert (cc == 2);
assert (dd == 3);
}
/* Data region end: aa (tofrom) and cc/dd (from) are copied back. */
assert (aa == 2);
assert (bb == 1);
assert (cc == 4);
assert (dd == 4);
int *array = calloc (N, sizeof (int));
test_array_section (array);
/* Only the 'always from' section p[7:9] was copied back to the host. */
for (int i = 0; i < 7; i++)
assert (array[i] == 0);
for (int i = 7; i < 7 + 9; i++)
assert (array[i] == i);
for (int i = 7 + 9; i < N; i++)
assert (array[i] == 0);
free (array);
return 0;
}
|
operators.h | /*
Project Name : OpenMEEG
© INRIA and ENPC (contributors: Geoffray ADDE, Maureen CLERC, Alexandre
GRAMFORT, Renaud KERIVEN, Jan KYBIC, Perrine LANDREAU, Théodore PAPADOPOULO,
Emmanuel OLIVI
Maureen.Clerc.AT.inria.fr, keriven.AT.certis.enpc.fr,
kybic.AT.fel.cvut.cz, papadop.AT.inria.fr)
The OpenMEEG software is a C++ package for solving the forward/inverse
problems of electroencephalography and magnetoencephalography.
This software is governed by the CeCILL-B license under French law and
abiding by the rules of distribution of free software. You can use,
modify and/ or redistribute the software under the terms of the CeCILL-B
license as circulated by CEA, CNRS and INRIA at the following URL
"http://www.cecill.info".
As a counterpart to the access to the source code and rights to copy,
modify and redistribute granted by the license, users are provided only
with a limited warranty and the software's authors, the holders of the
economic rights, and the successive licensors have only limited
liability.
In this respect, the user's attention is drawn to the risks associated
with loading, using, modifying and/or developing or reproducing the
software by the user in light of its specific status of free software,
that may mean that it is complicated to manipulate, and that also
therefore means that it is reserved for developers and experienced
professionals having in-depth computer knowledge. Users are therefore
encouraged to load and test the software's suitability as regards their
requirements in conditions enabling the security of their systems and/or
data to be ensured and, more generally, to use and operate it in the
same conditions as regards security.
The fact that you are presently reading this means that you have had
knowledge of the CeCILL-B license and that you accept its terms.
*/
/// \file
/// \brief File containing the integral operators.
#pragma once
#include <iostream>
#include <vector.h>
#include <matrix.h>
#include <symmatrix.h>
#include <sparse_matrix.h>
#include <geometry.h>
#include <integrator.h>
#include <analytics.h>
#include <progressbar.h>
namespace OpenMEEG {
// TODO: Use overloading and remove the internal suffix.
// Forward declarations of the non-template assembly routines (implemented in
// the corresponding translation unit). "internal" variants evaluate the
// operator at arbitrary points (Vertices) rather than on mesh vertices.
void operatorSinternal(const Mesh&,Matrix&,const Vertices&,const double&);
void operatorDinternal(const Mesh&,Matrix&,const Vertices&,const double&);
// Ferguson operator of a mesh as seen from a single point (Vect3).
void operatorFerguson(const Vect3&,const Mesh&,Matrix&,const unsigned&,const double&);
// Right-hand-side potentials for dipolar sources (position, moment) and
// monopolar sources (position, charge); the bool selects adaptive integration.
void operatorDipolePotDer(const Vect3&,const Vect3&,const Mesh&,Vector&,const double&,const unsigned,const bool);
void operatorDipolePot(const Vect3&,const Vect3&,const Mesh&,Vector&,const double&,const unsigned,const bool);
void operatorMonopolePotDer(const Vect3&, const double&, const Mesh&, Vector&, const double&, const unsigned, const bool);
void operatorMonopolePot(const Vect3&, const double&, const Mesh&, Vector&, const double&, const unsigned, const bool);
namespace Details {
// #define ADAPT_LHS
template <template <typename,typename> class Integrator>
void operatorDipolePot(const Vect3& r0,const Vect3& q,const Mesh& m,Vector& rhs,const double& coeff,const unsigned gauss_order) {
static analyticDipPot anaDP;
anaDP.init(q,r0);
Integrator<double,analyticDipPot> gauss(0.001);
gauss->setOrder(gauss_order);
#pragma omp parallel for
#if defined NO_OPENMP || defined OPENMP_RANGEFOR
for (const auto& triangle : m.triangles()) {
#elif defined OPENMP_ITERATOR
for (Triangles::const_iterator tit=m.triangles().begin();tit<m.triangles().end();++tit) {
const Triangle& triangle = *tit;
#else
for (int i=0;i<m.triangles().size();++i) {
const Triangle& triangle = *(m.triangles().begin()+i);
#endif
const double d = gauss->integrate(anaDP,triangle);
#pragma omp critical
rhs(triangle.index()) += d*coeff;
}
}
// T can be a Matrix or SymMatrix
// Elementary block of the double-layer operator D: contribution of source
// triangle T2 onto target triangle T1, accumulated into mat for the three
// P1 basis functions attached to T2's vertices.
template <typename T>
inline void operatorD(const Triangle& T1,const Triangle& T2,T& mat,const double& coeff,const unsigned gauss_order) {
//this version of operatorD add in the Matrix the contribution of T2 on T1
// for all the P1 functions it gets involved
// consider varying order of quadrature with the distance between T1 and T2
analyticD3 analyD(T2);
#ifdef ADAPT_LHS
// Adaptive quadrature: tolerance 0.005, order refined per-call.
AdaptiveIntegrator<Vect3, analyticD3> gauss(0.005);
gauss.setOrder(gauss_order);
#else
// STATIC_OMP makes the integrator thread-local-static under OpenMP builds.
STATIC_OMP Integrator<Vect3, analyticD3> gauss(gauss_order);
#endif
// total(i) is the integral over T1 of the kernel paired with the i-th
// P1 shape function of T2.
const Vect3 total = gauss.integrate(analyD,T1);
for (unsigned i=0; i<3; ++i)
mat(T1.index(),T2.vertex(i).index()) += total(i)*coeff;
}
// Elementary single-layer operator S: integral of the (pre-initialized)
// analytic kernel analyS over triangle T2, using Gauss quadrature of the
// requested order (or adaptive quadrature when ADAPT_LHS is defined).
inline double operatorS(const analyticS& analyS,const Triangle& T2,const unsigned gauss_order) {
#ifdef ADAPT_LHS
AdaptiveIntegrator<double,analyticS> gauss(0.005);
#else
// STATIC_OMP makes the integrator thread-local-static under OpenMP builds.
STATIC_OMP Integrator<double,analyticS> gauss;
#endif
gauss.setOrder(gauss_order);
return gauss.integrate(analyS,T2);
}
template <typename T>
inline double operatorN(const Vertex& V1,const Vertex& V2,const Mesh& m1,const Mesh& m2,const T& mat) {
const bool same_shared_vertex = ((&m1!=&m2) && (V1==V2));
const double factor = (same_shared_vertex) ? 0.5 : 0.25;
double result = 0.0;
for (const auto& tp1 : m1.triangles(V1)) {
const Edge& edge1 = tp1->edge(V1);
const Vect3& CB1 = edge1.vertex(0)-edge1.vertex(1);
const unsigned ind1 = tp1->index()-m1.triangles().front().index();
for (const auto& tp2 : m2.triangles(V2)) {
const unsigned ind2 = tp2->index()-m2.triangles().front().index();
// In the second case, we here divided (precalculated) operatorS by the product of areas.
const double Iqr = (m1.current_barrier() || m2.current_barrier()) ? mat(ind1,ind2) : mat(tp1->index(),tp2->index())/(tp1->area()*tp2->area());
const Edge& edge2 = tp2->edge(V2);
const Vect3& CB2 = edge2.vertex(0)-edge2.vertex(1);
result -= factor*Iqr*dotprod(CB1,CB2);
}
}
return result;
}
// Ferguson-operator contribution of a single vertex V of mesh m, evaluated
// at the observation point x. Sums, over every triangle of m incident to V,
// the opposite-edge vector scaled by the analytic single-layer kernel.
inline Vect3 operatorFerguson(const Vect3& x,const Vertex& V,const Mesh& m) {
Vect3 acc;
acc = 0.0;
// Visit each triangle (A,B,V) of m having V as a vertex.
for (const auto& tptr : m.triangles(V)) {
const Triangle& tri = *tptr;
// The edge opposite to V joins the two remaining vertices of the triangle.
const Edge& opposite = tri.edge(V);
const Vertex& P = opposite.vertex(0);
const Vertex& Q = opposite.vertex(1);
const Vect3 PQ = (P-Q)*(0.5/tri.area());
analyticS kernel(V,P,Q);
acc += PQ*kernel.f(x);
}
return acc;
}
// Elementary P1->P0 coupling: a vertex contributes one third of the triangle
// area when it belongs to the triangle, and zero otherwise.
inline double operatorP1P0(const Triangle& T2,const Vertex& V1) {
return T2.contains(V1) ? T2.area()/3.0 : 0.0;
}
// Double-layer operator D between two meshes: every triangle of m1 against
// every triangle of m2, delegated per triangle pair to Details::operatorD.
template <typename T>
void operatorD(const Mesh& m1,const Mesh& m2,T& mat,const double& coeff,const unsigned gauss_order) {
// This function (OPTIMIZED VERSION) has the following arguments:
// the 2 interacting meshes
// the storage Matrix for the result
// the coefficient to be appleid to each matrix element (depending on conductivities, ...)
// the gauss order parameter (for adaptive integration)
// In this version of the function, in order to skip multiple computations of the same quantities
// loops are run over the triangles but the Matrix cannot be filled in this function anymore
// That's why the filling is done is function Details::operatorD
//
ProgressBar pb(m1.triangles().size());
const Triangles& m1_triangles = m1.triangles();
// The three branches below are equivalent loops over m1's triangles; which
// one compiles depends on the OpenMP support level of the toolchain.
#pragma omp parallel for
#if defined NO_OPENMP || defined OPENMP_RANGEFOR
for (const auto& triangle1 : m1_triangles) {
#elif defined OPENMP_ITERATOR
for (Triangles::const_iterator tit1=m1_triangles.begin();tit1<m1_triangles.end();++tit1) {
const Triangle& triangle1 = *tit1;
#else
for (int i1=0; i1 < m1_triangles.size(); ++i1) {
const Triangle& triangle1 = *(m1_triangles.begin()+i1);
#endif
for (const auto& triangle2 : m2.triangles())
Details::operatorD(triangle1,triangle2,mat,coeff,gauss_order);
++pb;
}
}
}
// Hypersingular operator N between two meshes, accumulated into mat vertex by
// vertex via Details::operatorN. When either mesh is a current barrier, the
// single-layer integrals are first precomputed (divided by triangle-area
// products) into a temporary matrix passed to the per-vertex kernel.
template <typename T>
void operatorN(const Mesh& m1,const Mesh& m2,T& mat,const double& coeff,const unsigned gauss_order) {
// This function has the following arguments:
// the 2 interacting meshes
// the storage Matrix for the result
// the coefficient to be applied to each matrix element (depending on conductivities, ...)
// the gauss order parameter (for adaptive integration)
std::cout << "OPERATOR N ... (arg : mesh " << m1.name() << " , mesh " << m2.name() << " )" << std::endl;
if (&m1==&m2) {
// Self-interaction: the result is symmetric, so the inner loop only covers
// vertex pairs from vit1 onward (upper triangle).
auto NUpdate = [&](const Mesh& m,const auto& M) {
ProgressBar pb(m1.vertices().size());
for (auto vit1=m.vertices().begin();vit1!=m.vertices().end();++vit1) {
#pragma omp parallel for
#if defined NO_OPENMP || defined OPENMP_ITERATOR
for (auto vit2=vit1;vit2<m.vertices().end();++vit2) {
#else
// NOTE(review): this branch iterates [begin, vit1] (lower triangle) and
// uses the captured m1 rather than the lambda parameter m; both are the
// same mesh in every call made below, so behavior matches — confirm if
// the lambda is ever reused differently.
for (int i2=0;i2<=vit1-m1.vertices().begin();++i2) {
const auto vit2 = m1.vertices().begin()+i2;
#endif
mat((*vit1)->index(),(*vit2)->index()) += Details::operatorN(**vit1,**vit2,m,m,M)*coeff;
}
++pb;
}
};
if (m1.current_barrier()) {
// Precompute operator S divided by the product of triangles area.
ProgressBar pb(m1.triangles().size());
SymMatrix matS(m1.triangles().size());
for (Triangles::const_iterator tit1=m1.triangles().begin();tit1!=m1.triangles().end();++tit1) {
const analyticS analyS(*tit1);
// Indices are taken relative to the mesh's first triangle so matS is 0-based.
const unsigned ind1 = tit1->index()-m1.triangles().front().index();
#pragma omp parallel for
#if defined NO_OPENMP || defined OPENMP_ITERATOR
for (Triangles::const_iterator tit2=tit1;tit2<m1.triangles().end();++tit2) {
#else
for (int i2=tit1-m1.triangles().begin();i2<m1.triangles().size();++i2) {
const Triangles::const_iterator tit2 = m1.triangles().begin()+i2;
#endif
const unsigned ind2 = tit2->index()-m2.triangles().front().index();
matS(ind1,ind2) = Details::operatorS(analyS,*tit2,gauss_order)/(tit1->area()*tit2->area());
}
++pb;
}
NUpdate(m1,matS);
} else {
NUpdate(m1,mat);
}
} else {
// Distinct meshes: full rectangular loop over vertex pairs.
auto NUpdate = [&](const Mesh& m1,const Mesh& m2,const auto& M) {
ProgressBar pb(m1.vertices().size());
const VerticesRefs& v2 = m2.vertices();
for (const auto& vertex1 : m1.vertices()) {
#pragma omp parallel for
#if defined NO_OPENMP || defined OPENMP_RANGEFOR
for (const auto& vertex2 : v2) {
#elif defined OPENMP_ITERATOR
for (auto vit2=v2.begin();vit2<v2.end();++vit2) {
const Vertex* vertex2 = *vit2;
#else
for (int i2=0;i2<v2.size();++i2) {
const Vertex* vertex2 = *(v2.begin()+i2);
#endif
mat(vertex1->index(),vertex2->index()) += Details::operatorN(*vertex1,*vertex2,m1,m2,M)*coeff;
}
++pb;
}
};
if (m1.current_barrier() || m2.current_barrier()) {
// Precompute operator S divided by the product of triangles area.
Matrix matS(m1.triangles().size(),m2.triangles().size());
ProgressBar pb(m1.triangles().size());
unsigned i = 0;
for (const auto& triangle1 : m1.triangles()) {
const analyticS analyS(triangle1);
const unsigned ind1 = triangle1.index()-m1.triangles().front().index();
const Triangles& m2_triangles = m2.triangles();
#pragma omp parallel for
#if defined NO_OPENMP || defined OPENMP_RANGEFOR
for (const auto& triangle2 : m2_triangles) {
#elif defined OPENMP_ITERATOR
for (Triangles::const_iterator tit2=m2_triangles.begin();tit2<m2_triangles.end();++tit2) {
const Triangle& triangle2 = *tit2;
#else
for (int i2=0;i2<m2_triangles.size();++i2) {
const Triangle& triangle2 = *(m2_triangles.begin()+i2);
#endif
const unsigned ind2 = triangle2.index()-m2_triangles.front().index();
matS(ind1,ind2) = Details::operatorS(analyS,triangle2,gauss_order)/(triangle1.area()*triangle2.area());
}
++pb;
}
NUpdate(m1,m2,matS);
} else {
NUpdate(m1,m2,mat);
}
}
}
// Single-layer operator S between two meshes, one matrix entry per triangle
// pair. Self-interaction fills only from tit1 onward (the result is
// symmetric); distinct meshes get the full rectangular fill.
template <typename T>
void operatorS(const Mesh& m1,const Mesh& m2,T& mat,const double& coeff,const unsigned gauss_order) {
// This function has the following arguments:
// the 2 interacting meshes
// the storage Matrix for the result
// the coefficient to be applied to each matrix element (depending on conductivities, ...)
// the gauss order parameter (for adaptive integration)
std::cout << "OPERATOR S ... (arg : mesh " << m1.name() << " , mesh " << m2.name() << " )" << std::endl;
// The operator S is given by Sij=\Int G*PSI(I, i)*Psi(J, j) with
// PSI(A, a) is a P0 test function on layer A and triangle a
if (&m1==&m2) {
ProgressBar pb(m1.triangles().size());
for (Triangles::const_iterator tit1=m1.triangles().begin(); tit1!=m1.triangles().end(); ++tit1,++pb) {
const analyticS analyS(*tit1);
// NOTE(review): unlike the other loops in this header, this #if does not
// test NO_OPENMP; a NO_OPENMP build falls through to the index loop,
// which is still valid serial code — confirm this asymmetry is intended.
#pragma omp parallel for
#if defined OPENMP_ITERATOR
for (Triangles::const_iterator tit2=tit1;tit2<m1.triangles().end();++tit2) {
#else
for (int i2=tit1-m1.triangles().begin();i2<m1.triangles().size();++i2) {
const Triangles::const_iterator tit2 = m1.triangles().begin()+i2;
#endif
mat(tit1->index(),tit2->index()) = Details::operatorS(analyS,*tit2,gauss_order)*coeff;
}
}
} else {
// TODO check the symmetry of Details::operatorS.
// if we invert tit1 with tit2: results in HeadMat differs at 4.e-5 which is too big.
// using ADAPT_LHS with tolerance at 0.000005 (for Details::opS) drops this at 6.e-6. (but increase the computation time)
ProgressBar pb(m1.triangles().size());
for (const auto& triangle1 : m1.triangles()) {
const analyticS analyS(triangle1);
const Triangles& m2_triangles = m2.triangles();
#pragma omp parallel for
#if defined NO_OPENMP || defined OPENMP_RANGEFOR
for (const auto& triangle2 : m2_triangles) {
#elif defined OPENMP_ITERATOR
for (Triangles::const_iterator tit2=m2_triangles.begin();tit2<m2_triangles.end();++tit2) {
const Triangle& triangle2 = *tit2;
#else
for (int i2=0;i2<m2_triangles.size();++i2) {
const Triangle& triangle2 = *(m2_triangles.begin()+i2);
#endif
mat(triangle1.index(),triangle2.index()) = Details::operatorS(analyS,triangle2,gauss_order)*coeff;
}
++pb;
}
}
}
// Public entry point for the double-layer operator D: logs and delegates to
// the optimized Details::operatorD with meshes in (target, source) order.
template <typename T>
void operatorD(const Mesh& m1,const Mesh& m2,T& mat,const double& coeff,const unsigned gauss_order) {
// This function (OPTIMIZED VERSION) has the following arguments:
// the 2 interacting meshes
// the storage Matrix for the result
// the coefficient to be applied to each matrix element (depending on conductivities, ...)
// the gauss order parameter (for adaptive integration)
std::cout << "OPERATOR D... (arg : mesh " << m1.name() << " , mesh " << m2.name() << " )" << std::endl;
Details::operatorD(m1,m2,mat,coeff,gauss_order);
}
// Adjoint double-layer operator D*: same kernel as operatorD but with the
// roles of the two meshes swapped (note the (m2,m1) argument order below).
template <typename T>
void operatorDstar(const Mesh& m1,const Mesh& m2,T& mat,const double& coeff,const unsigned gauss_order) {
// This function (OPTIMIZED VERSION) has the following arguments:
// the 2 interacting meshes
// the storage Matrix for the result
// the coefficient to be applied to each matrix element (depending on conductivities, ...)
// the gauss order parameter (for adaptive integration)
std::cout << "OPERATOR D*... (arg : mesh " << m1.name() << " , mesh " << m2.name() << ')' << std::endl;
Details::operatorD(m2,m1,mat,coeff,gauss_order);
}
// P1->P0 coupling operator for a single mesh: increments mat(triangle,vertex)
// by coeff times the elementary contribution of each of the triangle's
// vertices (see Details::operatorP1P0).
template <typename T>
void operatorP1P0(const Mesh& m,T& mat,const double& coeff) {
std::cout << "OPERATOR P1P0... (arg : mesh " << m.name() << " )" << std::endl;
for (const auto& tri : m.triangles()) {
for (const auto& vp : tri)
mat(tri.index(),vp->index()) += Details::operatorP1P0(tri,*vp)*coeff;
}
}
}
|
tree.h | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_TREE_H_
#define LIGHTGBM_TREE_H_
#include <LightGBM/dataset.h>
#include <LightGBM/meta.h>
#include <string>
#include <map>
#include <memory>
#include <unordered_map>
#include <vector>
namespace LightGBM {
#define kCategoricalMask (1)
#define kDefaultLeftMask (2)
/*!
* \brief Tree model
*/
class Tree {
public:
/*!
* \brief Constructor
* \param max_leaves The number of max leaves
* \param track_branch_features Whether to keep track of ancestors of leaf nodes
* \param is_linear Whether the tree has linear models at each leaf
*/
explicit Tree(int max_leaves, bool track_branch_features, bool is_linear);
/*!
* \brief Constructor, from a string
* \param str Model string
* \param used_len used count of str
*/
Tree(const char* str, size_t* used_len);
virtual ~Tree() noexcept = default;
/*!
* \brief Performing a split on tree leaves.
* \param leaf Index of leaf to be split
* \param feature Index of feature; the converted index after removing useless features
* \param real_feature Index of feature, the original index on data
* \param threshold_bin Threshold(bin) of split
* \param threshold_double Threshold on feature value
* \param left_value Model Left child output
* \param right_value Model Right child output
* \param left_cnt Count of left child
* \param right_cnt Count of right child
* \param left_weight Weight of left child
* \param right_weight Weight of right child
* \param gain Split gain
* \param missing_type missing type
* \param default_left default direction for missing value
* \return The index of new leaf.
*/
int Split(int leaf, int feature, int real_feature, uint32_t threshold_bin,
double threshold_double, double left_value, double right_value,
int left_cnt, int right_cnt, double left_weight, double right_weight,
float gain, MissingType missing_type, bool default_left);
/*!
* \brief Performing a split on tree leaves, with categorical feature
* \param leaf Index of leaf to be split
* \param feature Index of feature; the converted index after removing useless features
* \param real_feature Index of feature, the original index on data
* \param threshold_bin Threshold(bin) of split, use bitset to represent
* \param num_threshold_bin size of threshold_bin
* \param threshold Thresholds of real feature value, use bitset to represent
* \param num_threshold size of threshold
* \param left_value Model Left child output
* \param right_value Model Right child output
* \param left_cnt Count of left child
* \param right_cnt Count of right child
* \param left_weight Weight of left child
* \param right_weight Weight of right child
* \param gain Split gain
* \return The index of new leaf.
*/
int SplitCategorical(int leaf, int feature, int real_feature, const uint32_t* threshold_bin, int num_threshold_bin,
const uint32_t* threshold, int num_threshold, double left_value, double right_value,
int left_cnt, int right_cnt, double left_weight, double right_weight, float gain, MissingType missing_type);
/*! \brief Get the output of one leaf */
inline double LeafOutput(int leaf) const { return leaf_value_[leaf]; }
/*! \brief Set the output of one leaf (values within kZeroThreshold are snapped to 0) */
inline void SetLeafOutput(int leaf, double output) {
leaf_value_[leaf] = MaybeRoundToZero(output);
}
/*!
* \brief Adding prediction value of this tree model to scores
* \param data The dataset
* \param num_data Number of total data
* \param score Will add prediction to score
*/
virtual void AddPredictionToScore(const Dataset* data,
data_size_t num_data,
double* score) const;
/*!
* \brief Adding prediction value of this tree model to scores
* \param data The dataset
* \param used_data_indices Indices of used data
* \param num_data Number of total data
* \param score Will add prediction to score
*/
virtual void AddPredictionToScore(const Dataset* data,
const data_size_t* used_data_indices,
data_size_t num_data, double* score) const;
/*!
* \brief Get upper bound leaf value of this tree model
*/
double GetUpperBoundValue() const;
/*!
* \brief Get lower bound leaf value of this tree model
*/
double GetLowerBoundValue() const;
/*!
* \brief Prediction on one record
* \param feature_values Feature value of this record
* \return Prediction result
*/
inline double Predict(const double* feature_values) const;
inline double PredictByMap(const std::unordered_map<int, double>& feature_values) const;
inline int PredictLeafIndex(const double* feature_values) const;
inline int PredictLeafIndexByMap(const std::unordered_map<int, double>& feature_values) const;
inline void PredictContrib(const double* feature_values, int num_features, double* output);
inline void PredictContribByMap(const std::unordered_map<int, double>& feature_values,
int num_features, std::unordered_map<int, double>* output);
/*! \brief Get Number of leaves*/
inline int num_leaves() const { return num_leaves_; }
/*! \brief Get depth of specific leaf*/
inline int leaf_depth(int leaf_idx) const { return leaf_depth_[leaf_idx]; }
/*! \brief Get parent of specific leaf*/
inline int leaf_parent(int leaf_idx) const {return leaf_parent_[leaf_idx]; }
/*! \brief Get feature of specific split (original feature index)*/
inline int split_feature(int split_idx) const { return split_feature_[split_idx]; }
/*! \brief Get feature of specific split*/
inline int split_feature_inner(int split_idx) const { return split_feature_inner_[split_idx]; }
/*! \brief Get features on leaf's branch*/
inline std::vector<int> branch_features(int leaf) const { return branch_features_[leaf]; }
/*! \brief Get gain of specific split*/
inline double split_gain(int split_idx) const { return split_gain_[split_idx]; }
/*! \brief Get output value of specific internal node*/
inline double internal_value(int node_idx) const {
return internal_value_[node_idx];
}
/*! \brief Whether the split at node_idx is numerical (as opposed to categorical)*/
inline bool IsNumericalSplit(int node_idx) const {
return !GetDecisionType(decision_type_[node_idx], kCategoricalMask);
}
inline int left_child(int node_idx) const { return left_child_[node_idx]; }
inline int right_child(int node_idx) const { return right_child_[node_idx]; }
/*! \brief Get the bin-space threshold of specific internal node*/
inline uint32_t threshold_in_bin(int node_idx) const {
return threshold_in_bin_[node_idx];
}
/*! \brief Get the number of data points that fall at or below this node.
* Negative node ids are leaves, encoded as the bitwise NOT of the leaf index. */
inline int data_count(int node) const { return node >= 0 ? internal_count_[node] : leaf_count_[~node]; }
/*! \brief Get the summed weights of data points that fall at or below this node.
* Fix: internal_weight_/leaf_weight_ are vectors of double, so the return type
* must be double — returning int silently truncated fractional weight sums. */
inline double data_weight(int node) const { return node >= 0 ? internal_weight_[node] : leaf_weight_[~node]; }
/*!
* \brief Shrinkage for the tree's output
* shrinkage rate (a.k.a learning rate) is used to tune the training process
* \param rate The factor of shrinkage
*/
virtual inline void Shrinkage(double rate) {
// num_leaves_ - 1 is both the internal-node count and the index of the last
// leaf, so the loop scales pairs and the last leaf is handled separately.
#pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048)
for (int i = 0; i < num_leaves_ - 1; ++i) {
leaf_value_[i] = MaybeRoundToZero(leaf_value_[i] * rate);
internal_value_[i] = MaybeRoundToZero(internal_value_[i] * rate);
if (is_linear_) {
leaf_const_[i] = MaybeRoundToZero(leaf_const_[i] * rate);
for (size_t j = 0; j < leaf_coeff_[i].size(); ++j) {
leaf_coeff_[i][j] = MaybeRoundToZero(leaf_coeff_[i][j] * rate);
}
}
}
leaf_value_[num_leaves_ - 1] =
MaybeRoundToZero(leaf_value_[num_leaves_ - 1] * rate);
if (is_linear_) {
leaf_const_[num_leaves_ - 1] = MaybeRoundToZero(leaf_const_[num_leaves_ - 1] * rate);
for (size_t j = 0; j < leaf_coeff_[num_leaves_ - 1].size(); ++j) {
leaf_coeff_[num_leaves_ - 1][j] = MaybeRoundToZero(leaf_coeff_[num_leaves_ - 1][j] * rate);
}
}
shrinkage_ *= rate;
}
/*! \brief Get the accumulated shrinkage rate of this tree*/
inline double shrinkage() const { return shrinkage_; }
/*! \brief Add a constant offset to every leaf (and internal) output*/
virtual inline void AddBias(double val) {
#pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048)
for (int i = 0; i < num_leaves_ - 1; ++i) {
leaf_value_[i] = MaybeRoundToZero(leaf_value_[i] + val);
internal_value_[i] = MaybeRoundToZero(internal_value_[i] + val);
}
leaf_value_[num_leaves_ - 1] =
MaybeRoundToZero(leaf_value_[num_leaves_ - 1] + val);
if (is_linear_) {
#pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048)
for (int i = 0; i < num_leaves_ - 1; ++i) {
leaf_const_[i] = MaybeRoundToZero(leaf_const_[i] + val);
}
leaf_const_[num_leaves_ - 1] = MaybeRoundToZero(leaf_const_[num_leaves_ - 1] + val);
}
// force to 1.0
shrinkage_ = 1.0f;
}
/*! \brief Collapse this tree into a single-leaf constant predictor*/
inline void AsConstantTree(double val) {
num_leaves_ = 1;
shrinkage_ = 1.0f;
leaf_value_[0] = val;
if (is_linear_) {
leaf_const_[0] = val;
}
}
/*! \brief Serialize this object to string*/
std::string ToString() const;
/*! \brief Serialize this object to json*/
std::string ToJSON() const;
/*! \brief Serialize linear model of tree node to json*/
std::string LinearModelToJSON(int index) const;
/*! \brief Serialize this object to if-else statement*/
std::string ToIfElse(int index, bool predict_leaf_index) const;
inline static bool IsZero(double fval) {
return (fval >= -kZeroThreshold && fval <= kZeroThreshold);
}
inline static double MaybeRoundToZero(double fval) {
return IsZero(fval) ? 0 : fval;
}
/*! \brief Test one flag bit of the packed decision type*/
inline static bool GetDecisionType(int8_t decision_type, int8_t mask) {
return (decision_type & mask) > 0;
}
/*! \brief Set or clear one flag bit of the packed decision type*/
inline static void SetDecisionType(int8_t* decision_type, bool input, int8_t mask) {
if (input) {
(*decision_type) |= mask;
} else {
(*decision_type) &= (127 - mask);
}
}
/*! \brief Missing type is packed into bits 2-3 of the decision type*/
inline static int8_t GetMissingType(int8_t decision_type) {
return (decision_type >> 2) & 3;
}
inline static void SetMissingType(int8_t* decision_type, int8_t input) {
(*decision_type) &= 3;
(*decision_type) |= (input << 2);
}
void RecomputeMaxDepth();
/*! \brief Index the next split would assign to its new leaf*/
int NextLeafId() const { return num_leaves_; }
/*! \brief Get the linear model constant term (bias) of one leaf */
inline double LeafConst(int leaf) const { return leaf_const_[leaf]; }
/*! \brief Get the linear model coefficients of one leaf */
inline std::vector<double> LeafCoeffs(int leaf) const { return leaf_coeff_[leaf]; }
/*! \brief Get the linear model features of one leaf */
inline std::vector<int> LeafFeaturesInner(int leaf) const {return leaf_features_inner_[leaf]; }
/*! \brief Get the linear model features of one leaf */
inline std::vector<int> LeafFeatures(int leaf) const {return leaf_features_[leaf]; }
/*! \brief Set the linear model coefficients on one leaf */
inline void SetLeafCoeffs(int leaf, const std::vector<double>& output) {
leaf_coeff_[leaf].resize(output.size());
for (size_t i = 0; i < output.size(); ++i) {
leaf_coeff_[leaf][i] = MaybeRoundToZero(output[i]);
}
}
/*! \brief Set the linear model constant term (bias) on one leaf */
inline void SetLeafConst(int leaf, double output) {
leaf_const_[leaf] = MaybeRoundToZero(output);
}
/*! \brief Set the linear model features on one leaf */
inline void SetLeafFeaturesInner(int leaf, const std::vector<int>& features) {
leaf_features_inner_[leaf] = features;
}
/*! \brief Set the linear model features on one leaf */
inline void SetLeafFeatures(int leaf, const std::vector<int>& features) {
leaf_features_[leaf] = features;
}
inline bool is_linear() const { return is_linear_; }
#ifdef USE_CUDA_EXP
inline bool is_cuda_tree() const { return is_cuda_tree_; }
#endif  // USE_CUDA_EXP
inline void SetIsLinear(bool is_linear) {
is_linear_ = is_linear;
}
protected:
std::string NumericalDecisionIfElse(int node) const;
std::string CategoricalDecisionIfElse(int node) const;
/*! \brief Route one record through a numerical split, in feature-value space*/
inline int NumericalDecision(double fval, int node) const {
uint8_t missing_type = GetMissingType(decision_type_[node]);
// NaN is only meaningful when the split handles NaN explicitly; otherwise
// treat it as zero so the Zero-missing logic below can apply.
if (std::isnan(fval) && missing_type != MissingType::NaN) {
fval = 0.0f;
}
if ((missing_type == MissingType::Zero && IsZero(fval))
|| (missing_type == MissingType::NaN && std::isnan(fval))) {
if (GetDecisionType(decision_type_[node], kDefaultLeftMask)) {
return left_child_[node];
} else {
return right_child_[node];
}
}
if (fval <= threshold_[node]) {
return left_child_[node];
} else {
return right_child_[node];
}
}
/*! \brief Route one record through a numerical split, in bin space*/
inline int NumericalDecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const {
uint8_t missing_type = GetMissingType(decision_type_[node]);
if ((missing_type == MissingType::Zero && fval == default_bin)
|| (missing_type == MissingType::NaN && fval == max_bin)) {
if (GetDecisionType(decision_type_[node], kDefaultLeftMask)) {
return left_child_[node];
} else {
return right_child_[node];
}
}
if (fval <= threshold_in_bin_[node]) {
return left_child_[node];
} else {
return right_child_[node];
}
}
/*! \brief Route one record through a categorical split, in feature-value space.
* NaN and negative categories always go right. */
inline int CategoricalDecision(double fval, int node) const {
int int_fval;
if (std::isnan(fval)) {
return right_child_[node];
} else {
int_fval = static_cast<int>(fval);
if (int_fval < 0) {
return right_child_[node];
}
}
// threshold_ stores an index into the category bitsets for categorical nodes.
int cat_idx = static_cast<int>(threshold_[node]);
if (Common::FindInBitset(cat_threshold_.data() + cat_boundaries_[cat_idx],
cat_boundaries_[cat_idx + 1] - cat_boundaries_[cat_idx], int_fval)) {
return left_child_[node];
}
return right_child_[node];
}
/*! \brief Route one record through a categorical split, in bin space*/
inline int CategoricalDecisionInner(uint32_t fval, int node) const {
int cat_idx = static_cast<int>(threshold_in_bin_[node]);
if (Common::FindInBitset(cat_threshold_inner_.data() + cat_boundaries_inner_[cat_idx],
cat_boundaries_inner_[cat_idx + 1] - cat_boundaries_inner_[cat_idx], fval)) {
return left_child_[node];
}
return right_child_[node];
}
/*! \brief Dispatch to the numerical or categorical decision for this node*/
inline int Decision(double fval, int node) const {
if (GetDecisionType(decision_type_[node], kCategoricalMask)) {
return CategoricalDecision(fval, node);
} else {
return NumericalDecision(fval, node);
}
}
inline int DecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const {
if (GetDecisionType(decision_type_[node], kCategoricalMask)) {
return CategoricalDecisionInner(fval, node);
} else {
return NumericalDecisionInner(fval, node, default_bin, max_bin);
}
}
inline void Split(int leaf, int feature, int real_feature, double left_value, double right_value, int left_cnt, int right_cnt,
double left_weight, double right_weight, float gain);
/*!
* \brief Find leaf index of which record belongs by features
* \param feature_values Feature value of this record
* \return Leaf index
*/
inline int GetLeaf(const double* feature_values) const;
inline int GetLeafByMap(const std::unordered_map<int, double>& feature_values) const;
/*! \brief Serialize one node to json*/
std::string NodeToJSON(int index) const;
/*! \brief Serialize one node to if-else statement*/
std::string NodeToIfElse(int index, bool predict_leaf_index) const;
std::string NodeToIfElseByMap(int index, bool predict_leaf_index) const;
double ExpectedValue() const;
/*! \brief This is used fill in leaf_depth_ after reloading a model*/
inline void RecomputeLeafDepths(int node = 0, int depth = 0);
/*!
* \brief Used by TreeSHAP for data we keep about our decision path
*/
struct PathElement {
int feature_index;
double zero_fraction;
double one_fraction;
// note that pweight is included for convenience and is not tied with the other attributes,
// the pweight of the i'th path element is the permutation weight of paths with i-1 ones in them
double pweight;
PathElement() {}
PathElement(int i, double z, double o, double w) : feature_index(i), zero_fraction(z), one_fraction(o), pweight(w) {}
};
/*! \brief Polynomial time algorithm for SHAP values (arXiv:1706.06060)*/
void TreeSHAP(const double *feature_values, double *phi,
int node, int unique_depth,
PathElement *parent_unique_path, double parent_zero_fraction,
double parent_one_fraction, int parent_feature_index) const;
void TreeSHAPByMap(const std::unordered_map<int, double>& feature_values,
std::unordered_map<int, double>* phi,
int node, int unique_depth,
PathElement *parent_unique_path, double parent_zero_fraction,
double parent_one_fraction, int parent_feature_index) const;
/*! \brief Extend our decision path with a fraction of one and zero extensions for TreeSHAP*/
static void ExtendPath(PathElement *unique_path, int unique_depth,
double zero_fraction, double one_fraction, int feature_index);
/*! \brief Undo a previous extension of the decision path for TreeSHAP*/
static void UnwindPath(PathElement *unique_path, int unique_depth, int path_index);
/*! determine what the total permutation weight would be if we unwound a previous extension in the decision path*/
static double UnwoundPathSum(const PathElement *unique_path, int unique_depth, int path_index);
/*! \brief Number of max leaves*/
int max_leaves_;
/*! \brief Number of current leaves*/
int num_leaves_;
// following values used for non-leaf node
/*! \brief A non-leaf node's left child */
std::vector<int> left_child_;
/*! \brief A non-leaf node's right child */
std::vector<int> right_child_;
/*! \brief A non-leaf node's split feature */
std::vector<int> split_feature_inner_;
/*! \brief A non-leaf node's split feature, the original index */
std::vector<int> split_feature_;
/*! \brief A non-leaf node's split threshold in bin */
std::vector<uint32_t> threshold_in_bin_;
/*! \brief A non-leaf node's split threshold in feature value */
std::vector<double> threshold_;
/*! \brief Number of categorical splits; the cat_* vectors below store the
* per-split category bitsets (boundaries index into the threshold arrays) */
int num_cat_;
std::vector<int> cat_boundaries_inner_;
std::vector<uint32_t> cat_threshold_inner_;
std::vector<int> cat_boundaries_;
std::vector<uint32_t> cat_threshold_;
/*! \brief Store the information for categorical feature handle and missing value handle. */
std::vector<int8_t> decision_type_;
/*! \brief A non-leaf node's split gain */
std::vector<float> split_gain_;
// used for leaf node
/*! \brief The parent of leaf */
std::vector<int> leaf_parent_;
/*! \brief Output of leaves */
std::vector<double> leaf_value_;
/*! \brief weight of leaves */
std::vector<double> leaf_weight_;
/*! \brief DataCount of leaves */
std::vector<int> leaf_count_;
/*! \brief Output of non-leaf nodes */
std::vector<double> internal_value_;
/*! \brief weight of non-leaf nodes */
std::vector<double> internal_weight_;
/*! \brief DataCount of non-leaf nodes */
std::vector<int> internal_count_;
/*! \brief Depth for leaves */
std::vector<int> leaf_depth_;
/*! \brief whether to keep track of ancestor nodes for each leaf (only needed when feature interactions are restricted) */
bool track_branch_features_;
/*! \brief Features on leaf's branch, original index */
std::vector<std::vector<int>> branch_features_;
/*! \brief Accumulated shrinkage (learning rate) applied to this tree */
double shrinkage_;
int max_depth_;
/*! \brief Tree has linear model at each leaf */
bool is_linear_;
/*! \brief coefficients of linear models on leaves */
std::vector<std::vector<double>> leaf_coeff_;
/*! \brief constant term (bias) of linear models on leaves */
std::vector<double> leaf_const_;
/* \brief features used in leaf linear models; indexing is relative to num_total_features_ */
std::vector<std::vector<int>> leaf_features_;
/* \brief features used in leaf linear models; indexing is relative to used_features_ */
std::vector<std::vector<int>> leaf_features_inner_;
#ifdef USE_CUDA_EXP
/*! \brief Marks whether this tree is a CUDATree */
bool is_cuda_tree_;
#endif  // USE_CUDA_EXP
};
// Internal split bookkeeping shared by Split/SplitCategorical: converts leaf
// `leaf` into internal node (num_leaves_ - 1), keeps `leaf` as its left child
// and creates leaf `num_leaves_` as its right child. Leaves are referenced
// from child arrays by the bitwise NOT of their index (negative values).
inline void Tree::Split(int leaf, int feature, int real_feature,
double left_value, double right_value, int left_cnt, int right_cnt,
double left_weight, double right_weight, float gain) {
int new_node_idx = num_leaves_ - 1;
// update parent info
int parent = leaf_parent_[leaf];
if (parent >= 0) {
// if cur node is left child
if (left_child_[parent] == ~leaf) {
left_child_[parent] = new_node_idx;
} else {
right_child_[parent] = new_node_idx;
}
}
// add new node
split_feature_inner_[new_node_idx] = feature;
split_feature_[new_node_idx] = real_feature;
split_gain_[new_node_idx] = gain;
// add two new leaves
left_child_[new_node_idx] = ~leaf;
right_child_[new_node_idx] = ~num_leaves_;
// update new leaves
leaf_parent_[leaf] = new_node_idx;
leaf_parent_[num_leaves_] = new_node_idx;
// save current leaf value to internal node before change
internal_weight_[new_node_idx] = left_weight + right_weight;
internal_value_[new_node_idx] = leaf_value_[leaf];
internal_count_[new_node_idx] = left_cnt + right_cnt;
// NaN outputs are clamped to 0 so a degenerate split cannot poison predictions.
leaf_value_[leaf] = std::isnan(left_value) ? 0.0f : left_value;
leaf_weight_[leaf] = left_weight;
leaf_count_[leaf] = left_cnt;
leaf_value_[num_leaves_] = std::isnan(right_value) ? 0.0f : right_value;
leaf_weight_[num_leaves_] = right_weight;
leaf_count_[num_leaves_] = right_cnt;
// update leaf depth
leaf_depth_[num_leaves_] = leaf_depth_[leaf] + 1;
leaf_depth_[leaf]++;
if (track_branch_features_) {
// The new right leaf inherits the left leaf's ancestry, then both gain
// the feature just split on.
branch_features_[num_leaves_] = branch_features_[leaf];
branch_features_[num_leaves_].push_back(split_feature_[new_node_idx]);
branch_features_[leaf].push_back(split_feature_[new_node_idx]);
}
}
// Computes this tree's output for one sample given as a dense feature array.
// For a linear tree the reached leaf holds an affine model
// (leaf_const_ + sum of leaf_coeff_ over leaf_features_); if any referenced
// feature value is NaN, the plain leaf output is used as a fallback.
// For an ordinary tree the traversal result is returned directly
// (a single-leaf stump simply yields leaf_value_[0]).
inline double Tree::Predict(const double* feature_values) const {
  if (is_linear_) {
    const int leaf = (num_leaves_ > 1) ? GetLeaf(feature_values) : 0;
    const auto& used_feats = leaf_features_[leaf];
    const auto& coeffs = leaf_coeff_[leaf];
    double result = leaf_const_[leaf];
    for (size_t idx = 0; idx < used_feats.size(); ++idx) {
      const double feat_val = feature_values[used_feats[idx]];
      if (std::isnan(feat_val)) {
        // NaN input: fall back to the constant leaf output
        return LeafOutput(leaf);
      }
      result += coeffs[idx] * feat_val;
    }
    return result;
  }
  return (num_leaves_ > 1) ? LeafOutput(GetLeaf(feature_values)) : leaf_value_[0];
}
// Computes this tree's output for one sample given as a sparse
// {feature index -> value} map.  Behaves like Predict(), except that a
// feature absent from the map contributes nothing to a linear-leaf model;
// a NaN value still triggers the plain-leaf-output fallback.
inline double Tree::PredictByMap(const std::unordered_map<int, double>& feature_values) const {
  if (is_linear_) {
    const int leaf = (num_leaves_ > 1) ? GetLeafByMap(feature_values) : 0;
    const auto& used_feats = leaf_features_[leaf];
    const auto& coeffs = leaf_coeff_[leaf];
    double result = leaf_const_[leaf];
    for (size_t idx = 0; idx < used_feats.size(); ++idx) {
      const auto found = feature_values.find(used_feats[idx]);
      if (found == feature_values.end()) {
        continue;  // absent feature: no contribution
      }
      if (std::isnan(found->second)) {
        return LeafOutput(leaf);  // NaN input: fall back to the leaf output
      }
      result += coeffs[idx] * found->second;
    }
    return result;
  }
  return (num_leaves_ > 1) ? LeafOutput(GetLeafByMap(feature_values)) : leaf_value_[0];
}
// Returns the index of the leaf that the given dense feature array falls
// into.  A tree with a single leaf (a stump) maps every sample to leaf 0.
inline int Tree::PredictLeafIndex(const double* feature_values) const {
  return (num_leaves_ > 1) ? GetLeaf(feature_values) : 0;
}
// Returns the index of the leaf that the given sparse feature map falls
// into.  A tree with a single leaf (a stump) maps every sample to leaf 0.
inline int Tree::PredictLeafIndexByMap(const std::unordered_map<int, double>& feature_values) const {
  return (num_leaves_ > 1) ? GetLeafByMap(feature_values) : 0;
}
// Accumulates SHAP feature contributions for one sample into `output`
// (an array of length num_features + 1; the last slot collects the
// expected value / base score of the tree).
inline void Tree::PredictContrib(const double* feature_values, int num_features, double* output) {
  output[num_features] += ExpectedValue();
  // Run the recursion with preallocated space for the unique path data
  if (num_leaves_ > 1) {
    CHECK_GE(max_depth_, 0);
    const int max_path_len = max_depth_ + 1;
    // TreeSHAP needs max_path_len*(max_path_len+1)/2 PathElement slots:
    // one per (depth, position-in-path) pair of the recursion.
    std::vector<PathElement> unique_path_data(max_path_len*(max_path_len + 1) / 2);
    TreeSHAP(feature_values, output, 0, 0, unique_path_data.data(), 1, 1, -1);
  }
}
// Map-based variant of PredictContrib: accumulates SHAP feature
// contributions into the sparse `output` map; key num_features collects
// the expected value / base score of the tree.
inline void Tree::PredictContribByMap(const std::unordered_map<int, double>& feature_values,
                                      int num_features, std::unordered_map<int, double>* output) {
  (*output)[num_features] += ExpectedValue();
  // Run the recursion with preallocated space for the unique path data
  if (num_leaves_ > 1) {
    CHECK_GE(max_depth_, 0);
    const int max_path_len = max_depth_ + 1;
    // same triangular-number sizing as PredictContrib
    std::vector<PathElement> unique_path_data(max_path_len*(max_path_len + 1) / 2);
    TreeSHAPByMap(feature_values, output, 0, 0, unique_path_data.data(), 1, 1, -1);
  }
}
// Recomputes leaf_depth_ by walking the tree from `node` at depth `depth`.
// Call with (0, 0) to refresh the whole tree; negative node indices encode
// leaves as bitwise complements.
inline void Tree::RecomputeLeafDepths(int node, int depth) {
  if (node == 0) {
    // root call: make sure the depth buffer matches the current leaf count
    leaf_depth_.resize(num_leaves());
  }
  if (node < 0) {
    leaf_depth_[~node] = depth;
    return;
  }
  RecomputeLeafDepths(left_child_[node], depth + 1);
  RecomputeLeafDepths(right_child_[node], depth + 1);
}
// Traverses the tree from the root and returns the index of the leaf
// reached.  Child entries < 0 encode leaves as bitwise complements, hence
// the final ~node.  The categorical check is hoisted out of the loop so a
// purely numerical tree takes the cheaper NumericalDecision path.
inline int Tree::GetLeaf(const double* feature_values) const {
  int node = 0;
  if (num_cat_ > 0) {
    do {
      node = Decision(feature_values[split_feature_[node]], node);
    } while (node >= 0);
  } else {
    do {
      node = NumericalDecision(feature_values[split_feature_[node]], node);
    } while (node >= 0);
  }
  return ~node;
}
// Traverses the tree using a sparse {feature index -> value} map and returns
// the index of the leaf reached; features absent from the map are treated as
// 0.0.  Child entries < 0 encode leaves as bitwise complements, hence the
// final ~node.
// Fix: use a single find() per visited node instead of the previous
// count()-then-at() pair, which performed two hash lookups per node.
inline int Tree::GetLeafByMap(const std::unordered_map<int, double>& feature_values) const {
  int node = 0;
  if (num_cat_ > 0) {
    while (node >= 0) {
      const auto it = feature_values.find(split_feature_[node]);
      node = Decision(it != feature_values.end() ? it->second : 0.0f, node);
    }
  } else {
    while (node >= 0) {
      const auto it = feature_values.find(split_feature_[node]);
      node = NumericalDecision(it != feature_values.end() ? it->second : 0.0f, node);
    }
  }
  return ~node;
}
} // namespace LightGBM
#endif // LightGBM_TREE_H_
|
deconvolution_4x4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON
// Deconvolution (transposed convolution) with a fixed 4x4 kernel, stride 1.
// Each input pixel (i, j) scatters a weighted 4x4 patch into the output:
// kernel row r (k0..k3) is added into output row i+r via outptr0..outptr3,
// at columns j..j+3.  top_blob is assumed to be pre-sized by the caller
// (TODO confirm: outw/outh large enough for the 3-pixel overhang).
static void deconv4x4s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outch = top_blob.c;
    const float* kernel = _kernel;
    const float* bias = _bias;
    // one output channel per OpenMP task
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);
        const float bias0 = bias ? bias[p] : 0.f;
        out.fill(bias0);
        // accumulate the contribution of every input channel
        for (int q=0; q<inch; q++)
        {
            const float* img0 = bottom_blob.channel(q);
            // 16 weights per (output, input) channel pair; k0..k3 are the
            // four kernel rows of 4 weights each
            const float* kernel0 = kernel + p*inch*16 + q*16;
            const float* r0 = img0;
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 4;
            const float* k2 = kernel0 + 8;
            const float* k3 = kernel0 + 12;
#if __ARM_NEON
            float32x4_t _k0 = vld1q_f32(k0);
            float32x4_t _k1 = vld1q_f32(k1);
            float32x4_t _k2 = vld1q_f32(k2);
            float32x4_t _k3 = vld1q_f32(k3);
#endif // __ARM_NEON
            for (int i = 0; i < h; i++)
            {
                // the 4 kernel rows land on 4 consecutive output rows
                float* outptr = out.row(i);
                float* outptr0 = outptr;
                float* outptr1 = outptr0 + outw;
                float* outptr2 = outptr1 + outw;
                float* outptr3 = outptr2 + outw;
                int j = 0;
#if __ARM_NEON
                // vectorized: 4 input pixels at once; the +0/+1/+2/+3 offset
                // load/store pairs exist because neighbouring input pixels
                // write overlapping 4-wide output patches
                for (; j+3<w; j+=4)
                {
                    float32x4_t _v = vld1q_f32(r0);
                    // kernel row 0 -> output row i
                    float32x4_t _out00 = vld1q_f32(outptr0 + 0);
                    _out00 = vmlaq_lane_f32(_out00, _v, vget_low_f32(_k0), 0);
                    vst1q_f32(outptr0 + 0, _out00);
                    float32x4_t _out01 = vld1q_f32(outptr0 + 1);
                    _out01 = vmlaq_lane_f32(_out01, _v, vget_low_f32(_k0), 1);
                    vst1q_f32(outptr0 + 1, _out01);
                    float32x4_t _out02 = vld1q_f32(outptr0 + 2);
                    _out02 = vmlaq_lane_f32(_out02, _v, vget_high_f32(_k0), 0);
                    vst1q_f32(outptr0 + 2, _out02);
                    float32x4_t _out03 = vld1q_f32(outptr0 + 3);
                    _out03 = vmlaq_lane_f32(_out03, _v, vget_high_f32(_k0), 1);
                    vst1q_f32(outptr0 + 3, _out03);
                    // kernel row 1 -> output row i+1
                    float32x4_t _out10 = vld1q_f32(outptr1 + 0);
                    _out10 = vmlaq_lane_f32(_out10, _v, vget_low_f32(_k1), 0);
                    vst1q_f32(outptr1 + 0, _out10);
                    float32x4_t _out11 = vld1q_f32(outptr1 + 1);
                    _out11 = vmlaq_lane_f32(_out11, _v, vget_low_f32(_k1), 1);
                    vst1q_f32(outptr1 + 1, _out11);
                    float32x4_t _out12 = vld1q_f32(outptr1 + 2);
                    _out12 = vmlaq_lane_f32(_out12, _v, vget_high_f32(_k1), 0);
                    vst1q_f32(outptr1 + 2, _out12);
                    float32x4_t _out13 = vld1q_f32(outptr1 + 3);
                    _out13 = vmlaq_lane_f32(_out13, _v, vget_high_f32(_k1), 1);
                    vst1q_f32(outptr1 + 3, _out13);
                    // kernel row 2 -> output row i+2
                    float32x4_t _out20 = vld1q_f32(outptr2 + 0);
                    _out20 = vmlaq_lane_f32(_out20, _v, vget_low_f32(_k2), 0);
                    vst1q_f32(outptr2 + 0, _out20);
                    float32x4_t _out21 = vld1q_f32(outptr2 + 1);
                    _out21 = vmlaq_lane_f32(_out21, _v, vget_low_f32(_k2), 1);
                    vst1q_f32(outptr2 + 1, _out21);
                    float32x4_t _out22 = vld1q_f32(outptr2 + 2);
                    _out22 = vmlaq_lane_f32(_out22, _v, vget_high_f32(_k2), 0);
                    vst1q_f32(outptr2 + 2, _out22);
                    float32x4_t _out23 = vld1q_f32(outptr2 + 3);
                    _out23 = vmlaq_lane_f32(_out23, _v, vget_high_f32(_k2), 1);
                    vst1q_f32(outptr2 + 3, _out23);
                    // kernel row 3 -> output row i+3
                    float32x4_t _out30 = vld1q_f32(outptr3 + 0);
                    _out30 = vmlaq_lane_f32(_out30, _v, vget_low_f32(_k3), 0);
                    vst1q_f32(outptr3 + 0, _out30);
                    float32x4_t _out31 = vld1q_f32(outptr3 + 1);
                    _out31 = vmlaq_lane_f32(_out31, _v, vget_low_f32(_k3), 1);
                    vst1q_f32(outptr3 + 1, _out31);
                    float32x4_t _out32 = vld1q_f32(outptr3 + 2);
                    _out32 = vmlaq_lane_f32(_out32, _v, vget_high_f32(_k3), 0);
                    vst1q_f32(outptr3 + 2, _out32);
                    float32x4_t _out33 = vld1q_f32(outptr3 + 3);
                    _out33 = vmlaq_lane_f32(_out33, _v, vget_high_f32(_k3), 1);
                    vst1q_f32(outptr3 + 3, _out33);
                    r0 += 4;
                    outptr0 += 4;
                    outptr1 += 4;
                    outptr2 += 4;
                    outptr3 += 4;
                }
#endif // __ARM_NEON
                // scalar tail: one input pixel -> one 4x4 output patch
                for (; j < w; j++)
                {
                    float val = r0[0];
                    outptr0[0] += val * k0[0];
                    outptr0[1] += val * k0[1];
                    outptr0[2] += val * k0[2];
                    outptr0[3] += val * k0[3];
                    outptr1[0] += val * k1[0];
                    outptr1[1] += val * k1[1];
                    outptr1[2] += val * k1[2];
                    outptr1[3] += val * k1[3];
                    outptr2[0] += val * k2[0];
                    outptr2[1] += val * k2[1];
                    outptr2[2] += val * k2[2];
                    outptr2[3] += val * k2[3];
                    outptr3[0] += val * k3[0];
                    outptr3[1] += val * k3[1];
                    outptr3[2] += val * k3[2];
                    outptr3[3] += val * k3[3];
                    r0++;
                    outptr0++;
                    outptr1++;
                    outptr2++;
                    outptr3++;
                }
            }
        }
    }
}
// Deconvolution (transposed convolution) with a fixed 4x4 kernel, stride 2.
// Each input pixel (i, j) scatters a weighted 4x4 patch starting at output
// (2i, 2j); kernel rows k0..k3 go to output rows 2i..2i+3 (outptr0..3).
// The NEON path uses interleaved vld2q/vst2q loads so patch columns split
// naturally into even (+0, +2) and odd (+1, +3) output columns.
static void deconv4x4s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outch = top_blob.c;
    const float* kernel = _kernel;
    const float* bias = _bias;
    // one output channel per OpenMP task
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);
        const float bias0 = bias ? bias[p] : 0.f;
        out.fill(bias0);
        // accumulate the contribution of every input channel
        for (int q=0; q<inch; q++)
        {
            const float* img0 = bottom_blob.channel(q);
            // 16 weights per (output, input) channel pair; k0..k3 are the
            // four kernel rows of 4 weights each
            const float* kernel0 = kernel + p*inch*16 + q*16;
            const float* r0 = img0;
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 4;
            const float* k2 = kernel0 + 8;
            const float* k3 = kernel0 + 12;
#if __ARM_NEON
            float32x4_t _k0 = vld1q_f32(k0);
            float32x4_t _k1 = vld1q_f32(k1);
            float32x4_t _k2 = vld1q_f32(k2);
            float32x4_t _k3 = vld1q_f32(k3);
#endif // __ARM_NEON
            for (int i = 0; i < h; i++)
            {
                // stride 2: input row i maps to output rows 2i..2i+3
                float* outptr = out.row(i*2);
                float* outptr0 = outptr;
                float* outptr1 = outptr0 + outw;
                float* outptr2 = outptr1 + outw;
                float* outptr3 = outptr2 + outw;
                int j = 0;
#if __ARM_NEON
                // vectorized: 4 input pixels -> 8 output columns per row
                for (; j+3<w; j+=4)
                {
                    float32x4_t _v = vld1q_f32(r0);
                    // row 0
                    float32x4x2_t _out0 = vld2q_f32(outptr0);
                    // 0,2,4,6
                    _out0.val[0] = vmlaq_lane_f32(_out0.val[0], _v, vget_low_f32(_k0), 0);
                    // 1,3,5,7
                    _out0.val[1] = vmlaq_lane_f32(_out0.val[1], _v, vget_low_f32(_k0), 1);
                    vst2q_f32(outptr0, _out0);
                    _out0 = vld2q_f32(outptr0 + 2);
                    // 2,4,6,8
                    _out0.val[0] = vmlaq_lane_f32(_out0.val[0], _v, vget_high_f32(_k0), 0);
                    // 3,5,7,9
                    _out0.val[1] = vmlaq_lane_f32(_out0.val[1], _v, vget_high_f32(_k0), 1);
                    vst2q_f32(outptr0 + 2, _out0);
                    // row 1
                    float32x4x2_t _out1 = vld2q_f32(outptr1);
                    // 0,2,4,6
                    _out1.val[0] = vmlaq_lane_f32(_out1.val[0], _v, vget_low_f32(_k1), 0);
                    // 1,3,5,7
                    _out1.val[1] = vmlaq_lane_f32(_out1.val[1], _v, vget_low_f32(_k1), 1);
                    vst2q_f32(outptr1, _out1);
                    _out1 = vld2q_f32(outptr1 + 2);
                    // 2,4,6,8
                    _out1.val[0] = vmlaq_lane_f32(_out1.val[0], _v, vget_high_f32(_k1), 0);
                    // 3,5,7,9
                    _out1.val[1] = vmlaq_lane_f32(_out1.val[1], _v, vget_high_f32(_k1), 1);
                    vst2q_f32(outptr1 + 2, _out1);
                    // row 2
                    float32x4x2_t _out2 = vld2q_f32(outptr2);
                    _out2.val[0] = vmlaq_lane_f32(_out2.val[0], _v, vget_low_f32(_k2), 0);
                    _out2.val[1] = vmlaq_lane_f32(_out2.val[1], _v, vget_low_f32(_k2), 1);
                    vst2q_f32(outptr2, _out2);
                    _out2 = vld2q_f32(outptr2 + 2);
                    _out2.val[0] = vmlaq_lane_f32(_out2.val[0], _v, vget_high_f32(_k2), 0);
                    _out2.val[1] = vmlaq_lane_f32(_out2.val[1], _v, vget_high_f32(_k2), 1);
                    vst2q_f32(outptr2 + 2, _out2);
                    // row 3
                    float32x4x2_t _out3 = vld2q_f32(outptr3);
                    _out3.val[0] = vmlaq_lane_f32(_out3.val[0], _v, vget_low_f32(_k3), 0);
                    _out3.val[1] = vmlaq_lane_f32(_out3.val[1], _v, vget_low_f32(_k3), 1);
                    vst2q_f32(outptr3, _out3);
                    _out3 = vld2q_f32(outptr3 + 2);
                    _out3.val[0] = vmlaq_lane_f32(_out3.val[0], _v, vget_high_f32(_k3), 0);
                    _out3.val[1] = vmlaq_lane_f32(_out3.val[1], _v, vget_high_f32(_k3), 1);
                    vst2q_f32(outptr3 + 2, _out3);
                    r0 += 4;
                    // 4 input pixels advance the output by 8 columns (stride 2)
                    outptr0 += 8;
                    outptr1 += 8;
                    outptr2 += 8;
                    outptr3 += 8;
                }
#endif // __ARM_NEON
                // scalar tail: one input pixel -> one 4x4 output patch
                for (; j < w; j++)
                {
                    float val = r0[0];
                    outptr0[0] += val * k0[0];
                    outptr0[1] += val * k0[1];
                    outptr0[2] += val * k0[2];
                    outptr0[3] += val * k0[3];
                    outptr1[0] += val * k1[0];
                    outptr1[1] += val * k1[1];
                    outptr1[2] += val * k1[2];
                    outptr1[3] += val * k1[3];
                    outptr2[0] += val * k2[0];
                    outptr2[1] += val * k2[1];
                    outptr2[2] += val * k2[2];
                    outptr2[3] += val * k2[3];
                    outptr3[0] += val * k3[0];
                    outptr3[1] += val * k3[1];
                    outptr3[2] += val * k3[2];
                    outptr3[3] += val * k3[3];
                    r0++;
                    // stride 2: next input pixel starts 2 output columns later
                    outptr0 += 2;
                    outptr1 += 2;
                    outptr2 += 2;
                    outptr3 += 2;
                }
            }
        }
    }
}
|
GB_binop__div_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__div_uint8)
// A.*B function (eWiseMult): GB (_AemultB_08__div_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__div_uint8)
// A.*B function (eWiseMult): GB (_AemultB_04__div_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__div_uint8)
// A*D function (colscale): GB (_AxD__div_uint8)
// D*A function (rowscale): GB (_DxB__div_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__div_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__div_uint8)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__div_uint8)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__div_uint8)
// C=scalar+B GB (_bind1st__div_uint8)
// C=scalar+B' GB (_bind1st_tran__div_uint8)
// C=A+scalar GB (_bind2nd__div_uint8)
// C=A'+scalar GB (_bind2nd_tran__div_uint8)
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = GB_IDIV_UNSIGNED (aij, bij, 8)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IDIV_UNSIGNED (x, y, 8) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_DIV || GxB_NO_UINT8 || GxB_NO_DIV_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense; the included template applies
// the div_uint8 GB_BINOP macro elementwise.
void GB (_Cdense_ewise3_accum__div_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense.
GrB_Info GB (_Cdense_ewise3_noaccum__div_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // GB_DISABLE is true when this operator/type pair is compiled out
    // (see GB_control.h); the generic kernel is used instead.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C.
GrB_Info GB (_Cdense_accumB__div_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C.
GrB_Info GB (_Cdense_accumb__div_uint8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable (the inner block already returned); kept as emitted by
    // the code generator
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column scale A by the diagonal matrix D.
GrB_Info GB (_AxD__div_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row scale B by the diagonal matrix D.
GrB_Info GB (_DxB__div_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B with the div_uint8 operator.
GrB_Info GB (_AaddB__div_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // slicing workspaces for M, A, and B; released by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is
// sparse or hypersparse.
GrB_Info GB (_AemultB_08__div_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.
GrB_Info GB (_AemultB_02__div_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // GB_BINOP_FLIP is 0 for DIV (a flipped RDIV variant exists), so only
    // the #else branch below is compiled for this operator.
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B with M sparse/hyper, A and B
// bitmap/full.
GrB_Info GB (_AemultB_04__div_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap.
GrB_Info GB (_AemultB_bitmap__div_uint8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the operator with the scalar bound as the first
// argument, i.e. Cx [p] = x / Bx [p] (unsigned integer division).
GrB_Info GB (_bind1st__div_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B; NULL if B has no bitmap
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        uint8_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_IDIV_UNSIGNED (x, bij, 8) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the operator with the scalar bound as the second
// argument, i.e. Cx [p] = Ax [p] / y (unsigned integer division).
GrB_Info GB (_bind2nd__div_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A; NULL if A has no bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t y = (*((uint8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        uint8_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_IDIV_UNSIGNED (aij, y, 8) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_UNSIGNED (x, aij, 8) ; \
}
// C = op (x, A'): transpose A and apply the operator with the scalar bound
// as the first argument (uses the GB_CAST_OP macro defined just above).
GrB_Info GB (_bind1st_tran__div_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_UNSIGNED (aij, y, 8) ; \
}
// C = op (A', y): transpose A and apply the operator with the scalar bound
// as the second argument (uses the GB_CAST_OP macro defined just above).
GrB_Info GB (_bind2nd_tran__div_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
nested_loop.c | #include <stdio.h>
#include "assert.h"
#include <unistd.h>
#define TRIALS 1
#define N 960
// Offload smoke test: runs a `target teams` region containing a nested
// `parallel for`, and checks that all three nesting levels actually
// executed (teams body, outer statement, and inner loop body).
// Returns 0 on success, 1 on failure.
// Fix: removed the unused locals `fail` and `nte` (num_teams is hard-coded).
int main() {
  double A[N], B[N], C[N];
  // A accumulates; B is zero and C is one, so each executed inner
  // iteration adds exactly 1.0 to A[i].
  for (int i = 0; i < N; i++) {
    A[i] = 0.0;
    B[i] = 0.0;
    C[i] = 1.0;
  }
  int tl = 6;         // thread_limit for the teams construct
  int blockSize = 5;  // trip count of the inner parallel loop
  int Inner = 0, Outer = 0;  // set inside the inner loop / after it
  int wayout = 0;            // set at the top of the teams region
  for (int t = 0; t < TRIALS; t++) {
#pragma omp target map(tofrom: Inner, Outer, wayout)
#pragma omp teams num_teams(1) thread_limit(tl)
    {
      wayout = 1;
#pragma omp parallel for
      for (int i = 0; i < blockSize; i++) {
        A[i] += B[i] + C[i];
        Inner = 1;
      }
      Outer = 1;
    }
  }
  printf("Inner=%d Outer=%d wayout=%d\n", Inner, Outer, wayout);
  if (Inner == 1 && Outer == 1 && wayout == 1) {
    printf("Succeeded\n");
    return 0;
  } else {
    printf("Failed\n");
    return 1;
  }
}
|
GB_binop__second_fc64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__second_fc64)
// A.*B function (eWiseMult): GB (_AemultB_01__second_fc64)
// A.*B function (eWiseMult): GB (_AemultB_02__second_fc64)
// A.*B function (eWiseMult): GB (_AemultB_03__second_fc64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__second_fc64)
// A*D function (colscale): GB (_AxD__second_fc64)
// D*A function (rowscale): GB (_DxB__second_fc64)
// C+=B function (dense accum): GB (_Cdense_accumB__second_fc64)
// C+=b function (dense accum): GB (_Cdense_accumb__second_fc64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__second_fc64)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// B,b type: GxB_FC64_t
// BinaryOp: cij = bij
#define GB_ATYPE \
GxB_FC64_t
#define GB_BTYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
;
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
GxB_FC64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = y ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
1
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SECOND || GxB_NO_FC64 || GxB_NO_SECOND_FC64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// disabled placeholder: SECOND has no dense ewise3-accum kernel.
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense (SECOND op: cij = bij).
GrB_Info GB (_Cdense_ewise3_noaccum__second_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // GB_DISABLE is true when this operator/type pair is compiled out
    // (see GB_control.h); the generic kernel is used instead.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C.
GrB_Info GB (_Cdense_accumB__second_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// Generated kernel: the scalar b is unpacked from p_bwork and the
// accumulation loop lives in the included template.
// Fix: removed the second, unreachable "return (GrB_SUCCESS) ;" that the
// generator emitted after the inner block (the block always returns first).
// NOTE(review): this file appears to be auto-generated; the same fix should
// be applied to the Generator/ source it comes from.
GrB_Info GB (_Cdense_accumb__second_fc64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type GxB_FC64_t
GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Generated kernel: only the typed Cx pointer is set up here; the scaling
// loop lives in the included template.
GrB_Info GB (_AxD__second_fc64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__second_fc64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// Generated kernel: slicing workspaces are declared here and released by
// GB_FREE_WORK; the add itself lives in GB_add_template.c.
GrB_Info GB (_AaddB__second_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// Generated kernel: the whole method (task scheduling and loop) lives in
// the included meta file.
GrB_Info GB (_AemultB_01__second_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__second_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// GB_BINOP_FLIP is 0 for SECOND (see macros above), so only the #else
// branch below is compiled for this kernel.
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__second_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__second_fc64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Compiled out (#if 0): the generator does not emit bind kernels for
// SECOND here -- presumably these cases are handled elsewhere (confirm in
// the Generator/ sources).
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ;
GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
GxB_FC64_t bij = GBX (Bx, p, false) ;
Cx [p] = bij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Compiled out (#if 0); SECOND(aij,y) = y, hence the constant store below.
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ;
GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
; ;
Cx [p] = y ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// Compiled out (#if 0).
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = aij ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any subsequent kernels (generator artifact;
// placed after the returns on purpose -- it is preprocessor text, not code)
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// Compiled out (#if 0).
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = y ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
// presumably closes an #ifndef GBCOMPACT opened in the file header
// (not visible in this chunk) -- confirm
#endif
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
// NOTE: these function-like macros evaluate their arguments more than once;
// do not call them with expressions that have side effects.
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
// number of timed repetitions of the whole stencil sweep
#define TESTS 2
// unparenthesized a/b in the expansion: MAX(x, p ? q : r) would misparse;
// prefer the lower-case max/min above
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y, storing the result in
 * RESULT.  Returns 1 if the difference is negative, otherwise 0.
 * NOTE: *y is normalized in place (classic glibc idiom); the caller's y
 * may be modified.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Move whole seconds between the fields of y so that afterwards
     * 0 <= x->tv_usec - y->tv_usec < 1000000. */
    if (x->tv_usec < y->tv_usec) {
        int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_sec += carry;
        y->tv_usec -= 1000000 * carry;
    }
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_sec -= carry;
        y->tv_usec += 1000000 * carry;
    }
    /* With y normalized, the fieldwise difference is the answer;
     * tv_usec is certainly non-negative here. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    /* The difference is negative iff normalized y has more whole seconds. */
    return x->tv_sec < y->tv_sec;
}
// Driver: allocates a double-buffered Nz x Ny x Nx grid, runs the tiled
// 7-point stencil TESTS times and reports the per-test and minimum times.
int main(int argc, char *argv[])
{
int t, i, j, k, test;
// NOTE(review): Nx..Nz and Nt are used below even when argc is too small;
// with fewer than 5 command-line arguments they are read uninitialized --
// confirm callers always pass nx ny nz nt.
int Nx, Ny, Nz, Nt;
if (argc > 3) {
// +2 adds a one-cell boundary layer on each side
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
// A[0]/A[1] are the two time planes of the double buffer (malloc results
// are not checked; a failed allocation would crash below)
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 8;
tile_size[1] = 8;
tile_size[2] = 4;
tile_size[3] = 256;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// stencil coefficients (center / neighbor weights)
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
// NOTE(review): only indices >= 1 are initialized, but the stencil below
// reads index 0 (e.g. (-t5+t8) - 1 can be 0), so the boundary planes are
// read uninitialized -- presumably tolerated for a timing benchmark; confirm.
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
// Machine-generated (PLUTO/CLooG) time-tiled loop nest; t2 is the
// parallel tile dimension.  Do not hand-edit the bounds below.
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,4);t1++) {
lbp=max(ceild(t1,2),ceild(8*t1-Nt+3,8));
ubp=min(floord(Nt+Nz-4,8),floord(4*t1+Nz+1,8));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(8*t2-Nz,4)),t1);t3<=min(min(min(floord(Nt+Ny-4,4),floord(4*t1+Ny+5,4)),floord(8*t2+Ny+4,4)),floord(8*t1-8*t2+Nz+Ny+3,4));t3++) {
for (t4=max(max(max(0,ceild(t1-63,64)),ceild(8*t2-Nz-252,256)),ceild(4*t3-Ny-252,256));t4<=min(min(min(min(floord(4*t3+Nx,256),floord(Nt+Nx-4,256)),floord(4*t1+Nx+5,256)),floord(8*t2+Nx+4,256)),floord(8*t1-8*t2+Nz+Nx+3,256));t4++) {
for (t5=max(max(max(max(max(0,4*t1),8*t1-8*t2+1),8*t2-Nz+2),4*t3-Ny+2),256*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,4*t1+7),8*t2+6),4*t3+2),256*t4+254),8*t1-8*t2+Nz+5);t5++) {
for (t6=max(max(8*t2,t5+1),-8*t1+8*t2+2*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(4*t3,t5+1);t7<=min(4*t3+3,t5+Ny-2);t7++) {
lbv=max(256*t4,t5+1);
ubv=min(256*t4+255,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
// no trailing semicolon: presumably PRINT_RESULTS (from print_utils.h)
// expands to a complete statement -- confirm
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
Par-13-ParForNestedParForNestedParFor.c |
// Nested parallel-for test case (appears to come from a verification
// benchmark corpus, per the file name).
int main(int argc, char **argv) {
int a[4] = {1,2,3,4};
int b[4] = {1,1,1,1};
int c[4] = {0,2,1,3};
// NOTE(review): on the first iteration i == 0, so `i < 2' is true and main
// returns -1 immediately -- everything below this loop is dead code.
// Presumably intentional for the benchmark; confirm before "fixing".
for (int i = 0; i < 1; ++i) {
if (i < 2) {
return -1;
}
}
// Unreachable triply-nested parallel region; if it ever ran, the inner
// writes to b[] and c[] appear to race across outer iterations.
#pragma omp parallel for
for (int i = 0; i < 4; ++i) {
a[i] = 3*a[i];
#pragma omp parallel for
for(int j = 0; j < 4; ++j) {
b[j] += a[i];
#pragma omp parallel for
for(int k = 0; k < 4; ++k) {
c[k] = a[i] * b[k] + c[k];
}
}
}
return 0;
}
|
fwt_parfor.c | /*
Tiled version of the Floyd-Warshall algorithm using
OpenMP parallel for.
command-line arguments: N, B
N = size of graph
B = size of tile
works only when N is a multiple of B
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <omp.h>
#include "util.h"
// duplicate includes of stdlib.h/stdio.h below are harmless (header guards)
#include <stdlib.h>
#include <stdio.h>
// Forward declarations for this translation unit.
// NOTE(review): main() calls graph_init_random_par(), which is neither
// declared nor defined here -- presumably it lives in util.h; otherwise
// this is an implicit declaration / link error.  Confirm.
// NOTE(review): plain C99 `inline' without a `static' or extern definition
// can fail to link at -O0 on some toolchains -- confirm build flags.
void write_arr_to_file(char *name, int **A, int N);
int** read_arr_from_file(int **A ,char *s , int N);
inline int min(int a, int b);
inline void FW(int **A, int K, int I, int J, int N);
void graph_init_random(int **adjm, int seed, int n, int m);
// Tiled Floyd-Warshall driver: N x N random graph, B x B tiles.
// Works only when N is a multiple of B (see file header comment).
int main(int argc, char **argv)
{
int **A;
int i,j,k;
struct timeval t1, t2;
double time;
int B=64;
int N=1024;
if (argc != 3){
fprintf(stdout, "Usage %s N B\n", argv[0]);
exit(0);
}
// atoi results are not validated; non-numeric input yields N=0 or B=0
N=atoi(argv[1]);
B=atoi(argv[2]);
A=(int **)malloc(N*sizeof(int *));
for(i=0; i<N; i++)A[i]=(int *)malloc(N*sizeof(int));
// NOTE(review): only graph_init_random() is defined in this file;
// graph_init_random_par() is presumably the parallel variant from util.h
graph_init_random_par(A,-1,N,128*N);
//A=read_arr_from_file(A, "./cor_input",N);
gettimeofday(&t1,0);
for(k=0;k<N;k+=B){
// Phase 1: the diagonal tile (k,k) must be done first (sequentially)
FW(A,k,k,k,B);
// Phase 2: tiles in the same tile-row and tile-column as (k,k);
// they depend only on the diagonal tile, so the four strips can
// proceed concurrently (hence nowait)
#pragma omp parallel shared(A, B, k)
{
#pragma omp for nowait
for(i=0; i<k; i+=B)
FW(A,k,i,k,B);
#pragma omp for nowait
for(i=k+B; i<N; i+=B)
FW(A,k,i,k,B);
#pragma omp for nowait
for(j=0; j<k; j+=B)
FW(A,k,k,j,B);
#pragma omp for nowait
for(j=k+B; j<N; j+=B)
FW(A,k,k,j,B);
}
// Phase 3: all remaining tiles depend only on phase-2 results; the
// implicit barrier at the end of the previous parallel region
// separates the phases
#pragma omp parallel shared(A, B, k)
{
#pragma omp for collapse(2) nowait
for(i=0; i<k; i+=B)
for(j=0; j<k; j+=B)
FW(A,k,i,j,B);
#pragma omp for collapse(2) nowait
for(i=0; i<k; i+=B)
for(j=k+B; j<N; j+=B)
FW(A,k,i,j,B);
#pragma omp for collapse(2) nowait
for(i=k+B; i<N; i+=B)
for(j=0; j<k; j+=B)
FW(A,k,i,j,B);
#pragma omp for collapse(2) nowait
for(i=k+B; i<N; i+=B)
for(j=k+B; j<N; j+=B)
FW(A,k,i,j,B);
}
}
gettimeofday(&t2,0);
time=(double)((t2.tv_sec-t1.tv_sec)*1000000+t2.tv_usec-t1.tv_usec)/1000000;
printf("FW_TILED,%d,%d,%.4f\n", N,B,time);
write_arr_to_file("out_t_tasks", A, N);
/*
for(i=0; i<N; i++)
for(j=0; j<N; j++) fprintf(stdout,"%d\n", A[i][j]);
*/
// A is intentionally not freed (process exit reclaims it)
return 0;
}
inline int min(int a, int b)
{
if(a<=b)return a;
else return b;
}
/*
 * Floyd-Warshall relaxation of one N x N tile of A, updated in place:
 * paths A[i][j] are relaxed through intermediate vertices k in [K, K+N),
 * for rows i in [I, I+N) and columns j in [J, J+N).
 */
inline void FW(int **A, int K, int I, int J, int N)
{
    int k, i, j;
    for (k = K; k < K + N; k++) {
        for (i = I; i < I + N; i++) {
            for (j = J; j < J + N; j++) {
                int through_k = A[i][k] + A[k][j];
                A[i][j] = min(A[i][j], through_k);
            }
        }
    }
}
/*
 * Write the N x N matrix A to text file `name', one row per line with
 * space-separated entries (the format read_arr_from_file() reads back).
 * On open failure a diagnostic is printed and nothing is written.
 * Fixes: the fopen() result was not checked (fprintf on NULL would crash),
 * and the stream was never fclose()d, leaking the descriptor and leaving
 * buffered output unflushed until process exit.
 */
void write_arr_to_file(char *name, int **A, int N)
{
    FILE *f;
    f = fopen(name, "w");
    if (f == NULL) {
        fprintf(stderr, "write_arr_to_file: cannot open %s\n", name);
        return;
    }
    int i, j;
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            fprintf(f, "%d ", A[i][j]);
        }
        fprintf(f, "\n");
    }
    fclose(f);  /* flush buffered output and release the descriptor */
}
/*
 * Allocate a fresh N x N matrix and fill it with integers read from file s.
 * The incoming A argument is ignored and overwritten (kept only for
 * interface compatibility with existing callers); the newly allocated
 * matrix is returned, or NULL if the file cannot be opened.
 * Fixes: the fopen() result was not checked (fscanf on NULL would crash),
 * the stream was never fclose()d, and fscanf failures were ignored
 * (leaving uninitialized cells on a short file -- now zero-filled).
 */
int** read_arr_from_file(int **A ,char *s , int N){
    FILE *myFile;
    myFile = fopen(s, "r");
    if (myFile == NULL) {
        fprintf(stderr, "read_arr_from_file: cannot open %s\n", s);
        return NULL;
    }
    A = (int **)malloc(N * sizeof(int *));
    int i, j;
    for (i = 0; i < N; i++) {
        A[i] = (int *)malloc(N * sizeof(int));
    }
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            if (fscanf(myFile, "%d", &A[i][j]) != 1) {
                A[i][j] = 0;  /* short/garbled file: pad instead of garbage */
            }
        }
    }
    fclose(myFile);  /* release the descriptor */
    return A;
}
/*
 * Fill the n x n adjacency matrix with pseudo-random edge weights in
 * [0, 1048575] and zero the diagonal.
 * NOTE(review): the `m' parameter is unused -- main() passes 128*N to the
 * _par variant; presumably an intended edge count.  Confirm.
 * NOTE(review): lrand48() uses shared hidden state, so calling it inside an
 * OpenMP parallel loop is not thread-safe and the matrix is not reproducible
 * across runs/thread counts (abs() is also redundant: lrand48() is
 * non-negative, so the remainder already is too).
 */
void graph_init_random(int **adjm, int seed, int n, int m)
{
unsigned int i, j;
srand48(seed);
#pragma omp parallel for collapse(2)
for(i=0; i<n; i++)
for(j=0; j<n; j++)
adjm[i][j] = abs((( int)lrand48()) % 1048576);
for(i=0; i<n; i++)adjm[i][i]=0;
}
|
GB_binop__bor_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// The GB_* macros below specialize the generic template files #included by
// each kernel in this file for the BOR (bitwise-or) operator on int64_t.
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bor_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__bor_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__bor_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__bor_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bor_int64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bor_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__bor_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bor_int64)
// C=scalar+B GB (_bind1st__bor_int64)
// C=scalar+B' GB (_bind1st_tran__bor_int64)
// C=A+scalar GB (_bind2nd__bor_int64)
// C=A'+scalar GB (_bind2nd_tran__bor_int64)
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = (aij) | (bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x) | (y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
// (the GxB_NO_* symbols are compile-time opt-outs from GB_control.h)
#define GB_DISABLE \
(GxB_NO_BOR || GxB_NO_INT64 || GxB_NO_BOR_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Compiled out (#if 0): BOR is not in the op list supported by this kernel.
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Generated kernel: the loop body lives in the included template.
GrB_Info GB (_Cdense_ewise3_noaccum__bor_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// Generated kernel: B is sliced into B_ntasks tasks (B_ek_slicing) and
// accumulated into the dense C by the included template.
GrB_Info GB (_Cdense_accumB__bor_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// Generated kernel: the scalar b is unpacked from p_bwork and the
// accumulation loop lives in the included template.
// Fix: removed the second, unreachable "return (GrB_SUCCESS) ;" that the
// generator emitted after the inner block (the block always returns first).
// NOTE(review): per the file header this file is auto-generated; the same
// fix should be applied to the Generator/ source.
GrB_Info GB (_Cdense_accumb__bor_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Compiled out (#if 0): col/row-scale kernels are not generated for BOR
// (listed as "(none)" in the header above).
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Compiled out (#if 0), see above.
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// Generated kernel: slicing workspaces are declared here and released by
// GB_FREE_WORK; the add itself lives in GB_add_template.c.
GrB_Info GB (_AaddB__bor_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// Generated kernel: the whole method lives in the included meta file.
GrB_Info GB (_AemultB_08__bor_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__bor_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// GB_BINOP_FLIP is 0 for BOR (commutative), so only the #else branch
// below is compiled for this kernel.
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__bor_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__bor_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x | Bx [p] over all bnz entries; entries absent from the bitmap
// Bb (GBB test -- macro from GB.h, presumably true when Bb is NULL) are
// skipped.  Embarrassingly parallel over p.
GrB_Info GB (_bind1st__bor_int64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int64_t bij = GBX (Bx, p, false) ;
Cx [p] = (x) | (bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] | y, symmetric to bind1st above.
GrB_Info GB (_bind2nd__bor_int64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int64_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij) | (y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by the included GB_unop_transpose.c template.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x) | (aij) ; \
}
GrB_Info GB (_bind1st_tran__bor_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for subsequent kernels (generator artifact; it sits
// after the returns on purpose -- preprocessor text, not executable code)
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by the included GB_unop_transpose.c template.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij) | (y) ; \
}
GrB_Info GB (_bind2nd_tran__bor_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
// closes the #ifndef GBCOMPACT at the top of this file
#endif
|
core_zlauum.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> c d s
*
**/
#include "core_blas.h"
#include "plasma_types.h"
#include "core_lapack.h"
/***************************************************************************//**
*
* @ingroup core_lauum
*
* Computes the product U * U^H or L^H * L, where the triangular
* factor U or L is stored in the upper or lower triangular part of
* the array A.
*
* If uplo = 'U' or 'u' then the upper triangle of the result is stored,
* overwriting the factor U in A.
* If uplo = 'L' or 'l' then the lower triangle of the result is stored,
* overwriting the factor L in A.
*
*******************************************************************************
*
* @param[in] uplo
* = PlasmaUpper: Upper triangle of A is stored;
* = PlasmaLower: Lower triangle of A is stored.
*
*
* @param[in] n
* The order of the matrix A. n >= 0.
*
* @param[in,out] A
* On entry, the triangular factor U or L.
* On exit, if uplo = 'U', the upper triangle of A is
* overwritten with the upper triangle of the product U * U^H;
* if uplo = 'L', the lower triangle of A is overwritten with
* the lower triangle of the product L^H * L.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,n).
*
 * @retval 0 on successful exit
 * @retval <0 if the i-th argument had an illegal value (returned as -i)
 *
*
******************************************************************************/
// Compute U * U^H (uplo = PlasmaUpper) or L^H * L (uplo = PlasmaLower)
// in place on the triangular part of A, delegating to the LAPACKE
// worker routine in column-major layout. Returns the LAPACKE info code
// (0 on success, < 0 if an argument was illegal).
int core_zlauum(plasma_enum_t uplo,
                int n,
                plasma_complex64_t *A, int lda)
{
    int info = LAPACKE_zlauum_work(LAPACK_COL_MAJOR,
                                   lapack_const(uplo), n, A, lda);
    return info;
}
/******************************************************************************/
// OpenMP-task wrapper around core_zlauum().
// Spawns a task that owns the whole tile A[0:lda*n] with an inout
// dependence, so it is serialized against any other task reading or
// writing A. The body is skipped if the sequence has already failed,
// and a non-zero info from core_zlauum marks the sequence as failed.
void core_omp_zlauum(plasma_enum_t uplo,
                     int n,
                     plasma_complex64_t *A, int lda,
                     plasma_sequence_t *sequence, plasma_request_t *request)
{
    #pragma omp task depend(inout:A[0:lda*n])
    {
        // only run if no earlier task in this sequence has failed
        if (sequence->status == PlasmaSuccess) {
            int info = core_zlauum(uplo, n, A, lda);
            if (info != PlasmaSuccess) {
                coreblas_error("core_zlauum() failed");
                plasma_request_fail(sequence, request, PlasmaErrorInternal);
            }
        }
    }
}
|
map_reduce_task.h | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
#pragma once
#include <omp.h>
#include "pointers/pointer.h"
#include "tasks/task.h"
namespace flash {
// Task that applies a user map function element-wise over a block of
// `len` elements: out[i] = map_fn(in[i]), parallelized with OpenMP.
// Read/write regions are registered with the BaseTask framework in the
// constructor so the scheduler can stage them into memory.
template<class InType, class OutType>
class MapTask : public BaseTask {
  typedef std::function<OutType(const InType &)> MapFnType;
  MapFnType map_fn;              // element-wise transform to apply
  flash_ptr<InType> in_fptr;     // start of this task's input block
  flash_ptr<OutType> out_fptr;   // start of this task's output block
  FBLAS_UINT len;                // number of elements in the block

 public:
  // Registers [base_in+start_idx, +blk_size) as a read region and
  // [base_out+start_idx, +blk_size) as a write region.
  MapTask(MapFnType &mapfn, flash_ptr<InType> base_in,
          flash_ptr<OutType> base_out, FBLAS_UINT start_idx,
          FBLAS_UINT blk_size)
      : map_fn(mapfn) {
    this->len = blk_size;
    this->in_fptr = base_in + start_idx;
    this->out_fptr = base_out + start_idx;
    StrideInfo sinfo{0, 0, 0};
    sinfo.n_strides = 1;
    sinfo.len_per_stride = blk_size * sizeof(InType);
    this->add_read(in_fptr, sinfo);
    sinfo.len_per_stride = blk_size * sizeof(OutType);
    this->add_write(out_fptr, sinfo);
  }

  // Applies map_fn to every element of the staged block in parallel.
  // NOTE(review): the output pointer is also looked up in in_mem_ptrs —
  // presumably the framework maps both read and write regions through
  // that table; confirm against BaseTask.
  void execute() {
    InType * in_ptr = (InType *) this->in_mem_ptrs[this->in_fptr];
    OutType *out_ptr = (OutType *) this->in_mem_ptrs[this->out_fptr];
#pragma omp parallel for
    for (FBLAS_UINT i = 0; i < len; i++) {
      out_ptr[i] = this->map_fn(in_ptr[i]);
    }
  }

  // Total staged footprint of this task in bytes (input + output block).
  FBLAS_UINT size() {
    return this->len * (sizeof(InType) + sizeof(OutType));
  }
};
// Task that reduces a block of `len` elements with a user-supplied
// associative operator `op`, starting from the identity element `id`:
// result = op(...op(op(id, in[0]), in[1])..., in[len-1]) up to
// re-association across threads (op must be associative for the
// parallel result to match the serial one).
template<class T>
class ReduceTask : public BaseTask {
  typedef std::function<T(T &, T &)> OpType;
  OpType op;             // associative reduction operator
  flash_ptr<T> in_fptr;  // start of this task's input block
  T id;                  // identity element of `op`
  T result;              // reduction result, valid after execute()
  FBLAS_UINT len;        // number of elements in the block

 public:
  // Registers [base_in+start_idx, +blk_size) as a read region.
  ReduceTask(OpType &op, flash_ptr<T> base_in, T id, FBLAS_UINT start_idx,
             FBLAS_UINT blk_size)
      : op(op), id(id), result(id) {
    this->len = blk_size;
    this->in_fptr = base_in + start_idx;
    StrideInfo sinfo{0, 0, 0};
    sinfo.n_strides = 1;
    sinfo.len_per_stride = blk_size * sizeof(T);
    this->add_read(this->in_fptr, sinfo);
  }

  // Parallel block-wise reduction: each thread folds its contiguous
  // sub-range into a local accumulator, then the locals are merged
  // into `result` under a critical section.
  void execute() {
    T *in_ptr = (T *) this->in_mem_ptrs[this->in_fptr];
    GLOG_ASSERT(in_ptr != nullptr, "null input to ReduceTask");
    // BUG FIX: omp_get_num_threads() returns 1 when called outside a
    // parallel region (OpenMP spec), which made thread_blk_size == len
    // and serialized the whole reduction into a single iteration.
    // omp_get_max_threads() gives the team size the parallel region
    // below will actually use.
    FBLAS_UINT n_threads = omp_get_max_threads();
    FBLAS_UINT thread_blk_size = ROUND_UP(this->len, n_threads) / n_threads;
    // reset so execute() is idempotent across repeated invocations
    this->result = id;
#pragma omp parallel for num_threads(n_threads)
    for (FBLAS_UINT i = 0; i < n_threads; i++) {
      T l_res(id);
      FBLAS_UINT start = i * thread_blk_size;
      // clamp the last chunk so we never read past the block
      FBLAS_UINT end = std::min((i + 1) * thread_blk_size, this->len);
      for (FBLAS_UINT j = start; j < end; j++) {
        l_res = op(l_res, in_ptr[j]);
      }
      // merge this thread's partial result into the shared total
#pragma omp critical
      { this->result = op(this->result, l_res); }
    }
  }

  // Reduction result; only meaningful after execute() has run.
  T get_result() {
    return this->result;
  }

  // Estimated staged footprint in bytes (input block plus per-thread
  // partial accumulators); uses the same thread count as execute().
  FBLAS_UINT size() {
    return (this->len + omp_get_max_threads() + 1) * sizeof(T);
  }
};
} // namespace flash |
kmeans.h | #ifndef SRC_CLUSTERING_KMEANS_H_
#define SRC_CLUSTERING_KMEANS_H_
#include "clustering/kmeans_trait.h"
#include "util/alignment.h"
#include <limits>
#include <random>
#include <vector>
namespace GraphSfM {
/**
* @brief Kind of initialization of the kmeans centers
*/
enum class KMeansInitType
{
    KMEANS_INIT_RANDOM, // Standard Lloyd algorithm (centers drawn uniformly at random)
    KMEANS_INIT_PP,     // K-means++ initialization (distance-weighted sampling)
};
/**
* @brief Compute minimum distance to any center
* @param pts Input points
* @param centers Centers
* @param[out] dists computed (minimum) distance to any center
*/
template<typename DataType>
void MinimumDistanceToAnyCenter(
const std::vector<DataType>& pts,
const std::vector<DataType>& centers,
std::vector<typename KMeansVectorDataTrait<DataType>::scalar_type>& dists )
{
using trait = KMeansVectorDataTrait<DataType>;
dists.resize(pts.size(), std::numeric_limits<typename trait::scalar_type>::max());
#pragma omp parallel for
for (int id_pt = 0; id_pt < static_cast<int>(pts.size()); ++id_pt) {
const auto & pt = pts[id_pt];
for (const auto& c : centers) {
const typename trait::scalar_type cur_d = trait::L2(pt, c);
dists[id_pt] = std::min(dists[id_pt], cur_d);
}
}
}
/**
* @brief Compute Nearest center Id of a given point
* @param pt Query point
* @param centers list of test centers
* @return id of the nearest center (0-based)
*/
template<typename DataType>
uint32_t NearestCenterID(const DataType& pt,
const std::vector<DataType>& centers)
{
using trait = KMeansVectorDataTrait<DataType>;
const uint32_t nb_cluster = static_cast<uint32_t>(centers.size());
typename trait::scalar_type min_dist = std::numeric_limits<typename trait::scalar_type>::max();
uint32_t nearest_center = nb_cluster;
for (uint32_t cur_center = 0; cur_center < nb_cluster; ++cur_center) {
const typename trait::scalar_type cur_dist = trait::L2(pt, centers[cur_center]);
if (cur_dist < min_dist) {
min_dist = cur_dist;
nearest_center = cur_center;
}
}
return nearest_center;
}
/**
* @brief Compute center of mass of a set a points
* @param pts List of points
* @param assigned_center Id of the center to be affected to a given point
* @param nb_center Number of center of mass in the result
* @return New centers of mass
*/
// Recompute the centers of mass: accumulate every point into its
// assigned center, then divide each accumulator by its population.
// NOTE(review): a center with zero assigned points is divided by 0 —
// whether trait::divide guards against that is defined elsewhere; confirm.
template<typename DataType>
std::vector<DataType> ComputeCenterOfMass(const std::vector<DataType>& pts,
                                          const std::vector<uint32_t>& assigned_center,
                                          const uint32_t nb_center)
{
    using trait = KMeansVectorDataTrait<DataType>;
    std::vector<DataType> mass_centers(nb_center, trait::null(pts[0]));
    std::vector<uint32_t> population(nb_center, 0);

    // Serial accumulation: sum every point into its assigned center.
    for (size_t idx = 0; idx < pts.size(); ++idx) {
        const uint32_t center_id = assigned_center[idx];
        trait::accumulate(mass_centers[center_id], pts[idx]);
        ++population[center_id];
    }

    // Parallel normalization: each center divides by its own count.
    #pragma omp parallel for
    for (int center_id = 0; center_id < static_cast<int>(nb_center); ++center_id) {
        trait::divide(mass_centers[center_id], population[center_id]);
    }
    return mass_centers;
}
/**
* @brief Compute simple kmeans clustering on specified data
* @param source_data Input data
* @param[out] cluster_assignment index for each point in the input set to a specified cluster
* @param[out] centers Centers of the clusters
* @param nb_cluster requested number of cluster in the output
* @param max_nb_iteration maximum number of iteration to do for clustering
 * @note This is the standard Lloyd algorithm
*/
// Lloyd k-means with a choice of initialization (k-means++ by default).
// Fills cluster_assignment (one 0-based center id per input point) and
// centers (nb_cluster centroids). Deterministic: the RNG is seeded with
// mt19937_64::default_seed on every call.
// NOTE(review): the k-means++ branch appends to `centers` via
// emplace_back — presumably `centers` is empty on entry; confirm callers.
template <typename DataType>
void KMeans(const std::vector<DataType>& source_data,
            std::vector<uint32_t>& cluster_assignment,
            std::vector<DataType>& centers,
            const uint32_t nb_cluster,
            const uint32_t max_nb_iteration = std::numeric_limits<uint32_t>::max(),
            const KMeansInitType init_type = KMeansInitType::KMEANS_INIT_PP)
{
    // nothing to cluster
    if (source_data.size() == 0) {
        return;
    }
    using trait = KMeansVectorDataTrait<DataType>;
    std::mt19937_64 rng(std::mt19937_64::default_seed);
    // 1 - init center of mass
    if (init_type == KMeansInitType::KMEANS_INIT_PP) {
        // Kmeans++ init:
        // first one is a random one
        // the others based on the importance probability (Di / \sum_i Di) where:
        // Di is the minimum distance to any created centers already created
        std::uniform_int_distribution<size_t> distrib_first(0, source_data.size() - 1);
        centers.reserve(nb_cluster);
        centers.emplace_back(source_data[ distrib_first( rng )]);
        std::vector<typename trait::scalar_type> dists;
        for (uint32_t id_center = 1; id_center < nb_cluster; ++id_center) {
            // Compute Di / \sum Di pdf
            MinimumDistanceToAnyCenter(source_data, centers, dists);
            // discrete_distribution normalizes the weights itself
            std::discrete_distribution<size_t> distrib_c(dists.cbegin(), dists.cend());
            // Sample a point from this distribution
            centers.emplace_back(source_data[distrib_c(rng)]);
        }
    } else if (init_type == KMeansInitType::KMEANS_INIT_RANDOM) {
        DataType min, max;
        trait::minMax(source_data, min, max);
        // Standard Lloyd init: pick nb_cluster random data points as centers
        centers.resize(nb_cluster);
        std::uniform_int_distribution<size_t> distrib(0, source_data.size() - 1);
        for (auto & cur_center : centers) {
            cur_center = source_data[distrib(rng)];
        }
    } else { // Invalid Kmeans initialization type
        return;
    }
    // Assign all elements the sentinel `nb_cluster` so the first pass
    // always detects a change for every point.
    cluster_assignment.resize(source_data.size(), nb_cluster);
    bool changed;
    uint32_t id_iteration = 0;
    // 2 - Perform kmeans
    do {
        changed = false;
        // 2.1 affect center to each points
        // NOTE(review): concurrent threads may all store `changed = true`;
        // every write stores the same value, presumably accepted as benign —
        // confirm this is intentional.
        #pragma omp parallel for shared(changed)
        for (int id_pt = 0; id_pt < static_cast<int>(source_data.size()); ++id_pt) {
            const DataType & cur_pt = source_data[id_pt];
            // Compute nearest center of this point
            const uint32_t nearest_center = NearestCenterID(cur_pt, centers);
            if (cluster_assignment[id_pt] != nearest_center) {
                cluster_assignment[id_pt] = nearest_center;
                changed = true;
            }
        }
        // 2.2 Compute new centers of mass
        centers = ComputeCenterOfMass(source_data, cluster_assignment, nb_cluster);
        ++id_iteration;
    } while(changed && id_iteration < max_nb_iteration);
}
} // namespace GraphSfM
#endif
|
sequences.c | #include "sequences.h"
/* preprocess_db function preprocess the database sequences named input_filename. The preprocessed database filenames start with out_filename. */
/* Preprocess a FASTA database:
 *  1. scan the file to count sequences and record title/sequence lengths;
 *  2. load sequences and titles into memory;
 *  3. sort everything ascending by sequence length (using n_procs threads);
 *  4. write "<out>.desc" (titles), "<out>.info" (count, residues, max title
 *     length) and "<out>.seq" (lengths then re-encoded residues).
 * Residues are remapped from 'A'..'Z' to 0..24 with J/O/U replaced by a
 * dummy symbol. Exits the process on any I/O or allocation failure.
 * NOTE(review): lengths are unsigned short, so sequences/titles longer than
 * 65535 characters would overflow — presumably out of scope; confirm.
 * NOTE(review): realloc/malloc results for the length arrays are unchecked. */
void preprocess_db (char * input_filename, char * out_filename, int n_procs) {
    unsigned long int sequences_count=0, D=0, disp, accum, chunk_size, i, j, k;
    unsigned short int *sequences_lengths=NULL, * title_lengths=NULL, length=0, tmp_length, ok;
    char ** sequences=NULL, **titles=NULL, buffer[BUFFER_SIZE], filename[BUFFER_SIZE], * bin_filename, * res, *tmp_seq, *b=NULL, diff, new_line='\n';
    FILE * sequences_file, *titles_file, *info_file, * bin_file;
    int max_title_length;
    double tick= dwalltime();
    // open database sequence file
    sequences_file = fopen(input_filename,"r");
    if (sequences_file == NULL) {
        printf("SWIMM: An error occurred while opening input sequence file.\n");
        exit(2);
    }
    // Allocate memory for sequences_lengths array
    sequences_lengths = (unsigned short int *) malloc (ALLOCATION_CHUNK*sizeof(unsigned short int));
    title_lengths = (unsigned short int *) malloc (ALLOCATION_CHUNK*sizeof(unsigned short int));
    // Pass 1: calculate number of sequences in database and their lengths.
    // A title may span several BUFFER_SIZE reads; it ends at the first
    // line that contains a newline. Sequence lines are counted minus their
    // trailing newline (assumes every line ends with '\n' — see NOTE above).
    sequences_count=0;
    res = fgets(buffer,BUFFER_SIZE,sequences_file);
    while (res != NULL) {
        length = 0;
        // read title
        while (strrchr(buffer,new_line) == NULL) {
            length += strlen(buffer);
            res = fgets(buffer,BUFFER_SIZE,sequences_file);
        }
        // +1 leaves room for the terminating '\0'
        title_lengths[sequences_count] = length + strlen(buffer) + 1;
        // read sequence
        length = 0;
        res = fgets(buffer,BUFFER_SIZE,sequences_file);
        while ((res != NULL) && (buffer[0] != '>')) {
            length += strlen(buffer)-1;
            res = fgets(buffer,BUFFER_SIZE,sequences_file);
        }
        sequences_lengths[sequences_count] = length;
        (sequences_count)++;
        // grow the length arrays in ALLOCATION_CHUNK steps
        if ((sequences_count) % ALLOCATION_CHUNK == 0) {
            sequences_lengths = (unsigned short int *) realloc(sequences_lengths,((sequences_count)+ALLOCATION_CHUNK)*sizeof(unsigned short int));
            title_lengths = (unsigned short int *) realloc(title_lengths,((sequences_count)+ALLOCATION_CHUNK)*sizeof(unsigned short int));
        }
    }
    // Allocate memory for sequences array (residues are stored WITHOUT a
    // terminating '\0'; each buffer holds exactly sequences_lengths[i] chars)
    sequences = (char **) malloc(sequences_count*sizeof(char *));
    if (sequences == NULL) { printf("SWIMM: An error occurred while allocating memory for sequences.\n"); exit(1); }
    for (i=0; i<sequences_count; i++ ) {
        sequences[i] = (char *) malloc(sequences_lengths[i]*sizeof(char));
        if (sequences[i] == NULL) { printf("SWIMM: An error occurred while allocating memory.\n"); exit(1); }
    }
    // Rewind sequences database file
    rewind(sequences_file);
    // Pass 2: read sequences from the database file and load them in sequences array
    i = 0;
    res = fgets(buffer,BUFFER_SIZE,sequences_file);
    while (res != NULL) {
        // skip title (possibly spanning several reads)
        while (strrchr(buffer,new_line) == NULL)
            res = fgets(buffer,BUFFER_SIZE,sequences_file);
        // read sequence; `length` is kept 1-based so (length-1) is the offset
        length = 1;
        res = fgets(buffer,BUFFER_SIZE,sequences_file);
        while ((res != NULL) && (buffer[0] != '>')) {
            //printf("%s %d\n",buffer,strlen(buffer));
            strncpy(sequences[i]+(length-1),buffer,strlen(buffer)-1);
            length += strlen(buffer)-1;
            res = fgets(buffer,BUFFER_SIZE,sequences_file);
        }
        i++;
    }
    // Rewind sequences database file
    rewind(sequences_file);
    // Allocate memory for titles array
    titles = (char **) malloc(sequences_count*sizeof(char *));
    if (titles == NULL) { printf("SWIMM: An error occurred while allocating memory for sequence titles.\n"); exit(1); }
    for (i=0; i<sequences_count; i++ ) {
        titles[i] = (char *) malloc(title_lengths[i]*sizeof(char));
        if (titles[i] == NULL) { printf("SWIMM: An error occurred while allocating memory for sequence titles.\n"); exit(1); }
    }
    // calculate max title length (includes the '\0' slot counted in pass 1)
    max_title_length = 0;
    for (i=0; i<sequences_count ; i++)
        max_title_length = (max_title_length > title_lengths[i] ? max_title_length : title_lengths[i]);
    // free memory
    free(title_lengths);
    // Pass 3: read sequence headers (lines starting with '>')
    i = 0;
    res = fgets(buffer,BUFFER_SIZE,sequences_file);
    while (res != NULL) {
        // discard sequences
        while ((res != NULL) && (buffer[0] != '>'))
            res = fgets(buffer,BUFFER_SIZE,sequences_file);
        if (res != NULL){
            // read header (newline stripped from each fragment)
            length = 1;
            do{
                strncpy(titles[i]+(length-1),buffer,strlen(buffer)-1);
                length += strlen(buffer)-1;
                res = fgets(buffer,BUFFER_SIZE,sequences_file);
            } while (strrchr(buffer,new_line) == NULL);
            titles[i][length] = '\0';
            i++;
        }
    }
    // Close sequences database file
    fclose(sequences_file);
    // Sort sequence array by length (sequences, titles and lengths together)
    sort_sequences(sequences,titles,sequences_lengths, sequences_count, n_procs);
    // Create titles file: this text file contains the sequences description
    sprintf(filename,"%s.desc",out_filename);
    titles_file = fopen(filename,"w");
    if (titles_file == NULL) {
        printf("SWIMM: An error occurred while opening sequence header file.\n");
        exit(2);
    }
    // write titles, one per line
    for (i=0; i<sequences_count ; i++)
        fprintf(titles_file,"%s\n",titles[i]);
    // close titles file
    fclose(titles_file);
    // calculate total number of residues
    #pragma omp parallel for reduction(+:D) num_threads(n_procs)
    for (i=0; i< sequences_count; i++ )
        D = D + sequences_lengths[i];
    // transform bidimensional sequence array to a unidimensional one
    b = (char *) malloc(D*sizeof(char));
    if (b == NULL) { printf("SWIMM: An error occurred while allocating memory for sequences.\n"); exit(1); }
    disp = 0;
    for (i=0; i< sequences_count; i++ ) {
        memcpy(b+disp,sequences[i],sequences_lengths[i]);
        disp += sequences_lengths[i];
    }
    // Free memory
    for (i=0; i< sequences_count; i++ )
        free(sequences[i]);
    free(sequences);
    // preprocess vect sequences DB
    // original alphabet: 'A'..'Z' => preprocessed alphabet: 0..24 (J, O and U are replaced with dummy symbol)
    #pragma omp parallel for private(diff) num_threads(n_procs) schedule(dynamic)
    for (i=0; i< D; i++) {
        b[i] = ((b[i] == 'J') ? DUMMY_ELEMENT : b[i]);
        b[i] = ((b[i] == 'O') ? DUMMY_ELEMENT : b[i]);
        b[i] = ((b[i] == 'U') ? DUMMY_ELEMENT : b[i]);
        // offset grows by one for every removed letter below this residue,
        // compacting the alphabet to 0..24
        diff = 'A';
        diff = (b[i] > 'J' ? diff+1 : diff);
        diff = (b[i] > 'O' ? diff+1 : diff);
        diff = (b[i] > 'U' ? diff+1 : diff);
        b[i] -= diff;
    }
    // Create info file: this file contains sequences count, number of residues and the maximum title length
    sprintf(filename,"%s.info",out_filename);
    info_file = fopen(filename,"w");
    if (info_file == NULL) {
        printf("SWIMM: An error occurred while opening info file.\n");
        exit(2);
    }
    // Write info
    fprintf(info_file,"%ld %ld %d",sequences_count,D,max_title_length);
    // close info file
    fclose(info_file);
    // Create sequences binary file: this file contains first the sequences lengths and then the preprocessed sequences residues
    sprintf(filename,"%s.seq",out_filename);
    bin_file = fopen(filename,"wb");
    if (bin_file == NULL) {
        printf("SWIMM: An error occurred while opening sequence file.\n");
        exit(2);
    }
    // Write vectorized sequences lengths
    fwrite(sequences_lengths,sizeof(unsigned short int),sequences_count,bin_file);
    //Write sequences
    fwrite(b,sizeof(char),D,bin_file);
    // Close bin file
    fclose(bin_file);
    // free memory
    free(sequences_lengths);
    free(b);
    printf("\nSWIMM v%s\n\n",VERSION);
    printf("Database file:\t\t\t %s\n",input_filename);
    printf("Database size:\t\t\t%ld sequences (%ld residues) \n",sequences_count,D);
    printf("Preprocessed database name:\t%s\n",out_filename);
    printf("Preprocessing time:\t\t%lf seconds\n\n",dwalltime()-tick);
}
// Load query sequence from file in a
/* Load query sequences from a FASTA file into aligned buffers.
 * Outputs (all via out-parameters):
 *  - ptr_query_sequences: one flat, MEMALIGN-aligned buffer of re-encoded
 *    residues, each sequence padded with DUMMY_ELEMENT to a multiple of
 *    QUERY_SEQ_LEN_MULT;
 *  - ptr_query_sequences_lengths: original (sorted) lengths;
 *  - ptr_m: padded lengths; ptr_query_sequences_disp: per-sequence offsets;
 *  - ptr_query_headers: title strings; query_sequences_count / ptr_Q:
 *    sequence count and total padded residues.
 * Sequences are sorted ascending by length before packing.
 * Exits the process on any I/O or allocation failure. */
void load_query_sequences(char * queries_filename, char ** ptr_query_sequences, char *** ptr_query_headers, unsigned short int **ptr_query_sequences_lengths,
    unsigned short int **ptr_m, unsigned long int * query_sequences_count, unsigned long int * ptr_Q, unsigned int ** ptr_query_sequences_disp, int n_procs) {
    long int i, j, k;
    unsigned long int sequences_count=0, Q=0, disp, accum, chunk_size;
    unsigned int * sequences_disp;
    unsigned short int *sequences_lengths, *m, * title_lengths, *tmp, length=0, tmp_length, ok;
    char ** sequences=NULL, **titles, buffer[BUFFER_SIZE], filename[BUFFER_SIZE], * bin_filename, * res, *tmp_seq, *a, diff, new_line='\n';
    FILE * sequences_file;
    // open query sequence filename
    sequences_file = fopen(queries_filename,"r");
    if (sequences_file == NULL) {
        printf("SWIMM: An error occurred while opening input sequence file.\n");
        exit(2);
    }
    // Allocate memory for sequences_lengths array
    sequences_lengths = (unsigned short int *) malloc (ALLOCATION_CHUNK*sizeof(unsigned short int));
    title_lengths = (unsigned short int *) malloc (ALLOCATION_CHUNK*sizeof(unsigned short int));
    // Pass 1: calculate number of sequences and their title/sequence lengths
    sequences_count=0;
    res = fgets(buffer,BUFFER_SIZE,sequences_file);
    while (res != NULL) {
        length = 0;
        // read title
        while (strrchr(buffer,new_line) == NULL) {
            length += strlen(buffer);
            res = fgets(buffer,BUFFER_SIZE,sequences_file);
        }
        title_lengths[sequences_count] = length + strlen(buffer) + 1;
        // read sequence
        length = 0;
        res = fgets(buffer,BUFFER_SIZE,sequences_file);
        while ((res != NULL) && (buffer[0] != '>')) {
            length += strlen(buffer)-1;
            res = fgets(buffer,BUFFER_SIZE,sequences_file);
        }
        sequences_lengths[sequences_count] = length;
        (sequences_count)++;
        // grow the length arrays in ALLOCATION_CHUNK steps
        if ((sequences_count) % ALLOCATION_CHUNK == 0) {
            sequences_lengths = (unsigned short int *) realloc(sequences_lengths,((sequences_count)+ALLOCATION_CHUNK)*sizeof(unsigned short int));
            title_lengths = (unsigned short int *) realloc(title_lengths,((sequences_count)+ALLOCATION_CHUNK)*sizeof(unsigned short int));
        }
    }
    // copy lengths to aligned buffer (m is later overwritten with padded lengths)
    tmp = sequences_lengths;
    m = (unsigned short int *) _mm_malloc (sequences_count*sizeof(unsigned short int), MEMALIGN);
    sequences_lengths = (unsigned short int *) _mm_malloc (sequences_count*sizeof(unsigned short int), MEMALIGN);
    memcpy(m,tmp,sequences_count*sizeof(unsigned short int));
    memcpy(sequences_lengths,tmp,sequences_count*sizeof(unsigned short int));
    free(tmp);
    // Allocate memory for sequences array
    sequences = (char **) malloc(sequences_count*sizeof(char *));
    if (sequences == NULL) { printf("SWIMM: An error occurred while allocating memory for query sequences.\n"); exit(1); }
    for (i=0; i<sequences_count; i++ ) {
        sequences[i] = (char *) malloc(sequences_lengths[i]*sizeof(char));
        if (sequences[i] == NULL) { printf("SWIMM: An error occurred while allocating memory.\n"); exit(1); }
    }
    // Rewind sequences database file
    rewind(sequences_file);
    // Pass 2: read sequences from the file and load them in sequences array
    i = 0;
    res = fgets(buffer,BUFFER_SIZE,sequences_file);
    while (res != NULL) {
        // skip title
        while (strrchr(buffer,new_line) == NULL)
            res = fgets(buffer,BUFFER_SIZE,sequences_file);
        // read sequence (length kept 1-based, newline stripped per fragment)
        length = 1;
        res = fgets(buffer,BUFFER_SIZE,sequences_file);
        while ((res != NULL) && (buffer[0] != '>')) {
            //printf("%s %d\n",buffer,strlen(buffer));
            strncpy(sequences[i]+(length-1),buffer,strlen(buffer)-1);
            length += strlen(buffer)-1;
            res = fgets(buffer,BUFFER_SIZE,sequences_file);
        }
        i++;
    }
    // Rewind sequences database file
    rewind(sequences_file);
    // Allocate memory for titles array
    titles = (char **) malloc(sequences_count*sizeof(char *));
    if (titles == NULL) { printf("SWIMM: An error occurred while allocating memory for sequence titles.\n"); exit(1); }
    for (i=0; i<sequences_count; i++ ) {
        titles[i] = (char *) malloc(title_lengths[i]*sizeof(char));
        if (titles[i] == NULL) { printf("SWIMM: An error occurred while allocating memory for sequence titles.\n"); exit(1); }
    }
    // Pass 3: read headers (lines starting with '>')
    i = 0;
    res = fgets(buffer,BUFFER_SIZE,sequences_file);
    while (res != NULL) {
        // discard sequences
        while ((res != NULL) && (buffer[0] != '>'))
            res = fgets(buffer,BUFFER_SIZE,sequences_file);
        if (res != NULL){
            // read header
            length = 1;
            do{
                strncpy(titles[i]+(length-1),buffer,strlen(buffer)-1);
                length += strlen(buffer)-1;
                res = fgets(buffer,BUFFER_SIZE,sequences_file);
            } while (strrchr(buffer,new_line) == NULL);
            titles[i][length] = '\0';
            i++;
        }
    }
    // Close sequences database file
    fclose(sequences_file);
    // Sort sequence array by length
    sort_sequences(sequences,titles,sequences_lengths, sequences_count, n_procs);
    // calculate total number of residues, each length rounded up to a
    // multiple of QUERY_SEQ_LEN_MULT
    #pragma omp parallel for reduction(+:Q) num_threads(n_procs)
    for (i=0; i< sequences_count; i++ )
        Q = Q + (ceil( (double) sequences_lengths[i] / (double) QUERY_SEQ_LEN_MULT) * QUERY_SEQ_LEN_MULT);
    *ptr_Q = Q;
    a = (char *) _mm_malloc(Q*sizeof(char), MEMALIGN);
    if (a == NULL) { printf("SWIMM: An error occurred while allocating memory for sequences.\n"); exit(1); }
    disp = 0;
    for (i=0; i< sequences_count; i++ ) {
        // copy query sequence
        memcpy(a+disp,sequences[i],sequences_lengths[i]);
        // if length is not multiple of QUERY_SEQ_LEN_MULT, then make it multiple and copy dummy element at last position
        tmp_length = ceil( (double) sequences_lengths[i] / (double) QUERY_SEQ_LEN_MULT) * QUERY_SEQ_LEN_MULT;
        for (j=sequences_lengths[i]; j<tmp_length; j++)
            a[disp+j]=DUMMY_ELEMENT;
        // update query length (m now holds the PADDED length, sorted order)
        m[i] = tmp_length;
        // update disp
        disp += m[i];
    }
    // re-encode residues 'A'..'Z' -> 0..24, J/O/U replaced by dummy symbol
    #pragma omp parallel for private(diff) num_threads(n_procs) schedule(dynamic)
    for (i=0; i< Q; i++) {
        a[i] = ((a[i] == 'J') ? DUMMY_ELEMENT : a[i]);
        a[i] = ((a[i] == 'O') ? DUMMY_ELEMENT : a[i]);
        a[i] = ((a[i] == 'U') ? DUMMY_ELEMENT : a[i]);
        diff = 'A';
        diff = (a[i] > 'J' ? diff+1 : diff);
        diff = (a[i] > 'O' ? diff+1 : diff);
        diff = (a[i] > 'U' ? diff+1 : diff);
        a[i] -= diff;
    }
    // Calculate displacement for current sequences db (prefix sums of padded lengths)
    sequences_disp = (unsigned int *) _mm_malloc((sequences_count+1)*sizeof(unsigned int), MEMALIGN);
    sequences_disp[0] = 0;
    for (i=1; i < sequences_count+1; i++)
        sequences_disp[i] = sequences_disp[i-1] + m[i-1];
    // publish results through the out-parameters
    *ptr_query_sequences = a;
    *ptr_query_sequences_lengths = sequences_lengths;
    *ptr_m = m;
    *ptr_query_sequences_disp = sequences_disp;
    *ptr_query_headers = titles;
    *query_sequences_count = sequences_count;
    // Free memory
    for (i=0; i< sequences_count; i++ )
        free(sequences[i]);
    free(sequences);
    free(title_lengths);
}
/* Load a preprocessed database ("<name>.info" + "<name>.seq", as written by
 * preprocess_db) and assemble it into a single vectorized chunk: groups of
 * `vector_length` consecutive sequences are interleaved residue-by-residue
 * (element k of lane j at offset j*vector_length+k) so that SIMD lanes read
 * contiguous memory. Each group is padded with PREPROCESSED_DUMMY_ELEMENT up
 * to the group's longest length, rounded to a multiple of DB_SEQ_LEN_MULT.
 * Outputs (via out-parameters): the packed buffer, per-group padded lengths,
 * per-group block counts (ceil(len/block_width)), per-group displacements,
 * plus the database statistics read from the info file.
 * Relies on the database being sorted ascending by length (the last
 * sequence of each group is the longest). Exits on I/O failure. */
void assemble_single_chunk_db (char * sequences_filename, int vector_length, unsigned long int * sequences_count,
    unsigned long int * D, unsigned short int * sequences_db_max_length, int * max_title_length, unsigned long int * vect_sequences_db_count,
    unsigned long int * vD, char **ptr_vect_sequences_db, unsigned short int ** ptr_vect_sequences_db_lengths, unsigned short int ** ptr_vect_sequences_db_blocks,
    unsigned long int ** ptr_vect_sequences_db_disp, int n_procs, int block_width) {
    char ** sequences, *s, filename[200], ** sequences_db_headers, *header, *b;
    unsigned short int * vect_sequences_lengths, * sequences_lengths, * vect_sequences_blocks;
    unsigned long int i, j, k, accum, aux_vD=0, *vect_sequences_disp;
    FILE * sequences_file, * info_file;
    // Open info file
    sprintf(filename,"%s.info",sequences_filename);
    info_file = fopen(filename,"r");
    if (info_file == NULL) {
        printf("SWIMM: An error occurred while opening info file.\n");
        exit(2);
    }
    fscanf(info_file,"%ld %ld %d",sequences_count,D,max_title_length);
    fclose(info_file);
    // Open sequences file
    sprintf(filename,"%s.seq",sequences_filename);
    sequences_file = fopen(filename,"rb");
    if (sequences_file == NULL) {
        printf("SWIMM: An error occurred while opening info file.\n");
        exit(2);
    }
    // Read sequences lengths
    sequences_lengths = (unsigned short int *) malloc((*sequences_count)*sizeof(unsigned short int));
    fread(sequences_lengths,sizeof(unsigned short int),*sequences_count,sequences_file);
    // Read sequences (one flat buffer; per-sequence views are derived below)
    s = (char *) malloc((*D)*sizeof(char));
    fread(s,sizeof(char),*D,sequences_file);
    fclose(sequences_file);
    sequences = (char **) malloc((*sequences_count)*sizeof(char *));
    sequences[0] = s;
    for (i=1; i<*sequences_count ; i++)
        sequences[i] = sequences[i-1] + sequences_lengths[i-1];
    // calculate vect_sequences_count (number of vector groups)
    *vect_sequences_db_count = ceil( (double) (*sequences_count) / (double) vector_length);
    // Allocate memory for vect_sequences_lengths
    vect_sequences_lengths = (unsigned short int *) _mm_malloc((*vect_sequences_db_count)*sizeof(unsigned short int),MEMALIGN);
    if (vect_sequences_lengths == NULL) { printf("SWIMM: An error occurred while allocating memory.\n"); exit(1); }
    vect_sequences_disp = (unsigned long int *) _mm_malloc((*vect_sequences_db_count+1)*sizeof(unsigned long int),MEMALIGN);
    if (vect_sequences_disp == NULL) { printf("SWIMM: An error occurred while allocating memory.\n"); exit(1); }
    vect_sequences_blocks = (unsigned short int *) _mm_malloc((*vect_sequences_db_count)*sizeof(unsigned short int),MEMALIGN);
    if (vect_sequences_blocks == NULL) { printf("SWIMM: An error occurred while allocating memory.\n"); exit(1); }
    // group length = length of the group's last (longest) sequence;
    // assumes the database is sorted ascending by length
    for (i=0; i< *vect_sequences_db_count - 1; i++ )
        vect_sequences_lengths[i] = sequences_lengths[(i+1)*vector_length-1];
    vect_sequences_lengths[*vect_sequences_db_count-1] = sequences_lengths[*sequences_count-1];
    // make length multiple of 4 to allow 32/64 bytes aligned data
    for (i=0; i< *vect_sequences_db_count; i++ )
        vect_sequences_lengths[i] = ceil( (double) vect_sequences_lengths[i] / (double) DB_SEQ_LEN_MULT) * DB_SEQ_LEN_MULT;
    for (i=0; i< *vect_sequences_db_count; i++ )
        vect_sequences_blocks[i] = ceil( (double) vect_sequences_lengths[i] / (double) block_width);
    // total packed size = sum of (padded group length * lanes)
    #pragma omp parallel for reduction(+:aux_vD) num_threads(n_procs)
    for (i=0; i< *vect_sequences_db_count; i++ )
        aux_vD = aux_vD + vect_sequences_lengths[i]*vector_length;
    *vD = aux_vD;
    b = (char *) _mm_malloc((*vD)*sizeof(char),MEMALIGN);
    // Calculate displacement for current sequences db
    vect_sequences_disp[0] = 0;
    for (k=1; k < *vect_sequences_db_count+1; k++)
        vect_sequences_disp[k] = vect_sequences_disp[k-1] + (vect_sequences_lengths[k-1]*vector_length);
    // Copy sequences db to host buffers reordering elements to get better locality when computing alignments
    for (i=0; i < *vect_sequences_db_count-1; i++) {
        for (j=0; j< vect_sequences_lengths[i]; j++ ) {
            for (k=0;k< vector_length; k++)
                if (j < sequences_lengths[i*vector_length+k])
                    *(b+vect_sequences_disp[i]+(j*vector_length)+k) = sequences[i*vector_length+k][j];
                else
                    *(b+vect_sequences_disp[i]+(j*vector_length)+k) = PREPROCESSED_DUMMY_ELEMENT;
        }
    }
    // last group: may contain fewer than vector_length real sequences,
    // so missing lanes are filled entirely with the dummy element
    //rest = sequences_count % vector_length;
    for (i=*vect_sequences_db_count-1, j=0; j< vect_sequences_lengths[i]; j++ ) {
        for (k=0;k< vector_length; k++)
            if (i*vector_length+k < *sequences_count){
                if (j < sequences_lengths[i*vector_length+k])
                    *(b+vect_sequences_disp[i]+(j*vector_length)+k) = sequences[i*vector_length+k][j];
                else
                    *(b+vect_sequences_disp[i]+(j*vector_length)+k) = PREPROCESSED_DUMMY_ELEMENT;
            } else
                *(b+vect_sequences_disp[i]+(j*vector_length)+k) = PREPROCESSED_DUMMY_ELEMENT;
    }
    // publish results through the out-parameters
    *ptr_vect_sequences_db = b;
    *ptr_vect_sequences_db_lengths = vect_sequences_lengths;
    *ptr_vect_sequences_db_blocks = vect_sequences_blocks;
    *ptr_vect_sequences_db_disp = vect_sequences_disp;
    *sequences_db_max_length = sequences_lengths[*sequences_count-1];
    free(s);
    free(sequences);
    free(sequences_lengths);
}
/* Load the sequence description lines from "<name>.desc" (one header per
 * line, as written by preprocess_db) into a freshly allocated array of
 * strings, returned through ptr_sequences_db_headers. max_title_length is
 * the longest title length reported in the database info file. Exits the
 * process if the description file cannot be opened.
 * NOTE(review): fgets/malloc results inside the loop are unchecked, as in
 * the rest of this module. Headers keep their trailing '\n'. */
void load_database_headers (char * sequences_filename, unsigned long int sequences_count, int max_title_length, char *** ptr_sequences_db_headers) {
    char ** sequences_db_headers, filename[200], * header;
    FILE * header_file;
    unsigned long int i;
    // Load sequence headers
    // Open header file
    sprintf(filename,"%s.desc",sequences_filename);
    header_file = fopen(filename,"r");
    if (header_file == NULL) {
        printf("SWIMM: An error occurred while opening sequence description file.\n");
        exit(3);
    }
    // Read one header line per sequence
    sequences_db_headers = (char **) malloc(sequences_count*sizeof(char *));
    // BUG FIX: the buffer must hold the longest title PLUS its trailing
    // newline and the NUL terminator. The previous code allocated
    // max_title_length+1 bytes but passed only max_title_length to fgets,
    // so a maximal-length line could not be consumed in one call; the
    // leftover newline was then read as the next "header", misaligning
    // every subsequent entry.
    header = (char *) malloc((max_title_length+2)*sizeof(char));
    for (i=0; i<sequences_count; i++){
        fgets(header,max_title_length+2,header_file);
        sequences_db_headers[i] = (char *) malloc((strlen(header)+1)*sizeof(char));
        strcpy(sequences_db_headers[i],header);
    }
    fclose(header_file);
    free(header);
    *ptr_sequences_db_headers = sequences_db_headers;
}
/* Merge step of the length-based mergesort: the two already-sorted halves
 * [0, size/2) and [size/2, size) of the three parallel arrays (sequence
 * pointers, title pointers, lengths) are merged in place, ascending by
 * length. Stable: on equal lengths the left half wins. Uses O(size)
 * temporary storage. */
void merge_sequences(char ** sequences, char ** titles, unsigned short int * sequences_lengths, unsigned long int size) {
    unsigned long int half = size / 2;
    unsigned long int left = 0, right = half, out = 0;
    // temporary buffers for the merged order
    char ** seq_buf = (char **) malloc(size*sizeof(char *));
    char ** title_buf = (char **) malloc(size*sizeof(char *));
    unsigned short int * len_buf = (unsigned short int *) malloc (size*sizeof(unsigned short int));
    while (out < size) {
        int take_left;
        if (left >= half)
            take_left = 0;                  // left half exhausted
        else if (right >= size)
            take_left = 1;                  // right half exhausted
        else
            take_left = (sequences_lengths[left] <= sequences_lengths[right]);
        unsigned long int src = take_left ? left++ : right++;
        seq_buf[out] = sequences[src];
        title_buf[out] = titles[src];
        len_buf[out] = sequences_lengths[src];
        out++;
    }
    // copy the merged order back over the input arrays
    memcpy(sequences, seq_buf, size*sizeof(char *));
    memcpy(titles, title_buf, size*sizeof(char *));
    memcpy(sequences_lengths, len_buf, size*sizeof(unsigned short int));
    free(seq_buf);
    free(title_buf);
    free(len_buf);
}
/* Serial recursive mergesort of the three parallel arrays (sequences,
 * titles, lengths), ascending by sequence length. size < 2 is a no-op;
 * size == 2 is handled by a direct conditional swap. */
void mergesort_sequences_serial (char ** sequences, char ** titles, unsigned short int * sequences_lengths, unsigned long int size) {
    if (size < 2)
        return;
    if (size == 2) {
        // base case: order the pair directly
        if (sequences_lengths[0] > sequences_lengths[1]) {
            char * ptr_swap = sequences[0];
            sequences[0] = sequences[1];
            sequences[1] = ptr_swap;
            ptr_swap = titles[0];
            titles[0] = titles[1];
            titles[1] = ptr_swap;
            unsigned short int len_swap = sequences_lengths[0];
            sequences_lengths[0] = sequences_lengths[1];
            sequences_lengths[1] = len_swap;
        }
        return;
    }
    // recursive case: sort each half, then merge them
    mergesort_sequences_serial(sequences, titles, sequences_lengths, size/2);
    mergesort_sequences_serial(sequences + size/2, titles + size/2, sequences_lengths + size/2, size - size/2);
    merge_sequences(sequences, titles, sequences_lengths, size);
}
/* Parallel merge-sort driver. With more than one thread the two halves are
 * sorted concurrently (OpenMP sections), each recursion getting half of the
 * remaining thread budget, and the results are merged; with exactly one
 * thread it falls back to the serial sort. threads <= 0 does nothing. */
void sort_sequences (char ** sequences, char ** titles, unsigned short int * sequences_lengths, unsigned long int size, int threads) {
	unsigned long int half = size / 2;
	if (threads > 1) {
		#pragma omp parallel sections
		{
			#pragma omp section
			sort_sequences(sequences, titles, sequences_lengths, half, threads/2);
			#pragma omp section
			sort_sequences(sequences + half, titles + half, sequences_lengths + half, size - half, threads - threads/2);
		}
		merge_sequences(sequences, titles, sequences_lengths, size);
	} else if (threads == 1) {
		mergesort_sequences_serial(sequences, titles, sequences_lengths, size);
	}
}
/* Build a set of synthetic query sequences used for auto-tuning.
 * Outputs (caller owns, all allocated with _mm_malloc):
 *   *ptr_tun_query_sequences        flat buffer of random residue codes
 *   *ptr_tun_query_sequence_lengths per-query lengths, rounded down to a
 *                                   multiple of QUERY_SEQ_LEN_MULT
 *   *ptr_tun_query_sequence_disps   prefix-sum displacements with one extra
 *                                   trailing entry holding the total size
 * NOTE(review): rand() is not seeded here, so the synthetic content is
 * identical across runs unless a caller seeds it — confirm that is intended. */
void load_tuning_query_sequence (char ** ptr_tun_query_sequences, unsigned short int ** ptr_tun_query_sequence_lengths, unsigned int ** ptr_tun_query_sequence_disps) {
	/* nominal lengths for the TUNING_QUERY_COUNT synthetic queries */
	unsigned short int * tun_query_sequence_lengths, lengths[TUNING_QUERY_COUNT] = {189, 375, 567, 729, 1000, 2005, 3005, 4061, 4743, 5478};
	// unsigned short int * tun_query_sequence_lengths, lengths[TUNING_QUERY_COUNT] = {189, 567, 1000, 3005, 4743, 5478};
	unsigned int * tun_query_sequence_disps, i, tunQ=0;	/* tunQ accumulates the total adapted length */
	char * tun_query_sequences;
	tun_query_sequence_lengths = _mm_malloc(TUNING_QUERY_COUNT*sizeof(unsigned short int), MEMALIGN);
	tun_query_sequence_disps = _mm_malloc((TUNING_QUERY_COUNT+1)*sizeof(unsigned int), MEMALIGN);
	/* adapt each query length: round down to a multiple of QUERY_SEQ_LEN_MULT */
	for (i=0; i < TUNING_QUERY_COUNT ; i++) {
		tun_query_sequence_lengths[i] = (lengths[i] / QUERY_SEQ_LEN_MULT) * QUERY_SEQ_LEN_MULT;
		tunQ += tun_query_sequence_lengths[i];
	}
	tun_query_sequences = _mm_malloc(tunQ*sizeof(char), MEMALIGN);
	/* generate synthetic residues: random codes in [0, SUBMAT_ROWS) */
	for (i=0; i < tunQ ; i++)
		tun_query_sequences[i] = rand() % SUBMAT_ROWS;
	/* displacements: disps[i] = sum of lengths[0..i-1]; last entry = total */
	tun_query_sequence_disps[0] = 0;
	for (i=1; i <= TUNING_QUERY_COUNT ; i++)
		tun_query_sequence_disps[i] = tun_query_sequence_disps[i-1] + tun_query_sequence_lengths[i-1];
	*ptr_tun_query_sequences = tun_query_sequences;
	*ptr_tun_query_sequence_lengths = tun_query_sequence_lengths;
	*ptr_tun_query_sequence_disps = tun_query_sequence_disps;
}
/* Assemble a synthetic vectorized database chunk used for auto-tuning.
 * All sequences share one adapted length (TUNING_DB_SEQ_LENGTH rounded down
 * to a multiple of DB_SEQ_LEN_MULT); the sequence count is rounded down to a
 * multiple of VECTOR_LENGTH so that each "vector sequence" packs
 * VECTOR_LENGTH database sequences. Outputs are _mm_malloc'd; caller owns.
 * NOTE(review): tun_vect_db_sequences_blocks is allocated but never written
 * here — presumably filled by the caller; confirm before relying on it. */
void assemble_tuning_chunk_db (char ** ptr_tun_vect_db_sequences, unsigned short int ** ptr_tun_vect_db_sequences_lengths,
	unsigned short int ** ptr_tun_vect_db_sequences_blocks, unsigned long int ** ptr_tun_vect_db_sequences_disp,
	unsigned long int * ptr_tun_vect_db_sequences_count) {
	char *tun_vect_db_sequences;
	unsigned short int * tun_vect_db_sequences_lengths, * tun_vect_db_sequences_blocks, tun_db_seq_length;
	unsigned long int i, j, k, tun_vect_db_sequences_count, tun_db_seq_size, tun_db_seq_count, *tun_vect_db_sequences_disp;
	/* adapt length of synthetic sequences to a multiple of DB_SEQ_LEN_MULT */
	tun_db_seq_length = (TUNING_DB_SEQ_LENGTH / DB_SEQ_LEN_MULT) * DB_SEQ_LEN_MULT;
	/* total byte size and sequence count, both rounded to whole sequences */
	tun_db_seq_size = (TUNING_DB_SEQ_SIZE / tun_db_seq_length) * tun_db_seq_length;
	tun_db_seq_count = (tun_db_seq_size / tun_db_seq_length);
	tun_db_seq_count = (tun_db_seq_count / VECTOR_LENGTH) * VECTOR_LENGTH;
	/* number of packed vector sequences (VECTOR_LENGTH db sequences each) */
	tun_vect_db_sequences_count = tun_db_seq_count / VECTOR_LENGTH;
	/* printf("\n db seq len %u",tun_db_seq_length);
	printf("\n db seq size %lu",tun_db_seq_size);
	printf("\n db seq count %lu",tun_db_seq_count);
	printf("\n vect db seq count %lu",tun_vect_db_sequences_count);*/
	/* allocate memory for buffers */
	tun_vect_db_sequences = _mm_malloc(tun_db_seq_size*sizeof(char), MEMALIGN);
	tun_vect_db_sequences_lengths = _mm_malloc(tun_vect_db_sequences_count*sizeof(unsigned short int), MEMALIGN);
	tun_vect_db_sequences_blocks = _mm_malloc(tun_vect_db_sequences_count*sizeof(unsigned short int), MEMALIGN);
	tun_vect_db_sequences_disp = _mm_malloc((tun_vect_db_sequences_count+1)*sizeof(unsigned long int), MEMALIGN);
	/* generate synthetic residues: random codes in [0, SUBMAT_ROWS) */
	for (i=0; i < tun_db_seq_size ; i++)
		tun_vect_db_sequences[i] = rand() % SUBMAT_ROWS;
	/* every vector sequence has the same adapted length */
	for (i=0; i < tun_vect_db_sequences_count ; i++)
		tun_vect_db_sequences_lengths[i] = tun_db_seq_length;
	/* displacements: each vector sequence spans length*VECTOR_LENGTH bytes */
	tun_vect_db_sequences_disp[0] = 0;
	for (i=1; i <= tun_vect_db_sequences_count ; i++)
		tun_vect_db_sequences_disp[i] = tun_vect_db_sequences_disp[i-1] + tun_vect_db_sequences_lengths[i-1]*VECTOR_LENGTH;
	*ptr_tun_vect_db_sequences = tun_vect_db_sequences;
	*ptr_tun_vect_db_sequences_lengths = tun_vect_db_sequences_lengths;
	*ptr_tun_vect_db_sequences_blocks = tun_vect_db_sequences_blocks;
	*ptr_tun_vect_db_sequences_disp = tun_vect_db_sequences_disp;
	*ptr_tun_vect_db_sequences_count = tun_vect_db_sequences_count;
}
|
command_line.h | // Copyright (c) 2015, The Regents of the University of California (Regents)
// See LICENSE.txt for license details
#ifndef COMMAND_LINE_H_
#define COMMAND_LINE_H_
#include <getopt.h>
#include <algorithm>
#include <cinttypes>
#include <iostream>
#include <string>
#include <type_traits>
#include <vector>
#include <omp.h>
/*
GAP Benchmark Suite
Class: CLBase
Author: Scott Beamer
Handles command line argument parsing
- Through inheritance, can add more options to object
- For example, most kernels will use CLApp
*/
// Base command-line parser for the GAP benchmark tools. Owns the options
// common to every tool (input file, synthetic-graph scale/degree,
// symmetrization) and a help-text registry that subclasses extend through
// AddHelpLine().
class CLBase {
 protected:
  int argc_;
  char** argv_;
  std::string name_;
  std::string get_args_ = "f:g:hk:su:";   // getopt spec; subclasses append
  std::vector<std::string> help_strings_;

  int scale_ = -1;                        // -1 means "no synthetic graph"
  int degree_ = 16;
  std::string filename_ = "";
  bool symmetrize_ = false;
  bool uniform_ = false;

  // Formats a single " -x <arg>: description [default]" usage line and
  // registers it for PrintUsage().
  void AddHelpLine(char opt, std::string opt_arg, std::string text,
                   std::string def = "") {
    const int kBufLen = 100;
    char line[kBufLen];
    if (!opt_arg.empty())
      opt_arg = "<" + opt_arg + ">";
    if (!def.empty())
      def = "[" + def + "]";
    snprintf(line, kBufLen, " -%c %-9s: %-54s%10s", opt, opt_arg.c_str(),
             text.c_str(), def.c_str());
    help_strings_.push_back(line);
  }

 public:
  CLBase(int argc, char** argv, std::string name = "") :
      argc_(argc), argv_(argv), name_(name) {
    AddHelpLine('h', "", "print this help message");
    AddHelpLine('f', "file", "load graph from file");
    AddHelpLine('s', "", "symmetrize input edge list", "false");
    AddHelpLine('g', "scale", "generate 2^scale kronecker graph");
    AddHelpLine('u', "scale", "generate 2^scale uniform-random graph");
    AddHelpLine('k', "degree", "average degree for synthetic graph",
                std::to_string(degree_));
  }

  // Runs getopt over argv_; returns false if no graph input was specified.
  // Synthetic graphs are always symmetrized.
  bool ParseArgs() {
    extern char *optarg;  // from and for getopt
    signed char c_opt;
    while ((c_opt = getopt(argc_, argv_, get_args_.c_str())) != -1)
      HandleArg(c_opt, optarg);
    if (filename_.empty() && (scale_ == -1)) {
      std::cout << "No graph input specified. (Use -h for help)" << std::endl;
      return false;
    }
    if (scale_ != -1)
      symmetrize_ = true;
    return true;
  }

  // Dispatches one parsed option; subclasses override and chain to this.
  void virtual HandleArg(signed char opt, char* opt_arg) {
    switch (opt) {
      case 'f': filename_ = std::string(opt_arg); break;
      case 'g': scale_ = atoi(opt_arg); break;
      case 'h': PrintUsage(); break;
      case 'k': degree_ = atoi(opt_arg); break;
      case 's': symmetrize_ = true; break;
      case 'u': uniform_ = true; scale_ = atoi(opt_arg); break;
    }
  }

  // Prints the registered help lines, then exits the process.
  void PrintUsage() {
    std::cout << name_ << std::endl;
    for (const std::string &h : help_strings_)
      std::cout << h << std::endl;
    std::exit(0);
  }

  int scale() const { return scale_; }
  int degree() const { return degree_; }
  std::string filename() const { return filename_; }
  bool symmetrize() const { return symmetrize_; }
  bool uniform() const { return uniform_; }
};
// Options shared by the benchmark kernels: trial count, result analysis,
// verification, and GMS/baseline algorithm selection.
class CLApp : public CLBase {
 protected:
  bool do_analysis_ = false;
  int num_trials_ = 16;
  int64_t start_vertex_ = -1;
  bool do_verify_ = false;
  int gms_ = 0;

 public:
  CLApp(int argc, char** argv, std::string name) : CLBase(argc, argv, name) {
    get_args_ += "an:r:v";
    AddHelpLine('a', "", "output analysis of last run", "false");
    AddHelpLine('n', "n", "number of trials", std::to_string(num_trials_));
    AddHelpLine('r', "gms", "0: use GMS, 1: use baseline, 2: use both");
    AddHelpLine('v', "", "verify the output of each run", "false");
  }

  void HandleArg(signed char opt, char* opt_arg) override {
    switch (opt) {
      case 'a': do_analysis_ = true; break;
      case 'n': num_trials_ = atoi(opt_arg); break;
      case 'r': gms_ = atoi(opt_arg); break;
      case 'v': do_verify_ = true; break;
      default: CLBase::HandleArg(opt, opt_arg);
    }
  }

  bool do_analysis() const { return do_analysis_; }
  int num_trials() const { return num_trials_; }
  int64_t start_vertex() const { return start_vertex_; }
  bool do_verify() const { return do_verify_; }
};
// Adds '-p <n>': sets the number of OpenMP threads as a side effect of
// option parsing (reports the granted thread count, or a notice when the
// build has OpenMP disabled).
class BenchCLApp : public CLApp
{
 protected:
  int num_omp_threads_ = 1;

 public:
  BenchCLApp(int argc, char** argv, std::string name) : CLApp(argc, argv, name)
  {
    get_args_ += "p:";
    AddHelpLine('p', "p", "Optional number of OMP threads", std::to_string(num_omp_threads_));
  }

  void HandleArg(signed char opt, char* opt_arg) override
  {
    if (opt != 'p')
    {
      CLApp::HandleArg(opt, opt_arg);
      return;
    }
    num_omp_threads_ = atoi(opt_arg);
#ifdef _OPENMP
    omp_set_dynamic(0);
    omp_set_num_threads(num_omp_threads_);
    #pragma omp parallel
    {
      // report the thread count the runtime actually granted
      #pragma omp master
      std::cout << "Using " << omp_get_num_threads() << " OMP threads" << std::endl;
    }
#else
    std::cout << "OMP is disabled. Using 1 thread." << std::endl;
#endif
  }

  int num_omp_threads() const { return num_omp_threads_; }
};
// Adds '-i <n>' for kernels that run a fixed number of iterations.
class CLIterApp : public CLApp {
  int num_iters_;

 public:
  CLIterApp(int argc, char** argv, std::string name, int num_iters) :
      CLApp(argc, argv, name), num_iters_(num_iters) {
    get_args_ += "i:";
    AddHelpLine('i', "i", "perform i iterations", std::to_string(num_iters_));
  }

  void HandleArg(signed char opt, char* opt_arg) override {
    if (opt == 'i')
      num_iters_ = atoi(opt_arg);
    else
      CLApp::HandleArg(opt, opt_arg);
  }

  int num_iters() const { return num_iters_; }
};
// PageRank options: iteration cap '-i' and convergence tolerance '-t'.
class CLPageRank : public CLApp {
  int max_iters_;
  double tolerance_;

 public:
  CLPageRank(int argc, char** argv, std::string name, double tolerance,
             int max_iters) :
      CLApp(argc, argv, name), max_iters_(max_iters), tolerance_(tolerance) {
    get_args_ += "i:t:";
    AddHelpLine('i', "i", "perform at most i iterations",
                std::to_string(max_iters_));
    AddHelpLine('t', "t", "use tolerance t", std::to_string(tolerance_));
  }

  void HandleArg(signed char opt, char* opt_arg) override {
    if (opt == 'i')
      max_iters_ = atoi(opt_arg);
    else if (opt == 't')
      tolerance_ = std::stod(opt_arg);
    else
      CLApp::HandleArg(opt, opt_arg);
  }

  int max_iters() const { return max_iters_; }
  double tolerance() const { return tolerance_; }
};
// Adds '-d <delta>' for delta-stepping SSSP; the argument is parsed as a
// float or integer to match the weight type.
template<typename WeightT_ = int>
class CLDelta : public CLApp {
  WeightT_ delta_ = 1;

 public:
  CLDelta(int argc, char** argv, std::string name) : CLApp(argc, argv, name) {
    get_args_ += "d:";
    AddHelpLine('d', "d", "delta parameter", std::to_string(delta_));
  }

  void HandleArg(signed char opt, char* opt_arg) override {
    if (opt == 'd') {
      delta_ = std::is_floating_point<WeightT_>::value
                   ? static_cast<WeightT_>(atof(opt_arg))
                   : static_cast<WeightT_>(atol(opt_arg));
    } else {
      CLApp::HandleArg(opt, opt_arg);
    }
  }

  WeightT_ delta() const { return delta_; }
};
// Options for the graph-format converter: output file ('-b' serialized,
// '-e' edge list) and whether the output carries weights ('-w').
class CLConvert : public CLBase {
  std::string out_filename_ = "";
  bool out_weighted_ = false;
  bool out_el_ = false;
  bool out_sg_ = false;

 public:
  CLConvert(int argc, char** argv, std::string name)
      : CLBase(argc, argv, name) {
    get_args_ += "e:b:w";
    AddHelpLine('b', "file", "output serialized graph to file");
    AddHelpLine('e', "file", "output edge list to file");
    // 'w' is a flag (no argument in get_args_), so don't advertise <file>
    AddHelpLine('w', "", "make output weighted");
  }

  // Handles the converter-specific options; others go to CLBase.
  void HandleArg(signed char opt, char* opt_arg) override {
    switch (opt) {
      case 'b': out_sg_ = true; out_filename_ = std::string(opt_arg); break;
      case 'e': out_el_ = true; out_filename_ = std::string(opt_arg); break;
      case 'w': out_weighted_ = true; break;
      default: CLBase::HandleArg(opt, opt_arg);
    }
  }

  std::string out_filename() const { return out_filename_; }
  bool out_weighted() const { return out_weighted_; }
  bool out_el() const { return out_el_; }
  bool out_sg() const { return out_sg_; }
};
#endif // COMMAND_LINE_H_
|
xthi.c | #define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <sched.h>
#include <mpi.h>
#include <omp.h>
/* Render a cpu_set_t as a compact human-readable list, e.g. "0-3,8,10-11".
 * Runs of three or more consecutive CPUs collapse into "lo-hi"; a pair is
 * printed as two singletons. Writes into the caller-supplied buffer and
 * returns it. Borrowed from util-linux-2.13-pre7/schedutils/taskset.c. */
static char *cpuset_to_cstr(cpu_set_t *mask, char *str)
{
  char *out = str;
  int wrote_any = 0;
  int cpu = 0;

  while (cpu < CPU_SETSIZE) {
    if (!CPU_ISSET(cpu, mask)) {
      cpu++;
      continue;
    }
    wrote_any = 1;
    /* find the last CPU of the consecutive run starting at cpu */
    int last = cpu;
    while (last + 1 < CPU_SETSIZE && CPU_ISSET(last + 1, mask))
      last++;
    int run = last - cpu;
    if (run == 0)
      sprintf(out, "%d,", cpu);
    else if (run == 1)
      sprintf(out, "%d,%d,", cpu, cpu + 1);
    else
      sprintf(out, "%d-%d,", cpu, last);
    out += strlen(out);
    cpu = last + 1;
  }
  /* chop the trailing comma (if anything was emitted at all) */
  out -= wrote_any;
  *out = 0;
  return(str);
}
/* Print, for every MPI rank and every OpenMP thread inside it, the host
 * name and the set of cores the thread is currently allowed to run on.
 * Used to verify MPI+OpenMP process/thread pinning. */
int main(int argc, char *argv[])
{
	int rank, thread;
	cpu_set_t coremask;
	char clbuf[7 * CPU_SETSIZE], hnbuf[64];

	MPI_Init(&argc, &argv);
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	memset(clbuf, 0, sizeof(clbuf));
	memset(hnbuf, 0, sizeof(hnbuf));
	(void)gethostname(hnbuf, sizeof(hnbuf));
	/* coremask and clbuf are private: each thread formats its own affinity */
	#pragma omp parallel private(thread, coremask, clbuf)
	{
		thread = omp_get_thread_num();
		(void)sched_getaffinity(0, sizeof(coremask), &coremask);
		cpuset_to_cstr(&coremask, clbuf);
		/* synchronize so every thread finishes formatting before printing */
		#pragma omp barrier
		printf("Hello from rank %d, thread %d, on %s. (core affinity = %s)\n",
			rank, thread, hnbuf, clbuf);
	}
	MPI_Finalize();
	return(0);
}
|
target-4.c | /* { dg-do run } */
#include <stdlib.h>
#define EPS 0.000001
#define N 100000
/* Fill a1 with alternating -1/+1 (starting at -1) and a2 with the index
   values 0..N-1. */
void init (double *a1, double *a2)
{
  int i;
  double sign = -1;

  for (i = 0; i < N; i++)
    {
      a1[i] = sign;
      a2[i] = i;
      sign = -sign;
    }
}
/* Abort unless a and b agree elementwise within EPS. */
void check (double *a, double *b)
{
  int i;

  for (i = 0; i < N; i++)
    {
      double diff = a[i] - b[i];
      if (diff > EPS || -diff > EPS)
	abort ();
    }
}
/* Host-only reference: p[i] = v1[i] * v2[i] for all N elements. */
void vec_mult_ref (double *p, double *v1, double *v2)
{
  int i;

  for (i = N - 1; i >= 0; i--)
    {
      p[i] = v1[i] * v2[i];
    }
}
/* Offloaded elementwise product: maps v1/v2 to the target device, computes
   p there with a parallel loop, and maps p back to the host. */
void vec_mult (double *p, double *v1, double *v2)
{
  int i;
  /* map(to:) copies the inputs onto the device; map(from: p[0:N]) copies
     the result back when the target region ends */
  #pragma omp target map(to: v1[0:N], v2[:N]) map(from: p[0:N])
  #pragma omp parallel for
  for (i = 0; i < N; i++)
    p[i] = v1[i] * v2[i];
}
/* Compare the offloaded product against the host reference; check()
   aborts on any elementwise mismatch beyond EPS. */
int main ()
{
  double p1[N], p2[N];
  double v1[N], v2[N];
  init (v1, v2);
  vec_mult_ref (p1, v1, v2);
  vec_mult (p2, v1, v2);
  check (p1, p2);
  return 0;
}
|
GB_binop__lt_fp32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__lt_fp32
// A.*B function (eWiseMult): GB_AemultB__lt_fp32
// A*D function (colscale): GB_AxD__lt_fp32
// D*A function (rowscale): GB_DxB__lt_fp32
// C+=B function (dense accum): GB_Cdense_accumB__lt_fp32
// C+=b function (dense accum): GB_Cdense_accumb__lt_fp32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__lt_fp32
// C=scalar+B GB_bind1st__lt_fp32
// C=scalar+B' GB_bind1st_tran__lt_fp32
// C=A+scalar GB_bind2nd__lt_fp32
// C=A'+scalar GB_bind2nd_tran__lt_fp32
// C type: bool
// A type: float
// B,b type: float
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
float bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x < y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LT || GxB_NO_FP32 || GxB_NO_LT_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A and B are all dense; the numeric loop lives in the
// shared template, instantiated here with cij = (aij < bij) on float.
GrB_Info GB_Cdense_ewise3_noaccum__lt_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix. The template
// body is compiled out (#if 0) for this operator, so the function is a
// stub that only reports success; it exists to satisfy the generated
// dispatch tables.
GrB_Info GB_Cdense_accumB__lt_fp32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix. As with accumB, the
// template body is compiled out (#if 0) for this operator and the call is
// a success-returning stub.
GrB_Info GB_Cdense_accumb__lt_fp32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column scale A by the diagonal matrix D, storing the boolean
// result of the LT operator into C->x; the loop comes from the colscale
// template.
GrB_Info GB_AxD__lt_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row scale B by the diagonal matrix D, storing the boolean
// result of the LT operator into C->x; the loop comes from the rowscale
// template.
GrB_Info GB_DxB__lt_fp32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B with the LT operator; all pattern,
// mask, and task handling lives in GB_add_template.c.
GrB_Info GB_AaddB__lt_fp32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B with the LT operator; all pattern,
// mask, and task handling lives in GB_emult_template.c.
GrB_Info GB_AemultB__lt_fp32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x < Bx [p]) for all p: apply the LT operator with the scalar
// bound to the first argument.
GrB_Info GB_bind1st__lt_fp32
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    float x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    // one statically scheduled parallel pass over the anz entries
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        float bij = Bx [p] ;
        Cx [p] = (x < bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] < y) for all p: apply the LT operator with the scalar
// bound to the second argument.
GrB_Info GB_bind2nd__lt_fp32
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float y = (*((float *) y_input)) ;
    // one statically scheduled parallel pass over the anz entries
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        float aij = Ax [p] ;
        Cx [p] = (aij < y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    float aij = Ax [pA] ; \
    Cx [pC] = (x < aij) ; \
}

// C = op (x, A'): transpose A and apply the LT operator with the scalar
// bound to the first argument; the traversal is in GB_unop_transpose.c.
GrB_Info GB_bind1st_tran__lt_fp32
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y),
    // so GB_ATYPE is redefined here as B's type.
    #undef GB_ATYPE
    #define GB_ATYPE \
        float
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for code following this function
    #undef GB_ATYPE
    #define GB_ATYPE \
        float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    float aij = Ax [pA] ; \
    Cx [pC] = (aij < y) ; \
}

// C = op (A', y): transpose A and apply the LT operator with the scalar
// bound to the second argument; the traversal is in GB_unop_transpose.c.
GrB_Info GB_bind2nd_tran__lt_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
statistic.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS TTTTT AAA TTTTT IIIII SSSSS TTTTT IIIII CCCC %
% SS T A A T I SS T I C %
% SSS T AAAAA T I SSS T I C %
% SS T A A T I SS T I C %
% SSSSS T A A T IIIII SSSSS T IIIII CCCC %
% %
% %
% MagickCore Image Statistical Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/animate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/compress.h"
#include "MagickCore/constitute.h"
#include "MagickCore/display.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/list.h"
#include "MagickCore/image-private.h"
#include "MagickCore/magic.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/timer.h"
#include "MagickCore/utility.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E v a l u a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EvaluateImage() applies a value to the image with an arithmetic, relational,
% or logical operator to an image. Use these operations to lighten or darken
% an image, to increase or decrease contrast in an image, or to produce the
% "negative" of an image.
%
% The format of the EvaluateImage method is:
%
% MagickBooleanType EvaluateImage(Image *image,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
% MagickBooleanType EvaluateImages(Image *images,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o op: A channel op.
%
% o value: A value value.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* One pixel's worth of channel values, held as doubles so the evaluate
   operators can accumulate and compare without quantum overflow. */
typedef struct _PixelChannels
{
  double
    channel[MaxPixelChannels];
} PixelChannels;
/* Release a per-thread pixel workspace created by AcquirePixelThreadSet():
   free each non-NULL row (one per thread/image slot), then the row table
   itself. Always returns NULL, suitable for assignment. */
static PixelChannels **DestroyPixelThreadSet(const Image *images,
  PixelChannels **pixels)
{
  size_t
    rows;

  ssize_t
    i;

  assert(pixels != (PixelChannels **) NULL);
  /* same row count as the matching Acquire call */
  rows=MagickMax(GetImageListLength(images),(size_t)
    GetMagickResourceLimit(ThreadResource));
  for (i=0; i < (ssize_t) rows; i++)
  {
    if (pixels[i] == (PixelChannels *) NULL)
      continue;
    pixels[i]=(PixelChannels *) RelinquishMagickMemory(pixels[i]);
  }
  pixels=(PixelChannels **) RelinquishMagickMemory(pixels);
  return(pixels);
}
/* Allocate a per-thread pixel workspace: one row per worker slot, where
   rows = max(image count, thread resource limit). Each row holds `columns`
   PixelChannels entries, with columns = max(image count, MaxPixelChannels,
   widest image), all zero-initialized. On any allocation failure the
   partially built set is destroyed and NULL is returned. */
static PixelChannels **AcquirePixelThreadSet(const Image *images)
{
  const Image
    *next;

  PixelChannels
    **pixels;

  register ssize_t
    i;

  size_t
    columns,
    number_images,
    rows;

  number_images=GetImageListLength(images);
  rows=MagickMax(number_images,(size_t) GetMagickResourceLimit(ThreadResource));
  pixels=(PixelChannels **) AcquireQuantumMemory(rows,sizeof(*pixels));
  if (pixels == (PixelChannels **) NULL)
    return((PixelChannels **) NULL);
  /* zero the row table so a partial-failure cleanup only frees valid rows */
  (void) memset(pixels,0,rows*sizeof(*pixels));
  columns=MagickMax(number_images,MaxPixelChannels);
  for (next=images; next != (Image *) NULL; next=next->next)
    columns=MagickMax(next->columns,columns);
  for (i=0; i < (ssize_t) rows; i++)
  {
    register ssize_t
      j;

    pixels[i]=(PixelChannels *) AcquireQuantumMemory(columns,sizeof(**pixels));
    if (pixels[i] == (PixelChannels *) NULL)
      return(DestroyPixelThreadSet(images,pixels));
    for (j=0; j < (ssize_t) columns; j++)
    {
      register ssize_t
        k;

      for (k=0; k < MaxPixelChannels; k++)
        pixels[i][j].channel[k]=0.0;
    }
  }
  return(pixels);
}
/* Return the larger of x and y. */
static inline double EvaluateMax(const double x,const double y)
{
  return(x > y ? x : y);
}
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
/* qsort-style comparator: orders two PixelChannels records by the sum of
   their per-channel differences (a signed intensity distance). Returns
   -1, 0, or 1. */
static int IntensityCompare(const void *x,const void *y)
{
  const PixelChannels
    *p = (const PixelChannels *) x,
    *q = (const PixelChannels *) y;

  double
    difference = 0.0;

  ssize_t
    i;

  for (i=0; i < MaxPixelChannels; i++)
    difference+=p->channel[i]-(double) q->channel[i];
  if (difference < 0)
    return(-1);
  if (difference > 0)
    return(1);
  return(0);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
  ApplyEvaluateOperator() applies one evaluate operator to a single pixel
  channel sample and returns the raw double result; callers are expected to
  clamp it to the quantum range.  `random_info' is consulted only by the
  noise operators.  Mean, Median, Sum and RootMeanSquare produce partial
  accumulations that the caller must finish (divide, sort, sqrt).
*/
static double ApplyEvaluateOperator(RandomInfo *random_info,const Quantum pixel,
  const MagickEvaluateOperator op,const double value)
{
  double
    result;
  register ssize_t
    i;
  result=0.0;
  switch (op)
  {
    case UndefinedEvaluateOperator:
      break;
    case AbsEvaluateOperator:
    {
      result=(double) fabs((double) (pixel+value));
      break;
    }
    case AddEvaluateOperator:
    {
      result=(double) (pixel+value);
      break;
    }
    case AddModulusEvaluateOperator:
    {
      /*
        This returns a 'floored modulus' of the addition which is a positive
        result. It differs from % or fmod() that returns a 'truncated modulus'
        result, where floor() is replaced by trunc() and could return a
        negative result (which is clipped).
      */
      result=pixel+value;
      result-=(QuantumRange+1.0)*floor((double) result/(QuantumRange+1.0));
      break;
    }
    case AndEvaluateOperator:
    {
      /* value+0.5 rounds the operand to the nearest integer before AND */
      result=(double) ((ssize_t) pixel & (ssize_t) (value+0.5));
      break;
    }
    case CosineEvaluateOperator:
    {
      result=(double) (QuantumRange*(0.5*cos((double) (2.0*MagickPI*
        QuantumScale*pixel*value))+0.5));
      break;
    }
    case DivideEvaluateOperator:
    {
      /* a zero divisor degrades to division by one instead of NaN/Inf */
      result=pixel/(value == 0.0 ? 1.0 : value);
      break;
    }
    case ExponentialEvaluateOperator:
    {
      result=(double) (QuantumRange*exp((double) (value*QuantumScale*pixel)));
      break;
    }
    case GaussianNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,GaussianNoise,
        value);
      break;
    }
    case ImpulseNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,ImpulseNoise,
        value);
      break;
    }
    case LaplacianNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,
        LaplacianNoise,value);
      break;
    }
    case LeftShiftEvaluateOperator:
    {
      /* shift by repeated doubling so the result stays in floating point */
      result=(double) pixel;
      for (i=0; i < (ssize_t) value; i++)
        result*=2.0;
      break;
    }
    case LogEvaluateOperator:
    {
      /* guard against taking the log of a near-zero sample */
      if ((QuantumScale*pixel) >= MagickEpsilon)
        result=(double) (QuantumRange*log((double) (QuantumScale*value*pixel+
          1.0))/log((double) (value+1.0)));
      break;
    }
    case MaxEvaluateOperator:
    {
      result=(double) EvaluateMax((double) pixel,value);
      break;
    }
    case MeanEvaluateOperator:
    {
      /* partial sum only; the caller divides by the sample count */
      result=(double) (pixel+value);
      break;
    }
    case MedianEvaluateOperator:
    {
      /* partial result; the caller selects the median after sorting */
      result=(double) (pixel+value);
      break;
    }
    case MinEvaluateOperator:
    {
      result=(double) MagickMin((double) pixel,value);
      break;
    }
    case MultiplicativeNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,
        MultiplicativeGaussianNoise,value);
      break;
    }
    case MultiplyEvaluateOperator:
    {
      result=(double) (value*pixel);
      break;
    }
    case OrEvaluateOperator:
    {
      /* value+0.5 rounds the operand to the nearest integer before OR */
      result=(double) ((ssize_t) pixel | (ssize_t) (value+0.5));
      break;
    }
    case PoissonNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,PoissonNoise,
        value);
      break;
    }
    case PowEvaluateOperator:
    {
      /* preserve the sign of negative samples through pow() */
      if (pixel < 0)
        result=(double) -(QuantumRange*pow((double) -(QuantumScale*pixel),
          (double) value));
      else
        result=(double) (QuantumRange*pow((double) (QuantumScale*pixel),
          (double) value));
      break;
    }
    case RightShiftEvaluateOperator:
    {
      /* shift by repeated halving, mirroring LeftShiftEvaluateOperator */
      result=(double) pixel;
      for (i=0; i < (ssize_t) value; i++)
        result/=2.0;
      break;
    }
    case RootMeanSquareEvaluateOperator:
    {
      /* partial sum of squares; the caller applies the mean and sqrt() */
      result=(double) (pixel*pixel+value);
      break;
    }
    case SetEvaluateOperator:
    {
      result=value;
      break;
    }
    case SineEvaluateOperator:
    {
      result=(double) (QuantumRange*(0.5*sin((double) (2.0*MagickPI*
        QuantumScale*pixel*value))+0.5));
      break;
    }
    case SubtractEvaluateOperator:
    {
      result=(double) (pixel-value);
      break;
    }
    case SumEvaluateOperator:
    {
      result=(double) (pixel+value);
      break;
    }
    case ThresholdEvaluateOperator:
    {
      result=(double) (((double) pixel <= value) ? 0 : QuantumRange);
      break;
    }
    case ThresholdBlackEvaluateOperator:
    {
      result=(double) (((double) pixel <= value) ? 0 : pixel);
      break;
    }
    case ThresholdWhiteEvaluateOperator:
    {
      result=(double) (((double) pixel > value) ? QuantumRange : pixel);
      break;
    }
    case UniformNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,UniformNoise,
        value);
      break;
    }
    case XorEvaluateOperator:
    {
      /* value+0.5 rounds the operand to the nearest integer before XOR */
      result=(double) ((ssize_t) pixel ^ (ssize_t) (value+0.5));
      break;
    }
  }
  return(result);
}
static Image *AcquireImageCanvas(const Image *images,ExceptionInfo *exception)
{
  /*
    Clone a canvas large enough for every image in the list: the widest
    columns, the tallest rows, and (as clone template) the image with the
    most channels.
  */
  const Image
    *canvas,
    *next;

  size_t
    height,
    width;

  canvas=images;
  width=images->columns;
  height=images->rows;
  for (next=images; next != (Image *) NULL; next=next->next)
  {
    if (next->number_channels > canvas->number_channels)
      canvas=next;
    if (next->columns > width)
      width=next->columns;
    if (next->rows > height)
      height=next->rows;
  }
  return(CloneImage(canvas,width,height,MagickTrue,exception));
}
/*
  EvaluateImages() applies the given evaluate operator across an image
  sequence, pixel by pixel, and returns a new image holding the combined
  result.  The canvas is sized to the largest image in the list.  Returns
  NULL (with `exception' populated) on failure.
*/
MagickExport Image *EvaluateImages(const Image *images,
  const MagickEvaluateOperator op,ExceptionInfo *exception)
{
#define EvaluateImageTag "Evaluate/Image"
  CacheView
    *evaluate_view,
    **image_view;
  const Image
    *next;
  Image
    *image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  PixelChannels
    **magick_restrict evaluate_pixels;
  RandomInfo
    **magick_restrict random_info;
  size_t
    number_images;
  ssize_t
    j,
    y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImageCanvas(images,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    {
      image=DestroyImage(image);
      return((Image *) NULL);
    }
  number_images=GetImageListLength(images);
  evaluate_pixels=AcquirePixelThreadSet(images);
  if (evaluate_pixels == (PixelChannels **) NULL)
    {
      image=DestroyImage(image);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return((Image *) NULL);
    }
  /*
    One virtual cache view per input image so every thread can read the
    same row from each image in the list.
  */
  image_view=(CacheView **) AcquireQuantumMemory(number_images,
    sizeof(*image_view));
  if (image_view == (CacheView **) NULL)
    {
      image=DestroyImage(image);
      evaluate_pixels=DestroyPixelThreadSet(images,evaluate_pixels);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      /* NOTE(review): image was destroyed above; this relies on
         DestroyImage() returning NULL so a NULL image is returned here --
         confirm against the DestroyImage() contract */
      return(image);
    }
  next=images;
  for (j=0; j < (ssize_t) number_images; j++)
  {
    image_view[j]=AcquireVirtualCacheView(next,exception);
    next=GetNextImageInList(next);
  }
  /*
    Evaluate image pixels.
  */
  status=MagickTrue;
  progress=0;
  random_info=AcquireRandomInfoThreadSet();
  evaluate_view=AcquireAuthenticCacheView(image,exception);
  if (op == MedianEvaluateOperator)
    {
      /*
        Median needs all samples of a pixel at once: gather one record per
        input image, sort them by intensity, and keep the middle record.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      key=GetRandomSecretKey(random_info[0]);
      #pragma omp parallel for schedule(static) shared(progress,status) \
        magick_number_threads(image,images,image->rows,key == ~0UL)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const Image
          *next;
        const int
          id = GetOpenMPThreadId();
        const Quantum
          **p;
        register PixelChannels
          *evaluate_pixel;
        register Quantum
          *magick_restrict q;
        register ssize_t
          x;
        ssize_t
          j;
        if (status == MagickFalse)
          continue;
        p=(const Quantum **) AcquireQuantumMemory(number_images,sizeof(*p));
        if (p == (const Quantum **) NULL)
          {
            status=MagickFalse;
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'",
              images->filename);
            continue;
          }
        for (j=0; j < (ssize_t) number_images; j++)
        {
          p[j]=GetCacheViewVirtualPixels(image_view[j],0,y,image->columns,1,
            exception);
          if (p[j] == (const Quantum *) NULL)
            break;
        }
        q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1,
          exception);
        /* j < number_images means a virtual pixel fetch failed above */
        if ((j < (ssize_t) number_images) || (q == (Quantum *) NULL))
          {
            status=MagickFalse;
            continue;
          }
        evaluate_pixel=evaluate_pixels[id];
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register ssize_t
            i;
          next=images;
          for (j=0; j < (ssize_t) number_images; j++)
          {
            for (i=0; i < MaxPixelChannels; i++)
              evaluate_pixel[j].channel[i]=0.0;
            for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
            {
              PixelChannel channel = GetPixelChannelChannel(image,i);
              PixelTrait traits = GetPixelChannelTraits(next,channel);
              PixelTrait evaluate_traits = GetPixelChannelTraits(image,channel);
              if ((traits == UndefinedPixelTrait) ||
                  (evaluate_traits == UndefinedPixelTrait) ||
                  ((traits & UpdatePixelTrait) == 0))
                continue;
              evaluate_pixel[j].channel[i]=ApplyEvaluateOperator(
                random_info[id],GetPixelChannel(next,channel,p[j]),op,
                evaluate_pixel[j].channel[i]);
            }
            p[j]+=GetPixelChannels(next);
            next=GetNextImageInList(next);
          }
          /* sort the gathered records and keep the middle one */
          qsort((void *) evaluate_pixel,number_images,sizeof(*evaluate_pixel),
            IntensityCompare);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if ((traits == UndefinedPixelTrait) ||
                ((traits & UpdatePixelTrait) == 0))
              continue;
            q[i]=ClampToQuantum(evaluate_pixel[number_images/2].channel[i]);
          }
          q+=GetPixelChannels(image);
        }
        p=(const Quantum **) RelinquishMagickMemory(p);
        if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
          status=MagickFalse;
        if (images->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp atomic
#endif
            progress++;
            proceed=SetImageProgress(images,EvaluateImageTag,progress,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
    }
  else
    {
      /*
        All other operators: accumulate across the images into one record
        per column, then post-process (mean, multiply rescale, RMS) and
        write the row.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      key=GetRandomSecretKey(random_info[0]);
      #pragma omp parallel for schedule(static) shared(progress,status) \
        magick_number_threads(image,images,image->rows,key == ~0UL)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const Image
          *next;
        const int
          id = GetOpenMPThreadId();
        const Quantum
          **p;
        register ssize_t
          i,
          x;
        register PixelChannels
          *evaluate_pixel;
        register Quantum
          *magick_restrict q;
        ssize_t
          j;
        if (status == MagickFalse)
          continue;
        p=(const Quantum **) AcquireQuantumMemory(number_images,sizeof(*p));
        if (p == (const Quantum **) NULL)
          {
            status=MagickFalse;
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'",
              images->filename);
            continue;
          }
        for (j=0; j < (ssize_t) number_images; j++)
        {
          p[j]=GetCacheViewVirtualPixels(image_view[j],0,y,image->columns,1,
            exception);
          if (p[j] == (const Quantum *) NULL)
            break;
        }
        q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1,
          exception);
        /* j < number_images means a virtual pixel fetch failed above */
        if ((j < (ssize_t) number_images) || (q == (Quantum *) NULL))
          {
            status=MagickFalse;
            continue;
          }
        evaluate_pixel=evaluate_pixels[id];
        for (j=0; j < (ssize_t) image->columns; j++)
          for (i=0; i < MaxPixelChannels; i++)
            evaluate_pixel[j].channel[i]=0.0;
        next=images;
        for (j=0; j < (ssize_t) number_images; j++)
        {
          for (x=0; x < (ssize_t) image->columns; x++)
          {
            register ssize_t
              i;
            for (i=0; i < (ssize_t) GetPixelChannels(next); i++)
            {
              PixelChannel channel = GetPixelChannelChannel(image,i);
              PixelTrait traits = GetPixelChannelTraits(next,channel);
              PixelTrait evaluate_traits = GetPixelChannelTraits(image,channel);
              if ((traits == UndefinedPixelTrait) ||
                  (evaluate_traits == UndefinedPixelTrait))
                continue;
              if ((traits & UpdatePixelTrait) == 0)
                continue;
              /* the first image seeds the accumulator; later ones apply op */
              evaluate_pixel[x].channel[i]=ApplyEvaluateOperator(
                random_info[id],GetPixelChannel(next,channel,p[j]),j == 0 ?
                AddEvaluateOperator : op,evaluate_pixel[x].channel[i]);
            }
            p[j]+=GetPixelChannels(next);
          }
          next=GetNextImageInList(next);
        }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          switch (op)
          {
            case MeanEvaluateOperator:
            {
              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                evaluate_pixel[x].channel[i]/=(double) number_images;
              break;
            }
            case MultiplyEvaluateOperator:
            {
              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
              {
                register ssize_t
                  j;
                /* each extra product of samples adds a QuantumRange factor;
                   rescale it away */
                for (j=0; j < (ssize_t) (number_images-1); j++)
                  evaluate_pixel[x].channel[i]*=QuantumScale;
              }
              break;
            }
            case RootMeanSquareEvaluateOperator:
            {
              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                evaluate_pixel[x].channel[i]=sqrt(evaluate_pixel[x].channel[i]/
                  number_images);
              break;
            }
            default:
              break;
          }
        }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if ((traits == UndefinedPixelTrait) ||
                ((traits & UpdatePixelTrait) == 0))
              continue;
            q[i]=ClampToQuantum(evaluate_pixel[x].channel[i]);
          }
          q+=GetPixelChannels(image);
        }
        p=(const Quantum **) RelinquishMagickMemory(p);
        if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
          status=MagickFalse;
        if (images->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp atomic
#endif
            progress++;
            proceed=SetImageProgress(images,EvaluateImageTag,progress,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
    }
  for (j=0; j < (ssize_t) number_images; j++)
    image_view[j]=DestroyCacheView(image_view[j]);
  image_view=(CacheView **) RelinquishMagickMemory(image_view);
  evaluate_view=DestroyCacheView(evaluate_view);
  evaluate_pixels=DestroyPixelThreadSet(images,evaluate_pixels);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
  EvaluateImage() applies the evaluate operator, with the constant `value',
  in place to every update-enabled channel of the image.  Returns MagickTrue
  on success, MagickFalse if the pixel cache could not be modified.
*/
MagickExport MagickBooleanType EvaluateImage(Image *image,
  const MagickEvaluateOperator op,const double value,ExceptionInfo *exception)
{
  CacheView
    *image_view;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  RandomInfo
    **magick_restrict random_info;
  ssize_t
    y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        result;
      register ssize_t
        i;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & CopyPixelTrait) != 0)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        result=ApplyEvaluateOperator(random_info[id],q[i],op,value);
        /* the operator summed sample and constant; halve for their mean */
        if (op == MeanEvaluateOperator)
          result/=2.0;
        q[i]=ClampToQuantum(result);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,EvaluateImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F u n c t i o n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FunctionImage() applies a value to the image with an arithmetic, relational,
% or logical operator. Use these operations to lighten or darken
% an image, to increase or decrease contrast in an image, or to produce the
% "negative" of an image.
%
% The format of the FunctionImage method is:
%
% MagickBooleanType FunctionImage(Image *image,
% const MagickFunction function,const ssize_t number_parameters,
% const double *parameters,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o function: A channel function.
%
% o parameters: one or more parameters.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ApplyFunction() evaluates the selected channel function on a single pixel
  sample, using up to `number_parameters' coefficients from `parameters'
  (missing parameters fall back to per-function defaults), and returns the
  result clamped to the quantum range.  `exception' is currently unused.
*/
static Quantum ApplyFunction(Quantum pixel,const MagickFunction function,
  const size_t number_parameters,const double *parameters,
  ExceptionInfo *exception)
{
  double
    result;
  register ssize_t
    i;
  (void) exception;
  result=0.0;
  switch (function)
  {
    case PolynomialFunction:
    {
      /*
        Polynomial: polynomial constants, highest to lowest order (e.g. c0*x^3+
        c1*x^2+c2*x+c3).  Evaluated via Horner's scheme on the normalized
        sample.
      */
      result=0.0;
      for (i=0; i < (ssize_t) number_parameters; i++)
        result=result*QuantumScale*pixel+parameters[i];
      result*=QuantumRange;
      break;
    }
    case SinusoidFunction:
    {
      double
        amplitude,
        bias,
        frequency,
        phase;
      /*
        Sinusoid: frequency, phase, amplitude, bias.
      */
      frequency=(number_parameters >= 1) ? parameters[0] : 1.0;
      phase=(number_parameters >= 2) ? parameters[1] : 0.0;
      amplitude=(number_parameters >= 3) ? parameters[2] : 0.5;
      bias=(number_parameters >= 4) ? parameters[3] : 0.5;
      result=(double) (QuantumRange*(amplitude*sin((double) (2.0*
        MagickPI*(frequency*QuantumScale*pixel+phase/360.0)))+bias));
      break;
    }
    case ArcsinFunction:
    {
      double
        bias,
        center,
        range,
        width;
      /*
        Arcsin (pegged at range limits for invalid results): width, center,
        range, and bias.
      */
      width=(number_parameters >= 1) ? parameters[0] : 1.0;
      center=(number_parameters >= 2) ? parameters[1] : 0.5;
      range=(number_parameters >= 3) ? parameters[2] : 1.0;
      bias=(number_parameters >= 4) ? parameters[3] : 0.5;
      result=2.0/width*(QuantumScale*pixel-center);
      if ( result <= -1.0 )
        result=bias-range/2.0;
      else
        if (result >= 1.0)
          result=bias+range/2.0;
        else
          result=(double) (range/MagickPI*asin((double) result)+bias);
      result*=QuantumRange;
      break;
    }
    case ArctanFunction:
    {
      double
        center,
        bias,
        range,
        slope;
      /*
        Arctan: slope, center, range, and bias.
      */
      slope=(number_parameters >= 1) ? parameters[0] : 1.0;
      center=(number_parameters >= 2) ? parameters[1] : 0.5;
      range=(number_parameters >= 3) ? parameters[2] : 1.0;
      bias=(number_parameters >= 4) ? parameters[3] : 0.5;
      result=(double) (MagickPI*slope*(QuantumScale*pixel-center));
      result=(double) (QuantumRange*(range/MagickPI*atan((double)
        result)+bias));
      break;
    }
    case UndefinedFunction:
      break;
  }
  return(ClampToQuantum(result));
}
/*
  FunctionImage() applies the channel function, driven by `parameters', in
  place to every update-enabled channel of the image; see the doc header
  above for the public contract.  Returns MagickTrue on success.
*/
MagickExport MagickBooleanType FunctionImage(Image *image,
  const MagickFunction function,const size_t number_parameters,
  const double *parameters,ExceptionInfo *exception)
{
#define FunctionImageTag "Function/Image "
  CacheView
    *image_view;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* prefer the OpenCL-accelerated path when it is available and succeeds */
  if (AccelerateFunctionImage(image,function,number_parameters,parameters,
        exception) != MagickFalse)
    return(MagickTrue);
#endif
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ApplyFunction(q[i],function,number_parameters,parameters,
          exception);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,FunctionImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e E n t r o p y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageEntropy() returns the entropy of one or more image channels.
%
% The format of the GetImageEntropy method is:
%
% MagickBooleanType GetImageEntropy(const Image *image,double *entropy,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o entropy: the average entropy of the selected channels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageEntropy(const Image *image,
  double *entropy,ExceptionInfo *exception)
{
  /*
    Report the average entropy of the selected channels, derived from the
    image channel statistics.  Returns MagickFalse if the statistics could
    not be computed.
  */
  ChannelStatistics
    *statistics;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  statistics=GetImageStatistics(image,exception);
  if (statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  *entropy=statistics[CompositePixelChannel].entropy;
  statistics=(ChannelStatistics *) RelinquishMagickMemory(statistics);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e E x t r e m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageExtrema() returns the extrema of one or more image channels.
%
% The format of the GetImageExtrema method is:
%
% MagickBooleanType GetImageExtrema(const Image *image,size_t *minima,
% size_t *maxima,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageExtrema(const Image *image,
  size_t *minima,size_t *maxima,ExceptionInfo *exception)
{
  /*
    Round the continuous channel range reported by GetImageRange() to
    integral extrema (round-half-away via the +/-0.5 adjustments).
  */
  double
    maximum,
    minimum;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=GetImageRange(image,&minimum,&maximum,exception);
  *minima=(size_t) ceil(minimum-0.5);
  *maxima=(size_t) floor(maximum+0.5);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e K u r t o s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageKurtosis() returns the kurtosis and skewness of one or more image
% channels.
%
% The format of the GetImageKurtosis method is:
%
% MagickBooleanType GetImageKurtosis(const Image *image,double *kurtosis,
% double *skewness,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o kurtosis: the kurtosis of the channel.
%
% o skewness: the skewness of the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageKurtosis(const Image *image,
  double *kurtosis,double *skewness,ExceptionInfo *exception)
{
  /*
    Report the kurtosis and skewness of the selected channels, derived from
    the image channel statistics.  Returns MagickFalse if the statistics
    could not be computed.
  */
  ChannelStatistics
    *statistics;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  statistics=GetImageStatistics(image,exception);
  if (statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  *kurtosis=statistics[CompositePixelChannel].kurtosis;
  *skewness=statistics[CompositePixelChannel].skewness;
  statistics=(ChannelStatistics *) RelinquishMagickMemory(statistics);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M e a n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMean() returns the mean and standard deviation of one or more image
% channels.
%
% The format of the GetImageMean method is:
%
% MagickBooleanType GetImageMean(const Image *image,double *mean,
% double *standard_deviation,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mean: the average value in the channel.
%
% o standard_deviation: the standard deviation of the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageMean(const Image *image,double *mean,
  double *standard_deviation,ExceptionInfo *exception)
{
  /*
    Report the mean and standard deviation of the selected channels,
    derived from the image channel statistics.  Returns MagickFalse if the
    statistics could not be computed.
  */
  ChannelStatistics
    *statistics;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  statistics=GetImageStatistics(image,exception);
  if (statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  *mean=statistics[CompositePixelChannel].mean;
  *standard_deviation=statistics[CompositePixelChannel].standard_deviation;
  statistics=(ChannelStatistics *) RelinquishMagickMemory(statistics);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M o m e n t s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMoments() returns the normalized moments of one or more image
% channels.
%
% The format of the GetImageMoments method is:
%
% ChannelMoments *GetImageMoments(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static size_t GetImageChannels(const Image *image)
{
  /*
    Count the channels that carry the update trait; report at least one so
    callers can divide by the result without a zero check.
  */
  register ssize_t
    i;

  size_t
    count;

  count=0;
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,i);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if ((traits != UndefinedPixelTrait) &&
        ((traits & UpdatePixelTrait) != 0))
      count++;
  }
  return(count == 0 ? 1 : count);
}
MagickExport ChannelMoments *GetImageMoments(const Image *image,
ExceptionInfo *exception)
{
#define MaxNumberImageMoments 8
CacheView
*image_view;
ChannelMoments
*channel_moments;
double
M00[MaxPixelChannels+1],
M01[MaxPixelChannels+1],
M02[MaxPixelChannels+1],
M03[MaxPixelChannels+1],
M10[MaxPixelChannels+1],
M11[MaxPixelChannels+1],
M12[MaxPixelChannels+1],
M20[MaxPixelChannels+1],
M21[MaxPixelChannels+1],
M22[MaxPixelChannels+1],
M30[MaxPixelChannels+1];
PointInfo
centroid[MaxPixelChannels+1];
ssize_t
channel,
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
channel_moments=(ChannelMoments *) AcquireQuantumMemory(MaxPixelChannels+1,
sizeof(*channel_moments));
if (channel_moments == (ChannelMoments *) NULL)
return(channel_moments);
(void) memset(channel_moments,0,(MaxPixelChannels+1)*
sizeof(*channel_moments));
(void) memset(centroid,0,sizeof(centroid));
(void) memset(M00,0,sizeof(M00));
(void) memset(M01,0,sizeof(M01));
(void) memset(M02,0,sizeof(M02));
(void) memset(M03,0,sizeof(M03));
(void) memset(M10,0,sizeof(M10));
(void) memset(M11,0,sizeof(M11));
(void) memset(M12,0,sizeof(M12));
(void) memset(M20,0,sizeof(M20));
(void) memset(M21,0,sizeof(M21));
(void) memset(M22,0,sizeof(M22));
(void) memset(M30,0,sizeof(M30));
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
/*
Compute center of mass (centroid).
*/
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
M00[channel]+=QuantumScale*p[i];
M00[MaxPixelChannels]+=QuantumScale*p[i];
M10[channel]+=x*QuantumScale*p[i];
M10[MaxPixelChannels]+=x*QuantumScale*p[i];
M01[channel]+=y*QuantumScale*p[i];
M01[MaxPixelChannels]+=y*QuantumScale*p[i];
}
p+=GetPixelChannels(image);
}
}
for (channel=0; channel <= MaxPixelChannels; channel++)
{
/*
Compute center of mass (centroid).
*/
if (M00[channel] < MagickEpsilon)
{
M00[channel]+=MagickEpsilon;
centroid[channel].x=(double) image->columns/2.0;
centroid[channel].y=(double) image->rows/2.0;
continue;
}
M00[channel]+=MagickEpsilon;
centroid[channel].x=M10[channel]/M00[channel];
centroid[channel].y=M01[channel]/M00[channel];
}
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
/*
Compute the image moments.
*/
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
M11[channel]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
QuantumScale*p[i];
M11[MaxPixelChannels]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
QuantumScale*p[i];
M20[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
QuantumScale*p[i];
M20[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
QuantumScale*p[i];
M02[channel]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
QuantumScale*p[i];
M02[MaxPixelChannels]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
QuantumScale*p[i];
M21[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
(y-centroid[channel].y)*QuantumScale*p[i];
M21[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
(y-centroid[channel].y)*QuantumScale*p[i];
M12[channel]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
(y-centroid[channel].y)*QuantumScale*p[i];
M12[MaxPixelChannels]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
(y-centroid[channel].y)*QuantumScale*p[i];
M22[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
(y-centroid[channel].y)*(y-centroid[channel].y)*QuantumScale*p[i];
M22[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
(y-centroid[channel].y)*(y-centroid[channel].y)*QuantumScale*p[i];
M30[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
(x-centroid[channel].x)*QuantumScale*p[i];
M30[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
(x-centroid[channel].x)*QuantumScale*p[i];
M03[channel]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
(y-centroid[channel].y)*QuantumScale*p[i];
M03[MaxPixelChannels]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
(y-centroid[channel].y)*QuantumScale*p[i];
}
p+=GetPixelChannels(image);
}
}
M00[MaxPixelChannels]/=GetImageChannels(image);
M01[MaxPixelChannels]/=GetImageChannels(image);
M02[MaxPixelChannels]/=GetImageChannels(image);
M03[MaxPixelChannels]/=GetImageChannels(image);
M10[MaxPixelChannels]/=GetImageChannels(image);
M11[MaxPixelChannels]/=GetImageChannels(image);
M12[MaxPixelChannels]/=GetImageChannels(image);
M20[MaxPixelChannels]/=GetImageChannels(image);
M21[MaxPixelChannels]/=GetImageChannels(image);
M22[MaxPixelChannels]/=GetImageChannels(image);
M30[MaxPixelChannels]/=GetImageChannels(image);
for (channel=0; channel <= MaxPixelChannels; channel++)
{
/*
Compute elliptical angle, major and minor axes, eccentricity, & intensity.
*/
channel_moments[channel].centroid=centroid[channel];
channel_moments[channel].ellipse_axis.x=sqrt((2.0/M00[channel])*
((M20[channel]+M02[channel])+sqrt(4.0*M11[channel]*M11[channel]+
(M20[channel]-M02[channel])*(M20[channel]-M02[channel]))));
channel_moments[channel].ellipse_axis.y=sqrt((2.0/M00[channel])*
((M20[channel]+M02[channel])-sqrt(4.0*M11[channel]*M11[channel]+
(M20[channel]-M02[channel])*(M20[channel]-M02[channel]))));
channel_moments[channel].ellipse_angle=RadiansToDegrees(0.5*atan(2.0*
M11[channel]/(M20[channel]-M02[channel]+MagickEpsilon)));
if (fabs(M11[channel]) < MagickEpsilon)
{
if (fabs(M20[channel]-M02[channel]) < MagickEpsilon)
channel_moments[channel].ellipse_angle+=0.0;
else
if ((M20[channel]-M02[channel]) < 0.0)
channel_moments[channel].ellipse_angle+=90.0;
else
channel_moments[channel].ellipse_angle+=0.0;
}
else
if (M11[channel] < 0.0)
{
if (fabs(M20[channel]-M02[channel]) < MagickEpsilon)
channel_moments[channel].ellipse_angle+=0.0;
else
if ((M20[channel]-M02[channel]) < 0.0)
channel_moments[channel].ellipse_angle+=90.0;
else
channel_moments[channel].ellipse_angle+=180.0;
}
else
{
if (fabs(M20[channel]-M02[channel]) < MagickEpsilon)
channel_moments[channel].ellipse_angle+=0.0;
else
if ((M20[channel]-M02[channel]) < 0.0)
channel_moments[channel].ellipse_angle+=90.0;
else
channel_moments[channel].ellipse_angle+=0.0;
}
channel_moments[channel].ellipse_eccentricity=sqrt(1.0-(
channel_moments[channel].ellipse_axis.y/
(channel_moments[channel].ellipse_axis.x+MagickEpsilon)));
channel_moments[channel].ellipse_intensity=M00[channel]/
(MagickPI*channel_moments[channel].ellipse_axis.x*
channel_moments[channel].ellipse_axis.y+MagickEpsilon);
}
for (channel=0; channel <= MaxPixelChannels; channel++)
{
/*
Normalize image moments.
*/
M10[channel]=0.0;
M01[channel]=0.0;
M11[channel]/=pow(M00[channel],1.0+(1.0+1.0)/2.0);
M20[channel]/=pow(M00[channel],1.0+(2.0+0.0)/2.0);
M02[channel]/=pow(M00[channel],1.0+(0.0+2.0)/2.0);
M21[channel]/=pow(M00[channel],1.0+(2.0+1.0)/2.0);
M12[channel]/=pow(M00[channel],1.0+(1.0+2.0)/2.0);
M22[channel]/=pow(M00[channel],1.0+(2.0+2.0)/2.0);
M30[channel]/=pow(M00[channel],1.0+(3.0+0.0)/2.0);
M03[channel]/=pow(M00[channel],1.0+(0.0+3.0)/2.0);
M00[channel]=1.0;
}
image_view=DestroyCacheView(image_view);
for (channel=0; channel <= MaxPixelChannels; channel++)
{
/*
Compute Hu invariant moments.
*/
channel_moments[channel].invariant[0]=M20[channel]+M02[channel];
channel_moments[channel].invariant[1]=(M20[channel]-M02[channel])*
(M20[channel]-M02[channel])+4.0*M11[channel]*M11[channel];
channel_moments[channel].invariant[2]=(M30[channel]-3.0*M12[channel])*
(M30[channel]-3.0*M12[channel])+(3.0*M21[channel]-M03[channel])*
(3.0*M21[channel]-M03[channel]);
channel_moments[channel].invariant[3]=(M30[channel]+M12[channel])*
(M30[channel]+M12[channel])+(M21[channel]+M03[channel])*
(M21[channel]+M03[channel]);
channel_moments[channel].invariant[4]=(M30[channel]-3.0*M12[channel])*
(M30[channel]+M12[channel])*((M30[channel]+M12[channel])*
(M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])*
(M21[channel]+M03[channel]))+(3.0*M21[channel]-M03[channel])*
(M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])*
(M30[channel]+M12[channel])-(M21[channel]+M03[channel])*
(M21[channel]+M03[channel]));
channel_moments[channel].invariant[5]=(M20[channel]-M02[channel])*
((M30[channel]+M12[channel])*(M30[channel]+M12[channel])-
(M21[channel]+M03[channel])*(M21[channel]+M03[channel]))+
4.0*M11[channel]*(M30[channel]+M12[channel])*(M21[channel]+M03[channel]);
channel_moments[channel].invariant[6]=(3.0*M21[channel]-M03[channel])*
(M30[channel]+M12[channel])*((M30[channel]+M12[channel])*
(M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])*
(M21[channel]+M03[channel]))-(M30[channel]-3*M12[channel])*
(M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])*
(M30[channel]+M12[channel])-(M21[channel]+M03[channel])*
(M21[channel]+M03[channel]));
channel_moments[channel].invariant[7]=M11[channel]*((M30[channel]+
M12[channel])*(M30[channel]+M12[channel])-(M03[channel]+M21[channel])*
(M03[channel]+M21[channel]))-(M20[channel]-M02[channel])*
(M30[channel]+M12[channel])*(M03[channel]+M21[channel]);
}
if (y < (ssize_t) image->rows)
channel_moments=(ChannelMoments *) RelinquishMagickMemory(channel_moments);
return(channel_moments);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
%   G e t I m a g e P e r c e p t u a l H a s h                               %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePerceptualHash() returns the perceptual hash of one or more
% image channels.
%
% The format of the GetImagePerceptualHash method is:
%
% ChannelPerceptualHash *GetImagePerceptualHash(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)
if (fabs(x) < Log10Epsilon)
return(log10(Log10Epsilon));
return(log10(fabs(x)));
}
MagickExport ChannelPerceptualHash *GetImagePerceptualHash(const Image *image,
  ExceptionInfo *exception)
{
  ChannelPerceptualHash
    *perceptual_hash;

  char
    *colorspaces,
    *q;

  const char
    *artifact;

  MagickBooleanType
    status;

  register char
    *p;

  register ssize_t
    i;

  /*
    Compute a perceptual hash per channel: blur the image slightly, reduce
    it to 8 bits, transform it into each requested colorspace, and record
    -log10 of the Hu moment invariants.  Returns a MaxPixelChannels+1
    element array (free with RelinquishMagickMemory()) or NULL on
    allocation failure.
  */
  perceptual_hash=(ChannelPerceptualHash *) AcquireQuantumMemory(
    MaxPixelChannels+1UL,sizeof(*perceptual_hash));
  if (perceptual_hash == (ChannelPerceptualHash *) NULL)
    return((ChannelPerceptualHash *) NULL);
  /*
    The colorspace list may be overridden with the "phash:colorspaces"
    artifact; the default hashes in sRGB and HCLp.
  */
  artifact=GetImageArtifact(image,"phash:colorspaces");
  if (artifact != NULL)
    colorspaces=AcquireString(artifact);
  else
    colorspaces=AcquireString("sRGB,HCLp");
  perceptual_hash[0].number_colorspaces=0;
  perceptual_hash[0].number_channels=0;
  q=colorspaces;
  for (i=0; (p=StringToken(",",&q)) != (char *) NULL; i++)
  {
    ChannelMoments
      *moments;

    Image
      *hash_image;

    size_t
      j;

    ssize_t
      channel,
      colorspace;

    if (i >= MaximumNumberOfPerceptualColorspaces)
      break;
    colorspace=ParseCommandOption(MagickColorspaceOptions,MagickFalse,p);
    if (colorspace < 0)
      break;
    perceptual_hash[0].colorspace[i]=(ColorspaceType) colorspace;
    hash_image=BlurImage(image,0.0,1.0,exception);
    if (hash_image == (Image *) NULL)
      break;
    hash_image->depth=8;
    status=TransformImageColorspace(hash_image,(ColorspaceType) colorspace,
      exception);
    if (status == MagickFalse)
      {
        /*
          Fix: destroy the blurred clone before bailing out; it was
          previously leaked when the colorspace transform failed.
        */
        hash_image=DestroyImage(hash_image);
        break;
      }
    moments=GetImageMoments(hash_image,exception);
    perceptual_hash[0].number_colorspaces++;
    perceptual_hash[0].number_channels+=GetImageChannels(hash_image);
    hash_image=DestroyImage(hash_image);
    if (moments == (ChannelMoments *) NULL)
      break;
    for (channel=0; channel <= MaxPixelChannels; channel++)
      for (j=0; j < MaximumNumberOfImageMoments; j++)
        perceptual_hash[channel].phash[i][j]=
          (-MagickLog10(moments[channel].invariant[j]));
    moments=(ChannelMoments *) RelinquishMagickMemory(moments);
  }
  colorspaces=DestroyString(colorspaces);
  return(perceptual_hash);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e R a n g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageRange() returns the range of one or more image channels.
%
% The format of the GetImageRange method is:
%
% MagickBooleanType GetImageRange(const Image *image,double *minima,
% double *maxima,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageRange(const Image *image,double *minima,
  double *maxima,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    initialize,
    status;

  ssize_t
    y;

  /*
    Find the smallest and largest sample value over all updatable channels;
    results are returned through *minima / *maxima.  Rows are scanned in
    parallel; each thread reduces its row locally and merges the row
    extrema under a critical section.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=MagickTrue;
  initialize=MagickTrue;
  *maxima=0.0;
  *minima=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status,initialize) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      row_maxima = 0.0,
      row_minima = 0.0;

    MagickBooleanType
      row_initialize;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    row_initialize=MagickTrue;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /*
          The first visible sample seeds the row extrema; later samples
          tighten them.
        */
        if (row_initialize != MagickFalse)
          {
            row_minima=(double) p[i];
            row_maxima=(double) p[i];
            row_initialize=MagickFalse;
          }
        else
          {
            if ((double) p[i] < row_minima)
              row_minima=(double) p[i];
            if ((double) p[i] > row_maxima)
              row_maxima=(double) p[i];
          }
      }
      p+=GetPixelChannels(image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetImageRange)
#endif
    {
      /*
        Merge this row's extrema into the shared result.
      */
      if (initialize != MagickFalse)
        {
          *minima=row_minima;
          *maxima=row_maxima;
          initialize=MagickFalse;
        }
      else
        {
          if (row_minima < *minima)
            *minima=row_minima;
          if (row_maxima > *maxima)
            *maxima=row_maxima;
        }
    }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e S t a t i s t i c s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageStatistics() returns statistics for each channel in the image. The
% statistics include the channel depth, its minima, maxima, mean, standard
% deviation, kurtosis and skewness. You can access the red channel mean, for
% example, like this:
%
% channel_statistics=GetImageStatistics(image,exception);
% red_mean=channel_statistics[RedPixelChannel].mean;
%
% Use MagickRelinquishMemory() to free the statistics buffer.
%
% The format of the GetImageStatistics method is:
%
% ChannelStatistics *GetImageStatistics(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ChannelStatistics *GetImageStatistics(const Image *image,
  ExceptionInfo *exception)
{
  ChannelStatistics
    *channel_statistics;

  double
    area,
    *histogram,
    standard_deviation;

  MagickStatusType
    status;

  QuantumAny
    range;

  register ssize_t
    i;

  size_t
    depth;

  ssize_t
    y;

  /*
    Gather per-channel statistics: depth, minima/maxima, mean, standard
    deviation, skewness, kurtosis and entropy.  Slot CompositePixelChannel
    aggregates all channels.  Returns NULL on allocation failure or when a
    pixel row cannot be read.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    One histogram bin per map entry per channel (used later for entropy).
  */
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,GetPixelChannels(image)*
    sizeof(*histogram));
  channel_statistics=(ChannelStatistics *) AcquireQuantumMemory(
    MaxPixelChannels+1,sizeof(*channel_statistics));
  if ((channel_statistics == (ChannelStatistics *) NULL) ||
      (histogram == (double *) NULL))
    {
      if (histogram != (double *) NULL)
        histogram=(double *) RelinquishMagickMemory(histogram);
      if (channel_statistics != (ChannelStatistics *) NULL)
        channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          channel_statistics);
      return(channel_statistics);
    }
  (void) memset(channel_statistics,0,(MaxPixelChannels+1)*
    sizeof(*channel_statistics));
  for (i=0; i <= (ssize_t) MaxPixelChannels; i++)
  {
    /*
      Seed extrema so any real sample replaces them; depth starts at 1 bit.
    */
    channel_statistics[i].depth=1;
    channel_statistics[i].maxima=(-MagickMaximumValue);
    channel_statistics[i].minima=MagickMaximumValue;
  }
  (void) memset(histogram,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*histogram));
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    /*
      Compute pixel statistics.
    */
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      /*
        Masked pixels do not contribute.
      */
      if (GetPixelReadMask(image,p) <= (QuantumRange/2))
        {
          p+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (channel_statistics[channel].depth != MAGICKCORE_QUANTUM_DEPTH)
          {
            /*
              Grow the estimated depth until the sample survives a
              round-trip through that bit depth; i-- retries the same
              channel at the increased depth.
            */
            depth=channel_statistics[channel].depth;
            range=GetQuantumRange(depth);
            status=p[i] != ScaleAnyToQuantum(ScaleQuantumToAny(p[i],range),
              range) ? MagickTrue : MagickFalse;
            if (status != MagickFalse)
              {
                channel_statistics[channel].depth++;
                i--;
                continue;
              }
          }
        if ((double) p[i] < channel_statistics[channel].minima)
          channel_statistics[channel].minima=(double) p[i];
        if ((double) p[i] > channel_statistics[channel].maxima)
          channel_statistics[channel].maxima=(double) p[i];
        /*
          Accumulate raw power sums for the moment statistics.
        */
        channel_statistics[channel].sum+=p[i];
        channel_statistics[channel].sum_squared+=(double) p[i]*p[i];
        channel_statistics[channel].sum_cubed+=(double) p[i]*p[i]*p[i];
        channel_statistics[channel].sum_fourth_power+=(double) p[i]*p[i]*p[i]*
          p[i];
        channel_statistics[channel].area++;
        if ((double) p[i] < channel_statistics[CompositePixelChannel].minima)
          channel_statistics[CompositePixelChannel].minima=(double) p[i];
        if ((double) p[i] > channel_statistics[CompositePixelChannel].maxima)
          channel_statistics[CompositePixelChannel].maxima=(double) p[i];
        /*
          Histogram rows are interleaved by channel index i.
        */
        histogram[GetPixelChannels(image)*ScaleQuantumToMap(
          ClampToQuantum((double) p[i]))+i]++;
        channel_statistics[CompositePixelChannel].sum+=(double) p[i];
        channel_statistics[CompositePixelChannel].sum_squared+=(double)
          p[i]*p[i];
        channel_statistics[CompositePixelChannel].sum_cubed+=(double)
          p[i]*p[i]*p[i];
        channel_statistics[CompositePixelChannel].sum_fourth_power+=(double)
          p[i]*p[i]*p[i]*p[i];
        channel_statistics[CompositePixelChannel].area++;
      }
      p+=GetPixelChannels(image);
    }
  }
  for (i=0; i <= (ssize_t) MaxPixelChannels; i++)
  {
    /*
      Normalize pixel statistics.
    */
    area=PerceptibleReciprocal(channel_statistics[i].area);
    channel_statistics[i].sum*=area;
    channel_statistics[i].sum_squared*=area;
    channel_statistics[i].sum_cubed*=area;
    channel_statistics[i].sum_fourth_power*=area;
    channel_statistics[i].mean=channel_statistics[i].sum;
    channel_statistics[i].variance=channel_statistics[i].sum_squared;
    /*
      Population standard deviation, then scaled by area/(area-1) toward
      the sample estimate.
    */
    standard_deviation=sqrt(channel_statistics[i].variance-
      (channel_statistics[i].mean*channel_statistics[i].mean));
    standard_deviation=sqrt(PerceptibleReciprocal(channel_statistics[i].area-
      1.0)*channel_statistics[i].area*standard_deviation*standard_deviation);
    channel_statistics[i].standard_deviation=standard_deviation;
  }
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      number_bins;

    register ssize_t
      j;

    /*
      Compute pixel entropy.
    */
    PixelChannel channel = GetPixelChannelChannel(image,i);
    number_bins=0.0;
    for (j=0; j <= (ssize_t) MaxMap; j++)
      if (histogram[GetPixelChannels(image)*j+i] > 0.0)
        number_bins++;
    area=PerceptibleReciprocal(channel_statistics[channel].area);
    for (j=0; j <= (ssize_t) MaxMap; j++)
    {
      double
        count;

      /*
        Shannon-style entropy, normalized by log10 of the number of
        occupied bins; the composite slot averages over all channels.
      */
      count=area*histogram[GetPixelChannels(image)*j+i];
      channel_statistics[channel].entropy+=-count*MagickLog10(count)*
        PerceptibleReciprocal(MagickLog10(number_bins));
      channel_statistics[CompositePixelChannel].entropy+=-count*
        MagickLog10(count)*PerceptibleReciprocal(MagickLog10(number_bins))/
        GetPixelChannels(image);
    }
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  for (i=0; i <= (ssize_t) MaxPixelChannels; i++)
  {
    /*
      Compute kurtosis & skewness statistics (kurtosis is excess: -3.0).
    */
    standard_deviation=PerceptibleReciprocal(
      channel_statistics[i].standard_deviation);
    channel_statistics[i].skewness=(channel_statistics[i].sum_cubed-3.0*
      channel_statistics[i].mean*channel_statistics[i].sum_squared+2.0*
      channel_statistics[i].mean*channel_statistics[i].mean*
      channel_statistics[i].mean)*(standard_deviation*standard_deviation*
      standard_deviation);
    channel_statistics[i].kurtosis=(channel_statistics[i].sum_fourth_power-4.0*
      channel_statistics[i].mean*channel_statistics[i].sum_cubed+6.0*
      channel_statistics[i].mean*channel_statistics[i].mean*
      channel_statistics[i].sum_squared-3.0*channel_statistics[i].mean*
      channel_statistics[i].mean*1.0*channel_statistics[i].mean*
      channel_statistics[i].mean)*(standard_deviation*standard_deviation*
      standard_deviation*standard_deviation)-3.0;
  }
  /*
    The composite slot reports the average of the per-channel values.
  */
  channel_statistics[CompositePixelChannel].mean=0.0;
  channel_statistics[CompositePixelChannel].standard_deviation=0.0;
  channel_statistics[CompositePixelChannel].entropy=0.0;
  for (i=0; i < (ssize_t) MaxPixelChannels; i++)
  {
    channel_statistics[CompositePixelChannel].mean+=
      channel_statistics[i].mean;
    channel_statistics[CompositePixelChannel].standard_deviation+=
      channel_statistics[i].standard_deviation;
    channel_statistics[CompositePixelChannel].entropy+=
      channel_statistics[i].entropy;
  }
  channel_statistics[CompositePixelChannel].mean/=(double)
    GetImageChannels(image);
  channel_statistics[CompositePixelChannel].standard_deviation/=(double)
    GetImageChannels(image);
  channel_statistics[CompositePixelChannel].entropy/=(double)
    GetImageChannels(image);
  /*
    y stops short of image->rows only when a row read failed above; treat
    that as failure and return NULL.
  */
  if (y < (ssize_t) image->rows)
    channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
      channel_statistics);
  return(channel_statistics);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o l y n o m i a l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PolynomialImage() returns a new image where each pixel is the sum of the
% pixels in the image sequence after applying its corresponding terms
% (coefficient and degree pairs).
%
% The format of the PolynomialImage method is:
%
% Image *PolynomialImage(const Image *images,const size_t number_terms,
% const double *terms,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o number_terms: the number of terms in the list. The actual list length
% is 2 x number_terms + 1 (the constant).
%
% o terms: the list of polynomial coefficients and degree pairs and a
% constant.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PolynomialImage(const Image *images,
  const size_t number_terms,const double *terms,ExceptionInfo *exception)
{
#define PolynomialImageTag "Polynomial/Image"

  CacheView
    *polynomial_view;

  Image
    *image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelChannels
    **magick_restrict polynomial_pixels;

  size_t
    number_images;

  ssize_t
    y;

  /*
    Combine the image sequence into one canvas: each destination channel is
    the sum over images j of terms[2*j]*(QuantumScale*sample)^terms[2*j+1],
    clamped back into quantum range.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImageCanvas(images,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    {
      image=DestroyImage(image);
      return((Image *) NULL);
    }
  number_images=GetImageListLength(images);
  /*
    Per-thread scratch accumulator rows.
  */
  polynomial_pixels=AcquirePixelThreadSet(images);
  if (polynomial_pixels == (PixelChannels **) NULL)
    {
      image=DestroyImage(image);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return((Image *) NULL);
    }
  /*
    Polynomial image pixels.
  */
  status=MagickTrue;
  progress=0;
  polynomial_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    CacheView
      *image_view;

    const Image
      *next;

    const int
      id = GetOpenMPThreadId();

    register ssize_t
      i,
      x;

    register PixelChannels
      *polynomial_pixel;

    register Quantum
      *magick_restrict q;

    ssize_t
      j;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(polynomial_view,0,y,image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /*
      Zero this thread's accumulator row.
    */
    polynomial_pixel=polynomial_pixels[id];
    for (j=0; j < (ssize_t) image->columns; j++)
      for (i=0; i < MaxPixelChannels; i++)
        polynomial_pixel[j].channel[i]=0.0;
    next=images;
    for (j=0; j < (ssize_t) number_images; j++)
    {
      register const Quantum
        *p;

      /*
        Images without a matching coefficient/degree pair are skipped.
      */
      if (j >= (ssize_t) number_terms)
        continue;
      image_view=AcquireVirtualCacheView(next,exception);
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          image_view=DestroyCacheView(image_view);
          break;
        }
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        register ssize_t
          i;

        for (i=0; i < (ssize_t) GetPixelChannels(next); i++)
        {
          MagickRealType
            coefficient,
            degree;

          PixelChannel channel = GetPixelChannelChannel(image,i);
          PixelTrait traits = GetPixelChannelTraits(next,channel);
          PixelTrait polynomial_traits=GetPixelChannelTraits(image,channel);
          if ((traits == UndefinedPixelTrait) ||
              (polynomial_traits == UndefinedPixelTrait))
            continue;
          if ((traits & UpdatePixelTrait) == 0)
            continue;
          /*
            terms[] holds (coefficient,degree) pairs; 2*j and (j << 1)+1
            index the same pair.
          */
          coefficient=(MagickRealType) terms[2*j];
          degree=(MagickRealType) terms[(j << 1)+1];
          /*
            NOTE(review): the sample is fetched through 'image''s channel
            map while iterating 'next''s pixels -- confirm this is intended
            when the two pixel layouts differ.
          */
          polynomial_pixel[x].channel[i]+=coefficient*
            pow(QuantumScale*GetPixelChannel(image,channel,p),degree);
        }
        p+=GetPixelChannels(next);
      }
      image_view=DestroyCacheView(image_view);
      next=GetNextImageInList(next);
    }
    /*
      Clamp the accumulated sums into the destination row.
    */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumRange*polynomial_pixel[x].channel[i]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(polynomial_view,exception) == MagickFalse)
      status=MagickFalse;
    if (images->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(images,PolynomialImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  polynomial_view=DestroyCacheView(polynomial_view);
  polynomial_pixels=DestroyPixelThreadSet(images,polynomial_pixels);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t a t i s t i c I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StatisticImage() makes each pixel the min / max / median / mode / etc. of
% the neighborhood of the specified width and height.
%
% The format of the StatisticImage method is:
%
% Image *StatisticImage(const Image *image,const StatisticType type,
% const size_t width,const size_t height,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the statistic type (median, mode, etc.).
%
% o width: the width of the pixel neighborhood.
%
% o height: the height of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
typedef struct _SkipNode
{
  size_t
    next[9],    /* forward links, one per skip-list level (levels 0..8) */
    count,      /* occurrences of this color in the current neighborhood */
    signature;  /* node is live when this matches PixelList.signature */
} SkipNode;

typedef struct _SkipList
{
  ssize_t
    level;  /* highest level currently in use */

  SkipNode
    *nodes;  /* 65537 nodes: one per 16-bit color plus sentinel 65536 */
} SkipList;

typedef struct _PixelList
{
  size_t
    length,  /* samples in the neighborhood (width*height) */
    seed;    /* LCG state for randomized skip-list node levels */

  SkipList
    skip_list;

  size_t
    signature;  /* bumped per reset; invalidates stale nodes lazily */
} PixelList;
static PixelList *DestroyPixelList(PixelList *pixel_list)
{
  /*
    Release a pixel list and its skip-list node pool; NULL-safe, always
    returns NULL.
  */
  if (pixel_list != (PixelList *) NULL)
    {
      if (pixel_list->skip_list.nodes != (SkipNode *) NULL)
        pixel_list->skip_list.nodes=(SkipNode *) RelinquishAlignedMemory(
          pixel_list->skip_list.nodes);
      pixel_list=(PixelList *) RelinquishMagickMemory(pixel_list);
    }
  return(pixel_list);
}
static PixelList **DestroyPixelListThreadSet(PixelList **pixel_list)
{
  register ssize_t
    n;

  /*
    Tear down each per-thread pixel list, then the pointer table itself.
  */
  assert(pixel_list != (PixelList **) NULL);
  for (n=0; n < (ssize_t) GetMagickResourceLimit(ThreadResource); n++)
    if (pixel_list[n] != (PixelList *) NULL)
      pixel_list[n]=DestroyPixelList(pixel_list[n]);
  return((PixelList **) RelinquishMagickMemory(pixel_list));
}
static PixelList *AcquirePixelList(const size_t width,const size_t height)
{
PixelList
*pixel_list;
pixel_list=(PixelList *) AcquireMagickMemory(sizeof(*pixel_list));
if (pixel_list == (PixelList *) NULL)
return(pixel_list);
(void) memset((void *) pixel_list,0,sizeof(*pixel_list));
pixel_list->length=width*height;
pixel_list->skip_list.nodes=(SkipNode *) AcquireAlignedMemory(65537UL,
sizeof(*pixel_list->skip_list.nodes));
if (pixel_list->skip_list.nodes == (SkipNode *) NULL)
return(DestroyPixelList(pixel_list));
(void) memset(pixel_list->skip_list.nodes,0,65537UL*
sizeof(*pixel_list->skip_list.nodes));
pixel_list->signature=MagickCoreSignature;
return(pixel_list);
}
static PixelList **AcquirePixelListThreadSet(const size_t width,
const size_t height)
{
PixelList
**pixel_list;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
pixel_list=(PixelList **) AcquireQuantumMemory(number_threads,
sizeof(*pixel_list));
if (pixel_list == (PixelList **) NULL)
return((PixelList **) NULL);
(void) memset(pixel_list,0,number_threads*sizeof(*pixel_list));
for (i=0; i < (ssize_t) number_threads; i++)
{
pixel_list[i]=AcquirePixelList(width,height);
if (pixel_list[i] == (PixelList *) NULL)
return(DestroyPixelListThreadSet(pixel_list));
}
return(pixel_list);
}
static void AddNodePixelList(PixelList *pixel_list,const size_t color)
{
  register SkipList
    *p;

  register ssize_t
    level;

  size_t
    search,
    update[9];

  /*
    Insert a new 16-bit color key into the skip-list (node 65536 serves as
    the sentinel root).  Called only for colors not already present.

    Initialize the node.
  */
  p=(&pixel_list->skip_list);
  p->nodes[color].signature=pixel_list->signature;
  p->nodes[color].count=1;
  /*
    Determine where it belongs in the list: record, for each level, the
    last node whose key precedes the new color.
  */
  search=65536UL;
  for (level=p->level; level >= 0; level--)
  {
    while (p->nodes[search].next[level] < color)
      search=p->nodes[search].next[level];
    update[level]=search;
  }
  /*
    Generate a pseudo-random level for this node (LCG coin flips; capped
    at 8 and at most two levels above the current list height).
  */
  for (level=0; ; level++)
  {
    pixel_list->seed=(pixel_list->seed*42893621L)+1L;
    if ((pixel_list->seed & 0x300) != 0x300)
      break;
  }
  if (level > 8)
    level=8;
  if (level > (p->level+2))
    level=p->level+2;
  /*
    If we're raising the list's level, link back to the root node.
  */
  while (level > p->level)
  {
    p->level++;
    update[p->level]=65536UL;
  }
  /*
    Link the node into the skip-list.
  */
  do
  {
    p->nodes[color].next[level]=p->nodes[update[level]].next[level];
    p->nodes[update[level]].next[level]=color;
  } while (level-- > 0);
}
static inline void GetMaximumPixelList(PixelList *pixel_list,Quantum *pixel)
{
  register SkipList
    *list;

  size_t
    highest,
    key;

  ssize_t
    tally;

  /*
    Report the largest color present in the neighborhood by walking the
    level-0 chain of the skip-list.
  */
  list=(&pixel_list->skip_list);
  key=65536L;
  tally=0;
  highest=list->nodes[key].next[0];
  do
  {
    key=list->nodes[key].next[0];
    if (highest < key)
      highest=key;
    tally+=list->nodes[key].count;
  } while (tally < (ssize_t) pixel_list->length);
  *pixel=ScaleShortToQuantum((unsigned short) highest);
}
static inline void GetMeanPixelList(PixelList *pixel_list,Quantum *pixel)
{
  double
    total;

  register SkipList
    *list;

  size_t
    key;

  ssize_t
    tally;

  /*
    Average the colors in the neighborhood, weighting each skip-list node
    by its occurrence count.
  */
  list=(&pixel_list->skip_list);
  key=65536L;
  tally=0;
  total=0.0;
  do
  {
    key=list->nodes[key].next[0];
    total+=(double) list->nodes[key].count*key;
    tally+=list->nodes[key].count;
  } while (tally < (ssize_t) pixel_list->length);
  total/=pixel_list->length;
  *pixel=ScaleShortToQuantum((unsigned short) total);
}
static inline void GetMedianPixelList(PixelList *pixel_list,Quantum *pixel)
{
  register SkipList
    *list;

  size_t
    key;

  ssize_t
    tally;

  /*
    Walk the sorted level-0 chain until the cumulative count passes the
    halfway point; that node's key is the median color.
  */
  list=(&pixel_list->skip_list);
  key=65536L;
  tally=0;
  do
  {
    key=list->nodes[key].next[0];
    tally+=list->nodes[key].count;
  } while (tally <= (ssize_t) (pixel_list->length >> 1));
  *pixel=ScaleShortToQuantum((unsigned short) key);
}
static inline void GetMinimumPixelList(PixelList *pixel_list,Quantum *pixel)
{
  register SkipList
    *list;

  size_t
    key,
    lowest;

  ssize_t
    tally;

  /*
    Report the smallest color present in the neighborhood.
  */
  list=(&pixel_list->skip_list);
  tally=0;
  key=65536UL;
  lowest=list->nodes[key].next[0];
  do
  {
    key=list->nodes[key].next[0];
    if (key < lowest)
      lowest=key;
    tally+=list->nodes[key].count;
  } while (tally < (ssize_t) pixel_list->length);
  *pixel=ScaleShortToQuantum((unsigned short) lowest);
}
static inline void GetModePixelList(PixelList *pixel_list,Quantum *pixel)
{
  register SkipList
    *list;

  size_t
    best,
    best_count,
    key;

  ssize_t
    tally;

  /*
    Pick the most frequent color in the neighborhood; the sentinel node
    seeds the comparison so any real node can replace it.
  */
  list=(&pixel_list->skip_list);
  key=65536L;
  best=key;
  best_count=list->nodes[best].count;
  tally=0;
  do
  {
    key=list->nodes[key].next[0];
    if (list->nodes[key].count > best_count)
      {
        best=key;
        best_count=list->nodes[best].count;
      }
    tally+=list->nodes[key].count;
  } while (tally < (ssize_t) pixel_list->length);
  *pixel=ScaleShortToQuantum((unsigned short) best);
}
static inline void GetNonpeakPixelList(PixelList *pixel_list,Quantum *pixel)
{
  register SkipList
    *p;

  size_t
    color,
    next,
    previous;

  ssize_t
    count;

  /*
    Finds the non peak value for each of the colors: walk to the median,
    then substitute its single neighbor when the median sits at either end
    of the sorted list.
  */
  p=(&pixel_list->skip_list);
  color=65536L;
  next=p->nodes[color].next[0];
  count=0;
  do
  {
    previous=color;
    color=next;
    next=p->nodes[color].next[0];
    count+=p->nodes[color].count;
  } while (count <= (ssize_t) (pixel_list->length >> 1));
  /*
    65536 is the sentinel: previous==65536 means the median is the
    smallest color present, next==65536 means it is the largest.
  */
  if ((previous == 65536UL) && (next != 65536UL))
    color=next;
  else
    if ((previous != 65536UL) && (next == 65536UL))
      color=previous;
  *pixel=ScaleShortToQuantum((unsigned short) color);
}
static inline void GetRootMeanSquarePixelList(PixelList *pixel_list,
  Quantum *pixel)
{
  double
    sum;

  register SkipList
    *p;

  size_t
    color;

  ssize_t
    count;

  /*
    Find the root mean square value for each of the color.
  */
  p=(&pixel_list->skip_list);
  color=65536L;
  count=0;
  sum=0.0;
  do
  {
    color=p->nodes[color].next[0];
    /*
      Accumulate in floating point: the previous size_t product
      count*color*color can overflow on 32-bit size_t since color may be
      as large as 65535 (65535^2 ~ 4.3e9).  This also matches the double
      accumulation style of GetMeanPixelList().
    */
    sum+=(double) p->nodes[color].count*(double) color*(double) color;
    count+=p->nodes[color].count;
  } while (count < (ssize_t) pixel_list->length);
  sum/=pixel_list->length;
  *pixel=ScaleShortToQuantum((unsigned short) sqrt(sum));
}
static inline void GetStandardDeviationPixelList(PixelList *pixel_list,
  Quantum *pixel)
{
  double
    sum,
    sum_squared;

  register SkipList
    *p;

  size_t
    color;

  ssize_t
    count;

  /*
    Find the standard-deviation value for each of the color.
  */
  p=(&pixel_list->skip_list);
  color=65536L;
  count=0;
  sum=0.0;
  sum_squared=0.0;
  do
  {
    color=p->nodes[color].next[0];
    sum+=(double) p->nodes[color].count*color;
    /*
      Weight the squared color by its count directly instead of looping
      count times adding color*color: identical result, O(1) per node
      rather than O(count).
    */
    sum_squared+=(double) p->nodes[color].count*(double) color*(double) color;
    count+=p->nodes[color].count;
  } while (count < (ssize_t) pixel_list->length);
  sum/=pixel_list->length;
  sum_squared/=pixel_list->length;
  *pixel=ScaleShortToQuantum((unsigned short) sqrt(sum_squared-(sum*sum)));
}
static inline void InsertPixelList(const Quantum pixel,PixelList *pixel_list)
{
  unsigned short
    index;

  /*
    Bump the count if this 16-bit intensity is already live in the
    skip-list, otherwise link a fresh node for it.
  */
  index=ScaleQuantumToShort(pixel);
  if (pixel_list->skip_list.nodes[index].signature == pixel_list->signature)
    pixel_list->skip_list.nodes[index].count++;
  else
    AddNodePixelList(pixel_list,index);
}
/*
  Reset the skip-list to empty: point every level of the sentinel node
  (index 65536) back at itself and bump the signature so stale nodes from
  the previous window are ignored rather than walked.
*/
static void ResetPixelList(PixelList *pixel_list)
{
  int
    level;

  register SkipNode
    *sentinel;

  register SkipList
    *list;

  list=(&pixel_list->skip_list);
  sentinel=list->nodes+65536UL;
  list->level=0;
  for (level=0; level < 9; level++)
    sentinel->next[level]=65536UL;
  pixel_list->seed=pixel_list->signature++;
}
/*
  StatisticImage() replaces each pixel with the statistic (min, max, mean,
  median, mode, gradient, RMS, standard deviation, ...) of its width x height
  neighborhood.  Returns a new image, or NULL on failure.  Rows are processed
  in parallel; each OpenMP thread owns a private PixelList histogram.
*/
MagickExport Image *StatisticImage(const Image *image,const StatisticType type,
const size_t width,const size_t height,ExceptionInfo *exception)
{
#define StatisticImageTag "Statistic/Image"
CacheView
*image_view,
*statistic_view;
Image
*statistic_image;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelList
**magick_restrict pixel_list;
ssize_t
center,
y;
/*
Initialize statistics image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
statistic_image=CloneImage(image,0,0,MagickTrue,
exception);
if (statistic_image == (Image *) NULL)
return((Image *) NULL);
status=SetImageStorageClass(statistic_image,DirectClass,exception);
if (status == MagickFalse)
{
statistic_image=DestroyImage(statistic_image);
return((Image *) NULL);
}
/* one PixelList per OpenMP thread; width/height clamped to at least 1 */
pixel_list=AcquirePixelListThreadSet(MagickMax(width,1),MagickMax(height,1));
if (pixel_list == (PixelList **) NULL)
{
statistic_image=DestroyImage(statistic_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
/*
Make each pixel the min / max / median / mode / etc. of the neighborhood.
*/
/* offset (in Quantum units) of the window's center pixel inside the
padded virtual-pixel rows fetched below */
center=(ssize_t) GetPixelChannels(image)*(image->columns+MagickMax(width,1))*
(MagickMax(height,1)/2L)+GetPixelChannels(image)*(MagickMax(width,1)/2L);
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
statistic_view=AcquireAuthenticCacheView(statistic_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,statistic_image,statistic_image->rows,1)
#endif
for (y=0; y < (ssize_t) statistic_image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
/* fetch the source row plus the surrounding window rows (virtual pixels
handle the image border) */
p=GetCacheViewVirtualPixels(image_view,-((ssize_t) MagickMax(width,1)/2L),y-
(ssize_t) (MagickMax(height,1)/2L),image->columns+MagickMax(width,1),
MagickMax(height,1),exception);
q=QueueCacheViewAuthenticPixels(statistic_view,0,y,statistic_image->columns, 1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) statistic_image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
Quantum
pixel;
register const Quantum
*magick_restrict pixels;
register ssize_t
u;
ssize_t
v;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait statistic_traits=GetPixelChannelTraits(statistic_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(statistic_traits == UndefinedPixelTrait))
continue;
/* copy-only channels and write-masked pixels pass through unchanged */
if (((statistic_traits & CopyPixelTrait) != 0) ||
(GetPixelWriteMask(image,p) <= (QuantumRange/2)))
{
SetPixelChannel(statistic_image,channel,p[center+i],q);
continue;
}
if ((statistic_traits & UpdatePixelTrait) == 0)
continue;
/* build the window histogram for this channel */
pixels=p;
ResetPixelList(pixel_list[id]);
for (v=0; v < (ssize_t) MagickMax(height,1); v++)
{
for (u=0; u < (ssize_t) MagickMax(width,1); u++)
{
InsertPixelList(pixels[i],pixel_list[id]);
pixels+=GetPixelChannels(image);
}
pixels+=GetPixelChannels(image)*image->columns;
}
switch (type)
{
case GradientStatistic:
{
double
maximum,
minimum;
GetMinimumPixelList(pixel_list[id],&pixel);
minimum=(double) pixel;
GetMaximumPixelList(pixel_list[id],&pixel);
maximum=(double) pixel;
pixel=ClampToQuantum(MagickAbsoluteValue(maximum-minimum));
break;
}
case MaximumStatistic:
{
GetMaximumPixelList(pixel_list[id],&pixel);
break;
}
case MeanStatistic:
{
GetMeanPixelList(pixel_list[id],&pixel);
break;
}
case MedianStatistic:
default:
{
GetMedianPixelList(pixel_list[id],&pixel);
break;
}
case MinimumStatistic:
{
GetMinimumPixelList(pixel_list[id],&pixel);
break;
}
case ModeStatistic:
{
GetModePixelList(pixel_list[id],&pixel);
break;
}
case NonpeakStatistic:
{
GetNonpeakPixelList(pixel_list[id],&pixel);
break;
}
case RootMeanSquareStatistic:
{
GetRootMeanSquarePixelList(pixel_list[id],&pixel);
break;
}
case StandardDeviationStatistic:
{
GetStandardDeviationPixelList(pixel_list[id],&pixel);
break;
}
}
SetPixelChannel(statistic_image,channel,pixel,q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(statistic_image);
}
if (SyncCacheViewAuthenticPixels(statistic_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
/* NOTE(review): the atomic covers only the increment; the read of
progress passed to SetImageProgress is unsynchronized -- presumably
an accepted benign race for progress reporting; confirm upstream. */
proceed=SetImageProgress(image,StatisticImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
statistic_view=DestroyCacheView(statistic_view);
image_view=DestroyCacheView(image_view);
pixel_list=DestroyPixelListThreadSet(pixel_list);
if (status == MagickFalse)
statistic_image=DestroyImage(statistic_image);
return(statistic_image);
}
|
hello.c | #include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#include <omp.h>
#include <mpix.h>
/************************************************************
* This is a simple hello world program in OpenMP and MPI
* with some BGQ specific additions to return additional
* node information by calling MPIX_Rank2torus
* ************************************************************/
/*
 * Hybrid MPI/OpenMP hello world for Blue Gene/Q.  Every OpenMP thread of
 * every MPI rank prints its identity, then the node's torus coordinates
 * and hardware details obtained via the MPIX extensions.  The whole
 * parallel body sits in a critical section so each thread's lines are
 * printed as one serialized group.
 *
 * Change vs. original: removed unused locals `FILE *f1` and `int i`.
 */
int main(int argc,char **argv)
{
  int myid, numprocs;
  int resultlen;
  int tn;                               /* OpenMP thread number */
  MPIX_Hardware_t hw;                   /* BGQ hardware descriptor */
  int tid[6];                           /* torus coordinates of this rank */
  char myname[MPI_MAX_PROCESSOR_NAME];

  MPI_Init(&argc,&argv);
  MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
  MPI_Comm_rank(MPI_COMM_WORLD,&myid);
  MPI_Get_processor_name(myname,&resultlen);
  /* tn is shared across threads, but it is only written and read inside
     the critical section, so accesses are serialized */
#pragma omp parallel
#pragma omp critical
  {
  tn=omp_get_thread_num();
  printf("hybrid mpi/openmp says Hello from %6d:%2.2d on %s\n",myid,tn,myname);
  MPIX_Rank2torus(myid,tid);
  printf("%6d:%2.2d;%3d%3d%3d%3d%3d%3d %s\n",myid,tn,tid[0],tid[1],tid[2],tid[3],tid[4],tid[5]," coords");
  MPIX_Hardware(&hw);
  printf("%6d:%2.2d;%15u %s\n",myid,tn,hw.prank," Physical rank of the node (irrespective of mapping)");
  printf("%6d:%2.2d;%15u %s\n",myid,tn,hw.psize," Size of the partition (irrespective of mapping) ");
  printf("%6d:%2.2d;%15u %s\n",myid,tn,hw.ppn," Processes per node ");
  printf("%6d:%2.2d;%15u %s\n",myid,tn,hw.coreID," Process id; values monotonically increase from 0..63 ");
  printf("%6d:%2.2d;%15u %s\n",myid,tn,hw.clockMHz," Frequency in MegaHertz ");
  printf("%6d:%2.2d;%15u %s\n",myid,tn,hw.memSize," Size of the core memory in MB ");
  printf("%6d:%2.2d;%15u %s\n",myid,tn,hw.torus_dimension," Actual dimension for the torus");
  printf("%6d:%2.2d;%3u%3u%3u%3u%3u %s\n",myid,tn,hw.Size[0],hw.Size[1],hw.Size[2],hw.Size[3],hw.Size[4]," Max coordinates on the torus");
  printf("%6d:%2.2d;%3u%3u%3u%3u%3u %s\n",myid,tn,hw.Coords[0],hw.Coords[1],hw.Coords[2],hw.Coords[3],hw.Coords[4]," This node's coordinates");
  printf("%6d:%2.2d;%3u%3u%3u%3u%3u %s\n",myid,tn,hw.isTorus[0],hw.isTorus[1],hw.isTorus[2],hw.isTorus[3],hw.isTorus[4]," Do we have wraparound links");
  }
  MPI_Finalize();
}
|
DRACC_OMP_023_MxV_Partially_Missing_Data_yes.c | /*
Matrix Vector multiplication with partially Matrix missing on Accelerator. Using the target enter data construct.
*/
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>
#define C 512
int *a;
int *b;
int *c;
/* Fill the matrix b and vector a with ones and zero the result vector c. */
int init(){
    for(int row=0; row<C; row++){
        for(int col=0; col<C; col++){
            b[col+row*C]=1;
        }
        a[row]=1;
        c[row]=0;
    }
    return 0;
}
/*
  Offloaded matrix-vector product c = b * a.
  NOTE(review): b holds a CxC matrix (C*C ints) but only b[0:C] -- the
  first row -- is mapped to the device.  This partial mapping is the
  *intentional* defect of this DRACC test case (the filename ends in
  "_yes"); check() later reports whether the resulting wrong values are
  visible.  Do not "fix" the map clause without retiring the test.
*/
int Mult(){
#pragma omp target map(to:a[0:C],b[0:C]) map(tofrom:c[0:C]) device(0)
{
#pragma omp teams distribute parallel for
for(int i=0; i<C; i++){
for(int j=0; j<C; j++){
c[i]+=b[j+i*C]*a[j];
}
}
}
return 0;
}
/*
 * Report whether the deliberately broken device mapping was observable:
 * every c[i] should equal C after a correct multiply, so any deviation
 * flags the memory-access issue.
 */
int check(){
    bool mismatch = false;
    for(int i=0; i<C; i++){
        if(c[i]!=C){
            mismatch = true;
        }
    }
    printf("Memory Access Issue visible: %s\n",mismatch ? "true" : "false");
    return 0;
}
/*
 * Allocate the operands, run the (deliberately faulty) offloaded
 * matrix-vector multiply and report whether the mapping issue was visible.
 *
 * Change vs. original: malloc results are now checked before use instead
 * of being dereferenced unconditionally in init().
 */
int main(){
    a = malloc(C*sizeof(int));
    b = malloc(C*C*sizeof(int));
    c = malloc(C*sizeof(int));
    if(a == NULL || b == NULL || c == NULL){
        fprintf(stderr,"allocation failed\n");
        free(a);
        free(b);
        free(c);
        return EXIT_FAILURE;
    }
    init();
    Mult();
    check();
    free(a);
    free(b);
    free(c);
    return 0;
}
MultiClassConvolutionalTsetlinMachine.c | /*
Copyright (c) 2019 Ole-Christoffer Granmo
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
This code implements the Convolutional Tsetlin Machine from paper arXiv:1905.09688
https://arxiv.org/abs/1905.09688
*/
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include "MultiClassConvolutionalTsetlinMachine.h"
/**************************************/
/*** The Convolutional Tsetlin Machine ***/
/**************************************/
/*** Initialize Tsetlin Machine ***/
/*
  Allocate a multi-class machine: one independent TsetlinMachine per class,
  all sharing the same hyper-parameters.  Patch/chunk/state-bit geometry is
  cached on the wrapper for later index arithmetic.
  NOTE(review): malloc results are not checked; a failed allocation here is
  dereferenced immediately -- consider adding checks in the project tree.
*/
struct MultiClassTsetlinMachine *CreateMultiClassTsetlinMachine(int number_of_classes, int number_of_clauses, int number_of_features, int number_of_patches, int number_of_ta_chunks, int number_of_state_bits, int T, double s, double s_range, int boost_true_positive_feedback, int weighted_clauses)
{
struct MultiClassTsetlinMachine *mc_tm = NULL;
mc_tm = (void *)malloc(sizeof(struct MultiClassTsetlinMachine));
mc_tm->number_of_classes = number_of_classes;
mc_tm->tsetlin_machines = (void *)malloc(sizeof(struct TsetlinMachine *)* number_of_classes);
for (int i = 0; i < number_of_classes; i++) {
mc_tm->tsetlin_machines[i] = CreateTsetlinMachine(number_of_clauses, number_of_features, number_of_patches, number_of_ta_chunks, number_of_state_bits, T, s, s_range, boost_true_positive_feedback, weighted_clauses);
}
mc_tm->number_of_patches = number_of_patches;
mc_tm->number_of_ta_chunks = number_of_ta_chunks;
mc_tm->number_of_state_bits = number_of_state_bits;
return mc_tm;
}
/* Reset the Tsetlin automata of every per-class machine. */
void mc_tm_initialize(struct MultiClassTsetlinMachine *mc_tm)
{
	int class;

	for (class = 0; class < mc_tm->number_of_classes; class++)
		tm_initialize(mc_tm->tsetlin_machines[class]);
}
/*
  Release every per-class machine (internal buffers via tm_destroy, then
  the struct itself) and the machine array.  The wrapper struct is NOT
  freed here.  For thread-local clones that share buffers with a master
  machine, use mc_tm_destroy2 instead.
*/
void mc_tm_destroy(struct MultiClassTsetlinMachine *mc_tm)
{
for (int i = 0; i < mc_tm->number_of_classes; i++) {
tm_destroy(mc_tm->tsetlin_machines[i]);
free(mc_tm->tsetlin_machines[i]);
}
free(mc_tm->tsetlin_machines);
}
/*
  Variant of mc_tm_destroy using tm_destroy2, used by mc_tm_predict /
  mc_tm_transform on per-thread clones whose ta_state / clause_weights
  point into the master machine -- presumably tm_destroy2 skips those
  shared buffers (confirm against tm_destroy2's definition).
*/
void mc_tm_destroy2(struct MultiClassTsetlinMachine *mc_tm)
{
for (int i = 0; i < mc_tm->number_of_classes; i++) {
tm_destroy2(mc_tm->tsetlin_machines[i]);
free(mc_tm->tsetlin_machines[i]);
}
free(mc_tm->tsetlin_machines);
}
/***********************************/
/*** Predict classes of inputs X ***/
/***********************************/
/***********************************/
/*** Predict classes of inputs X ***/
/***********************************/
/*
  Predict the class of each of number_of_examples inputs in X, writing the
  winning class index into y[l].  Each OpenMP thread gets a clone machine
  whose ta_state and clause_weights POINT INTO the master machine (the
  clone's freshly allocated copies are freed first), so scoring needs no
  synchronization while per-clone scratch state stays private.
*/
void mc_tm_predict(struct MultiClassTsetlinMachine *mc_tm, unsigned int *X, int *y, int number_of_examples)
{
/* number of unsigned ints occupied by one example in X */
unsigned int step_size = mc_tm->number_of_patches * mc_tm->number_of_ta_chunks;
int max_threads = omp_get_max_threads();
struct MultiClassTsetlinMachine **mc_tm_thread = (void *)malloc(sizeof(struct MultiClassTsetlinMachine *) * max_threads);
struct TsetlinMachine *tm = mc_tm->tsetlin_machines[0];
for (int t = 0; t < max_threads; t++) {
mc_tm_thread[t] = CreateMultiClassTsetlinMachine(mc_tm->number_of_classes, tm->number_of_clauses, tm->number_of_features, tm->number_of_patches, tm->number_of_ta_chunks, tm->number_of_state_bits, tm->T, tm->s, tm->s_range, tm->boost_true_positive_feedback, tm->weighted_clauses);
for (int i = 0; i < mc_tm->number_of_classes; i++) {
/* swap the clone's buffers for the master's (read-only during predict) */
free(mc_tm_thread[t]->tsetlin_machines[i]->ta_state);
mc_tm_thread[t]->tsetlin_machines[i]->ta_state = mc_tm->tsetlin_machines[i]->ta_state;
free(mc_tm_thread[t]->tsetlin_machines[i]->clause_weights);
mc_tm_thread[t]->tsetlin_machines[i]->clause_weights = mc_tm->tsetlin_machines[i]->clause_weights;
}
}
#pragma omp parallel for
for (int l = 0; l < number_of_examples; l++) {
int thread_id = omp_get_thread_num();
unsigned int pos = l*step_size;
// Identify class with largest output
int max_class_sum = tm_score(mc_tm_thread[thread_id]->tsetlin_machines[0], &X[pos]);
int max_class = 0;
for (int i = 1; i < mc_tm_thread[thread_id]->number_of_classes; i++) {
int class_sum = tm_score(mc_tm_thread[thread_id]->tsetlin_machines[i], &X[pos]);
if (max_class_sum < class_sum) {
max_class_sum = class_sum;
max_class = i;
}
}
y[l] = max_class;
}
/* destroy2 releases only clone-private state, not the shared buffers */
for (int t = 0; t < max_threads; t++) {
mc_tm_destroy2(mc_tm_thread[t]);
free(mc_tm_thread[t]);
}
free(mc_tm_thread);
return;
}
/******************************************/
/*** Online Training of Tsetlin Machine ***/
/******************************************/
// The Tsetlin Machine can be trained incrementally, one training example at a time.
// Use this method directly for online and incremental training.
/*
 * One incremental training step: give positive feedback to the machine of
 * target_class, then negative feedback to one randomly drawn *other*
 * class (pairwise learning of the class outputs).
 */
void mc_tm_update(struct MultiClassTsetlinMachine *mc_tm, unsigned int *Xi, int target_class)
{
	unsigned int negative_target_class;

	tm_update(mc_tm->tsetlin_machines[target_class], Xi, 1);

	/* Draw uniformly from [0, number_of_classes) until the pick differs
	   from target_class. */
	do {
		negative_target_class = (unsigned int)mc_tm->number_of_classes * 1.0*rand()/((unsigned int)RAND_MAX + 1);
	} while (negative_target_class == target_class);

	tm_update(mc_tm->tsetlin_machines[negative_target_class], Xi, 0);
}
/**********************************************/
/*** Batch Mode Training of Tsetlin Machine ***/
/**********************************************/
void mc_tm_fit(struct MultiClassTsetlinMachine *mc_tm, unsigned int *X, int *y, int number_of_examples, int epochs)
{
unsigned int step_size = mc_tm->number_of_patches * mc_tm->number_of_ta_chunks;
int max_threads = omp_get_max_threads();
struct MultiClassTsetlinMachine **mc_tm_thread = (void *)malloc(sizeof(struct MultiClassTsetlinMachine *) * max_threads);
struct TsetlinMachine *tm = mc_tm->tsetlin_machines[0];
for (int i = 0; i < mc_tm->number_of_classes; i++) {
mc_tm->tsetlin_machines[i]->clause_lock = (omp_lock_t *)malloc(sizeof(omp_lock_t) * tm->number_of_clauses);
for (int j = 0; j < tm->number_of_clauses; ++j) {
omp_init_lock(&mc_tm->tsetlin_machines[i]->clause_lock[j]);
}
}
for (int t = 0; t < max_threads; t++) {
mc_tm_thread[t] = CreateMultiClassTsetlinMachine(mc_tm->number_of_classes, tm->number_of_clauses, tm->number_of_features, tm->number_of_patches, tm->number_of_ta_chunks, tm->number_of_state_bits, tm->T, tm->s, tm->s_range, tm->boost_true_positive_feedback, tm->weighted_clauses);
for (int i = 0; i < mc_tm->number_of_classes; i++) {
free(mc_tm_thread[t]->tsetlin_machines[i]->ta_state);
mc_tm_thread[t]->tsetlin_machines[i]->ta_state = mc_tm->tsetlin_machines[i]->ta_state;
free(mc_tm_thread[t]->tsetlin_machines[i]->clause_weights);
mc_tm_thread[t]->tsetlin_machines[i]->clause_weights = mc_tm->tsetlin_machines[i]->clause_weights;
mc_tm_thread[t]->tsetlin_machines[i]->clause_lock = mc_tm->tsetlin_machines[i]->clause_lock;
}
}
for (int epoch = 0; epoch < epochs; epoch++) {
#pragma omp parallel for
for (int l = 0; l < number_of_examples; l++) {
int thread_id = omp_get_thread_num();
unsigned int pos = l*step_size;
mc_tm_update(mc_tm_thread[thread_id], &X[pos], y[l]);
}
}
for (int i = 0; i < mc_tm->number_of_classes; i++) {
for (int j = 0; j < tm->number_of_clauses; ++j) {
omp_destroy_lock(&mc_tm->tsetlin_machines[i]->clause_lock[j]);
}
}
free(tm->clause_lock);
free(mc_tm_thread);
}
/* Return the state of Tsetlin automaton `ta` of clause `clause` in the
   machine for `class` (thin wrapper over tm_ta_state). */
int mc_tm_ta_state(struct MultiClassTsetlinMachine *mc_tm, int class, int clause, int ta)
{
return tm_ta_state(mc_tm->tsetlin_machines[class], clause, ta);
}
/* Return the include/exclude action of automaton `ta` of clause `clause`
   in the machine for `class` (thin wrapper over tm_ta_action). */
int mc_tm_ta_action(struct MultiClassTsetlinMachine *mc_tm, int class, int clause, int ta)
{
return tm_ta_action(mc_tm->tsetlin_machines[class], clause, ta);
}
void mc_tm_clause_configuration(struct MultiClassTsetlinMachine *mc_tm, int class, int clause, unsigned int *clause_configuration)
{
for (int k = 0; k < mc_tm->tsetlin_machines[class]->number_of_features; ++k) {
clause_configuration[k] = tm_ta_action(mc_tm->tsetlin_machines[class], clause, k);
}
return;
}
/* Return the weight of clause `clause` in the machine for `class`. */
int mc_tm_clause_weight(struct MultiClassTsetlinMachine *mc_tm, int class, int clause)
{
return(mc_tm->tsetlin_machines[class]->clause_weights[clause]);
}
/*****************************************************/
/*** Storing and Loading of Tsetlin Machine State ****/
/*****************************************************/
/* Copy one class's learned state (TA states and clause weights) into
   caller-provided buffers; the caller must size them appropriately. */
void mc_tm_get_state(struct MultiClassTsetlinMachine *mc_tm, int class, unsigned int *clause_weights, unsigned int *ta_state)
{
tm_get_ta_state(mc_tm->tsetlin_machines[class], ta_state);
tm_get_clause_weights(mc_tm->tsetlin_machines[class], clause_weights);
return;
}
/* Restore one class's learned state (TA states and clause weights) from
   buffers previously filled by mc_tm_get_state. */
void mc_tm_set_state(struct MultiClassTsetlinMachine *mc_tm, int class, unsigned int *clause_weights, unsigned int *ta_state)
{
tm_set_ta_state(mc_tm->tsetlin_machines[class], ta_state);
tm_set_clause_weights(mc_tm->tsetlin_machines[class], clause_weights);
return;
}
/******************************************************************************/
/*** Clause Based Transformation of Input Examples for Multi-layer Learning ***/
/******************************************************************************/
/*
  Transform inputs X into clause-output feature vectors X_transformed for
  multi-layer learning: feature (l, class i, clause j) is 1 when clause j
  of class i fires on example l (or, with `invert`, when it does not).
  Uses the same shared-state per-thread clone scheme as mc_tm_predict;
  tm_score is called for its side effect of filling clause_output.
*/
void mc_tm_transform(struct MultiClassTsetlinMachine *mc_tm, unsigned int *X, unsigned int *X_transformed, int invert, int number_of_examples)
{
unsigned int step_size = mc_tm->number_of_patches * mc_tm->number_of_ta_chunks;
int max_threads = omp_get_max_threads();
struct MultiClassTsetlinMachine **mc_tm_thread = (void *)malloc(sizeof(struct MultiClassTsetlinMachine *) * max_threads);
struct TsetlinMachine *tm = mc_tm->tsetlin_machines[0];
for (int t = 0; t < max_threads; t++) {
mc_tm_thread[t] = CreateMultiClassTsetlinMachine(mc_tm->number_of_classes, tm->number_of_clauses, tm->number_of_features, tm->number_of_patches, tm->number_of_ta_chunks, tm->number_of_state_bits, tm->T, tm->s, tm->s_range, tm->boost_true_positive_feedback, tm->weighted_clauses);
for (int i = 0; i < mc_tm->number_of_classes; i++) {
/* clones read the master's learned state */
free(mc_tm_thread[t]->tsetlin_machines[i]->ta_state);
mc_tm_thread[t]->tsetlin_machines[i]->ta_state = mc_tm->tsetlin_machines[i]->ta_state;
free(mc_tm_thread[t]->tsetlin_machines[i]->clause_weights);
mc_tm_thread[t]->tsetlin_machines[i]->clause_weights = mc_tm->tsetlin_machines[i]->clause_weights;
}
}
#pragma omp parallel for
for (int l = 0; l < number_of_examples; l++) {
int thread_id = omp_get_thread_num();
unsigned int pos = l*step_size;
for (int i = 0; i < mc_tm->number_of_classes; i++) {
/* score for the side effect: populates this clone's clause_output bits */
tm_score(mc_tm_thread[thread_id]->tsetlin_machines[i], &X[pos]);
for (int j = 0; j < mc_tm->tsetlin_machines[i]->number_of_clauses; ++j) {
unsigned long transformed_feature = l*mc_tm->number_of_classes*mc_tm->tsetlin_machines[i]->number_of_clauses + i*mc_tm->tsetlin_machines[i]->number_of_clauses + j;
/* clause_output packs 32 clause bits per unsigned int */
int clause_chunk = j / 32;
int clause_pos = j % 32;
int clause_output = (mc_tm_thread[thread_id]->tsetlin_machines[i]->clause_output[clause_chunk] & (1 << clause_pos)) > 0;
if (clause_output && !invert) {
X_transformed[transformed_feature] = 1;
} else if (!clause_output && invert) {
X_transformed[transformed_feature] = 1;
} else {
X_transformed[transformed_feature] = 0;
}
}
}
}
for (int t = 0; t < max_threads; t++) {
mc_tm_destroy2(mc_tm_thread[t]);
free(mc_tm_thread[t]);
}
free(mc_tm_thread);
return;
}
|
GB_binop__pow_uint64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__pow_uint64)
// A.*B function (eWiseMult): GB (_AemultB_08__pow_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__pow_uint64)
// A.*B function (eWiseMult): GB (_AemultB_04__pow_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__pow_uint64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__pow_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__pow_uint64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pow_uint64)
// C=scalar+B GB (_bind1st__pow_uint64)
// C=scalar+B' GB (_bind1st_tran__pow_uint64)
// C=A+scalar GB (_bind2nd__pow_uint64)
// C=A'+scalar GB (_bind2nd_tran__pow_uint64)
// C type: uint64_t
// A type: uint64_t
// A pattern? 0
// B type: uint64_t
// B pattern? 0
// BinaryOp: cij = GB_pow_uint64 (aij, bij)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_pow_uint64 (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_POW || GxB_NO_UINT64 || GxB_NO_POW_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
/* C = A+B with C, A, B all dense; work done by the shared template,
   specialized through the GB_* macros above.  Auto-generated: change the
   Generator/ source, not this file. */
void GB (_Cdense_ewise3_noaccum__pow_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
/* C += B: accumulate sparse B into dense C via the subassign-23 template.
   Returns GrB_NO_VALUE when this operator is compile-time disabled. */
GrB_Info GB (_Cdense_accumB__pow_uint64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
/* C += b: accumulate a scalar into dense C via the subassign-22 template. */
GrB_Info GB (_Cdense_accumb__pow_uint64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the block above always returns.  Harmless,
// but this file is auto-generated; the cleanup belongs in Generator/.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
/* eWiseAdd: C=A+B (optionally masked), uint64 pow operator; alpha/beta
   scalars apply only to the eWiseUnion variant. */
GrB_Info GB (_AaddB__pow_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint64_t alpha_scalar ;
uint64_t beta_scalar ;
if (is_eWiseUnion)
{
/* eWiseUnion substitutes these scalars for missing A/B entries */
alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
/* eWiseMult method 08: C=A.*B (optionally masked), C sparse/hypersparse. */
GrB_Info GB (_AemultB_08__pow_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
/* eWiseMult method 02: A sparse/hyper, B bitmap/full.  pow is not
   commutative and has no flipped variant, so flipxy selects between
   fmult(x,y) and fmult(y,x) via the GB_FLIPPED template switch. */
GrB_Info GB (_AemultB_02__pow_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
/* eWiseMult method 04: C<M>=A.*B with M sparse/hyper, A and B bitmap/full. */
GrB_Info GB (_AemultB_04__pow_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
/* eWiseMult: C=A.*B (optionally masked) where C is bitmap. */
GrB_Info GB (_AemultB_bitmap__pow_uint64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
/* Cx [p] = pow (x, Bx [p]) for every present entry p of B (Bb is B's
   bitmap, or NULL when B is full).  Cx and Bx may be aliased. */
GrB_Info GB (_bind1st__pow_uint64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t x = (*((uint64_t *) x_input)) ;
uint64_t *Bx = (uint64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint64_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_pow_uint64 (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
/* Cx [p] = pow (Ax [p], y) for every present entry p of A (Ab is A's
   bitmap, or NULL when A is full).  Cx and Ax may be aliased. */
GrB_Info GB (_bind2nd__pow_uint64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t y = (*((uint64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint64_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_pow_uint64 (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c, included below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_pow_uint64 (x, aij) ; \
}
// C = pow (x, A'): transpose A via the included template while applying the
// operator with the scalar x bound to the first operand.
GrB_Info GB (_bind1st_tran__pow_uint64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// Generated code re-establishes GB_ATYPE after the template use; it happens
// to be the same type here, so this is a harmless no-op.
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c, included below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_pow_uint64 (aij, y) ; \
}
// C = pow (A', y): transpose A via the included template while applying the
// operator with the scalar y bound to the second operand.
GrB_Info GB (_bind2nd_tran__pow_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
test11.c | int g1 = 10;
int g2 = 20;
// Test fixture: a function containing a labeled orphaned barrier. The bare
// integer statements (0;, 1;) are position markers, presumably for a
// barrier-matching analysis — they perform no work.
void foo () {
0;
l1:
#pragma omp barrier
1;
}
// Test fixture: same shape as foo() — a labeled orphaned barrier between
// marker statements 0; and 1;.
void bar() {
0;
l2:
#pragma omp barrier
1;
}
// Test driver: a parallel region whose branches and callees (foo, bar) all
// contain barriers. The numeric marker statements label control-flow points;
// they are not real computation.
int main() {
#pragma omp parallel
{
2;
// g1 = 20;
if (3) {
4;
foo ();
5;
} else {
6;
g2 = 10;
l3:
#pragma omp barrier
7;
}
foobar(g1);
foobar(g2);
if (8) {
9;
bar();
10;
} else {
11;
l4:
#pragma omp barrier
12;
}
13;
}
}
|
louvain_imm.h | //===------------------------------------------------------------*- C++ -*-===//
//
// Ripples: A C++ Library for Influence Maximization
// Marco Minutoli <marco.minutoli@pnnl.gov>
// Pacific Northwest National Laboratory
//
//===----------------------------------------------------------------------===//
//
// Copyright (c) 2019, Battelle Memorial Institute
//
// Battelle Memorial Institute (hereinafter Battelle) hereby grants permission
// to any person or entity lawfully obtaining a copy of this software and
// associated documentation files (hereinafter “the Software”) to redistribute
// and use the Software in source and binary forms, with or without
// modification. Such person or entity may use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and may permit
// others to do so, subject to the following conditions:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimers.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Other than as used herein, neither the name Battelle Memorial Institute or
// Battelle may be used in any form whatsoever without the express written
// consent of Battelle.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL BATTELLE OR CONTRIBUTORS BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
//===----------------------------------------------------------------------===//
#ifndef RIPPLES_LOUVAIN_IMM_H
#define RIPPLES_LOUVAIN_IMM_H
#include <queue>
#include <string>
#include <type_traits>
#include <vector>
#include "ripples/find_most_influential.h"
#include "ripples/generate_rrr_sets.h"
#include "ripples/imm.h"
#include "ripples/imm_execution_record.h"
#include "spdlog/fmt/ostr.h"
#include "spdlog/sinks/stdout_color_sinks.h"
#include "spdlog/spdlog.h"
namespace ripples {
//! Command-line configuration for the Louvain-IMM driver: the base IMM
//! options plus the community-map file.
struct LouvainIMMConfiguration : public IMMConfiguration {
  std::string communityList;  //!< Path to the community map file.

  //! Register the Louvain-IMM command-line options on top of the IMM ones.
  //! \param app The CLI11 application to extend.
  void addCmdOptions(CLI::App &app) {
    IMMConfiguration::addCmdOptions(app);
    auto *communityOption = app.add_option(
        "--community-map", communityList, "The filename of the community map.");
    communityOption->required();
    communityOption->group("Algorithm Options");
  }
};
//! Execution record for Louvain-IMM; currently identical to IMMExecutionRecord.
struct LouvainIMMExecutionRecord : public IMMExecutionRecord {};
namespace {
//! Orders (vertex, count) pairs by ascending count, so that a
//! std::priority_queue using it pops the pair with the largest count first.
template <typename vertex_type>
struct Compare {
  bool operator()(std::pair<vertex_type, size_t> &lhs,
                  std::pair<vertex_type, size_t> &rhs) const {
    return rhs.second > lhs.second;
  }
};
}  // namespace
//! Select k seeds across all communities by lazy-greedy coverage of the
//! per-community RRR-set collections, merging per-community candidates
//! through a global min-heap of (vertex, normalized contribution) pairs.
//! \param communities The community subgraphs.
//! \param k The number of seeds to select.
//! \param RRRcollection One RRR-set collection per community (consumed:
//!        the sets are repartitioned in place).
//! \param ex_tag Execution policy tag (sequential or OpenMP).
//! \return The vector of selected seed vertices (global IDs).
template <typename GraphTy, typename RRRset, typename execution_tag>
auto FindMostInfluentialSet(const std::vector<GraphTy> &communities, size_t k,
std::vector<std::vector<RRRset>> &RRRcollection,
execution_tag &&ex_tag) {
spdlog::get("console")->info("SeedSelect start");
using vertex_type = typename GraphTy::vertex_type;
Compare<vertex_type> cmp;
using priorityQueue =
std::priority_queue<std::pair<vertex_type, size_t>,
std::vector<std::pair<vertex_type, size_t>>,
decltype(cmp)>;
// Count occurrencies for all communities
std::vector<std::vector<uint32_t>> coverageVectors(communities.size());
std::vector<priorityQueue> queues(communities.size());
std::vector<typename std::vector<RRRset>::iterator> ends(communities.size());
double total_delta = 0;
// Each iteration touches only index i of the shared vectors, so the loop is
// race-free; total_delta is combined with a reduction.
#pragma omp parallel for reduction(+ : total_delta)
for (size_t i = 0; i < communities.size(); ++i) {
coverageVectors[i] = std::vector<uint32_t>(communities[i].num_nodes(), 0);
CountOccurrencies(RRRcollection[i].begin(), RRRcollection[i].end(),
coverageVectors[i].begin(), coverageVectors[i].end(),
std::forward<execution_tag>(ex_tag));
std::vector<std::pair<vertex_type, size_t>> queue_storage(
communities[i].num_nodes());
InitHeapStorage(coverageVectors[i].begin(), coverageVectors[i].end(),
queue_storage.begin(), queue_storage.end(),
std::forward<execution_tag>(ex_tag));
queues[i] = std::move(priorityQueue(cmp, std::move(queue_storage)));
ends[i] = RRRcollection[i].end();
total_delta += RRRcollection[i].size();
}
spdlog::get("console")->flush();
// Init on heap per community
using vertex_contribution_pair = std::pair<vertex_type, double>;
// NOTE(review): {-1, -1.0} is the "empty slot" sentinel; if vertex_type is
// unsigned the -1 wraps — confirm callers treat it as a sentinel only.
std::vector<vertex_contribution_pair> global_heap(
k + 1, vertex_contribution_pair{-1, -1.0});
std::vector<uint64_t> active_communities(communities.size(), 1);
// Min-heap on contribution: front() holds the weakest of the k+1 candidates.
auto heap_cmp = [](const vertex_contribution_pair &a,
const vertex_contribution_pair &b) -> bool {
return a.second > b.second;
};
std::make_heap(global_heap.begin(), global_heap.end(), heap_cmp);
// std::mutex global_heap_mutex;
// for each communities do in parallel
size_t iteration = 0;  // NOTE(review): never incremented or read.
// Round-robin over communities until every one is exhausted or its best
// candidate cannot beat the current global minimum.
while (!std::all_of(active_communities.begin(), active_communities.end(),
[](const uint64_t &v) -> bool { return v == 0; })) {
for (size_t i = 0; i < communities.size(); ++i) {
if (active_communities[i] == 0) continue;
if (queues[i].empty()) {
active_communities[i] = 0;
continue;
}
auto element = queues[i].top();
queues[i].pop();
// Lazy-greedy: re-heap stale counts until the top entry is up to date.
while (element.second > coverageVectors[i][element.first]) {
element.second = coverageVectors[i][element.first];
queues[i].push(element);
element = queues[i].top();
queues[i].pop();
}
// Move the RRR sets NOT covered by the chosen vertex to the front.
auto cmp = [=](const RRRset &a) -> auto {
return !std::binary_search(a.begin(), a.end(), element.first);
};
auto itr = partition(RRRcollection[i].begin(), ends[i], cmp,
std::forward<execution_tag>(ex_tag));
// Update counters over whichever side of the partition is smaller.
if (std::distance(itr, ends[i]) <
std::distance(RRRcollection[i].begin(), itr)) {
UpdateCounters(itr, ends[i], coverageVectors[i],
std::forward<execution_tag>(ex_tag));
} else {
// NOTE(review): execution_tag deduces as a (reference) type, so this
// is_same check may be false even for the OpenMP tag — confirm.
if (std::is_same<execution_tag, omp_parallel_tag>::value) {
#pragma omp parallel for simd
for (size_t j = 0; j < coverageVectors[i].size(); ++j)
coverageVectors[i][j] = 0;
} else {
std::fill(coverageVectors[i].begin(), coverageVectors[i].end(), 0);
}
CountOccurrencies(RRRcollection[i].begin(), itr,
coverageVectors[i].begin(), coverageVectors[i].end(),
std::forward<execution_tag>(ex_tag));
}
ends[i] = itr;
// NOTE(review): both operands are size_t, so this divides as integers
// before the double conversion — confirm truncation is intended.
double contribution = RRRcollection[i].size()
? element.second / RRRcollection[i].size()
: 0;
vertex_contribution_pair vcp{communities[i].convertID(element.first),
contribution};
// Handle the global index insertion
// std::lock_guard<std::mutex> _(global_heap_mutex);
std::pop_heap(global_heap.begin(), global_heap.end(), heap_cmp);
global_heap.back() = vcp;
std::push_heap(global_heap.begin(), global_heap.end(), heap_cmp);
// If our freshly inserted candidate is the global minimum, this
// community cannot contribute anything better: retire it.
if (global_heap.front() == vcp) active_communities[i] = 0;
}
}
// Drop the (k+1)-th, weakest candidate.
std::pop_heap(global_heap.begin(), global_heap.end(), heap_cmp);
global_heap.pop_back();
double coverage = 0;  // NOTE(review): accumulated but never returned.
std::vector<typename GraphTy::vertex_type> seeds;
seeds.reserve(k);
for (auto e : global_heap) {
seeds.push_back(e.first);
coverage += e.second;
}
return seeds;
}
//! Louvain-IMM, sequential driver.
//!
//! Runs ThetaEstimation + Sampling independently on every community, then
//! performs a global greedy seed selection across all communities.
//!
//! \param communities The community subgraphs (transposed).
//! \param CFG The configuration (k, epsilon, ...).
//! \param l Parameter usually set to 1.
//! \param gen The random number generator.
//! \param records Per-community execution records, filled by Sampling.
//! \param model_tag The diffusion model tag.
//! \param ex_tag The sequential execution policy tag.
//! \return A pair (selected seeds, execution records).
template <typename GraphTy, typename ConfTy, typename GeneratorTy,
          typename RecordTy, typename diff_model_tag>
auto LouvainIMM(const std::vector<GraphTy> &communities, ConfTy &CFG, double l,
                GeneratorTy &gen, std::vector<RecordTy> &records, diff_model_tag &&model_tag,
                sequential_tag &&ex_tag) {
  size_t k = CFG.k;
  using RRRsetCollection = std::vector<RRRset<GraphTy>>;
  std::vector<RRRsetCollection> R(communities.size());
  // For each community do ThetaEstimation and Sampling.
  for (size_t i = 0; i < communities.size(); ++i) {
    // Per-community l' correction, scaled by the community size.
    double l_1 = l * (1 + 1 / std::log2(communities[i].num_nodes()));
    R[i] = Sampling(communities[i], CFG, l_1, gen, records[i],
                    std::forward<diff_model_tag>(model_tag),
                    std::forward<sequential_tag>(ex_tag));
  }
  // Global seed selection using the heap.
  auto S = FindMostInfluentialSet(communities, k, R,
                                  std::forward<sequential_tag>(ex_tag));
  return std::make_pair(S, records);
}
//! Influence Maximization using Community Structure.
//!
//! The algorithm uses the Louvain method for community detection and then
//! IMM to select seeds frome the communities.
//!
//! \tparam GraphTy The type of the input graph.
//! \tparam PRNG The type of the parallel random number generator.
//! \tparam diff_model_tag Type-Tag to selecte the diffusion model.
//! \tparam execution_tag Type-Tag to select the execution policy.
//!
//! \param communities The input graphs. The graphs are transoposed.
//! \param k The size of the seed set.
//! \param epsilon The parameter controlling the approximation guarantee.
//! \param l Parameter usually set to 1.
//! \param gen The parallel random number generator.
//! \param model_tag The diffusion model tag.
//! \param ex_tag The execution policy tag.
template <typename GraphTy, typename ConfTy, typename GeneratorTy,
typename diff_model_tag>
auto LouvainIMM(const std::vector<GraphTy> &communities, ConfTy &CFG, double l,
std::vector<GeneratorTy> &gen, diff_model_tag &&model_tag,
omp_parallel_tag &&ex_tag) {
using vertex_type = typename GraphTy::vertex_type;
size_t k = CFG.k;
double epsilon = CFG.epsilon;
using RRRsetCollection = std::vector<RRRset<GraphTy>>;
std::vector<RRRsetCollection> R(communities.size());
// For each community do ThetaEstimation and Sampling
for (size_t i = 0; i < communities.size(); ++i) {
double l_1 = l * (1 + 1 / std::log2(communities[i].num_nodes()));
R[i] = Sampling(communities[i], CFG, l_1, gen[i], gen[i].execution_record(),
std::forward<diff_model_tag>(model_tag),
std::forward<omp_parallel_tag>(ex_tag));
}
// Global seed selection using the heap
auto S = FindMostInfluentialSet(communities, k, R,
std::forward<omp_parallel_tag>(ex_tag));
std::vector<IMMExecutionRecord> records(communities.size());
for (auto & generator : gen) {
records.push_back(generator.execution_record());
}
return std::make_pair(S, records);
}
} // namespace ripples
#endif /* RIPPLES_LOUVAIN_IMM_H */
|
nn_openmp.c | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <omp.h>
#ifdef GEM5_WORK
#include <stdint.h>
void m5_dumpreset_stats(uint64_t ns_delay, uint64_t ns_period);
void m5_work_begin(uint64_t workid, uint64_t threadid);
void m5_work_end(uint64_t workid, uint64_t threadid);
#endif
#define MAX_ARGS 10
#define REC_LENGTH 49 // size of a record in db
#define REC_WINDOW 10 // number of records to read at a time
#define LATITUDE_POS 28 // location of latitude coordinates in input record
#define OPEN 10000 // initial value of nearest neighbors
/* A candidate nearest neighbor: the raw database record and its distance
 * to the target coordinates. */
struct neighbor {
char entry[REC_LENGTH];  /* copy of the record text */
double dist;             /* Euclidean distance to the target point */
};
#define MAX_FILENAME_LEN 512
/**
* This program finds the k-nearest neighbors
* Usage: ./nn <filelist> <num> <target latitude> <target longitude>
* filelist: File with the filenames to the records
* num: Number of nearest neighbors to find
* target lat: Latitude coordinate for distance calculations
* target long: Longitude coordinate for distance calculations
* The filelist and data are generated by hurricane_gen.c
* REC_WINDOW has been arbitrarily assigned; A larger value would allow more work for the threads
*/
/**
 * Finds the k nearest neighbors to a target (latitude, longitude) among the
 * hurricane records listed in a filelist. Records are streamed REC_WINDOW at
 * a time; distances are computed in parallel with OpenMP, then the k-best
 * list is updated serially.
 *
 * argv[1]: filelist, argv[2]: k, argv[3]: target latitude,
 * argv[4]: target longitude. Returns 0 on success; exits on any error.
 */
int main(int argc, char* argv[]) {
    FILE *flist,*fp;
    int i=0,j=0, k=0, rec_count=0, done=0;
    char sandbox[REC_LENGTH * REC_WINDOW], *rec_iter, dbname[64];
    struct neighbor *neighbors = NULL;
    float target_lat, target_long, tmp_lat=0, tmp_long=0;
    if(argc < 5) {
        fprintf(stderr, "Invalid set of arguments\n");
        exit(-1);
    }
    flist = fopen(argv[1], "r");
    if(!flist) {
        printf("error opening flist\n");
        exit(1);
    }
    k = atoi(argv[2]);
    target_lat = atof(argv[3]);
    target_long = atof(argv[4]);
    neighbors = malloc(k*sizeof(struct neighbor));
    if(neighbors == NULL) {
        fprintf(stderr, "no room for neighbors\n");
        exit(0);
    }
    for( j = 0 ; j < k ; j++ ) { /* Initialize list of nearest neighbors to very large dist */
        neighbors[j].dist = OPEN;
    }
    /**** main processing ****/
    if(fscanf(flist, "%s\n", dbname) != 1) {
        fprintf(stderr, "error reading filelist\n");
        exit(0);
    }
    /* Database files live next to the filelist: reuse its directory prefix. */
    char dbfilename[MAX_FILENAME_LEN];
    sprintf(dbfilename, "%s", argv[1]);
    int start_index = 0;
    for (i=0; i < MAX_FILENAME_LEN; i++) {
        if (dbfilename[i] == '/') {
            start_index = i+1;
        } else if (dbfilename[i] == 0) {
            break;
        }
    }
    sprintf(&dbfilename[start_index], "%s", dbname);
    fp = fopen(dbfilename, "r");
    if(!fp) {
        printf("error opening flist\n");
        exit(1);
    }
    /* Per-record distances for the current window, filled in parallel. */
    float *z;
    z = (float *) malloc(REC_WINDOW * sizeof(float));
    if(z == NULL) { /* FIX: the malloc result was previously used unchecked */
        fprintf(stderr, "no room for distances\n");
        exit(0);
    }
#ifdef GEM5_WORK
    m5_work_begin(0, 0);
    m5_dumpreset_stats(0, 0);
#endif
    while(!done) {
        /* Read in REC_WINDOW number of records */
        rec_count = fread(sandbox, REC_LENGTH, REC_WINDOW, fp);
        if( rec_count != REC_WINDOW ) {
            /* FIX: check the stream we actually read from (fp); the original
             * tested ferror(flist), so a read error on the db file was
             * silently treated as end-of-file. */
            if(!ferror(fp)) { /* an eof occured */
                fclose(fp);
                if(feof(flist))
                    done = 1;
                else {
                    /* Advance to the next database file in the list. */
                    if(fscanf(flist, "%s\n", dbname) != 1) {
                        fprintf(stderr, "error reading filelist\n");
                        exit(0);
                    }
                    sprintf(&dbfilename[start_index], "%s", dbname);
                    fp = fopen(dbfilename, "r");
                    if(!fp) {
                        printf("error opening a db\n");
                        exit(1);
                    }
                }
            } else {
                perror("Error");
                exit(0);
            }
        }
        /* Compute the distance of every record in the window in parallel. */
#pragma omp parallel for shared(z, target_lat, target_long) private(i, rec_iter, tmp_lat, tmp_long)
        for( i = 0 ; i < rec_count ; i++ ) {
            rec_iter = sandbox+(i * REC_LENGTH + LATITUDE_POS - 1);
            sscanf(rec_iter, "%f %f", &tmp_lat, &tmp_long);
            z[i] = sqrt(( (tmp_lat-target_lat) * (tmp_lat-target_lat) )+( (tmp_long-target_long) * (tmp_long-target_long) ));
        } /* omp end parallel (implicit barrier) */
        /* Serially merge the window into the current k-best list. */
        for( i = 0 ; i < rec_count ; i++ ) {
            float max_dist = -1;
            int max_idx = 0;
            /* find the neighbor with greatest dist and take his spot if allowed */
            for( j = 0 ; j < k ; j++ ) {
                if( neighbors[j].dist > max_dist ) {
                    max_dist = neighbors[j].dist;
                    max_idx = j;
                }
            }
            /* compare each record with max value to find the nearest neighbor */
            if( z[i] < neighbors[max_idx].dist ) {
                sandbox[(i+1)*REC_LENGTH-1] = '\0';
                strcpy(neighbors[max_idx].entry, sandbox +i*REC_LENGTH);
                neighbors[max_idx].dist = z[i];
            }
        }
    } /* End while loop */
#ifdef GEM5_WORK
    m5_dumpreset_stats(0, 0);
    m5_work_end(0, 0);
#endif
    fprintf(stderr, "The %d nearest neighbors are:\n", k);
    for( j = 0 ; j < k ; j++ ) {
        if( !(neighbors[j].dist == OPEN) )
            fprintf(stderr, "%s --> %f\n", neighbors[j].entry, neighbors[j].dist);
    }
    /* FIX: release resources (previously only flist was closed). */
    free(z);
    free(neighbors);
    fclose(flist);
    return 0;
}
|
conv_im2col_sgemm_neon_packed.h | // BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include "option.h"
#include "mat.h"
namespace ncnn{
// Repacks the im2col matrix (bottom_blob, one row per kernel element, one
// column per output pixel) into 8-column interleaved tiles stored in
// top_blob, ready for an sgemm micro-kernel. Only the packing is done here;
// no arithmetic is performed.
// NOTE(review): bottom_blob is assumed to already be in im2col layout with
// out_size = outw*outh contiguous floats per (channel, kernel-element) row —
// confirm against the caller.
static void conv_im2col_sgemm_neon_packed(const Mat &bottom_blob, Mat &top_blob,
const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Option& opt,
int inch, int outw, int outh, int outch)
{
//size_t elemsize = bottom_blob.elemsize;
int kernel_size = kernel_w * kernel_h;
int out_size = outw * outh;
const Mat bottom_im2col = bottom_blob;
Mat bottom_tm = top_blob;
{
// Process output pixels in tiles of 8; the remainder is handled one by one.
int nn_size = out_size >> 3;
int remain_size_start = nn_size << 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii=0; ii<nn_size; ii++)
{
int i = ii * 8;
const float* img0 = bottom_im2col.channel(0);
img0 += i;
// Each 8-pixel tile gets its own destination channel.
float* tmpptr = bottom_tm.channel(i/8);
for (int q=0; q<inch*kernel_size; q++)
{
#if __ARM_NEON
#if __aarch64__
// Copy 8 floats (two 128-bit vectors); the pointers are advanced by
// the C code below, not by the asm.
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v0.4s, v1.4s}, [%0] \n"
"st1 {v0.4s, v1.4s}, [%1] \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "cc", "memory", "v0", "v1"
);
#else
// 32-bit ARM variant of the same 8-float copy.
asm volatile(
"pld [%0, #256] \n"
"vld1.f32 {d0-d3}, [%0] \n"
"vst1.f32 {d0-d3}, [%1] \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0", "q1"
);
#endif // __aarch64__
#else
// Scalar fallback: copy the 8 pixels of this tile row.
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr[2] = img0[2];
tmpptr[3] = img0[3];
tmpptr[4] = img0[4];
tmpptr[5] = img0[5];
tmpptr[6] = img0[6];
tmpptr[7] = img0[7];
#endif // __ARM_NEON
// Advance to the next im2col row for this tile.
tmpptr += 8;
img0 += out_size;
}
}
// Remaining pixels (out_size not divisible by 8), one column each.
#pragma omp parallel for num_threads(opt.num_threads)
for (int i=remain_size_start; i<out_size; i++)
{
const float* img0 = bottom_im2col.channel(0);
img0 += i;
// Tail columns are stored after the full tiles: channel i/8 + i%8.
float* tmpptr = bottom_tm.channel(i/8 + i%8);
for (int q=0; q<inch*kernel_size; q++)
{
tmpptr[0] = img0[0];
tmpptr += 1;
img0 += out_size;
}
}
}
}
}
|
search_function.h | #include <random>
#include <iostream>
#include <fstream>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <unordered_map>
#include <unordered_set>
#include <map>
#include <cmath>
#include <ctime>
#include <queue>
#include <vector>
#include <omp.h>
#include <chrono>
#include <limits>
#include <sys/time.h>
#include <algorithm>
#include <ctime>
#include "support_classes.h"
#include "visited_list_pool.h"
using namespace std;
// Result of one graph search.
struct triple_result {
priority_queue<pair<float, int > > topk;  // (distance, id) max-heap of results
int hops;       // number of nodes expanded during the search
int dist_calc;  // number of distance computations performed
int degree;     // NOTE(review): never assigned by search(); aggregate init leaves it 0
};
// Expands one node of the search: for every unvisited neighbor in
// graph_level, marks it visited, computes its distance to the query, and
// pushes it into both the candidate set (min-heap via negated distance) and
// the result heap, keeping at most ef results.
// `found` is set when at least one neighbor improved the result heap;
// `query_dist_calc` counts the distance evaluations. `k` is unused but kept
// for interface compatibility.
void MakeStep(vector <uint32_t> &graph_level, const float *query, const float* db,
              priority_queue<pair<float, int > > &topResults,
              priority_queue<std::pair<float, int > > &candidateSet,
              Metric *metric, uint32_t d, int &query_dist_calc, bool &found, int &ef, int &k,
              VisitedList *vl) {
  vl_type *massVisited = vl->mass;
  vl_type currentV = vl->curV;
  for (size_t j = 0; j < graph_level.size(); ++j) {
    int neig_num = graph_level[j];
    if (massVisited[neig_num] != currentV) {
      massVisited[neig_num] = currentV;
      const float *neig_coord = db + neig_num * d;
      float dist = metric->Dist(query, neig_coord, d);
      query_dist_calc++;
      // FIX: test the size first — the original evaluated
      // topResults.top() before the size check, which is undefined
      // behavior when the heap is empty.
      if (topResults.size() < ef || topResults.top().first > dist) {
        candidateSet.emplace(-dist, neig_num);
        found = true;
        topResults.emplace(dist, neig_num);
        if (topResults.size() > ef)
          topResults.pop();
      }
    }
  }
}
// Greedy best-first graph search: starting from each entry point in
// inter_points, repeatedly expands the closest open candidate over the main
// graph and (optionally, within hops_bound hops) an auxiliary graph, until
// no candidate can improve the ef-sized result heap. Returns the k best
// results plus hop / distance-computation counters.
triple_result search(const float *query, const float* db, uint32_t N, uint32_t d,
vector<vector <uint32_t> > &main_graph, vector<vector <uint32_t> > &auxiliary_graph,
int ef, int k, vector<uint32_t> &inter_points, Metric *metric,
VisitedListPool *visitedlistpool,
bool use_second_graph, bool llf, uint32_t hops_bound) {
std::priority_queue<std::pair<float, int > > topResults;
// Starts at 1 to account for the first entry-point distance computed below.
int query_dist_calc = 1;
int num_hops = 0;
for (int i = 0; i < inter_points.size(); ++i) {
// candidateSet is a min-heap emulated by pushing negated distances.
std::priority_queue<std::pair<float, int > > candidateSet;
const float* start = db + inter_points[i]*d;
float dist = metric->Dist(query, start, d);
topResults.emplace(dist, inter_points[i]);
candidateSet.emplace(-dist, inter_points[i]);
VisitedList *vl = visitedlistpool->getFreeVisitedList();
vl_type *massVisited = vl->mass;
vl_type currentV = vl->curV;
massVisited[inter_points[i]] = currentV;
while (!candidateSet.empty()) {
std::pair<float, int> curr_el_pair = candidateSet.top();
// Stop once the closest open candidate is farther than the worst result.
if (-curr_el_pair.first > topResults.top().first) break;
candidateSet.pop();
int curNodeNum = curr_el_pair.second;
bool auxiliary_found = false;
// Expand the auxiliary (long-link) graph first, within the hop budget.
if (use_second_graph and num_hops < hops_bound) {
vector <uint32_t> curAuxiliaryNodeNeighbors = auxiliary_graph[curNodeNum];
MakeStep(curAuxiliaryNodeNeighbors, query, db,
topResults, candidateSet,
metric,
d, query_dist_calc, auxiliary_found, ef, k,
vl);
}
// With llf set, skip the main-graph expansion whenever the auxiliary
// expansion already improved the heap.
if (!(auxiliary_found * llf) or !use_second_graph) {
vector <uint32_t> curMainNodeNeighbors = main_graph[curNodeNum];
MakeStep(curMainNodeNeighbors, query, db,
topResults, candidateSet,
metric,
d, query_dist_calc, auxiliary_found, ef, k,
vl);
}
num_hops++;
}
visitedlistpool->releaseVisitedList(vl);
}
// Shrink the heap from ef entries down to the k best.
while (topResults.size() > k) {
topResults.pop();
}
// degree is not listed, so the aggregate init value-initializes it to 0.
triple_result ans{topResults, num_hops, query_dist_calc};
return ans;
}
// Re-ranks the low-dimensional candidates in `topk` using the full
// d-dimensional metric against the original dataset `ds`, and returns the id
// of the candidate with the smallest full-dimensional distance. The queue is
// drained in the process. `k` and `d_low` are unused but kept for interface
// compatibility.
int GetRealNearest(const float* point_q, int k, int d, int d_low, priority_queue<pair<float, int > > &topk,
                   vector<float> &ds,
                   Metric *metric) {
  int best_id = topk.top().second;
  float best_dist = metric->Dist(ds.data() + d * best_id, point_q, d);
  topk.pop();
  while (!topk.empty()) {
    const int cand = topk.top().second;
    topk.pop();
    const float cand_dist = metric->Dist(ds.data() + d * cand, point_q, d);
    if (cand_dist < best_dist) {
      best_dist = cand_dist;
      best_id = cand;
    }
  }
  return best_id;
}
// Runs `number_exper` repetitions of the benchmark: searches all n_q queries
// in parallel, optionally re-ranking low-dimensional results against the
// full-dimensional dataset, and reports accuracy, average hops, distance
// computations, and per-query time to stdout and to output_txt.
void get_one_test(vector<vector<uint32_t> > &knn_graph, vector<vector<uint32_t> > &kl_graph,
                  vector<float> &ds, vector<float> &queries, vector<float> &ds_low, vector<float> &queries_low,
                  vector<uint32_t> &truth,
                  int n, int d, int d_low, int n_q, int n_tr, int ef, int k, string graph_name,
                  Metric *metric, const char* output_txt,
                  vector<vector<uint32_t> > inter_points, bool use_second_graph, bool llf, uint32_t hops_bound, int dist_calc_boost,
                  int recheck_size, int number_exper, int number_of_threads) {
  std::ofstream outfile;
  outfile.open(output_txt, std::ios_base::app);
  // NOTE(review): the pool is created with 1 list but shared by
  // number_of_threads threads; presumably it grows on demand — confirm.
  VisitedListPool *visitedlistpool = new VisitedListPool(1, n);
  int hops = 0;
  int dist_calc = 0 + dist_calc_boost * n_q;
  float acc = 0;
  float work_time = 0;
  int num_exp = 0;
  omp_set_num_threads(number_of_threads);
  for (int v = 0; v < number_exper; ++v) {
    num_exp += 1;
    vector<int> ans(n_q);
    StopW stopw = StopW();
    // FIX: hops and dist_calc were updated by all threads without any
    // synchronization (a data race); accumulate them via a reduction.
    #pragma omp parallel for reduction(+ : hops, dist_calc)
    for (int i = 0; i < n_q; ++i) {
      triple_result tr;
      const float* point_q = queries.data() + i * d;
      const float* point_q_low = queries_low.data() + i * d_low;
      if (d != d_low) {
        if (recheck_size > 0) {
          // Search in the low-dimensional space, then re-rank the
          // recheck_size candidates with the full-dimensional metric.
          tr = search(point_q_low, ds_low.data(), n, d_low, knn_graph, kl_graph, recheck_size,
                      recheck_size, inter_points[i], metric, visitedlistpool, use_second_graph, llf, hops_bound);
          ans[i] = GetRealNearest(point_q, k, d, d_low, tr.topk, ds, metric);
          dist_calc += recheck_size;
        } else {
          tr = search(point_q_low, ds_low.data(), n, d_low, knn_graph, kl_graph, ef,
                      k, inter_points[i], metric, visitedlistpool, use_second_graph, llf, hops_bound);
          while (tr.topk.size() > k) {
            tr.topk.pop();
          }
          ans[i] = tr.topk.top().second;
        }
      } else {
        tr = search(point_q, ds.data(), n, d, knn_graph, kl_graph, ef,
                    k, inter_points[i], metric, visitedlistpool, use_second_graph, llf, hops_bound);
        while (tr.topk.size() > k) {
          tr.topk.pop();
        }
        ans[i] = tr.topk.top().second;
      }
      hops += tr.hops;
      dist_calc += tr.dist_calc;
    }
    work_time += stopw.getElapsedTimeMicro();
    for (int i = 0; i < n_q; ++i) {
      acc += ans[i] == truth[i * n_tr];
    }
  }
  cout << "graph_type " << graph_name << " acc " << acc / (num_exp * n_q) << " hops " << hops / (num_exp * n_q) << " dist_calc "
       << dist_calc / (num_exp * n_q) << " work_time " << work_time / (num_exp * 1e6 * n_q) << endl;
  outfile << "graph_type " << graph_name << " acc " << acc / (num_exp * n_q) << " hops " << hops / (num_exp * n_q) << " dist_calc "
          << dist_calc / (num_exp * n_q) << " work_time " << work_time / (num_exp * 1e6 * n_q) << endl;
  // FIX: the pool was leaked.
  delete visitedlistpool;
}
// Runs a sweep of parameter configurations (ef / k / threshold tables chosen
// per dimensionality d) over the given kNN and KL graphs, calling
// get_one_test once per configuration.
// NOTE(review): random_gen is taken by value, so every call to this function
// draws the same entry-point sequence — confirm that is intended.
void get_synthetic_tests(int n, int d, int n_q, int n_tr, std::mt19937 random_gen,
vector< vector<uint32_t> > &knn, vector< vector<uint32_t> > &kl, vector<float> &db,
vector<float> &queries, vector<uint32_t> &truth, const char* output_txt,
Metric *metric, string graph_name, bool use_second_graph, bool llf, bool beam_search,
bool knn_by_threshold) {
// One random entry point per query.
vector<vector<uint32_t> > inter_points(n_q);
int num = 0;
uniform_int_distribution<int> uniform_distr(0, n-1);
for (int j=0; j < n_q; ++j) {
num = uniform_distr(random_gen);
inter_points[j].push_back(num);
}
// Parameter tables: in beam-search mode k is fixed to the graph degree and
// ef is swept; otherwise ef is fixed to 1 and k (or a threshold) is swept.
vector<int> ef_coeff;
vector<int> k_coeff;
vector<float> thr_coeff;
uint32_t hops_bound = 11;
int recheck_size = -1;
int knn_size = FindGraphAverageDegree(knn);
if (beam_search) {
vector<int> k_coeff_{knn_size, knn_size, knn_size, knn_size, knn_size, knn_size};
k_coeff.insert(k_coeff.end(), k_coeff_.begin(), k_coeff_.end());
} else {
vector<int> ef_coeff_{1, 1, 1, 1, 1, 1};
ef_coeff.insert(ef_coeff.end(), ef_coeff_.begin(), ef_coeff_.end());
}
// Dimension-specific sweep values (hand-tuned per d).
if (d == 3) {
if (beam_search) {
vector<int> ef_coeff_{10, 15, 20, 25, 30};
ef_coeff.insert(ef_coeff.end(), ef_coeff_.begin(), ef_coeff_.end());
} else {
vector<int> k_coeff_{12, 14, 16, 18, 20};
k_coeff.insert(k_coeff.end(), k_coeff_.begin(), k_coeff_.end());
vector<float> thr_coeff_{1.1, 1.2, 1.3, 1.4, 1.5};
thr_coeff.insert(thr_coeff.end(), thr_coeff_.begin(), thr_coeff_.end());
}
hops_bound = 11;
} else if (d == 5) {
if (beam_search) {
vector<int> ef_coeff_{7, 10, 15, 22, 25, 30};
ef_coeff.insert(ef_coeff.end(), ef_coeff_.begin(), ef_coeff_.end());
} else {
vector<int> k_coeff_{15, 20, 25, 30, 40, 60};
k_coeff.insert(k_coeff.end(), k_coeff_.begin(), k_coeff_.end());
vector<float> thr_coeff_{1.1, 1.15, 1.2, 1.3, 1.4, 1.5};
thr_coeff.insert(thr_coeff.end(), thr_coeff_.begin(), thr_coeff_.end());
}
hops_bound = 7;
} else if (d == 9) {
if (beam_search) {
vector<int> ef_coeff_{5, 8, 15, 25, 30, 35};
ef_coeff.insert(ef_coeff.end(), ef_coeff_.begin(), ef_coeff_.end());
} else {
vector<int> k_coeff_{60, 100, 150, 200, 250, 300};
k_coeff.insert(k_coeff.end(), k_coeff_.begin(), k_coeff_.end());
vector<float> thr_coeff_{1.25, 1.3, 1.35, 1.4, 1.45, 1.5};
thr_coeff.insert(thr_coeff.end(), thr_coeff_.begin(), thr_coeff_.end());
}
hops_bound = 5;
} else if (d == 17) {
if (beam_search) {
vector<int> ef_coeff_{10, 40, 70, 100, 130, 160};
ef_coeff.insert(ef_coeff.end(), ef_coeff_.begin(), ef_coeff_.end());
} else {
vector<int> k_coeff_{750, 1000, 1250, 1500, 1750, 2000};
k_coeff.insert(k_coeff.end(), k_coeff_.begin(), k_coeff_.end());
vector<float> thr_coeff_{1.1, 1.15, 1.17, 1.19, 1.21, 1.22};
thr_coeff.insert(thr_coeff.end(), thr_coeff_.begin(), thr_coeff_.end());
}
hops_bound = 4;
}
// One test per (ef, k/threshold) pair.
int exp_size = min(ef_coeff.size(), k_coeff.size());
for (int i=0; i < exp_size; ++i) {
vector< vector <uint32_t>> knn_cur;
if (beam_search) {
knn_cur = knn;
} else if (knn_by_threshold) {
// Prune the kNN graph by an angular threshold derived from d and n.
float thr = asin(thr_coeff[i] * pow(2, 0.5) * pow(n, - 1. / d));
knn_cur = CutKNNbyThreshold(knn, db, thr, n, d, metric);
// cout << "threshold " << thr << ", thr_coeff[i] " << thr_coeff[i] << endl;
} else {
knn_cur = CutKNNbyK(knn, db, k_coeff[i], n, d, metric);
}
// cout << "knn_cur " << FindGraphAverageDegree(knn_cur) << endl;
get_one_test(knn_cur, kl, db, queries, db, queries, truth, n, d, d, n_q, n_tr, ef_coeff[i], 1,
graph_name, metric, output_txt, inter_points, use_second_graph, llf, hops_bound, 0, recheck_size, 1, omp_get_max_threads());
}
}
|
spacetime_heat_sl_kernel_antiderivative.h | /*
Copyright (c) 2020, VSB - Technical University of Ostrava and Graz University of
Technology
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* Neither the names of VSB - Technical University of Ostrava and Graz
University of Technology nor the names of its contributors may be used to
endorse or promote products derived from this software without specific prior
written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS “AS IS”
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL VSB - TECHNICAL UNIVERSITY OF OSTRAVA AND
GRAZ UNIVERSITY OF TECHNOLOGY BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/** @file spacetime_heat_sl_kernel_antiderivative.h
* @brief Kernel for uniform_spacetime_tensor_mesh.h.
*/
#ifndef INCLUDE_BESTHEA_SPACETIME_HEAT_SL_KERNEL_ANTIDERIVATIVE_H_
#define INCLUDE_BESTHEA_SPACETIME_HEAT_SL_KERNEL_ANTIDERIVATIVE_H_
#include <besthea/spacetime_heat_kernel_antiderivative.h>
#include "besthea/settings.h"
#include <vector>
namespace besthea {
namespace bem {
class spacetime_heat_sl_kernel_antiderivative;
}
}
/**
* Class representing a first and second antiderivative of the single-layer
* spacetime kernel.
*/
class besthea::bem::spacetime_heat_sl_kernel_antiderivative
: public besthea::bem::spacetime_heat_kernel_antiderivative<
spacetime_heat_sl_kernel_antiderivative > {
public:
/**
* Constructor.
* @param[in] alpha Heat conductivity.
*/
spacetime_heat_sl_kernel_antiderivative( sc alpha )
: spacetime_heat_kernel_antiderivative<
spacetime_heat_sl_kernel_antiderivative >( alpha ) {
// Forwards the heat conductivity to the CRTP base; no state of its own.
}
/**
* Destructor.
*/
virtual ~spacetime_heat_sl_kernel_antiderivative( ) {
// Nothing to release; virtual so deletion through a base pointer is safe.
}
/**
* Evaluates the second antiderivative.
* @param[in] xy1 First coordinate of `x - y`.
* @param[in] xy2 Second coordinate of `x - y`.
* @param[in] xy3 Third coordinate of `x - y`.
* @param[in] nx Normal in the `x` variable.
* @param[in] ny Normal in the `y` variable.
* @param[in] ttau `t-tau`.
*/
#pragma omp declare simd uniform( this, nx, ny, ttau ) simdlen( DATA_WIDTH )
sc do_anti_tau_anti_t( sc xy1, sc xy2, sc xy3, [[maybe_unused]] const sc * nx,
[[maybe_unused]] const sc * ny, sc ttau ) const {
sc value;
sc norm = std::sqrt( xy1 * xy1 + xy2 * xy2 + xy3 * xy3 );
sc sqrt_d = std::sqrt( ttau );
if ( ttau > _eps ) {
if ( norm > _eps ) { // ttau > 0, norm > 0
value = ( ttau / ( _four * _pi * _alpha * norm )
+ norm / ( _eight * _pi * _alpha2 ) )
* std::erf( norm / ( _two * sqrt_d * _sqrt_alpha ) )
+ sqrt_d / ( _four * _pi * _alpha * _sqrt_pi * _sqrt_alpha )
* std::exp( -( norm * norm ) / ( _four * ttau * _alpha ) );
} else { // ttau > 0, limit for norm -> 0
value = sqrt_d / ( _two * _pi * _alpha * _sqrt_pi * _sqrt_alpha );
}
} else { // limit for ttau -> 0, assuming norm > 0
value = norm / ( _eight * _pi * _alpha2 );
}
return value;
}
/**
* Evaluates the second antiderivative.
* @param[in] xy1 First coordinate of `x - y`.
* @param[in] xy2 Second coordinate of `x - y`.
* @param[in] xy3 Third coordinate of `x - y`.
* @param[in] nx Normal in the `x` variable.
* @param[in] ny Normal in the `y` variable.
* @param[in] ttau `t-tau`.
*/
#pragma omp declare simd uniform( this, nx, ny, ttau ) simdlen( DATA_WIDTH )
sc do_anti_tau_anti_t_regular_in_time( sc xy1, sc xy2, sc xy3,
[[maybe_unused]] const sc * nx, [[maybe_unused]] const sc * ny,
sc ttau ) const {
sc value;
sc norm = std::sqrt( xy1 * xy1 + xy2 * xy2 + xy3 * xy3 );
sc sqrt_d = std::sqrt( ttau );
if ( norm > _eps ) { // delta > 0, norm > 0
value = ( ttau / ( _four * _pi * _alpha * norm )
+ norm / ( _eight * _pi * _alpha2 ) )
* std::erf( norm / ( _two * sqrt_d * _sqrt_alpha ) )
+ sqrt_d / ( _four * _pi * _alpha * _sqrt_pi * _sqrt_alpha )
* std::exp( -( norm * norm ) / ( _four * ttau * _alpha ) );
} else { // delta > 0, limit for norm -> 0
value = sqrt_d / ( _two * _pi * _alpha * _sqrt_pi * _sqrt_alpha );
}
return value;
}
/**
* Evaluates the second antiderivative.
* @param[in] xy1 First coordinate of `x - y`.
* @param[in] xy2 Second coordinate of `x - y`.
* @param[in] xy3 Third coordinate of `x - y`.
* @param[in] nx Normal in the `x` variable.
* @param[in] ny Normal in the `y` variable.
* @param[in] ttau `t-tau`.
*/
#pragma omp declare simd uniform( this, nx, ny, ttau ) simdlen( DATA_WIDTH )
sc do_anti_tau_anti_t_regular_in_time_regular_in_space( sc xy1, sc xy2,
sc xy3, [[maybe_unused]] const sc * nx, [[maybe_unused]] const sc * ny,
sc ttau ) const {
sc norm = std::sqrt( xy1 * xy1 + xy2 * xy2 + xy3 * xy3 );
sc sqrt_d = std::sqrt( ttau );
// ttau > 0, norm > 0
sc value = ( ttau / ( _four * _pi * _alpha * norm )
+ norm / ( _eight * _pi * _alpha2 ) )
* std::erf( norm / ( _two * sqrt_d * _sqrt_alpha ) )
+ sqrt_d / ( _four * _pi * _alpha * _sqrt_pi * _sqrt_alpha )
* std::exp( -( norm * norm ) / ( _four * ttau * _alpha ) );
return value;
}
/**
* Evaluates the second antiderivative.
* @param[in] xy1 First coordinate of `x - y`.
* @param[in] xy2 Second coordinate of `x - y`.
* @param[in] xy3 Third coordinate of `x - y`.
* @param[in] nx Normal in the `x` variable.
* @param[in] ny Normal in the `y` variable.
*/
#pragma omp declare simd uniform( this, nx, ny ) simdlen( DATA_WIDTH )
sc do_anti_tau_anti_t_limit_in_time_regular_in_space( sc xy1, sc xy2, sc xy3,
[[maybe_unused]] const sc * nx, [[maybe_unused]] const sc * ny ) const {
sc norm = std::sqrt( xy1 * xy1 + xy2 * xy2 + xy3 * xy3 );
// limit for ttau -> 0, assuming norm > 0
sc value = norm / ( _eight * _pi * _alpha2 );
return value;
}
/**
* @param[in] xy1 First coordinate of `x - y`.
* @param[in] xy2 Second coordinate of `x - y`.
* @param[in] xy3 Third coordinate of `x - y`.
* @param[in] nx Normal in the `x` variable.
* @param[in] ny Normal in the `y` variable.
* @param[in] ttau `t-tau`.
*/
#pragma omp declare simd uniform( this, nx, ny, ttau ) simdlen( DATA_WIDTH )
sc do_anti_tau_regular( sc xy1, sc xy2, sc xy3,
[[maybe_unused]] const sc * nx, [[maybe_unused]] const sc * ny,
sc ttau ) const {
sc norm = std::sqrt( xy1 * xy1 + xy2 * xy2 + xy3 * xy3 );
sc sqrt_d = std::sqrt( ttau );
sc value = std::erf( norm / ( _two * _sqrt_alpha * sqrt_d ) )
/ ( _four * _pi * _alpha * norm );
return value;
}
/**
* @param[in] xy1 First coordinate of `x - y`.
* @param[in] xy2 Second coordinate of `x - y`.
* @param[in] xy3 Third coordinate of `x - y`.
* @param[in] nx Normal in the `x` variable.
* @param[in] ny Normal in the `y` variable.
*/
#pragma omp declare simd uniform( this, nx, ny ) simdlen( DATA_WIDTH )
sc do_anti_tau_limit( sc xy1, sc xy2, sc xy3, [[maybe_unused]] const sc * nx,
[[maybe_unused]] const sc * ny ) const {
sc norm = std::sqrt( xy1 * xy1 + xy2 * xy2 + xy3 * xy3 );
sc value = _one / ( _four * _pi * _alpha * norm );
return value;
}
/**
* Evaluates the definite integral over the same time interval.
* @param[in] xy1 First coordinate of `x - y`.
* @param[in] xy2 Second coordinate of `x - y`.
* @param[in] xy3 Third coordinate of `x - y`.
* @param[in] nx Normal in the `x` variable.
* @param[in] ny Normal in the `y` variable.
* @param[in] t0 Start of interval.
* @param[in] t1 End of interval.
*/
#pragma omp declare simd uniform( this, nx, ny, t0, t1 ) simdlen( DATA_WIDTH )
sc do_definite_integral_over_same_interval(
sc xy1, sc xy2, sc xy3, const sc * nx, const sc * ny, sc t0, sc t1 ) const {
sc value = ( t1 - t0 ) * do_anti_tau_limit( xy1, xy2, xy3, nx, ny )
- do_anti_tau_anti_t_regular_in_time( xy1, xy2, xy3, nx, ny, t1 - t0 )
+ do_anti_tau_anti_t_limit_in_time_regular_in_space(
xy1, xy2, xy3, nx, ny );
return value;
}
/**
* Evaluates the definite integral over the different time intervals.
* @param[in] xy1 First coordinate of `x - y`.
* @param[in] xy2 Second coordinate of `x - y`.
* @param[in] xy3 Third coordinate of `x - y`.
* @param[in] nx Normal in the `x` variable.
* @param[in] ny Normal in the `y` variable.
* @param[in] t0 Start of interval in `t`.
* @param[in] t1 End of interval in `t`.
* @param[in] tau0 Start of interval in `tau`.
* @param[in] tau1 End of interval in `tau`.
*/
#pragma omp declare simd uniform( this, nx, ny, t0, t1, tau0, tau1 ) \
simdlen( DATA_WIDTH )
sc do_definite_integral_over_different_intervals( sc xy1, sc xy2, sc xy3,
const sc * nx, const sc * ny, sc t0, sc t1, sc tau0, sc tau1 ) const {
sc value = do_anti_tau_anti_t( xy1, xy2, xy3, nx, ny, t1 - tau1 )
- do_anti_tau_anti_t( xy1, xy2, xy3, nx, ny, t1 - tau0 )
- do_anti_tau_anti_t( xy1, xy2, xy3, nx, ny, t0 - tau1 )
+ do_anti_tau_anti_t( xy1, xy2, xy3, nx, ny, t0 - tau0 );
return value;
}
};
#endif /* INCLUDE_BESTHEA_SPACETIME_HEAT_SL_KERNEL_ANTIDERIVATIVE_H_ \
*/
|
Fig_12.22_ompTargDat.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define N 1024
/*
 * Demonstrates an OpenMP `target data` region that keeps a, b, and c
 * resident on the device across two `target` compute regions; only d is
 * mapped tofrom and used afterwards on the host.
 *
 * Fixes over the original figure: the arrays are now initialized before
 * the `+=` updates (reading uninitialized malloc'ed memory is undefined
 * behavior), allocation failure is checked, the arrays are freed, and
 * main returns a status.
 */
int main()
{
  float *a, *b, *c, *d;
  int i;
  a = (float*)malloc(N*sizeof(float));
  b = (float*)malloc(N*sizeof(float));
  c = (float*)malloc(N*sizeof(float));
  d = (float*)malloc(N*sizeof(float));
  if (a == NULL || b == NULL || c == NULL || d == NULL) {
    fprintf(stderr, "allocation of %d floats failed\n", N);
    free(a); free(b); free(c); free(d);
    return EXIT_FAILURE;
  }
  /* initialize a, b, c, and d -- the target regions below accumulate
     with +=, so every element must have a defined starting value */
  for (i = 0; i < N; i++) {
    a[i] = (float)i;
    b[i] = 2.0f;
    c[i] = 0.0f;
    d[i] = 0.0f;
  }
#pragma omp target data map(to:a[0:N],b[0:N],c[0:N]) map(tofrom:d[0:N])
  {
#pragma omp target
#pragma omp teams distribute parallel for simd
    for (i = 0; i < N; i++)
      c[i] += a[i] * b[i];
#pragma omp target
#pragma omp teams distribute parallel for simd
    for (i = 0; i < N; i++)
      d[i] += a[i] + c[i];
  }
  /* continue in the program but only using d (not c) */
  free(a);
  free(b);
  free(c);
  free(d);
  return 0;
}
|
dbjac2.c | #include "dbjac2.h"
#include "dzjac2.h"
#ifdef D8JAC2_PARAMS
#error D8JAC2_PARAMS already defined
#else /* !D8JAC2_PARAMS */
/* Per-iteration constant setup for the 8-lane double-precision 2x2 Jacobi
   kernels: the shared DZJAC2_PARAMS constants plus a DBL_MAX clamp (`huge`)
   and the exponent bound DBL_BIG_EXP (`be`) used for scalef-based
   prescaling of the inputs. */
#define D8JAC2_PARAMS \
DZJAC2_PARAMS; \
register const VD huge = _mm512_set1_pd(DBL_MAX); \
register const VD be = _mm512_set1_pd(DBL_BIG_EXP)
#endif /* ?D8JAC2_PARAMS */
#ifdef D8JAC2_LOOP
#error D8JAC2_LOOP already defined
#else /* !D8JAC2_LOOP */
/* One 8-lane iteration over the symmetric 2x2 batches [[a11,a21],[a21,a22]]
   starting at index i:
   - prescale each lane by a common power of two (getexp/scalef against `be`)
     so the tangent computation cannot overflow;
   - compute the rotation tangent t1 (via the half-angle formula from t2),
     the cosine co = 1/sqrt(1+t1^2), and the rotated diagonal values L1, L2;
   - store the signed tangent into `at`, the cosine into `c`, the back-scaled
     L1/L2 into `l1`/`l2`, and a per-lane bitmask of (L1 < L2) into
     p[i >> VDLlg].
   NOTE(review): written as a statement sequence (not do/while) so callers
   can place it directly inside their own loop body. */
#define D8JAC2_LOOP \
register VD a1 = _mm512_load_pd(a11 + i); VDP(a1); \
register VD a2 = _mm512_load_pd(a22 + i); VDP(a2); \
register VD ar = _mm512_load_pd(a21 + i); VDP(ar); \
register const VD e1 = _mm512_sub_pd(be, _mm512_getexp_pd(a1)); VDP(e1); \
register const VD e2 = _mm512_sub_pd(be, _mm512_getexp_pd(a2)); VDP(e2); \
register const VD er = _mm512_sub_pd(be, _mm512_getexp_pd(ar)); VDP(er); \
register VD es = _mm512_min_pd(_mm512_min_pd(e1, e2), _mm512_min_pd(er, huge)); VDP(es); \
ar = _mm512_scalef_pd(ar, es); VDP(ar); \
a1 = _mm512_scalef_pd(a1, es); VDP(a1); \
a2 = _mm512_scalef_pd(a2, es); VDP(a2); \
register const VD aa = VDABS(ar); VDP(aa); \
register const VD as = VDSGN(ar); VDP(as); \
es = VDNEG(es); VDP(es); \
register const VD ab = _mm512_scalef_pd(aa, one); VDP(ab); \
register const VD ad = _mm512_sub_pd(a1, a2); VDP(ad); \
register const VD t2 = VDOR(_mm512_min_pd(_mm512_max_pd(_mm512_div_pd(ab, VDABS(ad)), zero), sh), VDSGN(ad)); VDP(t2); \
register const VD t1 = _mm512_div_pd(t2, _mm512_add_pd(one, _mm512_sqrt_pd(_mm512_fmadd_pd(t2, t2, one)))); VDP(t1); \
register const VD s2 = _mm512_fmadd_pd(t1, t1, one); VDP(s2); \
register const VD s1 = _mm512_sqrt_pd(s2); VDP(s1); \
register const VD co = _mm512_div_pd(one, s1); VDP(co); \
_mm512_store_pd((at + i), VDXOR(t1, as)); \
register const VD L1 = _mm512_div_pd(_mm512_fmadd_pd(t1, _mm512_fmadd_pd(a2, t1, ab), a1), s2); VDP(L1); \
_mm512_store_pd((c + i), co); \
register const VD L2 = _mm512_div_pd(_mm512_fmadd_pd(t1, _mm512_fmsub_pd(a1, t1, ab), a2), s2); VDP(L2); \
_mm512_store_pd((l1 + i), _mm512_scalef_pd(L1, es)); \
register const MD P = _mm512_cmplt_pd_mask(L1, L2); MDP(P); \
_mm512_store_pd((l2 + i), _mm512_scalef_pd(L2, es)); \
p[i >> VDLlg] = MD2U(P)
#endif /* ?D8JAC2_LOOP */
// return the sines instead of the tangents
#ifdef D8JACL_LOOP
#error D8JACL_LOOP already defined
#else /* !D8JACL_LOOP */
/* Identical to D8JAC2_LOOP except the store into `at`: the signed tangent
   is divided by s1 = sqrt(1+t1^2), i.e. the rotation SINE is stored instead
   of the tangent. Everything else (prescaling, cosine, L1/L2, mask) is the
   same. */
#define D8JACL_LOOP \
register VD a1 = _mm512_load_pd(a11 + i); VDP(a1); \
register VD a2 = _mm512_load_pd(a22 + i); VDP(a2); \
register VD ar = _mm512_load_pd(a21 + i); VDP(ar); \
register const VD e1 = _mm512_sub_pd(be, _mm512_getexp_pd(a1)); VDP(e1); \
register const VD e2 = _mm512_sub_pd(be, _mm512_getexp_pd(a2)); VDP(e2); \
register const VD er = _mm512_sub_pd(be, _mm512_getexp_pd(ar)); VDP(er); \
register VD es = _mm512_min_pd(_mm512_min_pd(e1, e2), _mm512_min_pd(er, huge)); VDP(es); \
ar = _mm512_scalef_pd(ar, es); VDP(ar); \
a1 = _mm512_scalef_pd(a1, es); VDP(a1); \
a2 = _mm512_scalef_pd(a2, es); VDP(a2); \
register const VD aa = VDABS(ar); VDP(aa); \
register const VD as = VDSGN(ar); VDP(as); \
es = VDNEG(es); VDP(es); \
register const VD ab = _mm512_scalef_pd(aa, one); VDP(ab); \
register const VD ad = _mm512_sub_pd(a1, a2); VDP(ad); \
register const VD t2 = VDOR(_mm512_min_pd(_mm512_max_pd(_mm512_div_pd(ab, VDABS(ad)), zero), sh), VDSGN(ad)); VDP(t2); \
register const VD t1 = _mm512_div_pd(t2, _mm512_add_pd(one, _mm512_sqrt_pd(_mm512_fmadd_pd(t2, t2, one)))); VDP(t1); \
register const VD s2 = _mm512_fmadd_pd(t1, t1, one); VDP(s2); \
register const VD s1 = _mm512_sqrt_pd(s2); VDP(s1); \
register const VD co = _mm512_div_pd(one, s1); VDP(co); \
_mm512_store_pd((at + i), _mm512_div_pd(VDXOR(t1, as), s1)); \
register const VD L1 = _mm512_div_pd(_mm512_fmadd_pd(t1, _mm512_fmadd_pd(a2, t1, ab), a1), s2); VDP(L1); \
_mm512_store_pd((c + i), co); \
register const VD L2 = _mm512_div_pd(_mm512_fmadd_pd(t1, _mm512_fmsub_pd(a1, t1, ab), a2), s2); VDP(L2); \
_mm512_store_pd((l1 + i), _mm512_scalef_pd(L1, es)); \
register const MD P = _mm512_cmplt_pd_mask(L1, L2); MDP(P); \
_mm512_store_pd((l2 + i), _mm512_scalef_pd(L2, es)); \
p[i >> VDLlg] = MD2U(P)
#endif /* ?D8JACL_LOOP */
/*
 * Batched 2x2 symmetric Jacobi rotation computation, double precision,
 * VDL lanes per AVX-512 vector.
 *
 * Inputs a11, a22, a21 describe the batch of symmetric matrices
 * [[a11,a21],[a21,a22]]. Per lane, the outputs are: c (rotation cosine),
 * at (rotation tangent, or SINE when *n < 0), l1/l2 (back-scaled rotated
 * diagonal values), and a bit per lane in p[i >> VDLlg] set where L1 < L2.
 *
 * *n < 0 selects the sine-storing variant (D8JACL_LOOP) on |*n| elements;
 * *n >= 0 stores tangents (D8JAC2_LOOP).
 *
 * Returns 0 (sequential path) or 1 (OpenMP-parallel path) on success; in
 * debug builds a negative code identifies the offending argument
 * (-1: *n not a multiple of the vector length, -2..-8: misaligned arrays,
 * -10: bad FP environment).
 */
fint dbjac2_(const fint n[static restrict 1], const double a11[static restrict VDL], const double a22[static restrict VDL], const double a21[static restrict VDL], double c[static restrict VDL], double at[static restrict VDL], double l1[static restrict VDL], double l2[static restrict VDL], unsigned p[static restrict 1])
{
#ifndef NDEBUG
  if (IS_NOT_VFPENV)
    return -10;
  if (*n & VDL_1)
    return -1;
  if (IS_NOT_ALIGNED(a11))
    return -2;
  if (IS_NOT_ALIGNED(a22))
    return -3;
  if (IS_NOT_ALIGNED(a21))
    return -4;
  if (IS_NOT_ALIGNED(c))
    return -5;
  if (IS_NOT_ALIGNED(at))
    return -6;
  if (IS_NOT_ALIGNED(l1))
    return -7;
  if (IS_NOT_ALIGNED(l2))
    return -8;
#endif /* !NDEBUG */
  if (*n < 0) {
    /* negative count: process |*n| elements, storing sines in `at` */
    const fnat _n = (fnat)-*n;
#ifdef _OPENMP
    /* PARAMS is re-established per iteration because the constants are
       thread-local registers inside the parallel region */
#pragma omp parallel for default(none) shared(_n,a11,a22,a21,c,at,l1,l2,p)
    for (fnat i = 0u; i < _n; i += VDL) {
      D8JAC2_PARAMS;
      D8JACL_LOOP;
    }
    return 1;
#else /* !_OPENMP */
    D8JAC2_PARAMS;
    for (fnat i = 0u; i < _n; i += VDL) {
      D8JACL_LOOP;
    }
    return 0;
#endif /* ?_OPENMP */
  }
  else {
    /* non-negative count: store tangents in `at` */
    const fnat _n = (fnat)*n;
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(_n,a11,a22,a21,c,at,l1,l2,p)
    for (fnat i = 0u; i < _n; i += VDL) {
      D8JAC2_PARAMS;
      D8JAC2_LOOP;
    }
    return 1;
#else /* !_OPENMP */
    D8JAC2_PARAMS;
    for (fnat i = 0u; i < _n; i += VDL) {
      D8JAC2_LOOP;
    }
    return 0;
#endif /* ?_OPENMP */
  }
}
// store the secants instead of the cosines
#ifdef D8JACI_LOOP
#error D8JACI_LOOP already defined
#else /* !D8JACI_LOOP */
/* Identical to D8JAC2_LOOP except the store into `c`: s1 = sqrt(1+t1^2),
   the SECANT (1/cos) of the rotation angle, is stored instead of the
   cosine co = 1/s1 (which is consequently not computed at all here). */
#define D8JACI_LOOP \
register VD a1 = _mm512_load_pd(a11 + i); VDP(a1); \
register VD a2 = _mm512_load_pd(a22 + i); VDP(a2); \
register VD ar = _mm512_load_pd(a21 + i); VDP(ar); \
register const VD e1 = _mm512_sub_pd(be, _mm512_getexp_pd(a1)); VDP(e1); \
register const VD e2 = _mm512_sub_pd(be, _mm512_getexp_pd(a2)); VDP(e2); \
register const VD er = _mm512_sub_pd(be, _mm512_getexp_pd(ar)); VDP(er); \
register VD es = _mm512_min_pd(_mm512_min_pd(e1, e2), _mm512_min_pd(er, huge)); VDP(es); \
ar = _mm512_scalef_pd(ar, es); VDP(ar); \
a1 = _mm512_scalef_pd(a1, es); VDP(a1); \
a2 = _mm512_scalef_pd(a2, es); VDP(a2); \
register const VD aa = VDABS(ar); VDP(aa); \
register const VD as = VDSGN(ar); VDP(as); \
es = VDNEG(es); VDP(es); \
register const VD ab = _mm512_scalef_pd(aa, one); VDP(ab); \
register const VD ad = _mm512_sub_pd(a1, a2); VDP(ad); \
register const VD t2 = VDOR(_mm512_min_pd(_mm512_max_pd(_mm512_div_pd(ab, VDABS(ad)), zero), sh), VDSGN(ad)); VDP(t2); \
register const VD t1 = _mm512_div_pd(t2, _mm512_add_pd(one, _mm512_sqrt_pd(_mm512_fmadd_pd(t2, t2, one)))); VDP(t1); \
register const VD s2 = _mm512_fmadd_pd(t1, t1, one); VDP(s2); \
register const VD s1 = _mm512_sqrt_pd(s2); VDP(s1); \
_mm512_store_pd((at + i), VDXOR(t1, as)); \
register const VD L1 = _mm512_div_pd(_mm512_fmadd_pd(t1, _mm512_fmadd_pd(a2, t1, ab), a1), s2); VDP(L1); \
_mm512_store_pd((c + i), s1); \
register const VD L2 = _mm512_div_pd(_mm512_fmadd_pd(t1, _mm512_fmsub_pd(a1, t1, ab), a2), s2); VDP(L2); \
_mm512_store_pd((l1 + i), _mm512_scalef_pd(L1, es)); \
register const MD P = _mm512_cmplt_pd_mask(L1, L2); MDP(P); \
_mm512_store_pd((l2 + i), _mm512_scalef_pd(L2, es)); \
p[i >> VDLlg] = MD2U(P)
#endif /* ?D8JACI_LOOP */
// for internal use only
/*
 * Masked variant of dbjac2_: only vector groups whose mask word
 * p[i >> VDLlg] is NONZERO on entry are processed (p is both input
 * selector and output mask here); untouched groups keep their previous
 * c/at/l1/l2 values. The secant (s1) is stored in `c` instead of the
 * cosine (D8JACI_LOOP / D8JAC2_LOOP paths for *n < 0 / *n >= 0).
 * Return codes match dbjac2_.
 */
fint dbjac2i(const fint n[static restrict 1], const double a11[static restrict VDL], const double a22[static restrict VDL], const double a21[static restrict VDL], double c[static restrict VDL], double at[static restrict VDL], double l1[static restrict VDL], double l2[static restrict VDL], unsigned p[static restrict 1])
{
#ifndef NDEBUG
  if (IS_NOT_VFPENV)
    return -10;
  if (*n & VDL_1)
    return -1;
  if (IS_NOT_ALIGNED(a11))
    return -2;
  if (IS_NOT_ALIGNED(a22))
    return -3;
  if (IS_NOT_ALIGNED(a21))
    return -4;
  if (IS_NOT_ALIGNED(c))
    return -5;
  if (IS_NOT_ALIGNED(at))
    return -6;
  if (IS_NOT_ALIGNED(l1))
    return -7;
  if (IS_NOT_ALIGNED(l2))
    return -8;
#endif /* !NDEBUG */
  if (*n < 0) {
    const fnat _n = (fnat)-*n;
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(_n,a11,a22,a21,c,at,l1,l2,p)
    for (fnat i = 0u; i < _n; i += VDL) {
      /* skip vector groups not selected by the caller's mask */
      if (p[i >> VDLlg]) {
        D8JAC2_PARAMS;
        D8JACI_LOOP;
      }
    }
    return 1;
#else /* !_OPENMP */
    D8JAC2_PARAMS;
    for (fnat i = 0u; i < _n; i += VDL) {
      if (p[i >> VDLlg]) {
        D8JACI_LOOP;
      }
    }
    return 0;
#endif /* ?_OPENMP */
  }
  else {
    const fnat _n = (fnat)*n;
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(_n,a11,a22,a21,c,at,l1,l2,p)
    for (fnat i = 0u; i < _n; i += VDL) {
      if (p[i >> VDLlg]) {
        D8JAC2_PARAMS;
        D8JAC2_LOOP;
      }
    }
    return 1;
#else /* !_OPENMP */
    D8JAC2_PARAMS;
    for (fnat i = 0u; i < _n; i += VDL) {
      if (p[i >> VDLlg]) {
        D8JAC2_LOOP;
      }
    }
    return 0;
#endif /* ?_OPENMP */
  }
}
|
image.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% IIIII M M AAA GGGG EEEEE %
% I MM MM A A G E %
% I M M M AAAAA G GG EEE %
% I M M A A G G E %
% IIIII M M A A GGGG EEEEE %
% %
% %
% MagickCore Image Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/animate.h"
#include "magick/artifact.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/compress.h"
#include "magick/constitute.h"
#include "magick/delegate.h"
#include "magick/deprecate.h"
#include "magick/display.h"
#include "magick/draw.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/histogram.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/magic.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/memory-private.h"
#include "magick/module.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/paint.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/profile.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/random_.h"
#include "magick/resource_.h"
#include "magick/segment.h"
#include "magick/semaphore.h"
#include "magick/signature-private.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/threshold.h"
#include "magick/timer.h"
#include "magick/timer-private.h"
#include "magick/token.h"
#include "magick/token-private.h"
#include "magick/utility.h"
#include "magick/version.h"
#include "magick/xwindow-private.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImage() returns a pointer to an image structure initialized to
% default values.
%
% The format of the AcquireImage method is:
%
% Image *AcquireImage(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
*/
MagickExport Image *AcquireImage(const ImageInfo *image_info)
{
  const char
    *option;
  Image
    *image;
  MagickStatusType
    flags;
  /*
    Allocate image structure.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  image=(Image *) AcquireCriticalMemory(sizeof(*image));
  (void) memset(image,0,sizeof(*image));
  /*
    Initialize Image structure to defaults: MIFF magick, DirectClass, sRGB
    with standard Rec.709-ish primaries and D65 white point.
  */
  (void) CopyMagickString(image->magick,"MIFF",MaxTextExtent);
  image->storage_class=DirectClass;
  image->depth=MAGICKCORE_QUANTUM_DEPTH;
  image->colorspace=sRGBColorspace;
  image->rendering_intent=PerceptualIntent;
  image->gamma=1.000f/2.200f;
  image->chromaticity.red_primary.x=0.6400f;
  image->chromaticity.red_primary.y=0.3300f;
  image->chromaticity.red_primary.z=0.0300f;
  image->chromaticity.green_primary.x=0.3000f;
  image->chromaticity.green_primary.y=0.6000f;
  image->chromaticity.green_primary.z=0.1000f;
  image->chromaticity.blue_primary.x=0.1500f;
  image->chromaticity.blue_primary.y=0.0600f;
  image->chromaticity.blue_primary.z=0.7900f;
  image->chromaticity.white_point.x=0.3127f;
  image->chromaticity.white_point.y=0.3290f;
  image->chromaticity.white_point.z=0.3583f;
  image->interlace=NoInterlace;
  image->ticks_per_second=UndefinedTicksPerSecond;
  image->compose=OverCompositeOp;
  image->blur=1.0;
  /*
    Exception info must exist before the color lookups below, which report
    errors into it.
  */
  InitializeExceptionInfo(&image->exception);
  (void) QueryColorDatabase(BackgroundColor,&image->background_color,
    &image->exception);
  (void) QueryColorDatabase(BorderColor,&image->border_color,&image->exception);
  (void) QueryColorDatabase(MatteColor,&image->matte_color,&image->exception);
  (void) QueryColorDatabase(TransparentColor,&image->transparent_color,
    &image->exception);
  GetTimerInfo(&image->timer);
  image->ping=MagickFalse;
  image->cache=AcquirePixelCache(0);
  image->blob=CloneBlobInfo((BlobInfo *) NULL);
  image->timestamp=GetMagickTime();
  image->debug=IsEventLogging();
  image->reference_count=1;
  image->semaphore=AllocateSemaphoreInfo();
  image->signature=MagickCoreSignature;
  if (image_info == (ImageInfo *) NULL)
    return(image);
  /*
    Transfer image info.
  */
  SetBlobExempt(image,image_info->file != (FILE *) NULL ? MagickTrue :
    MagickFalse);
  (void) CopyMagickString(image->filename,image_info->filename,MaxTextExtent);
  (void) CopyMagickString(image->magick_filename,image_info->filename,
    MaxTextExtent);
  (void) CopyMagickString(image->magick,image_info->magick,MaxTextExtent);
  if (image_info->size != (char *) NULL)
    {
      /*
        -size: set canvas dimensions; the x part becomes the scene offset,
        not an extraction origin.
      */
      (void) ParseAbsoluteGeometry(image_info->size,&image->extract_info);
      image->columns=image->extract_info.width;
      image->rows=image->extract_info.height;
      image->offset=image->extract_info.x;
      image->extract_info.x=0;
      image->extract_info.y=0;
    }
  if (image_info->extract != (char *) NULL)
    {
      RectangleInfo
        geometry;
      /*
        -extract: only honored when an x and/or y offset was given; swaps
        canvas size with the extraction region size.
      */
      (void) memset(&geometry,0,sizeof(geometry));
      flags=ParseAbsoluteGeometry(image_info->extract,&geometry);
      if (((flags & XValue) != 0) || ((flags & YValue) != 0))
        {
          image->extract_info=geometry;
          Swap(image->columns,image->extract_info.width);
          Swap(image->rows,image->extract_info.height);
        }
    }
  image->compression=image_info->compression;
  image->quality=image_info->quality;
  image->endian=image_info->endian;
  image->interlace=image_info->interlace;
  image->units=image_info->units;
  if (image_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;
      /*
        -density: rho is the x resolution; y defaults to x unless sigma
        was supplied.
      */
      flags=ParseGeometry(image_info->density,&geometry_info);
      if ((flags & RhoValue) != 0)
        image->x_resolution=geometry_info.rho;
      image->y_resolution=image->x_resolution;
      if ((flags & SigmaValue) != 0)
        image->y_resolution=geometry_info.sigma;
    }
  if (image_info->page != (char *) NULL)
    {
      char
        *geometry;
      image->page=image->extract_info;
      geometry=GetPageGeometry(image_info->page);
      (void) ParseAbsoluteGeometry(geometry,&image->page);
      geometry=DestroyString(geometry);
    }
  if (image_info->depth != 0)
    image->depth=image_info->depth;
  image->dither=image_info->dither;
  image->background_color=image_info->background_color;
  image->border_color=image_info->border_color;
  image->matte_color=image_info->matte_color;
  image->transparent_color=image_info->transparent_color;
  image->ping=image_info->ping;
  image->progress_monitor=image_info->progress_monitor;
  image->client_data=image_info->client_data;
  if (image_info->cache != (void *) NULL)
    ClonePixelCacheMethods(image->cache,image_info->cache);
  (void) SyncImageSettings(image_info,image);
  option=GetImageOption(image_info,"delay");
  if (option != (const char *) NULL)
    {
      GeometryInfo
        geometry_info;
      /*
        "delay" option: ">" caps the delay at rho, "<" raises it; a sigma
        part sets ticks-per-second.
        NOTE(review): in the "<" branch ticks_per_second is assigned from
        sigma although the guard tests delay against rho -- this mirrors
        long-standing upstream behavior; confirm against ImageMagick
        history before "fixing".
      */
      flags=ParseGeometry(option,&geometry_info);
      if ((flags & GreaterValue) != 0)
        {
          if ((double) image->delay > floor(geometry_info.rho+0.5))
            image->delay=(size_t) CastDoubleToLong(floor(
              geometry_info.rho+0.5));
        }
      else
        if ((flags & LessValue) != 0)
          {
            if ((double) image->delay < floor(geometry_info.rho+0.5))
              image->ticks_per_second=CastDoubleToLong(floor(
                geometry_info.sigma+0.5));
          }
        else
          image->delay=(size_t) CastDoubleToLong(floor(
            geometry_info.rho+0.5));
      if ((flags & SigmaValue) != 0)
        image->ticks_per_second=CastDoubleToLong(floor(
          geometry_info.sigma+0.5));
    }
  option=GetImageOption(image_info,"dispose");
  if (option != (const char *) NULL)
    image->dispose=(DisposeType) ParseCommandOption(MagickDisposeOptions,
      MagickFalse,option);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImageInfo() allocates the ImageInfo structure.
%
% The format of the AcquireImageInfo method is:
%
% ImageInfo *AcquireImageInfo(void)
%
*/
MagickExport ImageInfo *AcquireImageInfo(void)
{
  /*
    Allocate an ImageInfo structure and initialize it to default values;
    allocation failure is fatal.
  */
  ImageInfo
    *info;
  info=(ImageInfo *) AcquireMagickMemory(sizeof(*info));
  if (info == (ImageInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  GetImageInfo(info);
  return(info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e N e x t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireNextImage() initializes the next image in a sequence to
% default values. The next member of image points to the newly allocated
% image. If there is a memory shortage, next is assigned NULL.
%
% The format of the AcquireNextImage method is:
%
% void AcquireNextImage(const ImageInfo *image_info,Image *image)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
% o image: the image.
%
*/
MagickExport void AcquireNextImage(const ImageInfo *image_info,Image *image)
{
  /*
    Allocate the next image in the sequence; on failure image->next is
    left NULL (AcquireImage is fatal on allocation failure, so the NULL
    check below guards list-linking semantics rather than allocation).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->next=AcquireImage(image_info);
  if (GetNextImageInList(image) == (Image *) NULL)
    return;
  /*
    Inherit the filename from the current image, but let an explicit
    image_info filename take precedence.
  */
  (void) CopyMagickString(GetNextImageInList(image)->filename,image->filename,
    MaxTextExtent);
  if (image_info != (ImageInfo *) NULL)
    (void) CopyMagickString(GetNextImageInList(image)->filename,
      image_info->filename,MaxTextExtent);
  /*
    Replace the freshly allocated blob with a reference to the current
    image's blob so the whole sequence shares one I/O stream, then link
    the new image into the list.
  */
  DestroyBlob(GetNextImageInList(image));
  image->next->blob=ReferenceBlob(image->blob);
  image->next->endian=image->endian;
  image->next->scene=image->scene+1;
  image->next->previous=image;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A p p e n d I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AppendImages() takes all images from the current image pointer to the end
% of the image list and appends them to each other top-to-bottom if the
% stack parameter is true, otherwise left-to-right.
%
% The current gravity setting now effects how the image is justified in the
% final image.
%
% The format of the AppendImages method is:
%
% Image *AppendImages(const Image *images,const MagickBooleanType stack,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o stack: A value other than 0 stacks the images top-to-bottom.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AppendImages(const Image *images,
  const MagickBooleanType stack,ExceptionInfo *exception)
{
#define AppendImageTag  "Append/Image"
  CacheView
    *append_view;
  Image
    *append_image;
  MagickBooleanType
    homogeneous_colorspace,
    matte,
    status;
  MagickOffsetType
    n;
  RectangleInfo
    geometry;
  const Image
    *next;
  size_t
    depth,
    height,
    number_images,
    width;
  ssize_t
    x_offset,
    y,
    y_offset;
  /*
    Compute the bounding size of the appended image: stacked means the sum
    of rows and the max of columns, side-by-side the other way around.
    Also track the max depth, whether any frame has an alpha channel, and
    whether all frames share the first frame's colorspace.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  matte=images->matte;
  number_images=1;
  width=images->columns;
  height=images->rows;
  depth=images->depth;
  homogeneous_colorspace=MagickTrue;
  next=GetNextImageInList(images);
  for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    if (next->depth > depth)
      depth=next->depth;
    if (next->colorspace != images->colorspace)
      homogeneous_colorspace=MagickFalse;
    if (next->matte != MagickFalse)
      matte=MagickTrue;
    number_images++;
    if (stack != MagickFalse)
      {
        if (next->columns > width)
          width=next->columns;
        height+=next->rows;
        continue;
      }
    width+=next->columns;
    if (next->rows > height)
      height=next->rows;
  }
  /*
    Append images: clone a canvas of the combined size from the first
    frame, force DirectClass, and fall back to sRGB when the frames'
    colorspaces disagree.
  */
  append_image=CloneImage(images,width,height,MagickTrue,exception);
  if (append_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(append_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&append_image->exception);
      append_image=DestroyImage(append_image);
      return((Image *) NULL);
    }
  if (homogeneous_colorspace == MagickFalse)
    (void) SetImageColorspace(append_image,sRGBColorspace);
  append_image->depth=depth;
  append_image->matte=matte;
  append_image->page=images->page;
  (void) SetImageBackgroundColor(append_image);
  status=MagickTrue;
  x_offset=0;
  y_offset=0;
  next=images;
  append_view=AcquireAuthenticCacheView(append_image,exception);
  for (n=0; n < (MagickOffsetType) number_images; n++)
  {
    CacheView
      *image_view;
    MagickBooleanType
      proceed;
    /*
      Justify the current frame inside its slot according to its gravity
      setting, then copy it row by row (rows are independent, hence the
      parallel loop; status is only ever cleared, never set, inside it).
    */
    SetGeometry(append_image,&geometry);
    GravityAdjustGeometry(next->columns,next->rows,next->gravity,&geometry);
    if (stack != MagickFalse)
      x_offset-=geometry.x;
    else
      y_offset-=geometry.y;
    image_view=AcquireVirtualCacheView(next,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) shared(status) \
      magick_number_threads(next,next,next->rows,1)
#endif
    for (y=0; y < (ssize_t) next->rows; y++)
    {
      MagickBooleanType
        sync;
      const IndexPacket
        *magick_restrict indexes;
      const PixelPacket
        *magick_restrict p;
      IndexPacket
        *magick_restrict append_indexes;
      PixelPacket
        *magick_restrict q;
      ssize_t
        x;
      if (status == MagickFalse)
        continue;
      p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
      q=QueueCacheViewAuthenticPixels(append_view,x_offset,y+y_offset,
        next->columns,1,exception);
      if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
        {
          status=MagickFalse;
          continue;
        }
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      append_indexes=GetCacheViewAuthenticIndexQueue(append_view);
      for (x=0; x < (ssize_t) next->columns; x++)
      {
        SetPixelRed(q,GetPixelRed(p));
        SetPixelGreen(q,GetPixelGreen(p));
        SetPixelBlue(q,GetPixelBlue(p));
        /* default to opaque; frames without alpha stay fully opaque */
        SetPixelOpacity(q,OpaqueOpacity);
        if (next->matte != MagickFalse)
          SetPixelOpacity(q,GetPixelOpacity(p));
        if ((next->colorspace == CMYKColorspace) &&
            (append_image->colorspace == CMYKColorspace))
          SetPixelIndex(append_indexes+x,GetPixelIndex(indexes+x));
        p++;
        q++;
      }
      sync=SyncCacheViewAuthenticPixels(append_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
    }
    image_view=DestroyCacheView(image_view);
    /*
      Advance to the next slot: move right for side-by-side, down for
      stacked.
    */
    if (stack == MagickFalse)
      {
        x_offset+=(ssize_t) next->columns;
        y_offset=0;
      }
    else
      {
        x_offset=0;
        y_offset+=(ssize_t) next->rows;
      }
    proceed=SetImageProgress(append_image,AppendImageTag,n,number_images);
    if (proceed == MagickFalse)
      break;
    next=GetNextImageInList(next);
  }
  append_view=DestroyCacheView(append_view);
  if (status == MagickFalse)
    append_image=DestroyImage(append_image);
  return(append_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C a t c h I m a g e E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CatchImageException() returns the most severe exception found in the image
% sequence.  If no exception is found, UndefinedException is returned;
% otherwise the most severe exception is reported as a warning or error
% depending on its severity, and its type is returned.
%
% The format of the CatchImageException method is:
%
% ExceptionType CatchImageException(Image *image)
%
% A description of each parameter follows:
%
% o image: An image sequence.
%
*/
MagickExport ExceptionType CatchImageException(Image *image)
{
  ExceptionType
    severity;
  ExceptionInfo
    *sequence_exception;
  /*
    Gather the most severe pending exception from the image sequence into a
    scratch ExceptionInfo, report it, and hand back its severity.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  sequence_exception=AcquireExceptionInfo();
  GetImageException(image,sequence_exception);
  CatchException(sequence_exception);
  severity=sequence_exception->severity;
  sequence_exception=DestroyExceptionInfo(sequence_exception);
  return(severity);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l i p I m a g e P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClipImagePath() sets the image clip mask based on any clipping path
% information, if it exists.
%
% The format of the ClipImagePath method is:
%
% MagickBooleanType ClipImagePath(Image *image,const char *pathname,
% const MagickBooleanType inside)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o pathname: name of clipping path resource. If name is preceded by #, use
% clipping path numbered by name.
%
% o inside: if non-zero, later operations take effect inside clipping path.
% Otherwise later operations take effect outside clipping path.
%
*/
MagickExport MagickBooleanType ClipImage(Image *image)
{
  /*
    Convenience wrapper: clip to the first embedded clipping path ("#1"),
    with later operations taking effect inside the path.
  */
  return(ClipImagePath(image,"#1",MagickTrue));
}
MagickExport MagickBooleanType ClipImagePath(Image *image,const char *pathname,
  const MagickBooleanType inside)
{
#define ClipImagePathTag "ClipPath/Image"
  char
    *property;
  const char
    *value;
  Image
    *clip_mask;
  ImageInfo
    *image_info;
  /*
    Set the image clip mask from the named 8BIM clipping path stored in the
    image properties.  Returns MagickTrue on success, MagickFalse if the
    path is missing or the mask cannot be rendered.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pathname != NULL);
  /*
    Look up the clipping path property ("8BIM:1999,2998:<pathname>").
  */
  property=AcquireString(pathname);
  (void) FormatLocaleString(property,MaxTextExtent,"8BIM:1999,2998:%s",
    pathname);
  value=GetImageProperty(image,property);
  property=DestroyString(property);
  if (value == (const char *) NULL)
    {
      ThrowFileException(&image->exception,OptionError,"NoClipPathDefined",
        image->filename);
      return(MagickFalse);
    }
  /*
    Render the path (stored as a blob) into a mask image.
  */
  image_info=AcquireImageInfo();
  (void) CopyMagickString(image_info->filename,image->filename,MaxTextExtent);
  (void) ConcatenateMagickString(image_info->filename,pathname,MaxTextExtent);
  clip_mask=BlobToImage(image_info,value,strlen(value),&image->exception);
  image_info=DestroyImageInfo(image_info);
  if (clip_mask == (Image *) NULL)
    return(MagickFalse);
  if (clip_mask->storage_class == PseudoClass)
    {
      (void) SyncImage(clip_mask);
      if (SetImageStorageClass(clip_mask,DirectClass) == MagickFalse)
        {
          /*
            Fix: destroy the mask before bailing out; the original code
            returned here without releasing clip_mask, leaking the image.
          */
          clip_mask=DestroyImage(clip_mask);
          return(MagickFalse);
        }
    }
  /*
    Invert the mask when later operations should apply outside the path.
  */
  if (inside == MagickFalse)
    (void) NegateImage(clip_mask,MagickFalse);
  (void) FormatLocaleString(clip_mask->magick_filename,MaxTextExtent,
    "8BIM:1999,2998:%s\nPS",pathname);
  (void) SetImageClipMask(image,clip_mask);
  clip_mask=DestroyImage(clip_mask);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImage() copies an image and returns the copy as a new image object.
%
% If the specified columns and rows is 0, an exact copy of the image is
% returned, otherwise the pixel data is undefined and must be initialized
% with the QueueAuthenticPixels() and SyncAuthenticPixels() methods. On
% failure, a NULL image is returned and exception describes the reason for the
% failure.
%
% The format of the CloneImage method is:
%
% Image *CloneImage(const Image *image,const size_t columns,
%        const size_t rows,const MagickBooleanType detach,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the cloned image.
%
% o rows: the number of rows in the cloned image.
%
% o detach: With a value other than 0, the cloned image is detached from
% its parent I/O stream.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CloneImage(const Image *image,const size_t columns,
  const size_t rows,const MagickBooleanType detach,ExceptionInfo *exception)
{
  double
    scale;
  Image
    *clone_image;
  size_t
    length;
  /*
    Clone the image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((image->columns == 0) || (image->rows == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError,
        "NegativeOrZeroImageSize","`%s'",image->filename);
      return((Image *) NULL);
    }
  /*
    Allocate a zeroed Image structure and copy the scalar members.
  */
  clone_image=(Image *) AcquireCriticalMemory(sizeof(*clone_image));
  (void) memset(clone_image,0,sizeof(*clone_image));
  clone_image->signature=MagickCoreSignature;
  clone_image->storage_class=image->storage_class;
  clone_image->channels=image->channels;
  clone_image->colorspace=image->colorspace;
  clone_image->matte=image->matte;
  clone_image->columns=image->columns;
  clone_image->rows=image->rows;
  clone_image->dither=image->dither;
  /*
    Deep-copy profiles, properties, and artifacts; inherit any pending
    exception from the source.
  */
  (void) CloneImageProfiles(clone_image,image);
  (void) CloneImageProperties(clone_image,image);
  (void) CloneImageArtifacts(clone_image,image);
  GetTimerInfo(&clone_image->timer);
  InitializeExceptionInfo(&clone_image->exception);
  InheritException(&clone_image->exception,&image->exception);
  if (image->ascii85 != (void *) NULL)
    Ascii85Initialize(clone_image);
  clone_image->extent=image->extent;
  clone_image->magick_columns=image->magick_columns;
  clone_image->magick_rows=image->magick_rows;
  clone_image->type=image->type;
  (void) CopyMagickString(clone_image->magick_filename,image->magick_filename,
    MaxTextExtent);
  (void) CopyMagickString(clone_image->magick,image->magick,MaxTextExtent);
  (void) CopyMagickString(clone_image->filename,image->filename,MaxTextExtent);
  clone_image->progress_monitor=image->progress_monitor;
  clone_image->client_data=image->client_data;
  clone_image->reference_count=1;
  clone_image->next=image->next;
  clone_image->previous=image->previous;
  clone_image->list=NewImageList();
  clone_image->clip_mask=NewImageList();
  clone_image->mask=NewImageList();
  /*
    detach == MagickFalse: share the source's blob (I/O stream) by
    reference.  Otherwise give the clone an empty blob and unlink it from
    the source's image list.
  */
  if (detach == MagickFalse)
    clone_image->blob=ReferenceBlob(image->blob);
  else
    {
      clone_image->next=NewImageList();
      clone_image->previous=NewImageList();
      clone_image->blob=CloneBlobInfo((BlobInfo *) NULL);
    }
  clone_image->ping=image->ping;
  clone_image->debug=IsEventLogging();
  clone_image->semaphore=AllocateSemaphoreInfo();
  if (image->colormap != (PixelPacket *) NULL)
    {
      /*
        Allocate and copy the image colormap.
      */
      clone_image->colors=image->colors;
      length=(size_t) image->colors;
      clone_image->colormap=(PixelPacket *) AcquireQuantumMemory(length+1,
        sizeof(*clone_image->colormap));
      if (clone_image->colormap == (PixelPacket *) NULL)
        {
          clone_image=DestroyImage(clone_image);
          ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
        }
      (void) memcpy(clone_image->colormap,image->colormap,length*
        sizeof(*clone_image->colormap));
    }
  if ((columns == 0) || (rows == 0))
    {
      /*
        Exact copy requested: clone montage/directory metadata and the
        masks, and share the pixel cache by reference.
      */
      if (image->montage != (char *) NULL)
        (void) CloneString(&clone_image->montage,image->montage);
      if (image->directory != (char *) NULL)
        (void) CloneString(&clone_image->directory,image->directory);
      if (image->clip_mask != (Image *) NULL)
        clone_image->clip_mask=CloneImage(image->clip_mask,0,0,MagickTrue,
          exception);
      if (image->mask != (Image *) NULL)
        clone_image->mask=CloneImage(image->mask,0,0,MagickTrue,exception);
      clone_image->cache=ReferencePixelCache(image->cache);
      return(clone_image);
    }
  if ((columns == image->columns) && (rows == image->rows))
    {
      /*
        Same geometry requested: the masks still apply, so clone them too.
      */
      if (image->clip_mask != (Image *) NULL)
        clone_image->clip_mask=CloneImage(image->clip_mask,0,0,MagickTrue,
          exception);
      if (image->mask != (Image *) NULL)
        clone_image->mask=CloneImage(image->mask,0,0,MagickTrue,exception);
    }
  /*
    New geometry: scale the page and tile offsets proportionally.  The
    pixel data itself is left undefined for the caller to initialize.
  */
  scale=1.0;
  if (image->columns != 0)
    scale=(double) columns/(double) image->columns;
  clone_image->page.width=(size_t) CastDoubleToLong(floor(scale*
    image->page.width+0.5));
  clone_image->page.x=CastDoubleToLong(ceil(scale*image->page.x-0.5));
  clone_image->tile_offset.x=CastDoubleToLong(ceil(scale*
    image->tile_offset.x-0.5));
  scale=1.0;
  if (image->rows != 0)
    scale=(double) rows/(double) image->rows;
  clone_image->page.height=(size_t) CastDoubleToLong(floor(scale*
    image->page.height+0.5));
  clone_image->page.y=CastDoubleToLong(ceil(scale*image->page.y-0.5));
  clone_image->tile_offset.y=CastDoubleToLong(ceil(scale*
    image->tile_offset.y-0.5));
  clone_image->cache=ClonePixelCache(image->cache);
  if (SetImageExtent(clone_image,columns,rows) == MagickFalse)
    {
      InheritException(exception,&clone_image->exception);
      clone_image=DestroyImage(clone_image);
    }
  return(clone_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageInfo() makes a copy of the given image info structure. If
% NULL is specified, a new image info structure is created initialized to
% default values.
%
% The format of the CloneImageInfo method is:
%
% ImageInfo *CloneImageInfo(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport ImageInfo *CloneImageInfo(const ImageInfo *image_info)
{
  ImageInfo
    *clone_info;
  /*
    Start from a default-initialized ImageInfo; a NULL source simply
    returns the defaults.
  */
  clone_info=AcquireImageInfo();
  if (image_info == (ImageInfo *) NULL)
    return(clone_info);
  /*
    Copy the scalar members and deep-copy heap-allocated strings.
  */
  clone_info->compression=image_info->compression;
  clone_info->temporary=image_info->temporary;
  clone_info->adjoin=image_info->adjoin;
  clone_info->antialias=image_info->antialias;
  clone_info->scene=image_info->scene;
  clone_info->number_scenes=image_info->number_scenes;
  clone_info->depth=image_info->depth;
  if (image_info->size != (char *) NULL)
    (void) CloneString(&clone_info->size,image_info->size);
  if (image_info->extract != (char *) NULL)
    (void) CloneString(&clone_info->extract,image_info->extract);
  if (image_info->scenes != (char *) NULL)
    (void) CloneString(&clone_info->scenes,image_info->scenes);
  if (image_info->page != (char *) NULL)
    (void) CloneString(&clone_info->page,image_info->page);
  clone_info->interlace=image_info->interlace;
  clone_info->endian=image_info->endian;
  clone_info->units=image_info->units;
  clone_info->quality=image_info->quality;
  if (image_info->sampling_factor != (char *) NULL)
    (void) CloneString(&clone_info->sampling_factor,
      image_info->sampling_factor);
  if (image_info->server_name != (char *) NULL)
    (void) CloneString(&clone_info->server_name,image_info->server_name);
  if (image_info->font != (char *) NULL)
    (void) CloneString(&clone_info->font,image_info->font);
  if (image_info->texture != (char *) NULL)
    (void) CloneString(&clone_info->texture,image_info->texture);
  if (image_info->density != (char *) NULL)
    (void) CloneString(&clone_info->density,image_info->density);
  clone_info->pointsize=image_info->pointsize;
  clone_info->fuzz=image_info->fuzz;
  clone_info->pen=image_info->pen;
  clone_info->background_color=image_info->background_color;
  clone_info->border_color=image_info->border_color;
  clone_info->matte_color=image_info->matte_color;
  clone_info->transparent_color=image_info->transparent_color;
  clone_info->dither=image_info->dither;
  clone_info->monochrome=image_info->monochrome;
  clone_info->colors=image_info->colors;
  clone_info->colorspace=image_info->colorspace;
  clone_info->type=image_info->type;
  clone_info->orientation=image_info->orientation;
  clone_info->preview_type=image_info->preview_type;
  clone_info->group=image_info->group;
  clone_info->ping=image_info->ping;
  clone_info->verbose=image_info->verbose;
  if (image_info->view != (char *) NULL)
    (void) CloneString(&clone_info->view,image_info->view);
  if (image_info->authenticate != (char *) NULL)
    (void) CloneString(&clone_info->authenticate,image_info->authenticate);
  (void) CloneImageOptions(clone_info,image_info);
  clone_info->progress_monitor=image_info->progress_monitor;
  clone_info->client_data=image_info->client_data;
  /*
    The pixel cache is shared by reference count; the profile is a deep
    copy of the StringInfo blob.
  */
  clone_info->cache=image_info->cache;
  if (image_info->cache != (void *) NULL)
    clone_info->cache=ReferencePixelCache(image_info->cache);
  if (image_info->profile != (void *) NULL)
    clone_info->profile=(void *) CloneStringInfo((StringInfo *)
      image_info->profile);
  SetImageInfoFile(clone_info,image_info->file);
  SetImageInfoBlob(clone_info,image_info->blob,image_info->length);
  clone_info->stream=image_info->stream;
  clone_info->virtual_pixel_method=image_info->virtual_pixel_method;
  (void) CopyMagickString(clone_info->magick,image_info->magick,MaxTextExtent);
  (void) CopyMagickString(clone_info->unique,image_info->unique,MaxTextExtent);
  (void) CopyMagickString(clone_info->zero,image_info->zero,MaxTextExtent);
  (void) CopyMagickString(clone_info->filename,image_info->filename,
    MaxTextExtent);
  clone_info->subimage=image_info->scene; /* deprecated */
  clone_info->subrange=image_info->number_scenes; /* deprecated */
  clone_info->channel=image_info->channel;
  clone_info->debug=IsEventLogging();
  clone_info->signature=image_info->signature;
  return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o p y I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CopyImagePixels() copies pixels from the source image as defined by the
% geometry the destination image at the specified offset.
%
% The format of the CopyImagePixels method is:
%
% MagickBooleanType CopyImagePixels(Image *image,const Image *source_image,
% const RectangleInfo *geometry,const OffsetInfo *offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the destination image.
%
% o source_image: the source image.
%
% o geometry: define the dimensions of the source pixel rectangle.
%
% o offset: define the offset in the destination image.
%
% o exception: return the highest severity exception.
%
*/
MagickExport MagickBooleanType CopyImagePixels(Image *image,
  const Image *source_image,const RectangleInfo *geometry,
  const OffsetInfo *offset,ExceptionInfo *exception)
{
#define CopyImageTag "Copy/Image"
  CacheView
    *image_view,
    *source_view;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(source_image != (Image *) NULL);
  assert(geometry != (RectangleInfo *) NULL);
  assert(offset != (OffsetInfo *) NULL);
  /*
    Reject geometry/offset combinations that fall outside the destination.
  */
  if ((offset->x < 0) || (offset->y < 0) ||
      ((ssize_t) (offset->x+geometry->width) > (ssize_t) image->columns) ||
      ((ssize_t) (offset->y+geometry->height) > (ssize_t) image->rows))
    ThrowBinaryException(OptionError,"GeometryDoesNotContainImage",
      image->filename);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /*
    Copy image pixels.  Rows are processed in parallel; each iteration reads
    one source row (virtual view) and writes one destination row (authentic
    view).
  */
  status=MagickTrue;
  progress=0;
  source_view=AcquireVirtualCacheView(source_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,image,geometry->height,1)
#endif
  for (y=0; y < (ssize_t) geometry->height; y++)
  {
    const IndexPacket
      *magick_restrict source_indexes;
    const PixelPacket
      *magick_restrict p;
    IndexPacket
      *magick_restrict indexes;
    PixelPacket
      *magick_restrict q;
    ssize_t
      x;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(source_view,geometry->x,y+geometry->y,
      geometry->width,1,exception);
    q=GetCacheViewAuthenticPixels(image_view,offset->x,y+offset->y,
      geometry->width,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    source_indexes=GetCacheViewVirtualIndexQueue(source_view);
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) geometry->width; x++)
    {
      /* Whole-packet copy; CMYK images additionally carry a black channel
         in the index queue. */
      *q=(*p);
      if (image->colorspace == CMYKColorspace)
        indexes[x]=source_indexes[x];
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        /* NOTE(review): the loop runs geometry->height iterations but the
           progress total is image->rows, so progress under-reports when a
           sub-rectangle is copied -- confirm this is intentional. */
        proceed=SetImageProgress(image,CopyImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  source_view=DestroyCacheView(source_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImage() dereferences an image, deallocating memory associated with
% the image if the reference count becomes zero.
%
% The format of the DestroyImage method is:
%
% Image *DestroyImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *DestroyImage(Image *image)
{
  MagickBooleanType
    destroy;
  /*
    Dereference image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  destroy=MagickFalse;
  /*
    Decrement the reference count under the image semaphore; only the last
    reference actually destroys the image (returns NULL either way).
  */
  LockSemaphoreInfo(image->semaphore);
  image->reference_count--;
  if (image->reference_count == 0)
    destroy=MagickTrue;
  UnlockSemaphoreInfo(image->semaphore);
  if (destroy == MagickFalse)
    return((Image *) NULL);
  /*
    Destroy image.
  */
  DestroyImagePixels(image);
  if (image->clip_mask != (Image *) NULL)
    image->clip_mask=DestroyImage(image->clip_mask);
  if (image->mask != (Image *) NULL)
    image->mask=DestroyImage(image->mask);
  if (image->montage != (char *) NULL)
    image->montage=DestroyString(image->montage);
  if (image->directory != (char *) NULL)
    image->directory=DestroyString(image->directory);
  if (image->colormap != (PixelPacket *) NULL)
    image->colormap=(PixelPacket *) RelinquishMagickMemory(image->colormap);
  if (image->geometry != (char *) NULL)
    image->geometry=DestroyString(image->geometry);
  DestroyImageProfiles(image);
  DestroyImageProperties(image);
  DestroyImageArtifacts(image);
  if (image->ascii85 != (Ascii85Info*) NULL)
    image->ascii85=(Ascii85Info *) RelinquishMagickMemory(image->ascii85);
  DestroyBlob(image);
  (void) ClearExceptionInfo(&image->exception,MagickTrue);
  /*
    The semaphore is released last; the signature is inverted so stale
    pointers fail the signature asserts.
  */
  if (image->semaphore != (SemaphoreInfo *) NULL)
    DestroySemaphoreInfo(&image->semaphore);
  image->signature=(~MagickCoreSignature);
  image=(Image *) RelinquishMagickMemory(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageInfo() deallocates memory associated with an ImageInfo
% structure.
%
% The format of the DestroyImageInfo method is:
%
% ImageInfo *DestroyImageInfo(ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport ImageInfo *DestroyImageInfo(ImageInfo *image_info)
{
  /*
    Release every heap-allocated string member, then the options, pixel
    cache, and profile, and finally the structure itself.  Returns NULL so
    callers can clear their pointer in one assignment.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  if (image_info->size != (char *) NULL)
    image_info->size=DestroyString(image_info->size);
  if (image_info->extract != (char *) NULL)
    image_info->extract=DestroyString(image_info->extract);
  if (image_info->scenes != (char *) NULL)
    image_info->scenes=DestroyString(image_info->scenes);
  if (image_info->page != (char *) NULL)
    image_info->page=DestroyString(image_info->page);
  if (image_info->sampling_factor != (char *) NULL)
    image_info->sampling_factor=DestroyString(
      image_info->sampling_factor);
  if (image_info->server_name != (char *) NULL)
    image_info->server_name=DestroyString(
      image_info->server_name);
  if (image_info->font != (char *) NULL)
    image_info->font=DestroyString(image_info->font);
  if (image_info->texture != (char *) NULL)
    image_info->texture=DestroyString(image_info->texture);
  if (image_info->density != (char *) NULL)
    image_info->density=DestroyString(image_info->density);
  if (image_info->view != (char *) NULL)
    image_info->view=DestroyString(image_info->view);
  if (image_info->authenticate != (char *) NULL)
    image_info->authenticate=DestroyString(
      image_info->authenticate);
  DestroyImageOptions(image_info);
  if (image_info->cache != (void *) NULL)
    image_info->cache=DestroyPixelCache(image_info->cache);
  if (image_info->profile != (StringInfo *) NULL)
    image_info->profile=(void *) DestroyStringInfo((StringInfo *)
      image_info->profile);
  /* Invert the signature so stale pointers fail the signature asserts. */
  image_info->signature=(~MagickCoreSignature);
  image_info=(ImageInfo *) RelinquishMagickMemory(image_info);
  return(image_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i s a s s o c i a t e I m a g e S t r e a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DisassociateImageStream() disassociates the image stream. It checks if the
% blob of the specified image is referenced by other images. If the reference
% count is higher than 1, a new blob is assigned to the specified image.
%
% The format of the DisassociateImageStream method is:
%
% void DisassociateImageStream(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DisassociateImageStream(Image *image)
{
  /*
    Detach this image from a shared blob: delegates to DisassociateBlob().
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  DisassociateBlob(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C l i p M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageClipMask() returns the clip path associated with the image.
%
% The format of the GetImageClipMask method is:
%
% Image *GetImageClipMask(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *GetImageClipMask(const Image *image,
  ExceptionInfo *exception)
{
  /*
    Return a private clone of the clip mask (the caller owns and must
    destroy the returned image), or NULL when no clip mask is set.
  */
  assert(image != (const Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (image->clip_mask == (Image *) NULL)
    return((Image *) NULL);
  return(CloneImage(image->clip_mask,0,0,MagickTrue,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageException() traverses an image sequence and returns any
% error more severe than noted by the exception parameter.
%
% The format of the GetImageException method is:
%
% void GetImageException(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: Specifies a pointer to a list of one or more images.
%
% o exception: return the highest severity exception.
%
*/
MagickExport void GetImageException(Image *image,ExceptionInfo *exception)
{
  Image
    *p;
  /*
    Walk the image sequence, promote any per-frame exception that is more
    severe than the caller's into `exception`, and clear each frame's own
    severity as it is consumed.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  for (p=image; p != (Image *) NULL; p=GetNextImageInList(p))
    if (p->exception.severity != UndefinedException)
      {
        if (p->exception.severity > exception->severity)
          InheritException(exception,&p->exception);
        p->exception.severity=UndefinedException;
      }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageInfo() initializes image_info to default values.
%
% The format of the GetImageInfo method is:
%
% void GetImageInfo(ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport void GetImageInfo(ImageInfo *image_info)
{
  char
    *synchronize;
  ExceptionInfo
    *exception;
  /*
    File and image dimension members.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image_info != (ImageInfo *) NULL);
  /* Zero everything first, then set only the non-zero defaults. */
  (void) memset(image_info,0,sizeof(*image_info));
  image_info->adjoin=MagickTrue;
  image_info->interlace=NoInterlace;
  image_info->channel=DefaultChannels;
  image_info->quality=UndefinedCompressionQuality;
  image_info->antialias=MagickTrue;
  image_info->dither=MagickTrue;
  /* Honor the MAGICK_SYNCHRONIZE environment override, if set. */
  synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (synchronize != (const char *) NULL)
    {
      image_info->synchronize=IsStringTrue(synchronize);
      synchronize=DestroyString(synchronize);
    }
  /*
    Resolve the default background/border/matte/transparent colors.
  */
  exception=AcquireExceptionInfo();
  (void) QueryColorDatabase(BackgroundColor,&image_info->background_color,
    exception);
  (void) QueryColorDatabase(BorderColor,&image_info->border_color,exception);
  (void) QueryColorDatabase(MatteColor,&image_info->matte_color,exception);
  (void) QueryColorDatabase(TransparentColor,&image_info->transparent_color,
    exception);
  exception=DestroyExceptionInfo(exception);
  image_info->debug=IsEventLogging();
  image_info->signature=MagickCoreSignature;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e I n f o F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageInfoFile() returns the image info file member.
%
% The format of the GetImageInfoFile method is:
%
% FILE *GetImageInfoFile(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport FILE *GetImageInfoFile(const ImageInfo *image_info)
{
  /* Simple accessor: expose the FILE handle stored in the image info. */
  return(image_info->file);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMask() returns the mask associated with the image.
%
% The format of the GetImageMask method is:
%
% Image *GetImageMask(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *GetImageMask(const Image *image,ExceptionInfo *exception)
{
  /*
    Hand back a private copy of the image mask; the caller owns (and must
    destroy) the returned image.  A NULL result means no mask is set.
  */
  assert(image != (const Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (image->mask != (Image *) NULL)
    return(CloneImage(image->mask,0,0,MagickTrue,exception));
  return((Image *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannels() returns the number of pixel channels associated with the
% specified image.
%
% The format of the GetChannels method is:
%
% size_t GetImageChannels(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport size_t GetImageChannels(Image *image)
{
  /*
    Accessor: return the channel count stored on the image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  return(image->channels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e R e f e r e n c e C o u n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageReferenceCount() returns the image reference count.
%
% The format of the GetReferenceCount method is:
%
% ssize_t GetImageReferenceCount(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport ssize_t GetImageReferenceCount(Image *image)
{
  ssize_t
    reference_count;
  /*
    Read the reference count under the image semaphore so the value is a
    consistent snapshot (it may still change immediately after return).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  LockSemaphoreInfo(image->semaphore);
  reference_count=image->reference_count;
  UnlockSemaphoreInfo(image->semaphore);
  return(reference_count);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageVirtualPixelMethod() gets the "virtual pixels" method for the
% image. A virtual pixel is any pixel access that is outside the boundaries
% of the image cache.
%
% The format of the GetImageVirtualPixelMethod() method is:
%
% VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
{
  /*
    Delegate to the pixel cache, which owns the virtual-pixel setting.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  return(GetPixelCacheVirtualMethod(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p r e t I m a g e F i l e n a m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpretImageFilename() interprets embedded characters in an image filename.
% The filename length is returned.
%
% The format of the InterpretImageFilename method is:
%
% size_t InterpretImageFilename(const ImageInfo *image_info,Image *image,
% const char *format,int value,char *filename)
%
% A description of each parameter follows.
%
%    o image_info: the image info.
%
% o image: the image.
%
% o format: A filename describing the format to use to write the numeric
% argument. Only the first numeric format identifier is replaced.
%
% o value: Numeric value to substitute into format filename.
%
% o filename: return the formatted filename in this character buffer.
%
*/
MagickExport size_t InterpretImageFilename(const ImageInfo *image_info,
  Image *image,const char *format,int value,char *filename)
{
  char
    *q;
  const char
    *p;
  int
    c;
  MagickBooleanType
    canonical;
  ssize_t
    field_width,
    offset;
  /*
    Copy the format through, then substitute the first %d/%o/%x or
    %[filename:...] specification found.  'canonical' records whether any
    substitution took place; 'offset' tracks the drift between positions in
    'format' and positions in 'filename' caused by earlier substitutions.
  */
  canonical=MagickFalse;
  offset=0;
  (void) CopyMagickString(filename,format,MaxTextExtent);
  /*
    The "filename:literal" option suppresses all interpretation.
  */
  if (IsStringTrue(GetImageOption(image_info,"filename:literal")) != MagickFalse)
    return(strlen(filename));
  for (p=strchr(format,'%'); p != (char *) NULL; p=strchr(p+1,'%'))
  {
    q=(char *) p+1;
    if (*q == '%')
      {
        /*
          "%%" is a literal percent sign; skip it.
        */
        p=q+1;
        continue;
      }
    field_width=0;
    if (*q == '0')
      field_width=(ssize_t) strtol(q,&q,10);
    switch (*q)
    {
      case 'd':
      case 'o':
      case 'x':
      {
        /*
          Numeric scene substitution: temporarily terminate the
          specification, format 'value' in place, then re-append the rest
          of the string.
        */
        q++;
        c=(*q);
        *q='\0';
        (void) FormatLocaleString(filename+(p-format-offset),(size_t)
          (MaxTextExtent-(p-format-offset)),p,value);
        /*
          NOTE(review): the constant 4 appears to assume a default
          formatted width; confirm against callers before changing.
        */
        offset+=(4-field_width);
        *q=c;
        (void) ConcatenateMagickString(filename,q,MaxTextExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      case '[':
      {
        char
          pattern[MaxTextExtent];
        const char
          *value;
        char
          *r;
        ssize_t
          i;
        ssize_t
          depth;
        /*
          Image option.  NOTE(review): this local 'value' shadows the
          numeric 'value' parameter for the remainder of this case.
        */
        if (strchr(p,']') == (char *) NULL)
          break;
        /*
          Copy the bracketed pattern, honoring nested '['...']' pairs.
        */
        depth=1;
        r=q+1;
        for (i=0; (i < (MaxTextExtent-1L)) && (*r != '\0'); i++)
        {
          if (*r == '[')
            depth++;
          if (*r == ']')
            depth--;
          if (depth <= 0)
            break;
          pattern[i]=(*r++);
        }
        pattern[i]='\0';
        /*
          Only "filename:" prefixed properties are interpreted here.
        */
        if (LocaleNCompare(pattern,"filename:",9) != 0)
          break;
        /*
          Resolve the value: image property, then artifact, then option.
        */
        value=(const char *) NULL;
        if (image != (Image *) NULL)
          value=GetImageProperty(image,pattern);
        if ((value == (const char *) NULL) &&
            (image != (Image *) NULL))
          value=GetImageArtifact(image,pattern);
        if ((value == (const char *) NULL) &&
            (image_info != (ImageInfo *) NULL))
          value=GetImageOption(image_info,pattern);
        if (value == (const char *) NULL)
          break;
        q--;
        c=(*q);
        *q='\0';
        (void) CopyMagickString(filename+(p-format-offset),value,(size_t)
          (MaxTextExtent-(p-format-offset)));
        offset+=strlen(pattern)-strlen(value)+3;
        *q=c;
        (void) ConcatenateMagickString(filename,r+1,MaxTextExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      default:
        break;
    }
  }
  if (canonical == MagickFalse)
    (void) CopyMagickString(filename,format,MaxTextExtent);
  else
    /*
      Collapse any remaining "%%" escapes to a single '%'.
    */
    for (q=filename; *q != '\0'; q++)
      if ((*q == '%') && (*(q+1) == '%'))
        (void) CopyMagickString(q,q+1,(size_t) (MaxTextExtent-(q-filename)));
  return(strlen(filename));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s H i g h D y n a m i c R a n g e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsHighDynamicRangeImage() returns MagickTrue if any pixel component is
% non-integer or exceeds the bounds of the quantum depth (e.g. for Q16,
% 0..65535).
%
% The format of the IsHighDynamicRangeImage method is:
%
% MagickBooleanType IsHighDynamicRangeImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsHighDynamicRangeImage(const Image *image,
  ExceptionInfo *exception)
{
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  /*
    Without HDRI support pixel components are always integral and within
    the quantum range, so the image cannot be HDR.
  */
  (void) image;
  (void) exception;
  return(MagickFalse);
#else
  CacheView
    *image_view;
  MagickBooleanType
    status;
  MagickPixelPacket
    zero;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    'status' stays MagickTrue only while every inspected component is inside
    [0,QuantumRange] and integral; a row that finds an out-of-range
    component clears it below.
  */
  status=MagickTrue;
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;
    const IndexPacket
      *indexes;
    const PixelPacket
      *p;
    ssize_t
      x;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      /*
        A component is "HDR" when it is negative, exceeds QuantumRange, or
        has a fractional part (the QuantumAny cast changes its value).
      */
      if ((pixel.red < 0.0) || (pixel.red > QuantumRange) ||
          (pixel.red != (QuantumAny) pixel.red))
        break;
      if ((pixel.green < 0.0) || (pixel.green > QuantumRange) ||
          (pixel.green != (QuantumAny) pixel.green))
        break;
      if ((pixel.blue < 0.0) || (pixel.blue > QuantumRange) ||
          (pixel.blue != (QuantumAny) pixel.blue))
        break;
      if (pixel.matte != MagickFalse)
        {
          if ((pixel.opacity < 0.0) || (pixel.opacity > QuantumRange) ||
              (pixel.opacity != (QuantumAny) pixel.opacity))
            break;
        }
      if (pixel.colorspace == CMYKColorspace)
        {
          if ((pixel.index < 0.0) || (pixel.index > QuantumRange) ||
              (pixel.index != (QuantumAny) pixel.index))
            break;
        }
      p++;
    }
    /*
      An early break above means an out-of-range component was found.
    */
    if (x < (ssize_t) image->columns)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  /*
    Invert the sense: all-in-range (status still set) means NOT HDR.
  */
  return(status != MagickFalse ? MagickFalse : MagickTrue);
#endif
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e O b j e c t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageObject() returns MagickTrue if the image sequence contains a valid
% set of image objects.
%
% The format of the IsImageObject method is:
%
% MagickBooleanType IsImageObject(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsImageObject(const Image *image)
{
  const Image
    *next;

  /*
    Walk the image list and verify every image carries a valid core
    signature.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  next=image;
  while (next != (Image *) NULL)
  {
    if (next->signature != MagickCoreSignature)
      return(MagickFalse);
    next=GetNextImageInList(next);
  }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s T a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsTaintImage() returns MagickTrue if any pixel in the image has been altered
% since it was first constituted.
%
% The format of the IsTaintImage method is:
%
% MagickBooleanType IsTaintImage(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsTaintImage(const Image *image)
{
  char
    magick[MaxTextExtent],
    filename[MaxTextExtent];

  const Image
    *frame;

  /*
    A sequence is tainted when any frame has its taint flag set, or when
    any frame's magick or filename differs from the first frame's.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  (void) CopyMagickString(magick,image->magick,MaxTextExtent);
  (void) CopyMagickString(filename,image->filename,MaxTextExtent);
  for (frame=image; frame != (Image *) NULL; frame=GetNextImageInList(frame))
    if ((frame->taint != MagickFalse) ||
        (LocaleCompare(frame->magick,magick) != 0) ||
        (LocaleCompare(frame->filename,filename) != 0))
      return(MagickTrue);
  return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o d i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ModifyImage() ensures that there is only a single reference to the image
% to be modified, updating the provided image pointer to point to a clone of
% the original image if necessary.
%
% The format of the ModifyImage method is:
%
% MagickBooleanType ModifyImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ModifyImage(Image **image,
  ExceptionInfo *exception)
{
  Image
    *clone_image;

  /*
    Ensure exclusive ownership of the image: if it is shared (reference
    count > 1), replace *image with a private clone and release one
    reference on the original.
  */
  assert(image != (Image **) NULL);
  assert(*image != (Image *) NULL);
  assert((*image)->signature == MagickCoreSignature);
  if ((*image)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
  if (GetImageReferenceCount(*image) <= 1)
    return(MagickTrue);
  clone_image=CloneImage(*image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    {
      /*
        Clone failed: leave *image and its reference count untouched; the
        failure details are already recorded in 'exception'.  (Previously
        the count was decremented and *image set to NULL while still
        returning MagickTrue.)
      */
      return(MagickFalse);
    }
  LockSemaphoreInfo((*image)->semaphore);
  (*image)->reference_count--;
  UnlockSemaphoreInfo((*image)->semaphore);
  *image=clone_image;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w M a g i c k I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewMagickImage() creates a blank image canvas of the specified size and
% background color.
%
% The format of the NewMagickImage method is:
%
% Image *NewMagickImage(const ImageInfo *image_info,const size_t width,
% const size_t height,const MagickPixelPacket *background)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the image width.
%
% o height: the image height.
%
% o background: the image color.
%
*/
MagickExport Image *NewMagickImage(const ImageInfo *image_info,
  const size_t width,const size_t height,const MagickPixelPacket *background)
{
  CacheView
    *image_view;
  ExceptionInfo
    *exception;
  Image
    *image;
  ssize_t
    y;
  MagickBooleanType
    status;
  assert(image_info != (const ImageInfo *) NULL);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image_info->signature == MagickCoreSignature);
  assert(background != (const MagickPixelPacket *) NULL);
  /*
    Adopt the background's colorspace, matte, fuzz and depth so the canvas
    matches the requested fill color.
  */
  image=AcquireImage(image_info);
  image->columns=width;
  image->rows=height;
  image->colorspace=background->colorspace;
  image->matte=background->matte;
  image->fuzz=background->fuzz;
  image->depth=background->depth;
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    IndexPacket
      *magick_restrict indexes;
    PixelPacket
      *magick_restrict q;
    ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    /*
      Flood the row with the background color.
    */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelPacket(image,background,q,indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  /*
    On failure destroy the canvas; DestroyImage() returns NULL, which is
    what the caller receives.
  */
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e f e r e n c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReferenceImage() increments the reference count associated with an image
% returning a pointer to the image.
%
% The format of the ReferenceImage method is:
%
% Image *ReferenceImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *ReferenceImage(Image *image)
{
  /*
    Bump the image reference count under the image semaphore and hand the
    same pointer back to the caller.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  LockSemaphoreInfo(image->semaphore);
  image->reference_count++;
  UnlockSemaphoreInfo(image->semaphore);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t I m a g e P a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImagePage() resets the image page canvas and position.
%
% The format of the ResetImagePage method is:
%
% MagickBooleanType ResetImagePage(Image *image,const char *page)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o page: the relative page specification.
%
*/
MagickExport MagickBooleanType ResetImagePage(Image *image,const char *page)
{
  MagickStatusType
    flags;

  RectangleInfo
    geometry;

  /*
    Apply a page geometry specification to the image canvas and position.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  flags=ParseAbsoluteGeometry(page,&geometry);
  if ((flags & WidthValue) != 0)
    {
      /*
        A width without a height implies a square canvas.
      */
      image->page.width=geometry.width;
      image->page.height=((flags & HeightValue) != 0) ? geometry.height :
        geometry.width;
    }
  if ((flags & AspectValue) == 0)
    {
      /*
        Absolute offsets; grow a zero-sized canvas to cover a positive
        offset.
      */
      if ((flags & XValue) != 0)
        {
          image->page.x=geometry.x;
          if ((image->page.width == 0) && (geometry.x > 0))
            image->page.width=image->columns+geometry.x;
        }
      if ((flags & YValue) != 0)
        {
          image->page.y=geometry.y;
          if ((image->page.height == 0) && (geometry.y > 0))
            image->page.height=image->rows+geometry.y;
        }
    }
  else
    {
      /*
        Relative (aspect) offsets: adjust the current position.
      */
      if ((flags & XValue) != 0)
        image->page.x+=geometry.x;
      if ((flags & YValue) != 0)
        image->page.y+=geometry.y;
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImagePixels() resets the image pixels, that is, all the pixel components
% are zeroed.
%
% The format of the ResetImagePixels method is:
%
% MagickBooleanType ResetImagePixels(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ResetImagePixels(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;
  const void
    *pixels;
  MagickBooleanType
    status;
  MagickSizeType
    length;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  pixels=AcquirePixelCachePixels(image,&length,exception);
  if (pixels != (void *) NULL)
    {
      /*
        Reset in-core image pixels.  The cache exposed its backing buffer
        directly, so a single memset (casting away const) zeroes all
        'length' bytes at once.
      */
      (void) memset((void *) pixels,0,(size_t) length);
      return(MagickTrue);
    }
  /*
    Reset image pixels.  No directly addressable buffer is available; zero
    the pixels (and any index channel) row by row through a cache view.
  */
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    IndexPacket
      *magick_restrict indexes;
    PixelPacket
      *magick_restrict q;
    ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      (void) memset(q,0,sizeof(PixelPacket));
      /*
        PseudoClass and CMYK images also carry an index channel.
      */
      if ((image->storage_class == PseudoClass) ||
          (image->colorspace == CMYKColorspace))
        indexes[x]=0;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e B a c k g r o u n d C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageBackgroundColor() initializes the image pixels to the image
% background color. The background color is defined by the background_color
% member of the image structure.
%
% The format of the SetImageBackgroundColor method is:
%
% MagickBooleanType SetImageBackgroundColor(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType SetImageBackgroundColor(Image *image)
{
  CacheView
    *image_view;
  ExceptionInfo
    *exception;
  IndexPacket
    index;
  MagickBooleanType
    status;
  MagickPixelPacket
    background;
  PixelPacket
    pixel;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /*
    A non-gray background in a gray colorspace requires an RGB transform;
    a non-opaque background requires an alpha channel.
  */
  if ((IsPixelGray(&image->background_color) == MagickFalse) &&
      (IsGrayColorspace(image->colorspace) != MagickFalse))
    (void) TransformImageColorspace(image,RGBColorspace);
  if ((image->background_color.opacity != OpaqueOpacity) &&
      (image->matte == MagickFalse))
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  /*
    Build the fill pixel (and index) once, up front.
  */
  GetMagickPixelPacket(image,&background);
  SetMagickPixelPacket(image,&image->background_color,(const IndexPacket *)
    NULL,&background);
  if (image->colorspace == CMYKColorspace)
    ConvertRGBToCMYK(&background);
  index=0;
  pixel.opacity=OpaqueOpacity;
  SetPixelPacket(image,&background,&pixel,&index);
  /*
    Set image background color.
  */
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelPacket
      *magick_restrict q;
    ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
      *q++=pixel;
    if (image->colorspace == CMYKColorspace)
      {
        IndexPacket
          *magick_restrict indexes;
        /*
          Also set the index channel for CMYK images.
        */
        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        for (x=0; x < (ssize_t) image->columns; x++)
          SetPixelIndex(indexes+x,index);
      }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageChannels() sets the number of pixel channels associated with the
% image.
%
% The format of the SetImageChannels method is:
%
% MagickBooleanType SetImageChannels(Image *image,const size_t channels)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channels: The number of pixel channels.
%
*/
MagickExport MagickBooleanType SetImageChannels(Image *image,
  const size_t channels)
{
  /*
    Record the number of pixel channels associated with the image.  Always
    returns MagickTrue.  Asserts added for consistency with every other
    setter in this file (the image is dereferenced unconditionally).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  image->channels=channels;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColor() set the entire image canvas to the specified color.
%
% The format of the SetImageColor method is:
%
% MagickBooleanType SetImageColor(Image *image,
% const MagickPixelPacket *color)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o background: the image color.
%
*/
MagickExport MagickBooleanType SetImageColor(Image *image,
  const MagickPixelPacket *color)
{
  CacheView
    *image_view;
  ExceptionInfo
    *exception;
  MagickBooleanType
    status;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  assert(color != (const MagickPixelPacket *) NULL);
  /*
    Adopt the fill color's colorspace, matte, fuzz and depth, then flood
    every pixel (and index) with it.
  */
  image->colorspace=color->colorspace;
  image->matte=color->matte;
  image->fuzz=color->fuzz;
  image->depth=color->depth;
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    IndexPacket
      *magick_restrict indexes;
    PixelPacket
      *magick_restrict q;
    ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelPacket(image,color,q,indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e S t o r a g e C l a s s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageStorageClass() sets the image class: DirectClass for true color
% images or PseudoClass for colormapped images.
%
% The format of the SetImageStorageClass method is:
%
% MagickBooleanType SetImageStorageClass(Image *image,
% const ClassType storage_class)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o storage_class: The image class.
%
*/
MagickExport MagickBooleanType SetImageStorageClass(Image *image,
  const ClassType storage_class)
{
  MagickBooleanType
    status;

  /*
    Record the storage class (DirectClass or PseudoClass) and resynchronize
    the pixel cache with it.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->storage_class=storage_class;
  status=SyncImagePixelCache(image,&image->exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C l i p M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageClipMask() associates a clip path with the image. The clip path
% must be the same dimensions as the image. Set any pixel component of
% the clip path to TransparentOpacity to prevent that corresponding image
% pixel component from being updated when SyncAuthenticPixels() is applied.
%
% The format of the SetImageClipMask method is:
%
% MagickBooleanType SetImageClipMask(Image *image,const Image *clip_mask)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clip_mask: the image clip path.
%
*/
MagickExport MagickBooleanType SetImageClipMask(Image *image,
  const Image *clip_mask)
{
  /*
    Replace the image clip path with a private clone of 'clip_mask', or
    clear it when 'clip_mask' is NULL.  The mask must match the image
    dimensions exactly.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if ((clip_mask != (const Image *) NULL) &&
      ((clip_mask->columns != image->columns) ||
       (clip_mask->rows != image->rows)))
    ThrowBinaryImageException(ImageError,"ImageSizeDiffers",image->filename);
  /*
    Release any previously attached clip path before installing a new one.
  */
  if (image->clip_mask != (Image *) NULL)
    image->clip_mask=DestroyImage(image->clip_mask);
  image->clip_mask=NewImageList();
  if (clip_mask == (Image *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  image->clip_mask=CloneImage(clip_mask,0,0,MagickTrue,&image->exception);
  if (image->clip_mask == (Image *) NULL)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageExtent() sets the image size (i.e. columns & rows).
%
% The format of the SetImageExtent method is:
%
% MagickBooleanType SetImageExtent(Image *image,const size_t columns,
% const size_t rows)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: The image width in pixels.
%
% o rows: The image height in pixels.
%
*/
MagickExport MagickBooleanType SetImageExtent(Image *image,const size_t columns,
  const size_t rows)
{
  /*
    Set the image dimensions and resynchronize the pixel cache.  Zero
    dimensions are rejected via the binary-exception macro (records the
    error in image->exception and returns MagickFalse).
  */
  if ((columns == 0) || (rows == 0))
    ThrowBinaryImageException(ImageError,"NegativeOrZeroImageSize",
      image->filename);
  image->columns=columns;
  image->rows=rows;
  /*
    Clamp unsupported depths into range, recording the condition in the
    image exception rather than failing outright.
  */
  if (image->depth == 0)
    {
      image->depth=8;
      (void) ThrowMagickException(&image->exception,GetMagickModule(),
        ImageError,"ImageDepthNotSupported","`%s'",image->filename);
    }
  if (image->depth > (8*sizeof(MagickSizeType)))
    {
      image->depth=8*sizeof(MagickSizeType);
      (void) ThrowMagickException(&image->exception,GetMagickModule(),
        ImageError,"ImageDepthNotSupported","`%s'",image->filename);
    }
  return(SyncImagePixelCache(image,&image->exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfo() initializes the `magick' field of the ImageInfo structure.
% It is set to a type of image format based on the prefix or suffix of the
% filename. For example, `ps:image' returns PS indicating a Postscript image.
% JPEG is returned for this filename: `image.jpg'. The filename prefix has
% precedence over the suffix. Use an optional index enclosed in brackets
% after a file name to specify a desired scene of a multi-resolution image
% format like Photo CD (e.g. img0001.pcd[4]). A True (non-zero) return value
% indicates success.
%
% The format of the SetImageInfo method is:
%
% MagickBooleanType SetImageInfo(ImageInfo *image_info,
% const unsigned int frames,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o frames: the number of images you intend to write.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageInfo(ImageInfo *image_info,
  const unsigned int frames,ExceptionInfo *exception)
{
  char
    extension[MaxTextExtent],
    filename[MaxTextExtent],
    magic[MaxTextExtent],
    *q,
    subimage[MaxTextExtent];
  const MagicInfo
    *magic_info;
  const MagickInfo
    *magick_info;
  ExceptionInfo
    *sans_exception;
  Image
    *image;
  MagickBooleanType
    status;
  const char
    *p;
  ssize_t
    count;
  unsigned char
    magick[2*MaxTextExtent];
  /*
    Look for 'image.format' in filename.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  *subimage='\0';
  GetPathComponent(image_info->filename,SubimagePath,subimage);
  if (*subimage != '\0')
    {
      /*
        Look for scene specification (e.g. img0001.pcd[4]).
      */
      if (IsSceneGeometry(subimage,MagickFalse) == MagickFalse)
        {
          if (IsGeometry(subimage) != MagickFalse)
            (void) CloneString(&image_info->extract,subimage);
        }
      else
        {
          size_t
            first,
            last;
          /*
            Parse a comma/range scene list (e.g. "2,4-7"), tracking the
            lowest ('scene') and highest ('number_scenes') scene seen.
          */
          (void) CloneString(&image_info->scenes,subimage);
          image_info->scene=StringToUnsignedLong(image_info->scenes);
          image_info->number_scenes=image_info->scene;
          p=image_info->scenes;
          for (q=(char *) image_info->scenes; *q != '\0'; p++)
          {
            while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ','))
              p++;
            first=(size_t) strtol(p,&q,10);
            last=first;
            while (isspace((int) ((unsigned char) *q)) != 0)
              q++;
            if (*q == '-')
              last=(size_t) strtol(q+1,&q,10);
            if (first > last)
              Swap(first,last);
            if (first < image_info->scene)
              image_info->scene=first;
            if (last > image_info->number_scenes)
              image_info->number_scenes=last;
            p=q;
          }
          /*
            Convert the highest scene into a count relative to 'scene'.
          */
          image_info->number_scenes-=image_info->scene-1;
          image_info->subimage=image_info->scene;
          image_info->subrange=image_info->number_scenes;
        }
    }
  *extension='\0';
  if (*image_info->magick == '\0')
    GetPathComponent(image_info->filename,ExtensionPath,extension);
  if (*extension != '\0')
    {
      char
        path[MaxTextExtent];
      /*
        Base path sans any compression extension.
      */
      GetPathComponent(image_info->filename,BasePathSansCompressExtension,path);
      GetPathComponent(path,ExtensionPath,extension);
    }
  image_info->affirm=MagickFalse;
  sans_exception=AcquireExceptionInfo();
  if ((*extension != '\0') && (IsGlob(extension) == MagickFalse))
    {
      MagickFormatType
        format_type;
      ssize_t
        i;
      static const char
        *format_type_formats[] =
        {
          "AUTOTRACE",
          "BROWSE",
          "DCRAW",
          "EDIT",
          "LAUNCH",
          "MPEG:DECODE",
          "MPEG:ENCODE",
          "PRINT",
          "PS:ALPHA",
          "PS:CMYK",
          "PS:COLOR",
          "PS:GRAY",
          "PS:MONO",
          "SCAN",
          "SHOW",
          "WIN",
          (char *) NULL
        };
      /*
        User specified image format.
      */
      (void) CopyMagickString(magic,extension,MaxTextExtent);
      LocaleUpper(magic);
      /*
        Look for explicit image formats.
      */
      format_type=UndefinedFormatType;
      i=0;
      while ((format_type == UndefinedFormatType) &&
             (format_type_formats[i] != (char *) NULL))
      {
        if ((*magic == *format_type_formats[i]) &&
            (LocaleCompare(magic,format_type_formats[i]) == 0))
          format_type=ExplicitFormatType;
        i++;
      }
      magick_info=GetMagickInfo(magic,sans_exception);
      if ((magick_info != (const MagickInfo *) NULL) &&
          (magick_info->format_type != UndefinedFormatType))
        format_type=magick_info->format_type;
      if (format_type == UndefinedFormatType)
        (void) CopyMagickString(image_info->magick,magic,MaxTextExtent);
      else
        if (format_type == ExplicitFormatType)
          {
            image_info->affirm=MagickTrue;
            (void) CopyMagickString(image_info->magick,magic,MaxTextExtent);
          }
      if (LocaleCompare(magic,"RGB") == 0)
        image_info->affirm=MagickFalse;  /* maybe SGI disguised as RGB */
    }
  /*
    Look for explicit 'format:image' in filename.
  */
  *magic='\0';
  GetPathComponent(image_info->filename,MagickPath,magic);
  if (*magic == '\0')
    {
      /*
        No explicit prefix: keep the current magick and canonicalize the
        filename.
      */
      (void) CopyMagickString(magic,image_info->magick,MaxTextExtent);
      magick_info=GetMagickInfo(magic,sans_exception);
      if (frames == 0)
        GetPathComponent(image_info->filename,CanonicalPath,filename);
      else
        GetPathComponent(image_info->filename,SubcanonicalPath,filename);
      (void) CopyMagickString(image_info->filename,filename,MaxTextExtent);
    }
  else
    {
      const DelegateInfo
        *delegate_info;
      /*
        User specified image format.
      */
      LocaleUpper(magic);
      magick_info=GetMagickInfo(magic,sans_exception);
      delegate_info=GetDelegateInfo(magic,"*",sans_exception);
      if (delegate_info == (const DelegateInfo *) NULL)
        delegate_info=GetDelegateInfo("*",magic,sans_exception);
      if (((magick_info != (const MagickInfo *) NULL) ||
           (delegate_info != (const DelegateInfo *) NULL)) &&
          (IsMagickConflict(magic) == MagickFalse))
        {
          image_info->affirm=MagickTrue;
          (void) CopyMagickString(image_info->magick,magic,MaxTextExtent);
          GetPathComponent(image_info->filename,CanonicalPath,filename);
          (void) CopyMagickString(image_info->filename,filename,MaxTextExtent);
        }
    }
  sans_exception=DestroyExceptionInfo(sans_exception);
  if ((magick_info == (const MagickInfo *) NULL) ||
      (GetMagickEndianSupport(magick_info) == MagickFalse))
    image_info->endian=UndefinedEndian;
  if ((image_info->adjoin != MagickFalse) && (frames > 1))
    {
      /*
        Test for multiple image support (e.g. image%02d.png).
      */
      (void) InterpretImageFilename(image_info,(Image *) NULL,
        image_info->filename,(int) image_info->scene,filename);
      if ((LocaleCompare(filename,image_info->filename) != 0) &&
          (strchr(filename,'%') == (char *) NULL))
        image_info->adjoin=MagickFalse;
    }
  if ((image_info->adjoin != MagickFalse) && (frames > 0))
    {
      /*
        Some image formats do not support multiple frames per file.
      */
      magick_info=GetMagickInfo(magic,exception);
      if (magick_info != (const MagickInfo *) NULL)
        if (GetMagickAdjoin(magick_info) == MagickFalse)
          image_info->adjoin=MagickFalse;
    }
  if (image_info->affirm != MagickFalse)
    return(MagickTrue);
  if (frames == 0)
    {
      /*
        Determine the image format from the first few bytes of the file.
      */
      image=AcquireImage(image_info);
      (void) CopyMagickString(image->filename,image_info->filename,
        MaxTextExtent);
      status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
      if (status == MagickFalse)
        {
          image=DestroyImage(image);
          return(MagickFalse);
        }
      if ((IsBlobSeekable(image) == MagickFalse) ||
          (IsBlobExempt(image) != MagickFalse))
        {
          /*
            Copy image to a seekable temporary file.
          */
          *filename='\0';
          status=ImageToFile(image,filename,exception);
          (void) CloseBlob(image);
          if (status == MagickFalse)
            {
              (void) RelinquishUniqueFileResource(filename);
              image=DestroyImage(image);
              return(MagickFalse);
            }
          SetImageInfoFile(image_info,(FILE *) NULL);
          (void) CopyMagickString(image->filename,filename,MaxTextExtent);
          status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
          if (status == MagickFalse)
            {
              (void) RelinquishUniqueFileResource(filename);
              image=DestroyImage(image);
              return(MagickFalse);
            }
          (void) CopyMagickString(image_info->filename,filename,MaxTextExtent);
          image_info->temporary=MagickTrue;
        }
      /*
        Peek at the leading bytes, then rewind and close the blob.
      */
      (void) memset(magick,0,sizeof(magick));
      count=ReadBlob(image,2*MaxTextExtent,magick);
      (void) SeekBlob(image,-((MagickOffsetType) count),SEEK_CUR);
      (void) CloseBlob(image);
      image=DestroyImage(image);
      /*
        Check magic.xml configuration file.
      */
      sans_exception=AcquireExceptionInfo();
      magic_info=GetMagicInfo(magick,(size_t) count,sans_exception);
      if ((magic_info != (const MagicInfo *) NULL) &&
          (GetMagicName(magic_info) != (char *) NULL))
        {
          /*
            Magic signature matched: adopt its format name.
          */
          (void) CopyMagickString(image_info->magick,GetMagicName(magic_info),
            MaxTextExtent);
          magick_info=GetMagickInfo(image_info->magick,sans_exception);
          if ((magick_info == (const MagickInfo *) NULL) ||
              (GetMagickEndianSupport(magick_info) == MagickFalse))
            image_info->endian=UndefinedEndian;
          sans_exception=DestroyExceptionInfo(sans_exception);
          return(MagickTrue);
        }
      magick_info=GetMagickInfo(image_info->magick,sans_exception);
      if ((magick_info == (const MagickInfo *) NULL) ||
          (GetMagickEndianSupport(magick_info) == MagickFalse))
        image_info->endian=UndefinedEndian;
      sans_exception=DestroyExceptionInfo(sans_exception);
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o B l o b %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoBlob() sets the image info blob member.
%
% The format of the SetImageInfoBlob method is:
%
% void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
% const size_t length)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o blob: the blob.
%
% o length: the blob length.
%
*/
MagickExport void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
  const size_t length)
{
  /*
    Attach a caller-supplied blob and its length to the image info.  The
    blob is not copied; the caller retains ownership.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  image_info->length=length;
  image_info->blob=(void *) blob;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoFile() sets the image info file member.
%
% The format of the SetImageInfoFile method is:
%
% void SetImageInfoFile(ImageInfo *image_info,FILE *file)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o file: the file.
%
*/
MagickExport void SetImageInfoFile(ImageInfo *image_info,FILE *file)
{
  /*
    Record an already-open stdio stream on the image info.  The stream is
    not closed here; the caller retains ownership.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  image_info->file=file;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageMask() associates a mask with the image. The mask must be the same
% dimensions as the image.
%
% The format of the SetImageMask method is:
%
% MagickBooleanType SetImageMask(Image *image,const Image *mask)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mask: the image mask.
%
*/
MagickExport MagickBooleanType SetImageMask(Image *image,const Image *mask)
{
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /*
    A mask must match the image geometry exactly.
  */
  if (mask != (const Image *) NULL)
    if ((mask->columns != image->columns) || (mask->rows != image->rows))
      ThrowBinaryImageException(ImageError,"ImageSizeDiffers",image->filename);
  /*
    Release any previously installed mask before replacing it.
  */
  if (image->mask != (Image *) NULL)
    image->mask=DestroyImage(image->mask);
  image->mask=NewImageList();
  if (mask == (Image *) NULL)
    return(MagickTrue);  /* NULL mask simply clears the association */
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /* The image keeps its own deep copy of the mask. */
  image->mask=CloneImage(mask,0,0,MagickTrue,&image->exception);
  if (image->mask == (Image *) NULL)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e O p a c i t y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageOpacity() sets the opacity levels of the image.
%
% The format of the SetImageOpacity method is:
%
% MagickBooleanType SetImageOpacity(Image *image,const Quantum opacity)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o opacity: the level of transparency: 0 is fully opaque and QuantumRange is
% fully transparent.
%
*/
MagickExport MagickBooleanType SetImageOpacity(Image *image,
  const Quantum opacity)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Assign the same opacity value to every pixel, row by row (rows are
    processed in parallel when OpenMP is available).
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  image->matte=MagickTrue;  /* the image now carries an opacity channel */
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelPacket
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelOpacity(q,opacity);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageVirtualPixelMethod() sets the "virtual pixels" method for the
% image and returns the previous setting. A virtual pixel is any pixel access
% that is outside the boundaries of the image cache.
%
% The format of the SetImageVirtualPixelMethod() method is:
%
% VirtualPixelMethod SetImageVirtualPixelMethod(const Image *image,
% const VirtualPixelMethod virtual_pixel_method)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: choose the type of virtual pixel.
%
*/
MagickExport VirtualPixelMethod SetImageVirtualPixelMethod(const Image *image,
  const VirtualPixelMethod virtual_pixel_method)
{
  /*
    Thin wrapper: validate the image, then delegate to the pixel cache,
    which returns the previous virtual-pixel method.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  return(SetPixelCacheVirtualMethod(image,virtual_pixel_method));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S m u s h I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SmushImages() takes all images from the current image pointer to the end
% of the image list and smushes them to each other top-to-bottom if the
% stack parameter is true, otherwise left-to-right.
%
% The current gravity setting now effects how the image is justified in the
% final image.
%
% The format of the SmushImages method is:
%
% Image *SmushImages(const Image *images,const MagickBooleanType stack,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o stack: A value other than 0 stacks the images top-to-bottom.
%
% o offset: minimum distance in pixels between images.
%
% o exception: return any errors or warnings in this structure.
%
*/
static ssize_t SmushXGap(const Image *smush_image,const Image *images,
  const ssize_t offset,ExceptionInfo *exception)
{
  CacheView
    *left_view,
    *right_view;

  const Image
    *left_image,
    *right_image;

  RectangleInfo
    left_geometry,
    right_geometry;

  const PixelPacket
    *p;

  ssize_t
    i,
    y;

  size_t
    gap;

  ssize_t
    x;

  /*
    Measure how far the current image can slide left into its predecessor:
    for each row, count the transparent run at the previous image's right
    edge plus the transparent run at this image's left edge, and take the
    minimum over all rows.  The result is reduced by the requested offset.
  */
  if (images->previous == (Image *) NULL)
    return(0);  /* first image in the list: nothing to overlap with */
  right_image=images;
  SetGeometry(smush_image,&right_geometry);
  GravityAdjustGeometry(right_image->columns,right_image->rows,
    right_image->gravity,&right_geometry);
  left_image=images->previous;
  SetGeometry(smush_image,&left_geometry);
  GravityAdjustGeometry(left_image->columns,left_image->rows,
    left_image->gravity,&left_geometry);
  gap=right_image->columns;  /* upper bound for the measured gap */
  left_view=AcquireVirtualCacheView(left_image,exception);
  right_view=AcquireVirtualCacheView(right_image,exception);
  for (y=0; y < (ssize_t) smush_image->rows; y++)
  {
    /* Scan inward from the left image's right edge while transparent. */
    for (x=(ssize_t) left_image->columns-1; x > 0; x--)
    {
      p=GetCacheViewVirtualPixels(left_view,x,left_geometry.y+y,1,1,exception);
      if ((p == (const PixelPacket *) NULL) ||
          (GetPixelOpacity(p) != TransparentOpacity) ||
          ((left_image->columns-x-1) >= gap))
        break;
    }
    i=(ssize_t) left_image->columns-x-1;  /* transparent run on the left side */
    /* Scan inward from the right image's left edge; stop once the combined
       run can no longer shrink the current minimum. */
    for (x=0; x < (ssize_t) right_image->columns; x++)
    {
      p=GetCacheViewVirtualPixels(right_view,x,right_geometry.y+y,1,1,
        exception);
      if ((p == (const PixelPacket *) NULL) ||
          (GetPixelOpacity(p) != TransparentOpacity) ||
          ((x+i) >= (ssize_t) gap))
        break;
    }
    if ((x+i) < (ssize_t) gap)
      gap=(size_t) (x+i);  /* gap is the minimum combined run over all rows */
  }
  right_view=DestroyCacheView(right_view);
  left_view=DestroyCacheView(left_view);
  if (y < (ssize_t) smush_image->rows)
    return(offset);
  return((ssize_t) gap-offset);
}
static ssize_t SmushYGap(const Image *smush_image,const Image *images,
  const ssize_t offset,ExceptionInfo *exception)
{
  CacheView
    *bottom_view,
    *top_view;

  const Image
    *bottom_image,
    *top_image;

  RectangleInfo
    bottom_geometry,
    top_geometry;

  const PixelPacket
    *p;

  ssize_t
    i,
    x;

  size_t
    gap;

  ssize_t
    y;

  /*
    Vertical analogue of SmushXGap: for each column, measure the transparent
    run at the previous image's bottom edge plus the run at this image's top
    edge, minimized over all columns, then reduced by the requested offset.
  */
  if (images->previous == (Image *) NULL)
    return(0);  /* first image in the list: nothing to overlap with */
  bottom_image=images;
  SetGeometry(smush_image,&bottom_geometry);
  GravityAdjustGeometry(bottom_image->columns,bottom_image->rows,
    bottom_image->gravity,&bottom_geometry);
  top_image=images->previous;
  SetGeometry(smush_image,&top_geometry);
  GravityAdjustGeometry(top_image->columns,top_image->rows,top_image->gravity,
    &top_geometry);
  gap=bottom_image->rows;  /* upper bound for the measured gap */
  top_view=AcquireVirtualCacheView(top_image,exception);
  bottom_view=AcquireVirtualCacheView(bottom_image,exception);
  for (x=0; x < (ssize_t) smush_image->columns; x++)
  {
    /* Scan upward from the top image's bottom edge while transparent. */
    for (y=(ssize_t) top_image->rows-1; y > 0; y--)
    {
      p=GetCacheViewVirtualPixels(top_view,top_geometry.x+x,y,1,1,exception);
      if ((p == (const PixelPacket *) NULL) ||
          (GetPixelOpacity(p) != TransparentOpacity) ||
          ((top_image->rows-y-1) >= gap))
        break;
    }
    i=(ssize_t) top_image->rows-y-1;  /* transparent run on the top side */
    /* Scan downward from the bottom image's top edge; stop once the combined
       run can no longer shrink the current minimum. */
    for (y=0; y < (ssize_t) bottom_image->rows; y++)
    {
      p=GetCacheViewVirtualPixels(bottom_view,bottom_geometry.x+x,y,1,1,
        exception);
      if ((p == (const PixelPacket *) NULL) ||
          (GetPixelOpacity(p) != TransparentOpacity) ||
          ((y+i) >= (ssize_t) gap))
        break;
    }
    if ((y+i) < (ssize_t) gap)
      gap=(size_t) (y+i);  /* gap is the minimum combined run over all columns */
  }
  bottom_view=DestroyCacheView(bottom_view);
  top_view=DestroyCacheView(top_view);
  if (x < (ssize_t) smush_image->columns)
    return(offset);
  return((ssize_t) gap-offset);
}
MagickExport Image *SmushImages(const Image *images,
  const MagickBooleanType stack,const ssize_t offset,ExceptionInfo *exception)
{
#define SmushImageTag "Smush/Image"

  CacheView
    *smush_view;

  const Image
    *image;

  Image
    *smush_image;

  MagickBooleanType
    matte,
    proceed,
    status;

  MagickOffsetType
    n;

  RectangleInfo
    geometry;

  const Image
    *next;

  size_t
    height,
    number_images,
    width;

  ssize_t
    x_offset,
    y_offset;

  /*
    Compute maximum area of smushed area.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=images;
  matte=image->matte;
  number_images=1;
  width=image->columns;
  height=image->rows;
  next=GetNextImageInList(image);
  /* First pass: accumulate the worst-case canvas size (before gap removal). */
  for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    if (next->matte != MagickFalse)
      matte=MagickTrue;  /* any image with alpha makes the result carry alpha */
    number_images++;
    if (stack != MagickFalse)
      {
        /* top-to-bottom: widths max, heights (plus offsets) sum */
        if (next->columns > width)
          width=next->columns;
        height+=next->rows;
        if (next->previous != (Image *) NULL)
          height+=offset;
        continue;
      }
    /* left-to-right: widths (plus offsets) sum, heights max */
    width+=next->columns;
    if (next->previous != (Image *) NULL)
      width+=offset;
    if (next->rows > height)
      height=next->rows;
  }
  /*
    Smush images.
  */
  smush_image=CloneImage(image,width,height,MagickTrue,exception);
  if (smush_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(smush_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&smush_image->exception);
      smush_image=DestroyImage(smush_image);
      return((Image *) NULL);
    }
  smush_image->matte=matte;
  (void) SetImageBackgroundColor(smush_image);
  status=MagickTrue;
  x_offset=0;
  y_offset=0;
  smush_view=AcquireVirtualCacheView(smush_image,exception);
  /* Second pass: composite each image, pulled back by its measured gap. */
  for (n=0; n < (MagickOffsetType) number_images; n++)
  {
    SetGeometry(smush_image,&geometry);
    GravityAdjustGeometry(image->columns,image->rows,image->gravity,&geometry);
    if (stack != MagickFalse)
      {
        x_offset-=geometry.x;
        y_offset-=SmushYGap(smush_image,image,offset,exception);
      }
    else
      {
        x_offset-=SmushXGap(smush_image,image,offset,exception);
        y_offset-=geometry.y;
      }
    /* NOTE(review): status is overwritten each iteration, so only the last
       composite's result is checked at the end — confirm this is intended. */
    status=CompositeImage(smush_image,OverCompositeOp,image,x_offset,y_offset);
    proceed=SetImageProgress(image,SmushImageTag,n,number_images);
    if (proceed == MagickFalse)
      break;
    if (stack == MagickFalse)
      {
        x_offset+=(ssize_t) image->columns;
        y_offset=0;
      }
    else
      {
        x_offset=0;
        y_offset+=(ssize_t) image->rows;
      }
    image=GetNextImageInList(image);
  }
  /* Trim the canvas to the actual extent reached after gap removal. */
  if (stack == MagickFalse)
    smush_image->columns=(size_t) x_offset;
  else
    smush_image->rows=(size_t) y_offset;
  smush_view=DestroyCacheView(smush_view);
  if (status == MagickFalse)
    smush_image=DestroyImage(smush_image);
  return(smush_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t r i p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StripImage() strips an image of all profiles and comments.
%
% The format of the StripImage method is:
%
% MagickBooleanType StripImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType StripImage(Image *image)
{
  MagickBooleanType
    status;

  /*
    Remove every embedded profile plus the comment and timestamp
    properties, and request that the PNG encoder omit its metadata
    chunks as well.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  DestroyImageProfiles(image);
  (void) DeleteImageProperty(image,"date:modify");
  (void) DeleteImageProperty(image,"date:create");
  (void) DeleteImageProperty(image,"comment");
  status=SetImageArtifact(image,"png:exclude-chunk",
    "bKGD,caNv,cHRM,eXIf,gAMA,iCCP,iTXt,pHYs,sRGB,tEXt,zCCP,zTXt,date");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImage() initializes the red, green, and blue intensities of each pixel
% as defined by the colormap index.
%
% The format of the SyncImage method is:
%
% MagickBooleanType SyncImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static inline IndexPacket PushColormapIndex(Image *image,
  const size_t index,MagickBooleanType *range_exception)
{
  /*
    Validate a colormap index: an out-of-range index raises the caller's
    range flag and is clamped to entry 0; a valid index passes through.
  */
  if (index >= image->colors)
    {
      *range_exception=MagickTrue;
      return((IndexPacket) 0);
    }
  return((IndexPacket) index);
}
MagickExport MagickBooleanType SyncImage(Image *image)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    range_exception,
    status,
    taint;

  ssize_t
    y;

  /*
    Expand each pixel's colormap index into explicit RGB(A) values, rows
    processed in parallel when OpenMP is available.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (image->ping != MagickFalse)
    return(MagickTrue);  /* ping images carry no pixel data to sync */
  if (image->storage_class != PseudoClass)
    return(MagickFalse);  /* only colormapped images have indexes to expand */
  assert(image->colormap != (PixelPacket *) NULL);
  range_exception=MagickFalse;
  status=MagickTrue;
  taint=image->taint;  /* preserved below: syncing is not a user edit */
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(range_exception,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    IndexPacket
      index;

    IndexPacket
      *magick_restrict indexes;

    PixelPacket
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      index=PushColormapIndex(image,(size_t) GetPixelIndex(indexes+x),
        &range_exception);
      /* NOTE(review): no ';' after SetPixelRgb — it appears to be a macro
         whose expansion supplies the statement structure; confirm against
         pixel-accessor definitions before reformatting. */
      if (image->matte == MagickFalse)
        SetPixelRgb(q,image->colormap+(ssize_t) index)
      else
        SetPixelRGBO(q,image->colormap+(ssize_t) index);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  image->taint=taint;
  /* Report (as a warning, not a hard failure) any out-of-range index seen. */
  if ((image->ping == MagickFalse) && (range_exception != MagickFalse))
    (void) ThrowMagickException(&image->exception,GetMagickModule(),
      CorruptImageWarning,"InvalidColormapIndex","`%s'",image->filename);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c I m a g e S e t t i n g s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImageSettings() syncs image_info options into per-image attributes.
%
% The format of the SyncImageSettings method is:
%
% MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
% Image *image)
% MagickBooleanType SyncImagesSettings(const ImageInfo *image_info,
% Image *image)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o image: the image.
%
*/
MagickExport MagickBooleanType SyncImagesSettings(ImageInfo *image_info,
  Image *images)
{
  Image
    *image;

  /*
    Propagate the image-info options onto every image in the list, then
    delete the one-shot "page" option so it cannot leak into later reads.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  for (image=images; image != (Image *) NULL; image=GetNextImageInList(image))
    (void) SyncImageSettings(image_info,image);
  (void) DeleteImageOption(image_info,"page");
  return(MagickTrue);
}
MagickExport MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
  Image *image)
{
  char
    property[MaxTextExtent];

  const char
    *option,
    *value;

  GeometryInfo
    geometry_info;

  MagickStatusType
    flags;

  ResolutionType
    units;

  /*
    Sync image options.  Each recognized option in image_info, if present,
    is parsed and copied onto the corresponding per-image attribute; any
    remaining options are copied verbatim as image artifacts at the end.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  option=GetImageOption(image_info,"background");
  if (option != (const char *) NULL)
    (void) QueryColorDatabase(option,&image->background_color,
      &image->exception);
  option=GetImageOption(image_info,"bias");
  if (option != (const char *) NULL)
    image->bias=StringToDoubleInterval(option,(double) QuantumRange+1.0);
  option=GetImageOption(image_info,"black-point-compensation");
  if (option != (const char *) NULL)
    image->black_point_compensation=(MagickBooleanType) ParseCommandOption(
      MagickBooleanOptions,MagickFalse,option);
  option=GetImageOption(image_info,"blue-primary");
  if (option != (const char *) NULL)
    {
      /* "rho[,sigma]": y defaults to x when only one value is given */
      flags=ParseGeometry(option,&geometry_info);
      if ((flags & RhoValue) != 0)
        image->chromaticity.blue_primary.x=geometry_info.rho;
      image->chromaticity.blue_primary.y=image->chromaticity.blue_primary.x;
      if ((flags & SigmaValue) != 0)
        image->chromaticity.blue_primary.y=geometry_info.sigma;
    }
  option=GetImageOption(image_info,"bordercolor");
  if (option != (const char *) NULL)
    (void) QueryColorDatabase(option,&image->border_color,&image->exception);
  option=GetImageOption(image_info,"colors");
  if (option != (const char *) NULL)
    image->colors=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"compose");
  if (option != (const char *) NULL)
    image->compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"compress");
  if (option != (const char *) NULL)
    image->compression=(CompressionType) ParseCommandOption(
      MagickCompressOptions,MagickFalse,option);
  option=GetImageOption(image_info,"debug");
  if (option != (const char *) NULL)
    image->debug=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"density");
  if (option != (const char *) NULL)
    {
      GeometryInfo
        geometry_info;

      /*
        Set image density.  (This inner geometry_info intentionally shadows
        the outer one; only the outer 'flags' is reused.)
      */
      flags=ParseGeometry(option,&geometry_info);
      if ((flags & RhoValue) != 0)
        image->x_resolution=geometry_info.rho;
      image->y_resolution=image->x_resolution;
      if ((flags & SigmaValue) != 0)
        image->y_resolution=geometry_info.sigma;
    }
  option=GetImageOption(image_info,"depth");
  if (option != (const char *) NULL)
    image->depth=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"endian");
  if (option != (const char *) NULL)
    image->endian=(EndianType) ParseCommandOption(MagickEndianOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"filter");
  if (option != (const char *) NULL)
    image->filter=(FilterTypes) ParseCommandOption(MagickFilterOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"fuzz");
  if (option != (const char *) NULL)
    image->fuzz=StringToDoubleInterval(option,(double) QuantumRange+1.0);
  option=GetImageOption(image_info,"gravity");
  if (option != (const char *) NULL)
    image->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"green-primary");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      if ((flags & RhoValue) != 0)
        image->chromaticity.green_primary.x=geometry_info.rho;
      image->chromaticity.green_primary.y=image->chromaticity.green_primary.x;
      if ((flags & SigmaValue) != 0)
        image->chromaticity.green_primary.y=geometry_info.sigma;
    }
  option=GetImageOption(image_info,"intensity");
  if (option != (const char *) NULL)
    image->intensity=(PixelIntensityMethod) ParseCommandOption(
      MagickPixelIntensityOptions,MagickFalse,option);
  option=GetImageOption(image_info,"intent");
  if (option != (const char *) NULL)
    image->rendering_intent=(RenderingIntent) ParseCommandOption(
      MagickIntentOptions,MagickFalse,option);
  option=GetImageOption(image_info,"interlace");
  if (option != (const char *) NULL)
    image->interlace=(InterlaceType) ParseCommandOption(MagickInterlaceOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"interpolate");
  if (option != (const char *) NULL)
    image->interpolate=(InterpolatePixelMethod) ParseCommandOption(
      MagickInterpolateOptions,MagickFalse,option);
  option=GetImageOption(image_info,"loop");
  if (option != (const char *) NULL)
    image->iterations=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"mattecolor");
  if (option != (const char *) NULL)
    (void) QueryColorDatabase(option,&image->matte_color,&image->exception);
  option=GetImageOption(image_info,"orient");
  if (option != (const char *) NULL)
    image->orientation=(OrientationType) ParseCommandOption(
      MagickOrientationOptions,MagickFalse,option);
  option=GetImageOption(image_info,"page");
  if (option != (const char *) NULL)
    {
      char
        *geometry;

      geometry=GetPageGeometry(option);
      flags=ParseAbsoluteGeometry(geometry,&image->page);
      geometry=DestroyString(geometry);
    }
  option=GetImageOption(image_info,"quality");
  if (option != (const char *) NULL)
    image->quality=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"red-primary");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      if ((flags & RhoValue) != 0)
        image->chromaticity.red_primary.x=geometry_info.rho;
      image->chromaticity.red_primary.y=image->chromaticity.red_primary.x;
      if ((flags & SigmaValue) != 0)
        image->chromaticity.red_primary.y=geometry_info.sigma;
    }
  /* A quality set directly on image_info overrides the "quality" option. */
  if (image_info->quality != UndefinedCompressionQuality)
    image->quality=image_info->quality;
  option=GetImageOption(image_info,"scene");
  if (option != (const char *) NULL)
    image->scene=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"taint");
  if (option != (const char *) NULL)
    image->taint=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"tile-offset");
  if (option != (const char *) NULL)
    {
      char
        *geometry;

      geometry=GetPageGeometry(option);
      flags=ParseAbsoluteGeometry(geometry,&image->tile_offset);
      geometry=DestroyString(geometry);
    }
  option=GetImageOption(image_info,"transparent-color");
  if (option != (const char *) NULL)
    (void) QueryColorDatabase(option,&image->transparent_color,
      &image->exception);
  option=GetImageOption(image_info,"type");
  if (option != (const char *) NULL)
    image->type=(ImageType) ParseCommandOption(MagickTypeOptions,MagickFalse,
      option);
  option=GetImageOption(image_info,"units");
  units=image_info->units;
  if (option != (const char *) NULL)
    units=(ResolutionType) ParseCommandOption(MagickResolutionOptions,
      MagickFalse,option);
  if (units != UndefinedResolution)
    {
      /* Convert the stored resolution when the unit system changes. */
      if (image->units != units)
        switch (image->units)
        {
          case PixelsPerInchResolution:
          {
            if (units == PixelsPerCentimeterResolution)
              {
                image->x_resolution/=2.54;
                image->y_resolution/=2.54;
              }
            break;
          }
          case PixelsPerCentimeterResolution:
          {
            if (units == PixelsPerInchResolution)
              {
                /* rounded to two decimal places */
                image->x_resolution=(double) ((size_t) (100.0*2.54*
                  image->x_resolution+0.5))/100.0;
                image->y_resolution=(double) ((size_t) (100.0*2.54*
                  image->y_resolution+0.5))/100.0;
              }
            break;
          }
          default:
            break;
        }
      image->units=units;
      /* An explicit density is re-applied in the new unit system. */
      option=GetImageOption(image_info,"density");
      if (option != (const char *) NULL)
        {
          flags=ParseGeometry(option,&geometry_info);
          if ((flags & RhoValue) != 0)
            image->x_resolution=geometry_info.rho;
          image->y_resolution=image->x_resolution;
          if ((flags & SigmaValue) != 0)
            image->y_resolution=geometry_info.sigma;
        }
    }
  option=GetImageOption(image_info,"white-point");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      if ((flags & RhoValue) != 0)
        image->chromaticity.white_point.x=geometry_info.rho;
      image->chromaticity.white_point.y=image->chromaticity.white_point.x;
      if ((flags & SigmaValue) != 0)
        image->chromaticity.white_point.y=geometry_info.sigma;
    }
  /* Mirror every remaining option onto the image as an artifact. */
  ResetImageOptionIterator(image_info);
  for (option=GetNextImageOption(image_info); option != (const char *) NULL; )
  {
    value=GetImageOption(image_info,option);
    if (value != (const char *) NULL)
      {
        (void) FormatLocaleString(property,MaxTextExtent,"%s",option);
        (void) SetImageArtifact(image,property,value);
      }
    option=GetNextImageOption(image_info);
  }
  return(MagickTrue);
}
|
DataMatrix.h | #ifndef K_CV_DATAMATRIX_H
#define K_CV_DATAMATRIX_H
#include <algorithm>
#include <functional>
#include <iostream>
#include <vector>

#include "../Assertions.h"
namespace K {
/**
* matrix-like 2D data store
*/
template <typename T> class DataMatrix {

public:

	/** NOTE(review): declared but never defined in this header; any ODR-use
	 *  will fail to link unless a definition exists elsewhere — confirm. */
	static T scalar;

protected:

	/** the underlying data, row-major: element (x,y) lives at data[x + y*width] */
	std::vector<T> data;

	/** the data's width */
	int width;

	/** the data's height */
	int height;

public:

	/** empty ctor: a 0x0 matrix */
	DataMatrix() : width(0), height(0) {
		;
	}

	/** ctor with data: copies width*height elements from the given buffer */
	DataMatrix(const T* data, const int width, const int height) : width(width), height(height) {
		const int numElems = width*height;
		this->data.reserve(numElems);	// one allocation instead of growth during insert
		this->data.insert(this->data.end(), data, data+numElems);
	}

	/** ctor without data: width*height value-initialized elements */
	DataMatrix(const int width, const int height) : width(width), height(height) {
		const int numElems = width*height;
		data.resize(numElems);
	}

	/** get the number of elements */
	size_t getSizeElements() const {
		return data.size();
	}

	/** get the data-size in bytes */
	size_t getSizeBytes() const {
		return data.size() * sizeof(T);
	}

	/** iterators over all elements in row-major order */
	decltype(data.begin()) begin() {return data.begin();}
	decltype(data.end()) end() {return data.end();}

	/** constant array access (bounds-checked; returns a copy) */
	T operator () (const int x, const int y) const {return get(x,y);}

	/** get the image's width */
	inline int getWidth() const {return width;}

	/** get the image's height */
	inline int getHeight() const {return height;}

	/** get the underlying data array */
	T* getData() {return data.data();}

	/** get the underlying data array (read-only) */
	inline const T* getData() const {return data.data();}

	/** get the value at (x,y); asserts both coordinates are in range */
	inline T get(const int x, const int y) const {
		_assertBetween(x, 0, getWidth()-1, "x out of bounds: " + std::to_string(x));
		_assertBetween(y, 0, getHeight()-1, "y out of bounds: " + std::to_string(y));
		return data[x + y*width];
	}

	/** get a const reference to the value at (x,y); asserts coordinates are in range */
	inline const T& getConstRef(const int x, const int y) const {
		_assertBetween(x, 0, getWidth()-1, "x out of bounds: " + std::to_string(x));
		_assertBetween(y, 0, getHeight()-1, "y out of bounds: " + std::to_string(y));
		return data[x + y*width];
	}

	/** get a mutable reference to the value at (x,y); asserts coordinates are in range */
	inline T& get(const int x, const int y) {
		_assertBetween(x, 0, getWidth()-1, "x out of bounds: " + std::to_string(x));
		_assertBetween(y, 0, getHeight()-1, "y out of bounds: " + std::to_string(y));
		return data[x + y*width];
	}

	/** set the value at (x,y); asserts coordinates are in range */
	inline void set(const int x, const int y, const T v) {
		_assertBetween(x, 0, getWidth()-1, "x out of bounds: " + std::to_string(x));
		_assertBetween(y, 0, getHeight()-1, "y out of bounds: " + std::to_string(y));
		data[x + y*width] = v;
	}

	/** is the given location contained within the matrix? */
	inline bool isWithin(const int x, const int y) const {
		return (x >= 0) && (y >= 0) && (x < width) && (y < height);
	}

	/** set all entries to the given value */
	void setAll(const T v) {
		std::fill(data.begin(), data.end(), v);
	}

	/** debug output: one tab-separated line per row */
	friend std::ostream& operator << (std::ostream& out, const DataMatrix& m) {
		for (int y = 0; y < m.getHeight(); ++y) {
			for (int x = 0; x < m.getWidth(); ++x) {
				out << m.get(x,y) << '\t';
			}
			out << std::endl;
		}
		return out;
	}

	/** true if both matrices have the same dimensions and identical entries */
	bool operator == (const DataMatrix& o) const {
		if (getWidth() != o.getWidth()) {return false;}
		if (getHeight() != o.getHeight()) {return false;}
		const int numElems = width*height;
		for (int i = 0; i < numElems; ++i) {
			if (data[i] != o.data[i]) {return false;}
		}
		return true;
	}

	/** call the given function with (x, y, value) for each element.
	 *  NOTE(review): each row's columns run under "#pragma omp parallel for",
	 *  so exec may be invoked concurrently — confirm callers pass
	 *  thread-safe functions. */
	void forEach(std::function<void(const int, const int, const T)> exec) const {
		// run function for each element
		for (int y = 0; y < height; ++y) {
			#pragma omp parallel for
			for (int x = 0; x < width; ++x) {
				exec(x, y, get(x,y));
			}
		}
	}

	/** add the given matrix element-wise (element counts must match) */
	DataMatrix<T>& operator += (const DataMatrix<T>& o) {
		ensureEqualSize(*this, o);
		for (int i = 0; i < (int) data.size(); ++i) { data[i] += o.data[i]; }
		return *this;
	}

	/** subtract the given matrix element-wise (element counts must match) */
	DataMatrix<T>& operator -= (const DataMatrix<T>& o) {
		ensureEqualSize(*this, o);
		for (int i = 0; i < (int) data.size(); ++i) { data[i] -= o.data[i]; }
		return *this;
	}

	/** multiply by the given matrix element-wise (element counts must match) */
	DataMatrix<T>& operator *= (const DataMatrix<T>& o) {
		ensureEqualSize(*this, o);
		for (int i = 0; i < (int) data.size(); ++i) { data[i] *= o.data[i]; }
		return *this;
	}

	/** add the given value to every element */
	DataMatrix<T>& operator += (const T val) {
		for (int i = 0; i < (int) data.size(); ++i) { data[i] += val; }
		return *this;
	}

	/** multiply every element by the given value */
	DataMatrix<T>& operator *= (const T val) {
		for (int i = 0; i < (int) data.size(); ++i) { data[i] *= val; }
		return *this;
	}

	/** divide every element by the given value */
	DataMatrix<T>& operator /= (const T val) {
		for (int i = 0; i < (int) data.size(); ++i) { data[i] /= val; }
		return *this;
	}

	/** element-wise sum; DM must be this type or a subclass of it */
	template <typename DM> DM operator + (const DM& o) const {
		ensureEqualSize(*this, o);
		// named cast instead of the old C-style cast (which also stripped const)
		DM copy = *static_cast<const DM*>(this);
		copy += o;
		return copy;
	}

	/** element-wise difference; DM must be this type or a subclass of it */
	template <typename DM> DM operator - (const DM& o) const {
		ensureEqualSize(*this, o);
		DM copy = *static_cast<const DM*>(this);
		copy -= o;
		return copy;
	}

	/** scalar product; DM must be this type or a subclass of it */
	template <typename DM> DM operator * (const float val) const {
		DM copy = *static_cast<const DM*>(this);
		copy *= val;
		return copy;
	}

private:

	/** throw if the two matrices differ in element count */
	inline void ensureEqualSize(const DataMatrix& m1, const DataMatrix& m2) const {
		if (m1.getSizeElements() != m2.getSizeElements()) {
			throw Exception("size mismatch!");
		}
	}

};
}
#endif // K_CV_DATAMATRIX_H
|
scatter_double_avx2.c | // scatter the elements selected by a random index mask from one array into another, once serially and once with OpenMP SIMD, and compare the results
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define N 32000
#define SCALE 8
/* Scatter the elements selected by 'mask' from 'numbers' into a result
   array, once serially and once with an OpenMP SIMD loop, then compare.
   Requires <time.h> for time(); without it the call was implicitly
   declared, which is undefined behavior in C99 and later. */
int main() {
	srand(time(NULL));
	double *numbers = malloc(sizeof(double)*N);
	double *result1 = malloc(sizeof(double)*N);
	double *result2 = malloc(sizeof(double)*N);
	int *mask = malloc(sizeof(int)*N);
	/* Bail out cleanly on allocation failure instead of dereferencing NULL. */
	if (numbers == NULL || result1 == NULL || result2 == NULL || mask == NULL) {
		fprintf(stderr, "out of memory\n");
		free(numbers); free(result1); free(result2); free(mask);
		return 1;
	}
	// Init the numbers
	for (int i = 0; i<N; i++) numbers[i] = rand() % 10;
	for (int i = 0; i<N; i++) { result1[i] = 0; result2[i] = 0; }
	for (int i = 0; i<N; i++) mask[i] = rand() % N;  /* scatter indices in [0,N) */
	for (int i = 0; i<SCALE; i++) printf("%.1f ", numbers[i]);
	puts("\n---");
	for (int i = 0; i<SCALE; i++) printf("%d ", mask[i]);
	puts("\n---");
	puts("---------------------------------------------");
	//Serial
	for (int i = 0; i<SCALE; i++) {
		result1[mask[i]] = numbers[mask[i]];
	}
	/* SIMD scatter: simdlen(SCALE) requests SCALE-wide vector lanes */
	#pragma omp simd simdlen(SCALE)
	for (int i = 0; i<SCALE; i++) {
		result2[mask[i]] = numbers[mask[i]];
	}
	// print
	for (int i = 0; i<SCALE; i++) printf("%.1f ", result1[i]);
	puts("\n---");
	for (int i = 0; i<SCALE; i++) printf("%.1f ", result2[i]);
	puts("\n---");
	/* NOTE(review): only the first SCALE slots are compared, but the scatter
	   may write anywhere in [0,N) — kept as-is to preserve existing output;
	   widening the check to N would catch all mismatches. */
	int errors = 0;
	for (int i = 0; i<SCALE; i++) {
		if (result1[i] != result2[i]) ++errors;
	}
	printf("Errors: %d\n", errors);
	free(numbers);
	free(result1);
	free(result2);
	free(mask);
	return 0;
}
|
lis_matrix_vbr.c | /* Copyright (C) 2002-2012 The SSI Project. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the project nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE SCALABLE SOFTWARE INFRASTRUCTURE PROJECT
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE SCALABLE SOFTWARE INFRASTRUCTURE
PROJECT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_CONFIG_H
#include "lis_config.h"
#else
#ifdef HAVE_CONFIG_WIN32_H
#include "lis_config_win32.h"
#endif
#endif
#include <stdio.h>
#include <stdlib.h>
#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif
#include <string.h>
#include <stdarg.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef USE_MPI
#include <mpi.h>
#endif
#include "lislib.h"
/************************************************
* lis_matrix_set
* lis_matrix_malloc
* lis_matrix_copy
* lis_matrix_convert
* lis_matrix_get_diagonal
* lis_matrix_scaling
* lis_matrix_scaling_symm
* lis_matrix_normf
* lis_matrix_transpose
************************************************/
#undef __FUNC__
#define __FUNC__ "lis_matrix_set_vbr"
/* Attach user-supplied VBR (variable block row) arrays to matrix A.
 * nnz/bnnz are the scalar/block nonzero counts, nr/nc the block row/column
 * counts; row/col hold the block boundary offsets, bptr/bindex the block-CRS
 * structure, and ptr the per-block offsets into value.
 * The arrays are referenced, not copied (is_copy = LIS_FALSE), so the caller
 * must keep them alive for the lifetime of A. */
LIS_INT lis_matrix_set_vbr(LIS_INT nnz, LIS_INT nr, LIS_INT nc, LIS_INT bnnz, LIS_INT *row, LIS_INT *col, LIS_INT *ptr, LIS_INT *bptr, LIS_INT *bindex, LIS_SCALAR *value, LIS_MATRIX A)
{
	LIS_INT err;

	LIS_DEBUG_FUNC_IN;

#if 0
	err = lis_matrix_check(A,LIS_MATRIX_CHECK_SET);
	if( err ) return err;
#else
	/* an already-assembled matrix is left untouched (no-op success) */
	if(lis_matrix_is_assembled(A)) return LIS_SUCCESS;
	else {
		err = lis_matrix_check(A,LIS_MATRIX_CHECK_SET);
		if( err ) return err;
	}
#endif

	A->row = row;
	A->col = col;
	A->ptr = ptr;
	A->bptr = bptr;
	A->bindex = bindex;
	A->value = value;
	A->is_copy = LIS_FALSE;
	/* the negative status marks "storage type chosen but not yet assembled" */
	A->status = -LIS_MATRIX_VBR;
	A->is_block = LIS_TRUE;
	A->nnz = nnz;
	A->bnnz = bnnz;
	A->nr = nr;
	A->nc = nc;

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}
#undef __FUNC__
#define __FUNC__ "lis_matrix_malloc_vbr"
/* Allocate the six VBR arrays for a matrix with nnz scalar nonzeros stored
 * in bnnz blocks over an nr x nc block grid (n is unused but kept for
 * signature compatibility).
 * On any allocation failure every partially-allocated array is released and
 * LIS_OUT_OF_MEMORY is returned. (Previously the first four failure paths
 * returned LIS_FAILS while the last two returned LIS_OUT_OF_MEMORY; the
 * codes are now consistent — callers only test for non-zero, so this is
 * backward compatible.) */
LIS_INT lis_matrix_malloc_vbr(LIS_INT n, LIS_INT nnz, LIS_INT nr, LIS_INT nc, LIS_INT bnnz, LIS_INT **row, LIS_INT **col, LIS_INT **ptr, LIS_INT **bptr, LIS_INT **bindex, LIS_SCALAR **value)
{
	LIS_DEBUG_FUNC_IN;

	/* start from NULL so lis_free2 is safe on a partial failure */
	*row = NULL;
	*col = NULL;
	*ptr = NULL;
	*bptr = NULL;
	*bindex = NULL;
	*value = NULL;

	*row = (LIS_INT *)lis_malloc( (nr+1)*sizeof(LIS_INT),"lis_matrix_malloc_vbr::row" );
	if( *row==NULL )
	{
		LIS_SETERR_MEM((nr+1)*sizeof(LIS_INT));
		lis_free2(6,*row,*col,*ptr,*bptr,*bindex,*value);
		return LIS_OUT_OF_MEMORY;
	}
	*col = (LIS_INT *)lis_malloc( (nc+1)*sizeof(LIS_INT),"lis_matrix_malloc_vbr::col" );
	if( *col==NULL )
	{
		LIS_SETERR_MEM((nc+1)*sizeof(LIS_INT));
		lis_free2(6,*row,*col,*ptr,*bptr,*bindex,*value);
		return LIS_OUT_OF_MEMORY;
	}
	*ptr = (LIS_INT *)lis_malloc( (bnnz+1)*sizeof(LIS_INT),"lis_matrix_malloc_vbr::ptr" );
	if( *ptr==NULL )
	{
		LIS_SETERR_MEM((bnnz+1)*sizeof(LIS_INT));
		lis_free2(6,*row,*col,*ptr,*bptr,*bindex,*value);
		return LIS_OUT_OF_MEMORY;
	}
	*bptr = (LIS_INT *)lis_malloc( (nr+1)*sizeof(LIS_INT),"lis_matrix_malloc_vbr::bptr" );
	if( *bptr==NULL )
	{
		LIS_SETERR_MEM((nr+1)*sizeof(LIS_INT));
		lis_free2(6,*row,*col,*ptr,*bptr,*bindex,*value);
		return LIS_OUT_OF_MEMORY;
	}
	*bindex = (LIS_INT *)lis_malloc( bnnz*sizeof(LIS_INT),"lis_matrix_malloc_vbr::bindex" );
	if( *bindex==NULL )
	{
		LIS_SETERR_MEM(bnnz*sizeof(LIS_INT));
		lis_free2(6,*row,*col,*ptr,*bptr,*bindex,*value);
		return LIS_OUT_OF_MEMORY;
	}
	*value = (LIS_SCALAR *)lis_malloc( nnz*sizeof(LIS_SCALAR),"lis_matrix_malloc_vbr::value" );
	if( *value==NULL )
	{
		LIS_SETERR_MEM(nnz*sizeof(LIS_SCALAR));
		lis_free2(6,*row,*col,*ptr,*bptr,*bindex,*value);
		return LIS_OUT_OF_MEMORY;
	}

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}
#undef __FUNC__
#define __FUNC__ "lis_matrix_elements_copy_vbr"
/* Deep-copy the VBR arrays (row, col, ptr, bptr, bindex, value) into the
 * o_* destination arrays, which the caller must have sized via
 * lis_matrix_malloc_vbr. Parallelized with OpenMP; each loop is independent
 * per block row, so the work-sharing is race-free. */
LIS_INT lis_matrix_elements_copy_vbr(LIS_INT n, LIS_INT nr, LIS_INT nc, LIS_INT bnnz, LIS_INT *row, LIS_INT *col, LIS_INT *ptr, LIS_INT *bptr, LIS_INT *bindex, LIS_SCALAR *value,
			  LIS_INT *o_row, LIS_INT *o_col, LIS_INT *o_ptr, LIS_INT *o_bptr, LIS_INT *o_bindex, LIS_SCALAR *o_value)
{
	LIS_INT bi,bj,i,j,k;

	LIS_DEBUG_FUNC_IN;

	#ifdef _OPENMP
	#pragma omp parallel private(bi,bj,i,j,k)
	#endif
	{
		/* block-row boundaries and block pointers */
		#ifdef _OPENMP
		#pragma omp for
		#endif
		for(i=0;i<nr+1;i++)
		{
			o_row[i] = row[i];
			o_bptr[i] = bptr[i];
		}
		/* block-column boundaries */
		#ifdef _OPENMP
		#pragma omp for
		#endif
		for(i=0;i<nc+1;i++)
		{
			o_col[i] = col[i];
		}
		/* per block: copy the dense sub-block values (column-major within the
		 * block, as elsewhere in this file) plus its bindex/ptr entries */
		#ifdef _OPENMP
		#pragma omp for
		#endif
		for(bi=0;bi<nr;bi++)
		{
			for(bj=bptr[bi];bj<bptr[bi+1];bj++)
			{
				k = ptr[bj];
				for(j=col[bindex[bj]];j<col[bindex[bj]+1];j++)
				{
					for(i=row[bi];i<row[bi+1];i++)
					{
						o_value[k] = value[k];
						k++;
					}
				}
				o_bindex[bj] = bindex[bj];
				o_ptr[bj+1] = ptr[bj+1];
			}
		}
		/* ptr[0] is not covered by the bj+1 writes above */
		o_ptr[0] = ptr[0];
	}
	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}
#undef __FUNC__
#define __FUNC__ "lis_matrix_copy_vbr"
/* Duplicate the VBR storage of Ain into Aout: allocate fresh arrays, deep-copy
 * the elements, attach them to Aout, and assemble it. Returns LIS_SUCCESS or
 * the first error encountered (allocated arrays are released on failure). */
LIS_INT lis_matrix_copy_vbr(LIS_MATRIX Ain, LIS_MATRIX Aout)
{
	LIS_INT status;
	LIS_INT sz,nz,bnz,brows,bcols;
	LIS_INT *r,*c,*p,*bp,*bidx;
	LIS_SCALAR *v;

	LIS_DEBUG_FUNC_IN;

	sz    = Ain->n;
	nz    = Ain->nnz;
	bnz   = Ain->bnnz;
	brows = Ain->nr;
	bcols = Ain->nc;

	status = lis_matrix_malloc_vbr(sz,nz,brows,bcols,bnz,&r,&c,&p,&bp,&bidx,&v);
	if( status )
	{
		return status;
	}

	lis_matrix_elements_copy_vbr(sz,brows,bcols,bnz,Ain->row,Ain->col,Ain->ptr,Ain->bptr,Ain->bindex,Ain->value,r,c,p,bp,bidx,v);

	status = lis_matrix_set_vbr(nz,brows,bcols,bnz,r,c,p,bp,bidx,v,Aout);
	if( status )
	{
		lis_free2(6,r,c,p,bp,bidx,v);
		return status;
	}
	status = lis_matrix_assemble(Aout);
	if( status )
	{
		lis_matrix_storage_destroy(Aout);
		return status;
	}

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}
#undef __FUNC__
#define __FUNC__ "lis_matrix_get_vbr_rowcol"
/* Derive a block row/column partition for VBR conversion from the CRS
 * structure of Ain (ptr/index must be valid, i.e. Ain is CRS here).
 * Marks in iw every position where a row's run of contiguous column indices
 * starts or ends, then compacts those breakpoints into the offset arrays
 * *row and *col (both of length *nr+1 == *nc+1, identical partitions).
 * Fixed: the LIS_SETERR_MEM for the iw allocation reported n*sizeof(LIS_INT)
 * although (n+1)*sizeof(LIS_INT) is allocated. */
LIS_INT lis_matrix_get_vbr_rowcol(LIS_MATRIX Ain, LIS_INT *nr, LIS_INT *nc, LIS_INT **row, LIS_INT **col)
{
	LIS_INT i,j,k,jj,kk,n;
	LIS_INT *iw;

	LIS_DEBUG_FUNC_IN;

	n = Ain->n;

	iw = NULL;
	iw = (LIS_INT *)lis_malloc( (n+1)*sizeof(LIS_INT),"lis_matrix_get_vbr_rowcol::iw" );
	if( iw==NULL )
	{
		LIS_SETERR_MEM((n+1)*sizeof(LIS_INT));
		return LIS_OUT_OF_MEMORY;
	}

	/* iw[p] = 1 marks a block boundary immediately before position p */
	for(i=0;i<n+1;i++) iw[i] = 0;
	for(i=0;i<n;i++)
	{
		if( Ain->ptr[i]<Ain->ptr[i+1] )
		{
			jj = Ain->index[Ain->ptr[i]];
			iw[jj] = 1;
			for(j=Ain->ptr[i]+1;j<Ain->ptr[i+1];j++)
			{
				jj = Ain->index[j];
				kk = Ain->index[j-1];
				/* a gap between consecutive column indices ends one
				 * contiguous run and starts another */
				if( kk!=jj-1 )
				{
					iw[jj] = 1;
					iw[kk+1] = 1;
				}
			}
			iw[jj+1] = 1;
		}
	}
	/* compact the boundary flags into offsets: iw[1..k] become the
	 * positions where blocks start/end; k is the block count */
	k=0;
	iw[0] = 0;
	for(i=1;i<n+1;i++)
	{
		if( iw[i]!=0 )
		{
			k++;
			iw[k] = i;
		}
	}
	*nr = k;
	*nc = k;
	*row = (LIS_INT *)lis_malloc((k+1)*sizeof(LIS_INT),"lis_matrix_get_vbr_rowcol::row");
	if( (*row)==NULL )
	{
		LIS_SETERR_MEM((k+1)*sizeof(LIS_INT));
		lis_free(iw);
		return LIS_OUT_OF_MEMORY;
	}
	*col = (LIS_INT *)lis_malloc((k+1)*sizeof(LIS_INT),"lis_matrix_get_vbr_rowcol::col");
	if( (*col)==NULL )
	{
		LIS_SETERR_MEM((k+1)*sizeof(LIS_INT));
		lis_free2(2,iw,*row);
		return LIS_OUT_OF_MEMORY;
	}
	memcpy(*row,iw,(k+1)*sizeof(LIS_INT));
	memcpy(*col,iw,(k+1)*sizeof(LIS_INT));

	lis_free(iw);
	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}
/* ----------------------------------------------------------------------
 * Disabled (#if 0) alternative implementation of lis_matrix_get_vbr_rowcol:
 * a sliding-window heuristic that groups rows into blocks of size 1-3 by
 * comparing nonzero patterns (p[][]) of adjacent rows with AND/OR counts.
 * Kept for reference only.
 * NOTE(review): in the kk==2 branch `ac` is read but only assigned in the
 * kk==1 branch of an earlier iteration — presumably intentional carry-over,
 * but confirm before re-enabling. Several declared variables (l, ii, err,
 * bnr, bnc, pe, ...) are unused here.
 * ---------------------------------------------------------------------- */
#if 0
#undef __FUNC__
#define __FUNC__ "lis_matrix_get_vbr_rowcol"
LIS_INT lis_matrix_get_vbr_rowcol(LIS_MATRIX Ain, LIS_INT *nr, LIS_INT *nc, LIS_INT **row, LIS_INT **col)
{
	LIS_INT i,j,k,l,n;
	LIS_INT ii,jj,kk,ret;
	LIS_INT bnnz,bj,bnr,bnc,jpos,nnz,ij,kv,bi;
	LIS_INT err;
	LIS_INT gn,nprocs,my_rank;
	LIS_INT is,ie,pe;
	LIS_INT *iw;
	LIS_INT ac,oc,count;
	LIS_INT p[3][5],and[3],or[3];

	LIS_DEBUG_FUNC_IN;

	n = Ain->n;
	gn = Ain->gn;
	nprocs = Ain->nprocs;
	my_rank = Ain->my_rank;
	is = Ain->is;
	ie = Ain->ie;

	iw = NULL;
	iw = (LIS_INT *)lis_malloc( n*sizeof(LIS_INT),"lis_matrix_get_vbr_rowcol::iw" );
	if( iw==NULL )
	{
		LIS_SETERR_MEM(n*sizeof(LIS_INT));
		return LIS_OUT_OF_MEMORY;
	}

	/* p[kk][jj]: nonzero pattern of row k+kk in a 5-column window from k */
	memset(p[0],0,15*sizeof(LIS_INT));
	count = 0;
	k = 0;
	for(i=0;i<n;i++)
	{
		kk = i - k;
		for(j=Ain->ptr[i];j<Ain->ptr[i+1];j++)
		{
			jj = Ain->index[j] - k;
			if( jj>=0 && jj<5 )
			{
				p[kk][jj] = 1;
			}
		}
		if( kk==1 )
		{
			/* overlap of the first two rows decides block size 1 or 2 */
			and[0] = p[kk-1][0] & p[kk][0];
			and[1] = p[kk-1][1] & p[kk][1];
			and[2] = p[kk-1][2] & p[kk][2];
			ac = and[0] + and[1] + and[2];
			if( ac==0 )
			{
				memcpy(p[0],&p[1][1],3*sizeof(LIS_INT));
				memset(p[1],0,5*sizeof(LIS_INT));
				iw[count++] = 1;
				k++;
			}
			else if( ac==1 )
			{
				if( and[0]==1 || and[1]==1 )
				{
					memset(p[0],0,10*sizeof(LIS_INT));
					iw[count++] = 2;
					k += 2;
				}
				else
				{
					memcpy(p[0],&p[1][1],3*sizeof(LIS_INT));
					memset(p[1],0,5*sizeof(LIS_INT));
					iw[count++] = 1;
					k++;
				}
			}
			else
			{
				or[0] = p[kk-1][0] | p[kk][0];
				or[1] = p[kk-1][1] | p[kk][1];
				or[2] = p[kk-1][2] | p[kk][2];
				oc = or[0] + or[1] + or[2];
				if( oc==2 )
				{
					memset(p[0],0,10*sizeof(LIS_INT));
					iw[count++] = 2;
					k += 2;
				}
			}
		}
		else if( kk==2 )
		{
			/* third row decides between block size 2 and 3 */
			oc = p[kk][0] + p[kk][1] + p[kk][2];
			if( ac==2 )
			{
				if( oc==3 )
				{
					memset(p[0],0,15*sizeof(LIS_INT));
					iw[count++] = 3;
					k += 3;
				}
				else
				{
					memcpy(p[0],&p[2][2],3*sizeof(LIS_INT));
					memset(p[1],0,10*sizeof(LIS_INT));
					iw[count++] = 2;
					k += 2;
				}
			}
			else
			{
				if( oc==1 )
				{
					memcpy(p[0],&p[2][2],3*sizeof(LIS_INT));
					memset(p[1],0,10*sizeof(LIS_INT));
					iw[count++] = 2;
					k += 2;
				}
				else
				{
					memset(p[0],0,15*sizeof(LIS_INT));
					iw[count++] = 3;
					k += 3;
				}
			}
		}
	}
	/* trailing rows not yet assigned form a final 1-row block */
	if( k<n )
	{
		iw[count++] = 1;
	}
	*nr = count;
	*nc = count;
	*row = (LIS_INT *)lis_malloc((count+1)*sizeof(LIS_INT),"lis_matrix_get_vbr_rowcol::row");
	if( (*row)==NULL )
	{
		LIS_SETERR_MEM((count+1)*sizeof(LIS_INT));
		lis_free(iw);
		return LIS_OUT_OF_MEMORY;
	}
	*col = (LIS_INT *)lis_malloc((count+1)*sizeof(LIS_INT),"lis_matrix_get_vbr_rowcol::col");
	if( (*col)==NULL )
	{
		LIS_SETERR_MEM((count+1)*sizeof(LIS_INT));
		lis_free2(2,iw,*row);
		return LIS_OUT_OF_MEMORY;
	}
	/* prefix-sum the block sizes into offset arrays */
	(*row)[0] = (*col)[0] = 0;
	for(i=0;i<count;i++)
	{
		(*row)[i+1] = (*row)[i] + iw[i];
		(*col)[i+1] = (*col)[i] + iw[i];
	}

	lis_free(iw);
	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}
#endif
#undef __FUNC__
#define __FUNC__ "lis_matrix_convert_crs2vbr"
/* Convert a CRS matrix Ain into VBR storage in Aout.
 * Pass 1 counts blocks (bptr) and scalar entries (count) per block row;
 * pass 2 fills bindex/ptr/value, zero-initializing each new dense block and
 * scattering the CRS entries into it (column-major within a block).
 * Block boundaries come from Aout->conv_row/conv_col when preset, otherwise
 * from lis_matrix_get_vbr_rowcol.
 * Fixed: the LIS_SETERR_MEM for the value allocation reported
 * nnz*sizeof(LIS_INT) although nnz*sizeof(LIS_SCALAR) is allocated.
 * NOTE(review): the per-thread iw/iw2 allocations inside the parallel
 * regions are not checked for NULL — pre-existing behavior, left unchanged. */
LIS_INT lis_matrix_convert_crs2vbr(LIS_MATRIX Ain, LIS_MATRIX Aout)
{
	LIS_INT i,j,k,n;
	LIS_INT ii,jj,kk,ret;
	LIS_INT bnnz,bj,bnr,jpos,nnz,ij,kv,bi;
	LIS_INT err;
	LIS_INT gn,nprocs,my_rank;
	LIS_INT nr,nc;
	LIS_INT is,ie;
	LIS_INT *iw,*iw2,*count,*p2bindex;
	LIS_INT *bptr,*bindex,*ptr;
	LIS_INT *row, *col;
	LIS_SCALAR *value;
	LIS_Comm comm;

	LIS_DEBUG_FUNC_IN;

	/* use a caller-provided block partition if available */
	nr = Aout->conv_bnr;
	nc = Aout->conv_bnc;
	row = Aout->conv_row;
	col = Aout->conv_col;
	if( row==NULL || col==NULL )
	{
		lis_matrix_sort_crs(Ain);
		err = lis_matrix_get_vbr_rowcol(Ain,&nr,&nc,&row,&col);
		if( err ) return err;
	}
	n = Ain->n;
	gn = Ain->gn;
	nprocs = Ain->nprocs;
	my_rank = Ain->my_rank;
	comm = Ain->comm;
	is = Ain->is;
	ie = Ain->ie;

	ptr = NULL;
	value = NULL;
	bptr = NULL;
	bindex = NULL;
	iw = NULL;
	iw2 = NULL;
	count = NULL;
	p2bindex = NULL;

	bptr = (LIS_INT *)lis_malloc( (nr+1)*sizeof(LIS_INT),"lis_matrix_convert_crs2vbr::bptr" );
	if( bptr==NULL )
	{
		lis_free2(6,ptr,value,bptr,bindex,count,p2bindex);
		LIS_SETERR_MEM((nr+1)*sizeof(LIS_INT));
		return LIS_OUT_OF_MEMORY;
	}
	p2bindex = (LIS_INT *)lis_malloc( gn*sizeof(LIS_INT),"lis_matrix_convert_crs2vbr::p2bindex" );
	if( p2bindex==NULL )
	{
		lis_free2(6,ptr,value,bptr,bindex,count,p2bindex);
		LIS_SETERR_MEM(gn*sizeof(LIS_INT));
		return LIS_OUT_OF_MEMORY;
	}
	count = (LIS_INT *)lis_malloc( (nr+1)*sizeof(LIS_INT),"lis_matrix_convert_crs2vbr::count" );
	if( count==NULL )
	{
		lis_free2(6,ptr,value,bptr,bindex,count,p2bindex);
		LIS_SETERR_MEM((nr+1)*sizeof(LIS_INT));
		return LIS_OUT_OF_MEMORY;
	}

	/* p2bindex maps a global column index to its block column */
	#ifdef _OPENMP
	#pragma omp parallel for private(i,j)
	#endif
	for(i=0;i<nc;i++)
	{
		for(j=col[i];j<col[i+1];j++)
		{
			p2bindex[j] = i;
		}
	}

	/* pass 1: per block row, count distinct block columns (bptr) and the
	 * scalar entries their dense blocks will occupy (count) */
	#ifdef _OPENMP
	#pragma omp parallel private(i,bnr,k,ii,j,bj,kk,ij,jj,iw,iw2,kv,jpos)
	#endif
	{
		#ifdef _OPENMP
		#pragma omp for
		#endif
		for(i=0;i<nr+1;i++) count[i] = 0;
		/* iw: membership flags, iw2: insertion-ordered block column list */
		iw = (LIS_INT *)lis_malloc( nc*sizeof(LIS_INT),"lis_matrix_convert_crs2vbr::iw" );
		iw2 = (LIS_INT *)lis_malloc( nc*sizeof(LIS_INT),"lis_matrix_convert_crs2vbr::iw2" );
		memset(iw,0,nc*sizeof(LIS_INT));
		#ifdef _OPENMP
		#pragma omp for
		#endif
		for(i=0;i<nr;i++)
		{
			k = 0;
			kk = row[i];
			bnr = row[i+1] - row[i];
			jj = 0;
			for(ii=0;ii+kk<n&&ii<bnr;ii++)
			{
				for(j=Ain->ptr[kk+ii];j<Ain->ptr[kk+ii+1];j++)
				{
					bj = p2bindex[Ain->index[j]];
					jpos = iw[bj];
					if( jpos==0 )
					{
						iw[bj] = 1;
						iw2[jj] = bj;
						jj++;
					}
				}
			}
			for(bj=0;bj<jj;bj++)
			{
				k++;
				ii = iw2[bj];
				iw[ii]=0;
				count[i+1] += bnr*(col[ii+1]-col[ii]);
			}
			bptr[i+1] = k;
		}
		lis_free(iw);
		lis_free(iw2);
	}
	/* prefix sums turn the per-row counts into offsets */
	bptr[0] = 0;
	for(i=0;i<nr;i++)
	{
		bptr[i+1] += bptr[i];
	}
	bnnz = bptr[nr];
	for(i=0;i<nr;i++)
	{
		count[i+1] += count[i];
	}
	nnz = count[nr];

	ptr = (LIS_INT *)lis_malloc( (bnnz+1)*sizeof(LIS_INT),"lis_matrix_convert_crs2vbr::ptr" );
	if( ptr==NULL )
	{
		lis_free2(6,ptr,value,bptr,bindex,count,p2bindex);
		LIS_SETERR_MEM((bnnz+1)*sizeof(LIS_INT));
		return LIS_OUT_OF_MEMORY;
	}
	bindex = (LIS_INT *)lis_malloc( bnnz*sizeof(LIS_INT),"lis_matrix_convert_crs2vbr::bindex" );
	if( bindex==NULL )
	{
		lis_free2(6,ptr,value,bptr,bindex,count,p2bindex);
		LIS_SETERR_MEM(bnnz*sizeof(LIS_INT));
		return LIS_OUT_OF_MEMORY;
	}
	value = (LIS_SCALAR *)lis_malloc( nnz*sizeof(LIS_SCALAR),"lis_matrix_convert_crs2vbr::value" );
	if( value==NULL )
	{
		lis_free2(6,ptr,value,bptr,bindex,count,p2bindex);
		LIS_SETERR_MEM(nnz*sizeof(LIS_SCALAR));
		return LIS_OUT_OF_MEMORY;
	}

	/* pass 2: scatter CRS entries into the dense VBR blocks */
	#ifdef _OPENMP
	#pragma omp parallel private(bi,i,ii,k,j,bj,jpos,kv,kk,ij,jj,iw,bnr,ret)
	#endif
	{
		#ifdef _OPENMP
		#pragma omp for
		#endif
		for(i=0;i<nr;i++)
		{
			j = bptr[i];
			ptr[j] = count[i];
		}
		/* iw[bj] = kv+1 remembers where block bj's values start (0 = unseen) */
		iw = (LIS_INT *)lis_malloc( nc*sizeof(LIS_INT),"lis_matrix_convert_crs2vbr::iw" );
		memset(iw,0,nc*sizeof(LIS_INT));
		#ifdef _OPENMP
		#pragma omp for
		#endif
		for(bi=0;bi<nr;bi++)
		{
			i = row[bi];
			ii = 0;
			kk = bptr[bi];
			kv = ptr[kk];
			bnr = row[bi+1] - row[bi];
			while( i+ii<n && ii<bnr )
			{
				for( k=Ain->ptr[i+ii];k<Ain->ptr[i+ii+1];k++)
				{
					bj = p2bindex[Ain->index[k]];
					j = Ain->index[k] - col[bj];
					jpos = iw[bj];
					if( jpos==0 )
					{
						/* first entry of this block: zero it and register it */
						ret = bnr * (col[bj+1]-col[bj]);
						ij = j*bnr + ii;
						memset(&value[kv], 0, ret*sizeof(LIS_SCALAR));
						bindex[kk] = bj;
						value[kv+ij] = Ain->value[k];
						iw[bj] = kv+1;
						kv += ret;
						ptr[kk+1] = kv;
						kk = kk+1;
					}
					else
					{
						/* block already open: column-major offset within it */
						ij = j*bnr + ii;
						value[jpos+ij-1] = Ain->value[k];
					}
				}
				ii = ii+1;
			}
			/* reset the membership flags for the next block row */
			for(j=bptr[bi];j<bptr[bi+1];j++)
			{
				iw[bindex[j]] = 0;
			}
		}
		lis_free(iw);
	}

	err = lis_matrix_set_vbr(nnz,nr,nc,bnnz,row,col,ptr,bptr,bindex,value,Aout);
	if( err )
	{
		lis_free2(6,ptr,value,bptr,bindex,count,p2bindex);
		return err;
	}
	err = lis_matrix_assemble(Aout);
	if( err )
	{
		lis_free2(2,count,p2bindex);
		lis_matrix_storage_destroy(Aout);
		return err;
	}
	lis_free2(2,count,p2bindex);
	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}
#undef __FUNC__
#define __FUNC__ "lis_matrix_convert_vbr2crs"
/* Convert a VBR matrix Ain into CRS storage in Aout, dropping explicit
 * zeros stored inside the dense blocks.
 * Pass 1 counts the nonzeros per scalar row (ptr), pass 2 writes
 * index/value; blocks are read column-major (offset j*bnr + i).
 * Fixed: the LIS_SETERR_MEM for the value allocation reported
 * nnz*sizeof(LIS_INT) although nnz*sizeof(LIS_SCALAR) is allocated; added
 * the LIS_DEBUG_FUNC_IN/OUT pair used by every other function in this file. */
LIS_INT lis_matrix_convert_vbr2crs(LIS_MATRIX Ain, LIS_MATRIX Aout)
{
	LIS_INT i,j,k,l;
	LIS_INT nr,nc,bnr,bnc,bi,bj;
	LIS_INT err;
	LIS_INT n,nnz,is;
	LIS_INT *ptr,*index;
	LIS_SCALAR *value;

	LIS_DEBUG_FUNC_IN;

	n = Ain->n;
	nr = Ain->nr;
	nc = Ain->nc;
	is = Ain->is;

	ptr = NULL;
	index = NULL;
	value = NULL;

	ptr = (LIS_INT *)lis_malloc( (n+1)*sizeof(LIS_INT),"lis_matrix_convert_vbr2crs::ptr" );
	if( ptr==NULL )
	{
		LIS_SETERR_MEM((n+1)*sizeof(LIS_INT));
		return LIS_OUT_OF_MEMORY;
	}

	/* pass 1: count the stored nonzeros of each scalar row */
	#ifdef _OPENMP
	#pragma omp parallel private(i,j,k,bi,bj,bnr,bnc)
	#endif
	{
		#ifdef _OPENMP
		#pragma omp for
		#endif
		for(bi=0;bi<nr;bi++)
		{
			k = Ain->row[bi];
			bnr = Ain->row[bi+1]-Ain->row[bi];
			for(i=0;i<bnr;i++)
			{
				ptr[k+i+1] = 0;
			}
		}
		#ifdef _OPENMP
		#pragma omp for
		#endif
		for(bi=0;bi<nr;bi++)
		{
			k = Ain->row[bi];
			bnr = Ain->row[bi+1]-Ain->row[bi];
			for(bj=Ain->bptr[bi];bj<Ain->bptr[bi+1];bj++)
			{
				bnc = Ain->col[Ain->bindex[bj]+1] - Ain->col[Ain->bindex[bj]];
				for(j=0;j<bnc;j++)
				{
					for(i=0;i<bnr;i++)
					{
						/* explicit zeros inside the block are skipped */
						if( Ain->value[Ain->ptr[bj] + j*bnr + i] != (LIS_SCALAR)0.0 )
						{
							ptr[k+i+1]++;
						}
					}
				}
			}
		}
	}
	/* prefix sum into row offsets */
	ptr[0] = 0;
	for(i=0;i<n;i++)
	{
		ptr[i+1] += ptr[i];
	}
	nnz = ptr[n];

	index = (LIS_INT *)lis_malloc( nnz*sizeof(LIS_INT),"lis_matrix_convert_vbr2crs::index" );
	if( index==NULL )
	{
		lis_free2(3,ptr,index,value);
		LIS_SETERR_MEM(nnz*sizeof(LIS_INT));
		return LIS_OUT_OF_MEMORY;
	}
	value = (LIS_SCALAR *)lis_malloc( nnz*sizeof(LIS_SCALAR),"lis_matrix_convert_vbr2crs::value" );
	if( value==NULL )
	{
		lis_free2(3,ptr,index,value);
		LIS_SETERR_MEM(nnz*sizeof(LIS_SCALAR));
		return LIS_OUT_OF_MEMORY;
	}

	/* pass 2: emit index/value in block order, which yields ascending
	 * column order per row when bindex is sorted */
	#ifdef _OPENMP
	#pragma omp parallel for private(i,j,k,l,bi,bj,bnr,bnc)
	#endif
	for(bi=0;bi<nr;bi++)
	{
		l = Ain->row[bi];
		bnr = Ain->row[bi+1]-Ain->row[bi];
		for(i=0;i<bnr;i++)
		{
			k = ptr[l+i];
			for(bj=Ain->bptr[bi];bj<Ain->bptr[bi+1];bj++)
			{
				bnc = Ain->col[Ain->bindex[bj]+1] - Ain->col[Ain->bindex[bj]];
				for(j=0;j<bnc;j++)
				{
					if( Ain->value[Ain->ptr[bj] + j*bnr + i] != (LIS_SCALAR)0.0 )
					{
						value[k] = Ain->value[Ain->ptr[bj] + j*bnr + i];
						index[k] = Ain->col[Ain->bindex[bj]]+j;
						k++;
					}
				}
			}
		}
	}

	err = lis_matrix_set_crs(nnz,ptr,index,value,Aout);
	if( err )
	{
		lis_free2(3,ptr,index,value);
		return err;
	}
	err = lis_matrix_assemble(Aout);
	if( err )
	{
		lis_matrix_storage_destroy(Aout);
		return err;
	}

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}
#undef __FUNC__
#define __FUNC__ "lis_matrix_get_diagonal_vbr"
/* Extract the main diagonal of a VBR matrix A into d[0..n-1].
 * Split matrices read straight from the diagonal blocks in A->D;
 * otherwise the diagonal is located inside the stored blocks of each
 * block row. */
LIS_INT lis_matrix_get_diagonal_vbr(LIS_MATRIX A, LIS_SCALAR d[])
{
	LIS_INT i,j,k,bi,bj,bjj,nr,nc;
	LIS_INT bnr,bnc;
	LIS_INT n;

	LIS_DEBUG_FUNC_IN;

	n = A->n;
	nr = A->nr;
	nc = A->nc;
	if( A->is_splited )
	{
		/* D holds bns[i] x bns[i] dense diagonal blocks; the diagonal of
		 * block i is at stride bnr+1 within it */
		#ifdef _OPENMP
		#pragma omp parallel for private(i,j,bnr)
		#endif
		for(i=0;i<nr;i++)
		{
			bnr = A->D->bns[i];
			for(j=0;j<bnr;j++)
			{
				d[A->L->row[i]+j] = A->D->v_value[i][j*bnr+j];
			}
		}
	}
	else
	{
		#ifdef _OPENMP
		#pragma omp parallel for private(bi,bj,bjj,bnr,bnc,i,j,k)
		#endif
		for(bi=0;bi<nr;bi++)
		{
			/* i: current global row, k: rows of this block row done so far */
			k = 0;
			i = A->row[bi];
			bnr = A->row[bi+1] - A->row[bi];
			for(bj=A->bptr[bi];bj<A->bptr[bi+1];bj++)
			{
				bjj = A->bindex[bj];
				bnc = A->col[bjj+1] - A->col[bjj];
				/* does this block's column span contain row i's diagonal? */
				if( i>=bjj*bnc && i<(bjj+1)*bnc )
				{
					/* walk the diagonal inside the column-major block */
					for(j=i%bnc;j<bnc&&k<bnr&&i<n;j++)
					{
						d[i] = A->value[A->ptr[bj] + j*bnr + k];
						i++;
						k++;
					}
				}
				if( k==bnr ) break;
			}
		}
	}
	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}
#undef __FUNC__
#define __FUNC__ "lis_matrix_scaling_vbr"
/* Row-scale a VBR matrix: every entry of scalar row i is multiplied by d[i].
 * For split matrices L, U and the diagonal blocks D are scaled separately.
 * Fixed: the L branch indexed A->L->col with A->bindex[bj] (the merged
 * matrix's block index array) instead of A->L->bindex[bj]; the U branch
 * already used A->U->bindex, and the unsplit branch A->bindex. */
LIS_INT lis_matrix_scaling_vbr(LIS_MATRIX A, LIS_SCALAR d[])
{
	LIS_INT i,j,k;
	LIS_INT bi,bj;
	LIS_INT nr,nc;
	LIS_INT n;

	LIS_DEBUG_FUNC_IN;

	n = A->n;
	nr = A->nr;
	nc = A->nc;
	if( A->is_splited )
	{
		#ifdef _OPENMP
		#pragma omp parallel for private(bi,bj,i,j,k)
		#endif
		for(bi=0;bi<nr;bi++)
		{
			/* strictly-lower blocks: values are contiguous from the block
			 * row's first block, column-major within each block */
			k = A->L->ptr[A->L->bptr[bi]];
			for(bj=A->L->bptr[bi];bj<A->L->bptr[bi+1];bj++)
			{
				for(j=A->L->col[A->L->bindex[bj]];j<A->L->col[A->L->bindex[bj]+1];j++)
				{
					for(i=A->L->row[bi];i<A->L->row[bi+1];i++)
					{
						A->L->value[k] *= d[i];
						k++;
					}
				}
			}
			/* strictly-upper blocks */
			k = A->U->ptr[A->U->bptr[bi]];
			for(bj=A->U->bptr[bi];bj<A->U->bptr[bi+1];bj++)
			{
				for(j=A->U->col[A->U->bindex[bj]];j<A->U->col[A->U->bindex[bj]+1];j++)
				{
					for(i=A->U->row[bi];i<A->U->row[bi+1];i++)
					{
						A->U->value[k] *= d[i];
						k++;
					}
				}
			}
			/* diagonal block bi */
			k = 0;
			for(j=A->U->col[bi];j<A->U->col[bi+1];j++)
			{
				for(i=A->U->row[bi];i<A->U->row[bi+1];i++)
				{
					A->D->v_value[bi][k] *= d[i];
					k++;
				}
			}
		}
	}
	else
	{
		#ifdef _OPENMP
		#pragma omp parallel for private(bi,bj,i,j,k)
		#endif
		for(bi=0;bi<nr;bi++)
		{
			k = A->ptr[A->bptr[bi]];
			for(bj=A->bptr[bi];bj<A->bptr[bi+1];bj++)
			{
				for(j=A->col[A->bindex[bj]];j<A->col[A->bindex[bj]+1];j++)
				{
					for(i=A->row[bi];i<A->row[bi+1];i++)
					{
						A->value[k] *= d[i];
						k++;
					}
				}
			}
		}
	}
	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}
#undef __FUNC__
#define __FUNC__ "lis_matrix_scaling_symm_vbr"
/* Symmetric scaling of a VBR matrix: entry (i,j) becomes a_ij*d[i]*d[j].
 * Not implemented for split matrices.
 * NOTE(review): j here is the column index taken from A->col offsets —
 * presumably a local/global index valid for d[]; confirm for the MPI case. */
LIS_INT lis_matrix_scaling_symm_vbr(LIS_MATRIX A, LIS_SCALAR d[])
{
	LIS_INT i,j,k;
	LIS_INT bi,bj;
	LIS_INT nr,nc;
	LIS_INT n;

	LIS_DEBUG_FUNC_IN;

	n = A->n;
	nr = A->nr;
	nc = A->nc;
	if( A->is_splited )
	{
		LIS_SETERR_IMP;
		return LIS_ERR_NOT_IMPLEMENTED;
	}
	else
	{
		#ifdef _OPENMP
		#pragma omp parallel for private(bi,bj,i,j,k)
		#endif
		for(bi=0;bi<nr;bi++)
		{
			/* k walks the contiguous column-major values of block row bi */
			k = A->ptr[A->bptr[bi]];
			for(bj=A->bptr[bi];bj<A->bptr[bi+1];bj++)
			{
				for(j=A->col[A->bindex[bj]];j<A->col[A->bindex[bj]+1];j++)
				{
					for(i=A->row[bi];i<A->row[bi+1];i++)
					{
						A->value[k] = A->value[k]*d[i]*d[j];
						k++;
					}
				}
			}
		}
	}
	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}
#undef __FUNC__
#define __FUNC__ "lis_matrix_split_vbr"
/* Split a VBR matrix A into strictly-lower (A->L), strictly-upper (A->U) and
 * block-diagonal (A->D) parts, allocating fresh VBR arrays for L and U.
 * A first pass counts blocks (bnnz*) and scalar entries (nnz*) per part, a
 * second pass copies the block data.
 * Fixed (serial branch): the pre-count pass incremented nnzl/nnzu per BLOCK
 * and accumulated the block sizes into bnnzl/bnnzu — swapped with respect to
 * both the OpenMP branch and the lis_matrix_malloc_vbr(n,nnz,...,bnnz,...)
 * argument order, which under-allocated the value arrays (nnz scalars) and
 * over-allocated ptr/bindex. The counters now match the OpenMP branch:
 * nnz* = scalar entries, bnnz* = blocks.
 * NOTE(review): the OpenMP-branch work allocations return without freeing
 * earlier ones on failure — pre-existing behavior, left unchanged. */
LIS_INT lis_matrix_split_vbr(LIS_MATRIX A)
{
	LIS_INT i,j,jj,n;
	LIS_INT nr,nc,bs;
	LIS_INT nnzl,nnzu,bnnzl,bnnzu;
	LIS_INT err;
	LIS_INT *lrow,*lcol,*lptr,*lbptr,*lbindex;
	LIS_INT *urow,*ucol,*uptr,*ubptr,*ubindex;
	LIS_SCALAR *lvalue,*uvalue;
	LIS_MATRIX_DIAG D;
	#ifdef _OPENMP
	LIS_INT ku,kl,kbu,kbl;
	LIS_INT *liw,*uiw,*liw2,*uiw2;
	#endif

	LIS_DEBUG_FUNC_IN;

	n = A->n;
	nr = A->nr;
	nc = A->nc;
	nnzl = 0;
	nnzu = 0;
	bnnzl = 0;
	bnnzu = 0;
	D = NULL;
	lrow = NULL;
	lcol = NULL;
	lptr = NULL;
	lbptr = NULL;
	lbindex = NULL;
	lvalue = NULL;
	urow = NULL;
	ucol = NULL;
	uptr = NULL;
	ubptr = NULL;
	ubindex = NULL;
	uvalue = NULL;

	#ifdef _OPENMP
	/* liw/uiw: per-block-row block counts; liw2/uiw2: scalar entry counts */
	liw = (LIS_INT *)lis_malloc((nr+1)*sizeof(LIS_INT),"lis_matrix_split_vbr::liw");
	if( liw==NULL )
	{
		LIS_SETERR_MEM((nr+1)*sizeof(LIS_INT));
		return LIS_OUT_OF_MEMORY;
	}
	uiw = (LIS_INT *)lis_malloc((nr+1)*sizeof(LIS_INT),"lis_matrix_split_vbr::uiw");
	if( uiw==NULL )
	{
		LIS_SETERR_MEM((nr+1)*sizeof(LIS_INT));
		return LIS_OUT_OF_MEMORY;
	}
	liw2 = (LIS_INT *)lis_malloc((nr+1)*sizeof(LIS_INT),"lis_matrix_split_vbr::liw2");
	if( liw2==NULL )
	{
		LIS_SETERR_MEM((nr+1)*sizeof(LIS_INT));
		return LIS_OUT_OF_MEMORY;
	}
	uiw2 = (LIS_INT *)lis_malloc((nr+1)*sizeof(LIS_INT),"lis_matrix_split_vbr::uiw2");
	if( uiw2==NULL )
	{
		LIS_SETERR_MEM((nr+1)*sizeof(LIS_INT));
		return LIS_OUT_OF_MEMORY;
	}
	#pragma omp parallel for private(i)
	for(i=0;i<nr+1;i++)
	{
		liw[i] = 0;
		uiw[i] = 0;
		liw2[i] = 0;
		uiw2[i] = 0;
	}
	#pragma omp parallel for private(i,j,jj)
	for(i=0;i<nr;i++)
	{
		for(j=A->bptr[i];j<A->bptr[i+1];j++)
		{
			jj = A->bindex[j];
			if( jj<i )
			{
				liw[i+1]++;
				liw2[i+1] += (A->row[i+1]-A->row[i]) * (A->col[jj+1]-A->col[jj]);
			}
			else if( jj>i )
			{
				uiw[i+1]++;
				uiw2[i+1] += (A->row[i+1]-A->row[i]) * (A->col[jj+1]-A->col[jj]);
			}
		}
	}
	/* prefix sums produce the bptr/ptr offsets used below */
	for(i=0;i<nr;i++)
	{
		liw[i+1] += liw[i];
		uiw[i+1] += uiw[i];
		liw2[i+1] += liw2[i];
		uiw2[i+1] += uiw2[i];
	}
	bnnzl = liw[nr];
	bnnzu = uiw[nr];
	nnzl = liw2[nr];
	nnzu = uiw2[nr];
	#else
	for(i=0;i<nr;i++)
	{
		for(j=A->bptr[i];j<A->bptr[i+1];j++)
		{
			jj = A->bindex[j];
			if( jj<i )
			{
				bnnzl++;
				nnzl += (A->row[i+1]-A->row[i]) * (A->col[jj+1]-A->col[jj]);
			}
			else if( jj>i )
			{
				bnnzu++;
				nnzu += (A->row[i+1]-A->row[i]) * (A->col[jj+1]-A->col[jj]);
			}
		}
	}
	#endif

	err = lis_matrix_LU_create(A);
	if( err )
	{
		return err;
	}
	err = lis_matrix_malloc_vbr(n,nnzl,nr,nc,bnnzl,&lrow,&lcol,&lptr,&lbptr,&lbindex,&lvalue);
	if( err )
	{
		return err;
	}
	err = lis_matrix_malloc_vbr(n,nnzu,nr,nc,bnnzu,&urow,&ucol,&uptr,&ubptr,&ubindex,&uvalue);
	if( err )
	{
		lis_free2(6,lptr,lbindex,lvalue,uptr,ubindex,uvalue);
		return err;
	}
	err = lis_matrix_diag_duplicateM(A,&D);
	if( err )
	{
		lis_free2(6,lptr,lbindex,lvalue,uptr,ubindex,uvalue);
		return err;
	}

	#ifdef _OPENMP
	/* L and U share A's block row/column partition */
	#pragma omp parallel for private(i)
	for(i=0;i<nr+1;i++)
	{
		lrow[i] = A->row[i];
		urow[i] = A->row[i];
	}
	#pragma omp parallel for private(i)
	for(i=0;i<nc+1;i++)
	{
		lcol[i] = A->col[i];
		ucol[i] = A->col[i];
	}
	#pragma omp parallel for private(i)
	for(i=0;i<nr+1;i++)
	{
		lbptr[i] = liw[i];
		ubptr[i] = uiw[i];
	}
	#pragma omp parallel for private(i)
	for(i=0;i<nr;i++)
	{
		lptr[lbptr[i]] = liw2[i];
		uptr[ubptr[i]] = uiw2[i];
	}
	/* copy pass: each block row writes into its own precomputed ranges */
	#pragma omp parallel for private(i,j,kl,ku,kbl,kbu,jj,bs)
	for(i=0;i<nr;i++)
	{
		kbl = lbptr[i];
		kbu = ubptr[i];
		kl = liw2[i];
		ku = uiw2[i];
		for(j=A->bptr[i];j<A->bptr[i+1];j++)
		{
			jj = A->bindex[j];
			if( jj<i )
			{
				lbindex[kbl] = jj;
				bs = (A->row[i+1]-A->row[i]) * (A->col[jj+1]-A->col[jj]);
				lptr[kbl+1] = lptr[kbl] + bs;
				memcpy(&lvalue[kl],&A->value[A->ptr[j]],bs*sizeof(LIS_SCALAR));
				kbl++;
				kl += bs;
			}
			else if( jj>i )
			{
				ubindex[kbu] = jj;
				bs = (A->row[i+1]-A->row[i]) * (A->col[jj+1]-A->col[jj]);
				uptr[kbu+1] = uptr[kbu] + bs;
				memcpy(&uvalue[ku],&A->value[A->ptr[j]],bs*sizeof(LIS_SCALAR));
				kbu++;
				ku += bs;
			}
			else
			{
				bs = (A->row[i+1]-A->row[i]) * (A->col[jj+1]-A->col[jj]);
				memcpy(D->v_value[i],&A->value[A->ptr[j]],bs*sizeof(LIS_SCALAR));
			}
		}
	}
	lis_free2(4,liw,uiw,liw2,uiw2);
	#else
	for(i=0;i<nr+1;i++)
	{
		lrow[i] = A->row[i];
		urow[i] = A->row[i];
	}
	for(i=0;i<nc+1;i++)
	{
		lcol[i] = A->col[i];
		ucol[i] = A->col[i];
	}
	/* second pass recounts while copying: nnz* = scalars, bnnz* = blocks */
	nnzl = 0;
	nnzu = 0;
	bnnzl = 0;
	bnnzu = 0;
	lptr[0] = 0;
	uptr[0] = 0;
	lbptr[0] = 0;
	ubptr[0] = 0;
	for(i=0;i<nr;i++)
	{
		for(j=A->bptr[i];j<A->bptr[i+1];j++)
		{
			jj = A->bindex[j];
			if( jj<i )
			{
				lbindex[bnnzl] = jj;
				bs = (A->row[i+1]-A->row[i]) * (A->col[jj+1]-A->col[jj]);
				memcpy(&lvalue[nnzl],&A->value[A->ptr[j]],bs*sizeof(LIS_SCALAR));
				nnzl += bs;
				bnnzl++;
				lptr[bnnzl] = nnzl;
			}
			else if( jj>i )
			{
				ubindex[bnnzu] = jj;
				bs = (A->row[i+1]-A->row[i]) * (A->col[jj+1]-A->col[jj]);
				memcpy(&uvalue[nnzu],&A->value[A->ptr[j]],bs*sizeof(LIS_SCALAR));
				nnzu += bs;
				bnnzu++;
				uptr[bnnzu] = nnzu;
			}
			else
			{
				bs = (A->row[i+1]-A->row[i]) * (A->col[jj+1]-A->col[jj]);
				memcpy(D->v_value[i],&A->value[A->ptr[j]],bs*sizeof(LIS_SCALAR));
			}
		}
		lbptr[i+1] = bnnzl;
		ubptr[i+1] = bnnzu;
	}
	#endif

	A->L->nr = nr;
	A->L->nc = nc;
	A->L->nnz = nnzl;
	A->L->bnnz = bnnzl;
	A->L->ptr = lptr;
	A->L->row = lrow;
	A->L->col = lcol;
	A->L->bptr = lbptr;
	A->L->bindex = lbindex;
	A->L->value = lvalue;
	A->U->nr = nr;
	A->U->nc = nc;
	A->U->nnz = nnzu;
	A->U->bnnz = bnnzu;
	A->U->ptr = uptr;
	A->U->row = urow;
	A->U->col = ucol;
	A->U->bptr = ubptr;
	A->U->bindex = ubindex;
	A->U->value = uvalue;
	A->D = D;
	A->is_splited = LIS_TRUE;

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}
#undef __FUNC__
#define __FUNC__ "lis_matrix_merge_vbr"
/* Rebuild a single VBR storage for A from its split parts: per block row,
 * the L blocks, then the diagonal block from A->D, then the U blocks.
 * Allocates fresh arrays and attaches them to A (A->bnnz is updated; the
 * previous split arrays in A->L / A->U are left in place).
 * NOTE(review): is_splited is not reset here — presumably the caller
 * handles that; confirm. */
LIS_INT lis_matrix_merge_vbr(LIS_MATRIX A)
{
	LIS_INT i,j,jj,n,nnz;
	LIS_INT bnnz,nr,nc,bs;
	LIS_INT err;
	LIS_INT *row,*col,*ptr,*bptr,*bindex;
	LIS_SCALAR *value;

	LIS_DEBUG_FUNC_IN;

	n = A->n;
	nr = A->nr;
	nc = A->nc;
	nnz = A->nnz;
	row = NULL;
	col = NULL;
	ptr = NULL;
	bptr = NULL;
	bindex = NULL;
	value = NULL;
	/* total blocks = lower + upper + one diagonal block per block row */
	bnnz = A->L->bnnz + A->U->bnnz + nr;

	err = lis_matrix_malloc_vbr(n,nnz,nr,nc,bnnz,&row,&col,&ptr,&bptr,&bindex,&value);
	if( err )
	{
		return err;
	}

	/* bnnz/nnz now become running output counters */
	bnnz = 0;
	nnz = 0;
	bptr[0] = 0;
	ptr[0] = 0;
	for(i=0;i<nr+1;i++)
	{
		row[i] = A->L->row[i];
	}
	for(i=0;i<nc+1;i++)
	{
		col[i] = A->L->col[i];
	}
	for(i=0;i<nr;i++)
	{
		/* strictly-lower blocks of block row i */
		for(j=A->L->bptr[i];j<A->L->bptr[i+1];j++)
		{
			jj = A->L->bindex[j];
			bindex[bnnz] = jj;
			bs = (A->L->row[i+1]-A->L->row[i]) * (A->L->col[jj+1]-A->L->col[jj]);
			memcpy(&value[nnz],&A->L->value[A->L->ptr[j]],bs*sizeof(LIS_SCALAR));
			bnnz++;
			nnz += bs;
			ptr[bnnz] = nnz;
		}
		/* diagonal block (bns[i] x bns[i] from A->D) */
		bindex[bnnz] = i;
		bs = A->D->bns[i] * A->D->bns[i];
		memcpy(&value[nnz],A->D->v_value[i],bs*sizeof(LIS_SCALAR));
		bnnz++;
		nnz += bs;
		ptr[bnnz] = nnz;
		/* strictly-upper blocks */
		for(j=A->U->bptr[i];j<A->U->bptr[i+1];j++)
		{
			jj = A->U->bindex[j];
			bindex[bnnz] = jj;
			bs = (A->U->row[i+1]-A->U->row[i]) * (A->U->col[jj+1]-A->U->col[jj]);
			memcpy(&value[nnz],&A->U->value[A->U->ptr[j]],bs*sizeof(LIS_SCALAR));
			bnnz++;
			nnz += bs;
			ptr[bnnz] = nnz;
		}
		bptr[i+1] = bnnz;
	}
	A->bnnz = bnnz;
	A->ptr = ptr;
	A->row = row;
	A->col = col;
	A->bptr = bptr;
	A->value = value;
	A->bindex = bindex;

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}
#undef __FUNC__
#define __FUNC__ "lis_matrix_solve_vbr"
/* Block triangular solve on a split VBR matrix: X = M^{-1} B with
 * M = L+D (LIS_MATRIX_LOWER), D+U (LIS_MATRIX_UPPER) or the SSOR sweep.
 * A->WD holds the pre-inverted diagonal blocks; w is scratch for one block.
 * NOTE(review): w is a fixed 1024-entry buffer — block dimension must not
 * exceed 1024; confirm an upstream guarantee. Several locals (k,ii,bnc,bs,
 * t0,t1,t2,b) are unused. */
LIS_INT lis_matrix_solve_vbr(LIS_MATRIX A, LIS_VECTOR B, LIS_VECTOR X, LIS_INT flag)
{
	LIS_INT i,j,k,ii,jj,nr,bnr,bnc,bs,dim,sz;
	LIS_SCALAR t0,t1,t2;
	LIS_SCALAR *b,*x,w[1024];

	LIS_DEBUG_FUNC_IN;

	nr = A->nr;
	b = B->value;
	x = X->value;

	switch(flag)
	{
	case LIS_MATRIX_LOWER:
		/* forward substitution over block rows */
		lis_vector_copy(B,X);
		for(i=0;i<nr;i++)
		{
			dim = A->L->row[i+1] - A->L->row[i];
			bnr = A->L->row[i];
			/* subtract contributions of already-solved block columns */
			for(j=A->L->bptr[i];j<A->L->bptr[i+1];j++)
			{
				jj = A->L->bindex[j];
				sz = A->L->col[jj+1] - A->L->col[jj];
				lis_array_matvec2(dim,sz,&A->L->value[A->L->ptr[j]],dim,&x[A->L->col[jj]],&x[bnr],LIS_SUB_VALUE);
			}
			/* multiply by the inverted diagonal block */
			lis_array_matvec2(dim,dim,A->WD->v_value[i],dim,&x[bnr],w,LIS_INS_VALUE);
			memcpy(&x[bnr],w,dim*sizeof(LIS_SCALAR));
		}
		break;
	case LIS_MATRIX_UPPER:
		/* backward substitution over block rows */
		lis_vector_copy(B,X);
		for(i=nr-1;i>=0;i--)
		{
			dim = A->U->row[i+1] - A->U->row[i];
			bnr = A->U->row[i];
			for(j=A->U->bptr[i];j<A->U->bptr[i+1];j++)
			{
				jj = A->U->bindex[j];
				sz = A->U->col[jj+1] - A->U->col[jj];
				lis_array_matvec2(dim,sz,&A->U->value[A->U->ptr[j]],dim,&x[A->U->col[jj]],&x[bnr],LIS_SUB_VALUE);
			}
			lis_array_matvec2(dim,dim,A->WD->v_value[i],dim,&x[bnr],w,LIS_INS_VALUE);
			memcpy(&x[bnr],w,dim*sizeof(LIS_SCALAR));
		}
		break;
	case LIS_MATRIX_SSOR:
		/* forward sweep followed by backward sweep */
		lis_vector_copy(B,X);
		for(i=0;i<nr;i++)
		{
			dim = A->L->row[i+1] - A->L->row[i];
			bnr = A->L->row[i];
			for(j=A->L->bptr[i];j<A->L->bptr[i+1];j++)
			{
				jj = A->L->bindex[j];
				sz = A->L->col[jj+1] - A->L->col[jj];
				lis_array_matvec2(dim,sz,&A->L->value[A->L->ptr[j]],dim,&x[A->L->col[jj]],&x[bnr],LIS_SUB_VALUE);
			}
			lis_array_matvec2(dim,dim,A->WD->v_value[i],dim,&x[bnr],w,LIS_INS_VALUE);
			memcpy(&x[bnr],w,dim*sizeof(LIS_SCALAR));
		}
		for(i=nr-1;i>=0;i--)
		{
			dim = A->U->row[i+1] - A->U->row[i];
			bnr = A->U->row[i];
			/* accumulate U*x into w, then subtract WD*w from x */
			memset(w,0,dim*sizeof(LIS_SCALAR));
			for(j=A->U->bptr[i];j<A->U->bptr[i+1];j++)
			{
				jj = A->U->bindex[j];
				sz = A->U->col[jj+1] - A->U->col[jj];
				lis_array_matvec2(dim,sz,&A->U->value[A->U->ptr[j]],dim,&x[A->U->col[jj]],w,LIS_ADD_VALUE);
			}
			lis_array_matvec2(dim,dim,A->WD->v_value[i],dim,w,&x[bnr],LIS_SUB_VALUE);
		}
		break;
	}
	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}
#undef __FUNC__
#define __FUNC__ "lis_matrix_solvet_vbr"
/* Transposed block triangular solve: X = M^{-T} B. Because (L+D)^T = D^T+L^T,
 * the LOWER case walks A->U forward (scattering updates to later block rows)
 * and the UPPER case walks A->L backward, mirroring lis_matrix_solve_vbr.
 * NOTE(review): same fixed w[1024] scratch limit as lis_matrix_solve_vbr;
 * several declared locals are unused. */
LIS_INT lis_matrix_solvet_vbr(LIS_MATRIX A, LIS_VECTOR B, LIS_VECTOR X, LIS_INT flag)
{
	LIS_INT i,j,k,ii,jj,nr,bnr,bnc,bs,dim,sz;
	LIS_SCALAR t0,t1,t2;
	LIS_SCALAR *b,*x,w[1024];

	LIS_DEBUG_FUNC_IN;

	nr = A->nr;
	b = B->value;
	x = X->value;

	switch(flag)
	{
	case LIS_MATRIX_LOWER:
		lis_vector_copy(B,X);
		for(i=0;i<nr;i++)
		{
			dim = A->U->row[i+1] - A->U->row[i];
			bnr = A->U->row[i];
			/* solve this block row, then scatter x_i into the rows that
			 * depend on it (row-oriented transposed substitution) */
			lis_array_matvec2(dim,dim,A->WD->v_value[i],dim,&x[bnr],w,LIS_INS_VALUE);
			memcpy(&x[bnr],w,dim*sizeof(LIS_SCALAR));
			for(j=A->U->bptr[i];j<A->U->bptr[i+1];j++)
			{
				jj = A->U->bindex[j];
				sz = A->U->col[jj+1] - A->U->col[jj];
				lis_array_matvec2(dim,sz,&A->U->value[A->U->ptr[j]],dim,&x[A->U->col[jj]],&x[bnr],LIS_SUB_VALUE);
			}
		}
		break;
	case LIS_MATRIX_UPPER:
		lis_vector_copy(B,X);
		for(i=nr-1;i>=0;i--)
		{
			dim = A->L->row[i+1] - A->L->row[i];
			bnr = A->L->row[i];
			lis_array_matvec2(dim,dim,A->WD->v_value[i],dim,&x[bnr],w,LIS_INS_VALUE);
			memcpy(&x[bnr],w,dim*sizeof(LIS_SCALAR));
			for(j=A->L->bptr[i];j<A->L->bptr[i+1];j++)
			{
				jj = A->L->bindex[j];
				sz = A->L->col[jj+1] - A->L->col[jj];
				lis_array_matvec2(dim,sz,&A->L->value[A->L->ptr[j]],dim,&x[A->L->col[jj]],&x[bnr],LIS_SUB_VALUE);
			}
		}
		break;
	case LIS_MATRIX_SSOR:
		/* transposed forward sweep (via U) then transposed backward (via L) */
		lis_vector_copy(B,X);
		for(i=0;i<nr;i++)
		{
			dim = A->U->row[i+1] - A->U->row[i];
			bnr = A->U->row[i];
			lis_array_matvec2(dim,dim,A->WD->v_value[i],dim,&x[bnr],w,LIS_INS_VALUE);
			for(j=A->U->bptr[i];j<A->U->bptr[i+1];j++)
			{
				jj = A->U->bindex[j];
				sz = A->U->col[jj+1] - A->U->col[jj];
				lis_array_matvec2(dim,sz,&A->U->value[A->U->ptr[j]],dim,w,&x[A->U->col[jj]],LIS_SUB_VALUE);
			}
		}
		for(i=nr-1;i>=0;i--)
		{
			dim = A->L->row[i+1] - A->L->row[i];
			bnr = A->L->row[i];
			lis_array_matvec2(dim,dim,A->WD->v_value[i],dim,&x[bnr],w,LIS_INS_VALUE);
			memcpy(&x[bnr],w,dim*sizeof(LIS_SCALAR));
			for(j=A->L->bptr[i];j<A->L->bptr[i+1];j++)
			{
				jj = A->L->bindex[j];
				sz = A->L->col[jj+1] - A->L->col[jj];
				lis_array_matvec2(dim,sz,&A->L->value[A->L->ptr[j]],dim,w,&x[A->L->col[jj]],LIS_SUB_VALUE);
			}
		}
		break;
	}
	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}
|
solver-omp-op1.c | #define lowerb(id, p, n) ( id * (n/p) + (id < (n%p) ? id : n%p) )
#define numElem(id, p, n) ( (n/p) + (id < (n%p)) )
#define upperb(id, p, n) ( lowerb(id, p, n) + numElem(id, p, n) - 1 )
#define min(a, b) ( (a < b) ? a : b )
#define max(a, b) ( (a > b) ? a : b )
#include "omp.h"
// Function to copy one matrix into another
// Copy the interior of matrix u into v (borders untouched), rows partitioned
// across OpenMP threads with the lowerb/upperb block macros.
// Fixed: omp_get_num_threads() was called OUTSIDE the parallel region, where
// it always returns 1 — every row fell to thread 0 while the others idled
// (threads with myid > 0 got an empty range). Calling it inside the region
// yields the real team size; the copied result is identical.
void copy_mat (double *u, double *v, unsigned sizex, unsigned sizey) {
  #pragma omp parallel
  {
    int numprocs = omp_get_num_threads();
    int myid = omp_get_thread_num();
    int i_start = lowerb(myid, numprocs, sizex);
    int i_end = upperb(myid, numprocs, sizex);
    for (int i=max(1, i_start); i<=min(sizex-2, i_end); i++) {
      for (int j=1; j<=sizey-2; j++)
        v[i*sizey+j] = u[i*sizey+j];
    }
  }
}
// 1D-blocked Jacobi solver: one iteration step
// 1D-blocked Jacobi solver: one iteration step.
// Writes the relaxed values into utmp and returns the accumulated squared
// residual (sum of diff^2) over all updated points.
double relax_jacobi (double *u, double *utmp, unsigned sizex, unsigned sizey) {
    double diff, sum=0.0;
    int nblocks = 4;
    // NOTE(review): omp_get_num_threads() outside a parallel region always
    // returns 1, so i_start is 0 only for thread 0 and out-of-range for all
    // others — confirm whether omp_get_max_threads() (as in relax_gauss) or
    // an in-region query was intended.
    int numprocs = omp_get_num_threads();
    #pragma omp parallel private(diff) reduction(+: sum)
    {
        int myid = omp_get_thread_num();
        int i_start = lowerb(myid, numprocs, sizex);
        // NOTE(review): i_end uses upperb(numprocs-1, ...), i.e. the LAST
        // block's upper bound, so every thread scans to the end of the grid;
        // work is instead filtered per-element below. Presumably intentional
        // for this "op1" exercise variant — verify.
        int i_end = upperb(numprocs-1, numprocs, sizex);
        for (int i=max(1, i_start); i<=i_end; i++) {
            for (int j=1; j<=sizey-2; j++)
            { int index = i*sizey+j;
                // cyclic distribution of flat indices over nblocks (=4) "owners";
                // NOTE(review): elements whose index%nblocks has no matching
                // thread id are never written to utmp this iteration — confirm
                // utmp already holds the previous values.
                if(index%nblocks == myid)
                {
                    // 4-point stencil average of the orthogonal neighbours
                    utmp[index] = 0.25 * ( u[ i*sizey + (j-1) ] + // left
                                           u[ i*sizey + (j+1) ] + // right
                                           u[ (i-1)*sizey + j ] + // top
                                           u[ (i+1)*sizey + j ] ) ;// bottom
                    diff = utmp[i*sizey+j] - u[i*sizey + j];
                    sum += diff * diff;
                }
            }
        }
    }
    return sum;
}
// 2D-blocked Gauss-Seidel solver: one iteration step
// 2D-blocked Gauss-Seidel solver: one iteration step.
// Updates u in place and returns the accumulated squared residual.
// Parallelized as a wavefront over a numprocs x numprocs grid of blocks
// using OpenMP doacross (ordered(2) + depend(sink/source)) synchronization.
double relax_gauss (double *u, unsigned sizex, unsigned sizey) {
    double unew, diff, sum=0.0;
    int numprocs=omp_get_max_threads();
    // ordered(2): the (r,c) loop nest forms the doacross iteration space;
    // only the r loop is workshared, the c loop runs sequentially per thread.
    #pragma omp parallel for ordered(2) private(unew,diff) reduction(+:sum)
    for (int r = 0; r < numprocs; ++r) {
        for (int c = 0; c < numprocs; ++c) {
            int r_start = lowerb(r, numprocs, sizex);
            int r_end = upperb(r, numprocs, sizex);
            int c_start = lowerb(c, numprocs, sizey);
            int c_end = upperb(c, numprocs, sizey);
            // wait until block (r-1, c) — the block above — is finished; the
            // left block (r, c-1) is executed by the same thread earlier in
            // its sequential c loop, so it needs no explicit sink.
            #pragma omp ordered depend(sink: r-1, c)
            for (int i=max(1, r_start); i<= min(sizex-2, r_end); i++) {
                for (int j=max(1, c_start); j<= min(sizey-2,c_end); j++) {
                    // Gauss-Seidel: neighbours above/left are already the NEW
                    // values, below/right are still the old ones
                    unew= 0.25 * ( u[ i*sizey + (j-1) ]+ // left
                                   u[ i*sizey + (j+1) ]+ // right
                                   u[ (i-1)*sizey + j ]+ // top
                                   u[ (i+1)*sizey + j ]); // bottom
                    diff = unew - u[i*sizey+ j];
                    sum += diff * diff;
                    u[i*sizey+j]=unew;
                }
            }
            // signal completion of block (r, c) to waiting successors
            #pragma omp ordered depend(source)
        }
    }
    return sum;
}
|
convolutiondepthwise_3x3_int8.h | // BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Depthwise 3x3 convolution, stride 1, int8 input, int32 accumulation.
// One 3x3 kernel per channel; results are accumulated into top_blob.
static void convdw3x3s1_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Option& opt)
{
    const int w = bottom_blob.w;
    //int h = bottom_blob.h;
    //int inch = bottom_blob.c;

    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    const signed char* kernel = _kernel;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);
        out.fill(0);

        // 9 int8 weights for this channel
        const signed char* k0 = (const signed char*)kernel + p * 9;
        int* outptr = out;

        const signed char* img0 = bottom_blob.channel(p);
        // the three input rows covered by the sliding 3x3 window
        const signed char* row0 = img0;
        const signed char* row1 = img0 + w;
        const signed char* row2 = img0 + w * 2;

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                const signed char* rows[3] = { row0, row1, row2 };

                int acc = 0;
                for (int ky = 0; ky < 3; ky++)
                    for (int kx = 0; kx < 3; kx++)
                        acc += (int)rows[ky][kx] * (int)k0[ky * 3 + kx];

                *outptr++ += acc;
                row0++;
                row1++;
                row2++;
            }
            // skip the 2-pixel right border of the (presumably padded,
            // w == outw + 2 — confirm with caller) input row
            row0 += 2;
            row1 += 2;
            row2 += 2;
        }
    }
}
// Depthwise 3x3 convolution, stride 2, int8 input, int32 accumulation.
static void convdw3x3s2_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Option& opt)
{
    const int w = bottom_blob.w;
    //int h = bottom_blob.h;
    //int inch = bottom_blob.c;

    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    // after outw horizontal steps of 2, jump over the row remainder plus one
    // whole input row (stride 2 vertically)
    const int tailstep = w - 2 * outw + w;

    const signed char* kernel = _kernel;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);
        out.fill(0);

        const signed char* k0 = (const signed char*)kernel + p * 9;
        int* outptr = out;

        const signed char* img0 = bottom_blob.channel(p);
        const signed char* row0 = img0;
        const signed char* row1 = img0 + w;
        const signed char* row2 = img0 + w * 2;

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                const signed char* rows[3] = { row0, row1, row2 };

                int acc = 0;
                for (int ky = 0; ky < 3; ky++)
                    for (int kx = 0; kx < 3; kx++)
                        acc += (int)rows[ky][kx] * (int)k0[ky * 3 + kx];

                *outptr++ += acc;
                // horizontal stride 2
                row0 += 2;
                row1 += 2;
                row2 += 2;
            }
            row0 += tailstep;
            row1 += tailstep;
            row2 += tailstep;
        }
    }
}
// Depthwise 3x3 convolution, stride 1, int8 input, dequantized float output:
// out = bias[p] + (int32 conv sum) * scales_dequant[p], accumulated per pixel.
static void convdw3x3s1_int8_dequant_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, std::vector<float> scales_dequant, const Option& opt)
{
    const int w = bottom_blob.w;
    //int h = bottom_blob.h;
    //int inch = bottom_blob.c;

    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    const signed char* kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);
        float* outptr = out;

        const float bias0 = bias ? bias[p] : 0.f;
        const float scale = scales_dequant[p];
        // seed every output with the per-channel bias before accumulating
        out.fill(bias0);

        const signed char* k0 = (const signed char*)kernel + p * 9;

        const signed char* img0 = bottom_blob.channel(p);
        const signed char* row0 = img0;
        const signed char* row1 = img0 + w;
        const signed char* row2 = img0 + w * 2;

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                const signed char* rows[3] = { row0, row1, row2 };

                int acc = 0;
                for (int ky = 0; ky < 3; ky++)
                    for (int kx = 0; kx < 3; kx++)
                        acc += (int)rows[ky][kx] * (int)k0[ky * 3 + kx];

                *outptr++ += (float)acc * scale;
                row0++;
                row1++;
                row2++;
            }
            // skip the 2-pixel right border (stride 1)
            row0 += 2;
            row1 += 2;
            row2 += 2;
        }
    }
}
// Depthwise 3x3 convolution, stride 2, int8 input, dequantized float output:
// out = bias[p] + (int32 conv sum) * scales_dequant[p], accumulated per pixel.
static void convdw3x3s2_int8_dequant_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, std::vector<float> scales_dequant, const Option& opt)
{
    const int w = bottom_blob.w;
    //int h = bottom_blob.h;
    //int inch = bottom_blob.c;

    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    // row remainder after outw steps of 2, plus one full row (vertical stride 2)
    const int tailstep = w - 2 * outw + w;

    const signed char* kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);
        float* outptr = out;

        const float bias0 = bias ? bias[p] : 0.f;
        const float scale = scales_dequant[p];
        out.fill(bias0);

        const signed char* k0 = (const signed char*)kernel + p * 9;

        const signed char* img0 = bottom_blob.channel(p);
        const signed char* row0 = img0;
        const signed char* row1 = img0 + w;
        const signed char* row2 = img0 + w * 2;

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                const signed char* rows[3] = { row0, row1, row2 };

                int acc = 0;
                for (int ky = 0; ky < 3; ky++)
                    for (int kx = 0; kx < 3; kx++)
                        acc += (int)rows[ky][kx] * (int)k0[ky * 3 + kx];

                *outptr++ += (float)acc * scale;
                row0 += 2;
                row1 += 2;
                row2 += 2;
            }
            row0 += tailstep;
            row1 += tailstep;
            row2 += tailstep;
        }
    }
}
// Depthwise 3x3 convolution, stride 1, int8 input, requantized int8 output:
// out = float2int8((sum * scale_in + bias[p]) * scale_out), overwriting (not
// accumulating) each output pixel. scales_requant holds (in, out) pairs per
// channel.
static void convdw3x3s1_int8_requant_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, std::vector<float> scales_requant, const Option& opt)
{
    const int w = bottom_blob.w;
    //int h = bottom_blob.h;
    //int inch = bottom_blob.c;

    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    const signed char* kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);
        signed char* outptr = out;

        const float bias0 = bias ? bias[p] : 0.f;
        const float scale_in = scales_requant[2 * p];
        const float scale_out = scales_requant[2 * p + 1];

        const signed char* k0 = (const signed char*)kernel + p * 9;

        const signed char* img0 = bottom_blob.channel(p);
        const signed char* row0 = img0;
        const signed char* row1 = img0 + w;
        const signed char* row2 = img0 + w * 2;

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                const signed char* rows[3] = { row0, row1, row2 };

                int acc = 0;
                for (int ky = 0; ky < 3; ky++)
                    for (int kx = 0; kx < 3; kx++)
                        acc += (int)rows[ky][kx] * (int)k0[ky * 3 + kx];

                *outptr++ = float2int8(((float)acc * scale_in + bias0) * scale_out);
                row0++;
                row1++;
                row2++;
            }
            // skip the 2-pixel right border (stride 1)
            row0 += 2;
            row1 += 2;
            row2 += 2;
        }
    }
}
// Depthwise 3x3 convolution, stride 2, int8 input, requantized int8 output:
// out = float2int8((sum * scale_in + bias[p]) * scale_out), overwriting each
// output pixel. scales_requant holds (in, out) pairs per channel.
static void convdw3x3s2_int8_requant_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, std::vector<float> scales_requant, const Option& opt)
{
    const int w = bottom_blob.w;
    //int h = bottom_blob.h;
    //int inch = bottom_blob.c;

    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    // row remainder after outw steps of 2, plus one full row (vertical stride 2)
    const int tailstep = w - 2 * outw + w;

    const signed char* kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);
        signed char* outptr = out;

        const float bias0 = bias ? bias[p] : 0.f;
        const float scale_in = scales_requant[2 * p];
        const float scale_out = scales_requant[2 * p + 1];

        const signed char* k0 = (const signed char*)kernel + p * 9;

        const signed char* img0 = bottom_blob.channel(p);
        const signed char* row0 = img0;
        const signed char* row1 = img0 + w;
        const signed char* row2 = img0 + w * 2;

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                const signed char* rows[3] = { row0, row1, row2 };

                int acc = 0;
                for (int ky = 0; ky < 3; ky++)
                    for (int kx = 0; kx < 3; kx++)
                        acc += (int)rows[ky][kx] * (int)k0[ky * 3 + kx];

                *outptr++ = float2int8(((float)acc * scale_in + bias0) * scale_out);
                row0 += 2;
                row1 += 2;
                row2 += 2;
            }
            row0 += tailstep;
            row1 += tailstep;
            row2 += tailstep;
        }
    }
}
GB_binop__pair_int8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__pair_int8)
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__pair_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__pair_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pair_int8)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: int8_t
// A type: int8_t
// A pattern? 1
// B type: int8_t
// B pattern? 1
// BinaryOp: cij = 1
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
;
// true if values of A are not used
#define GB_A_IS_PATTERN \
1 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
;
// true if values of B are not used
#define GB_B_IS_PATTERN \
1 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = 1 ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PAIR || GxB_NO_INT8 || GxB_NO_PAIR_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Dense ewise3 without accumulation: C = A+B where C, A and B are all dense.
// The loop body lives in the included template, which expands using the GB_*
// macros defined earlier in this generated file (for PAIR, cij is simply 1).
void GB (_Cdense_ewise3_noaccum__pair_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C, sliced over
// B_ntasks tasks / B_nthreads threads. Returns GrB_NO_VALUE when this kernel
// is compiled out via GB_DISABLE (caller falls back to the generic path).
GrB_Info GB (_Cdense_accumB__pair_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // the actual accumulation loop, expanded from the GB_* macros above
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into the dense matrix C.
// p_bwork points to the scalar, passed type-erased as GB_void.
GrB_Info GB (_Cdense_accumb__pair_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the block above always returns first (generator artifact)
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd / eWiseUnion: C = A+B, C<M> = A+B, or C<!M> = A+B with the PAIR
// operator (every result entry is 1). For eWiseUnion, alpha/beta scalars
// substitute for entries missing in A or B respectively.
GrB_Info GB (_AaddB__pair_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces for slicing M, A, and B; freed by GB_FREE_WORKSPACE below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    int8_t alpha_scalar ;
    int8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // unpack the type-erased alpha/beta scalars for eWiseUnion
        alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
; ;
Cx [p] = 1 ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
; ;
Cx [p] = 1 ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = 1 ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = 1 ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
GB_binop__lxor_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lxor_fp64)
// A.*B function (eWiseMult): GB (_AemultB_01__lxor_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__lxor_fp64)
// A.*B function (eWiseMult): GB (_AemultB_03__lxor_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lxor_fp64)
// A*D function (colscale): GB (_AxD__lxor_fp64)
// D*A function (rowscale): GB (_DxB__lxor_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__lxor_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__lxor_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lxor_fp64)
// C=scalar+B GB (_bind1st__lxor_fp64)
// C=scalar+B' GB (_bind1st_tran__lxor_fp64)
// C=A+scalar GB (_bind2nd__lxor_fp64)
// C=A'+scalar GB (_bind2nd_tran__lxor_fp64)
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = ((aij != 0) != (bij != 0))
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
double bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = ((x != 0) != (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LXOR || GxB_NO_FP64 || GxB_NO_LXOR_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Dense ewise3 without accumulation: C = A+B (LXOR semantics) where C, A and
// B are all dense. The loop body comes from the included template, expanded
// with the GB_* macros defined earlier in this generated file.
GrB_Info GB (_Cdense_ewise3_noaccum__lxor_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C, sliced over
// B_ntasks tasks / B_nthreads threads. Returns GrB_NO_VALUE when this kernel
// is compiled out via GB_DISABLE (caller falls back to the generic path).
GrB_Info GB (_Cdense_accumB__lxor_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // the actual accumulation loop, expanded from the GB_* macros above
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into the dense matrix C.
// p_bwork points to the scalar, passed type-erased as GB_void.
GrB_Info GB (_Cdense_accumb__lxor_fp64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the block above always returns first (generator artifact)
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column scale, where D is a diagonal matrix.
// The *_is_pattern flags indicate the values of A or D are not needed.
GrB_Info GB (_AxD__lxor_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the template writes results directly into C's value array
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row scale, where D is a diagonal matrix.
// The *_is_pattern flags indicate the values of D or B are not needed.
GrB_Info GB (_DxB__lxor_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the template writes results directly into C's value array
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B with the LXOR operator:
// cij = ((aij != 0) != (bij != 0)).
GrB_Info GB (_AaddB__lxor_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces for slicing M, A, and B; released by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult (method 01): C = A.*B or C<M> = A.*B where C is sparse/hyper.
GrB_Info GB (_AemultB_01__lxor_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full. flipxy requests fmult(y,x) instead of fmult(x,y); it only
// matters when GB_BINOP_FLIP is set (LXOR is commutative, so it is 0 here
// and the #else branch is compiled).
GrB_Info GB (_AemultB_02__lxor_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Generated eWiseMult kernel (method 03) for LXOR on fp64; loop body supplied
// by GB_emult_03_template.c.
GrB_Info GB (_AemultB_03__lxor_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Generated eWiseMult kernel for a bitmap result, LXOR on fp64; loop body
// supplied by GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__lxor_fp64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// z = lxor (x, bij): the scalar x is bound as the first operand.
GrB_Info GB (_bind1st__lxor_fp64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double *Bx = (double *) Bx_input ;
    const double x = (*((double *) x_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // only operate on entries present in the bitmap of B
        if (GBB (Bb, p))
        {
            Cx [p] = ((x != 0) != (Bx [p] != 0)) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// z = lxor (aij, y): the scalar y is bound as the second operand.
GrB_Info GB (_bind2nd__lxor_fp64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    const double y = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // only operate on entries present in the bitmap of A
        if (GBB (Ab, p))
        {
            Cx [p] = ((Ax [p] != 0) != (y != 0)) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c, included below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    double aij = Ax [pA] ;                      \
    Cx [pC] = ((x != 0) != (aij != 0)) ;        \
}
GrB_Info GB (_bind1st_tran__lxor_fp64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // re-establish GB_ATYPE for the kernels that follow in this file
    #undef GB_ATYPE
    #define GB_ATYPE \
    double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c, included below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    double aij = Ax [pA] ;                      \
    Cx [pC] = ((aij != 0) != (y != 0)) ;        \
}
GrB_Info GB (_bind2nd_tran__lxor_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
omp2.c | // RUN: mlir-clang %s --function=* -fopenmp -S | FileCheck %s
// Fills x[i][j] = i + j over the 2D iteration space i in [sstart,send) by
// sinc, j in [tstart,tend) by tinc.  The two loops are collapsed into one
// OpenMP worksharing loop; the FileCheck lines below pin the expected
// scf.parallel lowering, so this code must not change.
void square2(double** x, int sstart, int send, int sinc, int tstart, int tend, int tinc) {
#pragma omp parallel for collapse(2)
    for(int i=sstart; i < send; i+= sinc) {
        for(int j=tstart; j < tend; j+= tinc) {
            x[i][j] = i + j;
        }
    }
}
// CHECK: func @square2(%arg0: memref<?xmemref<?xf64>>, %arg1: i32, %arg2: i32, %arg3: i32, %arg4: i32, %arg5: i32, %arg6: i32) attributes {llvm.linkage = #llvm.linkage<external>} {
// CHECK-NEXT: %c1 = arith.constant 1 : index
// CHECK-DAG: %[[i0:.+]] = arith.index_cast %arg1 : i32 to index
// CHECK-DAG: %[[i1:.+]] = arith.index_cast %arg2 : i32 to index
// CHECK-DAG: %[[i2:.+]] = arith.index_cast %arg3 : i32 to index
// CHECK-DAG: %[[i3:.+]] = arith.index_cast %arg4 : i32 to index
// CHECK-DAG: %[[i4:.+]] = arith.index_cast %arg5 : i32 to index
// CHECK-DAG: %[[i5:.+]] = arith.index_cast %arg6 : i32 to index
// CHECK-DAG: %6 = arith.subi %[[i1]], %[[i0]] : index
// CHECK-NEXT: %7 = arith.subi %6, %c1 : index
// CHECK-NEXT: %8 = arith.addi %7, %[[i2]] : index
// CHECK-NEXT: %9 = arith.divui %8, %[[i2]] : index
// CHECK-NEXT: %10 = arith.muli %9, %[[i2]] : index
// CHECK-NEXT: %11 = arith.addi %[[i0]], %10 : index
// CHECK-NEXT: %12 = arith.subi %[[i4]], %[[i3]] : index
// CHECK-NEXT: %13 = arith.subi %12, %c1 : index
// CHECK-NEXT: %14 = arith.addi %13, %[[i5]] : index
// CHECK-NEXT: %15 = arith.divui %14, %[[i5]] : index
// CHECK-NEXT: %16 = arith.muli %15, %[[i5]] : index
// CHECK-NEXT: %17 = arith.addi %[[i3:.+]], %16 : index
// CHECK-NEXT: scf.parallel (%arg7, %arg8) = (%[[i0]], %[[i3]]) to (%11, %17) step (%[[i2]], %[[i5]]) {
// CHECK-NEXT: %18 = arith.index_cast %arg7 : index to i64
// CHECK-NEXT: %19 = arith.index_cast %arg8 : index to i64
// CHECK-NEXT: %20 = memref.load %arg0[%arg7] : memref<?xmemref<?xf64>>
// CHECK-NEXT: %21 = arith.addi %18, %19 : i64
// CHECK-NEXT: %22 = arith.sitofp %21 : i64 to f64
// CHECK-NEXT: memref.store %22, %20[%arg8] : memref<?xf64>
// CHECK-NEXT: scf.yield
// CHECK-NEXT: }
// CHECK-NEXT: return
// CHECK-NEXT: }
|
FRICP.h | #ifndef FRICP_H
#define FRICP_H
#include "ICP.h"
#include <AndersonAcceleration.h>
#include <unsupported/Eigen/MatrixFunctions>
#include "median.h"
#include <cmath>
#include <limits>
#include <type_traits>
#include <vector>
#define SAME_THRESHOLD 1e-6
template<class T>
typename std::enable_if<!std::numeric_limits<T>::is_integer, bool>::type
almost_equal(T x, T y, int ulp)
{
    // Scale the machine epsilon to the magnitude of the operands and the
    // requested tolerance in ULPs (units in the last place).
    const T diff = std::fabs(x - y);
    const T scaled_eps = std::numeric_limits<T>::epsilon() * std::fabs(x + y) * ulp;
    // Subnormal differences compare equal regardless of the ULP scaling.
    return diff <= scaled_eps || diff < std::numeric_limits<T>::min();
}
template<int N>
class FRICP
{
public:
typedef double Scalar;
typedef Eigen::Matrix<Scalar, N, Eigen::Dynamic> MatrixNX;
typedef Eigen::Matrix<Scalar, N, N> MatrixNN;
typedef Eigen::Matrix<Scalar, N+1, N+1> AffineMatrixN;
typedef Eigen::Transform<Scalar, N, Eigen::Affine> AffineNd;
typedef Eigen::Matrix<Scalar, N, 1> VectorN;
typedef nanoflann::KDTreeAdaptor<MatrixNX, N, nanoflann::metric_L2_Simple> KDtree;
typedef Eigen::Matrix<Scalar, 6, 1> Vector6;
double test_total_construct_time=.0;
double test_total_solve_time=.0;
int test_total_iters=0;
FRICP(){};
~FRICP(){};
private:
// Principal matrix logarithm of a rigid-transform matrix via the real Schur
// decomposition T = U R U^T.  Equal-diagonal pairs in the quasi-triangular
// factor R correspond to 2x2 rotation blocks; each block's angle theta fills
// the skew-symmetric block mat_B of the log, and mat_V maps the translation
// column.  Returns U * log(R) * U^T.
AffineMatrixN LogMatrix(const AffineMatrixN& T)
{
    Eigen::RealSchur<AffineMatrixN> schur(T);
    AffineMatrixN U = schur.matrixU();
    AffineMatrixN R = schur.matrixT();
    std::vector<bool> selected(N, true);
    MatrixNN mat_B = MatrixNN::Zero(N, N);
    MatrixNN mat_V = MatrixNN::Identity(N, N);
    for (int i = 0; i < N; i++)
    {
        // diagonal entries equal to 1 carry no rotation; skip them
        if (selected[i] && fabs(R(i, i) - 1)> SAME_THRESHOLD)
        {
            int pair_second = -1;
            // find the partner index of this 2x2 rotation block
            for (int j = i + 1; j <N; j++)
            {
                if (fabs(R(j, j) - R(i, i)) < SAME_THRESHOLD)
                {
                    pair_second = j;
                    selected[j] = false;
                    break;
                }
            }
            if (pair_second > 0)
            {
                selected[i] = false;
                // clamp into acos domain against round-off
                R(i, i) = R(i, i) < -1 ? -1 : R(i, i);
                double theta = acos(R(i, i));
                // off-diagonal sign determines the rotation direction
                if (R(i, pair_second) < 0)
                {
                    theta = -theta;
                }
                mat_B(i, pair_second) += theta;
                mat_B(pair_second, i) += -theta;
                mat_V(i, pair_second) += -theta / 2;
                mat_V(pair_second, i) += theta / 2;
                double coeff = 1 - (theta * R(i, pair_second)) / (2 * (1 - R(i, i)));
                mat_V(i, i) += -coeff;
                mat_V(pair_second, pair_second) += -coeff;
            }
        }
    }
    AffineMatrixN LogTrim = AffineMatrixN::Zero();
    LogTrim.block(0, 0, N, N) = mat_B;
    LogTrim.block(0, N, N, 1) = mat_V * R.block(0, N, N, 1);
    // rotate the log back out of the Schur basis
    AffineMatrixN res = U * LogTrim * U.transpose();
    return res;
}
// Pack a rigid transform into a 6-vector: [euler angles (X,Y,Z order); translation].
inline Vector6 RotToEuler(const AffineNd& T)
{
    Vector6 pose;
    pose.head(3) = T.rotation().eulerAngles(0, 1, 2);
    pose.tail(3) = T.translation();
    return pose;
}
// Inverse of RotToEuler: build a homogeneous transform from
// v = [euler X,Y,Z angles; translation].
// NOTE(review): uses Vector3, presumably declared in an included header
// (ICP.h) — only meaningful for N == 3; confirm before instantiating otherwise.
inline AffineMatrixN EulerToRot(const Vector6& v)
{
    MatrixNN s (Eigen::AngleAxis<Scalar>(v(0), Vector3::UnitX())
                * Eigen::AngleAxis<Scalar>(v(1), Vector3::UnitY())
                * Eigen::AngleAxis<Scalar>(v(2), Vector3::UnitZ()));
    AffineMatrixN m = AffineMatrixN::Zero();
    m.block(0,0,3,3) = s;
    m(3,3) = 1;
    m.col(3).head(3) = v.tail(3);
    return m;
}
// Flatten a 4x4 twist (log-space) matrix [ [w]_x u ; 0 0 ] into the
// 6-vector (w, u): rotation coefficients first, translation part last.
inline Vector6 LogToVec(const Eigen::Matrix4d& LogT)
{
    Vector6 v;
    v << -LogT(1, 2), LogT(0, 2), -LogT(0, 1),
         LogT(0, 3), LogT(1, 3), LogT(2, 3);
    return v;
}
// Inverse of LogToVec: expand v = (w, u) into the 4x4 twist matrix
// [ [w]_x u ; 0 0 ].  The (3,3) entry is 0, not 1 — this is a log-space
// matrix, not an affine transform.
inline AffineMatrixN VecToLog(const Vector6& v)
{
    AffineMatrixN m = AffineMatrixN::Zero();
    m << 0, -v[2], v[1], v[3],
         v[2], 0, -v[0], v[4],
         -v[1], v[0], 0, v[5],
         0, 0, 0, 0;
    return m;
}
/// Median k-nearest-neighbor distance of a point cloud to itself.
/// @param kdtree  KD-tree built over X (self-query).
/// @param X       point cloud, one point per column.
/// @param nk      number of neighbors to query (first hit is the query point).
/// @return sqrt of the median (over all points) of each point's median
///         squared distance to its nk-1 nearest neighbors.
double FindKnearestMed(const KDtree& kdtree,
    const MatrixNX& X, int nk)
{
    Eigen::VectorXd X_nearest(X.cols());
#pragma omp parallel for
    for(int i = 0; i<X.cols(); i++)
    {
        // RAII buffers (was raw new[]/delete[]): no leak if query/median throws.
        std::vector<int> id(nk);
        std::vector<double> dist(nk);
        kdtree.query(X.col(i).data(), nk, id.data(), dist.data());
        Eigen::VectorXd k_dist = Eigen::Map<Eigen::VectorXd>(dist.data(), nk);
        // skip entry 0: it is the query point itself
        igl::median(k_dist.tail(nk-1), X_nearest[i]);
    }
    double med;
    igl::median(X_nearest, med);
    // KD-tree distances are squared; report an actual distance
    return sqrt(med);
}
/// Find self normal edge median of point cloud
/// @param kdtree  KD-tree built over X (self-query).
/// @param X       point cloud, one point per column.
/// @param nk      number of neighbors to query (first hit is the query point).
/// @param norm_x  per-point normals, one per column of X.
/// @return median (over all points) of each point's median neighbor offset
///         projected onto that point's normal.
double FindKnearestNormMed(const KDtree& kdtree, const Eigen::Matrix3Xd & X, int nk, const Eigen::Matrix3Xd & norm_x)
{
    Eigen::VectorXd X_nearest(X.cols());
#pragma omp parallel for
    for(int i = 0; i<X.cols(); i++)
    {
        // RAII buffers (was raw new[]/delete[]): no leak if query/median throws.
        std::vector<int> id(nk);
        std::vector<double> dist(nk);
        kdtree.query(X.col(i).data(), nk, id.data(), dist.data());
        Eigen::VectorXd k_dist = Eigen::Map<Eigen::VectorXd>(dist.data(), nk);
        // replace euclidean distances by offsets along the query point's normal
        for(int s = 1; s<nk; s++)
        {
            k_dist[s] = std::abs((X.col(id[s]) - X.col(id[0])).dot(norm_x.col(id[0])));
        }
        igl::median(k_dist.tail(nk-1), X_nearest[i]);
    }
    double med;
    igl::median(X_nearest, med);
    return med;
}
/// Weighted closed-form rigid alignment (Kabsch/Umeyama): the rigid T
/// minimizing sum_i w_i |T x_i - y_i|^2.
/// @param X  source points, one per column (demeaned in place, then restored).
/// @param Y  target points, one per column (demeaned in place, then restored).
/// @param w  per-pair weights.
/// @return the optimal rigid transform.
template <typename Derived1, typename Derived2, typename Derived3>
AffineNd point_to_point(Eigen::MatrixBase<Derived1>& X,
    Eigen::MatrixBase<Derived2>& Y,
    const Eigen::MatrixBase<Derived3>& w) {
    int dim = X.rows();
    /// Normalize weight vector
    Eigen::VectorXd w_normalized = w / w.sum();
    /// De-mean
    Eigen::VectorXd X_mean(dim), Y_mean(dim);
    for (int i = 0; i<dim; ++i) {
        X_mean(i) = (X.row(i).array()*w_normalized.transpose().array()).sum();
        Y_mean(i) = (Y.row(i).array()*w_normalized.transpose().array()).sum();
    }
    X.colwise() -= X_mean;
    Y.colwise() -= Y_mean;
    /// Compute transformation
    AffineNd transformation;
    // weighted cross-covariance; its SVD yields the optimal rotation
    MatrixXX sigma = X * w_normalized.asDiagonal() * Y.transpose();
    Eigen::JacobiSVD<MatrixXX> svd(sigma, Eigen::ComputeFullU | Eigen::ComputeFullV);
    if (svd.matrixU().determinant()*svd.matrixV().determinant() < 0.0) {
        // reflection case: flip the last singular direction to get a rotation
        VectorN S = VectorN::Ones(dim); S(dim-1) = -1.0;
        transformation.linear() = svd.matrixV()*S.asDiagonal()*svd.matrixU().transpose();
    }
    else {
        transformation.linear() = svd.matrixV()*svd.matrixU().transpose();
    }
    transformation.translation() = Y_mean - transformation.linear()*X_mean;
    /// Re-apply mean
    X.colwise() += X_mean;
    Y.colwise() += Y_mean;
    /// Return transformation
    return transformation;
}
/// Weighted point-to-plane alignment, linearized for small angles: solves the
/// 6x6 normal equations for (rotation, translation) minimizing
/// sum_i w_i (n_i . (x_i - y_i) - u_i)^2.
/// @param X     source points (demeaned in place, then restored).
/// @param Y     matched target points (demeaned in place, then restored).
/// @param Norm  target normals, one per column.
/// @param w     per-pair weights.
/// @param u     per-pair plane offsets.
/// @return the estimated rigid transform.
template <typename Derived1, typename Derived2, typename Derived3, typename Derived4, typename Derived5>
Eigen::Affine3d point_to_plane(Eigen::MatrixBase<Derived1>& X,
    Eigen::MatrixBase<Derived2>& Y,
    const Eigen::MatrixBase<Derived3>& Norm,
    const Eigen::MatrixBase<Derived4>& w,
    const Eigen::MatrixBase<Derived5>& u) {
    typedef Eigen::Matrix<double, 6, 6> Matrix66;
    typedef Eigen::Matrix<double, 6, 1> Vector6;
    typedef Eigen::Block<Matrix66, 3, 3> Block33;
    /// Normalize weight vector
    Eigen::VectorXd w_normalized = w / w.sum();
    /// De-mean
    Eigen::Vector3d X_mean;
    for (int i = 0; i<3; ++i)
        X_mean(i) = (X.row(i).array()*w_normalized.transpose().array()).sum();
    X.colwise() -= X_mean;
    Y.colwise() -= X_mean;
    /// Prepare LHS and RHS
    Matrix66 LHS = Matrix66::Zero();
    Vector6 RHS = Vector6::Zero();
    Block33 TL = LHS.topLeftCorner<3, 3>();
    Block33 TR = LHS.topRightCorner<3, 3>();
    Block33 BR = LHS.bottomRightCorner<3, 3>();
    // C.col(i) = x_i cross n_i, the rotational lever arm of each pair
    Eigen::MatrixXd C = Eigen::MatrixXd::Zero(3, X.cols());
#pragma omp parallel
    {
#pragma omp for
        for (int i = 0; i<X.cols(); i++) {
            C.col(i) = X.col(i).cross(Norm.col(i));
        }
        // each section writes a disjoint block (TL, TR, BR, RHS), so the
        // sections may run concurrently without synchronization
#pragma omp sections nowait
        {
#pragma omp section
            for (int i = 0; i<X.cols(); i++) TL.selfadjointView<Eigen::Upper>().rankUpdate(C.col(i), w(i));
#pragma omp section
            for (int i = 0; i<X.cols(); i++) TR += (C.col(i)*Norm.col(i).transpose())*w(i);
#pragma omp section
            for (int i = 0; i<X.cols(); i++) BR.selfadjointView<Eigen::Upper>().rankUpdate(Norm.col(i), w(i));
#pragma omp section
            for (int i = 0; i<C.cols(); i++) {
                double dist_to_plane = -((X.col(i) - Y.col(i)).dot(Norm.col(i)) - u(i))*w(i);
                RHS.head<3>() += C.col(i)*dist_to_plane;
                RHS.tail<3>() += Norm.col(i)*dist_to_plane;
            }
        }
    }
    // only the upper triangle was filled; mirror it
    LHS = LHS.selfadjointView<Eigen::Upper>();
    /// Compute transformation
    Eigen::Affine3d transformation;
    Eigen::LDLT<Matrix66> ldlt(LHS);
    RHS = ldlt.solve(RHS);
    // solution RHS = [rotation angles about X,Y,Z; translation]
    transformation = Eigen::AngleAxisd(RHS(0), Eigen::Vector3d::UnitX()) *
                     Eigen::AngleAxisd(RHS(1), Eigen::Vector3d::UnitY()) *
                     Eigen::AngleAxisd(RHS(2), Eigen::Vector3d::UnitZ());
    transformation.translation() = RHS.tail<3>();
    /// Apply transformation
    /// Re-apply mean
    X.colwise() += X_mean;
    Y.colwise() += X_mean;
    transformation.translation() += X_mean - transformation.linear()*X_mean;
    /// Return transformation
    return transformation;
}
/// One Gauss-Newton step for weighted point-to-plane registration in the
/// twist (Lie-algebra) parameterization.  Builds the Jacobian of the
/// residuals r_i = sqrt(w_i) * n_i . (R x_i + t - y_i) with respect to the
/// six twist coordinates of Tk, then solves the normal equations.
/// @param X       source points, one per column.
/// @param Y       matched target points.
/// @param norm_y  target normals.
/// @param w       per-pair weights.
/// @param Tk      current transform (4x4 homogeneous matrix).
/// @param dir     [out] Gauss-Newton step in twist coordinates.
/// @return g^T d, the directional derivative along dir (negative for descent).
template <typename Derived1, typename Derived2, typename Derived3, typename Derived4>
double point_to_plane_gaussnewton(const Eigen::MatrixBase<Derived1>& X,
    const Eigen::MatrixBase<Derived2>& Y,
    const Eigen::MatrixBase<Derived3>& norm_y,
    const Eigen::MatrixBase<Derived4>& w,
    Matrix44 Tk, Vector6& dir) {
    typedef Eigen::Matrix<double, 6, 6> Matrix66;
    typedef Eigen::Matrix<double, 12, 6> Matrix126;
    typedef Eigen::Matrix<double, 9, 3> Matrix93;
    typedef Eigen::Block<Matrix126, 9, 3> Block93;
    typedef Eigen::Block<Matrix126, 3, 3> Block33;
    typedef Eigen::Matrix<double, 12, 1> Vector12;
    typedef Eigen::Matrix<double, 9, 1> Vector9;
    typedef Eigen::Matrix<double, 4, 2> Matrix42;
    /// Normalize weight vector
    Eigen::VectorXd w_normalized = w / w.sum();
    /// Prepare LHS and RHS
    Matrix66 LHS = Matrix66::Zero();
    Vector6 RHS = Vector6::Zero();
    // twist coordinates (a,b,c | u) of the current transform
    Vector6 log_T = LogToVec(LogMatrix(Tk));
    Matrix33 B = VecToLog(log_T).block(0, 0, 3, 3);
    double a = log_T[0];
    double b = log_T[1];
    double c = log_T[2];
    Matrix33 R = Tk.block(0, 0, 3, 3);
    Vector3 t = Tk.block(0, 3, 3, 1);
    Vector3 u = log_T.tail(3);
    // stacked derivatives d(B)/dw and d(B^2)/dw of the skew matrix and its
    // square with respect to the rotation coordinates w = (a,b,c)
    Matrix93 dbdw = Matrix93::Zero();
    dbdw(1, 2) = dbdw(5, 0) = dbdw(6, 1) = -1;
    dbdw(2, 1) = dbdw(3, 2) = dbdw(7, 0) = 1;
    Matrix93 db2dw = Matrix93::Zero();
    db2dw(3, 1) = db2dw(4, 0) = db2dw(6, 2) = db2dw(8, 0) = a;
    db2dw(0, 1) = db2dw(1, 0) = db2dw(7, 2) = db2dw(8, 1) = b;
    db2dw(0, 2) = db2dw(2, 0) = db2dw(4, 2) = db2dw(5, 1) = c;
    db2dw(1, 1) = db2dw(2, 2) = -2 * a;
    db2dw(3, 0) = db2dw(5, 2) = -2 * b;
    db2dw(6, 0) = db2dw(7, 1) = -2 * c;
    double theta = std::sqrt(a*a + b*b + c*c);
    double st = sin(theta), ct = cos(theta);
    // Rodrigues coefficients and their derivatives; theta -> 0 uses the limit
    Matrix42 coeff = Matrix42::Zero();
    if (theta>SAME_THRESHOLD)
    {
        coeff << st / theta, (1 - ct) / (theta*theta),
            (theta*ct - st) / (theta*theta*theta), (theta*st - 2 * (1 - ct)) / pow(theta, 4),
            (1 - ct) / (theta*theta), (theta - st) / pow(theta, 3),
            (theta*st - 2 * (1 - ct)) / pow(theta, 4), (theta*(1 - ct) - 3 * (theta - st)) / pow(theta, 5);
    }
    else
        coeff(0, 0) = 1;
    Matrix93 tempB3;
    tempB3.block<3, 3>(0, 0) = a*B;
    tempB3.block<3, 3>(3, 0) = b*B;
    tempB3.block<3, 3>(6, 0) = c*B;
    Matrix33 B2 = B*B;
    Matrix93 temp2B3;
    temp2B3.block<3, 3>(0, 0) = a*B2;
    temp2B3.block<3, 3>(3, 0) = b*B2;
    temp2B3.block<3, 3>(6, 0) = c*B2;
    // chain rule: dR/dw, dt/dw, dt/du
    Matrix93 dRdw = coeff(0, 0)*dbdw + coeff(1, 0)*tempB3
        + coeff(2, 0)*db2dw + coeff(3, 0)*temp2B3;
    Vector9 dtdw = coeff(0, 1) * dbdw*u + coeff(1, 1) * tempB3*u
        + coeff(2, 1) * db2dw*u + coeff(3, 1)*temp2B3*u;
    Matrix33 dtdu = Matrix33::Identity() + coeff(2, 0)*B + coeff(2, 1) * B2;
    Eigen::VectorXd rk(X.cols());
    Eigen::MatrixXd Jk(X.cols(), 6);
    // FIX: was an orphaned "#pragma omp for" with no enclosing parallel
    // region, which executes serially; each iteration writes only its own
    // Jk row and rk entry, so the loop is safely parallelizable.
#pragma omp parallel for
    for (int i = 0; i < X.cols(); i++)
    {
        Vector3 xi = X.col(i);
        Vector3 yi = Y.col(i);
        Vector3 ni = norm_y.col(i);
        double wi = sqrt(w_normalized[i]);
        Matrix33 dedR = wi*ni * xi.transpose();
        Vector3 dedt = wi*ni;
        Vector6 dedx;
        dedx(0) = (dedR.cwiseProduct(dRdw.block(0, 0, 3, 3))).sum()
            + dedt.dot(dtdw.head<3>());
        dedx(1) = (dedR.cwiseProduct(dRdw.block(3, 0, 3, 3))).sum()
            + dedt.dot(dtdw.segment<3>(3));
        dedx(2) = (dedR.cwiseProduct(dRdw.block(6, 0, 3, 3))).sum()
            + dedt.dot(dtdw.tail<3>());
        dedx(3) = dedt.dot(dtdu.col(0));
        dedx(4) = dedt.dot(dtdu.col(1));
        dedx(5) = dedt.dot(dtdu.col(2));
        Jk.row(i) = dedx.transpose();
        rk[i] = wi * ni.dot(R*xi-yi+t);
    }
    // normal equations: (J^T J) dir = -J^T r
    LHS = Jk.transpose() * Jk;
    RHS = -Jk.transpose() * rk;
    Eigen::CompleteOrthogonalDecomposition<Matrix66> cod_(LHS);
    dir = cod_.solve(RHS);
    double gTd = -RHS.dot(dir);
    return gTd;
}
public:
/// Robust reweighted point-to-point ICP with optional Anderson acceleration
/// (AA) and dynamic Welsch annealing.  Aligns X onto Y; on return X holds the
/// transformed source (mean re-applied) and par.res_trans the final transform.
/// @param X            source points, one per column (modified in place).
/// @param Y            target points, one per column.
/// @param source_mean  mean previously subtracted from X (re-applied to the result).
/// @param target_mean  mean previously subtracted from Y.
/// @param par          ICP parameters; also receives convergence results.
void point_to_point(MatrixNX& X, MatrixNX& Y, VectorN& source_mean,
    VectorN& target_mean, ICP::Parameters& par){
    /// Build kd-tree
    KDtree kdtree(Y);
    /// Buffers
    MatrixNX Q = MatrixNX::Zero(N, X.cols());
    VectorX W = VectorX::Zero(X.cols());
    AffineNd T;
    if (par.use_init) T.matrix() = par.init_trans;
    else T = AffineNd::Identity();
    MatrixXX To1 = T.matrix();
    MatrixXX To2 = T.matrix();
    int nPoints = X.cols();
    //Anderson Acc para
    AndersonAcceleration accelerator_;
    // last transform produced by the closed-form (SVD) solver, kept as the
    // safe fallback when an accelerated step increases the energy
    AffineNd SVD_T = T;
    double energy = .0, last_energy = std::numeric_limits<double>::max();
    //ground truth point clouds
    MatrixNX X_gt = X;
    if(par.has_groundtruth)
    {
        VectorN temp_trans = par.gt_trans.col(N).head(N);
        X_gt.colwise() += source_mean;
        X_gt = par.gt_trans.block(0, 0, N, N) * X_gt;
        X_gt.colwise() += temp_trans - target_mean;
    }
    //output para
    std::string file_out = par.out_path;
    std::vector<double> times, energys, gt_mses;
    double begin_time, end_time, run_time;
    double gt_mse = 0.0;
    // dynamic welsch paras: nu1 anneals from a coarse scale down to nu2
    double nu1 = 1, nu2 = 1;
    double begin_init = omp_get_wtime();
    //Find initial closest point
#pragma omp parallel for
    for (int i = 0; i<nPoints; ++i) {
        VectorN cur_p = T * X.col(i);
        Q.col(i) = Y.col(kdtree.closest(cur_p.data()));
        W[i] = (cur_p - Q.col(i)).norm();
    }
    if(par.f == ICP::WELSCH)
    {
        //dynamic welsch, calc k-nearest points with itself;
        nu2 = par.nu_end_k * FindKnearestMed(kdtree, Y, 7);
        double med1;
        igl::median(W, med1);
        nu1 = par.nu_begin_k * med1;
    }
    double end_init = omp_get_wtime();
    double init_time = end_init - begin_init;
    //AA init: acceleration operates on the (N+1)x(N+1) matrix log of T
    accelerator_.init(par.anderson_m, (N + 1) * (N + 1), LogMatrix(T.matrix()).data());
    begin_time = omp_get_wtime();
    bool stop1 = false;
    // outer loop: one pass per Welsch scale nu1 (single pass otherwise)
    while(!stop1)
    {
        /// run ICP
        int icp = 0;
        for (; icp<par.max_icp; ++icp)
        {
            bool accept_aa = false;
            energy = get_energy(par.f, W, nu1);
            if (par.use_AA)
            {
                // accept the accelerated step only if it decreased the energy;
                // otherwise fall back to the plain SVD step
                if (energy < last_energy) {
                    last_energy = energy;
                    accept_aa = true;
                }
                else{
                    accelerator_.replace(LogMatrix(SVD_T.matrix()).data());
                    //Re-find the closest point
#pragma omp parallel for
                    for (int i = 0; i<nPoints; ++i) {
                        VectorN cur_p = SVD_T * X.col(i);
                        Q.col(i) = Y.col(kdtree.closest(cur_p.data()));
                        W[i] = (cur_p - Q.col(i)).norm();
                    }
                    last_energy = get_energy(par.f, W, nu1);
                }
            }
            else
                last_energy = energy;
            end_time = omp_get_wtime();
            run_time = end_time - begin_time;
            if(par.has_groundtruth)
            {
                gt_mse = (T*X - X_gt).squaredNorm()/nPoints;
            }
            // save results
            energys.push_back(last_energy);
            times.push_back(run_time);
            gt_mses.push_back(gt_mse);
            if (par.print_energy)
                std::cout << "icp iter = " << icp << ", Energy = " << last_energy
                          << ", time = " << run_time << std::endl;
            robust_weight(par.f, W, nu1);
            // Rotation and translation update
            T = point_to_point(X, Q, W);
            //Anderson Acc
            SVD_T = T;
            if (par.use_AA)
            {
                // accelerate in log-space, then map back with the matrix exp
                AffineMatrixN Trans = (Eigen::Map<const AffineMatrixN>(accelerator_.compute(LogMatrix(T.matrix()).data()).data(), N+1, N+1)).exp();
                T.linear() = Trans.block(0,0,N,N);
                T.translation() = Trans.block(0,N,N,1);
            }
            // Find closest point
#pragma omp parallel for
            for (int i = 0; i<nPoints; ++i) {
                VectorN cur_p = T * X.col(i) ;
                Q.col(i) = Y.col(kdtree.closest(cur_p.data()));
                W[i] = (cur_p - Q.col(i)).norm();
            }
            /// Stopping criteria
            double stop2 = (T.matrix() - To2).norm();
            To2 = T.matrix();
            if(stop2 < par.stop)
            {
                break;
            }
        }
        if(par.f!= ICP::WELSCH)
            stop1 = true;
        else
        {
            // anneal the Welsch scale toward nu2; stop once it has converged
            stop1 = fabs(nu1 - nu2)<SAME_THRESHOLD? true: false;
            nu1 = nu1*par.nu_alpha > nu2? nu1*par.nu_alpha : nu2;
            if(par.use_AA)
            {
                accelerator_.reset(LogMatrix(T.matrix()).data());
                last_energy = std::numeric_limits<double>::max();
            }
        }
    }
    ///calc convergence energy
    last_energy = get_energy(par.f, W, nu1);
    X = T * X;
    // NOTE(review): computed even without groundtruth (X_gt is then just the
    // initial X) — only meaningful when par.has_groundtruth is set.
    gt_mse = (X-X_gt).squaredNorm()/nPoints;
    T.translation() += - T.rotation() * source_mean + target_mean;
    X.colwise() += target_mean;
    ///save convergence result
    par.convergence_energy = last_energy;
    par.convergence_gt_mse = gt_mse;
    par.res_trans = T.matrix();
    ///output
    if (par.print_output)
    {
        std::ofstream out_res(par.out_path);
        if (!out_res.is_open())
        {
            std::cout << "Can't open out file " << par.out_path << std::endl;
        }
        //output time and energy
        out_res.precision(16);
        for (int i = 0; i<times.size(); i++)
        {
            out_res << times[i] << " "<< energys[i] << " " << gt_mses[i] << std::endl;
        }
        out_res.close();
        std::cout << " write res to " << par.out_path << std::endl;
    }
}
/// Reweighted ICP with point to plane
/// @param Source (one 3D point per column)
/// @param Target (one 3D point per column)
/// @param Target normals (one 3D normal per column)
/// @param Parameters
// template <typename Derived1, typename Derived2, typename Derived3>
// Aligns X onto Y using the linearized point-to-plane metric; on return X is
// the transformed source (mean re-applied), norm_x is rotated accordingly,
// and par receives the final transform and convergence statistics.
void point_to_plane(Eigen::Matrix3Xd& X,
    Eigen::Matrix3Xd& Y, Eigen::Matrix3Xd& norm_x, Eigen::Matrix3Xd& norm_y,
    Eigen::Vector3d& source_mean, Eigen::Vector3d& target_mean,
    ICP::Parameters &par) {
    /// Build kd-tree
    KDtree kdtree(Y);
    /// Buffers
    Eigen::Matrix3Xd Qp = Eigen::Matrix3Xd::Zero(3, X.cols());
    Eigen::Matrix3Xd Qn = Eigen::Matrix3Xd::Zero(3, X.cols());
    Eigen::VectorXd W = Eigen::VectorXd::Zero(X.cols());
    // untransformed copy of the source; X always holds T * ori_X
    Eigen::Matrix3Xd ori_X = X;
    AffineNd T;
    if (par.use_init) T.matrix() = par.init_trans;
    else T = AffineNd::Identity();
    AffineMatrixN To1 = T.matrix();
    X = T*X;
    Eigen::Matrix3Xd X_gt = X;
    if(par.has_groundtruth)
    {
        Eigen::Vector3d temp_trans = par.gt_trans.block(0, 3, 3, 1);
        X_gt = ori_X;
        X_gt.colwise() += source_mean;
        X_gt = par.gt_trans.block(0, 0, 3, 3) * X_gt;
        X_gt.colwise() += temp_trans - target_mean;
    }
    std::vector<double> times, energys, gt_mses;
    double begin_time, end_time, run_time;
    double gt_mse = 0.0;
    ///dynamic welsch, calc k-nearest points with itself;
    double begin_init = omp_get_wtime();
    //Anderson Acc para
    AndersonAcceleration accelerator_;
    AffineNd LG_T = T;
    double energy = 0.0, prev_res = std::numeric_limits<double>::max(), res = 0.0;
    // Find closest point: W[i] is the point-to-plane residual magnitude
    #pragma omp parallel for
    for (int i = 0; i<X.cols(); ++i) {
        int id = kdtree.closest(X.col(i).data());
        Qp.col(i) = Y.col(id);
        Qn.col(i) = norm_y.col(id);
        W[i] = std::abs(Qn.col(i).transpose() * (X.col(i) - Qp.col(i)));
    }
    double end_init = omp_get_wtime();
    double init_time = end_init - begin_init;
    begin_time = omp_get_wtime();
    int total_iter = 0;
    double test_total_time = 0.0;
    bool stop1 = false;
    // outer loop runs exactly once (stop1 set below); kept for structural
    // symmetry with the Welsch-annealed variants
    while(!stop1)
    {
        /// ICP
        for(int icp=0; icp<par.max_icp; ++icp) {
            total_iter++;
            bool accept_aa = false;
            energy = get_energy(par.f, W, par.p);
            end_time = omp_get_wtime();
            run_time = end_time - begin_time;
            energys.push_back(energy);
            times.push_back(run_time);
            Eigen::VectorXd test_w = (X-Qp).colwise().norm();
            if(par.has_groundtruth)
            {
                gt_mse = (X - X_gt).squaredNorm()/X.cols();
            }
            gt_mses.push_back(gt_mse);
            /// Compute weights
            robust_weight(par.f, W, par.p);
            /// Rotation and translation update (incremental, left-composed)
            T = point_to_plane(X, Qp, Qn, W, Eigen::VectorXd::Zero(X.cols()))*T;
            /// Find closest point
#pragma omp parallel for
            for(int i=0; i<X.cols(); i++) {
                X.col(i) = T * ori_X.col(i);
                int id = kdtree.closest(X.col(i).data());
                Qp.col(i) = Y.col(id);
                Qn.col(i) = norm_y.col(id);
                W[i] = std::abs(Qn.col(i).transpose() * (X.col(i) - Qp.col(i)));
            }
            if(par.print_energy)
                std::cout << "icp iter = " << total_iter << ", gt_mse = " << gt_mse
                          << ", energy = " << energy << std::endl;
            /// Stopping criteria
            double stop2 = (T.matrix() - To1).norm();
            To1 = T.matrix();
            if(stop2 < par.stop) break;
        }
        stop1 = true;
    }
    par.res_trans = T.matrix();
    ///calc convergence energy
    W = (Qn.array()*(X - Qp).array()).colwise().sum().abs().transpose();
    energy = get_energy(par.f, W, par.p);
    gt_mse = (X - X_gt).squaredNorm() / X.cols();
    T.translation().noalias() += -T.rotation()*source_mean + target_mean;
    X.colwise() += target_mean;
    norm_x = T.rotation()*norm_x;
    ///save convergence result
    par.convergence_energy = energy;
    par.convergence_gt_mse = gt_mse;
    par.res_trans = T.matrix();
    ///output
    if (par.print_output)
    {
        std::ofstream out_res(par.out_path);
        if (!out_res.is_open())
        {
            std::cout << "Can't open out file " << par.out_path << std::endl;
        }
        ///output time and energy
        out_res.precision(16);
        for (int i = 0; i<total_iter; i++)
        {
            out_res << times[i] << " "<< energys[i] << " " << gt_mses[i] << std::endl;
        }
        out_res.close();
        std::cout << " write res to " << par.out_path << std::endl;
    }
}
/// Reweighted ICP with point to plane
/// @param Source (one 3D point per column)
/// @param Target (one 3D point per column)
/// @param Target normals (one 3D normal per column)
/// @param Parameters
// template <typename Derived1, typename Derived2, typename Derived3>
// Gauss-Newton point-to-plane ICP in twist coordinates, with optional
// Anderson acceleration (with line-search fallback) and dynamic Welsch
// annealing.  On return X is the transformed source (mean re-applied),
// norm_x is rotated accordingly, and par receives the results.
void point_to_plane_GN(Eigen::Matrix3Xd& X,
    Eigen::Matrix3Xd& Y, Eigen::Matrix3Xd& norm_x, Eigen::Matrix3Xd& norm_y,
    Eigen::Vector3d& source_mean, Eigen::Vector3d& target_mean,
    ICP::Parameters &par) {
    /// Build kd-tree
    KDtree kdtree(Y);
    /// Buffers
    Eigen::Matrix3Xd Qp = Eigen::Matrix3Xd::Zero(3, X.cols());
    Eigen::Matrix3Xd Qn = Eigen::Matrix3Xd::Zero(3, X.cols());
    Eigen::VectorXd W = Eigen::VectorXd::Zero(X.cols());
    // untransformed copy of the source; X always holds T * ori_X
    Eigen::Matrix3Xd ori_X = X;
    AffineNd T;
    if (par.use_init) T.matrix() = par.init_trans;
    else T = AffineNd::Identity();
    AffineMatrixN To1 = T.matrix();
    X = T*X;
    Eigen::Matrix3Xd X_gt = X;
    if(par.has_groundtruth)
    {
        Eigen::Vector3d temp_trans = par.gt_trans.block(0, 3, 3, 1);
        X_gt = ori_X;
        X_gt.colwise() += source_mean;
        X_gt = par.gt_trans.block(0, 0, 3, 3) * X_gt;
        X_gt.colwise() += temp_trans - target_mean;
    }
    std::vector<double> times, energys, gt_mses;
    double begin_time, end_time, run_time;
    // FIX: was uninitialized; read (pushed into gt_mses and printed) when
    // par.has_groundtruth is false.  Initialize to 0.0 like the sibling methods.
    double gt_mse = 0.0;
    ///dynamic welsch, calc k-nearest points with itself;
    double nu1 = 1, nu2 = 1;
    double begin_init = omp_get_wtime();
    //Anderson Acc para
    AndersonAcceleration accelerator_;
    // current transform and GN step, both in 6-d twist coordinates
    Vector6 LG_T;
    Vector6 Dir;
    //add time test
    double energy = 0.0, prev_energy = std::numeric_limits<double>::max();
    if(par.use_AA)
    {
        Eigen::Matrix4d log_T = LogMatrix(T.matrix());
        LG_T = LogToVec(log_T);
        accelerator_.init(par.anderson_m, 6, LG_T.data());
    }
    // Find closest point: W[i] is the point-to-plane residual magnitude
    #pragma omp parallel for
    for (int i = 0; i<X.cols(); ++i) {
        int id = kdtree.closest(X.col(i).data());
        Qp.col(i) = Y.col(id);
        Qn.col(i) = norm_y.col(id);
        W[i] = std::abs(Qn.col(i).transpose() * (X.col(i) - Qp.col(i)));
    }
    if(par.f == ICP::WELSCH)
    {
        // Welsch scale anneals from nu1 (data-driven) down to nu2
        double med1;
        igl::median(W, med1);
        nu1 =par.nu_begin_k * med1;
        nu2 = par.nu_end_k * FindKnearestNormMed(kdtree, Y, 7, norm_y);
    }
    double end_init = omp_get_wtime();
    double init_time = end_init - begin_init;
    begin_time = omp_get_wtime();
    int total_iter = 0;
    double test_total_time = 0.0;
    bool stop1 = false;
    par.max_icp = 6;
    // outer loop: one pass per Welsch scale (single pass otherwise)
    while(!stop1)
    {
        // allow a few more inner iterations as the scale tightens
        par.max_icp = std::min(par.max_icp+1, 10);
        /// ICP
        for(int icp=0; icp<par.max_icp; ++icp) {
            total_iter++;
            int n_linsearch = 0;
            energy = get_energy(par.f, W, nu1);
            if(par.use_AA)
            {
                if(energy < prev_energy)
                {
                    prev_energy = energy;
                }
                else
                {
                    // accelerated step increased the energy: retry one plain
                    // GN step (alpha = 1) from the last accepted twist
                    double alpha = 0.0;
                    Vector6 new_t = LG_T;
                    Eigen::VectorXd lowest_W = W;
                    Eigen::Matrix3Xd lowest_Qp = Qp;
                    Eigen::Matrix3Xd lowest_Qn = Qn;
                    Eigen::Affine3d lowest_T = T;
                    n_linsearch++;
                    alpha = 1;
                    new_t = LG_T + alpha * Dir;
                    T.matrix() = VecToLog(new_t).exp();
                    /// Find closest point
#pragma omp parallel for
                    for(int i=0; i<X.cols(); i++) {
                        X.col(i) = T * ori_X.col(i);
                        int id = kdtree.closest(X.col(i).data());
                        Qp.col(i) = Y.col(id);
                        Qn.col(i) = norm_y.col(id);
                        W[i] = std::abs(Qn.col(i).transpose() * (X.col(i) - Qp.col(i)));
                    }
                    double test_energy = get_energy(par.f, W, nu1);
                    if(test_energy < energy)
                    {
                        // keep the retried step and restart acceleration there
                        accelerator_.reset(new_t.data());
                        energy = test_energy;
                    }
                    else
                    {
                        // roll back to the best known state
                        Qp = lowest_Qp;
                        Qn = lowest_Qn;
                        W = lowest_W;
                        T = lowest_T;
                    }
                    prev_energy = energy;
                }
            }
            else
            {
                prev_energy = energy;
            }
            end_time = omp_get_wtime();
            run_time = end_time - begin_time;
            energys.push_back(prev_energy);
            times.push_back(run_time);
            if(par.has_groundtruth)
            {
                gt_mse = (X - X_gt).squaredNorm()/X.cols();
            }
            gt_mses.push_back(gt_mse);
            /// Compute weights
            robust_weight(par.f, W, nu1);
            /// Rotation and translation update: one GN step in twist space
            point_to_plane_gaussnewton(ori_X, Qp, Qn, W, T.matrix(), Dir);
            LG_T = LogToVec(LogMatrix(T.matrix()));
            LG_T += Dir;
            T.matrix() = VecToLog(LG_T).exp();
            // Anderson acc
            if(par.use_AA)
            {
                Vector6 AA_t;
                AA_t = accelerator_.compute(LG_T.data());
                T.matrix() = VecToLog(AA_t).exp();
            }
            if(par.print_energy)
                std::cout << "icp iter = " << total_iter << ", gt_mse = " << gt_mse
                          << ", nu1 = " << nu1 << ", acept_aa= " << n_linsearch
                          << ", energy = " << prev_energy << std::endl;
            /// Find closest point
#pragma omp parallel for
            for(int i=0; i<X.cols(); i++) {
                X.col(i) = T * ori_X.col(i);
                int id = kdtree.closest(X.col(i).data());
                Qp.col(i) = Y.col(id);
                Qn.col(i) = norm_y.col(id);
                W[i] = std::abs(Qn.col(i).transpose() * (X.col(i) - Qp.col(i)));
            }
            /// Stopping criteria
            double stop2 = (T.matrix() - To1).norm();
            To1 = T.matrix();
            if(stop2 < par.stop) break;
        }
        if(par.f == ICP::WELSCH)
        {
            // anneal the Welsch scale toward nu2; stop once converged
            stop1 = fabs(nu1 - nu2)<SAME_THRESHOLD? true: false;
            nu1 = nu1*par.nu_alpha > nu2 ? nu1*par.nu_alpha : nu2;
            if(par.use_AA)
            {
                accelerator_.reset(LogToVec(LogMatrix(T.matrix())).data());
                prev_energy = std::numeric_limits<double>::max();
            }
        }
        else
            stop1 = true;
    }
    par.res_trans = T.matrix();
    ///calc convergence energy
    W = (Qn.array()*(X - Qp).array()).colwise().sum().abs().transpose();
    energy = get_energy(par.f, W, nu1);
    gt_mse = (X - X_gt).squaredNorm() / X.cols();
    T.translation().noalias() += -T.rotation()*source_mean + target_mean;
    X.colwise() += target_mean;
    norm_x = T.rotation()*norm_x;
    ///save convergence result
    par.convergence_energy = energy;
    par.convergence_gt_mse = gt_mse;
    par.res_trans = T.matrix();
    ///output
    if (par.print_output)
    {
        std::ofstream out_res(par.out_path);
        if (!out_res.is_open())
        {
            std::cout << "Can't open out file " << par.out_path << std::endl;
        }
        ///output time and energy
        out_res.precision(16);
        for (int i = 0; i<total_iter; i++)
        {
            out_res << times[i] << " "<< energys[i] << " " << gt_mses[i] << std::endl;
        }
        out_res.close();
        std::cout << " write res to " << par.out_path << std::endl;
    }
}
};
#endif
|
timer.c | /*
* Copyright (c) 2011-2019, Triad National Security, LLC.
* All rights Reserved.
*
* CLAMR -- LA-CC-11-094
*
* Copyright 2011-2019. Triad National Security, LLC. This software was produced
* under U.S. Government contract 89233218CNA000001 for Los Alamos National
* Laboratory (LANL), which is operated by Triad National Security, LLC
* for the U.S. Department of Energy. The U.S. Government has rights to use,
* reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR
* TRIAD NATIONAL SECURITY, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR
* ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is modified
* to produce derivative works, such modified software should be clearly marked,
* so as not to confuse it with the version available from LANL.
*
* Additionally, redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the Triad National Security, LLC, Los Alamos
* National Laboratory, LANL, the U.S. Government, nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE TRIAD NATIONAL SECURITY, LLC AND
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
* NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL TRIAD NATIONAL
* SECURITY, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* CLAMR -- LA-CC-11-094
* This research code is being developed as part of the
* 2011 X Division Summer Workshop for the express purpose
* of a collaborative code for development of ideas in
* the implementation of AMR codes for Exascale platforms
*
* AMR implementation of the Wave code previously developed
* as a demonstration code for regular grids on Exascale platforms
* as part of the Supercomputing Challenge and Los Alamos
* National Laboratory
*
* Authors: Bob Robey XCP-2 brobey@lanl.gov
* Neal Davis davis68@lanl.gov, davis68@illinois.edu
* David Nicholaeff dnic@lanl.gov, mtrxknight@aol.com
* Dennis Trujillo dptrujillo@lanl.gov, dptru10@gmail.com
*
*/
#include <time.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "timer.h"
/* Record the current monotonic wall-clock time in *tstart_cpu.
 * Inside an OpenMP parallel region only the master thread writes the
 * timestamp, so all threads share a single start time. */
void cpu_timer_start(struct timespec *tstart_cpu){
#ifdef _OPENMP
   if ( omp_in_parallel() ) {
#pragma omp master
      {
         clock_gettime(CLOCK_MONOTONIC, tstart_cpu);
      }
      return;
   }
#endif
   /* serial path (and the non-OpenMP build) */
   clock_gettime(CLOCK_MONOTONIC, tstart_cpu);
}
/* Return the elapsed wall-clock time, in seconds, since *tstart_cpu was
 * recorded by cpu_timer_start().
 *
 * Inside an OpenMP parallel region only the master thread reads the clock;
 * every other thread returns 0.0.  (Bug fix: `result` was previously left
 * uninitialized on those threads -- undefined behavior when returned.)
 *
 * Note: tresult.tv_nsec may come out negative after the raw subtraction;
 * the double sum sec + nsec*1.0e-9 is still the correct difference, so no
 * explicit borrow/normalization is needed here. */
double cpu_timer_stop(struct timespec tstart_cpu){
   double result = 0.0;   /* value returned by non-master threads in a parallel region */
   struct timespec tstop_cpu, tresult;
#ifdef _OPENMP
   if ( omp_in_parallel() ) {
#pragma omp master
      {
         clock_gettime(CLOCK_MONOTONIC, &tstop_cpu);
         tresult.tv_sec = tstop_cpu.tv_sec - tstart_cpu.tv_sec;
         tresult.tv_nsec = tstop_cpu.tv_nsec - tstart_cpu.tv_nsec;
         result = (double)tresult.tv_sec + (double)tresult.tv_nsec*1.0e-9;
      }
   } else {
      clock_gettime(CLOCK_MONOTONIC, &tstop_cpu);
      tresult.tv_sec = tstop_cpu.tv_sec - tstart_cpu.tv_sec;
      tresult.tv_nsec = tstop_cpu.tv_nsec - tstart_cpu.tv_nsec;
      result = (double)tresult.tv_sec + (double)tresult.tv_nsec*1.0e-9;
   }
#else
   clock_gettime(CLOCK_MONOTONIC, &tstop_cpu);
   tresult.tv_sec = tstop_cpu.tv_sec - tstart_cpu.tv_sec;
   tresult.tv_nsec = tstop_cpu.tv_nsec - tstart_cpu.tv_nsec;
   result = (double)tresult.tv_sec + (double)tresult.tv_nsec*1.0e-9;
#endif
   return(result);
}
|
par_relax_more.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* a few more relaxation schemes: Chebychev, FCF-Jacobi, CG -
* these do not go through the CF interface (hypre_BoomerAMGRelaxIF)
*
*****************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "float.h"
HYPRE_Int hypre_LINPACKcgtql1(HYPRE_Int*,HYPRE_Real *,HYPRE_Real *,HYPRE_Int *);
/******************************************************************************
*
*use max norm to estimate largest eigenvalue
*
*****************************************************************************/
HYPRE_Int hypre_ParCSRMaxEigEstimate(hypre_ParCSRMatrix *A, /* matrix to relax with */
HYPRE_Int scale, /* scale by diagonal?*/
HYPRE_Real *max_eig)
{
HYPRE_Real e_max;
HYPRE_Real row_sum, max_norm;
HYPRE_Real *A_diag_data;
HYPRE_Real *A_offd_data;
HYPRE_Real temp;
HYPRE_Real diag_value;
HYPRE_Int pos_diag, neg_diag;
HYPRE_Int A_num_rows;
HYPRE_Int *A_diag_i;
HYPRE_Int *A_offd_i;
HYPRE_Int j;
HYPRE_Int i, start;
/* estimate with the inf-norm of A - should be ok for SPD matrices */
A_num_rows = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
A_diag_i = hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(A));
A_diag_data = hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(A));
A_offd_i = hypre_CSRMatrixI(hypre_ParCSRMatrixOffd(A));
A_offd_data = hypre_CSRMatrixData(hypre_ParCSRMatrixOffd(A));
max_norm = 0.0;
pos_diag = neg_diag = 0;
for ( i = 0; i < A_num_rows; i++ )
{
/* NOTE(review): assumes the diagonal entry is stored first in each CSR
   row (standard hypre ParCSR convention) -- confirm for exotic inputs */
start = A_diag_i[i];
diag_value = A_diag_data[start];
if (diag_value > 0)
{
pos_diag++;
}
if (diag_value < 0)
{
neg_diag++;
diag_value = -diag_value;
}
/* accumulate the absolute row sum: |a_ii| + sum_j |a_ij| */
row_sum = diag_value;
/*for (j = 0; j < row_length; j++)*/
for (j = start+1; j < A_diag_i[i+1]; j++)
{
row_sum += fabs(A_diag_data[j]);
}
for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
{
row_sum += fabs(A_offd_data[j]);
}
if (scale)
{
/* scaled variant: row sum of D^{-1}A (skip rows with zero diagonal) */
if (diag_value != 0.0)
row_sum = row_sum/diag_value;
}
if ( row_sum > max_norm ) max_norm = row_sum;
}
/* get max across procs */
hypre_MPI_Allreduce(&max_norm, &temp, 1, HYPRE_MPI_REAL, hypre_MPI_MAX, hypre_ParCSRMatrixComm(A));
max_norm = temp;
/* from Charles: if every diagonal entry is negative, flip the sign of
   the estimate */
if ( pos_diag == 0 && neg_diag > 0 ) max_norm = - max_norm;
/* eig estimates */
e_max = max_norm;
/* return */
*max_eig = e_max;
return hypre_error_flag;
}
/******************************************************************************
use CG to get the eigenvalue estimate
scale means get eig est of (D^{-1/2} A D^{-1/2}
******************************************************************************/
HYPRE_Int hypre_ParCSRMaxEigEstimateCG(hypre_ParCSRMatrix *A, /* matrix to relax with */
HYPRE_Int scale, /* scale by diagonal?*/
HYPRE_Int max_iter,
HYPRE_Real *max_eig,
HYPRE_Real *min_eig)
{
HYPRE_Int i, j, err;
hypre_ParVector *p;
hypre_ParVector *s;
hypre_ParVector *r;
hypre_ParVector *ds;
hypre_ParVector *u;
HYPRE_Real *tridiag = NULL;
HYPRE_Real *trioffd = NULL;
HYPRE_Real lambda_max ;
HYPRE_Real beta, gamma = 0.0, alpha, sdotp, gamma_old, alphainv;
HYPRE_Real diag;
HYPRE_Real lambda_min;
HYPRE_Real *s_data, *p_data, *ds_data, *u_data;
HYPRE_Int local_size = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
/* check the size of A - don't iterate more than the size */
HYPRE_BigInt size = hypre_ParCSRMatrixGlobalNumRows(A);
if (size < (HYPRE_BigInt) max_iter)
max_iter = (HYPRE_Int) size;
/* create some temp vectors: p, s, r , ds, u*/
r = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize(r);
hypre_ParVectorSetPartitioningOwner(r,0);
p = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize(p);
hypre_ParVectorSetPartitioningOwner(p,0);
s = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize(s);
hypre_ParVectorSetPartitioningOwner(s,0);
ds = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize(ds);
hypre_ParVectorSetPartitioningOwner(ds,0);
u = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize(u);
hypre_ParVectorSetPartitioningOwner(u,0);
/* point to local data */
s_data = hypre_VectorData(hypre_ParVectorLocalVector(s));
p_data = hypre_VectorData(hypre_ParVectorLocalVector(p));
ds_data = hypre_VectorData(hypre_ParVectorLocalVector(ds));
u_data = hypre_VectorData(hypre_ParVectorLocalVector(u));
/* make room for tri-diag matrix (the Lanczos tridiagonal built from the
   CG recurrence coefficients; its extremal eigenvalues approximate those
   of A) */
tridiag = hypre_CTAlloc(HYPRE_Real, max_iter+1, HYPRE_MEMORY_HOST);
trioffd = hypre_CTAlloc(HYPRE_Real, max_iter+1, HYPRE_MEMORY_HOST);
for (i=0; i < max_iter + 1; i++)
{
tridiag[i] = 0;
trioffd[i] = 0;
}
/* set residual to random */
hypre_ParVectorSetRandomValues(r,1);
if (scale)
{
/* ds = D^{-1/2}: inverse square roots of the diagonal entries
   (assumes diagonal stored first in each CSR row and positive) */
for (i = 0; i < local_size; i++)
{
diag = A_diag_data[A_diag_i[i]];
ds_data[i] = 1/sqrt(diag);
}
}
else
{
/* set ds to 1 */
hypre_ParVectorSetConstantValues(ds,1.0);
}
/* gamma = <r,Cr> */
/* NOTE(review): p is still zero here, so this initial gamma is 0; it is
   overwritten inside the loop before beta ever uses it (i==0 resets
   beta to 1.0), so the value is harmless */
gamma = hypre_ParVectorInnerProd(r,p);
/* for the initial filling of the tridiag matrix */
beta = 1.0;
i = 0;
while (i < max_iter)
{
/* s = C*r */
/* TO DO: C = diag scale */
hypre_ParVectorCopy(r, s);
/*gamma = <r,Cr> */
gamma_old = gamma;
gamma = hypre_ParVectorInnerProd(r,s);
if (i==0)
{
beta = 1.0;
/* p_0 = C*r */
hypre_ParVectorCopy(s, p);
}
else
{
/* beta = gamma / gamma_old */
beta = gamma / gamma_old;
/* p = s + beta p */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
for (j=0; j < local_size; j++)
{
p_data[j] = s_data[j] + beta*p_data[j];
}
}
if (scale)
{
/* s = D^{-1/2}A*D^{-1/2}*p */
for (j = 0; j < local_size; j++)
{
u_data[j] = ds_data[j] * p_data[j];
}
hypre_ParCSRMatrixMatvec(1.0, A, u, 0.0, s);
for (j = 0; j < local_size; j++)
{
s_data[j] = ds_data[j] * s_data[j];
}
}
else
{
/* s = A*p */
hypre_ParCSRMatrixMatvec(1.0, A, p, 0.0, s);
}
/* <s,p> */
sdotp = hypre_ParVectorInnerProd(s,p);
/* alpha = gamma / <s,p> */
alpha = gamma/sdotp;
/* get tridiagonal matrix from the CG alpha/beta coefficients */
alphainv = 1.0/alpha;
tridiag[i+1] = alphainv;
tridiag[i] *= beta;
tridiag[i] += alphainv;
trioffd[i+1] = alphainv;
trioffd[i] *= sqrt(beta);
/* x = x + alpha*p */
/* don't need: only the recurrence coefficients matter here, not the
   CG solution itself */
/* r = r - alpha*s */
hypre_ParVectorAxpy( -alpha, s, r);
i++;
}
/* eispack routine - eigenvalues return in tridiag and ordered*/
hypre_LINPACKcgtql1(&i,tridiag,trioffd,&err);
/* eigenvalues come back in ascending order */
lambda_max = tridiag[i-1];
lambda_min = tridiag[0];
/* hypre_printf("linpack max eig est = %g\n", lambda_max);*/
/* hypre_printf("linpack min eig est = %g\n", lambda_min);*/
hypre_TFree(tridiag, HYPRE_MEMORY_HOST);
hypre_TFree(trioffd, HYPRE_MEMORY_HOST);
hypre_ParVectorDestroy(r);
hypre_ParVectorDestroy(s);
hypre_ParVectorDestroy(p);
hypre_ParVectorDestroy(ds);
hypre_ParVectorDestroy(u);
/* return */
*max_eig = lambda_max;
*min_eig = lambda_min;
return hypre_error_flag;
}
/******************************************************************************
Chebyshev relaxation
Can specify order 1-4 (this is the order of the resid polynomial)- here we
explicitly code the coefficients (instead of
iteratively determining)
variant 0: standard chebyshev
this is rlx 11 if scale = 0, and 16 if scale == 1
variant 1: modified cheby: T(t)* f(t) where f(t) = (1-b/t)
this is rlx 15 if scale = 0, and 17 if scale == 1
ratio indicates the percentage of the whole spectrum to use (so .5
means half, and .1 means 10percent)
*******************************************************************************/
HYPRE_Int hypre_ParCSRRelax_Cheby(hypre_ParCSRMatrix *A, /* matrix to relax with */
hypre_ParVector *f, /* right-hand side */
HYPRE_Real max_eig,
HYPRE_Real min_eig,
HYPRE_Real fraction,
HYPRE_Int order, /* polynomial order */
HYPRE_Int scale, /* scale by diagonal?*/
HYPRE_Int variant,
hypre_ParVector *u, /* initial/updated approximation */
hypre_ParVector *v /* temporary vector */,
hypre_ParVector *r /*another temp vector */ )
{
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Real *u_data = hypre_VectorData(hypre_ParVectorLocalVector(u));
HYPRE_Real *f_data = hypre_VectorData(hypre_ParVectorLocalVector(f));
HYPRE_Real *v_data = hypre_VectorData(hypre_ParVectorLocalVector(v));
HYPRE_Real *r_data = hypre_VectorData(hypre_ParVectorLocalVector(r));
HYPRE_Real theta, delta;
HYPRE_Real den;
HYPRE_Real upper_bound, lower_bound;
HYPRE_Int i, j;
HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Real coefs[5];
HYPRE_Real mult;
HYPRE_Real *orig_u;
HYPRE_Real tmp_d;
HYPRE_Int cheby_order;
HYPRE_Real *ds_data, *tmp_data;
HYPRE_Real diag;
hypre_ParVector *ds;
hypre_ParVector *tmp_vec;
/* u = u + p(A)r */
/* clamp the polynomial order to the explicitly-coded range [1,4] */
if (order > 4)
order = 4;
if (order < 1)
order = 1;
/* we are using the order of p(A) */
cheby_order = order -1;
/* make sure we are large enough - Adams et al. 2003 */
upper_bound = max_eig * 1.1;
/* lower_bound = max_eig/fraction; */
lower_bound = (upper_bound - min_eig)* fraction + min_eig;
/* theta and delta: center and half-width of the target interval */
theta = (upper_bound + lower_bound)/2;
delta = (upper_bound - lower_bound)/2;
if (variant == 1 )
{
/* modified cheby: T(t)*f(t) with f(t) = (1 - b/t) -- hard-coded
   coefficients for each supported order */
switch ( cheby_order ) /* these are the corresponding cheby polynomials: u = u_o + s(A)r_0 - so order is
one less than resid poly: r(t) = 1 - t*s(t) */
{
case 0:
coefs[0] = 1.0/theta;
break;
case 1: /* (del - t + 2*th)/(th^2 + del*th) */
den = (theta*theta + delta*theta);
coefs[0] = (delta + 2*theta)/den;
coefs[1] = -1.0/den;
break;
case 2: /* (4*del*th - del^2 - t*(2*del + 6*th) + 2*t^2 + 6*th^2)/(2*del*th^2 - del^2*th - del^3 + 2*th^3)*/
den = 2*delta*theta*theta - delta*delta*theta - pow(delta,3) + 2*pow(theta,3);
coefs[0] = (4*delta*theta - pow(delta,2) + 6*pow(theta,2))/den;
coefs[1] = -(2*delta + 6*theta)/den;
coefs[2] = 2/den;
break;
case 3: /* -(6*del^2*th - 12*del*th^2 - t^2*(4*del + 16*th) + t*(12*del*th - 3*del^2 + 24*th^2) + 3*del^3 + 4*t^3 - 16*th^3)/(4*del*th^3 - 3*del^2*th^2 - 3*del^3*th + 4*th^4)*/
den = - (4*delta*pow(theta,3) - 3*pow(delta,2)*pow(theta,2) - 3*pow(delta,3)*theta + 4*pow(theta,4) );
coefs[0] = (6*pow(delta,2)*theta - 12*delta*pow(theta,2) + 3*pow(delta,3) - 16*pow(theta,3) )/den;
coefs[1] = (12*delta*theta - 3*pow(delta,2) + 24*pow(theta,2))/den;
coefs[2] = -( 4*delta + 16*theta)/den;
coefs[3] = 4/den;
break;
}
}
else /* standard chebyshev */
{
switch ( cheby_order ) /* these are the corresponding cheby polynomials: u = u_o + s(A)r_0 - so order is
one less than resid poly: r(t) = 1 - t*s(t) */
{
case 0:
coefs[0] = 1.0/theta;
break;
case 1: /* ( 2*t - 4*th)/(del^2 - 2*th^2) */
den = delta*delta - 2*theta*theta;
coefs[0] = -4*theta/den;
coefs[1] = 2/den;
break;
case 2: /* (3*del^2 - 4*t^2 + 12*t*th - 12*th^2)/(3*del^2*th - 4*th^3)*/
den = 3*(delta*delta)*theta - 4*(theta*theta*theta);
coefs[0] = (3*delta*delta - 12 *theta*theta)/den;
coefs[1] = 12*theta/den;
coefs[2] = -4/den;
break;
case 3: /*(t*(8*del^2 - 48*th^2) - 16*del^2*th + 32*t^2*th - 8*t^3 + 32*th^3)/(del^4 - 8*del^2*th^2 + 8*th^4)*/
den = pow(delta,4) - 8*delta*delta*theta*theta + 8*pow(theta,4);
coefs[0] = (32*pow(theta,3)- 16*delta*delta*theta)/den;
coefs[1] = (8*delta*delta - 48*theta*theta)/den;
coefs[2] = 32*theta/den;
coefs[3] = -8/den;
break;
}
}
/* scratch copy of the incoming u, restored (added back) at the end */
orig_u = hypre_CTAlloc(HYPRE_Real, num_rows, HYPRE_MEMORY_HOST);
if (!scale)
{
/* get residual: r = f - A*u */
hypre_ParVectorCopy(f, r);
hypre_ParCSRMatrixMatvec(-1.0, A, u, 1.0, r);
/* Horner-style evaluation of u = s(A)*r, highest coefficient first */
for ( i = 0; i < num_rows; i++ )
{
orig_u[i] = u_data[i];
u_data[i] = r_data[i] * coefs[cheby_order];
}
for (i = cheby_order - 1; i >= 0; i-- )
{
hypre_ParCSRMatrixMatvec(1.0, A, u, 0.0, v);
mult = coefs[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
for ( j = 0; j < num_rows; j++ )
{
u_data[j] = mult * r_data[j] + v_data[j];
}
}
/* u = u_orig + s(A)*r */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for ( i = 0; i < num_rows; i++ )
{
u_data[i] = orig_u[i] + u_data[i];
}
}
else /* scaling! */
{
/*grab 1/sqrt(diagonal) */
ds = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize(ds);
hypre_ParVectorSetPartitioningOwner(ds,0);
ds_data = hypre_VectorData(hypre_ParVectorLocalVector(ds));
tmp_vec = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize(tmp_vec);
hypre_ParVectorSetPartitioningOwner(tmp_vec,0);
tmp_data = hypre_VectorData(hypre_ParVectorLocalVector(tmp_vec));
/* get ds_data and get scaled residual: r = D^(-1/2)f -
 * D^(-1/2)A*u */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j,diag) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_rows; j++)
{
/* NOTE(review): assumes the diagonal is the first entry of each CSR
   row and is positive (sqrt) -- standard hypre convention */
diag = A_diag_data[A_diag_i[j]];
ds_data[j] = 1/sqrt(diag);
r_data[j] = ds_data[j] * f_data[j];
}
hypre_ParCSRMatrixMatvec(-1.0, A, u, 0.0, tmp_vec);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
for ( j = 0; j < num_rows; j++ )
{
r_data[j] += ds_data[j] * tmp_data[j];
}
/* save original u, then start
the iteration by multiplying r by the cheby coef.*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
for ( j = 0; j < num_rows; j++ )
{
orig_u[j] = u_data[j]; /* orig, unscaled u */
u_data[j] = r_data[j] * coefs[cheby_order];
}
/* now do the other coefficients */
for (i = cheby_order - 1; i >= 0; i-- )
{
/* v = D^(-1/2)AD^(-1/2)u */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
for ( j = 0; j < num_rows; j++ )
{
tmp_data[j] = ds_data[j] * u_data[j];
}
hypre_ParCSRMatrixMatvec(1.0, A, tmp_vec, 0.0, v);
/* u_new = coef*r + v*/
mult = coefs[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j,tmp_d) HYPRE_SMP_SCHEDULE
#endif
for ( j = 0; j < num_rows; j++ )
{
tmp_d = ds_data[j]* v_data[j];
u_data[j] = mult * r_data[j] + tmp_d;
}
} /* end of cheby_order loop */
/* now we have to scale u_data before adding it to u_orig*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
for ( j = 0; j < num_rows; j++ )
{
u_data[j] = orig_u[j] + ds_data[j]*u_data[j];
}
hypre_ParVectorDestroy(ds);
hypre_ParVectorDestroy(tmp_vec);
}/* end of scaling code */
hypre_TFree(orig_u, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_BoomerAMGRelax_FCFJacobi
*--------------------------------------------------------------------------*/
/* Apply one F-C-F sweep of weighted Jacobi: relax the F points, then the
 * C points, then the F points again, as selected by cf_marker. */
HYPRE_Int hypre_BoomerAMGRelax_FCFJacobi( hypre_ParCSRMatrix *A,
hypre_ParVector *f,
HYPRE_Int *cf_marker,
HYPRE_Real relax_weight,
hypre_ParVector *u,
hypre_ParVector *Vtemp)
{
   HYPRE_Int sweep;
   /* sweep order: F (-1), C (+1), F (-1) */
   const HYPRE_Int sweep_points[3] = { -1, 1, -1 };
   const HYPRE_Int jacobi_relax_type = 0;

   /* a NULL CF marker is only legal when this rank owns no rows */
   if (cf_marker == NULL)
   {
      hypre_assert(hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A)) == 0);
   }

   for (sweep = 0; sweep < 3; sweep++)
   {
      hypre_BoomerAMGRelax(A,
                           f,
                           cf_marker,
                           jacobi_relax_type,
                           sweep_points[sweep],
                           relax_weight,
                           0.0,
                           NULL,
                           u,
                           Vtemp, NULL);
   }
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* CG Smoother -
*
*--------------------------------------------------------------------------*/
/* Run num_its iterations of (preconditioned) CG as a smoother on A u = f.
   The PCG solver object must already be created/set up by the caller. */
HYPRE_Int hypre_ParCSRRelax_CG( HYPRE_Solver solver,
hypre_ParCSRMatrix *A,
hypre_ParVector *f,
hypre_ParVector *u,
HYPRE_Int num_its)
{
HYPRE_PCGSetMaxIter(solver, num_its); /* max iterations */
HYPRE_PCGSetTol(solver, 0.0); /* zero tolerance: always run the full num_its iterations */
HYPRE_ParCSRPCGSolve(solver, (HYPRE_ParCSRMatrix)A, (HYPRE_ParVector)f, (HYPRE_ParVector)u);
#if 0 /* debug output: iteration count and final residual norm */
{
HYPRE_Int myid;
HYPRE_Int num_iterations;
HYPRE_Real final_res_norm;
hypre_MPI_Comm_rank(hypre_MPI_COMM_WORLD, &myid);
HYPRE_PCGGetNumIterations(solver, &num_iterations);
HYPRE_PCGGetFinalRelativeResidualNorm(solver, &final_res_norm);
if (myid ==0)
{
hypre_printf(" -----CG PCG Iterations = %d\n", num_iterations);
hypre_printf(" -----CG PCG Final Relative Residual Norm = %e\n", final_res_norm);
}
}
#endif
return hypre_error_flag;
}
/* tql1.f --
this is the eispack translation - from Barry Smith in Petsc
Note that this routine always uses real numbers (not complex) even
if the underlying matrix is Hermitian. This is because the Lanczos
process applied to Hermitian matrices always produces a real,
symmetric tridiagonal matrix.
*/
HYPRE_Real hypre_LINPACKcgpthy(HYPRE_Real*,HYPRE_Real*);
HYPRE_Int hypre_LINPACKcgtql1(HYPRE_Int *n,HYPRE_Real *d,HYPRE_Real *e,HYPRE_Int *ierr)
{
/* System generated locals */
HYPRE_Int i__1,i__2;
HYPRE_Real d__1,d__2,c_b10 = 1.0;
/* Local variables */
HYPRE_Real c,f,g,h;
HYPRE_Int i,j,l,m;
HYPRE_Real p,r,s,c2,c3 = 0.0;
HYPRE_Int l1,l2;
HYPRE_Real s2 = 0.0;
HYPRE_Int ii;
HYPRE_Real dl1,el1;
HYPRE_Int mml;
HYPRE_Real tst1,tst2;
/* THIS SUBROUTINE IS A TRANSLATION OF THE ALGOL PROCEDURE TQL1, */
/* NUM. MATH. 11, 293-306(1968) BY BOWDLER, MARTIN, REINSCH, AND */
/* WILKINSON. */
/* HANDBOOK FOR AUTO. COMP., VOL.II-LINEAR ALGEBRA, 227-240(1971). */
/* THIS SUBROUTINE FINDS THE EIGENVALUES OF A SYMMETRIC */
/* TRIDIAGONAL MATRIX BY THE QL METHOD. */
/* ON INPUT */
/* N IS THE ORDER OF THE MATRIX. */
/* D CONTAINS THE DIAGONAL ELEMENTS OF THE INPUT MATRIX. */
/* E CONTAINS THE SUBDIAGONAL ELEMENTS OF THE INPUT MATRIX */
/* IN ITS LAST N-1 POSITIONS. E(1) IS ARBITRARY. */
/* ON OUTPUT */
/* D CONTAINS THE EIGENVALUES IN ASCENDING ORDER. IF AN */
/* ERROR EXIT IS MADE, THE EIGENVALUES ARE CORRECT AND */
/* ORDERED FOR INDICES 1,2,...IERR-1, BUT MAY NOT BE */
/* THE SMALLEST EIGENVALUES. */
/* E HAS BEEN DESTROYED. */
/* IERR IS SET TO */
/* ZERO FOR NORMAL RETURN, */
/* J IF THE J-TH EIGENVALUE HAS NOT BEEN */
/* DETERMINED AFTER 30 ITERATIONS. */
/* CALLS CGPTHY FOR DSQRT(A*A + B*B) . */
/* QUESTIONS AND COMMENTS SHOULD BE DIRECTED TO BURTON S. GARBOW, */
/* MATHEMATICS AND COMPUTER SCIENCE DIV, ARGONNE NATIONAL LABORATORY
*/
/* THIS VERSION DATED AUGUST 1983. */
/* ------------------------------------------------------------------
*/
HYPRE_Real ds;
/* shift the array base pointers so the Fortran-style 1-based indexing
   of the original code can be kept verbatim below */
--e;
--d;
*ierr = 0;
if (*n == 1) {
goto L1001;
}
i__1 = *n;
for (i = 2; i <= i__1; ++i) {
e[i - 1] = e[i];
}
f = 0.;
tst1 = 0.;
e[*n] = 0.;
i__1 = *n;
for (l = 1; l <= i__1; ++l) {
j = 0;
h = (d__1 = d[l],fabs(d__1)) + (d__2 = e[l],fabs(d__2));
if (tst1 < h) {
tst1 = h;
}
/* .......... LOOK FOR SMALL SUB-DIAGONAL ELEMENT .......... */
i__2 = *n;
for (m = l; m <= i__2; ++m) {
tst2 = tst1 + (d__1 = e[m],fabs(d__1));
if (tst2 == tst1) {
goto L120;
}
/* .......... E(N) IS ALWAYS ZERO,SO THERE IS NO EXIT */
/* THROUGH THE BOTTOM OF THE LOOP .......... */
}
L120:
if (m == l) {
goto L210;
}
L130:
if (j == 30) {
goto L1000;
}
++j;
/* .......... FORM SHIFT .......... */
l1 = l + 1;
l2 = l1 + 1;
g = d[l];
p = (d[l1] - g) / (e[l] * 2.);
r = hypre_LINPACKcgpthy(&p,&c_b10);
ds = 1.0; if (p < 0.0) ds = -1.0;
d[l] = e[l] / (p + ds*r);
d[l1] = e[l] * (p + ds*r);
dl1 = d[l1];
h = g - d[l];
if (l2 > *n) {
goto L145;
}
i__2 = *n;
for (i = l2; i <= i__2; ++i) {
d[i] -= h;
}
L145:
f += h;
/* .......... QL TRANSFORMATION .......... */
p = d[m];
c = 1.;
c2 = c;
el1 = e[l1];
s = 0.;
mml = m - l;
/* .......... FOR I=M-1 STEP -1 UNTIL L DO -- .......... */
i__2 = mml;
for (ii = 1; ii <= i__2; ++ii) {
c3 = c2;
c2 = c;
s2 = s;
i = m - ii;
g = c * e[i];
h = c * p;
r = hypre_LINPACKcgpthy(&p,&e[i]);
e[i + 1] = s * r;
s = e[i] / r;
c = p / r;
p = c * d[i] - s * g;
d[i + 1] = h + s * (c * g + s * d[i]);
}
p = -s * s2 * c3 * el1 * e[l] / dl1;
e[l] = s * p;
d[l] = c * p;
tst2 = tst1 + (d__1 = e[l],fabs(d__1));
if (tst2 > tst1) {
goto L130;
}
L210:
p = d[l] + f;
/* .......... ORDER EIGENVALUES .......... */
if (l == 1) {
goto L250;
}
/* .......... FOR I=L STEP -1 UNTIL 2 DO -- .......... */
i__2 = l;
for (ii = 2; ii <= i__2; ++ii) {
i = l + 2 - ii;
if (p >= d[i - 1]) {
goto L270;
}
d[i] = d[i - 1];
}
L250:
i = 1;
L270:
d[i] = p;
}
goto L1001;
/* .......... SET ERROR -- NO CONVERGENCE TO AN */
/* EIGENVALUE AFTER 30 ITERATIONS .......... */
L1000:
*ierr = l;
L1001:
return 0;
} /* cgtql1_ */
/* Compute sqrt(a^2 + b^2) without overflow or destructive underflow.
 * EISPACK "pythag": iteratively refines p = max(|a|,|b|) using the
 * ratio min/max, so neither square is ever formed directly. */
HYPRE_Real hypre_LINPACKcgpthy(HYPRE_Real *a,HYPRE_Real *b)
{
   HYPRE_Real abs_a, abs_b, p, r, s, t, u, ratio;

   abs_a = fabs(*a);
   abs_b = fabs(*b);

   p = hypre_max(abs_a, abs_b);
   if (p)
   {
      /* r = (min/max)^2, always in [0,1] */
      ratio = hypre_min(abs_a, abs_b) / p;
      r = ratio * ratio;

      /* iterate until r has underflowed relative to 4 */
      for (;;)
      {
         t = r + 4.;
         if (t == 4.)
         {
            break;
         }
         s = r / t;
         u = s * 2. + 1.;
         p = u * p;
         ratio = s / u;
         r = ratio * ratio * r;
      }
   }
   return p;
} /* cgpthy_ */
/*--------------------------------------------------------------------------
* hypre_ParCSRRelax_L1_Jacobi (same as the one in AMS, but this allows CF)
u += w D^{-1}(f - A u), where D_ii = ||A(i,:)||_1
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_ParCSRRelax_L1_Jacobi( hypre_ParCSRMatrix *A,
hypre_ParVector *f,
HYPRE_Int *cf_marker,
HYPRE_Int relax_points,
HYPRE_Real relax_weight,
HYPRE_Real *l1_norms,
hypre_ParVector *u,
hypre_ParVector *Vtemp )
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
HYPRE_Real *u_data = hypre_VectorData(u_local);
hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
HYPRE_Real *f_data = hypre_VectorData(f_local);
hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp);
HYPRE_Real *Vtemp_data = hypre_VectorData(Vtemp_local);
HYPRE_Real *Vext_data = NULL;
HYPRE_Real *v_buf_data;
HYPRE_Int i, j;
HYPRE_Int ii, jj;
HYPRE_Int num_sends;
HYPRE_Int index, start;
HYPRE_Int num_procs, my_id ;
HYPRE_Real zero = 0.0;
HYPRE_Real res;
hypre_MPI_Comm_size(comm,&num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
/* start the exchange of off-processor u values; the receive is completed
   after the local copy loop below so communication overlaps computation */
if (num_procs > 1)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
v_buf_data = hypre_CTAlloc(HYPRE_Real,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
if (num_cols_offd)
{
A_offd_j = hypre_CSRMatrixJ(A_offd);
A_offd_data = hypre_CSRMatrixData(A_offd);
}
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
v_buf_data[index++]
= u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data,
Vext_data);
}
/*-----------------------------------------------------------------
* Copy current approximation into temporary vector.
*-----------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
{
Vtemp_data[i] = u_data[i];
}
/* finish the halo exchange before the relaxation sweep reads Vext_data */
if (num_procs > 1)
{
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
}
/*-----------------------------------------------------------------
* Relax all points: u_i += w*(f - A*u_old)_i / ||A(i,:)||_1
*-----------------------------------------------------------------*/
if (relax_points == 0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += (relax_weight*res)/l1_norms[i];
}
}
}
/*-----------------------------------------------------------------
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------------*/
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += (relax_weight * res)/l1_norms[i];
}
}
}
/* communication buffers only exist in the multi-process case */
if (num_procs > 1)
{
hypre_TFree(Vext_data, HYPRE_MEMORY_HOST);
hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
}
return 0;
}
|
a5atom.c | #include <stdio.h>
#define N 100000000
#define MAX 4
int a[N],b[N],ind[N];
long long s=0;
/* Demo of #pragma omp atomic: many parallel iterations accumulate into the
 * first MAX slots of b[], then the result is printed and summed.
 * Fix: s is `long long`, so it must be printed with %lld (the old %ld is
 * undefined behavior on platforms where long is 32-bit). Also gives main
 * an explicit return type/value (implicit int is invalid in C99+). */
int main(void)
{
   int i;
   /* initialization, not done in parallel */
   for(i=0;i<N;i++)
   {
      a[i]=1;
      b[i]=2;
      ind[i]=i%MAX;
   }
   /* every iteration writes b[ind[i]] with ind[i] in [0,MAX); many threads
    * hit the same slot, so the update must be atomic */
#pragma omp parallel for
   for (i=0;i<N;i++)
#pragma omp atomic
      b[ind[i]] += a[i];
   for (i=0;i<MAX;i++)
   {
      printf("Valor %d, de b %d \n",i,b[i]);
      s+=b[i];
   }
   printf("Suma total de b: %lld\n",s); /* %lld matches long long */
   return 0;
}
|
hello.c | #include <stdio.h>
#include<omp.h>
int main ()
{
   /* Each thread announces itself twice; the two printf calls are kept
    * separate on purpose so their output may interleave across threads,
    * which is what this demo illustrates. */
#pragma omp parallel
   {
      int tid = omp_get_thread_num();
      printf("Hello %d ", tid);
      printf("world! %d \n", tid);
   }
   return 0;
}
|
Fig_6.11_mandelbrotSolutionPart2.c | // combine Fig_6.10_mandelbrotSolutionPart1.c and Fig_6.11_mandelbrotSolutionPart2.c into one file, and name it as mandel_par.c
// sample compile command: gcc -fopenmp -o mandel_par mandel_par.c
/* Test one point c of the complex plane for escape from the Mandelbrot set.
 * Iterates z = z^2 + c up to MXITR times; if |z|^2 exceeds 4.0 the point has
 * escaped, the shared counter numoutside is incremented, and we stop early.
 * NOTE(review): struct d_complex (fields .r/.i), MXITR and numoutside are
 * declared in the companion part-1 file -- confirm when files are combined. */
void testpoint(struct d_complex c)
{
struct d_complex z;
int iter;
double temp;
z = c;
for (iter = 0; iter < MXITR; iter++) {
temp = (z.r * z.r) - (z.i * z.i) + c.r; /* Re(z^2 + c) */
z.i = z.r * z.i * 2 + c.i; /* Im(z^2 + c) */
z.r = temp;
if ((z.r * z.r + z.i * z.i) > 4.0) {
/* numoutside is shared by all threads, so guard the increment */
#pragma omp critical
numoutside++;
break;
}
}
}
|
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Computes *result = *x - *y for struct timeval values (the classic
 * glibc-manual routine). *y is normalized in place (its fields are
 * modified) so the final subtraction needs no borrow.
 * Returns 1 when the difference is negative, 0 otherwise. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into y until x->tv_usec >= y->tv_usec. */
  if (x->tv_usec < y->tv_usec)
  {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }
  /* Carry any excess (> 1 second) of microseconds into y's seconds. */
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* With y normalized, result->tv_usec is guaranteed non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  /* Negative iff x's (normalized) seconds fall below y's. */
  return x->tv_sec < y->tv_sec;
}
/* Driver for an order-1 3-D 7-point variable-coefficient stencil whose
 * sweep was time-tiled by PLUTO/CLooG. Grid sizes come from argv[1..3]
 * (+2 halo) and the step count from argv[4]; each of TESTS runs is
 * timed and results go through PRINT_RESULTS (print_utils.h).
 * NOTE(review): Nx/Ny/Nz (and Nt) stay uninitialized when fewer than
 * 3 (resp. 4) command-line arguments are given -- the allocations and
 * loops below would then use indeterminate sizes. */
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
// A[2][Nz][Ny][Nx]: double buffer for the field; the sweep below
// alternates buffers via t5 % 2. malloc results are not checked.
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// coef[7][Nz][Ny][Nx]: one coefficient array per stencil point
double ****coef = (double ****) malloc(sizeof(double***)*7);
for(m=0; m<7;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
// NOTE(review): tile_size (4,4,32,256; -1-terminated) only records the
// tiling applied by the code generator; it is never read again below
// and is never freed.
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 32;
tile_size[3] = 256;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
// Deterministic pseudo-random init (fixed seed). Loops start at 1, so
// index 0 along each dimension is never written.
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<7; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
// NOTE(review): num_threads is not referenced below except possibly
// inside the PRINT_RESULTS macro -- confirm against print_utils.h.
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
/* Generated time-tiled sweep: t5 enumerates time steps, t6/t7/t8 the
 * z/y/x points (offset by t5 due to skewing); the t2 tile loop is the
 * parallel dimension. Do not hand-edit the bounds. */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,2);t1++) {
lbp=max(ceild(t1,2),ceild(4*t1-Nt+3,4));
ubp=min(floord(Nt+Nz-4,4),floord(2*t1+Nz-1,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(t1-15,16)),ceild(4*t2-Nz-28,32));t3<=min(min(min(floord(4*t2+Ny,32),floord(Nt+Ny-4,32)),floord(2*t1+Ny+1,32)),floord(4*t1-4*t2+Nz+Ny-1,32));t3++) {
for (t4=max(max(max(0,ceild(t1-127,128)),ceild(4*t2-Nz-252,256)),ceild(32*t3-Ny-252,256));t4<=min(min(min(min(floord(4*t2+Nx,256),floord(Nt+Nx-4,256)),floord(2*t1+Nx+1,256)),floord(32*t3+Nx+28,256)),floord(4*t1-4*t2+Nz+Nx-1,256));t4++) {
for (t5=max(max(max(max(max(0,2*t1),4*t1-4*t2+1),4*t2-Nz+2),32*t3-Ny+2),256*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,2*t1+3),4*t2+2),32*t3+30),256*t4+254),4*t1-4*t2+Nz+1);t5++) {
for (t6=max(max(4*t2,t5+1),-4*t1+4*t2+2*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(32*t3,t5+1);t7<=min(32*t3+31,t5+Ny-2);t7++) {
lbv=max(256*t4,t5+1);
ubv=min(256*t4+255,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
// NOTE(review): the top-level pointer arrays A and coef themselves
// (and tile_size) are never freed -- harmless right before return,
// but flagged by leak checkers.
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<7;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
displayUtils.h | /*
* utility functions for visualization of results (disparity in color, warped output, ...)
*/
#pragma once
#include <sstream>
#include <fstream>
#if (CV_MAJOR_VERSION ==2)
#include <opencv2/contrib/contrib.hpp> // needed for applyColorMap!
#endif
#include "point_cloud.h"
#include "point_cloud_list.h"
/* compute gamma correction (just for display purposes to see more details in farther away areas of disparity image)
* Input: img - image
* gamma - gamma value
* Output: gamma corrected image
*/
/* Applies gamma correction through a 256-entry lookup table:
 * out = (in/255)^(1/gamma) * 255, rounded down to an 8-bit value.
 * Display-only helper (brightens dark regions of disparity images). */
Mat correctGamma( Mat& img, double gamma ) {
    const double exponent = 1.0 / gamma;
    Mat lut_matrix(1, 256, CV_8UC1 );
    uchar *lut = lut_matrix.ptr();
    for (int value = 0; value < 256; ++value) {
        lut[value] = (int)( pow( (double) value / 255.0, exponent ) * 255.0 );
    }
    Mat corrected;
    LUT( img, lut_matrix, corrected );
    return corrected;
}
/* Prepares a float disparity map for display.
 * dispGray: 16-bit rescale of [minDisp, numDisparities] to [0, 65535].
 * dispColor: 8-bit rescale, gamma-corrected (only when minDisp == 0),
 * then JET-colormapped; pixels with disparity <= 0 (invalid) are
 * blacked out. */
static void getDisparityForDisplay(const Mat_<float> &disp, Mat &dispGray, Mat &dispColor, float numDisparities, float minDisp = 0.0f){
float gamma = 2.0f; // to get higher contrast for lower disparity range (just for color visualization)
disp.convertTo(dispGray,CV_16U,65535.f/(numDisparities-minDisp),-minDisp*65535.f/(numDisparities-minDisp));
Mat disp8;
disp.convertTo(disp8,CV_8U,255.f/(numDisparities-minDisp),-minDisp*255.f/(numDisparities-minDisp));
if(minDisp == 0.0f)
disp8 = correctGamma(disp8,gamma);
applyColorMap(disp8, dispColor, COLORMAP_JET);
// mask invalid disparities (<= 0) as black in the color image
for(int y = 0; y < dispColor.rows; y++){
for(int x = 0; x < dispColor.cols; x++){
if(disp(y,x) <= 0.0f)
dispColor.at<Vec3b>(y,x) = Vec3b(0,0,0);
}
}
}
/* Converts a disparity map to a depth map of the same size by applying
 * disparityDepthConversion(f, baseline, disparity) per pixel, with
 * focal length f and stereo baseline as given. d is reallocated and
 * zero-initialized first. */
static void convertDisparityDepthImage(const Mat_<float> &dispL, Mat_<float> &d, float f, float baseline){
d = Mat::zeros(dispL.rows, dispL.cols, CV_32F);
for(int y = 0; y < dispL.rows; y++){
for(int x = 0; x < dispL.cols; x++){
d(y,x) = disparityDepthConversion(f,baseline,dispL(y,x));
}
}
}
/* Formats a grayscale intensity as the PLY-style triple "v v v". */
static string getColorString(uint8_t color){
    const int v = (int)color;
    stringstream out;
    out << v << " " << v << " " << v;
    return out.str();
}
/* Formats an 8-bit BGR pixel as "R G B" (channel order reversed so the
 * PLY output is RGB). */
static string getColorString(Vec3b color){
    const int r = (int)color(2);
    const int g = (int)color(1);
    const int b = (int)color(0);
    stringstream out;
    out << r << " " << g << " " << b;
    return out.str();
}
/* Formats a 16-bit-range BGR pixel as "R G B", scaling each channel
 * down by 256 to the 8-bit range (channel order reversed to RGB). */
static string getColorString(Vec3i color){
    const int r = (int)((float)color(2)/256.f);
    const int g = (int)((float)color(1)/256.f);
    const int b = (int)((float)color(0)/256.f);
    stringstream out;
    out << r << " " << g << " " << b;
    return out.str();
}
static void storePlyFileBinaryPointCloud (char* plyFilePath, PointCloudList &pc, Mat_<float> &distImg) {
cout << "store 3D points to ply file" << endl;
FILE *outputPly;
outputPly=fopen(plyFilePath,"wb");
/*write header*/
fprintf(outputPly, "ply\n");
fprintf(outputPly, "format binary_little_endian 1.0\n");
fprintf(outputPly, "element vertex %d\n",pc.size);
fprintf(outputPly, "property float x\n");
fprintf(outputPly, "property float y\n");
fprintf(outputPly, "property float z\n");
// fprintf(outputPly, "property float nx\n");
// fprintf(outputPly, "property float ny\n");
// fprintf(outputPly, "property float nz\n");
fprintf(outputPly, "property uchar red\n");
fprintf(outputPly, "property uchar green\n");
fprintf(outputPly, "property uchar blue\n");
fprintf(outputPly, "end_header\n");
distImg = Mat::zeros(pc.rows,pc.cols,CV_32F);
//write data
#pragma omp parallel for
for(size_t i = 0; i < pc.size; i++) {
const Point_li &p = pc.points[i];
// const float4 normal = p.normal;
float4 X = p.coord;
const char color_r = (int)p.texture4[2];
const char color_g = (int)p.texture4[1];
const char color_b = (int)p.texture4[0];
/*const int color = 127.0f;*/
/*printf("Writing point %f %f %f\n", X.x, X.y, X.z);*/
if(!(X.x < FLT_MAX && X.x > -FLT_MAX) || !(X.y < FLT_MAX && X.y > -FLT_MAX) || !(X.z < FLT_MAX && X.z >= -FLT_MAX)){
X.x = 0.0f;
X.y = 0.0f;
X.z = 0.0f;
}
#pragma omp critical
{
/*myfile << X.x << " " << X.y << " " << X.z << " " << normal.x << " " << normal.y << " " << normal.z << " " << color << " " << color << " " << color << endl;*/
fwrite(&X.x, sizeof(X.x), 1, outputPly);
fwrite(&X.y, sizeof(X.y), 1, outputPly);
fwrite(&X.z, sizeof(X.z), 1, outputPly);
// fwrite(&normal.x, sizeof(normal.x), 1, outputPly);
// fwrite(&normal.y, sizeof(normal.y), 1, outputPly);
// fwrite(&normal.z, sizeof(normal.z), 1, outputPly);
fwrite(&color_r, sizeof(char), 1, outputPly);
fwrite(&color_g, sizeof(char), 1, outputPly);
fwrite(&color_b, sizeof(char), 1, outputPly);
}
}
fclose(outputPly);
}
/* Converts a float normal map into a displayable image: 8-bit output
 * uses scale/offset 128/128, otherwise 16-bit with 32767/32767
 * (assumes components lie in [-1,1] -- TODO confirm with callers),
 * then swaps channels RGB->BGR for OpenCV display. */
static void getNormalsForDisplay(const Mat &normals, Mat &normals_display, int rtype = CV_16U){
if(rtype == CV_8U)
normals.convertTo(normals_display,CV_8U,128,128);
else
normals.convertTo(normals_display,CV_16U,32767,32767);
cvtColor(normals_display,normals_display,COLOR_RGB2BGR);
}
|
vla_crash.c | // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// RUN: %clang_cc1 -no-opaque-pointers -verify -triple powerpc64le-unknown-linux-gnu -fopenmp -x c -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
// RUN: %clang_cc1 -no-opaque-pointers -verify -triple powerpc64le-unknown-linux-gnu -fopenmp-simd -x c -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
int a;
void foo(void) {
// VLA-typed pointers sized by the global 'a'; deliberately left
// uninitialized -- this is a compiler crash-regression input, not
// runnable code (see the FileCheck IR expectations below).
int(*b)[a];
int *(**c)[a];
#pragma omp parallel if (0)
b[0][0] = c[0][a][0][a];
}
void bar(int n, int *a) {
// The expected-warning lines are clang -verify directives; each must
// stay immediately above the statement it annotates.
// expected-warning@+1 {{incompatible pointer types initializing 'int (*)[n]' with an expression of type 'int **'}}
int(*p)[n] = &a;
#pragma omp parallel if(0)
// expected-warning@+1 {{comparison of distinct pointer types ('int (*)[n]' and 'int **')}}
if (p == &a) {
}
}
// CHECK1-LABEL: define {{[^@]+}}@foo
// CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[B:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[C:%.*]] = alloca i32***, align 8
// CHECK1-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* @a, align 4
// CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* @a, align 4
// CHECK1-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
// CHECK1-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK1-NEXT: store i32 [[TMP0]], i32* [[DOTTHREADID_TEMP_]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4
// CHECK1-NEXT: call void @.omp_outlined.(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP2]], i32** [[B]], i64 [[TMP4]], i32**** [[C]]) #[[ATTR2:[0-9]+]]
// CHECK1-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[VLA:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[B:%.*]], i64 noundef [[VLA1:%.*]], i32**** noundef nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca i32**, align 8
// CHECK1-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca i32****, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: store i32** [[B]], i32*** [[B_ADDR]], align 8
// CHECK1-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK1-NEXT: store i32**** [[C]], i32***** [[C_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i32****, i32***** [[C_ADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load i32***, i32**** [[TMP3]], align 8
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32**, i32*** [[TMP4]], i64 0
// CHECK1-NEXT: [[TMP5:%.*]] = load i32**, i32*** [[ARRAYIDX]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* @a, align 4
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP6]] to i64
// CHECK1-NEXT: [[TMP7:%.*]] = mul nsw i64 [[IDXPROM]], [[TMP2]]
// CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32*, i32** [[TMP5]], i64 [[TMP7]]
// CHECK1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32*, i32** [[ARRAYIDX3]], i64 0
// CHECK1-NEXT: [[TMP8:%.*]] = load i32*, i32** [[ARRAYIDX4]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* @a, align 4
// CHECK1-NEXT: [[IDXPROM5:%.*]] = sext i32 [[TMP9]] to i64
// CHECK1-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[TMP8]], i64 [[IDXPROM5]]
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = load i32*, i32** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = mul nsw i64 0, [[TMP0]]
// CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, i32* [[TMP11]], i64 [[TMP12]]
// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[ARRAYIDX7]], i64 0
// CHECK1-NEXT: store i32 [[TMP10]], i32* [[ARRAYIDX8]], align 4
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@bar
// CHECK1-SAME: (i32 noundef signext [[N:%.*]], i32* noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[P:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK1-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK1-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i32** [[A_ADDR]] to i32*
// CHECK1-NEXT: store i32* [[TMP3]], i32** [[P]], align 8
// CHECK1-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK1-NEXT: store i32 [[TMP0]], i32* [[DOTTHREADID_TEMP_]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4
// CHECK1-NEXT: call void @.omp_outlined..1(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP2]], i32** [[P]], i32** [[A_ADDR]]) #[[ATTR2]]
// CHECK1-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[VLA:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[P:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[A:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[P_ADDR:%.*]] = alloca i32**, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32**, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: store i32** [[P]], i32*** [[P_ADDR]], align 8
// CHECK1-NEXT: store i32** [[A]], i32*** [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32**, i32*** [[P_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i32*, i32** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast i32** [[TMP2]] to i32*
// CHECK1-NEXT: [[CMP:%.*]] = icmp eq i32* [[TMP3]], [[TMP4]]
// CHECK1-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
// CHECK1: if.then:
// CHECK1-NEXT: br label [[IF_END]]
// CHECK1: if.end:
// CHECK1-NEXT: ret void
//
|
c99-omp-ping-pong.c | #include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#ifdef _OPENMP
# include <omp.h>
#else
# error No OpenMP support!
#endif
#ifdef SEQUENTIAL_CONSISTENCY
# if ( _OPENMP < 201307 )
# error You need OpenMP 4+ for seq_cst atomics.
# endif
# define OMP_ATOMIC_LOAD _Pragma("omp atomic read seq_cst")
# define OMP_ATOMIC_STORE _Pragma("omp atomic write seq_cst")
# define OMP_FLUSH
#else
# define OMP_ATOMIC_LOAD _Pragma("omp atomic read")
# define OMP_ATOMIC_STORE _Pragma("omp atomic write")
# define OMP_FLUSH _Pragma("omp flush")
#endif
/* Two-thread ping-pong latency benchmark: the even thread ("left") and
 * the odd thread ("right") alternately publish the iteration stamp via
 * an atomic store and spin on the partner's flag via atomic loads.
 * Fix: the receive loops previously executed "OMP_ATOMIC_LOAD temp = i;"
 * -- an atomic load of the thread-PRIVATE loop counter -- and then read
 * the shared flag without atomicity in "temp == right_ready". The
 * atomic load now targets the shared flag (right_ready / left_ready)
 * and is compared against i, which is the intended handshake. */
int main(int argc, char * argv[])
{
    int nt = omp_get_max_threads();
#if 1
    if (nt != 2) omp_set_num_threads(2);
#else
    if (nt < 2) omp_set_num_threads(2);
    if (nt % 2 != 0) omp_set_num_threads(nt-1);
#endif
    int iterations = (argc>1) ? atoi(argv[1]) : 1000000;
    printf("thread ping-pong benchmark\n");
    printf("num threads = %d\n", omp_get_max_threads());
    printf("iterations = %d\n", iterations);
#ifdef SEQUENTIAL_CONSISTENCY
    printf("memory model = %s\n", "seq_cst");
#else
    printf("memory model = %s\n", "acq-rel");
#endif
    fflush(stdout);
    /* shared mailboxes: *_ready carries the iteration stamp, *_payload the data */
    int left_ready = -1;
    int right_ready = -1;
    int left_payload = 0;
    int right_payload = 0;
#pragma omp parallel
    {
        int me = omp_get_thread_num();
        /* 0 = left, 1 = right */
        bool parity = (me % 2 == 0);
        int junk = 0;
        /* START TIME */
#pragma omp barrier
        double t0 = omp_get_wtime();
        for (int i=0; i<iterations; ++i) {
            if (parity) {
                /* send to left */
                left_payload = i;
                OMP_ATOMIC_STORE
                left_ready = i;
                //OMP_FLUSH
                /* recv from right: wait until the partner publishes stamp i */
                while (1) {
                    OMP_FLUSH
                    int temp;
                    OMP_ATOMIC_LOAD
                    temp = right_ready; /* fix: was 'temp = i' */
                    if (temp == i) break;
                }
                //printf("%d: left received %d\n", i, right_payload);
                junk += right_payload;
            } else {
                /* recv from left */
                while (1) {
                    OMP_FLUSH
                    int temp;
                    OMP_ATOMIC_LOAD
                    temp = left_ready; /* fix: was 'temp = i' */
                    if (temp == i) break;
                }
                //printf("%d: right received %d\n", i, left_payload);
                junk += left_payload;
                /* send to right */
                right_payload = i;
                OMP_ATOMIC_STORE
                right_ready = i;
                //OMP_FLUSH
            }
        }
        /* STOP TIME */
#pragma omp barrier
        double t1 = omp_get_wtime();
        /* PRINT TIME (junk is printed to keep the payload reads live) */
        double dt = t1-t0;
#pragma omp critical
        {
            printf("total time elapsed = %lf\n", dt);
            printf("time per iteration = %e\n", dt/iterations);
            printf("%d\n", junk);
        }
    }
    return 0;
}
|
SPGrid_Threading_Helper.h | //#####################################################################
// Copyright 2013, Raj Setaluri, Eftychios Sifakis.
// This file is part of PhysBAM whose distribution is governed by the license contained in the accompanying file PHYSBAM_COPYRIGHT.txt.
//#####################################################################
// Subroutine SPGrid_Computations::Threading_Helper
//#####################################################################
#ifndef __SPGrid_Threading_Helper_h__
#define __SPGrid_Threading_Helper_h__
#include <vector>
#include <SPGrid/Core/SPGrid_Allocator.h>
#include <Threading_Tools/PTHREAD_QUEUE.h>
extern PTHREAD_QUEUE* pthread_queue;
namespace SPGrid_Computations{
using namespace SPGrid;
/* Adapter that packages one (allocator, block-range) application of an
 * SPGrid operation as a PTHREAD_QUEUE::TASK. The block descriptor is
 * copied into the task; allocator and operation are held by reference,
 * so both must outlive the queue's processing of this task. */
template<class T_STRUCT,int d,class T_OPERATION>
struct Threading_Operation_Helper:public PTHREAD_QUEUE::TASK
{
typedef std::pair<const unsigned long*,unsigned> T_BLOCK;
SPGrid_Allocator<T_STRUCT,d>& allocator;
const T_BLOCK blocks;
const T_OPERATION& operation;
Threading_Operation_Helper(SPGrid_Allocator<T_STRUCT,d>& allocator_input,const T_BLOCK& blocks_input,const T_OPERATION& operation_input)
:allocator(allocator_input),blocks(blocks_input),operation(operation_input) {}
// Invoked by the worker thread: run the operation on this task's range.
void Run(){operation.Run(allocator,blocks);}
};
/* Fans an SPGrid block-range operation out over the global pthread task
 * queue, either with caller-supplied partitions or by splitting the
 * stored block list into a given number of near-equal chunks. */
template<class T_STRUCT,int d>
class Threading_Helper
{
    typedef std::pair<const unsigned long*,unsigned> T_BLOCK;
    SPGrid_Allocator<T_STRUCT,d>& allocator;
    const T_BLOCK& blocks;
public:
    Threading_Helper(SPGrid_Allocator<T_STRUCT,d>& allocator_input,const T_BLOCK& blocks_input)
        :allocator(allocator_input),blocks(blocks_input)
    {}
    // Queue one task per caller-supplied partition and block until all
    // complete. Fix: the partition list is now taken by const reference
    // (it was copied by value on every call) and the index comparison no
    // longer mixes signed/unsigned; semantics are unchanged.
    template<class T_OPERATION>
    void Run_Parallel(const T_OPERATION& operation,const std::vector<T_BLOCK>& list_of_partitions)
    {for(int partition=0;partition<(int)list_of_partitions.size();partition++){
        Threading_Operation_Helper<T_STRUCT,d,T_OPERATION>* task=
            new Threading_Operation_Helper<T_STRUCT,d,T_OPERATION>(allocator,list_of_partitions[partition],operation);
        pthread_queue->Queue(task);}
    pthread_queue->Wait();}
    // Split the stored block range into number_of_partitions contiguous
    // chunks (the first size%n chunks get one extra block) and queue one
    // task per chunk. Inputs smaller than 16 blocks per partition are
    // run inline on the calling thread.
    template<class T_OPERATION>
    void Run_Parallel(const T_OPERATION& operation,const int number_of_partitions)
    {const unsigned long* block_offsets=blocks.first;
    const int size=blocks.second;
    if(size<number_of_partitions*16){operation.Run(allocator,blocks);return;}
    for(int partition=0;partition<number_of_partitions;partition++){
        // balanced split: start = p*floor(size/n) + min(size%n, p)
        int first_index_of_partition=(size/number_of_partitions)*(partition)+std::min(size%number_of_partitions,partition);
        int block_size=(size/number_of_partitions)+((partition<size%number_of_partitions)?1:0);
        T_BLOCK block(block_offsets+first_index_of_partition,block_size);
        Threading_Operation_Helper<T_STRUCT,d,T_OPERATION>* task=
            new Threading_Operation_Helper<T_STRUCT,d,T_OPERATION>(allocator,block,operation);
        pthread_queue->Queue(task);}
    pthread_queue->Wait();}
};
// Applies functor to every block offset in the range, one call per
// entry of blocks.first (blocks.second entries total), sharing the
// iterations across OpenMP threads.
template<class Functor>
void Run_Parallel_Blocks(const std::pair<const uint64_t*,uint32_t>& blocks,Functor functor)
{
    const uint64_t* offsets=blocks.first;
    const int count=(int)blocks.second;
#pragma omp parallel for
    for(int index=0;index<count;index++)
        functor(offsets[index]);
}
//#####################################################################
}
#endif
|
diffusion3d_original.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
#include <omp.h>
#define REAL float
#define NX (64)
#ifndef M_PI
#define M_PI (3.1415926535897932384626)
#endif
/* Fills buff (nx*ny*nz, x fastest) with the analytic solution of the
 * diffusion problem at the given time: a product of damped cosines per
 * axis, where each axis carries the decay factor exp(-kappa*time*k^2).
 * Cell centers are sampled at (index + 0.5) * spacing. */
void init(REAL *buff, const int nx, const int ny, const int nz,
const REAL kx, const REAL ky, const REAL kz,
const REAL dx, const REAL dy, const REAL dz,
const REAL kappa, const REAL time) {
  const REAL decay_x = exp(-kappa*time*(kx*kx));
  const REAL decay_y = exp(-kappa*time*(ky*ky));
  const REAL decay_z = exp(-kappa*time*(kz*kz));
  int ix, iy, iz;
  for (iz = 0; iz < nz; iz++) {
    for (iy = 0; iy < ny; iy++) {
      for (ix = 0; ix < nx; ix++) {
        /* cell-center coordinates */
        REAL x = dx*((REAL)(ix + 0.5));
        REAL y = dy*((REAL)(iy + 0.5));
        REAL z = dz*((REAL)(iz + 0.5));
        buff[iz*nx*ny + iy*nx + ix] = (REAL)0.125
            *(1.0 - decay_x*cos(kx*x))
            *(1.0 - decay_y*cos(ky*y))
            *(1.0 - decay_z*cos(kz*z));
      }
    }
  }
}
/* Root-mean-square deviation between b1 and b2 over len elements. */
REAL accuracy(const REAL *b1, REAL *b2, const int len) {
  REAL sum_sq = 0.0;
  int idx;
  for (idx = 0; idx < len; idx++) {
    REAL diff = b1[idx] - b2[idx];
    sum_sq += diff * diff;
  }
  return (REAL)sqrt(sum_sq/len);
}
typedef void (*diffusion_loop_t)(REAL *f1, REAL *f2, int nx, int ny, int nz,
REAL ce, REAL cw, REAL cn, REAL cs, REAL ct,
REAL cb, REAL cc, REAL dt,
REAL **f_ret, REAL *time_ret, int *count_ret);
/* Serial reference: explicit 7-point diffusion stencil, stepped until
 * the simulated time reaches 0.1. At domain faces the out-of-range
 * neighbor index is clamped to the center cell (zero-gradient
 * boundary). f1/f2 are swapped after every step; *f_ret receives
 * whichever buffer holds the final field, *time_ret the simulated time
 * and *count_ret the number of steps taken. */
static void
diffusion_baseline(REAL *f1, REAL *f2, int nx, int ny, int nz,
REAL ce, REAL cw, REAL cn, REAL cs, REAL ct,
REAL cb, REAL cc, REAL dt,
REAL **f_ret, REAL *time_ret, int *count_ret) {
REAL time = 0.0;
int count = 0;
do {
int z;
for (z = 0; z < nz; z++) {
int y;
for (y = 0; y < ny; y++) {
int x;
for (x = 0; x < nx; x++) {
/* linear index of the center and its 6 neighbors; boundary
 * neighbors fall back to the center index (clamping) */
int c, w, e, n, s, b, t;
c = x + y * nx + z * nx * ny;
w = (x == 0) ? c : c - 1;
e = (x == nx-1) ? c : c + 1;
n = (y == 0) ? c : c - nx;
s = (y == ny-1) ? c : c + nx;
b = (z == 0) ? c : c - nx * ny;
t = (z == nz-1) ? c : c + nx * ny;
f2[c] = cc * f1[c] + cw * f1[w] + ce * f1[e]
+ cs * f1[s] + cn * f1[n] + cb * f1[b] + ct * f1[t];
}
}
}
/* ping-pong the buffers for the next step */
REAL *t = f1;
f1 = f2;
f2 = t;
time += dt;
count++;
} while (time + 0.5*dt < 0.1);
*time_ret = time;
*f_ret = f1;
*count_ret = count;
return;
}
/* OpenMP variant of diffusion_baseline: one parallel region spans the
 * whole time loop and the z-loop of each step is work-shared. Every
 * thread keeps private copies of the buffer pointers and of the
 * time/count accumulators; they stay consistent across threads because
 * all threads execute the same number of steps and the implicit
 * barrier at the end of each omp for keeps the steps in lockstep.
 * The master thread publishes the final values to the out-parameters
 * (made visible to the caller by the barrier at the region's end). */
static void
diffusion_openmp(REAL *f1, REAL *f2, int nx, int ny, int nz,
REAL ce, REAL cw, REAL cn, REAL cs, REAL ct,
REAL cb, REAL cc, REAL dt,
REAL **f_ret, REAL *time_ret, int *count_ret) {
#pragma omp parallel
{
REAL time = 0.0;
int count = 0;
REAL *f1_t = f1;
REAL *f2_t = f2;
#pragma omp master
printf("%d threads running\n", omp_get_num_threads());
do {
int z;
#pragma omp for
for (z = 0; z < nz; z++) {
int y;
for (y = 0; y < ny; y++) {
int x;
for (x = 0; x < nx; x++) {
/* neighbor indices clamped to the center cell at faces */
int c, w, e, n, s, b, t;
c = x + y * nx + z * nx * ny;
w = (x == 0) ? c : c - 1;
e = (x == nx-1) ? c : c + 1;
n = (y == 0) ? c : c - nx;
s = (y == ny-1) ? c : c + nx;
b = (z == 0) ? c : c - nx * ny;
t = (z == nz-1) ? c : c + nx * ny;
f2_t[c] = cc * f1_t[c] + cw * f1_t[w] + ce * f1_t[e]
+ cs * f1_t[s] + cn * f1_t[n] + cb * f1_t[b] + ct * f1_t[t];
}
}
}
/* per-thread (private) buffer swap; identical on every thread */
REAL *t = f1_t;
f1_t = f2_t;
f2_t = t;
time += dt;
count++;
} while (time + 0.5*dt < 0.1);
#pragma omp master
{
*f_ret = f1_t;
*time_ret = time;
*count_ret = count;
}
}
return;
}
/* Benchmark driver: builds a 64^3 problem, runs either the serial or
 * the OpenMP diffusion kernel (argv[1] == "openmp" selects the latter)
 * until simulated time 0.1, then compares against the analytic
 * solution and reports time, MFlop/s, bandwidth and RMS error.
 * NOTE(review): the malloc results are not checked. */
int main(int argc, char *argv[])
{
struct timeval time_begin, time_end;
int nx = NX;
int ny = NX;
int nz = NX;
REAL *f1 = (REAL *)malloc(sizeof(REAL)*NX*NX*NX);
REAL *f2 = (REAL *)malloc(sizeof(REAL)*NX*NX*NX);
REAL time = 0.0;
int count = 0;
REAL l, dx, dy, dz, kx, ky, kz, kappa, dt;
REAL ce, cw, cn, cs, ct, cb, cc;
l = 1.0;
kappa = 0.1;
dx = dy = dz = l / nx;
kx = ky = kz = 2.0 * M_PI;
/* dt chosen at 0.1 of the explicit-scheme factor dx^2/kappa */
dt = 0.1*dx*dx / kappa;
init(f1, nx, ny, nz, kx, ky, kz, dx, dy, dz, kappa, time);
/* stencil weights: face coefficients plus center cc so rows sum to 1 */
ce = cw = kappa*dt/(dx*dx);
cn = cs = kappa*dt/(dy*dy);
ct = cb = kappa*dt/(dz*dz);
cc = 1.0 - (ce + cw + cn + cs + ct + cb);
diffusion_loop_t diffusion_loop = diffusion_baseline;
if (argc == 2) {
if (strcmp(argv[1], "openmp") == 0) {
diffusion_loop = diffusion_openmp;
}
}
gettimeofday(&time_begin, NULL);
/* f1 is updated to whichever buffer holds the final field */
diffusion_loop(f1, f2, nx, ny, nz, ce, cw, cn, cs, ct, cb, cc, dt,
&f1, &time, &count);
gettimeofday(&time_end, NULL);
/* analytic solution at the reached time, for the error measure */
REAL *answer = (REAL *)malloc(sizeof(REAL) * nx*ny*nz);
init(answer, nx, ny, nz, kx, ky, kz, dx, dy, dz, kappa, time);
REAL err = accuracy(f1, answer, nx*ny*nz);
double elapsed_time = (time_end.tv_sec - time_begin.tv_sec)
+ (time_end.tv_usec - time_begin.tv_usec)*1.0e-6;
/* 13 flops per point: 7 multiplies + 6 additions in the stencil */
REAL mflops = (nx*ny*nz)*13.0*count/elapsed_time * 1.0e-06;
/* 2 accesses per point (one read buffer, one write buffer) */
double thput = (nx * ny * nz) * sizeof(REAL) * 2.0 * count
/ elapsed_time / (1 << 30);
fprintf(stderr, "elapsed time : %.3f (s)\n", elapsed_time);
fprintf(stderr, "flops : %.3f (MFlops)\n", mflops);
fprintf(stderr, "throughput : %.3f (GB/s)\n", thput);
fprintf(stderr, "accuracy : %e\n", err);
free(answer);
free(f1);
free(f2);
return 0;
}
|
ast-dump-openmp-for.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test_one(int x) {
#pragma omp for
for (int i = 0; i < x; i++)
;
}
void test_two(int x, int y) {
#pragma omp for
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_three(int x, int y) {
#pragma omp for collapse(1)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_four(int x, int y) {
#pragma omp for collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_five(int x, int y, int z) {
#pragma omp for collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
for (int i = 0; i < z; i++)
;
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-for.c:3:1, line:7:1> line:3:6 test_one 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1>
// CHECK-NEXT: | `-OMPForDirective {{.*}} <line:4:1, col:16>
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-for.c:4:1) *const restrict'
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1>
// CHECK-NEXT: | `-OMPForDirective {{.*}} <line:10:1, col:16>
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-for.c:10:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1>
// CHECK-NEXT: | `-OMPForDirective {{.*}} <line:17:1, col:28>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:17, col:27>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:26> 'int'
// CHECK-NEXT: | | |-value: Int 1
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-for.c:17:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1>
// CHECK-NEXT: | `-OMPForDirective {{.*}} <line:24:1, col:28>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:17, col:27>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:26> 'int'
// CHECK-NEXT: | | |-value: Int 2
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:26> 'int' 2
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-for.c:24:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1>
// CHECK-NEXT: `-OMPForDirective {{.*}} <line:31:1, col:28>
// CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:17, col:27>
// CHECK-NEXT: | `-ConstantExpr {{.*}} <col:26> 'int'
// CHECK-NEXT: | |-value: Int 2
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:26> 'int' 2
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-for.c:31:1) *const restrict'
// CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
|
GB_binop__min_uint32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__min_uint32)
// A.*B function (eWiseMult): GB (_AemultB_08__min_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__min_uint32)
// A.*B function (eWiseMult): GB (_AemultB_04__min_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__min_uint32)
// A*D function (colscale): GB (_AxD__min_uint32)
// D*A function (rowscale): GB (_DxB__min_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__min_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__min_uint32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__min_uint32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__min_uint32)
// C=scalar+B GB (_bind1st__min_uint32)
// C=scalar+B' GB (_bind1st_tran__min_uint32)
// C=A+scalar GB (_bind2nd__min_uint32)
// C=A'+scalar GB (_bind2nd_tran__min_uint32)
// C type: uint32_t
// A type: uint32_t
// A pattern? 0
// B type: uint32_t
// B pattern? 0
// BinaryOp: cij = GB_IMIN (aij, bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
// NOTE: the generator emitted a stray trailing backslash after the "0",
// which spliced the *next* source line into this macro definition (C line
// splicing runs before comment removal).  Here it only swallowed a comment,
// but it would swallow code if the following line ever changed; the stray
// backslash is removed.
#define GB_A_IS_PATTERN \
    0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
// NOTE: stray trailing backslash removed (it spliced the following source
// line into this macro definition; see the matching fix for GB_A_IS_PATTERN).
#define GB_B_IS_PATTERN \
    0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IMIN (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MIN || GxB_NO_UINT32 || GxB_NO_MIN_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense and the "+" is this file's
// binary op, GB_IMIN (see GB_BINOP above).  All looping lives in the
// included template, expanded with the GB_* macros defined at the top of
// this file.  NOTE(review): unlike the other kernels here there is no
// GB_DISABLE guard — presumably the caller only dispatches here when the
// op/type pair is enabled; confirm against the generic switch.
void GB (_Cdense_ewise3_accum__min_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads      // number of threads to use
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B (no accumulation) where all three matrices are dense, with
// "+" = GB_IMIN.  The loop body comes from the included template, driven
// by the GB_* macros defined above.
void GB (_Cdense_ewise3_noaccum__min_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads      // number of threads to use
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C with
// GB_IMIN, via the subassign-23 template.  B_ek_slicing/B_ntasks describe
// how B has been partitioned into tasks for B_nthreads threads.
GrB_Info GB (_Cdense_accumB__min_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    // this op/type pair was disabled at compile time; caller falls back to
    // the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix with GB_IMIN.
GrB_Info GB (_Cdense_accumb__min_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,     // pointer to the scalar b (type uint32_t)
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the block above always returns (generator artifact)
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column scale — each column of A is combined with the matching
// diagonal entry of D using GB_IMIN.
GrB_Info GB (_AxD__min_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,         // diagonal scaling matrix
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C has the same pattern as A; only its values are written here
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row scale — each row of B is combined with the matching diagonal
// entry of D using GB_IMIN.
GrB_Info GB (_DxB__min_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix D,         // diagonal scaling matrix
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C has the same pattern as B; only its values are written here
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B, with "+" = GB_IMIN.
// For eWiseUnion the alpha/beta scalars stand in for entries present in
// only one of A or B; for plain eWiseAdd they are never read.
GrB_Info GB (_AaddB__min_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,                 // optional mask
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,           // selects eWiseUnion semantics
    const GB_void *alpha_scalar_in,     // only read when is_eWiseUnion
    const GB_void *beta_scalar_in,      // only read when is_eWiseUnion
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace declared here so GB_FREE_WORKSPACE below can release it
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint32_t alpha_scalar ;
    uint32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // the scalars are defined only for eWiseUnion, not eWiseAdd
        alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
        beta_scalar = (*((uint32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult, method 08: C = A.*B (with optional mask M), where C is
// sparse/hyper; GB_IMIN is applied on the intersection of the patterns of
// A and B via the included meta template.
GrB_Info GB (_AemultB_08__min_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,                 // optional mask
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult, method 02: C<#> = A.*B where A is sparse/hyper and B is
// bitmap/full.  GB_BINOP_FLIP is 0 for MIN (it is commutative), so only the
// unflipped branch of the #if below is compiled for this operator.
GrB_Info GB (_AemultB_02__min_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,                 // optional mask
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,                  // if true apply the op as f(y,x)
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult, method 04: C<M> = A.*B where the mask M is sparse/hyper and
// both A and B are bitmap/full; GB_IMIN applied via the included template.
GrB_Info GB (_AemultB_04__min_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,                 // required mask (sparse/hyper)
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B, C<M> = A.*B, or C<!M> = A.*B where the result C is
// held in bitmap form; GB_IMIN applied via the included bitmap template.
GrB_Info GB (_AemultB_bitmap__min_uint32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,                 // optional mask
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the MIN operator with the scalar x bound as the
// first argument, over every entry present in B.
GrB_Info GB (_bind1st__min_uint32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // only entries present in the bitmap/pattern of B are computed
        if (GBB (Bb, k))
        {
            uint32_t bij = GBX (Bx, k, false) ;
            Cx [k] = GB_IMIN (x, bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the MIN operator with the scalar y bound as the
// second argument, over every entry present in A.
GrB_Info GB (_bind2nd__min_uint32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t y = (*((uint32_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // only entries present in the bitmap/pattern of A are computed
        if (GBB (Ab, k))
        {
            uint32_t aij = GBX (Ax, k, false) ;
            Cx [k] = GB_IMIN (aij, y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMIN (x, aij) ; \
}
// C = op (x, A'): transpose A and apply cij = GB_IMIN (x, aij), using the
// GB_CAST_OP macro defined just above.
GrB_Info GB (_bind1st_tran__min_uint32)
(
    GrB_Matrix C,
    const GB_void *x_input,     // scalar x, bound as the first operand
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code below; A and C are both uint32_t here,
    // so the value is unchanged (generator emits this unconditionally)
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMIN (aij, y) ; \
}
// C = op (A', y): transpose A and apply cij = GB_IMIN (aij, y), using the
// GB_CAST_OP macro defined just above.
GrB_Info GB (_bind2nd_tran__min_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,     // scalar y, bound as the second operand
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_binop__fmod_fp64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__fmod_fp64)
// A.*B function (eWiseMult): GB (_AemultB_08__fmod_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__fmod_fp64)
// A.*B function (eWiseMult): GB (_AemultB_04__fmod_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__fmod_fp64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__fmod_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__fmod_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__fmod_fp64)
// C=scalar+B GB (_bind1st__fmod_fp64)
// C=scalar+B' GB (_bind1st_tran__fmod_fp64)
// C=A+scalar GB (_bind2nd__fmod_fp64)
// C=A'+scalar GB (_bind2nd_tran__fmod_fp64)
// C type: double
// A type: double
// A pattern? 0
// B type: double
// B pattern? 0
// BinaryOp: cij = fmod (aij, bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
double aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
// NOTE: stray trailing backslash removed — it spliced the following source
// line into this macro definition (C line splicing runs before comment
// removal), which would swallow real code if the next line ever changed.
#define GB_A_IS_PATTERN \
    0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
double bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
// NOTE: stray trailing backslash removed (it spliced the following source
// line into this macro definition; see the matching fix for GB_A_IS_PATTERN).
#define GB_B_IS_PATTERN \
    0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = fmod (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_FMOD || GxB_NO_FP64 || GxB_NO_FMOD_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B (no accumulation) where all three matrices are dense, with
// "+" = fmod (see GB_BINOP above).  The loop body comes from the included
// template, driven by the GB_* macros defined at the top of this file.
void GB (_Cdense_ewise3_noaccum__fmod_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads      // number of threads to use
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C with fmod,
// via the subassign-23 template.  B_ek_slicing/B_ntasks describe how B has
// been partitioned into tasks for B_nthreads threads.
GrB_Info GB (_Cdense_accumB__fmod_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    // this op/type pair was disabled at compile time; caller falls back to
    // the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// p_bwork points to the scalar b, already typecast to double by the caller.
GrB_Info GB (_Cdense_accumb__fmod_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable — the inner block above already returned.
// Harmless artifact of the code generator; left as-is.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Disabled (#if 0): no colscale kernel is generated for fmod_fp64.
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Disabled (#if 0): no rowscale kernel is generated for fmod_fp64.
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// Set-union element-wise "add" using the fmod operator.  For eWiseUnion,
// alpha/beta stand in for entries missing from A and B respectively; they
// are only read when is_eWiseUnion is true.
GrB_Info GB (_AaddB__fmod_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace for slicing M, A, and B; released by GB_FREE_WORKSPACE below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
double alpha_scalar ;
double beta_scalar ;
if (is_eWiseUnion)
{
// the scalars are only valid in the eWiseUnion case
alpha_scalar = (*((double *) alpha_scalar_in)) ;
beta_scalar = (*((double *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// Set-intersection element-wise multiply; the meta template dispatches on
// the mask variant and the sparsity structures of A and B.
GrB_Info GB (_AemultB_08__fmod_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// Since GB_BINOP_FLIP is 1 for fmod (non-commutative), the flipped branch
// below is compiled in and selected at run time via flipxy.
GrB_Info GB (_AemultB_02__fmod_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// M_ek_slicing / M_ntasks / M_nthreads: parallel schedule over the mask M,
// which drives the iteration in this variant.
GrB_Info GB (_AemultB_04__fmod_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Element-wise multiply producing a bitmap-format C.
GrB_Info GB (_AemultB_bitmap__fmod_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Compute Cx [k] = fmod (x, Bx [k]) for every entry present in B, with the
// scalar x bound to the first operand.  Bb is B's bitmap (may be NULL for
// a full matrix; GBB handles both cases).
GrB_Info GB (_bind1st__fmod_fp64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped (GB_void) inputs
    double *Cx = (double *) Cx_output ;
    double *Bx = (double *) Bx_input ;
    double x = (*((double *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // only operate on entries present in the bitmap
        if (GBB (Bb, k))
        {
            double bval = GBX (Bx, k, false) ;
            Cx [k] = fmod (x, bval) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Compute Cx [k] = fmod (Ax [k], y) for every entry present in A, with the
// scalar y bound to the second operand.  Ab is A's bitmap (may be NULL for
// a full matrix; GBB handles both cases).
GrB_Info GB (_bind2nd__fmod_fp64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped (GB_void) inputs
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double y = (*((double *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // only operate on entries present in the bitmap
        if (GBB (Ab, k))
        {
            double aval = GBX (Ax, k, false) ;
            Cx [k] = fmod (aval, y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = fmod (x, aij) ; \
}
// Compute C = fmod (x, A'): transpose A while applying the operator, with
// the scalar x bound to the first operand.  The transpose itself is done
// by the included GB_unop_transpose.c template.
GrB_Info GB (_bind1st_tran__fmod_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any subsequent template expansion (generated code)
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = fmod (aij, y) ; \
}
// Compute C = fmod (A', y): transpose A while applying the operator, with
// the scalar y bound to the second operand.
GrB_Info GB (_bind2nd_tran__fmod_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
main.c | //===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
// DEFINE / INCLUDE
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <time.h>
#include <avilib.h>
#include <avimod.h>
#include <omp.h>
#include "define.c"
#include "kernel.c"
//===============================================================================================================================================================================================================200
// WRITE DATA FUNCTION
//===============================================================================================================================================================================================================200
// Write heartwall tracking results to a text file.
//
// filename         : path of the output file (overwritten if it exists)
// frameNo          : total number of frames in the AVI (also the row stride
//                    of the result arrays: element (point i, frame j) is
//                    stored at index j + i*frameNo)
// frames_processed : how many frames were actually processed
// endoPoints       : number of endocardium tracking points
// input_a/input_b  : endo row/col locations, laid out as described above
// epiPoints        : number of epicardium tracking points
// input_2a/input_2b: epi row/col locations, same layout
//
// On fopen failure a message is printed and the function returns silently.
void write_data( char* filename,
                 int frameNo,
                 int frames_processed,
                 int endoPoints,
                 int* input_a,
                 int* input_b,
                 int epiPoints,
                 int* input_2a,
                 int* input_2b){

    //================================================================================80
    // VARIABLES
    //================================================================================80
    FILE* fid;
    int i,j;

    //================================================================================80
    // OPEN FILE FOR WRITING
    //================================================================================80
    fid = fopen(filename, "w+");
    if( fid == NULL ){
        printf( "The file was not opened for writing\n" );
        return;
    }

    //================================================================================80
    // WRITE VALUES TO THE FILE
    //================================================================================80
    fprintf(fid, "Total AVI Frames: %d\n", frameNo);
    fprintf(fid, "Frames Processed: %d\n", frames_processed);
    fprintf(fid, "endoPoints: %d\n", endoPoints);
    fprintf(fid, "epiPoints: %d", epiPoints);
    for(j=0; j<frames_processed;j++)
    {
        fprintf(fid, "\n---Frame %d---",j);
        // (extraneous unreferenced arguments removed from the three
        //  literal-only fprintf calls below; output is unchanged)
        fprintf(fid, "\n--endo--\n");
        for(i=0; i<endoPoints; i++){
            fprintf(fid, "%d\t", input_a[j+i*frameNo]);
        }
        fprintf(fid, "\n");
        for(i=0; i<endoPoints; i++){
            fprintf(fid, "%d\t", input_b[j+i*frameNo]);
        }
        fprintf(fid, "\n--epi--\n");
        for(i=0; i<epiPoints; i++){
            fprintf(fid, "%d\t", input_2a[j+i*frameNo]);
        }
        fprintf(fid, "\n");
        for(i=0; i<epiPoints; i++){
            fprintf(fid, "%d\t", input_2b[j+i*frameNo]);
        }
    }

    //================================================================================80
    // CLOSE FILE
    //================================================================================80
    fclose(fid);
}
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
// MAIN FUNCTION
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
int main(int argc, char *argv []){
//======================================================================================================================================================
// VARIABLES
//======================================================================================================================================================
// counters
int i;
int frames_processed;
// parameters
public_struct public;
private_struct private[ALL_POINTS];
//======================================================================================================================================================
// FRAMES
//======================================================================================================================================================
if(argc!=4){
printf("ERROR: usage: heartwall <inputfile> <num of frames> <num of threads>\n");
exit(1);
}
char* video_file_name;
video_file_name = argv[1];
avi_t* d_frames = (avi_t*)AVI_open_input_file(video_file_name, 1); // added casting
if (d_frames == NULL) {
AVI_print_error((char *) "Error with AVI_open_input_file");
return -1;
}
public.d_frames = d_frames;
public.frames = AVI_video_frames(public.d_frames);
public.frame_rows = AVI_video_height(public.d_frames);
public.frame_cols = AVI_video_width(public.d_frames);
public.frame_elem = public.frame_rows * public.frame_cols;
public.frame_mem = sizeof(fp) * public.frame_elem;
//======================================================================================================================================================
// CHECK INPUT ARGUMENTS
//======================================================================================================================================================
frames_processed = atoi(argv[2]);
if(frames_processed<0 || frames_processed>public.frames){
printf("ERROR: %d is an incorrect number of frames specified, select in the range of 0-%d\n", frames_processed, public.frames);
return 0;
}
int omp_num_threads;
omp_num_threads = atoi(argv[3]);
if (omp_num_threads <=0){
printf ("num of threads must be a positive integer");
return 0;
}
printf("num of threads: %d\n", omp_num_threads);
//======================================================================================================================================================
// INPUTS
//======================================================================================================================================================
//====================================================================================================
// ENDO POINTS
//====================================================================================================
public.endoPoints = ENDO_POINTS;
public.d_endo_mem = sizeof(int) * public.endoPoints;
public.d_endoRow = (int *)malloc(public.d_endo_mem);
public.d_endoRow[ 0] = 369;
public.d_endoRow[ 1] = 400;
public.d_endoRow[ 2] = 429;
public.d_endoRow[ 3] = 452;
public.d_endoRow[ 4] = 476;
public.d_endoRow[ 5] = 486;
public.d_endoRow[ 6] = 479;
public.d_endoRow[ 7] = 458;
public.d_endoRow[ 8] = 433;
public.d_endoRow[ 9] = 404;
public.d_endoRow[10] = 374;
public.d_endoRow[11] = 346;
public.d_endoRow[12] = 318;
public.d_endoRow[13] = 294;
public.d_endoRow[14] = 277;
public.d_endoRow[15] = 269;
public.d_endoRow[16] = 275;
public.d_endoRow[17] = 287;
public.d_endoRow[18] = 311;
public.d_endoRow[19] = 339;
public.d_endoCol = (int *)malloc(public.d_endo_mem);
public.d_endoCol[ 0] = 408;
public.d_endoCol[ 1] = 406;
public.d_endoCol[ 2] = 397;
public.d_endoCol[ 3] = 383;
public.d_endoCol[ 4] = 354;
public.d_endoCol[ 5] = 322;
public.d_endoCol[ 6] = 294;
public.d_endoCol[ 7] = 270;
public.d_endoCol[ 8] = 250;
public.d_endoCol[ 9] = 237;
public.d_endoCol[10] = 235;
public.d_endoCol[11] = 241;
public.d_endoCol[12] = 254;
public.d_endoCol[13] = 273;
public.d_endoCol[14] = 300;
public.d_endoCol[15] = 328;
public.d_endoCol[16] = 356;
public.d_endoCol[17] = 383;
public.d_endoCol[18] = 401;
public.d_endoCol[19] = 411;
public.d_tEndoRowLoc = (int *)malloc(public.d_endo_mem * public.frames);
public.d_tEndoColLoc = (int *)malloc(public.d_endo_mem * public.frames);
//====================================================================================================
// EPI POINTS
//====================================================================================================
public.epiPoints = EPI_POINTS;
public.d_epi_mem = sizeof(int) * public.epiPoints;
public.d_epiRow = (int *)malloc(public.d_epi_mem);
public.d_epiRow[ 0] = 390;
public.d_epiRow[ 1] = 419;
public.d_epiRow[ 2] = 448;
public.d_epiRow[ 3] = 474;
public.d_epiRow[ 4] = 501;
public.d_epiRow[ 5] = 519;
public.d_epiRow[ 6] = 535;
public.d_epiRow[ 7] = 542;
public.d_epiRow[ 8] = 543;
public.d_epiRow[ 9] = 538;
public.d_epiRow[10] = 528;
public.d_epiRow[11] = 511;
public.d_epiRow[12] = 491;
public.d_epiRow[13] = 466;
public.d_epiRow[14] = 438;
public.d_epiRow[15] = 406;
public.d_epiRow[16] = 376;
public.d_epiRow[17] = 347;
public.d_epiRow[18] = 318;
public.d_epiRow[19] = 291;
public.d_epiRow[20] = 275;
public.d_epiRow[21] = 259;
public.d_epiRow[22] = 256;
public.d_epiRow[23] = 252;
public.d_epiRow[24] = 252;
public.d_epiRow[25] = 257;
public.d_epiRow[26] = 266;
public.d_epiRow[27] = 283;
public.d_epiRow[28] = 305;
public.d_epiRow[29] = 331;
public.d_epiRow[30] = 360;
public.d_epiCol = (int *)malloc(public.d_epi_mem);
public.d_epiCol[ 0] = 457;
public.d_epiCol[ 1] = 454;
public.d_epiCol[ 2] = 446;
public.d_epiCol[ 3] = 431;
public.d_epiCol[ 4] = 411;
public.d_epiCol[ 5] = 388;
public.d_epiCol[ 6] = 361;
public.d_epiCol[ 7] = 331;
public.d_epiCol[ 8] = 301;
public.d_epiCol[ 9] = 273;
public.d_epiCol[10] = 243;
public.d_epiCol[11] = 218;
public.d_epiCol[12] = 196;
public.d_epiCol[13] = 178;
public.d_epiCol[14] = 166;
public.d_epiCol[15] = 157;
public.d_epiCol[16] = 155;
public.d_epiCol[17] = 165;
public.d_epiCol[18] = 177;
public.d_epiCol[19] = 197;
public.d_epiCol[20] = 218;
public.d_epiCol[21] = 248;
public.d_epiCol[22] = 276;
public.d_epiCol[23] = 304;
public.d_epiCol[24] = 333;
public.d_epiCol[25] = 361;
public.d_epiCol[26] = 391;
public.d_epiCol[27] = 415;
public.d_epiCol[28] = 434;
public.d_epiCol[29] = 448;
public.d_epiCol[30] = 455;
public.d_tEpiRowLoc = (int *)malloc(public.d_epi_mem * public.frames);
public.d_tEpiColLoc = (int *)malloc(public.d_epi_mem * public.frames);
//====================================================================================================
// ALL POINTS
//====================================================================================================
public.allPoints = ALL_POINTS;
//======================================================================================================================================================
// CONSTANTS
//======================================================================================================================================================
public.tSize = 25;
public.sSize = 40;
public.maxMove = 10;
public.alpha = 0.87;
//======================================================================================================================================================
// SUMS
//======================================================================================================================================================
for(i=0; i<public.allPoints; i++){
private[i].in_partial_sum = (fp *)malloc(sizeof(fp) * 2*public.tSize+1);
private[i].in_sqr_partial_sum = (fp *)malloc(sizeof(fp) * 2*public.tSize+1);
private[i].par_max_val = (fp *)malloc(sizeof(fp) * (2*public.tSize+2*public.sSize+1));
private[i].par_max_coo = (int *)malloc(sizeof(int) * (2*public.tSize+2*public.sSize+1));
}
//======================================================================================================================================================
// INPUT 2 (SAMPLE AROUND POINT)
//======================================================================================================================================================
public.in2_rows = 2 * public.sSize + 1;
public.in2_cols = 2 * public.sSize + 1;
public.in2_elem = public.in2_rows * public.in2_cols;
public.in2_mem = sizeof(fp) * public.in2_elem;
for(i=0; i<public.allPoints; i++){
private[i].d_in2 = (fp *)malloc(public.in2_mem);
private[i].d_in2_sqr = (fp *)malloc(public.in2_mem);
}
//======================================================================================================================================================
// INPUT (POINT TEMPLATE)
//======================================================================================================================================================
public.in_mod_rows = public.tSize+1+public.tSize;
public.in_mod_cols = public.in_mod_rows;
public.in_mod_elem = public.in_mod_rows * public.in_mod_cols;
public.in_mod_mem = sizeof(fp) * public.in_mod_elem;
for(i=0; i<public.allPoints; i++){
private[i].d_in_mod = (fp *)malloc(public.in_mod_mem);
private[i].d_in_sqr = (fp *)malloc(public.in_mod_mem);
}
//======================================================================================================================================================
// ARRAY OF TEMPLATES FOR ALL POINTS
//======================================================================================================================================================
public.d_endoT = (fp *)malloc(public.in_mod_mem * public.endoPoints);
public.d_epiT = (fp *)malloc(public.in_mod_mem * public.epiPoints);
//======================================================================================================================================================
// SETUP private POINTERS TO ROWS, COLS AND TEMPLATE
//======================================================================================================================================================
for(i=0; i<public.endoPoints; i++){
private[i].point_no = i;
private[i].in_pointer = private[i].point_no * public.in_mod_elem;
private[i].d_Row = public.d_endoRow; // original row coordinates
private[i].d_Col = public.d_endoCol; // original col coordinates
private[i].d_tRowLoc = public.d_tEndoRowLoc; // updated row coordinates
private[i].d_tColLoc = public.d_tEndoColLoc; // updated row coordinates
private[i].d_T = public.d_endoT; // templates
}
for(i=public.endoPoints; i<public.allPoints; i++){
private[i].point_no = i-public.endoPoints;
private[i].in_pointer = private[i].point_no * public.in_mod_elem;
private[i].d_Row = public.d_epiRow;
private[i].d_Col = public.d_epiCol;
private[i].d_tRowLoc = public.d_tEpiRowLoc;
private[i].d_tColLoc = public.d_tEpiColLoc;
private[i].d_T = public.d_epiT;
}
//======================================================================================================================================================
// CONVOLUTION
//======================================================================================================================================================
public.ioffset = 0;
public.joffset = 0;
public.conv_rows = public.in_mod_rows + public.in2_rows - 1; // number of rows in I
public.conv_cols = public.in_mod_cols + public.in2_cols - 1; // number of columns in I
public.conv_elem = public.conv_rows * public.conv_cols; // number of elements
public.conv_mem = sizeof(fp) * public.conv_elem;
for(i=0; i<public.allPoints; i++){
private[i].d_conv = (fp *)malloc(public.conv_mem);
}
//======================================================================================================================================================
// CUMULATIVE SUM
//======================================================================================================================================================
//====================================================================================================
// PAD ARRAY
//====================================================================================================
//====================================================================================================
// VERTICAL CUMULATIVE SUM
//====================================================================================================
public.in2_pad_add_rows = public.in_mod_rows;
public.in2_pad_add_cols = public.in_mod_cols;
public.in2_pad_rows = public.in2_rows + 2*public.in2_pad_add_rows;
public.in2_pad_cols = public.in2_cols + 2*public.in2_pad_add_cols;
public.in2_pad_elem = public.in2_pad_rows * public.in2_pad_cols;
public.in2_pad_mem = sizeof(fp) * public.in2_pad_elem;
for(i=0; i<public.allPoints; i++){
private[i].d_in2_pad = (fp *)malloc(public.in2_pad_mem);
}
//====================================================================================================
// SELECTION, SELECTION 2, SUBTRACTION
//====================================================================================================
//====================================================================================================
// HORIZONTAL CUMULATIVE SUM
//====================================================================================================
public.in2_pad_cumv_sel_rowlow = 1 + public.in_mod_rows; // (1 to n+1)
public.in2_pad_cumv_sel_rowhig = public.in2_pad_rows - 1;
public.in2_pad_cumv_sel_collow = 1;
public.in2_pad_cumv_sel_colhig = public.in2_pad_cols;
public.in2_pad_cumv_sel2_rowlow = 1;
public.in2_pad_cumv_sel2_rowhig = public.in2_pad_rows - public.in_mod_rows - 1;
public.in2_pad_cumv_sel2_collow = 1;
public.in2_pad_cumv_sel2_colhig = public.in2_pad_cols;
public.in2_sub_rows = public.in2_pad_cumv_sel_rowhig - public.in2_pad_cumv_sel_rowlow + 1;
public.in2_sub_cols = public.in2_pad_cumv_sel_colhig - public.in2_pad_cumv_sel_collow + 1;
public.in2_sub_elem = public.in2_sub_rows * public.in2_sub_cols;
public.in2_sub_mem = sizeof(fp) * public.in2_sub_elem;
for(i=0; i<public.allPoints; i++){
private[i].d_in2_sub = (fp *)malloc(public.in2_sub_mem);
}
//====================================================================================================
// SELECTION, SELECTION 2, SUBTRACTION, SQUARE, NUMERATOR
//====================================================================================================
public.in2_sub_cumh_sel_rowlow = 1;
public.in2_sub_cumh_sel_rowhig = public.in2_sub_rows;
public.in2_sub_cumh_sel_collow = 1 + public.in_mod_cols;
public.in2_sub_cumh_sel_colhig = public.in2_sub_cols - 1;
public.in2_sub_cumh_sel2_rowlow = 1;
public.in2_sub_cumh_sel2_rowhig = public.in2_sub_rows;
public.in2_sub_cumh_sel2_collow = 1;
public.in2_sub_cumh_sel2_colhig = public.in2_sub_cols - public.in_mod_cols - 1;
public.in2_sub2_sqr_rows = public.in2_sub_cumh_sel_rowhig - public.in2_sub_cumh_sel_rowlow + 1;
public.in2_sub2_sqr_cols = public.in2_sub_cumh_sel_colhig - public.in2_sub_cumh_sel_collow + 1;
public.in2_sub2_sqr_elem = public.in2_sub2_sqr_rows * public.in2_sub2_sqr_cols;
public.in2_sub2_sqr_mem = sizeof(fp) * public.in2_sub2_sqr_elem;
for(i=0; i<public.allPoints; i++){
private[i].d_in2_sub2_sqr = (fp *)malloc(public.in2_sub2_sqr_mem);
}
//======================================================================================================================================================
// CUMULATIVE SUM 2
//======================================================================================================================================================
//====================================================================================================
// PAD ARRAY
//====================================================================================================
//====================================================================================================
// VERTICAL CUMULATIVE SUM
//====================================================================================================
//====================================================================================================
// SELECTION, SELECTION 2, SUBTRACTION
//====================================================================================================
//====================================================================================================
// HORIZONTAL CUMULATIVE SUM
//====================================================================================================
//====================================================================================================
// SELECTION, SELECTION 2, SUBTRACTION, DIFFERENTIAL LOCAL SUM, DENOMINATOR A, DENOMINATOR, CORRELATION
//====================================================================================================
//======================================================================================================================================================
// TEMPLATE MASK CREATE
//======================================================================================================================================================
public.tMask_rows = public.in_mod_rows + (public.sSize+1+public.sSize) - 1;
public.tMask_cols = public.tMask_rows;
public.tMask_elem = public.tMask_rows * public.tMask_cols;
public.tMask_mem = sizeof(fp) * public.tMask_elem;
for(i=0; i<public.allPoints; i++){
private[i].d_tMask = (fp *)malloc(public.tMask_mem);
}
//======================================================================================================================================================
// POINT MASK INITIALIZE
//======================================================================================================================================================
public.mask_rows = public.maxMove;
public.mask_cols = public.mask_rows;
public.mask_elem = public.mask_rows * public.mask_cols;
public.mask_mem = sizeof(fp) * public.mask_elem;
//======================================================================================================================================================
// MASK CONVOLUTION
//======================================================================================================================================================
public.mask_conv_rows = public.tMask_rows; // number of rows in I
public.mask_conv_cols = public.tMask_cols; // number of columns in I
public.mask_conv_elem = public.mask_conv_rows * public.mask_conv_cols; // number of elements
public.mask_conv_mem = sizeof(fp) * public.mask_conv_elem;
public.mask_conv_ioffset = (public.mask_rows-1)/2;
if((public.mask_rows-1) % 2 > 0.5){
public.mask_conv_ioffset = public.mask_conv_ioffset + 1;
}
public.mask_conv_joffset = (public.mask_cols-1)/2;
if((public.mask_cols-1) % 2 > 0.5){
public.mask_conv_joffset = public.mask_conv_joffset + 1;
}
for(i=0; i<public.allPoints; i++){
private[i].d_mask_conv = (fp *)malloc(public.mask_conv_mem);
}
//======================================================================================================================================================
// PRINT FRAME PROGRESS START
//======================================================================================================================================================
printf("frame progress: ");
fflush(NULL);
//======================================================================================================================================================
// KERNEL
//======================================================================================================================================================
for(public.frame_no=0; public.frame_no<frames_processed; public.frame_no++){
//====================================================================================================
// GETTING FRAME
//====================================================================================================
// Extract a cropped version of the first frame from the video file
public.d_frame = get_frame(public.d_frames, // pointer to video file
public.frame_no, // number of frame that needs to be returned
0, // cropped?
0, // scaled?
1); // converted
//====================================================================================================
// PROCESSING
//====================================================================================================
omp_set_num_threads(omp_num_threads);
#pragma omp parallel for
for(i=0; i<public.allPoints; i++){
kernel( public,
private[i]);
}
//====================================================================================================
// FREE MEMORY FOR FRAME
//====================================================================================================
// free frame after each loop iteration, since AVI library allocates memory for every frame fetched
free(public.d_frame);
//====================================================================================================
// PRINT FRAME PROGRESS
//====================================================================================================
printf("%d ", public.frame_no);
fflush(NULL);
}
//======================================================================================================================================================
// PRINT FRAME PROGRESS END
//======================================================================================================================================================
printf("\n");
fflush(NULL);
//======================================================================================================================================================
// DEALLOCATION
//======================================================================================================================================================
//==================================================50
// DUMP DATA TO FILE
//==================================================50
#ifdef OUTPUT
write_data( "result.txt",
public.frames,
frames_processed,
public.endoPoints,
public.d_tEndoRowLoc,
public.d_tEndoColLoc,
public.epiPoints,
public.d_tEpiRowLoc,
public.d_tEpiColLoc);
#endif
//====================================================================================================
// COMMON
//====================================================================================================
free(public.d_endoRow);
free(public.d_endoCol);
free(public.d_tEndoRowLoc);
free(public.d_tEndoColLoc);
free(public.d_endoT);
free(public.d_epiRow);
free(public.d_epiCol);
free(public.d_tEpiRowLoc);
free(public.d_tEpiColLoc);
free(public.d_epiT);
//====================================================================================================
// POINTERS
//====================================================================================================
for(i=0; i<public.allPoints; i++){
free(private[i].in_partial_sum);
free(private[i].in_sqr_partial_sum);
free(private[i].par_max_val);
free(private[i].par_max_coo);
free(private[i].d_in2);
free(private[i].d_in2_sqr);
free(private[i].d_in_mod);
free(private[i].d_in_sqr);
free(private[i].d_conv);
free(private[i].d_in2_pad);
free(private[i].d_in2_sub);
free(private[i].d_in2_sub2_sqr);
free(private[i].d_tMask);
free(private[i].d_mask_conv);
}
}
//========================================================================================================================================================================================================
//========================================================================================================================================================================================================
// END OF FILE
//========================================================================================================================================================================================================
//========================================================================================================================================================================================================
|
scheduleg-clause.c | #include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
// Demo of OpenMP guided scheduling: sums a[0..n-1] inside a parallel for whose
// guided-schedule chunk size is taken from argv[1].  Because suma is declared
// firstprivate+lastprivate (NOT reduction), the value printed after the loop is
// only the partial sum held by the thread that executed the sequentially-last
// iteration (i==n-1), not the total — this is the point of the example.
int main(int argc, char **argv) {
int i, n=16,chunk,a[n],suma=0;
// the chunk size argument is mandatory; abort if it is missing
if(argc < 2) {
fprintf(stderr,"\nFalta chunk \n");
exit(-1);
}
chunk = atoi(argv[1]);
// initialize the data: a[i] = i
for (i=0; i<n; i++) a[i] = i;
// firstprivate: each thread starts its private suma from 0;
// lastprivate: after the loop, suma holds the value from the last iteration
#pragma omp parallel for firstprivate(suma) lastprivate(suma) schedule(guided,chunk)
for (i=0; i<n; i++)
{ suma = suma + a[i];
printf(" thread %d suma a[%d]=%d suma=%d \n",omp_get_thread_num(),i,a[i],suma);
}
printf("Fuera de 'parallel for' suma=%d\n",suma);
}
|
level.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
//------------------------------------------------------------------------------------------------------------------------------
#ifdef USE_MPI
#include <mpi.h>
#endif
#ifdef _OPENMP
#include <omp.h>
#endif
//------------------------------------------------------------------------------------------------------------------------------
#include "timers.h"
#include "defines.h"
#include "level.h"
#include "operators.h"
//------------------------------------------------------------------------------------------------------------------------------
// Debug aid: dump one communicator_type on (mostly) a single line.
// printSendRecv is a bitmask selecting which parts to print:
//   0x1 -> send side (ranks/sizes/buffer addresses) plus blocks[0]
//   0x2 -> blocks[1] (presumably the intra-process exchange list — verify against the builder)
//   0x4 -> recv side (ranks/sizes/buffer addresses) plus blocks[2]
// rank and level only label the output; comm is read-only here.
void print_communicator(int printSendRecv, int rank, int level, communicator_type *comm){
  int i;
  printf("rank=%2d level=%d ",rank,level);
  if(printSendRecv & 0x1){
  printf("num_sends=%2d ",comm->num_sends);
  printf("send_ranks=[ ");for(i=0;i<comm->num_sends;i++)printf("%2d ",comm->send_ranks[i]);printf("] ");
  printf("send_sizes=[ ");for(i=0;i<comm->num_sends;i++)printf("%2d ",comm->send_sizes[i]);printf("] ");
  // buffer pointers printed as raw hex addresses
  printf("send_buffers=[ ");for(i=0;i<comm->num_sends;i++)printf("%08lx ",(uint64_t)comm->send_buffers[i]);printf("] ");
  // per-block geometry: dimensions, read (from) coords/strides, write (to) coords/strides
  for(i=0;i<comm->num_blocks[0];i++)printf("[ %dx%dx%d from %d %d %d %d %d to %d %d %d %d %d ] ",comm->blocks[0][i].dim.i,comm->blocks[0][i].dim.j,comm->blocks[0][i].dim.k,comm->blocks[0][i].read.i,comm->blocks[0][i].read.j,comm->blocks[0][i].read.k,comm->blocks[0][i].read.jStride,comm->blocks[0][i].read.kStride,comm->blocks[0][i].write.i,comm->blocks[0][i].write.j,comm->blocks[0][i].write.k,comm->blocks[0][i].write.jStride,comm->blocks[0][i].write.kStride);
  printf("\n");
  }
  if(printSendRecv & 0x2){
  for(i=0;i<comm->num_blocks[1];i++)printf("[ %dx%dx%d from %d %d %d %d %d to %d %d %d %d %d ] ",comm->blocks[1][i].dim.i,comm->blocks[1][i].dim.j,comm->blocks[1][i].dim.k,comm->blocks[1][i].read.i,comm->blocks[1][i].read.j,comm->blocks[1][i].read.k,comm->blocks[1][i].read.jStride,comm->blocks[1][i].read.kStride,comm->blocks[1][i].write.i,comm->blocks[1][i].write.j,comm->blocks[1][i].write.k,comm->blocks[1][i].write.jStride,comm->blocks[1][i].write.kStride);
  printf("\n");
  }
  if(printSendRecv & 0x4){
  printf("num_recvs=%2d ",comm->num_recvs);
  printf("recv_ranks=[ ");for(i=0;i<comm->num_recvs;i++)printf("%2d ",comm->recv_ranks[i]);printf("] ");
  printf("recv_sizes=[ ");for(i=0;i<comm->num_recvs;i++)printf("%2d ",comm->recv_sizes[i]);printf("] ");
  printf("recv_buffers=[ ");for(i=0;i<comm->num_recvs;i++)printf("%08lx ",(uint64_t)comm->recv_buffers[i]);printf("] ");
  for(i=0;i<comm->num_blocks[2];i++)printf("[ %dx%dx%d from %d %d %d %d %d to %d %d %d %d %d ] ",comm->blocks[2][i].dim.i,comm->blocks[2][i].dim.j,comm->blocks[2][i].dim.k,comm->blocks[2][i].read.i,comm->blocks[2][i].read.j,comm->blocks[2][i].read.k,comm->blocks[2][i].read.jStride,comm->blocks[2][i].read.kStride,comm->blocks[2][i].write.i,comm->blocks[2][i].write.j,comm->blocks[2][i].write.k,comm->blocks[2][i].write.jStride,comm->blocks[2][i].write.kStride);
  printf("\n");
  }
  fflush(stdout); // ensure debug output is not lost on abort
}
//------------------------------------------------------------------------------------------------------------------------------
typedef struct {
  int sendRank;
  int sendBoxID;
  int sendBox;
  int sendDir;
  int recvRank;
  int recvBoxID;
  int recvBox;
} GZ_type;
// qsort comparator for GZ_type records: orders ghost-zone transfers by
// sendRank, then sendBoxID, then sendDir, so that all pieces destined for the
// same MPI buffer are contiguous and deterministically ordered.
int qsortGZ(const void *a, const void*b){
  const GZ_type *lhs = (const GZ_type*)a;
  const GZ_type *rhs = (const GZ_type*)b;
  // primary key: by convention, MPI buffers are grouped by the sending rank
  if(lhs->sendRank  != rhs->sendRank )return( (lhs->sendRank  < rhs->sendRank ) ? -1 : 1 );
  // secondary key: the sending box's global ID
  if(lhs->sendBoxID != rhs->sendBoxID)return( (lhs->sendBoxID < rhs->sendBoxID) ? -1 : 1 );
  // tertiary key: the direction the data travels
  if(lhs->sendDir   != rhs->sendDir  )return( (lhs->sendDir   < rhs->sendDir  ) ? -1 : 1 );
  return(0);
}
// qsort comparator for plain ints: ascending order.
int qsortInt(const void *a, const void *b){
  const int lhs = *(const int*)a;
  const int rhs = *(const int*)b;
  // (lhs>rhs)-(lhs<rhs) yields exactly -1, 0, or +1, matching the original branches
  return( (lhs > rhs) - (lhs < rhs) );
}
int qsortBlock(const void *a, const void *b){
blockCopy_type *ba = (blockCopy_type*)a;
blockCopy_type *bb = (blockCopy_type*)b;
if(ba->write.box >= 0){
// sort by box...
if(ba->write.box < bb->write.box)return(-1);
if(ba->write.box > bb->write.box)return( 1);
// now sort by k
if(ba->write.k < bb->write.k )return(-1);
if(ba->write.k > bb->write.k )return( 1);
// now sort by j
if(ba->write.j < bb->write.j )return(-1);
if(ba->write.j > bb->write.j )return( 1);
// now sort by i
if(ba->write.i < bb->write.i )return(-1);
if(ba->write.i > bb->write.i )return( 1);
}else if(ba->read.box >= 0){
// sort by box...
if(ba->read.box < bb->read.box )return(-1);
if(ba->read.box > bb->read.box )return( 1);
// now sort by k
if(ba->read.k < bb->read.k )return(-1);
if(ba->read.k > bb->read.k )return( 1);
// now sort by j
if(ba->read.j < bb->read.j )return(-1);
if(ba->read.j > bb->read.j )return( 1);
// now sort by i
if(ba->read.i < bb->read.i )return(-1);
if(ba->read.i > bb->read.i )return( 1);
}
return( 0);
}
//------------------------------------------------------------------------------------------------------------------------------
// Lexicographical (i-j-k ordered) decomposition: box b is owned by rank
// floor(ranks*b/boxes).  Load balance is near-perfect, but each process tends
// to receive one or two long pencils of boxes, so the resulting
// surface:volume ratio is likely poor.
void decompose_level_lex(int *rank_of_box, int idim, int jdim, int kdim, int ranks){
  const int boxes = idim*jdim*kdim;
  int b;
  // the original k/j/i triple loop visits b = k*jdim*idim + j*idim + i, i.e.
  // b = 0..boxes-1 exactly once each, so a single flat loop is equivalent
  for(b=0;b<boxes;b++){
    // promote to 64 bits: ranks*b can approach ranks^2 and overflow int
    rank_of_box[b] = (int)( ((uint64_t)ranks*(uint64_t)b) / (uint64_t)boxes );
  }
}
//---------------------------------------------------------------------------------------------------------------------------------------------------
// Recursively assign the (idim,jdim,kdim) region of boxes with origin
// (ilo,jlo,klo) to `ranks` consecutive processes starting at rank_lo; the
// owner of box b = i + j*jStride + k*kStride is written into rank_of_box[b].
void decompose_level_bisection_special(int *rank_of_box, int jStride, int kStride, int ilo, int jlo, int klo, int idim, int jdim, int kdim, int rank_lo, int ranks){
  // if possible, recursively partition the domain by a prime number (e.g. try to partition a 9^3 array into 3 equal pieces instead of 5x9^2 and 4x9^2)
  // if not, default to simple bisection
  // this function should ensure that each process receives a compact rectahedral collection of boxes
  // however, load imbalance can occur
  // the choice of whether to try and partition with the largest prime or smallest prime first is up to the user
  #define numPrimes 13
  //int primes[numPrimes] = {41,37,31,29,23,19,17,13,11,7,5,3,2};
  int primes[numPrimes] = {2,3,5,7,11,13,17,19,23,29,31,37,41};
  int i,j,k,p,f,ff;
  // base case, no further recursion... the whole remaining region goes to
  // rank_lo (if ranks>1 but only one box is left, the extra ranks simply
  // receive no boxes from this subtree)
  if( (ranks==1)|| ((idim==1)&&(jdim==1)&&(kdim==1)) ){
  for(i=ilo;i<ilo+idim;i++){
  for(j=jlo;j<jlo+jdim;j++){
  for(k=klo;k<klo+kdim;k++){
    int b = i + j*jStride + k*kStride;
    rank_of_box[b] = rank_lo;
  }}}
  return;
  }
  // special cases for perfectly matched problem sizes with numbers of processes (but not powers of 2)...
  // for each prime f (smallest first): if the longest dimension and the rank
  // count are both divisible by f, cut that dimension into f equal slabs and
  // recurse with ranks/f processes per slab; ties between equally long
  // dimensions prefer k, then j, then i
  for(p=0;p<numPrimes;p++){
  f=primes[p];
  if( (kdim>=idim)&&(kdim>=jdim) ){if( (kdim%f==0) && (ranks%f==0) ){for(ff=0;ff<f;ff++)decompose_level_bisection_special(rank_of_box,jStride,kStride,ilo,jlo,klo+ff*kdim/f,idim,jdim,kdim/f,rank_lo+ff*ranks/f,ranks/f);return;}}
  if( (jdim>=idim)&&(jdim>=kdim) ){if( (jdim%f==0) && (ranks%f==0) ){for(ff=0;ff<f;ff++)decompose_level_bisection_special(rank_of_box,jStride,kStride,ilo,jlo+ff*jdim/f,klo,idim,jdim/f,kdim,rank_lo+ff*ranks/f,ranks/f);return;}}
  if( (idim>=jdim)&&(idim>=kdim) ){if( (idim%f==0) && (ranks%f==0) ){for(ff=0;ff<f;ff++)decompose_level_bisection_special(rank_of_box,jStride,kStride,ilo+ff*idim/f,jlo,klo,idim/f,jdim,kdim,rank_lo+ff*ranks/f,ranks/f);return;}}
  }
  // try and bisect the domain in the i-dimension
  if( (idim>=jdim)&&(idim>=kdim) ){
    int dim0 = (int)(0.5*(double)idim + 0.50); // upper half of idim (rounds up)
    int dim1 = idim-dim0;
    int r0 = (int)( 0.5 + (double)ranks*(double)dim0/(double)idim ); // ranks split proportionally to volume
    int r1 = ranks-r0;
    decompose_level_bisection_special(rank_of_box,jStride,kStride,ilo     ,jlo,klo,dim0,jdim,kdim,rank_lo   ,r0); // lo
    decompose_level_bisection_special(rank_of_box,jStride,kStride,ilo+dim0,jlo,klo,dim1,jdim,kdim,rank_lo+r0,r1); // hi
    return;
  }
  // try and bisect the domain in the j-dimension
  if( (jdim>=idim)&&(jdim>=kdim) ){
    int dim0 = (int)(0.5*(double)jdim + 0.50); // upper half of jdim (rounds up)
    int dim1 = jdim-dim0;
    int r0 = (int)( 0.5 + (double)ranks*(double)dim0/(double)jdim ); // ranks split proportionally to volume
    int r1 = ranks-r0;
    decompose_level_bisection_special(rank_of_box,jStride,kStride,ilo,jlo     ,klo,idim,dim0,kdim,rank_lo   ,r0); // lo
    decompose_level_bisection_special(rank_of_box,jStride,kStride,ilo,jlo+dim0,klo,idim,dim1,kdim,rank_lo+r0,r1); // hi
    return;
  }
  // try and bisect the domain in the k-dimension
  if( (kdim>=idim)&&(kdim>=jdim) ){
    int dim0 = (int)(0.5*(double)kdim + 0.50); // upper half of kdim (rounds up)
    int dim1 = kdim-dim0;
    int r0 = (int)( 0.5 + (double)ranks*(double)dim0/(double)kdim ); // ranks split proportionally to volume
    int r1 = ranks-r0;
    decompose_level_bisection_special(rank_of_box,jStride,kStride,ilo,jlo,klo     ,idim,jdim,dim0,rank_lo   ,r0); // lo
    decompose_level_bisection_special(rank_of_box,jStride,kStride,ilo,jlo,klo+dim0,idim,jdim,dim1,rank_lo+r0,r1); // hi
    return;
  }
  // should be unreachable: one of the three bisection cases above always holds
  fprintf(stderr,"decompose_level_bisection_special failed !!!\n");exit(0);
}
//---------------------------------------------------------------------------------------------------------------------------------------------------
// Recursive coordinate bisection: repeatedly halve the longest axis of the
// (idim,jdim,kdim) region (origin ilo,jlo,klo), threading a space-filling
// order through the pieces.  Each single box is assigned rank
// floor(ranks*sfc_offset/sfc_max_length), where sfc_offset is the box's
// position along the curve and sfc_max_length its precomputed total length.
void decompose_level_bisection(int *rank_of_box, int jStride, int kStride, int ilo, int jlo, int klo, int idim, int jdim, int kdim, int ranks, int sfc_offset, int sfc_max_length){
  // base case: a single box — assign it from its curve position
  if( (idim==1) && (jdim==1) && (kdim==1) ){
    rank_of_box[ilo + jlo*jStride + klo*kStride] = (int)( ((uint64_t)ranks*(uint64_t)sfc_offset)/(uint64_t)sfc_max_length );
    return;
  }
  // split the longest axis (ties favor i, then j); the low half keeps the
  // current curve offset and the high half starts after the low half's boxes
  if( (idim>=jdim) && (idim>=kdim) ){
    const int half = (int)(0.5*(double)idim + 0.50); // round up
    decompose_level_bisection(rank_of_box,jStride,kStride,ilo     ,jlo,klo,half     ,jdim,kdim,ranks,sfc_offset               ,sfc_max_length);
    decompose_level_bisection(rank_of_box,jStride,kStride,ilo+half,jlo,klo,idim-half,jdim,kdim,ranks,sfc_offset+half*jdim*kdim,sfc_max_length);
    return;
  }
  if( (jdim>=idim) && (jdim>=kdim) ){
    const int half = (int)(0.5*(double)jdim + 0.50); // round up
    decompose_level_bisection(rank_of_box,jStride,kStride,ilo,jlo     ,klo,idim,half     ,kdim,ranks,sfc_offset               ,sfc_max_length);
    decompose_level_bisection(rank_of_box,jStride,kStride,ilo,jlo+half,klo,idim,jdim-half,kdim,ranks,sfc_offset+idim*half*kdim,sfc_max_length);
    return;
  }
  if( (kdim>=idim) && (kdim>=jdim) ){
    const int half = (int)(0.5*(double)kdim + 0.50); // round up
    decompose_level_bisection(rank_of_box,jStride,kStride,ilo,jlo,klo     ,idim,jdim,half     ,ranks,sfc_offset               ,sfc_max_length);
    decompose_level_bisection(rank_of_box,jStride,kStride,ilo,jlo,klo+half,idim,jdim,kdim-half,ranks,sfc_offset+idim*jdim*half,sfc_max_length);
    return;
  }
  // unreachable: one of the three axes is always the maximum
  fprintf(stderr,"decompose_level_bisection failed !!!\n");exit(0);
}
//---------------------------------------------------------------------------------------------------------------------------------------------------
// Given a bounding box (idim,jdim,kdim) use a Z-morton Space Filling Curve (SFC) to assign the boxes within the (boxes_in_i,boxes_in_j,boxes_in_k) valid region domain
// sfc_offset is the current offset within the space filling curve (starts with 0)
// this function returns the new offset based on how many actual boxes it found within (ilo,jlo,klo) + (idim,jdim,kdim)
// sfc_max_length is the maximum length of the SFC. Note, if this length exceeds boxes_in_i*boxes_in_j*boxes_in_k, then some processes with receive no work
// Z-Morton (z-order) space-filling-curve decomposition over a bounding box.
// Recursively splits the (idim,jdim,kdim) region into eight octants and walks
// them in z-order (low/high i fastest, then j, then k).  Only cells inside
// the (boxes_in_i,boxes_in_j,boxes_in_k) valid domain are assigned; each
// valid box receives rank floor(ranks*sfc_offset/sfc_max_length).  Returns
// the curve offset after consuming this region.
int decompose_level_zmort(int *rank_of_box, int boxes_in_i, int boxes_in_j, int boxes_in_k, int ilo, int jlo, int klo, int idim, int jdim, int kdim, int ranks, int sfc_offset, int sfc_max_length){
  // degenerate or negative regions contribute nothing to the curve
  if( (idim<1) || (jdim<1) || (kdim<1) )return(sfc_offset);
  if( (ilo <0) || (jlo <0) || (klo <0) )return(sfc_offset);
  // base case: a single cell of the bounding box
  if( (idim==1) && (jdim==1) && (kdim==1) ){
    if( (ilo>=boxes_in_i) || (jlo>=boxes_in_j) || (klo>=boxes_in_k) )return(sfc_offset); // bounding-box padding outside the valid domain; offset unchanged
    // deemed a valid box (could be augmented for irregular domains)
    rank_of_box[ilo + jlo*boxes_in_i + klo*boxes_in_i*boxes_in_j] = (int)( ((uint64_t)ranks*(uint64_t)sfc_offset)/(uint64_t)sfc_max_length );
    return(sfc_offset+1); // consumed one position on the curve
  }
  // recurse over the eight octants in z-order: bit0 selects the high-i half,
  // bit1 the high-j half, bit2 the high-k half
  int oct;
  for(oct=0;oct<8;oct++){
    const int hi_i = (oct   )&0x1;
    const int hi_j = (oct>>1)&0x1;
    const int hi_k = (oct>>2)&0x1;
    sfc_offset = decompose_level_zmort(rank_of_box,boxes_in_i,boxes_in_j,boxes_in_k,
                                       hi_i ? ilo+(idim/2) : ilo,
                                       hi_j ? jlo+(jdim/2) : jlo,
                                       hi_k ? klo+(kdim/2) : klo,
                                       hi_i ? idim-(idim/2) : idim/2,
                                       hi_j ? jdim-(jdim/2) : jdim/2,
                                       hi_k ? kdim-(kdim/2) : kdim/2,
                                       ranks,sfc_offset,sfc_max_length);
  }
  return(sfc_offset);
}
//------------------------------------------------------------------------------------------------------------------------------
//int decompose_level_hilbert(int *rank_of_box, int boxes_in_i, int boxes_in_j, int boxes_in_k, int ilo, int jlo, int klo, int idim, int jdim, int kdim, int ranks, int sfc_offset, int sfc_max_length){
// implements a 3D hilbert curve on the non-power of two domain using a power of two bounding box
//}
//---------------------------------------------------------------------------------------------------------------------------------------------------
// Pretty-print the box->rank assignment as a skewed (pseudo-isometric) text
// diagram: one line of ranks per j-row, one blank-line-separated slab per k,
// with (i,j,k)=(0,0,0) rendered at the bottom-left.  Only rank 0 prints.
void print_decomposition(level_type *level){
  if(level->my_rank!=0)return; // only one rank emits the picture
  printf("\n");
  int i,j,k;
  // box index is b = i + j*jStride + k*kStride
  int jStride = level->boxes_in.i;
  int kStride = level->boxes_in.i*level->boxes_in.j;
  for(k=level->boxes_in.k-1;k>=0;k--){ // (i,j,k)=(0,0,0) is bottom left corner
  for(j=level->boxes_in.j-1;j>=0;j--){ // (i,j)=(0,0) is bottom left corner
  for(i=0;i<j;i++)printf(" ");         // indent each row by j spaces to suggest depth
  for(i=0;i<level->boxes_in.i;i++){
    int b = i + j*jStride + k*kStride;
    printf("%4d ",level->rank_of_box[b]);
  }printf("\n");
  }printf("\n\n");
  }
  fflush(stdout);
}
//------------------------------------------------------------------------------------------------------------------------------
// append the specified block (logical region) to the current list of blocks
// each block may be tiled to...
// - create more parallelism across the list of blocks
// - limit parallelism within a block
// - limit the memory requirements for each block
#ifndef BLOCK_LIST_MIN_SIZE
#define BLOCK_LIST_MIN_SIZE 1000
#endif
// Append one logical copy region (dim_i x dim_j x dim_k) to the dynamic list
// *blocks (of length *num_blocks, capacity *allocated_blocks), growing the
// list geometrically as needed.  The region is first tiled into pieces of at
// most blockcopy_tile_i/j/k per dimension, which increases the number of
// blockCopies (more thread-level parallelism) and bounds each block's memory
// footprint.  read_*/write_* describe the source and destination: a box id
// (or -1/buffer via ptr — semantics set by the caller), origin coordinates,
// and j/k strides.  read_scale/write_scale stretch the tile origin when the
// read and write iteration spaces differ (restriction/interpolation).
// subtype is an opaque tag copied verbatim into every emitted block.
void append_block_to_list(blockCopy_type ** blocks, int *allocated_blocks, int *num_blocks,
                          int dim_i, int dim_j, int dim_k,
                          int read_box, double* read_ptr, int read_i, int read_j, int read_k, int read_jStride, int read_kStride, int read_scale,
                          int write_box, double* write_ptr, int write_i, int write_j, int write_k, int write_jStride, int write_kStride, int write_scale,
                          int blockcopy_tile_i, int blockcopy_tile_j, int blockcopy_tile_k,
                          int subtype
                          ){
  // Take a dim_j x dim_k iteration space and tile it into smaller faces of size blockcopy_tile_j x blockcopy_tile_k
  // This increases the number of blockCopies in the ghost zone exchange and thereby increases the thread-level parallelism
  // NOTE: the following z-morton tiling variant is disabled (#if 0) and kept for reference
  #if 0
  // use recursive (z-mort) ordering of tiles in order to improve locality on deep memory hierarchies...
  int doRecursion=0;
  if(dim_i > blockcopy_tile_i)doRecursion=1;
  if(dim_j > blockcopy_tile_j)doRecursion=1;
  if(dim_k > blockcopy_tile_k)doRecursion=1;
  if( read_scale != 1)doRecursion=0; // disable recursion for restriction
  if(write_scale != 1)doRecursion=0; // disable recursion for interpolation
  if(doRecursion){
    int mid_i = (dim_i + 1)/2;
    int mid_j = (dim_j + 1)/2;
    int mid_k = (dim_k + 1)/2;
    mid_i = blockcopy_tile_i*( (mid_i+blockcopy_tile_i-1)/blockcopy_tile_i);
    mid_j = blockcopy_tile_j*( (mid_j+blockcopy_tile_j-1)/blockcopy_tile_j);
    mid_k = blockcopy_tile_k*( (mid_k+blockcopy_tile_k-1)/blockcopy_tile_k);
    if(mid_i>dim_i)mid_i=dim_i;
    if(mid_j>dim_j)mid_j=dim_j;
    if(mid_k>dim_k)mid_k=dim_k;
    append_block_to_list(blocks,allocated_blocks,num_blocks,      mid_i,      mid_j,      mid_k,
                         read_box, read_ptr, read_i      , read_j      , read_k      , read_jStride, read_kStride, read_scale,
                         write_box,write_ptr,write_i      ,write_j      ,write_k      ,write_jStride,write_kStride,write_scale,
                         blockcopy_tile_i,blockcopy_tile_j,blockcopy_tile_k,subtype);
    append_block_to_list(blocks,allocated_blocks,num_blocks,dim_i-mid_i,      mid_j,      mid_k,
                         read_box, read_ptr, read_i+mid_i, read_j      , read_k      , read_jStride, read_kStride, read_scale,
                         write_box,write_ptr,write_i+mid_i,write_j      ,write_k      ,write_jStride,write_kStride,write_scale,
                         blockcopy_tile_i,blockcopy_tile_j,blockcopy_tile_k,subtype);
    append_block_to_list(blocks,allocated_blocks,num_blocks,      mid_i,dim_j-mid_j,      mid_k,
                         read_box, read_ptr, read_i      , read_j+mid_j, read_k      , read_jStride, read_kStride, read_scale,
                         write_box,write_ptr,write_i      ,write_j+mid_j,write_k      ,write_jStride,write_kStride,write_scale,
                         blockcopy_tile_i,blockcopy_tile_j,blockcopy_tile_k,subtype);
    append_block_to_list(blocks,allocated_blocks,num_blocks,dim_i-mid_i,dim_j-mid_j,      mid_k,
                         read_box, read_ptr, read_i+mid_i, read_j+mid_j, read_k      , read_jStride, read_kStride, read_scale,
                         write_box,write_ptr,write_i+mid_i,write_j+mid_j,write_k      ,write_jStride,write_kStride,write_scale,
                         blockcopy_tile_i,blockcopy_tile_j,blockcopy_tile_k,subtype);
    append_block_to_list(blocks,allocated_blocks,num_blocks,      mid_i,      mid_j,dim_k-mid_k,
                         read_box, read_ptr, read_i      , read_j      , read_k+mid_k, read_jStride, read_kStride, read_scale,
                         write_box,write_ptr,write_i      ,write_j      ,write_k+mid_k,write_jStride,write_kStride,write_scale,
                         blockcopy_tile_i,blockcopy_tile_j,blockcopy_tile_k,subtype);
    append_block_to_list(blocks,allocated_blocks,num_blocks,dim_i-mid_i,      mid_j,dim_k-mid_k,
                         read_box, read_ptr, read_i+mid_i, read_j      , read_k+mid_k, read_jStride, read_kStride, read_scale,
                         write_box,write_ptr,write_i+mid_i,write_j      ,write_k+mid_k,write_jStride,write_kStride,write_scale,
                         blockcopy_tile_i,blockcopy_tile_j,blockcopy_tile_k,subtype);
    append_block_to_list(blocks,allocated_blocks,num_blocks,      mid_i,dim_j-mid_j,dim_k-mid_k,
                         read_box, read_ptr, read_i      , read_j+mid_j, read_k+mid_k, read_jStride, read_kStride, read_scale,
                         write_box,write_ptr,write_i      ,write_j+mid_j,write_k+mid_k,write_jStride,write_kStride,write_scale,
                         blockcopy_tile_i,blockcopy_tile_j,blockcopy_tile_k,subtype);
    append_block_to_list(blocks,allocated_blocks,num_blocks,dim_i-mid_i,dim_j-mid_j,dim_k-mid_k,
                         read_box, read_ptr, read_i+mid_i, read_j+mid_j, read_k+mid_k, read_jStride, read_kStride, read_scale,
                         write_box,write_ptr,write_i+mid_i,write_j+mid_j,write_k+mid_k,write_jStride,write_kStride,write_scale,
                         blockcopy_tile_i,blockcopy_tile_j,blockcopy_tile_k,subtype);
    return;
  }
  #endif
  // read_/write_scale are used to stride appropriately when read and write loop iterations spaces are different
  // ghostZone:     read_scale=1, write_scale=1
  // interpolation: read_scale=1, write_scale=2
  // restriction:   read_scale=2, write_scale=1
  // FIX... dim_i,j,k -> read_dim_i,j,k, write_dim_i,j,k
  int ii,jj,kk;
  // walk the region in tile-sized steps; the last tile in each dimension is clipped
  for(kk=0;kk<dim_k;kk+=blockcopy_tile_k){
  for(jj=0;jj<dim_j;jj+=blockcopy_tile_j){
  for(ii=0;ii<dim_i;ii+=blockcopy_tile_i){
    // clip the current tile at the region boundary
    int dim_k_mod = dim_k-kk;if(dim_k_mod>blockcopy_tile_k)dim_k_mod=blockcopy_tile_k;
    int dim_j_mod = dim_j-jj;if(dim_j_mod>blockcopy_tile_j)dim_j_mod=blockcopy_tile_j;
    int dim_i_mod = dim_i-ii;if(dim_i_mod>blockcopy_tile_i)dim_i_mod=blockcopy_tile_i;
    // grow the list geometrically (doubling) from a floor of BLOCK_LIST_MIN_SIZE
    if(*num_blocks >= *allocated_blocks){
      int oldSize = *allocated_blocks;
      if(*allocated_blocks == 0){*allocated_blocks=BLOCK_LIST_MIN_SIZE;*blocks=(blockCopy_type*) malloc( (*allocated_blocks)*sizeof(blockCopy_type));}
                            else{*allocated_blocks*=2;                 *blocks=(blockCopy_type*)realloc((void*)(*blocks),(*allocated_blocks)*sizeof(blockCopy_type));}
      if(*blocks == NULL){fprintf(stderr,"realloc failed - append_block_to_list (%d -> %d)\n",oldSize,*allocated_blocks);exit(0);}
    }
    // record this tile; tile origins are scaled so read/write spaces stay aligned
    (*blocks)[*num_blocks].subtype       = subtype;
    (*blocks)[*num_blocks].dim.i         = dim_i_mod;
    (*blocks)[*num_blocks].dim.j         = dim_j_mod;
    (*blocks)[*num_blocks].dim.k         = dim_k_mod;
    (*blocks)[*num_blocks].read.box      = read_box;
    (*blocks)[*num_blocks].read.ptr      = read_ptr;
    (*blocks)[*num_blocks].read.i        = read_i  + read_scale*ii;
    (*blocks)[*num_blocks].read.j        = read_j  + read_scale*jj;
    (*blocks)[*num_blocks].read.k        = read_k  + read_scale*kk;
    (*blocks)[*num_blocks].read.jStride  = read_jStride;
    (*blocks)[*num_blocks].read.kStride  = read_kStride;
    (*blocks)[*num_blocks].write.box     = write_box;
    (*blocks)[*num_blocks].write.ptr     = write_ptr;
    (*blocks)[*num_blocks].write.i       = write_i + write_scale*ii;
    (*blocks)[*num_blocks].write.j       = write_j + write_scale*jj;
    (*blocks)[*num_blocks].write.k       = write_k + write_scale*kk;
    (*blocks)[*num_blocks].write.jStride = write_jStride;
    (*blocks)[*num_blocks].write.kStride = write_kStride;
    (*num_blocks)++;
  }}}
}
//----------------------------------------------------------------------------------------------------------------------------------------------------
// create a mini program that traverses the domain boundary intersecting with this process's boxes
// This includes faces, corners, and edges
void build_boundary_conditions(level_type *level, int shape){
level->boundary_condition.blocks[shape] = NULL; // default for periodic (i.e. no BC's)
level->boundary_condition.num_blocks[shape] = 0; // default for periodic (i.e. no BC's)
level->boundary_condition.allocated_blocks[shape] = 0; // default for periodic (i.e. no BC's)
if(level->boundary_condition.type == BC_PERIODIC)return;
//int faces[27] = {0,0,0,0,1,0,0,0,0, 0,1,0,1,0,1,0,1,0, 0,0,0,0,1,0,0,0,0};
int edges[27] = {0,1,0,1,0,1,0,1,0, 1,0,1,0,0,0,1,0,1, 0,1,0,1,0,1,0,1,0};
int corners[27] = {1,0,1,0,0,0,1,0,1, 0,0,0,0,0,0,0,0,0, 1,0,1,0,0,0,1,0,1};
int box, di,dj,dk;
for(box=0;box<level->num_my_boxes;box++){ // traverse my list of boxes...
for(dk=-1;dk<=1;dk++){ // for each box, examine its 26 neighbors...
for(dj=-1;dj<=1;dj++){
for(di=-1;di<=1;di++){
int dir = 13+di+3*dj+9*dk; // face/edge/corner of *THIS* box (not the domain)
// determine if this region (box's di,dj,dk ghost zone) is outside of the domain
int regionIsOutside=0;
int normal = 13; // normal effectively defines the normal vector to the *DOMAIN* for this region...
// this addition is necessary for linearly interpolated BC's as a box's corner is not necessarily a domain's corner
int myBox_i = level->my_boxes[box].low.i / level->box_dim;
int myBox_j = level->my_boxes[box].low.j / level->box_dim;
int myBox_k = level->my_boxes[box].low.k / level->box_dim;
int neighborBox_i = ( myBox_i + di );
int neighborBox_j = ( myBox_j + dj );
int neighborBox_k = ( myBox_k + dk );
if( neighborBox_i < 0 ){regionIsOutside=1;normal-=1;}
if( neighborBox_j < 0 ){regionIsOutside=1;normal-=3;}
if( neighborBox_k < 0 ){regionIsOutside=1;normal-=9;}
if( neighborBox_i >=level->boxes_in.i ){regionIsOutside=1;normal+=1;}
if( neighborBox_j >=level->boxes_in.j ){regionIsOutside=1;normal+=3;}
if( neighborBox_k >=level->boxes_in.k ){regionIsOutside=1;normal+=9;}
// calculate ghost zone region size and coordinates relative to the first non-ghost zone element (0,0,0)
int block_i=-1,block_j=-1,block_k=-1;
int dim_i=-1, dim_j=-1, dim_k=-1;
switch(di){
case -1:dim_i=level->box_ghosts;block_i=0-level->box_ghosts;break;
case 0:dim_i=level->box_dim; block_i=0; break;
case 1:dim_i=level->box_ghosts;block_i=0+level->box_dim; break;
}
switch(dj){
case -1:dim_j=level->box_ghosts;block_j=0-level->box_ghosts;break;
case 0:dim_j=level->box_dim; block_j=0; break;
case 1:dim_j=level->box_ghosts;block_j=0+level->box_dim; break;
}
switch(dk){
case -1:dim_k=level->box_ghosts;block_k=0-level->box_ghosts;break;
case 0:dim_k=level->box_dim; block_k=0; break;
case 1:dim_k=level->box_ghosts;block_k=0+level->box_dim; break;
}
// use regionIsOutside to short circuit logic and cull unnecessary regions...
switch(shape){
case STENCIL_SHAPE_STAR: if(edges[dir]||corners[dir])regionIsOutside=0;break; // star-shaped stencils don't need BC's enforced on corners or edges
case STENCIL_SHAPE_NO_CORNERS:if( corners[dir])regionIsOutside=0;break; // these stencils don't need BC's enforced on edges
}
// default tile sizes...
// NOTE, BC's may never tile smaller than the ghost zone depth
int blockcopy_i = (BLOCKCOPY_TILE_I < level->box_ghosts) ? level->box_ghosts : BLOCKCOPY_TILE_I;
int blockcopy_j = (BLOCKCOPY_TILE_J < level->box_ghosts) ? level->box_ghosts : BLOCKCOPY_TILE_J;
int blockcopy_k = (BLOCKCOPY_TILE_K < level->box_ghosts) ? level->box_ghosts : BLOCKCOPY_TILE_K;
#if 0
// 2D tiling of faces
// 1D tiling of edges
// corners use defaults
switch(dir){
case 1:blockcopy_i= 8;blockcopy_j=10000;blockcopy_k=10000;break; // i edge
case 3:blockcopy_i=10000;blockcopy_j= 8;blockcopy_k=10000;break; // j edge
case 4:blockcopy_i= 8;blockcopy_j= 8;blockcopy_k=10000;break; // ij face
case 5:blockcopy_i=10000;blockcopy_j= 8;blockcopy_k=10000;break; // j edge
case 7:blockcopy_i= 8;blockcopy_j=10000;blockcopy_k=10000;break; // i edge
case 9:blockcopy_i=10000;blockcopy_j=10000;blockcopy_k= 8;break; // k edge
case 10:blockcopy_i= 8;blockcopy_j=10000;blockcopy_k= 8;break; // ik face
case 11:blockcopy_i=10000;blockcopy_j=10000;blockcopy_k= 8;break; // k edge
case 12:blockcopy_i=10000;blockcopy_j= 8;blockcopy_k= 8;break; // jk face
case 14:blockcopy_i=10000;blockcopy_j= 8;blockcopy_k= 8;break; // jk face
case 15:blockcopy_i=10000;blockcopy_j=10000;blockcopy_k= 8;break; // k edge
case 16:blockcopy_i= 8;blockcopy_j=10000;blockcopy_k= 8;break; // ik face
case 17:blockcopy_i=10000;blockcopy_j=10000;blockcopy_k= 8;break; // k edge
case 19:blockcopy_i= 8;blockcopy_j=10000;blockcopy_k=10000;break; // i edge
case 21:blockcopy_i=10000;blockcopy_j= 8;blockcopy_k=10000;break; // j edge
case 22:blockcopy_i= 8;blockcopy_j= 8;blockcopy_k=10000;break; // ij face
case 23:blockcopy_i=10000;blockcopy_j= 8;blockcopy_k=10000;break; // j edge
case 25:blockcopy_i= 8;blockcopy_j=10000;blockcopy_k=10000;break; // i edge
}
#endif
if(regionIsOutside){
append_block_to_list(&(level->boundary_condition.blocks[shape]),&(level->boundary_condition.allocated_blocks[shape]),&(level->boundary_condition.num_blocks[shape]),
/* dim.i = */ dim_i,
/* dim.j = */ dim_j,
/* dim.k = */ dim_k,
/* read.box = */ box,
/* read.ptr = */ NULL,
/* read.i = */ block_i,
/* read.j = */ block_j,
/* read.k = */ block_k,
/* read.jStride = */ level->my_boxes[box].jStride,
/* read.kStride = */ level->my_boxes[box].kStride,
/* read.scale = */ 1,
/* write.box = */ box,
/* write.ptr = */ NULL,
/* write.i = */ block_i,
/* write.j = */ block_j,
/* write.k = */ block_k,
/* write.jStride = */ level->my_boxes[box].jStride,
/* write.kStride = */ level->my_boxes[box].kStride,
/* write.scale = */ 1,
/* blockcopy_i = */ blockcopy_i,
/* blockcopy_j = */ blockcopy_j,
/* blockcopy_k = */ blockcopy_k,
/* subtype = */ normal
);
}}}}}
#ifdef BLOCK_SPATIAL_SORT
// sort all the resultant blocks by box,k,j,i (good locality)
qsort(level->boundary_condition.blocks[shape],level->boundary_condition.num_blocks[shape],sizeof(blockCopy_type),qsortBlock);
#endif
}
//----------------------------------------------------------------------------------------------------------------------------------------------------
// create a mini program that packs data into MPI send buffers, exchanges local data, and unpacks the MPI recv buffers
// broadly speaking...
// 1. traverse my list of Boxes and create a list of ghosts that must be sent
// 2. create a list of neighbors to send to
// 3. allocate and populate the pack list and allocate the send buffers
// 4. allocate and populate the local exchange list
// 5. traverse my list of Boxes and create a list of ghosts that must be received
// 6. create a list of neighbors to receive from
// 7. allocate and populate the unpack list and allocate the recv buffers
//
// thus a ghost zone exchange is
//   1. prepost an Irecv for each MPI recv buffer (1 per neighbor)
// 2. traverse the pack list
// 3. post the Isends for each MPI send buffer (1 per neighbor)
// 4. traverse the local copy list
// 5. waitall
// 6. traverse the unpack list
//
// / 24 25 26 /
// / 21 22 23 / (k+1)
// / 18 19 20 /
//
// / 15 16 17 /
// / 12 13 14 / (k)
// / 9 10 11 /
//
// / 6 7 8 /
// / 3 4 5 / (k-1)
// / 0 1 2 /
//
// Build the ghost-zone exchange schedule for one stencil shape.
// Produces three block lists in level->exchange_ghosts[shape]:
//   blocks[0] = pack list   (my box interior -> contiguous MPI send buffer)
//   blocks[1] = local list  (my box -> another of my boxes, no MPI)
//   blocks[2] = unpack list (contiguous MPI recv buffer -> my box ghost zone)
// plus per-neighbor rank lists, buffer sizes, the buffers themselves, and
// (under MPI) the request/status arrays sized for num_sends+num_recvs.
// NOTE(review): correctness relies on sender and receiver building their
// buffer layouts in the same order; both sides qsort their ghost lists by
// sendRank then sendBoxID (qsortGZ) so offsets agree — confirm qsortGZ keys.
void build_exchange_ghosts(level_type *level, int shape){
  // 27-direction lookup tables (3x3x3 neighborhood, index dir = 13+di+3*dj+9*dk):
  // classify each direction as a face, edge, or corner of the box.
  int faces[27] = {0,0,0,0,1,0,0,0,0, 0,1,0,1,0,1,0,1,0, 0,0,0,0,1,0,0,0,0};
  int edges[27] = {0,1,0,1,0,1,0,1,0, 1,0,1,0,0,0,1,0,1, 0,1,0,1,0,1,0,1,0};
  int corners[27] = {1,0,1,0,0,0,1,0,1, 0,0,0,0,0,0,0,0,0, 1,0,1,0,0,0,1,0,1};
  // initialize to defaults...
  level->exchange_ghosts[shape].num_recvs = 0;
  level->exchange_ghosts[shape].num_sends = 0;
  level->exchange_ghosts[shape].recv_ranks = NULL;
  level->exchange_ghosts[shape].send_ranks = NULL;
  level->exchange_ghosts[shape].recv_sizes = NULL;
  level->exchange_ghosts[shape].send_sizes = NULL;
  level->exchange_ghosts[shape].recv_buffers = NULL;
  level->exchange_ghosts[shape].send_buffers = NULL;
  level->exchange_ghosts[shape].blocks[0] = NULL;
  level->exchange_ghosts[shape].blocks[1] = NULL;
  level->exchange_ghosts[shape].blocks[2] = NULL;
  level->exchange_ghosts[shape].num_blocks[0] = 0;
  level->exchange_ghosts[shape].num_blocks[1] = 0;
  level->exchange_ghosts[shape].num_blocks[2] = 0;
  level->exchange_ghosts[shape].allocated_blocks[0] = 0;
  level->exchange_ghosts[shape].allocated_blocks[1] = 0;
  level->exchange_ghosts[shape].allocated_blocks[2] = 0;
  #ifdef USE_MPI
  level->exchange_ghosts[shape].requests = NULL;
  level->exchange_ghosts[shape].status = NULL;
  #endif
  // Select which of the 27 directions must be communicated for this stencil shape.
  int n,CommunicateThisDir[27];for(n=0;n<27;n++)CommunicateThisDir[n] = faces[n] + edges[n] + corners[n];// to be safe, communicate everything
  switch(shape){
    case STENCIL_SHAPE_BOX: for(n=0;n<27;n++)CommunicateThisDir[n] = faces[n] + edges[n] + corners[n];break;
    case STENCIL_SHAPE_STAR: for(n=0;n<27;n++)CommunicateThisDir[n] = faces[n] ;break;
    case STENCIL_SHAPE_NO_CORNERS:for(n=0;n<27;n++)CommunicateThisDir[n] = faces[n] + edges[n] ;break;
  }
  int sendBox,recvBox;
  int stage;
  int _rank;
  int ghost,numGhosts,numGhostsRemote;
  // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  // traverse my list of boxes and create a lists of neighboring boxes and neighboring ranks
  GZ_type *ghostsToSend = (GZ_type*)malloc(26*level->num_my_boxes*sizeof(GZ_type)); // There are at most 26 neighbors per box.
  int *sendRanks = ( int*)malloc(26*level->num_my_boxes*sizeof( int)); // There are at most 26 neighbors per box.
  if(level->num_my_boxes>0){
  if(ghostsToSend == NULL){fprintf(stderr,"malloc failed - build_exchange_ghosts/ghostsToSend\n");exit(0);}
  if(sendRanks == NULL){fprintf(stderr,"malloc failed - build_exchange_ghosts/sendRanks \n");exit(0);}
  }
  numGhosts = 0;
  numGhostsRemote = 0;
  for(sendBox=0;sendBox<level->num_my_boxes;sendBox++){
  int di,dj,dk;
  for(dk=-1;dk<=1;dk++){
  for(dj=-1;dj<=1;dj++){
  for(di=-1;di<=1;di++){
    int dir = 13+di+3*dj+9*dk;if(CommunicateThisDir[dir]){
    int myBoxID = level->my_boxes[sendBox].global_box_id;
    // Box coordinates on the level's grid of boxes (low corner / box_dim).
    int myBox_i = level->my_boxes[sendBox].low.i / level->box_dim;
    int myBox_j = level->my_boxes[sendBox].low.j / level->box_dim;
    int myBox_k = level->my_boxes[sendBox].low.k / level->box_dim;
    int neighborBoxID = -1;
    if(level->boundary_condition.type == BC_PERIODIC){
      // periodic BC: wrap the neighbor coordinates around the domain
      int neighborBox_i = ( myBox_i + di + level->boxes_in.i) % level->boxes_in.i;
      int neighborBox_j = ( myBox_j + dj + level->boxes_in.j) % level->boxes_in.j;
      int neighborBox_k = ( myBox_k + dk + level->boxes_in.k) % level->boxes_in.k;
      neighborBoxID = neighborBox_i + neighborBox_j*level->boxes_in.i + neighborBox_k*level->boxes_in.i*level->boxes_in.j;
    }else{
      int neighborBox_i = ( myBox_i + di );
      int neighborBox_j = ( myBox_j + dj );
      int neighborBox_k = ( myBox_k + dk );
      if( (neighborBox_i>=0) && (neighborBox_i<level->boxes_in.i) &&
          (neighborBox_j>=0) && (neighborBox_j<level->boxes_in.j) &&
          (neighborBox_k>=0) && (neighborBox_k<level->boxes_in.k) ){ // i.e. the neighbor is a valid box
        neighborBoxID = neighborBox_i + neighborBox_j*level->boxes_in.i + neighborBox_k*level->boxes_in.i*level->boxes_in.j;
      }
    }
    if(neighborBoxID>=0){
    if( level->rank_of_box[neighborBoxID] != -1 ){ // -1 == no box assigned to that region
      ghostsToSend[numGhosts].sendRank = level->my_rank;
      ghostsToSend[numGhosts].sendBoxID = myBoxID;
      ghostsToSend[numGhosts].sendBox = sendBox;
      ghostsToSend[numGhosts].sendDir = dir;
      ghostsToSend[numGhosts].recvRank = level->rank_of_box[neighborBoxID];
      ghostsToSend[numGhosts].recvBoxID = neighborBoxID;
      ghostsToSend[numGhosts].recvBox = -1;
      if( level->rank_of_box[neighborBoxID] != level->my_rank ){
        // remote neighbor... remember its rank (duplicates removed below)
        sendRanks[numGhostsRemote++] = level->rank_of_box[neighborBoxID];
      }else{
        // local neighbor... resolve its index in my_boxes now (shadows the outer recvBox intentionally)
        int recvBox=0;while(level->my_boxes[recvBox].global_box_id!=neighborBoxID)recvBox++; // search my list of boxes for the appropriate recvBox index
        ghostsToSend[numGhosts].recvBox = recvBox;
      }
      numGhosts++;
    }}
  }}}}
  }
  // sort boxes by sendRank(==my rank) then by sendBoxID... ensures the sends and receive buffers are always sorted by sendBoxID...
  qsort(ghostsToSend,numGhosts ,sizeof(GZ_type),qsortGZ );
  // sort the lists of neighboring ranks and remove duplicates...
  qsort(sendRanks ,numGhostsRemote,sizeof( int),qsortInt);
  // in-place unique: compact the sorted rank list, dropping repeated ranks
  int numSendRanks=0;_rank=-1;for(ghost=0;ghost<numGhostsRemote;ghost++)if(sendRanks[ghost] != _rank){_rank=sendRanks[ghost];sendRanks[numSendRanks++]=sendRanks[ghost];}
  // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  // in a two-stage process, traverse the list of ghosts and allocate the pack/local lists as well as the MPI buffers, and then populate the pack/local lists
  level->exchange_ghosts[shape].num_sends = numSendRanks;
  level->exchange_ghosts[shape].send_ranks = (int*)malloc(numSendRanks*sizeof(int));
  level->exchange_ghosts[shape].send_sizes = (int*)malloc(numSendRanks*sizeof(int));
  level->exchange_ghosts[shape].send_buffers = (double**)malloc(numSendRanks*sizeof(double*));
  if(numSendRanks>0){
  if(level->exchange_ghosts[shape].send_ranks ==NULL){fprintf(stderr,"malloc failed - exchange_ghosts[%d].send_ranks\n",shape);exit(0);}
  if(level->exchange_ghosts[shape].send_sizes ==NULL){fprintf(stderr,"malloc failed - exchange_ghosts[%d].send_sizes\n",shape);exit(0);}
  if(level->exchange_ghosts[shape].send_buffers==NULL){fprintf(stderr,"malloc failed - exchange_ghosts[%d].send_buffers\n",shape);exit(0);}
  }
  level->exchange_ghosts[shape].blocks[0] = NULL;
  level->exchange_ghosts[shape].blocks[1] = NULL;
  level->exchange_ghosts[shape].num_blocks[0] = 0;
  level->exchange_ghosts[shape].num_blocks[1] = 0;
  level->exchange_ghosts[shape].allocated_blocks[0] = 0;
  level->exchange_ghosts[shape].allocated_blocks[1] = 0;
  for(stage=0;stage<=1;stage++){
    // stage=0... traverse the list and calculate the buffer sizes
    // stage=1... allocate MPI send buffers, traverse the list, and populate the unpack/local lists...
    int neighbor;
    for(neighbor=0;neighbor<numSendRanks;neighbor++){
      if(stage==1){
        // stage 0 accumulated send_sizes[neighbor]; now allocate and zero each buffer...
        level->exchange_ghosts[shape].send_buffers[neighbor] = (double*)malloc(level->exchange_ghosts[shape].send_sizes[neighbor]*sizeof(double));
        if(level->exchange_ghosts[shape].send_sizes[neighbor]>0)
        if(level->exchange_ghosts[shape].send_buffers[neighbor]==NULL){fprintf(stderr,"malloc failed - exchange_ghosts[%d].send_buffers[neighbor]\n",shape);exit(0);}
        memset(level->exchange_ghosts[shape].send_buffers[neighbor], 0,level->exchange_ghosts[shape].send_sizes[neighbor]*sizeof(double));
      }
      level->exchange_ghosts[shape].send_ranks[neighbor]=sendRanks[neighbor];
      level->exchange_ghosts[shape].send_sizes[neighbor]=0; // reset; re-accumulated as offsets below
    }
    for(ghost=0;ghost<numGhosts;ghost++){
      int dim_i=-1, dim_j=-1, dim_k=-1;
      int send_i=-1,send_j=-1,send_k=-1;
      int recv_i=-1,recv_j=-1,recv_k=-1;
      // decode ghostsToSend[ghost].sendDir (direction sent) into di/dj/dk
      int di = ((ghostsToSend[ghost].sendDir % 3) )-1;
      int dj = ((ghostsToSend[ghost].sendDir % 9)/3)-1;
      int dk = ((ghostsToSend[ghost].sendDir / 9) )-1;
      // per-axis region: read from my surface (interior), write into the neighbor's ghost zone
      switch(di){ // direction relative to sender
        case -1:send_i=0; dim_i=level->box_ghosts;recv_i= level->box_dim; break;
        case 0:send_i=0; dim_i=level->box_dim; recv_i=0; break;
        case 1:send_i=level->box_dim-level->box_ghosts;dim_i=level->box_ghosts;recv_i=0-level->box_ghosts;break;
      }
      switch(dj){ // direction relative to sender
        case -1:send_j=0; dim_j=level->box_ghosts;recv_j= level->box_dim; break;
        case 0:send_j=0; dim_j=level->box_dim; recv_j=0; break;
        case 1:send_j=level->box_dim-level->box_ghosts;dim_j=level->box_ghosts;recv_j=0-level->box_ghosts;break;
      }
      switch(dk){ // direction relative to sender
        case -1:send_k=0; dim_k=level->box_ghosts;recv_k= level->box_dim; break;
        case 0:send_k=0; dim_k=level->box_dim; recv_k=0; break;
        case 1:send_k=level->box_dim-level->box_ghosts;dim_k=level->box_ghosts;recv_k=0-level->box_ghosts;break;
      }
      // determine if this ghost requires a pack or local exchange
      int LocalExchange; // 0 = pack list, 1 = local exchange list
      if(ghostsToSend[ghost].recvRank != level->my_rank){
        LocalExchange=0; // pack
        neighbor=0;while(level->exchange_ghosts[shape].send_ranks[neighbor] != ghostsToSend[ghost].recvRank)neighbor++;
      }else{
        LocalExchange=1; // local
        neighbor=-1; // sentinel: no MPI buffer involved, skip the size accumulation below
      }
      if(stage==1){
      if(LocalExchange) // append to the local exchange list...
      append_block_to_list(&(level->exchange_ghosts[shape].blocks[1]),&(level->exchange_ghosts[shape].allocated_blocks[1]),&(level->exchange_ghosts[shape].num_blocks[1]),
        /* dim.i = */ dim_i,
        /* dim.j = */ dim_j,
        /* dim.k = */ dim_k,
        /* read.box = */ ghostsToSend[ghost].sendBox,
        /* read.ptr = */ NULL,
        /* read.i = */ send_i,
        /* read.j = */ send_j,
        /* read.k = */ send_k,
        /* read.jStride = */ level->my_boxes[ghostsToSend[ghost].sendBox].jStride,
        /* read.kStride = */ level->my_boxes[ghostsToSend[ghost].sendBox].kStride,
        /* read.scale = */ 1,
        /* write.box = */ ghostsToSend[ghost].recvBox,
        /* write.ptr = */ NULL,
        /* write.i = */ recv_i,
        /* write.j = */ recv_j,
        /* write.k = */ recv_k,
        /* write.jStride = */ level->my_boxes[ghostsToSend[ghost].recvBox].jStride,
        /* write.kStride = */ level->my_boxes[ghostsToSend[ghost].recvBox].kStride,
        /* write.scale = */ 1,
        /* blockcopy_i = */ BLOCKCOPY_TILE_I, // default
        /* blockcopy_j = */ BLOCKCOPY_TILE_J, // default
        /* blockcopy_k = */ BLOCKCOPY_TILE_K, // default
        /* subtype = */ 0
      );
      else // append to the MPI pack list...
      append_block_to_list(&(level->exchange_ghosts[shape].blocks[0]),&(level->exchange_ghosts[shape].allocated_blocks[0]),&(level->exchange_ghosts[shape].num_blocks[0]),
        /* dim.i = */ dim_i,
        /* dim.j = */ dim_j,
        /* dim.k = */ dim_k,
        /* read.box = */ ghostsToSend[ghost].sendBox,
        /* read.ptr = */ NULL,
        /* read.i = */ send_i,
        /* read.j = */ send_j,
        /* read.k = */ send_k,
        /* read.jStride = */ level->my_boxes[ghostsToSend[ghost].sendBox].jStride,
        /* read.kStride = */ level->my_boxes[ghostsToSend[ghost].sendBox].kStride,
        /* read.scale = */ 1,
        /* write.box = */ -1,
        /* write.ptr = */ level->exchange_ghosts[shape].send_buffers[neighbor], // NOTE, 1. count _sizes, 2. allocate _buffers, 3. populate blocks
        /* write.i = */ level->exchange_ghosts[shape].send_sizes[neighbor], // current offset in the MPI send buffer
        /* write.j = */ 0,
        /* write.k = */ 0,
        /* write.jStride = */ dim_i, // contiguous block
        /* write.kStride = */ dim_i*dim_j, // contiguous block
        /* write.scale = */ 1,
        /* blockcopy_i = */ BLOCKCOPY_TILE_I, // default
        /* blockcopy_j = */ BLOCKCOPY_TILE_J, // default
        /* blockcopy_k = */ BLOCKCOPY_TILE_K, // default
        /* subtype = */ 0
      );}
      // advance the running offset/size for this neighbor's send buffer (skipped for local exchanges)
      if(neighbor>=0)level->exchange_ghosts[shape].send_sizes[neighbor]+=dim_i*dim_j*dim_k;
    } // ghost for-loop
  } // stage for-loop
  // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  // free temporary storage...
  free(ghostsToSend);
  free(sendRanks);
  // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  // traverse my list of boxes and create a lists of neighboring boxes and neighboring ranks
  GZ_type *ghostsToRecv = (GZ_type*)malloc(26*level->num_my_boxes*sizeof(GZ_type)); // There are at most 26 neighbors per box.
  int *recvRanks = ( int*)malloc(26*level->num_my_boxes*sizeof( int)); // There are at most 26 neighbors per box.
  if(level->num_my_boxes>0){
  if(ghostsToRecv == NULL){fprintf(stderr,"malloc failed - build_exchange_ghosts/ghostsToRecv\n");exit(0);}
  if(recvRanks == NULL){fprintf(stderr,"malloc failed - build_exchange_ghosts/recvRanks \n");exit(0);}
  }
  numGhosts = 0;
  numGhostsRemote = 0;
  for(recvBox=0;recvBox<level->num_my_boxes;recvBox++){
  int di,dj,dk;
  for(dk=-1;dk<=1;dk++){
  for(dj=-1;dj<=1;dj++){
  for(di=-1;di<=1;di++){
    int dir = 13+di+3*dj+9*dk;if(CommunicateThisDir[dir]){
    int myBoxID = level->my_boxes[recvBox].global_box_id;
    int myBox_i = level->my_boxes[recvBox].low.i / level->box_dim;
    int myBox_j = level->my_boxes[recvBox].low.j / level->box_dim;
    int myBox_k = level->my_boxes[recvBox].low.k / level->box_dim;
    int neighborBoxID = -1;
    if(level->boundary_condition.type == BC_PERIODIC){
      int neighborBox_i = ( myBox_i + di + level->boxes_in.i) % level->boxes_in.i;
      int neighborBox_j = ( myBox_j + dj + level->boxes_in.j) % level->boxes_in.j;
      int neighborBox_k = ( myBox_k + dk + level->boxes_in.k) % level->boxes_in.k;
      neighborBoxID = neighborBox_i + neighborBox_j*level->boxes_in.i + neighborBox_k*level->boxes_in.i*level->boxes_in.j;
    }else{
      int neighborBox_i = ( myBox_i + di );
      int neighborBox_j = ( myBox_j + dj );
      int neighborBox_k = ( myBox_k + dk );
      if( (neighborBox_i>=0) && (neighborBox_i<level->boxes_in.i) &&
          (neighborBox_j>=0) && (neighborBox_j<level->boxes_in.j) &&
          (neighborBox_k>=0) && (neighborBox_k<level->boxes_in.k) ){ // i.e. the neighbor is a valid box
        neighborBoxID = neighborBox_i + neighborBox_j*level->boxes_in.i + neighborBox_k*level->boxes_in.i*level->boxes_in.j;
      }
    }
    if(neighborBoxID>=0){
    // only remote neighbors generate receives; local exchanges were handled in blocks[1] above
    if( (level->rank_of_box[neighborBoxID] != -1) && (level->rank_of_box[neighborBoxID] != level->my_rank) ){
      ghostsToRecv[numGhosts].sendRank = level->rank_of_box[neighborBoxID];
      ghostsToRecv[numGhosts].sendBoxID = neighborBoxID;
      ghostsToRecv[numGhosts].sendBox = -1;
      ghostsToRecv[numGhosts].sendDir = 26-dir; // mirror the direction: dir as seen from the sender's side
      ghostsToRecv[numGhosts].recvRank = level->my_rank;
      ghostsToRecv[numGhosts].recvBoxID = myBoxID;
      ghostsToRecv[numGhosts].recvBox = recvBox;
      numGhosts++;
      recvRanks[numGhostsRemote++] = level->rank_of_box[neighborBoxID];
    }}
  }}}}
  }
  // sort boxes by sendRank then by sendBoxID... ensures the recvs and receive buffers are always sorted by sendBoxID...
  qsort(ghostsToRecv,numGhosts ,sizeof(GZ_type),qsortGZ );
  // sort the lists of neighboring ranks and remove duplicates...
  qsort(recvRanks ,numGhostsRemote,sizeof( int),qsortInt);
  int numRecvRanks=0;_rank=-1;for(ghost=0;ghost<numGhostsRemote;ghost++)if(recvRanks[ghost] != _rank){_rank=recvRanks[ghost];recvRanks[numRecvRanks++]=recvRanks[ghost];}
  // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  // in a two-stage process, traverse the list of ghosts and allocate the unpack lists as well as the MPI buffers, and then populate the unpack list
  level->exchange_ghosts[shape].num_recvs = numRecvRanks;
  level->exchange_ghosts[shape].recv_ranks = (int*)malloc(numRecvRanks*sizeof(int));
  level->exchange_ghosts[shape].recv_sizes = (int*)malloc(numRecvRanks*sizeof(int));
  level->exchange_ghosts[shape].recv_buffers = (double**)malloc(numRecvRanks*sizeof(double*));
  if(numRecvRanks>0){
  if(level->exchange_ghosts[shape].recv_ranks ==NULL){fprintf(stderr,"malloc failed - exchange_ghosts[%d].recv_ranks\n",shape);exit(0);}
  if(level->exchange_ghosts[shape].recv_sizes ==NULL){fprintf(stderr,"malloc failed - exchange_ghosts[%d].recv_sizes\n",shape);exit(0);}
  if(level->exchange_ghosts[shape].recv_buffers==NULL){fprintf(stderr,"malloc failed - exchange_ghosts[%d].recv_buffers\n",shape);exit(0);}
  }
  level->exchange_ghosts[shape].blocks[2] = NULL;
  level->exchange_ghosts[shape].num_blocks[2] = 0;
  level->exchange_ghosts[shape].allocated_blocks[2] = 0;
  for(stage=0;stage<=1;stage++){
    // stage=0... traverse the list and calculate the buffer sizes
    // stage=1... allocate MPI recv buffers, traverse the list, and populate the unpack/local lists...
    int neighbor;
    for(neighbor=0;neighbor<numRecvRanks;neighbor++){
      if(stage==1){
        level->exchange_ghosts[shape].recv_buffers[neighbor] = (double*)malloc(level->exchange_ghosts[shape].recv_sizes[neighbor]*sizeof(double));
        if(level->exchange_ghosts[shape].recv_sizes[neighbor]>0)
        if(level->exchange_ghosts[shape].recv_buffers[neighbor]==NULL){fprintf(stderr,"malloc failed - exchange_ghosts[%d].recv_buffers[neighbor]\n",shape);exit(0);}
        memset(level->exchange_ghosts[shape].recv_buffers[neighbor], 0,level->exchange_ghosts[shape].recv_sizes[neighbor]*sizeof(double));
      }
      level->exchange_ghosts[shape].recv_ranks[neighbor]=recvRanks[neighbor];
      level->exchange_ghosts[shape].recv_sizes[neighbor]=0; // reset; re-accumulated as offsets below
    }
    for(ghost=0;ghost<numGhosts;ghost++){
      int dim_i=-1, dim_j=-1, dim_k=-1;
      //int send_i=-1,send_j=-1,send_k=-1;
      int recv_i=-1,recv_j=-1,recv_k=-1;
      // decode ghostsToRecv[ghost].sendDir (direction sent) into di/dj/dk
      int di = ((ghostsToRecv[ghost].sendDir % 3) )-1;
      int dj = ((ghostsToRecv[ghost].sendDir % 9)/3)-1;
      int dk = ((ghostsToRecv[ghost].sendDir / 9) )-1;
      switch(di){ // direction relative to sender
        case -1:dim_i=level->box_ghosts;recv_i= level->box_dim; break;
        case 0:dim_i=level->box_dim; recv_i=0; break;
        case 1:dim_i=level->box_ghosts;recv_i=0-level->box_ghosts;break;
      }
      switch(dj){ // direction relative to sender
        case -1:dim_j=level->box_ghosts;recv_j= level->box_dim; break;
        case 0:dim_j=level->box_dim; recv_j=0; break;
        case 1:dim_j=level->box_ghosts;recv_j=0-level->box_ghosts;break;
      }
      switch(dk){ // direction relative to sender
        case -1:dim_k=level->box_ghosts;recv_k= level->box_dim; break;
        case 0:dim_k=level->box_dim; recv_k=0; break;
        case 1:dim_k=level->box_ghosts;recv_k=0-level->box_ghosts;break;
      }
      // determine if this ghost requires a pack or local exchange
      neighbor=0;while(level->exchange_ghosts[shape].recv_ranks[neighbor] != ghostsToRecv[ghost].sendRank)neighbor++;
      if(stage==1)append_block_to_list(&(level->exchange_ghosts[shape].blocks[2]),&(level->exchange_ghosts[shape].allocated_blocks[2]),&(level->exchange_ghosts[shape].num_blocks[2]),
        /*dim.i = */ dim_i,
        /*dim.j = */ dim_j,
        /*dim.k = */ dim_k,
        /*read.box = */ -1,
        /*read.ptr = */ level->exchange_ghosts[shape].recv_buffers[neighbor], // NOTE, 1. count _sizes, 2. allocate _buffers, 3. populate blocks
        /*read.i = */ level->exchange_ghosts[shape].recv_sizes[neighbor], // current offset in the MPI recv buffer
        /*read.j = */ 0,
        /*read.k = */ 0,
        /*read.jStride = */ dim_i, // contiguous block
        /*read.kStride = */ dim_i*dim_j, // contiguous block
        /*read.scale = */ 1,
        /*write.box = */ ghostsToRecv[ghost].recvBox,
        /*write.ptr = */ NULL,
        /*write.i = */ recv_i,
        /*write.j = */ recv_j,
        /*write.k = */ recv_k,
        /*write.jStride = */ level->my_boxes[ghostsToRecv[ghost].recvBox].jStride,
        /*write.kStride = */ level->my_boxes[ghostsToRecv[ghost].recvBox].kStride,
        /*write.scale = */ 1,
        /* blockcopy_i = */ BLOCKCOPY_TILE_I, // default
        /* blockcopy_j = */ BLOCKCOPY_TILE_J, // default
        /* blockcopy_k = */ BLOCKCOPY_TILE_K, // default
        /* subtype = */ 0
      );
      if(neighbor>=0)level->exchange_ghosts[shape].recv_sizes[neighbor]+=dim_i*dim_j*dim_k;
    } // ghost for-loop
  } // stage for-loop
  // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  // free temporary storage...
  free(ghostsToRecv);
  free(recvRanks);
  // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  // malloc MPI requests/status arrays
  #ifdef USE_MPI
  level->exchange_ghosts[shape].requests = (MPI_Request*)malloc((level->exchange_ghosts[shape].num_sends+level->exchange_ghosts[shape].num_recvs)*sizeof(MPI_Request));
  level->exchange_ghosts[shape].status = (MPI_Status *)malloc((level->exchange_ghosts[shape].num_sends+level->exchange_ghosts[shape].num_recvs)*sizeof(MPI_Status ));
  if((level->exchange_ghosts[shape].num_sends+level->exchange_ghosts[shape].num_recvs)>0){
  if(level->exchange_ghosts[shape].requests==NULL){fprintf(stderr,"malloc failed - exchange_ghosts[%d].requests\n",shape);exit(0);}
  if(level->exchange_ghosts[shape].status ==NULL){fprintf(stderr,"malloc failed - exchange_ghosts[%d].status\n",shape);exit(0);}
  }
  #endif
  // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  #ifdef BLOCK_SPATIAL_SORT
  // sort all the resultant blocks by box,k,j,i (good locality)
  // NOTE(review): pack/unpack lists (blocks[0]/blocks[2]) reference fixed buffer offsets,
  // so reordering them here is safe — each block carries its own absolute offset.
  qsort(level->exchange_ghosts[shape].blocks[0],level->exchange_ghosts[shape].num_blocks[0],sizeof(blockCopy_type),qsortBlock);
  qsort(level->exchange_ghosts[shape].blocks[1],level->exchange_ghosts[shape].num_blocks[1],sizeof(blockCopy_type),qsortBlock);
  qsort(level->exchange_ghosts[shape].blocks[2],level->exchange_ghosts[shape].num_blocks[2],sizeof(blockCopy_type),qsortBlock);
  #endif
}
//---------------------------------------------------------------------------------------------------------------------------------------------------
// create the pointers in level_type to the contiguous vector FP data (useful for bulk copies to/from accelerators)
// create the pointers in each box to their respective segment of the level's vector FP data (useful for box-relative operators)
// if( (level->numVectors > 0) && (numVectors > level->numVectors) ) then allocate additional space for (numVectors-level->numVectors) and copy old level->numVectors data
// Allocate (or grow in place) the level's vector FP storage to hold numVectors components,
// zero-fill new storage, preserve any existing component data, and rebuild the per-level
// and per-box vector pointer tables plus each box's geometry metadata.
// No-op if the level already holds numVectors or more components.
void create_vectors(level_type *level, int numVectors){
  if(numVectors <= level->numVectors)return; // already have enough space
  double * old_vectors_base = level->vectors_base; // save a pointer to the originally allocated data for subsequent free()
  double * old_vector0 = NULL;
  if(level->numVectors>0)old_vector0 = level->vectors[0]; // save a pointer to old FP data to copy
  // calculate the size of each box...
  // strides are padded upward until they satisfy the BOX_ALIGN_* divisibility constraints
  level->box_jStride = (level->box_dim+2*level->box_ghosts);while(level->box_jStride % BOX_ALIGN_JSTRIDE)level->box_jStride++; // pencil
  level->box_kStride = level->box_jStride*(level->box_dim+2*level->box_ghosts);while(level->box_kStride % BOX_ALIGN_KSTRIDE)level->box_kStride++; // plane
  level->box_volume = level->box_kStride*(level->box_dim+2*level->box_ghosts);while(level->box_volume % BOX_ALIGN_VOLUME )level->box_volume++; // volume
  #define VECTOR_MALLOC_BULK
  #ifdef VECTOR_MALLOC_BULK
  // allocate one aligned, double-precision array and divide it among vectors...
  // the extra 4096 bytes leave room for the alignment adjustment of tmpbuf below
  uint64_t malloc_size = (uint64_t)numVectors*level->num_my_boxes*level->box_volume*sizeof(double) + 4096;
  level->vectors_base = (double*)malloc(malloc_size);
  if((numVectors>0)&&(level->vectors_base==NULL)){fprintf(stderr,"malloc failed - level->vectors_base\n");exit(0);}
  double * tmpbuf = level->vectors_base;
  while( (uint64_t)(tmpbuf+level->box_ghosts*(1+level->box_jStride+level->box_kStride)) & 0xff ){tmpbuf++;} // align first *non-ghost* zone element of first component to a 256-Byte boundary
  uint64_t ofs;
  #ifdef _OPENMP
  #pragma omp parallel for
  #endif
  for(ofs=0;ofs<(uint64_t)numVectors*level->num_my_boxes*level->box_volume;ofs++){tmpbuf[ofs]=0.0;} // Faster in MPI+OpenMP environments, but not NUMA-aware
  // if there is existing FP data... copy it, then free old data and pointer array
  if(level->numVectors>0){
    memcpy(tmpbuf,old_vector0,(uint64_t)level->numVectors*level->num_my_boxes*level->box_volume*sizeof(double)); // FIX... omp thread ???
    if(old_vectors_base)free(old_vectors_base); // free old data...
  }
  // allocate an array of pointers which point to the union of boxes for each vector
  // NOTE, this requires just one copyin per vector to an accelerator rather than requiring one copyin per box per vector
  if(level->numVectors>0)free(level->vectors); // free any previously allocated vector array
  level->vectors = (double **)malloc(numVectors*sizeof(double*));
  if((numVectors>0)&&(level->vectors==NULL)){fprintf(stderr,"malloc failed - level->vectors\n");exit(0);}
  uint64_t c;for(c=0;c<numVectors;c++){level->vectors[c] = tmpbuf + (uint64_t)c*level->num_my_boxes*level->box_volume;}
  #else
  // allocate vectors individually (simple, but may cause conflict misses)
  double ** old_vectors = level->vectors;
  level->vectors = (double **)malloc(numVectors*sizeof(double*));
  uint64_t c;
  // keep the existing components' allocations; only the new components are allocated and zeroed
  for(c= 0;c<level->numVectors;c++){level->vectors[c] = old_vectors[c];}
  for(c=level->numVectors;c< numVectors;c++){
    level->vectors[c] = (double*)malloc((uint64_t)level->num_my_boxes*level->box_volume*sizeof(double));
    uint64_t ofs;
    #ifdef _OPENMP
    #pragma omp parallel for
    #endif
    for(ofs=0;ofs<(uint64_t)level->num_my_boxes*level->box_volume;ofs++){level->vectors[c][ofs]=0.0;} // Faster in MPI+OpenMP environments, but not NUMA-aware
  }
  free(old_vectors);
  #endif
  // build the list of boxes...
  // walk the level's full i/j/k grid of candidate boxes and (re)initialize only those owned by this rank
  int box=0;
  int i,j,k;
  for(k=0;k<level->boxes_in.k;k++){
  for(j=0;j<level->boxes_in.j;j++){
  for(i=0;i<level->boxes_in.i;i++){
    int jStride = level->boxes_in.i;
    int kStride = level->boxes_in.i*level->boxes_in.j;
    int b=i + j*jStride + k*kStride; // global (lexicographic) box id
    if(level->rank_of_box[b]==level->my_rank){
      if(level->numVectors>0)free(level->my_boxes[box].vectors); // free previously allocated vector array
      level->my_boxes[box].vectors = (double **)malloc(numVectors*sizeof(double*));
      if((numVectors>0)&&(level->my_boxes[box].vectors==NULL)){fprintf(stderr,"malloc failed - level->my_boxes[box].vectors\n");exit(0);}
      // each box's component pointer is its fixed offset within the corresponding level-wide vector
      uint64_t c;for(c=0;c<numVectors;c++){level->my_boxes[box].vectors[c] = level->vectors[c] + (uint64_t)box*level->box_volume;}
      level->my_boxes[box].numVectors = numVectors;
      level->my_boxes[box].dim = level->box_dim;
      level->my_boxes[box].ghosts = level->box_ghosts;
      level->my_boxes[box].jStride = level->box_jStride;
      level->my_boxes[box].kStride = level->box_kStride;
      level->my_boxes[box].volume = level->box_volume;
      level->my_boxes[box].low.i = i*level->box_dim;
      level->my_boxes[box].low.j = j*level->box_dim;
      level->my_boxes[box].low.k = k*level->box_dim;
      level->my_boxes[box].global_box_id = b;
      box++;
  }}}}
  // level now has created/initialized vector FP data
  level->numVectors = numVectors;
}
//---------------------------------------------------------------------------------------------------------------------------------------------------
// create a level by populating the basic data structure, distribute boxes within the level among processes, allocate memory, and create any auxiliaries
// box_ghosts must be >= stencil_get_radius()
// numVectors represents an estimate of the number of vectors needed in this level. Additional vectors can be added via subsequent calls to create_vectors()
// Build one level of the multigrid hierarchy:
//  - decompose a boxes_in_i^3 grid of box_dim^3 boxes across num_ranks tasks,
//  - allocate the level's flattened vector storage and per-box metadata,
//  - build the flattened block list, the Gauss-Seidel red/black mask, and the
//    ghost-zone-exchange / boundary-condition mini programs,
//  - duplicate MPI_COMM_WORLD for this level's collectives.
// Exits if box_ghosts is smaller than the stencil radius.
void create_level(level_type *level, int boxes_in_i, int box_dim, int box_ghosts, int numVectors, int domain_boundary_condition, int my_rank, int num_ranks){
  int box;
  int TotalBoxes = boxes_in_i*boxes_in_i*boxes_in_i;

  if(my_rank==0){
    //if(domain_boundary_condition==BC_DIRICHLET)fprintf(stdout,"\nattempting to create a %d^3 level (with Dirichlet BC) using a %d^3 grid of %d^3 boxes and %d tasks...\n",box_dim*boxes_in_i,boxes_in_i,box_dim,num_ranks);
    //if(domain_boundary_condition==BC_PERIODIC )fprintf(stdout,"\nattempting to create a %d^3 level (with Periodic BC) using a %d^3 grid of %d^3 boxes and %d tasks...\n", box_dim*boxes_in_i,boxes_in_i,box_dim,num_ranks);
    fprintf(stdout,"\nattempting to create a %d^3 level from %d x %d^3 boxes distributed among %d tasks...\n", box_dim*boxes_in_i,TotalBoxes,box_dim,num_ranks);
    if(domain_boundary_condition==BC_DIRICHLET)fprintf(stdout," boundary condition = BC_DIRICHLET\n");
    if(domain_boundary_condition==BC_PERIODIC )fprintf(stdout," boundary condition = BC_PERIODIC\n");
  }

  // determine how many OpenMP threads are available (1 without OpenMP)...
  int omp_threads = 1;
  #ifdef _OPENMP
  #pragma omp parallel
  {
    #pragma omp master
    {
      omp_threads = omp_get_num_threads();
    }
  }
  #endif

  // the ghost region must be deep enough to hold the stencil...
  if(box_ghosts < stencil_get_radius() ){
    if(my_rank==0)fprintf(stderr,"ghosts(%d) must be >= stencil_get_radius(%d)\n",box_ghosts,stencil_get_radius());
    exit(0); // NOTE(review): exits with status 0 even though this is an error path
  }

  // initialize the level's geometry and bookkeeping...
  level->box_dim      = box_dim;
  level->box_ghosts   = box_ghosts;
  level->numVectors   = 0;    // no vectors have been allocated yet
  level->vectors_base = NULL; // pointer returned by bulk malloc
  level->vectors      = NULL; // pointers to individual vectors
  level->boxes_in.i   = boxes_in_i;
  level->boxes_in.j   = boxes_in_i;
  level->boxes_in.k   = boxes_in_i; // levels are cubical: same box count in each dimension
  level->dim.i        = box_dim*level->boxes_in.i;
  level->dim.j        = box_dim*level->boxes_in.j;
  level->dim.k        = box_dim*level->boxes_in.k;
  level->active       = 1;
  level->my_rank      = my_rank;
  level->num_ranks    = num_ranks;
  level->boundary_condition.type = domain_boundary_condition;
  level->must_subtract_mean = -1; // -1 == not yet decided (presumably resolved later by the solver setup) — TODO confirm
  level->num_threads      = omp_threads;
  level->my_blocks        = NULL;
  level->num_my_blocks    = 0;
  level->allocated_blocks = 0;
  level->tag              = log2(level->dim.i); // level identifier derived from its global dimension
  level->fluxes           = NULL;

  // allocate 3D array of integers to hold the MPI rank of the corresponding box and initialize to -1 (unassigned)
  level->rank_of_box = (int*)malloc(level->boxes_in.i*level->boxes_in.j*level->boxes_in.k*sizeof(int));
  if(level->rank_of_box==NULL){fprintf(stderr,"malloc of level->rank_of_box failed\n");exit(0);}
  for(box=0;box<level->boxes_in.i*level->boxes_in.j*level->boxes_in.k;box++){level->rank_of_box[box]=-1;} // -1 denotes that there is no actual box assigned to this region

  // parallelize the level (i.e. assign a process rank to each box)...
  #ifdef DECOMPOSE_LEX
  // lexicographical ordering... good load balance, potentially high bisection bandwidth requirements, bad surface:volume ratio when #boxes/proc is large
  if(my_rank==0){fprintf(stdout," Decomposing level via lexicographical ordering... ");fflush(stdout);}
  decompose_level_lex(level->rank_of_box,level->boxes_in.i,level->boxes_in.j,level->boxes_in.k,num_ranks);
  #elif DECOMPOSE_BISECTION_SPECIAL
  // recursive partitioning by primes
  if(my_rank==0){fprintf(stdout," Decomposing level via partitioning by primes... ");fflush(stdout);}
  decompose_level_bisection_special(level->rank_of_box,level->boxes_in.i,level->boxes_in.i*level->boxes_in.j,0,0,0,level->boxes_in.i,level->boxes_in.j,level->boxes_in.k,0,num_ranks);
  #elif DECOMPOSE_BISECTION
  // recursive bisection
  if(my_rank==0){fprintf(stdout," Decomposing level via recursive bisection... ");fflush(stdout);}
  decompose_level_bisection(level->rank_of_box,level->boxes_in.i,level->boxes_in.i*level->boxes_in.j,0,0,0,level->boxes_in.i,level->boxes_in.j,level->boxes_in.k,num_ranks,0,level->boxes_in.i*level->boxes_in.j*level->boxes_in.k);
  #else//#elif DECOMPOSE_ZMORT
  // Z-mort (Morton) ordering is the default decomposition...
  if(my_rank==0){fprintf(stdout," Decomposing level via Z-mort ordering... ");fflush(stdout);}
  #if 0 // Z-Mort over a power of two bounding box skipping boxes outside the domain
  int idim_padded=1;while(idim_padded<level->boxes_in.i)idim_padded*=2;
  int jdim_padded=1;while(jdim_padded<level->boxes_in.j)jdim_padded*=2;
  int kdim_padded=1;while(kdim_padded<level->boxes_in.k)kdim_padded*=2;
  #else // Z-Mort over the valid domain with odd-sized base cases (i.e. zmort on 3x3)
  int idim_padded=level->boxes_in.i;
  int jdim_padded=level->boxes_in.j;
  int kdim_padded=level->boxes_in.k;
  #endif
  decompose_level_zmort(level->rank_of_box,level->boxes_in.i,level->boxes_in.j,level->boxes_in.k,0,0,0,idim_padded,jdim_padded,kdim_padded,num_ranks,0,level->boxes_in.i*level->boxes_in.j*level->boxes_in.k);
  #endif
  if(my_rank==0){fprintf(stdout,"done\n");fflush(stdout);}
  //print_decomposition(level);// for debug purposes only

  // calculate how many boxes I own...
  level->num_my_boxes=0;
  for(box=0;box<level->boxes_in.i*level->boxes_in.j*level->boxes_in.k;box++){if(level->rank_of_box[box]==level->my_rank)level->num_my_boxes++;}
  level->my_boxes = (box_type*)malloc(level->num_my_boxes*sizeof(box_type));
  if((level->num_my_boxes>0)&&(level->my_boxes==NULL)){fprintf(stderr,"malloc failed - create_level/level->my_boxes\n");exit(0);}

  // allocate flattened vector FP data and create pointers...
  if(my_rank==0){fprintf(stdout," Allocating vectors... ");fflush(stdout);}
  create_vectors(level,numVectors);
  if(my_rank==0){fprintf(stdout,"done\n");fflush(stdout);}

  // Build an auxiliary data structure that flattens boxes into blocks...
  // (one full-box block per box; read and write describe the same region)
  for(box=0;box<level->num_my_boxes;box++){
    int blockcopy_i = BLOCKCOPY_TILE_I;
    int blockcopy_j = BLOCKCOPY_TILE_J;
    int blockcopy_k = BLOCKCOPY_TILE_K;
    append_block_to_list(&(level->my_blocks),&(level->allocated_blocks),&(level->num_my_blocks),
      /* dim.i         = */ level->my_boxes[box].dim,
      /* dim.j         = */ level->my_boxes[box].dim,
      /* dim.k         = */ level->my_boxes[box].dim,
      /* read.box      = */ box,
      /* read.ptr      = */ NULL,
      /* read.i        = */ 0,
      /* read.j        = */ 0,
      /* read.k        = */ 0,
      /* read.jStride  = */ level->my_boxes[box].jStride,
      /* read.kStride  = */ level->my_boxes[box].kStride,
      /* read.scale    = */ 1,
      /* write.box     = */ box,
      /* write.ptr     = */ NULL,
      /* write.i       = */ 0,
      /* write.j       = */ 0,
      /* write.k       = */ 0,
      /* write.jStride = */ level->my_boxes[box].jStride,
      /* write.kStride = */ level->my_boxes[box].kStride,
      /* write.scale   = */ 1,
      /* blockcopy_i   = */ blockcopy_i,
      /* blockcopy_j   = */ blockcopy_j,
      /* blockcopy_k   = */ blockcopy_k,
      /* subtype       = */ 0
    );
  }

  // build an assist structure for Gauss Seidel Red Black that would facilitate unrolling and SIMDization...
  level->RedBlack_base = NULL;
  level->RedBlack_FP = NULL;
  if(level->num_my_boxes){
    int i,j;
    int kStride = level->my_boxes[0].kStride;
    int jStride = level->my_boxes[0].jStride;
    // two kStride-sized ij-planes (complementary checkerboard masks) plus 256 bytes of slack for alignment
    level->RedBlack_base = (double*)malloc(2*kStride*sizeof(double)+256); // used for free()
    level->RedBlack_FP = level->RedBlack_base; // aligned version
    // align first *non-ghost* zone element to a 64-Byte boundary...
    // NOTE(review): this uses level->box_jStride (presumably set by create_vectors) rather than the local jStride — confirm they agree
    while( (uint64_t)(level->RedBlack_FP + level->box_ghosts*(1+level->box_jStride)) & 0x3f ){level->RedBlack_FP++;}
    // initialize RedBlack array: plane 0 and plane kStride hold opposite checkerboard parities...
    for(j=0-level->box_ghosts;j<level->box_dim+level->box_ghosts;j++){
    for(i=0-level->box_ghosts;i<level->box_dim+level->box_ghosts;i++){
      int ij = (i+level->box_ghosts) + (j+level->box_ghosts)*jStride;
      if((i^j^1)&0x1){ // checkerboard parity of cell (i,j)
        level->RedBlack_FP[ij        ]=1.0;
        level->RedBlack_FP[ij+kStride]=0.0;
      }else{
        level->RedBlack_FP[ij        ]=0.0;
        level->RedBlack_FP[ij+kStride]=1.0;
      }
      // Never update ghost zones
      //if( (i<0) || (i>=level->box_dim) || (j<0) || (j>=level->box_dim) ){
      //  level->RedBlack_FP[ij        ]=0.0;
      //  level->RedBlack_FP[ij+kStride]=0.0;
      //}
    }}
  }

  int shape;
  // create mini program for each stencil shape to perform a ghost zone exchange...
  for(shape=0;shape<STENCIL_MAX_SHAPES;shape++)build_exchange_ghosts( level,shape);
  // create mini program for each stencil shape to perform a boundary condition...
  for(shape=0;shape<STENCIL_MAX_SHAPES;shape++)build_boundary_conditions(level,shape);

  // duplicate MPI_COMM_WORLD to be the communicator for each level
  #ifdef USE_MPI
  if(my_rank==0){fprintf(stdout," Duplicating MPI_COMM_WORLD... ");fflush(stdout);}
  double time_start = MPI_Wtime();
  MPI_Comm_dup(MPI_COMM_WORLD,&level->MPI_COMM_ALLREDUCE);
  double time_end = MPI_Wtime();
  double time_in_comm_dup = 0;
  double time_in_comm_dup_send = time_end-time_start;
  // report the slowest rank's comm_dup time...
  MPI_Allreduce(&time_in_comm_dup_send,&time_in_comm_dup,1,MPI_DOUBLE,MPI_MAX,MPI_COMM_WORLD);
  if(my_rank==0){fprintf(stdout,"done (%0.6f seconds)\n",time_in_comm_dup);fflush(stdout);}
  #endif

  // report on potential load imbalance (max boxes/process vs the ideal average)
  int BoxesPerProcess = level->num_my_boxes;
  #ifdef USE_MPI
  int BoxesPerProcessSend = level->num_my_boxes;
  MPI_Allreduce(&BoxesPerProcessSend,&BoxesPerProcess,1,MPI_INT,MPI_MAX,MPI_COMM_WORLD);
  #endif
  if(my_rank==0){fprintf(stdout," Calculating boxes per process... target=%0.3f, max=%d\n",(double)TotalBoxes/(double)num_ranks,BoxesPerProcess);}
}
//---------------------------------------------------------------------------------------------------------------------------------------------------
// zeros are the timers within this level
// useful if one wishes to separate setup(build) timing from solve timing
// Zero every timer and solver-event counter carried by this level.
// All fields are independent, so the order of the stores is irrelevant.
void reset_level_timers(level_type *level){
  // solver event counters...
  level->Krylov_iterations        = 0;
  level->CAKrylov_formations_of_G = 0;
  level->vcycles_from_this_level  = 0;
  // aggregate timers...
  level->timers.Total       = 0;
  level->timers.collectives = 0;
  // compute-phase timers...
  level->timers.smooth              = 0;
  level->timers.apply_op            = 0;
  level->timers.residual            = 0;
  level->timers.blas1               = 0;
  level->timers.blas3               = 0;
  level->timers.boundary_conditions = 0;
  // restriction timers...
  level->timers.restriction_total   = 0;
  level->timers.restriction_pack    = 0;
  level->timers.restriction_local   = 0;
  level->timers.restriction_unpack  = 0;
  level->timers.restriction_recv    = 0;
  level->timers.restriction_send    = 0;
  level->timers.restriction_wait    = 0;
  // interpolation timers...
  level->timers.interpolation_total  = 0;
  level->timers.interpolation_pack   = 0;
  level->timers.interpolation_local  = 0;
  level->timers.interpolation_unpack = 0;
  level->timers.interpolation_recv   = 0;
  level->timers.interpolation_send   = 0;
  level->timers.interpolation_wait   = 0;
  // ghost-zone exchange timers...
  level->timers.ghostZone_total  = 0;
  level->timers.ghostZone_pack   = 0;
  level->timers.ghostZone_local  = 0;
  level->timers.ghostZone_unpack = 0;
  level->timers.ghostZone_recv   = 0;
  level->timers.ghostZone_send   = 0;
  level->timers.ghostZone_wait   = 0;
}
//---------------------------------------------------------------------------------------------------------------------------------------------------
// free all memory allocated by this level
// n.b. in some cases a malloc was used as the basis for an array of pointers. As such free(x[0])
// Release every allocation owned by this level: per-box vector pointer
// arrays, level-wide metadata, the flattened FP vector data, and the
// boundary-condition / ghost-zone-exchange mini programs.
// free(NULL) is a no-op per the C standard, so most pointers are freed
// unconditionally.
void destroy_level(level_type *level){
  int n,m;
  if(level->my_rank==0){fprintf(stdout,"attempting to free the %5d^3 level... ",level->dim.i);fflush(stdout);}

  // per-box vector pointer arrays...
  for(n=0;n<level->num_my_boxes;n++)free(level->my_boxes[n].vectors);

  // miscellaneous level-wide arrays...
  free(level->rank_of_box);
  free(level->my_boxes);
  free(level->my_blocks);
  free(level->RedBlack_base);

  // flattened FP vector data...
  #ifdef VECTOR_MALLOC_BULK
  free(level->vectors_base);
  free(level->vectors);
  #else
  for(n=0;n<level->numVectors;n++)free(level->vectors[n]);
  free(level->vectors);
  #endif

  // boundary-condition mini programs (one per stencil shape)...
  for(n=0;n<STENCIL_MAX_SHAPES;n++)free(level->boundary_condition.blocks[n]);

  // ghost-zone exchange mini programs...
  for(n=0;n<STENCIL_MAX_SHAPES;n++){
    if(level->exchange_ghosts[n].num_recvs>0){
      for(m=0;m<level->exchange_ghosts[n].num_recvs;m++)free(level->exchange_ghosts[n].recv_buffers[m]);
      free(level->exchange_ghosts[n].recv_buffers);
      free(level->exchange_ghosts[n].recv_ranks);
      free(level->exchange_ghosts[n].recv_sizes);
    }
    if(level->exchange_ghosts[n].num_sends>0){
      for(m=0;m<level->exchange_ghosts[n].num_sends;m++)free(level->exchange_ghosts[n].send_buffers[m]);
      free(level->exchange_ghosts[n].send_buffers);
      free(level->exchange_ghosts[n].send_ranks);
      free(level->exchange_ghosts[n].send_sizes);
    }
    free(level->exchange_ghosts[n].blocks[0]);
    free(level->exchange_ghosts[n].blocks[1]);
    free(level->exchange_ghosts[n].blocks[2]);
    #ifdef USE_MPI
    free(level->exchange_ghosts[n].requests);
    free(level->exchange_ghosts[n].status);
    #endif
  }
  if(level->my_rank==0){fprintf(stdout,"done\n");}
}
//#if CD
//size_t cd_preserve_box_type(cd_handle_t* cd_h, const box_type& box, const char* name){
// size_t size=0;
// char prv_name[100];
// sprintf(prv_name, "box_%s", name);
// size_t tmp_size = sizeof(box_type);
// cd_preserve(cd_h, (void*)&box, tmp_size, kCopy, prv_name, prv_name);
// size += tmp_size;
//
// // preserve vectors
// sprintf(prv_name, "box_vectors_%s", name);
// size += cd_preserve_global_ptr(cd_h, box.vectors, prv_name);
// for (int ii=0; ii<box.numVectors; ii++){
// sprintf(prv_name, "box_vectors_%d_%s", ii, name);
// size += cd_preserve_global_ptr(cd_h, box.vectors[ii].get(), prv_name);
// }
//
// // preserve vectors_base
// sprintf(prv_name, "box_vectors_base_%s", name);
// size += cd_preserve_global_ptr(cd_h, box.vectors_base, prv_name);
// uint64_t malloc_size = box.volume*box.numVectors + BOX_ALIGN_1ST_CELL/sizeof(double);
// cd_preserve(cd_h, box.vectors_base.raw_ptr(), malloc_size, kCopy, prv_name, prv_name);
// size += malloc_size;
// return size;
//}
//
//size_t cd_preserve_communication_type_extra(cd_handle_t *cd_h, const communicator_type& comm, const char* name){
// size_t ret_size=0;
// char prv_name[100];
// size_t size=0;
//
// sprintf(prv_name, "commtype_recvrank_%s", name);
// size=comm.num_recvs*sizeof(int);
// cd_preserve(cd_h, comm.recv_ranks, size, kCopy, prv_name, prv_name);
// ret_size += size;
// sprintf(prv_name, "commtype_sendrank_%s", name);
// size=comm.num_sends*sizeof(int);
// cd_preserve(cd_h, comm.send_ranks, size, kCopy, prv_name, prv_name);
// ret_size += size;
// sprintf(prv_name, "commtype_recvsize_%s", name);
// size=comm.num_recvs*sizeof(int);
// cd_preserve(cd_h, comm.recv_sizes, size, kCopy, prv_name, prv_name);
// ret_size += size;
// sprintf(prv_name, "commtype_sendsize_%s", name);
// size=comm.num_sends*sizeof(int);
// cd_preserve(cd_h, comm.send_sizes, size, kCopy, prv_name, prv_name);
// ret_size += size;
//
// sprintf(prv_name, "commtype_rflag_%s", name);
// ret_size += cd_preserve_global_ptr(cd_h, comm.rflag, name);
//
// sprintf(prv_name, "commtype_matchflag_%s", name);
// size = comm.num_sends*sizeof(global_ptr<int>);
// cd_preserve(cd_h, comm.match_rflag, size, kCopy, prv_name, prv_name);
// ret_size += size;
// //for (int ii=0; ii<comm.num_sends; ii++){
// // sprintf(prv_name, "commtype_matchflag_%d_%s", ii, name);
// // ret_size += cd_preserve_global_ptr(cd_h, comm.match_rflag[ii], name);
// //}
//
// sprintf(prv_name, "commtype_sblock2_%s", name);
// //size = (comm.num_recvs+2) * sizeof(int);
// size = comm.num_recvs * sizeof(int);
// cd_preserve(cd_h, comm.sblock2, size, kCopy, prv_name, prv_name);
// ret_size += size;
//
// // FIXME: comm.eblock2 is never used???
// //sprintf(prv_name, "commtype_eblock2_%s", name);
//
// sprintf(prv_name, "commtype_sendmatchpos_%s", name);
// size = (comm.num_sends) * sizeof(int);
// cd_preserve(cd_h, comm.send_match_pos, size, kCopy, prv_name, prv_name);
// ret_size += size;
//
// // only need to preserve pointer information..
// sprintf(prv_name, "commtype_grecvbuffer_%s", name);
// size = comm.num_recvs*sizeof(global_ptr<double>);
// cd_preserve(cd_h, comm.global_recv_buffers, size, kCopy, prv_name, prv_name);
// ret_size += size;
// sprintf(prv_name, "commtype_gsendbuffer_%s", name);
// size = comm.num_sends*sizeof(global_ptr<double>);
// cd_preserve(cd_h, comm.global_send_buffers, size, kCopy, prv_name, prv_name);
// ret_size += size;
// sprintf(prv_name, "commtype_gmatchbuffer_%s", name);
// size = comm.num_sends*sizeof(global_ptr<double>);
// cd_preserve(cd_h, comm.global_match_buffers, size, kCopy, prv_name, prv_name);
// ret_size += size;
//
// // FIXME: how to preserve copy_e and data_e
//
// sprintf(prv_name, "commtype_recvbuffer_%s", name);
// size=comm.num_recvs*sizeof(double*);
// cd_preserve(cd_h, comm.recv_buffers, size, kCopy, prv_name, prv_name);
// ret_size += size;
// sprintf(prv_name, "commtype_sendbuffer_%s", name);
// size=comm.num_sends*sizeof(double*);
// cd_preserve(cd_h, comm.send_buffers, size, kCopy, prv_name, prv_name);
// ret_size += size;
//
// for (int ii=0; ii<4; ii++){
// if (comm.blocks[ii]==NULL) continue;
// sprintf(prv_name, "commtype_blocks_%d_%s", ii, name);
// size = comm.num_blocks[ii]*sizeof(blockCopy_type);
// cd_preserve(cd_h, comm.blocks[ii], size, kCopy, prv_name, prv_name);
// ret_size += size;
// }
// return ret_size;
//}
//
//size_t cd_preserve_level(cd_handle_t* cd_h, level_type *level, const char* name){
// size_t prv_size=0;
// char prv_name[100];
// sprintf(prv_name, "level_%s", name);
// size_t tmp_size = sizeof(level_type);
// cd_preserve(cd_h, level, tmp_size, kCopy, prv_name, prv_name);
// prv_size += tmp_size;
//
// // preserve subteam information
// // FIXME: pointers inside subteam (e.g. ptrs to parent and child teams) may be wrong in recovery,
// // because only the pointer data are preserved.
// // So if we have node recovery in higher level, the pointers need to be fixed!
// sprintf(prv_name, "level_subteam_%s", name);
// tmp_size = sizeof(team);
// cd_preserve(cd_h, level->subteam, tmp_size, kCopy, prv_name, prv_name);
// prv_size += tmp_size;
//
// // preserve rank_of_box
// sprintf(prv_name, "level_rankofbox_%s", name);
// size_t size = level->boxes_in.i*level->boxes_in.j*level->boxes_in.k*sizeof(int);
// cd_preserve(cd_h, level->rank_of_box, size, kCopy, prv_name, prv_name);
// prv_size += size;
//
// // preserve my_boxes; place and T* should be preserved; need to preserve the allocated data
// sprintf(prv_name, "level_myboxes_%s", name);
// prv_size += cd_preserve_global_ptr(cd_h, level->my_boxes, prv_name);
// // preserve all pointers inside box_type
// for (int ii=0; ii<level->num_my_boxes; ii++){
// prv_size += cd_preserve_box_type(cd_h, level->my_boxes[ii].get(), prv_name);
// }
//
// // preserve addr_of_box
// sprintf(prv_name, "level_aob_%s", name);
// size = level->boxes_in.i*level->boxes_in.j*level->boxes_in.k;
// tmp_size = size*sizeof(global_ptr<box_type>);
// cd_preserve(cd_h, level->addr_of_box, tmp_size, kCopy, prv_name, prv_name);
// prv_size += tmp_size;
// for (size_t ii=0; ii<size; ii++){
// prv_size += cd_preserve_global_ptr(cd_h, level->addr_of_box[ii], prv_name);
// }
//
// // preserve my_local_boxes
// sprintf(prv_name, "level_mlb_%s", name);
// size = level->num_my_boxes * sizeof(box_type *);
// cd_preserve(cd_h, level->my_local_boxes, size, kCopy, prv_name, prv_name);
// prv_size += size;
//
// // preserve my_blocks
// // SZNOTE: no need to do extra things for blockCopy_type,
// // because pointers are saved with blockCopy_type, while the contents have been preserved in preservations
// sprintf(prv_name, "level_myblocks_%s", name);
// size = level->num_my_boxes * sizeof(blockCopy_type);
// cd_preserve(cd_h, level->my_blocks, size, kCopy, prv_name, prv_name);
//
// // preserve RedBlack_FP
// sprintf(prv_name, "level_rbfp_%s", name);
// size = sizeof(double*)*2;
// cd_preserve(cd_h, level->RedBlack_FP, size, kCopy, prv_name, prv_name);
// prv_size += size;
// if (level->my_boxes!=NULL){
// size = level->my_boxes[0].get().kStride*sizeof(double);
// for (int ii=0; ii<2; ii++){
// sprintf(prv_name, "level_rbfp_%d_%s", ii, name);
// cd_preserve(cd_h, level->RedBlack_FP[ii], size, kCopy, prv_name, prv_name);
// prv_size += size;
// }
// }
//
// //std::cout << "before preserving exchange_ghosts\n";
// // preserve exchange_ghosts
// sprintf(prv_name, "level_eg_%s", name);
// size = 2*sizeof(communicator_type);
// cd_preserve(cd_h, level->exchange_ghosts, size, kCopy, prv_name, prv_name);
// prv_size += size;
// for (int ii=0; ii<2; ii++){
// sprintf(prv_name, "level_eg_%d_%s", ii, name);
// prv_size += cd_preserve_communication_type_extra(cd_h, level->exchange_ghosts[ii], prv_name);
// }
//
// //std::cout << "before preserving restriction\n";
// // preserve restriction
// sprintf(prv_name, "level_restriction_%s", name);
// size = 4*sizeof(communicator_type);
// cd_preserve(cd_h, level->restriction, size, kCopy, prv_name, prv_name);
// prv_size += size;
// for (int ii=0; ii<4; ii++){
// sprintf(prv_name, "level_restriction_%d_%s", ii, name);
// prv_size += cd_preserve_communication_type_extra(cd_h, level->restriction[ii], prv_name);
// }
//
// //std::cout << "before preserving interpolation\n";
// // preserve interpolation
// sprintf(prv_name, "level_interpolation_%s", name);
// size = sizeof(communicator_type);
// cd_preserve(cd_h, &(level->interpolation), size, kCopy, prv_name, prv_name);
// sprintf(prv_name, "level_interpolation_extra_%s", name);
// prv_size += cd_preserve_communication_type_extra(cd_h, level->interpolation, prv_name);
//
// //std::cout << "before preserving boundary_condition\n";
// // preserve boundary_condition.blocks
// for (int ii=0; ii<2; ii++){
// if (level->boundary_condition.blocks[ii]==NULL) continue;
// sprintf(prv_name, "level_bc_blocks_%d_%s", ii, name);
// size = level->boundary_condition.num_blocks[ii]*sizeof(blockCopy_type);
// cd_preserve(cd_h, level->boundary_condition.blocks[ii], size, kCopy, prv_name, prv_name);
// prv_size += size;
// }
//
// return prv_size;
//}
//
//size_t cd_preserve_levels(cd_handle_t* cd_h, level_type **levels, int num_levels, const char* name){
// size_t prv_size=0;
// char prv_name[100];
// // preserve pointers to all levels; guarded by num_levels
// sprintf(prv_name, "level_type_ptr_%s", name);
// size_t tmp_size = sizeof(level_type*)*num_levels;
// cd_preserve(cd_h, levels, tmp_size, kCopy, prv_name, prv_name);
// prv_size += tmp_size;
//
// // preserve each level
// for (int ii=0; ii<num_levels; ii++){
// //std::cout << "before preserving level::" << ii << "\n";
// prv_size += cd_preserve_level(cd_h, levels[ii], name);
// }
// return prv_size;
//}
//
//#endif
|
flatsky_utils.c | #include "config.h"
#include "utils.h"
#include <fitsio.h>
//Allocate an n-byte, FFTW-aligned buffer with the precision-matched
//FFTW allocator. Aborts through report_error() if allocation fails.
void *dftw_malloc(size_t n)
{
  void *buf;
#ifdef _SPREC
  buf=fftwf_malloc(n);
#else //_SPREC
  buf=fftw_malloc(n);
#endif //_SPREC
  if(buf==NULL)
    report_error(NMT_ERROR_MEMORY,"Ran out of memory\n");
  return buf;
}
//Release a buffer obtained from dftw_malloc() using the matching
//(single- or double-precision) FFTW deallocator.
void dftw_free(void *p)
{
#ifdef _SPREC
  fftwf_free(p);
#else //_SPREC
  fftw_free(p);
#endif //_SPREC
}
//Copy srcmap into destmap (all fs->npix pixels), parallelized over pixels.
void fs_mapcpy(nmt_flatsky_info *fs,flouble *destmap,flouble *srcmap)
{
#pragma omp parallel default(none) shared(fs,destmap,srcmap)
  {
    long ii;
#pragma omp for
    for(ii=0;ii<fs->npix;ii++)
      destmap[ii]=srcmap[ii];
  } //end omp parallel
}
//Pixel-wise product: mp_out[i]=mp1[i]*mp2[i] for all fs->npix pixels.
//Since each pixel is written independently, mp_out may alias an input.
void fs_map_product(nmt_flatsky_info *fs,flouble *mp1,flouble *mp2,flouble *mp_out)
{
#pragma omp parallel default(none) shared(fs,mp1,mp2,mp_out)
  {
    long ii;
#pragma omp for
    for(ii=0;ii<fs->npix;ii++)
      mp_out[ii]=mp1[ii]*mp2[ii];
  } //end omp parallel
}
//Dot product of two maps weighted by the pixel area:
//returns (sum_i mp1[i]*mp2[i]) * fs->pixsize.
//Each thread accumulates in double precision and the partial sums are
//combined inside a critical section.
flouble fs_map_dot(nmt_flatsky_info *fs,flouble *mp1,flouble *mp2)
{
  double total=0;
#pragma omp parallel default(none) shared(mp1,mp2,total,fs)
  {
    long ii;
    double acc=0;
#pragma omp for
    for(ii=0;ii<fs->npix;ii++)
      acc+=mp1[ii]*mp2[ii];
#pragma omp critical
    {
      total+=acc;
    } //end omp critical
  } //end omp parallel
  return (flouble)(total*fs->pixsize);
}
//Rotate the Fourier coefficients of a spin-`spin` field from its two
//(Q,U)-like components into the (E,B)-like basis, in place.
//alm[0] and alm[1] hold the r2c half-plane coefficients of the two
//components. Each mode k is rotated by spin*phi_k (phi_k = angle of the
//wavevector) and multiplied by sig = -(i^spin) (or +1 for spin==0).
static void qu2eb(nmt_flatsky_info *fs,int spin,fcomplex **alm)
{
  int sig_overall=-1; //overall sign is -1 for any non-zero spin...
  if(spin==0)
    sig_overall=1;    //...and +1 for spin 0
#pragma omp parallel default(none) \
  shared(fs,spin,alm,sig_overall)
  {
    int iy;
    fcomplex sig=sig_overall*cpow(I,spin); //sig = sig_overall * i^spin
    flouble dkx=2*M_PI/fs->lx; //Fourier-space cell size along x
    flouble dky=2*M_PI/fs->ly; //Fourier-space cell size along y
#pragma omp for
    for(iy=0;iy<fs->ny;iy++) {
      int ix;
      flouble ky;
      if(2*iy<=fs->ny)
        ky=iy*dky;           //non-negative ky for the first half of rows
      else
        ky=-(fs->ny-iy)*dky; //wrapped (negative) ky for the second half
      for(ix=0;ix<=fs->nx/2;ix++) { //r2c layout: only kx>=0 is stored
        flouble csphi,ssphi,cph,sph;
        fcomplex e,b;
        int s=0;
        flouble kx=ix*dkx;
        long index=ix+(fs->nx/2+1)*iy;
        flouble kmod2=kx*kx+ky*ky;
        if(kmod2<=0) { //k=0: angle undefined, take phi=0
          cph=1;
          sph=0;
        }
        else {
          flouble i_kmod=1./sqrt(kmod2);
          cph=kx*i_kmod; //cos(phi_k)
          sph=ky*i_kmod; //sin(phi_k)
        }
        //build cos(spin*phi) and sin(spin*phi) by repeated angle addition
        csphi=1; ssphi=0;
        while(s<spin) {
          flouble c2=csphi*cph-ssphi*sph;
          flouble s2=ssphi*cph+csphi*sph;
          csphi=c2;
          ssphi=s2;
          s++;
        }
        //rotate (alm[0],alm[1]) by spin*phi and apply the overall factor
        e=sig*(alm[0][index]*csphi-alm[1][index]*ssphi);
        b=sig*(alm[0][index]*ssphi+alm[1][index]*csphi);
        alm[0][index]=e;
        alm[1][index]=b;
      }
    } //end omp for
  } //end omp parallel
}
//Inverse of qu2eb(): rotate Fourier coefficients from the (E,B)-like
//basis back into the (Q,U)-like components, in place.
//Uses sig = sig_overall*(-i)^spin and the transposed rotation, so that
//eb2qu(qu2eb(x)) == x for the same spin.
static void eb2qu(nmt_flatsky_info *fs,int spin,fcomplex **alm)
{
  int sig_overall=-1; //overall sign is -1 for any non-zero spin...
  if(spin==0)
    sig_overall=1;    //...and +1 for spin 0
#pragma omp parallel default(none) \
  shared(fs,spin,alm,sig_overall)
  {
    int iy;
    fcomplex sig=sig_overall*cpow(-I,spin); //sig = sig_overall * (-i)^spin
    flouble dkx=2*M_PI/fs->lx;
    flouble dky=2*M_PI/fs->ly;
#pragma omp for
    for(iy=0;iy<fs->ny;iy++) {
      int ix;
      flouble ky;
      if(2*iy<=fs->ny)
        ky=iy*dky;           //non-negative ky for the first half of rows
      else
        ky=-(fs->ny-iy)*dky; //wrapped (negative) ky for the second half
      for(ix=0;ix<=fs->nx/2;ix++) { //r2c layout: only kx>=0 is stored
        flouble csphi,ssphi,cph,sph;
        fcomplex q,u;
        int s=0;
        flouble kx=ix*dkx;
        long index=ix+(fs->nx/2+1)*iy;
        flouble kmod2=kx*kx+ky*ky;
        if(kmod2<=0) { //k=0: angle undefined, take phi=0
          cph=1;
          sph=0;
        }
        else {
          flouble i_kmod=1./sqrt(kmod2);
          cph=kx*i_kmod; //cos(phi_k)
          sph=ky*i_kmod; //sin(phi_k)
        }
        //build cos(spin*phi) and sin(spin*phi) by repeated angle addition
        csphi=1; ssphi=0;
        while(s<spin) {
          flouble c2=csphi*cph-ssphi*sph;
          flouble s2=ssphi*cph+csphi*sph;
          csphi=c2;
          ssphi=s2;
          s++;
        }
        //inverse rotation (opposite sign on the sine terms)
        q=sig*( alm[0][index]*csphi+alm[1][index]*ssphi);
        u=sig*(-alm[0][index]*ssphi+alm[1][index]*csphi);
        alm[0][index]=q;
        alm[1][index]=u;
      }
    } //end omp for
  } //end omp parallel
}
//Forward transform: compute Fourier coefficients for ntrans transforms.
//Each transform has 1 map (spin==0) or 2 maps (spin!=0); map and alm
//hold ntrans*nmaps entries. After the r2c FFT every coefficient is
//scaled by lx*ly/(2*pi*nx*ny), and for spin!=0 each (Q,U) pair is
//rotated into (E,B) via qu2eb().
void fs_map2alm(nmt_flatsky_info *fs,int ntrans,int spin,flouble **map,fcomplex **alm)
{
  //TODO init threads??
#ifdef _SPREC
  fftwf_plan plan_ft;
#else //_SPREC
  fftw_plan plan_ft;
#endif //_SPREC
  int imap,nmaps=1;
  if(spin)
    nmaps=2; //spin!=0 fields carry two components per transform
  for(imap=0;imap<nmaps*ntrans;imap++) {
    //FFTW_ESTIMATE planning does not overwrite the already-filled arrays
#ifdef _SPREC
    plan_ft=fftwf_plan_dft_r2c_2d(fs->ny,fs->nx,map[imap],alm[imap],FFTW_ESTIMATE);
    fftwf_execute(plan_ft);
    fftwf_destroy_plan(plan_ft);
#else //_SPREC
    plan_ft=fftw_plan_dft_r2c_2d(fs->ny,fs->nx,map[imap],alm[imap],FFTW_ESTIMATE);
    fftw_execute(plan_ft);
    fftw_destroy_plan(plan_ft);
#endif //_SPREC
    //normalize the DFT output (presumably to the continuum FT convention)
#pragma omp parallel default(none) \
  shared(fs,alm,imap)
    {
      long ipix;
      flouble norm=fs->lx*fs->ly/(2*M_PI*fs->nx*fs->ny);
#pragma omp for
      for(ipix=0;ipix<fs->ny*(fs->nx/2+1);ipix++) {
        alm[imap][ipix]*=norm;
      } //end omp for
    } //end omp parallel
  }
  if(nmaps>1) { //Q,U -> E,B
    for(imap=0;imap<ntrans*nmaps;imap+=nmaps)
      qu2eb(fs,spin,&(alm[imap]));
  }
}
//Inverse transform: compute maps from Fourier coefficients for ntrans
//transforms (1 map if spin==0, 2 if spin!=0).
//For spin!=0 the (E,B) coefficients are first rotated to (Q,U), then
//each map is obtained via a c2r FFT and scaled by 2*pi/(lx*ly), and the
//alms are finally rotated back to (E,B).
//NOTE(review): FFTW's multi-dimensional c2r transforms generally
//overwrite their complex input, so the alm contents after this call may
//not be pristine — confirm against FFTW documentation/intended usage.
void fs_alm2map(nmt_flatsky_info *fs,int ntrans,int spin,flouble **map,fcomplex **alm)
{
  //TODO init threads??
#ifdef _SPREC
  fftwf_plan plan_ft;
#else //_SPREC
  fftw_plan plan_ft;
#endif //_SPREC
  int imap,nmaps=1;
  if(spin)
    nmaps=2; //spin!=0 fields carry two components per transform
  if(nmaps>1) { //E,B -> Q,U
    for(imap=0;imap<ntrans*nmaps;imap+=nmaps)
      eb2qu(fs,spin,&(alm[imap]));
  }
  for(imap=0;imap<nmaps*ntrans;imap++) {
#ifdef _SPREC
    plan_ft=fftwf_plan_dft_c2r_2d(fs->ny,fs->nx,alm[imap],map[imap],FFTW_ESTIMATE);
    fftwf_execute(plan_ft);
    fftwf_destroy_plan(plan_ft);
#else //_SPREC
    plan_ft=fftw_plan_dft_c2r_2d(fs->ny,fs->nx,alm[imap],map[imap],FFTW_ESTIMATE);
    fftw_execute(plan_ft);
    fftw_destroy_plan(plan_ft);
#endif //_SPREC
    //normalize the inverse DFT output
#pragma omp parallel default(none) \
  shared(fs,map,imap)
    {
      long ipix;
      flouble norm=2*M_PI/(fs->lx*fs->ly);
#pragma omp for
      for(ipix=0;ipix<fs->npix;ipix++) {
        map[imap][ipix]*=norm;
      } //end omp for
    } //end omp parallel
  }
  if(nmaps>1) { //Q,U -> E,B (restore the input basis)
    for(imap=0;imap<ntrans*nmaps;imap+=nmaps)
      qu2eb(fs,spin,&(alm[imap]));
  }
}
#define SAMP_RATE_SIGMA 128
#define FWHM2SIGMA_FLAT 0.00012352884853326381
nmt_k_function *fs_generate_beam_window(double fwhm_amin)
{
int ii;
nmt_k_function *beam;
flouble *larr=my_malloc(5*SAMP_RATE_SIGMA*sizeof(flouble));
flouble *farr=my_malloc(5*SAMP_RATE_SIGMA*sizeof(flouble));
double sigma=FWHM2SIGMA_FLAT*fwhm_amin;
for(ii=0;ii<5*SAMP_RATE_SIGMA;ii++) {
flouble l=(ii+0.0)/(SAMP_RATE_SIGMA*sigma);
larr[ii]=l;
farr[ii]=exp(-0.5*l*l*sigma*sigma);
}
beam=nmt_k_function_alloc(5*SAMP_RATE_SIGMA,larr,farr,1.,0.,0);
free(larr);
free(farr);
return beam;
}
//Set all ny*(nx/2+1) Fourier coefficients in alm to zero.
void fs_zero_alm(nmt_flatsky_info *fs,fcomplex *alm)
{
#pragma omp parallel default(none) shared(fs,alm)
  {
    int ipix;
#pragma omp for
    for(ipix=0;ipix<fs->ny*(fs->nx/2+1);ipix++)
      alm[ipix]=0;
  } //end omp parallel
}
//Multiply alm_in by a beam window, writing (or, if add_to_out!=0,
//accumulating) the result into alm_out.
//If window==NULL a Gaussian beam of FWHM fwhm_amin (arcmin) is built
//internally and freed before returning; otherwise the supplied window
//is used and left untouched. alm_out may alias alm_in.
void fs_alter_alm(nmt_flatsky_info *fs,double fwhm_amin,fcomplex *alm_in,fcomplex *alm_out,
                  nmt_k_function *window,int add_to_out)
{
  nmt_k_function *beam;
  if(window==NULL) beam=fs_generate_beam_window(fwhm_amin);
  else beam=window;
#pragma omp parallel default(none) \
  shared(fs,alm_in,alm_out,beam,add_to_out)
  {
    int iy;
    flouble dkx=2*M_PI/fs->lx;
    flouble dky=2*M_PI/fs->ly;
    gsl_interp_accel *intacc_thr=gsl_interp_accel_alloc(); //one interpolation accelerator per thread
#pragma omp for
    for(iy=0;iy<fs->ny;iy++) {
      int ix;
      flouble ky;
      if(2*iy<=fs->ny)
        ky=iy*dky;           //non-negative ky for the first half of rows
      else
        ky=-(fs->ny-iy)*dky; //wrapped (negative) ky for the second half
      for(ix=0;ix<=fs->nx/2;ix++) {
        flouble kx=ix*dkx;
        long index=ix+(fs->nx/2+1)*iy;
        flouble kmod=sqrt(kx*kx+ky*ky);
        if(add_to_out)
          alm_out[index]+=alm_in[index]*nmt_k_function_eval(beam,kmod,intacc_thr);
        else
          alm_out[index]=alm_in[index]*nmt_k_function_eval(beam,kmod,intacc_thr);
      }
    } //end omp for
    gsl_interp_accel_free(intacc_thr);
  } //end omp parallel
  if(window==NULL) nmt_k_function_free(beam);
}
//Compute binned cross-power spectra between two sets of Fourier
//coefficients (1 or 2 maps each, depending on spin_1/spin_2).
//cls[i2+nmaps_2*i1][band] receives the average over the band's modes of
//Re(alm1)Re(alm2)+Im(alm1)Im(alm2), scaled by 4*pi^2/(lx*ly).
//Modes whose (signed) kx lies in [lmn_x,lmx_x], or whose (signed) ky
//lies in [lmn_y,lmx_y], are EXCLUDED from the average; passing lmn>lmx
//disables the corresponding cut.
void fs_alm2cl(nmt_flatsky_info *fs,nmt_binning_scheme_flat *bin,
               fcomplex **alms_1,fcomplex **alms_2,int spin_1,int spin_2,flouble **cls,
               flouble lmn_x,flouble lmx_x,flouble lmn_y,flouble lmx_y)
{
  int i1,nmaps_1=1,nmaps_2=1;
  int *n_cells=my_malloc(bin->n_bands*sizeof(int)); //modes accumulated per band
  if(spin_1) nmaps_1=2;
  if(spin_2) nmaps_2=2;
  for(i1=0;i1<nmaps_1;i1++) {
    int i2;
    fcomplex *alm1=alms_1[i1];
    for(i2=0;i2<nmaps_2;i2++) {
      int il;
      fcomplex *alm2=alms_2[i2];
      int index_cl=i2+nmaps_2*i1;
      flouble norm_factor=4*M_PI*M_PI/(fs->lx*fs->ly);
      //reset the accumulators for this pair of maps
      for(il=0;il<bin->n_bands;il++) {
        cls[index_cl][il]=0;
        n_cells[il]=0;
      }
#pragma omp parallel default(none) \
  shared(fs,bin,alm1,alm2,index_cl,cls) \
  shared(lmn_x,lmx_x,lmn_y,lmx_y,n_cells)
      {
        int iy;
        flouble dkx=2*M_PI/fs->lx;
        flouble dky=2*M_PI/fs->ly;
#pragma omp for
        for(iy=0;iy<fs->ny;iy++) {
          int ix;
          flouble ky;
          int ik=0; //bin-search hint, reused along the row
          if(2*iy<=fs->ny) ky=iy*dky;
          else ky=-(fs->ny-iy)*dky;
          if((ky>=lmn_y) && (ky<=lmx_y))
            continue; //ky falls inside the excluded band
          for(ix=0;ix<fs->nx;ix++) {
            int ix_here;
            long index;
            flouble kmod,kx;
            if(2*ix<=fs->nx) {
              kx=ix*dkx;
              ix_here=ix;
            }
            else { //kx<0: read the stored conjugate mode at column nx-ix
              kx=-(fs->nx-ix)*dkx;
              ix_here=fs->nx-ix;
            }
            if((kx>=lmn_x) && (kx<=lmx_x))
              continue; //kx falls inside the excluded band
            //NOTE(review): for kx<0 the exact conjugate mode lives in row
            //ny-iy rather than iy — confirm using row iy here is intended.
            index=ix_here+(fs->nx/2+1)*iy;
            kmod=sqrt(kx*kx+ky*ky);
            ik=nmt_bins_flat_search_fast(bin,kmod,ik);
            if(ik>=0) { //ik<0 => mode falls outside all bands
#pragma omp atomic
              cls[index_cl][ik]+=(creal(alm1[index])*creal(alm2[index])+cimag(alm1[index])*cimag(alm2[index]));
#pragma omp atomic
              n_cells[ik]++;
            }
          }
        } //end omp for
      } //end omp parallel
      //normalize each band by the number of contributing modes
      for(il=0;il<bin->n_bands;il++) {
        if(n_cells[il]<=0)
          cls[index_cl][il]=0;
        else
          cls[index_cl][il]*=norm_factor/n_cells[il];
      }
    }
  }
  free(n_cells);
}
//Compute binned cross-power spectra between two sets of maps by
//Fourier-transforming both and calling fs_alm2cl() with no mode cuts
//(lmn>lmx disables the exclusion bands). If maps_1==maps_2 the second
//transform is skipped and the same coefficients are reused.
void fs_anafast(nmt_flatsky_info *fs,nmt_binning_scheme_flat *bin,
                flouble **maps_1,flouble **maps_2,int spin_1,int spin_2,flouble **cls)
{
  int ii;
  fcomplex **alms_1,**alms_2;
  int nmaps_1=(spin_1 ? 2 : 1);
  int nmaps_2=(spin_2 ? 2 : 1);

  alms_1=my_malloc(nmaps_1*sizeof(fcomplex *));
  for(ii=0;ii<nmaps_1;ii++)
    alms_1[ii]=dftw_malloc(fs->ny*(fs->nx/2+1)*sizeof(fcomplex));
  fs_map2alm(fs,1,spin_1,maps_1,alms_1);

  if(maps_1==maps_2)
    alms_2=alms_1; //auto-spectrum: reuse the first transform
  else {
    alms_2=my_malloc(nmaps_2*sizeof(fcomplex *));
    for(ii=0;ii<nmaps_2;ii++)
      alms_2[ii]=dftw_malloc(fs->ny*(fs->nx/2+1)*sizeof(fcomplex));
    fs_map2alm(fs,1,spin_2,maps_2,alms_2);
  }

  fs_alm2cl(fs,bin,alms_1,alms_2,spin_1,spin_2,cls,1.,-1.,1.,-1.);

  for(ii=0;ii<nmaps_1;ii++)
    dftw_free(alms_1[ii]);
  free(alms_1);
  if(maps_1!=maps_2) {
    for(ii=0;ii<nmaps_2;ii++)
      dftw_free(alms_2[ii]);
    free(alms_2);
  }
}
fcomplex **fs_synalm(int nx,int ny,flouble lx,flouble ly,int nmaps,
nmt_k_function **cells,nmt_k_function **beam,int seed)
{
int imap;
fcomplex **alms;
alms=my_malloc(nmaps*sizeof(fcomplex *));
for(imap=0;imap<nmaps;imap++)
alms[imap]=dftw_malloc(ny*(nx/2+1)*sizeof(fcomplex));
//Switch off error handler for Cholesky decomposition
gsl_error_handler_t *geh=gsl_set_error_handler_off();
int numthr=0;
#pragma omp parallel default(none) \
shared(nx,ny,lx,ly,nmaps,cells,beam,seed,alms,numthr)
{
//This is to avoid using the omp.h library
int ithr;
#pragma omp critical
{
ithr=numthr;
numthr++;
}
int iy;
double dkx=2*M_PI/lx,dky=2*M_PI/ly;
double inv_dkvol=1./(dkx*dky);
gsl_vector *rv1=gsl_vector_alloc(nmaps);
gsl_vector *iv1=gsl_vector_alloc(nmaps);
gsl_vector *rv2=gsl_vector_alloc(nmaps);
gsl_vector *iv2=gsl_vector_alloc(nmaps);
gsl_matrix *clmat=gsl_matrix_calloc(nmaps,nmaps);
gsl_vector *eval =gsl_vector_alloc(nmaps);
gsl_matrix *evec =gsl_matrix_alloc(nmaps,nmaps);
gsl_eigen_symmv_workspace *wsym=gsl_eigen_symmv_alloc(nmaps);
unsigned int seed_thr=(unsigned int)(seed+ithr);
gsl_rng *rng=init_rng(seed_thr);
gsl_interp_accel *intacc_cells=gsl_interp_accel_alloc();
gsl_interp_accel *intacc_beam=gsl_interp_accel_alloc();
#pragma omp for
for(iy=0;iy<ny;iy++) {
int ix;
flouble ky;
if(2*iy<=ny)
ky=iy*dky;
else
ky=-(ny-iy)*dky;
for(ix=0;ix<=nx/2;ix++) {
int imp1,imp2;
flouble kx=ix*dkx;
long index=ix+(nx/2+1)*iy;
flouble kmod=sqrt(kx*kx+ky*ky);
if(kmod<0) {
for(imp1=0;imp1<nmaps;imp1++)
alms[imp1][index]=0;
}
else {
//Get power spectrum
int icl=0;
for(imp1=0;imp1<nmaps;imp1++) {
for(imp2=imp1;imp2<nmaps;imp2++) {//Fill up only lower triangular part
flouble cl=0.5*inv_dkvol*nmt_k_function_eval(cells[icl],kmod,intacc_cells);
gsl_matrix_set(clmat,imp1,imp2,cl);
if(imp2!=imp1)
gsl_matrix_set(clmat,imp2,imp1,cl);
icl++;
}
}
//Take square root
gsl_eigen_symmv(clmat,eval,evec,wsym);
for(imp1=0;imp1<nmaps;imp1++) {
double dr,di; //At the same time get white random numbers
rng_gauss(rng,&dr,&di);
gsl_vector_set(rv1,imp1,dr);
gsl_vector_set(iv1,imp1,di);
for(imp2=0;imp2<nmaps;imp2++) {
double oij=gsl_matrix_get(evec,imp1,imp2);
double lambda=gsl_vector_get(eval,imp2);
if(lambda<=0) lambda=0;
else lambda=sqrt(lambda);
gsl_matrix_set(clmat,imp1,imp2,oij*lambda);
}
}
	//Get correlated random numbers
gsl_blas_dgemv(CblasNoTrans,1.,clmat,rv1,0,rv2);
gsl_blas_dgemv(CblasNoTrans,1.,clmat,iv1,0,iv2);
for(imp1=0;imp1<nmaps;imp1++) {
flouble bm=nmt_k_function_eval(beam[imp1],kmod,intacc_beam);
flouble a_re=bm*gsl_vector_get(rv2,imp1);
flouble a_im=bm*gsl_vector_get(iv2,imp1);
if(ix==0) {
if(iy>ny/2)
continue;
else {
if(iy==0)
alms[imp1][index]=(fcomplex)(M_SQRT2*a_re+I*0*a_im);
else {
int iyy=ny-iy;
alms[imp1][index]=(fcomplex)(a_re+I*a_im);
alms[imp1][ix+(nx/2+1)*iyy]=(fcomplex)(a_re-I*a_im);
}
}
}
else
alms[imp1][index]=(fcomplex)(a_re+I*a_im);
}
}
}
} //omp end for
gsl_vector_free(rv1);
gsl_vector_free(iv1);
gsl_vector_free(rv2);
gsl_vector_free(iv2);
gsl_matrix_free(clmat);
gsl_vector_free(eval);
gsl_matrix_free(evec);
gsl_eigen_symmv_free(wsym);
end_rng(rng);
gsl_interp_accel_free(intacc_cells);
gsl_interp_accel_free(intacc_beam);
} //omp end parallel
//Restore error handler
gsl_set_error_handler(geh);
return alms;
}
//Read a single header keyword from the current HDU; abort via report_error
//with an NMT read error if the key is absent (i.e. *status became non-zero).
static void read_key(fitsfile *fptr,int dtype,char *key,void *val,int *status)
{
  fits_read_key(fptr, dtype, key, val, NULL, status);
  if (*status != 0)
    report_error(NMT_ERROR_READ,"Key %s not found\n",key);
}
//Read the nfield-th image HDU of a flat-sky FITS map.
//Outputs: *nx,*ny = pixel dimensions, *lx,*ly = patch extent in radians.
//Returns a newly allocated array of nx*ny pixels (caller frees).
//Aborts through report_error on any open/read failure.
flouble *fs_read_flat_map(char *fname,int *nx,int *ny,flouble *lx,flouble *ly,int nfield)
{
  fitsfile *fptr;
  int numhdu,hdutype,naxis,naxis1,naxis2;
  double cdelt1,cdelt2;
  flouble nulval=-999; //Fill value for undefined pixels
  int status=0;
  fits_open_file(&fptr,fname,READONLY,&status);
  if(status)
    report_error(NMT_ERROR_FOPEN,"Can't open file %s\n",fname);
  fits_get_num_hdus(fptr,&numhdu,&status);
  if(nfield>=numhdu)
    report_error(NMT_ERROR_READ,"%d-th field doesn't exist\n",nfield);
  //HDUs are 1-based in CFITSIO, hence nfield+1
  fits_movabs_hdu(fptr,nfield+1,&hdutype,&status);
  if(hdutype!=IMAGE_HDU)
    report_error(NMT_ERROR_READ,"Requested HDU is not an image\n");
  //Read patch properties
  read_key(fptr,TINT,"NAXIS",&naxis,&status);
  read_key(fptr,TINT,"NAXIS1",&naxis1,&status);
  read_key(fptr,TINT,"NAXIS2",&naxis2,&status);
  read_key(fptr,TDOUBLE,"CDELT1",&cdelt1,&status);
  read_key(fptr,TDOUBLE,"CDELT2",&cdelt2,&status);
  if(naxis!=2)
    report_error(NMT_ERROR_READ,"Can't find a two-dimensional map\n");
  *nx=naxis1;
  *ny=naxis2;
  //CDELT is in degrees per pixel; convert total extent to radians
  *lx=fabs(naxis1*cdelt1)*M_PI/180;
  *ly=fabs(naxis2*cdelt2)*M_PI/180;
  //Read data
  long fpixel[2]={1,1};
  //Fixed: allocate sizeof(flouble), not sizeof(double) -- the original
  //over-allocated 2x when compiled single-precision (_SPREC), inconsistent
  //with the TFLOAT read below. Cast avoids int overflow for large maps.
  flouble *map_out=my_malloc((size_t)naxis1*naxis2*sizeof(flouble));
#ifdef _SPREC
  fits_read_pix(fptr,TFLOAT,fpixel,naxis1*naxis2,&nulval,map_out,NULL,&status);
#else //_SPREC
  fits_read_pix(fptr,TDOUBLE,fpixel,naxis1*naxis2,&nulval,map_out,NULL,&status);
#endif //_SPREC
  if(status)
    report_error(NMT_ERROR_READ,"Error reading image from file %s\n",fname);
  fits_close_file(fptr,&status);
  return map_out;
}
|
3d25pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
 * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Subtract the `struct timeval' values X and Y, storing the result in
 * RESULT. Returns 1 if the difference is negative, otherwise 0.
 * NOTE: *y is normalized in place, so the caller's Y is mutated. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec)
    {
      /* Borrow enough whole seconds from y->tv_sec to make
       * x->tv_usec - y->tv_usec non-negative. */
      int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
      y->tv_usec -= 1000000 * nsec;
      y->tv_sec += nsec;
    }
  if (x->tv_usec - y->tv_usec > 1000000)
    {
      /* Opposite direction: fold excess microseconds into seconds. */
      int nsec = (x->tv_usec - y->tv_usec) / 1000000;
      y->tv_usec += 1000000 * nsec;
      y->tv_sec -= nsec;
    }
  /* Compute the time remaining to wait.
   * tv_usec is certainly positive.
   */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}
/*
 * Benchmark driver: allocates the double-buffered grid A and the 13
 * coefficient arrays, fills them with reproducible pseudo-random data, then
 * runs the tiled (CLooG-generated) order-4 25-point stencil TESTS times and
 * reports the best wall-clock time.
 */
int main(int argc, char *argv[])
{
  int i, j, k, m, test;
  /* Grid sizes include an 8-point halo (+8). Defaults are used when fewer
   * than 3 (resp. 4) arguments are given; the original code left these
   * uninitialized in that case, which is undefined behavior. */
  int Nx = 64+8, Ny = 64+8, Nz = 64+8, Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
  // allocate the arrays: A[2][Nz][Ny][Nx] (two time steps, ping-pong buffers)
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // 13 axis-symmetric coefficient arrays, same layout as one A buffer
  double ****coef = (double ****) malloc(sizeof(double***)*13);
  for(m=0; m<13;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 4;
  tile_size[3] = 512;
  tile_size[4] = -1;
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  // initialize variables with reproducible pseudo-random data
  // NOTE(review): loops start at 1, so the index-0 planes of A and coef are
  // never initialized here -- confirm the stencil bounds never read them.
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<13; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    /* (A glibc license header pulled in by the source-to-source tool was
       removed here; it carried no code.) */
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lbp, ubp;
    register int lbv, ubv;
    /* Start of CLooG code (time-tiled, OpenMP-parallel over t2 wavefronts) */
    if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
      for (t1=-1;t1<=floord(Nt-1,2);t1++) {
        lbp=max(ceild(t1,2),ceild(4*t1-Nt+2,4));
        ubp=min(floord(4*Nt+Nz-9,16),floord(8*t1+Nz+2,16));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(max(1,ceild(16*t2-Nz+9,4)),2*t1+1),4*t1-4*t2+2);t3<=min(min(min(floord(4*Nt+Ny-9,4),floord(8*t1+Ny+7,4)),floord(16*t2+Ny+3,4)),floord(16*t1-16*t2+Nz+Ny+5,4));t3++) {
            for (t4=max(max(max(0,ceild(t1-63,64)),ceild(16*t2-Nz-499,512)),ceild(4*t3-Ny-499,512));t4<=min(min(min(min(floord(4*Nt+Nx-9,512),floord(8*t1+Nx+7,512)),floord(16*t2+Nx+3,512)),floord(4*t3+Nx-9,512)),floord(16*t1-16*t2+Nz+Nx+5,512));t4++) {
              for (t5=max(max(max(max(max(0,ceild(16*t2-Nz+5,4)),ceild(4*t3-Ny+5,4)),ceild(512*t4-Nx+5,4)),2*t1),4*t1-4*t2+1);t5<=min(min(min(min(min(floord(16*t1-16*t2+Nz+10,4),Nt-1),2*t1+3),4*t2+2),t3-1),128*t4+126);t5++) {
                for (t6=max(max(16*t2,4*t5+4),-16*t1+16*t2+8*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+8*t5),4*t5+Nz-5);t6++) {
                  for (t7=4*t3;t7<=min(4*t3+3,4*t5+Ny-5);t7++) {
                    lbv=max(512*t4,4*t5+4);
                    ubv=min(512*t4+511,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) 
+ (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  for(m=0; m<13;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  return 0;
}
|
8.norace3.c | // RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s
#include <omp.h>
#define M 200
#define N 200
// Row-wise matrix-vector product A = B*C. `sum` is declared firstprivate,
// so every thread starts from its own private copy; each row index i is
// written by exactly one thread, hence the region is data-race free (this
// is the property the FileCheck line below verifies).
int main() {
  double A[M], B[M][N], C[N], sum = 0.0;
#pragma omp parallel for firstprivate(sum)
  for (int i = 0; i < M; i++) {
    for (int j = 0; j < N; j++) {
      sum += B[i][j] * C[j]; // accumulates into the thread-private copy
    }
    A[i] = sum;  // distinct i per iteration: no concurrent writes
    sum = 0.0;   // reset the private copy before this thread's next row
  }
}
// CHECK: Region is Data Race Free.
// END
|
GB_unaryop__ainv_bool_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_bool_int16
// op(A') function: GB_tran__ainv_bool_int16
// C type: bool
// A type: int16_t
// cast: bool cij = (bool) aij
// unaryop: cij = aij
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, aij) \
bool z = (bool) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_BOOL || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (bool) Ax [p] for all p in [0, anz): apply the AINV unary op
// (identity at the bool level, per GB_OP above) with an int16 -> bool
// typecast, statically scheduled across nthreads OpenMP threads.
// Returns GrB_NO_VALUE when the operator/type combination is disabled at
// compile time via GB_DISABLE; GrB_SUCCESS otherwise.
GrB_Info GB_unop__ainv_bool_int16
(
    bool *Cx,       // Cx and Ax may be aliased
    int16_t *Ax,
    int64_t anz,    // number of entries to process
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;   // Cx [p] = op (cast (Ax [p]))
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast int16 -> bool, and apply the
// AINV unary op. The actual loop nest lives in the included template
// GB_unaryop_transpose.c (phase 2), driven by the per-slice row counts in
// Rowcounts and the A_slice partition over naslice tasks.
// Returns GrB_NO_VALUE when disabled via GB_DISABLE; GrB_SUCCESS otherwise.
GrB_Info GB_tran__ainv_bool_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,    // one row-count array per slice
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
double_reduction_plus_2.c | #include <stdlib.h>
#include <stdio.h>
#include <omp.h>
// Demo of an OpenMP '+' reduction on a double: every thread contributes its
// thread id on top of the initial value 100, and the combined total is
// printed once after the parallel region.
int main()
{
  double result = 100;
  // Inside the region each thread works on a private copy of `result`
  // (initialized to the '+' identity, 0); the partial sums are combined
  // with the original value at the join.
#pragma omp parallel reduction(+:result)
  {
    const int my_id = omp_get_thread_num();
    result = result + my_id;
  }
  printf("Result: %f\n", result);
}
|
convolution_pack8to4_fp16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Direct (non-im2col) convolution in fp16 storage and fp16 arithmetic on
// ARM NEON: input is packed 8 channels per element, output packed 4.
// For every output pixel, walks all input channel blocks and all
// kernel_w*kernel_h taps, accumulating with an 8-lane fused-multiply-add
// ladder, then applies the requested activation before storing.
// Assumes top_blob is pre-sized to (outw, outh, outch) and weight_data_fp16
// holds 32 fp16 weights per tap (8 input lanes x 4 output lanes).
static void convolution_pack8to4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_fp16, const Mat& bias_data_fp16, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int maxk = kernel_w * kernel_h;

    // kernel offsets: flat offsets (in packed elements) of each dilated tap
    // relative to the top-left tap of the receptive field
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        int gap = w * dilation_h - kernel_w * dilation_w; // jump to next tap row
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2 += dilation_w;
            }
            p2 += gap;
        }
    }

    const __fp16* bias_data_ptr = bias_data_fp16;

    // num_output: one thread per output channel block
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        __fp16* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // start from the bias for this 4-channel output block (or 0)
                float16x4_t _sum = vdup_n_f16((__fp16)0.f);

                if (bias_data_ptr)
                {
                    _sum = vld1_f16(bias_data_ptr + p * 4);
                }

                const __fp16* kptr = weight_data_fp16.channel(p);

                // channels: iterate input channel blocks (8 lanes each)
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    const __fp16* sptr = m.row<const __fp16>(i * stride_h) + j * stride_w * 8;

                    for (int k = 0; k < maxk; k++)
                    {
                        // 8 input lanes at this tap
                        float16x8_t _val = vld1q_f16(sptr + space_ofs[k] * 8);

                        // 8 weight vectors of 4 output lanes each; lane n of
                        // _val scales _wn, so all 8 input channels feed the
                        // same 4 outputs
                        float16x4_t _w0 = vld1_f16(kptr);
                        float16x4_t _w1 = vld1_f16(kptr + 4);
                        float16x4_t _w2 = vld1_f16(kptr + 8);
                        float16x4_t _w3 = vld1_f16(kptr + 12);
                        float16x4_t _w4 = vld1_f16(kptr + 16);
                        float16x4_t _w5 = vld1_f16(kptr + 20);
                        float16x4_t _w6 = vld1_f16(kptr + 24);
                        float16x4_t _w7 = vld1_f16(kptr + 28);

                        _sum = vfma_laneq_f16(_sum, _w0, _val, 0);
                        _sum = vfma_laneq_f16(_sum, _w1, _val, 1);
                        _sum = vfma_laneq_f16(_sum, _w2, _val, 2);
                        _sum = vfma_laneq_f16(_sum, _w3, _val, 3);
                        _sum = vfma_laneq_f16(_sum, _w4, _val, 4);
                        _sum = vfma_laneq_f16(_sum, _w5, _val, 5);
                        _sum = vfma_laneq_f16(_sum, _w6, _val, 6);
                        _sum = vfma_laneq_f16(_sum, _w7, _val, 7);

                        kptr += 32; // advance to next tap's 8x4 weights
                    }
                }

                _sum = activation_ps(_sum, activation_type, activation_params);

                vst1_f16(outptr + j * 4, _sum);
            }

            outptr += outw * 4;
        }
    }
}
|
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute RESULT = X - Y for `struct timeval' values.
 * Returns 1 when the difference is negative, 0 otherwise.
 * NOTE: *y is normalized in place as a side effect. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds from y so that x->tv_usec - y->tv_usec
   * becomes non-negative. */
  if (x->tv_usec < y->tv_usec) {
    int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * carry;
    y->tv_sec += carry;
  }

  /* Opposite direction: fold excess microseconds back into seconds. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }

  /* With y normalized, the microsecond field is now non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Negative difference iff x's (adjusted) seconds fall short of y's. */
  return x->tv_sec < y->tv_sec;
}
/*
 * Benchmark driver: allocates the double-buffered grid A and the 7
 * coefficient arrays, fills them with reproducible pseudo-random data, then
 * runs the tiled (CLooG-generated) order-1 7-point stencil TESTS times and
 * reports the best wall-clock time.
 */
int main(int argc, char *argv[])
{
  int i, j, k, m, test;
  /* Grid sizes include a 1-point halo on each side (+2). Defaults are used
   * when fewer than 3 (resp. 4) arguments are given; the original code left
   * these uninitialized in that case, which is undefined behavior. */
  int Nx = 64+2, Ny = 64+2, Nz = 64+2, Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
  // allocate the arrays: A[2][Nz][Ny][Nx] (two time steps, ping-pong buffers)
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // 7 variable-coefficient arrays, same layout as one A buffer
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 8;
  tile_size[3] = 128;
  tile_size[4] = -1;
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  // initialize variables with reproducible pseudo-random data
  // NOTE(review): loops start at 1, so the index-0 planes of A and coef are
  // never initialized here -- confirm the stencil bounds never read them.
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    /* (A glibc license header pulled in by the source-to-source tool was
       removed here; it carried no code.) */
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lbp, ubp;
    register int lbv, ubv;
    /* Start of CLooG code (time-tiled, OpenMP-parallel over t2 wavefronts) */
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,2);t1++) {
        lbp=max(ceild(t1,2),ceild(4*t1-Nt+3,4));
        ubp=min(floord(Nt+Nz-4,4),floord(2*t1+Nz-1,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(t1-3,4)),ceild(4*t2-Nz-4,8));t3<=min(min(min(floord(4*t2+Ny,8),floord(Nt+Ny-4,8)),floord(2*t1+Ny+1,8)),floord(4*t1-4*t2+Nz+Ny-1,8));t3++) {
            for (t4=max(max(max(0,ceild(t1-63,64)),ceild(4*t2-Nz-124,128)),ceild(8*t3-Ny-124,128));t4<=min(min(min(min(floord(4*t2+Nx,128),floord(Nt+Nx-4,128)),floord(2*t1+Nx+1,128)),floord(8*t3+Nx+4,128)),floord(4*t1-4*t2+Nz+Nx-1,128));t4++) {
              for (t5=max(max(max(max(max(0,2*t1),4*t1-4*t2+1),4*t2-Nz+2),8*t3-Ny+2),128*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,2*t1+3),4*t2+2),8*t3+6),128*t4+126),4*t1-4*t2+Nz+1);t5++) {
                for (t6=max(max(4*t2,t5+1),-4*t1+4*t2+2*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) {
                    lbv=max(128*t4,t5+1);
                    ubv=min(128*t4+127,t5+Nx-2);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  return 0;
}
|
stats.c | //-----------------------------------------------------------------------------
// stats.c
//
// Project: EPA SWMM5
// Version: 5.1
// Date: 03/20/14 (Build 5.1.001)
// 09/15/14 (Build 5.1.007)
// 03/19/15 (Build 5.1.008)
// 08/01/16 (Build 5.1.011)
// 03/14/17 (Build 5.1.012)
// 05/10/18 (Build 5.1.013)
// 04/01/20 (Build 5.1.015)
// Author: L. Rossman (EPA)
// R. Dickinson (CDM)
//
// Simulation statistics functions.
//
// Build 5.1.007:
// - Exfiltration losses added to storage node statistics.
//
// Build 5.1.008:
// - Support for updating groundwater statistics added.
// - Support for updating maximum reported nodal depths added.
// - OpenMP parallelization applied to updating node and link flow statistics.
// - Updating of time that conduit is upstrm/dnstrm full was modified.
//
// Build 5.1.011:
// - Surcharging is now evaluated only under dynamic wave flow routing and
// storage nodes cannot be classified as surcharged.
//
// Build 5.1.012:
// - Time step statistics now evaluated only in non-steady state periods.
// - Check for full conduit flow now accounts for number of barrels.
//
// Build 5.1.013:
// - Include omp.h protected against lack of compiler support for OpenMP.
// - Statistics on impervious and pervious runoff totals added.
// - Storage nodes with a non-zero surcharge depth (e.g. enclosed tanks)
// can now be classified as being surcharged.
//
// Build 5.1.015:
// - Fixes bug in summary statistics when Report Start date > Start Date.
// - Fixes failure to initialize all subcatchment groundwater statistics.
// - Support added for grouped freqency table of routing time steps.
//-----------------------------------------------------------------------------
#define _CRT_SECURE_NO_DEPRECATE
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "headers.h"
#include "swmm5.h"
#if defined(_OPENMP) //(5.1.013)
#include <omp.h>
#endif
//-----------------------------------------------------------------------------
// Shared variables
//-----------------------------------------------------------------------------
#define MAX_STATS 5
static TSysStats SysStats;
static TMaxStats MaxMassBalErrs[MAX_STATS];
static TMaxStats MaxCourantCrit[MAX_STATS];
static TMaxStats MaxFlowTurns[MAX_STATS];
static double SysOutfallFlow;
//-----------------------------------------------------------------------------
// Exportable variables (shared with statsrpt.c)
//-----------------------------------------------------------------------------
TSubcatchStats* SubcatchStats;
TNodeStats* NodeStats;
TLinkStats* LinkStats;
TStorageStats* StorageStats;
TOutfallStats* OutfallStats;
TPumpStats* PumpStats;
double MaxOutfallFlow;
double MaxRunoffFlow;
//-----------------------------------------------------------------------------
// Imported variables
//-----------------------------------------------------------------------------
extern double* NodeInflow; // defined in massbal.c
extern double* NodeOutflow; // defined in massbal.c
//-----------------------------------------------------------------------------
// External functions (declared in funcs.h)
//-----------------------------------------------------------------------------
// stats_open (called from swmm_start in swmm5.c)
// stats_close (called from swmm_end in swmm5.c)
// stats_report (called from swmm_end in swmm5.c)
// stats_updateSubcatchStats (called from subcatch_getRunoff)
// stats_updateGwaterStats (called from gwater_getGroundwater)
// stats_updateFlowStats (called from routing_execute)
// stats_updateCriticalTimeCount (called from getVariableStep in dynwave.c)
// stats_updateMaxNodeDepth (called from output_saveNodeResults)
//-----------------------------------------------------------------------------
// Local functions
//-----------------------------------------------------------------------------
static void stats_updateNodeStats(int node, double tStep, DateTime aDate);
static void stats_updateLinkStats(int link, double tStep, DateTime aDate);
static void stats_findMaxStats(void);
static void stats_updateMaxStats(TMaxStats maxStats[], int i, int j, double x);
//=============================================================================
int stats_open()
//
// Input: none
// Output: returns an error code
// Purpose: opens the simulation statistics system.
//
{
int j, k;
double timeStepDelta; //(5.1.015)
double logMaxTimeStep; //(5.1.015)
double logMinTimeStep; //(5.1.015)
// --- set all pointers to NULL
NodeStats = NULL;
LinkStats = NULL;
StorageStats = NULL;
OutfallStats = NULL;
PumpStats = NULL;
// --- allocate memory for & initialize subcatchment statistics
SubcatchStats = NULL;
if ( Nobjects[SUBCATCH] > 0 )
{
SubcatchStats = (TSubcatchStats *) calloc(Nobjects[SUBCATCH],
sizeof(TSubcatchStats));
if ( !SubcatchStats )
{
report_writeErrorMsg(ERR_MEMORY, "");
return ErrorCode;
}
for (j=0; j<Nobjects[SUBCATCH]; j++)
{
SubcatchStats[j].precip = 0.0;
SubcatchStats[j].runon = 0.0;
SubcatchStats[j].evap = 0.0;
SubcatchStats[j].infil = 0.0;
SubcatchStats[j].runoff = 0.0;
SubcatchStats[j].maxFlow = 0.0;
SubcatchStats[j].impervRunoff = 0.0; //(5.1.013)
SubcatchStats[j].pervRunoff = 0.0; //
}
for (j=0; j<Nobjects[SUBCATCH]; j++)
{
if ( Subcatch[j].groundwater == NULL ) continue;
Subcatch[j].groundwater->stats.avgUpperMoist = 0.0;
Subcatch[j].groundwater->stats.avgWaterTable = 0.0;
Subcatch[j].groundwater->stats.infil = 0.0;
Subcatch[j].groundwater->stats.latFlow = 0.0;
Subcatch[j].groundwater->stats.deepFlow = 0.0;
Subcatch[j].groundwater->stats.evap = 0.0;
Subcatch[j].groundwater->stats.maxFlow = 0.0;
Subcatch[j].groundwater->stats.finalUpperMoist = 0.0; //(5.1.015)
Subcatch[j].groundwater->stats.finalWaterTable = 0.0; //
}
}
// --- allocate memory for node & link stats
if ( Nobjects[LINK] > 0 )
{
NodeStats = (TNodeStats *) calloc(Nobjects[NODE], sizeof(TNodeStats));
LinkStats = (TLinkStats *) calloc(Nobjects[LINK], sizeof(TLinkStats));
if ( !NodeStats || !LinkStats )
{
report_writeErrorMsg(ERR_MEMORY, "");
return ErrorCode;
}
}
// --- initialize node stats
if ( NodeStats ) for ( j = 0; j < Nobjects[NODE]; j++ )
{
NodeStats[j].avgDepth = 0.0;
NodeStats[j].maxDepth = 0.0;
NodeStats[j].maxDepthDate = StartDateTime;
NodeStats[j].maxRptDepth = 0.0;
NodeStats[j].volFlooded = 0.0;
NodeStats[j].timeFlooded = 0.0;
NodeStats[j].timeSurcharged = 0.0;
NodeStats[j].timeCourantCritical = 0.0;
NodeStats[j].totLatFlow = 0.0;
NodeStats[j].maxLatFlow = 0.0;
NodeStats[j].maxInflow = 0.0;
NodeStats[j].maxOverflow = 0.0;
NodeStats[j].maxPondedVol = 0.0;
NodeStats[j].maxInflowDate = StartDateTime;
NodeStats[j].maxOverflowDate = StartDateTime;
}
// --- initialize link stats
if ( LinkStats ) for ( j = 0; j < Nobjects[LINK]; j++ )
{
LinkStats[j].maxFlow = 0.0;
LinkStats[j].maxVeloc = 0.0;
LinkStats[j].maxDepth = 0.0;
LinkStats[j].timeSurcharged = 0.0;
LinkStats[j].timeFullUpstream = 0.0;
LinkStats[j].timeFullDnstream = 0.0;
LinkStats[j].timeFullFlow = 0.0;
LinkStats[j].timeCapacityLimited = 0.0;
LinkStats[j].timeCourantCritical = 0.0;
for (k=0; k<MAX_FLOW_CLASSES; k++)
LinkStats[j].timeInFlowClass[k] = 0.0;
LinkStats[j].flowTurns = 0;
LinkStats[j].flowTurnSign = 0;
}
// --- allocate memory for & initialize storage unit statistics
if ( Nnodes[STORAGE] > 0 )
{
StorageStats = (TStorageStats *) calloc(Nnodes[STORAGE],
sizeof(TStorageStats));
if ( !StorageStats )
{
report_writeErrorMsg(ERR_MEMORY, "");
return ErrorCode;
}
else for ( k = 0; k < Nobjects[NODE]; k++ )
{
if ( Node[k].type != STORAGE ) continue;
j = Node[k].subIndex;
StorageStats[j].initVol = Node[k].newVolume;
StorageStats[j].avgVol = 0.0;
StorageStats[j].maxVol = 0.0;
StorageStats[j].maxFlow = 0.0;
StorageStats[j].evapLosses = 0.0;
StorageStats[j].exfilLosses = 0.0;
StorageStats[j].maxVolDate = StartDateTime;
}
}
// --- allocate memory for & initialize outfall statistics
if ( Nnodes[OUTFALL] > 0 )
{
OutfallStats = (TOutfallStats *) calloc(Nnodes[OUTFALL],
sizeof(TOutfallStats));
if ( !OutfallStats )
{
report_writeErrorMsg(ERR_MEMORY, "");
return ErrorCode;
}
else for ( j = 0; j < Nnodes[OUTFALL]; j++ )
{
OutfallStats[j].avgFlow = 0.0;
OutfallStats[j].maxFlow = 0.0;
OutfallStats[j].totalPeriods = 0;
if ( Nobjects[POLLUT] > 0 )
{
OutfallStats[j].totalLoad =
(double *) calloc(Nobjects[POLLUT], sizeof(double));
if ( !OutfallStats[j].totalLoad )
{
report_writeErrorMsg(ERR_MEMORY, "");
return ErrorCode;
}
for (k=0; k<Nobjects[POLLUT]; k++)
OutfallStats[j].totalLoad[k] = 0.0;
}
else OutfallStats[j].totalLoad = NULL;
}
}
// --- allocate memory & initialize pumping statistics
if ( Nlinks[PUMP] > 0 )
{
PumpStats = (TPumpStats *) calloc(Nlinks[PUMP], sizeof(TPumpStats));
if ( !PumpStats )
{
report_writeErrorMsg(ERR_MEMORY, "");
return ErrorCode;
}
else for ( j = 0; j < Nlinks[PUMP]; j++ )
{
PumpStats[j].utilized = 0.0;
PumpStats[j].minFlow = 0.0;
PumpStats[j].avgFlow = 0.0;
PumpStats[j].maxFlow = 0.0;
PumpStats[j].volume = 0.0;
PumpStats[j].energy = 0.0;
PumpStats[j].startUps = 0;
PumpStats[j].offCurveLow = 0.0;
PumpStats[j].offCurveHigh = 0.0;
}
}
// --- initialize system stats
MaxRunoffFlow = 0.0;
MaxOutfallFlow = 0.0;
SysStats.maxTimeStep = 0.0;
SysStats.minTimeStep = RouteStep;
SysStats.avgTimeStep = 0.0;
SysStats.avgStepCount = 0.0;
SysStats.steadyStateCount = 0.0;
// --- divide range between min and max routing time steps into //(5.1.015)
// equal intervals using a logarithmic scale //
logMaxTimeStep = log10(RouteStep); //
logMinTimeStep = log10(MinRouteStep); //
timeStepDelta = (logMaxTimeStep - logMinTimeStep) / (TIMELEVELS-1); //
SysStats.timeStepIntervals[0] = RouteStep; //
for (j = 1; j < TIMELEVELS; j++) //
{ //
SysStats.timeStepIntervals[j] = //
pow(10., logMaxTimeStep - j * timeStepDelta); //
SysStats.timeStepCounts[j] = 0; //
} //
SysStats.timeStepIntervals[TIMELEVELS - 1] = MinRouteStep; //
return 0;
}
//=============================================================================
void stats_close()
//
//  Input:   none
//  Output:  none
//  Purpose: frees all memory used by the simulation statistics system.
//
{
    int i;

    // --- release the per-object statistics arrays
    FREE(SubcatchStats);
    FREE(NodeStats);
    FREE(LinkStats);
    FREE(StorageStats);

    // --- each outfall owns a per-pollutant load array that must be
    //     released before the outfall stats array itself
    if ( OutfallStats )
    {
        for ( i = 0; i < Nnodes[OUTFALL]; i++ )
        {
            FREE(OutfallStats[i].totalLoad);
        }
        FREE(OutfallStats);
    }
    FREE(PumpStats);
}
//=============================================================================
void stats_report()
//
//  Input:   none
//  Output:  none
//  Purpose: reports simulation statistics.
//
{
    // --- routing accuracy stats only apply when flow was actually routed
    //     through at least one link
    if ( RouteModel != NO_ROUTING && Nobjects[LINK] > 0 )
    {
        stats_findMaxStats();
        report_writeMaxStats(MaxMassBalErrs, MaxCourantCrit, MAX_STATS);
        report_writeMaxFlowTurns(MaxFlowTurns, MAX_STATS);
        report_writeSysStats(&SysStats);
    }

    // --- the summary statistics report is always written
    statsrpt_writeReport();
}
//=============================================================================
void stats_updateSubcatchStats(int j, double rainVol, double runonVol,
double evapVol, double infilVol,
double impervVol, double pervVol,
double runoffVol, double runoff)
//
// Input: j = subcatchment index
// rainVol = rainfall + snowfall volume (ft3)
// runonVol = runon volume from other subcatchments (ft3)
// evapVol = evaporation volume (ft3)
// infilVol = infiltration volume (ft3)
// impervVol = impervious runoff volume (ft3)
// pervVol = pervious runoff volume (ft3)
// runoffVol = runoff volume (ft3)
// runoff = runoff rate (cfs)
// Output: none
// Purpose: updates totals of runoff components for a specific subcatchment.
//
{
SubcatchStats[j].precip += rainVol;
SubcatchStats[j].runon += runonVol;
SubcatchStats[j].evap += evapVol;
SubcatchStats[j].infil += infilVol;
SubcatchStats[j].runoff += runoffVol;
SubcatchStats[j].maxFlow = MAX(SubcatchStats[j].maxFlow, runoff);
SubcatchStats[j].impervRunoff += impervVol; //(5.1.013)
SubcatchStats[j].pervRunoff += pervVol; //
}
//=============================================================================
void stats_updateGwaterStats(int j, double infil, double evap, double latFlow,
                             double deepFlow, double theta, double waterTable,
                             double tStep)
//
//  Input:   j = subcatchment index
//           infil, evap, latFlow, deepFlow = groundwater flow rates
//           theta = upper zone moisture content
//           waterTable = water table elevation
//           tStep = time step (sec)
//  Output:  none
//  Purpose: updates groundwater statistics for a subcatchment.
//
{
    // --- time-weighted sums used later to form averages
    Subcatch[j].groundwater->stats.avgUpperMoist += theta * tStep;
    Subcatch[j].groundwater->stats.avgWaterTable += waterTable * tStep;

    // --- flow volume totals (rate x time step)
    Subcatch[j].groundwater->stats.infil    += infil * tStep;
    Subcatch[j].groundwater->stats.evap     += evap * tStep;
    Subcatch[j].groundwater->stats.latFlow  += latFlow * tStep;
    Subcatch[j].groundwater->stats.deepFlow += deepFlow * tStep;

    // --- latest values become the end-of-simulation values
    Subcatch[j].groundwater->stats.finalUpperMoist = theta;
    Subcatch[j].groundwater->stats.finalWaterTable = waterTable;

    // --- track the lateral flow of largest magnitude (sign preserved)
    if ( fabs(latFlow) > fabs(Subcatch[j].groundwater->stats.maxFlow) )
        Subcatch[j].groundwater->stats.maxFlow = latFlow;
}
//=============================================================================
void stats_updateMaxRunoff()
//
//  Input:   none
//  Output:  updates global variable MaxRunoffFlow
//  Purpose: updates value of maximum system runoff rate.
//
{
    int    i;
    double totalRunoff = 0.0;

    // --- sum the current runoff over all subcatchments
    for (i = 0; i < Nobjects[SUBCATCH]; i++)
        totalRunoff += Subcatch[i].newRunoff;

    // --- keep the largest system-wide value seen so far
    if ( totalRunoff > MaxRunoffFlow ) MaxRunoffFlow = totalRunoff;
}
//=============================================================================
void stats_updateMaxNodeDepth(int j, double depth)
//
//  Input:   j = node index
//           depth = water depth at node at current reporting time (ft)
//  Output:  none
//  Purpose: updates a node's maximum depth recorded at reporting times.
//
{
    // --- nothing to record when node stats were never allocated
    if ( NodeStats == NULL ) return;
    if ( depth > NodeStats[j].maxRptDepth ) NodeStats[j].maxRptDepth = depth;
}
//=============================================================================
void stats_updateFlowStats(double tStep, DateTime aDate, int stepCount,
                           int steadyState)
//
//  Input:   tStep = routing time step (sec)
//           aDate = current date/time
//           stepCount = # steps required to solve routing at current time period
//           steadyState = TRUE if steady flow conditions exist
//  Output:  none
//  Purpose: updates various flow routing statistics at current time period.
//
{
    int   j;

    // --- update stats only after reporting period begins
    if ( aDate < ReportStart ) return;
    SysOutfallFlow = 0.0;

    // --- update node & link stats in parallel
    //     (the index variable of each "omp for" loop is implicitly
    //      private per the OpenMP spec, so declaring j above is safe)
#pragma omp parallel num_threads(NumThreads)
{
    #pragma omp for
    for ( j=0; j<Nobjects[NODE]; j++ )
        stats_updateNodeStats(j, tStep, aDate);
    #pragma omp for
    for ( j=0; j<Nobjects[LINK]; j++ )
        stats_updateLinkStats(j, tStep, aDate);
}

    // --- update count of times in steady state
    //     (steadyState is 0/1, so += counts steady periods)
    ReportStepCount++;
    SysStats.steadyStateCount += steadyState;

    // --- update time step stats if not in steady state
    if ( steadyState == FALSE )
    {
        // --- skip initial time step for min. value
        if ( OldRoutingTime > 0 )
        {
            SysStats.minTimeStep = MIN(SysStats.minTimeStep, tStep);

            // --- locate interval that logged time step falls in           //(5.1.015)
            //     and update its count                                     //
            for (j = 1; j < TIMELEVELS; j++)                                //
                if (tStep >= SysStats.timeStepIntervals[j])                 //
                {                                                           //
                    SysStats.timeStepCounts[j]++;                           //
                    break;                                                  //
                }                                                           //
        }
        SysStats.avgTimeStep += tStep;
        SysStats.maxTimeStep = MAX(SysStats.maxTimeStep, tStep);

        // --- update iteration step count stats
        SysStats.avgStepCount += stepCount;
    }

    // --- update max. system outfall flow
    //     (SysOutfallFlow was accumulated by stats_updateNodeStats above)
    MaxOutfallFlow = MAX(MaxOutfallFlow, SysOutfallFlow);
}
//=============================================================================
void stats_updateCriticalTimeCount(int node, int link)
//
//  Input:   node = node index
//           link = link index
//  Output:  none
//  Purpose: updates count of times a node or link was time step-critical.
//
{
    // --- a non-negative node index takes precedence over the link index
    if ( node >= 0 )
    {
        NodeStats[node].timeCourantCritical += 1.0;
        return;
    }
    if ( link >= 0 ) LinkStats[link].timeCourantCritical += 1.0;
}
//=============================================================================
void stats_updateNodeStats(int j, double tStep, DateTime aDate)
//
//  Input:   j = node index
//           tStep = routing time step (sec)
//           aDate = current date/time
//  Output:  none
//  Purpose: updates flow statistics for a node.
//
//  NOTE(review): this function runs inside an OpenMP parallel loop (see
//  stats_updateFlowStats); the update of the shared global SysOutfallFlow
//  below is unguarded -- confirm whether a reduction/atomic is needed.
//
//  Change: removed the local yCrown, which was computed but never used.
//
{
    int    k, p;
    double newVolume = Node[j].newVolume;
    double newDepth = Node[j].newDepth;
    int    canPond = (AllowPonding && Node[j].pondedArea > 0.0);

    // --- update depth statistics
    NodeStats[j].avgDepth += newDepth;
    if ( newDepth > NodeStats[j].maxDepth )
    {
        NodeStats[j].maxDepth = newDepth;
        NodeStats[j].maxDepthDate = aDate;
    }

    // --- update flooding, ponding, and surcharge statistics
    if ( Node[j].type != OUTFALL )
    {
        if ( newVolume > Node[j].fullVolume || Node[j].overflow > 0.0 )
        {
            NodeStats[j].timeFlooded += tStep;
            NodeStats[j].volFlooded += Node[j].overflow * tStep;
            if ( canPond ) NodeStats[j].maxPondedVol =
                MAX(NodeStats[j].maxPondedVol,
                    (newVolume - Node[j].fullVolume));
        }

        // --- for dynamic wave routing, classify a node as                //(5.1.013)
        //     surcharged if its water level exceeds its crown elev.
        if (RouteModel == DW)                                              //(5.1.013)
        {
            if ((Node[j].type != STORAGE || Node[j].surDepth > 0.0) &&     //(5.1.013)
                newDepth + Node[j].invertElev + FUDGE >= Node[j].crownElev)
            {
                NodeStats[j].timeSurcharged += tStep;
            }
        }
    }

    // --- update storage statistics
    if ( Node[j].type == STORAGE )
    {
        k = Node[j].subIndex;
        StorageStats[k].avgVol += newVolume;
        StorageStats[k].evapLosses +=
            Storage[Node[j].subIndex].evapLoss;
        StorageStats[k].exfilLosses +=
            Storage[Node[j].subIndex].exfilLoss;

        // --- max. recorded volume is capped at the full volume
        newVolume = MIN(newVolume, Node[j].fullVolume);
        if ( newVolume > StorageStats[k].maxVol )
        {
            StorageStats[k].maxVol = newVolume;
            StorageStats[k].maxVolDate = aDate;
        }
        StorageStats[k].maxFlow = MAX(StorageStats[k].maxFlow, Node[j].outflow);
    }

    // --- update outfall statistics
    if ( Node[j].type == OUTFALL )
    {
        k = Node[j].subIndex;

        // --- negligible inflows are excluded from the flow averages
        if ( Node[j].inflow >= MIN_RUNOFF_FLOW )
        {
            OutfallStats[k].avgFlow += Node[j].inflow;
            OutfallStats[k].maxFlow = MAX(OutfallStats[k].maxFlow, Node[j].inflow);
            OutfallStats[k].totalPeriods++;
        }

        // --- but pollutant loads accumulate for every inflow
        for (p=0; p<Nobjects[POLLUT]; p++)
        {
            OutfallStats[k].totalLoad[p] += Node[j].inflow *
                Node[j].newQual[p] * tStep;
        }
        SysOutfallFlow += Node[j].inflow;
    }

    // --- update inflow statistics
    //     (lateral flow volume uses the trapezoidal rule over the step)
    NodeStats[j].totLatFlow += ( (Node[j].oldLatFlow + Node[j].newLatFlow) *
                                 0.5 * tStep );
    if ( fabs(Node[j].newLatFlow) > fabs(NodeStats[j].maxLatFlow) )
        NodeStats[j].maxLatFlow = Node[j].newLatFlow;
    if ( Node[j].inflow > NodeStats[j].maxInflow )
    {
        NodeStats[j].maxInflow = Node[j].inflow;
        NodeStats[j].maxInflowDate = aDate;
    }

    // --- update overflow statistics
    if ( Node[j].overflow > NodeStats[j].maxOverflow )
    {
        NodeStats[j].maxOverflow = Node[j].overflow;
        NodeStats[j].maxOverflowDate = aDate;
    }
}
//=============================================================================
void stats_updateLinkStats(int j, double tStep, DateTime aDate)
//
//  Input:   j = link index
//           tStep = routing time step (sec)
//           aDate = current date/time
//  Output:  none
//  Purpose: updates flow statistics for a link.
//
{
    int    k;
    double q, v;
    double dq;

    // --- update max. flow (magnitude only; direction is ignored)
    dq = Link[j].newFlow - Link[j].oldFlow;
    q = fabs(Link[j].newFlow);
    if ( q > LinkStats[j].maxFlow )
    {
        LinkStats[j].maxFlow = q;
        LinkStats[j].maxFlowDate = aDate;
    }

    // --- update max. velocity
    v = link_getVelocity(j, q, Link[j].newDepth);
    if ( v > LinkStats[j].maxVeloc )
    {
        LinkStats[j].maxVeloc = v;
    }

    // --- update max. depth
    if ( Link[j].newDepth > LinkStats[j].maxDepth )
    {
        LinkStats[j].maxDepth = Link[j].newDepth;
    }

    if ( Link[j].type == PUMP )
    {
        if ( q >= Link[j].qFull )
            LinkStats[j].timeFullFlow += tStep;

        // --- pump stats only accumulate while the pump is running
        //     (flow above the negligible-flow threshold)
        if ( q > MIN_RUNOFF_FLOW )
        {
            k = Link[j].subIndex;
            PumpStats[k].minFlow = MIN(PumpStats[k].minFlow, q);
            PumpStats[k].maxFlow = LinkStats[j].maxFlow;
            PumpStats[k].avgFlow += q;
            PumpStats[k].volume += q*tStep;
            PumpStats[k].utilized += tStep;
            PumpStats[k].energy += link_getPower(j)*tStep/3600.0;
            if ( Link[j].flowClass == DN_DRY )
                PumpStats[k].offCurveLow += tStep;
            if ( Link[j].flowClass == UP_DRY )
                PumpStats[k].offCurveHigh += tStep;
            // --- a transition from negligible to positive flow is a startup
            if ( Link[j].oldFlow < MIN_RUNOFF_FLOW )
                PumpStats[k].startUps++;
            PumpStats[k].totalPeriods++;
            LinkStats[j].timeSurcharged += tStep;
            LinkStats[j].timeFullUpstream += tStep;
            LinkStats[j].timeFullDnstream += tStep;
        }
    }
    else if ( Link[j].type == CONDUIT )
    {
        // --- update time under normal flow & inlet control
        if ( Link[j].normalFlow ) LinkStats[j].timeNormalFlow += tStep;
        if ( Link[j].inletControl ) LinkStats[j].timeInletControl += tStep;

        // --- update flow classification distribution
        //     (a step count, unlike the tStep-weighted time stats --
        //      presumably normalized when reported; confirm in statsrpt)
        k = Link[j].flowClass;
        if ( k >= 0 && k < MAX_FLOW_CLASSES )
        {
            ++LinkStats[j].timeInFlowClass[k];
        }

        // --- update time conduit is full
        k = Link[j].subIndex;
        if ( q >= Link[j].qFull * (double)Conduit[k].barrels )
            LinkStats[j].timeFullFlow += tStep;
        if ( Conduit[k].capacityLimited )
            LinkStats[j].timeCapacityLimited += tStep;
        switch (Conduit[k].fullState)
        {
        case ALL_FULL:
            LinkStats[j].timeSurcharged += tStep;
            LinkStats[j].timeFullUpstream += tStep;
            LinkStats[j].timeFullDnstream += tStep;
            break;
        case UP_FULL:
            LinkStats[j].timeFullUpstream += tStep;
            break;
        case DN_FULL:
            LinkStats[j].timeFullDnstream += tStep;
        }
    }

    // --- update flow turn count: a turn is a sign change in the flow
    //     increment dq (above a small tolerance) between successive steps
    k = LinkStats[j].flowTurnSign;
    LinkStats[j].flowTurnSign = SGN(dq);
    if ( fabs(dq) > 0.001 && k * LinkStats[j].flowTurnSign < 0 )
        LinkStats[j].flowTurns++;
}
//=============================================================================
void stats_findMaxStats()
//
//  Input:   none
//  Output:  none
//  Purpose: finds nodes & links with highest mass balance errors
//           & highest times Courant time-step critical.
//
{
    int    j;
    double x;
    // --- number of non-steady reporting periods                          //(5.1.015)
    double stepCount = ReportStepCount - SysStats.steadyStateCount;        //(5.1.015)

    // --- initialize max. stats arrays
    //     (index -1 / value -1 mark empty slots; objType of the other two
    //      arrays is filled in by stats_updateMaxStats on insertion)
    for (j=0; j<MAX_STATS; j++)
    {
        MaxMassBalErrs[j].objType = NODE;
        MaxMassBalErrs[j].index   = -1;
        MaxMassBalErrs[j].value   = -1.0;
        MaxCourantCrit[j].index   = -1;
        MaxCourantCrit[j].value   = -1.0;
        MaxFlowTurns[j].index     = -1;
        MaxFlowTurns[j].value     = -1.0;
    }

    // --- find links with most flow turns
    //     (percentage of the theoretical max. of 2/3 of the steps)
    if ( stepCount > 2 )                                                   //(5.1.015)
    {
        for (j=0; j<Nobjects[LINK]; j++)
        {
            x = 100.0 * LinkStats[j].flowTurns / (2./3.*(stepCount-2));    //(5.1.015)
            stats_updateMaxStats(MaxFlowTurns, LINK, j, x);
        }
    }

    // --- find nodes with largest mass balance errors
    for (j=0; j<Nobjects[NODE]; j++)
    {
        // --- skip terminal nodes and nodes with negligible inflow
        if ( Node[j].degree <= 0  ) continue;
        if ( NodeInflow[j] <= 0.1 ) continue;

        // --- evaluate mass balance error
        //     (Note: NodeInflow & NodeOutflow include any initial and final
        //            stored volumes, respectively).
        if ( NodeInflow[j]  > 0.0 )
            x = 1.0 - NodeOutflow[j] / NodeInflow[j];
        else if ( NodeOutflow[j] > 0.0 ) x = -1.0;
        else                             x = 0.0;
        stats_updateMaxStats(MaxMassBalErrs, NODE, j, 100.0*x);
    }

    // --- stop if not using a variable time step
    if ( RouteModel != DW || CourantFactor == 0.0 ) return;

    // --- find nodes most frequently Courant critical
    if ( stepCount == 0 ) return;                                          //(5.1.015)
    for (j=0; j<Nobjects[NODE]; j++)
    {
        x = NodeStats[j].timeCourantCritical / stepCount;                  //(5.1.015)
        stats_updateMaxStats(MaxCourantCrit, NODE, j, 100.0*x);
    }

    // --- find links most frequently Courant critical
    for (j=0; j<Nobjects[LINK]; j++)
    {
        x = LinkStats[j].timeCourantCritical / stepCount;                  //(5.1.015)
        stats_updateMaxStats(MaxCourantCrit, LINK, j, 100.0*x);
    }
}
//=============================================================================
void stats_updateMaxStats(TMaxStats maxStats[], int i, int j, double x)
//
//  Input:   maxStats[] = array of critical statistics values
//           i = object category (NODE or LINK)
//           j = object index
//           x = value of statistic for the object
//  Output:  none
//  Purpose: updates the collection of most critical statistics
//
{
    int       n;
    TMaxStats candidate;
    TMaxStats displaced;

    candidate.objType = i;
    candidate.index   = j;
    candidate.value   = x;

    // --- walk the array (kept sorted by decreasing absolute value);
    //     once the candidate out-ranks an entry it takes that slot and
    //     the displaced entry is carried along, shifting every lower-
    //     ranked entry down one position
    for (n = 0; n < MAX_STATS; n++)
    {
        if ( fabs(candidate.value) > fabs(maxStats[n].value) )
        {
            displaced   = maxStats[n];
            maxStats[n] = candidate;
            candidate   = displaced;
        }
    }
}
|
tgen-openmp.c | #include <math.h>
#include "m-array.h"
M_ARRAY_DEF(double, double)
int main(void) {
  const int n = 250000000;
  const double x = 0.001;
  M_LET(v, M_ARRAY_OPLIST(double)) {
    array_double_resize(v, n);
    /* Fill v[i] = cos(i * x) in parallel; iterations are independent.
     * The loop bound uses n (the size the array was just resized to)
     * instead of array_double_size(v): this avoids a signed/unsigned
     * comparison between int i and the size type, and a repeated size
     * query in the loop condition. */
    #pragma omp parallel for
    for (int i = 0; i < n; ++i) {
      array_double_set_at(v, i, cos(i * x));
    }
  }
  return 0;
}
|
sort.c | /*
This file is part of HiParTI!.
HiParTI! is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
HiParTI! is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with HiParTI!.
If not, see <http://www.gnu.org/licenses/>.
*/
#include <assert.h>
#include <math.h>
#include <time.h>
#include <HiParTI.h>
static const uint32_t MASKS[] = {0x55555555, 0x33333333, 0x0F0F0F0F, 0x00FF00FF};
static const uint32_t SHIFTS[] = {1, 2, 4, 8};
/* Swaps the nonzeros stored at positions ind1 and ind2 of a COO sparse
 * matrix: the row index, column index, and value are each exchanged. */
void pti_SwapValuesMat(ptiSparseMatrix *mtx, ptiNnzIndex ind1, ptiNnzIndex ind2) {
    ptiIndex tmp_idx;
    ptiValue tmp_val;

    /* exchange row indices */
    tmp_idx = mtx->rowind.data[ind1];
    mtx->rowind.data[ind1] = mtx->rowind.data[ind2];
    mtx->rowind.data[ind2] = tmp_idx;

    /* exchange column indices */
    tmp_idx = mtx->colind.data[ind1];
    mtx->colind.data[ind1] = mtx->colind.data[ind2];
    mtx->colind.data[ind2] = tmp_idx;

    /* exchange the stored values */
    tmp_val = mtx->values.data[ind1];
    mtx->values.data[ind1] = mtx->values.data[ind2];
    mtx->values.data[ind2] = tmp_val;
}
/* Compare functions */
int pti_SparseMatrixCompareIndicesMorton2D(
ptiSparseMatrix * const mtx1,
uint64_t loc1,
ptiSparseMatrix * const mtx2,
uint64_t loc2,
ptiElementIndex sb_bits)
{
uint64_t mkey1 = 0, mkey2 = 0;
/* Only support 3-D tensors, with 32-bit indices. */
uint32_t x1 = mtx1->rowind.data[loc1];
uint32_t y1 = mtx1->colind.data[loc1];
uint32_t x2 = mtx2->rowind.data[loc2];
uint32_t y2 = mtx2->colind.data[loc2];
/* Compare block indices */
ptiIndex blk_x1 = x1 >> sb_bits;
ptiIndex blk_y1 = y1 >> sb_bits;
ptiIndex blk_x2 = x2 >> sb_bits;
ptiIndex blk_y2 = y2 >> sb_bits;
if(blk_x1 < blk_x2) {
return -1;
} else if(blk_x1 > blk_x2) {
return 1;
} else if(blk_y1 < blk_y2) { // if blk_x1 == blk_x2
return -1;
} else if(blk_y1 > blk_y2) { // if blk_x1 == blk_x2
return 1;
}
/* blk_x1 == blk_x2, blk_y1 == blk_y2, sort inside a block in Z-Morton order */
uint64_t x = x1 - (blk_x1 << sb_bits);
uint64_t y = y1 - (blk_y1 << sb_bits);
x = (x | (x << SHIFTS[3])) & MASKS[3];
x = (x | (x << SHIFTS[2])) & MASKS[2];
x = (x | (x << SHIFTS[1])) & MASKS[1];
x = (x | (x << SHIFTS[0])) & MASKS[0];
y = (y | (y << SHIFTS[3])) & MASKS[3];
y = (y | (y << SHIFTS[2])) & MASKS[2];
y = (y | (y << SHIFTS[1])) & MASKS[1];
y = (y | (y << SHIFTS[0])) & MASKS[0];
mkey1 = y | (x << 1);
x = x2 - (blk_x2 << sb_bits);
y = y2 - (blk_y2 << sb_bits);
x = (x | (x << SHIFTS[3])) & MASKS[3];
x = (x | (x << SHIFTS[2])) & MASKS[2];
x = (x | (x << SHIFTS[1])) & MASKS[1];
x = (x | (x << SHIFTS[0])) & MASKS[0];
y = (y | (y << SHIFTS[3])) & MASKS[3];
y = (y | (y << SHIFTS[2])) & MASKS[2];
y = (y | (y << SHIFTS[1])) & MASKS[1];
y = (y | (y << SHIFTS[0])) & MASKS[0];
mkey2 = y | (x << 1);
if(mkey1 < mkey2) {
return -1;
} else if(mkey1 > mkey2) {
return 1;
} else {
return 0;
}
}
/* Compares two nonzeros on a single index mode (0 = row, 1 = column).
 * Returns -1 / 0 / 1.  Any other mode value is a caller error and
 * compares as "equal"; previously it read uninitialized locals, which
 * is undefined behavior. */
int pti_SparseMatrixCompareIndicesSingleMode(ptiSparseMatrix * const mtx1, ptiNnzIndex loc1, ptiSparseMatrix * const mtx2, ptiNnzIndex loc2, ptiIndex const mode)
{
    ptiIndex eleind1, eleind2;

    if (mode == 0) {            /* compare on the row index */
        eleind1 = mtx1->rowind.data[loc1];
        eleind2 = mtx2->rowind.data[loc2];
    } else if (mode == 1) {     /* compare on the column index */
        eleind1 = mtx1->colind.data[loc1];
        eleind2 = mtx2->colind.data[loc2];
    } else {
        /* a matrix only has modes 0 and 1 */
        return 0;
    }

    if(eleind1 < eleind2) {
        return -1;
    } else if(eleind1 > eleind2) {
        return 1;
    }
    return 0;
}
/* Compares two nonzeros by their block coordinates: row block first
 * (indices shifted right by sk_bits), column block as the tie-breaker.
 * Nonzeros in the same (row, column) block compare equal.
 * Returns -1 / 0 / 1. */
int pti_SparseMatrixCompareIndicesRowBlock(
    ptiSparseMatrix * const mtx1,
    ptiNnzIndex loc1,
    ptiSparseMatrix * const mtx2,
    ptiNnzIndex loc2,
    ptiElementIndex sk_bits)
{
    /* row-block comparison first */
    ptiIndex blk1 = mtx1->rowind.data[loc1] >> sk_bits;
    ptiIndex blk2 = mtx2->rowind.data[loc2] >> sk_bits;
    if(blk1 != blk2) {
        return blk1 < blk2 ? -1 : 1;
    }

    /* tie-break on the column block */
    blk1 = mtx1->colind.data[loc1] >> sk_bits;
    blk2 = mtx2->colind.data[loc2] >> sk_bits;
    if(blk1 != blk2) {
        return blk1 < blk2 ? -1 : 1;
    }

    return 0;
}
/* Quick sort functions */
/* Recursive quicksort of the nonzeros in [l, r) into block Z-Morton
 * order.  Uses a Hoare-style partition around the middle element; the
 * pivot position p is tracked through swaps so comparisons always use
 * the pivot's current location.  One recursion half is spawned as an
 * OpenMP task, the other is sorted by the current thread, and taskwait
 * joins the spawned work -- callers must invoke this from inside a
 * parallel region with a single seeding thread (see
 * ptiSparseMatrixSortIndexMorton). */
static void pti_QuickSortIndexMorton2D(ptiSparseMatrix *mtx, ptiNnzIndex l, ptiNnzIndex r, ptiElementIndex sb_bits)
{
    uint64_t i, j, p;
    if(r-l < 2) {
        return;
    }
    p = (l+r) / 2;
    for(i = l, j = r-1; ; ++i, --j) {
        /* advance i past elements that sort before the pivot */
        while(pti_SparseMatrixCompareIndicesMorton2D(mtx, i, mtx, p, sb_bits) < 0) {
            ++i;
        }
        /* retreat j past elements that sort after the pivot */
        while(pti_SparseMatrixCompareIndicesMorton2D(mtx, p, mtx, j, sb_bits) < 0) {
            --j;
        }
        if(i >= j) {
            break;
        }
        pti_SwapValuesMat(mtx, i, j);
        /* keep following the pivot element if the swap moved it */
        if(i == p) {
            p = j;
        } else if(j == p) {
            p = i;
        }
    }
    #pragma omp task firstprivate(l,i) shared(mtx)
    {
        pti_QuickSortIndexMorton2D(mtx, l, i, sb_bits);
    }
    pti_QuickSortIndexMorton2D(mtx, i, r, sb_bits);
    #pragma omp taskwait
}
/* Recursive quicksort of the nonzeros in [l, r) ordered on a single
 * index mode (0 = row, 1 = column).  Same task-parallel Hoare-partition
 * scheme as pti_QuickSortIndexMorton2D: the pivot position p is tracked
 * through swaps, one half is spawned as an OpenMP task, and taskwait
 * joins it.  Must be called from inside a parallel region with a single
 * seeding thread (see ptiSparseMatrixSortIndexSingleMode). */
static void pti_QuickSortIndexSingleMode(ptiSparseMatrix *mtx, ptiNnzIndex l, ptiNnzIndex r, ptiIndex mode)
{
    ptiNnzIndex i, j, p;
    if(r-l < 2) {
        return;
    }
    p = (l+r) / 2;
    for(i = l, j = r-1; ; ++i, --j) {
        /* advance i past elements that sort before the pivot */
        while(pti_SparseMatrixCompareIndicesSingleMode(mtx, i, mtx, p, mode) < 0) {
            ++i;
        }
        /* retreat j past elements that sort after the pivot */
        while(pti_SparseMatrixCompareIndicesSingleMode(mtx, p, mtx, j, mode) < 0) {
            --j;
        }
        if(i >= j) {
            break;
        }
        pti_SwapValuesMat(mtx, i, j);
        /* keep following the pivot element if the swap moved it */
        if(i == p) {
            p = j;
        } else if(j == p) {
            p = i;
        }
    }
    #pragma omp task firstprivate(l,i) shared(mtx, mode)
    {
        pti_QuickSortIndexSingleMode(mtx, l, i, mode);
    }
    pti_QuickSortIndexSingleMode(mtx, i, r, mode);
    #pragma omp taskwait
}
/* Recursive quicksort of the nonzeros in [l, r) into row-block-major
 * order (blocks of 2^sk_bits rows, column block as tie-breaker).  Same
 * task-parallel Hoare-partition scheme as the other sorters here: the
 * pivot position p is tracked through swaps, one half is spawned as an
 * OpenMP task, and taskwait joins it.  Must be called from inside a
 * parallel region with a single seeding thread (see
 * ptiSparseMatrixSortIndexRowBlock). */
static void pti_QuickSortIndexRowBlock(ptiSparseMatrix *mtx, ptiNnzIndex l, ptiNnzIndex r, ptiElementIndex sk_bits)
{
    ptiNnzIndex i, j, p;
    if(r-l < 2) {
        return;
    }
    p = (l+r) / 2;
    for(i = l, j = r-1; ; ++i, --j) {
        /* advance i past elements that sort before the pivot */
        while(pti_SparseMatrixCompareIndicesRowBlock(mtx, i, mtx, p, sk_bits) < 0) {
            ++i;
        }
        /* retreat j past elements that sort after the pivot */
        while(pti_SparseMatrixCompareIndicesRowBlock(mtx, p, mtx, j, sk_bits) < 0) {
            --j;
        }
        if(i >= j) {
            break;
        }
        pti_SwapValuesMat(mtx, i, j);
        /* keep following the pivot element if the swap moved it */
        if(i == p) {
            p = j;
        } else if(j == p) {
            p = i;
        }
    }
    #pragma omp task firstprivate(l,i) shared(mtx, sk_bits)
    {
        pti_QuickSortIndexRowBlock(mtx, l, i, sk_bits);
    }
    pti_QuickSortIndexRowBlock(mtx, i, r, sk_bits);
    #pragma omp taskwait
}
/****************************
* Sorting functions
****************************/
/* Sorts the nonzeros in [begin, end) into block Z-Morton order
 * (blocks of 2^sb_bits).  No-op unless force is nonzero. */
void ptiSparseMatrixSortIndexMorton(
    ptiSparseMatrix *mtx,
    int force,
    ptiNnzIndex begin,
    ptiNnzIndex end,
    ptiElementIndex sb_bits)
{
    if(!force) {
        return;
    }
    /* one thread seeds the task-parallel quicksort; the rest of the
       team executes the tasks it spawns */
    #pragma omp parallel
    {
        #pragma omp single nowait
        {
            pti_QuickSortIndexMorton2D(mtx, begin, end, sb_bits);
        }
    }
}
/* Sorts all nonzeros on a single index mode (0 = row, 1 = column) using
 * at most tk threads.  No-op unless force is nonzero. */
void ptiSparseMatrixSortIndexSingleMode(ptiSparseMatrix *mtx, int force, ptiIndex mode, int tk)
{
    if(!force) {
        return;
    }
    /* one thread seeds the task-parallel quicksort; the rest of the
       team executes the tasks it spawns */
    #pragma omp parallel num_threads(tk)
    {
        #pragma omp single nowait
        {
            pti_QuickSortIndexSingleMode(mtx, 0, mtx->nnz, mode);
        }
    }
}
/**
* Reorder the elements in a COO sparse matrix lexicographically, sorting by row major order.
* @param mtx the sparse matrix to operate on
*/
void ptiSparseMatrixSortIndexRowBlock(
    ptiSparseMatrix *mtx,
    int force,
    ptiNnzIndex begin,
    ptiNnzIndex end,
    ptiElementIndex sk_bits)
{
    if(!force) {
        return;
    }
    /* one thread seeds the task-parallel quicksort; the rest of the
       team executes the tasks it spawns */
    #pragma omp parallel
    {
        #pragma omp single nowait
        {
            pti_QuickSortIndexRowBlock(mtx, begin, end, sk_bits);
        }
    }
}
/**
* Randomly shuffle all indices.
*
* @param[in] mtx matrix to be shuffled
* @param[out] map_inds records the randomly generated mapping
*
*/
void ptiGetRandomShuffledIndicesMat(ptiSparseMatrix *mtx, ptiIndex ** map_inds)
{
    /* Seed the PRNG once, before the shuffle.  The previous code called
     * srand(m+i+1+time(NULL)) inside the loop: time(NULL) only changes
     * once per second, so successive iterations were reseeded with
     * consecutive, nearly identical seeds, making the "random" swaps
     * close to deterministic (and slow). */
    srand((unsigned) time(NULL));

    /* Fisher-Yates shuffle of the row (m == 0) and column (m == 1)
     * renumbering maps. */
    for(ptiIndex m = 0; m < 2; ++m) {
        ptiIndex dim_len = 0;
        if (m == 0) dim_len = mtx->nrows;
        else if (m == 1) dim_len = mtx->ncols;

        for(long int i = dim_len - 1; i > 0; --i) {
            ptiIndex new_loc = (ptiIndex) (rand() % (i+1));
            /* Swap i <-> new_loc */
            ptiIndex tmp = map_inds[m][i];
            map_inds[m][i] = map_inds[m][new_loc];
            map_inds[m][new_loc] = tmp;
        }
    }
}
VolumetricReplicationPadding.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/VolumetricReplicationPadding.c"
#else
/* Validates input (and optionally gradOutput) shapes for volumetric
 * replication padding.  input must be 4D (C,D,H,W) or 5D (N,C,D,H,W);
 * each padded output dimension must be at least 1; when gradOutput is
 * non-NULL its slice count and spatial sizes must match the computed
 * output sizes.
 * Fixes: the output-size check previously used "||" (accepting an input
 * as long as ANY single output dim was valid) -- all three must hold;
 * the long-typed sizes were formatted with %d (undefined behavior on
 * LP64) -- now %ld; the slice-count message wrongly said "width". */
static inline void THNN_(VolumetricReplicationPadding_shapeCheck)(
  THNNState *state,
  THTensor *input,
  THTensor *gradOutput,
  int pleft, int pright,
  int ptop, int pbottom,
  int pfront, int pback) {
  int dimw = 3;
  int dimh = 2;
  int dimd = 1;
  int dimslices = 0;
  long nslices;
  long idepth;
  long iheight;
  long iwidth;
  long odepth;
  long oheight;
  long owidth;

  THNN_ARGCHECK(input->nDimension == 4 || input->nDimension == 5, 2, input,
                "4D or 5D (batch mode) tensor expected for input, but got: %s");

  /* with a batch dimension present, every other dimension shifts up */
  if (input->nDimension == 5)
  {
    dimw++;
    dimh++;
    dimd++;
    dimslices++;
  }

  /* sizes */
  nslices = input->size[dimslices];
  idepth = input->size[dimd];
  iheight = input->size[dimh];
  iwidth = input->size[dimw];
  odepth = idepth + pfront + pback;
  oheight = iheight + ptop + pbottom;
  owidth = iwidth + pleft + pright;

  /* all three output dims must be >= 1 (negative padding can shrink) */
  THArgCheck(owidth >= 1 && oheight >= 1 && odepth >= 1, 2,
             "input (D: %ld H: %ld, W: %ld) is too small."
             " Calculated output D: %ld H: %ld W: %ld",
             idepth, iheight, iwidth, odepth, oheight, owidth);

  if (gradOutput != NULL) {
    THArgCheck(nslices == THTensor_(size)(gradOutput, dimslices), 3,
               "gradOutput plane count unexpected. Expected: %ld, Got: %ld",
               nslices, THTensor_(size)(gradOutput, dimslices));
    THArgCheck(owidth == THTensor_(size)(gradOutput, dimw), 3,
               "gradOutput width unexpected. Expected: %ld, Got: %ld",
               owidth, THTensor_(size)(gradOutput, dimw));
    THArgCheck(oheight == THTensor_(size)(gradOutput, dimh), 3,
               "gradOutput height unexpected. Expected: %ld, Got: %ld",
               oheight, THTensor_(size)(gradOutput, dimh));
    THArgCheck(odepth == THTensor_(size)(gradOutput, dimd), 3,
               "gradOutput depth unexpected. Expected: %ld, Got: %ld",
               odepth, THTensor_(size)(gradOutput, dimd));
  }
}
/* Copies one (non-batch) input volume into its replication-padded output
 * volume.  For every output voxel the source coordinate is clamped to
 * the valid input range, which replicates the border values into the
 * padding region.  Negative padding crops: the iStart*/oStart* offsets
 * shift the origin so only the overlapping region is addressed.
 * Parallelized over the feature slices (k). */
static void THNN_(VolumetricReplicationPadding_updateOutput_frame)(
  real *input_p, real *output_p,
  long nslices,
  long iwidth, long iheight, long idepth,
  long owidth, long oheight, long odepth,
  int pleft, int pright,
  int ptop, int pbottom,
  int pfront, int pback)
{
  int iStartX = fmax(0, -pleft);
  int iStartY = fmax(0, -ptop);
  int iStartZ = fmax(0, -pfront);
  int oStartX = fmax(0, pleft);
  int oStartY = fmax(0, ptop);
  int oStartZ = fmax(0, pfront);

  long k, ip_x, ip_y, ip_z;
#pragma omp parallel for private(k, ip_x, ip_y, ip_z)
  for (k = 0; k < nslices; k++) {
    long i, j, z;
    for (z = 0; z < odepth; z++) {
      for (i = 0; i < oheight; i++) {
        for (j = 0; j < owidth; j++) {
          /* clamp the x source coordinate to [pleft, iwidth + pleft) */
          if (j < pleft) {
            ip_x = pleft;
          } else if (j >= pleft && j < iwidth + pleft) {
            ip_x = j;
          } else {
            ip_x = iwidth + pleft - 1;
          }
          ip_x = ip_x - oStartX + iStartX;

          /* clamp the y source coordinate to [ptop, iheight + ptop) */
          if (i < ptop) {
            ip_y = ptop;
          } else if (i >= ptop && i < iheight + ptop) {
            ip_y = i;
          } else {
            ip_y = iheight + ptop - 1;
          }
          ip_y = ip_y - oStartY + iStartY;

          /* clamp the z source coordinate to [pfront, idepth + pfront) */
          if (z < pfront) {
            ip_z = pfront;
          } else if (z >= pfront && z < idepth + pfront) {
            ip_z = z;
          } else {
            ip_z = idepth + pfront - 1;
          }
          ip_z = ip_z - oStartZ + iStartZ;

          real *dest_p = output_p + k * owidth * oheight * odepth +
              z * owidth * oheight + i * owidth + j;
          real *src_p = input_p + k * iwidth * iheight * idepth +
              ip_z * iwidth * iheight + ip_y * iwidth + ip_x;
          *dest_p = *src_p;
        }
      }
    }
  }
}
/* Forward pass of volumetric replication padding.  Accepts a 4D
 * (C,D,H,W) or 5D batched (N,C,D,H,W) input, resizes output to the
 * padded sizes, and fills it via updateOutput_frame (once for 4D, once
 * per batch element -- in parallel -- for 5D). */
void THNN_(VolumetricReplicationPadding_updateOutput)(THNNState *state,
                                                      THTensor *input,
                                                      THTensor *output,
                                                      int pleft, int pright,
                                                      int ptop, int pbottom,
                                                      int pfront, int pback)
{
  int dimw = 3;
  int dimh = 2;
  int dimd = 1;
  int dimslices = 0;
  long nbatch = 1;
  long nslices;
  long idepth;
  long iheight;
  long iwidth;
  long odepth;
  long oheight;
  long owidth;
  real *input_data;
  real *output_data;

  /* validate input only; there is no gradOutput in the forward pass */
  THNN_(VolumetricReplicationPadding_shapeCheck)(
      state, input, NULL, pleft, pright,
      ptop, pbottom, pfront, pback);

  if (input->nDimension == 5)
  {
    nbatch = input->size[0];
    dimw++;
    dimh++;
    dimd++;
    dimslices++;
  }

  /* sizes */
  nslices = input->size[dimslices];
  idepth = input->size[dimd];
  iheight = input->size[dimh];
  iwidth = input->size[dimw];
  odepth = idepth + pfront + pback;
  oheight = iheight + ptop + pbottom;
  owidth  = iwidth + pleft + pright;

  /* get contiguous input */
  input = THTensor_(newContiguous)(input);

  /* resize output */
  if (input->nDimension == 4)
  {
    /* single volume */
    THTensor_(resize4d)(output, nslices, odepth, oheight, owidth);
    input_data = THTensor_(data)(input);
    output_data = THTensor_(data)(output);
    THNN_(VolumetricReplicationPadding_updateOutput_frame)(
         input_data, output_data, nslices, iwidth, iheight, idepth,
         owidth, oheight, odepth, pleft, pright, ptop, pbottom, pfront,
         pback);
  }
  else
  {
    /* batched: pad each element independently, in parallel */
    long p;

    THTensor_(resize5d)(output, nbatch, nslices, odepth, oheight, owidth);
    input_data = THTensor_(data)(input);
    output_data = THTensor_(data)(output);

#pragma omp parallel for private(p)
    for (p = 0; p < nbatch; p++)
    {
      THNN_(VolumetricReplicationPadding_updateOutput_frame)(
        input_data + p * nslices * iwidth * iheight * idepth,
        output_data + p * nslices * owidth * oheight * odepth,
        nslices,
        iwidth, iheight, idepth,
        owidth, oheight, odepth,
        pleft, pright,
        ptop, pbottom,
        pfront, pback);
    }
  }

  /* cleanup: release the contiguous copy made above */
  THTensor_(free)(input);
}
/* Backward pass for one (non-batch) volume: scatters gradOutput back
 * into gradInput using the same clamped source-coordinate mapping as
 * the forward pass.  Because many padded output voxels map to the same
 * border input voxel, gradients are ACCUMULATED (+=) into gradInput,
 * which the caller must have zeroed.  Parallelized over feature slices
 * (k); slices write disjoint regions, so the accumulation is race-free. */
static void THNN_(VolumetricReplicationPadding_updateGradInput_frame)(
  real *ginput_p, real *goutput_p,
  long nslices,
  long iwidth, long iheight, long idepth,
  long owidth, long oheight, long odepth,
  int pleft, int pright,
  int ptop, int pbottom,
  int pfront, int pback)
{
  int iStartX = fmax(0, -pleft);
  int iStartY = fmax(0, -ptop);
  int iStartZ = fmax(0, -pfront);
  int oStartX = fmax(0, pleft);
  int oStartY = fmax(0, ptop);
  int oStartZ = fmax(0, pfront);

  long k, ip_x, ip_y, ip_z;
#pragma omp parallel for private(k, ip_x, ip_y, ip_z)
  for (k = 0; k < nslices; k++) {
    long i, j, z;
    for (z = 0; z < odepth; z++) {
      for (i = 0; i < oheight; i++) {
        for (j = 0; j < owidth; j++) {
          /* clamp the x source coordinate to [pleft, iwidth + pleft) */
          if (j < pleft) {
            ip_x = pleft;
          } else if (j >= pleft && j < iwidth + pleft) {
            ip_x = j;
          } else {
            ip_x = iwidth + pleft - 1;
          }
          ip_x = ip_x - oStartX + iStartX;

          /* clamp the y source coordinate to [ptop, iheight + ptop) */
          if (i < ptop) {
            ip_y = ptop;
          } else if (i >= ptop && i < iheight + ptop) {
            ip_y = i;
          } else {
            ip_y = iheight + ptop - 1;
          }
          ip_y = ip_y - oStartY + iStartY;

          /* clamp the z source coordinate to [pfront, idepth + pfront) */
          if (z < pfront) {
            ip_z = pfront;
          } else if (z >= pfront && z < idepth + pfront) {
            ip_z = z;
          } else {
            ip_z = idepth + pfront - 1;
          }
          ip_z = ip_z - oStartZ + iStartZ;

          real *src_p = goutput_p + k * owidth * oheight * odepth +
              z * owidth * oheight + i * owidth + j;
          real *dest_p = ginput_p + k * iwidth * iheight * idepth +
              ip_z * iwidth * iheight + ip_y * iwidth + ip_x;
          *dest_p += *src_p;
        }
      }
    }
  }
}
/* Backward pass of volumetric replication padding.
   Dispatches to the per-sample frame kernel, either once (4D input:
   C x D x H x W) or per batch element in parallel (5D input). gradInput
   is resized to match input and zeroed before accumulation. */
void THNN_(VolumetricReplicationPadding_updateGradInput)(THNNState *state,
                                                         THTensor *input,
                                                         THTensor *gradOutput,
                                                         THTensor *gradInput,
                                                         int pleft, int pright,
                                                         int ptop, int pbottom,
                                                         int pfront, int pback)
{
  /* Dimension indices for the non-batched (4D) layout; shifted by one
     below when a leading batch dimension is present. */
  int dimw = 3;
  int dimh = 2;
  int dimd = 1;
  int dimslices = 0;
  long nbatch = 1;
  long nslices;
  long idepth;
  long iheight;
  long iwidth;
  long odepth;
  long oheight;
  long owidth;

  if (input->nDimension == 5)
  {
    nbatch = input->size[0];
    dimw++;
    dimh++;
    dimd++;
    dimslices++;
  }

  /* sizes */
  nslices = input->size[dimslices];
  idepth = input->size[dimd];
  iheight = input->size[dimh];
  iwidth = input->size[dimw];
  odepth = idepth + pfront + pback;
  oheight = iheight + ptop + pbottom;
  owidth  = iwidth + pleft + pright;

  THNN_(VolumetricReplicationPadding_shapeCheck)(
      state, input, NULL, pleft, pright,
      ptop, pbottom, pfront, pback);

  /* get contiguous gradOutput */
  gradOutput = THTensor_(newContiguous)(gradOutput);

  /* resize */
  THTensor_(resizeAs)(gradInput, input);
  /* zero first: the frame kernel accumulates with +=. */
  THTensor_(zero)(gradInput);

  /* backprop */
  if (input->nDimension == 4) {
    THNN_(VolumetricReplicationPadding_updateGradInput_frame)(
      THTensor_(data)(gradInput),
      THTensor_(data)(gradOutput),
      nslices,
      iwidth, iheight, idepth,
      owidth, oheight, odepth,
      pleft, pright,
      ptop, pbottom,
      pfront, pback);
  } else {
    long p;
    /* Batch elements are independent, so parallelize over p. */
#pragma omp parallel for private(p)
    for (p = 0; p < nbatch; p++) {
      THNN_(VolumetricReplicationPadding_updateGradInput_frame)(
        THTensor_(data)(gradInput) + p * nslices * idepth * iheight * iwidth,
        THTensor_(data)(gradOutput) + p * nslices * odepth * oheight * owidth,
        nslices,
        iwidth, iheight, idepth,
        owidth, oheight, odepth,
        pleft, pright,
        ptop, pbottom,
        pfront, pback);
    }
  }

  /* cleanup: release the contiguous copy made above. */
  THTensor_(free)(gradOutput);
}
#endif
|
stacks.c | // -*-Mode: C++;-*- // technically C99
// * BeginRiceCopyright *****************************************************
//
// $HeadURL$
// $Id$
//
// --------------------------------------------------------------------------
// Part of HPCToolkit (hpctoolkit.org)
//
// Information about sources of support for research and development of
// HPCToolkit is at 'hpctoolkit.org' and in 'README.Acknowledgments'.
// --------------------------------------------------------------------------
//
// Copyright ((c)) 2002-2022, Rice University
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// * Neither the name of Rice University (RICE) nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// This software is provided by RICE and contributors "as is" and any
// express or implied warranties, including, but not limited to, the
// implied warranties of merchantability and fitness for a particular
// purpose are disclaimed. In no event shall RICE or contributors be
// liable for any direct, indirect, incidental, special, exemplary, or
// consequential damages (including, but not limited to, procurement of
// substitute goods or services; loss of use, data, or profits; or
// business interruption) however caused and on any theory of liability,
// whether in contract, strict liability, or tort (including negligence
// or otherwise) arising in any way out of the use of this software, even
// if advised of the possibility of such damage.
//
// ******************************************************* EndRiceCopyright *
//*****************************************************************************
// local includes
//*****************************************************************************
#include "stacks.h"
//*****************************************************************************
// interface functions
//*****************************************************************************
#define Ad(q) q.aptr
#define Ap(q) q->aptr
// Initialize/overwrite a sequential-stack link with v (relaxed store:
// the s* API is for single-threaded use, no ordering needed).
void
sstack_ptr_set
(
 s_element_ptr_t *p,
 s_element_t *v
)
{
  atomic_store_explicit(&Ap(p), v, memory_order_relaxed);
}
// Read a sequential-stack link (relaxed load; single-threaded API).
s_element_t *
sstack_ptr_get
(
 s_element_ptr_t *e
)
{
  return (s_element_t *) atomic_load_explicit(&Ap(e), memory_order_relaxed);
}
// Replace the stack head with r and return the previous head.
s_element_t *
sstack_swap
(
 s_element_ptr_t *q,
 s_element_t *r
)
{
  return (s_element_t *) atomic_exchange_explicit(&Ap(q), r, memory_order_relaxed);
}
// Push e onto the sequential stack: link e before the current head,
// then make e the new head. Not safe for concurrent callers (relaxed,
// non-atomic read-modify-write sequence); use cstack_push for that.
void
sstack_push
(
 s_element_ptr_t *q,
 s_element_t *e
)
{
  s_element_t *first =
    (s_element_t *) atomic_load_explicit(&Ap(q), memory_order_relaxed);
  atomic_store_explicit(&(e->Ad(next)), first, memory_order_relaxed);
  atomic_store_explicit(&Ap(q), e, memory_order_relaxed);
}
// Pop the head of the sequential stack, or return NULL if empty.
// The popped element's next link is cleared before returning it.
s_element_t *
sstack_pop
(
 s_element_ptr_t *q
)
{
  s_element_t *e = (s_element_t *) atomic_load_explicit(&Ap(q), memory_order_relaxed);
  if (e) {
    s_element_t *next =
      (s_element_t *) atomic_load_explicit(&(e->Ad(next)), memory_order_relaxed);
    atomic_store_explicit(&Ap(q), next, memory_order_relaxed);
    // Detach the element so the caller never sees a stale link.
    atomic_store_explicit(&(e->Ad(next)), 0, memory_order_relaxed);
  }
  return e;
}
// Detach the entire chain from the stack by swapping in an empty stack;
// the caller takes ownership of the returned list.
s_element_t *
sstack_steal
(
 s_element_ptr_t *q
)
{
  return sstack_swap(q, 0);
}
// Reverse the sequential stack in place using the classic three-pointer
// list reversal; afterwards the former tail is the head.
void
sstack_reverse
(
 s_element_ptr_t *q
)
{
  s_element_t *prev = NULL;
  s_element_t *e = (s_element_t *) atomic_load_explicit(&Ap(q), memory_order_relaxed);
  while (e) {
    s_element_t *next =
      (s_element_t *) atomic_load_explicit(&(e->Ad(next)), memory_order_relaxed);
    atomic_store_explicit(&(e->Ad(next)), prev, memory_order_relaxed);
    prev = e;
    e = next;
  }
  atomic_store_explicit(&Ap(q), prev, memory_order_relaxed);
}
// Apply fn(element, arg) to every element of the sequential stack,
// head first. Fix: the advance expression contained a mis-encoded
// character ("¤t", an HTML-entity mangling of "&current") that did
// not compile; restored the intended address-of expression.
void
sstack_forall
(
 s_element_ptr_t *q,
 stack_forall_fn_t fn,
 void *arg
)
{
  s_element_t *current =
    (s_element_t *) atomic_load_explicit(&Ap(q), memory_order_relaxed);
  while (current) {
    fn(current, arg);
    current =
      (s_element_t *) atomic_load_explicit(&current->Ad(next), memory_order_relaxed);
  }
}
// Initialize a concurrent-stack link to v.
// Fix: the stored value was cast to `s_element_ptr_t *` (a pointer to
// the link wrapper) instead of being stored as the `s_element_t *` the
// atomic actually holds — compare sstack_ptr_set, which stores v
// directly. The spurious cast is removed.
void
cstack_ptr_set
(
 s_element_ptr_t *e,
 s_element_t *v
)
{
  atomic_init(&Ap(e), v);
}
// Read a concurrent-stack link (seq_cst load).
s_element_t *
cstack_ptr_get
(
 s_element_ptr_t *e
)
{
  return (s_element_t *) atomic_load(&Ap(e));
}
// Atomically replace the head of the concurrent stack with r and
// return the previous head.
s_element_t *
cstack_swap
(
 s_element_ptr_t *q,
 s_element_t *r
)
{
  return (s_element_t *) atomic_exchange(&Ap(q), r);
}
// Push a singleton — or a whole pre-linked chain starting at e — onto
// the concurrent stack (Treiber-style CAS loop).
void
cstack_push
(
 s_element_ptr_t *q,
 s_element_t *e
)
{
  s_element_t *head = (s_element_t *) atomic_load(&Ap(q));
  s_element_t *new_head = e;
  // push a singleton or a chain on the list: walk e to the chain's tail
  for (;;) {
    s_element_t *enext = (s_element_t *) atomic_load(&e->Ad(next));
    if (enext == 0) break;
    e = enext;
  }
  // Link the tail to the current head, then CAS the head to the chain's
  // front; on failure `head` is refreshed and the tail is re-linked.
  do {
    atomic_store(&e->Ad(next), head);
  } while (!atomic_compare_exchange_strong(&Ap(q), &head, new_head));
}
// Pop the head of the concurrent stack, or return NULL if empty
// (Treiber-style CAS loop).
// NOTE(review): like all Treiber pops this is ABA-prone if popped
// elements can be freed and re-pushed while another thread is mid-pop;
// confirm the callers' reclamation discipline makes that impossible.
s_element_t *
cstack_pop
(
 s_element_ptr_t *q
)
{
  s_element_t *oldhead = (s_element_t *) atomic_load(&Ap(q));
  s_element_t *next = 0;
  do {
    if (oldhead == 0) return 0;
    next = (s_element_t *) atomic_load(&oldhead->Ad(next));
  } while (!atomic_compare_exchange_strong(&Ap(q), &oldhead, next));
  // Detach the popped element so the caller never sees a stale link.
  atomic_store(&oldhead->Ad(next), 0);
  return oldhead;
}
// Atomically detach the entire chain from the concurrent stack; the
// caller takes ownership of the returned list.
s_element_t *
cstack_steal
(
 s_element_ptr_t *q
)
{
  return cstack_swap(q, 0);
}
// Apply fn(element, arg) to every element of the concurrent stack,
// head first. Fix: the advance expression contained a mis-encoded
// character ("¤t", an HTML-entity mangling of "&current") that did
// not compile; restored the intended address-of expression.
// NOTE(review): the traversal is not protected against concurrent
// pops; callers must ensure the chain is quiescent (e.g. after steal).
void
cstack_forall
(
 s_element_ptr_t *q,
 stack_forall_fn_t fn,
 void *arg
)
{
  s_element_t *current = (s_element_t *) atomic_load(&Ap(q));
  while (current) {
    fn(current, arg);
    current = (s_element_t *) atomic_load(&current->Ad(next));
  }
}
//*****************************************************************************
// unit test
//*****************************************************************************
#define UNIT_TEST 0
#if UNIT_TEST
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
typedef struct {
s_element_ptr_t next;
int value;
} typed_stack_elem(int); // int_q_element_t
typed_stack_declare_type(int);
typed_stack_impl(int, cstack);
typed_stack_elem_ptr(int) queue;
// Per-element callback for typed_stack_forall: print the element value.
void
print(typed_stack_elem(int) *e, void *arg)
{
  printf("%d\n", e->value);
}
// Unit-test driver: push ten integer elements onto the global queue,
// then print them (expected output: 9 down to 0, LIFO order).
int main(int argc, char **argv)
{
  int i;
  for (i = 0; i < 10; i++) {
    typed_stack_elem_ptr(int)
      item = (typed_stack_elem_ptr(int)) malloc(sizeof(typed_stack_elem(int)));
    item->value = i;
    typed_stack_elem_ptr_set(int, cstack)(item, 0);
    typed_stack_push(int, cstack)(&queue, item);
  }
  typed_stack_forall(int, cstack)(&queue, print, 0);
}
#endif
#if 0
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
typedef struct {
s_element_ptr_t next;
int value;
} typed_stack_elem(int); // int_q_element_t
typed_stack_elem_ptr(int) queue;
#define qtype cstack
typed_stack(int, qtype)
// Allocate and initialize a new integer stack element.
// Fix: the function fell off the end without returning the freshly
// allocated element — undefined behavior for a non-void function;
// added the missing `return e;`.
typed_stack_elem(int) *
typed_stack_elem_fn(int,new)(int value)
{
  typed_stack_elem(int) *e =
    (typed_stack_elem(int) *) malloc(sizeof(int_s_element_t));
  e->value = value;
  typed_stack_elem_ptr_set(int, qtype)(&e->next, 0);
  return e;
}
// Test helper: pop up to n elements, logging each; stops early (with a
// message) when the queue drains.
void
pop
(
 int n
)
{
  int i;
  for(i = 0; i < n; i++) {
    typed_stack_elem(int) *e = typed_stack_pop(int, qtype)(&queue);
    if (e == 0) {
      printf("%d queue empty\n", omp_get_thread_num());
      break;
    } else {
      printf("%d popping %d\n", omp_get_thread_num(), e->value);
    }
  }
}
// Test helper: push n freshly-allocated elements with values
// [min, min+n), logging each push.
void
push
(
 int min,
 int n
)
{
  int i;
  for(i = min; i < min+n; i++) {
    printf("%d pushing %d\n", omp_get_thread_num(), i);
    typed_stack_push(int, qtype)(&queue, typed_stack_elem_fn(int, new)(i));
  }
}
// Test helper: walk a stolen chain from e to its end, logging values.
void
dump
(
 int_s_element_t *e
)
{
  int i;
  for(; e;
      e = (int_s_element_t *) typed_stack_elem_ptr_get(int,qtype)(&e->next)) {
    printf("%d stole %d\n", omp_get_thread_num(), e->value);
  }
}
// Concurrency smoke test: every OpenMP thread interleaves pushes, pops,
// and a steal on the shared queue; output order is nondeterministic.
int
main
(
 int argc,
 char **argv
)
{
  typed_stack_elem_ptr_set(int, qtype)(&queue, 0);
#pragma omp parallel
  {
    push(0, 30);
    pop(10);
    push(100, 12);
    // pop(100);
    int_s_element_t *e = typed_stack_steal(int, qtype)(&queue);
    dump(e);
    push(300, 30);
    typed_stack_push(int, qtype)(&queue, e);
    pop(100);
  }
}
#endif
|
csc.h | #ifndef __csc_H
#define __csc_H
/*
 * y (+)= a * A * x for a CSC matrix with contiguous x and y.
 *
 * overwrite_y - when true, y is zeroed first; otherwise accumulate.
 * n_row/n_col - matrix dimensions; Ap/Ai/Ax - CSC column pointers,
 * row indices, and values; a - scalar factor.
 */
template<typename I, typename T1, typename T2,typename T3>
void csc_matvec_noomp_contig(const bool overwrite_y,
                             const I n_row,
                             const I n_col,
                             const I Ap[],
                             const I Ai[],
                             const T1 Ax[],
                             const T2 a,
                             const T3 x[],
                                   T3 y[])
{
    if (overwrite_y) {
        for (I r = 0; r < n_row; r++) {
            y[r] = 0;
        }
    }
    // CSC layout: walk column by column, scattering into y.
    for (I col = 0; col < n_col; col++) {
        const T3 xv = x[col];
        for (I idx = Ap[col]; idx < Ap[col + 1]; idx++) {
            y[Ai[idx]] += (a * Ax[idx]) * xv;
        }
    }
}
/*
 * y (+)= a * A * x for a CSC matrix where x and y may be strided
 * (non-contiguous NumPy array) views; strides are in elements.
 */
template<typename I, typename T1, typename T2,typename T3>
void csc_matvec_noomp_strided(const bool overwrite_y,
                              const I n_row,
                              const I n_col,
                              const I Ap[],
                              const I Ai[],
                              const T1 Ax[],
                              const T2 a,
                              const npy_intp x_stride,
                              const T3 x[],
                              const npy_intp y_stride,
                                    T3 y[])
{
    if (overwrite_y) {
        for (I r = 0; r < n_row; r++) {
            y[r * y_stride] = 0;
        }
    }
    // Column-major traversal; y is scattered through its stride.
    for (I col = 0; col < n_col; col++) {
        const T3 xv = x[col * x_stride];
        for (I idx = Ap[col]; idx < Ap[col + 1]; idx++) {
            y[Ai[idx] * y_stride] += (a * Ax[idx]) * xv;
        }
    }
}
/*
 * Y (+)= a * A * X for a CSC matrix against n_vecs right-hand sides,
 * with fully strided X and Y. Chooses a traversal order that favors
 * the memory layout of Y (the array being written).
 */
template<typename I, typename T1, typename T2,typename T3>
void csc_matvecs_noomp_strided(const bool overwrite_y,
              const I n_row,
              const I n_col,
              const npy_intp n_vecs,
              const I Ap[],
              const I Ai[], // indices for row elements in each column
              const T1 Ax[],
              const T2 a,
              const npy_intp x_stride_row, // X_n_row == n_col
              const npy_intp x_stride_col, // X_n_col == n_vecs
              const T3 x[],
              const npy_intp y_stride_row, // Y_n_row == n_row
              const npy_intp y_stride_col, // Y_n_col == n_vecs
                    T3 y[])
{
  if(overwrite_y){
    for(npy_intp i = 0; i < n_row; i++){
      for(npy_intp j = 0; j < n_vecs; j++){
        y[i * y_stride_row + j * y_stride_col] = 0;
      }
    }
  }
  // preference ordering of 'y' as it is being written to.
  if(y_stride_col < y_stride_row){
    // Row-of-Y contiguous-ish: for each matrix entry update the whole
    // row of Y at once via the strided axpy helper.
    for(I j = 0; j < n_col; j++){
      I col_start = Ap[j];
      I col_end = Ap[j+1];
      for(I ii = col_start; ii < col_end; ii++){
        T3 * y_row = y + y_stride_row * Ai[ii];
        const T3 ax = (a * Ax[ii]);
        axpy_strided(n_vecs, ax, x_stride_col, x, y_stride_col, y_row);
      }
      x += x_stride_row;
    }
  }
  else{
    // Column-of-Y contiguous-ish: process one right-hand side at a
    // time, doing a full mat-vec per vector. Note x and y are advanced
    // in place between vectors.
    for(I m=0;m<n_vecs;m++){
      const T3 * x_row = x;
      for(I j = 0; j < n_col; j++){
        I col_start = Ap[j];
        I col_end = Ap[j+1];
        for(I ii = col_start; ii < col_end; ii++){
          y[y_stride_row * Ai[ii]] += (a * Ax[ii]) * (*x_row);
        }
        x_row += x_stride_row;
      }
      x += x_stride_col;
      y += y_stride_col;
    }
  }
}
#if defined(_OPENMP)
#include "openmp.h"
/*
 * OpenMP variant of the contiguous CSC mat-vec. Columns are distributed
 * across threads; since different columns can hit the same row of y,
 * each update goes through atomic_add.
 */
template<typename I, typename T1, typename T2,typename T3>
void csc_matvec_omp_contig(const bool overwrite_y,
              const I n_row,
              const I n_col,
              const I Ap[],
              const I Ai[],
              const T1 Ax[],
              const T2 a,
              const T3 x[],
                  T3 y[])
{
  #pragma omp parallel
  {
    const int nthread = omp_get_num_threads();
    // NOTE(review): chunk is derived from n_row although the work loop
    // runs over n_col — confirm this is the intended heuristic.
    const I chunk = std::max((I)1,n_row/(100*nthread));
    if(overwrite_y){
      #pragma omp for schedule(static)
      for(I j = 0; j < n_row; j++){
        y[j] = 0;
      }
    }
    // dynamic schedule: column nnz counts can be highly uneven.
    #pragma omp for schedule(dynamic,chunk)
    for(I j = 0; j < n_col; j++){
      I col_start = Ap[j];
      I col_end = Ap[j+1];
      for(I ii = col_start; ii < col_end; ii++){
        const I i = Ai[ii];
        const T3 aa = (a * Ax[ii]) * x[j];
        atomic_add(y[i],aa);
      }
    }
  }
}
/*
 * OpenMP variant of the strided CSC mat-vec; see csc_matvec_omp_contig
 * for the parallelization strategy (atomic scatter into y).
 */
template<typename I, typename T1, typename T2,typename T3>
void csc_matvec_omp_strided(const bool overwrite_y,
              const I n_row,
              const I n_col,
              const I Ap[],
              const I Ai[],
              const T1 Ax[],
              const T2 a,
              const npy_intp x_stride,
              const T3 x[],
              const npy_intp y_stride,
                  T3 y[])
{
  #pragma omp parallel
  {
    const int nthread = omp_get_num_threads();
    // NOTE(review): chunk uses n_row while the loop runs over n_col —
    // confirm intended (same heuristic as the contiguous kernel).
    const I chunk = std::max((I)1,n_row/(100*nthread));
    if(overwrite_y){
      #pragma omp for schedule(static)
      for(I j = 0; j < n_row; j++){
        y[j * y_stride] = 0;
      }
    }
    #pragma omp for schedule(dynamic,chunk)
    for(I j = 0; j < n_col; j++){
      I col_start = Ap[j];
      I col_end = Ap[j+1];
      for(I ii = col_start; ii < col_end; ii++){
        const I i = Ai[ii];
        const T3 aa = (a * Ax[ii]) * x[j * x_stride];
        atomic_add(y[i * y_stride],aa);
      }
    }
  }
}
/*
 * Multi-vector CSC product, OpenMP build: no parallel kernel exists for
 * this case, so it simply forwards to the serial implementation.
 */
template<typename I, typename T1, typename T2,typename T3>
inline void csc_matvecs_omp_strided(const bool overwrite_y,
              const I n_row,
              const I n_col,
              const npy_intp n_vecs,
              const I Ap[],
              const I Ai[],
              const T1 Ax[],
              const T2 a,
              const npy_intp x_stride_row,
              const npy_intp x_stride_col,
              const T3 x[],
              const npy_intp y_stride_row,
              const npy_intp y_stride_col,
                    T3 y[])
{
  csc_matvecs_noomp_strided(overwrite_y,n_row,n_col,n_vecs,Ap,Ai,Ax,a,x_stride_row,x_stride_col,x,y_stride_row,y_stride_col,y);
}
#else
/*
 * Non-OpenMP build: the *_omp_* entry points must still exist, so this
 * forwards to the serial contiguous kernel.
 */
template<typename I, typename T1, typename T2,typename T3>
void csc_matvec_omp_contig(const bool overwrite_y,
              const I n_row,
              const I n_col,
              const I Ap[],
              const I Ai[],
              const T1 Ax[],
              const T2 a,
              const T3 x[],
                  T3 y[])
{
  csc_matvec_noomp_contig(overwrite_y,n_row,n_col,Ap,Ai,Ax,a,x,y);
}
/*
 * Non-OpenMP build: forward the strided entry point to the serial
 * strided kernel.
 */
template<typename I, typename T1, typename T2,typename T3>
inline void csc_matvec_omp_strided(const bool overwrite_y,
              const I n_row,
              const I n_col,
              const I Ap[],
              const I Ai[],
              const T1 Ax[],
              const T2 a,
              const npy_intp x_stride,
              const T3 x[],
              const npy_intp y_stride,
                  T3 y[])
{
  csc_matvec_noomp_strided(overwrite_y,n_row,n_col,Ap,Ai,Ax,a,x_stride,x,y_stride,y);
}
/*
 * Non-OpenMP build: forward the multi-vector entry point to the serial
 * implementation.
 */
template<typename I, typename T1, typename T2,typename T3>
inline void csc_matvecs_omp_strided(const bool overwrite_y,
              const I n_row,
              const I n_col,
              const npy_intp n_vecs,
              const I Ap[],
              const I Ai[],
              const T1 Ax[],
              const T2 a,
              const npy_intp x_stride_row,
              const npy_intp x_stride_col,
              const T3 x[],
              const npy_intp y_stride_row,
              const npy_intp y_stride_col,
                    T3 y[])
{
  csc_matvecs_noomp_strided(overwrite_y,n_row,n_col,n_vecs,Ap,Ai,Ax,a,x_stride_row,x_stride_col,x,y_stride_row,y_stride_col,y);
}
#endif
// when openmp is not being used omp and noomp versions are identical
/*
 * Serial CSC mat-vec dispatcher: converts NumPy byte strides to element
 * strides and routes to the contiguous kernel when possible, otherwise
 * to the strided kernel with any unit stride pinned to the literal 1.
 */
template<typename I, typename T1, typename T2,typename T3>
void csc_matvec_noomp(const bool overwrite_y,
              const I n_row,
              const I n_col,
              const I Ap[],
              const I Aj[],
              const T1 Ax[],
              const T2 a,
              const npy_intp x_stride_byte,
              const T3 x[],
              const npy_intp y_stride_byte,
                  T3 y[])
{
  const npy_intp x_stride = x_stride_byte/sizeof(T3);
  const npy_intp y_stride = y_stride_byte/sizeof(T3);
  if(x_stride == 1 && y_stride == 1){
    csc_matvec_noomp_contig(overwrite_y,n_row,n_col,Ap,Aj,Ax,a,x,y);
  }
  else if(y_stride == 1){
    csc_matvec_noomp_strided(overwrite_y,n_row,n_col,Ap,Aj,Ax,a,x_stride,x,1,y);
  }
  else if(x_stride == 1){
    csc_matvec_noomp_strided(overwrite_y,n_row,n_col,Ap,Aj,Ax,a,1,x,y_stride,y);
  }
  else{
    csc_matvec_noomp_strided(overwrite_y,n_row,n_col,Ap,Aj,Ax,a,x_stride,x,y_stride,y);
  }
}
/*
 * OpenMP CSC mat-vec dispatcher: mirrors csc_matvec_noomp but routes
 * to the parallel kernels.
 */
template<typename I, typename T1, typename T2,typename T3>
void csc_matvec_omp(const bool overwrite_y,
              const I n_row,
              const I n_col,
              const I Ap[],
              const I Aj[],
              const T1 Ax[],
              const T2 a,
              const npy_intp x_stride_byte,
              const T3 x[],
              const npy_intp y_stride_byte,
                  T3 y[])
{
  const npy_intp x_stride = x_stride_byte/sizeof(T3);
  const npy_intp y_stride = y_stride_byte/sizeof(T3);
  if(x_stride == 1 && y_stride == 1){
    csc_matvec_omp_contig(overwrite_y,n_row,n_col,Ap,Aj,Ax,a,x,y);
  }
  else if(y_stride == 1){
    csc_matvec_omp_strided(overwrite_y,n_row,n_col,Ap,Aj,Ax,a,x_stride,x,1,y);
  }
  else if(x_stride == 1){
    csc_matvec_omp_strided(overwrite_y,n_row,n_col,Ap,Aj,Ax,a,1,x,y_stride,y);
  }
  else{
    csc_matvec_omp_strided(overwrite_y,n_row,n_col,Ap,Aj,Ax,a,x_stride,x,y_stride,y);
  }
}
/*
 * Serial multi-vector CSC dispatcher: converts byte strides to element
 * strides and pins any unit strides to the literal 1 so the strided
 * kernel's compiler can specialize the common layouts (C- and
 * F-contiguous X/Y).
 */
template<typename I, typename T1, typename T2,typename T3>
inline void csc_matvecs_noomp(const bool overwrite_y,
              const I n_row,
              const I n_col,
              const npy_intp n_vecs,
              const I Ap[],
              const I Aj[],
              const T1 Ax[],
              const T2 a,
              const npy_intp x_stride_row_byte,
              const npy_intp x_stride_col_byte,
              const T3 x[],
              const npy_intp y_stride_row_byte,
              const npy_intp y_stride_col_byte,
                    T3 y[])
{
  const npy_intp y_stride_row = y_stride_row_byte/sizeof(T3);
  const npy_intp y_stride_col = y_stride_col_byte/sizeof(T3);
  const npy_intp x_stride_row = x_stride_row_byte/sizeof(T3);
  const npy_intp x_stride_col = x_stride_col_byte/sizeof(T3);
  if(y_stride_col==1){
    // Y is C-contiguous along vectors.
    if(x_stride_col==1){
      csc_matvecs_noomp_strided(overwrite_y,n_row,n_col,n_vecs,Ap,Aj,Ax,a,x_stride_row,1,x,y_stride_row,1,y);
    }
    else if(x_stride_row==1){
      csc_matvecs_noomp_strided(overwrite_y,n_row,n_col,n_vecs,Ap,Aj,Ax,a,1,x_stride_col,x,y_stride_row,1,y);
    }
    else{
      csc_matvecs_noomp_strided(overwrite_y,n_row,n_col,n_vecs,Ap,Aj,Ax,a,x_stride_row,x_stride_col,x,y_stride_row,1,y);
    }
  }
  else if(y_stride_row==1){
    // Y is F-contiguous along rows.
    if(x_stride_col==1){
      csc_matvecs_noomp_strided(overwrite_y,n_row,n_col,n_vecs,Ap,Aj,Ax,a,x_stride_row,1,x,1,y_stride_col,y);
    }
    else if(x_stride_row==1){
      csc_matvecs_noomp_strided(overwrite_y,n_row,n_col,n_vecs,Ap,Aj,Ax,a,1,x_stride_col,x,1,y_stride_col,y);
    }
    else{
      csc_matvecs_noomp_strided(overwrite_y,n_row,n_col,n_vecs,Ap,Aj,Ax,a,x_stride_row,x_stride_col,x,1,y_stride_col,y);
    }
  }
  else{
    // Fully strided Y.
    csc_matvecs_noomp_strided(overwrite_y,n_row,n_col,n_vecs,Ap,Aj,Ax,a,x_stride_row,x_stride_col,x,y_stride_row,y_stride_col,y);
  }
}
/*
 * OpenMP multi-vector CSC dispatcher: mirrors csc_matvecs_noomp but
 * routes to the *_omp_* entry point (which itself may fall back to the
 * serial kernel — see csc_matvecs_omp_strided).
 */
template<typename I, typename T1, typename T2,typename T3>
inline void csc_matvecs_omp(const bool overwrite_y,
              const I n_row,
              const I n_col,
              const npy_intp n_vecs,
              const I Ap[],
              const I Aj[],
              const T1 Ax[],
              const T2 a,
              const npy_intp x_stride_row_byte,
              const npy_intp x_stride_col_byte,
              const T3 x[],
              const npy_intp y_stride_row_byte,
              const npy_intp y_stride_col_byte,
                    T3 y[])
{
  const npy_intp y_stride_row = y_stride_row_byte/sizeof(T3);
  const npy_intp y_stride_col = y_stride_col_byte/sizeof(T3);
  const npy_intp x_stride_row = x_stride_row_byte/sizeof(T3);
  const npy_intp x_stride_col = x_stride_col_byte/sizeof(T3);
  if(y_stride_col==1){
    if(x_stride_col==1){
      csc_matvecs_omp_strided(overwrite_y,n_row,n_col,n_vecs,Ap,Aj,Ax,a,x_stride_row,1,x,y_stride_row,1,y);
    }
    else if(x_stride_row==1){
      csc_matvecs_omp_strided(overwrite_y,n_row,n_col,n_vecs,Ap,Aj,Ax,a,1,x_stride_col,x,y_stride_row,1,y);
    }
    else{
      csc_matvecs_omp_strided(overwrite_y,n_row,n_col,n_vecs,Ap,Aj,Ax,a,x_stride_row,x_stride_col,x,y_stride_row,1,y);
    }
  }
  else if(y_stride_row==1){
    if(x_stride_col==1){
      csc_matvecs_omp_strided(overwrite_y,n_row,n_col,n_vecs,Ap,Aj,Ax,a,x_stride_row,1,x,1,y_stride_col,y);
    }
    else if(x_stride_row==1){
      csc_matvecs_omp_strided(overwrite_y,n_row,n_col,n_vecs,Ap,Aj,Ax,a,1,x_stride_col,x,1,y_stride_col,y);
    }
    else{
      csc_matvecs_omp_strided(overwrite_y,n_row,n_col,n_vecs,Ap,Aj,Ax,a,x_stride_row,x_stride_col,x,1,y_stride_col,y);
    }
  }
  else{
    csc_matvecs_omp_strided(overwrite_y,n_row,n_col,n_vecs,Ap,Aj,Ax,a,x_stride_row,x_stride_col,x,y_stride_row,y_stride_col,y);
  }
}
#endif |
client.c | #include <sys/types.h>
#include <sys/socket.h>
#include <sys/epoll.h>
#include <netdb.h>
#include <fcntl.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <omp.h>
#include "client.h"
#include "common.h"
#include "wrapper.h"
#define MAXEVENTS 256
/*
 * Open one non-blocking connection to address:port and register it with
 * the epoll instance efd for edge-triggered read/write events.
 *
 * Fix: `event` was `static`, which is a data race if two threads ever
 * register connections concurrently; epoll_ctl copies the struct, so a
 * plain automatic variable is correct and thread-safe.
 */
void add_client_con(const char * address, const char * port, int efd) {
    struct epoll_event event;
    connection * con;
    con = (connection *)calloc(1, sizeof(connection));
    init_connection(con, make_connected(address, port));
    set_non_blocking(con->sockfd);
    //disable rate limiting and TODO check that keep alive stops after connection close
    //enable_keepalive(con->sockfd);
    //set_recv_window(con->sockfd);
    //can't add EPOLLRDHUP as EPOLLEXCLUSIVE would then fail;
    //instead check for a read of 0
    event.events = EPOLLIN | EPOLLOUT | EPOLLEXCLUSIVE | EPOLLET;
    event.data.ptr = con;
    //no need to heap-allocate the event: the kernel copies it
    ensure(epoll_ctl(efd, EPOLL_CTL_ADD, con->sockfd, &event) != -1);
}
void client(const char * address, const char * port, int initial, int rate) {
int efd;
struct epoll_event event;
struct epoll_event *events;
ensure((efd = epoll_create1(0)) != -1);
//buffer where events are returned
events = calloc(MAXEVENTS, sizeof(event));
for(int i = 0; i < initial; ++i)
add_client_con(address, port, efd);
//TODO split off gradual increase of client # threads
#pragma omp parallel
while (1) {
int n, i, bytes;
n = epoll_wait(efd, events, MAXEVENTS, -1);
for (i = 0; i < n; i++) {
if ((events[i].events & EPOLLERR) ||
(events[i].events & EPOLLHUP)) { // error or unexpected close
perror("epoll_wait");
close_connection(events[i].data.ptr);
continue;
} else {
if (events[i].events & EPOLLIN) {//data has been echoed back or remote has closed connection
//TODO record read bytes
bytes = black_hole_read((connection *)events[i].data.ptr);
//printf("read %d\n",bytes);
}
if (events[i].events & EPOLLOUT) {//data can be written
bytes = send_pipe((connection *)events[i].data.ptr);
//printf("wrote %d\n",bytes);
}
}
}
}
free(events);
close(efd);
}
|
GB_unop__exp2_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__exp2_fp64_fp64)
// op(A') function: GB (_unop_tran__exp2_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = exp2 (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = exp2 (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = exp2 (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_EXP2 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = exp2 (Ax [p]) over anz entries, using nthreads OpenMP
// threads; the bitmap branch skips entries whose Ab flag is clear.
// (Auto-generated file — changes here should normally be made in
// Generator/ instead.)
GrB_Info GB (_unop_apply__exp2_fp64_fp64)
(
    double *Cx,         // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = exp2 (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = exp2 (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = exp2 (A'): transpose + apply, with the shared transpose template
// providing the loop structure. (Auto-generated file.)
GrB_Info GB (_unop_tran__exp2_fp64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
coordinate_common.h | /*!
* Copyright 2018 by Contributors
* \author Rory Mitchell
*/
#pragma once
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
#include <limits>
#include "xgboost/data.h"
#include "xgboost/parameter.h"
#include "./param.h"
#include "../gbm/gblinear_model.h"
#include "../common/random.h"
namespace xgboost {
namespace linear {
// Parameters for the coordinate-descent linear updater.
struct CoordinateParam : public XGBoostParameter<CoordinateParam> {
  int top_k;  // 0 means "use all features"
  DMLC_DECLARE_PARAMETER(CoordinateParam) {
    DMLC_DECLARE_FIELD(top_k)
        .set_lower_bound(0)
        .set_default(0)
        .describe("The number of top features to select in 'thrifty' feature_selector. "
                  "The value of zero means using all the features.");
  }
};
/**
* \brief Calculate change in weight for a given feature. Applies l1/l2 penalty normalised by the
* number of training instances.
*
* \param sum_grad The sum gradient.
* \param sum_hess The sum hess.
* \param w The weight.
* \param reg_alpha Unnormalised L1 penalty.
* \param reg_lambda Unnormalised L2 penalty.
*
* \return The weight update.
*/
/**
 * \brief Calculate change in weight for a given feature. Applies l1/l2 penalty
 * normalised by the number of training instances.
 *
 * \param sum_grad The sum gradient.
 * \param sum_hess The sum hess.
 * \param w The weight.
 * \param reg_alpha Unnormalised L1 penalty.
 * \param reg_lambda Unnormalised L2 penalty.
 *
 * \return The weight update.
 */
inline double CoordinateDelta(double sum_grad, double sum_hess, double w,
                              double reg_alpha, double reg_lambda) {
  // A (numerically) vanishing Hessian gives no reliable Newton step.
  if (sum_hess < 1e-5f) return 0.0f;
  // Fold the L2 penalty into the gradient and Hessian.
  const double grad_l2 = sum_grad + reg_lambda * w;
  const double hess_l2 = sum_hess + reg_lambda;
  const double raw_step = w - grad_l2 / hess_l2;
  // Soft-threshold with the L1 penalty; the clamp at -w keeps the
  // updated weight from crossing zero in the wrong direction.
  return (raw_step >= 0)
             ? std::max(-(grad_l2 + reg_alpha) / hess_l2, -w)
             : std::min(-(grad_l2 - reg_alpha) / hess_l2, -w);
}
/**
* \brief Calculate update to bias.
*
* \param sum_grad The sum gradient.
* \param sum_hess The sum hess.
*
* \return The weight update.
*/
/**
 * \brief Calculate update to bias (plain Newton step; the intercept is
 * not regularised).
 *
 * \param sum_grad The sum gradient.
 * \param sum_hess The sum hess.
 *
 * \return The weight update.
 */
inline double CoordinateDeltaBias(double sum_grad, double sum_hess) {
  return -(sum_grad / sum_hess);
}
/**
* \brief Get the gradient with respect to a single feature.
*
* \param group_idx Zero-based index of the group.
* \param num_group Number of groups.
* \param fidx The target feature.
* \param gpair Gradients.
* \param p_fmat The feature matrix.
*
* \return The gradient and diagonal Hessian entry for a given feature.
*/
/**
 * \brief Get the gradient with respect to a single feature (serial).
 *
 * \param group_idx Zero-based index of the group.
 * \param num_group Number of groups.
 * \param fidx The target feature.
 * \param gpair Gradients.
 * \param p_fmat The feature matrix.
 *
 * \return The gradient and diagonal Hessian entry for a given feature.
 */
inline std::pair<double, double> GetGradient(int group_idx, int num_group, int fidx,
                                             const std::vector<GradientPair> &gpair,
                                             DMatrix *p_fmat) {
  double sum_grad = 0.0, sum_hess = 0.0;
  // Column (CSC) access: iterate the nonzero entries of feature fidx.
  for (const auto &batch : p_fmat->GetBatches<CSCPage>()) {
    auto page = batch.GetView();
    auto col = page[fidx];
    const auto ndata = static_cast<bst_omp_uint>(col.size());
    for (bst_omp_uint j = 0; j < ndata; ++j) {
      const bst_float v = col[j].fvalue;
      auto &p = gpair[col[j].index * num_group + group_idx];
      // A negative Hessian marks the instance as excluded; skip it.
      if (p.GetHess() < 0.0f) continue;
      sum_grad += p.GetGrad() * v;
      sum_hess += p.GetHess() * v * v;
    }
  }
  return std::make_pair(sum_grad, sum_hess);
}
/**
* \brief Get the gradient with respect to a single feature. Row-wise multithreaded.
*
* \param group_idx Zero-based index of the group.
* \param num_group Number of groups.
* \param fidx The target feature.
* \param gpair Gradients.
* \param p_fmat The feature matrix.
*
* \return The gradient and diagonal Hessian entry for a given feature.
*/
/**
 * \brief Get the gradient with respect to a single feature. Row-wise multithreaded.
 *
 * \param group_idx Zero-based index of the group.
 * \param num_group Number of groups.
 * \param fidx The target feature.
 * \param gpair Gradients.
 * \param p_fmat The feature matrix.
 *
 * \return The gradient and diagonal Hessian entry for a given feature.
 */
inline std::pair<double, double> GetGradientParallel(int group_idx, int num_group, int fidx,
                                                     const std::vector<GradientPair> &gpair,
                                                     DMatrix *p_fmat) {
  double sum_grad = 0.0, sum_hess = 0.0;
  for (const auto &batch : p_fmat->GetBatches<CSCPage>()) {
    auto page = batch.GetView();
    auto col = page[fidx];
    const auto ndata = static_cast<bst_omp_uint>(col.size());
    // OpenMP reduction makes the two accumulators race-free.
#pragma omp parallel for schedule(static) reduction(+ : sum_grad, sum_hess)
    for (bst_omp_uint j = 0; j < ndata; ++j) {
      const bst_float v = col[j].fvalue;
      auto &p = gpair[col[j].index * num_group + group_idx];
      // Negative Hessian marks an excluded instance.
      if (p.GetHess() < 0.0f) continue;
      sum_grad += p.GetGrad() * v;
      sum_hess += p.GetHess() * v * v;
    }
  }
  return std::make_pair(sum_grad, sum_hess);
}
/**
* \brief Get the gradient with respect to the bias. Row-wise multithreaded.
*
* \param group_idx Zero-based index of the group.
* \param num_group Number of groups.
* \param gpair Gradients.
* \param p_fmat The feature matrix.
*
* \return The gradient and diagonal Hessian entry for the bias.
*/
/**
 * \brief Get the gradient with respect to the bias. Row-wise multithreaded.
 *
 * \param group_idx Zero-based index of the group.
 * \param num_group Number of groups.
 * \param gpair Gradients.
 * \param p_fmat The feature matrix.
 *
 * \return The gradient and diagonal Hessian entry for the bias.
 */
inline std::pair<double, double> GetBiasGradientParallel(int group_idx, int num_group,
                                                         const std::vector<GradientPair> &gpair,
                                                         DMatrix *p_fmat) {
  double sum_grad = 0.0, sum_hess = 0.0;
  const auto ndata = static_cast<bst_omp_uint>(p_fmat->Info().num_row_);
  // The bias touches every row, so iterate all instances (not a column).
#pragma omp parallel for schedule(static) reduction(+ : sum_grad, sum_hess)
  for (bst_omp_uint i = 0; i < ndata; ++i) {
    auto &p = gpair[i * num_group + group_idx];
    if (p.GetHess() >= 0.0f) {
      sum_grad += p.GetGrad();
      sum_hess += p.GetHess();
    }
  }
  return std::make_pair(sum_grad, sum_hess);
}
/**
* \brief Updates the gradient vector with respect to a change in weight.
*
* \param fidx The feature index.
* \param group_idx Zero-based index of the group.
* \param num_group Number of groups.
* \param dw The change in weight.
* \param in_gpair The gradient vector to be updated.
* \param p_fmat The input feature matrix.
*/
/**
 * \brief Updates the gradient vector with respect to a change in weight.
 *
 * \param fidx The feature index.
 * \param group_idx Zero-based index of the group.
 * \param num_group Number of groups.
 * \param dw The change in weight.
 * \param in_gpair The gradient vector to be updated.
 * \param p_fmat The input feature matrix.
 */
inline void UpdateResidualParallel(int fidx, int group_idx, int num_group,
                                   float dw, std::vector<GradientPair> *in_gpair,
                                   DMatrix *p_fmat) {
  // Nothing changed, nothing to propagate.
  if (dw == 0.0f) return;
  for (const auto &batch : p_fmat->GetBatches<CSCPage>()) {
    auto page = batch.GetView();
    auto col = page[fidx];
    // update grad value
    const auto num_row = static_cast<bst_omp_uint>(col.size());
    // Safe to parallelize: each nonzero touches a distinct gpair slot.
#pragma omp parallel for schedule(static)
    for (bst_omp_uint j = 0; j < num_row; ++j) {
      GradientPair &p = (*in_gpair)[col[j].index * num_group + group_idx];
      if (p.GetHess() < 0.0f) continue;
      // First-order correction of the residual: grad += hess * x * dw.
      p += GradientPair(p.GetHess() * col[j].fvalue * dw, 0);
    }
  }
}
/**
* \brief Updates the gradient vector based on a change in the bias.
*
* \param group_idx Zero-based index of the group.
* \param num_group Number of groups.
* \param dbias The change in bias.
* \param in_gpair The gradient vector to be updated.
* \param p_fmat The input feature matrix.
*/
/**
 * \brief Updates the gradient vector based on a change in the bias.
 *
 * \param group_idx Zero-based index of the group.
 * \param num_group Number of groups.
 * \param dbias The change in bias.
 * \param in_gpair The gradient vector to be updated.
 * \param p_fmat The input feature matrix.
 */
inline void UpdateBiasResidualParallel(int group_idx, int num_group, float dbias,
                                       std::vector<GradientPair> *in_gpair,
                                       DMatrix *p_fmat) {
  if (dbias == 0.0f) return;
  const auto ndata = static_cast<bst_omp_uint>(p_fmat->Info().num_row_);
  // The bias has an implicit feature value of 1 for every row.
#pragma omp parallel for schedule(static)
  for (bst_omp_uint i = 0; i < ndata; ++i) {
    GradientPair &g = (*in_gpair)[i * num_group + group_idx];
    if (g.GetHess() < 0.0f) continue;
    g += GradientPair(g.GetHess() * dbias, 0);
  }
}
/**
* \brief Abstract class for stateful feature selection or ordering
* in coordinate descent algorithms.
*/
/**
 * \brief Abstract class for stateful feature selection or ordering
 * in coordinate descent algorithms.
 */
class FeatureSelector {
 public:
  /*! \brief factory method */
  static FeatureSelector *Create(int choice);
  /*! \brief virtual destructor */
  virtual ~FeatureSelector() = default;
  /**
   * \brief Setting up the selector state prior to looping through features.
   * Default implementation is a no-op for stateless selectors.
   *
   * \param model The model.
   * \param gpair The gpair.
   * \param p_fmat The feature matrix.
   * \param alpha Regularisation alpha.
   * \param lambda Regularisation lambda.
   * \param param A parameter with algorithm-dependent use.
   */
  virtual void Setup(const gbm::GBLinearModel &,
                     const std::vector<GradientPair> &,
                     DMatrix *,
                     float , float , int ) {}
  /**
   * \brief Select next coordinate to update.
   *
   * \param iteration The iteration in a loop through features
   * \param model The model.
   * \param group_idx Zero-based index of the group.
   * \param gpair The gpair.
   * \param p_fmat The feature matrix.
   * \param alpha Regularisation alpha.
   * \param lambda Regularisation lambda.
   *
   * \return The index of the selected feature. -1 indicates none selected.
   */
  virtual int NextFeature(int iteration,
                          const gbm::GBLinearModel &model,
                          int group_idx,
                          const std::vector<GradientPair> &gpair,
                          DMatrix *p_fmat, float alpha, float lambda) = 0;
};
/**
 * \brief Deterministic selection by cycling through features one at a time.
 */
class CyclicFeatureSelector : public FeatureSelector {
 public:
  int NextFeature(int iteration, const gbm::GBLinearModel &model,
                  int, const std::vector<GradientPair> &,
                  DMatrix *, float, float) override {
    // Round-robin visit: 0, 1, ..., num_feature - 1, 0, 1, ...
    const auto n_features = model.learner_model_param->num_feature;
    return iteration % n_features;
  }
};
/**
 * \brief Similar to Cyclic but with random feature shuffling prior to each update.
 * \note Its randomness is controllable by setting a random seed.
 */
class ShuffleFeatureSelector : public FeatureSelector {
 public:
  void Setup(const gbm::GBLinearModel &model,
             const std::vector<GradientPair>&,
             DMatrix *, float, float, int) override {
    // Lazily build the identity permutation once, then reshuffle every call.
    if (feat_index_.empty()) {
      const size_t n_features = model.learner_model_param->num_feature;
      feat_index_.resize(n_features);
      std::iota(feat_index_.begin(), feat_index_.end(), 0);
    }
    std::shuffle(feat_index_.begin(), feat_index_.end(), common::GlobalRandom());
  }

  int NextFeature(int iteration, const gbm::GBLinearModel &model,
                  int, const std::vector<GradientPair> &,
                  DMatrix *, float, float) override {
    // Walk the shuffled permutation cyclically.
    const auto pos = iteration % model.learner_model_param->num_feature;
    return feat_index_[pos];
  }

 protected:
  std::vector<bst_uint> feat_index_;  // current permutation of feature indices
};
/**
 * \brief A random (with replacement) coordinate selector.
 * \note Its randomness is controllable by setting a random seed.
 */
class RandomFeatureSelector : public FeatureSelector {
 public:
  int NextFeature(int, const gbm::GBLinearModel &model,
                  int, const std::vector<GradientPair> &,
                  DMatrix *, float, float) override {
    // Draw uniformly-ish (with replacement) from [0, num_feature).
    auto &rng = common::GlobalRandom();
    return rng() % model.learner_model_param->num_feature;
  }
};
/**
 * \brief Select coordinate with the greatest gradient magnitude.
 * \note It has O(num_feature^2) complexity. It is fully deterministic.
 *
 * \note It allows restricting the selection to top_k features per group with
 * the largest magnitude of univariate weight change, by passing the top_k value
 * through the `param` argument of Setup(). That would reduce the complexity to
 * O(num_feature*top_k).
 */
class GreedyFeatureSelector : public FeatureSelector {
 public:
  void Setup(const gbm::GBLinearModel &model,
             const std::vector<GradientPair> &,
             DMatrix *, float, float, int param) override {
    top_k_ = static_cast<bst_uint>(param);
    const bst_uint ngroup = model.learner_model_param->num_output_group;
    // non-positive param disables the top-k cap entirely
    if (param <= 0) top_k_ = std::numeric_limits<bst_uint>::max();
    if (counter_.size() == 0) {
      counter_.resize(ngroup);
      gpair_sums_.resize(model.learner_model_param->num_feature * ngroup);
    }
    // restart the per-group "features selected so far" counters
    for (bst_uint gid = 0u; gid < ngroup; ++gid) {
      counter_[gid] = 0u;
    }
  }

  int NextFeature(int, const gbm::GBLinearModel &model,
                  int group_idx, const std::vector<GradientPair> &gpair,
                  DMatrix *p_fmat, float alpha, float lambda) override {
    // k-th selected feature for a group
    auto k = counter_[group_idx]++;
    // stop after either reaching top-K or going through all the features in a group
    if (k >= top_k_ || counter_[group_idx] == model.learner_model_param->num_feature) return -1;
    const int ngroup = model.learner_model_param->num_output_group;
    const bst_omp_uint nfeat = model.learner_model_param->num_feature;
    // Calculate univariate gradient sums
    std::fill(gpair_sums_.begin(), gpair_sums_.end(), std::make_pair(0., 0.));
    for (const auto &batch : p_fmat->GetBatches<CSCPage>()) {
      auto page = batch.GetView();
      // one thread per feature column; each iteration writes a distinct
      // gpair_sums_ slot, so no synchronization is needed
      #pragma omp parallel for schedule(static)
      for (bst_omp_uint i = 0; i < nfeat; ++i) {
        const auto col = page[i];
        const bst_uint ndata = col.size();
        auto &sums = gpair_sums_[group_idx * nfeat + i];
        for (bst_uint j = 0u; j < ndata; ++j) {
          const bst_float v = col[j].fvalue;
          auto &p = gpair[col[j].index * ngroup + group_idx];
          if (p.GetHess() < 0.f) continue;  // negative hessian marks a skipped sample
          sums.first += p.GetGrad() * v;
          sums.second += p.GetHess() * v * v;
        }
      }
    }
    // Find a feature with the largest magnitude of weight change
    int best_fidx = 0;
    double best_weight_update = 0.0f;
    for (bst_omp_uint fidx = 0; fidx < nfeat; ++fidx) {
      auto &s = gpair_sums_[group_idx * nfeat + fidx];
      float dw = std::abs(static_cast<bst_float>(
                 CoordinateDelta(s.first, s.second, model[fidx][group_idx], alpha, lambda)));
      if (dw > best_weight_update) {
        best_weight_update = dw;
        best_fidx = fidx;
      }
    }
    // NOTE(review): if every candidate has dw == 0 this returns feature 0
    // rather than -1 -- presumably intentional; confirm against callers.
    return best_fidx;
  }

 protected:
  bst_uint top_k_;                      // per-group cap on selections per pass
  std::vector<bst_uint> counter_;       // selections made so far, per group
  std::vector<std::pair<double, double>> gpair_sums_;  // (sum g*v, sum h*v^2) per (group, feature)
};
/**
 * \brief Thrifty, approximately-greedy feature selector.
 *
 * \note Prior to cyclic updates, reorders features in descending magnitude of
 * their univariate weight changes. This operation is multithreaded and is a
 * linear complexity approximation of the quadratic greedy selection.
 *
 * \note It allows restricting the selection to top_k features per group with
 * the largest magnitude of univariate weight change, by passing the top_k value
 * through the `param` argument of Setup().
 */
class ThriftyFeatureSelector : public FeatureSelector {
 public:
  void Setup(const gbm::GBLinearModel &model,
             const std::vector<GradientPair> &gpair,
             DMatrix *p_fmat, float alpha, float lambda, int param) override {
    top_k_ = static_cast<bst_uint>(param);
    // non-positive param disables the top-k cap entirely
    if (param <= 0) top_k_ = std::numeric_limits<bst_uint>::max();
    const bst_uint ngroup = model.learner_model_param->num_output_group;
    const bst_omp_uint nfeat = model.learner_model_param->num_feature;
    if (deltaw_.size() == 0) {
      deltaw_.resize(nfeat * ngroup);
      sorted_idx_.resize(nfeat * ngroup);
      counter_.resize(ngroup);
      gpair_sums_.resize(nfeat * ngroup);
    }
    // Calculate univariate gradient sums
    std::fill(gpair_sums_.begin(), gpair_sums_.end(), std::make_pair(0., 0.));
    for (const auto &batch : p_fmat->GetBatches<CSCPage>()) {
      auto page = batch.GetView();
      // column-parallel is usually faster than row-parallel
      #pragma omp parallel for schedule(static)
      for (bst_omp_uint i = 0; i < nfeat; ++i) {
        const auto col = page[i];
        const bst_uint ndata = col.size();
        for (bst_uint gid = 0u; gid < ngroup; ++gid) {
          auto &sums = gpair_sums_[gid * nfeat + i];
          for (bst_uint j = 0u; j < ndata; ++j) {
            const bst_float v = col[j].fvalue;
            auto &p = gpair[col[j].index * ngroup + gid];
            if (p.GetHess() < 0.f) continue;  // negative hessian marks a skipped sample
            sums.first += p.GetGrad() * v;
            sums.second += p.GetHess() * v * v;
          }
        }
      }
    }
    // rank by descending weight magnitude within the groups
    std::fill(deltaw_.begin(), deltaw_.end(), 0.f);
    std::iota(sorted_idx_.begin(), sorted_idx_.end(), 0);
    bst_float *pdeltaw = &deltaw_[0];
    for (bst_uint gid = 0u; gid < ngroup; ++gid) {
      // Calculate univariate weight changes
      for (bst_omp_uint i = 0; i < nfeat; ++i) {
        auto ii = gid * nfeat + i;
        auto &s = gpair_sums_[ii];
        deltaw_[ii] = static_cast<bst_float>(CoordinateDelta(
                        s.first, s.second, model[i][gid], alpha, lambda));
      }
      // sort in descending order of deltaw abs values
      auto start = sorted_idx_.begin() + gid * nfeat;
      std::sort(start, start + nfeat,
                [pdeltaw](size_t i, size_t j) {
                  return std::abs(*(pdeltaw + i)) > std::abs(*(pdeltaw + j));
                });
      counter_[gid] = 0u;
    }
  }

  int NextFeature(int, const gbm::GBLinearModel &model,
                  int group_idx, const std::vector<GradientPair> &,
                  DMatrix *, float, float) override {
    // k-th selected feature for a group
    auto k = counter_[group_idx]++;
    // stop after either reaching top-N or going through all the features in a group
    if (k >= top_k_ || counter_[group_idx] == model.learner_model_param->num_feature) return -1;
    // note that sorted_idx stores the "long" indices
    const size_t grp_offset = group_idx * model.learner_model_param->num_feature;
    return static_cast<int>(sorted_idx_[grp_offset + k] - grp_offset);
  }

 protected:
  bst_uint top_k_;                      // per-group cap on selections per pass
  std::vector<bst_float> deltaw_;       // univariate weight change per (group, feature)
  std::vector<size_t> sorted_idx_;      // per-group feature order, descending |deltaw_|
  std::vector<bst_uint> counter_;       // selections made so far, per group
  std::vector<std::pair<double, double>> gpair_sums_;  // (sum g*v, sum h*v^2)
};
// Factory: map the selector enum value onto a freshly allocated implementation.
// An unrecognized value is a fatal configuration error.
inline FeatureSelector *FeatureSelector::Create(int choice) {
  switch (choice) {
    case kCyclic:  return new CyclicFeatureSelector();
    case kShuffle: return new ShuffleFeatureSelector();
    case kThrifty: return new ThriftyFeatureSelector();
    case kGreedy:  return new GreedyFeatureSelector();
    case kRandom:  return new RandomFeatureSelector();
    default:
      LOG(FATAL) << "unknown coordinate selector: " << choice;
  }
  return nullptr;  // not reached; keeps compilers happy about the return path
}
} // namespace linear
} // namespace xgboost
|
mkldnn_os.h | /*******************************************************************************
* Copyright 2017 NEC Labs America
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
/** \file
 * handle various compiler/os restrictions */
#ifndef _MKLDNN_OS_H_
#define _MKLDNN_OS_H_
//#include "os_common.hpp" // not available -- we use mkldnn public API only.
#if 1
#if defined(__ve)
#define strnlen strnlen_s
#endif
// How is the restrict keyword handled? (disallow it as you encounter errors, please)
#if defined(_SX)
#elif defined(__ve)
// restrict is allowed
#ifndef __restrict
#define __restrict restrict /* ve/musl/include/stdlib.h uses __restrict !!! */
#endif
#elif defined(__INTEL_COMPILER) || defined(__GNUC__)
#define restrict /*no-restrict*/
#elif defined(WIN32)
// ???
#else
// ???
#endif // restrict keyword handling
// Any restrictions on the alignas attribute?
#ifdef __ve
#define alignas(x) alignas((x) > 16 ? 16 : (x))
#endif
#endif
// ENABLE_OPT_PRAGMAS
// set to 0 to debug pragma-related incorrect assumptions
#if !defined(ENABLE_OPT_PRAGMAS)
//#warning "Unknown system: optimization pragmas NOT USED"
//#define ENABLE_OPT_PRAGMAS 0/*XXX*/
#define ENABLE_OPT_PRAGMAS 1
#endif
// ENABLE_OMP defaults to 1
#if !defined(ENABLE_OMP)
#if defined(_SX)
#elif defined(__ve) // OMP is not yet supported by ncc/nc++
//#define ENABLE_OMP 0 // at Dec. 25th 2017 release, ncc may support OMP
#elif defined(__INTEL_COMPILER)
#elif defined(__GNUC__)
#else
#endif
#if !defined(ENABLE_OMP)
#define ENABLE_OMP 1
#endif
#endif
// -------- compiler-specific pragmas --------
// __ve compile does something with pragma omp, but it is not officially supported,
// so we use C++11 _Pragma to emit pragmas from macros and customize pragmas to
// particular compilers.
//
// Allocation directives:
// VREG : hint that array fits into one simd register
// There may be many conditions on array access!
// ALLOC_ON_VREG : hint that array fits into multiple simd registers
// ALLOC_ON_ADB : hint that array should be "cached" in special memory bank.
//
// Loop directives apply to an IMMEDIATELY FOLLOWING loop:
// ShortLoop : hint that for-loop limit is less than max simd register length
// RETAIN : hint that array should be kept accessible (cached)
// IVDEP : pretend all ptrs are independent (restrict)
//
// TODO: SX pre-loop macros must be SINGLE ones, because sxcc REQUIRES
// multiple #pragma cdir to be combined, comma-separated.
// So you can only use ONE pre-loop macro. If 2 macros,
// compiler docs say **both** will be ignored!
//
// FIXME SX alloc_on_vreg 2nd arg must be a compile-time constant
//
// Oh! ALLOC_ON_VREG cannot "decay" into RETAIN, because syntax is different
// -----------------------------------
//#define BENCHDNN_YPRAGMA(str) do{int ypr=str;}while(0);
// Stringize the arguments and hand them to C++11 _Pragma.
#define BENCHDNN_MPRAGMA(str) _Pragma(str)
#define BENCHDNN_STRINGIZE(...) #__VA_ARGS__
#define PragmaQuote(...) BENCHDNN_MPRAGMA(BENCHDNN_STRINGIZE(__VA_ARGS__))
#if ENABLE_OPT_PRAGMAS && defined(_SX)
// SX preprocessor generates _Pragma(XXX) and sxc++ might be ignoring
// *some*, based on failure to produce some warning messages.
//#warning "SX optimization pragmas IN EFFECT"
# define VREG(...) PragmaQuote(cdir vreg(__VA_ARGS__))
# define ALLOC_ON_VREG(...) PragmaQuote(cdir alloc_on_vreg(__VA_ARGS__))
# define ALLOC_ON_ADB(...) PragmaQuote(cdir alloc_on_adb(__VA_ARGS__))
// Is there a pre-for-loop RETAIN for SX? For now, kludge as on_adb.
# define RETAIN(...) PragmaQuote(cdir on_adb(__VA_ARGS__))
# define RETAIN1st(var,...) PragmaQuote(cdir on_adb(var))
# define ShortLoop() _Pragma("cdir shortloop")
# define ShortLoopTest() /*?*/
# define IVDEP() _Pragma("cdir nodep")
# define UNROLL(x)
# define PRAGMA_UNROLL
#elif ENABLE_OPT_PRAGMAS && defined(__ve)
//# warning "__ve optimization pragmas IN EFFECT"
# define VREG(...) PragmaQuote(_NEC vreg(__VA_ARGS__))
# define ALLOC_ON_VREG(...)
# define ALLOC_ON_ADB(...)
# define RETAIN(...) PragmaQuote(_NEC retain(__VA_ARGS__))
# define RETAIN1st(var,...) PragmaQuote(_NEC retain(var))
# define ShortLoop() _Pragma("_NEC shortloop")
# define ShortLoopTest() _Pragma("_NEC shortloop_reduction")
# define IVDEP() _Pragma("_NEC ivdep")
# define UNROLL(x) PragmaQuote(_NEC unroll(x))
# define PRAGMA_UNROLL PragmaQuote(_NEC unroll(4))
#elif ENABLE_OPT_PRAGMAS && defined(__INTEL_COMPILER)
// restrict keyword requires the "-restrict" CFLAG; __restrict__ works anyway
# define restrict __restrict__
# define IVDEP() _Pragma("ivdep")
# define UNROLL(x) PragmaQuote(unroll(x))
# define PRAGMA_UNROLL PragmaQuote(unroll)
// TODO:
# define VREG(...)
# define ALLOC_ON_VREG(...)
# define ALLOC_ON_ADB(...)
# define RETAIN(...)
# define ShortLoop()
# define ShortLoopTest()
#elif ENABLE_OPT_PRAGMAS && defined(_MSC_VER) && !defined(__clang__) && !defined(__INTEL_COMPILER)
//--------------------------------------------
// taken from MSVC code in mkldnn_thread.hpp
//# warning "MSVC still supports omp 2.0 only"
# define collapse(x)
//# define PRAGMA_OMP_SIMD(...) ... below
//--------------------------------------------
# define UNROLL(x)
# define PRAGMA_UNROLL
# define VREG(...)
# define ALLOC_ON_VREG(...)
# define ALLOC_ON_ADB(...)
# define RETAIN(...)
# define ShortLoop()
# define ShortLoopTest()
#elif ENABLE_OPT_PRAGMAS && defined(__GNUC__)
//#warning "__GNUC optimization pragmas IN EFFECT"
# define VREG(...)
# define ALLOC_ON_VREG(...)
# define ALLOC_ON_ADB(...)
# define RETAIN(...)
# define ShortLoop()
# define ShortLoopTest()
# define IVDEP() _Pragma("GCC ivdep")
# define UNROLL(x) PragmaQuote(GCC unroll x)
# define PRAGMA_UNROLL PragmaQuote(GCC unroll(4))
#else /* A new system might begin by ignoring the optimization pragmas */
# warning "Please check if _Pragma macros can be defined for this platorm"
# define VREG(...)
# define ALLOC_ON_VREG(...)
# define ALLOC_ON_ADB(...)
# define RETAIN(...)
# define ShortLoop()
# define ShortLoopTest()
# define IVDEP()
# define UNROLL(x)
# define PRAGMA_UNROLL
#endif
// -------- OpenMP pragma wrappers --------
#if ENABLE_OMP
# define OMP(...) PragmaQuote(omp __VA_ARGS__)
# if defined(__ve)
//# warning "__ve enabling #pragma omp"
# endif
# if defined(_SX) // no support for "simd" pragmas
# elif defined(_MSC_VER) && !defined(__clang__) && !defined(__INTEL_COMPILER)
# elif defined(__ve)
# define PRAGMASIMD(...) PragmaQuote(simd __VA_ARGS__)
//# warning "__ve (ncc) ignores simd directive in PRAGMA_OMP_SIMD(...)
# define OMPSIMD(...) PragmaQuote(omp __VA_ARGS__)
# define PRAGMA_OMP_SIMD(...) PragmaQuote(omp __VA_ARGS__)
# else // defined(__GNUC) or intel or ...
# define PRAGMASIMD(...) PragmaQuote(simd __VA_ARGS__)
# define OMPSIMD(...) PragmaQuote(omp simd __VA_ARGS__)
# define PRAGMA_OMP_SIMD(...) PragmaQuote(omp simd __VA_ARGS__)
# endif
#endif
// Fallback no-op definitions when OMP/simd wrappers were not defined above.
#ifndef PRAGMASIMD
# define PRAGMASIMD(...)
#endif
#ifndef OMPSIMD
# define OMPSIMD(...)
#endif
#ifndef PRAGMA_OMP_SIMD
# define PRAGMA_OMP_SIMD(...)
#endif
#ifndef OMP
# define OMP(...)
#if defined(REF_LRN_HPP) // mostly ignore: show for cpu_engine compile at least
# warning "not enabling #pragma omp (mkldnn_os.h)"
#endif
#endif
#endif // _MKLDNN_OS_H_
|
bitmap.h | /*!
* Copyright 2014 by Contributors
* \file bitmap.h
* \brief a simple implement of bitmap
* NOTE: bitmap is only threadsafe per word access, remember this when using bitmap
* \author Tianqi Chen
*/
#ifndef XGBOOST_UTILS_BITMAP_H_
#define XGBOOST_UTILS_BITMAP_H_
#include <vector>
#include "./utils.h"
#include "./omp.h"
// GLC parallel lambda primitive
#include <core/parallel/lambda_omp.hpp>
#include <core/parallel/pthread_tools.hpp>
namespace xgboost {
namespace utils {
/*! \brief bit map that contains set of bit indicators */
struct BitMap {
  /*! \brief internal data structure: one uint32_t word holds 32 indicators */
  std::vector<uint32_t> data;
  /*!
   * \brief resize the bitmap to be certain size
   * \param size the size of bitmap
   */
  inline void Resize(size_t size) {
    data.resize((size + 31U) >> 5, 0);
  }
  /*!
   * \brief query the i-th position of bitmap
   * \param i the position in
   */
  inline bool Get(size_t i) const {
    return (data[i >> 5] >> (i & 31U)) & 1U;
  }
  /*!
   * \brief set i-th position to true
   * \param i position index
   */
  inline void SetTrue(size_t i) {
    // 1U, not 1: left-shifting a signed int into bit 31 is undefined behavior
    data[i >> 5] |= (1U << (i & 31U));
  }
  /*! \brief initialize the value of bit map from vector of bool*/
  inline void InitFromBool(const std::vector<int> &vec) {
    this->Resize(vec.size());
    // number of fully populated 32-bit words; pack those in parallel
    bst_omp_uint nsize = static_cast<bst_omp_uint>(vec.size() / 32);
    turi::parallel_for(0, nsize, [&](size_t i) {
      uint32_t res = 0;
      for (int k = 0; k < 32; ++k) {
        // cast before shifting: (int)1 << 31 would be undefined behavior
        uint32_t bit = static_cast<uint32_t>(vec[(i << 5) | k]);
        res |= (bit << k);
      }
      data[i] = res;
    });
    // Handle the trailing partial word bit by bit. Resize() does not clear
    // previously used storage, so reset the stale last word first.
    // (The old code compared the *word* count nsize against the *bit* count
    // vec.size() and restarted the tail loop at bit nsize, which was only
    // accidentally correct and rescanned almost the whole vector.)
    const size_t nfull = static_cast<size_t>(nsize) << 5;
    if (nfull != vec.size()) {
      data.back() = 0;
      for (size_t i = nfull; i < vec.size(); ++i) {
        if (vec[i]) this->SetTrue(i);
      }
    }
  }
  /*! \brief clear the bitmap, set all places to false */
  inline void Clear(void) {
    std::fill(data.begin(), data.end(), 0U);
  }
};
} // namespace utils
} // namespace xgboost
#endif // XGBOOST_UTILS_BITMAP_H_
|
random_par.c |
//**********************************************************
// Parallel Pseudo random number generator:
//
// USAGE:
//
// The pseudo random sequence is seeded with a range
//
// void seed(lower_limit, higher_limit)
//
// and then subsequent calls to the random number generator
// generates values in the sequence:
//
// double drandom()
//
// A leap frog method is used to assure non-overlapping
// sequences for each thread.
//
// Note: these functions are to be called from inside the
// the OpenMP parallel region that will use the sequence.
//
// BACKGROUND:
//
// We are using a modulus of 2^31-1 and a multiplier from
// the Hoaglin LCGs in the following article:
//
// http://random.mat.sbg.ac.at/~charly/server/node3.html#lcg
//
// we are using a zero addend just to make the leap frog
// algorithm easier to implement.
//
// HISTORY:
//
// 9/2008: Written by Tim Mattson by cutting and pasting
// from a generator written by Larry Meadows
//
//***********************************************************
#include <omp.h>
/* LCG parameters: modulus 2^31-1 with a Hoaglin multiplier (see header notes) */
static unsigned long long MULTIPLIER = 764261123;
static unsigned long long PMOD = 2147483647;
static unsigned long long mult_n;  /* leapfrog multiplier: MULTIPLIER^nthreads mod PMOD */
double random_low, random_hi;      /* output range configured by seed() */
#define MAX_THREADS 128
static unsigned long long pseed[MAX_THREADS][4]; //[4] to pad to cache line
//size to avoid false sharing
/* per-thread LCG state (threadprivate below) */
unsigned long long random_last = 0;
#pragma omp threadprivate(random_last)
/*
 * Return the next pseudo-random double in the range configured by seed().
 * Must be called inside the same OpenMP parallel region that called seed():
 * it advances the thread-private LCG state random_last.
 */
double drandom()
{
   /* advance the leapfrog LCG: next = (mult_n * last) mod PMOD */
   unsigned long long random_next = (mult_n * random_last) % PMOD;
   random_last = random_next;

   /* map the integer state onto [random_low, random_hi] */
   double scale = (double)random_next / (double)PMOD;
   return scale * (random_hi - random_low) + random_low;
}
//
// set the seed, the multiplier and the range
//
void seed(double low_in, double hi_in)
{
   int i, id, nthreads;
   unsigned long long iseed;
   id = omp_get_thread_num();

   // One thread initializes the shared state; the implicit barrier at the
   // end of "single" makes pseed[] visible to all threads before they read it.
#pragma omp single
   {
      // normalize the range so low <= hi regardless of argument order
      if(low_in < hi_in)
      {
         random_low = low_in;
         random_hi = hi_in;
      }
      else
      {
         random_low = hi_in;
         random_hi = low_in;
      }

      //
      // The Leapfrog method ... adjust the multiplier so you stride through
      // the sequence by increments of "nthreads" and adjust seeds so each
      // thread starts with the right offset
      //
      nthreads = omp_get_num_threads();
      iseed = PMOD/MULTIPLIER; // just pick a reasonable seed
      pseed[0][0] = iseed;
      mult_n = MULTIPLIER;
      for (i = 1; i < nthreads; ++i)
      {
         iseed = (unsigned long long)((MULTIPLIER * iseed) % PMOD);
         pseed[i][0] = iseed;
         mult_n = (mult_n * MULTIPLIER) % PMOD;
      }
   }
   // every thread picks up its own starting offset
   // NOTE(review): assumes id < MAX_THREADS (128) -- confirm team size is bounded
   random_last = (unsigned long long) pseed[id][0];
}
|
GB_unop__sinh_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__sinh_fc64_fc64)
// op(A') function: GB (_unop_tran__sinh_fc64_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = csinh (aij)
// type of the A matrix entries
#define GB_ATYPE \
    GxB_FC64_t

// type of the C result entries
#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

// access the p-th entry of C
#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = csinh (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = aij ; \
    Cx [pC] = csinh (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_SINH || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__sinh_fc64_fc64)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: every one of the anz entries is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = csinh (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;   // skip entries absent from the bitmap
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = csinh (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__sinh_fc64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template does the work, driven by the
    // GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
|
couple_waves.c | /*
* couple_waves.c
*
* Couple 3 waves contained in C99 complex arrays.
*
*/
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "mytypes.h"
#include "light.h"
#include "pf3dbench.h"
#include "util.h"
#include "runparm.h"
#include "pf3dbenchvars.h"
/*
 * Couple the two light waves t0 and t2 through the plasma density
 * perturbation denp at every cell of the local (nxl, nyl, nzl) grid.
 * t0 and t2 are updated in place; denp is read-only.
 */
void couple_z(rcomplex * restrict t0, rcomplex * restrict t2,
              rcomplex * restrict denp)
{
    real c20, cslamt, snlamt, r_zlam, r, fratio;
    real r_fratio, cratio, zac2;
    double zlam, c2re, c2im;
    rcomplex a0t, a2t, c2, z3;
    int ix, iy, iz;
    long it0;

    cratio= 1.0e3;
    fratio = SQRT(0.9);
    r_fratio = ONE/fratio;
    c20 = 0.25 * cratio * r_fratio;
    start_omp_time();
#ifdef _OPENMP
    /* #pragma omp parallel for simd aligned(t0,t2:64) simdlen(real_lane_count) COLLAPSE(3) private(c2, a0t, a2t, zlam, r_zlam, snlamt, cslamt, r, z3, it0, zac2, c2re, c2im) */
    /* NOTE(review): ix is not in the private clause; the inner simd loop
       below uses the function-scope ix from multiple threads -- looks like a
       potential race unless the compiler privatizes it. TODO confirm. */
#pragma omp parallel for COLLAPSE(2) private(c2, a0t, a2t, zlam, r_zlam, snlamt, cslamt, r, z3, it0, zac2, c2re, c2im)
#endif
    for (iz=0; iz<nzl; iz++) {
        for (iy=0; iy<nyl; iy++) {
#ifdef _OPENMP
#pragma omp simd aligned(t0,t2:64) simdlen(real_lane_count)
#endif
            for (ix=0; ix<nxl; ix++) {
                it0= CELTNDX(ix,iy,iz);
                c2 = c20 * denp[it0];
                c2re = CREAL(c2); c2im = CIMAG(c2);
                /* compute lamda = sqrt(|c2|^2) using doubles
                   to avoid underflow. */
                zlam = c2re*c2re + c2im*c2im + 1.0e-34;
                zlam = sqrt(zlam);
                snlamt = SIN(zlam * dt * HALF);
                cslamt = COS(zlam * dt * HALF);
                a0t = t0[it0];
                a2t = t2[it0] * fratio;
                /* normalize c2 */
                r_zlam= ONE/(real)zlam;
                c2 *= r_zlam;
                /* compute the square of c2 after scaling */
                zac2 = zabs2(c2);
                /* compute new A0 */
                z3 = c2 * a2t * snlamt ;
                t0[it0] = a0t * cslamt - IREAL * z3;
                /* compute new A2 */
                r = zac2 * cslamt;
                z3 = CONJ(c2) * a0t * snlamt;
                t2[it0] = ( a2t * r - IREAL * z3 ) * r_fratio;
            } /* end for-loop */
        } /* end for-loop */
    }
    stop_omp_time();
}
|
stencil_opt2.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include "malloc2D.h"
#include "timer.h"
#define SWAP_PTR(xnew,xold,xtmp) (xtmp=xnew, xnew=xold, xold=xtmp)
/*
 * 5-point stencil benchmark: times initialization, a cache-flush pass,
 * and the stencil sweep itself over 10000 iterations.
 */
int main(int argc, char *argv[])
{
#pragma omp parallel
#pragma omp master
   printf("Running with %d thread(s)\n",omp_get_num_threads());

   struct timespec tstart_init, tstart_flush, tstart_stencil, tstart_total;
   /* BUG FIX: these accumulators are updated with "+=" below, so they must
      start at zero -- reading an uninitialized double is undefined behavior
      and produced garbage timing output. */
   double init_time = 0.0, flush_time = 0.0, stencil_time = 0.0, total_time = 0.0;

   int imax=2002, jmax = 2002;

   double** xtmp;
   double** x = malloc2D(jmax, imax);
   double** xnew = malloc2D(jmax, imax);
   /* oversized scratch buffer, written each iteration to evict x/xnew from cache */
   int *flush = (int *)malloc(jmax*imax*sizeof(int)*4);

   cpu_timer_start(&tstart_total);
   cpu_timer_start(&tstart_init);
#pragma omp parallel for
   for (int j = 0; j < jmax; j++){
      for (int i = 0; i < imax; i++){
         xnew[j][i] = 0.0;
         x[j][i] = 5.0;
      }
   }
   /* hot spot in the middle of the domain */
#pragma omp parallel for
   for (int j = jmax/2 - 5; j < jmax/2 + 5; j++){
      for (int i = imax/2 - 5; i < imax/2 -1; i++){
         x[j][i] = 400.0;
      }
   }
   init_time += cpu_timer_stop(tstart_init);

   for (int iter = 0; iter < 10000; iter++){
      cpu_timer_start(&tstart_flush);
#pragma omp parallel for
      for (int l = 1; l < jmax*imax*4; l++){
         flush[l] = 1;   /* int buffer: store an int, not the double literal 1.0 */
      }
      flush_time += cpu_timer_stop(tstart_flush);
      cpu_timer_start(&tstart_stencil);
#pragma omp parallel for
      for (int j = 1; j < jmax-1; j++){
         for (int i = 1; i < imax-1; i++){
            xnew[j][i] = ( x[j][i] + x[j][i-1] + x[j][i+1] + x[j-1][i] + x[j+1][i] )/5.0;
         }
      }
      stencil_time += cpu_timer_stop(tstart_stencil);
      SWAP_PTR(xnew, x, xtmp);
      if (iter%1000 == 0) printf("Iter %d\n",iter);
   }
   total_time += cpu_timer_stop(tstart_total);

   printf("Timing is init %f flush %f stencil %f total %f\n",
          init_time,flush_time,stencil_time,total_time);

   free(x);
   free(xnew);
   free(flush);
   return 0;
}
|
5.ordered.c | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <omp.h> /* OpenMP */
#define N 16
/* Q1: How can you avoid the intermixing of printf messages in the two */
/* loops? */
/* Q2: How can you ensure that a thread always executes two consecutive */
/* in order during the execution of the first loop? */
int main()
{
  int i;
  omp_set_num_threads(8);
#pragma omp parallel
  {
    /* Loop 1: chunks of 2 iterations handed out dynamically. The "ordered"
       clause only *permits* ordered regions; none is used in this loop, so
       prints from different threads may interleave freely. */
#pragma omp for schedule(dynamic, 2) ordered//nowait
    for (i=0; i < N; i++) {
      int id=omp_get_thread_num();
      printf("Loop 1 - (%d) gets iteration %d\n",id,i);
    }
    /* Loop 2: here the ordered region forces the printf calls to execute
       in iteration order, one at a time. */
#pragma omp for schedule(dynamic) ordered
    for (i=0; i < N; i++) {
      int id=omp_get_thread_num();
#pragma omp ordered
      printf("Loop 2 - (%d) gets iteration %d\n",id,i);
    }
  }
  return 0;
}
|
DRB079-taskdep3-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
tasks with depend clauses to ensure execution order, no data races.
*/
#include <stdio.h>
#include <assert.h>
#include <unistd.h>
int main()
{
  int i=0, j, k;
#pragma omp parallel
#pragma omp single
  {
    /* producer task: depend(out:i) makes it the predecessor of both readers */
#pragma omp task depend (out:i)
    {
      sleep(3);
      i = 1;
    }
    /* the reader tasks may not start until the producer finishes, so both
       are guaranteed to observe i == 1 -- no data race */
#pragma omp task depend (in:i)
    j =i;
#pragma omp task depend (in:i)
    k =i;
  }
  /* the implicit barrier at the end of the single/parallel region waits for
     all tasks, so j and k are fully written before they are read here */
  printf ("j=%d k=%d\n", j, k);
  assert (j==1 && k==1);
  return 0;
}
|
a.8.1.c | /* { dg-do compile } */
#include <math.h>
void
a8 (int n, int m, float *a, float *b, float *y, float *z)
{
int i;
#pragma omp parallel
{
#pragma omp for nowait
for (i = 1; i < n; i++)
b[i] = (a[i] + a[i - 1]) / 2.0;
#pragma omp for nowait
for (i = 0; i < m; i++)
y[i] = sqrt (z[i]);
}
}
|
mesh.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include "utils.h"
/* Set the OpenMP thread count; non-positive values leave it unchanged. */
void set_num_threads(int num_threads)
{
  if (num_threads <= 0) return;
  omp_set_num_threads(num_threads);
}
/* Count the threads in a parallel team: every member atomically bumps
   a shared counter once, so the final value is the team size. */
int get_num_threads()
{
  int count = 0;
#pragma omp parallel
  {
#pragma omp atomic
    count++;
  }
  return count;
}
int assign_cic(FLOAT* mesh, const int* nmesh, const FLOAT* positions, const FLOAT* weights, size_t npositions) {
  // Assign positions (weights) to mesh: cloud-in-cell, each particle's
  // weight is split linearly over the 8 surrounding grid points.
  // Asumes periodic boundaries
  // Positions must be in [0,nmesh-1]
  // Returns 0 on success.
  const size_t nmeshz = nmesh[2];              // flat-index stride for one step in y
  const size_t nmeshyz = nmesh[2]*nmesh[1];    // flat-index stride for one step in x
  for (size_t ii=0; ii<npositions; ii++) {
    const FLOAT weight = weights[ii];
    const FLOAT *pos = &(positions[ii*NDIM]);
    // lower-corner cell indices, wrapped periodically
    int ix0 = ((int) pos[0]) % nmesh[0];
    int iy0 = ((int) pos[1]) % nmesh[1];
    int iz0 = ((int) pos[2]) % nmesh[2];
    //int ix0 = (int) pos[0];
    //int iy0 = (int) pos[1];
    //int iz0 = (int) pos[2];
    //if (ix0<0 || ix0>=nmesh[0] || iy0<0 || iy0>=nmesh[1] || iz0<0 || iz0>=nmesh[2]) {
    //  printf("Index out of range: (ix,iy,iz) = (%d,%d,%d) for (%.3f,%.3f,%.3f)\n",ix0,iy0,iz0,pos[0],pos[1],pos[2]);
    //  return -1;
    //}
    // fractional offsets inside the cell, in [0,1)
    FLOAT dx = pos[0] - ix0;
    FLOAT dy = pos[1] - iy0;
    FLOAT dz = pos[2] - iz0;
    // flat offsets of the upper neighbor along each axis (periodic wrap)
    size_t ixp = nmeshyz*((ix0+1) % nmesh[0]);
    size_t iyp = nmeshz*((iy0+1) % nmesh[1]);
    size_t izp = (iz0+1) % nmesh[2];
    // convert the lower-corner x/y indices to flat offsets (z stride is 1)
    ix0 *= nmeshyz;
    iy0 *= nmeshz;
    // distribute the weight to the 8 corners with trilinear coefficients
    mesh[ix0+iy0+iz0] += (1-dx)*(1-dy)*(1-dz)*weight;
    mesh[ix0+iy0+izp] += (1-dx)*(1-dy)*dz*weight;
    mesh[ix0+iyp+iz0] += (1-dx)*dy*(1-dz)*weight;
    mesh[ix0+iyp+izp] += (1-dx)*dy*dz*weight;
    mesh[ixp+iy0+iz0] += dx*(1-dy)*(1-dz)*weight;
    mesh[ixp+iy0+izp] += dx*(1-dy)*dz*weight;
    mesh[ixp+iyp+iz0] += dx*dy*(1-dz)*weight;
    mesh[ixp+iyp+izp] += dx*dy*dz*weight;
  }
  return 0;
}
// Convolve mesh (in place) with the given kernel by brute force,
// assuming periodic boundaries in all three dimensions.  The result is
// normalised by the kernel sum, so a non-normalised kernel is fine.
//
//   mesh     flattened [nmesh[0]][nmesh[1]][nmesh[2]] array, row-major
//            in (x,y,z); overwritten with the smoothed field
//   kernel   flattened [nkernel[0]][nkernel[1]][nkernel[2]] stencil,
//            same (x,y,z) row-major layout; each dimension must be odd
//            so the stencil has a well-defined centre
//
// Returns 0 on success, -1 on error (even kernel dimension, or
// allocation failure).
int convolve(FLOAT* mesh, const int* nmesh, const FLOAT* kernel, const int* nkernel) {
  // Validate the kernel shape up front, before any allocation.
  // rad[] is an integer half-width, so store it as int (it was
  // previously a FLOAT holding an integral value).
  int rad[NDIM];
  for (int idim=0; idim<NDIM; idim++) {
    if (nkernel[idim] % 2 == 0) {
      printf("Kernel size must be odd\n");
      return -1;
    }
    rad[idim] = nkernel[idim] / 2;
  }
  // Kernel sum, used to normalise the output.
  FLOAT sumw = 0;
  size_t size = nkernel[0]*nkernel[1]*nkernel[2];
  const size_t nkernelz = nkernel[2];
  const size_t nkernelyz = nkernel[2]*nkernel[1];
  for (size_t ii=0; ii<size; ii++) sumw += kernel[ii];
  // The convolution cannot be done in place: take a copy of the data
  // we're smoothing.
  size = nmesh[0]*nmesh[1]*nmesh[2];
  const size_t nmeshz = nmesh[2];
  const size_t nmeshyz = nmesh[2]*nmesh[1];
  FLOAT *ss = (FLOAT *) malloc(size*sizeof(FLOAT));
  if (ss == NULL) return -1;  // was previously unchecked
  for (size_t ii=0; ii<size; ii++) ss[ii] = mesh[ii];
  // Each output cell is independent, so parallelise over x-planes.
  #pragma omp parallel for shared(mesh,ss,kernel)
  for (int ix=0; ix<nmesh[0]; ix++) {
    for (int iy=0; iy<nmesh[1]; iy++) {
      for (int iz=0; iz<nmesh[2]; iz++) {
        FLOAT sumd = 0;
        for (int dx=-rad[0]; dx<=rad[0]; dx++) {
          // Periodic wrap of the neighbour index, pre-flattened.
          size_t iix = nmeshyz*((ix+dx+nmesh[0]) % nmesh[0]);
          size_t jjx = nkernelyz*(rad[0]+dx);
          for (int dy=-rad[1]; dy<=rad[1]; dy++) {
            size_t iiy = nmeshz*((iy+dy+nmesh[1]) % nmesh[1]);
            size_t jjy = nkernelz*(rad[1]+dy);
            for (int dz=-rad[2]; dz<=rad[2]; dz++) {
              int iiz = (iz+dz+nmesh[2]) % nmesh[2];
              size_t ii = iix+iiy+iiz;
              size_t jj = jjx+jjy+rad[2]+dz;
              sumd += kernel[jj]*ss[ii];
            }
          }
        }
        mesh[nmeshyz*ix+nmeshz*iy+iz] = sumd/sumw;
      }
    }
  }
  free(ss);
  return 0;
}
// Gaussian-smooth mesh (in place) by brute-force convolution: build a
// truncated Gaussian stencil, cut off at nsigmas standard deviations,
// and hand it to convolve().
//
//   smoothing_radius   per-axis standard deviation in box units
//                      (converted to grid units via nmesh)
//
// Returns 0 on success, -1 on allocation failure or convolve() error.
int smooth_gaussian(FLOAT* mesh, const int* nmesh, const FLOAT* smoothing_radius, const FLOAT nsigmas) {
  // The number of grid points to search: >= nsigmas * smoothing_radius.
  int rad[NDIM], nkernel[NDIM];
  FLOAT fact[NDIM];
  for (int idim=0; idim<NDIM; idim++) {
    rad[idim] = (int) (nsigmas*smoothing_radius[idim]*nmesh[idim] + 1.0);
    nkernel[idim] = 2*rad[idim] + 1;  // odd by construction, as convolve() requires
    // 1/sigma^2 with sigma expressed in grid units.
    fact[idim] = 1.0/(nmesh[idim]*smoothing_radius[idim])/(nmesh[idim]*smoothing_radius[idim]);
  }
  FLOAT *kernel = (FLOAT *) malloc(nkernel[0]*nkernel[1]*nkernel[2]*sizeof(FLOAT));
  if (kernel == NULL) return -1;  // was previously unchecked
  #pragma omp parallel for shared(kernel)
  for (int dx=-rad[0]; dx<=rad[0]; dx++) {
    for (int dy=-rad[1]; dy<=rad[1]; dy++) {
      for (int dz=-rad[2]; dz<=rad[2]; dz++) {
        // Row-major (x,y,z) index matching the layout convolve() reads:
        // the y stride is nkernel[2].  The original used nkernel[0]
        // here, which is wrong whenever the kernel is anisotropic
        // (nkernel[0] != nkernel[2]).
        size_t ii = nkernel[1]*nkernel[2]*(rad[0]+dx)+nkernel[2]*(dy+rad[1])+(dz+rad[2]);
        FLOAT r2 = fact[0]*dx*dx+fact[1]*dy*dy+fact[2]*dz*dz;
        if (r2 < nsigmas*nsigmas) kernel[ii] = exp(-r2/2);
        else kernel[ii] = 0;
      }
    }
  }
  // Propagate convolve()'s status (its return value was ignored before).
  int status = convolve(mesh,nmesh,kernel,nkernel);
  free(kernel);
  return status;
}
/*
void smooth_fft_gaussian(FLOAT *mesh, const int* nmesh, const FLOAT* smoothing_radius) {
// Performs a Gaussian smoothing using the FFTW library (v3), assumed to be
// installed already. Rf is assumed to be in box units.
// Make temporary vectors. FFTW uses double precision.
size_t size = nmesh[0]*nmesh[1]*nmesh[2];
fftw_complex * meshk = (fftw_complex *) malloc(nmesh[0]*nmesh[1]*(nmesh[2]/2+1)*sizeof(fftw_complex));
// Generate the FFTW plan files.
fftw_init_threads();
fftw_plan_with_nthreads(omp_get_max_threads());
fftw_plan fplan = fftw_plan_dft_r2c_3d(nmesh[0],nmesh[1],nmesh[2],mesh,meshk,FFTW_ESTIMATE);
fftw_plan iplan = fftw_plan_dft_c2r_3d(nmesh[0],nmesh[1],nmesh[2],meshk,mesh,FFTW_ESTIMATE);
fftw_execute(fplan);
// Now multiply by the smoothing filter.
FLOAT fact[NDIM];
for (int idim=0; idim<NDIM; idim++) fact[idim] = 0.5*smoothing_radius[idim]*smoothing_radius[idim]*(2*M_PI)*(2*M_PI);
#pragma omp parallel for shared(meshk)
for (int ix=0; ix<nmesh[0]; ix++) {
int iix = (ix<=nmesh[0]/2) ? ix : ix-nmesh[0];
for (int iy=0; iy<nmesh[1]; iy++) {
int iiy = (iy<=nmesh[1]/2) ? iy : iy-nmesh[1];
for (int iz=0; iz<nmesh[2]/2+1; iz++) {
int iiz = iz;
size_t ip = nmesh[1]*(nmesh[2]/2+1)*ix+(nmesh[2]/2+1)*iy+iz;
FLOAT smth = exp(-fact[0]*iix*iix+fact[1]*iiy*iiy+fact[2]*iiz*iiz));
meshk[ip][0] *= smth;
meshk[ip][1] *= smth;
}
}
}
meshk[0][0] = meshk[0][1] = 0; // Set the mean to zero.
fftw_execute(iplan);
#pragma omp parallel for shared(mesh)
for (size_t ii=0; ii<size; ii++) mesh[ii] /= size;
fftw_destroy_plan(fplan);
fftw_destroy_plan(iplan);
fftw_cleanup_threads();
free(meshk);
}
*/
// Interpolate the displacement (gradient) field of the scalar field on
// `mesh` to each particle position: a second-order centred finite
// difference supplies the gradient at grid points, and CIC (trilinear)
// weights pull it onto the particles.
//
//   mesh       flattened [nmesh[0]][nmesh[1]][nmesh[2]] scalar field
//   boxsize    physical box side lengths; output shifts are scaled to
//              box units via cell[] below
//   positions  npositions*NDIM coordinates in grid units, in [0,nmesh-1]
//   shifts     output, npositions*NDIM displacement components
//
// Returns 0 on success, -1 if flag was raised (currently unreachable:
// the range check that sets flag is commented out).
int read_finite_difference_cic(const FLOAT* mesh, const int* nmesh, const FLOAT* boxsize, const FLOAT* positions, FLOAT* shifts, size_t npositions) {
// Computes the displacement field from mesh using second-order accurate
// finite difference and shifts the data and randoms.
// The displacements are pulled from the grid onto the positions of the
// particles using CIC.
// Positions must be in [0,nmesh-1]
// Output is in boxsize unit
const size_t nmeshz = nmesh[2];
const size_t nmeshyz = nmesh[2]*nmesh[1];
FLOAT cell[NDIM];
// The factor 2 is the denominator of the centred difference
// (f[i+1]-f[i-1])/(2h) with h = boxsize/nmesh, folded into cell[].
for (int idim=0; idim<NDIM; idim++) cell[idim] = 2.0*boxsize[idim]/nmesh[idim];
int flag = 0;
// Each particle writes only its own shifts[] slot, so the loop is
// safely parallel; flag is only ever read (the writes are commented
// out below).
#pragma omp parallel for shared(mesh,positions,shifts)
for (size_t ii=0; ii<npositions; ii++) {
if (flag) continue;
// This is written out in gory detail both to make it easier to
// see what's going on and to encourage the compiler to optimize
// and vectorize the code as much as possible.
const FLOAT *pos = &(positions[ii*NDIM]);
// Base cell of the particle (periodic wrap for the pos == nmesh edge).
int ix0 = ((int) pos[0]) % nmesh[0];
int iy0 = ((int) pos[1]) % nmesh[1];
int iz0 = ((int) pos[2]) % nmesh[2];
//int ix0 = (int) pos[0];
//int iy0 = (int) pos[1];
//int iz0 = (int) pos[2];
//if (ix0<0 || ix0>=nmesh[0] || iy0<0 || iy0>=nmesh[1] || iz0<0 || iz0>=nmesh[2]) {
// printf("Index out of range: (ix,iy,iz) = (%d,%d,%d) for (%.3f,%.3f,%.3f)\n",ix0,iy0,iz0,pos[0],pos[1],pos[2]);
// flag = 1;
// continue;
//}
// Trilinear (CIC) weights within the cell.
FLOAT dx = pos[0] - ix0;
FLOAT dy = pos[1] - iy0;
FLOAT dz = pos[2] - iz0;
// Flattened offsets of the +1, +2 and -1 neighbours along each axis;
// +2 and -1 are needed because the centred difference at corner i+1
// reads cells i and i+2.
size_t ixp = nmeshyz*((ix0+1) % nmesh[0]);
size_t ixpp = nmeshyz*((ix0+2) % nmesh[0]);
size_t ixm = nmeshyz*((ix0-1+nmesh[0]) % nmesh[0]);
size_t iyp = nmeshz*((iy0+1) % nmesh[1]);
size_t iypp = nmeshz*((iy0+2) % nmesh[1]);
size_t iym = nmeshz*((iy0-1+nmesh[1]) % nmesh[1]);
size_t izp = (iz0+1) % nmesh[2];
size_t izpp = (iz0+2) % nmesh[2];
size_t izm = (iz0-1+nmesh[2]) % nmesh[2];
ix0 *= nmeshyz;
iy0 *= nmeshz;
// Accumulate the centred-difference gradient at each of the 8 cell
// corners, weighted by that corner's CIC coefficient wt.
FLOAT px,py,pz,wt;
wt = (1-dx)*(1-dy)*(1-dz);
px = (mesh[ixp+iy0+iz0]-mesh[ixm+iy0+iz0])*wt;
py = (mesh[ix0+iyp+iz0]-mesh[ix0+iym+iz0])*wt;
pz = (mesh[ix0+iy0+izp]-mesh[ix0+iy0+izm])*wt;
wt = dx*(1-dy)*(1-dz);
px += (mesh[ixpp+iy0+iz0]-mesh[ix0+iy0+iz0])*wt;
py += (mesh[ixp+iyp+iz0]-mesh[ixp+iym+iz0])*wt;
pz += (mesh[ixp+iy0+izp]-mesh[ixp+iy0+izm])*wt;
wt = (1-dx)*dy*(1-dz);
px += (mesh[ixp+iyp+iz0]-mesh[ixm+iyp+iz0])*wt;
py += (mesh[ix0+iypp+iz0]-mesh[ix0+iy0+iz0])*wt;
pz += (mesh[ix0+iyp+izp]-mesh[ix0+iyp+izm])*wt;
wt = (1-dx)*(1-dy)*dz;
px += (mesh[ixp+iy0+izp]-mesh[ixm+iy0+izp])*wt;
py += (mesh[ix0+iyp+izp]-mesh[ix0+iym+izp])*wt;
pz += (mesh[ix0+iy0+izpp]-mesh[ix0+iy0+iz0])*wt;
wt = dx*dy*(1-dz);
px += (mesh[ixpp+iyp+iz0]-mesh[ix0+iyp+iz0])*wt;
py += (mesh[ixp+iypp+iz0]-mesh[ixp+iy0+iz0])*wt;
pz += (mesh[ixp+iyp+izp]-mesh[ixp+iyp+izm])*wt;
wt = dx*(1-dy)*dz;
px += (mesh[ixpp+iy0+izp]-mesh[ix0+iy0+izp])*wt;
py += (mesh[ixp+iyp+izp]-mesh[ixp+iym+izp])*wt;
pz += (mesh[ixp+iy0+izpp]-mesh[ixp+iy0+iz0])*wt;
wt = (1-dx)*dy*dz;
px += (mesh[ixp+iyp+izp]-mesh[ixm+iyp+izp])*wt;
py += (mesh[ix0+iypp+izp]-mesh[ix0+iy0+izp])*wt;
pz += (mesh[ix0+iyp+izpp]-mesh[ix0+iyp+iz0])*wt;
wt = dx*dy*dz;
px += (mesh[ixpp+iyp+izp]-mesh[ix0+iyp+izp])*wt;
py += (mesh[ixp+iypp+izp]-mesh[ixp+iy0+izp])*wt;
pz += (mesh[ixp+iyp+izpp]-mesh[ixp+iyp+iz0])*wt;
FLOAT *sh = &(shifts[ii*NDIM]);
//px *= boxsize[0]*boxsize[0];
//py *= boxsize[0]*boxsize[0];
//pz *= boxsize[0]*boxsize[0];
// Divide by 2h (see cell[] above) to turn differences into gradients.
sh[0] = px/cell[0];
sh[1] = py/cell[1];
sh[2] = pz/cell[2];
}
if (flag) return -1;
return 0;
}
// Interpolate the scalar field on `mesh` to each particle position with
// CIC (trilinear) weights — the "gather" counterpart of assign_cic.
//
//   positions  npositions*NDIM coordinates in grid units, in [0,nmesh-1]
//   shifts     output, ONE value per particle (shifts[ii], stride 1 —
//              unlike positions, which has stride NDIM)
//
// Returns 0 on success, -1 if flag was raised (currently unreachable:
// the range check that would set it is commented out).
int read_cic(const FLOAT* mesh, const int* nmesh, const FLOAT* positions, FLOAT* shifts, size_t npositions) {
// Positions must be in [0,nmesh-1]
const size_t nmeshz = nmesh[2];
const size_t nmeshyz = nmesh[2]*nmesh[1];
int flag = 0;
// Each iteration writes only shifts[ii]; flag is only ever read here.
#pragma omp parallel for shared(mesh,positions,shifts,flag)
for (size_t ii=0; ii<npositions; ii++) {
if (flag) continue;
const FLOAT *pos = &(positions[ii*NDIM]);
// Base cell (periodic wrap for the pos == nmesh edge case).
int ix0 = ((int) pos[0]) % nmesh[0];
int iy0 = ((int) pos[1]) % nmesh[1];
int iz0 = ((int) pos[2]) % nmesh[2];
//int ix0 = (int) pos[0];
//int iy0 = (int) pos[1];
//int iz0 = (int) pos[2];
//if (ix0<0 || ix0>=nmesh[0] || iy0<0 || iy0>=nmesh[1] || iz0<0 || iz0>=nmesh[2]) {
// printf("Index out of range: (ix,iy,iz) = (%d,%d,%d) for (%.3f,%.3f,%.3f)\n",ix0,iy0,iz0,pos[0],pos[1],pos[2]);
// flag = 1;
// continue;
//}
// Trilinear weights within the cell.
FLOAT dx = pos[0] - ix0;
FLOAT dy = pos[1] - iy0;
FLOAT dz = pos[2] - iz0;
// Flattened offsets of the +1 neighbour along each axis (periodic).
size_t ixp = nmeshyz*((ix0+1) % nmesh[0]);
size_t iyp = nmeshz*((iy0+1) % nmesh[1]);
size_t izp = (iz0+1) % nmesh[2];
ix0 *= nmeshyz;
iy0 *= nmeshz;
// Gather from the 8 corners of the enclosing cell.
FLOAT px;
px = mesh[ix0+iy0+iz0]*(1-dx)*(1-dy)*(1-dz);
px += mesh[ix0+iy0+izp]*(1-dx)*(1-dy)*dz;
px += mesh[ix0+iyp+iz0]*(1-dx)*dy*(1-dz);
px += mesh[ix0+iyp+izp]*(1-dx)*dy*dz;
px += mesh[ixp+iy0+iz0]*dx*(1-dy)*(1-dz);
px += mesh[ixp+iy0+izp]*dx*(1-dy)*dz;
px += mesh[ixp+iyp+iz0]*dx*dy*(1-dz);
px += mesh[ixp+iyp+izp]*dx*dy*dz;
shifts[ii] = px;
}
if (flag) return -1;
return 0;
}
/*
int copy(FLOAT* input_array, FLOAT* output_array, const size_t size) {
#pragma omp parallel for schedule(dynamic) shared(input_array, output_array)
for (size_t ii=0; ii<size; ii++) output_array[ii] = input_array[ii];
return 0;
}
*/
// Element-wise copy of input_array into output_array, parallelised with
// an explicit static chunk so each thread handles large contiguous
// runs.  Always returns 0.
int copy(FLOAT* input_array, FLOAT* output_array, const size_t size) {
  const int block = 100000;
  #pragma omp parallel for schedule(static, block) shared(input_array, output_array)
  for (size_t idx = 0; idx < size; idx++) {
    output_array[idx] = input_array[idx];
  }
  return 0;
}
// Multiply (exp == 1), divide (exp == -1), or raise to a general power
// (any other exp) each mesh cell by the sum of three per-axis
// coordinate values.
//
//   coords  three arrays packed back to back, applied along z, y and x
//           in this order: z values first (nmesh[2] entries), then y
//           (nmesh[1]), then x (nmesh[0])
//
// The +-1 exponents get dedicated loops so the hot path uses a plain
// multiply/divide instead of a POW call.  Always returns 0.
int prod_sum(FLOAT* mesh, const int* nmesh, const FLOAT* coords, const int exp) {
// We expand everything to help compiler
// Slightly faster than a numpy code
const size_t nmeshz = nmesh[2];
const size_t nmeshypz = nmesh[1] + nmesh[2];  // offset of the x block in coords
const size_t nmeshyz = nmesh[2]*nmesh[1];
if (exp == -1) {
#pragma omp parallel for shared(mesh)
for (int ix=0; ix<nmesh[0]; ix++) {
for (int iy=0; iy<nmesh[1]; iy++) {
// x+y part is invariant over the inner z loop, so hoist it.
FLOAT xy = coords[nmeshz + iy] + coords[nmeshypz + ix];
size_t ixy = nmeshyz*ix + nmeshz*iy;
for (int iz=0; iz<nmesh[2]; iz++) mesh[ixy + iz] /= (xy + coords[iz]);
}
}
}
else if (exp == 1) {
#pragma omp parallel for shared(mesh)
for (int ix=0; ix<nmesh[0]; ix++) {
for (int iy=0; iy<nmesh[1]; iy++) {
FLOAT xy = coords[nmeshz + iy] + coords[nmeshypz + ix];
size_t ixy = nmeshyz*ix + nmeshz*iy;
for (int iz=0; iz<nmesh[2]; iz++) mesh[ixy + iz] *= (xy + coords[iz]);
}
}
}
else {
#pragma omp parallel for shared(mesh)
for (int ix=0; ix<nmesh[0]; ix++) {
for (int iy=0; iy<nmesh[1]; iy++) {
FLOAT xy = coords[nmeshz + iy] + coords[nmeshypz + ix];
size_t ixy = nmeshyz*ix + nmeshz*iy;
for (int iz=0; iz<nmesh[2]; iz++) mesh[ixy + iz] *= POW((xy + coords[iz]), exp);
}
}
}
return 0;  // was `return 0.;` — a double literal implicitly converted to int
}
|
GB_binop__lor_bool.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lor_bool)
// A.*B function (eWiseMult): GB (_AemultB_08__lor_bool)
// A.*B function (eWiseMult): GB (_AemultB_02__lor_bool)
// A.*B function (eWiseMult): GB (_AemultB_04__lor_bool)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lor_bool)
// A*D function (colscale): GB (_AxD__lor_bool)
// D*A function (rowscale): GB (_DxB__lor_bool)
// C+=B function (dense accum): GB (_Cdense_accumB__lor_bool)
// C+=b function (dense accum): GB (_Cdense_accumb__lor_bool)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lor_bool)
// C=scalar+B GB (_bind1st__lor_bool)
// C=scalar+B' GB (_bind1st_tran__lor_bool)
// C=A+scalar GB (_bind2nd__lor_bool)
// C=A'+scalar GB (_bind2nd_tran__lor_bool)
// C type: bool
// A type: bool
// A pattern? 0
// B type: bool
// B pattern? 0
// BinaryOp: cij = (aij || bij)
#define GB_ATYPE \
bool
#define GB_BTYPE \
bool
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
bool aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
bool bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x || y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOR || GxB_NO_BOOL || GxB_NO_LOR_BOOL)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// NOTE: this file is auto-generated (see the header); the functions below
// are thin wrappers whose bodies come from #include'd templates,
// specialised by the GB_* macros defined above.  Edit the Generator, not
// this file.
void GB (_Cdense_ewise3_noaccum__lor_bool)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__lor_bool)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
// GB_DISABLE compiles the body out when this operator is excluded at
// build time; callers then fall back to the generic implementation.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__lor_bool)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type bool
bool bwork = (*((bool *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable (the block above always returns) —
// harmless artifact of the code generator.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__lor_bool)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx is consumed by the included template.
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__lor_bool)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx is consumed by the included template.
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// NOTE: auto-generated wrappers (see file header) around #include'd
// templates; the GB_* macros above specialise them for LOR on bool.
GrB_Info GB (_AaddB__lor_bool)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
// alpha/beta are the "fill" scalars used only by eWiseUnion; they
// stay uninitialized (and unused) for plain eWiseAdd.
bool alpha_scalar ;
bool beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((bool *) alpha_scalar_in)) ;
beta_scalar = (*((bool *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__lor_bool)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__lor_bool)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// GB_BINOP_FLIP is 0 for LOR (commutative), so only the #else branch
// below is compiled in for this operator.
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__lor_bool)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__lor_bool)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// NOTE: auto-generated (see file header).  These two kernels are plain
// element loops rather than template includes: Cx [p] = x || Bx [p]
// (bind1st) and Cx [p] = Ax [p] || y (bind2nd).
GrB_Info GB (_bind1st__lor_bool)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
bool x = (*((bool *) x_input)) ;
bool *Bx = (bool *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries not present in the bitmap (GBB is 1 for full matrices)
if (!GBB (Bb, p)) continue ;
bool bij = GBX (Bx, p, false) ;
Cx [p] = (x || bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__lor_bool)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
bool *Ax = (bool *) Ax_input ;
bool y = (*((bool *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap (GBB is 1 for full matrices)
if (!GBB (Ab, p)) continue ;
bool aij = GBX (Ax, p, false) ;
Cx [p] = (aij || y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x || aij) ; \
}
GrB_Info GB (_bind1st_tran__lor_bool)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
bool
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool x = (*((const bool *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// Restore GB_ATYPE for the rest of the translation unit (this is
// preprocessor text, so it takes effect even after the return above).
#undef GB_ATYPE
#define GB_ATYPE \
bool
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij || y) ; \
}
// NOTE: auto-generated wrapper; the loop body comes from the included
// transpose template, which expands GB_CAST_OP above.
GrB_Info GB (_bind2nd_tran__lor_bool)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool y = (*((const bool *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
elemwise_binary_scalar_op.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file elemwise_binary_scalar_op.h
* \brief Function definition of elementwise binary scalar operators
*/
#ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_
#define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_
#include <mxnet/operator_util.h>
#include <iomanip>
#include <limits>
#include <sstream>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "../mshadow_op.h"
#include "../elemwise_op_common.h"
#include "../../common/alm.h"
#include "elemwise_unary_op.h"
namespace mxnet {
namespace op {
// Parameter block for "tensor (op) scalar" operators: the scalar value
// plus a flag recording whether the user passed an integer, which drives
// the dtype promotion performed in NumpyBinaryScalarType.
struct NumpyBinaryScalarParam : public dmlc::Parameter<NumpyBinaryScalarParam> {
// the scalar operand (always stored as double)
double scalar;
// true if the user-supplied scalar was an int (affects output dtype)
bool is_int;
DMLC_DECLARE_PARAMETER(NumpyBinaryScalarParam) {
DMLC_DECLARE_FIELD(scalar).set_default(1).describe("Scalar input value");
DMLC_DECLARE_FIELD(is_int).set_default(true).describe(
"Indicate whether scalar input is int type");
}
// Serialize both fields into the attribute dict (used to re-create the
// operator).  max_digits10 guarantees the double round-trips exactly
// through its decimal representation.
void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
std::ostringstream scalar_s, is_int_s;
scalar_s << std::setprecision(std::numeric_limits<double>::max_digits10) << scalar;
is_int_s << is_int;
(*dict)["scalar"] = scalar_s.str();
(*dict)["is_int"] = is_int_s.str();
}
};
// Infer the output dtype for a tensor-scalar op following numpy-style
// promotion:
//   int tensor  + float scalar -> float64
//   bool tensor + int scalar   -> int64
//   bool tensor + float scalar -> float64
//   otherwise                  -> same dtype as the input tensor
// Returns true once the output dtype has been resolved (!= -1).
inline bool NumpyBinaryScalarType(const nnvm::NodeAttrs& attrs,
std::vector<int>* in_attrs,
std::vector<int>* out_attrs) {
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
const NumpyBinaryScalarParam& param = nnvm::get<NumpyBinaryScalarParam>(attrs.parsed);
bool scalar_is_int = param.is_int;
if (common::is_int(in_attrs->at(0)) && !scalar_is_int) {
TYPE_ASSIGN_CHECK(*out_attrs, 0, mshadow::kFloat64);
} else if (in_attrs->at(0) == mshadow::kBool) {
TYPE_ASSIGN_CHECK(*out_attrs, 0, scalar_is_int ? mshadow::kInt64 : mshadow::kFloat64);
} else {
// Same dtype in and out; the second assignment also back-propagates
// the output dtype to the input when only the output was known.
TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0));
TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
}
return out_attrs->at(0) != -1;
}
class BinaryScalarOp : public UnaryOp {
/*! \brief Tensor operation against a scalar with a dense result.
 *
 * Row-sparse (RSP) input, dense output.  Rows absent from the RSP input
 * are implicitly zero, so they receive OP(0, scalar); rows present in
 * the input receive OP(value, scalar) element-wise.  The loop walks the
 * output rows once, handling maximal runs of contiguous absent rows and
 * contiguous present rows as single kernel launches.
 */
template <typename OP, typename DType, typename IType>
static void ComputeExDenseResultRsp(mshadow::Stream<cpu>* stream,
const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const NDArray& input,
const OpReqType req,
const NDArray& output) {
const NumpyBinaryScalarParam& param = nnvm::get<NumpyBinaryScalarParam>(attrs.parsed);
const double alpha = param.scalar;
CHECK_EQ(output.shape(), input.shape());
const int64_t row_count = output.shape()[0];
const int64_t items_per_row = output.shape().Size() / row_count;
// Value that every element of an absent (all-zero) row maps to.
const DType result_for_zero = OP::Map(DType(0), DType(alpha));
mshadow::Tensor<cpu, 1, DType> input_data = input.data().FlatTo1D<cpu, DType>(stream);
mshadow::Tensor<cpu, 1, DType> output_data = output.data().FlatTo1D<cpu, DType>(stream);
const int64_t sparse_row_count = input.aux_shape(rowsparse::kIdx).Size();
if (sparse_row_count != row_count) {
mshadow::Tensor<cpu, 1, IType> row_indexes =
input.aux_data(rowsparse::kIdx).FlatTo1D<cpu, IType>(stream);
int64_t input_iter = 0;   // position in the stored (sparse) rows
int64_t output_row = 0;   // position in the dense output rows
IType next_input_row = 0;
while (output_row < row_count) {
// Next stored row, or row_count once the sparse rows are exhausted.
next_input_row =
input_iter < sparse_row_count ? int64_t(row_indexes[input_iter]) : row_count;
// Split up into blocks of contiguous data and do those together
// Do contiguous dense blocks
const int64_t dense_block_count = next_input_row - output_row;
if (dense_block_count > 0) {
// Fill the absent rows with result_for_zero in one launch.
MXNET_ASSIGN_REQ_SWITCH(req, Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, cpu>::Launch(
stream,
items_per_row * dense_block_count,
output_data.dptr_ + items_per_row * output_row,
result_for_zero);
});
output_row += dense_block_count;
continue;
}
// Do contiguous sparse blocks
// Extend the run while stored row indices remain consecutive.
int64_t next_non_contiguous_sparse = input_iter;
while (next_non_contiguous_sparse < sparse_row_count - 1) {
if (row_indexes[next_non_contiguous_sparse + 1] !=
row_indexes[next_non_contiguous_sparse] + 1) {
break;
}
++next_non_contiguous_sparse;
}
const int64_t sparse_block_count = next_non_contiguous_sparse - input_iter + 1;
if (sparse_block_count > 0) {
MXNET_ASSIGN_REQ_SWITCH(req, Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch(
stream,
items_per_row * sparse_block_count,
&output_data.dptr_[items_per_row * output_row],
&input_data.dptr_[items_per_row * input_iter],
DType(alpha));
});
output_row += sparse_block_count;
input_iter += sparse_block_count;
continue;
}
}
} else {
// All rows exist (eventually we don't have to do complex
// things to call GPU kernels because we don't need to access row indices)
MXNET_ASSIGN_REQ_SWITCH(req, Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch(
stream, items_per_row * row_count, output_data.dptr_, input_data.dptr_, DType(alpha));
});
}
}
/*! \brief Tensor operation against a scalar with a dense result.
 *
 * GPU overload of the row-sparse path: not implemented — aborts at
 * runtime if a GPU stream reaches this code.
 */
template <typename OP, typename DType, typename IType>
static void ComputeExDenseResultRsp(mshadow::Stream<gpu>* stream,
const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const NDArray& input,
const OpReqType req,
const NDArray& output) {
LOG(FATAL) << "NOT IMPLEMENTED";
}
/*! \brief Tensor operation against a scalar with a dense result.
 *
 * CSR input, dense output.  Every output element is first filled with
 * OP(0, scalar) (the value implied by an absent CSR entry), then the
 * stored entries are overwritten with OP(value, scalar), parallelised
 * over rows.
 */
template <typename OP, typename DType, typename IType, typename CType>
static void ComputeExDenseResultCsr(mshadow::Stream<cpu>* stream,
const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const NDArray& input,
const OpReqType req,
const NDArray& output) {
CHECK_EQ(output.shape(), input.shape());
const NumpyBinaryScalarParam& param = nnvm::get<NumpyBinaryScalarParam>(attrs.parsed);
const double alpha = param.scalar;
const DType dense_fill_val = OP::Map(DType(0), DType(alpha));
const TBlob column_indexes = input.aux_data(csr::kIdx);
const size_t item_count = column_indexes.Size();
// Pre-fill dense with 0-input/output value
FillDense<DType>(
stream, output.shape().Size(), dense_fill_val, req, output.data().dptr<DType>());
mshadow::Tensor<cpu, 2, DType> out = AsRowise2D<DType>(stream, output.data());
if (item_count) {
const DType* in = input.data().dptr<DType>();
const IType* column_indexes_ptr = column_indexes.dptr<IType>();
const auto row_count = static_cast<size_t>(input.shape()[0]);
const TBlob row_starts = input.aux_data(csr::kIndPtr);
const CType* row_starts_ptr = row_starts.dptr<CType>();
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(row_count); ++i) {
const bool last_row = i == static_cast<int>(row_count) - 1;
// Split up into blocks of contiguous data and do those together
// For the last row the run extends to the end of the value array
// (indptr has no i+1 entry there).
const size_t row_item_start_iter = row_starts_ptr[i];
const size_t input_items_this_row =
!last_row ? static_cast<size_t>(row_starts_ptr[i + 1]) - row_item_start_iter :
item_count - row_item_start_iter;
if (input_items_this_row) {
const IType* this_row_column_indexes = column_indexes_ptr + row_item_start_iter;
const DType* row_data_start = in + row_item_start_iter;
DType* output_this_row = out[i].dptr_;
// More overhead to use OMP for small loops, so don't
// NOTE(review): this inner `parallel for` sits inside the outer
// parallel region; unless nested parallelism is enabled it is
// serialized — confirm the intent.
if (input_items_this_row > 1000) {
#pragma omp parallel for
for (CType j = 0; j < static_cast<CType>(input_items_this_row); ++j) {
const IType col = this_row_column_indexes[j];
const DType val = row_data_start[j];
output_this_row[col] = OP::Map(val, DType(alpha));
}
} else {
for (CType j = 0; j < static_cast<CType>(input_items_this_row); ++j) {
const IType col = this_row_column_indexes[j];
const DType val = row_data_start[j];
output_this_row[col] = OP::Map(val, DType(alpha));
}
}
}
}
}
}
/*! \brief Scalar op with a dense result for a CSR input on GPU.
 *  Not implemented: reaching this overload aborts the process at runtime.
 */
template <typename OP, typename DType, typename IType, typename CType>
static void ComputeExDenseResultCsr(mshadow::Stream<gpu>* stream,
                                    const nnvm::NodeAttrs& attrs,
                                    const OpContext& ctx,
                                    const NDArray& input,
                                    const OpReqType req,
                                    const NDArray& output) {
  LOG(FATAL) << "NOT IMPLEMENTED";
}
/*! \brief Dispatch a sparse-input / dense-output scalar op to the
 *  storage-specific kernel (row-sparse or CSR).
 *
 * \param attrs  node attributes carrying NumpyBinaryScalarParam
 * \param ctx    operator context (provides the device stream)
 * \param input  sparse input array (kRowSparseStorage or kCSRStorage)
 * \param req    write request type for the output
 * \param output dense output array (must be kDefaultStorage)
 *
 * Fix: `output` is now taken by const reference instead of by value; the
 * by-value parameter copied the NDArray handle (shared-pointer refcount
 * traffic) on every call for no benefit.
 */
template <typename xpu, typename OP, typename DType, typename IType>
static void ComputeExDenseResult(const nnvm::NodeAttrs& attrs,
                                 const OpContext& ctx,
                                 const NDArray& input,
                                 const OpReqType req,
                                 const NDArray& output) {
  mshadow::Stream<xpu>* stream = ctx.get_stream<xpu>();
  CHECK_EQ(output.storage_type(), kDefaultStorage);
  switch (input.storage_type()) {
    case kRowSparseStorage: {
      ComputeExDenseResultRsp<OP, DType, IType>(stream, attrs, ctx, input, req, output);
      break;
    }
    case kCSRStorage: {
      MSHADOW_IDX_TYPE_SWITCH(input.aux_data(csr::kIndPtr).type_flag_, CType, {
        ComputeExDenseResultCsr<OP, DType, IType, CType>(stream, attrs, ctx, input, req, output);
      });
      break;
    }
    default:
      CHECK(false) << "Unsupported sparse storage type";
      break;
  }
}
public:
/*! \brief CPU implementation of a dense binary-scalar op.
 *
 * When the input tensor is integral but the scalar is floating point, or
 * the input is bool, the input is first cast into temp workspace at the
 * output dtype so OP runs on promoted values; otherwise the input blob is
 * used directly.
 *
 * \param attrs op attributes holding NumpyBinaryScalarParam (scalar, is_int)
 * \param ctx   operator context (supplies the kTempSpace resource)
 * \param s     CPU stream the kernel is launched on
 */
template <typename OP>
static void Compute_(const nnvm::NodeAttrs& attrs,
                     const OpContext& ctx,
                     mshadow::Stream<cpu>* s,
                     const std::vector<TBlob>& inputs,
                     const std::vector<OpReqType>& req,
                     const std::vector<TBlob>& outputs) {
  DCHECK_EQ(inputs.size(), 1);
  DCHECK_EQ(outputs.size(), 1);
  using namespace mshadow;
  using namespace mshadow::expr;
  TBlob temp_tblob;
  const NumpyBinaryScalarParam& param = nnvm::get<NumpyBinaryScalarParam>(attrs.parsed);
  bool scalar_is_int = param.is_int;
  const double alpha = param.scalar;
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    if ((common::is_int(inputs[0].type_flag_) && !scalar_is_int) ||
        (inputs[0].type_flag_ == kBool)) {
      // Promote: cast the input to the output dtype in temp workspace.
      Tensor<cpu, 1, DType> temp_tensor =
          ctx.requested[0].get_space_typed<cpu, 1, DType>(Shape1(inputs[0].Size()), s);
      temp_tblob = TBlob(temp_tensor);
      CastCompute<cpu>(attrs, ctx, {inputs[0]}, {kWriteTo}, {temp_tblob});
    } else {
      temp_tblob = inputs[0];
    }
    MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
      mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch(
          s, inputs[0].Size(), outputs[0].dptr<DType>(), temp_tblob.dptr<DType>(), DType(alpha));
    });
  });
}
/*! \brief Forward entry point: fetch the device stream from the context and
 *  delegate to Compute_. */
template <typename xpu, typename OP>
static void Compute(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  Compute_<OP>(attrs, ctx, ctx.get_stream<xpu>(), inputs, req, outputs);
}
/*! \brief Dense binary-scalar op restricted to integer output dtypes.
 *  Unlike Compute_, no promotion/cast path exists: the kernel reads the
 *  input blob at the output dtype directly.
 */
template <typename xpu, typename OP>
static void ComputeInt(const nnvm::NodeAttrs& attrs,
                       const OpContext& ctx,
                       const std::vector<TBlob>& inputs,
                       const std::vector<OpReqType>& req,
                       const std::vector<TBlob>& outputs) {
  DCHECK_EQ(inputs.size(), 1);
  DCHECK_EQ(outputs.size(), 1);
  using namespace mshadow;
  using namespace mshadow::expr;
  Stream<xpu>* s = ctx.get_stream<xpu>();
  const NumpyBinaryScalarParam& param = nnvm::get<NumpyBinaryScalarParam>(attrs.parsed);
  const double alpha = param.scalar;
  MXNET_INT_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
      mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(
          s, inputs[0].Size(), outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), DType(alpha));
    });
  });
}
/*! \brief Dense binary-scalar comparison op writing a bool output.
 *  If the input is integral but the scalar is floating point, the input is
 *  first cast to double in temp workspace so the comparison happens at the
 *  promoted type.
 */
template <typename xpu, typename OP>
static void ComputeLogic(const nnvm::NodeAttrs& attrs,
                         const OpContext& ctx,
                         const std::vector<TBlob>& inputs,
                         const std::vector<OpReqType>& req,
                         const std::vector<TBlob>& outputs) {
  DCHECK_EQ(inputs.size(), 1);
  DCHECK_EQ(outputs.size(), 1);
  using namespace mshadow;
  using namespace mshadow::expr;
  Stream<xpu>* s = ctx.get_stream<xpu>();
  const NumpyBinaryScalarParam& param = nnvm::get<NumpyBinaryScalarParam>(attrs.parsed);
  bool scalar_is_int = param.is_int;
  const double alpha = param.scalar;
  TBlob temp_tblob;
  if (common::is_int(inputs[0].type_flag_) && !scalar_is_int) {
    // Promote integer input to double so the comparison is exact w.r.t. alpha.
    Tensor<xpu, 1, double> temp_tensor =
        ctx.requested[0].get_space_typed<xpu, 1, double>(Shape1(inputs[0].Size()), s);
    temp_tblob = TBlob(temp_tensor);
    CastCompute<xpu>(attrs, ctx, {inputs[0]}, {kWriteTo}, {temp_tblob});
  } else {
    temp_tblob = inputs[0];
  }
  // DType here is the (possibly promoted) input dtype; the output is bool.
  MSHADOW_TYPE_SWITCH_EXT_WITH_BOOL(temp_tblob.type_flag_, DType, {
    MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
      mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(
          s, inputs[0].Size(), outputs[0].dptr<bool>(), temp_tblob.dptr<DType>(), DType(alpha));
    });
  });
}
/*! \brief Storage-type dispatcher for the forward op on NDArrays.
 *
 * csr->csr and rsp->rsp run the dense kernel on the stored values only;
 * sparse->dense goes through ComputeExDenseResult; anything else is logged
 * as unimplemented.
 *
 * NOTE(review): IType is deduced from aux array 0 (rowsparse::kIdx), which
 * for a CSR input is the row-pointer array — yet the CSR kernel uses IType
 * for the column-index array (csr::kIdx). This is only correct when both
 * aux arrays share a dtype; verify.
 */
template <typename xpu, typename OP>
static void ComputeEx(const nnvm::NodeAttrs& attrs,
                      const OpContext& ctx,
                      const std::vector<NDArray>& inputs,
                      const std::vector<OpReqType>& req,
                      const std::vector<NDArray>& outputs) {
  DCHECK_EQ(inputs.size(), 1);
  DCHECK_EQ(outputs.size(), 1);
  const auto in_stype = inputs[0].storage_type();
  const auto out_stype = outputs[0].storage_type();
  if (req[0] == kNullOp) {
    return;
  }
  if ((in_stype == kRowSparseStorage && out_stype == kRowSparseStorage) ||
      (in_stype == kCSRStorage && out_stype == kCSRStorage)) {
    // csr -> csr, or rsp -> rsp
    UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Compute<xpu, OP>);
  } else if (out_stype == kDefaultStorage &&
             (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
    MSHADOW_TYPE_SWITCH(outputs[0].data().type_flag_, DType, {
      MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(rowsparse::kIdx), IType, {
        ComputeExDenseResult<xpu, OP, DType, IType>(attrs, ctx, inputs[0], req[0], outputs[0]);
      });
    });
  } else {
    LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
  }
}
/*! \brief Storage-type dispatcher for comparison ops on NDArrays.
 *  Only the same-storage paths (csr->csr, rsp->rsp) are supported; a
 *  sparse->dense request is logged as unimplemented rather than computed.
 */
template <typename xpu, typename OP>
static void LogicComputeEx(const nnvm::NodeAttrs& attrs,
                           const OpContext& ctx,
                           const std::vector<NDArray>& inputs,
                           const std::vector<OpReqType>& req,
                           const std::vector<NDArray>& outputs) {
  DCHECK_EQ(inputs.size(), 1);
  DCHECK_EQ(outputs.size(), 1);
  const auto in_stype = inputs[0].storage_type();
  const auto out_stype = outputs[0].storage_type();
  if (req[0] == kNullOp) {
    return;
  }
  if ((in_stype == kRowSparseStorage && out_stype == kRowSparseStorage) ||
      (in_stype == kCSRStorage && out_stype == kCSRStorage)) {
    // csr -> csr, or rsp -> rsp
    UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Compute<xpu, OP>);
  } else {
    LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
  }
}
/*! \brief CPU backward pass for a binary-scalar op.
 *  Launches backward_grad_tuned<OP> over inputs[0] and inputs[1] plus the
 *  scalar. inputs[0] is presumably the output gradient and inputs[1] the
 *  forward input — TODO confirm against the gradient registration.
 */
template <typename OP>
static void Backward_(const nnvm::NodeAttrs& attrs,
                      mshadow::Stream<cpu>* s,
                      const std::vector<TBlob>& inputs,
                      const std::vector<OpReqType>& req,
                      const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mshadow::expr;
  const NumpyBinaryScalarParam& param = nnvm::get<NumpyBinaryScalarParam>(attrs.parsed);
  const double alpha = param.scalar;
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
      mxnet::op::mxnet_op::Kernel<
          mxnet::op::mxnet_op::op_with_req<mxnet::op::mxnet_op::backward_grad_tuned<OP>, Req>,
          cpu>::Launch(s,
                       inputs[0].Size(),
                       outputs[0].dptr<DType>(),
                       inputs[0].dptr<DType>(),
                       inputs[1].dptr<DType>(),
                       DType(alpha));
    });
  });
}
/*! \brief Backward entry point: fetch the device stream from the context
 *  and forward everything to Backward_. */
template <typename xpu, typename OP>
static void Backward(const nnvm::NodeAttrs& attrs,
                     const OpContext& ctx,
                     const std::vector<TBlob>& inputs,
                     const std::vector<OpReqType>& req,
                     const std::vector<TBlob>& outputs) {
  Backward_<OP>(attrs, ctx.get_stream<xpu>(), inputs, req, outputs);
}
};
// Registers a binary-scalar operator `name`: one tensor input plus a scalar
// parameter, one output; parses NumpyBinaryScalarParam, uses elementwise
// shape/type inference, allows in-place (input 0 -> output 0) and requests
// temp workspace (needed by the cast/promotion paths above).
#define MXNET_OPERATOR_REGISTER_BINARY_SCALAR(name)                              \
  NNVM_REGISTER_OP(name)                                                         \
      .set_num_inputs(1)                                                         \
      .set_num_outputs(1)                                                        \
      .set_attr_parser(ParamParser<NumpyBinaryScalarParam>)                      \
      .set_attr<mxnet::FInferShape>("FInferShape", ElemwiseShape<1, 1>)          \
      .set_attr<nnvm::FInferType>("FInferType", NumpyBinaryScalarType)           \
      .set_attr<mxnet::alm::FChangeLayout>("FChangeLayout", ElemwiseChangeLayout) \
      .set_attr<nnvm::FInplaceOption>("FInplaceOption",                          \
                                      [](const NodeAttrs& attrs) {               \
                                        return std::vector<std::pair<int, int> >{{0, 0}}; \
                                      })                                         \
      .set_attr<FResourceRequest>(                                               \
          "FResourceRequest",                                                    \
          [](const NodeAttrs& attrs) {                                           \
            return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};    \
          })                                                                     \
      .add_argument("data", "NDArray-or-Symbol", "source input")                 \
      .add_arguments(NumpyBinaryScalarParam::__FIELDS__())
#if MXNET_USE_CUDA
/*! \brief Functor for the GPU forward path using runtime compilation (RTC).
 *  `OP` holds the operation name to compile; the call operators (dense and
 *  NDArray overloads) are defined elsewhere in the CUDA build.
 */
struct BinaryScalarRTCCompute {
  std::string OP;

  void operator()(const nnvm::NodeAttrs& attrs,
                  const OpContext& ctx,
                  const std::vector<TBlob>& inputs,
                  const std::vector<OpReqType>& req,
                  const std::vector<TBlob>& outputs);

  void operator()(const nnvm::NodeAttrs& attrs,
                  const OpContext& ctx,
                  const std::vector<NDArray>& inputs,
                  const std::vector<OpReqType>& req,
                  const std::vector<NDArray>& outputs);
};
/*! \brief Functor for the GPU backward path using runtime compilation (RTC).
 *  `OP` holds the gradient operation name; defined elsewhere in the CUDA build.
 */
struct BinaryScalarRTCBackward {
  std::string OP;

  void operator()(const nnvm::NodeAttrs& attrs,
                  const OpContext& ctx,
                  const std::vector<TBlob>& inputs,
                  const std::vector<OpReqType>& req,
                  const std::vector<TBlob>& outputs);
};
#endif
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_
|
mask_hash_mult.h | /**
* @file
 * mask_hash_mult.h
*
* @author
*
* @date
*
* @brief
* Masked SpGEMM with hash tables as accumulators
*
* @todo
*
* @note
*
*/
#include <algorithm>
#include <omp.h>
#include "CSR.h"
#include "mask_hash.h"
#include "utility.h"
// With bin
/**
 * Masked sparse * sparse product C = (A * B) .* M using per-thread
 * linear-probing hash tables, with BIN-based load balancing assigning a
 * contiguous row range to each thread.
 *
 * Pass 1 (symbolic): count the nonzeros of every output row into row_nz,
 * sizing each thread's hash table once for its widest mask row.
 * Pass 2 (numeric): recompute the products and gather column ids / values
 * into the freshly allocated C arrays.
 *
 * Improvements over the previous revision: removed the unused loop
 * variable `i` declared in both parallel regions and the dead
 * `rowPerThread` computation (a leftover of the commented-out static
 * row partitioning).
 *
 * @param A, B        operands in CSR form
 * @param C           result (arrays allocated here; caller frees)
 * @param M           mask — only coordinates present in M may appear in C
 * @param multop      scalar multiplication operator
 * @param addop       scalar addition operator
 * @param threadCount number of OpenMP threads to use
 */
template <typename IT,
          typename NT,
          typename MultiplyOperation,
          typename AddOperation>
void
mxm_hash_mask
(
    const CSR<IT, NT> &A,
    const CSR<IT, NT> &B,
    CSR<IT, NT> &C,
    const CSR<IT, NT> &M,
    MultiplyOperation multop,
    AddOperation addop,
    unsigned threadCount
)
{
    C.rows = A.rows;
    C.cols = B.cols;
    C.zerobased = true;
    C.rowptr = my_malloc<IT>(C.rows + 1);
    IT *row_nz = my_malloc<IT>(C.rows);

    int numThreads;
    #pragma omp parallel num_threads(threadCount)
    {
        numThreads = omp_get_num_threads();  // every thread writes the same value
    }

    BIN<IT, NT> bin(A.rows, IMB_PWMIN, numThreads);
    /* Set max bin */
    bin.set_max_bin(A.rowptr, A.colids, B.rowptr, C.rows, C.cols);
    /* Create hash table (thread local) */
    bin.create_local_hash_table(C.cols);

    /* Symbolic phase: count the nnz of each row of C. */
    #pragma omp parallel num_threads(threadCount)
    {
        IT tid, start_row, end_row, max_row = 0, ra;
        tid = omp_get_thread_num();
        start_row = bin.rows_offset[tid];
        end_row = bin.rows_offset[tid + 1];
        /* Size the table once for the widest mask row in this thread's range. */
        for (ra = start_row; ra < end_row; ++ra) {
            max_row = max(max_row, M.rowptr[ra + 1] - M.rowptr[ra]);
        }
        map_lp<IT, bool> ht(max_row + 1);
        for (ra = start_row; ra < end_row; ++ra)
        {
            for (IT cmptr = M.rowptr[ra]; cmptr < M.rowptr[ra + 1]; ++cmptr)
                ht.insert(M.colids[cmptr], false);
            row_nz[ra] = 0;
            for (IT captr = A.rowptr[ra]; captr < A.rowptr[ra + 1]; ++captr)
            {
                IT rb = A.colids[captr];
                for (IT cbptr = B.rowptr[rb]; cbptr < B.rowptr[rb + 1]; ++cbptr)
                {
                    auto hv = ht.find(B.colids[cbptr]);
                    /* Count each masked column at most once per row. */
                    if (hv != -1 && !ht[hv])
                    {
                        ++row_nz[ra];
                        ht[hv] = true;
                    }
                }
            }
            ht.reset();
        }
    }

    scan(row_nz, C.rowptr, C.rows + 1);
    my_free<IT>(row_nz);
    C.nnz = C.rowptr[C.rows];
    C.colids = my_malloc<IT>(C.nnz);
    C.values = my_malloc<NT>(C.nnz);

    /* Numeric phase: recompute products and fill C.colids / C.values. */
    #pragma omp parallel num_threads(threadCount)
    {
        IT tid, start_row, end_row, max_row = 0, ra;
        tid = omp_get_thread_num();
        start_row = bin.rows_offset[tid];
        end_row = bin.rows_offset[tid + 1];
        for (ra = start_row; ra < end_row; ++ra) {
            max_row = max(max_row, M.rowptr[ra + 1] - M.rowptr[ra]);
        }
        map_lp<IT, NT, bool> ht(max_row + 1);
        for (ra = start_row; ra < end_row; ++ra)
        {
            for (IT cmptr = M.rowptr[ra]; cmptr < M.rowptr[ra + 1]; ++cmptr)
                ht.insert(M.colids[cmptr], NT(), false);
            for (IT captr = A.rowptr[ra]; captr < A.rowptr[ra + 1]; ++captr)
            {
                IT rb = A.colids[captr];
                for (IT cbptr = B.rowptr[rb]; cbptr < B.rowptr[rb + 1]; ++cbptr)
                {
                    auto hv = ht.find(B.colids[cbptr]);
                    if (hv != -1 && !ht.get2(hv))
                    {   /* first product contributing to this masked column */
                        ht.get1(hv) = multop(A.values[captr], B.values[cbptr]);
                        ht.get2(hv) = true;
                    }
                    else if (hv != -1 && ht.get2(hv))
                        ht.get1(hv) =
                            addop(ht.get1(hv),
                                  multop(A.values[captr], B.values[cbptr]));
                }
            }
            /* Write this row's results into C; gather presumably also clears
             * the table (the symbolic pass resets explicitly) — verify in
             * mask_hash.h. */
            ht.gather(C.colids + C.rowptr[ra], C.values + C.rowptr[ra]);
        }
    }
    return;
}
// notes (IN): parallelism over rows, HT allocation for each row
/* Masked SpGEMM without BIN load balancing: a plain `parallel for` over
 * rows, allocating a fresh hash table sized to the row's mask width for
 * every row. Same two-pass (symbolic, then numeric) structure as
 * mxm_hash_mask above. */
template <typename IT,
          typename NT,
          typename MultiplyOperation,
          typename AddOperation>
void
mxm_hash_mask_wobin
(
    const CSR<IT, NT> &A,
    const CSR<IT, NT> &B,
    CSR<IT, NT> &C,
    const CSR<IT, NT> &M,
    MultiplyOperation multop,
    AddOperation addop,
    unsigned threadCount
)
{
    C.rows = A.rows;
    C.cols = B.cols;
    C.zerobased = true;
    C.rowptr = my_malloc<IT>(C.rows + 1);
    IT *row_nz = my_malloc<IT>(C.rows);

    /* Symbolic phase: count the nnz of each row of C. */
    #pragma omp parallel for num_threads(threadCount)
    for (IT ra = 0; ra < A.rows; ++ra)
    {
        /* Table sized to this row's mask width; allocated per row. */
        map_lp<IT, bool> ht(M.rowptr[ra+1] - M.rowptr[ra] + 1);
        for (IT cmptr = M.rowptr[ra]; cmptr < M.rowptr[ra+1]; ++cmptr)
            ht.insert(M.colids[cmptr], false);
        row_nz[ra] = 0;
        for (IT captr = A.rowptr[ra]; captr < A.rowptr[ra+1]; ++captr)
        {
            IT rb = A.colids[captr];
            for (IT cbptr = B.rowptr[rb]; cbptr < B.rowptr[rb+1]; ++cbptr)
            {
                auto hv = ht.find(B.colids[cbptr]);
                /* Count each masked column at most once per row. */
                if (hv != -1 && !ht[hv])
                {
                    ++row_nz[ra];
                    ht[hv] = true;
                }
            }
        }
    }

    scan(row_nz, C.rowptr, C.rows + 1);
    my_free<IT>(row_nz);
    C.nnz = C.rowptr[C.rows];
    C.colids = my_malloc<IT>(C.nnz);
    C.values = my_malloc<NT>(C.nnz);

    /* Numeric phase: recompute products and fill C.colids / C.values. */
    #pragma omp parallel for num_threads(threadCount)
    for (IT ra = 0; ra < A.rows; ++ra)
    {
        map_lp<IT, NT, bool> ht(M.rowptr[ra+1] - M.rowptr[ra] + 1);
        for (IT cmptr = M.rowptr[ra]; cmptr < M.rowptr[ra+1]; ++cmptr)
            ht.insert(M.colids[cmptr], NT(), false);
        for (IT captr = A.rowptr[ra]; captr < A.rowptr[ra+1]; ++captr)
        {
            IT rb = A.colids[captr];
            for (IT cbptr = B.rowptr[rb]; cbptr < B.rowptr[rb+1]; ++cbptr)
            {
                auto hv = ht.find(B.colids[cbptr]);
                if (hv != -1 && !ht.get2(hv))
                {   /* first product contributing to this masked column */
                    ht.get1(hv) = multop(A.values[captr], B.values[cbptr]);
                    ht.get2(hv) = true;
                }
                else if (hv != -1 && ht.get2(hv))
                    ht.get1(hv) =
                        addop(ht.get1(hv),
                              multop(A.values[captr], B.values[cbptr]));
            }
        }
        /* Write this row's results into C. */
        ht.gather(C.colids + C.rowptr[ra], C.values + C.rowptr[ra]);
    }
    return;
}
|
tiger_fmt_plug.c | /* Tiger cracker patch for JtR. Hacked together during April of 2013 by Dhiru
* Kholia <dhiru at openwall.com>.
*
* This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com> and
* it is hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_tiger;
#elif FMT_REGISTERS_H
john_register_one(&fmt_tiger);
#else
#include <string.h>
#include "arch.h"
#include "sph_tiger.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#if !FAST_FORMATS_OMP
#undef _OPENMP
#endif
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
// OMP_SCALE tuned on core i7 quad core HT
// 1 - 235k
// 64 - 7723k
// 128 - 10311K
// 256 - 12043K
// 512 - 13543
// 1k - 14256k
// 2k - 14860k ** this one chosen
// 4k - 15093k
// 8k - 14935k
// 16k - 14931k
#ifndef OMP_SCALE
#ifdef __MIC__
#define OMP_SCALE 128
#else
#define OMP_SCALE (1024*2)
#endif // __MIC__
#endif // OMP_SCALE
#endif // _OPENMP
#include "memdbg.h"
#define FORMAT_LABEL "Tiger"
#define FORMAT_NAME ""
#define FORMAT_TAG "$tiger$"
#define TAG_LENGTH (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "Tiger 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 24
#define SALT_SIZE 0
#define BINARY_ALIGN 4
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
/* Self-test vectors: bare and "$tiger$"-tagged hex digests (mixed case,
 * normalized by split()). */
static struct fmt_tests tiger_tests[] = {
	{"3293AC630C13F0245F92BBB1766E16167A4E58492DDE73F3", ""},
	{"$tiger$D981F8CB78201A950DCF3048751E441C517FCA1AA55A29F6", "message digest"},
	{"$tiger$a90197a19d2872ed8a5d508ba5b42deecf08344cc9f42195", "12346789"},
	{"$tiger$4a82b9bb5911e1eccfd27d90584903d568e4f96b4ecf0d97", "UPPERCASE"},
	{NULL}
};
/* Per-candidate plaintext buffers and computed Tiger digests (allocated in init()). */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];
/* Allocate the key/digest buffers. Under OpenMP, min_keys_per_crypt is
 * scaled by the thread count and max_keys_per_crypt by threads * OMP_SCALE
 * so each thread gets a large batch per crypt_all() call. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_out));
}
/* Release the buffers allocated in init() (reverse allocation order). */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}
/* Accept a ciphertext if, after an optional "$tiger$" tag, it consists of
 * exactly BINARY_SIZE * 2 (48) hex digits. Returns 1 if valid, 0 otherwise. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *q = ciphertext;

	if (strncmp(q, FORMAT_TAG, TAG_LENGTH) == 0)
		q += TAG_LENGTH;
	if (strlen(q) != BINARY_SIZE * 2)
		return 0;
	for (; *q; q++) {
		if (atoi16[ARCH_INDEX(*q)] == 0x7f)
			return 0;
	}
	return 1;
}
/* Canonicalize a hash for storage/comparison: always prefix "$tiger$" and
 * upper-case the hex digits. Returns a pointer to a static buffer. */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[TAG_LENGTH + BINARY_SIZE*2 + 1];

	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		ciphertext += TAG_LENGTH;

	memcpy(out, FORMAT_TAG, TAG_LENGTH);
	strnzcpy(out + TAG_LENGTH, ciphertext, BINARY_SIZE*2 + 1);
	strupr(out + TAG_LENGTH);
	return out;
}
/* Decode the 48 hex digits after the tag into 24 raw bytes.
 * Returns a pointer to a static, ARCH_WORD-aligned buffer. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	const char *hex = ciphertext + TAG_LENGTH;
	int i;

	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] = (unsigned char)((atoi16[ARCH_INDEX(hex[2 * i])] << 4) |
		                         atoi16[ARCH_INDEX(hex[2 * i + 1])]);
	}
	return out;
}
/* Hash-table bucket functions: low PH_MASK_n bits of the digest's first
 * 32-bit word (one function per table size used by the cracker core). */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
/* Hash all `count` stored candidates with Tiger into crypt_out.
 * Note the `for` line is inside the #ifdef: without _OPENMP the block runs
 * exactly once with index 0 — safe because MAX_KEYS_PER_CRYPT is 1 and only
 * OpenMP builds scale max_keys_per_crypt up in init(). */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		sph_tiger_context ctx;

		sph_tiger_init(&ctx);
		sph_tiger(&ctx, saved_key[index], strlen(saved_key[index]));
		sph_tiger_close(&ctx, (unsigned char*)crypt_out[index]);
	}
	return count;
}
/* Quick scan over all computed digests: compares only the first ARCH_SIZE
 * bytes as a cheap filter; cmp_one() performs the full-length check.
 * (Without _OPENMP only index 0 is examined — count is capped at 1 there,
 * see crypt_all/init.) */
static int cmp_all(void *binary, int count)
{
	int index = 0;

#ifdef _OPENMP
	for (; index < count; index++)
#endif
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}
/* Full 24-byte comparison of one computed digest against the target. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
/* No further verification needed beyond cmp_one(); always a match. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
static void tiger_set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
/* Return the stored plaintext for slot `index`. */
static char *get_key(int index)
{
	return saved_key[index];
}
/* Format descriptor wiring the Tiger methods above into the JtR core. */
struct fmt_main fmt_tiger = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
		FMT_OMP | FMT_OMP_BAD |
#endif
		FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE,
		{ NULL },
		{ FORMAT_TAG },
		tiger_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		get_binary,
		fmt_default_salt,    /* saltless format: all salt hooks are defaults */
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		fmt_default_set_salt,
		tiger_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
|
floorplan.c | /**********************************************************************************************/
/* This program is part of the Barcelona OpenMP Tasks Suite */
/* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */
/* Copyright (C) 2009 Universitat Politecnica de Catalunya */
/* */
/* This program is free software; you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation; either version 2 of the License, or */
/* (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */
/* */
/* You should have received a copy of the GNU General Public License */
/* along with this program; if not, write to the Free Software */
/* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */
/**********************************************************************************************/
/* Original code from the Application Kernel Matrix by Cray */
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include "app-desc.h"
#include "bots.h"
#define ROWS 64
#define COLS 64
#define DMAX 64
#define max(a, b) ((a > b) ? a : b)
#define min(a, b) ((a < b) ? a : b)
/* Value read from the end of the input file if present — presumably the
 * known optimal area used for verification; confirm against bots driver. */
int solution = -1;

typedef int  coor[2];              /* (row, col) pair */
typedef char ibrd[ROWS][COLS];     /* board: one byte per grid square, 0 = empty */
typedef char (*pibrd)[COLS];

FILE * inputFile;

/* One cell to place: its alternative shapes and current placement extent. */
struct cell {
  int   n;      /* number of alternative shapes */
  coor *alt;    /* alt[k] = (rows, cols) of shape k */
  int   top;    /* current placement extent (rows/cols, inclusive) */
  int   bot;
  int   lhs;
  int   rhs;
  int   left;   /* cell this one must touch on the left (negative = none) */
  int   above;  /* cell this one must touch above (negative = none) */
  int   next;   /* next cell in placement order (0 = last) */
};

struct cell * gcells;

/* Best solution found so far (shared across tasks, guarded by omp critical). */
int MIN_AREA;
ibrd BEST_BOARD;
coor MIN_FOOTPRINT;
int N;
/* compute all possible locations for nw corner for cell */
/* compute all possible locations for nw corner for cell */
/* Enumerate candidate NW-corner coordinates for cell `id` in shape `shape`,
 * writing them into NWS and returning their count. A placement must touch
 * the already-placed `left` and/or `above` neighbour.
 * NOTE(review): the final branch indexes cells[above]; it is taken whenever
 * left < 0, so a cell with neither neighbour (left < 0 && above < 0) would
 * use a negative index — callers apparently never pass such a cell; confirm. */
static int starts(int id, int shape, coor *NWS, struct cell *cells) {
  int i, n, top, bot, lhs, rhs;
  int rows, cols, left, above;

  /* size of cell */
  rows = cells[id].alt[shape][0];
  cols = cells[id].alt[shape][1];
  /* the cells to the left and above */
  left = cells[id].left;
  above = cells[id].above;
  /* if there is a vertical and horizontal dependence */
  if ((left >= 0) && (above >= 0)) {
    top = cells[above].bot + 1;
    lhs = cells[left].rhs + 1;
    bot = top + rows;
    rhs = lhs + cols;
    /* if footprint of cell touches the cells to the left and above */
    if ((top <= cells[left].bot) && (bot >= cells[left].top) &&
        (lhs <= cells[above].rhs) && (rhs >= cells[above].lhs))
    { n = 1; NWS[0][0] = top; NWS[0][1] = lhs; }
    else { n = 0; }
    /* if there is only a horizontal dependence */
  } else if (left >= 0) {
    /* highest initial row is top of cell to the left - rows */
    top = max(cells[left].top - rows + 1, 0);
    /* lowest initial row is bottom of cell to the left */
    bot = min(cells[left].bot, ROWS);
    n = bot - top + 1;
    for (i = 0; i < n; i++) {
      NWS[i][0] = i + top;
      NWS[i][1] = cells[left].rhs + 1;
    }
  } else {
    /* leftmost initial col is lhs of cell above - cols */
    lhs = max(cells[above].lhs - cols + 1, 0);
    /* rightmost initial col is rhs of cell above */
    rhs = min(cells[above].rhs, COLS);
    n = rhs - lhs + 1;
    for (i = 0; i < n; i++) {
      NWS[i][0] = cells[above].bot + 1;
      NWS[i][1] = i + lhs;
    } }
  return (n);
}
/* lay the cell down on the board in the rectangular space defined
by the cells top, bottom, left, and right edges. If the cell can
not be layed down, return 0; else 1.
*/
/* Stamp cell `id` onto `board` over its [top..bot] x [lhs..rhs] rectangle.
 * Returns 1 on success; returns 0 as soon as an occupied square is hit
 * (squares written before the collision are left marked — callers work on
 * a private copy of the board). */
static int lay_down(int id, ibrd board, struct cell *cells) {
  int r, c;
  const int top = cells[id].top, bot = cells[id].bot;
  const int lhs = cells[id].lhs, rhs = cells[id].rhs;

  for (r = top; r <= bot; r++) {
    for (c = lhs; c <= rhs; c++) {
      if (board[r][c] != 0)
        return (0);
      board[r][c] = (char)id;
    }
  }
  return (1);
}
/* Read one decimal int from `file` into `var`; abort the run on EOF. */
#define read_integer(file,var) \
  if ( fscanf(file, "%d", &var) == EOF ) {\
	bots_message(" Bogus input file\n");\
	exit(-1);\
  }
/* Parse the problem instance from the already-opened `inputFile`:
 * cell count N, then for each cell its shape list and left/above/next
 * links; a trailing integer (the expected solution) is read if present.
 * Cell 0 is a synthetic sentinel with zero extent.
 * TODO(review): both malloc() results are unchecked. */
static void read_inputs() {
  int i, j, n;

  read_integer(inputFile,n);
  N = n;

  gcells = (struct cell *) malloc((n + 1) * sizeof(struct cell));

  /* Sentinel cell 0: zero-size anchor for cells with left/above == 0. */
  gcells[0].n     =  0;
  gcells[0].alt   =  0;
  gcells[0].top   =  0;
  gcells[0].bot   =  0;
  gcells[0].lhs   = -1;
  gcells[0].rhs   = -1;
  gcells[0].left  =  0;
  gcells[0].above =  0;
  gcells[0].next  =  0;

  for (i = 1; i < n + 1; i++) {
    read_integer(inputFile, gcells[i].n);
    gcells[i].alt = (coor *) malloc(gcells[i].n * sizeof(coor));
    for (j = 0; j < gcells[i].n; j++) {
      read_integer(inputFile, gcells[i].alt[j][0]);
      read_integer(inputFile, gcells[i].alt[j][1]);
    }
    read_integer(inputFile, gcells[i].left);
    read_integer(inputFile, gcells[i].above);
    read_integer(inputFile, gcells[i].next);
  }
  if (!feof(inputFile)) {
    read_integer(inputFile, solution);
  }
}
/* Print the best area and draw BEST_BOARD: '.'-style blank for empty
 * squares, letters 'A', 'B', ... for cell ids 1, 2, ... */
static void write_outputs() {
  int i, j;

  bots_message("Minimum area = %d\n\n", MIN_AREA);

  for (i = 0; i < MIN_FOOTPRINT[0]; i++) {
    for (j = 0; j < MIN_FOOTPRINT[1]; j++) {
      if (BEST_BOARD[i][j] == 0) {bots_message(" ");}
      else                       bots_message("%c", 'A' + BEST_BOARD[i][j] - 1);
    }
    bots_message("\n");
  }
}
#ifdef MANUAL_CUTOFF
/* Serial (no-task) branch-and-bound placement, used below the manual
 * cutoff depth: try every shape of cell `id` at every legal NW corner and
 * prune any partial layout whose footprint area already reaches MIN_AREA.
 * Returns the number of candidate locations examined.
 * Note: `cells` aliases CELLS here (no private copy) — each iteration
 * overwrites the extent fields before use, so this is safe within one task.
 * NOTE(review): nn2 is local to this call, so the `#pragma omp atomic`
 * before the recursive accumulation looks unnecessary — confirm. */
static int add_cell_ser (int id, coor FOOTPRINT, ibrd BOARD, struct cell *CELLS) {
  int  i, j, nn, nn2, area;

  ibrd board;
  coor footprint, NWS[DMAX];

  nn2 = 0;
/* for each possible shape */
  for (i = 0; i < CELLS[id].n; i++) {
/* compute all possible locations for nw corner */
    nn = starts(id, i, NWS, CELLS);
    nn2 += nn;
/* for all possible locations */
    for (j = 0; j < nn; j++) {
      struct cell *cells = CELLS;
/* extent of shape */
      cells[id].top = NWS[j][0];
      cells[id].bot = cells[id].top + cells[id].alt[i][0] - 1;
      cells[id].lhs = NWS[j][1];
      cells[id].rhs = cells[id].lhs + cells[id].alt[i][1] - 1;
      memcpy(board, BOARD, sizeof(ibrd));
/* if the cell cannot be layed down, prune search */
      if (! lay_down(id, board, cells)) {
        bots_debug("Chip %d, shape %d does not fit\n", id, i);
        goto _end;
      }
/* calculate new footprint of board and area of footprint */
      footprint[0] = max(FOOTPRINT[0], cells[id].bot+1);
      footprint[1] = max(FOOTPRINT[1], cells[id].rhs+1);
      area = footprint[0] * footprint[1];
/* if last cell */
      if (cells[id].next == 0) {
/* if area is minimum, update global values */
        if (area < MIN_AREA) {
/* double-checked update: re-test MIN_AREA after entering the critical section */
#pragma omp critical
          if (area < MIN_AREA) {
            MIN_AREA         = area;
            MIN_FOOTPRINT[0] = footprint[0];
            MIN_FOOTPRINT[1] = footprint[1];
            memcpy(BEST_BOARD, board, sizeof(ibrd));
            bots_debug("N %d\n", MIN_AREA);
          }
        }
/* if area is less than best area */
      } else if (area < MIN_AREA) {
#pragma omp atomic
        nn2 += add_cell_ser(cells[id].next, footprint, board,cells);
/* if area is greater than or equal to best area, prune search */
      } else {
        bots_debug("T %d, %d\n", area, MIN_AREA);
      }
_end:;
    }
  }
  return nn2;
}
#endif
#if defined(IF_CUTOFF)
/* Task-parallel branch-and-bound placement (IF-clause cutoff variant):
 * one task per candidate location. The if(level<bots_cutoff_value) clause
 * makes deep-level tasks execute immediately (undeferred), but every task
 * still takes its own VLA copy of the cell array so siblings can mutate
 * extents independently. Returns the number of locations examined. */
static int add_cell(int id, coor FOOTPRINT, ibrd BOARD, struct cell *CELLS,int level) {
  int  i, j, nn, area, nnc, nnl;

  nnc = nnl = 0;

/* for each possible shape */
  for (i = 0; i < CELLS[id].n; i++) {
/* compute all possible locations for nw corner */
    nn = starts(id, i, NWS, CELLS);
    nnl += nn;
/* for all possible locations */
    for (j = 0; j < nn; j++) {
#pragma omp task untied private(board, footprint,area) \
firstprivate(NWS,i,j,id,nn,level) \
shared(FOOTPRINT,BOARD,CELLS,MIN_AREA,MIN_FOOTPRINT,N,BEST_BOARD,nnc,bots_verbose_mode) \
if(level<bots_cutoff_value)
      {
        /* Private copy of the cell array for this task. */
        struct cell cells[N+1];
        memcpy(cells,CELLS,sizeof(struct cell)*(N+1));
/* extent of shape */
        cells[id].top = NWS[j][0];
        cells[id].bot = cells[id].top + cells[id].alt[i][0] - 1;
        cells[id].lhs = NWS[j][1];
        cells[id].rhs = cells[id].lhs + cells[id].alt[i][1] - 1;
        memcpy(board, BOARD, sizeof(ibrd));
/* if the cell cannot be layed down, prune search */
        if (! lay_down(id, board, cells)) {
          bots_debug("Chip %d, shape %d does not fit\n", id, i);
          goto _end;
        }
/* calculate new footprint of board and area of footprint */
        footprint[0] = max(FOOTPRINT[0], cells[id].bot+1);
        footprint[1] = max(FOOTPRINT[1], cells[id].rhs+1);
        area = footprint[0] * footprint[1];
/* if last cell */
        if (cells[id].next == 0) {
/* if area is minimum, update global values */
          if (area < MIN_AREA) {
/* double-checked update: re-test MIN_AREA inside the critical section */
#pragma omp critical
            if (area < MIN_AREA) {
              MIN_AREA         = area;
              MIN_FOOTPRINT[0] = footprint[0];
              MIN_FOOTPRINT[1] = footprint[1];
              memcpy(BEST_BOARD, board, sizeof(ibrd));
              bots_debug("N %d\n", MIN_AREA);
            }
          }
/* if area is less than best area */
        } else if (area < MIN_AREA) {
/* nnc is shared across sibling tasks, hence the atomic update */
#pragma omp atomic
          nnc += add_cell(cells[id].next, footprint, board,cells,level+1);
/* if area is greater than or equal to best area, prune search */
        } else {
          bots_debug("T %d, %d\n", area, MIN_AREA);
        }
_end:;
      }
    }
  }
#pragma omp taskwait
  return nnc+nnl;
}
#elif defined(FINAL_CUTOFF)
/* Task-parallel branch-and-bound placement (final-clause cutoff variant):
 * tasks at level >= bots_cutoff_value are created final+mergeable, so the
 * whole subtree below them runs sequentially in the creating task.
 * In the omp_in_final() path (strictly deeper than the cutoff) the task
 * reuses CELLS in place instead of copying — presumably safe because final
 * descendants execute sequentially, so no sibling mutates it concurrently;
 * the first final level (level == cutoff) still takes a copy because its
 * siblings may run in parallel. Confirm against the OpenMP final semantics. */
static int add_cell(int id, coor FOOTPRINT, ibrd BOARD, struct cell *CELLS,int level) {
  int  i, j, nn, area, nnc, nnl;

  coor footprint, NWS[DMAX];

  nnc = nnl = 0;

/* for each possible shape */
  for (i = 0; i < CELLS[id].n; i++) {
/* compute all possible locations for nw corner */
    nn = starts(id, i, NWS, CELLS);
    nnl += nn;
/* for all possible locations */
    for (j = 0; j < nn; j++) {
#pragma omp task untied private(footprint,area) \
firstprivate(NWS,i,j,id,nn,level,bots_cutoff_value) \
shared(FOOTPRINT,BOARD,CELLS,MIN_AREA,MIN_FOOTPRINT,N,BEST_BOARD,nnc,bots_verbose_mode) \
final(level >= bots_cutoff_value) mergeable
      {
        ibrd board;
        struct cell *cells;
        if ( omp_in_final() && level > bots_cutoff_value ) {
          /* Deep in a final subtree: mutate the shared array in place. */
          cells = CELLS;
        } else {
          /* Parallel (or first final) level: private copy on the stack. */
          cells = alloca(sizeof(struct cell)*(N+1));
          memcpy(cells,CELLS,sizeof(struct cell)*(N+1));
        }
/* extent of shape */
        cells[id].top = NWS[j][0];
        cells[id].bot = cells[id].top + cells[id].alt[i][0] - 1;
        cells[id].lhs = NWS[j][1];
        cells[id].rhs = cells[id].lhs + cells[id].alt[i][1] - 1;
        memcpy(board, BOARD, sizeof(ibrd));
/* if the cell cannot be layed down, prune search */
        if (! lay_down(id, board, cells)) {
          bots_debug("Chip %d, shape %d does not fit\n", id, i);
          goto _end;
        }
/* calculate new footprint of board and area of footprint */
        footprint[0] = max(FOOTPRINT[0], cells[id].bot+1);
        footprint[1] = max(FOOTPRINT[1], cells[id].rhs+1);
        area = footprint[0] * footprint[1];
/* if last cell */
        if (cells[id].next == 0) {
/* if area is minimum, update global values */
          if (area < MIN_AREA) {
/* double-checked update: re-test MIN_AREA inside the critical section */
#pragma omp critical
            if (area < MIN_AREA) {
              MIN_AREA         = area;
              MIN_FOOTPRINT[0] = footprint[0];
              MIN_FOOTPRINT[1] = footprint[1];
              memcpy(BEST_BOARD, board, sizeof(ibrd));
              bots_debug("N %d\n", MIN_AREA);
            }
          }
/* if area is less than best area */
        } else if (area < MIN_AREA) {
/* nnc is shared across sibling tasks, hence the atomic update */
#pragma omp atomic
          nnc += add_cell(cells[id].next, footprint, board,cells,level+1);
/* if area is greater than or equal to best area, prune search */
        } else {
          bots_debug("T %d, %d\n", area, MIN_AREA);
        }
_end:;
      }
    }
  }
#pragma omp taskwait
  return nnc+nnl;
}
#elif defined(MANUAL_CUTOFF)
/* Task-parallel branch-and-bound placement (manual cutoff variant):
 * spawns a task per candidate location and switches to the serial
 * add_cell_ser() helper once the recursion depth reaches
 * bots_cutoff_value. Returns the number of locations examined. */
static int add_cell(int id, coor FOOTPRINT, ibrd BOARD, struct cell *CELLS,int level) {
  int  i, j, nn, area, nnc, nnl;

  ibrd board;
  coor footprint, NWS[DMAX];

  nnc = nnl = 0;

/* for each possible shape */
  for (i = 0; i < CELLS[id].n; i++) {
/* compute all possible locations for nw corner */
    nn = starts(id, i, NWS, CELLS);
    nnl += nn;
/* for all possible locations */
    for (j = 0; j < nn; j++) {
#pragma omp task untied private(board, footprint,area) \
firstprivate(NWS,i,j,id,nn,level,bots_cutoff_value) shared(nnc) \
shared(FOOTPRINT,BOARD,CELLS,MIN_AREA,MIN_FOOTPRINT,N,BEST_BOARD,bots_verbose_mode)
      {
        /* Private stack copy of the cell array for this task. */
        struct cell *cells;
        cells = alloca(sizeof(struct cell)*(N+1));
        memcpy(cells,CELLS,sizeof(struct cell)*(N+1));
/* extent of shape */
        cells[id].top = NWS[j][0];
        cells[id].bot = cells[id].top + cells[id].alt[i][0] - 1;
        cells[id].lhs = NWS[j][1];
        cells[id].rhs = cells[id].lhs + cells[id].alt[i][1] - 1;
        memcpy(board, BOARD, sizeof(ibrd));
/* if the cell cannot be layed down, prune search */
        if (! lay_down(id, board, cells)) {
          bots_debug("Chip %d, shape %d does not fit\n", id, i);
          goto _end;
        }
/* calculate new footprint of board and area of footprint */
        footprint[0] = max(FOOTPRINT[0], cells[id].bot+1);
        footprint[1] = max(FOOTPRINT[1], cells[id].rhs+1);
        area = footprint[0] * footprint[1];
/* if last cell */
        if (cells[id].next == 0) {
/* if area is minimum, update global values */
          if (area < MIN_AREA) {
/* double-checked update: re-test MIN_AREA inside the critical section */
#pragma omp critical
            if (area < MIN_AREA) {
              MIN_AREA         = area;
              MIN_FOOTPRINT[0] = footprint[0];
              MIN_FOOTPRINT[1] = footprint[1];
              memcpy(BEST_BOARD, board, sizeof(ibrd));
              bots_debug("N %d\n", MIN_AREA);
            }
          }
/* if area is less than best area */
        } else if (area < MIN_AREA) {
          if(level+1 < bots_cutoff_value ) {
/* nnc is shared across sibling tasks, hence the atomic updates */
#pragma omp atomic
            nnc += add_cell(cells[id].next, footprint, board,cells,level+1);
          } else {
/* at the cutoff depth: continue serially, no more tasks */
#pragma omp atomic
            nnc += add_cell_ser(cells[id].next, footprint, board,cells);
          }
/* if area is greater than or equal to best area, prune search */
        } else {
          bots_debug("T %d, %d\n", area, MIN_AREA);
        }
_end:;
      }
    }
  }
#pragma omp taskwait
  return nnc+nnl;
}
#else
/*
 * Recursive task-parallel branch-and-bound placement (no-cutoff build).
 * Identical search to the cutoff variants but spawns a task for every
 * candidate placement at every depth; there is no `level` parameter.
 * Returns the number of candidate placements explored in this subtree.
 */
static int add_cell(int id, coor FOOTPRINT, ibrd BOARD, struct cell *CELLS) {
int i, j, nn, area, nnc,nnl;
ibrd board;
coor footprint, NWS[DMAX];
/* nnc: placements counted by recursive calls; nnl: candidates found here */
nnc = nnl = 0;
/* for each possible shape */
for (i = 0; i < CELLS[id].n; i++) {
/* compute all possible locations for nw corner */
nn = starts(id, i, NWS, CELLS);
nnl += nn;
/* for all possible locations */
for (j = 0; j < nn; j++) {
/* One task per candidate position.  Each task builds private copies of the
   cell array and board so sibling tasks never see its tentative placement. */
#pragma omp task untied private(board, footprint,area) \
firstprivate(NWS,i,j,id,nn) \
shared(FOOTPRINT,BOARD,CELLS,MIN_AREA,MIN_FOOTPRINT,N,BEST_BOARD,nnc,bots_verbose_mode)
{
/* task-private working copy of the cell array (VLA on the task's stack) */
struct cell cells[N+1];
memcpy(cells,CELLS,sizeof(struct cell)*(N+1));
/* extent of shape */
cells[id].top = NWS[j][0];
cells[id].bot = cells[id].top + cells[id].alt[i][0] - 1;
cells[id].lhs = NWS[j][1];
cells[id].rhs = cells[id].lhs + cells[id].alt[i][1] - 1;
/* task-private working copy of the board */
memcpy(board, BOARD, sizeof(ibrd));
/* if the cell cannot be layed down, prune search */
if (! lay_down(id, board, cells)) {
bots_debug("Chip %d, shape %d does not fit\n", id, i);
goto _end;
}
/* calculate new footprint of board and area of footprint */
footprint[0] = max(FOOTPRINT[0], cells[id].bot+1);
footprint[1] = max(FOOTPRINT[1], cells[id].rhs+1);
area = footprint[0] * footprint[1];
/* if last cell */
if (cells[id].next == 0) {
/* if area is minimum, update global values */
if (area < MIN_AREA) {
/* re-test inside the critical section: MIN_AREA may have shrunk
   between the unsynchronized read above and lock acquisition */
#pragma omp critical
if (area < MIN_AREA) {
MIN_AREA = area;
MIN_FOOTPRINT[0] = footprint[0];
MIN_FOOTPRINT[1] = footprint[1];
memcpy(BEST_BOARD, board, sizeof(ibrd));
bots_debug("N %d\n", MIN_AREA);
}
}
/* if area is less than best area */
} else if (area < MIN_AREA) {
#pragma omp atomic
nnc += add_cell(cells[id].next, footprint, board,cells);
/* if area is greater than or equal to best area, prune search */
} else {
bots_debug("T %d, %d\n", area, MIN_AREA);
}
_end:;
}
}
}
/* wait for all placement tasks spawned above before reporting the count */
#pragma omp taskwait
return nnc+nnl;
}
#endif
/* Shared root board: zeroed by floorplan_init() and passed to add_cell()
   as the starting (empty) board by compute_floorplan(). */
ibrd board;
/*
 * Load the floorplan problem description from `filename` and reset the
 * global search state: MIN_AREA starts at the whole-board area (the worst
 * possible result) and the shared board is cleared to all-empty cells.
 * Exits the program if the input file cannot be opened.
 */
void floorplan_init (char *filename)
{
    int row, col;

    inputFile = fopen(filename, "r");
    if (NULL == inputFile) {
        bots_message("Couldn't open %s file for reading\n", filename);
        exit(1);
    }

    /* parse the problem and seed the global minimum with the full board area */
    read_inputs();
    MIN_AREA = ROWS * COLS;

    /* mark every board position as unoccupied */
    for (row = 0; row < ROWS; row++) {
        for (col = 0; col < COLS; col++) {
            board[row][col] = 0;
        }
    }
}
/*
 * Entry point for the timed benchmark phase: starts the parallel region,
 * lets a single thread seed the recursive search at cell 1 with an empty
 * (zero-extent) footprint, and records the number of placements explored
 * in bots_number_of_tasks.  add_cell() spawns the actual worker tasks.
 */
void compute_floorplan (void)
{
coor footprint;
/* footprint of initial board is zero */
footprint[0] = 0;
footprint[1] = 0;
bots_message("Computing floorplan ");
#pragma omp parallel
{
/* only one thread seeds the search; the tasks it spawns fan out to the team */
#pragma omp single
/* cutoff builds take an extra initial recursion depth argument (0) */
#if defined(MANUAL_CUTOFF) || defined(IF_CUTOFF) || defined(FINAL_CUTOFF)
bots_number_of_tasks = add_cell(1, footprint, board, gcells,0);
#else
bots_number_of_tasks = add_cell(1, footprint, board, gcells);
#endif
}
bots_message(" completed!\n");
}
/* Finalization hook: dump the best placement found by the search. */
void floorplan_end (void)
{
/* write results */
write_outputs();
}
/*
 * Check the computed minimum area against the known optimal `solution`.
 * Returns BOTS_RESULT_NA when no reference solution was provided
 * (solution == -1), otherwise SUCCESS/UNSUCCESS on exact match.
 */
int floorplan_verify (void)
{
    if (solution == -1) {
        return BOTS_RESULT_NA;
    }
    if (MIN_AREA == solution) {
        return BOTS_RESULT_SUCCESSFUL;
    }
    return BOTS_RESULT_UNSUCCESSFUL;
}
|
parallel_dgemm.c | /* Copyright 2018 Los Alamos National Laboratory
* Copyright 2009-2018 Purdue University and Purdue University Research Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*******************************************************************************
* This example demonstrates threading impact on computing real matrix product
* C=alpha*A*B+beta*C using Intel(R) MKL subroutine DGEMM, where A, B, and C
* are matrices and alpha and beta are double precision scalars.
*
* In this simple example, practices such as memory management, data alignment,
* and I/O that are necessary for good programming style and high Intel(R) MKL
* performance are omitted to improve readability.
********************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <mkl.h>
#include <omp.h>
#include <string.h>
#include <sys/time.h>
/* Return the current wall-clock time in seconds, with microsecond
   resolution, as a double (seconds since the Unix epoch). */
double get_cur_time(){
    struct timeval now;
    struct timezone tz;
    gettimeofday(&now, &tz);
    return (double)now.tv_sec + (double)now.tv_usec * 1e-6;
}
/* Consider adjusting LOOP_COUNT based on the performance of your computer */
/* to make sure that total run time is at least 1 second */
#define LOOP_COUNT 50
/*
 * Driver: each OpenMP thread runs its own single-threaded MKL DGEMM
 * (C = alpha*A*B + beta*C, N x N, column major) on private matrices and
 * reports its per-call time; the master then prints the average.
 *
 * Options: -size N (matrix dimension), -cnt LOOPS (timed iterations),
 *          -thread T (number of OpenMP threads, capped at 128).
 */
int main(int argc, char** argv)
{
    double time[128];           /* per-thread elapsed time, milliseconds */
    int N = 256;                /* matrix dimension */
    int loop_cnt = LOOP_COUNT;  /* timed iterations per thread */
    int nb_threads = 1;

    /* parse command line */
    for (int k = 1; k < argc; k++) {
        if (!strcmp(argv[k], "-size")) {
            N = atoi(argv[++k]);
        }
        if (!strcmp(argv[k], "-cnt")) {
            loop_cnt = atoi(argv[++k]);
        }
        if (!strcmp(argv[k], "-thread")) {
            nb_threads = atoi(argv[++k]);
        }
    }
    /* BUG FIX: loop_cnt used to be unconditionally reset to LOOP_COUNT by
       every thread inside the parallel region, which silently ignored the
       -cnt option and raced on a shared int.  Validate it once, here. */
    if (loop_cnt <= 0) loop_cnt = LOOP_COUNT;
    /* BUG FIX: time[] has 128 slots; clamp -thread so threads cannot
       write time[myid] out of bounds. */
    if (nb_threads < 1) nb_threads = 1;
    if (nb_threads > 128) nb_threads = 128;

    omp_set_dynamic(0);
    omp_set_num_threads(nb_threads);

#pragma omp parallel
    {
        int myid = omp_get_thread_num();
        double *A = NULL;
        double *B = NULL;
        double *C = NULL;
        int i, r;
        double alpha, beta;
        double s_initial, s_elapsed;

        alpha = 1.0; beta = 1.0;

        /* 64-byte aligned buffers, one private set per thread */
        A = (double *)mkl_malloc( N*N*sizeof( double ), 64 );
        B = (double *)mkl_malloc( N*N*sizeof( double ), 64 );
        C = (double *)mkl_malloc( N*N*sizeof( double ), 64 );
        if (A == NULL || B == NULL || C == NULL) {
            printf( "\n ERROR: Can't allocate memory for matrices. Aborting... \n\n");
            if (A != NULL) mkl_free(A);
            if (B != NULL) mkl_free(B);
            if (C != NULL) mkl_free(C);
            /* BUG FIX: `return` is illegal inside a parallel region and had
               been commented out, letting the thread fall through and
               dereference NULL below.  Terminate the whole run instead. */
            exit(1);
        }

        for (i = 0; i < (N * N); i++) {
            A[i] = (double)(i+1);
            B[i] = (double)(-i-1);
            C[i] = 0.0;
        }

        /* one MKL thread per OpenMP thread: measures per-core throughput */
        mkl_set_num_threads(1);

        /* warmup */
        for (r = 0; r < 10; r++) {
            cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans,
                        N, N, N, alpha, A, N, B, N, beta, C, N);
        }

        s_initial = get_cur_time();
        for (r = 0; r < loop_cnt; r++) {
            cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans,
                        N, N, N, alpha, A, N, B, N, beta, C, N);
        }
        s_elapsed = (get_cur_time() - s_initial) / loop_cnt;

        time[myid] = s_elapsed * 1000;
        printf ("Thread #%d, MKL DGEMM N=%d, time %.5f milliseconds, GFLOPS=%.3f\n",
                myid, N, (s_elapsed * 1000), 2*(double)N*(double)N*(double)N/s_elapsed*1e-9);

        mkl_free(A);
        mkl_free(B);
        mkl_free(C);

        if (s_elapsed < 0.9/loop_cnt) {
            s_elapsed=1.0/loop_cnt/s_elapsed;
            i=(int)(s_elapsed*loop_cnt)+1;
            if (myid == 0)
                printf(" It is highly recommended to define LOOP_COUNT for this example on your \n"
                       " computer as %i to have total execution time about 1 second for reliability \n"
                       " of measurements\n\n", i);
        }
    }

    /* average the per-thread timings */
    double average = 0.0;
    for (int i = 0; i < nb_threads; i++)
        average += time[i];
    average = average / nb_threads;

    /* NOTE(review): the reported MB figure counts one N*N matrix; each
       thread actually allocates three (A, B, C) — confirm intended metric */
    printf(" AE= %.3f ms, Each thread uses %.3f MB\n",
           average, (double)N*(double)N*sizeof(double)/(1024*1024));
    printf (" Example completed. \n\n");
    return 0;
}
DRB018-plusplus-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Data race on outLen due to ++ operation.
Adding private (outLen) can avoid race condition. But it is wrong semantically.
Data races on outLen also cause output[outLen++] to have data races.
Data race pairs (we allow two pairs to preserve the original code pattern):
1. outLen@72 vs. outLen@72
2. output[]@72 vs. output[]@72
*/
#include <stdlib.h>
#include <stdio.h>
int input[1000];
int output[1000];
/* DataRaceBench kernel: intentionally exhibits the race documented in the
   header comment — do NOT "fix" it; verification tools must detect it. */
int main()
{
int i ;
int inLen=1000 ;
/* shared append cursor into output[] */
int outLen = 0;
/* initialize input[] in parallel; each iteration writes a distinct input[i] */
#pragma omp parallel for
for (i=0; i<inLen; ++i)
input[i]= i;
/* NOTE(review): the header documents the race on outLen++/output[outLen++],
   but in this copy the loop below runs serially while the init loop above is
   the one parallelized; upstream DRB018 parallelizes this second loop —
   verify the pragma placement against the original benchmark. */
for (i=0; i<inLen; ++i)
{
output[outLen++] = input[i];
}
printf("output[500]=%d\n",output[500]);
return 0;
}
|
ast-dump-openmp-task.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -fopenmp-version=50 -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
typedef unsigned long omp_event_handle_t;
void test() {
omp_event_handle_t evt;
#pragma omp task detach(evt)
;
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: `-FunctionDecl {{.*}} <line:4:1, line:8:1> line:4:6 test 'void ()'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:13, line:8:1>
// CHECK: `-OMPTaskDirective {{.*}} <line:6:1, col:29>
// CHECK-NEXT: |-OMPDetachClause {{.+}} <col:18, col:28>
// CHECK-NEXT: | `-DeclRefExpr {{.+}} <col:25> 'omp_event_handle_t':'unsigned long' lvalue Var {{.+}} 'evt' 'omp_event_handle_t':'unsigned long'
// CHECK-NEXT: |-OMPFirstprivateClause {{.+}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | `-DeclRefExpr {{.+}} <col:25> 'omp_event_handle_t':'unsigned long' lvalue Var {{.+}} 'evt' 'omp_event_handle_t':'unsigned long'
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:7:3>
// CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: |-NullStmt {{.*}} <col:3>
// CHECK-NEXT: |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <line:6:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: `-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-task.c:6:1) *const restrict'
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.