source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
fs_strategy_for_chimera.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Authors: Aditya Ghantasala, https://github.com/adityaghantasala
// Navaneeth K Narayanan
// Rishith Ellath Meethal
//
#ifndef KRATOS_FS_STRATEGY_FOR_CHIMERA_H
#define KRATOS_FS_STRATEGY_FOR_CHIMERA_H
#include "includes/define.h"
#include "utilities/openmp_utils.h"
// FluidDynamicsApp Includes
#include "custom_strategies/strategies/fractional_step_strategy.h"
// Application includes
#include "chimera_application_variables.h"
#include "custom_utilities/fractional_step_settings_for_chimera.h"
namespace Kratos {
///@addtogroup ChimeraApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
template<class TSparseSpace,
class TDenseSpace,
class TLinearSolver
>
class FractionalStepStrategyForChimera : public FractionalStepStrategy<TSparseSpace,TDenseSpace,TLinearSolver>
{
public:
    ///@name Type Definitions
    ///@{

    /// Counted pointer of FractionalStepStrategyForChimera
    KRATOS_CLASS_POINTER_DEFINITION(FractionalStepStrategyForChimera);

    typedef FractionalStepStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
    typedef FractionalStepSettingsForChimera<TSparseSpace,TDenseSpace,TLinearSolver> SolverSettingsType;

    ///@}
    ///@name Life Cycle
    ///@{

    /**
     * @brief Deprecated constructor (no reactions flag).
     * @param rModelPart        Model part on which the fractional-step scheme is solved.
     * @param rSolverConfig     Chimera-specific fractional step settings (velocity/pressure strategies).
     * @param PredictorCorrector If true, iterate the pressure system in predictor-corrector mode.
     */
    FractionalStepStrategyForChimera(ModelPart& rModelPart,
                   SolverSettingsType& rSolverConfig,
                   bool PredictorCorrector):
        BaseType(rModelPart,rSolverConfig,PredictorCorrector)
    {
        KRATOS_WARNING("FractionalStepStrategyForChimera") << "This constructor is deprecated. Use the one with the \'CalculateReactionsFlag\' instead." << std::endl;
        this->InitializeStrategy(rSolverConfig,PredictorCorrector);
    }

    /**
     * @brief Preferred constructor.
     * @param rModelPart             Model part on which the fractional-step scheme is solved.
     * @param rSolverConfig          Chimera-specific fractional step settings.
     * @param PredictorCorrector     If true, iterate the pressure system in predictor-corrector mode.
     * @param CalculateReactionsFlag If true, reactions are computed by the base strategy.
     */
    FractionalStepStrategyForChimera(
        ModelPart &rModelPart,
        SolverSettingsType &rSolverConfig,
        bool PredictorCorrector,
        bool CalculateReactionsFlag)
        : BaseType(rModelPart, rSolverConfig, PredictorCorrector, CalculateReactionsFlag)
    {
        this->InitializeStrategy(rSolverConfig,PredictorCorrector);
    }

    /// Destructor.
    ~FractionalStepStrategyForChimera() override = default;

    /// Assignment operator - deleted: strategies are not copyable.
    FractionalStepStrategyForChimera& operator=(FractionalStepStrategyForChimera const& rOther) = delete;

    /// Copy constructor - deleted: strategies are not copyable.
    FractionalStepStrategyForChimera(FractionalStepStrategyForChimera const& rOther) = delete;

    ///@}
    ///@name Operators
    ///@{

    ///@}
    ///@name Operations
    ///@{

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    /// Turn back information as a string.
    std::string Info() const override
    {
        std::stringstream buffer;
        buffer << "FractionalStepStrategyForChimera" ;
        return buffer.str();
    }

    /// Print information about this object.
    void PrintInfo(std::ostream& rOStream) const override {rOStream << "FractionalStepStrategyForChimera";}

    /// Print object's data.
    void PrintData(std::ostream& rOStream) const override {}

    ///@}
    ///@name Friends
    ///@{

    ///@}
protected:
    ///@name Protected Life Cycle
    ///@{

    ///@}
    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    /**
     * @brief Performs one fractional-step solution step.
     *
     * Stages (selected through the FRACTIONAL_STEP process-info flag):
     *  1. Momentum iterations (flag 1) until CheckFractionalStepConvergence or mMaxVelocityIter.
     *  2. OSS projections (flag 4) + pressure solve (flag 5), storing the pressure
     *     variation of this step in PRESSURE_OLD_IT.
     *  3. End-of-step velocity update (flag 6), then any registered extra iteration steps.
     *
     * @return tuple(fractional-velocity converged?, norm returned by the pressure solve).
     */
    std::tuple<bool,double> SolveStep() override
    {
        const double start_solve_time = OpenMPUtils::GetCurrentTime();
        ModelPart& r_model_part = BaseType::GetModelPart();

        // 1. Fractional step momentum iteration
        r_model_part.GetProcessInfo().SetValue(FRACTIONAL_STEP,1);
        bool converged = false;
        for(std::size_t it = 0; it < BaseType::mMaxVelocityIter; ++it)
        {
            KRATOS_INFO("FRACTIONAL STEP :: ")<<it+1<<std::endl;
            // build momentum system and solve for fractional step velocity increment
            r_model_part.GetProcessInfo().SetValue(FRACTIONAL_STEP,1);
            const double norm_dv = BaseType::mpMomentumStrategy->Solve();
            // Check convergence
            converged = BaseType::CheckFractionalStepConvergence(norm_dv);
            if (converged)
            {
                KRATOS_INFO_IF("FractionalStepStrategyForChimera ", BaseType::GetEchoLevel() > 0 )<<
                "Fractional velocity converged in " << it+1 << " iterations." << std::endl;
                break;
            }
        }
        KRATOS_INFO_IF("FractionalStepStrategyForChimera ", (BaseType::GetEchoLevel() > 0) && !converged)<<
        "Fractional velocity iterations did not converge "<< std::endl;

        // Compute projections (for stabilization)
        r_model_part.GetProcessInfo().SetValue(FRACTIONAL_STEP,4);
        ComputeSplitOssProjections(r_model_part);

        // 2. Pressure solution (store pressure variation in PRESSURE_OLD_IT)
        r_model_part.GetProcessInfo().SetValue(FRACTIONAL_STEP,5);
        // PRESSURE_OLD_IT = -p_old so that adding the new pressure below yields the variation.
        #pragma omp parallel
        {
            ModelPart::NodeIterator nodes_begin;
            ModelPart::NodeIterator nodes_end;
            OpenMPUtils::PartitionedIterators(r_model_part.Nodes(),nodes_begin,nodes_end);
            for (ModelPart::NodeIterator it_node = nodes_begin; it_node != nodes_end; ++it_node)
            {
                const double old_press = it_node->FastGetSolutionStepValue(PRESSURE);
                it_node->FastGetSolutionStepValue(PRESSURE_OLD_IT) = -old_press;
            }
        }
        KRATOS_INFO_IF("FractionalStepStrategyForChimera ", BaseType::GetEchoLevel() > 0 )<<
        "Calculating Pressure."<< std::endl;
        const double norm_dp = BaseType::mpPressureStrategy->Solve();
        #pragma omp parallel
        {
            ModelPart::NodeIterator nodes_begin;
            ModelPart::NodeIterator nodes_end;
            OpenMPUtils::PartitionedIterators(r_model_part.Nodes(),nodes_begin,nodes_end);
            for (ModelPart::NodeIterator it_node = nodes_begin; it_node != nodes_end; ++it_node)
                it_node->FastGetSolutionStepValue(PRESSURE_OLD_IT) += it_node->FastGetSolutionStepValue(PRESSURE);
        }

        // 3. Compute end-of-step velocity
        KRATOS_INFO_IF("FractionalStepStrategyForChimera ", BaseType::GetEchoLevel() > 0 )<<"Updating Velocity." << std::endl;
        r_model_part.GetProcessInfo().SetValue(FRACTIONAL_STEP,6);
        CalculateEndOfStepVelocity();

        // Additional steps registered by the user (executed once per step).
        for (std::vector<Process::Pointer>::iterator iExtraSteps = BaseType::mExtraIterationSteps.begin();
             iExtraSteps != BaseType::mExtraIterationSteps.end(); ++iExtraSteps)
            (*iExtraSteps)->Execute();

        const double stop_solve_time = OpenMPUtils::GetCurrentTime();
        KRATOS_INFO_IF("FractionalStepStrategyForChimera", BaseType::GetEchoLevel() >= 1) << "Time for solving step : " << stop_solve_time - start_solve_time << std::endl;

        // Set the output tuple as the fractional velocity convergence and pressure norm
        return std::make_tuple(converged, norm_dp);
    }

    /**
     * @brief Computes the OSS projections (CONV_PROJ, PRESS_PROJ, DIVPROJ) and NODAL_AREA,
     *        then corrects them on Chimera constraint (slave) nodes.
     *
     * After elemental assembly and MPI assembly, projections are divided by the nodal
     * area, and slave-node values are augmented with the weighted master contributions
     * taken from the constraints of the "<name>fs_pressure_model_part" sub model part.
     * @param rModelPart Model part holding nodes, elements and the chimera constraints.
     */
    void ComputeSplitOssProjections(ModelPart& rModelPart) override
    {
        const array_1d<double,3> zero(3,0.0);
        array_1d<double,3> out(3,0.0);
        // Reset projection variables before elemental assembly.
        #pragma omp parallel
        {
            ModelPart::NodeIterator nodes_begin;
            ModelPart::NodeIterator nodes_end;
            OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),nodes_begin,nodes_end);
            for ( ModelPart::NodeIterator it_node = nodes_begin; it_node != nodes_end; ++it_node )
            {
                it_node->FastGetSolutionStepValue(CONV_PROJ) = zero;
                it_node->FastGetSolutionStepValue(PRESS_PROJ) = zero;
                it_node->FastGetSolutionStepValue(DIVPROJ) = 0.0;
                it_node->FastGetSolutionStepValue(NODAL_AREA) = 0.0;
            }
        }
        // Elemental contribution: each element assembles its projections into its nodes.
        #pragma omp parallel
        {
            ModelPart::ElementIterator elem_begin;
            ModelPart::ElementIterator elem_end;
            OpenMPUtils::PartitionedIterators(rModelPart.Elements(),elem_begin,elem_end);
            for ( ModelPart::ElementIterator it_elem = elem_begin; it_elem != elem_end; ++it_elem )
            {
                it_elem->Calculate(CONV_PROJ,out,rModelPart.GetProcessInfo());
            }
        }
        rModelPart.GetCommunicator().AssembleCurrentData(CONV_PROJ);
        rModelPart.GetCommunicator().AssembleCurrentData(PRESS_PROJ);
        rModelPart.GetCommunicator().AssembleCurrentData(DIVPROJ);
        rModelPart.GetCommunicator().AssembleCurrentData(NODAL_AREA);

        // If there are periodic conditions, add contributions from both sides to the periodic nodes
        //PeriodicConditionProjectionCorrection(rModelPart);
        ChimeraProjectionCorrection(rModelPart);

        // Convert assembled sums into nodal averages (skip nodes with ~zero area).
        #pragma omp parallel
        {
            ModelPart::NodeIterator nodes_begin;
            ModelPart::NodeIterator nodes_end;
            OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),nodes_begin,nodes_end);
            for ( ModelPart::NodeIterator it_node = nodes_begin; it_node != nodes_end; ++it_node )
            {
                const double nodal_area = it_node->FastGetSolutionStepValue(NODAL_AREA);
                if( nodal_area > mAreaTolerance )
                {
                    it_node->FastGetSolutionStepValue(CONV_PROJ) /= nodal_area;
                    it_node->FastGetSolutionStepValue(PRESS_PROJ) /= nodal_area;
                    it_node->FastGetSolutionStepValue(DIVPROJ) /= nodal_area;
                }
            }
        }

        //For correcting projections for chimera: add weighted master values onto slave nodes.
        auto &r_pre_modelpart = rModelPart.GetSubModelPart(rModelPart.Name()+"fs_pressure_model_part");
        const auto& r_constraints_container = r_pre_modelpart.MasterSlaveConstraints();
        for(const auto& constraint : r_constraints_container)
        {
            const auto& master_dofs = constraint.GetMasterDofsVector();
            const auto& slave_dofs = constraint.GetSlaveDofsVector();
            ModelPart::MatrixType r_relation_matrix;
            ModelPart::VectorType r_constant_vector;
            constraint.CalculateLocalSystem(r_relation_matrix,r_constant_vector,rModelPart.GetProcessInfo());
            IndexType slave_i = 0;
            for(const auto& slave_dof : slave_dofs)
            {
                const auto slave_node_id = slave_dof->Id(); // DOF ID is same as node ID
                auto& r_slave_node = rModelPart.Nodes()[slave_node_id];
                IndexType master_j = 0;
                for(const auto& master_dof : master_dofs)
                {
                    const auto master_node_id = master_dof->Id();
                    const double weight = r_relation_matrix(slave_i, master_j);
                    auto& r_master_node = rModelPart.Nodes()[master_node_id];
                    auto& conv_proj = r_slave_node.FastGetSolutionStepValue(CONV_PROJ);
                    auto& pres_proj = r_slave_node.FastGetSolutionStepValue(PRESS_PROJ);
                    auto& dive_proj = r_slave_node.FastGetSolutionStepValue(DIVPROJ);
                    auto& nodal_area = r_slave_node.FastGetSolutionStepValue(NODAL_AREA);
                    conv_proj += (r_master_node.FastGetSolutionStepValue(CONV_PROJ))*weight;
                    pres_proj += (r_master_node.FastGetSolutionStepValue(PRESS_PROJ))*weight;
                    dive_proj += (r_master_node.FastGetSolutionStepValue(DIVPROJ))*weight;
                    nodal_area += (r_master_node.FastGetSolutionStepValue(NODAL_AREA))*weight;
                    ++master_j;
                }
                ++slave_i;
            }
        }
    }

    /**
     * @brief Computes the end-of-step velocity from the elemental FRACT_VEL assembly,
     *        enforces slip conditions if requested, and interpolates on chimera nodes.
     */
    void CalculateEndOfStepVelocity() override
    {
        ModelPart& r_model_part = BaseType::GetModelPart();
        const array_1d<double,3> zero(3,0.0);
        array_1d<double,3> out(3,0.0);
        // Reset the fractional velocity before assembly.
        #pragma omp parallel
        {
            ModelPart::NodeIterator nodes_begin;
            ModelPart::NodeIterator nodes_end;
            OpenMPUtils::PartitionedIterators(r_model_part.Nodes(),nodes_begin,nodes_end);
            for ( ModelPart::NodeIterator it_node = nodes_begin; it_node != nodes_end; ++it_node )
            {
                it_node->FastGetSolutionStepValue(FRACT_VEL) = zero;
            }
        }
        #pragma omp parallel
        {
            ModelPart::ElementIterator elem_begin;
            ModelPart::ElementIterator elem_end;
            OpenMPUtils::PartitionedIterators(r_model_part.Elements(),elem_begin,elem_end);
            for ( ModelPart::ElementIterator it_elem = elem_begin; it_elem != elem_end; ++it_elem )
            {
                it_elem->Calculate(VELOCITY,out,r_model_part.GetProcessInfo());
            }
        }
        r_model_part.GetCommunicator().AssembleCurrentData(FRACT_VEL);
        //PeriodicConditionVelocityCorrection(r_model_part);

        // Force the end of step velocity to verify slip conditions in the model
        if (BaseType::mUseSlipConditions)
            BaseType::EnforceSlipCondition(SLIP);

        // Domain sizes other than 2 or 3 are silently ignored here (checked earlier by Check()).
        if (BaseType::mDomainSize == 2) InterpolateVelocity<2>(r_model_part);
        if (BaseType::mDomainSize == 3) InterpolateVelocity<3>(r_model_part);
    }

    /**
     * @brief Overwrites projection values on chimera slave nodes with the weighted
     *        interpolation of the master nodes' values.
     *
     * Works on the non-historical database first (reset, accumulate, MPI-assemble)
     * and finally copies the corrected values into the historical database for
     * nodes whose accumulated area exceeds mAreaTolerance.
     * @param rModelPart Model part holding the chimera constraints.
     */
    void ChimeraProjectionCorrection(ModelPart& rModelPart)
    {
        auto &r_pre_modelpart = rModelPart.GetSubModelPart(rModelPart.Name()+"fs_pressure_model_part");
        const auto& r_constraints_container = r_pre_modelpart.MasterSlaveConstraints();
        // Reset non-historical accumulators on every slave node.
        for(const auto& constraint : r_constraints_container)
        {
            const auto& slave_dofs = constraint.GetSlaveDofsVector();
            for(const auto& slave_dof : slave_dofs)
            {
                const auto slave_node_id = slave_dof->Id(); // DOF ID is same as node ID
                auto& r_slave_node = rModelPart.Nodes()[slave_node_id];
                r_slave_node.GetValue(NODAL_AREA)= 0;
                r_slave_node.GetValue(CONV_PROJ)= array_1d<double,3>(3,0.0);
                r_slave_node.GetValue(PRESS_PROJ)= array_1d<double,3>(3,0.0);
                r_slave_node.GetValue(DIVPROJ)= 0 ;
            }
        }
        // Accumulate weighted master (historical) values into the slaves' non-historical storage.
        for(const auto& constraint : r_constraints_container)
        {
            const auto& master_dofs = constraint.GetMasterDofsVector();
            const auto& slave_dofs = constraint.GetSlaveDofsVector();
            ModelPart::MatrixType r_relation_matrix;
            ModelPart::VectorType r_constant_vector;
            constraint.CalculateLocalSystem(r_relation_matrix,r_constant_vector,rModelPart.GetProcessInfo());
            IndexType slave_i = 0;
            for(const auto& slave_dof : slave_dofs)
            {
                const IndexType slave_node_id = slave_dof->Id(); // DOF ID is same as node ID
                auto& r_slave_node = rModelPart.Nodes()[slave_node_id];
                IndexType master_j = 0;
                for(const auto& master_dof : master_dofs)
                {
                    const IndexType master_node_id = master_dof->Id();
                    const double weight = r_relation_matrix(slave_i, master_j);
                    auto& r_master_node = rModelPart.Nodes()[master_node_id];
                    r_slave_node.GetValue(NODAL_AREA) +=(r_master_node.FastGetSolutionStepValue(NODAL_AREA))*weight;
                    r_slave_node.GetValue(CONV_PROJ) +=(r_master_node.FastGetSolutionStepValue(CONV_PROJ))*weight;
                    r_slave_node.GetValue(PRESS_PROJ) +=(r_master_node.FastGetSolutionStepValue(PRESS_PROJ))*weight;
                    r_slave_node.GetValue(DIVPROJ) +=(r_master_node.FastGetSolutionStepValue(DIVPROJ))*weight;
                    ++master_j;
                }
                ++slave_i;
            }
        }
        rModelPart.GetCommunicator().AssembleNonHistoricalData(NODAL_AREA);
        rModelPart.GetCommunicator().AssembleNonHistoricalData(CONV_PROJ);
        rModelPart.GetCommunicator().AssembleNonHistoricalData(PRESS_PROJ);
        rModelPart.GetCommunicator().AssembleNonHistoricalData(DIVPROJ);
        // Transfer corrected values into the historical database and reset the accumulators.
        for (auto it_node = rModelPart.NodesBegin(); it_node != rModelPart.NodesEnd(); it_node++)
        {
            if (it_node->GetValue(NODAL_AREA) > mAreaTolerance)
            {
                it_node->FastGetSolutionStepValue(NODAL_AREA) = it_node->GetValue(NODAL_AREA);
                it_node->FastGetSolutionStepValue(CONV_PROJ) = it_node->GetValue(CONV_PROJ);
                it_node->FastGetSolutionStepValue(PRESS_PROJ) = it_node->GetValue(PRESS_PROJ);
                it_node->FastGetSolutionStepValue(DIVPROJ) = it_node->GetValue(DIVPROJ);
                // reset for next iteration
                it_node->GetValue(NODAL_AREA) = 0.0;
                it_node->GetValue(CONV_PROJ) = array_1d<double,3>(3,0.0);
                it_node->GetValue(PRESS_PROJ) = array_1d<double,3>(3,0.0);
                it_node->GetValue(DIVPROJ) = 0.0;
            }
        }
    }

    /**
     * @brief Corrects FRACT_VEL on chimera slave nodes with the weighted master values.
     * @note Only the X and Y historical components are reset here; the Z component is
     *       left untouched — presumably a 2D assumption. TODO(review): confirm for 3D.
     * @param rModelPart Model part holding the chimera constraints.
     */
    void ChimeraVelocityCorrection(ModelPart& rModelPart)
    {
        auto &r_pre_modelpart = rModelPart.GetSubModelPart(rModelPart.Name()+"fs_pressure_model_part");
        const auto& r_constraints_container = r_pre_modelpart.MasterSlaveConstraints();
        for(const auto& constraint : r_constraints_container)
        {
            const auto& slave_dofs = constraint.GetSlaveDofsVector();
            for(const auto& slave_dof : slave_dofs)
            {
                const auto slave_node_id = slave_dof->Id(); // DOF ID is same as node ID
                auto& r_slave_node = rModelPart.Nodes()[slave_node_id];
                r_slave_node.FastGetSolutionStepValue(FRACT_VEL_X)=0;
                r_slave_node.FastGetSolutionStepValue(FRACT_VEL_Y)=0;
            }
        }
        // Accumulate weighted master FRACT_VEL into the slaves' non-historical storage.
        for(const auto& constraint : r_constraints_container)
        {
            const auto& master_dofs = constraint.GetMasterDofsVector();
            const auto& slave_dofs = constraint.GetSlaveDofsVector();
            ModelPart::MatrixType r_relation_matrix;
            ModelPart::VectorType r_constant_vector;
            constraint.CalculateLocalSystem(r_relation_matrix,r_constant_vector,rModelPart.GetProcessInfo());
            IndexType slave_i = 0;
            for(const auto& slave_dof : slave_dofs)
            {
                const auto slave_node_id = slave_dof->Id(); // DOF ID is same as node ID
                auto& r_slave_node = rModelPart.Nodes()[slave_node_id];
                IndexType master_j = 0;
                for(const auto& master_dof : master_dofs)
                {
                    const auto master_node_id = master_dof->Id();
                    const double weight = r_relation_matrix(slave_i, master_j);
                    auto& r_master_node = rModelPart.Nodes()[master_node_id];
                    r_slave_node.GetValue(FRACT_VEL) +=(r_master_node.FastGetSolutionStepValue(FRACT_VEL))*weight;
                    ++master_j;
                }
                ++slave_i;
            }
        }
        rModelPart.GetCommunicator().AssembleNonHistoricalData(FRACT_VEL);
        // Copy corrected values into the historical database where a correction exists.
        for (typename ModelPart::NodeIterator it_node = rModelPart.NodesBegin(); it_node != rModelPart.NodesEnd(); it_node++)
        {
            array_1d<double,3>& r_delta_vel = it_node->GetValue(FRACT_VEL);
            if ( r_delta_vel[0]*r_delta_vel[0] + r_delta_vel[1]*r_delta_vel[1] + r_delta_vel[2]*r_delta_vel[2] != 0.0)
            {
                it_node->FastGetSolutionStepValue(FRACT_VEL) = it_node->GetValue(FRACT_VEL);
                r_delta_vel = array_1d<double,3>(3,0.0);
            }
        }
    }

    ///@}
    ///@name Protected Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{

    ///@}
private:
    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{

    /// Nodal areas below this threshold are treated as zero (avoids division by ~0).
    const double mAreaTolerance=1E-12;

    ///@}
    ///@name Private Operators
    ///@{

    ///@}
    ///@name Private Operations
    ///@{

    /**
     * @brief Updates VELOCITY from FRACT_VEL / NODAL_AREA on free components,
     *        then overwrites slave-node velocities with weighted master values.
     * @tparam TDim Spatial dimension (2 or 3); Z component only touched when TDim > 2.
     * @param rModelPart Model part holding nodes and chimera constraints.
     */
    template <int TDim>
    void InterpolateVelocity(ModelPart& rModelPart)
    {
        #pragma omp parallel
        {
            ModelPart::NodeIterator nodes_begin;
            ModelPart::NodeIterator nodes_end;
            OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), nodes_begin, nodes_end);
            for (ModelPart::NodeIterator it_node = nodes_begin; it_node != nodes_end; ++it_node) {
                const double NodalArea = it_node->FastGetSolutionStepValue(NODAL_AREA);
                if (NodalArea > mAreaTolerance) {
                    // Only free (non-fixed) components are updated.
                    if (!it_node->IsFixed(VELOCITY_X))
                        it_node->FastGetSolutionStepValue(VELOCITY_X) +=
                            it_node->FastGetSolutionStepValue(FRACT_VEL_X) / NodalArea;
                    if (!it_node->IsFixed(VELOCITY_Y))
                        it_node->FastGetSolutionStepValue(VELOCITY_Y) +=
                            it_node->FastGetSolutionStepValue(FRACT_VEL_Y) / NodalArea;
                    if(TDim > 2)
                        if (!it_node->IsFixed(VELOCITY_Z))
                            it_node->FastGetSolutionStepValue(VELOCITY_Z) +=
                                it_node->FastGetSolutionStepValue(FRACT_VEL_Z) / NodalArea;
                }
            }
        }
        auto& r_pre_modelpart =
            rModelPart.GetSubModelPart(rModelPart.Name()+"fs_pressure_model_part");
        const auto& r_constraints_container = r_pre_modelpart.MasterSlaveConstraints();
        // Reset slave velocities before interpolating from masters.
        for (const auto& constraint : r_constraints_container) {
            const auto& slave_dofs = constraint.GetSlaveDofsVector();
            for (const auto& slave_dof : slave_dofs) {
                const auto slave_node_id =
                    slave_dof->Id(); // DOF ID is same as node ID
                auto& r_slave_node = rModelPart.Nodes()[slave_node_id];
                r_slave_node.FastGetSolutionStepValue(VELOCITY_X) = 0;
                r_slave_node.FastGetSolutionStepValue(VELOCITY_Y) = 0;
                if(TDim > 2)
                    r_slave_node.FastGetSolutionStepValue(VELOCITY_Z) = 0;
            }
        }
        for (const auto& constraint : r_constraints_container) {
            const auto& master_dofs = constraint.GetMasterDofsVector();
            const auto& slave_dofs = constraint.GetSlaveDofsVector();
            ModelPart::MatrixType r_relation_matrix;
            ModelPart::VectorType r_constant_vector;
            constraint.CalculateLocalSystem(r_relation_matrix, r_constant_vector,
                                            rModelPart.GetProcessInfo());
            IndexType slave_i = 0;
            for (const auto& slave_dof : slave_dofs) {
                const auto slave_node_id =
                    slave_dof->Id(); // DOF ID is same as node ID
                auto& r_slave_node = rModelPart.Nodes()[slave_node_id];
                IndexType master_j = 0;
                for (const auto& master_dof : master_dofs) {
                    const auto master_node_id = master_dof->Id();
                    const double weight = r_relation_matrix(slave_i, master_j);
                    auto& r_master_node = rModelPart.Nodes()[master_node_id];
                    r_slave_node.FastGetSolutionStepValue(VELOCITY_X) +=
                        (r_master_node.FastGetSolutionStepValue(VELOCITY_X)) * weight;
                    r_slave_node.FastGetSolutionStepValue(VELOCITY_Y) +=
                        (r_master_node.FastGetSolutionStepValue(VELOCITY_Y)) * weight;
                    if(TDim > 2)
                        r_slave_node.FastGetSolutionStepValue(VELOCITY_Z) +=
                            (r_master_node.FastGetSolutionStepValue(VELOCITY_Z)) * weight;
                    ++master_j;
                }
                ++slave_i;
            }
        }
    }

    /**
     * @brief Reads the solver configuration and wires the velocity/pressure strategies.
     * @param rSolverConfig      Settings object providing the sub-strategies, tolerances and iteration limits.
     * @param PredictorCorrector Stored in the base class for use during SolveStep.
     * @throws Kratos::Exception if either the velocity or the pressure strategy is missing.
     */
    void InitializeStrategy(SolverSettingsType& rSolverConfig,
                            bool PredictorCorrector)
    {
        KRATOS_TRY;
        BaseType::mTimeOrder = rSolverConfig.GetTimeOrder();
        // Check that input parameters are reasonable and sufficient.
        BaseType::Check();
        BaseType::mDomainSize = rSolverConfig.GetDomainSize();
        BaseType::mPredictorCorrector = PredictorCorrector;
        BaseType::mUseSlipConditions = rSolverConfig.UseSlipConditions();
        BaseType::mReformDofSet = rSolverConfig.GetReformDofSet();
        BaseType::SetEchoLevel(rSolverConfig.GetEchoLevel());

        // Initialize strategies for each step
        const bool have_vel_strategy = rSolverConfig.FindStrategy(SolverSettingsType::Velocity,BaseType::mpMomentumStrategy);
        if (have_vel_strategy)
        {
            rSolverConfig.FindTolerance(SolverSettingsType::Velocity,BaseType::mVelocityTolerance);
            rSolverConfig.FindMaxIter(SolverSettingsType::Velocity,BaseType::mMaxVelocityIter);
            KRATOS_INFO("FractionalStepStrategyForChimera ")<<"Velocity strategy successfully set !"<<std::endl;
        }
        else
        {
            KRATOS_ERROR << "FS_Strategy error: No Velocity strategy defined in FractionalStepSettings" << std::endl;
        }
        const bool have_press_strategy = rSolverConfig.FindStrategy(SolverSettingsType::Pressure,BaseType::mpPressureStrategy);
        if (have_press_strategy)
        {
            rSolverConfig.FindTolerance(SolverSettingsType::Pressure,BaseType::mPressureTolerance);
            rSolverConfig.FindMaxIter(SolverSettingsType::Pressure,BaseType::mMaxPressureIter);
            KRATOS_INFO("FractionalStepStrategyForChimera ")<<"Pressure strategy successfully set !"<<std::endl;
        }
        else
        {
            KRATOS_ERROR << "FS_Strategy error: No Pressure strategy defined in FractionalStepSettings" << std::endl;
        }
        // Re-check now that all settings have been applied.
        BaseType::Check();
        KRATOS_CATCH("");
    }

    ///@}
    ///@name Private Access
    ///@{

    ///@}
    ///@name Private Inquiry
    ///@{

    ///@}
    ///@name Un accessible methods
    ///@{

    ///@}
}; /// Class FStepStrategy
///@}
///@name Type Definitions
///@{
///@}
///@} // addtogroup
} // namespace Kratos.
#endif // KRATOS_FS_STRATEGY_FOR_CHIMERA_H
|
draw.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD RRRR AAA W W %
% D D R R A A W W %
% D D RRRR AAAAA W W W %
% D D R RN A A WW WW %
% DDDD R R A A W W %
% %
% %
% MagickCore Image Drawing Methods %
% %
% %
% Software Design %
% Cristy %
% July 1998 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon
% rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion",
% Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent
% (www.appligent.com) contributed the dash pattern, linecap stroking
% algorithm, and minor rendering improvements.
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/draw-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/property.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
/*
Define declarations.
*/
/* Number of sub-divisions generated per Bezier segment when flattening curves. */
#define BezierQuantum 200
/* Hard upper bound on the coordinates a traced Bezier may expand to (2^21). */
#define MaxBezierCoordinates 2097152
/*
  Reports a malformed drawing primitive: raises a DrawError exception on the
  given token, sets the enclosing function's `status` to MagickFalse, and
  breaks out of the enclosing loop/switch.
*/
#define ThrowPointExpectedException(token,exception) \
{ \
(void) ThrowMagickException(exception,GetMagickModule(),DrawError, \
"NonconformingDrawingPrimitiveDefinition","`%s'",token); \
status=MagickFalse; \
break; \
}
/*
Typedef declarations.
*/
/* One polygon edge in sorted rendering form (see ConvertPathToPolygon). */
typedef struct _EdgeInfo
{
SegmentInfo
bounds;          /* bounding box of the edge's points */
double
scanline;        /* y of the scanline currently intersecting this edge */
PointInfo
*points;         /* vertex list; sorted top-to-bottom for rendering */
size_t
number_points;   /* number of entries in points */
ssize_t
direction;       /* winding direction of the edge */
MagickBooleanType
ghostline;       /* edge belongs to an implicitly-closed (unstroked) subpath */
size_t
highwater;       /* index of first point not yet passed by the scanline (scan cache) */
} EdgeInfo;
/* Geometry of an ellipse/circle primitive element. */
typedef struct _ElementInfo
{
double
cx,              /* center x */
cy,              /* center y */
major,           /* major-axis length */
minor,           /* minor-axis length */
angle;           /* rotation angle */
} ElementInfo;
/* Mutable state threaded through the MVG (Magick Vector Graphics) tracers. */
typedef struct _MVGInfo
{
PrimitiveInfo
**primitive_info; /* growable primitive array (indirect so it can be realloc'd) */
size_t
*extent;          /* allocated capacity of *primitive_info */
ssize_t
offset;           /* next free slot in *primitive_info */
ExceptionInfo
*exception;       /* sink for errors raised while tracing */
} MVGInfo;
/* A path converted to its sorted edge list. */
typedef struct _PolygonInfo
{
EdgeInfo
*edges;          /* edge array */
size_t
number_edges;    /* number of entries in edges */
} PolygonInfo;
/* Opcode attached to each point of a flattened path. */
typedef enum
{
MoveToCode,      /* start a new subpath */
OpenCode,        /* subpath is open (not yet closed) */
GhostlineCode,   /* implicit closing segment; not stroked */
LineToCode,      /* draw to this point */
EndCode          /* end of path */
} PathInfoCode;
/* One point of a flattened path plus its opcode. */
typedef struct _PathInfo
{
PointInfo
point;
PathInfoCode
code;
} PathInfo;
/*
Forward declarations.
*/
static Image
*DrawClippingMask(Image *,const DrawInfo *,const char *,const char *,
ExceptionInfo *);
static MagickBooleanType
DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *,
ExceptionInfo *);
static PrimitiveInfo
*TraceStrokePolygon(const Image *,const DrawInfo *,const PrimitiveInfo *);
static size_t
TracePath(MVGInfo *,const char *,ExceptionInfo *);
static void
TraceArc(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceArcPath(MVGInfo *,const PointInfo,const PointInfo,const PointInfo,
const double,const MagickBooleanType,const MagickBooleanType),
TraceBezier(MVGInfo *,const size_t),
TraceCircle(MVGInfo *,const PointInfo,const PointInfo),
TraceEllipse(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRoundRectangle(MVGInfo *,const PointInfo,const PointInfo,PointInfo),
TraceSquareLinecap(PrimitiveInfo *,const size_t,const double);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireDrawInfo() returns a DrawInfo structure properly initialized.
%
% The format of the AcquireDrawInfo method is:
%
% DrawInfo *AcquireDrawInfo(void)
%
*/
MagickExport DrawInfo *AcquireDrawInfo(void)
{
  DrawInfo
    *info;

  /*
    Allocate a DrawInfo structure and initialize it to default values.
  */
  info=(DrawInfo *) AcquireCriticalMemory(sizeof(*info));
  GetDrawInfo((ImageInfo *) NULL,info);
  return(info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneDrawInfo() makes a copy of the given draw_info structure. If NULL
% is specified, a new DrawInfo structure is created initialized to default
% values.
%
% The format of the CloneDrawInfo method is:
%
% DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
% const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
  const DrawInfo *draw_info)
{
  DrawInfo
    *clone_info;

  ExceptionInfo
    *exception;

  /*
    Start from a default-initialized structure; if no source is given, the
    defaults are the result.
  */
  clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info));
  GetDrawInfo(image_info,clone_info);
  if (draw_info == (DrawInfo *) NULL)
    return(clone_info);
  exception=AcquireExceptionInfo();
  /*
    Deep-copy owned strings.  Bug fix: test the SOURCE's primitive (the
    previous code tested clone_info->primitive, which GetDrawInfo() has just
    initialized, so the primitive string was never cloned).
  */
  if (draw_info->primitive != (char *) NULL)
    (void) CloneString(&clone_info->primitive,draw_info->primitive);
  if (draw_info->geometry != (char *) NULL)
    (void) CloneString(&clone_info->geometry,draw_info->geometry);
  clone_info->compliance=draw_info->compliance;
  clone_info->viewbox=draw_info->viewbox;
  clone_info->affine=draw_info->affine;
  clone_info->gravity=draw_info->gravity;
  clone_info->fill=draw_info->fill;
  clone_info->stroke=draw_info->stroke;
  clone_info->stroke_width=draw_info->stroke_width;
  if (draw_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue,
      exception);
  if (draw_info->stroke_pattern != (Image *) NULL)
    clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke_antialias=draw_info->stroke_antialias;
  clone_info->text_antialias=draw_info->text_antialias;
  clone_info->fill_rule=draw_info->fill_rule;
  clone_info->linecap=draw_info->linecap;
  clone_info->linejoin=draw_info->linejoin;
  clone_info->miterlimit=draw_info->miterlimit;
  clone_info->dash_offset=draw_info->dash_offset;
  clone_info->decorate=draw_info->decorate;
  clone_info->compose=draw_info->compose;
  if (draw_info->text != (char *) NULL)
    (void) CloneString(&clone_info->text,draw_info->text);
  if (draw_info->font != (char *) NULL)
    (void) CloneString(&clone_info->font,draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    (void) CloneString(&clone_info->metrics,draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    (void) CloneString(&clone_info->family,draw_info->family);
  clone_info->style=draw_info->style;
  clone_info->stretch=draw_info->stretch;
  clone_info->weight=draw_info->weight;
  if (draw_info->encoding != (char *) NULL)
    (void) CloneString(&clone_info->encoding,draw_info->encoding);
  clone_info->pointsize=draw_info->pointsize;
  clone_info->kerning=draw_info->kerning;
  clone_info->interline_spacing=draw_info->interline_spacing;
  clone_info->interword_spacing=draw_info->interword_spacing;
  clone_info->direction=draw_info->direction;
  if (draw_info->density != (char *) NULL)
    (void) CloneString(&clone_info->density,draw_info->density);
  clone_info->align=draw_info->align;
  clone_info->undercolor=draw_info->undercolor;
  clone_info->border_color=draw_info->border_color;
  if (draw_info->server_name != (char *) NULL)
    (void) CloneString(&clone_info->server_name,draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    {
      register ssize_t
        x;

      /*
        The dash pattern is a zero-terminated array; count up to (and copy
        including) the terminator.
      */
      for (x=0; fabs(draw_info->dash_pattern[x]) >= MagickEpsilon; x++) ;
      clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) x+1UL,
        sizeof(*clone_info->dash_pattern));
      if (clone_info->dash_pattern == (double *) NULL)
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memcpy(clone_info->dash_pattern,draw_info->dash_pattern,(size_t)
        (x+1)*sizeof(*clone_info->dash_pattern));
    }
  clone_info->gradient=draw_info->gradient;
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    {
      size_t
        number_stops;

      number_stops=clone_info->gradient.number_stops;
      clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t)
        number_stops,sizeof(*clone_info->gradient.stops));
      if (clone_info->gradient.stops == (StopInfo *) NULL)
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");  /* NOTE(review): message reused from
            the dash-pattern path; kept as-is since exception tags may be
            matched by callers -- confirm before renaming. */
      (void) memcpy(clone_info->gradient.stops,draw_info->gradient.stops,
        (size_t) number_stops*sizeof(*clone_info->gradient.stops));
    }
  clone_info->bounds=draw_info->bounds;
  clone_info->fill_alpha=draw_info->fill_alpha;
  clone_info->stroke_alpha=draw_info->stroke_alpha;
  clone_info->element_reference=draw_info->element_reference;
  clone_info->clip_path=draw_info->clip_path;
  clone_info->clip_units=draw_info->clip_units;
  if (draw_info->clip_mask != (char *) NULL)
    (void) CloneString(&clone_info->clip_mask,draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    clone_info->clipping_mask=CloneImage(draw_info->clipping_mask,0,0,
      MagickTrue,exception);
  if (draw_info->composite_mask != (Image *) NULL)
    clone_info->composite_mask=CloneImage(draw_info->composite_mask,0,0,
      MagickTrue,exception);
  clone_info->render=draw_info->render;
  clone_info->debug=IsEventLogging();
  exception=DestroyExceptionInfo(exception);
  return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P a t h T o P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPathToPolygon() converts a path to the more efficient sorted
% rendering form.
%
% The format of the ConvertPathToPolygon method is:
%
% PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
%
% A description of each parameter follows:
%
% o Method ConvertPathToPolygon returns the path in a more efficient sorted
% rendering form of type PolygonInfo.
%
% o draw_info: Specifies a pointer to an DrawInfo structure.
%
% o path_info: Specifies a pointer to an PathInfo structure.
%
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
/*
  qsort() comparator that orders edges for scanline rendering: primarily by
  the first point's y, then its x, then by slope (compared via cross
  multiplication to avoid division), then by the second point's y and x.
*/
static int DrawCompareEdges(const void *p_edge,const void *q_edge)
{
  /*
    Statement macro: returns early when the two keys differ and falls
    through to the next comparison key when they are equal.  It is
    deliberately left as a macro (it embeds `return`).
  */
#define DrawCompareEdge(p,q) \
{ \
  if (((p)-(q)) < 0.0) \
    return(-1); \
  if (((p)-(q)) > 0.0) \
    return(1); \
}

  register const PointInfo
    *p,
    *q;

  /*
    Edge sorting for right-handed coordinate system.
  */
  p=((const EdgeInfo *) p_edge)->points;
  q=((const EdgeInfo *) q_edge)->points;
  DrawCompareEdge(p[0].y,q[0].y);
  DrawCompareEdge(p[0].x,q[0].x);
  /*
    Slope comparison: (dx_p*dy_q) vs (dy_p*dx_q).
  */
  DrawCompareEdge((p[1].x-p[0].x)*(q[1].y-q[0].y),(p[1].y-p[0].y)*
    (q[1].x-q[0].x));
  DrawCompareEdge(p[1].y,q[1].y);
  DrawCompareEdge(p[1].x,q[1].x);
  return(0);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
  Write a readable dump of every edge in the polygon (direction, ghostline
  flag, bounds, and point list) to the drawing event log.
*/
static void LogPolygonInfo(const PolygonInfo *polygon_info)
{
  register EdgeInfo
    *p;

  register ssize_t
    i,
    j;

  (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    begin active-edge");
  p=polygon_info->edges;
  for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
  {
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"      edge %.20g:",
      (double) i);
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"      direction: %s",
      p->direction != MagickFalse ? "down" : "up");
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"      ghostline: %s",
      p->ghostline != MagickFalse ? "transparent" : "opaque");
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "      bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1,
      p->bounds.x2,p->bounds.y2);
    /* one log line per stored point of this edge */
    for (j=0; j < (ssize_t) p->number_points; j++)
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),"        %g,%g",
        p->points[j].x,p->points[j].y);
    p++;
  }
  (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    end active-edge");
}
/*
  Reverse the first number_points entries of the point array in place.
*/
static void ReversePoints(PointInfo *points,const size_t number_points)
{
  PointInfo
    swap;

  register ssize_t
    head,
    tail;

  head=0;
  tail=(ssize_t) number_points-1;
  while (head < tail)
  {
    swap=points[head];
    points[head]=points[tail];
    points[tail]=swap;
    head++;
    tail--;
  }
}
static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
{
long
direction,
next_direction;
PointInfo
point,
*points;
PolygonInfo
*polygon_info;
SegmentInfo
bounds;
register ssize_t
i,
n;
MagickBooleanType
ghostline;
size_t
edge,
number_edges,
number_points;
/*
Convert a path to the more efficient sorted rendering form.
*/
polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info));
if (polygon_info == (PolygonInfo *) NULL)
return((PolygonInfo *) NULL);
number_edges=16;
polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
(void) memset(polygon_info->edges,0,number_edges*
sizeof(*polygon_info->edges));
direction=0;
edge=0;
ghostline=MagickFalse;
n=0;
number_points=0;
points=(PointInfo *) NULL;
(void) memset(&point,0,sizeof(point));
(void) memset(&bounds,0,sizeof(bounds));
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=0.0;
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) direction;
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->number_edges=0;
for (i=0; path_info[i].code != EndCode; i++)
{
if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) ||
(path_info[i].code == GhostlineCode))
{
/*
Move to.
*/
if ((points != (PointInfo *) NULL) && (n >= 2))
{
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
points=(PointInfo *) NULL;
ghostline=MagickFalse;
edge++;
}
if (points == (PointInfo *) NULL)
{
number_points=16;
points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
return((PolygonInfo *) NULL);
}
ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse;
point=path_info[i].point;
points[0]=point;
bounds.x1=point.x;
bounds.x2=point.x;
direction=0;
n=1;
continue;
}
/*
Line to.
*/
next_direction=((path_info[i].point.y > point.y) ||
((fabs(path_info[i].point.y-point.y) < MagickEpsilon) &&
(path_info[i].point.x > point.x))) ? 1 : -1;
if ((points != (PointInfo *) NULL) && (direction != 0) &&
(direction != next_direction))
{
/*
New edge.
*/
point=points[n-1];
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
number_points=16;
points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
return((PolygonInfo *) NULL);
n=1;
ghostline=MagickFalse;
points[0]=point;
bounds.x1=point.x;
bounds.x2=point.x;
edge++;
}
direction=next_direction;
if (points == (PointInfo *) NULL)
continue;
if (n == (ssize_t) number_points)
{
number_points<<=1;
points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
return((PolygonInfo *) NULL);
}
point=path_info[i].point;
points[n]=point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.x > bounds.x2)
bounds.x2=point.x;
n++;
}
if (points != (PointInfo *) NULL)
{
if (n < 2)
points=(PointInfo *) RelinquishMagickMemory(points);
else
{
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
ghostline=MagickFalse;
edge++;
}
}
polygon_info->number_edges=edge;
qsort(polygon_info->edges,(size_t) polygon_info->number_edges,
sizeof(*polygon_info->edges),DrawCompareEdges);
if (IsEventLogging() != MagickFalse)
LogPolygonInfo(polygon_info);
return(polygon_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P r i m i t i v e T o P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector
% path structure.
%
% The format of the ConvertPrimitiveToPath method is:
%
%      PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info)
%
%  A description of each parameter follows:
%
%    o Method ConvertPrimitiveToPath returns a vector path structure of type
%      PathInfo.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%
*/
/*
  Write a readable dump of the vector path (one log line per record, with
  the record's code spelled out) to the drawing event log.
*/
static void LogPathInfo(const PathInfo *path_info)
{
  register const PathInfo
    *p;

  (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    begin vector-path");
  for (p=path_info; p->code != EndCode; p++)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "      %g,%g %s",p->point.x,p->point.y,p->code == GhostlineCode ?
      "moveto ghostline" : p->code == OpenCode ? "moveto open" :
      p->code == MoveToCode ? "moveto" : p->code == LineToCode ? "lineto" :
      "?");
  (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    end vector-path");
}
/*
  ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector
  path structure: a flat array of (code,point) records terminated by an
  EndCode record.  Primitives that are not path-like (alpha, color, image,
  point, text) yield NULL; a failed allocation also yields NULL.
*/
static PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info)
{
  MagickBooleanType
    closed_subpath;

  PathInfo
    *path_info;

  PathInfoCode
    code;

  PointInfo
    p,  /* first point of the current subpath */
    q;  /* most recently emitted point, for duplicate elimination */

  register ssize_t
    i,
    n;

  ssize_t
    coordinates,  /* points remaining in the current subpath */
    start;        /* index of the current subpath's first record */

  /*
    Converts a PrimitiveInfo structure into a vector path structure.
  */
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    case ColorPrimitive:
    case ImagePrimitive:
    case PointPrimitive:
    case TextPrimitive:
      return((PathInfo *) NULL);
    default:
      break;
  }
  /*
    Count the primitive points; each can emit up to 3 records (its own plus
    a two-record ghostline closure), plus one EndCode terminator.
  */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  path_info=(PathInfo *) AcquireQuantumMemory((size_t) (3UL*i+1UL),
    sizeof(*path_info));
  if (path_info == (PathInfo *) NULL)
    return((PathInfo *) NULL);
  coordinates=0;
  closed_subpath=MagickFalse;
  n=0;
  p.x=(-1.0);
  p.y=(-1.0);
  q.x=(-1.0);
  q.y=(-1.0);
  start=0;
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    code=LineToCode;
    if (coordinates <= 0)
      {
        /*
          New subpath.
        */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        p=primitive_info[i].point;
        start=n;
        code=MoveToCode;
        closed_subpath=primitive_info[i].closed_subpath;
      }
    coordinates--;
    if ((code == MoveToCode) || (coordinates <= 0) ||
        (fabs(q.x-primitive_info[i].point.x) >= MagickEpsilon) ||
        (fabs(q.y-primitive_info[i].point.y) >= MagickEpsilon))
      {
        /*
          Eliminate duplicate points.
        */
        path_info[n].code=code;
        path_info[n].point=primitive_info[i].point;
        q=primitive_info[i].point;
        n++;
      }
    if (coordinates > 0)
      continue;  /* next point in current subpath */
    if (closed_subpath != MagickFalse)
      {
        closed_subpath=MagickFalse;
        continue;
      }
    /*
      Mark the p point as open if the subpath is not closed: retag the
      subpath's first record and append a ghostline segment back to p.
    */
    path_info[start].code=OpenCode;
    path_info[n].code=GhostlineCode;
    path_info[n].point=primitive_info[i].point;
    n++;
    path_info[n].code=LineToCode;
    path_info[n].point=p;
    n++;
  }
  path_info[n].code=EndCode;
  path_info[n].point.x=0.0;
  path_info[n].point.y=0.0;
  if (IsEventLogging() != MagickFalse)
    LogPathInfo(path_info);
  return(path_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyDrawInfo() deallocates memory associated with an DrawInfo structure.
%
% The format of the DestroyDrawInfo method is:
%
% DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
*/
/*
  DestroyDrawInfo() deallocates memory associated with a DrawInfo structure
  and returns NULL so callers can clear their pointer in one statement.
*/
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
  /*
    Fix: validate the pointer before dereferencing it; the debug test
    previously read draw_info->debug ahead of the NULL assertion.
  */
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  if (draw_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  /*
    Release every owned string, pattern, and mask; each member is written
    back so a stale pointer cannot be reused.
  */
  if (draw_info->primitive != (char *) NULL)
    draw_info->primitive=DestroyString(draw_info->primitive);
  if (draw_info->text != (char *) NULL)
    draw_info->text=DestroyString(draw_info->text);
  if (draw_info->geometry != (char *) NULL)
    draw_info->geometry=DestroyString(draw_info->geometry);
  if (draw_info->fill_pattern != (Image *) NULL)
    draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
  if (draw_info->stroke_pattern != (Image *) NULL)
    draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern);
  if (draw_info->font != (char *) NULL)
    draw_info->font=DestroyString(draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    draw_info->metrics=DestroyString(draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    draw_info->family=DestroyString(draw_info->family);
  if (draw_info->encoding != (char *) NULL)
    draw_info->encoding=DestroyString(draw_info->encoding);
  if (draw_info->density != (char *) NULL)
    draw_info->density=DestroyString(draw_info->density);
  if (draw_info->server_name != (char *) NULL)
    draw_info->server_name=(char *)
      RelinquishMagickMemory(draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    draw_info->dash_pattern=(double *) RelinquishMagickMemory(
      draw_info->dash_pattern);
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory(
      draw_info->gradient.stops);
  if (draw_info->clip_mask != (char *) NULL)
    draw_info->clip_mask=DestroyString(draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    draw_info->clipping_mask=DestroyImage(draw_info->clipping_mask);
  if (draw_info->composite_mask != (Image *) NULL)
    draw_info->composite_mask=DestroyImage(draw_info->composite_mask);
  /*
    Invalidate the signature to help catch use-after-free.
  */
  draw_info->signature=(~MagickCoreSignature);
  draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info);
  return(draw_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y E d g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyEdge() destroys the specified polygon edge.
%
% The format of the DestroyEdge method is:
%
%      size_t DestroyEdge(PolygonInfo *polygon_info,const size_t edge)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to an PolygonInfo structure.
%
% o edge: the polygon edge number to destroy.
%
*/
/*
  Release the point list of the given edge and close the gap in the edge
  array by shifting the trailing edges down one slot.  Returns the new
  edge count.
*/
static size_t DestroyEdge(PolygonInfo *polygon_info,
  const size_t edge)
{
  size_t
    trailing;

  assert(edge < polygon_info->number_edges);
  polygon_info->edges[edge].points=(PointInfo *) RelinquishMagickMemory(
    polygon_info->edges[edge].points);
  polygon_info->number_edges--;
  trailing=polygon_info->number_edges-edge;
  if (trailing > 0)
    (void) memmove(polygon_info->edges+edge,polygon_info->edges+edge+1,
      trailing*sizeof(*polygon_info->edges));
  return(polygon_info->number_edges);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P o l y g o n I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPolygonInfo() destroys the PolygonInfo data structure.
%
% The format of the DestroyPolygonInfo method is:
%
% PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to an PolygonInfo structure.
%
*/
/*
  Release each edge's point list, then the edge array itself, then the
  polygon structure.  Always returns NULL.
*/
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
{
  register ssize_t
    edge;

  for (edge=(ssize_t) polygon_info->number_edges-1; edge >= 0; edge--)
    polygon_info->edges[edge].points=(PointInfo *)
      RelinquishMagickMemory(polygon_info->edges[edge].points);
  polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(polygon_info->edges);
  return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w A f f i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawAffineImage() composites the source over the destination image as
% dictated by the affine transform.
%
% The format of the DrawAffineImage method is:
%
% MagickBooleanType DrawAffineImage(Image *image,const Image *source,
% const AffineMatrix *affine,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o source: the source image.
%
% o affine: the affine transform.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  AffineEdge() narrows the destination-row span [edge->x1,edge->x2] at row
  y to the x values whose affine pre-image lies inside the source image,
  using the half-plane intercepts of the inverse transform.  The caller
  treats x2 < x1 as an empty span.
*/
static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine,
  const double y,const SegmentInfo *edge)
{
  double
    intercept,
    z;

  register double
    x;

  SegmentInfo
    inverse_edge;

  /*
    Determine left and right edges.
  */
  inverse_edge.x1=edge->x1;
  inverse_edge.y1=edge->y1;
  inverse_edge.x2=edge->x2;
  inverse_edge.y2=edge->y2;
  z=affine->ry*y+affine->tx;
  if (affine->sx >= MagickEpsilon)
    {
      intercept=(-z/affine->sx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->columns)/affine->sx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->sx < -MagickEpsilon)
      {
        /*
          Negative x scale: the two intercepts swap roles.
        */
        intercept=(-z+(double) image->columns)/affine->sx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->sx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      /*
        sx is ~0: the whole row maps to column z; reject it when that
        column falls outside the source.
      */
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns))
        {
          inverse_edge.x2=edge->x1;
          return(inverse_edge);
        }
  /*
    Determine top and bottom edges.
  */
  z=affine->sy*y+affine->ty;
  if (affine->rx >= MagickEpsilon)
    {
      intercept=(-z/affine->rx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->rows)/affine->rx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->rx < -MagickEpsilon)
      {
        intercept=(-z+(double) image->rows)/affine->rx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->rx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows))
        {
          /*
            NOTE(review): the analogous degenerate x-branch above collapses
            the span (x2=edge->x1) so the caller skips the row, but here x2
            is reset to edge->x2, which does not guarantee an empty span —
            confirm this asymmetry is intentional.
          */
          inverse_edge.x2=edge->x2;
          return(inverse_edge);
        }
  return(inverse_edge);
}
/*
  Compute the inverse of the 2x3 affine transform.  PerceptibleReciprocal()
  keeps a near-singular determinant finite instead of dividing by zero.
*/
static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine)
{
  AffineMatrix
    inverse_affine;

  double
    reciprocal;

  reciprocal=PerceptibleReciprocal(affine->sx*affine->sy-affine->rx*
    affine->ry);
  /*
    Linear part: adjugate scaled by 1/determinant.
  */
  inverse_affine.sx=reciprocal*affine->sy;
  inverse_affine.rx=reciprocal*(-affine->rx);
  inverse_affine.ry=reciprocal*(-affine->ry);
  inverse_affine.sy=reciprocal*affine->sx;
  /*
    Translation part: the negated original translation pushed through the
    inverted linear part.
  */
  inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty*
    inverse_affine.ry;
  inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty*
    inverse_affine.sy;
  return(inverse_affine);
}
/*
  DrawAffineImage() composites the source over the destination image as
  dictated by the affine transform.  The destination bounding box is the
  transformed source rectangle clipped to the image; each destination
  pixel inside it is mapped back through the inverse transform and the
  interpolated source pixel is composited over it.

  Fix: removed the dead local x_offset, which was initialized and
  incremented but never read.

  Returns MagickTrue on success, MagickFalse if the image cannot be made
  DirectClass or a pixel region fails to sync.
*/
MagickExport MagickBooleanType DrawAffineImage(Image *image,
  const Image *source,const AffineMatrix *affine,ExceptionInfo *exception)
{
  AffineMatrix
    inverse_affine;

  CacheView
    *image_view,
    *source_view;

  MagickBooleanType
    status;

  PixelInfo
    zero;

  PointInfo
    extent[4],
    min,
    max;

  register ssize_t
    i;

  SegmentInfo
    edge;

  ssize_t
    start,
    stop,
    y;

  /*
    Determine bounding box.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(source != (const Image *) NULL);
  assert(source->signature == MagickCoreSignature);
  assert(affine != (AffineMatrix *) NULL);
  /*
    Forward-map the four source corners.
  */
  extent[0].x=0.0;
  extent[0].y=0.0;
  extent[1].x=(double) source->columns-1.0;
  extent[1].y=0.0;
  extent[2].x=(double) source->columns-1.0;
  extent[2].y=(double) source->rows-1.0;
  extent[3].x=0.0;
  extent[3].y=(double) source->rows-1.0;
  for (i=0; i < 4; i++)
  {
    PointInfo
      point;

    point=extent[i];
    extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
    extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
  }
  min=extent[0];
  max=extent[0];
  for (i=1; i < 4; i++)
  {
    if (min.x > extent[i].x)
      min.x=extent[i].x;
    if (min.y > extent[i].y)
      min.y=extent[i].y;
    if (max.x < extent[i].x)
      max.x=extent[i].x;
    if (max.y < extent[i].y)
      max.y=extent[i].y;
  }
  /*
    Affine transform image.
  */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  edge.x1=MagickMax(min.x,0.0);
  edge.y1=MagickMax(min.y,0.0);
  edge.x2=MagickMin(max.x,(double) image->columns-1.0);
  edge.y2=MagickMin(max.y,(double) image->rows-1.0);
  inverse_affine=InverseAffineMatrix(affine);
  GetPixelInfo(image,&zero);
  start=(ssize_t) ceil(edge.y1-0.5);
  stop=(ssize_t) floor(edge.y2+0.5);
  source_view=AcquireVirtualCacheView(source,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source,image,stop-start,1)
#endif
  for (y=start; y <= stop; y++)
  {
    PixelInfo
      composite,
      pixel;

    PointInfo
      point;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    SegmentInfo
      inverse_edge;

    /*
      Clip this scanline against the pre-image of the source rectangle.
    */
    inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
    if (inverse_edge.x2 < inverse_edge.x1)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,(ssize_t) ceil(inverse_edge.x1-
      0.5),y,(size_t) (floor(inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),
      1,exception);
    if (q == (Quantum *) NULL)
      continue;
    pixel=zero;
    composite=zero;
    for (x=(ssize_t) ceil(inverse_edge.x1-0.5);
         x <= (ssize_t) floor(inverse_edge.x2+0.5); x++)
    {
      /*
        Map the destination pixel back into source space, then composite
        the interpolated source pixel over the destination pixel.
      */
      point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
        inverse_affine.tx;
      point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
        inverse_affine.ty;
      status=InterpolatePixelInfo(source,source_view,UndefinedInterpolatePixel,
        point.x,point.y,&pixel,exception);
      if (status == MagickFalse)
        break;
      GetPixelInfoPixel(image,q,&composite);
      CompositePixelInfoOver(&pixel,pixel.alpha,&composite,composite.alpha,
        &composite);
      SetPixelViaPixelInfo(image,&composite,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w B o u n d i n g R e c t a n g l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawBoundingRectangles() draws the bounding rectangles on the image. This
% is only useful for developers debugging the rendering algorithm.
%
% The format of the DrawBoundingRectangles method is:
%
% void DrawBoundingRectangles(Image *image,const DrawInfo *draw_info,
% PolygonInfo *polygon_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Clamp the stroke width to a small multiple of the larger image dimension
  so absurdly large widths cannot blow up rendering.
*/
static inline double SaneStrokeWidth(const Image *image,
  const DrawInfo *draw_info)
{
  double
    limit;

  limit=(2.0*sqrt(2.0)+MagickEpsilon)*MagickMax(image->columns,image->rows);
  return(MagickMin((double) draw_info->stroke_width,limit));
}
static void DrawBoundingRectangles(Image *image,const DrawInfo *draw_info,
const PolygonInfo *polygon_info,ExceptionInfo *exception)
{
double
mid;
DrawInfo
*clone_info;
PointInfo
end,
resolution,
start;
PrimitiveInfo
primitive_info[6];
register ssize_t
i;
SegmentInfo
bounds;
ssize_t
coordinates;
(void) memset(primitive_info,0,sizeof(primitive_info));
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
(void) QueryColorCompliance("#000F",AllCompliance,&clone_info->fill,
exception);
resolution.x=96.0;
resolution.y=96.0;
if (clone_info->density != (char *) NULL)
{
GeometryInfo
geometry_info;
MagickStatusType
flags;
flags=ParseGeometry(clone_info->density,&geometry_info);
resolution.x=geometry_info.rho;
resolution.y=geometry_info.sigma;
if ((flags & SigmaValue) == MagickFalse)
resolution.y=resolution.x;
}
mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)*
SaneStrokeWidth(image,clone_info)/2.0;
bounds.x1=0.0;
bounds.y1=0.0;
bounds.x2=0.0;
bounds.y2=0.0;
if (polygon_info != (PolygonInfo *) NULL)
{
bounds=polygon_info->edges[0].bounds;
for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
{
if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
bounds.x1=polygon_info->edges[i].bounds.x1;
if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
bounds.y1=polygon_info->edges[i].bounds.y1;
if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
bounds.x2=polygon_info->edges[i].bounds.x2;
if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
bounds.y2=polygon_info->edges[i].bounds.y2;
}
bounds.x1-=mid;
bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double)
image->columns ? (double) image->columns-1 : bounds.x1;
bounds.y1-=mid;
bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double)
image->rows ? (double) image->rows-1 : bounds.y1;
bounds.x2+=mid;
bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double)
image->columns ? (double) image->columns-1 : bounds.x2;
bounds.y2+=mid;
bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double)
image->rows ? (double) image->rows-1 : bounds.y2;
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
{
if (polygon_info->edges[i].direction != 0)
(void) QueryColorCompliance("#f00",AllCompliance,&clone_info->stroke,
exception);
else
(void) QueryColorCompliance("#0f0",AllCompliance,&clone_info->stroke,
exception);
start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
primitive_info[0].primitive=RectanglePrimitive;
TraceRectangle(primitive_info,start,end);
primitive_info[0].method=ReplaceMethod;
coordinates=(ssize_t) primitive_info[0].coordinates;
primitive_info[coordinates].primitive=UndefinedPrimitive;
(void) DrawPrimitive(image,clone_info,primitive_info,exception);
}
}
(void) QueryColorCompliance("#00f",AllCompliance,&clone_info->stroke,
exception);
start.x=(double) (bounds.x1-mid);
start.y=(double) (bounds.y1-mid);
end.x=(double) (bounds.x2+mid);
end.y=(double) (bounds.y2+mid);
primitive_info[0].primitive=RectanglePrimitive;
TraceRectangle(primitive_info,start,end);
primitive_info[0].method=ReplaceMethod;
coordinates=(ssize_t) primitive_info[0].coordinates;
primitive_info[coordinates].primitive=UndefinedPrimitive;
(void) DrawPrimitive(image,clone_info,primitive_info,exception);
clone_info=DestroyDrawInfo(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClipPath() draws the clip path on the image mask.
%
% The format of the DrawClipPath method is:
%
% MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
% const char *id,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  DrawClipPath() renders the clip path registered under `id` as the image's
  write mask.  Returns MagickFalse when no such artifact exists or the
  mask cannot be drawn; otherwise the status of SetImageMask().
*/
MagickExport MagickBooleanType DrawClipPath(Image *image,
  const DrawInfo *draw_info,const char *id,ExceptionInfo *exception)
{
  const char
    *path;

  Image
    *mask;

  MagickBooleanType
    status;

  path=GetImageArtifact(image,id);
  if (path == (const char *) NULL)
    return(MagickFalse);
  mask=DrawClippingMask(image,draw_info,draw_info->clip_mask,path,exception);
  if (mask == (Image *) NULL)
    return(MagickFalse);
  status=SetImageMask(image,WritePixelMask,mask,exception);
  mask=DestroyImage(mask);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p p i n g M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClippingMask() draws the clip path and returns it as an image clipping
% mask.
%
% The format of the DrawClippingMask method is:
%
% Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *clip_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o clip_path: the clip path.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  DrawClippingMask() draws the clip path and returns it as an image
  clipping mask: the path is rendered opaque white on a transparent
  background, its alpha channel is separated, and the result is negated.
  Returns NULL if the mask image cannot be sized or the negate fails.
*/
static Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *clip_path,ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  Image
    *clip_mask,
    *separate_mask;

  MagickStatusType
    status;

  /*
    Draw a clip path.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  clip_mask=AcquireImage((const ImageInfo *) NULL,exception);
  status=SetImageExtent(clip_mask,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(clip_mask));  /* DestroyImage() yields NULL */
  (void) SetImageMask(clip_mask,WritePixelMask,(Image *) NULL,exception);
  /*
    Fully transparent background so only the drawn path has coverage.
  */
  (void) QueryColorCompliance("#0000",AllCompliance,
    &clip_mask->background_color,exception);
  clip_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  clip_mask->background_color.alpha_trait=BlendPixelTrait;
  (void) SetImageBackgroundColor(clip_mask,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s",
      id);
  /*
    Render the path: white opaque fill, transparent stroke, zero width.
  */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,clip_path);
  (void) QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  if (clone_info->clip_mask != (char *) NULL)
    clone_info->clip_mask=DestroyString(clone_info->clip_mask);
  (void) QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->alpha=OpaqueAlpha;
  clone_info->clip_path=MagickTrue;
  status=DrawImage(clip_mask,clone_info,exception);
  clone_info=DestroyDrawInfo(clone_info);
  /*
    The mask proper is the alpha coverage of what was drawn, inverted.
  */
  separate_mask=SeparateImage(clip_mask,AlphaChannel,exception);
  if (separate_mask != (Image *) NULL)
    {
      clip_mask=DestroyImage(clip_mask);
      clip_mask=separate_mask;
      status=NegateImage(clip_mask,MagickFalse,exception);
      if (status == MagickFalse)
        clip_mask=DestroyImage(clip_mask);
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path");
  return(clip_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C o m p o s i t e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawCompositeMask() draws the mask path and returns it as an image mask.
%
% The format of the DrawCompositeMask method is:
%
% Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *mask_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the mask path id.
%
% o mask_path: the mask path.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  DrawCompositeMask() draws the mask path and returns it as an image mask:
  the path is rendered opaque white on a transparent background, its alpha
  channel is separated, and the result is negated.  Returns NULL if the
  mask image cannot be sized or the negate fails.
*/
static Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *mask_path,ExceptionInfo *exception)
{
  Image
    *composite_mask,
    *separate_mask;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  /*
    Draw a mask path.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  composite_mask=AcquireImage((const ImageInfo *) NULL,exception);
  status=SetImageExtent(composite_mask,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(composite_mask));  /* DestroyImage() yields NULL */
  (void) SetImageMask(composite_mask,CompositePixelMask,(Image *) NULL,
    exception);
  /*
    Fully transparent background so only the drawn path has coverage.
  */
  (void) QueryColorCompliance("#0000",AllCompliance,
    &composite_mask->background_color,exception);
  composite_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  composite_mask->background_color.alpha_trait=BlendPixelTrait;
  (void) SetImageBackgroundColor(composite_mask,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin mask-path %s",
      id);
  /*
    Render the path: white opaque fill, transparent stroke, zero width.
  */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,mask_path);
  (void) QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  (void) QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->alpha=OpaqueAlpha;
  status=DrawImage(composite_mask,clone_info,exception);
  clone_info=DestroyDrawInfo(clone_info);
  /*
    The mask proper is the alpha coverage of what was drawn, inverted.
  */
  separate_mask=SeparateImage(composite_mask,AlphaChannel,exception);
  if (separate_mask != (Image *) NULL)
    {
      composite_mask=DestroyImage(composite_mask);
      composite_mask=separate_mask;
      status=NegateImage(composite_mask,MagickFalse,exception);
      if (status == MagickFalse)
        composite_mask=DestroyImage(composite_mask);
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end mask-path");
  return(composite_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w D a s h P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the
% image while respecting the dash offset and dash pattern attributes.
%
% The format of the DrawDashPolygon method is:
%
% MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info,Image *image,ExceptionInfo *exception)
{
  double
    length,
    maximum_length,
    offset,
    scale,
    total_length;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  PrimitiveInfo
    *dash_polygon;

  register double
    dx,
    dy;

  register ssize_t
    i;

  size_t
    number_vertices;

  ssize_t
    j,
    n;

  /*
    Stroke the polygon as alternating dash/gap segments taken from
    draw_info->dash_pattern (scaled by the affine transform), starting
    draw_info->dash_offset into the pattern.  Even pattern indices are
    drawn dashes, odd indices are gaps.
  */
  assert(draw_info != (const DrawInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-dash");
  /*
    Count the vertices in the path (terminated by UndefinedPrimitive).
  */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  number_vertices=(size_t) i;
  /*
    Scratch polygon holding one dash at a time; 2*vertices+32 bounds the
    number of points a single dash can contribute.
  */
  dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (2UL*number_vertices+32UL),sizeof(*dash_polygon));
  if (dash_polygon == (PrimitiveInfo *) NULL)
    return(MagickFalse);
  (void) memset(dash_polygon,0,(2UL*number_vertices+32UL)*
    sizeof(*dash_polygon));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->miterlimit=0;
  dash_polygon[0]=primitive_info[0];
  scale=ExpandAffine(&draw_info->affine);
  length=scale*draw_info->dash_pattern[0];
  offset=fabs(draw_info->dash_offset) >= MagickEpsilon ?
    scale*draw_info->dash_offset : 0.0;
  j=1;
  /*
    Consume the dash offset: advance n (pattern index) and length to the
    position within the pattern where drawing starts.
  */
  for (n=0; offset > 0.0; j=0)
  {
    if (draw_info->dash_pattern[n] <= 0.0)
      break;
    length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
    if (offset > length)
      {
        offset-=length;
        n++;
        length=scale*draw_info->dash_pattern[n];
        continue;
      }
    if (offset < length)
      {
        length-=offset;
        offset=0.0;
        break;
      }
    offset=0.0;
    n++;
  }
  status=MagickTrue;
  maximum_length=0.0;
  total_length=0.0;
  /*
    Walk each edge of the path, emitting dashes as we go.
  */
  for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++)
  {
    dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
    dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
    maximum_length=hypot(dx,dy);
    if (maximum_length > MaxBezierCoordinates)
      break;
    if (fabs(length) < MagickEpsilon)
      {
        /*
          Current pattern entry exhausted; advance, wrapping at the 0.0
          terminator of dash_pattern.
        */
        n++;
        if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
          n=0;
        length=scale*draw_info->dash_pattern[n];
      }
    for (total_length=0.0; (length >= 0.0) && (maximum_length >= (total_length+length)); )
    {
      total_length+=length;
      if ((n & 0x01) != 0)
        {
          /*
            A gap just ended: restart the dash polygon at this position
            along the current edge.
          */
          dash_polygon[0]=primitive_info[0];
          dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          j=1;
        }
      else
        {
          /*
            A dash just ended: append its end point and stroke it.
          */
          if ((j+1) > (ssize_t) number_vertices)
            break;
          dash_polygon[j]=primitive_info[i-1];
          dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].coordinates=1;
          j++;
          dash_polygon[0].coordinates=(size_t) j;
          dash_polygon[j].primitive=UndefinedPrimitive;
          status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
        }
      n++;
      if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
        n=0;
      length=scale*draw_info->dash_pattern[n];
    }
    /*
      Carry the unconsumed part of the current pattern entry into the next
      edge of the path.
    */
    length-=(maximum_length-total_length);
    if ((n & 0x01) != 0)
      continue;
    dash_polygon[j]=primitive_info[i];
    dash_polygon[j].coordinates=1;
    j++;
  }
  /*
    Flush a partial trailing dash; the point is nudged by MagickEpsilon,
    presumably to guarantee a non-degenerate segment -- confirm.
  */
  if ((total_length < maximum_length) && ((n & 0x01) == 0) && (j > 1))
    {
      dash_polygon[j]=primitive_info[i-1];
      dash_polygon[j].point.x+=MagickEpsilon;
      dash_polygon[j].point.y+=MagickEpsilon;
      dash_polygon[j].coordinates=1;
      j++;
      dash_polygon[0].coordinates=(size_t) j;
      dash_polygon[j].primitive=UndefinedPrimitive;
      status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
    }
  dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-dash");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w G r a d i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawGradientImage() draws a linear gradient on the image.
%
% The format of the DrawGradientImage method is:
%
% MagickBooleanType DrawGradientImage(Image *image,
% const DrawInfo *draw_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double GetStopColorOffset(const GradientInfo *gradient,
  const ssize_t x,const ssize_t y)
{
  /*
    Map pixel (x,y) to a scalar position along the gradient: for linear
    gradients, the projection of the pixel onto the gradient vector; for
    radial gradients, the (possibly radii-normalized) distance from the
    gradient center.  Unknown gradient types yield 0.0.
  */
  if ((gradient->type == UndefinedGradient) ||
      (gradient->type == LinearGradient))
    {
      const SegmentInfo
        *vector;

      double
        distance,
        dot,
        norm;

      PointInfo
        delta,
        span;

      vector=(&gradient->gradient_vector);
      span.x=vector->x2-vector->x1;
      span.y=vector->y2-vector->y1;
      delta.x=(double) x-vector->x1;
      delta.y=(double) y-vector->y1;
      distance=sqrt(delta.x*delta.x+delta.y*delta.y);
      /*
        norm*dot*distance reduces to (span . delta)/|span|, i.e. the length
        of delta projected onto the gradient vector.
      */
      norm=PerceptibleReciprocal(sqrt(span.x*span.x+span.y*span.y)*distance);
      dot=span.x*delta.x+span.y*delta.y;
      return(norm*dot*distance);
    }
  if (gradient->type == RadialGradient)
    {
      PointInfo
        v;

      if (gradient->spread == RepeatSpread)
        {
          /*
            Repeat spread uses the raw Euclidean distance from the center.
          */
          v.x=(double) x-gradient->center.x;
          v.y=(double) y-gradient->center.y;
          return(sqrt(v.x*v.x+v.y*v.y));
        }
      /*
        Rotate into the ellipse frame and normalize by the radii.
      */
      v.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians(
        gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.x);
      v.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians(
        gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.y);
      return(sqrt(v.x*v.x+v.y*v.y));
    }
  return(0.0);
}
static int StopInfoCompare(const void *x,const void *y)
{
  const StopInfo
    *stop_1,
    *stop_2;

  /*
    qsort() comparator ordering gradient stops by ascending offset.

    The epsilon-equality test must come BEFORE the greater-than test:
    the original ordering returned 1 for cmp(a,b) but 0 for cmp(b,a)
    when a > b with |a-b| <= MagickEpsilon, an inconsistent comparator
    that violates qsort()'s requirement of a consistent total order.
  */
  stop_1=(const StopInfo *) x;
  stop_2=(const StopInfo *) y;
  if (fabs(stop_1->offset-stop_2->offset) <= MagickEpsilon)
    return(0);
  if (stop_1->offset > stop_2->offset)
    return(1);
  return(-1);
}
MagickExport MagickBooleanType DrawGradientImage(Image *image,
  const DrawInfo *draw_info,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const GradientInfo
    *gradient;

  const SegmentInfo
    *gradient_vector;

  double
    length;

  MagickBooleanType
    status;

  PixelInfo
    zero;

  PointInfo
    point;

  RectangleInfo
    bounding_box;

  ssize_t
    y;

  /*
    Draw linear or radial gradient on image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  gradient=(&draw_info->gradient);
  /*
    Sort the color stops by ascending offset so adjacent stops bracket each
    pixel's offset during the interpolation below.
  */
  qsort(gradient->stops,gradient->number_stops,sizeof(StopInfo),
    StopInfoCompare);
  gradient_vector=(&gradient->gradient_vector);
  point.x=gradient_vector->x2-gradient_vector->x1;
  point.y=gradient_vector->y2-gradient_vector->y1;
  length=sqrt(point.x*point.x+point.y*point.y);
  /*
    NOTE(review): bounding_box.width/height are used below as exclusive end
    coordinates (x2/y2), not as extents -- presumably they are populated
    that way where the gradient is defined; confirm against the caller.
  */
  bounding_box=gradient->bounding_box;
  status=MagickTrue;
  GetPixelInfo(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,bounding_box.height-bounding_box.y,1)
#endif
  for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
  {
    PixelInfo
      composite,
      pixel;

    double
      alpha,
      offset;

    register Quantum
      *magick_restrict q;

    register ssize_t
      i,
      x;

    ssize_t
      j;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    composite=zero;
    /*
      Seed the offset at the start of the row; within the row it is
      recomputed per pixel except at the gradient-vector origin.
    */
    offset=GetStopColorOffset(gradient,0,y);
    if (gradient->type != RadialGradient)
      offset*=PerceptibleReciprocal(length);
    for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
    {
      GetPixelInfoPixel(image,q,&pixel);
      switch (gradient->spread)
      {
        case UndefinedSpread:
        case PadSpread:
        {
          /*
            Pad spread: offsets outside [0,1] clamp to the first/last stop.
          */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if ((offset < 0.0) || (i == 0))
            composite=gradient->stops[0].color;
          else
            if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                /*
                  Linearly blend the two stops bracketing this offset.
                */
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case ReflectSpread:
        {
          /*
            Reflect spread: mirror the offset into [0,1] (odd periods are
            reversed).
          */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          if (offset < 0.0)
            offset=(-offset);
          if ((ssize_t) fmod(offset,2.0) == 0)
            offset=fmod(offset,1.0);
          else
            offset=1.0-fmod(offset,1.0);
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case RepeatSpread:
        {
          MagickBooleanType
            antialias;

          double
            repeat;

          /*
            Repeat spread: wrap the offset modulo the gradient length (or
            radius for radial gradients); antialias the seam where the
            pattern restarts.
          */
          antialias=MagickFalse;
          repeat=0.0;
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type == LinearGradient)
                {
                  repeat=fmod(offset,length);
                  if (repeat < 0.0)
                    repeat=length-fmod(-repeat,length);
                  else
                    repeat=fmod(offset,length);
                  antialias=(repeat < length) && ((repeat+1.0) > length) ?
                    MagickTrue : MagickFalse;
                  offset=PerceptibleReciprocal(length)*repeat;
                }
              else
                {
                  repeat=fmod(offset,gradient->radius);
                  if (repeat < 0.0)
                    repeat=gradient->radius-fmod(-repeat,gradient->radius);
                  else
                    repeat=fmod(offset,gradient->radius);
                  antialias=repeat+1.0 > gradient->radius ? MagickTrue :
                    MagickFalse;
                  offset=repeat/gradient->radius;
                }
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                if (antialias != MagickFalse)
                  {
                    /*
                      At the wrap seam, blend between the last and first
                      stops instead of the bracketing pair.
                    */
                    if (gradient->type == LinearGradient)
                      alpha=length-repeat;
                    else
                      alpha=gradient->radius-repeat;
                    i=0;
                    j=(ssize_t) gradient->number_stops-1L;
                  }
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
      }
      /*
        Composite the gradient color over the existing pixel.
      */
      CompositePixelInfoOver(&composite,composite.alpha,&pixel,pixel.alpha,
        &pixel);
      SetPixelViaPixelInfo(image,&pixel,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawImage() draws a graphic primitive on your image. The primitive
% may be represented as a string or filename. Precede the filename with an
% "at" sign (@) and the contents of the file are drawn on the image. You
% can affect how text is drawn by setting one or more members of the draw
% info structure.
%
% The format of the DrawImage method is:
%
% MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType CheckPrimitiveExtent(MVGInfo *mvg_info,
  const size_t pad)
{
  size_t
    extent;

  /*
    Check if there is enough storage for drawing primitives: ensure the
    primitive-info buffer can hold `pad` additional entries (plus a
    4096-entry cushion), growing it when necessary.

    Returns MagickTrue when the buffer is (now) large enough.  On arithmetic
    overflow or allocation failure it throws a ResourceLimitError, installs
    a minimal one-entry buffer so callers can unwind safely, and returns
    MagickFalse.  The original code computed offset+pad+4096 unchecked; a
    huge pad could wrap size_t, pass the extent test, and leave the buffer
    undersized.
  */
  if ((pad <= (((size_t) -1)-4096)) &&
      ((size_t) mvg_info->offset <= (((size_t) -1)-4096-pad)))
    {
      extent=(size_t) mvg_info->offset+pad+4096;
      if (extent <= *mvg_info->extent)
        return(MagickTrue);
      *mvg_info->primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(
        *mvg_info->primitive_info,extent,sizeof(**mvg_info->primitive_info));
      if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
        {
          *mvg_info->extent=extent;
          return(MagickTrue);
        }
      /*
        ResizeQuantumMemory() relinquishes the original allocation when it
        fails (the original code relied on this), so nothing leaks here.
      */
    }
  else
    {
      /*
        The requested extent overflows size_t: release the current buffer
        before unwinding so it is not leaked by the reassignment below.
      */
      *mvg_info->primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(
        *mvg_info->primitive_info);
    }
  /*
    Allocate a single primitive to facilitate unwinding.
  */
  (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
    ResourceLimitError,"MemoryAllocationFailed","`%s'","");
  *mvg_info->primitive_info=(PrimitiveInfo *) AcquireCriticalMemory(
    sizeof(**mvg_info->primitive_info));
  (void) memset(*mvg_info->primitive_info,0,sizeof(**mvg_info->primitive_info));
  *mvg_info->extent=1;
  return(MagickFalse);
}
static SplayTreeInfo *GetMVGMacros(const char *primitive)
{
  char
    *token;

  const char
    *q;

  size_t
    extent;

  SplayTreeInfo
    *macros;

  /*
    Scan graphic primitives for definitions and classes.  Returns a splay
    tree mapping macro name -> macro body; both keys and values are owned
    by the tree and released with RelinquishMagickMemory().
  */
  if (primitive == (const char *) NULL)
    return((SplayTreeInfo *) NULL);
  macros=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  token=AcquireString(primitive);
  extent=strlen(token)+MagickPathExtent;
  for (q=primitive; *q != '\0'; )
  {
    GetNextToken(q,&q,extent,token);
    if (*token == '\0')
      break;
    if (*token == '#')
      {
        /*
          Skip comment.
        */
        while ((*q != '\n') && (*q != '\0'))
          q++;
        continue;
      }
    if (LocaleCompare("push",token) == 0)
      {
        register const char
          *end,
          *start;

        GetNextToken(q,&q,extent,token);
        if (*q == '"')
          {
            char
              name[MagickPathExtent];

            const char
              *p;

            ssize_t
              n;

            /*
              Named macro (e.g. push graphic-context "wheel").  Scan ahead
              with p to find the matching pop; q itself is not advanced
              past the macro body here.
            */
            GetNextToken(q,&q,extent,token);
            start=q;
            (void) CopyMagickString(name,token,MagickPathExtent);
            n=0;
            /*
              NOTE(review): the loop condition tests *q, which does not
              change inside this loop (only p advances) -- termination
              relies on the break statements below; confirm this is the
              intended behavior.
            */
            for (p=q; *q != '\0'; )
            {
              GetNextToken(p,&p,extent,token);
              if (*token == '\0')
                break;
              if (*token == '#')
                {
                  /*
                    Skip comment.
                  */
                  while ((*p != '\n') && (*p != '\0'))
                    p++;
                  continue;
                }
              if (LocaleCompare(token,"pop") == 0)
                {
                  /*
                    end points just before this "pop" token; n tracks the
                    push/pop nesting depth.
                  */
                  end=p-strlen(token)-1;
                  n--;
                }
              if (LocaleCompare(token,"push") == 0)
                n++;
              if (n < 0)
                {
                  char
                    *macro;

                  /*
                    The matching pop was found: extract the macro body
                    [start,end) and register it under its name.
                  */
                  GetNextToken(p,&p,extent,token);
                  macro=AcquireString(start);
                  macro[end-start]='\0';
                  (void) AddValueToSplayTree(macros,ConstantString(name),
                    ConstantString(macro));
                  macro=DestroyString(macro);
                  break;
                }
            }
          }
      }
  }
  token=DestroyString(token);
  return(macros);
}
static inline MagickBooleanType IsPoint(const char *point)
{
  char
    *end;

  double
    value;

  /*
    A token qualifies as a point if StringToDouble() consumed at least one
    character or produced a non-zero value.
  */
  value=StringToDouble(point,&end);
  if ((end == point) && (fabs(value) < MagickEpsilon))
    return(MagickFalse);
  return(MagickTrue);
}
static inline void TracePoint(PrimitiveInfo *primitive_info,
  const PointInfo point)
{
  /*
    Record a single-coordinate, open primitive at the given point.
  */
  primitive_info->point=point;
  primitive_info->coordinates=1;
  primitive_info->closed_subpath=MagickFalse;
}
MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
ExceptionInfo *exception)
{
#define RenderImageTag "Render/Image"
AffineMatrix
affine,
current;
char
keyword[MagickPathExtent],
geometry[MagickPathExtent],
*next_token,
pattern[MagickPathExtent],
*primitive,
*token;
const char
*q;
double
angle,
coordinates,
cursor,
factor,
primitive_extent;
DrawInfo
**graphic_context;
MagickBooleanType
proceed;
MagickStatusType
status;
MVGInfo
mvg_info;
PointInfo
point;
PrimitiveInfo
*primitive_info;
PrimitiveType
primitive_type;
register const char
*p;
register ssize_t
i,
x;
SegmentInfo
bounds;
size_t
extent,
number_points,
number_stops;
SplayTreeInfo
*macros;
ssize_t
defsDepth,
j,
k,
n,
symbolDepth;
StopInfo
*stops;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
if ((draw_info->primitive == (char *) NULL) ||
(*draw_info->primitive == '\0'))
return(MagickFalse);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image");
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (image->alpha_trait == UndefinedPixelTrait)
{
status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
if (status == MagickFalse)
return(status);
}
primitive=(char *) NULL;
if (*draw_info->primitive != '@')
primitive=AcquireString(draw_info->primitive);
else
if ((strlen(draw_info->primitive) > 1) &&
(*(draw_info->primitive+1) != '-'))
primitive=FileToString(draw_info->primitive+1,~0UL,exception);
if (primitive == (char *) NULL)
return(MagickFalse);
primitive_extent=(double) strlen(primitive);
(void) SetImageArtifact(image,"MVG",primitive);
n=0;
number_stops=0;
stops=(StopInfo *) NULL;
/*
Allocate primitive info memory.
*/
graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
primitive=DestroyString(primitive);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
number_points=4096;
primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*primitive_info));
if (primitive_info == (PrimitiveInfo *) NULL)
{
primitive=DestroyString(primitive);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) memset(primitive_info,0,(size_t) number_points*
sizeof(*primitive_info));
mvg_info.primitive_info=(&primitive_info);
mvg_info.extent=(&number_points);
mvg_info.offset=0;
mvg_info.exception=exception;
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info);
graphic_context[n]->viewbox=image->page;
if ((image->page.width == 0) || (image->page.height == 0))
{
graphic_context[n]->viewbox.width=image->columns;
graphic_context[n]->viewbox.height=image->rows;
}
token=AcquireString(primitive);
extent=strlen(token)+MagickPathExtent;
cursor=0.0;
defsDepth=0;
symbolDepth=0;
macros=GetMVGMacros(primitive);
status=MagickTrue;
for (q=primitive; *q != '\0'; )
{
/*
Interpret graphic primitive.
*/
GetNextToken(q,&q,MagickPathExtent,keyword);
if (*keyword == '\0')
break;
if (*keyword == '#')
{
/*
Comment.
*/
while ((*q != '\n') && (*q != '\0'))
q++;
continue;
}
p=q-strlen(keyword)-1;
primitive_type=UndefinedPrimitive;
current=graphic_context[n]->affine;
GetAffineMatrix(&affine);
switch (*keyword)
{
case ';':
break;
case 'a':
case 'A':
{
if (LocaleCompare("affine",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
affine.sx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.rx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.ry=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.sy=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.tx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.ty=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
cursor=0.0;
break;
}
if (LocaleCompare("alpha",keyword) == 0)
{
primitive_type=AlphaPrimitive;
break;
}
if (LocaleCompare("arc",keyword) == 0)
{
primitive_type=ArcPrimitive;
break;
}
status=MagickFalse;
break;
}
case 'b':
case 'B':
{
if (LocaleCompare("bezier",keyword) == 0)
{
primitive_type=BezierPrimitive;
break;
}
if (LocaleCompare("border-color",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->border_color,exception);
break;
}
status=MagickFalse;
break;
}
case 'c':
case 'C':
{
if (LocaleCompare("class",keyword) == 0)
{
const char
*mvg_class;
GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
mvg_class=(const char *) GetValueFromSplayTree(macros,token);
if (mvg_class != (const char *) NULL)
{
char
*elements;
ssize_t
offset;
/*
Inject class elements in stream.
*/
offset=(ssize_t) (p-primitive);
elements=AcquireString(primitive);
elements[offset]='\0';
(void) ConcatenateString(&elements,mvg_class);
(void) ConcatenateString(&elements,"\n");
(void) ConcatenateString(&elements,q);
primitive=DestroyString(primitive);
primitive=elements;
q=primitive+offset;
}
break;
}
if (LocaleCompare("clip-path",keyword) == 0)
{
const char
*clip_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
(void) CloneString(&graphic_context[n]->clip_mask,token);
clip_path=(const char *) GetValueFromSplayTree(macros,token);
if (clip_path != (const char *) NULL)
{
if (graphic_context[n]->clipping_mask != (Image *) NULL)
graphic_context[n]->clipping_mask=
DestroyImage(graphic_context[n]->clipping_mask);
graphic_context[n]->clipping_mask=DrawClippingMask(image,
graphic_context[n],token,clip_path,exception);
if (draw_info->compliance != SVGCompliance)
(void) DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
}
break;
}
if (LocaleCompare("clip-rule",keyword) == 0)
{
ssize_t
fill_rule;
GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("clip-units",keyword) == 0)
{
ssize_t
clip_units;
GetNextToken(q,&q,extent,token);
clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse,
token);
if (clip_units == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->clip_units=(ClipPathUnits) clip_units;
if (clip_units == ObjectBoundingBox)
{
GetAffineMatrix(¤t);
affine.sx=draw_info->bounds.x2;
affine.sy=draw_info->bounds.y2;
affine.tx=draw_info->bounds.x1;
affine.ty=draw_info->bounds.y1;
break;
}
break;
}
if (LocaleCompare("circle",keyword) == 0)
{
primitive_type=CirclePrimitive;
break;
}
if (LocaleCompare("color",keyword) == 0)
{
primitive_type=ColorPrimitive;
break;
}
if (LocaleCompare("compliance",keyword) == 0)
{
/*
MVG compliance associates a clipping mask with an image; SVG
compliance associates a clipping mask with a graphics context.
*/
GetNextToken(q,&q,extent,token);
graphic_context[n]->compliance=(ComplianceType) ParseCommandOption(
MagickComplianceOptions,MagickFalse,token);
break;
}
status=MagickFalse;
break;
}
case 'd':
case 'D':
{
if (LocaleCompare("decorate",keyword) == 0)
{
ssize_t
decorate;
GetNextToken(q,&q,extent,token);
decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse,
token);
if (decorate == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->decorate=(DecorationType) decorate;
break;
}
if (LocaleCompare("density",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->density,token);
break;
}
if (LocaleCompare("direction",keyword) == 0)
{
ssize_t
direction;
GetNextToken(q,&q,extent,token);
direction=ParseCommandOption(MagickDirectionOptions,MagickFalse,
token);
if (direction == -1)
status=MagickFalse;
else
graphic_context[n]->direction=(DirectionType) direction;
break;
}
status=MagickFalse;
break;
}
case 'e':
case 'E':
{
if (LocaleCompare("ellipse",keyword) == 0)
{
primitive_type=EllipsePrimitive;
break;
}
if (LocaleCompare("encoding",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->encoding,token);
break;
}
status=MagickFalse;
break;
}
case 'f':
case 'F':
{
if (LocaleCompare("fill",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->fill_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->fill,exception);
if (graphic_context[n]->fill_alpha != OpaqueAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
}
break;
}
if (LocaleCompare("fill-opacity",keyword) == 0)
{
double
opacity;
GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
graphic_context[n]->fill_alpha*=opacity;
if (graphic_context[n]->fill_alpha != OpaqueAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
break;
}
if (LocaleCompare("fill-rule",keyword) == 0)
{
ssize_t
fill_rule;
GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("font",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->font,token);
if (LocaleCompare("none",token) == 0)
graphic_context[n]->font=(char *) RelinquishMagickMemory(
graphic_context[n]->font);
break;
}
if (LocaleCompare("font-family",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->family,token);
break;
}
if (LocaleCompare("font-size",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->pointsize=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("font-stretch",keyword) == 0)
{
ssize_t
stretch;
GetNextToken(q,&q,extent,token);
stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token);
if (stretch == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->stretch=(StretchType) stretch;
break;
}
if (LocaleCompare("font-style",keyword) == 0)
{
ssize_t
style;
GetNextToken(q,&q,extent,token);
style=ParseCommandOption(MagickStyleOptions,MagickFalse,token);
if (style == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->style=(StyleType) style;
break;
}
if (LocaleCompare("font-weight",keyword) == 0)
{
ssize_t
weight;
GetNextToken(q,&q,extent,token);
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token);
if (weight == -1)
weight=(ssize_t) StringToUnsignedLong(token);
graphic_context[n]->weight=(size_t) weight;
break;
}
status=MagickFalse;
break;
}
case 'g':
case 'G':
{
if (LocaleCompare("gradient-units",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("gravity",keyword) == 0)
{
ssize_t
gravity;
GetNextToken(q,&q,extent,token);
gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token);
if (gravity == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->gravity=(GravityType) gravity;
break;
}
status=MagickFalse;
break;
}
case 'i':
case 'I':
{
if (LocaleCompare("image",keyword) == 0)
{
ssize_t
compose;
primitive_type=ImagePrimitive;
GetNextToken(q,&q,extent,token);
compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token);
if (compose == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->compose=(CompositeOperator) compose;
break;
}
if (LocaleCompare("interline-spacing",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->interline_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("interword-spacing",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'k':
case 'K':
{
if (LocaleCompare("kerning",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->kerning=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'l':
case 'L':
{
if (LocaleCompare("line",keyword) == 0)
{
primitive_type=LinePrimitive;
break;
}
status=MagickFalse;
break;
}
case 'm':
case 'M':
{
if (LocaleCompare("mask",keyword) == 0)
{
const char
*mask_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
GetNextToken(q,&q,extent,token);
mask_path=(const char *) GetValueFromSplayTree(macros,token);
if (mask_path != (const char *) NULL)
{
if (graphic_context[n]->composite_mask != (Image *) NULL)
graphic_context[n]->composite_mask=
DestroyImage(graphic_context[n]->composite_mask);
graphic_context[n]->composite_mask=DrawCompositeMask(image,
graphic_context[n],token,mask_path,exception);
if (draw_info->compliance != SVGCompliance)
status=SetImageMask(image,CompositePixelMask,
graphic_context[n]->composite_mask,exception);
}
break;
}
break;
}
case 'o':
case 'O':
{
if (LocaleCompare("offset",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("opacity",keyword) == 0)
{
double
opacity;
GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
graphic_context[n]->fill_alpha*=opacity;
if (graphic_context[n]->fill_alpha != OpaqueAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
graphic_context[n]->stroke_alpha*=opacity;
if (graphic_context[n]->stroke_alpha != OpaqueAlpha)
graphic_context[n]->stroke.alpha=graphic_context[n]->stroke_alpha;
break;
}
status=MagickFalse;
break;
}
case 'p':
case 'P':
{
if (LocaleCompare("path",keyword) == 0)
{
primitive_type=PathPrimitive;
break;
}
if (LocaleCompare("point",keyword) == 0)
{
primitive_type=PointPrimitive;
break;
}
if (LocaleCompare("polyline",keyword) == 0)
{
primitive_type=PolylinePrimitive;
break;
}
if (LocaleCompare("polygon",keyword) == 0)
{
primitive_type=PolygonPrimitive;
break;
}
if (LocaleCompare("pop",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare("class",token) == 0)
break;
if (LocaleCompare("clip-path",token) == 0)
break;
if (LocaleCompare("defs",token) == 0)
{
defsDepth--;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
break;
if (LocaleCompare("graphic-context",token) == 0)
{
if (n <= 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),
DrawError,"UnbalancedGraphicContextPushPop","`%s'",token);
status=MagickFalse;
n=0;
break;
}
if ((graphic_context[n]->clip_mask != (char *) NULL) &&
(draw_info->compliance != SVGCompliance))
if (LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0)
(void) SetImageMask(image,WritePixelMask,(Image *) NULL,
exception);
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
n--;
break;
}
if (LocaleCompare("mask",token) == 0)
break;
if (LocaleCompare("pattern",token) == 0)
break;
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth--;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
if (LocaleCompare("push",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare("class",token) == 0)
{
/*
Class context.
*/
for (p=q; *q != '\0'; )
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare(token,"pop") != 0)
continue;
GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"class") != 0)
continue;
break;
}
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("clip-path",token) == 0)
{
char
name[MaxTextExtent];
const char
*clip_path;
GetNextToken(q,&q,extent,token);
(void) FormatLocaleString(name,MaxTextExtent,"%s",token);
clip_path=(const char *) GetValueFromSplayTree(macros,name);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,name,clip_path);
break;
}
if (LocaleCompare("defs",token) == 0)
{
defsDepth++;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent],
type[MagickPathExtent];
SegmentInfo
segment;
GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
GetNextToken(q,&q,extent,token);
(void) CopyMagickString(type,token,MagickPathExtent);
GetNextToken(q,&q,extent,token);
segment.x1=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
segment.y1=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
segment.x2=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
segment.y2=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (LocaleCompare(type,"radial") == 0)
{
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
}
for (p=q; *q != '\0'; )
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare(token,"pop") != 0)
continue;
GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"gradient") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
bounds.x1=graphic_context[n]->affine.sx*segment.x1+
graphic_context[n]->affine.ry*segment.y1+
graphic_context[n]->affine.tx;
bounds.y1=graphic_context[n]->affine.rx*segment.x1+
graphic_context[n]->affine.sy*segment.y1+
graphic_context[n]->affine.ty;
bounds.x2=graphic_context[n]->affine.sx*segment.x2+
graphic_context[n]->affine.ry*segment.y2+
graphic_context[n]->affine.tx;
bounds.y2=graphic_context[n]->affine.rx*segment.x2+
graphic_context[n]->affine.sy*segment.y2+
graphic_context[n]->affine.ty;
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-type",name);
(void) SetImageArtifact(image,key,type);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",
name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%gx%g%+.15g%+.15g",
MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0),
MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0),
bounds.x1,bounds.y1);
(void) SetImageArtifact(image,key,geometry);
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("graphic-context",token) == 0)
{
n++;
graphic_context=(DrawInfo **) ResizeQuantumMemory(
graphic_context,(size_t) (n+1),sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,
graphic_context[n-1]);
if (*q == '"')
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("mask",token) == 0)
{
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("pattern",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent];
RectangleInfo
bounds;
GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
GetNextToken(q,&q,extent,token);
bounds.x=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
bounds.y=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
bounds.width=(size_t) floor(StringToDouble(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
bounds.height=(size_t) floor(StringToDouble(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
for (p=q; *q != '\0'; )
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare(token,"pop") != 0)
continue;
GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"pattern") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",
name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%.20gx%.20g%+.20g%+.20g",(double) bounds.width,(double)
bounds.height,(double) bounds.x,(double) bounds.y);
(void) SetImageArtifact(image,key,geometry);
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth++;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
status=MagickFalse;
break;
}
case 'r':
case 'R':
{
if (LocaleCompare("rectangle",keyword) == 0)
{
primitive_type=RectanglePrimitive;
break;
}
if (LocaleCompare("rotate",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0)));
affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0)));
affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0))));
affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0)));
break;
}
if (LocaleCompare("roundRectangle",keyword) == 0)
{
primitive_type=RoundRectanglePrimitive;
break;
}
status=MagickFalse;
break;
}
case 's':
case 'S':
{
if (LocaleCompare("scale",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
affine.sx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.sy=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("skewX",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.ry=sin(DegreesToRadians(angle));
break;
}
if (LocaleCompare("skewY",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.rx=(-tan(DegreesToRadians(angle)/2.0));
break;
}
if (LocaleCompare("stop-color",keyword) == 0)
{
PixelInfo
stop_color;
number_stops++;
if (number_stops == 1)
stops=(StopInfo *) AcquireQuantumMemory(2,sizeof(*stops));
else
if (number_stops > 2)
stops=(StopInfo *) ResizeQuantumMemory(stops,number_stops,
sizeof(*stops));
if (stops == (StopInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,&stop_color,
exception);
stops[number_stops-1].color=stop_color;
GetNextToken(q,&q,extent,token);
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
stops[number_stops-1].offset=factor*StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("stroke",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->stroke_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->stroke,exception);
if (graphic_context[n]->stroke_alpha != OpaqueAlpha)
graphic_context[n]->stroke.alpha=
graphic_context[n]->stroke_alpha;
}
break;
}
if (LocaleCompare("stroke-antialias",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->stroke_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("stroke-dasharray",keyword) == 0)
{
if (graphic_context[n]->dash_pattern != (double *) NULL)
graphic_context[n]->dash_pattern=(double *)
RelinquishMagickMemory(graphic_context[n]->dash_pattern);
if (IsPoint(q) != MagickFalse)
{
const char
*r;
r=q;
GetNextToken(r,&r,extent,token);
if (*token == ',')
GetNextToken(r,&r,extent,token);
for (x=0; IsPoint(token) != MagickFalse; x++)
{
GetNextToken(r,&r,extent,token);
if (*token == ',')
GetNextToken(r,&r,extent,token);
}
graphic_context[n]->dash_pattern=(double *)
AcquireQuantumMemory((size_t) (2UL*x+2UL),
sizeof(*graphic_context[n]->dash_pattern));
if (graphic_context[n]->dash_pattern == (double *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
status=MagickFalse;
break;
}
(void) memset(graphic_context[n]->dash_pattern,0,(size_t)
(2UL*x+2UL)*sizeof(*graphic_context[n]->dash_pattern));
for (j=0; j < x; j++)
{
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_pattern[j]=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->dash_pattern[j] < 0.0)
status=MagickFalse;
}
if ((x & 0x01) != 0)
for ( ; j < (2*x); j++)
graphic_context[n]->dash_pattern[j]=
graphic_context[n]->dash_pattern[j-x];
graphic_context[n]->dash_pattern[j]=0.0;
break;
}
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("stroke-dashoffset",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_offset=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("stroke-linecap",keyword) == 0)
{
ssize_t
linecap;
GetNextToken(q,&q,extent,token);
linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token);
if (linecap == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linecap=(LineCap) linecap;
break;
}
if (LocaleCompare("stroke-linejoin",keyword) == 0)
{
ssize_t
linejoin;
GetNextToken(q,&q,extent,token);
linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse,
token);
if (linejoin == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linejoin=(LineJoin) linejoin;
break;
}
if (LocaleCompare("stroke-miterlimit",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->miterlimit=StringToUnsignedLong(token);
break;
}
if (LocaleCompare("stroke-opacity",keyword) == 0)
{
double
opacity;
GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
graphic_context[n]->stroke_alpha*=opacity;
if (graphic_context[n]->stroke_alpha != OpaqueAlpha)
graphic_context[n]->stroke.alpha=graphic_context[n]->stroke_alpha;
break;
}
if (LocaleCompare("stroke-width",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
graphic_context[n]->stroke_width=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 't':
case 'T':
{
if (LocaleCompare("text",keyword) == 0)
{
primitive_type=TextPrimitive;
/* affine.tx+=cursor; */
break;
}
if (LocaleCompare("text-align",keyword) == 0)
{
ssize_t
align;
GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-anchor",keyword) == 0)
{
ssize_t
align;
GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-antialias",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->text_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("text-undercolor",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->undercolor,exception);
break;
}
if (LocaleCompare("translate",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
affine.tx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.ty=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
cursor=0.0;
break;
}
status=MagickFalse;
break;
}
case 'u':
case 'U':
{
if (LocaleCompare("use",keyword) == 0)
{
const char
*use;
/*
Get a macro from the MVG document, and "use" it here.
*/
GetNextToken(q,&q,extent,token);
use=(const char *) GetValueFromSplayTree(macros,token);
if (use != (const char *) NULL)
{
DrawInfo
*clone_info;
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
(void) CloneString(&clone_info->primitive,use);
status=DrawImage(image,clone_info,exception);
clone_info=DestroyDrawInfo(clone_info);
}
break;
}
break;
}
case 'v':
case 'V':
{
if (LocaleCompare("viewbox",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.x=(ssize_t) ceil(StringToDouble(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.y=(ssize_t) ceil(StringToDouble(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.width=(size_t) floor(StringToDouble(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.height=(size_t) floor(StringToDouble(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
default:
{
status=MagickFalse;
break;
}
}
if (status == MagickFalse)
break;
if ((fabs(affine.sx-1.0) >= MagickEpsilon) ||
(fabs(affine.rx) >= MagickEpsilon) || (fabs(affine.ry) >= MagickEpsilon) ||
(fabs(affine.sy-1.0) >= MagickEpsilon) ||
(fabs(affine.tx) >= MagickEpsilon) || (fabs(affine.ty) >= MagickEpsilon))
{
graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx;
graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx;
graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy;
graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy;
graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+
current.tx;
graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+
current.ty;
}
if (primitive_type == UndefinedPrimitive)
{
if (*q == '\0')
{
if (number_stops > 1)
{
GradientType
type;
type=LinearGradient;
if (draw_info->gradient.type == RadialGradient)
type=RadialGradient;
(void) GradientImage(image,type,PadSpread,stops,number_stops,
exception);
}
if (number_stops > 0)
stops=(StopInfo *) RelinquishMagickMemory(stops);
}
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int)
(q-p-1),p);
continue;
}
/*
Parse the primitive attributes.
*/
i=0;
mvg_info.offset=i;
j=0;
primitive_info[0].point.x=0.0;
primitive_info[0].point.y=0.0;
primitive_info[0].coordinates=0;
primitive_info[0].method=FloodfillMethod;
primitive_info[0].closed_subpath=MagickFalse;
for (x=0; *q != '\0'; x++)
{
/*
Define points.
*/
if (IsPoint(q) == MagickFalse)
break;
GetNextToken(q,&q,extent,token);
point.x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
point.y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,(const char **) NULL,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
primitive_info[i].primitive=primitive_type;
primitive_info[i].point=point;
primitive_info[i].coordinates=0;
primitive_info[i].method=FloodfillMethod;
primitive_info[i].closed_subpath=MagickFalse;
i++;
mvg_info.offset=i;
if (i < (ssize_t) number_points)
continue;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
if (status == MagickFalse)
break;
primitive_info[j].primitive=primitive_type;
primitive_info[j].coordinates=(size_t) x;
primitive_info[j].method=FloodfillMethod;
primitive_info[j].closed_subpath=MagickFalse;
primitive_info[j].text=(char *) NULL;
/*
Circumscribe primitive within a circle.
*/
bounds.x1=primitive_info[j].point.x;
bounds.y1=primitive_info[j].point.y;
bounds.x2=primitive_info[j].point.x;
bounds.y2=primitive_info[j].point.y;
for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++)
{
point=primitive_info[j+k].point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.y < bounds.y1)
bounds.y1=point.y;
if (point.x > bounds.x2)
bounds.x2=point.x;
if (point.y > bounds.y2)
bounds.y2=point.y;
}
/*
Speculate how many points our primitive might consume.
*/
coordinates=(double) primitive_info[j].coordinates;
switch (primitive_type)
{
case RectanglePrimitive:
{
coordinates*=5.0;
break;
}
case RoundRectanglePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot((double) alpha,(double) beta);
coordinates*=5.0;
coordinates+=2.0*((size_t) ceil((double) MagickPI*radius))+6.0*
BezierQuantum+360.0;
break;
}
case BezierPrimitive:
{
coordinates=(double) (BezierQuantum*primitive_info[j].coordinates);
if (primitive_info[j].coordinates > (107*BezierQuantum))
{
(void) ThrowMagickException(exception,GetMagickModule(),DrawError,
"TooManyBezierCoordinates","`%s'",token);
status=MagickFalse;
break;
}
break;
}
case PathPrimitive:
{
char
*s,
*t;
GetNextToken(q,&q,extent,token);
coordinates=1.0;
t=token;
for (s=token; *s != '\0'; s=t)
{
double
value;
value=StringToDouble(s,&t);
(void) value;
if (s == t)
{
t++;
continue;
}
coordinates++;
}
for (s=token; *s != '\0'; s++)
if (strspn(s,"AaCcQqSsTt") != 0)
coordinates+=(20.0*BezierQuantum)+360.0;
break;
}
case CirclePrimitive:
case ArcPrimitive:
case EllipsePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot(alpha,beta);
coordinates=2.0*(ceil(MagickPI*radius))+6.0*BezierQuantum+360.0;
if (coordinates > (107*BezierQuantum))
{
(void) ThrowMagickException(exception,GetMagickModule(),DrawError,
"TooManyBezierCoordinates","`%s'",token);
status=MagickFalse;
}
break;
}
default:
break;
}
if (coordinates > MaxBezierCoordinates)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",token);
status=MagickFalse;
}
if (status == MagickFalse)
break;
if (((size_t) (i+coordinates)) >= number_points)
{
/*
Resize based on speculative points required by primitive.
*/
number_points+=coordinates+1;
if (number_points < (size_t) coordinates)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
mvg_info.offset=i;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
status&=CheckPrimitiveExtent(&mvg_info,4096);
if (status == MagickFalse)
break;
mvg_info.offset=j;
switch (primitive_type)
{
case PointPrimitive:
default:
{
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
TracePoint(primitive_info+j,primitive_info[j].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case LinePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
TraceLine(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RectanglePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
TraceRectangle(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RoundRectanglePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+2].point.x < 0.0) ||
(primitive_info[j+2].point.y < 0.0))
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x-primitive_info[j].point.x) < 0.0)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.y-primitive_info[j].point.y) < 0.0)
{
status=MagickFalse;
break;
}
TraceRoundRectangle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case ArcPrimitive:
{
if (primitive_info[j].coordinates != 3)
{
primitive_type=UndefinedPrimitive;
break;
}
TraceArc(&mvg_info,primitive_info[j].point,primitive_info[j+1].point,
primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case EllipsePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x < 0.0) ||
(primitive_info[j+1].point.y < 0.0))
{
status=MagickFalse;
break;
}
TraceEllipse(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case CirclePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
TraceCircle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PolylinePrimitive:
{
if (primitive_info[j].coordinates < 1)
{
status=MagickFalse;
break;
}
break;
}
case PolygonPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
primitive_info[i]=primitive_info[j];
primitive_info[i].coordinates=0;
primitive_info[j].coordinates++;
primitive_info[j].closed_subpath=MagickTrue;
i++;
break;
}
case BezierPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
TraceBezier(&mvg_info,primitive_info[j].coordinates);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PathPrimitive:
{
coordinates=(double) TracePath(&mvg_info,token,exception);
if (coordinates == 0.0)
{
status=MagickFalse;
break;
}
i=(ssize_t) (j+coordinates);
break;
}
case AlphaPrimitive:
case ColorPrimitive:
{
ssize_t
method;
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
GetNextToken(q,&q,extent,token);
method=ParseCommandOption(MagickMethodOptions,MagickFalse,token);
if (method == -1)
{
status=MagickFalse;
break;
}
primitive_info[j].method=(PaintMethod) method;
break;
}
case TextPrimitive:
{
DrawInfo
*clone_info;
TypeMetric
metrics;
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
if (*token != ',')
GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
/*
Compute text cursor offset.
*/
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
if (clone_info->density != (char *) NULL)
clone_info->density=DestroyString(clone_info->density);
clone_info->render=MagickFalse;
clone_info->text=AcquireString(token);
(void) ConcatenateString(&clone_info->text," ");
status&=GetTypeMetrics(image,clone_info,&metrics,exception);
clone_info=DestroyDrawInfo(clone_info);
cursor+=metrics.width;
break;
}
case ImagePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
break;
}
}
mvg_info.offset=i;
if (primitive_info == (PrimitiveInfo *) NULL)
break;
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1),
p);
if (status == MagickFalse)
break;
primitive_info[i].primitive=UndefinedPrimitive;
if (i == 0)
continue;
/*
Transform points.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
point=primitive_info[i].point;
primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+
graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx;
primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+
graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty;
point=primitive_info[i].point;
if (point.x < graphic_context[n]->bounds.x1)
graphic_context[n]->bounds.x1=point.x;
if (point.y < graphic_context[n]->bounds.y1)
graphic_context[n]->bounds.y1=point.y;
if (point.x > graphic_context[n]->bounds.x2)
graphic_context[n]->bounds.x2=point.x;
if (point.y > graphic_context[n]->bounds.y2)
graphic_context[n]->bounds.y2=point.y;
if (primitive_info[i].primitive == ImagePrimitive)
break;
if (i >= (ssize_t) number_points)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
}
if (graphic_context[n]->render != MagickFalse)
{
if ((n != 0) && (draw_info->compliance != SVGCompliance) &&
(graphic_context[n]->clip_mask != (char *) NULL) &&
(LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0))
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
status&=DrawPrimitive(image,graphic_context[n],primitive_info,
exception);
}
proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType)
primitive_extent);
if (proceed == MagickFalse)
break;
if (status == 0)
break;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image");
/*
Relinquish resources.
*/
macros=DestroySplayTree(macros);
token=DestroyString(token);
if (primitive_info != (PrimitiveInfo *) NULL)
{
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info);
}
primitive=DestroyString(primitive);
if (stops != (StopInfo *) NULL)
stops=(StopInfo *) RelinquishMagickMemory(stops);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
if (status == MagickFalse)
ThrowBinaryException(DrawError,"NonconformingDrawingPrimitiveDefinition",
keyword);
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P a t t e r n P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPatternPath() draws a pattern.
%
% The format of the DrawPatternPath method is:
%
% MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info,
% const char *name,Image **pattern,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o name: the pattern name.
%
% o pattern: the pattern image, allocated (or replaced) by this method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawPatternPath(Image *image,
  const DrawInfo *draw_info,const char *name,Image **pattern,
  ExceptionInfo *exception)
{
  char
    property[MagickPathExtent];

  const char
    *geometry,
    *path,
    *type;

  DrawInfo
    *clone_info;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  assert(name != (const char *) NULL);
  /*
    The pattern's MVG source and its geometry were stored as image artifacts
    under "<name>" and "<name>-geometry" when "push pattern" (or "push
    gradient") was parsed; if either artifact is missing there is nothing to
    render, so fail without raising an exception.
  */
  (void) FormatLocaleString(property,MagickPathExtent,"%s",name);
  path=GetImageArtifact(image,property);
  if (path == (const char *) NULL)
    return(MagickFalse);
  (void) FormatLocaleString(property,MagickPathExtent,"%s-geometry",name);
  geometry=GetImageArtifact(image,property);
  if (geometry == (const char *) NULL)
    return(MagickFalse);
  /*
    Replace any previously rendered tile with a fresh canvas sized by the
    recorded geometry, then initialize its background color before drawing.
  */
  if ((*pattern) != (Image *) NULL)
    *pattern=DestroyImage(*pattern);
  image_info=AcquireImageInfo();
  image_info->size=AcquireString(geometry);
  *pattern=AcquireImage(image_info,exception);
  image_info=DestroyImageInfo(image_info);
  /*
    NOTE(review): "#000000ff" is black with an explicit alpha byte of 0xff;
    confirm this yields the intended (transparent vs. opaque) tile background
    under the hex-RGBA parsing rules of QueryColorCompliance.
  */
  (void) QueryColorCompliance("#000000ff",AllCompliance,
    &(*pattern)->background_color,exception);
  (void) SetImageBackgroundColor(*pattern,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "begin pattern-path %s %s",name,geometry);
  /*
    Render the stored MVG onto the tile with a clone of the caller's drawing
    settings.  The inherited fill/stroke pattern images are reset first —
    presumably so the clone does not carry a stale pattern tile into the
    recursive DrawImage() call; verify against callers.
  */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->fill_pattern=NewImageList();
  clone_info->stroke_pattern=NewImageList();
  /*
    A "<name>-type" artifact (written by the "push gradient" handler) selects
    the gradient type for this pattern, when present.
  */
  (void) FormatLocaleString(property,MagickPathExtent,"%s-type",name);
  type=GetImageArtifact(image,property);
  if (type != (const char *) NULL)
    clone_info->gradient.type=(GradientType) ParseCommandOption(
      MagickGradientOptions,MagickFalse,type);
  (void) CloneString(&clone_info->primitive,path);
  status=DrawImage(*pattern,clone_info,exception);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w P o l y g o n P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPolygonPrimitive() draws a polygon on the image.
%
% The format of the DrawPolygonPrimitive method is:
%
% MagickBooleanType DrawPolygonPrimitive(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Release every per-thread PolygonInfo in the set, then the set itself.
  Always returns NULL so callers can write `p=DestroyPolygonThreadSet(p);`.
*/
static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info)
{
  register ssize_t
    i;

  ssize_t
    number_threads;

  assert(polygon_info != (PolygonInfo **) NULL);
  /*
    Hoisted out of the loop: the thread-resource limit is loop invariant and
    GetMagickResourceLimit() was previously called once per iteration.
  */
  number_threads=(ssize_t) GetMagickResourceLimit(ThreadResource);
  for (i=0; i < number_threads; i++)
    if (polygon_info[i] != (PolygonInfo *) NULL)
      polygon_info[i]=DestroyPolygonInfo(polygon_info[i]);
  polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info);
  return(polygon_info);
}
static PolygonInfo **AcquirePolygonThreadSet(
const PrimitiveInfo *primitive_info)
{
PathInfo
*magick_restrict path_info;
PolygonInfo
**polygon_info;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads,
sizeof(*polygon_info));
if (polygon_info == (PolygonInfo **) NULL)
return((PolygonInfo **) NULL);
(void) memset(polygon_info,0,number_threads*sizeof(*polygon_info));
path_info=ConvertPrimitiveToPath(primitive_info);
if (path_info == (PathInfo *) NULL)
return(DestroyPolygonThreadSet(polygon_info));
for (i=0; i < (ssize_t) number_threads; i++)
{
polygon_info[i]=ConvertPathToPolygon(path_info);
if (polygon_info[i] == (PolygonInfo *) NULL)
return(DestroyPolygonThreadSet(polygon_info));
}
path_info=(PathInfo *) RelinquishMagickMemory(path_info);
return(polygon_info);
}
/*
  GetFillAlpha() returns the fill opacity (0.0..1.0) of pixel (x,y) against
  the polygon's edge list and, via *stroke_alpha, the stroke opacity at the
  same point.  `mid` is half the transformed stroke width; `fill` selects
  whether a fill value is wanted at all; `fill_rule` chooses even-odd vs
  non-zero winding.  NOTE(review): mutates p->scanline/p->highwater and
  destroys edges already passed — callers must use a per-thread PolygonInfo.
*/
static double GetFillAlpha(PolygonInfo *polygon_info,const double mid,
  const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x,
  const ssize_t y,double *stroke_alpha)
{
  double
    alpha,
    beta,
    distance,
    subpath_alpha;

  PointInfo
    delta;

  register const PointInfo
    *q;

  register EdgeInfo
    *p;

  register ssize_t
    i;

  ssize_t
    j,
    winding_number;

  /*
    Compute fill & stroke opacity for this (x,y) point.
  */
  *stroke_alpha=0.0;
  subpath_alpha=0.0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    /* Edges are y-sorted: once an edge starts below y, the rest do too. */
    if ((double) y <= (p->bounds.y1-mid-0.5))
      break;
    if ((double) y > (p->bounds.y2+mid+0.5))
      {
        /* Edge is entirely above the current scanline; never needed again. */
        (void) DestroyEdge(polygon_info,(size_t) j);
        continue;
      }
    if (((double) x <= (p->bounds.x1-mid-0.5)) ||
        ((double) x > (p->bounds.x2+mid+0.5)))
      continue;
    /* Resume from the last point reached on this scanline (highwater). */
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) p->number_points; i++)
    {
      if ((double) y <= (p->points[i-1].y-mid-0.5))
        break;
      if ((double) y > (p->points[i].y+mid+0.5))
        continue;
      if (p->scanline != (double) y)
        {
          p->scanline=(double) y;
          p->highwater=(size_t) i;
        }
      /*
        Compute distance between a point and an edge.
      */
      q=p->points+i-1;
      delta.x=(q+1)->x-q->x;
      delta.y=(q+1)->y-q->y;
      beta=delta.x*(x-q->x)+delta.y*(y-q->y);
      if (beta <= 0.0)
        {
          /* Projection falls before the segment start: distance to q. */
          delta.x=(double) x-q->x;
          delta.y=(double) y-q->y;
          distance=delta.x*delta.x+delta.y*delta.y;
        }
      else
        {
          alpha=delta.x*delta.x+delta.y*delta.y;
          if (beta >= alpha)
            {
              /* Projection falls past the segment end: distance to q+1. */
              delta.x=(double) x-(q+1)->x;
              delta.y=(double) y-(q+1)->y;
              distance=delta.x*delta.x+delta.y*delta.y;
            }
          else
            {
              /* Perpendicular (squared) distance to the segment interior. */
              alpha=PerceptibleReciprocal(alpha);
              beta=delta.x*(y-q->y)-delta.y*(x-q->x);
              distance=alpha*beta*beta;
            }
        }
      /*
        Compute stroke & subpath opacity.
      */
      beta=0.0;
      if (p->ghostline == MagickFalse)
        {
          /* Inside the stroke band => opaque; near its rim => feathered. */
          alpha=mid+0.5;
          if ((*stroke_alpha < 1.0) &&
              (distance <= ((alpha+0.25)*(alpha+0.25))))
            {
              alpha=mid-0.5;
              if (distance <= ((alpha+0.25)*(alpha+0.25)))
                *stroke_alpha=1.0;
              else
                {
                  beta=1.0;
                  if (fabs(distance-1.0) >= MagickEpsilon)
                    beta=sqrt((double) distance);
                  alpha=beta-mid-0.5;
                  if (*stroke_alpha < ((alpha-0.25)*(alpha-0.25)))
                    *stroke_alpha=(alpha-0.25)*(alpha-0.25);
                }
            }
        }
      if ((fill == MagickFalse) || (distance > 1.0) || (subpath_alpha >= 1.0))
        continue;
      if (distance <= 0.0)
        {
          subpath_alpha=1.0;
          continue;
        }
      if (distance > 1.0)
        continue;
      if (fabs(beta) < MagickEpsilon)
        {
          beta=1.0;
          if (fabs(distance-1.0) >= MagickEpsilon)
            beta=sqrt(distance);
        }
      /* Anti-aliased fill edge: opacity falls off with distance. */
      alpha=beta-1.0;
      if (subpath_alpha < (alpha*alpha))
        subpath_alpha=alpha*alpha;
    }
  }
  /*
    Compute fill opacity.
  */
  if (fill == MagickFalse)
    return(0.0);
  if (subpath_alpha >= 1.0)
    return(1.0);
  /*
    Determine winding number.
  */
  winding_number=0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    if ((double) y <= p->bounds.y1)
      break;
    if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1))
      continue;
    if ((double) x > p->bounds.x2)
      {
        /* Edge lies wholly left of x: crossing direction counts directly. */
        winding_number+=p->direction ? 1 : -1;
        continue;
      }
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) (p->number_points-1); i++)
      if ((double) y <= p->points[i].y)
        break;
    q=p->points+i-1;
    /* Cross product sign decides which side of the edge (x,y) is on. */
    if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x)))
      winding_number+=p->direction ? 1 : -1;
  }
  if (fill_rule != NonZeroRule)
    {
      /* Even-odd rule: odd winding count means inside. */
      if ((MagickAbsoluteValue(winding_number) & 0x01) != 0)
        return(1.0);
    }
  else
    if (MagickAbsoluteValue(winding_number) != 0)
      return(1.0);
  return(subpath_alpha);
}
static MagickBooleanType DrawPolygonPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    fill,
    status;

  double
    mid;

  PolygonInfo
    **magick_restrict polygon_info;

  register EdgeInfo
    *p;

  register ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    start_y,
    stop_y,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  assert(primitive_info != (PrimitiveInfo *) NULL);
  /* Nothing to rasterize for a degenerate (0/1-point) primitive. */
  if (primitive_info->coordinates <= 1)
    return(MagickTrue);
  /*
    Compute bounding box.
  */
  polygon_info=AcquirePolygonThreadSet(primitive_info);
  if (polygon_info == (PolygonInfo **) NULL)
    return(MagickFalse);
  /* Debug aid, compiled in but disabled (if (0)). */
  DisableMSCWarning(4127)
  if (0)
    DrawBoundingRectangles(image,draw_info,polygon_info[0],exception);
  RestoreMSCWarning
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-polygon");
  fill=(primitive_info->method == FillToBorderMethod) ||
    (primitive_info->method == FloodfillMethod) ? MagickTrue : MagickFalse;
  mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
  /* Union of all edge bounding boxes, padded by half the stroke width. */
  bounds=polygon_info[0]->edges[0].bounds;
  for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++)
  {
    p=polygon_info[0]->edges+i;
    if (p->bounds.x1 < bounds.x1)
      bounds.x1=p->bounds.x1;
    if (p->bounds.y1 < bounds.y1)
      bounds.y1=p->bounds.y1;
    if (p->bounds.x2 > bounds.x2)
      bounds.x2=p->bounds.x2;
    if (p->bounds.y2 > bounds.y2)
      bounds.y2=p->bounds.y2;
  }
  bounds.x1-=(mid+1.0);
  bounds.y1-=(mid+1.0);
  bounds.x2+=(mid+1.0);
  bounds.y2+=(mid+1.0);
  if ((bounds.x1 >= (double) image->columns) ||
      (bounds.y1 >= (double) image->rows) ||
      (bounds.x2 <= 0.0) || (bounds.y2 <= 0.0))
    {
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      return(MagickTrue);  /* virtual polygon */
    }
  /* Clamp the bounding box to the image canvas. */
  bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x1;
  bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y1;
  bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x2;
  bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y2;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  if ((primitive_info->coordinates == 1) ||
      (polygon_info[0]->number_edges == 0))
    {
      /*
        Draw point.
      */
      start_y=(ssize_t) ceil(bounds.y1-0.5);
      stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
      for (y=start_y; y <= stop_y; y++)
      {
        MagickBooleanType
          sync;

        PixelInfo
          pixel;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        ssize_t
          start_x,
          stop_x;

        if (status == MagickFalse)
          continue;
        start_x=(ssize_t) ceil(bounds.x1-0.5);
        stop_x=(ssize_t) floor(bounds.x2+0.5);
        x=start_x;
        q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        GetPixelInfo(image,&pixel);
        for ( ; x <= stop_x; x++)
        {
          /* Only the single pixel matching the primitive's point is set. */
          if ((x == (ssize_t) ceil(primitive_info->point.x-0.5)) &&
              (y == (ssize_t) ceil(primitive_info->point.y-0.5)))
            {
              GetFillColor(draw_info,x-start_x,y-start_y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
            }
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          " end draw-polygon");
      return(status);
    }
  /*
    Draw polygon or line.
  */
  start_y=(ssize_t) ceil(bounds.y1-0.5);
  stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
  for (y=start_y; y <= stop_y; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      start_x,
      stop_x;

    if (status == MagickFalse)
      continue;
    start_x=(ssize_t) ceil(bounds.x1-0.5);
    stop_x=(ssize_t) floor(bounds.x2+0.5);
    q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+
      1),1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=start_x; x <= stop_x; x++)
    {
      double
        fill_alpha,
        stroke_alpha;

      PixelInfo
        fill_color,
        stroke_color;

      /*
        Fill and/or stroke.
      */
      /* polygon_info[id]: each OpenMP thread owns its own mutable edges. */
      fill_alpha=GetFillAlpha(polygon_info[id],mid,fill,draw_info->fill_rule,
        x,y,&stroke_alpha);
      if (draw_info->stroke_antialias == MagickFalse)
        {
          /* Hard threshold when antialiasing is off. */
          fill_alpha=fill_alpha > 0.25 ? 1.0 : 0.0;
          stroke_alpha=stroke_alpha > 0.25 ? 1.0 : 0.0;
        }
      GetFillColor(draw_info,x-start_x,y-start_y,&fill_color,exception);
      CompositePixelOver(image,&fill_color,fill_alpha*fill_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color,exception);
      CompositePixelOver(image,&stroke_color,stroke_alpha*stroke_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  polygon_info=DestroyPolygonThreadSet(polygon_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-polygon");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image.
%
% The format of the DrawPrimitive method is:
%
% MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info,
% PrimitiveInfo *primitive_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Emit a DrawEvent trace of a primitive.  Simple primitives (alpha, color,
  image, point, text) log a single line; path-style primitives log every
  vertex, flagging consecutive duplicate points and whether each subpath
  ends open or closed.
*/
static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info)
{
  const char
    *methods[] =
    {
      "point",
      "replace",
      "floodfill",
      "filltoborder",
      "reset",
      "?"
    };

  PointInfo
    p,
    q,
    point;

  register ssize_t
    i,
    x;

  ssize_t
    coordinates,
    y;

  x=(ssize_t) ceil(primitive_info->point.x-0.5);
  y=(ssize_t) ceil(primitive_info->point.y-0.5);
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "AlphaPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case ColorPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ColorPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case ImagePrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ImagePrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    case PointPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "PointPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case TextPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "TextPrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    default:
      break;
  }
  /*
    Path-style primitive: walk every vertex.  p holds the first point of the
    current subpath, q the previous point (seeded off-canvas so the first
    vertex never reads as a duplicate).
  */
  coordinates=0;
  p=primitive_info[0].point;
  q.x=(-1.0);
  q.y=(-1.0);
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    /* Redundant second assignment of `point` removed (was assigned twice). */
    point=primitive_info[i].point;
    if (coordinates <= 0)
      {
        /* Previous subpath exhausted: a new one starts here. */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          " begin open (%.20g)",(double) coordinates);
        p=point;
      }
    if ((fabs(q.x-point.x) >= MagickEpsilon) ||
        (fabs(q.y-point.y) >= MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y);
    q=point;
    coordinates--;
    if (coordinates > 0)
      continue;
    /* Subpath complete: closed if its last point returned to its first. */
    if ((fabs(p.x-point.x) >= MagickEpsilon) ||
        (fabs(p.y-point.y) >= MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," end last (%.20g)",
        (double) coordinates);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," end open (%.20g)",
        (double) coordinates);
  }
}
MagickExport MagickBooleanType DrawPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickStatusType
    status;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  if (image->debug != MagickFalse)
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " begin draw-primitive");
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx,
        draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy,
        draw_info->affine.tx,draw_info->affine.ty);
    }
  /* A non-gray fill/stroke on a grayscale canvas forces sRGB conversion. */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      ((IsPixelInfoGray(&draw_info->fill) == MagickFalse) ||
       (IsPixelInfoGray(&draw_info->stroke) == MagickFalse)))
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  status=MagickTrue;
  /* SVG semantics: install clipping/composite masks for the duration. */
  if (draw_info->compliance == SVGCompliance)
    {
      status=SetImageMask(image,WritePixelMask,draw_info->clipping_mask,
        exception);
      status&=SetImageMask(image,CompositePixelMask,draw_info->composite_mask,
        exception);
    }
  x=(ssize_t) ceil(primitive_info->point.x-0.5);
  y=(ssize_t) ceil(primitive_info->point.y-0.5);
  image_view=AcquireAuthenticCacheView(image,exception);
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    {
      /*
        Set the alpha channel only, leaving color channels untouched.
      */
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          /* Alpha of the single pixel at (x,y). */
          PixelInfo
            pixel;

          register Quantum
            *q;

          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (Quantum *) NULL)
            break;
          GetFillColor(draw_info,x,y,&pixel,exception);
          SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          /* Alpha of every pixel fuzzily matching the color at (x,y).
             NOTE: the scan reuses the outer x/y variables. */
          MagickBooleanType
            sync;

          PixelInfo
            pixel,
            target;

          (void) GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
            exception);
          GetPixelInfo(image,&pixel);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetPixelInfoPixel(image,q,&pixel);
              if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
                {
                  q+=GetPixelChannels(image);
                  continue;
                }
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
              q+=GetPixelChannels(image);
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          /* Flood-fill alpha from (x,y); FillToBorder stops at the
             border color instead of matching the seed color. */
          ChannelType
            channel_mask;

          PixelInfo
            target;

          (void) GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
            &target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              target.red=(double) draw_info->border_color.red;
              target.green=(double) draw_info->border_color.green;
              target.blue=(double) draw_info->border_color.blue;
            }
          /* Restrict the flood fill to the alpha channel only. */
          channel_mask=SetImageChannelMask(image,AlphaChannel);
          status&=FloodfillPaintImage(image,draw_info,&target,x,y,
            primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue,exception);
          (void) SetImageChannelMask(image,channel_mask);
          break;
        }
        case ResetMethod:
        {
          /* Set the alpha of every pixel in the image. */
          MagickBooleanType
            sync;

          PixelInfo
            pixel;

          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
              q+=GetPixelChannels(image);
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case ColorPrimitive:
    {
      /*
        Same four methods as AlphaPrimitive, but replacing whole pixels.
      */
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          PixelInfo
            pixel;

          register Quantum
            *q;

          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (Quantum *) NULL)
            break;
          GetPixelInfo(image,&pixel);
          GetFillColor(draw_info,x,y,&pixel,exception);
          SetPixelViaPixelInfo(image,&pixel,q);
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          MagickBooleanType
            sync;

          PixelInfo
            pixel,
            target;

          (void) GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
            exception);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetPixelInfoPixel(image,q,&pixel);
              if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
                {
                  q+=GetPixelChannels(image);
                  continue;
                }
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
              q+=GetPixelChannels(image);
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          PixelInfo
            target;

          (void) GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
            &target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              target.red=(double) draw_info->border_color.red;
              target.green=(double) draw_info->border_color.green;
              target.blue=(double) draw_info->border_color.blue;
            }
          status&=FloodfillPaintImage(image,draw_info,&target,x,y,
            primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue,exception);
          break;
        }
        case ResetMethod:
        {
          MagickBooleanType
            sync;

          PixelInfo
            pixel;

          GetPixelInfo(image,&pixel);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
              q+=GetPixelChannels(image);
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case ImagePrimitive:
    {
      /*
        Read an external (or inline data: URI) image, optionally resize it
        to the primitive's second point, and composite it at (x,y) through
        the current affine transform.
      */
      AffineMatrix
        affine;

      char
        composite_geometry[MagickPathExtent];

      Image
        *composite_image;

      ImageInfo
        *clone_info;

      RectangleInfo
        geometry;

      ssize_t
        x1,
        y1;

      if (primitive_info->text == (char *) NULL)
        break;
      clone_info=AcquireImageInfo();
      if (LocaleNCompare(primitive_info->text,"data:",5) == 0)
        composite_image=ReadInlineImage(clone_info,primitive_info->text,
          exception);
      else
        {
          (void) CopyMagickString(clone_info->filename,primitive_info->text,
            MagickPathExtent);
          composite_image=ReadImage(clone_info,exception);
        }
      clone_info=DestroyImageInfo(clone_info);
      if (composite_image == (Image *) NULL)
        {
          /* Failed read marks the whole primitive as failed. */
          status=0;
          break;
        }
      (void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor)
        NULL,(void *) NULL);
      x1=(ssize_t) ceil(primitive_info[1].point.x-0.5);
      y1=(ssize_t) ceil(primitive_info[1].point.y-0.5);
      if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) ||
          ((y1 != 0L) && (y1 != (ssize_t) composite_image->rows)))
        {
          /*
            Resize image.
          */
          (void) FormatLocaleString(composite_geometry,MagickPathExtent,
            "%gx%g!",primitive_info[1].point.x,primitive_info[1].point.y);
          composite_image->filter=image->filter;
          (void) TransformImage(&composite_image,(char *) NULL,
            composite_geometry,exception);
        }
      if (composite_image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(composite_image,OpaqueAlphaChannel,
          exception);
      if (draw_info->alpha != OpaqueAlpha)
        (void) SetImageAlpha(composite_image,draw_info->alpha,exception);
      SetGeometry(image,&geometry);
      image->gravity=draw_info->gravity;
      geometry.x=x;
      geometry.y=y;
      (void) FormatLocaleString(composite_geometry,MagickPathExtent,
        "%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double)
        composite_image->rows,(double) geometry.x,(double) geometry.y);
      (void) ParseGravityGeometry(image,composite_geometry,&geometry,exception);
      affine=draw_info->affine;
      affine.tx=(double) geometry.x;
      affine.ty=(double) geometry.y;
      composite_image->interpolate=image->interpolate;
      status&=DrawAffineImage(image,composite_image,&affine,exception);
      composite_image=DestroyImage(composite_image);
      break;
    }
    case PointPrimitive:
    {
      /*
        Composite the fill color over a single pixel; points outside the
        canvas are silently skipped.
      */
      PixelInfo
        fill_color;

      register Quantum
        *q;

      if ((y < 0) || (y >= (ssize_t) image->rows))
        break;
      if ((x < 0) || (x >= (ssize_t) image->columns))
        break;
      q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
      if (q == (Quantum *) NULL)
        break;
      GetFillColor(draw_info,x,y,&fill_color,exception);
      CompositePixelOver(image,&fill_color,(double) fill_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      (void) SyncCacheViewAuthenticPixels(image_view,exception);
      break;
    }
    case TextPrimitive:
    {
      /*
        Delegate text rendering to AnnotateImage() with the primitive's
        point encoded as the annotation geometry.
      */
      char
        geometry[MagickPathExtent];

      DrawInfo
        *clone_info;

      if (primitive_info->text == (char *) NULL)
        break;
      clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
      (void) CloneString(&clone_info->text,primitive_info->text);
      (void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
        primitive_info->point.x,primitive_info->point.y);
      (void) CloneString(&clone_info->geometry,geometry);
      status&=AnnotateImage(image,clone_info,exception);
      clone_info=DestroyDrawInfo(clone_info);
      break;
    }
    default:
    {
      /*
        All path-style primitives (lines, polygons, curves, ...) end up
        here and are rasterized as polygons.
      */
      double
        mid,
        scale;

      DrawInfo
        *clone_info;

      if (IsEventLogging() != MagickFalse)
        LogPrimitiveInfo(primitive_info);
      scale=ExpandAffine(&draw_info->affine);
      if ((draw_info->dash_pattern != (double *) NULL) &&
          (fabs(draw_info->dash_pattern[0]) >= MagickEpsilon) &&
          (fabs(scale*draw_info->stroke_width) >= MagickEpsilon) &&
          (draw_info->stroke.alpha != (Quantum) TransparentAlpha))
        {
          /*
            Draw dash polygon.
          */
          /* Fill pass with an invisible stroke, then the dashed stroke. */
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
            exception);
          clone_info=DestroyDrawInfo(clone_info);
          (void) DrawDashPolygon(draw_info,primitive_info,image,exception);
          break;
        }
      mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
      if ((mid > 1.0) &&
          ((draw_info->stroke.alpha != (Quantum) TransparentAlpha) ||
           (draw_info->stroke_pattern != (Image *) NULL)))
        {
          double
            x,
            y;

          MagickBooleanType
            closed_path;

          /*
            Draw strokes while respecting line cap/join attributes.
          */
          closed_path=primitive_info[0].closed_subpath;
          i=(ssize_t) primitive_info[0].coordinates;
          /* A path whose last point coincides with its first is closed. */
          x=fabs(primitive_info[i-1].point.x-primitive_info[0].point.x);
          y=fabs(primitive_info[i-1].point.y-primitive_info[0].point.y);
          if ((x < MagickEpsilon) && (y < MagickEpsilon))
            closed_path=MagickTrue;
          if ((((draw_info->linecap == RoundCap) ||
                (closed_path != MagickFalse)) &&
               (draw_info->linejoin == RoundJoin)) ||
              (primitive_info[i].primitive != UndefinedPrimitive))
            {
              /* Round caps/joins (or multi-subpath): plain polygon pass. */
              (void) DrawPolygonPrimitive(image,draw_info,primitive_info,
                exception);
              break;
            }
          /* Otherwise fill first, then trace the stroke as its own polygon. */
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
            exception);
          clone_info=DestroyDrawInfo(clone_info);
          status&=DrawStrokePolygon(image,draw_info,primitive_info,exception);
          break;
        }
      status&=DrawPolygonPrimitive(image,draw_info,primitive_info,exception);
      break;
    }
  }
  image_view=DestroyCacheView(image_view);
  /* Remove the SVG masks installed above. */
  if (draw_info->compliance == SVGCompliance)
    {
      status&=SetImageMask(image,WritePixelMask,(Image *) NULL,exception);
      status&=SetImageMask(image,CompositePixelMask,(Image *) NULL,exception);
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w S t r o k e P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on
% the image while respecting the line cap and join attributes.
%
% The format of the DrawStrokePolygon method is:
%
% MagickBooleanType DrawStrokePolygon(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static void DrawRoundLinecap(Image *image,const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info,ExceptionInfo *exception)
{
  PrimitiveInfo
    cap[5];

  register ssize_t
    j;

  /*
    Synthesize a degenerate four-point polygon at the endpoint, its vertices
    perturbed by a couple of epsilons; rasterizing it paints the round cap.
  */
  for (j=0; j < 4; j++)
    cap[j]=primitive_info[0];
  cap[0].coordinates=4;
  cap[1].point.x+=2.0*MagickEpsilon;
  cap[2].point.x+=2.0*MagickEpsilon;
  cap[2].point.y+=2.0*MagickEpsilon;
  cap[3].point.y+=2.0*MagickEpsilon;
  cap[4].primitive=UndefinedPrimitive;
  (void) DrawPolygonPrimitive(image,draw_info,cap,exception);
}
static MagickBooleanType DrawStrokePolygon(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  DrawInfo
    *fill_info;

  MagickBooleanType
    is_closed;

  MagickStatusType
    status;

  PrimitiveInfo
    *outline;

  register const PrimitiveInfo
    *last,
    *subpath;

  /*
    Render the stroke of each subpath by tracing its outline and filling
    that outline with the stroke color/pattern.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " begin draw-stroke-polygon");
  /*
    Build a draw context whose fill is the caller's stroke and whose own
    stroke is disabled, so the traced outline is filled, not stroked.
  */
  fill_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  fill_info->fill=draw_info->stroke;
  if (fill_info->fill_pattern != (Image *) NULL)
    fill_info->fill_pattern=DestroyImage(fill_info->fill_pattern);
  if (fill_info->stroke_pattern != (Image *) NULL)
    fill_info->fill_pattern=CloneImage(fill_info->stroke_pattern,0,0,
      MagickTrue,exception);
  fill_info->stroke.alpha=(MagickRealType) TransparentAlpha;
  fill_info->stroke_width=0.0;
  fill_info->fill_rule=NonZeroRule;
  status=MagickTrue;
  for (subpath=primitive_info; subpath->primitive != UndefinedPrimitive;
       subpath+=subpath->coordinates)
  {
    if (subpath->coordinates == 1)
      continue;
    outline=TraceStrokePolygon(image,draw_info,subpath);
    if (outline == (PrimitiveInfo *) NULL)
      {
        status=0;
        outline=(PrimitiveInfo *) RelinquishMagickMemory(outline);
        break;
      }
    status&=DrawPolygonPrimitive(image,fill_info,outline,exception);
    outline=(PrimitiveInfo *) RelinquishMagickMemory(outline);
    if (status == 0)
      break;
    last=subpath+subpath->coordinates-1;
    is_closed=subpath->closed_subpath;
    if ((draw_info->linecap == RoundCap) && (is_closed == MagickFalse))
      {
        /* Open subpath with round caps: cap both endpoints. */
        DrawRoundLinecap(image,draw_info,subpath,exception);
        DrawRoundLinecap(image,draw_info,last,exception);
      }
  }
  fill_info=DestroyDrawInfo(fill_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " end draw-stroke-polygon");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A f f i n e M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAffineMatrix() returns an AffineMatrix initialized to the identity
% matrix.
%
% The format of the GetAffineMatrix method is:
%
% void GetAffineMatrix(AffineMatrix *affine_matrix)
%
% A description of each parameter follows:
%
% o affine_matrix: the affine matrix.
%
*/
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
  /*
    Reset the matrix to the identity transform: zero out every coefficient
    (shear rx/ry and translation tx/ty), then set the scales to one.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(affine_matrix != (AffineMatrix *) NULL);
  (void) memset(affine_matrix,0,sizeof(*affine_matrix));
  affine_matrix->sy=1.0;
  affine_matrix->sx=1.0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetDrawInfo() initializes draw_info to default values from image_info.
%
% The format of the GetDrawInfo method is:
%
% void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
  char
    *next_token;
  const char
    *option;
  ExceptionInfo
    *exception;
  ImageInfo
    *clone_info;
  /*
    Initialize draw attributes.  The draw_info structure is zeroed first and
    then populated with built-in defaults; any matching image options set on
    image_info (via a cloned copy) override those defaults below.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info != (DrawInfo *) NULL);
  (void) memset(draw_info,0,sizeof(*draw_info));
  clone_info=CloneImageInfo(image_info);
  GetAffineMatrix(&draw_info->affine);
  exception=AcquireExceptionInfo();
  /*
    Default paints: "#000F" for fill and "#FFF0" for stroke (the stroke
    default is fully transparent, i.e. no visible stroke).
  */
  (void) QueryColorCompliance("#000F",AllCompliance,&draw_info->fill,
    exception);
  (void) QueryColorCompliance("#FFF0",AllCompliance,&draw_info->stroke,
    exception);
  draw_info->stroke_antialias=clone_info->antialias;
  draw_info->stroke_width=1.0;
  draw_info->fill_rule=EvenOddRule;
  draw_info->alpha=OpaqueAlpha;
  draw_info->fill_alpha=OpaqueAlpha;
  draw_info->stroke_alpha=OpaqueAlpha;
  draw_info->linecap=ButtCap;
  draw_info->linejoin=MiterJoin;
  draw_info->miterlimit=10;
  draw_info->decorate=NoDecoration;
  draw_info->pointsize=12.0;
  draw_info->undercolor.alpha=(MagickRealType) TransparentAlpha;
  draw_info->compose=OverCompositeOp;
  draw_info->render=MagickTrue;
  draw_info->clip_path=MagickFalse;
  draw_info->debug=IsEventLogging();
  /*
    Inherit font, density and text settings from the (cloned) image info.
  */
  if (clone_info->font != (char *) NULL)
    draw_info->font=AcquireString(clone_info->font);
  if (clone_info->density != (char *) NULL)
    draw_info->density=AcquireString(clone_info->density);
  draw_info->text_antialias=clone_info->antialias;
  /* a zero pointsize in clone_info means "unset"; keep the 12.0 default */
  if (fabs(clone_info->pointsize) >= MagickEpsilon)
    draw_info->pointsize=clone_info->pointsize;
  draw_info->border_color=clone_info->border_color;
  if (clone_info->server_name != (char *) NULL)
    draw_info->server_name=AcquireString(clone_info->server_name);
  /*
    Apply any image options (e.g. set with -define/-set) that override the
    defaults established above.
  */
  option=GetImageOption(clone_info,"direction");
  if (option != (const char *) NULL)
    draw_info->direction=(DirectionType) ParseCommandOption(
      MagickDirectionOptions,MagickFalse,option);
  else
    draw_info->direction=UndefinedDirection;
  option=GetImageOption(clone_info,"encoding");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->encoding,option);
  option=GetImageOption(clone_info,"family");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->family,option);
  option=GetImageOption(clone_info,"fill");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->fill,
      exception);
  option=GetImageOption(clone_info,"gravity");
  if (option != (const char *) NULL)
    draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"interline-spacing");
  if (option != (const char *) NULL)
    draw_info->interline_spacing=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"interword-spacing");
  if (option != (const char *) NULL)
    draw_info->interword_spacing=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"kerning");
  if (option != (const char *) NULL)
    draw_info->kerning=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"stroke");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->stroke,
      exception);
  option=GetImageOption(clone_info,"strokewidth");
  if (option != (const char *) NULL)
    draw_info->stroke_width=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"style");
  if (option != (const char *) NULL)
    draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"undercolor");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->undercolor,
      exception);
  option=GetImageOption(clone_info,"weight");
  if (option != (const char *) NULL)
    {
      ssize_t
        weight;
      /* accept either a symbolic weight (e.g. "bold") or a numeric value */
      weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option);
      if (weight == -1)
        weight=(ssize_t) StringToUnsignedLong(option);
      draw_info->weight=(size_t) weight;
    }
  /*
    Release working copies acquired above.
  */
  exception=DestroyExceptionInfo(exception);
  draw_info->signature=MagickCoreSignature;
  clone_info=DestroyImageInfo(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r m u t a t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Permutate() returns the permutation of the (n,k).
%
% The format of the Permutate method is:
%
% void Permutate(ssize_t n,ssize_t k)
%
% A description of each parameter follows:
%
% o n:
%
% o k:
%
%
*/
static inline double Permutate(const ssize_t n,const ssize_t k)
{
  double
    result;
  ssize_t
    i;
  /*
    Evaluate the binomial coefficient C(n,k)=n!/(k!*(n-k)!) as a double:
    accumulate the rising product (k+1)*...*n, then divide by (n-k)!.
  */
  result=1.0;
  i=k+1;
  while (i <= n)
    result*=(double) i++;
  i=1;
  while (i <= (n-k))
    result/=(double) i++;
  return(result);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a c e P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TracePrimitive is a collection of methods for generating graphic
% primitives such as arcs, ellipses, paths, etc.
%
*/
static void TraceArc(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo degrees)
{
  PointInfo
    extent,
    midpoint;
  /*
    An arc is traced as an ellipse centered on the midpoint of the chord,
    with radii equal to that center's distance from the start point.
  */
  midpoint.x=(start.x+end.x)/2.0;
  midpoint.y=(start.y+end.y)/2.0;
  extent.x=fabs(midpoint.x-start.x);
  extent.y=fabs(midpoint.y-start.y);
  TraceEllipse(mvg_info,midpoint,extent,degrees);
}
/*
  Trace an SVG-style elliptical arc from 'start' to 'end' with radii 'arc',
  x-axis rotation 'angle', and the large-arc/sweep flags, by converting the
  arc to a sequence of cubic Bezier segments (one per quarter turn) emitted
  through TraceBezier().  Degenerate inputs collapse to a point or a line.
*/
static void TraceArcPath(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo arc,const double angle,
  const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
  double
    alpha,
    beta,
    delta,
    factor,
    gamma,
    theta;
  PointInfo
    center,
    points[3],
    radii;
  register double
    cosine,
    sine;
  PrimitiveInfo
    *primitive_info;
  register PrimitiveInfo
    *p;
  register ssize_t
    i;
  size_t
    arc_segments;
  ssize_t
    offset;
  offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  /* coincident endpoints: the arc degenerates to a single point */
  if ((fabs(start.x-end.x) < MagickEpsilon) &&
      (fabs(start.y-end.y) < MagickEpsilon))
    {
      TracePoint(primitive_info,end);
      return;
    }
  radii.x=fabs(arc.x);
  radii.y=fabs(arc.y);
  /* zero radius: the arc degenerates to a straight line */
  if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon))
    {
      TraceLine(primitive_info,start,end);
      return;
    }
  /*
    Rotate into the ellipse's frame and, if the radii are too small to span
    the endpoints (delta > 1), scale them up uniformly.
  */
  cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
  sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
  center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2);
  center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2);
  delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/
    (radii.y*radii.y);
  if (delta < MagickEpsilon)
    {
      TraceLine(primitive_info,start,end);
      return;
    }
  if (delta > 1.0)
    {
      radii.x*=sqrt((double) delta);
      radii.y*=sqrt((double) delta);
    }
  /*
    Compute the arc's center and the start angle (alpha) and angular extent
    (theta) in the unit-circle parameterization; the sweep/large_arc flags
    select which of the two candidate centers and directions to use.
  */
  points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x);
  points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y);
  points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x);
  points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y);
  alpha=points[1].x-points[0].x;
  beta=points[1].y-points[0].y;
  factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25;
  if (factor <= 0.0)
    factor=0.0;
  else
    {
      factor=sqrt((double) factor);
      if (sweep == large_arc)
        factor=(-factor);
    }
  center.x=(double) ((points[0].x+points[1].x)/2-factor*beta);
  center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha);
  alpha=atan2(points[0].y-center.y,points[0].x-center.x);
  theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha;
  if ((theta < 0.0) && (sweep != MagickFalse))
    theta+=2.0*MagickPI;
  else
    if ((theta > 0.0) && (sweep == MagickFalse))
      theta-=2.0*MagickPI;
  /* one Bezier segment per (at most) quarter turn of the arc */
  arc_segments=(size_t) ceil(fabs((double) (theta/(0.5*MagickPI+MagickEpsilon))));
  p=primitive_info;
  for (i=0; i < (ssize_t) arc_segments; i++)
  {
    /* gamma is the control-point distance for this segment's Bezier fit */
    beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments));
    gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))*
      sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/
      sin(fmod((double) beta,DegreesToRadians(360.0)));
    points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    /* lay down the 4 control points, rotated back into user space */
    p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x;
    p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y;
    (p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y*
      points[0].y);
    (p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y*
      points[0].y);
    (p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y*
      points[1].y);
    (p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y*
      points[1].y);
    (p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y*
      points[2].y);
    (p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y*
      points[2].y);
    if (i == (ssize_t) (arc_segments-1))
      (p+3)->point=end;
    TraceBezier(mvg_info,4);
    /* TraceBezier may reallocate; re-derive p from the (possibly new) base */
    p=(*mvg_info->primitive_info)+mvg_info->offset;
    mvg_info->offset+=p->coordinates;
    p+=p->coordinates;
  }
  /* rewind to the arc's first primitive and stamp the total coordinates */
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
}
/*
  Expand the Bezier curve whose 'number_coordinates' control points are
  already stored at the current mvg_info offset into a polyline of sampled
  points (evaluated with the Bernstein/binomial form), written back in place.
*/
static void TraceBezier(MVGInfo *mvg_info,const size_t number_coordinates)
{
  double
    alpha,
    *coefficients,
    weight;
  PointInfo
    end,
    point,
    *points;
  PrimitiveInfo
    *primitive_info;
  register PrimitiveInfo
    *p;
  register ssize_t
    i,
    j;
  size_t
    control_points,
    quantum;
  /*
    Allocate coefficients.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  quantum=number_coordinates;
  /* scale the sample count with the curve's bounding extent, capped below */
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    for (j=i+1; j < (ssize_t) number_coordinates; j++)
    {
      alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x);
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
      alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y);
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
    }
  }
  quantum=(size_t) MagickMin((double) quantum/number_coordinates,
    (double) BezierQuantum);
  control_points=quantum*number_coordinates;
  if (CheckPrimitiveExtent(mvg_info,control_points+1) == MagickFalse)
    return;
  /* CheckPrimitiveExtent may reallocate; re-derive primitive_info */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  coefficients=(double *) AcquireQuantumMemory((size_t)
    number_coordinates,sizeof(*coefficients));
  points=(PointInfo *) AcquireQuantumMemory((size_t) control_points,
    sizeof(*points));
  if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL))
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /*
    Compute bezier points.
  */
  end=primitive_info[number_coordinates-1].point;
  /* binomial coefficients of the Bernstein basis */
  for (i=0; i < (ssize_t) number_coordinates; i++)
    coefficients[i]=Permutate((ssize_t) number_coordinates-1,i);
  weight=0.0;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    p=primitive_info;
    point.x=0.0;
    point.y=0.0;
    /* evaluate the curve at parameter 'weight' via the Bernstein sum */
    alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0);
    for (j=0; j < (ssize_t) number_coordinates; j++)
    {
      point.x+=alpha*coefficients[j]*p->point.x;
      point.y+=alpha*coefficients[j]*p->point.y;
      alpha*=weight/(1.0-weight);
      p++;
    }
    points[i]=point;
    weight+=1.0/control_points;
  }
  /*
    Bezier curves are just short segmented polys.
  */
  p=primitive_info;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    TracePoint(p,points[i]);
    p+=p->coordinates;
  }
  /* terminate exactly on the final control point */
  TracePoint(p,end);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  points=(PointInfo *) RelinquishMagickMemory(points);
  coefficients=(double *) RelinquishMagickMemory(coefficients);
}
static void TraceCircle(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end)
{
  double
    dx,
    dy;
  PointInfo
    degrees,
    radius;
  /*
    A circle is an ellipse with equal radii, swept through a full 360
    degrees; 'end' is a point on the perimeter, fixing the radius.
  */
  dx=end.x-start.x;
  dy=end.y-start.y;
  radius.x=hypot(dx,dy);
  radius.y=radius.x;
  degrees.x=0.0;
  degrees.y=360.0;
  TraceEllipse(mvg_info,start,radius,degrees);
}
/*
  Trace the arc of an ellipse (center, radii, start/end angles in degrees)
  as a polyline of sampled points; a degenerate radius produces nothing.
*/
static void TraceEllipse(MVGInfo *mvg_info,const PointInfo center,
  const PointInfo radii,const PointInfo arc)
{
  double
    delta,
    step,
    x,
    y;
  PointInfo
    angle,
    point;
  PrimitiveInfo
    *primitive_info;
  register PrimitiveInfo
    *p;
  register ssize_t
    i;
  size_t
    extent;
  /*
    Ellipses are just short segmented polys.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon))
    return;
  /* pick an angular step small enough for the larger radius */
  delta=2.0*PerceptibleReciprocal(MagickMax(radii.x,radii.y));
  step=MagickPI/8.0;
  if ((delta >= 0.0) && (delta < (MagickPI/8.0)))
    step=MagickPI/(4.0*(MagickPI*PerceptibleReciprocal(delta)/2.0));
  angle.x=DegreesToRadians(arc.x);
  /* normalize the end angle so the sweep is non-negative */
  y=arc.y;
  while (y < arc.x)
    y+=360.0;
  angle.y=DegreesToRadians(y);
  extent=(size_t) ceil((angle.y-angle.x)/step)+1;
  if (CheckPrimitiveExtent(mvg_info,extent) == MagickFalse)
    return;
  /* CheckPrimitiveExtent may reallocate; re-derive primitive_info */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  for (p=primitive_info; angle.x < angle.y; angle.x+=step)
  {
    point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*radii.x+center.x;
    point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*radii.y+center.y;
    TracePoint(p,point);
    p+=p->coordinates;
  }
  /* land exactly on the requested end angle */
  point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*radii.x+center.x;
  point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*radii.y+center.y;
  TracePoint(p,point);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  /* a full sweep ends where it started: mark the subpath closed */
  x=fabs(primitive_info[0].point.x-
    primitive_info[primitive_info->coordinates-1].point.x);
  y=fabs(primitive_info[0].point.y-
    primitive_info[primitive_info->coordinates-1].point.y);
  if ((x < MagickEpsilon) && (y < MagickEpsilon))
    primitive_info->closed_subpath=MagickTrue;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
}
static void TraceLine(PrimitiveInfo *primitive_info,const PointInfo start,
  const PointInfo end)
{
  MagickBooleanType
    degenerate;
  /*
    Record the starting vertex; a zero-length segment degrades to a point.
  */
  TracePoint(primitive_info,start);
  degenerate=((fabs(start.x-end.x) < MagickEpsilon) &&
    (fabs(start.y-end.y) < MagickEpsilon)) ? MagickTrue : MagickFalse;
  if (degenerate != MagickFalse)
    {
      primitive_info->primitive=PointPrimitive;
      primitive_info->coordinates=1;
      return;
    }
  /*
    Emit the end vertex and mark the pair as one open two-point segment.
  */
  TracePoint(primitive_info+1,end);
  (primitive_info+1)->primitive=primitive_info->primitive;
  primitive_info->closed_subpath=MagickFalse;
  primitive_info->coordinates=2;
}
/*
  Parse an SVG-style path string ("M 10,20 L 30,40 ... Z") and trace the
  resulting primitives into mvg_info's primitive buffer.  Uppercase commands
  use absolute coordinates, lowercase relative ones.  Returns the total
  number of coordinates traced, or 0 on a parse/allocation failure.
*/
static size_t TracePath(MVGInfo *mvg_info,const char *path,
  ExceptionInfo *exception)
{
  char
    *next_token,
    token[MagickPathExtent];
  const char
    *p;
  double
    x,
    y;
  int
    attribute,
    last_attribute;
  MagickBooleanType
    status;
  PointInfo
    end = {0.0, 0.0},
    points[4] = { {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} },
    point = {0.0, 0.0},
    start = {0.0, 0.0};
  PrimitiveInfo
    *primitive_info;
  PrimitiveType
    primitive_type;
  register PrimitiveInfo
    *q;
  register ssize_t
    i;
  size_t
    number_coordinates,
    z_count;
  ssize_t
    subpath_offset;
  subpath_offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  status=MagickTrue;
  attribute=0;
  number_coordinates=0;
  z_count=0;
  primitive_type=primitive_info->primitive;
  q=primitive_info;
  for (p=path; *p != '\0'; )
  {
    if (status == MagickFalse)
      break;
    while (isspace((int) ((unsigned char) *p)) != 0)
      p++;
    if (*p == '\0')
      break;
    last_attribute=attribute;
    attribute=(int) (*p++);
    switch (attribute)
    {
      case 'a':
      case 'A':
      {
        double
          angle = 0.0;
        MagickBooleanType
          large_arc = MagickFalse,
          sweep = MagickFalse;
        PointInfo
          arc = {0.0, 0.0};
        /*
          Elliptical arc.
        */
        do
        {
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          arc.x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          arc.y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          angle=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          end.x=(double) (attribute == (int) 'A' ? x : point.x+x);
          end.y=(double) (attribute == (int) 'A' ? y : point.y+y);
          TraceArcPath(mvg_info,point,end,arc,angle,large_arc,sweep);
          /* the trace may reallocate; re-derive q from the buffer base */
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'c':
      case 'C':
      {
        /*
          Cubic Bézier curve.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 4; i++)
          {
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            end.x=(double) (attribute == (int) 'C' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'C' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          TraceBezier(mvg_info,4);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'H':
      case 'h':
      {
        /*
          Horizontal line to.
        */
        do
        {
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'H' ? x: point.x+x);
          if (CheckPrimitiveExtent(mvg_info,4096) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          TracePoint(q,point);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'l':
      case 'L':
      {
        /*
          Line to.
        */
        do
        {
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'L' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'L' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,4096) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          TracePoint(q,point);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'M':
      case 'm':
      {
        /*
          Move to.
        */
        if (mvg_info->offset != subpath_offset)
          {
            /* close the bookkeeping for the previous (open) subpath */
            primitive_info=(*mvg_info->primitive_info)+subpath_offset;
            primitive_info->coordinates=(size_t) (q-primitive_info);
            number_coordinates+=primitive_info->coordinates;
            primitive_info=q;
            subpath_offset=mvg_info->offset;
          }
        i=0;
        do
        {
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'M' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'M' ? y : point.y+y);
          if (i == 0)
            start=point;
          i++;
          if (CheckPrimitiveExtent(mvg_info,4096) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          TracePoint(q,point);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'q':
      case 'Q':
      {
        /*
          Quadratic Bézier curve.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 3; i++)
          {
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'Q' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'Q' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          TraceBezier(mvg_info,3);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 's':
      case 'S':
      {
        /*
          Cubic Bézier curve (smooth: first control point reflects the
          previous curve's second control point).
        */
        do
        {
          points[0]=points[3];
          points[1].x=2.0*points[3].x-points[2].x;
          points[1].y=2.0*points[3].y-points[2].y;
          for (i=2; i < 4; i++)
          {
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'S' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'S' ? y : point.y+y);
            points[i]=end;
          }
          /* no preceding curve command: reflection collapses to 'point' */
          if (strchr("CcSs",last_attribute) == (char *) NULL)
            {
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          TraceBezier(mvg_info,4);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 't':
      case 'T':
      {
        /*
          Quadratic Bézier curve (smooth: control point reflects the
          previous curve's control point).
        */
        do
        {
          points[0]=points[2];
          points[1].x=2.0*points[2].x-points[1].x;
          points[1].y=2.0*points[2].y-points[1].y;
          for (i=2; i < 3; i++)
          {
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            end.x=(double) (attribute == (int) 'T' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'T' ? y : point.y+y);
            points[i]=end;
          }
          if (status == MagickFalse)
            break;
          /* no preceding quadratic command: reflection collapses to 'point' */
          if (strchr("QqTt",last_attribute) == (char *) NULL)
            {
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          TraceBezier(mvg_info,3);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'v':
      case 'V':
      {
        /*
          Vertical line to.
        */
        do
        {
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.y=(double) (attribute == (int) 'V' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,4096) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          TracePoint(q,point);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'z':
      case 'Z':
      {
        /*
          Close path.
        */
        point=start;
        if (CheckPrimitiveExtent(mvg_info,4096) == MagickFalse)
          return(0);
        q=(*mvg_info->primitive_info)+mvg_info->offset;
        TracePoint(q,point);
        mvg_info->offset+=q->coordinates;
        q+=q->coordinates;
        /* finalize this subpath and start bookkeeping for the next one */
        primitive_info=(*mvg_info->primitive_info)+subpath_offset;
        primitive_info->coordinates=(size_t) (q-primitive_info);
        primitive_info->closed_subpath=MagickTrue;
        number_coordinates+=primitive_info->coordinates;
        primitive_info=q;
        subpath_offset=mvg_info->offset;
        z_count++;
        break;
      }
      default:
      {
        /* unrecognized path command */
        ThrowPointExpectedException(token,exception);
        break;
      }
    }
  }
  if (status == MagickFalse)
    return(0);
  /* finalize the last (possibly unclosed) subpath */
  primitive_info=(*mvg_info->primitive_info)+subpath_offset;
  primitive_info->coordinates=(size_t) (q-primitive_info);
  number_coordinates+=primitive_info->coordinates;
  /* stamp the primitive type on every traced coordinate, back to front */
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    q--;
    q->primitive=primitive_type;
    if (z_count > 1)
      q->method=FillToBorderMethod;
  }
  q=primitive_info;
  return(number_coordinates);
}
static void TraceRectangle(PrimitiveInfo *primitive_info,const PointInfo start,
  const PointInfo end)
{
  PointInfo
    corner;
  register PrimitiveInfo
    *q;
  register ssize_t
    i;
  /*
    A degenerate rectangle (zero width or height) emits no coordinates.
  */
  if ((fabs(start.x-end.x) < MagickEpsilon) ||
      (fabs(start.y-end.y) < MagickEpsilon))
    {
      primitive_info->coordinates=0;
      return;
    }
  /*
    Trace the four corners and close the subpath back on the start point.
  */
  q=primitive_info;
  TracePoint(q,start);
  q+=q->coordinates;
  corner.x=start.x;
  corner.y=end.y;
  TracePoint(q,corner);
  q+=q->coordinates;
  TracePoint(q,end);
  q+=q->coordinates;
  corner.x=end.x;
  corner.y=start.y;
  TracePoint(q,corner);
  q+=q->coordinates;
  TracePoint(q,start);
  q+=q->coordinates;
  primitive_info->coordinates=(size_t) (q-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    q->primitive=primitive_info->primitive;
    q--;
  }
}
/*
  Trace a rounded rectangle spanning 'start'..'end' whose corners are
  quarter-ellipses of radii 'arc' (clamped to half the rectangle's extent),
  built from four TraceEllipse() quarter arcs plus a closing point.
*/
static void TraceRoundRectangle(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,PointInfo arc)
{
  PointInfo
    degrees,
    point,
    segment;
  PrimitiveInfo
    *primitive_info;
  register PrimitiveInfo
    *p;
  register ssize_t
    i;
  ssize_t
    offset;
  offset=mvg_info->offset;
  segment.x=fabs(end.x-start.x);
  segment.y=fabs(end.y-start.y);
  /* degenerate rectangle: emit nothing */
  if ((segment.x < MagickEpsilon) || (segment.y < MagickEpsilon))
    {
      (*mvg_info->primitive_info+mvg_info->offset)->coordinates=0;
      return;
    }
  /* corner radii cannot exceed half the rectangle's width/height */
  if (arc.x > (0.5*segment.x))
    arc.x=0.5*segment.x;
  if (arc.y > (0.5*segment.y))
    arc.y=0.5*segment.y;
  /* top-right corner: 270..360 degrees */
  point.x=start.x+segment.x-arc.x;
  point.y=start.y+arc.y;
  degrees.x=270.0;
  degrees.y=360.0;
  TraceEllipse(mvg_info,point,arc,degrees);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /* bottom-right corner: 0..90 degrees */
  point.x=start.x+segment.x-arc.x;
  point.y=start.y+segment.y-arc.y;
  degrees.x=0.0;
  degrees.y=90.0;
  TraceEllipse(mvg_info,point,arc,degrees);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /* bottom-left corner: 90..180 degrees */
  point.x=start.x+arc.x;
  point.y=start.y+segment.y-arc.y;
  degrees.x=90.0;
  degrees.y=180.0;
  TraceEllipse(mvg_info,point,arc,degrees);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /* top-left corner: 180..270 degrees */
  point.x=start.x+arc.x;
  point.y=start.y+arc.y;
  degrees.x=180.0;
  degrees.y=270.0;
  TraceEllipse(mvg_info,point,arc,degrees);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  if (CheckPrimitiveExtent(mvg_info,4096) == MagickFalse)
    return;
  /* close the subpath back on the first traced point */
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  TracePoint(p,(*mvg_info->primitive_info+offset)->point);
  p+=p->coordinates;
  /* rewind to the first primitive and stamp the combined coordinate count */
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
}
/*
  Extend both ends of an open stroke outward by 'offset' along the direction
  of its first and last non-degenerate segments, producing square line caps.
  The endpoints of primitive_info[0 .. number_vertices-1] are moved in place.
*/
static void TraceSquareLinecap(PrimitiveInfo *primitive_info,
  const size_t number_vertices,const double offset)
{
  double
    distance;
  register double
    dx,
    dy;
  register ssize_t
    i;
  ssize_t
    j;
  dx=0.0;
  dy=0.0;
  /* find the first vertex that is measurably apart from the start point */
  for (i=1; i < (ssize_t) number_vertices; i++)
  {
    dx=primitive_info[0].point.x-primitive_info[i].point.x;
    dy=primitive_info[0].point.y-primitive_info[i].point.y;
    if ((fabs((double) dx) >= MagickEpsilon) ||
        (fabs((double) dy) >= MagickEpsilon))
      break;
  }
  if (i == (ssize_t) number_vertices)
    i=(ssize_t) number_vertices-1L;
  distance=hypot((double) dx,(double) dy);
  /*
    Guard against division by zero: if every vertex coincides, 'distance'
    is (near) zero and the endpoint is left unchanged instead of becoming
    NaN/Inf.
  */
  if (distance >= MagickEpsilon)
    {
      primitive_info[0].point.x=(double) (primitive_info[i].point.x+
        dx*(distance+offset)/distance);
      primitive_info[0].point.y=(double) (primitive_info[i].point.y+
        dy*(distance+offset)/distance);
    }
  /* find the last vertex measurably apart from the end point */
  for (j=(ssize_t) number_vertices-2; j >= 0; j--)
  {
    dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x;
    dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y;
    if ((fabs((double) dx) >= MagickEpsilon) ||
        (fabs((double) dy) >= MagickEpsilon))
      break;
  }
  if (j < 0)
    j=0;  /* all vertices coincident: avoid reading primitive_info[-1] */
  distance=hypot((double) dx,(double) dy);
  if (distance >= MagickEpsilon)
    {
      primitive_info[number_vertices-1].point.x=(double)
        (primitive_info[j].point.x+dx*(distance+offset)/distance);
      primitive_info[number_vertices-1].point.y=(double)
        (primitive_info[j].point.y+dy*(distance+offset)/distance);
    }
}
/*
  TraceStrokePolygon() converts a polyline into a new filled polygon that
  outlines the stroked path.  Two point lists are built in parallel (path_p
  on one side of the line, path_q on the other); each interior joint is
  filled according to draw_info->linejoin (bevel, miter, or round), and the
  two sides are finally stitched into one closed PrimitiveInfo polygon.
  Returns NULL on memory-allocation failure; the caller owns the result.
*/
static PrimitiveInfo *TraceStrokePolygon(const Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
/*
  Ensure at least `pad' more points fit in both side buffers; grows them,
  or on any failure releases every local allocation and returns NULL from
  the enclosing function.  The `~max_strokes < (pad)' test guards against
  size_t overflow of max_strokes+pad.
*/
#define CheckPathExtent(pad) \
if ((q+(pad)) >= (ssize_t) max_strokes) \
  { \
    if (~max_strokes < (pad)) \
      { \
        path_p=(PointInfo *) RelinquishMagickMemory(path_p); \
        path_q=(PointInfo *) RelinquishMagickMemory(path_q); \
      } \
    else \
      { \
        max_strokes+=(pad); \
        path_p=(PointInfo *) ResizeQuantumMemory(path_p,max_strokes, \
          sizeof(*path_p)); \
        path_q=(PointInfo *) ResizeQuantumMemory(path_q,max_strokes, \
          sizeof(*path_q)); \
      } \
    if ((path_p == (PointInfo *) NULL) || (path_q == (PointInfo *) NULL)) \
      { \
        if (path_p != (PointInfo *) NULL) \
          path_p=(PointInfo *) RelinquishMagickMemory(path_p); \
        if (path_q != (PointInfo *) NULL) \
          path_q=(PointInfo *) RelinquishMagickMemory(path_q); \
        polygon_primitive=(PrimitiveInfo *) \
          RelinquishMagickMemory(polygon_primitive); \
        return((PrimitiveInfo *) NULL); \
      } \
  }

  /* per-segment pair of scalars: .p is the previous segment, .q the next */
  typedef struct _LineSegment
  {
    double
      p,
      q;
  } LineSegment;

  double
    delta_theta,
    dot_product,
    mid,
    miterlimit;

  LineSegment
    dx = {0,0},
    dy = {0,0},
    inverse_slope = {0,0},
    slope = {0,0},
    theta = {0,0};

  MagickBooleanType
    closed_path;

  PointInfo
    box_p[5],
    box_q[5],
    center,
    offset,
    *path_p,
    *path_q;

  PrimitiveInfo
    *polygon_primitive,
    *stroke_polygon;

  register ssize_t
    i;

  size_t
    arc_segments,
    max_strokes,
    number_vertices;

  ssize_t
    j,
    n,
    p,
    q;

  /*
    Allocate paths.
  */
  number_vertices=primitive_info->coordinates;
  max_strokes=2*number_vertices+6*BezierQuantum+360;
  polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    number_vertices+2UL,sizeof(*polygon_primitive));
  if (polygon_primitive == (PrimitiveInfo *) NULL)
    return((PrimitiveInfo *) NULL);
  (void) memcpy(polygon_primitive,primitive_info,(size_t) number_vertices*
    sizeof(*polygon_primitive));
  closed_path=primitive_info[0].closed_subpath;
  /* for round/miter joins on a closed path, repeat the second vertex so the
     closing joint is processed like any interior joint */
  if (((draw_info->linejoin == RoundJoin) ||
       (draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse))
    {
      polygon_primitive[number_vertices]=primitive_info[1];
      number_vertices++;
    }
  polygon_primitive[number_vertices].primitive=UndefinedPrimitive;
  /*
    Compute the slope for the first line segment, p.
  */
  dx.p=0.0;
  dy.p=0.0;
  /* skip leading vertices that coincide (within MagickEpsilon) with vertex 0 */
  for (n=1; n < (ssize_t) number_vertices; n++)
  {
    dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x;
    dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y;
    if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon))
      break;
  }
  if (n == (ssize_t) number_vertices)
    {
      if ((draw_info->linecap != RoundCap) || (closed_path != MagickFalse))
        {
          /*
            Zero length subpath.
          */
          stroke_polygon=(PrimitiveInfo *) AcquireCriticalMemory(
            sizeof(*stroke_polygon));
          stroke_polygon[0]=polygon_primitive[0];
          stroke_polygon[0].coordinates=0;
          polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
            polygon_primitive);
          return(stroke_polygon);
        }
      n=(ssize_t) number_vertices-1L;
    }
  path_p=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes,
    sizeof(*path_p));
  if (path_p == (PointInfo *) NULL)
    {
      polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
        polygon_primitive);
      return((PrimitiveInfo *) NULL);
    }
  path_q=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes,
    sizeof(*path_q));
  if (path_q == (PointInfo *) NULL)
    {
      path_p=(PointInfo *) RelinquishMagickMemory(path_p);
      polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
        polygon_primitive);
      return((PrimitiveInfo *) NULL);
    }
  /* near-vertical and near-horizontal segments get a huge finite slope
     (+-1/MagickEpsilon) instead of a division by ~0 */
  slope.p=0.0;
  inverse_slope.p=0.0;
  if (fabs(dx.p) < MagickEpsilon)
    {
      if (dx.p >= 0.0)
        slope.p=dy.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
      else
        slope.p=dy.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
    }
  else
    if (fabs(dy.p) < MagickEpsilon)
      {
        if (dy.p >= 0.0)
          inverse_slope.p=dx.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
        else
          inverse_slope.p=dx.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
      }
    else
      {
        slope.p=dy.p/dx.p;
        inverse_slope.p=(-1.0/slope.p);
      }
  /* mid = half the stroke width in device space; miterlimit is compared
     against squared distances below, hence the squaring here */
  mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
  miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid);
  if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse))
    TraceSquareLinecap(polygon_primitive,number_vertices,mid);
  /* (offset.x,offset.y) is the perpendicular of length `mid' to the first
     segment; box_p/box_q hold the two offset copies of that segment */
  offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0)));
  offset.y=(double) (offset.x*inverse_slope.p);
  if ((dy.p*offset.x-dx.p*offset.y) > 0.0)
    {
      box_p[0].x=polygon_primitive[0].point.x-offset.x;
      box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p;
      box_p[1].x=polygon_primitive[n].point.x-offset.x;
      box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p;
      box_q[0].x=polygon_primitive[0].point.x+offset.x;
      box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p;
      box_q[1].x=polygon_primitive[n].point.x+offset.x;
      box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p;
    }
  else
    {
      box_p[0].x=polygon_primitive[0].point.x+offset.x;
      box_p[0].y=polygon_primitive[0].point.y+offset.y;
      box_p[1].x=polygon_primitive[n].point.x+offset.x;
      box_p[1].y=polygon_primitive[n].point.y+offset.y;
      box_q[0].x=polygon_primitive[0].point.x-offset.x;
      box_q[0].y=polygon_primitive[0].point.y-offset.y;
      box_q[1].x=polygon_primitive[n].point.x-offset.x;
      box_q[1].y=polygon_primitive[n].point.y-offset.y;
    }
  /*
    Create strokes for the line join attribute: bevel, miter, round.
  */
  p=0;
  q=0;
  /* NOTE(review): the p/q indices look swapped on the next two lines, but
     both are zero here and both end at one, so the effect is identical to
     path_q[q++]/path_p[p++]; kept byte-identical */
  path_q[p++]=box_q[0];
  path_p[q++]=box_p[0];
  for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++)
  {
    /*
      Compute the slope for this line segment, q.
    */
    dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x;
    dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y;
    dot_product=dx.q*dx.q+dy.q*dy.q;
    /* skip segments shorter than half a pixel */
    if (dot_product < 0.25)
      continue;
    slope.q=0.0;
    inverse_slope.q=0.0;
    if (fabs(dx.q) < MagickEpsilon)
      {
        if (dx.q >= 0.0)
          slope.q=dy.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
        else
          slope.q=dy.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
      }
    else
      if (fabs(dy.q) < MagickEpsilon)
        {
          if (dy.q >= 0.0)
            inverse_slope.q=dx.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
          else
            inverse_slope.q=dx.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
        }
      else
        {
          slope.q=dy.q/dx.q;
          inverse_slope.q=(-1.0/slope.q);
        }
    /* offset copies (box_p[2..3]/box_q[2..3]) of the current segment */
    offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0)));
    offset.y=(double) (offset.x*inverse_slope.q);
    dot_product=dy.q*offset.x-dx.q*offset.y;
    if (dot_product > 0.0)
      {
        box_p[2].x=polygon_primitive[n].point.x-offset.x;
        box_p[2].y=polygon_primitive[n].point.y-offset.y;
        box_p[3].x=polygon_primitive[i].point.x-offset.x;
        box_p[3].y=polygon_primitive[i].point.y-offset.y;
        box_q[2].x=polygon_primitive[n].point.x+offset.x;
        box_q[2].y=polygon_primitive[n].point.y+offset.y;
        box_q[3].x=polygon_primitive[i].point.x+offset.x;
        box_q[3].y=polygon_primitive[i].point.y+offset.y;
      }
    else
      {
        box_p[2].x=polygon_primitive[n].point.x+offset.x;
        box_p[2].y=polygon_primitive[n].point.y+offset.y;
        box_p[3].x=polygon_primitive[i].point.x+offset.x;
        box_p[3].y=polygon_primitive[i].point.y+offset.y;
        box_q[2].x=polygon_primitive[n].point.x-offset.x;
        box_q[2].y=polygon_primitive[n].point.y-offset.y;
        box_q[3].x=polygon_primitive[i].point.x-offset.x;
        box_q[3].y=polygon_primitive[i].point.y-offset.y;
      }
    /* box_p[4]/box_q[4]: intersection of the offset copies of the previous
       and current segment (the miter point); parallel segments reuse the
       previous endpoint */
    if (fabs((double) (slope.p-slope.q)) < MagickEpsilon)
      {
        box_p[4]=box_p[1];
        box_q[4]=box_q[1];
      }
    else
      {
        box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+
          box_p[3].y)/(slope.p-slope.q));
        box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y);
        box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+
          box_q[3].y)/(slope.p-slope.q));
        box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y);
      }
    CheckPathExtent(6*BezierQuantum+360);
    /* sign of the cross product selects which side is the outside of the
       turn; the two switches below are mirror images of each other */
    dot_product=dx.q*dy.p-dx.p*dy.q;
    if (dot_product <= 0.0)
      switch (draw_info->linejoin)
      {
        case BevelJoin:
        {
          path_q[q++]=box_q[1];
          path_q[q++]=box_q[2];
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_p[p++]=box_p[4];
          else
            {
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case MiterJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            {
              path_q[q++]=box_q[4];
              path_p[p++]=box_p[4];
            }
          else
            {
              /* miter too long: fall back to a bevel */
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case RoundJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_p[p++]=box_p[4];
          else
            {
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          /* approximate the round joint with an arc of radius `mid' about
             the joint vertex, sampled into arc_segments chords */
          center=polygon_primitive[n].point;
          theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x);
          theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x);
          if (theta.q < theta.p)
            theta.q+=2.0*MagickPI;
          arc_segments=(size_t) ceil((double) ((theta.q-theta.p)/
            (2.0*sqrt((double) (1.0/mid)))));
          CheckPathExtent(arc_segments+6*BezierQuantum+360);
          path_q[q].x=box_q[1].x;
          path_q[q].y=box_q[1].y;
          q++;
          for (j=1; j < (ssize_t) arc_segments; j++)
          {
            delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
            path_q[q].x=(double) (center.x+mid*cos(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            path_q[q].y=(double) (center.y+mid*sin(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            q++;
          }
          path_q[q++]=box_q[2];
          break;
        }
        default:
          break;
      }
    else
      switch (draw_info->linejoin)
      {
        case BevelJoin:
        {
          path_p[p++]=box_p[1];
          path_p[p++]=box_p[2];
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_q[q++]=box_q[4];
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
            }
          break;
        }
        case MiterJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            {
              path_q[q++]=box_q[4];
              path_p[p++]=box_p[4];
            }
          else
            {
              /* miter too long: fall back to a bevel */
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case RoundJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_q[q++]=box_q[4];
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
            }
          /* round joint on the path_p side of the stroke */
          center=polygon_primitive[n].point;
          theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x);
          theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x);
          if (theta.p < theta.q)
            theta.p+=2.0*MagickPI;
          arc_segments=(size_t) ceil((double) ((theta.p-theta.q)/
            (2.0*sqrt((double) (1.0/mid)))));
          CheckPathExtent(arc_segments+6*BezierQuantum+360);
          path_p[p++]=box_p[1];
          for (j=1; j < (ssize_t) arc_segments; j++)
          {
            delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
            path_p[p].x=(double) (center.x+mid*cos(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            path_p[p].y=(double) (center.y+mid*sin(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            p++;
          }
          path_p[p++]=box_p[2];
          break;
        }
        default:
          break;
      }
    /* current segment becomes the previous one for the next joint */
    slope.p=slope.q;
    inverse_slope.p=inverse_slope.q;
    box_p[0]=box_p[2];
    box_p[1]=box_p[3];
    box_q[0]=box_q[2];
    box_q[1]=box_q[3];
    dx.p=dx.q;
    dy.p=dy.q;
    n=i;
  }
  path_p[p++]=box_p[1];
  path_q[q++]=box_q[1];
  /*
    Trace stroked polygon.
  */
  /* closed_path is MagickFalse/MagickTrue, presumably 0/1, so it doubles
     as a count of the extra stitch points below -- TODO confirm */
  stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon));
  if (stroke_polygon != (PrimitiveInfo *) NULL)
    {
      /* forward copy of side p, then side q in reverse, closing the loop */
      for (i=0; i < (ssize_t) p; i++)
      {
        stroke_polygon[i]=polygon_primitive[0];
        stroke_polygon[i].point=path_p[i];
      }
      if (closed_path != MagickFalse)
        {
          stroke_polygon[i]=polygon_primitive[0];
          stroke_polygon[i].point=stroke_polygon[0].point;
          i++;
        }
      for ( ; i < (ssize_t) (p+q+closed_path); i++)
      {
        stroke_polygon[i]=polygon_primitive[0];
        stroke_polygon[i].point=path_q[p+q+closed_path-(i+1)];
      }
      if (closed_path != MagickFalse)
        {
          stroke_polygon[i]=polygon_primitive[0];
          stroke_polygon[i].point=stroke_polygon[p+closed_path].point;
          i++;
        }
      stroke_polygon[i]=polygon_primitive[0];
      stroke_polygon[i].point=stroke_polygon[0].point;
      i++;
      stroke_polygon[i].primitive=UndefinedPrimitive;
      stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1);
    }
  path_p=(PointInfo *) RelinquishMagickMemory(path_p);
  path_q=(PointInfo *) RelinquishMagickMemory(path_q);
  polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive);
  return(stroke_polygon);
}
|
filter_codelet.c | #ifdef USE_OMP
#include <omp.h>
#endif
#include "filter.h"
/*
 * Threshold filter: for each of the N*N input elements, write 255 to the
 * output when |input| > T, else 0.
 *
 * The original implementation advanced matrix_ptr1/matrix_ptr2 and carried
 * the scalars v1/temp1 across iterations.  That (a) read one element past
 * the end of matrix_ptr1 (v1 was refilled at the end of the last
 * iteration) and (b) made every iteration race on the shared pointers and
 * scalars when the loop ran under `#pragma omp parallel for'.  Indexing by
 * the (implicitly private) loop variable keeps each iteration independent,
 * so the parallel-for is now correct and the extra read is gone.
 */
void filter_codelet(int *matrix_ptr1, int *matrix_ptr2)
{
  int i;

#ifdef USE_OMP
#pragma omp parallel for
#endif
  for (i = 0; i < N * N; i++)
    matrix_ptr2[i] = (abs(matrix_ptr1[i]) > T) ? 255 : 0;
}
|
RCCE_malloc.c | //***************************************************************************************
// MPB memory allocation routines.
//***************************************************************************************
//
// Author: Rob F. Van der Wijngaart
// Intel Corporation
// Date: 08/30/2010
//
//***************************************************************************************
//
// Copyright 2010 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "RCCE_lib.h"
//......................................................................................
// GLOBAL VARIABLES USED BY THE LIBRARY
//......................................................................................
static RCCE_BLOCK_S RCCE_space; // data structure used for tracking MPB memory blocks
static RCCE_BLOCK_S *RCCE_spacep; // pointer to RCCE_space
#ifdef _OPENMP
#pragma omp threadprivate (RCCE_space, RCCE_spacep)
#endif
// END GLOBAL VARIABLES USED BY THE LIBRARY
//......................................................................................
//--------------------------------------------------------------------------------------
// FUNCTION: RCCE_malloc_init
//--------------------------------------------------------------------------------------
// initialize memory allocator
//--------------------------------------------------------------------------------------
void RCCE_malloc_init(
  t_vcharp mem, // pointer to MPB space that is to be managed by allocator
  size_t size   // size (bytes) of managed space
  ) {
#ifndef GORY
  // in the simplified API MPB memory allocation merely uses running pointers:
  // flags are carved off the front starting at RCCE_flags_start, payload data
  // starts at RCCE_buff_ptr, and RCCE_chunk tracks the bytes still available
  RCCE_flags_start = mem;
  RCCE_chunk = size;
  RCCE_buff_ptr = mem;
#else
  // create one block containing all memory for truly dynamic memory allocator
  RCCE_spacep = &RCCE_space;
  RCCE_spacep->tail = (RCCE_BLOCK *) malloc(sizeof(RCCE_BLOCK));
  // NOTE(review): the malloc result is not checked; a NULL here faults on the
  // next line -- presumably acceptable at init time, but worth confirming
  RCCE_spacep->tail->free_size = size;
  RCCE_spacep->tail->space = mem;
  /* make a circular list by connecting tail to itself */
  RCCE_spacep->tail->next = RCCE_spacep->tail;
#endif
}
//--------------------------------------------------------------------------------------
// FUNCTION: RCCE_malloc
//--------------------------------------------------------------------------------------
// Allocate memory inside MPB. In restricted mode we only use it to allocate new
// flags prompted by the creation of new communicators. Since communicators are never
// deleted, we do not need to deallocate MPB memory, so we can simply keep running
// pointers of where the next flag will be stored, and where payload data can go. In
// GORY mode we need to support fully dynamic memory allocation and deallocation.
//--------------------------------------------------------------------------------------
t_vcharp RCCE_malloc(
  size_t size  // requested space (bytes)
  ) {

  t_vcharp result;
#ifndef GORY
  // Simplified API: the MPB is handed out via running pointers and never
  // freed; each allocation is exactly one cache line off the flag area.
  // new flag takes exactly one cache line, whether using single bit flags or not
  if (size != RCCE_LINE_SIZE) {
    fprintf(stderr, "ERROR in RCCE_malloc(): size != RCCE_LINE_SIZE!\n");
    exit(-1);
    return(0);
  }
  // Refuse the request when no whole line would remain afterwards.  The
  // original test `!(RCCE_chunk-RCCE_LINE_SIZE)' only caught the exact-zero
  // case; when RCCE_chunk had dropped below one line the difference was a
  // nonzero value, so the allocation silently proceeded past the end of the
  // managed space.  `<=' preserves the original exact-zero rejection and
  // also rejects the too-small case.
  if (RCCE_chunk <= RCCE_LINE_SIZE) {
    fprintf(stderr, "ERROR in RCCE_malloc(): No more MPB space left!\n");
    exit(-1);
    return(0);
  }
  result = RCCE_flags_start;
  // reduce maximum size of message payload chunk
  RCCE_chunk -= RCCE_LINE_SIZE;
  // move running pointer to next available flags line
  RCCE_flags_start += RCCE_LINE_SIZE;
  // move running pointer to new start of payload data area
  RCCE_buff_ptr += RCCE_LINE_SIZE;
  return(result);
#else
  // simple memory allocator, loosely based on public domain code developed by
  // Michael B. Allen and published on "The Scripts--IT /Developers Network".
  // Approach:
  // - maintain linked list of pointers to memory. A block is either completely
  //   malloced (free_size = 0), or completely free (free_size > 0).
  //   The space field always points to the beginning of the block
  // - malloc: traverse linked list for first block that has enough space
  // - free: Check if pointer exists. If yes, check if the new block should be
  //   merged with neighbors. Could be one or two neighbors.

  RCCE_BLOCK *b1, *b2, *b3; // running pointers for blocks

  // reject empty requests and sizes that are not a multiple of a cache line
  if (size==0 || size%RCCE_LINE_SIZE!=0) return 0;

  // always first check if the tail block has enough space, because that
  // is the most likely. If it does and it is exactly enough, we still
  // create a new block that will be the new tail, whose free space is
  // zero. This acts as a marker of where free space of predecessor ends
  b1 = RCCE_spacep->tail;
  if (b1->free_size >= size) {
    // need to insert new block; new order is: b1->b2 (= new tail)
    b2 = (RCCE_BLOCK *) malloc(sizeof(RCCE_BLOCK));
    b2->next = b1->next;
    b1->next = b2;
    b2->free_size = b1->free_size-size;
    b2->space = b1->space + size;
    b1->free_size = 0;
    // need to update the tail
    RCCE_spacep->tail = b2;
    return(b1->space);
  }

  // tail didn't have enough space; loop over whole list from beginning
  while (b1->next->free_size < size) {
    if (b1->next == RCCE_spacep->tail) {
      return NULL; // we came full circle
    }
    b1 = b1->next;
  }
  b2 = b1->next;
  if (b2->free_size > size) { // split block; new block order: b1->b2->b3
    b3 = (RCCE_BLOCK *) malloc(sizeof(RCCE_BLOCK));
    b3->next = b2->next;                  // reconnect pointers to add block b3
    b2->next = b3;                        //  "        "      "  "    "     "
    b3->free_size = b2->free_size - size; // b3 gets remainder free space
    b3->space = b2->space + size;         // need to shift space pointer
  }
  b2->free_size = 0;                      // block b2 is completely used
  return (b2->space);
#endif
}
t_vcharp RCCE_palloc(
  size_t size, // requested space
  int CoreID   // core whose comm buffer the returned pointer addresses
  ) {

  // Allocate in the caller's own MPB, then translate the pointer into the
  // address space of core CoreID's comm buffer (same byte offset).
  t_vcharp local = RCCE_malloc(size);
  if (!local) return local;
  return RCCE_comm_buffer[CoreID] + (local - RCCE_comm_buffer[RCCE_IAM]);
}
//--------------------------------------------------------------------------------------
// FUNCTION: RCCE_free
//--------------------------------------------------------------------------------------
// Deallocate memory in MPB; only used in GORY mode
//--------------------------------------------------------------------------------------
void RCCE_free(
  t_vcharp ptr // pointer to data to be freed
  ) {

  RCCE_BLOCK *b1, *b2, *b3; // running block pointers
  int j1, j2;               // booleans determining merging of blocks

  // loop over whole list from the beginning until we locate space ptr
  b1 = RCCE_spacep->tail;
  while (b1->next->space != ptr && b1->next != RCCE_spacep->tail) {
    b1 = b1->next;
  }
  // b2 is target block whose space must be freed
  b2 = b1->next;
  // tail either has zero free space, or hasn't been malloc'ed; this check
  // also silently ignores a ptr that was never allocated, since the search
  // above then stops at the tail
  if (b2 == RCCE_spacep->tail) return;
  // reset free space for target block (entire block); block sizes are not
  // stored explicitly, so the size is recovered from the successor's space
  b3 = b2->next;
  b2->free_size = b3->space - b2->space;
  // determine with what non-empty blocks the target block can be merged
  j1 = (b1->free_size>0 && b1!=RCCE_spacep->tail); // predecessor block
  j2 = (b3->free_size>0 || b3==RCCE_spacep->tail); // successor block
  if (j1) {
    if (j2) { // splice all three blocks together: (b1,b2,b3) into b1
      b1->next = b3->next;
      b1->free_size += b3->free_size + b2->free_size;
      if (b3==RCCE_spacep->tail) RCCE_spacep->tail = b1;
      free(b3);
    }
    else { // only merge (b1,b2) into b1
      b1->free_size += b2->free_size;
      b1->next = b3;
    }
    free(b2);
  }
  else {
    if (j2) { // only merge (b2,b3) into b2
      b2->next = b3->next;
      b2->free_size += b3->free_size;
      if (b3==RCCE_spacep->tail) RCCE_spacep->tail = b2;
      free(b3);
    }
  }
}
//--------------------------------------------------------------------------------------
// FUNCTION: RCCE_malloc_request
//--------------------------------------------------------------------------------------
// this function tries to return a (padded) amount of space in the MPB of size
// "size" bytes. If not available, the function keeps halving space until it fits
//--------------------------------------------------------------------------------------
t_vcharp RCCE_malloc_request(
  size_t size,  // requested number of bytes
  size_t *chunk // number of bytes of space actually obtained
  ) {

  t_vcharp space;

  // Start from the padded request; on each failure halve the request
  // (re-padded) until an allocation succeeds or the chunk size falls
  // below a single cache line.
  space = 0;
  for (*chunk = PAD32byte(size); !space && *chunk >= RCCE_LINE_SIZE; ) {
    space = RCCE_malloc(*chunk);
    if (!space) *chunk = PAD32byte(*chunk/2);
  }
  return (space);
}
|
distort.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD IIIII SSSSS TTTTT OOO RRRR TTTTT %
% D D I SS T O O R R T %
% D D I SSS T O O RRRR T %
% D D I SS T O O R R T %
% DDDD IIIII SSSSS T OOO R R T %
% %
% %
% MagickCore Image Distortion Methods %
% %
% Software Design %
% Cristy %
% Anthony Thyssen %
% June 2007 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include <math.h>
#include "MagickCore/artifact.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/distort.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/image.h"
#include "MagickCore/linked-list.h"
#include "MagickCore/list.h"
#include "MagickCore/matrix.h"
#include "MagickCore/matrix-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/registry.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/shear.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/transform.h"
/*
Numerous internal routines for image distortions.
*/
static inline void AffineArgsToCoefficients(double *affine)
{
  /*
    Permute the six external affine arguments sx,ry,rx,sy,tx,ty (in
    affine[0..5]) into the internal coefficient order c0,c2,c4,c1,c3,c5.
    Slots 0 and 5 keep their positions; the middle four rotate.
  */
  const double
    ry = affine[1],
    rx = affine[2],
    sy = affine[3],
    tx = affine[4];

  affine[1]=rx;
  affine[2]=tx;
  affine[3]=ry;
  affine[4]=sy;
}
static inline void CoefficientsToAffineArgs(double *coeff)
{
  /*
    Inverse permutation of AffineArgsToCoefficients(): map the internal
    coefficients c0..c5 back into the external affine argument order
    sx,ry,rx,sy,tx,ty.  Slots 0 and 5 are untouched.
  */
  const double
    c1 = coeff[1],
    c2 = coeff[2],
    c3 = coeff[3],
    c4 = coeff[4];

  coeff[1]=c3;
  coeff[2]=c1;
  coeff[3]=c4;
  coeff[4]=c2;
}
/*
  Compute the inverse of a six-coefficient affine transform (coeff[0..5])
  into inverse[0..5] using the closed-form adjugate/determinant formula.
  PerceptibleReciprocal presumably returns a safe reciprocal for a
  near-zero determinant -- confirm against its MagickCore definition.
*/
static void InvertAffineCoefficients(const double *coeff,double *inverse)
{
  /* From "Digital Image Warping" by George Wolberg, page 50 */
  double
    determinant;

  determinant=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[1]*coeff[3]);
  inverse[0]=determinant*coeff[4];
  inverse[1]=determinant*(-coeff[1]);
  inverse[2]=determinant*(coeff[1]*coeff[5]-coeff[2]*coeff[4]);
  inverse[3]=determinant*(-coeff[3]);
  inverse[4]=determinant*coeff[0];
  inverse[5]=determinant*(coeff[2]*coeff[3]-coeff[0]*coeff[5]);
}
/*
  Compute the inverse of an eight-coefficient perspective transform
  (coeff[0..7]) into inverse[0..7] via the adjugate of the implied 3x3
  homogeneous matrix (the 9th element is an implicit 1); the common
  scale factor is folded into the reciprocal of the 2x2 sub-determinant.
*/
static void InvertPerspectiveCoefficients(const double *coeff,
  double *inverse)
{
  /* From "Digital Image Warping" by George Wolberg, page 53 */
  double
    determinant;

  determinant=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[3]*coeff[1]);
  inverse[0]=determinant*(coeff[4]-coeff[7]*coeff[5]);
  inverse[1]=determinant*(coeff[7]*coeff[2]-coeff[1]);
  inverse[2]=determinant*(coeff[1]*coeff[5]-coeff[4]*coeff[2]);
  inverse[3]=determinant*(coeff[6]*coeff[5]-coeff[3]);
  inverse[4]=determinant*(coeff[0]-coeff[6]*coeff[2]);
  inverse[5]=determinant*(coeff[3]*coeff[2]-coeff[0]*coeff[5]);
  inverse[6]=determinant*(coeff[3]*coeff[7]-coeff[6]*coeff[4]);
  inverse[7]=determinant*(coeff[6]*coeff[1]-coeff[0]*coeff[7]);
}
/*
* Polynomial Term Defining Functions
*
* Order must either be an integer, or 1.5 to produce
* the 2 number_valuesal polynomial function...
* affine 1 (3) u = c0 + c1*x + c2*y
* bilinear 1.5 (4) u = '' + c3*x*y
* quadratic 2 (6) u = '' + c4*x*x + c5*y*y
* cubic 3 (10) u = '' + c6*x^3 + c7*x*x*y + c8*x*y*y + c9*y^3
* quartic 4 (15) u = '' + c10*x^4 + ... + c14*y^4
* quintic 5 (21) u = '' + c15*x^5 + ... + c20*y^5
* number in parenthesis minimum number of points needed.
* Anything beyond quintic, has not been implemented until
* a more automated way of determining terms is found.
* Note the slight re-ordering of the terms for a quadratic polynomial
* which is to allow the use of a bi-linear (order=1.5) polynomial.
* All the later polynomials are ordered simply from x^N to y^N
*/
/*
  Return the number of terms of a 2d polynomial of the given order, i.e.
  (order+1)*(order+2)/2.  Valid orders are the integers 1..5 plus the
  special bi-linear value 1.5; any other order returns 0 (invalid).

  Fix: the original non-integer test `(order-1.5) > MagickEpsilon' is
  negative for any non-integer order below 1.5 (e.g. 1.2), so such invalid
  orders slipped through and produced a bogus term count; `fabs' rejects
  every non-integer other than 1.5.  Valid inputs behave exactly as before.
*/
static size_t poly_number_terms(double order)
{
  if ( order < 1 || order > 5 ||
       ( order != floor(order) && fabs(order-1.5) > MagickEpsilon) )
    return 0; /* invalid polynomial order */
  return((size_t) floor((order+1)*(order+2)/2));
}
static double poly_basis_fn(ssize_t n, double x, double y)
{
  /*
    Evaluate 2d polynomial basis term n at (x,y) as x^xpow[n]*y^ypow[n].
    Terms are ordered so truncating the table yields the lower-order
    polynomials, with the bilinear x*y term slotted between the affine
    and quadratic groups (see poly_basis_str() for the same ordering).
  */
  static const int
    xpow[21] = { 0,1,0, 1, 2,0, 3,2,1,0, 4,3,2,1,0, 5,4,3,2,1,0 },
    ypow[21] = { 0,0,1, 1, 0,2, 0,1,2,3, 0,1,2,3,4, 0,1,2,3,4,5 };

  double
    product;

  int
    k;

  if ((n < 0) || (n > 20))
    return( 0 ); /* should never happen */
  product=1.0;
  for (k=0; k < xpow[n]; k++)
    product*=x;
  for (k=0; k < ypow[n]; k++)
    product*=y;
  return(product);
}
static const char *poly_basis_str(ssize_t n)
{
  /*
    Render 2d polynomial basis term n as an expression string: products
    of "*ii" (x) and "*jj" (y) factors, matching poly_basis_fn()'s term
    ordering exactly.
  */
  static const char
    *terms[21] =
    {
      "",                /* constant */
      "*ii",
      "*jj",             /* affine    order = 1   terms = 3 */
      "*ii*jj",          /* bilinear  order = 1.5 terms = 4 */
      "*ii*ii",
      "*jj*jj",          /* quadratic order = 2   terms = 6 */
      "*ii*ii*ii",
      "*ii*ii*jj",
      "*ii*jj*jj",
      "*jj*jj*jj",       /* cubic     order = 3   terms = 10 */
      "*ii*ii*ii*ii",
      "*ii*ii*ii*jj",
      "*ii*ii*jj*jj",
      "*ii*jj*jj*jj",
      "*jj*jj*jj*jj",    /* quartic   order = 4   terms = 15 */
      "*ii*ii*ii*ii*ii",
      "*ii*ii*ii*ii*jj",
      "*ii*ii*ii*jj*jj",
      "*ii*ii*jj*jj*jj",
      "*ii*jj*jj*jj*jj",
      "*jj*jj*jj*jj*jj"  /* quintic   order = 5   terms = 21 */
    };

  if ((n < 0) || (n > 20))
    return( "UNKNOWN" ); /* should never happen */
  return(terms[n]);
}
static double poly_basis_dx(ssize_t n, double x, double y)
{
  /*
    Partial derivative of 2d polynomial basis term n with respect to x,
    with the derivative's integer coefficient deliberately dropped (the
    original likewise returned e.g. x*x for the x^3 term): lower the
    term's x exponent by one; terms without an x factor differentiate
    to zero.
  */
  static const int
    xpow[21] = { 0,1,0, 1, 2,0, 3,2,1,0, 4,3,2,1,0, 5,4,3,2,1,0 },
    ypow[21] = { 0,0,1, 1, 0,2, 0,1,2,3, 0,1,2,3,4, 0,1,2,3,4,5 };

  double
    product;

  int
    k;

  if ((n < 0) || (n > 20) || (xpow[n] == 0))
    return( 0.0 ); /* constant in x, or out-of-range term */
  product=1.0;
  for (k=0; k < (xpow[n]-1); k++)
    product*=x;
  for (k=0; k < ypow[n]; k++)
    product*=y;
  return(product);
}
static double poly_basis_dy(ssize_t n, double x, double y)
{
  /*
    Partial derivative of 2d polynomial basis term n with respect to y,
    with the derivative's integer coefficient deliberately dropped (the
    original likewise returned e.g. y*y for the y^3 term): lower the
    term's y exponent by one; terms without a y factor differentiate to
    zero.  The original delegated to poly_basis_dx(n-1,x,y) for n >= 6,
    which is equivalent under the term ordering tabulated here.
  */
  static const int
    xpow[21] = { 0,1,0, 1, 2,0, 3,2,1,0, 4,3,2,1,0, 5,4,3,2,1,0 },
    ypow[21] = { 0,0,1, 1, 0,2, 0,1,2,3, 0,1,2,3,4, 0,1,2,3,4,5 };

  double
    product;

  int
    k;

  if ((n < 0) || (n > 20) || (ypow[n] == 0))
    return( 0.0 ); /* constant in y, or out-of-range term */
  product=1.0;
  for (k=0; k < xpow[n]; k++)
    product*=x;
  for (k=0; k < (ypow[n]-1); k++)
    product*=y;
  return(product);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A f f i n e T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AffineTransformImage() transforms an image as dictated by the affine matrix.
% It allocates the memory necessary for the new Image structure and returns
% a pointer to the new image.
%
% The format of the AffineTransformImage method is:
%
% Image *AffineTransformImage(const Image *image,
% AffineMatrix *affine_matrix,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o affine_matrix: the affine matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AffineTransformImage(const Image *image,
  const AffineMatrix *affine_matrix,ExceptionInfo *exception)
{
  double
    distort[6];

  Image
    *deskew_image;

  /*
    Affine transform image.
  */
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(affine_matrix != (AffineMatrix *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* pack the matrix in sx,rx,ry,sy,tx,ty order for AffineProjection */
  distort[0]=affine_matrix->sx;
  distort[1]=affine_matrix->rx;
  distort[2]=affine_matrix->ry;
  distort[3]=affine_matrix->sy;
  distort[4]=affine_matrix->tx;
  distort[5]=affine_matrix->ty;
  /* delegate to the general distortion engine; a new image (or NULL on
     failure, with `exception' set) is returned to the caller */
  deskew_image=DistortImage(image,AffineProjectionDistortion,6,distort,
    MagickTrue,exception);
  return(deskew_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e n e r a t e C o e f f i c i e n t s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GenerateCoefficients() takes user provided input arguments and generates
% the coefficients, needed to apply the specific distortion for either
% distorting images (generally using control points) or generating a color
% gradient from sparsely separated color points.
%
% The format of the GenerateCoefficients() method is:
%
% Image *GenerateCoefficients(const Image *image,DistortMethod method,
% const size_t number_arguments,const double *arguments,
% size_t number_values, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be distorted.
%
% o method: the method of image distortion/ sparse gradient
%
% o number_arguments: the number of arguments given.
%
% o arguments: the arguments for this distortion method.
%
% o number_values: the style and format of given control points, (caller type)
% 0: 2 dimensional mapping of control points (Distort)
% Format: u,v,x,y where u,v is the 'source' of the
% the color to be plotted, for DistortImage()
% N: Interpolation of control points with N values (usually r,g,b)
% Format: x,y,r,g,b mapping x,y to color values r,g,b
% IN future, variable number of values may be given (1 to N)
%
% o exception: return any errors or warnings in this structure
%
% Note that the returned array of double values must be freed by the
% calling method using RelinquishMagickMemory(). This however may change in
% the future to require a more 'method' specific method.
%
% Because of this, this method should not be classed as stable or used
% outside other MagickCore library methods.
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
static double *GenerateCoefficients(const Image *image,
  DistortMethod *method,const size_t number_arguments,const double *arguments,
  size_t number_values,ExceptionInfo *exception)
{
  double
    *coeff;

  register size_t
    i;

  size_t
    number_coeff, /* number of coefficients to return (array size) */
    cp_size,      /* number floating point numbers per control point */
    cp_x,cp_y,    /* the x,y indexes for control point */
    cp_values;    /* index of values for this control point */
    /* number_values    Number of values given per control point */

  if ( number_values == 0 ) {
    /* Image distortion using control points (or other distortion)
       That is generate a mapping so that   x,y->u,v   given  u,v,x,y
    */
    number_values = 2;   /* special case: two values of u,v */
    cp_values = 0;       /* the values i,j are BEFORE the destination CP x,y */
    cp_x = 2;            /* location of x,y in input control values */
    cp_y = 3;
    /* NOTE: cp_values, also used for later 'reverse map distort' tests */
  }
  else {
    cp_x = 0;            /* location of x,y in input control values */
    cp_y = 1;
    cp_values = 2;       /* and the other values are after x,y */
    /* Typically in this case the values are R,G,B color values */
  }
  cp_size = number_values+2;  /* each CP definition involves this many numbers */

  /* If not enough control point pairs are found for specific distortions
     fall back to Affine distortion (allowing 0 to 3 point pairs)
  */
  if ( number_arguments < 4*cp_size &&
       (  *method == BilinearForwardDistortion
       || *method == BilinearReverseDistortion
       || *method == PerspectiveDistortion
       ) )
    *method = AffineDistortion;

  number_coeff=0;
  switch (*method) {
    case AffineDistortion:
    /* also BarycentricColorInterpolate: */
      number_coeff=3*number_values;
      break;
    case PolynomialDistortion:
      /* number of coefficients depend on the given polynomial 'order' */
      if ( number_arguments == 0 ) {
        /* guard: the order argument is required; without this check
           arguments[0] below would be read out of bounds */
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
                   "InvalidArgument","%s : '%s'","Polynomial",
                   "Invalid order, should be interger 1 to 5, or 1.5");
        return((double *) NULL);
      }
      i = poly_number_terms(arguments[0]);
      number_coeff = 2 + i*number_values;
      if ( i == 0 ) {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
                   "InvalidArgument","%s : '%s'","Polynomial",
                   "Invalid order, should be interger 1 to 5, or 1.5");
        return((double *) NULL);
      }
      if ( number_arguments < 1+i*cp_size ) {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
               "InvalidArgument", "%s : 'require at least %.20g CPs'",
               "Polynomial", (double) i);
        return((double *) NULL);
      }
      break;
    case BilinearReverseDistortion:
      number_coeff=4*number_values;
      break;
    /*
      The rest are constants as they are only used for image distorts
    */
    case BilinearForwardDistortion:
      number_coeff=10;  /* 2*4 coeff plus 2 constants */
      cp_x = 0;         /* Reverse src/dest coords for forward mapping */
      cp_y = 1;
      cp_values = 2;
      break;
#if 0
    case QuadraterialDistortion:
      number_coeff=19; /* BilinearForward + BilinearReverse */
#endif
      break;
    case ShepardsDistortion:
      number_coeff=1;  /* The power factor to use */
      break;
    case ArcDistortion:
      number_coeff=5;
      break;
    case ScaleRotateTranslateDistortion:
    case AffineProjectionDistortion:
    case Plane2CylinderDistortion:
    case Cylinder2PlaneDistortion:
      number_coeff=6;
      break;
    case PolarDistortion:
    case DePolarDistortion:
      number_coeff=8;
      break;
    case PerspectiveDistortion:
    case PerspectiveProjectionDistortion:
      number_coeff=9;
      break;
    case BarrelDistortion:
    case BarrelInverseDistortion:
      number_coeff=10;
      break;
    default:
      perror("unknown method given"); /* just fail assertion */
  }

  /* allocate the array of coefficients needed */
  coeff = (double *) AcquireQuantumMemory(number_coeff,sizeof(*coeff));
  if (coeff == (double *) NULL) {
    (void) ThrowMagickException(exception,GetMagickModule(),
      ResourceLimitError,"MemoryAllocationFailed",
      "%s", "GenerateCoefficients");
    return((double *) NULL);
  }

  /* zero out coefficients array */
  for (i=0; i < number_coeff; i++)
    coeff[i] = 0.0;

  switch (*method)
  {
    case AffineDistortion:
    {
      /* Affine Distortion
           v =  c0*x + c1*y + c2
         for each 'value' given

         Input Arguments are sets of control points...
         For Distort Images    u,v, x,y  ...
         For Sparse Gradients  x,y, r,g,b  ...
      */
      if ( number_arguments%cp_size != 0 ||
           number_arguments < cp_size ) {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
               "InvalidArgument", "%s : 'require at least %.20g CPs'",
               "Affine", 1.0);
        coeff=(double *) RelinquishMagickMemory(coeff);
        return((double *) NULL);
      }
      /* handle special cases of not enough arguments */
      if ( number_arguments == cp_size ) {
        /* Only 1 CP Set Given */
        if ( cp_values == 0 ) {
          /* image distortion - translate the image */
          coeff[0] = 1.0;
          coeff[2] = arguments[0] - arguments[2];
          coeff[4] = 1.0;
          coeff[5] = arguments[1] - arguments[3];
        }
        else {
          /* sparse gradient - use the values directly */
          for (i=0; i<number_values; i++)
            coeff[i*3+2] = arguments[cp_values+i];
        }
      }
      else {
        /* 2 or more points (usually 3) given.
           Solve a least squares simultaneous equation for coefficients.
        */
        double
          **matrix,
          **vectors,
          terms[3];

        MagickBooleanType
          status;

        /* create matrix, and a fake vectors matrix */
        matrix = AcquireMagickMatrix(3UL,3UL);
        vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
        if (matrix == (double **) NULL || vectors == (double **) NULL)
        {
          matrix  = RelinquishMagickMatrix(matrix, 3UL);
          vectors = (double **) RelinquishMagickMemory(vectors);
          coeff   = (double *) RelinquishMagickMemory(coeff);
          (void) ThrowMagickException(exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed",
            "%s", "DistortCoefficients");
          return((double *) NULL);
        }
        /* fake a number_values x3 vectors matrix from coefficients array */
        for (i=0; i < number_values; i++)
          vectors[i] = &(coeff[i*3]);
        /* Add given control point pairs for least squares solving */
        for (i=0; i < number_arguments; i+=cp_size) {
          terms[0] = arguments[i+cp_x];  /* x */
          terms[1] = arguments[i+cp_y];  /* y */
          terms[2] = 1;                  /* 1 */
          LeastSquaresAddTerms(matrix,vectors,terms,
              &(arguments[i+cp_values]),3UL,number_values);
        }
        if ( number_arguments == 2*cp_size ) {
          /* Only two pairs were given, but we need 3 to solve the affine.
             Fake extra coordinates by rotating p1 around p0 by 90 degrees.
               x2 = x0 - (y1-y0)   y2 = y0 + (x1-x0)
          */
          terms[0] = arguments[cp_x]
               - ( arguments[cp_size+cp_y] - arguments[cp_y] ); /* x2 */
          terms[1] = arguments[cp_y] +
               + ( arguments[cp_size+cp_x] - arguments[cp_x] ); /* y2 */
          terms[2] = 1;                                         /* 1 */
          if ( cp_values == 0 ) {
            /* Image Distortion - rotate the u,v coordinates too */
            double
              uv2[2];
            uv2[0] = arguments[0] - arguments[5] + arguments[1]; /* u2 */
            uv2[1] = arguments[1] + arguments[4] - arguments[0]; /* v2 */
            LeastSquaresAddTerms(matrix,vectors,terms,uv2,3UL,2UL);
          }
          else {
            /* Sparse Gradient - use values of p0 for linear gradient */
            LeastSquaresAddTerms(matrix,vectors,terms,
                  &(arguments[cp_values]),3UL,number_values);
          }
        }
        /* Solve for LeastSquares Coefficients */
        status=GaussJordanElimination(matrix,vectors,3UL,number_values);
        matrix = RelinquishMagickMatrix(matrix, 3UL);
        vectors = (double **) RelinquishMagickMemory(vectors);
        if ( status == MagickFalse ) {
          coeff = (double *) RelinquishMagickMemory(coeff);
          (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
              "InvalidArgument","%s : 'Unsolvable Matrix'",
              CommandOptionToMnemonic(MagickDistortOptions, *method) );
          return((double *) NULL);
        }
      }
      return(coeff);
    }
    case AffineProjectionDistortion:
    {
      /*
        Arguments: Affine Matrix (forward mapping)
        Arguments  sx, rx, ry, sy, tx, ty
        Where      u = sx*x + ry*y + tx
                   v = rx*x + sy*y + ty

        Returns coefficients (in their inverse form) ordered as...
                   sx ry tx  rx sy ty

        AffineProjection Distortion Notes...
          + Will only work with a 2 number_values for Image Distortion
          + Can not be used for generating a sparse gradient (interpolation)
      */
      double inverse[8];
      if (number_arguments != 6) {
        coeff = (double *) RelinquishMagickMemory(coeff);
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
              "InvalidArgument","%s : 'Needs 6 coeff values'",
              CommandOptionToMnemonic(MagickDistortOptions, *method) );
        return((double *) NULL);
      }
      /* FUTURE: trap test for sx*sy-rx*ry == 0 (determinant = 0, no inverse) */
      for(i=0; i<6UL; i++ )
        inverse[i] = arguments[i];
      AffineArgsToCoefficients(inverse); /* map into coefficients */
      InvertAffineCoefficients(inverse, coeff); /* invert */
      *method = AffineDistortion;

      return(coeff);
    }
    case ScaleRotateTranslateDistortion:
    {
      /* Scale, Rotate and Translate Distortion
         An alternative Affine Distortion
         Argument options, by number of arguments given:
           7: x,y, sx,sy, a, nx,ny
           6: x,y,   s,   a, nx,ny
           5: x,y, sx,sy, a
           4: x,y,   s,   a
           3: x,y,        a
           2:        s,   a
           1:             a
         Where actions are (in order of application)
            x,y     'center' of transforms     (default = image center)
            sx,sy   scale image by this amount (default = 1)
            a       angle of rotation          (argument required)
            nx,ny   move 'center' here         (default = x,y or no movement)
         And convert to affine mapping coefficients

         ScaleRotateTranslate Distortion Notes...
           + Does not use a set of CPs in any normal way
           + Will only work with a 2 number_valuesal Image Distortion
           + Cannot be used for generating a sparse gradient (interpolation)
      */
      double
        cosine, sine,
        x,y,sx,sy,a,nx,ny;

      /* set default center, and default scale */
      x = nx = (double)(image->columns)/2.0 + (double)image->page.x;
      y = ny = (double)(image->rows)/2.0    + (double)image->page.y;
      sx = sy = 1.0;
      switch ( number_arguments ) {
      case 0:
        coeff = (double *) RelinquishMagickMemory(coeff);
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
              "InvalidArgument","%s : 'Needs at least 1 argument'",
              CommandOptionToMnemonic(MagickDistortOptions, *method) );
        return((double *) NULL);
      case 1:
        a = arguments[0];
        break;
      case 2:
        sx = sy = arguments[0];
        a = arguments[1];
        break;
      default:
        x = nx = arguments[0];
        y = ny = arguments[1];
        switch ( number_arguments ) {
        case 3:
          a = arguments[2];
          break;
        case 4:
          sx = sy = arguments[2];
          a = arguments[3];
          break;
        case 5:
          sx = arguments[2];
          sy = arguments[3];
          a = arguments[4];
          break;
        case 6:
          sx = sy = arguments[2];
          a = arguments[3];
          nx = arguments[4];
          ny = arguments[5];
          break;
        case 7:
          sx = arguments[2];
          sy = arguments[3];
          a = arguments[4];
          nx = arguments[5];
          ny = arguments[6];
          break;
        default:
          coeff = (double *) RelinquishMagickMemory(coeff);
          (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
              "InvalidArgument","%s : 'Too Many Arguments (7 or less)'",
              CommandOptionToMnemonic(MagickDistortOptions, *method) );
          return((double *) NULL);
        }
        break;
      }
      /* Trap if sx or sy == 0 -- image is scaled out of existance! */
      if ( fabs(sx) < MagickEpsilon || fabs(sy) < MagickEpsilon ) {
        coeff = (double *) RelinquishMagickMemory(coeff);
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
              "InvalidArgument","%s : 'Zero Scale Given'",
              CommandOptionToMnemonic(MagickDistortOptions, *method) );
        return((double *) NULL);
      }
      /* Save the given arguments as an affine distortion */
      a=DegreesToRadians(a); cosine=cos(a); sine=sin(a);

      *method = AffineDistortion;
      coeff[0]=cosine/sx;
      coeff[1]=sine/sx;
      coeff[2]=x-nx*coeff[0]-ny*coeff[1];
      coeff[3]=(-sine)/sy;
      coeff[4]=cosine/sy;
      coeff[5]=y-nx*coeff[3]-ny*coeff[4];
      return(coeff);
    }
    case PerspectiveDistortion:
    { /*
         Perspective Distortion (a ratio of affine distortions)

                p(x,y)    c0*x + c1*y + c2
            u = ------ = ------------------
                r(x,y)    c6*x + c7*y + 1

                q(x,y)    c3*x + c4*y + c5
            v = ------ = ------------------
                r(x,y)    c6*x + c7*y + 1

           c8 = Sign of 'r', or the denominator affine, for the actual image.
                This determines what part of the distorted image is 'ground'
                side of the horizon, the other part is 'sky' or invalid.
                Valid values are  +1.0  or  -1.0  only.

         Input Arguments are sets of control points...
         For Distort Images    u,v, x,y  ...
         For Sparse Gradients  x,y, r,g,b  ...

         Perspective Distortion Notes...
           + Can be thought of as ratio of  3 affine transformations
           + Not separable: r() or c6 and c7 are used by both equations
           + All 8 coefficients must be determined simultaneously
           + Will only work with a 2 number_valuesal Image Distortion
           + Can not be used for generating a sparse gradient (interpolation)
           + It is not linear, but is simple to generate an inverse
           + All lines within an image remain lines.
           + but distances between points may vary.
      */
      double
        **matrix,
        *vectors[1],
        terms[8];

      size_t
        cp_u = cp_values,
        cp_v = cp_values+1;

      MagickBooleanType
        status;

      if ( number_arguments%cp_size != 0 ||
           number_arguments < cp_size*4 ) {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
              "InvalidArgument", "%s : 'require at least %.20g CPs'",
              CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0);
        coeff=(double *) RelinquishMagickMemory(coeff);
        return((double *) NULL);
      }
      /* fake 1x8 vectors matrix directly using the coefficients array */
      vectors[0] = &(coeff[0]);
      /* 8x8 least-squares matrix (zeroed) */
      matrix = AcquireMagickMatrix(8UL,8UL);
      if (matrix == (double **) NULL) {
        coeff=(double *) RelinquishMagickMemory(coeff);
        (void) ThrowMagickException(exception,GetMagickModule(),
          ResourceLimitError,"MemoryAllocationFailed",
          "%s", "DistortCoefficients");
        return((double *) NULL);
      }
      /* Add control points for least squares solving */
      for (i=0; i < number_arguments; i+=4) {
        terms[0]=arguments[i+cp_x];            /*   c0*x   */
        terms[1]=arguments[i+cp_y];            /*   c1*y   */
        terms[2]=1.0;                          /*   c2*1   */
        terms[3]=0.0;
        terms[4]=0.0;
        terms[5]=0.0;
        terms[6]=-terms[0]*arguments[i+cp_u];  /* 1/(c6*x) */
        terms[7]=-terms[1]*arguments[i+cp_u];  /* 1/(c7*y) */
        LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_u]),
            8UL,1UL);

        terms[0]=0.0;
        terms[1]=0.0;
        terms[2]=0.0;
        terms[3]=arguments[i+cp_x];            /*   c3*x   */
        terms[4]=arguments[i+cp_y];            /*   c4*y   */
        terms[5]=1.0;                          /*   c5*1   */
        terms[6]=-terms[3]*arguments[i+cp_v];  /* 1/(c6*x) */
        terms[7]=-terms[4]*arguments[i+cp_v];  /* 1/(c7*y) */
        LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_v]),
            8UL,1UL);
      }
      /* Solve for LeastSquares Coefficients */
      status=GaussJordanElimination(matrix,vectors,8UL,1UL);
      matrix = RelinquishMagickMatrix(matrix, 8UL);
      if ( status == MagickFalse ) {
        coeff = (double *) RelinquishMagickMemory(coeff);
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
            "InvalidArgument","%s : 'Unsolvable Matrix'",
            CommandOptionToMnemonic(MagickDistortOptions, *method) );
        return((double *) NULL);
      }
      /*
        Calculate 9'th coefficient! The ground-sky determination.
        What is sign of the 'ground' in r() denominator affine function?
        Just use any valid image coordinate (first control point) in
        destination for determination of what part of view is 'ground'.
      */
      coeff[8] = coeff[6]*arguments[cp_x]
                      + coeff[7]*arguments[cp_y] + 1.0;
      coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0;

      return(coeff);
    }
    case PerspectiveProjectionDistortion:
    {
      /*
        Arguments: Perspective Coefficients (forward mapping)
      */
      if (number_arguments != 8) {
        coeff = (double *) RelinquishMagickMemory(coeff);
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
              "InvalidArgument", "%s : 'Needs 8 coefficient values'",
              CommandOptionToMnemonic(MagickDistortOptions, *method));
        return((double *) NULL);
      }
      /* FUTURE: trap test  c0*c4-c3*c1 == 0  (determinate = 0, no inverse) */
      InvertPerspectiveCoefficients(arguments, coeff);
      /*
        Calculate 9'th coefficient! The ground-sky determination.
        What is sign of the 'ground' in r() denominator affine function?
        Just use any valid image coordinate in destination for determination.
        For a forward mapped perspective the images 0,0 coord will map to
        c2,c5 in the distorted image, so set the sign of denominator of that.
      */
      coeff[8] = coeff[6]*arguments[2]
                      + coeff[7]*arguments[5] + 1.0;
      coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0;
      *method = PerspectiveDistortion;

      return(coeff);
    }
    case BilinearForwardDistortion:
    case BilinearReverseDistortion:
    {
      /* Bilinear Distortion (Forward mapping)
            v = c0*x + c1*y + c2*x*y + c3;
         for each 'value' given

         This is actually a simple polynomial Distortion!  The difference
         however is when we need to reverse the above equation to generate a
         BilinearForwardDistortion (see below).

         Input Arguments are sets of control points...
         For Distort Images    u,v, x,y  ...
         For Sparse Gradients  x,y, r,g,b  ...
      */
      double
        **matrix,
        **vectors,
        terms[4];

      MagickBooleanType
        status;

      /* check the number of arguments */
      if ( number_arguments%cp_size != 0 ||
           number_arguments < cp_size*4 ) {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
              "InvalidArgument", "%s : 'require at least %.20g CPs'",
              CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0);
        coeff=(double *) RelinquishMagickMemory(coeff);
        return((double *) NULL);
      }
      /* create matrix, and a fake vectors matrix */
      matrix = AcquireMagickMatrix(4UL,4UL);
      vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
      if (matrix == (double **) NULL || vectors == (double **) NULL)
      {
        matrix  = RelinquishMagickMatrix(matrix, 4UL);
        vectors = (double **) RelinquishMagickMemory(vectors);
        coeff   = (double *) RelinquishMagickMemory(coeff);
        (void) ThrowMagickException(exception,GetMagickModule(),
          ResourceLimitError,"MemoryAllocationFailed",
          "%s", "DistortCoefficients");
        return((double *) NULL);
      }
      /* fake a number_values x4 vectors matrix from coefficients array */
      for (i=0; i < number_values; i++)
        vectors[i] = &(coeff[i*4]);
      /* Add given control point pairs for least squares solving */
      for (i=0; i < number_arguments; i+=cp_size) {
        terms[0] = arguments[i+cp_x];   /*  x  */
        terms[1] = arguments[i+cp_y];   /*  y  */
        terms[2] = terms[0]*terms[1];   /* x*y */
        terms[3] = 1;                   /*  1  */
        LeastSquaresAddTerms(matrix,vectors,terms,
             &(arguments[i+cp_values]),4UL,number_values);
      }
      /* Solve for LeastSquares Coefficients */
      status=GaussJordanElimination(matrix,vectors,4UL,number_values);
      matrix = RelinquishMagickMatrix(matrix, 4UL);
      vectors = (double **) RelinquishMagickMemory(vectors);
      if ( status == MagickFalse ) {
        coeff = (double *) RelinquishMagickMemory(coeff);
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
            "InvalidArgument","%s : 'Unsolvable Matrix'",
            CommandOptionToMnemonic(MagickDistortOptions, *method) );
        return((double *) NULL);
      }
      if ( *method == BilinearForwardDistortion ) {
         /* Bilinear Forward Mapped Distortion

         The above least-squares solved for coefficients but in the forward
         direction, due to changes to indexing constants.

            i = c0*x + c1*y + c2*x*y + c3;
            j = c4*x + c5*y + c6*x*y + c7;

         where i,j are in the destination image, NOT the source.

         Reverse Pixel mapping however needs to use reverse of these
         functions.  It required a full page of algebra to work out the
         reversed mapping formula, but resolves down to the following...

            c8 = c0*c5-c1*c4;
            c9 = 2*(c2*c5-c1*c6);   // '2*a' in the quadratic formula

            i = i - c3;   j = j - c7;
            b = c6*i - c2*j + c8;   // So that   a*y^2 + b*y + c == 0
            c = c4*i -  c0*j;       // y = ( -b +- sqrt(bb - 4ac) ) / (2*a)

            r = b*b - c9*(c+c);
            if ( c9 != 0 )
              y = ( -b + sqrt(r) ) / c9;
            else
              y = -c/b;

            x = ( i - c1*y) / ( c1 - c2*y );

         NB: if 'r' is negative there is no solution!
         NB: the sign of the sqrt() should be negative if image becomes
             flipped or flopped, or crosses over itself.
         NB: technically coefficient c5 is not needed, anymore,
             but kept for completeness.

         See Anthony Thyssen <A.Thyssen@griffith.edu.au>
         or  Fred Weinhaus <fmw@alink.net>  for more details.
         */
         coeff[8] = coeff[0]*coeff[5] - coeff[1]*coeff[4];
         coeff[9] = 2*(coeff[2]*coeff[5] - coeff[1]*coeff[6]);
      }
      return(coeff);
    }
#if 0
    case QuadrilateralDistortion:
    {
      /* Map a Quadrilateral to a unit square using BilinearReverse
         Then map that unit square back to the final Quadrilateral
         using BilinearForward.

         Input Arguments are sets of control points...
         For Distort Images    u,v, x,y  ...
         For Sparse Gradients  x,y, r,g,b  ...
      */
      /* UNDER CONSTRUCTION */
      return(coeff);
    }
#endif
    case PolynomialDistortion:
    {
      /* Polynomial Distortion

         First two coefficients are used to hold global polynomial information
           c0 = Order of the polynomial being created
           c1 = number_of_terms in one polynomial equation

         Rest of the coefficients map to the equations....
            v = c0 + c1*x + c2*y + c3*x*y + c4*x^2 + c5*y^2 + c6*x^3 + ...
         for each 'value' (number_values of them) given.
         As such total coefficients =  2 + number_terms * number_values

         Input Arguments are sets of control points...
         For Distort Images  order  [u,v, x,y] ...
         For Sparse Gradients  order  [x,y, r,g,b] ...

         Polynomial Distortion Notes...
           + UNDER DEVELOPMENT -- Do not expect this to remain as is.
           + Currently polynomial is a reversed mapped distortion.
           + Order 1.5 is fudged to map into a bilinear distortion.
             though it is not the same order as that distortion.
      */
      double
        **matrix,
        **vectors,
        *terms;

      size_t
        nterms;   /* number of polynomial terms per number_values */

      register ssize_t
        j;

      MagickBooleanType
        status;

      /* first two coefficients hold polynomial order information */
      coeff[0] = arguments[0];
      coeff[1] = (double) poly_number_terms(arguments[0]);
      nterms = (size_t) coeff[1];

      /* create matrix, a fake vectors matrix, and least sqs terms */
      matrix = AcquireMagickMatrix(nterms,nterms);
      vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
      terms = (double *) AcquireQuantumMemory(nterms, sizeof(*terms));
      if (matrix  == (double **) NULL ||
          vectors == (double **) NULL ||
          terms   == (double *) NULL )
      {
        matrix  = RelinquishMagickMatrix(matrix, nterms);
        vectors = (double **) RelinquishMagickMemory(vectors);
        terms   = (double *) RelinquishMagickMemory(terms);
        coeff   = (double *) RelinquishMagickMemory(coeff);
        (void) ThrowMagickException(exception,GetMagickModule(),
          ResourceLimitError,"MemoryAllocationFailed",
          "%s", "DistortCoefficients");
        return((double *) NULL);
      }
      /* fake a number_values x3 vectors matrix from coefficients array */
      for (i=0; i < number_values; i++)
        vectors[i] = &(coeff[2+i*nterms]);
      /* Add given control point pairs for least squares solving */
      for (i=1; i < number_arguments; i+=cp_size) { /* NB: start = 1 not 0 */
        for (j=0; j < (ssize_t) nterms; j++)
          terms[j] = poly_basis_fn(j,arguments[i+cp_x],arguments[i+cp_y]);
        LeastSquaresAddTerms(matrix,vectors,terms,
             &(arguments[i+cp_values]),nterms,number_values);
      }
      terms = (double *) RelinquishMagickMemory(terms);
      /* Solve for LeastSquares Coefficients */
      status=GaussJordanElimination(matrix,vectors,nterms,number_values);
      matrix  = RelinquishMagickMatrix(matrix, nterms);
      vectors = (double **) RelinquishMagickMemory(vectors);
      if ( status == MagickFalse ) {
        coeff = (double *) RelinquishMagickMemory(coeff);
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
            "InvalidArgument","%s : 'Unsolvable Matrix'",
            CommandOptionToMnemonic(MagickDistortOptions, *method) );
        return((double *) NULL);
      }
      return(coeff);
    }
    case ArcDistortion:
    {
      /* Arc Distortion
         Args: arc_width  rotate  top_edge_radius  bottom_edge_radius
         All but first argument are optional
            arc_width      The angle over which to arc the image side-to-side
            rotate         Angle to rotate image from vertical center
            top_radius     Set top edge of source image at this radius
            bottom_radius  Set bottom edge to this radius (radial scaling)

         By default, if the radii arguments are not provided the image radius
         is calculated so the horizontal center-line is fits the given arc
         without scaling.

         The output image size is ALWAYS adjusted to contain the whole image,
         and an offset is given to position image relative to the 0,0 point of
         the origin, allowing users to use relative positioning onto larger
         background (via -flatten).

         The arguments are converted to these coefficients
            c0: angle for center of source image
            c1: angle scale for mapping to source image
            c2: radius for top of source image
            c3: radius scale for mapping source image
            c4: centerline of arc within source image

         Note the coefficients use a center angle, so asymptotic join is
         furthest from both sides of the source image. This also means that
         for arc angles greater than 360 the sides of the image will be
         trimmed equally.

         Arc Distortion Notes...
           + Does not use a set of CPs
           + Will only work with Image Distortion
           + Can not be used for generating a sparse gradient (interpolation)
      */
      if ( number_arguments >= 1 && arguments[0] < MagickEpsilon ) {
        coeff = (double *) RelinquishMagickMemory(coeff);
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
            "InvalidArgument","%s : 'Arc Angle Too Small'",
            CommandOptionToMnemonic(MagickDistortOptions, *method) );
        return((double *) NULL);
      }
      if ( number_arguments >= 3 && arguments[2] < MagickEpsilon ) {
        coeff = (double *) RelinquishMagickMemory(coeff);
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
            "InvalidArgument","%s : 'Outer Radius Too Small'",
            CommandOptionToMnemonic(MagickDistortOptions, *method) );
        return((double *) NULL);
      }
      coeff[0] = -MagickPI2;   /* -90, place at top! */
      if ( number_arguments >= 1 )
        coeff[1] = DegreesToRadians(arguments[0]);
      else
        coeff[1] = MagickPI2;   /* zero arguments - center is at top */
      if ( number_arguments >= 2 )
        coeff[0] += DegreesToRadians(arguments[1]);
      coeff[0] /= Magick2PI;  /* normalize radians */
      coeff[0] -= MagickRound(coeff[0]);
      coeff[0] *= Magick2PI;  /* de-normalize back to radians */
      coeff[3] = (double)image->rows-1;
      coeff[2] = (double)image->columns/coeff[1] + coeff[3]/2.0;
      if ( number_arguments >= 3 ) {
        if ( number_arguments >= 4 )
          coeff[3] = arguments[2] - arguments[3];
        else
          coeff[3] *= arguments[2]/coeff[2];
        coeff[2] = arguments[2];
      }
      coeff[4] = ((double)image->columns-1.0)/2.0;

      return(coeff);
    }
    case PolarDistortion:
    case DePolarDistortion:
    {
      /* (De)Polar Distortion   (same set of arguments)
         Args:  Rmax, Rmin,  Xcenter,Ycenter,  Afrom,Ato
         DePolar can also have the extra arguments of Width, Height

         Coefficients 0 to 5 is the sanitized version first 6 input args
         Coefficient 6  is the angle to coord ratio  and visa-versa
         Coefficient 7  is the radius to coord ratio and visa-versa

         WARNING: It is possible for  Radius max<min  and/or  Angle from>to
      */
      if ( number_arguments == 3
          || ( number_arguments > 6 && *method == PolarDistortion )
          || number_arguments > 8 ) {
          (void) ThrowMagickException(exception,GetMagickModule(),
            OptionError,"InvalidArgument", "%s : number of arguments",
            CommandOptionToMnemonic(MagickDistortOptions, *method) );
        coeff=(double *) RelinquishMagickMemory(coeff);
        return((double *) NULL);
      }
      /* Rmax -  if 0 calculate appropriate value */
      if ( number_arguments >= 1 )
        coeff[0] = arguments[0];
      else
        coeff[0] = 0.0;
      /* Rmin  - usually 0 */
      coeff[1] = number_arguments >= 2 ? arguments[1] : 0.0;
      /* Center X,Y */
      if ( number_arguments >= 4 ) {
        coeff[2] = arguments[2];
        coeff[3] = arguments[3];
      }
      else { /* center of actual image */
        coeff[2] = (double)(image->columns)/2.0+image->page.x;
        coeff[3] = (double)(image->rows)/2.0+image->page.y;
      }
      /* Angle from,to - about polar center 0 is downward */
      coeff[4] = -MagickPI;
      if ( number_arguments >= 5 )
        coeff[4] = DegreesToRadians(arguments[4]);
      coeff[5] = coeff[4];
      if ( number_arguments >= 6 )
        coeff[5] = DegreesToRadians(arguments[5]);
      if ( fabs(coeff[4]-coeff[5]) < MagickEpsilon )
        coeff[5] += Magick2PI; /* same angle is a full circle */
      /* if radius 0 or negative,  its a special value... */
      if ( coeff[0] < MagickEpsilon ) {
        /* Use closest edge  if radius == 0 */
        if ( fabs(coeff[0]) < MagickEpsilon ) {
          coeff[0]=MagickMin(fabs(coeff[2]-image->page.x),
                             fabs(coeff[3]-image->page.y));
          coeff[0]=MagickMin(coeff[0],
                       fabs(coeff[2]-image->page.x-image->columns));
          coeff[0]=MagickMin(coeff[0],
                       fabs(coeff[3]-image->page.y-image->rows));
        }
        /* furthest diagonal if radius == -1 */
        if ( fabs(-1.0-coeff[0]) < MagickEpsilon ) {
          double rx,ry;
          rx = coeff[2]-image->page.x;
          ry = coeff[3]-image->page.y;
          coeff[0] = rx*rx+ry*ry;
          ry = coeff[3]-image->page.y-image->rows;
          coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
          rx = coeff[2]-image->page.x-image->columns;
          coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
          ry = coeff[3]-image->page.y;
          coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
          coeff[0] = sqrt(coeff[0]);
        }
      }
      /* IF Rmax <= 0 or Rmin < 0 OR Rmax < Rmin, THEN error */
      if ( coeff[0] < MagickEpsilon || coeff[1] < -MagickEpsilon
           || (coeff[0]-coeff[1]) < MagickEpsilon ) {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
              "InvalidArgument", "%s : Invalid Radius",
              CommandOptionToMnemonic(MagickDistortOptions, *method) );
        coeff=(double *) RelinquishMagickMemory(coeff);
        return((double *) NULL);
      }
      /* conversion ratios */
      if ( *method == PolarDistortion ) {
        coeff[6]=(double) image->columns/(coeff[5]-coeff[4]);
        coeff[7]=(double) image->rows/(coeff[0]-coeff[1]);
      }
      else { /* *method == DePolarDistortion */
        coeff[6]=(coeff[5]-coeff[4])/image->columns;
        coeff[7]=(coeff[0]-coeff[1])/image->rows;
      }
      return(coeff);
    }
    case Cylinder2PlaneDistortion:
    case Plane2CylinderDistortion:
    {
      /* 3D Cylinder to/from a Tangential Plane

         Projection between a cylinder and flat plane from a point on the
         center line of the cylinder.

         The two surfaces coincide in 3D space at the given centers of
         distortion (perpendicular to projection point) on both images.

         Args:  FOV_arc_width
         Coefficients: FOV(radians), Radius, center_x,y, dest_center_x,y

         FOV (Field Of View) the angular field of view of the distortion,
         across the width of the image, in degrees.  The centers are the
         points of least distortion in the input and resulting images.

         These centers are however determined later.

         Coeff 0 is the FOV angle of view of image width in radians
         Coeff 1 is calculated radius of cylinder.
         Coeff 2,3 center of distortion of input image
         Coefficients 4,5  Center of Distortion of dest (determined later)
      */
      /* guard: one FOV argument is required; without the count check
         arguments[0] would be read out of bounds for zero arguments */
      if ( number_arguments < 1 ||
           arguments[0] < MagickEpsilon || arguments[0] > 160.0 ) {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
              "InvalidArgument", "%s : Invalid FOV Angle",
              CommandOptionToMnemonic(MagickDistortOptions, *method) );
        coeff=(double *) RelinquishMagickMemory(coeff);
        return((double *) NULL);
      }
      coeff[0] = DegreesToRadians(arguments[0]);
      if ( *method == Cylinder2PlaneDistortion )
        /* image is curved around cylinder, so FOV angle (in radians)
         * scales directly to image X coordinate, according to its radius.
         */
        coeff[1] = (double) image->columns/coeff[0];
      else
        /* radius is distance away from an image with this angular FOV */
        coeff[1] = (double) image->columns / ( 2 * tan(coeff[0]/2) );

      coeff[2] = (double)(image->columns)/2.0+image->page.x;
      coeff[3] = (double)(image->rows)/2.0+image->page.y;
      coeff[4] = coeff[2];
      coeff[5] = coeff[3]; /* assuming image size is the same */
      return(coeff);
    }
    case BarrelDistortion:
    case BarrelInverseDistortion:
    {
      /* Barrel Distortion
           Rs=(A*Rd^3 + B*Rd^2 + C*Rd + D)*Rd
         BarrelInv Distortion
           Rs=Rd/(A*Rd^3 + B*Rd^2 + C*Rd + D)

         Where Rd is the normalized radius from corner to middle of image
         Input Arguments are one of the following forms (number of arguments)...
            3:  A,B,C
            4:  A,B,C,D
            5:  A,B,C  X,Y
            6:  A,B,C,D  X,Y
            8:  Ax,Bx,Cx,Dx  Ay,By,Cy,Dy
           10:  Ax,Bx,Cx,Dx  Ay,By,Cy,Dy   X,Y

         Returns 10 coefficient values, which are de-normalized (pixel scale)
            Ax, Bx, Cx, Dx,   Ay, By, Cy, Dy,    Xc, Yc
      */
      /* Radius de-normalization scaling factor */
      double
        rscale = 2.0/MagickMin((double) image->columns,(double) image->rows);

      /* sanity check number of args must = 3,4,5,6,8,10 or error */
      if ( (number_arguments  < 3) || (number_arguments == 7) ||
           (number_arguments == 9) || (number_arguments > 10) )
        {
          coeff=(double *) RelinquishMagickMemory(coeff);
          (void) ThrowMagickException(exception,GetMagickModule(),
            OptionError,"InvalidArgument", "%s : number of arguments",
            CommandOptionToMnemonic(MagickDistortOptions, *method) );
          return((double *) NULL);
        }
      /* A,B,C,D coefficients */
      coeff[0] = arguments[0];
      coeff[1] = arguments[1];
      coeff[2] = arguments[2];
      if ((number_arguments == 3) || (number_arguments == 5) )
        coeff[3] = 1.0 - coeff[0] - coeff[1] - coeff[2];
      else
        coeff[3] = arguments[3];
      /* de-normalize the coefficients */
      coeff[0] *= pow(rscale,3.0);
      coeff[1] *= rscale*rscale;
      coeff[2] *= rscale;
      /* Y coefficients: as given OR same as X coefficients */
      if ( number_arguments >= 8 ) {
        coeff[4] = arguments[4] * pow(rscale,3.0);
        coeff[5] = arguments[5] * rscale*rscale;
        coeff[6] = arguments[6] * rscale;
        coeff[7] = arguments[7];
      }
      else {
        coeff[4] = coeff[0];
        coeff[5] = coeff[1];
        coeff[6] = coeff[2];
        coeff[7] = coeff[3];
      }
      /* X,Y Center of Distortion (image coordinates) */
      if ( number_arguments == 5 )  {
        coeff[8] = arguments[3];
        coeff[9] = arguments[4];
      }
      else if ( number_arguments == 6 ) {
        coeff[8] = arguments[4];
        coeff[9] = arguments[5];
      }
      else if ( number_arguments == 10 ) {
        coeff[8] = arguments[8];
        coeff[9] = arguments[9];
      }
      else {
        /* center of the image provided (image coordinates) */
        coeff[8] = (double)image->columns/2.0 + image->page.x;
        coeff[9] = (double)image->rows/2.0    + image->page.y;
      }
      return(coeff);
    }
    case ShepardsDistortion:
    {
      /* Shepards Distortion  input arguments are the coefficients!
         Just check the number of arguments is valid!
         Args:  u1,v1, x1,y1, ...
          OR :  u1,v1, r1,g1,c1, ...
      */
      if ( number_arguments%cp_size != 0 ||
           number_arguments < cp_size ) {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
              "InvalidArgument", "%s : 'requires CP's (4 numbers each)'",
              CommandOptionToMnemonic(MagickDistortOptions, *method));
        coeff=(double *) RelinquishMagickMemory(coeff);
        return((double *) NULL);
      }
      /* User defined weighting power for Shepard's Method */
      { const char *artifact=GetImageArtifact(image,"shepards:power");
        if ( artifact != (const char *) NULL ) {
          coeff[0]=StringToDouble(artifact,(char **) NULL) / 2.0;
          if ( coeff[0] < MagickEpsilon ) {
            (void) ThrowMagickException(exception,GetMagickModule(),
                 OptionError,"InvalidArgument","%s", "-define shepards:power" );
            coeff=(double *) RelinquishMagickMemory(coeff);
            return((double *) NULL);
          }
        }
        else
          coeff[0]=1.0;  /* Default power of 2 (Inverse Squared) */
      }
      return(coeff);
    }
    default:
      break;
  }
  /* you should never reach this point */
  perror("no method handler"); /* just fail assertion */
  return((double *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i s t o r t R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DistortResizeImage() resize image using the equivalent but slower image
% distortion operator. The filter is applied using a EWA cylindrical
% resampling. But like resize the final image size is limited to whole pixels
% with no effects by virtual-pixels on the result.
%
% Note that images containing a transparency channel will be twice as slow to
% resize as images without transparency.
%
% The format of the DistortResizeImage method is:
%
% Image *DistortResizeImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the resized image.
%
% o rows: the number of rows in the resized image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *DistortResizeImage(const Image *image,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
#define DistortResizeImageTag "Distort/Image"

  Image
    *resize_image,
    *tmp_image;

  RectangleInfo
    crop_area;

  double
    distort_args[12];

  VirtualPixelMethod
    vp_save;

  /*
    Distort resize image: resize via an affine distortion (EWA cylindrical
    resampling), then crop the result back to exactly columns x rows whole
    pixels so virtual-pixels do not affect the result.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    return((Image *) NULL);
  /* Do not short-circuit this resize if final image size is unchanged */
  /*
    Affine scaling given as two control-point pairs:
    (0,0) -> (0,0) and (source width,height) -> (destination width,height).
  */
  (void) memset(distort_args,0,sizeof(distort_args));
  distort_args[4]=(double) image->columns;
  distort_args[6]=(double) columns;
  distort_args[9]=(double) image->rows;
  distort_args[11]=(double) rows;
  /* remember the virtual pixel method; it is restored on the final result */
  vp_save=GetImageVirtualPixelMethod(image);
  tmp_image=CloneImage(image,0,0,MagickTrue,exception);
  if (tmp_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageVirtualPixelMethod(tmp_image,TransparentVirtualPixelMethod,
    exception);
  if (image->alpha_trait == UndefinedPixelTrait)
    {
      /*
        Image has no transparency channel, so we are free to use it.
      */
      (void) SetImageAlphaChannel(tmp_image,SetAlphaChannel,exception);
      /* note: the comma operator sequences the distort before the cleanup */
      resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception),
      tmp_image=DestroyImage(tmp_image);
      if (resize_image == (Image *) NULL)
        return((Image *) NULL);
      (void) SetImageAlphaChannel(resize_image,DeactivateAlphaChannel,
        exception);
    }
  else
    {
      /*
        Image has transparency so handle colors and alpha separately.
        Basically we need to separate Virtual-Pixel alpha in the resized
        image, so only the actual original images alpha channel is used.

        distort alpha channel separately
      */
      Image
        *resize_alpha;

      (void) SetImageAlphaChannel(tmp_image,ExtractAlphaChannel,exception);
      (void) SetImageAlphaChannel(tmp_image,OpaqueAlphaChannel,exception);
      resize_alpha=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception),
      tmp_image=DestroyImage(tmp_image);
      if (resize_alpha == (Image *) NULL)
        return((Image *) NULL);
      /* distort the actual image containing alpha + VP alpha */
      tmp_image=CloneImage(image,0,0,MagickTrue,exception);
      if (tmp_image == (Image *) NULL)
        {
          /* FIX: release the already distorted alpha image on this failure
             path too (the DistortImage failure path below already did) */
          resize_alpha=DestroyImage(resize_alpha);
          return((Image *) NULL);
        }
      (void) SetImageVirtualPixelMethod(tmp_image,
        TransparentVirtualPixelMethod,exception);
      resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception),
      tmp_image=DestroyImage(tmp_image);
      if (resize_image == (Image *) NULL)
        {
          resize_alpha=DestroyImage(resize_alpha);
          return((Image *) NULL);
        }
      /* replace resize image's alpha with the separately distorted alpha */
      (void) SetImageAlphaChannel(resize_image,OffAlphaChannel,exception);
      (void) SetImageAlphaChannel(resize_alpha,OffAlphaChannel,exception);
      (void) CompositeImage(resize_image,resize_alpha,CopyAlphaCompositeOp,
        MagickTrue,0,0,exception);
      resize_alpha=DestroyImage(resize_alpha);
    }
  (void) SetImageVirtualPixelMethod(resize_image,vp_save,exception);
  /*
    Clean up the results of the Distortion: crop the (possibly oversized)
    distorted image back to exactly the requested columns x rows.
  */
  crop_area.width=columns;
  crop_area.height=rows;
  crop_area.x=0;
  crop_area.y=0;
  tmp_image=resize_image;
  resize_image=CropImage(tmp_image,&crop_area,exception);
  tmp_image=DestroyImage(tmp_image);
  if (resize_image != (Image *) NULL)
    {
      /* make the result look like a plain resize: restore the alpha trait
         and compose method, and clear any virtual canvas geometry */
      resize_image->alpha_trait=image->alpha_trait;
      resize_image->compose=image->compose;
      resize_image->page.width=0;
      resize_image->page.height=0;
    }
  return(resize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D i s t o r t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DistortImage() distorts an image using various distortion methods, by
% mapping color lookups of the source image to a new destination image
% usually of the same size as the source image, unless 'bestfit' is set to
% true.
%
% If 'bestfit' is enabled, and distortion allows it, the destination image is
% adjusted to ensure the whole source 'image' will just fit within the final
% destination image, which will be sized and offset accordingly. Also in
% many cases the virtual offset of the source image will be taken into
% account in the mapping.
%
% If the '-verbose' control option has been set print to standard error the
% equivalent '-fx' formula with coefficients for the function, if practical.
%
% The format of the DistortImage() method is:
%
% Image *DistortImage(const Image *image,const DistortMethod method,
% const size_t number_arguments,const double *arguments,
% MagickBooleanType bestfit, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be distorted.
%
% o method: the method of image distortion.
%
% ArcDistortion always ignores source image offset, and always
% 'bestfit' the destination image with the top left corner offset
% relative to the polar mapping center.
%
% Affine, Perspective, and Bilinear, do least squares fitting of the
% distortion when more than the minimum number of control point pairs
% are provided.
%
% Perspective, and Bilinear, fall back to a Affine distortion when less
% than 4 control point pairs are provided. While Affine distortions
% let you use any number of control point pairs, that is Zero pairs is
% a No-Op (viewport only) distortion, one pair is a translation and
% two pairs of control points do a scale-rotate-translate, without any
% shearing.
%
% o number_arguments: the number of arguments given.
%
% o arguments: an array of floating point arguments for this method.
%
% o bestfit: Attempt to 'bestfit' the size of the resulting image.
% This also forces the resulting image to be a 'layered' virtual
% canvas image. Can be overridden using 'distort:viewport' setting.
%
% o exception: return any errors or warnings in this structure
%
% Extra Controls from Image meta-data (artifacts)...
%
% o "verbose"
% Output to stderr alternatives, internal coefficients, and FX
% equivalents for the distortion operation (if feasible).
% This forms an extra check of the distortion method, and allows users
% access to the internal constants IM calculates for the distortion.
%
% o "distort:viewport"
% Directly set the output image canvas area and offset to use for the
% resulting image, rather than use the original images canvas, or a
% calculated 'bestfit' canvas.
%
% o "distort:scale"
% Scale the size of the output canvas by this amount to provide a
% method of Zooming, and for super-sampling the results.
%
% Other settings that can affect results include
%
% o 'interpolate' For source image lookups (scale enlargements)
%
% o 'filter' Set filter to use for area-resampling (scale shrinking).
% Set to 'point' to turn off and use 'interpolate' lookup
% instead
%
*/
MagickExport Image *DistortImage(const Image *image, DistortMethod method,
const size_t number_arguments,const double *arguments,
MagickBooleanType bestfit,ExceptionInfo *exception)
{
#define DistortImageTag "Distort/Image"
double
*coeff,
output_scaling;
Image
*distort_image;
RectangleInfo
geometry; /* geometry of the distorted space viewport */
MagickBooleanType
viewport_given;
PixelInfo
invalid; /* the color to assign when distort result is invalid */
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/*
Handle Special Compound Distortions
*/
if ( method == ResizeDistortion )
{
if ( number_arguments != 2 )
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : '%s'","Resize",
"Invalid number of args: 2 only");
return((Image *) NULL);
}
distort_image=DistortResizeImage(image,(size_t)arguments[0],
(size_t)arguments[1], exception);
return(distort_image);
}
/*
Convert input arguments (usually as control points for reverse mapping)
into mapping coefficients to apply the distortion.
Note that some distortions are mapped to other distortions,
and as such do not require specific code after this point.
*/
coeff = GenerateCoefficients(image, &method, number_arguments,
arguments, 0, exception);
if ( coeff == (double *) NULL )
return((Image *) NULL);
/*
Determine the size and offset for a 'bestfit' destination.
Usally the four corners of the source image is enough.
*/
/* default output image bounds, when no 'bestfit' is requested */
geometry.width=image->columns;
geometry.height=image->rows;
geometry.x=0;
geometry.y=0;
if ( method == ArcDistortion ) {
bestfit = MagickTrue; /* always calculate a 'best fit' viewport */
}
/* Work out the 'best fit', (required for ArcDistortion) */
if ( bestfit ) {
PointInfo
s,d,min,max; /* source, dest coords --mapping--> min, max coords */
MagickBooleanType
fix_bounds = MagickTrue; /* enlarge bounds for VP handling */
s.x=s.y=min.x=max.x=min.y=max.y=0.0; /* keep compiler happy */
/* defines to figure out the bounds of the distorted image */
#define InitalBounds(p) \
{ \
/* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \
min.x = max.x = p.x; \
min.y = max.y = p.y; \
}
#define ExpandBounds(p) \
{ \
/* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \
min.x = MagickMin(min.x,p.x); \
max.x = MagickMax(max.x,p.x); \
min.y = MagickMin(min.y,p.y); \
max.y = MagickMax(max.y,p.y); \
}
switch (method)
{
case AffineDistortion:
{ double inverse[6];
InvertAffineCoefficients(coeff, inverse);
s.x = (double) image->page.x;
s.y = (double) image->page.y;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
InitalBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
s.x = (double) image->page.x;
s.y = (double) image->page.y+image->rows;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y+image->rows;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
break;
}
case PerspectiveDistortion:
{ double inverse[8], scale;
InvertPerspectiveCoefficients(coeff, inverse);
s.x = (double) image->page.x;
s.y = (double) image->page.y;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
InitalBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
s.x = (double) image->page.x;
s.y = (double) image->page.y+image->rows;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y+image->rows;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
break;
}
case ArcDistortion:
{ double a, ca, sa;
/* Forward Map Corners */
a = coeff[0]-coeff[1]/2; ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
InitalBounds(d);
d.x = (coeff[2]-coeff[3])*ca;
d.y = (coeff[2]-coeff[3])*sa;
ExpandBounds(d);
a = coeff[0]+coeff[1]/2; ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
ExpandBounds(d);
d.x = (coeff[2]-coeff[3])*ca;
d.y = (coeff[2]-coeff[3])*sa;
ExpandBounds(d);
/* Orthogonal points along top of arc */
for( a=(double) (ceil((double) ((coeff[0]-coeff[1]/2.0)/MagickPI2))*MagickPI2);
a<(coeff[0]+coeff[1]/2.0); a+=MagickPI2 ) {
ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
ExpandBounds(d);
}
/*
Convert the angle_to_width and radius_to_height
to appropriate scaling factors, to allow faster processing
in the mapping function.
*/
coeff[1] = (double) (Magick2PI*image->columns/coeff[1]);
coeff[3] = (double)image->rows/coeff[3];
break;
}
case PolarDistortion:
{
if (number_arguments < 2)
coeff[2] = coeff[3] = 0.0;
min.x = coeff[2]-coeff[0];
max.x = coeff[2]+coeff[0];
min.y = coeff[3]-coeff[0];
max.y = coeff[3]+coeff[0];
/* should be about 1.0 if Rmin = 0 */
coeff[7]=(double) geometry.height/(coeff[0]-coeff[1]);
break;
}
case DePolarDistortion:
{
/* direct calculation as it needs to tile correctly
* for reversibility in a DePolar-Polar cycle */
fix_bounds = MagickFalse;
geometry.x = geometry.y = 0;
geometry.height = (size_t) ceil(coeff[0]-coeff[1]);
geometry.width = (size_t)
ceil((coeff[0]-coeff[1])*(coeff[5]-coeff[4])*0.5);
/* correct scaling factors relative to new size */
coeff[6]=(coeff[5]-coeff[4])/geometry.width; /* changed width */
coeff[7]=(coeff[0]-coeff[1])/geometry.height; /* should be about 1.0 */
break;
}
case Cylinder2PlaneDistortion:
{
/* direct calculation so center of distortion is either a pixel
* center, or pixel edge. This allows for reversibility of the
* distortion */
geometry.x = geometry.y = 0;
geometry.width = (size_t) ceil( 2.0*coeff[1]*tan(coeff[0]/2.0) );
geometry.height = (size_t) ceil( 2.0*coeff[3]/cos(coeff[0]/2.0) );
/* correct center of distortion relative to new size */
coeff[4] = (double) geometry.width/2.0;
coeff[5] = (double) geometry.height/2.0;
fix_bounds = MagickFalse;
break;
}
case Plane2CylinderDistortion:
{
/* direct calculation center is either pixel center, or pixel edge
* so as to allow reversibility of the image distortion */
geometry.x = geometry.y = 0;
geometry.width = (size_t) ceil(coeff[0]*coeff[1]); /* FOV * radius */
geometry.height = (size_t) (2*coeff[3]); /* input image height */
/* correct center of distortion relative to new size */
coeff[4] = (double) geometry.width/2.0;
coeff[5] = (double) geometry.height/2.0;
fix_bounds = MagickFalse;
break;
}
case ShepardsDistortion:
case BilinearForwardDistortion:
case BilinearReverseDistortion:
#if 0
case QuadrilateralDistortion:
#endif
case PolynomialDistortion:
case BarrelDistortion:
case BarrelInverseDistortion:
default:
/* no calculated bestfit available for these distortions */
bestfit = MagickFalse;
fix_bounds = MagickFalse;
break;
}
/* Set the output image geometry to calculated 'bestfit'.
Yes this tends to 'over do' the file image size, ON PURPOSE!
Do not do this for DePolar which needs to be exact for virtual tiling.
*/
if ( fix_bounds ) {
geometry.x = (ssize_t) floor(min.x-0.5);
geometry.y = (ssize_t) floor(min.y-0.5);
geometry.width=(size_t) ceil(max.x-geometry.x+0.5);
geometry.height=(size_t) ceil(max.y-geometry.y+0.5);
}
} /* end bestfit destination image calculations */
/* The user provided a 'viewport' expert option which may
overrides some parts of the current output image geometry.
This also overrides its default 'bestfit' setting.
*/
{ const char *artifact=GetImageArtifact(image,"distort:viewport");
viewport_given = MagickFalse;
if ( artifact != (const char *) NULL ) {
MagickStatusType flags=ParseAbsoluteGeometry(artifact,&geometry);
if (flags==NoValue)
(void) ThrowMagickException(exception,GetMagickModule(),
OptionWarning,"InvalidSetting","'%s' '%s'",
"distort:viewport",artifact);
else
viewport_given = MagickTrue;
}
}
/* Verbose output */
if (IsStringTrue(GetImageArtifact(image,"verbose")) != MagickFalse) {
register ssize_t
i;
char image_gen[MagickPathExtent];
const char *lookup;
/* Set destination image size and virtual offset */
if ( bestfit || viewport_given ) {
(void) FormatLocaleString(image_gen, MagickPathExtent," -size %.20gx%.20g "
"-page %+.20g%+.20g xc: +insert \\\n",(double) geometry.width,
(double) geometry.height,(double) geometry.x,(double) geometry.y);
lookup="v.p{ xx-v.page.x-.5, yy-v.page.y-.5 }";
}
else {
image_gen[0] = '\0'; /* no destination to generate */
lookup = "p{ xx-page.x-.5, yy-page.y-.5 }"; /* simplify lookup */
}
switch (method)
{
case AffineDistortion:
{
double
*inverse;
inverse=(double *) AcquireQuantumMemory(6,sizeof(*inverse));
if (inverse == (double *) NULL)
{
coeff=(double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","%s","DistortImages");
return((Image *) NULL);
}
InvertAffineCoefficients(coeff, inverse);
CoefficientsToAffineArgs(inverse);
(void) FormatLocaleFile(stderr, "Affine Projection:\n");
(void) FormatLocaleFile(stderr,
" -distort AffineProjection \\\n '");
for (i=0; i < 5; i++)
(void) FormatLocaleFile(stderr, "%lf,", inverse[i]);
(void) FormatLocaleFile(stderr, "%lf'\n", inverse[5]);
inverse=(double *) RelinquishMagickMemory(inverse);
(void) FormatLocaleFile(stderr, "Affine Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr," xx=%+lf*ii %+lf*jj %+lf;\n",
coeff[0],coeff[1],coeff[2]);
(void) FormatLocaleFile(stderr," yy=%+lf*ii %+lf*jj %+lf;\n",
coeff[3],coeff[4],coeff[5]);
(void) FormatLocaleFile(stderr," %s' \\\n",lookup);
break;
}
case PerspectiveDistortion:
{
double
*inverse;
inverse=(double *) AcquireQuantumMemory(8,sizeof(*inverse));
if (inverse == (double *) NULL)
{
coeff=(double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","%s",
"DistortCoefficients");
return((Image *) NULL);
}
InvertPerspectiveCoefficients(coeff, inverse);
(void) FormatLocaleFile(stderr,"Perspective Projection:\n");
(void) FormatLocaleFile(stderr,
" -distort PerspectiveProjection \\\n '");
for (i=0; i < 4; i++)
(void) FormatLocaleFile(stderr, "%.*g, ",GetMagickPrecision(),
inverse[i]);
(void) FormatLocaleFile(stderr, "\n ");
for ( ; i < 7; i++)
(void) FormatLocaleFile(stderr, "%.*g, ",GetMagickPrecision(),
inverse[i]);
(void) FormatLocaleFile(stderr, "%.*g'\n",GetMagickPrecision(),
inverse[7]);
inverse=(double *) RelinquishMagickMemory(inverse);
(void) FormatLocaleFile(stderr,"Perspective Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%.1024s",image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr," rr=%+.*g*ii %+.*g*jj + 1;\n",
GetMagickPrecision(),coeff[6],GetMagickPrecision(),coeff[7]);
(void) FormatLocaleFile(stderr,
" xx=(%+.*g*ii %+.*g*jj %+.*g)/rr;\n",
GetMagickPrecision(),coeff[0],GetMagickPrecision(),coeff[1],
GetMagickPrecision(),coeff[2]);
(void) FormatLocaleFile(stderr,
" yy=(%+.*g*ii %+.*g*jj %+.*g)/rr;\n",
GetMagickPrecision(),coeff[3],GetMagickPrecision(),coeff[4],
GetMagickPrecision(),coeff[5]);
(void) FormatLocaleFile(stderr," rr%s0 ? %s : blue' \\\n",
coeff[8] < 0.0 ? "<" : ">", lookup);
break;
}
case BilinearForwardDistortion:
{
(void) FormatLocaleFile(stderr,"BilinearForward Mapping Equations:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr," i = %+lf*x %+lf*y %+lf*x*y %+lf;\n",
coeff[0],coeff[1],coeff[2],coeff[3]);
(void) FormatLocaleFile(stderr," j = %+lf*x %+lf*y %+lf*x*y %+lf;\n",
coeff[4],coeff[5],coeff[6],coeff[7]);
#if 0
/* for debugging */
(void) FormatLocaleFile(stderr, " c8 = %+lf c9 = 2*a = %+lf;\n",
coeff[8], coeff[9]);
#endif
(void) FormatLocaleFile(stderr,
"BilinearForward Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n",0.5-coeff[3],0.5-
coeff[7]);
(void) FormatLocaleFile(stderr," bb=%lf*ii %+lf*jj %+lf;\n",
coeff[6], -coeff[2], coeff[8]);
/* Handle Special degenerate (non-quadratic) or trapezoidal case */
if (coeff[9] != 0)
{
(void) FormatLocaleFile(stderr,
" rt=bb*bb %+lf*(%lf*ii%+lf*jj);\n",-2*coeff[9],coeff[4],
-coeff[0]);
(void) FormatLocaleFile(stderr,
" yy=( -bb + sqrt(rt) ) / %lf;\n",coeff[9]);
}
else
(void) FormatLocaleFile(stderr," yy=(%lf*ii%+lf*jj)/bb;\n",
-coeff[4],coeff[0]);
(void) FormatLocaleFile(stderr,
" xx=(ii %+lf*yy)/(%lf %+lf*yy);\n",-coeff[1],coeff[0],
coeff[2]);
if ( coeff[9] != 0 )
(void) FormatLocaleFile(stderr," (rt < 0 ) ? red : %s'\n",
lookup);
else
(void) FormatLocaleFile(stderr," %s' \\\n", lookup);
break;
}
case BilinearReverseDistortion:
{
#if 0
(void) FormatLocaleFile(stderr, "Polynomial Projection Distort:\n");
(void) FormatLocaleFile(stderr, " -distort PolynomialProjection \\\n");
(void) FormatLocaleFile(stderr, " '1.5, %lf, %lf, %lf, %lf,\n",
coeff[3], coeff[0], coeff[1], coeff[2]);
(void) FormatLocaleFile(stderr, " %lf, %lf, %lf, %lf'\n",
coeff[7], coeff[4], coeff[5], coeff[6]);
#endif
(void) FormatLocaleFile(stderr,
"BilinearReverse Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr,
" xx=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n",coeff[0],coeff[1],
coeff[2], coeff[3]);
(void) FormatLocaleFile(stderr,
" yy=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n",coeff[4],coeff[5],
coeff[6], coeff[7]);
(void) FormatLocaleFile(stderr," %s' \\\n", lookup);
break;
}
case PolynomialDistortion:
{
size_t nterms = (size_t) coeff[1];
(void) FormatLocaleFile(stderr,
"Polynomial (order %lg, terms %lu), FX Equivelent\n",coeff[0],
(unsigned long) nterms);
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " xx =");
for (i=0; i < (ssize_t) nterms; i++)
{
if ((i != 0) && (i%4 == 0))
(void) FormatLocaleFile(stderr, "\n ");
(void) FormatLocaleFile(stderr," %+lf%s",coeff[2+i],
poly_basis_str(i));
}
(void) FormatLocaleFile(stderr,";\n yy =");
for (i=0; i < (ssize_t) nterms; i++)
{
if ((i != 0) && (i%4 == 0))
(void) FormatLocaleFile(stderr,"\n ");
(void) FormatLocaleFile(stderr," %+lf%s",coeff[2+i+nterms],
poly_basis_str(i));
}
(void) FormatLocaleFile(stderr,";\n %s' \\\n", lookup);
break;
}
case ArcDistortion:
{
(void) FormatLocaleFile(stderr,"Arc Distort, Internal Coefficients:\n");
for (i=0; i < 5; i++)
(void) FormatLocaleFile(stderr,
" c%.20g = %+lf\n",(double) i,coeff[i]);
(void) FormatLocaleFile(stderr,"Arc Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr," -fx 'ii=i+page.x; jj=j+page.y;\n");
(void) FormatLocaleFile(stderr," xx=(atan2(jj,ii)%+lf)/(2*pi);\n",
-coeff[0]);
(void) FormatLocaleFile(stderr," xx=xx-round(xx);\n");
(void) FormatLocaleFile(stderr," xx=xx*%lf %+lf;\n",coeff[1],
coeff[4]);
(void) FormatLocaleFile(stderr,
" yy=(%lf - hypot(ii,jj)) * %lf;\n",coeff[2],coeff[3]);
(void) FormatLocaleFile(stderr," v.p{xx-.5,yy-.5}' \\\n");
break;
}
case PolarDistortion:
{
(void) FormatLocaleFile(stderr,"Polar Distort, Internal Coefficents\n");
for (i=0; i < 8; i++)
(void) FormatLocaleFile(stderr," c%.20g = %+lf\n",(double) i,
coeff[i]);
(void) FormatLocaleFile(stderr,"Polar Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n",-coeff[2],-coeff[3]);
(void) FormatLocaleFile(stderr," xx=(atan2(ii,jj)%+lf)/(2*pi);\n",
-(coeff[4]+coeff[5])/2 );
(void) FormatLocaleFile(stderr," xx=xx-round(xx);\n");
(void) FormatLocaleFile(stderr," xx=xx*2*pi*%lf + v.w/2;\n",
coeff[6] );
(void) FormatLocaleFile(stderr," yy=(hypot(ii,jj)%+lf)*%lf;\n",
-coeff[1],coeff[7] );
(void) FormatLocaleFile(stderr," v.p{xx-.5,yy-.5}' \\\n");
break;
}
case DePolarDistortion:
{
(void) FormatLocaleFile(stderr,
"DePolar Distort, Internal Coefficents\n");
for (i=0; i < 8; i++)
(void) FormatLocaleFile(stderr," c%.20g = %+lf\n",(double) i,
coeff[i]);
(void) FormatLocaleFile(stderr,"DePolar Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr," -fx 'aa=(i+.5)*%lf %+lf;\n",
coeff[6],+coeff[4]);
(void) FormatLocaleFile(stderr," rr=(j+.5)*%lf %+lf;\n",
coeff[7],+coeff[1]);
(void) FormatLocaleFile(stderr," xx=rr*sin(aa) %+lf;\n",
coeff[2]);
(void) FormatLocaleFile(stderr," yy=rr*cos(aa) %+lf;\n",
coeff[3]);
(void) FormatLocaleFile(stderr," v.p{xx-.5,yy-.5}' \\\n");
break;
}
case Cylinder2PlaneDistortion:
{
(void) FormatLocaleFile(stderr,
"Cylinder to Plane Distort, Internal Coefficents\n");
(void) FormatLocaleFile(stderr," cylinder_radius = %+lf\n",coeff[1]);
(void) FormatLocaleFile(stderr,
"Cylinder to Plane Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n",-coeff[4],
-coeff[5]);
(void) FormatLocaleFile(stderr," aa=atan(ii/%+lf);\n",coeff[1]);
(void) FormatLocaleFile(stderr," xx=%lf*aa%+lf;\n",
coeff[1],coeff[2]);
(void) FormatLocaleFile(stderr," yy=jj*cos(aa)%+lf;\n",coeff[3]);
(void) FormatLocaleFile(stderr," %s' \\\n", lookup);
break;
}
case Plane2CylinderDistortion:
{
(void) FormatLocaleFile(stderr,
"Plane to Cylinder Distort, Internal Coefficents\n");
(void) FormatLocaleFile(stderr," cylinder_radius = %+lf\n",coeff[1]);
(void) FormatLocaleFile(stderr,
"Plane to Cylinder Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n",-coeff[4],
-coeff[5]);
(void) FormatLocaleFile(stderr," ii=ii/%+lf;\n",coeff[1]);
(void) FormatLocaleFile(stderr," xx=%lf*tan(ii)%+lf;\n",coeff[1],
coeff[2] );
(void) FormatLocaleFile(stderr," yy=jj/cos(ii)%+lf;\n",coeff[3]);
(void) FormatLocaleFile(stderr," %s' \\\n", lookup);
break;
}
case BarrelDistortion:
case BarrelInverseDistortion:
{
double
xc,
yc;
/*
NOTE: This does the barrel roll in pixel coords not image coords
The internal distortion must do it in image coordinates,
so that is what the center coeff (8,9) is given in.
*/
xc=((double)image->columns-1.0)/2.0+image->page.x;
yc=((double)image->rows-1.0)/2.0+image->page.y;
(void) FormatLocaleFile(stderr, "Barrel%s Distort, FX Equivelent:\n",
method == BarrelDistortion ? "" : "Inv");
(void) FormatLocaleFile(stderr, "%s", image_gen);
if ( fabs(coeff[8]-xc-0.5) < 0.1 && fabs(coeff[9]-yc-0.5) < 0.1 )
(void) FormatLocaleFile(stderr," -fx 'xc=(w-1)/2; yc=(h-1)/2;\n");
else
(void) FormatLocaleFile(stderr," -fx 'xc=%lf; yc=%lf;\n",coeff[8]-
0.5,coeff[9]-0.5);
(void) FormatLocaleFile(stderr,
" ii=i-xc; jj=j-yc; rr=hypot(ii,jj);\n");
(void) FormatLocaleFile(stderr,
" ii=ii%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n",
method == BarrelDistortion ? "*" : "/",coeff[0],coeff[1],coeff[2],
coeff[3]);
(void) FormatLocaleFile(stderr,
" jj=jj%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n",
method == BarrelDistortion ? "*" : "/",coeff[4],coeff[5],coeff[6],
coeff[7]);
(void) FormatLocaleFile(stderr," v.p{fx*ii+xc,fy*jj+yc}' \\\n");
}
default:
break;
}
}
/*
The user provided a 'scale' expert option will scale the output image size,
by the factor given allowing for super-sampling of the distorted image
space. Any scaling factors must naturally be halved as a result.
*/
{ const char *artifact;
artifact=GetImageArtifact(image,"distort:scale");
output_scaling = 1.0;
if (artifact != (const char *) NULL) {
output_scaling = fabs(StringToDouble(artifact,(char **) NULL));
geometry.width=(size_t) (output_scaling*geometry.width+0.5);
geometry.height=(size_t) (output_scaling*geometry.height+0.5);
geometry.x=(ssize_t) (output_scaling*geometry.x+0.5);
geometry.y=(ssize_t) (output_scaling*geometry.y+0.5);
if ( output_scaling < 0.1 ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s", "-set option:distort:scale" );
return((Image *) NULL);
}
output_scaling = 1/output_scaling;
}
}
#define ScaleFilter(F,A,B,C,D) \
ScaleResampleFilter( (F), \
output_scaling*(A), output_scaling*(B), \
output_scaling*(C), output_scaling*(D) )
/*
Initialize the distort image attributes.
*/
distort_image=CloneImage(image,geometry.width,geometry.height,MagickTrue,
exception);
if (distort_image == (Image *) NULL)
{
coeff=(double *) RelinquishMagickMemory(coeff);
return((Image *) NULL);
}
/* if image is ColorMapped - change it to DirectClass */
if (SetImageStorageClass(distort_image,DirectClass,exception) == MagickFalse)
{
coeff=(double *) RelinquishMagickMemory(coeff);
distort_image=DestroyImage(distort_image);
return((Image *) NULL);
}
if ((IsPixelInfoGray(&distort_image->background_color) == MagickFalse) &&
(IsGrayColorspace(distort_image->colorspace) != MagickFalse))
(void) SetImageColorspace(distort_image,sRGBColorspace,exception);
if (distort_image->background_color.alpha_trait != UndefinedPixelTrait)
distort_image->alpha_trait=BlendPixelTrait;
distort_image->page.x=geometry.x;
distort_image->page.y=geometry.y;
ConformPixelInfo(distort_image,&distort_image->matte_color,&invalid,
exception);
{ /* ----- MAIN CODE -----
Sample the source image to each pixel in the distort image.
*/
CacheView
*distort_view;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
zero;
ResampleFilter
**magick_restrict resample_filter;
ssize_t
j;
status=MagickTrue;
progress=0;
GetPixelInfo(distort_image,&zero);
resample_filter=AcquireResampleFilterThreadSet(image,
UndefinedVirtualPixelMethod,MagickFalse,exception);
distort_view=AcquireAuthenticCacheView(distort_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,distort_image,distort_image->rows,1)
#endif
for (j=0; j < (ssize_t) distort_image->rows; j++)
{
const int
id = GetOpenMPThreadId();
double
validity; /* how mathematically valid is this the mapping */
MagickBooleanType
sync;
PixelInfo
pixel; /* pixel color to assign to distorted image */
PointInfo
d,
s; /* transform destination image x,y to source image x,y */
register ssize_t
i;
register Quantum
*magick_restrict q;
q=QueueCacheViewAuthenticPixels(distort_view,0,j,distort_image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel=zero;
/* Define constant scaling vectors for Affine Distortions
Other methods are either variable, or use interpolated lookup
*/
switch (method)
{
case AffineDistortion:
ScaleFilter( resample_filter[id],
coeff[0], coeff[1],
coeff[3], coeff[4] );
break;
default:
break;
}
/* Initialize default pixel validity
* negative: pixel is invalid output 'matte_color'
* 0.0 to 1.0: antialiased, mix with resample output
* 1.0 or greater: use resampled output.
*/
validity = 1.0;
for (i=0; i < (ssize_t) distort_image->columns; i++)
{
/* map pixel coordinate to distortion space coordinate */
d.x = (double) (geometry.x+i+0.5)*output_scaling;
d.y = (double) (geometry.y+j+0.5)*output_scaling;
s = d; /* default is a no-op mapping */
switch (method)
{
case AffineDistortion:
{
s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2];
s.y=coeff[3]*d.x+coeff[4]*d.y+coeff[5];
/* Affine partial derivitives are constant -- set above */
break;
}
case PerspectiveDistortion:
{
double
p,q,r,abs_r,abs_c6,abs_c7,scale;
/* perspective is a ratio of affines */
p=coeff[0]*d.x+coeff[1]*d.y+coeff[2];
q=coeff[3]*d.x+coeff[4]*d.y+coeff[5];
r=coeff[6]*d.x+coeff[7]*d.y+1.0;
/* Pixel Validity -- is it a 'sky' or 'ground' pixel */
validity = (r*coeff[8] < 0.0) ? 0.0 : 1.0;
/* Determine horizon anti-alias blending */
abs_r = fabs(r)*2;
abs_c6 = fabs(coeff[6]);
abs_c7 = fabs(coeff[7]);
if ( abs_c6 > abs_c7 ) {
if ( abs_r < abs_c6*output_scaling )
validity = 0.5 - coeff[8]*r/(coeff[6]*output_scaling);
}
else if ( abs_r < abs_c7*output_scaling )
validity = 0.5 - coeff[8]*r/(coeff[7]*output_scaling);
/* Perspective Sampling Point (if valid) */
if ( validity > 0.0 ) {
/* divide by r affine, for perspective scaling */
scale = 1.0/r;
s.x = p*scale;
s.y = q*scale;
/* Perspective Partial Derivatives or Scaling Vectors */
scale *= scale;
ScaleFilter( resample_filter[id],
(r*coeff[0] - p*coeff[6])*scale,
(r*coeff[1] - p*coeff[7])*scale,
(r*coeff[3] - q*coeff[6])*scale,
(r*coeff[4] - q*coeff[7])*scale );
}
break;
}
case BilinearReverseDistortion:
{
/* Reversed Mapped is just a simple polynomial */
s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2]*d.x*d.y+coeff[3];
s.y=coeff[4]*d.x+coeff[5]*d.y
+coeff[6]*d.x*d.y+coeff[7];
/* Bilinear partial derivitives of scaling vectors */
ScaleFilter( resample_filter[id],
coeff[0] + coeff[2]*d.y,
coeff[1] + coeff[2]*d.x,
coeff[4] + coeff[6]*d.y,
coeff[5] + coeff[6]*d.x );
break;
}
case BilinearForwardDistortion:
{
/* Forward mapped needs reversed polynomial equations
* which unfortunatally requires a square root! */
double b,c;
d.x -= coeff[3]; d.y -= coeff[7];
b = coeff[6]*d.x - coeff[2]*d.y + coeff[8];
c = coeff[4]*d.x - coeff[0]*d.y;
validity = 1.0;
/* Handle Special degenerate (non-quadratic) case
* Currently without horizon anti-alising */
if ( fabs(coeff[9]) < MagickEpsilon )
s.y = -c/b;
else {
c = b*b - 2*coeff[9]*c;
if ( c < 0.0 )
validity = 0.0;
else
s.y = ( -b + sqrt(c) )/coeff[9];
}
if ( validity > 0.0 )
s.x = ( d.x - coeff[1]*s.y) / ( coeff[0] + coeff[2]*s.y );
/* NOTE: the sign of the square root should be -ve for parts
where the source image becomes 'flipped' or 'mirrored'.
FUTURE: Horizon handling
FUTURE: Scaling factors or Deritives (how?)
*/
break;
}
#if 0
case BilinearDistortion:
/* Bilinear mapping of any Quadrilateral to any Quadrilateral */
/* UNDER DEVELOPMENT */
break;
#endif
case PolynomialDistortion:
{
/* multi-ordered polynomial */
register ssize_t
k;
ssize_t
nterms=(ssize_t)coeff[1];
PointInfo
du,dv; /* the du,dv vectors from unit dx,dy -- derivatives */
s.x=s.y=du.x=du.y=dv.x=dv.y=0.0;
for(k=0; k < nterms; k++) {
s.x += poly_basis_fn(k,d.x,d.y)*coeff[2+k];
du.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k];
du.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k];
s.y += poly_basis_fn(k,d.x,d.y)*coeff[2+k+nterms];
dv.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k+nterms];
dv.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k+nterms];
}
ScaleFilter( resample_filter[id], du.x,du.y,dv.x,dv.y );
break;
}
case ArcDistortion:
{
/* what is the angle and radius in the destination image */
s.x = (double) ((atan2(d.y,d.x) - coeff[0])/Magick2PI);
s.x -= MagickRound(s.x); /* angle */
s.y = hypot(d.x,d.y); /* radius */
/* Arc Distortion Partial Scaling Vectors
Are derived by mapping the perpendicular unit vectors
dR and dA*R*2PI rather than trying to map dx and dy
The results is a very simple orthogonal aligned ellipse.
*/
if ( s.y > MagickEpsilon )
ScaleFilter( resample_filter[id],
(double) (coeff[1]/(Magick2PI*s.y)), 0, 0, coeff[3] );
else
ScaleFilter( resample_filter[id],
distort_image->columns*2, 0, 0, coeff[3] );
/* now scale the angle and radius for source image lookup point */
s.x = s.x*coeff[1] + coeff[4] + image->page.x +0.5;
s.y = (coeff[2] - s.y) * coeff[3] + image->page.y;
break;
}
case PolarDistortion:
{ /* 2D Cartesian to Polar View */
d.x -= coeff[2];
d.y -= coeff[3];
s.x = atan2(d.x,d.y) - (coeff[4]+coeff[5])/2;
s.x /= Magick2PI;
s.x -= MagickRound(s.x);
s.x *= Magick2PI; /* angle - relative to centerline */
s.y = hypot(d.x,d.y); /* radius */
/* Polar Scaling vectors are based on mapping dR and dA vectors
This results in very simple orthogonal scaling vectors
*/
if ( s.y > MagickEpsilon )
ScaleFilter( resample_filter[id],
(double) (coeff[6]/(Magick2PI*s.y)), 0, 0, coeff[7] );
else
ScaleFilter( resample_filter[id],
distort_image->columns*2, 0, 0, coeff[7] );
/* now finish mapping radius/angle to source x,y coords */
s.x = s.x*coeff[6] + (double)image->columns/2.0 + image->page.x;
s.y = (s.y-coeff[1])*coeff[7] + image->page.y;
break;
}
case DePolarDistortion:
{ /* 2D Polar to Cartesian */
/* ignore all destination virtual offsets */
d.x = ((double)i+0.5)*output_scaling*coeff[6]+coeff[4];
d.y = ((double)j+0.5)*output_scaling*coeff[7]+coeff[1];
s.x = d.y*sin(d.x) + coeff[2];
s.y = d.y*cos(d.x) + coeff[3];
/* derivatives are useless - better to use SuperSampling */
break;
}
case Cylinder2PlaneDistortion:
{ /* 3D Cylinder to Tangential Plane */
double ax, cx;
/* relative to center of distortion */
d.x -= coeff[4]; d.y -= coeff[5];
d.x /= coeff[1]; /* x' = x/r */
ax=atan(d.x); /* aa = atan(x/r) = u/r */
cx=cos(ax); /* cx = cos(atan(x/r)) = 1/sqrt(x^2+u^2) */
s.x = coeff[1]*ax; /* u = r*atan(x/r) */
s.y = d.y*cx; /* v = y*cos(u/r) */
/* derivatives... (see personnal notes) */
ScaleFilter( resample_filter[id],
1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y );
#if 0
if ( i == 0 && j == 0 ) {
fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y);
fprintf(stderr, "phi = %lf\n", (double)(ax * 180.0/MagickPI) );
fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n",
1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y );
fflush(stderr); }
#endif
/* add center of distortion in source */
s.x += coeff[2]; s.y += coeff[3];
break;
}
case Plane2CylinderDistortion:
{ /* 3D Cylinder to Tangential Plane */
/* relative to center of distortion */
d.x -= coeff[4]; d.y -= coeff[5];
/* is pixel valid - horizon of a infinite Virtual-Pixel Plane
* (see Anthony Thyssen's personal note) */
validity = (double) (coeff[1]*MagickPI2 - fabs(d.x))/output_scaling + 0.5;
if ( validity > 0.0 ) {
double cx,tx;
d.x /= coeff[1]; /* x'= x/r */
cx = 1/cos(d.x); /* cx = 1/cos(x/r) */
tx = tan(d.x); /* tx = tan(x/r) */
s.x = coeff[1]*tx; /* u = r * tan(x/r) */
s.y = d.y*cx; /* v = y / cos(x/r) */
/* derivatives... (see Anthony Thyssen's personal notes) */
ScaleFilter( resample_filter[id],
cx*cx, 0.0, s.y*cx/coeff[1], cx );
#if 0
/*if ( i == 0 && j == 0 )*/
if ( d.x == 0.5 && d.y == 0.5 ) {
fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y);
fprintf(stderr, "radius = %lf phi = %lf validity = %lf\n",
coeff[1], (double)(d.x * 180.0/MagickPI), validity );
fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n",
cx*cx, 0.0, s.y*cx/coeff[1], cx);
fflush(stderr); }
#endif
}
/* add center of distortion in source */
s.x += coeff[2]; s.y += coeff[3];
break;
}
case BarrelDistortion:
case BarrelInverseDistortion:
{ /* Lens Barrel Distionion Correction */
double r,fx,fy,gx,gy;
/* Radial Polynomial Distortion (de-normalized) */
d.x -= coeff[8];
d.y -= coeff[9];
r = sqrt(d.x*d.x+d.y*d.y);
if ( r > MagickEpsilon ) {
fx = ((coeff[0]*r + coeff[1])*r + coeff[2])*r + coeff[3];
fy = ((coeff[4]*r + coeff[5])*r + coeff[6])*r + coeff[7];
gx = ((3*coeff[0]*r + 2*coeff[1])*r + coeff[2])/r;
gy = ((3*coeff[4]*r + 2*coeff[5])*r + coeff[6])/r;
/* adjust functions and scaling for 'inverse' form */
if ( method == BarrelInverseDistortion ) {
fx = 1/fx; fy = 1/fy;
gx *= -fx*fx; gy *= -fy*fy;
}
/* Set the source pixel to lookup and EWA derivative vectors */
s.x = d.x*fx + coeff[8];
s.y = d.y*fy + coeff[9];
ScaleFilter( resample_filter[id],
gx*d.x*d.x + fx, gx*d.x*d.y,
gy*d.x*d.y, gy*d.y*d.y + fy );
}
else {
/* Special handling to avoid divide by zero when r==0
**
** The source and destination pixels match in this case
** which was set at the top of the loop using s = d;
** otherwise... s.x=coeff[8]; s.y=coeff[9];
*/
if ( method == BarrelDistortion )
ScaleFilter( resample_filter[id],
coeff[3], 0, 0, coeff[7] );
else /* method == BarrelInverseDistortion */
/* FUTURE, trap for D==0 causing division by zero */
ScaleFilter( resample_filter[id],
1.0/coeff[3], 0, 0, 1.0/coeff[7] );
}
break;
}
case ShepardsDistortion:
{ /* Shepards Method, or Inverse Weighted Distance for
displacement around the destination image control points
The input arguments are the coefficents to the function.
This is more of a 'displacement' function rather than an
absolute distortion function.
Note: We can not determine derivatives using shepards method
so only a point sample interpolatation can be used.
*/
size_t
i;
double
denominator;
denominator = s.x = s.y = 0;
for(i=0; i<number_arguments; i+=4) {
double weight =
((double)d.x-arguments[i+2])*((double)d.x-arguments[i+2])
+ ((double)d.y-arguments[i+3])*((double)d.y-arguments[i+3]);
weight = pow(weight,coeff[0]); /* shepards power factor */
weight = ( weight < 1.0 ) ? 1.0 : 1.0/weight;
s.x += (arguments[ i ]-arguments[i+2])*weight;
s.y += (arguments[i+1]-arguments[i+3])*weight;
denominator += weight;
}
s.x /= denominator;
s.y /= denominator;
s.x += d.x; /* make it as relative displacement */
s.y += d.y;
break;
}
default:
break; /* use the default no-op given above */
}
/* map virtual canvas location back to real image coordinate */
if ( bestfit && method != ArcDistortion ) {
s.x -= image->page.x;
s.y -= image->page.y;
}
s.x -= 0.5;
s.y -= 0.5;
if ( validity <= 0.0 ) {
/* result of distortion is an invalid pixel - don't resample */
SetPixelViaPixelInfo(distort_image,&invalid,q);
}
else {
/* resample the source image to find its correct color */
(void) ResamplePixelColor(resample_filter[id],s.x,s.y,&pixel,
exception);
/* if validity between 0.0 and 1.0 mix result with invalid pixel */
if ( validity < 1.0 ) {
/* Do a blend of sample color and invalid pixel */
/* should this be a 'Blend', or an 'Over' compose */
CompositePixelInfoBlend(&pixel,validity,&invalid,(1.0-validity),
&pixel);
}
SetPixelViaPixelInfo(distort_image,&pixel,q);
}
q+=GetPixelChannels(distort_image);
}
sync=SyncCacheViewAuthenticPixels(distort_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,DistortImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
distort_view=DestroyCacheView(distort_view);
resample_filter=DestroyResampleFilterThreadSet(resample_filter);
if (status == MagickFalse)
distort_image=DestroyImage(distort_image);
}
/* Arc does not return an offset unless 'bestfit' is in effect
And the user has not provided an overriding 'viewport'.
*/
if ( method == ArcDistortion && !bestfit && !viewport_given ) {
distort_image->page.x = 0;
distort_image->page.y = 0;
}
coeff=(double *) RelinquishMagickMemory(coeff);
return(distort_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o t a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotateImage() creates a new image that is a rotated copy of an existing
% one. Positive angles rotate counter-clockwise (right-hand rule), while
% negative angles rotate clockwise. Rotated images are usually larger than
% the originals and have 'empty' triangular corners.  Empty
% triangles left over from shearing the image are filled with the background
% color defined by member 'background_color' of the image. RotateImage
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the RotateImage method is:
%
% Image *RotateImage(const Image *image,const double degrees,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: Specifies the number of degrees to rotate the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RotateImage(const Image *image,const double degrees,
  ExceptionInfo *exception)
{
  Image
    *distort_image,
    *rotate_image;

  double
    angle;

  PointInfo
    shear;

  size_t
    rotations;

  /*
    Adjust rotation angle: normalize into (-45,45] while counting the
    number of whole 90-degree quadrant rotations.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  angle=fmod(degrees,360.0);
  while (angle < -45.0)
    angle+=360.0;
  for (rotations=0; angle > 45.0; rotations++)
    angle-=90.0;
  rotations%=4;
  /* residual rotation expressed as X-shear / Y-shear factors */
  shear.x=(-tan((double) DegreesToRadians(angle)/2.0));
  shear.y=sin((double) DegreesToRadians(angle));
  if ((fabs(shear.x) < MagickEpsilon) && (fabs(shear.y) < MagickEpsilon))
    return(IntegralRotateImage(image,rotations,exception)); /* pure multiple of 90 */
  /*
    General angle: rotate via ScaleRotateTranslate distortion on a clone
    whose off-canvas pixels resolve to the background color.
  */
  distort_image=CloneImage(image,0,0,MagickTrue,exception);
  if (distort_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageVirtualPixelMethod(distort_image,BackgroundVirtualPixelMethod,
    exception);
  /* BUGFIX: the argument pointer read '°rees' - a mangled HTML entity
     ('&deg' + 'rees') that does not compile; restored to '&degrees'. */
  rotate_image=DistortImage(distort_image,ScaleRotateTranslateDistortion,1,
    &degrees,MagickTrue,exception);
  distort_image=DestroyImage(distort_image);
  return(rotate_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p a r s e C o l o r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SparseColorImage(), given a set of coordinates, interpolates the colors
% found at those coordinates, across the whole image, using various methods.
%
% The format of the SparseColorImage() method is:
%
% Image *SparseColorImage(const Image *image,
% const SparseColorMethod method,const size_t number_arguments,
% const double *arguments,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be filled in.
%
% o method: the method to fill in the gradient between the control points.
%
% The methods used for SparseColor() are often similar to methods
% used for DistortImage(), and even share the same code for determination
% of the function coefficients, though with more dimensions (or resulting
% values).
%
% o number_arguments: the number of arguments given.
%
% o arguments: array of floating point arguments for this method--
% x,y,color_values-- with color_values given as normalized values.
%
% o exception: return any errors or warnings in this structure
%
*/
MagickExport Image *SparseColorImage(const Image *image,
  const SparseColorMethod method,const size_t number_arguments,
  const double *arguments,ExceptionInfo *exception)
{
#define SparseColorTag "Distort/SparseColor"

  SparseColorMethod
    sparse_method;

  double
    *coeff;

  Image
    *sparse_image;

  size_t
    number_colors;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Determine number of color values needed per control point: one per
     channel that is active (has the update trait) on this image. */
  number_colors=0;
  if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
    number_colors++;
  if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
    number_colors++;
  if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
    number_colors++;
  if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
      (image->colorspace == CMYKColorspace))
    number_colors++;
  if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
      (image->alpha_trait != UndefinedPixelTrait))
    number_colors++;

  /*
    Convert input arguments into mapping coefficients; in this case
    we are mapping (distorting) colors, rather than coordinates.
  */
  { DistortMethod
      distort_method;

    distort_method=(DistortMethod) method;
    if ( distort_method >= SentinelDistortion )
      distort_method = ShepardsDistortion; /* Pretend to be Shepards */
    coeff = GenerateCoefficients(image, &distort_method, number_arguments,
      arguments, number_colors, exception);
    if ( coeff == (double *) NULL )
      return((Image *) NULL);
    /*
      Note some Distort Methods may fall back to other simpler methods,
      Currently the only fallback of concern is Bilinear to Affine
      (Barycentric), which is also a sparse_color method. This also ensures
      correct two and one color Barycentric handling.
    */
    sparse_method = (SparseColorMethod) distort_method;
    if ( distort_method == ShepardsDistortion )
      sparse_method = method; /* return non-distort methods to normal */
    if ( sparse_method == InverseColorInterpolate )
      coeff[0]=0.5; /* sqrt() the squared distance for inverse */
  }
  /* Verbose output: print the equivalent -fx expressions where possible */
  if (IsStringTrue(GetImageArtifact(image,"verbose")) != MagickFalse) {
    switch (sparse_method) {
      case BarycentricColorInterpolate:
      {
        register ssize_t x=0;
        (void) FormatLocaleFile(stderr, "Barycentric Sparse Color:\n");
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr, " -channel R -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr, " -channel G -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr, " -channel B -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
            (image->colorspace == CMYKColorspace))
          (void) FormatLocaleFile(stderr, " -channel K -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
            (image->alpha_trait != UndefinedPixelTrait))
          (void) FormatLocaleFile(stderr, " -channel A -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        break;
      }
      case BilinearColorInterpolate:
      {
        register ssize_t x=0;
        (void) FormatLocaleFile(stderr, "Bilinear Sparse Color\n");
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr, " -channel R -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1],
              coeff[x+2], coeff[x+3]),x+=4;
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr, " -channel G -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1],
              coeff[x+2], coeff[x+3]),x+=4;
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr, " -channel B -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1],
              coeff[x+2], coeff[x+3]),x+=4;
        if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
            (image->colorspace == CMYKColorspace))
          (void) FormatLocaleFile(stderr, " -channel K -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1],
              coeff[x+2], coeff[x+3]),x+=4;
        if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
            (image->alpha_trait != UndefinedPixelTrait))
          (void) FormatLocaleFile(stderr, " -channel A -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1],
              coeff[x+2], coeff[x+3]),x+=4;
        break;
      }
      default:
        /* sparse color method is too complex for FX emulation */
        break;
    }
  }
  /* Generate new image for generated interpolated gradient.
   * ASIDE: Actually we could have just replaced the colors of the original
   * image, but IM Core policy, is if storage class could change then clone
   * the image.
   */
  sparse_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sparse_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(sparse_image,DirectClass,exception) == MagickFalse)
    { /* if image is ColorMapped - change it to DirectClass */
      sparse_image=DestroyImage(sparse_image);
      return((Image *) NULL);
    }
  { /* ----- MAIN CODE ----- */
    CacheView
      *sparse_view;

    MagickBooleanType
      status;

    MagickOffsetType
      progress;

    ssize_t
      j;

    status=MagickTrue;
    progress=0;
    sparse_view=AcquireAuthenticCacheView(sparse_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) shared(progress,status) \
      magick_number_threads(image,sparse_image,sparse_image->rows,1)
#endif
    /* Parallel row sweep: every pixel of the clone is recolored from the
       interpolation of the control points. */
    for (j=0; j < (ssize_t) sparse_image->rows; j++)
    {
      MagickBooleanType
        sync;

      PixelInfo
        pixel;    /* pixel to assign to distorted image */

      register ssize_t
        i;

      register Quantum
        *magick_restrict q;

      q=GetCacheViewAuthenticPixels(sparse_view,0,j,sparse_image->columns,
        1,exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      GetPixelInfo(sparse_image,&pixel);
      /* NOTE(review): loop bound is image->columns while the row was
         acquired with sparse_image->columns; the two are equal here because
         sparse_image is a clone of image - confirm before changing either. */
      for (i=0; i < (ssize_t) image->columns; i++)
      {
        GetPixelInfoPixel(image,q,&pixel);
        switch (sparse_method)
        {
          case BarycentricColorInterpolate:
          {
            /* per-channel linear (affine) gradient: c0*i + c1*j + c2 */
            register ssize_t x=0;
            if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
              pixel.red = coeff[x]*i +coeff[x+1]*j
                  +coeff[x+2], x+=3;
            if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
              pixel.green = coeff[x]*i +coeff[x+1]*j
                  +coeff[x+2], x+=3;
            if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
              pixel.blue = coeff[x]*i +coeff[x+1]*j
                  +coeff[x+2], x+=3;
            if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                (image->colorspace == CMYKColorspace))
              pixel.black = coeff[x]*i +coeff[x+1]*j
                  +coeff[x+2], x+=3;
            if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                (image->alpha_trait != UndefinedPixelTrait))
              pixel.alpha = coeff[x]*i +coeff[x+1]*j
                  +coeff[x+2], x+=3;
            break;
          }
          case BilinearColorInterpolate:
          {
            /* per-channel bilinear gradient: c0*i + c1*j + c2*i*j + c3 */
            register ssize_t x=0;
            if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
              pixel.red = coeff[x]*i + coeff[x+1]*j +
                  coeff[x+2]*i*j + coeff[x+3], x+=4;
            if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
              pixel.green = coeff[x]*i + coeff[x+1]*j +
                  coeff[x+2]*i*j + coeff[x+3], x+=4;
            if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
              pixel.blue = coeff[x]*i + coeff[x+1]*j +
                  coeff[x+2]*i*j + coeff[x+3], x+=4;
            if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                (image->colorspace == CMYKColorspace))
              pixel.black = coeff[x]*i + coeff[x+1]*j +
                  coeff[x+2]*i*j + coeff[x+3], x+=4;
            if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                (image->alpha_trait != UndefinedPixelTrait))
              pixel.alpha = coeff[x]*i + coeff[x+1]*j +
                  coeff[x+2]*i*j + coeff[x+3], x+=4;
            break;
          }
          case InverseColorInterpolate:
          case ShepardsColorInterpolate:
          { /* Inverse (Squared) Distance weights average (IDW) */
            size_t
              k;
            double
              denominator;

            /* zero the accumulators for every active channel */
            if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
              pixel.red=0.0;
            if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
              pixel.green=0.0;
            if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
              pixel.blue=0.0;
            if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                (image->colorspace == CMYKColorspace))
              pixel.black=0.0;
            if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                (image->alpha_trait != UndefinedPixelTrait))
              pixel.alpha=0.0;
            denominator = 0.0;
            /* arguments are packed as x,y,then number_colors values */
            for(k=0; k<number_arguments; k+=2+number_colors) {
              register ssize_t x=(ssize_t) k+2;
              double weight =
                  ((double)i-arguments[ k ])*((double)i-arguments[ k ])
                + ((double)j-arguments[k+1])*((double)j-arguments[k+1]);
              weight = pow(weight,coeff[0]); /* inverse of power factor */
              weight = ( weight < 1.0 ) ? 1.0 : 1.0/weight;
              if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
                pixel.red += arguments[x++]*weight;
              if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
                pixel.green += arguments[x++]*weight;
              if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
                pixel.blue += arguments[x++]*weight;
              if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                  (image->colorspace == CMYKColorspace))
                pixel.black += arguments[x++]*weight;
              if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                  (image->alpha_trait != UndefinedPixelTrait))
                pixel.alpha += arguments[x++]*weight;
              denominator += weight;
            }
            /* normalize the weighted sums */
            if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
              pixel.red/=denominator;
            if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
              pixel.green/=denominator;
            if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
              pixel.blue/=denominator;
            if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                (image->colorspace == CMYKColorspace))
              pixel.black/=denominator;
            if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                (image->alpha_trait != UndefinedPixelTrait))
              pixel.alpha/=denominator;
            break;
          }
          case ManhattanColorInterpolate:
          {
            size_t
              k;
            double
              minimum = MagickMaximumValue;

            /*
              Just use the closest control point you can find!
              (Manhattan / taxicab distance.)
            */
            for(k=0; k<number_arguments; k+=2+number_colors) {
              double distance =
                  fabs((double)i-arguments[ k ])
                + fabs((double)j-arguments[k+1]);
              if ( distance < minimum ) {
                register ssize_t x=(ssize_t) k+2;
                if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
                  pixel.red=arguments[x++];
                if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
                  pixel.green=arguments[x++];
                if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
                  pixel.blue=arguments[x++];
                if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                    (image->colorspace == CMYKColorspace))
                  pixel.black=arguments[x++];
                if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                    (image->alpha_trait != UndefinedPixelTrait))
                  pixel.alpha=arguments[x++];
                minimum = distance;
              }
            }
            break;
          }
          case VoronoiColorInterpolate:
          default:
          {
            size_t
              k;
            double
              minimum = MagickMaximumValue;

            /*
              Just use the closest control point you can find!
              (Squared Euclidean distance.)
            */
            for (k=0; k<number_arguments; k+=2+number_colors) {
              double distance =
                  ((double)i-arguments[ k ])*((double)i-arguments[ k ])
                + ((double)j-arguments[k+1])*((double)j-arguments[k+1]);
              if ( distance < minimum ) {
                register ssize_t x=(ssize_t) k+2;
                if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
                  pixel.red=arguments[x++];
                if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
                  pixel.green=arguments[x++];
                if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
                  pixel.blue=arguments[x++];
                if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                    (image->colorspace == CMYKColorspace))
                  pixel.black=arguments[x++];
                if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                    (image->alpha_trait != UndefinedPixelTrait))
                  pixel.alpha=arguments[x++];
                minimum = distance;
              }
            }
            break;
          }
        }
        /* set the color directly back into the source image;
           interpolated values are normalized so scale to QuantumRange. */
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          pixel.red=(MagickRealType) ClampPixel(QuantumRange*pixel.red);
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          pixel.green=(MagickRealType) ClampPixel(QuantumRange*pixel.green);
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          pixel.blue=(MagickRealType) ClampPixel(QuantumRange*pixel.blue);
        if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
            (image->colorspace == CMYKColorspace))
          pixel.black=(MagickRealType) ClampPixel(QuantumRange*pixel.black);
        if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
            (image->alpha_trait != UndefinedPixelTrait))
          pixel.alpha=(MagickRealType) ClampPixel(QuantumRange*pixel.alpha);
        SetPixelViaPixelInfo(sparse_image,&pixel,q);
        q+=GetPixelChannels(sparse_image);
      }
      sync=SyncCacheViewAuthenticPixels(sparse_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
      if (image->progress_monitor != (MagickProgressMonitor) NULL)
        {
          MagickBooleanType
            proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp atomic
#endif
          progress++;
          proceed=SetImageProgress(image,SparseColorTag,progress,image->rows);
          if (proceed == MagickFalse)
            status=MagickFalse;
        }
    }
    sparse_view=DestroyCacheView(sparse_view);
    if (status == MagickFalse)
      sparse_image=DestroyImage(sparse_image);
  }
  coeff = (double *) RelinquishMagickMemory(coeff);
  return(sparse_image);
}
|
morn_tensor.c | /*
Copyright (C) 2019-2020 JingWeiZhangHuai <jingweizhanghuai@163.com>
Licensed under the Apache License, Version 2.0; you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/
#include "morn_tensor.h"
/* Private state attached to an MTensor's handle chain: owns the per-batch
 * pointer table, the backing memory, and the backup buffers. */
struct HandleTensorCreate
{
    MTensor *tns;            /* the tensor this handle belongs to */
    int batch;               /* number of entries allocated in 'data' */
    int size;                /* floats per batch slice (incl. padding); 0 when data is caller-owned */
    int device;              /* device 'memory' was allocated on */
    float **data;            /* per-batch data pointer table (tns->data aliases this) */
    MMemory *memory;         /* backing allocation; NULL when data is caller-owned */
    float **backup_data;     /* scratch pointer table filled by mTensorBackup */
    MMemory *backup_memory;  /* backing allocation for backup_data */
};
#define HASH_TensorCreate 0x6b6cf658
/* Handle destructor: free the pointer tables, release the backing memory
 * (primary and backup), then free the tensor struct itself. */
void endTensorCreate(void *info)
{
    struct HandleTensorCreate *h;

    h = (struct HandleTensorCreate *)info;
    mException((h->tns ==NULL),EXIT,"invalid tensor");

    if(!INVALID_POINTER(h->data))          free(h->data);
    if(!INVALID_POINTER(h->memory))        mMemoryRelease(h->memory);
    if(!INVALID_POINTER(h->backup_data))   free(h->backup_data);
    if(!INVALID_POINTER(h->backup_memory)) mMemoryRelease(h->backup_memory);

    free(h->tns);
}
/* Create a batch x channel x height x width float tensor.
 *
 * data  : optional caller-owned per-batch pointers; when given, no memory is
 *         allocated and the pointers are copied as-is.
 * device: target device for allocation; negative selects MORN_HOST.
 *
 * Each allocated slice carries 8 extra floats of padding; the first padding
 * slot (index channel*height*width) is set to 1.0f.
 * Returns the new tensor (empty when batch or the volume is zero).
 */
MTensor *TensorCreate(int batch,int channel,int height,int width,float **data,int device)
{
    MTensor *tns = (MTensor *)malloc(sizeof(MTensor));
    memset(tns,0,sizeof(MTensor));

    /* negative dimensions are clamped to 0 before being recorded */
    if(batch  <0) {batch  = 0;} tns->batch  = batch;
    if(channel<0) {channel= 0;} tns->channel= channel;
    if(height <0) {height = 0;} tns->height = height;
    if(width  <0) {width  = 0;} tns->width  = width;
    /* BUGFIX: record the sanitized device; the original always stored
       MORN_HOST here, so tns->device disagreed with the device the memory
       was actually allocated on (handle->device below). */
    if(device <0) {device = MORN_HOST;} tns->device = device;

    tns->handle = mHandleCreate();
    MHandle *hdl=mHandle(tns,TensorCreate);
    struct HandleTensorCreate *handle = (struct HandleTensorCreate *)(hdl->handle);
    handle->tns = tns;

    int size = channel*height*width;
    if((batch==0)||(size == 0))
    {
        /* an empty tensor cannot wrap caller data */
        mException((!INVALID_POINTER(data)),EXIT,"invalid input");
        return tns;
    }
    size = size+8;  /* 8 floats of padding per slice */

    handle->batch = batch;
    handle->data = (float **)malloc(batch*sizeof(float *));
    tns->data = handle->data;

    if(!INVALID_POINTER(data))
    {
        /* caller-owned storage: just record the pointers, allocate nothing */
        handle->size = 0;
        memcpy(handle->data,data,batch*sizeof(float *));
        return tns;
    }

    handle->size= size;
    handle->device = device;
    handle->memory = mMemoryCreate(batch,size*sizeof(float),device);
    /* let mMemoryIndex write each block's address into handle->data[i] */
    void ***idx = malloc(batch*sizeof(void **));
    for(int i=0;i<batch;i++) idx[i]=(void **)(&(handle->data[i]));
    mMemoryIndex(handle->memory,1,size*sizeof(float),idx,batch);
    free(idx);
    /* tag the first padding slot of every slice */
    for(int b=0;b<batch;b++) tns->data[b][channel*height*width]=1.0f;
    return tns;
}
/* Release a tensor: delegates all cleanup to the handle chain, whose
 * destructor (endTensorCreate) frees the data tables and the struct. */
void mTensorRelease(MTensor *tns)
{
    mException(INVALID_POINTER(tns),EXIT,"invalid input");
    if(INVALID_POINTER(tns->handle)) return;
    mHandleRelease(tns->handle);
}
/* Return the memory block backing slice 'batch' of tns, syncing the slice's
 * current data into that block when the block's buffer differs from
 * tns->data[batch]. */
MMemoryBlock *mTensorMemory(MTensor *tns,int batch)
{
    /* slice size in floats, including the 8-float padding used at creation */
    int size = tns->channel*tns->height*tns->width+8;
    float *data = tns->data[batch];
    struct HandleTensorCreate *handle = (struct HandleTensorCreate *)(((MHandle *)(tns->handle->data[0]))->handle);
    /* NOTE(review): 'batch' is a slice index here, yet it is passed below to
       mMemoryCreate/mMemoryIndex as a block count (and used as the idx loop
       bound) - tns->batch may have been intended; confirm against callers. */
    if(handle->memory == NULL) handle->memory = mMemoryCreate(batch,size*sizeof(float),tns->device);
    MMemoryBlock *mem = handle->memory->data[batch];
    /* NOTE(review): 'size' counts floats but the memory was sized in bytes
       (size*sizeof(float)); verify the units of mem->size before relying on
       this comparison. */
    if(mem->size<size)
    {
        /* regrow and re-point handle->data at the new blocks */
        void ***idx = malloc(batch*sizeof(void **));
        for(int i=0;i<batch;i++) idx[i]=(void **)(&(handle->data[i]));
        mMemoryIndex(handle->memory,1,size*sizeof(float),idx,batch);
        free(idx);
    }
    if(mem->data!=data) memcpy(mem->data,data,size*sizeof(float));
    return mem;
}
/* Re-shape an existing tensor in place, reusing its allocation when the new
 * shape fits, otherwise reallocating (or adopting caller-supplied 'data').
 * Non-positive dimensions keep the tensor's current value; a negative
 * 'device' keeps the current device (or adopts the device of 'data'). */
void TensorRedefine(MTensor *tns,int batch,int channel,int height,int width,float **data,int device)
{
    mException((INVALID_POINTER(tns)),EXIT,"invalid input");

    /* defaults: keep the current shape/data where arguments are unset */
    if(batch  <= 0) batch  = tns->batch;
    if(channel<= 0) channel= tns->channel;
    if(height <= 0) height = tns->height;
    if(width  <= 0) width  = tns->width;
    if(INVALID_POINTER(data)) data=tns->data;

    int size = channel*height*width+8;  /* slice size incl. 8-float padding */
    if((batch!=tns->batch)||(channel!=tns->channel)||(height!=tns->height)||(width!=tns->width))
        mHandleReset(tns->handle);
    /* NOTE(review): this compares the padded 'size' against the unpadded
       current volume with '<'; check whether comparing against the padded
       size was intended. */
    int same_size = (batch<=tns->batch)&&(size<tns->channel*tns->height*tns->width)&&(data==tns->data);
    tns->batch=batch; tns->height=height; tns->width=width; tns->channel=channel;
    /* fast paths: shape fits in the existing allocation */
    if(same_size&&(data==NULL)) goto tensor_redefine_end;
    if(same_size&&((device<0)||(device==mMemoryBlock(data[0])->device))) goto tensor_redefine_end;

    struct HandleTensorCreate *handle = (struct HandleTensorCreate *)(((MHandle *)(tns->handle->data[0]))->handle);
    /* resolve the target device when unspecified */
    if(device<0)
    {
        if((data!=tns->data)&&(data!=NULL)) device=mMemoryBlock(data[0])->device;
        else device=handle->device;
    }
    /* caller-supplied data must all live on the resolved device */
    if((data!=tns->data)&&(data!=NULL))
    {
        for(int bc=0;bc<batch;bc++)
            mException(mMemoryBlock(data[bc])->device!=device,EXIT,"invalid data device");
    }
    /* nothing to do if the handle's allocation already matches */
    if((batch<=handle->batch)&&(size<=handle->size)&&(data==handle->data)&&(device==handle->device)) return;
    // int flag = (tns->batch)&&(tns->channel)&&(tns->height)&&(tns->width);
    // mException(reuse&&flag&&(handle->size==0),EXIT,"invalid redefine");
    if((batch==0)||(size<=8))
    {
        /* degenerate shape: tensor becomes empty */
        mException((data!=tns->data),EXIT,"invalid input");
        tns->data=NULL;
        goto tensor_redefine_end;
    }
    /* grow the pointer table if the batch count increased */
    if(batch>handle->batch){if(handle->data != NULL) {free(handle->data);}handle->data=NULL;}
    if(handle->data==NULL)
    {
        handle->data = (float **)malloc(batch*sizeof(float *));
        handle->batch = batch;
    }
    if(data!=tns->data)
    {
        /* adopt caller-supplied pointers; drop any stale backup buffers */
        memcpy(handle->data,data,batch*sizeof(float *));
        tns->data = handle->data;
        if(!INVALID_POINTER(handle->backup_data)) free(handle->backup_data);
        if(!INVALID_POINTER(handle->backup_memory)) mMemoryRelease(handle->backup_memory);
        goto tensor_redefine_end;
    }
    /* (re)allocate backing memory and re-point the table into it */
    if(handle->memory == NULL) handle->memory = mMemoryCreate(batch,size*sizeof(float),device);
    else mMemoryRedefine(handle->memory,batch,size*sizeof(float),device);
    void ***idx = malloc(batch*sizeof(void **));
    for(int i=0;i<batch;i++) idx[i]=(void **)(&(handle->data[i]));
    mMemoryIndex(handle->memory,1,size*sizeof(float),idx,batch);
    free(idx);
    tns->data = handle->data;
    handle->size = size;
    handle->device= device;

tensor_redefine_end:
    /* tag the first padding slot of every slice, as TensorCreate does */
    for(int b=0;b<batch;b++) tns->data[b][channel*height*width]=1.0f;
}
/* Build (or rebuild) a backup pointer table for tns with its own backing
 * memory on the tensor's device. Non-positive dimensions default to the
 * tensor's current shape. Returns the per-batch backup pointers. */
float **mTensorBackup(MTensor *tns,int batch,int cn,int height,int width)
{
    struct HandleTensorCreate *handle = (struct HandleTensorCreate *)(((MHandle *)(tns->handle->data[0]))->handle);

    /* fall back to the tensor's current shape for unset arguments */
    batch  = (batch <=0) ? tns->batch   : batch;
    cn     = (cn    <=0) ? tns->channel : cn;
    height = (height<=0) ? tns->height  : height;
    width  = (width <=0) ? tns->width   : width;
    int size = cn*height*width;

    /* discard the previous pointer table and build a fresh one */
    if(handle->backup_data!=NULL) free(handle->backup_data);
    handle->backup_data = (float **)malloc(batch*sizeof(float *));

    /* (re)size the backing memory on the tensor's device */
    if(handle->backup_memory == NULL)
        handle->backup_memory = mMemoryCreate(batch,size*sizeof(float),tns->device);
    else
        mMemoryRedefine(handle->backup_memory,batch,size*sizeof(float),tns->device);

    for(int b=0;b<batch;b++)
        handle->backup_data[b] = (float *)(handle->backup_memory->data[b]);
    return handle->backup_data;
}
void MemCopy(void *dst,int dst_dev,void *src,int src_dev,int size);
// Copy (or migrate) tensor src to dst on the given device.
// - dst == NULL or dst == src: in-place device migration of src. If src is
//   already on `device` nothing is done; otherwise src's payload is staged
//   through backup buffers and src is rebound to them on the new device.
// - distinct dst: dst is redefined onto `device` if needed and receives
//   src's data layout.
// device < 0 means "use dst->device" (dst must then be valid).
void mTensorCopy(MTensor *src,MTensor *dst,int device)
{
// a missing dst and a missing device cannot both be defaulted
mException(INVALID_POINTER(src),EXIT,"invalid input source tensor");
mException(INVALID_POINTER(dst)&&(device<0),EXIT,"invalid input device");
if(device<0) device=dst->device;
float **dst_data;
// flag != 0 selects the in-place migration path (no separate destination)
int flag = (INVALID_POINTER(dst))||(dst==src);
if(flag) {if(device==src->device){return;} dst_data=mTensorBackup(src,DFLT,DFLT,DFLT,DFLT);}
else {if(device!=dst->device){mTensorRedefine(dst,DFLT,DFLT,DFLT,DFLT,NULL,device);} dst_data=dst->data;}
// NOTE(review): the element-wise MemCopy loop below is commented out, so as
// written this function only rebinds/redefines buffers and does not transfer
// the payload between src->data and dst_data — confirm this is intentional.
// int size = src->channel*src->height*src->width;
// for(int i=0;i<src->batch;i++)
// MemCopy(dst_data[i],device,src->data[i],src->device,size*sizeof(float));
// migration path: rebind src to the staged buffers on the target device
if(flag) mTensorRedefine(src,DFLT,DFLT,DFLT,DFLT,dst_data,device);
}
/*
void mTensorAdd(MTensor *src1,MTensor *src2,MTensor *dst)
{
int i;
mException((INVALID_TENSOR(src1)||INVALID_TENSOR(src2)),EXIT,"invalid input source");
int batch = src1->batch;
mException((src2->batch!=batch)&&(src2->batch!=1),EXIT,"invalid input source");
mException((batch>1)&&(src2->batch==1)&&(dst==src2),EXIT,"invalid input");
int channel = src1->channel;
int height = src1->height;
int width = src1->width;
mException((src2->channel!=channel)||(src2->height!=height)||(src2->width!=width),EXIT,"invalid input source");
int size = channel*height*width;
if(dst==NULL) dst = src1;
if((dst!=src1)&&(dst!=src2)) mTensorRedefine(dst,batch,channel,height,width,dst->data);
for(int b=0;b<batch;b++)
{
float *data1 = src1->data[b];
float *data2 = (src2->batch>1)?src2->data[b]:src2->data[0];
float *data = dst ->data[b];
#pragma omp parallel for
for(i=0;i<size;i++)
data[i] = data1[i]+data2[i];
}
}
void mTensorSub(MTensor *src1,MTensor *src2,MTensor *dst)
{
int i;
mException((INVALID_TENSOR(src1)||INVALID_TENSOR(src2)),EXIT,"invalid input source");
int batch = src1->batch;
mException((src2->batch!=batch)&&(src2->batch!=1),EXIT,"invalid input source");
mException((batch>1)&&(src2->batch==1)&&(dst==src2),EXIT,"invalid input");
int channel = src1->channel;
int height = src1->height;
int width = src1->width;
mException((src2->channel!=channel)||(src2->height!=height)||(src2->width!=width),EXIT,"invalid input source");
int size = channel*height*width;
if(dst==NULL) dst = src1;
if((dst!=src1)&&(dst!=src2)) mTensorRedefine(dst,batch,channel,height,width,dst->data);
for(int b=0;b<batch;b++)
{
float *data1 = src1->data[b];
float *data2 = (src2->batch>1)?src2->data[b]:src2->data[0];
float *data = dst ->data[b];
#pragma omp parallel for
for(i=0;i<size;i++)
data[i] = data1[i]-data2[i];
}
}
void mTensorScalarMul(MTensor *src1,MTensor *src2,MTensor *dst)
{
int i;
mException((INVALID_TENSOR(src1)||INVALID_TENSOR(src2)),EXIT,"invalid input source");
int batch = src1->batch;
mException((src2->batch!=batch)&&(src2->batch!=1),EXIT,"invalid input source");
mException((batch>1)&&(src2->batch==1)&&(dst==src2),EXIT,"invalid input");
int channel = src1->channel;
int height = src1->height;
int width = src1->width;
mException((src2->channel!=channel)||(src2->height!=height)||(src2->width!=width),EXIT,"invalid input source");
int size = channel*height*width;
if(dst==NULL) dst = src1;
if((dst!=src1)&&(dst!=src2)) mTensorRedefine(dst,batch,channel,height,width,dst->data);
for(int b=0;b<batch;b++)
{
float *data1 = src1->data[b];
float *data2 = (src2->batch>1)?src2->data[b]:src2->data[0];
float *data = dst ->data[b];
#pragma omp parallel for
for(i=0;i<size;i++)
data[i] = data1[i]*data2[i];
}
}
void mTensorScalarDiv(MTensor *src1,MTensor *src2,MTensor *dst)
{
int i;
mException((INVALID_TENSOR(src1)||INVALID_TENSOR(src2)),EXIT,"invalid input source");
int batch = src1->batch;
mException((src2->batch!=batch)&&(src2->batch!=1),EXIT,"invalid input source");
mException((batch>1)&&(src2->batch==1)&&(dst==src2),EXIT,"invalid input");
int channel = src1->channel;
int height = src1->height;
int width = src1->width;
mException((src2->channel!=channel)||(src2->height!=height)||(src2->width!=width),EXIT,"invalid input source");
int size = channel*height*width;
if(dst==NULL) dst = src1;
if((dst!=src1)&&(dst!=src2)) mTensorRedefine(dst,batch,channel,height,width,dst->data);
for(int b=0;b<batch;b++)
{
float *data1 = src1->data[b];
float *data2 = (src2->batch>1)?src2->data[b]:src2->data[0];
float *data = dst ->data[b];
#pragma omp parallel for
for(i=0;i<size;i++)
data[i] = data1[i]/data2[i];
}
}
*/
// Apply the scalar function `func` element-wise to src, writing the result
// into dst. dst == NULL (or dst == src) operates in place; a distinct dst is
// first redefined to src's geometry. Each batch slice is processed with an
// OpenMP-parallel inner loop.
void mTensorOperate(MTensor *src,MTensor *dst, float (*func)(float))
{
    mException(INVALID_TENSOR(src),EXIT,"invalid input source");
    if(dst==NULL) dst = src;
    if(dst!=src ) mTensorRedefine(dst,src->batch,src->channel,src->height,src->width,dst->data);
    int size = src->channel*src->height*src->width;
    for(int b=0;b<src->batch;b++)
    {
        // Declare the loop index in the for statement so it is explicitly
        // private to each OpenMP thread; the original declared `int i` at
        // function scope and relied on OpenMP's implicit privatization of
        // the associated loop's iteration variable.
        #pragma omp parallel for
        for(int i=0;i<size;i++)
            dst->data[b][i] = func(src->data[b][i]);
    }
}
|
core_ztsmlq.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> c d s
*
**/
#include "core_blas.h"
#include "plasma_types.h"
#include "plasma_internal.h"
#include "core_lapack.h"
#include <omp.h>
/***************************************************************************//**
*
* @ingroup core_tsmlq
*
* Overwrites the general complex m1-by-n1 tile A1 and
* m2-by-n2 tile A2 with
*
* side = PlasmaLeft side = PlasmaRight
* trans = PlasmaNoTrans Q * | A1 | | A1 A2 | * Q
* | A2 |
*
* trans = Plasma_ConjTrans Q^H * | A1 | | A1 A2 | * Q^H
* | A2 |
*
* where Q is a complex unitary matrix defined as the product of k
* elementary reflectors
*
* Q = H(k)^H . . . H(2)^H H(1)^H
*
* as returned by core_ztslqt.
*
*******************************************************************************
*
* @param[in] side
* - PlasmaLeft : apply Q or Q^H from the Left;
* - PlasmaRight : apply Q or Q^H from the Right.
*
* @param[in] trans
* - PlasmaNoTrans : Apply Q;
* - Plasma_ConjTrans : Apply Q^H.
*
* @param[in] m1
* The number of rows of the tile A1. m1 >= 0.
*
* @param[in] n1
* The number of columns of the tile A1. n1 >= 0.
*
* @param[in] m2
* The number of rows of the tile A2. m2 >= 0.
* m2 = m1 if side == PlasmaRight.
*
* @param[in] n2
* The number of columns of the tile A2. n2 >= 0.
* n2 = n1 if side == PlasmaLeft.
*
* @param[in] k
* The number of elementary reflectors whose product defines
* the matrix Q.
*
* @param[in] ib
* The inner-blocking size. ib >= 0.
*
* @param[in,out] A1
* On entry, the m1-by-n1 tile A1.
* On exit, A1 is overwritten by the application of Q.
*
* @param[in] lda1
* The leading dimension of the array A1. lda1 >= max(1,m1).
*
* @param[in,out] A2
* On entry, the m2-by-n2 tile A2.
* On exit, A2 is overwritten by the application of Q.
*
* @param[in] lda2
* The leading dimension of the tile A2. lda2 >= max(1,m2).
*
* @param[in] V
* The i-th row must contain the vector which defines the
* elementary reflector H(i), for i = 1,2,...,k, as returned by
* core_ztslqt in the first k rows of its array argument V.
*
* @param[in] ldv
* The leading dimension of the array V. ldv >= max(1,k).
*
* @param[in] T
* The ib-by-k triangular factor T of the block reflector.
* T is upper triangular by block (economic storage);
* The rest of the array is not referenced.
*
* @param[in] ldt
* The leading dimension of the array T. ldt >= ib.
*
* @param work
* Auxiliary workspace array of length
* ldwork-by-m1 if side == PlasmaLeft
* ldwork-by-ib if side == PlasmaRight
*
* @param[in] ldwork
* The leading dimension of the array work.
* ldwork >= max(1,ib) if side == PlasmaLeft
* ldwork >= max(1,n1) if side == PlasmaRight
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
*
******************************************************************************/
int core_ztsmlq(plasma_enum_t side, plasma_enum_t trans,
int m1, int n1, int m2, int n2, int k, int ib,
plasma_complex64_t *A1, int lda1,
plasma_complex64_t *A2, int lda2,
const plasma_complex64_t *V, int ldv,
const plasma_complex64_t *T, int ldt,
plasma_complex64_t *work, int ldwork)
{
// Check input arguments.
// Each failed check returns the negated 1-based position of the offending
// argument, matching the LAPACK-style convention documented in the header.
if (side != PlasmaLeft && side != PlasmaRight) {
coreblas_error("illegal value of side");
return -1;
}
if (trans != PlasmaNoTrans && trans != Plasma_ConjTrans) {
coreblas_error("illegal value of trans");
return -2;
}
if (m1 < 0) {
coreblas_error("illegal value of m1");
return -3;
}
if (n1 < 0) {
coreblas_error("illegal value of n1");
return -4;
}
if (m2 < 0 || (m2 != m1 && side == PlasmaRight)) {
coreblas_error("illegal value of m2");
return -5;
}
if (n2 < 0 || (n2 != n1 && side == PlasmaLeft)) {
coreblas_error("illegal value of n2");
return -6;
}
if (k < 0 ||
(side == PlasmaLeft && k > m1 ) ||
(side == PlasmaRight && k > n1)) {
coreblas_error("illegal value of k");
return -7;
}
if (ib < 0) {
coreblas_error("illegal value of ib");
return -8;
}
if (A1 == NULL) {
coreblas_error("NULL A1");
return -9;
}
if (lda1 < imax(1, m1)) {
coreblas_error("illegal value of lda1");
return -10;
}
if (A2 == NULL) {
coreblas_error("NULL A2");
return -11;
}
if (lda2 < imax(1, m2)) {
coreblas_error("illegal value of lda2");
return -12;
}
if (V == NULL) {
coreblas_error("NULL V");
return -13;
}
if (ldv < imax(1, k)) {
coreblas_error("illegal value of ldv");
return -14;
}
if (T == NULL) {
coreblas_error("NULL T");
return -15;
}
if (ldt < imax(1, ib)) {
coreblas_error("illegal value of ldt");
return -16;
}
if (work == NULL) {
coreblas_error("NULL work");
return -17;
}
if (ldwork < imax(1, side == PlasmaLeft ? ib : n1)) {
coreblas_error("illegal value of ldwork");
return -18;
}
// quick return
if (m1 == 0 || n1 == 0 || m2 == 0 || n2 == 0 || k == 0 || ib == 0)
return PlasmaSuccess;
// Choose the sweep over the ib-wide reflector blocks: either forward
// (i = 0, ib, 2*ib, ...) or backward (i = ((k-1)/ib)*ib, ..., ib, 0),
// depending on the side/trans combination.
int i1, i3;
if ((side == PlasmaLeft && trans == PlasmaNoTrans) ||
(side == PlasmaRight && trans != PlasmaNoTrans)) {
i1 = 0;
i3 = ib;
}
else {
i1 = ((k-1)/ib)*ib;
i3 = -ib;
}
// Q is stored as a product of conjugate-transposed reflectors
// (Q = H(k)^H . . . H(1)^H, see header), so the operation handed to
// core_zparfb is the conjugate of the one requested by the caller.
if (trans == PlasmaNoTrans)
trans = Plasma_ConjTrans;
else
trans = PlasmaNoTrans;
for (int i = i1; i > -1 && i < k; i += i3) {
// Width of the current reflector block; the last one may be narrower.
int kb = imin(ib, k-i);
int ic = 0;
int jc = 0;
int mi = m1;
int ni = n1;
if (side == PlasmaLeft) {
// H or H^H is applied to C(i:m,1:n).
mi = m1 - i;
ic = i;
}
else {
// H or H^H is applied to C(1:m,i:n).
ni = n1 - i;
jc = i;
}
// Apply H or H^H.
core_zparfb(side, trans, PlasmaForward, PlasmaRowwise,
mi, ni, m2, n2, kb, 0,
&A1[lda1*jc+ic], lda1,
A2, lda2,
&V[i], ldv,
&T[ldt*i], ldt,
work, ldwork);
}
return PlasmaSuccess;
}
/******************************************************************************/
// OpenMP task wrapper around core_ztsmlq(): registers data dependencies
// (A1 and A2 are updated in place; V and T are read-only) so the runtime
// orders this task against other tasks touching the same tiles.
void core_omp_ztsmlq(plasma_enum_t side, plasma_enum_t trans,
int m1, int n1, int m2, int n2, int k, int ib,
plasma_complex64_t *A1, int lda1,
plasma_complex64_t *A2, int lda2,
const plasma_complex64_t *V, int ldv,
const plasma_complex64_t *T, int ldt,
plasma_workspace_t work,
plasma_sequence_t *sequence, plasma_request_t *request)
{
#pragma omp task depend(inout:A1[0:lda1*n1]) \
depend(inout:A2[0:lda2*n2]) \
depend(in:V[0:ldv*n2]) \
depend(in:T[0:ib*k])
{
// Skip the kernel if an earlier task in this sequence already failed.
if (sequence->status == PlasmaSuccess) {
// Prepare workspaces.
// Each executing thread uses its own preallocated scratch buffer,
// selected by the OpenMP thread id.
int tid = omp_get_thread_num();
plasma_complex64_t *W = (plasma_complex64_t*)work.spaces[tid];
int ldwork = side == PlasmaLeft ? ib : n1; // TODO: double check
// Call the kernel.
int info = core_ztsmlq(side, trans,
m1, n1, m2, n2, k, ib,
A1, lda1,
A2, lda2,
V, ldv,
T, ldt,
W, ldwork);
// Record the failure on the sequence so dependent tasks can bail out.
if (info != PlasmaSuccess) {
plasma_error("core_ztsmlq() failed");
plasma_request_fail(sequence, request, PlasmaErrorInternal);
}
}
}
}
|
Mapping.h | //===--------- Mapping.h - OpenMP device runtime mapping helpers -- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//
#ifndef OMPTARGET_MAPPING_H
#define OMPTARGET_MAPPING_H
#include "Types.h"
namespace _OMP {
namespace mapping {
#pragma omp begin declare target device_type(nohost)
/// Upper bound on the number of threads per team (device-only constant).
inline constexpr uint32_t MaxThreadsPerTeam = 1024;
#pragma omp end declare target
/// Initialize the mapping machinery.
void init(bool IsSPMD);
/// Return true if the kernel is executed in SPMD mode.
bool isSPMDMode();
/// Return true if the kernel is executed in generic mode.
bool isGenericMode();
/// Return true if the executing thread is the main thread in generic mode.
/// These functions will look up state, and it is required that doing so is OK
/// for the calling thread and location. See also `isInitialThreadInLevel0` for
/// a stateless alternative for certain situations, e.g. during initialization.
bool isMainThreadInGenericMode();
bool isMainThreadInGenericMode(bool IsSPMD);
/// Return true if this thread is the initial thread in parallel level 0.
///
/// The thread for which this returns true should be used for single threaded
/// initialization tasks. We pick a special thread to ensure there are no
/// races between the initialization and the first read of initialized state.
bool isInitialThreadInLevel0(bool IsSPMD);
/// Return true if the executing thread has the lowest Id of the active threads
/// in the warp.
bool isLeaderInWarp();
/// Return a mask describing all active threads in the warp.
LaneMaskTy activemask();
/// Return a mask describing all threads with a smaller Id in the warp.
LaneMaskTy lanemaskLT();
/// Return a mask describing all threads with a larger Id in the warp.
LaneMaskTy lanemaskGT();
/// Return the thread Id in the warp, in [0, getWarpSize()).
uint32_t getThreadIdInWarp();
/// Return the thread Id in the block, in [0, getBlockSize()).
uint32_t getThreadIdInBlock();
/// Return the warp id in the block.
uint32_t getWarpId();
/// Return the warp size, thus number of threads in the warp.
uint32_t getWarpSize();
/// Return the number of warps in the block.
uint32_t getNumberOfWarpsInBlock();
/// Return the block Id in the kernel, in [0, getNumberOfBlocks()).
uint32_t getBlockId();
/// Return the block size, thus number of threads in the block.
///
/// Note: The version taking \p IsSPMD mode explicitly can be used during the
/// initialization of the target region, that is before `mapping::isSPMDMode()`
/// can be called by any thread other than the main one.
uint32_t getBlockSize();
uint32_t getBlockSize(bool IsSPMD);
/// Return the number of blocks in the kernel.
uint32_t getNumberOfBlocks();
/// Return the kernel size, thus number of threads in the kernel.
uint32_t getKernelSize();
/// Return the number of processing elements on the device.
uint32_t getNumberOfProcessorElements();
} // namespace mapping
} // namespace _OMP
#endif
|
convolution_winograd_transform_packn.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Winograd F(6x6,3x3) input transform, packn-packed RVV layout.
// Each 8x8 input tile (tiles advance by 6 pixels, overlapping by 2) is
// multiplied by the B^T matrix shown below — a row pass into a temporary
// buffer followed by a column pass into bottom_blob_tm. Every logical
// element is a vector of packn floats processed with m1 RVV intrinsics.
static void conv3x3s1_winograd64_transform_input_packn_rvv(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
// packn = number of 32-bit lanes in one vector register (vlenb bytes / 4)
const int packn = csrr_vlenb() / 4;
const word_type vl = vsetvl_e32m1(packn);
const int w = bottom_blob.w;
const int h = bottom_blob.h;
const int inch = bottom_blob.c;
// 8x8 tiles step by 6 (the output tile size), hence (dim - 2) / 6
const int w_tiles = (w - 2) / 6;
const int h_tiles = (h - 2) / 6;
const int tiles = w_tiles * h_tiles;
// const float itm[8][8] = {
// {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f},
//
// {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f},
// {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f},
//
// {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f},
// {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f},
//
// {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f},
// {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f},
//
// {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f}
// };
// 0 = r00 - r06 + (r04 - r02) * 5.25
// 7 = r07 - r01 + (r03 - r05) * 5.25
// 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
// 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)
// 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// reuse r04 * 1.25
// reuse r03 * 2.5
// 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
// 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)
// channels are independent, so parallelize across them
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const Mat img0 = bottom_blob.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
// NOTE c99 variable length array
float tmp[8][8][packn];
// tile
for (int i = 0; i < h_tiles; i++)
{
for (int j = 0; j < w_tiles; j++)
{
// pass 1: transform the 8 rows of the tile into tmp (stored
// transposed: tmp[out_row][m][...] so pass 2 reads rows)
const float* r0 = img0.row(i * 6) + (j * 6) * packn;
for (int m = 0; m < 8; m++)
{
vfloat32m1_t _r00 = vle32_v_f32m1(r0, vl);
vfloat32m1_t _r01 = vle32_v_f32m1(r0 + packn, vl);
vfloat32m1_t _r02 = vle32_v_f32m1(r0 + packn * 2, vl);
vfloat32m1_t _r03 = vle32_v_f32m1(r0 + packn * 3, vl);
vfloat32m1_t _r04 = vle32_v_f32m1(r0 + packn * 4, vl);
vfloat32m1_t _r05 = vle32_v_f32m1(r0 + packn * 5, vl);
vfloat32m1_t _r06 = vle32_v_f32m1(r0 + packn * 6, vl);
vfloat32m1_t _r07 = vle32_v_f32m1(r0 + packn * 7, vl);
vfloat32m1_t _tmp0m = vfmacc_vf_f32m1(vfsub_vv_f32m1(_r00, _r06, vl), 5.25f, vfsub_vv_f32m1(_r04, _r02, vl), vl);
vfloat32m1_t _tmp7m = vfmacc_vf_f32m1(vfsub_vv_f32m1(_r07, _r01, vl), 5.25f, vfsub_vv_f32m1(_r03, _r05, vl), vl);
vse32_v_f32m1(tmp[0][m], _tmp0m, vl);
vse32_v_f32m1(tmp[7][m], _tmp7m, vl);
vfloat32m1_t _tmp12a = vfmacc_vf_f32m1(vfadd_vv_f32m1(_r02, _r06, vl), -4.25f, _r04, vl);
vfloat32m1_t _tmp12b = vfmacc_vf_f32m1(vfadd_vv_f32m1(_r01, _r05, vl), -4.25f, _r03, vl);
vfloat32m1_t _tmp1m = vfadd_vv_f32m1(_tmp12a, _tmp12b, vl);
vfloat32m1_t _tmp2m = vfsub_vv_f32m1(_tmp12a, _tmp12b, vl);
vse32_v_f32m1(tmp[1][m], _tmp1m, vl);
vse32_v_f32m1(tmp[2][m], _tmp2m, vl);
vfloat32m1_t _tmp34a = vfmacc_vf_f32m1(vfmacc_vf_f32m1(_r06, 0.25f, _r02, vl), -1.25f, _r04, vl);
vfloat32m1_t _tmp34b = vfmacc_vf_f32m1(vfmacc_vf_f32m1(vfmul_vf_f32m1(_r01, 0.5f, vl), -2.5f, _r03, vl), 2.f, _r05, vl);
vfloat32m1_t _tmp3m = vfadd_vv_f32m1(_tmp34a, _tmp34b, vl);
vfloat32m1_t _tmp4m = vfsub_vv_f32m1(_tmp34a, _tmp34b, vl);
vse32_v_f32m1(tmp[3][m], _tmp3m, vl);
vse32_v_f32m1(tmp[4][m], _tmp4m, vl);
vfloat32m1_t _tmp56a = vfmacc_vf_f32m1(_r06, 4.f, vfmacc_vf_f32m1(_r02, -1.25f, _r04, vl), vl);
vfloat32m1_t _tmp56b = vfmacc_vf_f32m1(vfmacc_vf_f32m1(vfmul_vf_f32m1(_r01, 2.f, vl), -2.5f, _r03, vl), 0.5f, _r05, vl);
vfloat32m1_t _tmp5m = vfadd_vv_f32m1(_tmp56a, _tmp56b, vl);
vfloat32m1_t _tmp6m = vfsub_vv_f32m1(_tmp56a, _tmp56b, vl);
vse32_v_f32m1(tmp[5][m], _tmp5m, vl);
vse32_v_f32m1(tmp[6][m], _tmp6m, vl);
r0 += w * packn;
}
// pass 2: transform the 8 columns and scatter the 8 transform
// coefficients into img0_tm; coefficients of the same tile are
// spaced tiles * packn apart
float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * packn;
float* r0_tm_1 = r0_tm_0 + tiles * packn;
float* r0_tm_2 = r0_tm_0 + tiles * packn * 2;
float* r0_tm_3 = r0_tm_0 + tiles * packn * 3;
float* r0_tm_4 = r0_tm_0 + tiles * packn * 4;
float* r0_tm_5 = r0_tm_0 + tiles * packn * 5;
float* r0_tm_6 = r0_tm_0 + tiles * packn * 6;
float* r0_tm_7 = r0_tm_0 + tiles * packn * 7;
for (int m = 0; m < 8; m++)
{
vfloat32m1_t _tmp00 = vle32_v_f32m1(tmp[m][0], vl);
vfloat32m1_t _tmp01 = vle32_v_f32m1(tmp[m][1], vl);
vfloat32m1_t _tmp02 = vle32_v_f32m1(tmp[m][2], vl);
vfloat32m1_t _tmp03 = vle32_v_f32m1(tmp[m][3], vl);
vfloat32m1_t _tmp04 = vle32_v_f32m1(tmp[m][4], vl);
vfloat32m1_t _tmp05 = vle32_v_f32m1(tmp[m][5], vl);
vfloat32m1_t _tmp06 = vle32_v_f32m1(tmp[m][6], vl);
vfloat32m1_t _tmp07 = vle32_v_f32m1(tmp[m][7], vl);
vfloat32m1_t _r0tm0 = vfmacc_vf_f32m1(vfsub_vv_f32m1(_tmp00, _tmp06, vl), 5.25f, vfsub_vv_f32m1(_tmp04, _tmp02, vl), vl);
vfloat32m1_t _r0tm7 = vfmacc_vf_f32m1(vfsub_vv_f32m1(_tmp07, _tmp01, vl), 5.25f, vfsub_vv_f32m1(_tmp03, _tmp05, vl), vl);
vfloat32m1_t _tmp12a = vfmacc_vf_f32m1(vfadd_vv_f32m1(_tmp02, _tmp06, vl), -4.25f, _tmp04, vl);
vfloat32m1_t _tmp12b = vfmacc_vf_f32m1(vfadd_vv_f32m1(_tmp01, _tmp05, vl), -4.25f, _tmp03, vl);
vfloat32m1_t _r0tm1 = vfadd_vv_f32m1(_tmp12a, _tmp12b, vl);
vfloat32m1_t _r0tm2 = vfsub_vv_f32m1(_tmp12a, _tmp12b, vl);
vfloat32m1_t _tmp34a = vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp06, 0.25f, _tmp02, vl), -1.25f, _tmp04, vl);
vfloat32m1_t _tmp34b = vfmacc_vf_f32m1(vfmacc_vf_f32m1(vfmul_vf_f32m1(_tmp01, 0.5f, vl), -2.5f, _tmp03, vl), 2.f, _tmp05, vl);
vfloat32m1_t _r0tm3 = vfadd_vv_f32m1(_tmp34a, _tmp34b, vl);
vfloat32m1_t _r0tm4 = vfsub_vv_f32m1(_tmp34a, _tmp34b, vl);
vfloat32m1_t _tmp56a = vfmacc_vf_f32m1(_tmp06, 4.f, vfmacc_vf_f32m1(_tmp02, -1.25f, _tmp04, vl), vl);
vfloat32m1_t _tmp56b = vfmacc_vf_f32m1(vfmacc_vf_f32m1(vfmul_vf_f32m1(_tmp01, 2.f, vl), -2.5f, _tmp03, vl), 0.5f, _tmp05, vl);
vfloat32m1_t _r0tm5 = vfadd_vv_f32m1(_tmp56a, _tmp56b, vl);
vfloat32m1_t _r0tm6 = vfsub_vv_f32m1(_tmp56a, _tmp56b, vl);
vse32_v_f32m1(r0_tm_0, _r0tm0, vl);
vse32_v_f32m1(r0_tm_1, _r0tm1, vl);
vse32_v_f32m1(r0_tm_2, _r0tm2, vl);
vse32_v_f32m1(r0_tm_3, _r0tm3, vl);
vse32_v_f32m1(r0_tm_4, _r0tm4, vl);
vse32_v_f32m1(r0_tm_5, _r0tm5, vl);
vse32_v_f32m1(r0_tm_6, _r0tm6, vl);
vse32_v_f32m1(r0_tm_7, _r0tm7, vl);
r0_tm_0 += tiles * packn * 8;
r0_tm_1 += tiles * packn * 8;
r0_tm_2 += tiles * packn * 8;
r0_tm_3 += tiles * packn * 8;
r0_tm_4 += tiles * packn * 8;
r0_tm_5 += tiles * packn * 8;
r0_tm_6 += tiles * packn * 8;
r0_tm_7 += tiles * packn * 8;
}
}
}
}
}
// Winograd F(6x6,3x3) output transform, packn-packed RVV layout.
// Each 8x8 transformed tile is reduced to a 6x6 block of output pixels
// with the A^T matrix below: a column pass into a temporary buffer and a
// row pass that adds the per-channel bias and writes the final pixels.
static void conv3x3s1_winograd64_transform_output_packn_rvv(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
// packn = number of 32-bit lanes in one vector register (vlenb bytes / 4)
const int packn = csrr_vlenb() / 4;
const word_type vl = vsetvl_e32m1(packn);
const int outw = top_blob.w;
const int outh = top_blob.h;
const int outch = top_blob.c;
// every tile yields a 6x6 output block
const int w_tiles = outw / 6;
const int h_tiles = outh / 6;
const int tiles = w_tiles * h_tiles;
const float* biasptr = bias;
// const float otm[6][8] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
// };
// 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32
// 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
// 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
// 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
// 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
// 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)
// output channels are independent, so parallelize across them
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob.channel(p);
// per-channel bias vector; zero when no bias is supplied
vfloat32m1_t _bias0 = biasptr ? vle32_v_f32m1(biasptr + p * packn, vl) : vfmv_v_f_f32m1(0.f, vl);
// NOTE c99 variable length array
float tmp[6][8][packn];
// tile
for (int i = 0; i < h_tiles; i++)
{
for (int j = 0; j < w_tiles; j++)
{
// transform coefficients of one tile are spaced tiles * packn apart
const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * packn;
const float* output0_tm_1 = output0_tm_0 + tiles * packn;
const float* output0_tm_2 = output0_tm_0 + tiles * packn * 2;
const float* output0_tm_3 = output0_tm_0 + tiles * packn * 3;
const float* output0_tm_4 = output0_tm_0 + tiles * packn * 4;
const float* output0_tm_5 = output0_tm_0 + tiles * packn * 5;
const float* output0_tm_6 = output0_tm_0 + tiles * packn * 6;
const float* output0_tm_7 = output0_tm_0 + tiles * packn * 7;
float* output0 = out0.row(i * 6) + (j * 6) * packn;
// pass 1: reduce the 8 columns (8 -> 6 rows) into tmp
for (int m = 0; m < 8; m++)
{
vfloat32m1_t _out0tm0 = vle32_v_f32m1(output0_tm_0, vl);
vfloat32m1_t _out0tm1 = vle32_v_f32m1(output0_tm_1, vl);
vfloat32m1_t _out0tm2 = vle32_v_f32m1(output0_tm_2, vl);
vfloat32m1_t _out0tm3 = vle32_v_f32m1(output0_tm_3, vl);
vfloat32m1_t _out0tm4 = vle32_v_f32m1(output0_tm_4, vl);
vfloat32m1_t _out0tm5 = vle32_v_f32m1(output0_tm_5, vl);
vfloat32m1_t _out0tm6 = vle32_v_f32m1(output0_tm_6, vl);
vfloat32m1_t _out0tm7 = vle32_v_f32m1(output0_tm_7, vl);
vfloat32m1_t _tmp024a = vfadd_vv_f32m1(_out0tm1, _out0tm2, vl);
vfloat32m1_t _tmp135a = vfsub_vv_f32m1(_out0tm1, _out0tm2, vl);
vfloat32m1_t _tmp024b = vfadd_vv_f32m1(_out0tm3, _out0tm4, vl);
vfloat32m1_t _tmp135b = vfsub_vv_f32m1(_out0tm3, _out0tm4, vl);
vfloat32m1_t _tmp024c = vfadd_vv_f32m1(_out0tm5, _out0tm6, vl);
vfloat32m1_t _tmp135c = vfsub_vv_f32m1(_out0tm5, _out0tm6, vl);
vfloat32m1_t _tmp0m = vfadd_vv_f32m1(vfadd_vv_f32m1(_out0tm0, _tmp024a, vl), vfmacc_vf_f32m1(_tmp024b, 32.f, _tmp024c, vl), vl);
vfloat32m1_t _tmp2m = vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp024a, 4.f, _tmp024b, vl), 8.f, _tmp024c, vl);
vfloat32m1_t _tmp4m = vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp024a, 16.f, _tmp024b, vl), 2.f, _tmp024c, vl);
vse32_v_f32m1(tmp[0][m], _tmp0m, vl);
vse32_v_f32m1(tmp[2][m], _tmp2m, vl);
vse32_v_f32m1(tmp[4][m], _tmp4m, vl);
vfloat32m1_t _tmp1m = vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp135a, 2.f, _tmp135b, vl), 16.f, _tmp135c, vl);
vfloat32m1_t _tmp3m = vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp135a, 8.f, _tmp135b, vl), 4.f, _tmp135c, vl);
vfloat32m1_t _tmp5m = vfadd_vv_f32m1(vfadd_vv_f32m1(_out0tm7, _tmp135a, vl), vfmacc_vf_f32m1(_tmp135c, 32.f, _tmp135b, vl), vl);
vse32_v_f32m1(tmp[1][m], _tmp1m, vl);
vse32_v_f32m1(tmp[3][m], _tmp3m, vl);
vse32_v_f32m1(tmp[5][m], _tmp5m, vl);
output0_tm_0 += tiles * packn * 8;
output0_tm_1 += tiles * packn * 8;
output0_tm_2 += tiles * packn * 8;
output0_tm_3 += tiles * packn * 8;
output0_tm_4 += tiles * packn * 8;
output0_tm_5 += tiles * packn * 8;
output0_tm_6 += tiles * packn * 8;
output0_tm_7 += tiles * packn * 8;
}
// pass 2: reduce the 8 row entries (8 -> 6 columns), add the bias
// and store the 6x6 output block
for (int m = 0; m < 6; m++)
{
vfloat32m1_t _tmp00 = vle32_v_f32m1(tmp[m][0], vl);
vfloat32m1_t _tmp01 = vle32_v_f32m1(tmp[m][1], vl);
vfloat32m1_t _tmp02 = vle32_v_f32m1(tmp[m][2], vl);
vfloat32m1_t _tmp03 = vle32_v_f32m1(tmp[m][3], vl);
vfloat32m1_t _tmp04 = vle32_v_f32m1(tmp[m][4], vl);
vfloat32m1_t _tmp05 = vle32_v_f32m1(tmp[m][5], vl);
vfloat32m1_t _tmp06 = vle32_v_f32m1(tmp[m][6], vl);
vfloat32m1_t _tmp07 = vle32_v_f32m1(tmp[m][7], vl);
vfloat32m1_t _tmp024a = vfadd_vv_f32m1(_tmp01, _tmp02, vl);
vfloat32m1_t _tmp135a = vfsub_vv_f32m1(_tmp01, _tmp02, vl);
vfloat32m1_t _tmp024b = vfadd_vv_f32m1(_tmp03, _tmp04, vl);
vfloat32m1_t _tmp135b = vfsub_vv_f32m1(_tmp03, _tmp04, vl);
vfloat32m1_t _tmp024c = vfadd_vv_f32m1(_tmp05, _tmp06, vl);
vfloat32m1_t _tmp135c = vfsub_vv_f32m1(_tmp05, _tmp06, vl);
vfloat32m1_t _out00 = vfadd_vv_f32m1(_bias0, vfadd_vv_f32m1(vfadd_vv_f32m1(_tmp00, _tmp024a, vl), vfmacc_vf_f32m1(_tmp024b, 32.f, _tmp024c, vl), vl), vl);
vfloat32m1_t _out02 = vfadd_vv_f32m1(_bias0, vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp024a, 4.f, _tmp024b, vl), 8.f, _tmp024c, vl), vl);
vfloat32m1_t _out04 = vfadd_vv_f32m1(_bias0, vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp024a, 16.f, _tmp024b, vl), 2.f, _tmp024c, vl), vl);
vse32_v_f32m1(output0, _out00, vl);
vse32_v_f32m1(output0 + packn * 2, _out02, vl);
vse32_v_f32m1(output0 + packn * 4, _out04, vl);
vfloat32m1_t _out01 = vfadd_vv_f32m1(_bias0, vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp135a, 2.f, _tmp135b, vl), 16.f, _tmp135c, vl), vl);
vfloat32m1_t _out03 = vfadd_vv_f32m1(_bias0, vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp135a, 8.f, _tmp135b, vl), 4.f, _tmp135c, vl), vl);
vfloat32m1_t _out05 = vfadd_vv_f32m1(_bias0, vfadd_vv_f32m1(vfadd_vv_f32m1(_tmp07, _tmp135a, vl), vfmacc_vf_f32m1(_tmp135c, 32.f, _tmp135b, vl), vl), vl);
vse32_v_f32m1(output0 + packn, _out01, vl);
vse32_v_f32m1(output0 + packn * 3, _out03, vl);
vse32_v_f32m1(output0 + packn * 5, _out05, vl);
output0 += outw * packn;
}
}
}
}
}
// Winograd F(4x4,3x3) input transform, packn-packed RVV layout.
// Each 6x6 input tile (tiles advance by 4 pixels, overlapping by 2) is
// multiplied by the B^T matrix shown below — a row pass into a temporary
// buffer followed by a column pass into bottom_blob_tm. Every logical
// element is a vector of packn floats processed with m1 RVV intrinsics.
static void conv3x3s1_winograd42_transform_input_packn_rvv(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
// packn = number of 32-bit lanes in one vector register (vlenb bytes / 4)
const int packn = csrr_vlenb() / 4;
const word_type vl = vsetvl_e32m1(packn);
const int w = bottom_blob.w;
const int h = bottom_blob.h;
const int inch = bottom_blob.c;
// 6x6 tiles step by 4 (the output tile size), hence (dim - 2) / 4
const int w_tiles = (w - 2) / 4;
const int h_tiles = (h - 2) / 4;
const int tiles = w_tiles * h_tiles;
// const float itm[6][6] = {
// {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
// {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
// {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
// {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
// };
// 0 = 4 * r00 - 5 * r02 + r04
// 1 = -4 * (r01 + r02) + r04 + r03
// 2 = 4 * (r01 - r02) + r04 - r03
// 3 = -2 * (r01 - r03) + r04 - r02
// 4 = 2 * (r01 - r03) + r04 - r02
// 5 = 4 * r01 - 5 * r03 + r05
// channels are independent, so parallelize across them
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const Mat img0 = bottom_blob.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
// NOTE c99 variable length array
float tmp[6][6][packn];
// tile
for (int i = 0; i < h_tiles; i++)
{
for (int j = 0; j < w_tiles; j++)
{
// pass 1: transform the 6 rows of the tile into tmp (stored
// transposed: tmp[out_row][m][...] so pass 2 reads rows)
const float* r0 = img0.row(i * 4) + (j * 4) * packn;
for (int m = 0; m < 6; m++)
{
vfloat32m1_t _r00 = vle32_v_f32m1(r0, vl);
vfloat32m1_t _r01 = vle32_v_f32m1(r0 + packn, vl);
vfloat32m1_t _r02 = vle32_v_f32m1(r0 + packn * 2, vl);
vfloat32m1_t _r03 = vle32_v_f32m1(r0 + packn * 3, vl);
vfloat32m1_t _r04 = vle32_v_f32m1(r0 + packn * 4, vl);
vfloat32m1_t _r05 = vle32_v_f32m1(r0 + packn * 5, vl);
vfloat32m1_t _tmp0m = vfmacc_vf_f32m1(vfmacc_vf_f32m1(_r04, 4.f, _r00, vl), -5.f, _r02, vl);
vfloat32m1_t _tmp1m = vfmacc_vf_f32m1(vfadd_vv_f32m1(_r04, _r03, vl), -4.f, vfadd_vv_f32m1(_r01, _r02, vl), vl);
vfloat32m1_t _tmp2m = vfmacc_vf_f32m1(vfsub_vv_f32m1(_r04, _r03, vl), 4.f, vfsub_vv_f32m1(_r01, _r02, vl), vl);
vfloat32m1_t _tmp3m = vfmacc_vf_f32m1(vfsub_vv_f32m1(_r04, _r02, vl), -2.f, vfsub_vv_f32m1(_r01, _r03, vl), vl);
vfloat32m1_t _tmp4m = vfmacc_vf_f32m1(vfsub_vv_f32m1(_r04, _r02, vl), 2.f, vfsub_vv_f32m1(_r01, _r03, vl), vl);
vfloat32m1_t _tmp5m = vfmacc_vf_f32m1(vfmacc_vf_f32m1(_r05, 4.f, _r01, vl), -5.f, _r03, vl);
vse32_v_f32m1(tmp[0][m], _tmp0m, vl);
vse32_v_f32m1(tmp[1][m], _tmp1m, vl);
vse32_v_f32m1(tmp[2][m], _tmp2m, vl);
vse32_v_f32m1(tmp[3][m], _tmp3m, vl);
vse32_v_f32m1(tmp[4][m], _tmp4m, vl);
vse32_v_f32m1(tmp[5][m], _tmp5m, vl);
r0 += w * packn;
}
// pass 2: transform the 6 columns and scatter the 6 transform
// coefficients into img0_tm; coefficients of the same tile are
// spaced tiles * packn apart
float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * packn;
float* r0_tm_1 = r0_tm_0 + tiles * packn;
float* r0_tm_2 = r0_tm_0 + tiles * packn * 2;
float* r0_tm_3 = r0_tm_0 + tiles * packn * 3;
float* r0_tm_4 = r0_tm_0 + tiles * packn * 4;
float* r0_tm_5 = r0_tm_0 + tiles * packn * 5;
for (int m = 0; m < 6; m++)
{
vfloat32m1_t _tmp00 = vle32_v_f32m1(tmp[m][0], vl);
vfloat32m1_t _tmp01 = vle32_v_f32m1(tmp[m][1], vl);
vfloat32m1_t _tmp02 = vle32_v_f32m1(tmp[m][2], vl);
vfloat32m1_t _tmp03 = vle32_v_f32m1(tmp[m][3], vl);
vfloat32m1_t _tmp04 = vle32_v_f32m1(tmp[m][4], vl);
vfloat32m1_t _tmp05 = vle32_v_f32m1(tmp[m][5], vl);
vfloat32m1_t _r0tm0 = vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp04, 4.f, _tmp00, vl), -5.f, _tmp02, vl);
vfloat32m1_t _r0tm1 = vfmacc_vf_f32m1(vfadd_vv_f32m1(_tmp04, _tmp03, vl), -4.f, vfadd_vv_f32m1(_tmp01, _tmp02, vl), vl);
vfloat32m1_t _r0tm2 = vfmacc_vf_f32m1(vfsub_vv_f32m1(_tmp04, _tmp03, vl), 4.f, vfsub_vv_f32m1(_tmp01, _tmp02, vl), vl);
vfloat32m1_t _r0tm3 = vfmacc_vf_f32m1(vfsub_vv_f32m1(_tmp04, _tmp02, vl), -2.f, vfsub_vv_f32m1(_tmp01, _tmp03, vl), vl);
vfloat32m1_t _r0tm4 = vfmacc_vf_f32m1(vfsub_vv_f32m1(_tmp04, _tmp02, vl), 2.f, vfsub_vv_f32m1(_tmp01, _tmp03, vl), vl);
vfloat32m1_t _r0tm5 = vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp05, 4.f, _tmp01, vl), -5.f, _tmp03, vl);
vse32_v_f32m1(r0_tm_0, _r0tm0, vl);
vse32_v_f32m1(r0_tm_1, _r0tm1, vl);
vse32_v_f32m1(r0_tm_2, _r0tm2, vl);
vse32_v_f32m1(r0_tm_3, _r0tm3, vl);
vse32_v_f32m1(r0_tm_4, _r0tm4, vl);
vse32_v_f32m1(r0_tm_5, _r0tm5, vl);
r0_tm_0 += tiles * packn * 6;
r0_tm_1 += tiles * packn * 6;
r0_tm_2 += tiles * packn * 6;
r0_tm_3 += tiles * packn * 6;
r0_tm_4 += tiles * packn * 6;
r0_tm_5 += tiles * packn * 6;
}
}
}
}
}
// Inverse Winograd F(4x4,3x3) output transform for packn-interleaved RVV data.
// Every 6x6 tile of the GEMM result `top_blob_tm` is reduced to a 4x4 spatial
// tile of `top_blob` by applying the transform matrix `otm` (shown below) once
// along each tile dimension; the per-channel bias is added in the second pass.
// packn = vlenb/4 floats (one full e32m1 vector) are processed per instruction.
static void conv3x3s1_winograd42_transform_output_packn_rvv(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
const int packn = csrr_vlenb() / 4;
const word_type vl = vsetvl_e32m1(packn);
const int outw = top_blob.w;
const int outh = top_blob.h;
const int outch = top_blob.c;
// each tile produces a 4x4 patch of the output plane
const int w_tiles = outw / 4;
const int h_tiles = outh / 4;
const int tiles = w_tiles * h_tiles;
const float* biasptr = bias;
// const float otm[4][6] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
// };
// 0 = r00 + (r01 + r02) + (r03 + r04)
// 1 = (r01 - r02) + (r03 - r04) * 2
// 2 = (r01 + r02) + (r03 + r04) * 4
// 3 = r05 + (r01 - r02) + (r03 - r04) * 8
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob.channel(p);
// bias is stored packn-interleaved per output channel; zero vector if absent
vfloat32m1_t _bias0 = biasptr ? vle32_v_f32m1(biasptr + p * packn, vl) : vfmv_v_f_f32m1(0.f, vl);
// NOTE variable length array
float tmp[4][6][packn];
// tile
for (int i = 0; i < h_tiles; i++)
{
for (int j = 0; j < w_tiles; j++)
{
// the six slices of a tile are stored tiles*packn floats apart
const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * packn;
const float* output0_tm_1 = output0_tm_0 + tiles * packn;
const float* output0_tm_2 = output0_tm_0 + tiles * packn * 2;
const float* output0_tm_3 = output0_tm_0 + tiles * packn * 3;
const float* output0_tm_4 = output0_tm_0 + tiles * packn * 4;
const float* output0_tm_5 = output0_tm_0 + tiles * packn * 5;
float* output0 = out0.row(i * 4) + (j * 4) * packn;
// first pass: apply otm along one tile dimension, 6x6 -> 4x6 into tmp
for (int m = 0; m < 6; m++)
{
vfloat32m1_t _out0tm0 = vle32_v_f32m1(output0_tm_0, vl);
vfloat32m1_t _out0tm1 = vle32_v_f32m1(output0_tm_1, vl);
vfloat32m1_t _out0tm2 = vle32_v_f32m1(output0_tm_2, vl);
vfloat32m1_t _out0tm3 = vle32_v_f32m1(output0_tm_3, vl);
vfloat32m1_t _out0tm4 = vle32_v_f32m1(output0_tm_4, vl);
vfloat32m1_t _out0tm5 = vle32_v_f32m1(output0_tm_5, vl);
// shared sub-expressions of the four otm rows: (r1+r2), (r1-r2), (r3+r4), (r3-r4)
vfloat32m1_t _tmp02a = vfadd_vv_f32m1(_out0tm1, _out0tm2, vl);
vfloat32m1_t _tmp13a = vfsub_vv_f32m1(_out0tm1, _out0tm2, vl);
vfloat32m1_t _tmp02b = vfadd_vv_f32m1(_out0tm3, _out0tm4, vl);
vfloat32m1_t _tmp13b = vfsub_vv_f32m1(_out0tm3, _out0tm4, vl);
vfloat32m1_t _tmp0m = vfadd_vv_f32m1(vfadd_vv_f32m1(_out0tm0, _tmp02a, vl), _tmp02b, vl);
vfloat32m1_t _tmp1m = vfmacc_vf_f32m1(_tmp13a, 2.f, _tmp13b, vl);
vfloat32m1_t _tmp2m = vfmacc_vf_f32m1(_tmp02a, 4.f, _tmp02b, vl);
vfloat32m1_t _tmp3m = vfmacc_vf_f32m1(vfadd_vv_f32m1(_out0tm5, _tmp13a, vl), 8.f, _tmp13b, vl);
vse32_v_f32m1(tmp[0][m], _tmp0m, vl);
vse32_v_f32m1(tmp[1][m], _tmp1m, vl);
vse32_v_f32m1(tmp[2][m], _tmp2m, vl);
vse32_v_f32m1(tmp[3][m], _tmp3m, vl);
output0_tm_0 += tiles * packn * 6;
output0_tm_1 += tiles * packn * 6;
output0_tm_2 += tiles * packn * 6;
output0_tm_3 += tiles * packn * 6;
output0_tm_4 += tiles * packn * 6;
output0_tm_5 += tiles * packn * 6;
}
// second pass: apply otm along the other dimension and add the bias,
// producing the final 4x4 output patch
for (int m = 0; m < 4; m++)
{
vfloat32m1_t _tmp00 = vle32_v_f32m1(tmp[m][0], vl);
vfloat32m1_t _tmp01 = vle32_v_f32m1(tmp[m][1], vl);
vfloat32m1_t _tmp02 = vle32_v_f32m1(tmp[m][2], vl);
vfloat32m1_t _tmp03 = vle32_v_f32m1(tmp[m][3], vl);
vfloat32m1_t _tmp04 = vle32_v_f32m1(tmp[m][4], vl);
vfloat32m1_t _tmp05 = vle32_v_f32m1(tmp[m][5], vl);
vfloat32m1_t _tmp02a = vfadd_vv_f32m1(_tmp01, _tmp02, vl);
vfloat32m1_t _tmp13a = vfsub_vv_f32m1(_tmp01, _tmp02, vl);
vfloat32m1_t _tmp02b = vfadd_vv_f32m1(_tmp03, _tmp04, vl);
vfloat32m1_t _tmp13b = vfsub_vv_f32m1(_tmp03, _tmp04, vl);
vfloat32m1_t _out00 = vfadd_vv_f32m1(_bias0, vfadd_vv_f32m1(vfadd_vv_f32m1(_tmp00, _tmp02a, vl), _tmp02b, vl), vl);
vfloat32m1_t _out01 = vfadd_vv_f32m1(_bias0, vfmacc_vf_f32m1(_tmp13a, 2.f, _tmp13b, vl), vl);
vfloat32m1_t _out02 = vfadd_vv_f32m1(_bias0, vfmacc_vf_f32m1(_tmp02a, 4.f, _tmp02b, vl), vl);
vfloat32m1_t _out03 = vfadd_vv_f32m1(_bias0, vfmacc_vf_f32m1(vfadd_vv_f32m1(_tmp05, _tmp13a, vl), 8.f, _tmp13b, vl), vl);
vse32_v_f32m1(output0, _out00, vl);
vse32_v_f32m1(output0 + packn, _out01, vl);
vse32_v_f32m1(output0 + packn * 2, _out02, vl);
vse32_v_f32m1(output0 + packn * 3, _out03, vl);
output0 += outw * packn;
}
}
}
}
}
|
calc.h | #ifndef DEF_NARROWBAND_H
#define DEF_NARROWBAND_H
/* =========================================================================
Copyright (c) 2008-2015, Institute for Microelectronics, TU Wien.
-----------------
ViennaTS - The Vienna Topography Simulator
-----------------
Contact: viennats@iue.tuwien.ac.at
License: MIT (X11), see file LICENSE in the base directory
============================================================================= */
//The OpenMP header below is only pulled in when parallel mode is active (_OPENMP defined)
#include "Cells.h"
#include "Time.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include "Math.h"
#include <algorithm>
#include <vector>
#include <numeric>
#include "LSlib/vector.hpp"
#include "message.h"
#include "boundaries.h"
#include <cmath>
#include <cassert>
#include "LSlib/math.hpp"
///Namespace for calculation helpers.
namespace calc {
///Non-owning adapter that presents a Dimensions-dimensional double array as a
///3D vector: components with index >= Dimensions read as 0.
///NOTE: stores only a borrowed pointer, so the wrapped array must outlive
///any Make3DVector referring to it.
template <int Dimensions> class Make3DVector {
    const double* v;    //borrowed pointer to at least Dimensions doubles
public:
    ///Construct from a pointer to the first of Dimensions components.
    ///explicit: a raw pointer must never silently convert to a vector view.
    explicit Make3DVector(const double* v2):v(v2) {}
    ///Component access; out-of-range indices yield 0 (padding up to 3D).
    inline double operator[](int i) const {
        return (i<Dimensions)?v[i]:0.;
    }
};
template <class ParameterType> class PartitionTraits {
public:
typedef int IntCoordType;
typedef unsigned int IntLinkType;
//typedef const geom::cell<D>* CellRefType;
typedef unsigned int CellRefType;
static inline CellRefType UndefinedCellRef() { return std::numeric_limits<CellRefType>::max();}//return 0;}
static inline IntLinkType UndefinedLink() { return std::numeric_limits<IntLinkType>::max();}
//static const double SurfaceAreaHeuristicLambda=ParameterType::SurfaceAreaHeuristicLambda;
//static const partition::SplittingModeType PartitionMode=ParameterType::PartitionMode;
static const int Dimension=ParameterType::Dimension;
};
///Collects all grid cells of the level set `l` that can interact with the
///receptor disks used for flux calculation.
///A cell is kept when
///  (a) its corner signs differ, i.e. the surface passes through it, or
///  (b) the receptor disk (radius RecepterRadius, centered at distance
///      DistancesToReceiver[id] along the normal of an active corner point)
///      overlaps the cell in every coordinate direction.
///For each kept cell the corner point ids are appended to `Cells` and the
///cell's grid indices to `CellCoordinates`.
///NormalVectors holds D components per active point id and
///DistancesToReceiver one scalar per active point id
///(as produced by CalculateNormalVectors).
template<int D, class LS, class NormalVectorVectorClass, class DistancesVectorClass>
void SetupCells( const LS& l,geom::cells<D> &Cells,
                 std::vector<lvlset::vec<int,D> > &CellCoordinates,
                 const NormalVectorVectorClass& NormalVectors,
                 const DistancesVectorClass& DistancesToReceiver,
                 double RecepterRadius
               ) {
    Cells.clear();
    for (typename LS::template const_iterator_cells_filtered<typename LS::filter_active> it(l);!it.is_finished();it.next()) {
        bool cell_contains_disk=false;
        //sum of the (1<<D) corner signs: 0 or (1<<D) means all corners lie on
        //the same side of the surface, any other value means the cell is crossed
        int sgn_count=0;
        for (int i=0;i<(1<<D);i++) {
            sgn_count+=it.corner(i).sign();
        }
        if ((sgn_count!=(1<<D)) && (sgn_count!=0)) {
            cell_contains_disk=true;
        } else {
            //all corners on one side: the cell can still be touched by the
            //receptor disk attached to one of its active corner points
            for (int i=0;i<(1<<D);++i) {
                if (it.corner(i).is_active()) {
                    unsigned int id=it.corner(i).active_pt_id();
                    const double &d=DistancesToReceiver[id];
                    //project the disk onto each axis: center at n*d relative to
                    //corner i, half-extent sqrt(1-n*n)*RecepterRadius; the disk
                    //can intersect the cell only if this interval overlaps the
                    //unit cell range [0,1] in every direction
                    bool cell_contains_disk2=true;
                    for (int dir=0;dir < D;++dir) {
                        const double &n=NormalVectors[id*D+dir];
                        double min=n*d+((i>>dir) & 1);
                        double max=min;
                        double tmp=std::sqrt(std::max(0.,1-n*n))*RecepterRadius;
                        min-=tmp;
                        max+=tmp;
                        if ((max<0.) || (min>1.) ) {
                            cell_contains_disk2=false;
                            break;
                        }
                    }
                    if (cell_contains_disk2) {
                        cell_contains_disk=true;
                        break;
                    }
                }
            }
        }
        if (cell_contains_disk) {
            //record the cell's grid indices and its corner point ids
            CellCoordinates.push_back(lvlset::vec<int,D>(it.indices()));
            Cells.push_back(geom::cell<D>());
            for (unsigned int i=0;i<(1<<D); i++ ) {
                Cells.back().Points[i]=it.corner(i).pt_id();
                assert(it.corner(i).is_defined());
            }
        }
    }
}
///Computes for every active level-set point a normalized, gradient-based
///surface normal (D components per point, stored flat in NormalVectors) and
///the signed distance from the grid point to the receptor-disk center along
///that normal (DistancesToReceiver, one value per point).
///If the local gradient vanishes, the fallback direction `t` is used: the
///normalized `default_directions` if non-zero, otherwise the unit vector
///along the open boundary direction (sign given by is_open_boundary_negative).
///The distance is afterwards clipped so that a disk of radius ReceptorRadius
///centered there stays within the voxels attached to the grid point.
template <class LS> void CalculateNormalVectors(
const LS& l,
std::vector<double>& NormalVectors,
std::vector<double>& DistancesToReceiver,
int open_boundary_direction,
bool is_open_boundary_negative,
double ReceptorRadius,
const lvlset::vec<double,LS::dimensions> & default_directions=lvlset::vec<double,LS::dimensions>(0)) {
const int D=LS::dimensions;
//fallback direction for points with zero gradient
lvlset::vec<double,D> t=default_directions;
double tmp=Norm(t);
if (tmp==0) {
t[open_boundary_direction]=(is_open_boundary_negative)?-1.:1.;
} else {
t/=tmp;
}
NormalVectors.clear();
NormalVectors.resize(l.num_active_pts()*D);
DistancesToReceiver.clear();
DistancesToReceiver.resize(l.num_active_pts(),0.);
//!Calculate Normalvectors
typename LS::points_type segmentation=l.get_new_segmentation();
//NOTE(review): orphaned `omp for` - it binds to an enclosing parallel region
//if the caller provides one and runs sequentially otherwise; confirm callers
//wrap this in `#pragma omp parallel`.
#pragma omp for schedule(static, 1) // parallelization - Iterations divided into chunks of size 1. Each chunk is assigned to a thread
for (int p=0;p<= static_cast<int>(segmentation.size());++p) {
//segment p covers [begin_v,end_v); p==segmentation.size() is the last segment
//reaching up to the grid maximum
typename LS::point_type begin_v=(p==0)?l.grid().min_point_index():segmentation[p-1];
typename LS::point_type end_v=(p!=static_cast<int>(segmentation.size()))?segmentation[p]:l.grid().increment_indices(l.grid().max_point_index());
for (typename LS::template const_iterator_neighbor_filtered<typename LS::filter_active, 1> it(l, typename LS::filter_active(), begin_v);it.indices()<end_v;it.next()) {
double* n=&NormalVectors[it.center().active_pt_id2()*D];
double& dist=DistancesToReceiver[it.center().active_pt_id2()];
for (int i=0;i<D;i++) {
//forward and backward differences of the level-set value along axis i
double pos=it.neighbor(i).value()-it.center().value();
double neg=it.center().value()-it.neighbor(i+D).value();
n[i]=0;
if ((pos > 0 && neg < 0) || (pos < 0 && neg > 0)) {
//one-sided differences disagree in sign: pick the one matching the
//preferred default direction, or average if no preference is given
if (default_directions[i]<0) {
n[i]=std::min(neg,pos);
} else if (default_directions[i]>0) {
n[i]=std::max(neg,pos);
} else {
n[i]=(pos+neg)*0.5;
}
} else {
//central difference
n[i]=(pos+neg)*0.5;
}
}
//for (int i=0;i<D;i++) n[i]=it.gradient2(i);
double tmp_max=std::fabs(n[0]);
for (int uu=1;uu<D;uu++) tmp_max=std::max(tmp_max,std::fabs(n[uu]));
if (tmp_max==0.) {
//degenerate gradient: use the fallback direction and zero distance
for (int uu=0;uu<D;uu++) n[uu]=t[uu];
dist=0.;
} else {
//normalize; scaling by tmp_max keeps the squared terms well-conditioned
double no2=0.;
for (int uu=0;uu<D;uu++) no2+=my::math::pow2(n[uu]/tmp_max);
double no=tmp_max*std::sqrt(no2);
for (int uu=0;uu<D;uu++) n[uu]/=no;
//signed distance to the zero level set: -phi/|grad phi|
dist=-it.center().value()/no;
}
//reduce distance if receptor disk would not completely lie inside of attached voxels
for (int i=0;i<D;i++) {
dist=my::math::Sign(dist)*std::max(
0.,
std::min(
std::fabs(dist),
(1.-ReceptorRadius*std::sqrt(
std::max(0.,1-n[i]*n[i])
))/std::fabs(n[i]))
);
assert(!std::isnan(dist));
}
//for (int i=0;i<D;++i) NormalVectors.push_back(n[i]);
//DistancesToReceiver.push_back(dist);
}
}
}
///Computes one curvature value per active level-set point (stored in
///CurvatureVectors) from second-order central differences on the 18-point
///face/edge neighbor stencil; the final value is num/|grad phi|^3, the mean
///curvature expression noted inline below.
///`initialized` signals whether the shared neighbor iterators in
///it_neighbors have already been set up: on the first point they are created
///once, afterwards they are only advanced sequentially for speed.
///NOTE(review): the stencil constants and the (i,j,k) offset loops are the 3D
///stencil - confirm the intended behavior for D==2 before relying on it there.
template <class LS> void CalculateCurvatureVectors(const LS& l, std::vector<double>& CurvatureVectors, bool initialized) {
////std::cout << "here!\n";
const int D=LS::dimensions;
typedef typename LS::index_type index_type;
CurvatureVectors.clear();
CurvatureVectors.resize(l.num_active_pts());
typename LS::points_type segmentation=l.get_new_segmentation();
//one offset iterator per stencil neighbor, shared across all points
std::vector<typename LS::const_iterator_runs_offset> it_neighbors;
//NOTE(review): orphaned `omp for` - binds to an enclosing parallel region if
//the caller provides one, otherwise runs sequentially.
#pragma omp for schedule(static, 1) // parallelization - Iterations divided into chunks of size 1. Each chunk is assigned to a thread
for (int p=0;p<= static_cast<int>(segmentation.size());++p) {
//segment p covers [begin_v,end_v); the extra last iteration reaches the grid maximum
typename LS::point_type begin_v=(p==0)?l.grid().min_point_index():segmentation[p-1];
typename LS::point_type end_v=(p!=static_cast<int>(segmentation.size()))?segmentation[p]:l.grid().increment_indices(l.grid().max_point_index());
for (typename LS::template const_iterator_neighbor_filtered<typename LS::filter_active, 1> it(l, typename LS::filter_active(), begin_v);it.indices()<end_v;it.next()) {
double* curv=&CurvatureVectors[it.center().active_pt_id2()];
if (initialized) {
//advance the existing stencil iterators to the current point
for (unsigned int i=0;i<it_neighbors.size();i++) it_neighbors[i].go_to_indices_sequential(it.indices());
} else {
//build the stencil once: all offsets with at least one zero component
//and not all components zero (faces and edges, 18 offsets in 3D); the
//loop order produces exactly the order of the named constants below
for (int i=-1;i<=1;++i) {
for (int j=-1;j<=1;++j) {
for (int k=-1;k<=1;++k) {
if (((i!=0) || (j!=0) || (k!=0)) && ((i==0) || (j==0) || (k==0))) {
lvlset::vec<index_type,D> v(i,j,k);
////std::cout << "v(" << v[0] << ", " << v[1] << ", " << v[2] << ")\n";
it_neighbors.push_back(typename LS::const_iterator_runs_offset(l, v,it.indices()));
}
}
}
}
initialized=true;
}
//named indices into it_neighbors; m/0/p encode offset -1/0/+1 per axis
const int XmYmZ0=0;
const int XmY0Zm=1;
const int XmY0Z0=2;
const int XmY0Zp=3;
const int XmYpZ0=4;
const int X0YmZm=5;
const int X0YmZ0=6;
const int X0YmZp=7;
const int X0Y0Zm=8;
const int X0Y0Zp=9;
const int X0YpZm=10;
const int X0YpZ0=11;
const int X0YpZp=12;
const int XpYmZ0=13;
const int XpY0Zm=14;
const int XpY0Z0=15;
const int XpY0Zp=16;
const int XpYpZ0=17;
//first derivatives: central differences
double PhiX=(it_neighbors[XpY0Z0].value()-it_neighbors[XmY0Z0].value())*0.5;
double PhiY=(it_neighbors[X0YpZ0].value()-it_neighbors[X0YmZ0].value())*0.5;
double PhiZ=(it_neighbors[X0Y0Zp].value()-it_neighbors[X0Y0Zm].value())*0.5;
//second derivatives: central differences through the center value
double PhiXX=it_neighbors[XpY0Z0].value()+it_neighbors[XmY0Z0].value()-2*it.center().value();
double PhiYY=it_neighbors[X0YpZ0].value()+it_neighbors[X0YmZ0].value()-2*it.center().value();
double PhiZZ=it_neighbors[X0Y0Zp].value()+it_neighbors[X0Y0Zm].value()-2*it.center().value();
//mixed derivatives from the four diagonal (edge) neighbors per plane
double PhiXY=(it_neighbors[XpYpZ0].value()+it_neighbors[XmYmZ0].value()-it_neighbors[XpYmZ0].value()-it_neighbors[XmYpZ0].value())*0.25;
double PhiXZ=(it_neighbors[XpY0Zp].value()+it_neighbors[XmY0Zm].value()-it_neighbors[XpY0Zm].value()-it_neighbors[XmY0Zp].value())*0.25;
double PhiYZ=(it_neighbors[X0YpZp].value()+it_neighbors[X0YmZm].value()-it_neighbors[X0YmZp].value()-it_neighbors[X0YpZm].value())*0.25;
//const int mode=0;
double denom=PhiX*PhiX+PhiY*PhiY+PhiZ*PhiZ;
double num= 0.5*PhiX*PhiX*(PhiYY+PhiZZ)-PhiY*PhiZ*PhiYZ+ //mean curvature
0.5*PhiY*PhiY*(PhiXX+PhiZZ)-PhiX*PhiZ*PhiXZ+
0.5*PhiZ*PhiZ*(PhiXX+PhiYY)-PhiX*PhiY*PhiXY;
// double s=0.;
//if (material<material_level) {
if (denom!=0) {
//curvature = num/|grad phi|^3
*curv=num/(denom*std::sqrt(denom));
////std::cout << "*curv: " << *curv << std::endl;
// if ((k>max_curvature) || (k<min_curvature)) s= -num/denom;
} else {
//zero gradient: assign +-max as a saturated curvature, sign from num
if (num>0) {
*curv=-std::numeric_limits<double>::max();
} else {
*curv=std::numeric_limits<double>::max();
}
}
//}
// return s;
// //std::cout << "curv: " << *curv << "\n";
}
}
//!Calculate Curvature vectors
}
namespace {
//Aggregates a particle cluster's state during ray tracing: the coordinates X
//(local to the current partition subbox) and the subbox itself.
//NOTE(review): anonymous namespace in a header gives every including
//translation unit its own distinct copy of this type; consider moving it to a
//named detail namespace (callers use the name unqualified, so update them too).
template<int D, class PartitionType>
class ClusterPositionType {
public:
double X[D];
typename PartitionType::subbox Subbox;
};
}
// [Josef] Main function where particles are tracked and collision with interface is checked!
template <class ModelType, class ParameterType, class PartitionType, class LevelSetType> void CalculateRates(
const ModelType &Model,
const ParameterType &Parameter,
const PartitionType &Partition,
const LevelSetType &SurfaceLevelSet,
const std::vector<double>& NormalVectors,
const std::vector<double>& DistancesToReceiver,
const std::vector<double>& Coverages,
std::vector<double>& Rates,
const std::vector<unsigned int>& PointMaterials,
const geom::cells<ParameterType::Dimension>& Cells,
double ProcessTime) {
// std::cout << "1\n";
const int D=ParameterType::Dimension;
typedef ClusterPositionType<D, PartitionType> ClusterPositionType;
const double ReceptorRadius=Parameter.receptor_radius;
const double ReceptorRadius2=ReceptorRadius*ReceptorRadius;
const double further_tracking_distance=Parameter.further_tracking_distance; //default is 3
//Initialize Rates
unsigned int num_active_points=SurfaceLevelSet.num_active_pts();
Rates.clear();
if(NormalVectors.size()!=num_active_points*D){
std::cout << "Assert normal vector size: " << NormalVectors.size() << " = " << num_active_points*D << "\n";
assert(0);
}
if(Coverages.size()<num_active_points*Model.CoverageStorageSize){
std::cout << "Assert Coverage size: " << Coverages.size() << " >= " << num_active_points*Model.CoverageStorageSize << "\n";
assert(0);
}
#ifdef _OPENMP
const int max_threads=omp_get_max_threads();
#else
const int max_threads=1;
#endif
std::vector<std::vector<double > > all_tmp_Rates(
max_threads,
std::vector<double>(num_active_points*Model.RatesStorageSize,0.)
);
double RecepterArea=(D==3)?(Parameter.receptor_radius*Parameter.receptor_radius*my::math::Pi):(2.*Parameter.receptor_radius);
if (!ModelType::SpatiallyEqualDistributedFlux) {
RecepterArea*=Parameter.grid_delta;
if (D==3) RecepterArea*=Parameter.grid_delta;
}
////std::cout << "Recepter Area: " << RecepterArea << endl;
#pragma omp parallel
{
//determine the number of different starting locations
//in case of equally distributed flux the number of starting places is equal to the open surface area measured in grid spacings
//in case of non-equally distributed flux the number of starting places is set to the number of threads
#ifdef _OPENMP
const int my_num_threads=omp_get_num_threads();
const int my_thread_num=omp_get_thread_num();
#else
const int my_num_threads=1;
const int my_thread_num=0;
#endif
const int NumStartingPlaces=(ModelType::SpatiallyEqualDistributedFlux)?
static_cast<int>(Partition.AreaSize(Parameter.open_boundary)): //AXIS
my_num_threads;
//for each thread a vector is defined, where the rates are stored
std::vector<double>& tmp_Rates=all_tmp_Rates[my_thread_num];
assert(tmp_Rates.size()==num_active_points*Model.RatesStorageSize);
//std::cout << "assert temp rates size \n";
//stacks to store the particles and their positions
std::stack<typename ModelType::ParticleType> ParticleStack;
std::stack<ClusterPositionType> ParticlePositionsStack;
//beginning of parallel section with dynamic scheduling
#pragma omp for schedule(dynamic) //Chunks are dynamically assigned to threads on a first-come, first-serve basis as threads become available.
for (int StartingPlace=0;StartingPlace<NumStartingPlaces;++StartingPlace) { //for each starting place do
//used to store the partition subbox the particles starts from
typename PartitionType::subbox starting_subbox;
//the start position of the particle (in global coordinates)
double StartPosition[3];
//if spatially equal distributed flux, determine the start_box
if (ModelType::SpatiallyEqualDistributedFlux) {
unsigned int tmp_s=StartingPlace;
int tmp_dim=Parameter.open_boundary; //AXIS
if (tmp_dim==0) tmp_dim=D;
--tmp_dim;
for (int i=0;i<D-2;++i) {
StartPosition[tmp_dim]=tmp_s%Partition.Extension(tmp_dim);
tmp_s/=Partition.Extension(tmp_dim);
if (tmp_dim==0) tmp_dim=D;
--tmp_dim;
}
StartPosition[tmp_dim]=tmp_s;
starting_subbox=Partition.Access(StartPosition, Parameter.open_boundary, Parameter.open_boundary_negative);
//std::cout << "equaldistributed \n";
}
//for each involved particle type do
for (unsigned int ParticleType=0;ParticleType<Model.NumberOfParticleTypes;++ParticleType) {
//if (ParticleType==1) //std::cout << "AH!!!\n";
//determine the number of particles which have to be simulated
const unsigned int NumOfParticles=(ModelType::SpatiallyEqualDistributedFlux)?
Model.NumberOfParticleClusters[ParticleType]:
Model.NumberOfParticleClusters[ParticleType]/my_num_threads
+(my_thread_num < static_cast<int>(Model.NumberOfParticleClusters[ParticleType]%my_num_threads)?1:0);
//for each particle do
for (unsigned int ParticleCounter=0;ParticleCounter<NumOfParticles;++ParticleCounter) {
//std::cout << "\nparticles\n";
//generate cluster energy and direction
typename ModelType::ParticleType p;
//typename ModelType::TipHeightType dist;
Model.ParticleGeneration(p,ParticleType,ProcessTime, StartPosition);
//std::cout << "\nparticlegeneration\n";
//if particle is not moving downwards
if (Parameter.open_boundary_negative) {
if(p.Direction[Parameter.open_boundary]<=0.) continue;
} else {
if(p.Direction[Parameter.open_boundary]>=0.) continue;
}
//calculate represented flux by that particle
p.Flux/=Model.NumberOfParticleClusters[ParticleType];
// //std::cout<<"p.Flux1="<<p.Flux<<"\n";
p.Flux/=RecepterArea;
// //std::cout<<"p.Flux2="<<p.Flux<<"\n";
//determine starting position and starting subbox
ClusterPositionType cp;
if (ModelType::SpatiallyEqualDistributedFlux) {
//if flux is equal distributed
//chose random start position
for (int i=0;i<D;++i) {
cp.X[i]=StartPosition[i];
if (i!=Parameter.open_boundary) cp.X[i]+=my::stat::RandomNumber();
}
cp.Subbox=starting_subbox;
//determine additional particles, which are necessary to account for extended boundaries
int zmax[D-1];
int dir=Parameter.open_boundary;
for (int i=0;i<D-1;++i) {
dir=(Parameter.open_boundary+i+1)%D;
zmax[i]=0;
if (dir!=Parameter.open_boundary) {
if ((Parameter.boundary_conditions[dir].min==bnc::EXTENDED_BOUNDARY) && (p.Direction[dir]>0)) {
zmax[i]=static_cast<int>(
std::ceil(
(
-std::min( std::fabs((Partition.Extension(Parameter.open_boundary)*p.Direction[dir])/p.Direction[Parameter.open_boundary]),
static_cast<double>(Parameter.max_extended_starting_position)
)
-(cp.X[dir]+(cp.Subbox.Min(dir)-Partition.Min(dir)))
)/Partition.Extension(dir)
)
);
assert(zmax[i]<=0);
} else if ((Parameter.boundary_conditions[dir].max==bnc::EXTENDED_BOUNDARY) && (p.Direction[dir]<0)) {
zmax[i]=static_cast<int>(
std::floor(
(
std::min( std::fabs((Partition.Extension(Parameter.open_boundary)*p.Direction[dir])/p.Direction[Parameter.open_boundary]),
static_cast<double>(Parameter.max_extended_starting_position)
)
-(cp.X[dir]+(cp.Subbox.Min(dir)-Partition.Min(dir)))
)/Partition.Extension(dir)
)
)+1;
assert(zmax[i]>=0);
}
}
}
int counter[D-1];
for (int k=0;k<D-1;++k) counter[k]=0;
//add additional particles to the stack
while (true) {
int h=0;
for (;h<D-1;++h) {
if (counter[h]!=zmax[h]) {
if (zmax[h]>0) ++counter[h]; else --counter[h];
break;
} else {
counter[h]=0;
}
}
if (h==D-1) break;
ClusterPositionType new_cp;
for (int g=0;g<D-1;++g) {
int dir=(g+Parameter.open_boundary+1)%D;
new_cp.X[dir]= cp.X[dir]+
static_cast<double>(cp.Subbox.Min(dir)-Partition.Min(dir))+ //TODO check!!!
static_cast<double>(counter[g])*static_cast<double>(Partition.Extension(dir));
}
new_cp.Subbox=Partition.Access(new_cp.X, Parameter.open_boundary, Parameter.open_boundary_negative);
ParticlePositionsStack.push(new_cp);
ParticleStack.push(p);
}
//std::cout << "again EDF\n";
}
else {
for (int i=0;i<D;++i) cp.X[i]=StartPosition[i]/Parameter.grid_delta; //scale starting position
double t=-( cp.X[Parameter.open_boundary]-
((Parameter.open_boundary_negative)?Partition.Min(Parameter.open_boundary):Partition.Max(Parameter.open_boundary))
)/p.Direction[Parameter.open_boundary];
//Move cp.X to the top LS surface and update horizontal axis values (not open boundary value)
for (int dir=0;dir<D;++dir) {
if (dir!=Parameter.open_boundary) {
bool ReverseSign;
cp.X[dir]=Parameter.boundary_conditions[dir].map_coordinate(Partition.Min(dir), Partition.Max(dir),cp.X[dir]+p.Direction[dir]*t, ReverseSign);
if (ReverseSign) p.Direction[dir]=-p.Direction[dir];
}
}
cp.Subbox=Partition.Access(cp.X, Parameter.open_boundary, Parameter.open_boundary_negative);
//cp.X is now position within subbox after removing the "global components"
}
//loop until particle stack is empty
while (true) {
//initialize the travelled distance from the intersection with -oo
double travelled_distance_from_intersection(-std::numeric_limits<double>::max());
//std::cout << "DTFI\n";
//the indices of the surface grid cell which was previously visited
int last_surface_cell_indices[D];
for (int r=0;r<D;++r) last_surface_cell_indices[r]=Partition.Min(r)-2; //initialize with invalid indices
//Iterate through the cells between LS.Max() and LS.Min() until surface reached or particle exits environment
while (true) {
//std::cout << "particleIteration\n";
//get reference to actual cluster
const typename PartitionType::subbox &Subbox= cp.Subbox;
//#######################################################
//# find max distance within box #
//#######################################################
double max_distance_in_box=std::numeric_limits<double>::max();
int LeavingDirection=-1; //LeavingDirection : 0,1,2 particle leaves box in x,y,z direction respectively
std::bitset<D> PositionStatusPos; //for each direction the bit is set if a particle is outside (in positive direction) of the regular simulation domain
std::bitset<D> PositionStatusNeg; //for each direction the bit is set if a particle is outside (in negative direction) of the regular simulation domain
//for each dimension do
int i;
for (i=0;i<D;i++) {
//std::cout << "Dimension\n";
double t_temp=std::numeric_limits<double>::max();
//Subbox.Extension(dir) is the length of the subbox in the dir direction
//Subbox.Min(dir) is the global coordinate (grid points) of the Subbox edge in the min dir direction
//Subbox.Max(dir) is the global coordinate (grid points) of the Subbox edge in the max dir direction
//Global coordinate is then found by Subbox.Min(dir)+cp.X[dir]
//When outside the min extended boundary
if ((cp.X[i]<=0) && (Parameter.boundary_conditions[i].min==bnc::EXTENDED_BOUNDARY)) {
if (p.Direction[i]>0.) {
if (cp.X[i]==0) {
t_temp=(Subbox.Extension(i)-cp.X[i])/p.Direction[i];
} else {
t_temp=-cp.X[i]/p.Direction[i];
PositionStatusNeg.set(i);
}
} else {
if (cp.X[i]<0/*-Parameter.DomainExtension*/) break;
PositionStatusNeg.set(i);
}
//When outside the max extended boundary
} else if ((cp.X[i]>=Subbox.Extension(i)) && (Parameter.boundary_conditions[i].max==bnc::EXTENDED_BOUNDARY)) {
if (p.Direction[i]<0.) {
if (cp.X[i]==Subbox.Extension(i)) {
t_temp=-cp.X[i]/p.Direction[i];
} else {
t_temp=(Subbox.Extension(i)-cp.X[i])/p.Direction[i];
PositionStatusPos.set(i);
}
} else {
if (cp.X[i]>Subbox.Extension(i)/*+Parameter.DomainExtension*/) break;
PositionStatusPos.set(i);
}
} else {
if (p.Direction[i]>0.) {
//t_temp is the variable to determine time to reach Subbox.Extension(i) from cp.X in p.Direction[i]
t_temp=(Subbox.Extension(i)-cp.X[i])/p.Direction[i];
} else if (p.Direction[i]<0.) {
t_temp=-cp.X[i]/p.Direction[i];
}
}
//Determine which axis is the leaving direction of the particle
if (t_temp<max_distance_in_box) {
max_distance_in_box=t_temp;
LeavingDirection=i;
}
}
//cp.X remains unchanged at this point, only max_distance_in_box and LeavingDirection are found
// //std::cout << "cp.X4 " << "(" << cp.X[0] << "," << cp.X[1] << "," << cp.X[2] << ")\n";
// //std::cout << "max_dinstance_in_box: " << max_distance_in_box << "\n";
// //std::cout << "LeavingDirection: " << LeavingDirection << "\n";
//Now have max_distance_in_box = distance from cp.X to point of exit in the i direction
//where i is the leaving direction (x,y,z)=(0,1,2)
if ((i!=D) || (LeavingDirection==-1)) break;
//When the subbox which received the particle also contains within it the surface boundary
if (Subbox.ContainsCell()) {
// std::cout << "ContainsCell()\n";
//if subbox is a surface grid cell
const geom::cell<D> &Cell=Cells[Subbox.Cell()];
//std::cout << "containsCell\n";
//Calculate the exit direction and distance as before to see if surface is intersected
//#######################################################
//# check for surface intersection #
//#######################################################
//[Josef] This is where the particle is tracked within a subbox for surface intersection
//Check if Surface is intersected between position and position+max_distance_in_box*direction
if (travelled_distance_from_intersection==-std::numeric_limits<double>::max()) {
//get distances at corners
double Rho[1<<D];
int sgn_count=0;
for (int i=0;i<(1<<D);i++) {
Rho[i]=SurfaceLevelSet.value2(Cell.Points[((std::bitset<D>(i) | PositionStatusPos) & (~PositionStatusNeg)).to_ulong()]);
//std::cout << "Rho[" << i << "] = " << Rho[i] << "\n";
if (Rho[i]>0) sgn_count++;
}
// std::cout << "HERE!\n";
if (sgn_count!=(1<<D)) {
my::math::TransformRho2<D>::exec(Rho);
double relative_distance_to_intersection;
if (sgn_count==0) {
relative_distance_to_intersection=0.;
} else {
my::math::Polynom<double, D> poly;
my::math::DetermineCoefficientsForImplicitRayTracing<D>(
cp.X,
p.Direction,
Rho,
&(poly.Coefficients()[0])
);
// std::cout << "cp.X5: (" << cp.X[0] << "," << cp.X[1] << "," << cp.X[2] << ")\n";
relative_distance_to_intersection=my::math::FindFirstTransitionFromPosToNegOfPolynomNewton(0., max_distance_in_box, poly,1e-6);
}
if (relative_distance_to_intersection < std::numeric_limits<double>::max()) { //if particle hits surface
//std::cout << "hits\n";
travelled_distance_from_intersection=-relative_distance_to_intersection;
ClusterPositionType new_cp=cp;
for (int kk=0;kk<D;kk++) new_cp.X[kk]+=relative_distance_to_intersection*p.Direction[kk];
// std::cout << "RDI\n";
//p.Direction[kk] is unchanged at this point
//new_cp.X contains the coordinates within the Subbox at the subbox exit point
//determine normal vector
double tmp_normalvec[3];
my::math::CalculateNormal<D>(tmp_normalvec,new_cp.X,Rho);
if (D==2) tmp_normalvec[2]=0.;
// std::cout << "tmp_normalvec(): " << tmp_normalvec[0] << ", " << tmp_normalvec[1] << ", " << tmp_normalvec[2] << "\n";
double dot=tmp_normalvec[0]*p.Direction[0];
for (int w=1;w<D;++w) dot+=tmp_normalvec[w]*p.Direction[w];
if (dot>=0.) {
msg::print_warning("Particle hits negative side of surface! Particle is skipped.");
break;
}
//calculate nearest active grid point to determine coverages and material (using Manhattan distance for speedup)
unsigned int gp=0;
unsigned int mat=0;
if ((ModelType::CoverageStorageSize>0) || (ModelType::ReemissionIsMaterialDependent)) {
// std::cout << "CSS>0 RMD\n";
double dist=std::numeric_limits<double>::max();
// std::cout << "D = " << D << "\n";
for (int g=0;g<(1<<D);g++) {
// std::cout << "g = " << g << "\n";
unsigned int tmp_gp= SurfaceLevelSet.active_pt_id(Cell.Points[g]);
// std::cout << "tmp_gp = " << tmp_gp << "\n";
// std::cout << "new_cp = " << new_cp.X[0] << ", " << new_cp.X[1] << ", " << new_cp.X[2] << "\n";
// std::cout << "LevelSetType::INACTIVE = " << LevelSetType::INACTIVE << "\n";
if (tmp_gp!=LevelSetType::INACTIVE) {
// std::cout << "4\n";
double tmp_dist=0;
// std::cout << "5\n";
for (int iii=0;iii<D;iii++) tmp_dist+=(((g & (1<<iii))==0)?(new_cp.X[iii]):(1.-new_cp.X[iii]));
// std::cout << "6\n";
// std::cout << "dist = " << dist << "\n";
// std::cout << "tmp_dist = " << tmp_dist << "\n";
if (tmp_dist<dist) {
// std::cout << "7\n";
dist=tmp_dist;
gp=tmp_gp;
// std::cout << "gp = " << gp << "\n";
}
}
}
// std::cout << "PointMaterials["<<gp<<"] = "<<PointMaterials[gp]<<"\n";
mat=PointMaterials[gp];
}
//perform particle reemission
// std::cout << "Reflection\n";
Model.ParticleReflexion( p,
ParticleStack,
tmp_normalvec,
&Coverages[gp*Model.CoverageStorageSize],
mat//, D, dot
);
while (ParticleStack.size()>ParticlePositionsStack.size()) ParticlePositionsStack.push(new_cp);
}
}
}
//std::cout << "positionStatusPos\n";
if (PositionStatusPos.none() && PositionStatusNeg.none()) {
//#####################################################################
//# determine corners which have to be checked for disk intersections #
//#####################################################################
std::bitset<(1<<D)> corners;
for (int dir=0;dir<D;++dir) {
switch(Subbox.Min(dir)-last_surface_cell_indices[dir]) {
case 0:
break;
case 1:
corners>>= (1<<dir);
corners|= ((dir<2)?((dir<1)?0xAA:0xCC):0xF0);
break;
case -1:
corners<<= (1<<dir);
corners|= ((dir<2)?((dir<1)?0x55:0x33):0x0F);
break;
default:
corners.set();
}
}
for (int s=0;s<D;++s) last_surface_cell_indices[s]=Subbox.Min(s);
//#######################################################
//# check for disk intersections #
//#######################################################
//[Josef] This is where the four corners of the box containing the particle are checked for intersection
//all 8 neighbors have to be checked if they are active and their disks are hit
for (int g=0;g<(1<<D);g++) {
if(corners.test(g)) {
unsigned int gp= SurfaceLevelSet.active_pt_id(Cell.Points[g]);
if (gp!=LevelSetType::INACTIVE) {
unsigned int gpD=gp*D;
double cos=-NormalVectors[gpD]*p.Direction[0];
for (int kk=1;kk<D;kk++) cos-=NormalVectors[gpD+kk]*p.Direction[kk];
if (cos > 0.) {
//calculate relative position to disk midpoint
double rel_pos[D];
for (int kk=0;kk<D;kk++) rel_pos[kk]=cp.X[kk]-((g>>kk) & 1)-NormalVectors[gpD+kk]*DistancesToReceiver[gp];
//rel_pos holds cp.X not cp_new.X
//calculate rel_pos*disk_normal
double rel_pos_dot_normal=rel_pos[0]*NormalVectors[gpD];
for (int kk=1;kk<D;kk++) rel_pos_dot_normal+=rel_pos[kk]*NormalVectors[gpD+kk];
if ( rel_pos_dot_normal <= (further_tracking_distance-travelled_distance_from_intersection)*cos ) {
double tmpx=my::math::pow2(rel_pos[0]*cos+p.Direction[0]*rel_pos_dot_normal);
for (int kk=1;kk<D;kk++) tmpx+=my::math::pow2(rel_pos[kk]*cos+p.Direction[kk]*rel_pos_dot_normal);
if (tmpx<=cos*cos*ReceptorRadius2) {
int Factor=1;
for (int kk=0;kk<D;kk++) {
if (kk!=Parameter.open_boundary) {
if ((Parameter.boundary_conditions[kk].min==bnc::REFLECTIVE_BOUNDARY) || (Parameter.boundary_conditions[kk].min==bnc::EXTENDED_BOUNDARY)) {
if ((g & (1<<kk))==0) {
if (Partition.Min(kk)==Subbox.Min(kk)) {
if (cp.X[kk]*cos+p.Direction[kk]*rel_pos_dot_normal>=0.) {
Factor<<=1;
} else {
Factor=0;
break;
}
}
} else {
if (Partition.Max(kk)==Subbox.Max(kk)) {
if (cp.X[kk]*cos+p.Direction[kk]*rel_pos_dot_normal<=cos) {
Factor<<=1;
} else {
Factor=0;
break;
}
}
}
}
}
}
for (;Factor>0;--Factor) {
//[Josef] Here, the particle has collided with the surface so the model's function to deal with this is called.
// int mat = 0;
// if ((ModelType::CoverageStorageSize>0) || (ModelType::ReemissionIsMaterialDependent))
// mat = PointMaterials[gp];
//std::cout << "Collision\n";
Model.ParticleCollision( p,
Make3DVector<D>(&NormalVectors[gpD]),
//&NormalVectors[gpD],
&(tmp_Rates[gp*Model.RatesStorageSize]),
&(Coverages[gp*Model.CoverageStorageSize]),
ProcessTime//,
// mat
);
}
}
}
}
}
}
}
}
}
//Dealing with particles beyond simulation boundaries and boundary conditions:
//######################################################################
//# check if calculation of particle cluster trajectory can be stopped #
//######################################################################
if (travelled_distance_from_intersection!=-std::numeric_limits<double>::max()) {
travelled_distance_from_intersection+=max_distance_in_box;
if (travelled_distance_from_intersection>=further_tracking_distance) break;
}
//#######################################################
//# calculate exit point #
//#######################################################
for (int kk=0;kk<D;kk++) {
if (kk!=LeavingDirection) cp.X[kk]+=p.Direction[kk]*max_distance_in_box;
}
if (PositionStatusNeg.test(LeavingDirection)) { //particle enters regular simulation domain from negative side
cp.X[LeavingDirection]=0;
} else if (PositionStatusPos.test(LeavingDirection)) { //particle enters regular simulation domain from positive side
cp.X[LeavingDirection]=cp.Subbox.Extension(LeavingDirection);
} else {
//#######################################################
//# get next box #
//#######################################################
int old_min=cp.Subbox.Min(LeavingDirection);
bool IsDirectionPositive=(p.Direction[LeavingDirection]>=0.);
if (Partition.GoToNeighborBox(cp.Subbox,cp.X,LeavingDirection, IsDirectionPositive)) {
if (IsDirectionPositive) {
if (Parameter.boundary_conditions[LeavingDirection].max==bnc::INFINITE_BOUNDARY) break;
if (Parameter.boundary_conditions[LeavingDirection].max==bnc::REFLECTIVE_BOUNDARY) p.Direction[LeavingDirection]=-p.Direction[LeavingDirection];
cp.X[LeavingDirection]=cp.Subbox.Extension(LeavingDirection);
} else {
if (Parameter.boundary_conditions[LeavingDirection].min==bnc::INFINITE_BOUNDARY) break;
if (Parameter.boundary_conditions[LeavingDirection].min==bnc::REFLECTIVE_BOUNDARY) p.Direction[LeavingDirection]=-p.Direction[LeavingDirection];
cp.X[LeavingDirection]=0;
}
last_surface_cell_indices[LeavingDirection]=Partition.Min(LeavingDirection)-2;
} else {
cp.X[LeavingDirection]=(IsDirectionPositive)?0:cp.Subbox.Extension(LeavingDirection);
if ((Parameter.boundary_conditions[LeavingDirection].min==bnc::PERIODIC_BOUNDARY) && PositionStatusPos.none() && PositionStatusNeg.none()) {
if (IsDirectionPositive) {
if (old_min>=cp.Subbox.Min(LeavingDirection)) last_surface_cell_indices[LeavingDirection]-=Partition.Extension(LeavingDirection);
} else {
if (old_min<=cp.Subbox.Min(LeavingDirection)) last_surface_cell_indices[LeavingDirection]+=Partition.Extension(LeavingDirection);
}
}
}
}
}
// std::cout << ParticleStack.size() << std::endl;
if (ParticleStack.empty()) break;
//#######################################################
//# retrieve particle from stack #
//#######################################################
p=ParticleStack.top();
ParticleStack.pop();
cp=ParticlePositionsStack.top();
ParticlePositionsStack.pop();
} // end while loop: until particle stack is empty
}//end of particle loop
}//end of particle type loop
}
#pragma omp single //run by a single available thread.
{
Rates.swap(all_tmp_Rates[0]);
}
#pragma omp for
for (int i=0;i<static_cast<int>(Rates.size());i++) {
for (int j=1;j<my_num_threads;j++) {
Rates[i]+=all_tmp_Rates[j][i];
}
}
// [josef] now that all thead-exclusive thread rates have been merged, we can output them
if (Model.OutputFluxes) {
{
std::ofstream outputfile("rates.csv");
for (typename LevelSetType::const_iterator_runs it(SurfaceLevelSet); !it.is_finished(); it.next())
{
if(it.active_pt_id() != LevelSetType::INACTIVE)
{
for (int j=0;j<D;j++) outputfile << (it.start_indices()[j]) << " ";
outputfile << Rates[it.active_pt_id()] << std::endl;
}
}
outputfile.close();
}
{
std::ofstream outputfile("rates_griddelta.csv");
for (typename LevelSetType::const_iterator_runs it(SurfaceLevelSet); !it.is_finished(); it.next())
{
if(it.active_pt_id() != LevelSetType::INACTIVE)
{
for (int j=0;j<D;j++) outputfile << (it.start_indices()[j])*Parameter.grid_delta << " ";
outputfile << Rates[it.active_pt_id()] << std::endl;
}
}
outputfile.close();
}
}
}
//local_time=my::time::GetTime()-StartTime;
}
// template<class ModelType> void UpdateCoverages(const std::vector<double>& Rates, std::vector<double>& Coverages, const ModelType& Model) {
// Advance the surface coverages one time step using the current rates.
// Walks the per-point coverage and rate arrays in lockstep, stepping by the
// model's per-point storage sizes, and lets the model integrate each point.
//
// Fix: the original compared against &(*(Rates.end())), which dereferences
// the past-the-end iterator (undefined behavior), and took &Coverages[0] /
// &Rates[0] even for empty vectors (also UB).  Use data()/size() instead
// and bail out early on empty input.
template<class ModelType> void UpdateCoverages(const std::vector<double>& Rates, std::vector<double>& Coverages,
                                               const ModelType& Model, double &time_step) {//, double &current_time) {
    if (Rates.empty() || Coverages.empty()) return;     // nothing to update
    double* c=Coverages.data();
    const double* r=Rates.data();
    const double* const r_end=Rates.data()+Rates.size();
    while (r<r_end) {
        Model.UpdateCoverage(c, r, time_step);//, current_time);
        c+=Model.CoverageStorageSize;                   // next point's coverages
        r+=Model.RatesStorageSize;                      // next point's rates
    }
}
// Advance the surface coverages using the current rates (no explicit time
// step: the model applies its own integration).  Same lockstep walk as the
// time-step overload.
//
// Fix: avoid dereferencing Rates.end() (UB in the original) and avoid
// &v[0] on potentially empty vectors; use data()/size() and an early out.
template<class ModelType> void UpdateCoverages(const std::vector<double>& Rates, std::vector<double>& Coverages, const ModelType& Model) {
    if (Rates.empty() || Coverages.empty()) return;     // nothing to update
    double* c=Coverages.data();
    const double* r=Rates.data();
    const double* const r_end=Rates.data()+Rates.size();
    while (r<r_end) {
        Model.UpdateCoverage(c, r);
        c+=Model.CoverageStorageSize;                   // next point's coverages
        r+=Model.RatesStorageSize;                      // next point's rates
    }
}
}
#endif //DEF_NARROWBAND_H
|
GB_binop__max_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__max_uint8)
// A.*B function (eWiseMult): GB (_AemultB_01__max_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__max_uint8)
// A.*B function (eWiseMult): GB (_AemultB_03__max_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__max_uint8)
// A*D function (colscale): GB (_AxD__max_uint8)
// D*A function (rowscale): GB (_DxB__max_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__max_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__max_uint8)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__max_uint8)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__max_uint8)
// C=scalar+B GB (_bind1st__max_uint8)
// C=scalar+B' GB (_bind1st_tran__max_uint8)
// C=A+scalar GB (_bind2nd__max_uint8)
// C=A'+scalar GB (_bind2nd_tran__max_uint8)
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = GB_IMAX (aij, bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IMAX (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MAX || GxB_NO_UINT8 || GxB_NO_MAX_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A and B are all dense; the loop body is supplied by the
// included template, specialized via the GB_* macros defined above.
void GB (_Cdense_ewise3_accum__max_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A and B are all dense (no accumulation into C).
GrB_Info GB (_Cdense_ewise3_noaccum__max_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// GrB_NO_VALUE signals the caller to fall back to the generic kernel
// when this operator/type combination is compiled out.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C, using the
// task slicing of B computed by the caller.
GrB_Info GB (_Cdense_accumB__max_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into every entry of the dense matrix C.
GrB_Info GB (_Cdense_accumb__max_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the block above always returns.  Kept as
// emitted by the code generator (Generator/*); this file must not be
// hand-edited.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__max_uint8)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C shares A's pattern; only the numeric values Cx are computed here.
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__max_uint8)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C shares B's pattern; only the numeric values Cx are computed here.
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, with the union of the patterns of A and B.
GrB_Info GB (_AaddB__max_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspaces for slicing M, A and B; freed by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, with the intersection of the patterns.
GrB_Info GB (_AemultB_01__max_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
GrB_Info GB (_AemultB_02__max_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// GB_BINOP_FLIP is 0 for MAX (commutative), so only the #else branch is
// compiled here; the #if branch exists for non-commutative operators.
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, with M sparse/hyper and both A and B bitmap/full.
GrB_Info GB (_AemultB_03__max_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is held as bitmap.
GrB_Info GB (_AemultB_bitmap__max_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the binary operator with the scalar bound to the
// first argument, over all entries of B.
GrB_Info GB (_bind1st__max_uint8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries not present in the bitmap Bb (GBB is always true
// when Bb is NULL, i.e. for full matrices)
if (!GBB (Bb, p)) continue ;
uint8_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_IMAX (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the binary operator with the scalar bound to the
// second argument, over all entries of A.
GrB_Info GB (_bind2nd__max_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap Ab (always true for full A)
if (!GBB (Ab, p)) continue ;
uint8_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_IMAX (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMAX (x, aij) ; \
}
// C = op (x, A'): transpose A and apply the operator with x bound first,
// using the GB_CAST_OP macro defined just above.
GrB_Info GB (_bind1st_tran__max_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the remainder of this translation unit
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMAX (aij, y) ; \
}
// C = op (A', y): transpose A and apply the operator with y bound second,
// using the GB_CAST_OP macro defined just above.
GrB_Info GB (_bind2nd_tran__max_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute RESULT = X - Y for two `struct timeval` values.
 * Returns 1 if the difference is negative, otherwise 0.
 * NOTE: Y is normalized (modified) in place as part of the carry/borrow
 * handling, exactly as in the classic GNU libc example. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y's microsecond field so that
     * x->tv_usec - y->tv_usec cannot go negative. */
    if (x->tv_usec < y->tv_usec)
    {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_sec += borrow;
        y->tv_usec -= borrow * 1000000;
    }

    /* Carry excess microseconds into y's second field so the
     * microsecond difference stays below one second. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_sec -= carry;
        y->tv_usec += carry * 1000000;
    }

    /* With y normalized, the per-field subtraction is safe and
     * result->tv_usec is certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*7);
for(m=0; m<7;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 16;
tile_size[1] = 16;
tile_size[2] = 32;
tile_size[3] = 32;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<7; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] +
coef[1][i][j][k] * A[t%2][i-1][j ][k ] +
coef[2][i][j][k] * A[t%2][i ][j-1][k ] +
coef[3][i][j][k] * A[t%2][i ][j ][k-1] +
coef[4][i][j][k] * A[t%2][i+1][j ][k ] +
coef[5][i][j][k] * A[t%2][i ][j+1][k ] +
coef[6][i][j][k] * A[t%2][i ][j ][k+1];
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<7;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
MIMC_module.c | #ifndef _MIMC2_MODULE_
#include "MIMC_module.h"
#endif
#ifndef _GMA_
#include "GMA.h"
#endif
#ifndef _MATH_H_
#include <math.h>
#endif
#ifndef _STDIO_H_
#include <stdio.h>
#endif
#ifndef _TIME_H_
#include <time.h>
#endif
#define MIN_DN 0.0000000001
#ifndef _OMP_H
#include <omp.h>
#endif
extern float dt; //temporal baseline
extern int32_t num_dp; //number of multiple matching attempts
extern int32_t num_grid,dimx_vmap,dimy_vmap; //# of grids, dimension in mapx and mapy direction
extern param param_mimc2; //parameters that controlls the software
extern GMA_float **kernel;
int get_offset_image(GMA_float *i0,GMA_float *i1,GMA_float **kernel, GMA_double *xyuvav, int32_t *offset, GMA_uint8 *flag_cp)
{
//uint8_t isMeasurementSuccessful=1;
int32_t uvoi[2];
int32_t cnt,cntrow,cntrow1,cnt1,cnt2,cnt3,cntu,cntv;
int32_t num_cp,num_cp_candidate;
uint8_t *flag_cp_candidate=(uint8_t *)malloc(sizeof(uint8_t)*xyuvav->nrows);
GMA_int32 *uv_pivot_cp;
GMA_double *xyuvav_cp,*xyuvav_cp_sub;
GMA_float *dp_cp[16], *dp_cp_sub[16];
GMA_float *duv_cp;
GMA_float *stack_dp_dp;
GMA_float **imgchip_i0;
GMA_float **imgchip_i1;
int32_t *array_index_segment;
int32_t ocw_chip=param_mimc2.vec_ocw[2]+param_mimc2.AW_CRE+2;
int32_t num_dp_original=num_dp;
//int32_t num_cp_least=50;
char flag_sucessful_ft_cp=0;
num_dp=16;//temporary value; don't forget to revert this value to the original
GMA_float **mvn_dp_cp_sub;
//determine the number of control points
//printf("%f, %d\n",xyuvav->nrows*param_mimc2.ratio_cp, param_mimc2.num_cp_max);
if(xyuvav->nrows*param_mimc2.ratio_cp > param_mimc2.num_cp_max)
{
num_cp=param_mimc2.num_cp_max;
}
else
{
num_cp=(int32_t)(xyuvav->nrows*param_mimc2.ratio_cp);
}
printf("Threshold for the # of CP=%d\n",num_cp);
//determine the candidate for control points
printf("Finding candidates for the control points...");
num_cp_candidate=0;
for(cntrow=0;cntrow<xyuvav->nrows;cntrow++)
{
float spd_apv_sq=xyuvav->val[cntrow][4]*xyuvav->val[cntrow][4]+xyuvav->val[cntrow][5]*xyuvav->val[cntrow][5];
if(spd_apv_sq < param_mimc2.thres_spd_cp*param_mimc2.thres_spd_cp)
{
flag_cp_candidate[cntrow]=1;
num_cp_candidate++;
}
else
{
flag_cp_candidate[cntrow]=0;
}
}
//exclude the CP candidate if the corresponding image chip is not valid
GMA_float *sarea_i0=GMA_float_create(param_mimc2.vec_ocw[2]*2+1,param_mimc2.vec_ocw[2]*2+1);
GMA_float *sarea_i1=GMA_float_create(param_mimc2.vec_ocw[2]*2+1,param_mimc2.vec_ocw[2]*2+1);
int32_t sum_invalid_i0;
int32_t sum_invalid_i1;
int32_t thres_numpx=(param_mimc2.vec_ocw[2]*2+1)*(param_mimc2.vec_ocw[2]*2+1)/2;
for(cntrow=0;cntrow<xyuvav->nrows;cntrow++)
{
if(flag_cp_candidate[cntrow])
{
sum_invalid_i0=0;
sum_invalid_i1=0;
uvoi[0]=(int32_t)xyuvav->val[cntrow][2];
uvoi[1]=(int32_t)xyuvav->val[cntrow][3];
for(cnt1=-param_mimc2.vec_ocw[2];cnt1<=param_mimc2.vec_ocw[2];cnt1++)
{
for(cnt2=-param_mimc2.vec_ocw[2];cnt2<=param_mimc2.vec_ocw[2];cnt2++)
{
//if(!isnan(i0->val[cnt1][cnt2]))
if(i0->val[cnt1+uvoi[1]][cnt2+uvoi[0]]<0.00001)
{
sum_invalid_i0++;
}
if(i1->val[cnt1+uvoi[1]][cnt2+uvoi[0]]<0.00001)
{
sum_invalid_i1++;
}
}
}
if(sum_invalid_i0>thres_numpx || sum_invalid_i0>thres_numpx)
{
flag_cp_candidate[cntrow]=0;
num_cp_candidate--;
}
}
}
GMA_float_destroy(sarea_i0);
GMA_float_destroy(sarea_i1);
//dump the flag_cp_candidate for the debugging purpose
//GMA_uint8 *fcc=GMA_uint8_create(xyuvav->nrows,1);
//fcc->data=flag_cp_candidate;
//GMA_uint8_save("../MIMC2_C_GMA/flag_cp_candidate.GMA",fcc);
printf("Completed: %d grids\n",num_cp_candidate);
if(num_cp_candidate<param_mimc2.num_cp_min)
{
printf("Failed to extract sufficient amount of CP candidates (%d<%d)\n",num_cp_candidate,param_mimc2.num_cp_min);
return -1;
}
if(num_cp>num_cp_candidate)
{
num_cp=(int32_t)((float)num_cp_candidate*0.75);
printf("Not too much CP candidates(%d) - #CP threshold was adjusted to %d\n",num_cp_candidate,num_cp);
}
//extract the CP grids from xyuvav
cntrow1=0;
xyuvav_cp=GMA_double_create(num_cp_candidate,7);
duv_cp=GMA_float_create(num_cp_candidate,2);
for(cntrow=0;cntrow<xyuvav->nrows;cntrow++)
{
if(flag_cp_candidate[cntrow])
{
xyuvav_cp->val[cntrow1][0]=xyuvav->val[cntrow][0];
xyuvav_cp->val[cntrow1][1]=xyuvav->val[cntrow][1];
xyuvav_cp->val[cntrow1][2]=xyuvav->val[cntrow][2];
xyuvav_cp->val[cntrow1][3]=xyuvav->val[cntrow][3];
xyuvav_cp->val[cntrow1][4]=xyuvav->val[cntrow][4];
xyuvav_cp->val[cntrow1][5]=xyuvav->val[cntrow][5];
xyuvav_cp->val[cntrow1][6]=(double)cntrow; //row index of the grid chosen for the CP
cntrow1++;
}
}
//build up the uv_pivot for the extracted xyuvav_cp
//NOTE: All CP measurement make use of the common uv_pivot
int32_t num_pivot=(param_mimc2.AW_CRE*2+1)*(param_mimc2.AW_CRE*2+1);
uv_pivot_cp=GMA_int32_create(num_pivot,2);
int32_t cnt_pivot=0;
for(cnt1=-param_mimc2.AW_CRE;cnt1<=param_mimc2.AW_CRE;cnt1++)
{
for(cnt2=-param_mimc2.AW_CRE;cnt2<=param_mimc2.AW_CRE;cnt2++)
{
uv_pivot_cp->val[cnt_pivot][0]=cnt1;
uv_pivot_cp->val[cnt_pivot][1]=cnt2;
cnt_pivot++;
}
}
free(flag_cp_candidate);
//randomly mix the order of the rows in xyuvav_cp
GMA_double_randperm_row(xyuvav_cp);
//determine the range of the segment
//TODO: soft-code the numver of the segments, so that each segments has the grids whose mumbers are almost same as the CP threshold
int32_t num_segment=(num_cp_candidate<param_mimc2.num_cp_min) ? 1 : num_cp_candidate/num_cp;
printf("# segments=%d\n",num_segment);
array_index_segment=(int32_t*)malloc(sizeof(int32_t)*(num_segment+1));
array_index_segment[0]=0;
for(cnt=1;cnt<=num_segment;cnt++)
{
array_index_segment[cnt]=(int32_t)(num_cp_candidate*((float)cnt/(float)num_segment));
}
//Go through fature tracking/dpf1 determination in each segment
float sduv[2];
sduv[0]=0.0;
sduv[1]=0.0;
int32_t num_cp_current=0;
// int32_t flag_is362273=-1; //for debugging
// char filename_xyuvav_sub[1024];
for(cnt=0;cnt<num_segment;cnt++)
{
printf("CP measurement attempt: %d/%d - ",cnt+1,num_segment);
int32_t num_grid_sub=array_index_segment[cnt+1]-array_index_segment[cnt];
//allocate dp_cp_sub to store the feature tracking results of the segment
for(cnt1=0;cnt1<16;cnt1++) //NOTE: the "16" means the # of matching attempts per grid in CP measurement
{
dp_cp_sub[cnt1]=GMA_float_create(num_grid_sub,3);
}
//build up xyuvav_cp_sub
xyuvav_cp_sub=GMA_double_create(num_grid_sub,7);
//printf("Extracting xyuvav of the segment\n");
for(cnt1=0;cnt1<num_grid_sub;cnt1++)
{
xyuvav_cp_sub->val[cnt1][0]=xyuvav_cp->val[array_index_segment[cnt]+cnt1][0];
xyuvav_cp_sub->val[cnt1][1]=xyuvav_cp->val[array_index_segment[cnt]+cnt1][1];
xyuvav_cp_sub->val[cnt1][2]=xyuvav_cp->val[array_index_segment[cnt]+cnt1][2];
xyuvav_cp_sub->val[cnt1][3]=xyuvav_cp->val[array_index_segment[cnt]+cnt1][3];
xyuvav_cp_sub->val[cnt1][4]=xyuvav_cp->val[array_index_segment[cnt]+cnt1][4];
xyuvav_cp_sub->val[cnt1][5]=xyuvav_cp->val[array_index_segment[cnt]+cnt1][5];
xyuvav_cp_sub->val[cnt1][6]=xyuvav_cp->val[array_index_segment[cnt]+cnt1][6];
/*
if((int32_t)(xyuvav_cp_sub->val[cnt1][6]+0.5)==362272)
{
printf("Heads up. Current segment contains the 362273rd segment\n");
flag_is362273=cnt1;
}
sprintf(filename_xyuvav_sub,"../MIMC2_C_GMA/xyuvav_sub_%02d.GMA",cnt);
GMA_double_save(filename_xyuvav_sub,xyuvav_cp_sub);
*/
}
//GMA_double_save("../MIMC2_C_GMA/xyuvav_cp_sub.GMA",xyuvav_cp_sub);
//build up the image chip array
imgchip_i0=(GMA_float**)malloc(sizeof(GMA_float*)*num_grid_sub);
imgchip_i1=(GMA_float**)malloc(sizeof(GMA_float*)*num_grid_sub);
//printf("Initiating CP image matching\n");
for(cnt2=-1;cnt2<=2;cnt2++) //index for kernel
{
if(cnt2<0)
{//original image
for(cnt1=0;cnt1<num_grid_sub;cnt1++)
{
//just extract the image chips; no colvolution
int32_t uv_center[2];
uv_center[0]=(int32_t)xyuvav_cp_sub->val[cnt1][2];
uv_center[1]=(int32_t)xyuvav_cp_sub->val[cnt1][3];
imgchip_i0[cnt1]=GMA_float_create(ocw_chip*2+1,ocw_chip*2+1);
imgchip_i1[cnt1]=GMA_float_create(ocw_chip*2+1,ocw_chip*2+1);
for(cntv=-ocw_chip;cntv<=ocw_chip;cntv++)
{
for(cntu=-ocw_chip;cntu<=ocw_chip;cntu++)
{
imgchip_i0[cnt1]->val[cntv+ocw_chip][cntu+ocw_chip]=i0->val[uv_center[1]+cntv][uv_center[0]+cntu];
imgchip_i1[cnt1]->val[cntv+ocw_chip][cntu+ocw_chip]=i1->val[uv_center[1]+cntv][uv_center[0]+cntu];
}
}
}
}
else
{//convoluted image chips
GMA_float *imgchip_i0_temp=GMA_float_create(ocw_chip*2+3,ocw_chip*2+3);
GMA_float *imgchip_i1_temp=GMA_float_create(ocw_chip*2+3,ocw_chip*2+3);
GMA_float *imgchip_i0_temp_out=GMA_float_create(ocw_chip*2+3,ocw_chip*2+3);
GMA_float *imgchip_i1_temp_out=GMA_float_create(ocw_chip*2+3,ocw_chip*2+3);
for(cnt1=0;cnt1<num_grid_sub;cnt1++)
{
int32_t uv_center[2];
uv_center[0]=(int32_t)xyuvav_cp_sub->val[cnt1][2];
uv_center[1]=(int32_t)xyuvav_cp_sub->val[cnt1][3];
imgchip_i0[cnt1]=GMA_float_create(ocw_chip*2+1,ocw_chip*2+1);
imgchip_i1[cnt1]=GMA_float_create(ocw_chip*2+1,ocw_chip*2+1);
for(cntv=-ocw_chip-1;cntv<=ocw_chip+1;cntv++)
{
for(cntu=-ocw_chip-1;cntu<=ocw_chip+1;cntu++)
{
imgchip_i0_temp->val[ocw_chip+1+cntv][ocw_chip+1+cntu]=i0->val[uv_center[1]+cntv][uv_center[0]+cntu];
imgchip_i1_temp->val[ocw_chip+1+cntv][ocw_chip+1+cntu]=i1->val[uv_center[1]+cntv][uv_center[0]+cntu];
}
}
GMA_float_conv2(imgchip_i0_temp,kernel[cnt2],imgchip_i0_temp_out);
GMA_float_conv2(imgchip_i1_temp,kernel[cnt2],imgchip_i1_temp_out);
//copy the convoluted image chip to array
for(cntv=-ocw_chip;cntv<=ocw_chip;cntv++)
{
for(cntu=-ocw_chip;cntu<=ocw_chip;cntu++)
{
imgchip_i0[cnt1]->val[cntv+ocw_chip][cntu+ocw_chip]=imgchip_i0_temp_out->val[cntv+ocw_chip+1][cntu+ocw_chip+1];
imgchip_i1[cnt1]->val[cntv+ocw_chip][cntu+ocw_chip]=imgchip_i1_temp_out->val[cntv+ocw_chip+1][cntu+ocw_chip+1];
}
}
}
GMA_float_destroy(imgchip_i0_temp);
GMA_float_destroy(imgchip_i1_temp);
GMA_float_destroy(imgchip_i0_temp_out);
GMA_float_destroy(imgchip_i1_temp_out);
}
/*
if(flag_is362273>=0) //for debugging
{
sprintf(filename_xyuvav_sub,"../MIMC2_C_GMA/i0chip_%d.GMA",cnt2+1);
GMA_float_save(filename_xyuvav_sub,imgchip_i0[flag_is362273]);
sprintf(filename_xyuvav_sub,"../MIMC2_C_GMA/i1chip_%d.GMA",cnt2+1);
GMA_float_save(filename_xyuvav_sub,imgchip_i1[flag_is362273]);
}*/
// image chip build complete. Proceeding to the image matching
//TODO: parallelize this part
float uvncc_cp[3];
int32_t ocw_refchip;
GMA_float *refchip;
for(cnt3=1;cnt3<3;cnt3++)//counter for ocw
{
ocw_refchip=param_mimc2.vec_ocw[cnt3];
//printf("ocw=%d, kernel=%d\n",cnt3,cnt2);
//printf("%d...",cnt3+(cnt2-1)*2);
//printf("...%d",(cnt3-1)*4+cnt2+1);
#pragma omp parallel private(refchip,cntv,cntu,uvncc_cp) shared(dp_cp_sub)
{
refchip=GMA_float_create(ocw_refchip*2+1,ocw_refchip*2+1);
#pragma omp for schedule(dynamic) //TODO: consider parallelizing at upper level of the loop
for(cnt1=0;cnt1<num_grid_sub;cnt1++)
{
//original forward matching
//extract refchip from i0
for(cntv=-ocw_refchip;cntv<=ocw_refchip;cntv++)
{
for(cntu=-ocw_refchip;cntu<=ocw_refchip;cntu++)
{
refchip->val[ocw_refchip+cntv][ocw_refchip+cntu]=imgchip_i0[cnt1]->val[ocw_chip+cntv][ocw_chip+cntu];
}
}
//prepare for the sarea from i1 -> imgchip_i1[cnt1]
//perform feature tracking
find_ncc_peak(refchip, imgchip_i1[cnt1], uv_pivot_cp, uvncc_cp);
dp_cp_sub[(cnt3-1)*8+(cnt2+1)*2]->val[cnt1][0]=uvncc_cp[0];
dp_cp_sub[(cnt3-1)*8+(cnt2+1)*2]->val[cnt1][1]=uvncc_cp[1];
dp_cp_sub[(cnt3-1)*8+(cnt2+1)*2]->val[cnt1][2]=uvncc_cp[2];
//swapped forward matching
//prepare for the refchip from i1
for(cntv=-ocw_refchip;cntv<=ocw_refchip;cntv++)
{
for(cntu=-ocw_refchip;cntu<=ocw_refchip;cntu++)
{
refchip->val[ocw_refchip+cntv][ocw_refchip+cntu]=imgchip_i1[cnt1]->val[ocw_chip+cntv][ocw_chip+cntu];
}
}
//prepare for the sarea from i0 -> imgchip_i0[cnt1]
//perform feature tracking
//NOTE: No need to reverse the nc_pivot_cp (rectangular-shaped distribution)
find_ncc_peak(refchip, imgchip_i0[cnt1], uv_pivot_cp, uvncc_cp);
dp_cp_sub[(cnt3-1)*8+(cnt2+1)*2+1]->val[cnt1][0]=-uvncc_cp[0];
dp_cp_sub[(cnt3-1)*8+(cnt2+1)*2+1]->val[cnt1][1]=-uvncc_cp[1];
dp_cp_sub[(cnt3-1)*8+(cnt2+1)*2+1]->val[cnt1][2]=uvncc_cp[2];
}//for(cnt1=0;cnt1<num_grid_sub;cnt1++)
GMA_float_destroy(refchip);
}//#pragma omp parallel private(refchip,cntv,cntu,uvncc_cp) shared(dp_cp_sub)
}//for(cnt3=1;cnt3<3;cnt3++)//counter for ocw
//de-allocate the used image chips
for(cnt1=0;cnt1<num_grid_sub;cnt1++)
{
GMA_float_destroy(imgchip_i0[cnt1]);
GMA_float_destroy(imgchip_i1[cnt1]);
}
}//for(cnt2=-1;cnt2<=2;cnt2++)//counter for the kernel
//printf("\n");
//cluster the dp_cp_sub
mvn_dp_cp_sub=calc_mean_var_num_dp_cluster(dp_cp_sub,16);
free(imgchip_i0);
free(imgchip_i1);
//find the prominent displacements
int32_t id_xyuvav;
for(cnt1=0;cnt1<num_grid_sub;cnt1++)
{
int32_t cnt_cluster;
for(cnt_cluster=0;cnt_cluster<mvn_dp_cp_sub[cnt1]->nrows;cnt_cluster++)
{
if(mvn_dp_cp_sub[cnt1]->val[cnt_cluster][4]>=0.6)
{
id_xyuvav=(int32_t)(xyuvav_cp_sub->val[cnt1][6]);
sduv[0]+=mvn_dp_cp_sub[cnt1]->val[cnt_cluster][0];
sduv[1]+=mvn_dp_cp_sub[cnt1]->val[cnt_cluster][1];
flag_cp->val[id_xyuvav][0]=1;
num_cp_current++;
}
}
}
//de-allocate mvn_dp_cp_sub
for(cnt1=0;cnt1<num_grid_sub;cnt1++)
{
GMA_float_destroy(mvn_dp_cp_sub[cnt1]);
}
free(mvn_dp_cp_sub);
if(num_cp<=num_cp_current)
{
flag_sucessful_ft_cp=1;
printf("Sufficient # of CP found: %d>=%d\n",num_cp_current,num_cp);
GMA_double_destroy(xyuvav_cp_sub);
break;
}
else
{
GMA_double_destroy(xyuvav_cp_sub);
printf("#CP found so far: %d\n",num_cp_current);
}
//TODO: dp_cp_sub might be leak
for(cnt1=0;cnt1<16;cnt1++) //NOTE: the "16" means the # of matching attempts per grid in CP measurement
{
//dp_cp_sub[cnt1]=GMA_float_create(num_grid_sub,3);
GMA_float_destroy(dp_cp_sub[cnt1]);
}
}//for(cnt=0;cnt<20;cnt++)
free(array_index_segment);
//Consider the image pair to be valid if if has more CPs than (or equal to) the least number of thresholds
if(num_cp_current<num_cp && num_cp_current>=param_mimc2.num_cp_min)
{
flag_sucessful_ft_cp=1;
printf("Least number of CPs found(%d>=%d). Considering this pair has enough CPs\n",num_cp_current,param_mimc2.num_cp_min);
}
//calculat the offset if the vmap pair is a valid one
if(flag_sucessful_ft_cp)
{
float du_cp=sduv[0]/(float)num_cp_current;
float dv_cp=sduv[1]/(float)num_cp_current;
if(du_cp>0)
{
offset[0]=(int32_t)(du_cp+0.5);
}
else
{
offset[0]=(int32_t)(du_cp-0.5);
}
if(dv_cp>0)
{
offset[1]=(int32_t)(dv_cp+0.5);
}
else
{
offset[1]=(int32_t)(dv_cp-0.5);
}
}
else
{
printf("Not enough # of successful CP measurement (%d<%d)\n",num_cp_current,param_mimc2.num_cp_min);
GMA_double_destroy(xyuvav_cp);
GMA_int32_destroy(uv_pivot_cp);
return -1;
}
//deallocate the variables
GMA_double_destroy(xyuvav_cp);
GMA_int32_destroy(uv_pivot_cp);
//free(flag_cp);
//revert the original value of num_dp;
num_dp=num_dp_original;
return 1;
}
void GMA_double_randperm_row(GMA_double *var)
{
    //Randomly permute the rows of var in place.
    //The input is copied into a temporary, the original is blanked with NaN,
    //and rows are drawn back one at a time in random order (a shuffle variant).
    GMA_double *var_temp;
    int32_t cntrow,cntcol;
    int32_t limit_idx_row;
    int32_t idx_row;
    double N_A_N=sqrt(-1.0);
    //BUGFIX: var_temp used to be allocated twice (once at its declaration and
    //once again below), leaking the first allocation; allocate exactly once.
    var_temp=GMA_double_create(var->nrows,var->ncols);
    //duplicate the input array and blank the original with NaN
    for(cntrow=0;cntrow<var->nrows;cntrow++)
    {
        for(cntcol=0;cntcol<var->ncols;cntcol++)
        {
            var_temp->val[cntrow][cntcol]=var->val[cntrow][cntcol];
            var->val[cntrow][cntcol]=N_A_N;
        }
    }
    //determine the row order
    //NOTE(review): srand() is re-seeded on every call with 1-second
    //resolution; calls within the same second yield the same permutation.
    srand(time(NULL));
    for(limit_idx_row=var_temp->nrows-1;limit_idx_row>=0;limit_idx_row--)
    {
        //pick the source row among the not-yet-consumed ones
        if(limit_idx_row!=0)
        {
            idx_row=(int32_t)(rand()%limit_idx_row);
        }
        else
        {
            idx_row=0;
        }
        for(cntcol=0;cntcol<var->ncols;cntcol++)
        {
            var->val[limit_idx_row][cntcol]=var_temp->val[idx_row][cntcol];
            //move the head row value to the idx_row
            var_temp->val[idx_row][cntcol]=var_temp->val[0][cntcol];
            //move the tail row in var_temp to the head
            var_temp->val[0][cntcol]=var_temp->val[limit_idx_row][cntcol];
        }
    }
    GMA_double_destroy(var_temp);
}
GMA_int32** get_uv_pivot(GMA_double *xyuvav, float dt, param param_mimc2, int32_t ocw, GMA_float *i1)
{ //TODO make this module more efficient
    //Build, for every grid point, the list of integer pivot offsets along the
    //a-priori velocity direction (columns 4/5 of xyuvav). These pivots seed
    //the NCC hill-climbing in find_ncc_peak.
    //dt : temporal baseline (days); scales the expected displacement length.
    //ocw: chip half-width, used for the image-boundary check against i1.
    //Returns an array of xyuvav->nrows GMA_int32* matrices [num_pivot x 2]
    //in (u, -v) convention; the caller owns every element and the array.
    int32_t cnt,cnt1;
    int32_t num_pivot;
    double length_pivot;
    float u,v,incr_u,incr_v,norm_incr,denom;
    float theta;
    GMA_int32 **out=malloc(sizeof(GMA_int32*)*(xyuvav->nrows));
    for(cnt=0;cnt<xyuvav->nrows;cnt++)
    {
        u=0.0;
        v=0.0;
        //unit step along the a-priori flow direction
        //TODO: Avoid using the triangular function. Replace them with the utilization of unit vector
        theta=atan2(xyuvav->val[cnt][5],xyuvav->val[cnt][4]);
        incr_u=cos(theta);
        incr_v=sin(theta);
        //normalize the increment by its dominant component
        //(so consecutive pivots advance by exactly one pixel along that axis)
        //BUGFIX: the old code normalized incr_u first and then divided incr_v
        //by fabs(incr_u) AFTER it had already become +/-1, i.e. by 1; divide
        //both components by the original magnitude instead.
        if(fabs(incr_u)>fabs(incr_v))
        {
            denom=fabs(incr_u);
        }
        else
        {
            denom=fabs(incr_v);
        }
        incr_u=incr_u/denom;
        incr_v=incr_v/denom;
        norm_incr=sqrt(incr_u*incr_u+incr_v*incr_v);
        //expected displacement length in pixels, scaled by AW_SF and extended
        //by the constant search extension AW_CRE (+1 so it is always positive)
        length_pivot=sqrt(xyuvav->val[cnt][4]*xyuvav->val[cnt][4]+xyuvav->val[cnt][5]*xyuvav->val[cnt][5])/param_mimc2.mpp/365*dt*param_mimc2.AW_SF+param_mimc2.AW_CRE+1;
        //count up the number of pivots that stay within the image bounds
        num_pivot=0;
        while(u+(float)(xyuvav->val[cnt][2])-(float)ocw>0 &&
        u+(float)(xyuvav->val[cnt][2])+(float)ocw<(float)(i1->ncols-1) &&
        v+(float)(xyuvav->val[cnt][3])-(float)ocw>0 &&
        v+(float)(xyuvav->val[cnt][3])+(float)ocw<(float)(i1->nrows-1) &&
        length_pivot>(double)(norm_incr*(double)num_pivot))
        {
            num_pivot++;
            u+=incr_u;
            v+=incr_v;
        }
        //BUGFIX: guarantee at least one pivot row; with num_pivot==0 the
        //writes to out[cnt]->val[0][*] below (and downstream reads of the
        //last row in extract_sarea) would be out of bounds.
        if(num_pivot<1) num_pivot=1;
        u=0.0;
        v=0.0;
        out[cnt]=GMA_int32_create(num_pivot,2);
        out[cnt]->val[0][0]=0;
        out[cnt]->val[0][1]=0;
        for(cnt1=1;cnt1<num_pivot;cnt1++)
        {
            u+=incr_u;
            v+=incr_v;
            out[cnt]->val[cnt1][0]=(int32_t)(u+0.5);
            out[cnt]->val[cnt1][1]=-(int32_t)(v+0.5); //v axis flipped (image row convention)
        }
    }
    printf("\n");
    return out;
}
char investigate_valid_grid(GMA_float *refchip,GMA_float *sarea)
{
    //Decide whether a grid is worth matching.
    //Returns 0 when more than max_ratio_invalid of the pixels in either the
    //reference chip or the search area are below MIN_DN (void/no-data pixels),
    //and 1 otherwise.
    int32_t num_invalid_refchip=0,num_invalid_sarea=0;
    int32_t cntu,cntv;
    float numpx_refchip,numpx_sarea;
    float max_ratio_invalid=0.8; //maximum tolerated fraction of void pixels
    numpx_refchip=(float)(refchip->nrows*refchip->ncols);
    numpx_sarea=(float)(sarea->nrows*sarea->ncols);
    //count void pixels in refchip
    for(cntu=0;cntu<refchip->ncols;cntu++)
    {
        for(cntv=0;cntv<refchip->nrows;cntv++)
        {
            if(refchip->val[cntv][cntu]<MIN_DN) num_invalid_refchip++;
        }
    }
    //count void pixels in sarea
    for(cntu=0;cntu<sarea->ncols;cntu++)
    {
        for(cntv=0;cntv<sarea->nrows;cntv++)
        {
            if(sarea->val[cntv][cntu]<MIN_DN) num_invalid_sarea++;
        }
    }
    //removed unused locals std_refchip/std_sarea; return the flag directly
    if((float)num_invalid_refchip/numpx_refchip>max_ratio_invalid || (float)num_invalid_sarea/numpx_sarea>max_ratio_invalid)
    {
        return 0;
    }
    return 1;
}
void find_ncc_peak(GMA_float *refchip, GMA_float *sarea, GMA_int32 *uv_pivot, float *uvncc)
{
//Locate the peak of the normalized cross-correlation (NCC) between refchip
//and sarea, then refine it to sub-pixel precision with a 2-D quadratic fit
//over the 3x3 NCC neighborhood of the integer peak.
//refchip : reference chip (square, side 2*ocw+1 derived from nrows below).
//sarea   : search area; the NCC map over it is evaluated lazily (cmap).
//uv_pivot: start offsets, relative to the sarea center, used to seed the
//          greedy hill-climbing search (one climb per pivot row).
//uvncc   : output array of 3 floats.
//          [0],[1] = sub-pixel (u,v) displacement of the peak relative to
//                    the sarea center; NaN when the grid is invalid.
//          [2]     = peak NCC value; -3 flags an invalid grid (too many
//                    void pixels, see investigate_valid_grid).
//TODO: A special mode of this function is necessary, especially when there is no void area line
GMA_float *cmap;
int32_t cnt_grid,cnt_pivot; //NOTE(review): cnt_grid is declared but never used here
int dx2,dy2,Dx2,Dy2,ocw;
int duv[2],pivot[2];
int uv_peak[2];
float nccmax;
int32_t cnt1,cnt2,cnt3,cnt4;
int32_t uv0[2]; //NOTE(review): uv0 is declared but never used here
const float N_A_N=sqrt(-1.0);
int32_t nsample, flag_newncc;
double sx,sy,sxx,syy,sxy;
float ncc9[9];
double coeff_poly[6];
Dx2=(int)(sarea->ncols);
Dy2=(int)(sarea->nrows);
dx2=Dx2/2; //half-extent of the search area; also the coordinate of its center
dy2=Dy2/2;
ocw=(int)(refchip->nrows)/2; //half-width of the reference chip
uv_peak[0]=dx2;
uv_peak[1]=dy2;
uvncc[0]=0.0;
uvncc[1]=0.0;
uvncc[2]=-2.0; //NCC is in [-1,1]; -2 means "no peak found yet"
//create cmap and sarea
//cmap caches NCC values; -2.0 marks cells not evaluated yet
//NOTE(review): the loops below run cnt<dx2 / cnt<dy2 while cmap is
//Dy2 x Dx2 = (2*dy2+1) x (2*dx2+1), so the last row/column keep whatever
//GMA_float_create leaves there - confirm (matches the TODO below).
cmap=GMA_float_create(Dy2,Dx2);
for(cnt1=-dx2;cnt1<dx2;cnt1++) //TODO: re-think about the loop boundary
{
for(cnt2=-dy2;cnt2<dy2;cnt2++) cmap->val[cnt2+dy2][cnt1+dx2]=-2.0;
}
if(!investigate_valid_grid(refchip,sarea))
{
//grid dominated by void pixels: report NaN displacement and flag -3
uvncc[0]=N_A_N;
uvncc[1]=N_A_N;
uvncc[2]=-3;
}
else
{
for(cnt_pivot=0;cnt_pivot<uv_pivot->nrows;cnt_pivot++) //loop through the pivot point
{
pivot[0]=uv_pivot->val[cnt_pivot][0]+dx2; //initial pivot
pivot[1]=uv_pivot->val[cnt_pivot][1]+dy2;
nccmax=-2;
duv[0]=-1;
duv[1]=-1;
flag_newncc=1; //fake values to get into the while loop
//greedy hill climbing: keep stepping to the best 3x3 neighbor until the
//current cell is a local maximum (duv==0) or no new NCC was computed
while((duv[0]!=0||duv[1]!=0)&&flag_newncc!=0)
{
duv[0]=0;
duv[1]=0;
if(pivot[0]-ocw<=1 || pivot[0]+ocw>=Dx2-1 || pivot[1]-ocw<=1 || pivot[1]+ocw>=Dy2-1) //boundary check
{
break;
}
flag_newncc=0;
for(cnt1=-1;cnt1<=1;cnt1++)
{
for(cnt2=-1;cnt2<=1;cnt2++)
{
if(cmap->val[pivot[1]+cnt2][pivot[0]+cnt1]<-1.0) //when NCC is not calculated
{
//calculate NCC for the location [pivot[0]+cnt1][pivot[1]+cnt2]
flag_newncc++;
nsample=0;
sy=0; sx=0; sxx=0; sxy=0; syy=0;
for(cnt3=-ocw;cnt3<=ocw;cnt3++)
{
for(cnt4=-ocw;cnt4<=ocw;cnt4++)
{
//accumulate moments only over pixel pairs that are both valid
if(refchip->val[cnt4+ocw][cnt3+ocw]>=MIN_DN && sarea->val[pivot[1]+cnt2+cnt4][pivot[0]+cnt1+cnt3] >=MIN_DN)
{ //Concept of null exclusion. Refer to Ahn and Howat [2011]
nsample++;
sy+=sarea->val[pivot[1]+cnt2+cnt4][pivot[0]+cnt1+cnt3];
sx+=refchip->val[cnt4+ocw][cnt3+ocw];
sxx+=refchip->val[cnt4+ocw][cnt3+ocw]*refchip->val[cnt4+ocw][cnt3+ocw];
syy+=sarea->val[pivot[1]+cnt2+cnt4][pivot[0]+cnt1+cnt3]*sarea->val[pivot[1]+cnt2+cnt4][pivot[0]+cnt1+cnt3];
sxy+=refchip->val[cnt4+ocw][cnt3+ocw]*sarea->val[pivot[1]+cnt2+cnt4][pivot[0]+cnt1+cnt3];
}
}
}
//NOTE(review): if either variance term is zero (flat chip) the divisor
//is 0 and the result is inf/NaN - confirm this is acceptable downstream
cmap->val[pivot[1]+cnt2][pivot[0]+cnt1]=(float)((nsample*sxy-sx*sy)/sqrt((nsample*sxx-sx*sx)*(nsample*syy-sy*sy))); //ncc calculation in spatial domain
}
if(cmap->val[pivot[1]+cnt2][pivot[0]+cnt1] > nccmax)
{
nccmax=cmap->val[pivot[1]+cnt2][pivot[0]+cnt1];
duv[0]=cnt1;
duv[1]=cnt2;
}
}
}
pivot[0]+=duv[0];
pivot[1]+=duv[1];
}
//keep the best local maximum over all pivots
if(nccmax>uvncc[2])
{
uv_peak[0]=pivot[0];
uv_peak[1]=pivot[1];
uvncc[2]=nccmax;
}
} //peak location along with the NCC is now at the array uvncc
// quadratic fitting of the NCC peak
//gather the 3x3 NCC neighborhood around the integer peak (row-major)
ncc9[0]=cmap->val[uv_peak[1]-1][uv_peak[0]-1];
ncc9[1]=cmap->val[uv_peak[1]-1][uv_peak[0]];
ncc9[2]=cmap->val[uv_peak[1]-1][uv_peak[0]+1];
ncc9[3]=cmap->val[uv_peak[1]][uv_peak[0]-1];
ncc9[4]=cmap->val[uv_peak[1]][uv_peak[0]];
ncc9[5]=cmap->val[uv_peak[1]][uv_peak[0]+1];
ncc9[6]=cmap->val[uv_peak[1]+1][uv_peak[0]-1];
ncc9[7]=cmap->val[uv_peak[1]+1][uv_peak[0]];
ncc9[8]=cmap->val[uv_peak[1]+1][uv_peak[0]+1];
//TODO: deal with the case that the peak is at the edges of the cmap
//least-squares coefficients of the 2-D second-order polynomial fitted to
//the 3x3 neighborhood (closed form; shared denominator 36 applied below)
coeff_poly[0]=6*ncc9[0] -12*ncc9[1] +6*ncc9[2] +6*ncc9[3] -12*ncc9[4] +6*ncc9[5] +6*ncc9[6] -12*ncc9[7] +6*ncc9[8];
coeff_poly[1]=9*ncc9[0] -9*ncc9[2] -9*ncc9[6] +9*ncc9[8];
coeff_poly[2]=6*ncc9[0] +6*ncc9[1] +6*ncc9[2] -12*ncc9[3] -12*ncc9[4] -12*ncc9[5] +6*ncc9[6] +6*ncc9[7] +6*ncc9[8];
coeff_poly[3]=-6*ncc9[0] +6*ncc9[2] -6*ncc9[3] +6*ncc9[5] -6*ncc9[6] +6*ncc9[8];
coeff_poly[4]=-6*ncc9[0] -6*ncc9[1] -6*ncc9[2] +6*ncc9[6] +6*ncc9[7] +6*ncc9[8];
coeff_poly[5]=-4*ncc9[0] +8*ncc9[1] -4*ncc9[2] +8*ncc9[3] +20*ncc9[4] +8*ncc9[5] -4*ncc9[6] +8*ncc9[7] -4*ncc9[8];
coeff_poly[0]/=36;
coeff_poly[1]/=36;
coeff_poly[2]/=36;
coeff_poly[3]/=36;
coeff_poly[4]/=36;
coeff_poly[5]/=36;
//stationary point of the fitted paraboloid = sub-pixel peak offset
uvncc[0]=-2*coeff_poly[2]*coeff_poly[3]+coeff_poly[1]*coeff_poly[4];
uvncc[1]=-2*coeff_poly[0]*coeff_poly[4]+coeff_poly[1]*coeff_poly[3];
uvncc[0]/=4*coeff_poly[0]*coeff_poly[2]-coeff_poly[1]*coeff_poly[1];
uvncc[1]/=4*coeff_poly[0]*coeff_poly[2]-coeff_poly[1]*coeff_poly[1];
//convert from sarea coordinates to displacement relative to the center
uvncc[0]+=(float)(uv_peak[0]-dx2);
uvncc[1]+=(float)(uv_peak[1]-dy2);
//TODO: also calculate fitted NCC peak value if there is no significant computation resource usage
//TODO: Cancel the sub-pixel interpolation if the fitting result goes too far from the original peak result
}
GMA_float_destroy(cmap);
}
//GMA_float* matching_ncc_dlc_2(GMA_float *i0,GMA_float *i1,GMA_double *xyuvav,GMA_int32 **uv_pivot,int32_t ocw,float AW_CRE,float AW_SF)
GMA_float* matching_ncc_dlc_2(GMA_float *i0,GMA_float *i1,GMA_double *xyuvav,int32_t *offset,GMA_int32 **uv_pivot,int32_t ocw,float AW_CRE,float AW_SF)
{
    //Perform NCC feature tracking for every grid point (parallelized).
    //i0, i1   : reference / search images.
    //xyuvav   : per-grid table; columns 2 and 3 hold the (u,v) pixel location.
    //offset   : global (du,dv) shift applied to the search center in i1
    //           (estimated beforehand from the CP measurement).
    //uv_pivot : per-grid pivot offsets that seed the NCC hill climbing.
    //ocw      : half-width of the reference chip (chip side is 2*ocw+1).
    //AW_CRE, AW_SF: kept for interface compatibility; not used in this body.
    //return   : [nrows x 3] matrix of (du, dv, peak NCC) per grid; caller owns it.
    //Removed unused locals cnt1..cnt4 and stale commented-out code.
    GMA_float *out=GMA_float_create(xyuvav->nrows,3); //n*3 matrix
    GMA_float *refchip,*sarea;
    float uvncc[3];
    int32_t cnt_grid;
    int32_t uv0[2];
    //refchip/sarea/uv0/uvncc are per-thread scratch; out is shared but every
    //thread writes a disjoint set of rows, so no synchronization is needed.
    #pragma omp parallel private(uv0,uvncc,refchip,sarea) shared(out)
    {
        refchip=GMA_float_create(ocw*2+1,ocw*2+1); //reused across iterations
        #pragma omp for schedule(dynamic)
        for(cnt_grid=0;cnt_grid<xyuvav->nrows;cnt_grid++)
        {
            uv0[0]=(int32_t)xyuvav->val[cnt_grid][2];
            uv0[1]=(int32_t)xyuvav->val[cnt_grid][3];
            extract_refchip(i0,uv0,ocw,refchip);
            //shift the search center by the global offset before extracting
            uv0[0]+=offset[0];
            uv0[1]+=offset[1];
            sarea=extract_sarea(i1, uv0, ocw, uv_pivot[cnt_grid]);
            find_ncc_peak(refchip, sarea, uv_pivot[cnt_grid], uvncc);
            out->val[cnt_grid][0]=uvncc[0];
            out->val[cnt_grid][1]=uvncc[1];
            out->val[cnt_grid][2]=uvncc[2];
            GMA_float_destroy(sarea);
        }
        GMA_float_destroy(refchip);
    }
    return out;
}
void extract_refchip(GMA_float *i0, int32_t *uv0, int32_t ocw,GMA_float *refchip)
{
    //Copy the (2*ocw+1)x(2*ocw+1) window of i0 centered at (uv0[0],uv0[1])
    //into the pre-allocated refchip. No boundary check is performed here.
    int32_t row,col;
    int32_t side=ocw*2;
    for(row=0;row<=side;row++)
    {
        for(col=0;col<=side;col++)
        {
            refchip->val[row][col]=i0->val[uv0[1]+row-ocw][uv0[0]+col-ocw];
        }
    }
}
GMA_float* extract_sarea(GMA_float *i1, int32_t *uv0, int32_t ocw, GMA_int32 *uv_pivot)
{
    //Extract the search area of i1 centered at (uv0[0],uv0[1]).
    //The half-extent covers the farthest pivot offset plus the chip
    //half-width ocw plus a 2-pixel margin. Pixels outside the image are
    //filled with 0.0. The returned GMA_float is owned by the caller.
    GMA_float *sarea;
    int32_t cnt1,cnt2;
    int dx2, Dx2, dy2, Dy2;
    dx2=abs(uv_pivot->val[uv_pivot->nrows-1][0])+ocw+2;
    dy2=abs(uv_pivot->val[uv_pivot->nrows-1][1])+ocw+2;
    Dx2=dx2*2+1;
    Dy2=dy2*2+1;
    sarea=GMA_float_create(Dy2,Dx2);
    //BUGFIX: the loops previously ran cnt<dx2 / cnt<dy2 and never filled the
    //last row and column of the (2*dy2+1)x(2*dx2+1) array; use <= so every
    //cell is initialized.
    for(cnt1=-dx2;cnt1<=dx2;cnt1++)
    {
        for(cnt2=-dy2;cnt2<=dy2;cnt2++)
        {
            int32_t coord_u,coord_v;
            coord_u=uv0[0]+cnt1;
            coord_v=uv0[1]+cnt2;
            //boundary check: zero-fill outside the image
            if(coord_u>=0 && coord_u<i1->ncols && coord_v>=0 && coord_v<i1->nrows)
            {
                sarea->val[cnt2+dy2][cnt1+dx2]=i1->val[coord_v][coord_u];
            }
            else
            {
                sarea->val[cnt2+dy2][cnt1+dx2]=0.0;
            }
        }
    }
    return sarea;
}
GMA_float** mimc2_postprocess(GMA_float **dp, GMA_double *xyuvav, float dt)
{
//Post-process raw per-attempt matching results into the final velocity-map
//layers: cluster the displacements, pick the prominent cluster per grid,
//fill holes from neighbors (dpf1), pseudosmooth, then emit the layers.
//dp    : array of per-attempt displacement matrices (see calc_mean_var_num_dp_cluster).
//xyuvav: per-grid table of coordinates and a-priori velocities.
//dt    : temporal baseline. NOTE(review): dt is accepted but never referenced
//        in this body - confirm whether it is still needed.
//Uses the file-scope globals num_dp, num_grid, dimx_vmap, dimy_vmap and
//param_mimc2 (declared elsewhere in this file).
//Returns an array of 5 GMA_float* layers [vx, vy, ex, ey, qual]; caller owns
//each layer and the outer array.
int32_t cnt;
GMA_float **vxyexyqual;
GMA_float **mvn_dp;
GMA_int32 *dpf0; //TODO: use GMA_int8 instead of GMA_int32 (for memory efficiency)
GMA_int32 *ruv_neighbor;
GMA_uint8 *mask_nomatching; //NOTE(review): computed below but only destroyed, never read - confirm
GMA_float *dpf_dx, *dpf_dy;
float N_A_N=sqrt(-1.0);
printf("Calculating mean/std/qual (MSQ):\n");
//cluster the displacement samples of every grid into per-cluster statistics
mvn_dp=calc_mean_var_num_dp_cluster(dp,num_dp);
printf("MSQ calculation successful\n\n");
printf("Finding grids without any matching results");
mask_nomatching=get_mask_nomatching(mvn_dp);
//determine dpf0
printf("Determining prominent displacements\n");
dpf0=get_dpf0(mvn_dp,0.6); //TODO: soft-code the threshold
printf("Successful\n\n");
//GMA_int32_save("../MIMC2_C_GMA/dpf0.GMA",dpf0); //for checking the result
//Debug code
printf("Calculating meighboring grid mask for dpf2\n");
ruv_neighbor=get_ruv_neighbor(xyuvav,param_mimc2.radius_neighbor_dpf1);
printf("Successful: %d neignboring grids\n\n",ruv_neighbor->nrows);
printf("Calculating initial most probable displacements (dpf2)\n");
dpf_dx=GMA_float_create(dimy_vmap,dimx_vmap);
dpf_dy=GMA_float_create(dimy_vmap,dimx_vmap);
//fill holes in the prominent-displacement field from neighboring grids
get_dpf1(dpf0,dpf_dx,dpf_dy,ruv_neighbor,mvn_dp,xyuvav);
printf("Successful\n\n");
//Perform pseudosmoothing
printf("Performing pseudosmoothing\n");
//rebuild the neighbor offsets with the pseudosmoothing radius
GMA_int32_destroy(ruv_neighbor);
ruv_neighbor=get_ruv_neighbor(xyuvav,param_mimc2.radius_neighbor_ps);
get_dpf_pseudosmoothing(dpf0,dpf_dx,dpf_dy,ruv_neighbor,mvn_dp,xyuvav);
printf("pseudosmoothing successful!\n\n");
//convert the cluster map (i.e. dpf0) into vmap (i.e. vxyexyqual)
vxyexyqual=(GMA_float**)malloc(sizeof(GMA_float*)*5);
vxyexyqual[0]=GMA_float_create(dimy_vmap,dimx_vmap); //vx
vxyexyqual[1]=GMA_float_create(dimy_vmap,dimx_vmap); //vy
vxyexyqual[2]=GMA_float_create(dimy_vmap,dimx_vmap); //ex
vxyexyqual[3]=GMA_float_create(dimy_vmap,dimx_vmap); //ey
vxyexyqual[4]=GMA_float_create(dimy_vmap,dimx_vmap); //qual
printf("checkpoint 1\n");
int32_t cntv,cntu;
int32_t id_vec;
int32_t id_cluster;
for(cntv=0;cntv<dimy_vmap;cntv++)
{
for(cntu=0;cntu<dimx_vmap;cntu++)
{
id_vec=cntv*dimx_vmap+cntu;
id_cluster=dpf0->val[cntv][cntu];
if(id_cluster>=0)
{
//copy the selected cluster's statistics into the output layers
vxyexyqual[0]->val[cntv][cntu]=mvn_dp[id_vec]->val[id_cluster][0];
vxyexyqual[1]->val[cntv][cntu]=mvn_dp[id_vec]->val[id_cluster][1];
vxyexyqual[2]->val[cntv][cntu]=mvn_dp[id_vec]->val[id_cluster][2];
vxyexyqual[3]->val[cntv][cntu]=mvn_dp[id_vec]->val[id_cluster][3];
vxyexyqual[4]->val[cntv][cntu]=mvn_dp[id_vec]->val[id_cluster][4];
}
else
{
//no prominent cluster for this grid: flag the cell with NaN
vxyexyqual[0]->val[cntv][cntu]=N_A_N;
vxyexyqual[1]->val[cntv][cntu]=N_A_N;
vxyexyqual[2]->val[cntv][cntu]=N_A_N;
vxyexyqual[3]->val[cntv][cntu]=N_A_N;
vxyexyqual[4]->val[cntv][cntu]=N_A_N;
}
}
}
printf("checkpoint 2\n");
//deallocate the allocated variables
GMA_float_destroy(dpf_dx);
GMA_float_destroy(dpf_dy);
GMA_int32_destroy(ruv_neighbor);
GMA_int32_destroy(dpf0);
GMA_uint8_destroy(mask_nomatching);
printf("checkpoint 3\n");
//NOTE(review): assumes the global num_grid equals dp[0]->nrows (the number
//of entries in mvn_dp) - confirm against the allocation site.
for(cnt=0;cnt<num_grid;cnt++)
{
GMA_float_destroy(mvn_dp[cnt]);
}
free(mvn_dp);
printf("checkpoint 4\n");
return vxyexyqual;
}
GMA_float** calc_mean_var_num_dp_cluster(GMA_float** dp,int32_t num_dpoi)
{
    //Cluster the displacement measurements of every grid and compute
    //per-cluster statistics.
    //dp      : array of num_dpoi GMA_float* matrices, each [num_grid x 3]
    //          holding (du, dv, ncc) per grid for one matching attempt.
    //num_dpoi: number of matching attempts (length of dp).
    //return  : array of num_grid GMA_float* matrices, one row per cluster:
    //          [mean_vx, mean_vy, var_vx, var_vy, sample_ratio].
    //          Caller owns every element and the outer array.
    int32_t cnt_grid,cnt_dp,num_valid_dp;
    uint8_t max_id; //removed unused local cnt_id
    int32_t num_grid=dp[0]->nrows;
    GMA_float **out;
    GMA_uint8 id_cluster;
    GMA_float dp_stack;
    float min_ncc=0.1; //measurements with NCC at or below this are discarded
    float sx[num_dpoi],sxx[num_dpoi],sy[num_dpoi],syy[num_dpoi];
    int nsample[num_dpoi];
    float min_dist=0.5; //clustering distance threshold (pixels) //TODO: soft-code this
    out=(GMA_float**)malloc(sizeof(GMA_float*)*dp[0]->nrows);
    //initialize dp_stack: holds the valid (du,dv) samples of one grid
    dp_stack.nrows=num_dpoi;
    dp_stack.ncols=2;
    dp_stack.val=(float**)malloc(sizeof(float*)*dp_stack.nrows);
    for(cnt_dp=0;cnt_dp<num_dpoi;cnt_dp++)
    {
        dp_stack.val[cnt_dp]=(float*)malloc(sizeof(float)*dp_stack.ncols);
    }
    //TODO: modify the part above to be in compliance with the struct definition (i.e. allocate space at dp_stack->data)
    //initialize id_cluster: cluster label of each stacked sample
    id_cluster.nrows=num_dpoi;
    id_cluster.ncols=1;
    id_cluster.val=(uint8_t**)malloc(sizeof(uint8_t*)*dp_stack.nrows);
    for(cnt_dp=0;cnt_dp<num_dpoi;cnt_dp++)
    {
        //BUGFIX: rows were allocated with sizeof(float); the element type is uint8_t
        id_cluster.val[cnt_dp]=(uint8_t*)malloc(sizeof(uint8_t)*id_cluster.ncols);
    }
    //TODO: parallelize this part
    for(cnt_grid=0;cnt_grid<num_grid;cnt_grid++)
    {
        //build a stack of the valid displacements of this grid
        num_valid_dp=0;
        for(cnt_dp=0;cnt_dp<num_dpoi;cnt_dp++)
        {
            if(dp[cnt_dp]->val[cnt_grid][2]>min_ncc)
            {
                dp_stack.val[num_valid_dp][0]=dp[cnt_dp]->val[cnt_grid][0];
                dp_stack.val[num_valid_dp][1]=dp[cnt_dp]->val[cnt_grid][1];
                num_valid_dp++;
            }
        }
        //trim out the tail (moved out of the loop above, where it was
        //pointlessly re-executed every iteration; final value is identical)
        dp_stack.nrows=num_valid_dp;
        id_cluster.nrows=num_valid_dp;
        //perform clustering (IDs are written into id_cluster, 1-based)
        cluster_euclidian(&dp_stack, min_dist, &id_cluster);
        //find the number of clusters = the maximum cluster ID
        max_id=0;
        for(cnt_dp=0;cnt_dp<id_cluster.nrows;cnt_dp++)
        {
            if(max_id<id_cluster.val[cnt_dp][0])
            {
                max_id=id_cluster.val[cnt_dp][0];
            }
        }
        out[cnt_grid]=GMA_float_create(max_id,5);
        for(cnt_dp=0;cnt_dp<max_id;cnt_dp++)
        {
            sx[cnt_dp]=0;
            sy[cnt_dp]=0;
            sxx[cnt_dp]=0;
            syy[cnt_dp]=0;
            nsample[cnt_dp]=0;
        }
        //accumulate per-cluster first and second moments
        //[mean_vx, mean_vy, var_vx, var_vy, num_sample]
        for(cnt_dp=0;cnt_dp<id_cluster.nrows;cnt_dp++)
        {
            sx[id_cluster.val[cnt_dp][0]-1]+=dp_stack.val[cnt_dp][0];
            sy[id_cluster.val[cnt_dp][0]-1]+=dp_stack.val[cnt_dp][1];
            sxx[id_cluster.val[cnt_dp][0]-1]+=dp_stack.val[cnt_dp][0]*dp_stack.val[cnt_dp][0];
            syy[id_cluster.val[cnt_dp][0]-1]+=dp_stack.val[cnt_dp][1]*dp_stack.val[cnt_dp][1];
            nsample[id_cluster.val[cnt_dp][0]-1]+=1;
        }
        //each row of the output contains the statistics of one cluster
        for(cnt_dp=0;cnt_dp<max_id;cnt_dp++)
        {
            out[cnt_grid]->val[cnt_dp][0]=sx[cnt_dp]/(float)nsample[cnt_dp]; //mean vx
            out[cnt_grid]->val[cnt_dp][1]=sy[cnt_dp]/(float)nsample[cnt_dp]; //mean vy
            out[cnt_grid]->val[cnt_dp][2]=sxx[cnt_dp]/(float)nsample[cnt_dp]
                -out[cnt_grid]->val[cnt_dp][0]*out[cnt_grid]->val[cnt_dp][0]; //var vx
            out[cnt_grid]->val[cnt_dp][3]=syy[cnt_dp]/(float)nsample[cnt_dp]
                -out[cnt_grid]->val[cnt_dp][1]*out[cnt_grid]->val[cnt_dp][1]; //var vy
            out[cnt_grid]->val[cnt_dp][4]=(float)nsample[cnt_dp]/(float)num_dpoi; //ratio of attempts in this cluster
        }
        //TODO: Consider the way about reducing the overhead from memory allocation/deallocation
    }
    //deallocate dp_stack (restore nrows so every allocated row is freed)
    dp_stack.nrows=num_dpoi;
    id_cluster.nrows=num_dpoi;
    for(cnt_dp=0;cnt_dp<num_dpoi;cnt_dp++)
    {
        free(dp_stack.val[cnt_dp]);
    }
    free(dp_stack.val);
    //deallocate id_cluster
    for(cnt_dp=0;cnt_dp<num_dpoi;cnt_dp++)
    {
        free(id_cluster.val[cnt_dp]);
    }
    free(id_cluster.val);
    return out;
}
void cluster_euclidian(GMA_float *dp_stack, float min_dist, GMA_uint8 *id_cluster)
{ //TODO : improve the efficiency
    //Single-linkage clustering of 2-D points by Euclidean distance: any two
    //points closer than min_dist end up in the same cluster. Cluster IDs are
    //written into id_cluster (1-based; 0 means unassigned).
    GMA_uint8 *adjacency;
    float du,dv;
    float thresh_sq=min_dist*min_dist;
    uint8_t next_id;
    int32_t i,j;
    //reset all labels to "unassigned"
    for(i=0;i<id_cluster->nrows;i++)
    {
        id_cluster->val[i][0]=0;
    }
    //symmetric adjacency matrix: 1 when two points are within min_dist
    adjacency=GMA_uint8_create(dp_stack->nrows,dp_stack->nrows);
    for(i=0;i<dp_stack->nrows;i++)
    {
        for(j=i;j<dp_stack->nrows;j++)
        {
            du=dp_stack->val[j][0]-dp_stack->val[i][0];
            dv=dp_stack->val[j][1]-dp_stack->val[i][1];
            if(du*du+dv*dv<thresh_sq)
            {
                adjacency->val[i][j]=1;
                adjacency->val[j][i]=1;
            }
            else
            {
                adjacency->val[i][j]=0;
                adjacency->val[j][i]=0;
            }
        }
    }
    //flood-fill each connected component with a fresh ID
    next_id=0;
    for(i=0;i<dp_stack->nrows;i++)
    {
        if(id_cluster->val[i][0]==0)
        {
            next_id++;
            mark_row(i, next_id, id_cluster, adjacency);
        }
    }
    GMA_uint8_destroy(adjacency);
}
void mark_row(int32_t nrow, uint8_t id_dp, GMA_uint8 *id_cluster, GMA_uint8 *mtrx_dist)
{
    //Recursively assign cluster ID id_dp to every still-unlabeled row that is
    //connected (per mtrx_dist) to row nrow - a depth-first flood fill.
    int32_t idx;
    for(idx=0;idx<id_cluster->nrows;idx++)
    {
        if(id_cluster->val[idx][0]!=0) continue; //already labeled
        if(!mtrx_dist->val[nrow][idx]) continue; //not adjacent to nrow
        id_cluster->val[idx][0]=id_dp;
        mark_row(idx, id_dp, id_cluster, mtrx_dist);
    }
}
GMA_uint8* get_mask_nomatching(GMA_float **mvn_dp)
{
    //Build a [dimy_vmap x dimx_vmap] mask flagging (=1) every grid whose
    //displacement-cluster list is empty, i.e. no matching attempt produced a
    //usable result there. The returned GMA_uint8 is owned by the caller.
    int32_t u,v;
    GMA_uint8 *mask;
    mask=GMA_uint8_create(dimy_vmap,dimx_vmap);
    for(v=0;v<dimy_vmap;v++)
    {
        for(u=0;u<dimx_vmap;u++)
        {
            //row-major flattening of the (v,u) grid index
            mask->val[v][u]=(mvn_dp[v*dimx_vmap+u]->nrows==0)?1:0;
        }
    }
    return mask;
}
GMA_int32* get_dpf0(GMA_float **mvn_dp, float min_matching_ratio) //to get the most probable dps
{
    //Select the prominent displacement (DPF0) of every grid.
    //mvn_dp            : per-grid cluster statistics [mvx][mvy][evx][evy][qual].
    //min_matching_ratio: minimum matching ratio (qual, column 4) for a
    //                    cluster to count as prominent; usually 0.6.
    //Returns a [dimy_vmap x dimx_vmap] GMA_int32 holding, per grid, the index
    //of the first cluster whose qual exceeds the threshold, or -1 when no
    //cluster qualifies. Caller owns the result.
    int32_t grid_id,cluster_id;
    int32_t u,v;
    GMA_int32 *dpf0_map=GMA_int32_create(dimy_vmap,dimx_vmap);
    for(v=0;v<dimy_vmap;v++)
    {
        for(u=0;u<dimx_vmap;u++)
        {
            grid_id=v*dimx_vmap+u;
            dpf0_map->val[v][u]=-1; //default: no qualifying cluster
            for(cluster_id=0;cluster_id<mvn_dp[grid_id]->nrows;cluster_id++)
            {
                if(mvn_dp[grid_id]->val[cluster_id][4]>min_matching_ratio)
                {
                    dpf0_map->val[v][u]=cluster_id;
                    break; //keep the first qualifying cluster
                }
            }
        }
    }
    return dpf0_map;
}
GMA_int32* get_ruv_neighbor(GMA_double *xyuvav,float radius_neighbor)
{
    //Build the list of relative grid offsets (du,dv) whose map-plane distance
    //from the center grid cell is within radius_neighbor grid spacings.
    //Returns an [n x 2] GMA_int32 of offsets; the caller owns it.
    GMA_int32 *result,*candidates;
    int32_t center_u,center_v; //center of the 2d grid
    int32_t iu,iv;
    int32_t count;
    float diff[2];
    GMA_float *mesh_x,*mesh_y;
    float dist_sq,center_x,center_y; //xy coordinate of the 2d grid
    center_u=dimx_vmap/2;
    center_v=dimy_vmap/2;
    //mesh grids of the map-plane x and y coordinates of every cell
    mesh_x=GMA_float_create(dimy_vmap,dimx_vmap);
    mesh_y=GMA_float_create(dimy_vmap,dimx_vmap);
    for(iv=0;iv<dimy_vmap;iv++)
    {
        for(iu=0;iu<dimx_vmap;iu++)
        {
            mesh_x->val[iv][iu]=xyuvav->val[iu][0];
            mesh_y->val[iv][iu]=xyuvav->val[iv*dimx_vmap][1];
        }
    }
    center_x=mesh_x->val[center_v][center_u];
    center_y=mesh_y->val[center_v][center_u];
    //first pass into an over-sized buffer, then shrink to the exact count
    candidates=GMA_int32_create(num_grid,2);
    count=0;
    for(iv=0;iv<dimy_vmap;iv++)
    {
        for(iu=0;iu<dimx_vmap;iu++)
        {
            diff[0]=mesh_x->val[iv][iu]-center_x;
            diff[1]=mesh_y->val[iv][iu]-center_y;
            dist_sq=diff[0]*diff[0]+diff[1]*diff[1];
            if(dist_sq<=(radius_neighbor*param_mimc2.meter_per_spacing)*(radius_neighbor*param_mimc2.meter_per_spacing))
            {
                candidates->val[count][0]=iu-center_u;
                candidates->val[count][1]=iv-center_v;
                count++;
            }
        }
    }
    result=GMA_int32_create(count,2);
    for(iv=0;iv<count;iv++)
    {
        result->val[iv][0]=candidates->val[iv][0];
        result->val[iv][1]=candidates->val[iv][1];
    }
    GMA_int32_destroy(candidates);
    GMA_float_destroy(mesh_x);
    GMA_float_destroy(mesh_y);
    return result;
}
void get_dpf1(GMA_int32 *dpf0, GMA_float *dpf_dx, GMA_float *dpf_dy, GMA_int32 *ruv_neighbor, GMA_float **mvn_dp, GMA_double *xyuvav)
{
int32_t cntu,cntv,cnt_grid,cnt_neighbor,cnt_cluster,id_cluster;
int32_t NOI,num_processed,num_grid_neighbor,num_grid_unprocessed;
int32_t uvoi[2];
GMA_float *dx_buffer, *dy_buffer;
GMA_float *vec_ruv_w_scale_mag;
GMA_float *mtrx_noi;
float dpe[2],dxy_neighbor[2],dxy[2],apv_neighbor[2];
float mag_dpe,mag_dxy_neighbor;
float N_A_N=sqrt(-1.0);
float sum_mag,sq_dist,sq_dist_closest;
char buffer_filename[2048];
dx_buffer=GMA_float_create(dimy_vmap,dimx_vmap);
dy_buffer=GMA_float_create(dimy_vmap,dimx_vmap);
mtrx_noi=GMA_float_create(dimy_vmap,dimx_vmap);
for(cntv=0;cntv<dimy_vmap;cntv++)
{
for(cntu=0;cntu<dimx_vmap;cntu++)
{
mtrx_noi->val[cntv][cntu]=1.0;
}
}
//vec_ruv_w_scale_mag=GMA_float_create(ruv_neighbor->nrows,4);
//reconstruct dpf_dx and dpf_dy. Also initialize the dx_buffer and dy_buffer
for(cntv=0;cntv<dimy_vmap;cntv++)
{
for(cntu=0;cntu<dimx_vmap;cntu++)
{
cnt_grid=cntv*dimx_vmap+cntu;
if(dpf0->val[cntv][cntu]>=0)
{
dpf_dx->val[cntv][cntu]=mvn_dp[cnt_grid]->val[dpf0->val[cntv][cntu]][0];
dpf_dy->val[cntv][cntu]=mvn_dp[cnt_grid]->val[dpf0->val[cntv][cntu]][1];
}
else
{
dpf_dx->val[cntv][cntu]=N_A_N;
dpf_dy->val[cntv][cntu]=N_A_N;
}
dx_buffer->val[cntv][cntu]=N_A_N;
dy_buffer->val[cntv][cntu]=N_A_N;
}
}
//debugging code
//GMA_float_save("/Users/seongsu/Desktop/bad_vmaptar_example/dpf0_gx.gma",dpf_dx);
//GMA_float_save("/Users/seongsu/Desktop/bad_vmaptar_example/dpf0_gy.gma",dpf_dy);
NOI=0;
num_grid_unprocessed=1;
int32_t thres_num_grid_neighbor;
for(thres_num_grid_neighbor=ruv_neighbor->nrows-1;thres_num_grid_neighbor>=3;thres_num_grid_neighbor--)
{
float thres_weight=0.5;
float factor_mpy_to_px=1.0/365.0*dt/param_mimc2.mpp;
while(num_grid_unprocessed!=0 && thres_weight>=0.5)
{
thres_weight-=0.02;
num_processed=1;
while(num_processed!=0)
{
NOI++;
num_processed=0;
//#pragma omp parallel private(cnt_grid,id_cluster,num_grid_neighbor,uvoi, vec_ruv_w_scale_mag, dpe, dxy_neighbor, dxy, sq_dist, sq_dist_closest, sum_mag) \
shared(dpf0, dpf_dx, dpf_dy, ruv_neighbor, mvn_dp, xyuvav, dx_buffer, dy_buffer, N_A_N)
// {
vec_ruv_w_scale_mag=GMA_float_create(ruv_neighbor->nrows,7);
//#pragma omp for schedule(dynamic) collapse(2)
for(cntv=0;cntv<dimy_vmap;cntv++)
{ //loop around the array and process if the visited grid needs to
for(cntu=0;cntu<dimx_vmap;cntu++)
{
cnt_grid=cntv*dimx_vmap+cntu;
if(isnan(dpf_dx->val[cntv][cntu]+dpf_dy->val[cntv][cntu]) && mvn_dp[cnt_grid]->nrows!=0)
{ //dx validity check dy validity check nomatching check
sum_mag=0;
num_grid_neighbor=0;
dpe[0]=xyuvav->val[cnt_grid][4]*factor_mpy_to_px;
dpe[1]=-xyuvav->val[cnt_grid][5]*factor_mpy_to_px;
mag_dpe=sqrt(dpe[0]*dpe[0]+dpe[1]*dpe[1]);
//printf("factor=%f,dpe=[%e,%e]\n",factor_mpy_to_px,dpe[0],dpe[1]);
for(cnt_neighbor=0;cnt_neighbor<ruv_neighbor->nrows;cnt_neighbor++)
{
uvoi[0]=cntu+ruv_neighbor->val[cnt_neighbor][0];
uvoi[1]=cntv+ruv_neighbor->val[cnt_neighbor][1];
if(uvoi[0]>=0 && uvoi[0]<dimx_vmap && uvoi[1]>=0 && uvoi[1]<dimy_vmap) //boundary check
{
dxy_neighbor[0]=dpf_dx->val[uvoi[1]][uvoi[0]];
dxy_neighbor[1]=dpf_dy->val[uvoi[1]][uvoi[0]];
mag_dxy_neighbor=sqrt(dxy_neighbor[0]*dxy_neighbor[0]+dxy_neighbor[1]*dxy_neighbor[1]);
if(!isnan(dxy_neighbor[0]+dxy_neighbor[1]))
//TODO: change: if(!isnan(dxy_neighbor[0]+dxy_neighbor[1]))
{
apv_neighbor[0]=(float)(xyuvav->val[uvoi[1]*dimx_vmap+uvoi[0]][4])*factor_mpy_to_px;
apv_neighbor[1]=-(float)(xyuvav->val[uvoi[1]*dimx_vmap+uvoi[0]][5])*factor_mpy_to_px;
vec_ruv_w_scale_mag->val[num_grid_neighbor][0]=(float)ruv_neighbor->val[cnt_neighbor][0];//relative u coordinate
vec_ruv_w_scale_mag->val[num_grid_neighbor][1]=(float)ruv_neighbor->val[cnt_neighbor][1];//relative v coordinate
vec_ruv_w_scale_mag->val[num_grid_neighbor][4]=sqrt(dxy_neighbor[0]*dxy_neighbor[0]+dxy_neighbor[1]*dxy_neighbor[1]);//magnitude
vec_ruv_w_scale_mag->val[num_grid_neighbor][5]=sqrt(apv_neighbor[0]*apv_neighbor[0]+apv_neighbor[1]*apv_neighbor[1]);//magnitude
vec_ruv_w_scale_mag->val[num_grid_neighbor][6]=mtrx_noi->val[uvoi[1]][uvoi[0]];//magnitude
//vec_ruv_w_scale_mag->val[num_grid_neighbor][3]=sqrt((dxy_neighbor[0]*dxy_neighbor[0]+dxy_neighbor[1]*dxy_neighbor[1])
// /(apv_neighbor[0]*apv_neighbor[0]+apv_neighbor[1]*apv_neighbor[1]));//scale factor
vec_ruv_w_scale_mag->val[num_grid_neighbor][3]=vec_ruv_w_scale_mag->val[num_grid_neighbor][4]
/sqrt(apv_neighbor[0]*apv_neighbor[0]+apv_neighbor[1]*apv_neighbor[1]);//scale factor
//vec_ruv_w_scale_mag->val[num_grid_neighbor][3]=sqrt(dxy_neighbor[0]*dxy_neighbor[0]+dxy_neighbor[1]*dxy_neighbor[1])-
// sqrt(apv_neighbor[0]*apv_neighbor[0]+apv_neighbor[1]*apv_neighbor[1]);//Residuals
//vec_ruv_w_scale_mag->val[num_grid_neighbor][3]=sqrt((dxy_neighbor[0]-apv_neighbor[0])*(dxy_neighbor[0]-apv_neighbor[0])+(dxy_neighbor[1]-apv_neighbor[1])*(dxy_neighbor[1]-apv_neighbor[1]));
num_grid_neighbor++;
}
}
}
if(num_grid_neighbor>=thres_num_grid_neighbor)
{//Enough neighboring samples. perform the interpolation
float w_min=1E+37;
float w_max=-1E+37;
float max_noi=1.0;
int32_t id_w_max=0;
int32_t id_w_min=0;
for(cnt_neighbor=0;cnt_neighbor<num_grid_neighbor;cnt_neighbor++)
{
//Calculate the weight using dot product
dxy[0]=vec_ruv_w_scale_mag->val[cnt_neighbor][0];
dxy[1]=vec_ruv_w_scale_mag->val[cnt_neighbor][1];
float mag_dxy=sqrt(dxy[0]*dxy[0]+dxy[1]*dxy[1]);
float w_candidate=(dpe[0]*dxy[0]+dpe[1]*dxy[1])/(mag_dpe*mag_dxy);
w_candidate=w_candidate>0?w_candidate:-w_candidate;
//cut off if the inner product is less than the threshold
//TODO: consider distance to determine the weight
//Might be better to make LUT when calculating the ruv
if(w_candidate >= thres_weight ) //dot product
{
//vec_ruv_w_scale_mag->val[cnt_neighbor][2]=w_candidate*w_candidate;//attenuate the weight by squaring it (trial code)
vec_ruv_w_scale_mag->val[cnt_neighbor][2]=w_candidate;
//check the min/max
if(vec_ruv_w_scale_mag->val[cnt_neighbor][3]>w_max)
{
w_max=vec_ruv_w_scale_mag->val[cnt_neighbor][3];
id_w_max=cnt_neighbor;
}
if(vec_ruv_w_scale_mag->val[cnt_neighbor][3]<w_min)
{
w_min=vec_ruv_w_scale_mag->val[cnt_neighbor][3];
id_w_min=cnt_neighbor;
}
}
else
{
vec_ruv_w_scale_mag->val[cnt_neighbor][2]=0.0;
}
}
//cut out the maximum and minimum values
vec_ruv_w_scale_mag->val[id_w_max][2]=0.0;
vec_ruv_w_scale_mag->val[id_w_min][2]=0.0;
//debug code
if(cntu==15 && cntv==30)
{
printf("dpe=[%f,%f]\n",dpe[0],dpe[1]);
printf("vec_ruv_w_scale_mag:\n");
GMA_float_print(vec_ruv_w_scale_mag);
}
//end of the debugging code
float sum_mag_times_w=0.0,sum_w=0.0,sum_w2=0.0,w2;
float sum_w_dp=0.0,sum_w_dpe=0.0,sum_noi=0.0;
for(cnt_neighbor=0;cnt_neighbor<num_grid_neighbor;cnt_neighbor++)
{
//w2=1/(1+expf(-mag_dpe+5));
w2=1/(1+expf(-vec_ruv_w_scale_mag->val[cnt_neighbor][5]+5))/max_noi;
//printf("mag_dpe=%f, w2=%f\n",mag_dpe,w2);
//sum_mag_times_w+=vec_ruv_w_scale_mag->val[cnt_neighbor][2]*vec_ruv_w_scale_mag->val[cnt_neighbor][3];
sum_mag_times_w+=vec_ruv_w_scale_mag->val[cnt_neighbor][2]*vec_ruv_w_scale_mag->val[cnt_neighbor][3]*w2;
sum_w+=vec_ruv_w_scale_mag->val[cnt_neighbor][2];
//sum_w2+=vec_ruv_w_scale_mag->val[cnt_neighbor][2]*w2;
//debug code
if(cntu==15 && cntv==30)
{
printf("cnt_neighbor=%d, mag_dpe=%f, w2=%f\n",cnt_neighbor,mag_dpe,w2);
}
//end of the debugging code
//test code - 3/12/2018 trial 1
sum_w_dp+=vec_ruv_w_scale_mag->val[cnt_neighbor][2]*w2*vec_ruv_w_scale_mag->val[cnt_neighbor][4]/vec_ruv_w_scale_mag->val[cnt_neighbor][6];
sum_w_dpe+=vec_ruv_w_scale_mag->val[cnt_neighbor][2]*w2*vec_ruv_w_scale_mag->val[cnt_neighbor][5]/vec_ruv_w_scale_mag->val[cnt_neighbor][6];
sum_noi+=vec_ruv_w_scale_mag->val[cnt_neighbor][6];
sum_w2+=vec_ruv_w_scale_mag->val[cnt_neighbor][2]*w2/vec_ruv_w_scale_mag->val[cnt_neighbor][6];
}
//debug code
if(cntu==15 && cntv==30)
{
printf("sum_mag_times_w=%f\n",sum_mag_times_w);
printf("sum_w=%f\n",sum_w);
printf("sum_w2=%f\n",sum_w2);
}
//end of the debugging code
if(sum_w>=1.0)
{
//float factor_mag=sum_mag_times_w/sum_w;
//float factor_mag=sum_mag_times_w/sum_w2;
//dx_buffer->val[cntv][cntu]=dpe[0]*factor_mag;
//dy_buffer->val[cntv][cntu]=dpe[1]*factor_mag;
//float factor_mag=1.0+sum_mag_times_w/sum_w/mag_dpe;
//dx_buffer->val[cntv][cntu]=dpe[0]*factor_mag;
//dy_buffer->val[cntv][cntu]=dpe[1]*factor_mag;
//test code - 3/12/2018 trial 1
float factor_mag=sum_w_dp/sum_w_dpe;
dx_buffer->val[cntv][cntu]=dpe[0]*factor_mag;
dy_buffer->val[cntv][cntu]=dpe[1]*factor_mag;
mtrx_noi->val[cntv][cntu]=sum_noi/num_grid_neighbor+1;
num_processed++;
}
}//if(num_grid_neighbor>=5)
}//if(isnan(dpf_dx->val[cntv][cntu]) && isnan(dpf_dx->val[cntv][cntu]) && mvn_dp[cnt_grid]->nrows!=0)
}//for(cntu=0;cntu<dimx_vmap;cnty++)
}//for(cntv=0;cntv<dimy_vmap;cntv++)
GMA_float_destroy(vec_ruv_w_scale_mag);
//}//#pragma omp parallel private....
//put the interpolated values in the buffer to dpf_dx and dpf_dy
for(cntv=0;cntv<dimy_vmap;cntv++)
{
for(cntu=0;cntu<dimx_vmap;cntu++)
{
if(!isnan(dx_buffer->val[cntv][cntu]) && !isnan(dy_buffer->val[cntv][cntu]))
{
dpf_dx->val[cntv][cntu]=dx_buffer->val[cntv][cntu];
dpf_dy->val[cntv][cntu]=dy_buffer->val[cntv][cntu];
dx_buffer->val[cntv][cntu]=N_A_N;
dy_buffer->val[cntv][cntu]=N_A_N;
}
}
}
//sprintf(buffer_filename,"/Users/seongsu/Desktop/bad_vmaptar_example/dpf2_dx_%02d.GMA",NOI);
//GMA_float_save(buffer_filename,dpf_dx);
//sprintf(buffer_filename,"/Users/seongsu/Desktop/bad_vmaptar_example/dpf2_dy_%02d.GMA",NOI);
//GMA_float_save(buffer_filename,dpf_dy);
}//while(num_processed!=0)
//scan through the dpf0 and see if there is any grid that does not have corresponding clusters
num_grid_unprocessed=0;
for(cntv=0;cntv<dimy_vmap;cntv++)
{
for(cntu=0;cntu<dimx_vmap;cntu++)
{
cnt_grid=cntv*dimx_vmap+cntu;
if((isnan(dpf_dx->val[cntv][cntu]) || isnan(dpf_dy->val[cntv][cntu])) && mvn_dp[cnt_grid]->nrows!=0 )
{
num_grid_unprocessed++;
}
}
}
printf("NOI=%d, weight threshold=%.2f, #remaining=%d\n",NOI,thres_weight,num_grid_unprocessed);
}//while(num_grid_unprocessed!=0)
}//for(thres_num_grid_neighbor=ruv_neighbor->nrows-1;thres_num_grid_neighbor>=3;thres_num_grid_neighbor--)
//GMA_float_save("../MIMC2_C_GMA/dpf2_dx_interp.GMA",dpf_dx);
//GMA_float_save("../MIMC2_C_GMA/dpf2_dy_interp.GMA",dpf_dy);
//Smoothing the interpolated grids (dpf_dx and dpf_dy)
printf("Smoothing the inteprolation results before finding the corresponding clusters\n");
float sum_dx,sum_dy;
float num_disp;
int32_t du,dv;
for(cntv=1;cntv<dimy_vmap-1;cntv++)
{
for(cntu=1;cntu<dimx_vmap-1;cntu++)
{
//if(dpf0->val[cntv][cntu]<0 && mvn_dp[cnt_grid]->nrows!=0)
if(dpf0->val[cntv][cntu]<0 && !isnan(dpf_dx->val[cntv][cntu]+dpf_dy->val[cntv][cntu]))
{
//printf("Smoothing comes in! [cntu,cntv]=[%d,%d]\n",cntu,cntv);
num_disp=0.0;
sum_dx=0.0;
sum_dy=0.0;
for(dv=-1;dv<=1;dv++)
{
for(du=-1;du<=1;du++)
{
if(!isnan(dpf_dx->val[cntv+dv][cntu+du]+dpf_dy->val[cntv+dv][cntu+du]))
{
sum_dx+=dpf_dx->val[cntv+dv][cntu+du];
sum_dy+=dpf_dy->val[cntv+dv][cntu+du];
num_disp=num_disp+1;
}
}
}
dx_buffer->val[cntv][cntu]=sum_dx/num_disp;
dy_buffer->val[cntv][cntu]=sum_dy/num_disp;
}
else
{
dx_buffer->val[cntv][cntu]=dpf_dx->val[cntv][cntu];
dy_buffer->val[cntv][cntu]=dpf_dy->val[cntv][cntu];
}
}
}
for(cntv=1;cntv<dimy_vmap-1;cntv++)
{
for(cntu=1;cntu<dimx_vmap-1;cntu++)
{
dpf_dx->val[cntv][cntu]=dx_buffer->val[cntv][cntu];
dpf_dy->val[cntv][cntu]=dy_buffer->val[cntv][cntu];
}
}
printf("Smoothing complete\n");
//sprintf(buffer_filename,"/Users/seongsu/Desktop/bad_vmaptar_example/dpf2_dx_%02d.GMA",NOI+1);
//GMA_float_save(buffer_filename,dpf_dx);
//sprintf(buffer_filename,"/Users/seongsu/Desktop/bad_vmaptar_example/dpf2_dy_%02d.GMA",NOI+1);
//GMA_float_save(buffer_filename,dpf_dy);
printf("dpf2 approximation successful. proceeding to final adjustment\n");
//put mean cluster dps and IDs closest to the interpolated displacements
for(cntv=0;cntv<dimy_vmap;cntv++)
{
for(cntu=0;cntu<dimx_vmap;cntu++)
{
cnt_grid=cntv*dimx_vmap+cntu;
if(dpf0->val[cntv][cntu]<0 && mvn_dp[cnt_grid]->nrows!=0)
{
sq_dist_closest=1E+37;
id_cluster=0;
for(cnt_cluster=0;cnt_cluster<mvn_dp[cnt_grid]->nrows;cnt_cluster++)
{
dxy[0]=dpf_dx->val[cntv][cntu]-mvn_dp[cnt_grid]->val[cnt_cluster][0];
dxy[1]=dpf_dy->val[cntv][cntu]-mvn_dp[cnt_grid]->val[cnt_cluster][1];
sq_dist=dxy[0]*dxy[0]+dxy[1]*dxy[1];
if(sq_dist<sq_dist_closest)
{
sq_dist_closest=sq_dist;
id_cluster=cnt_cluster;
}
}
dpf0->val[cntv][cntu]=id_cluster;
dpf_dx->val[cntv][cntu]=mvn_dp[cnt_grid]->val[id_cluster][0];
dpf_dy->val[cntv][cntu]=mvn_dp[cnt_grid]->val[id_cluster][1];
}
}
}
//GMA_float_save("/Users/seongsu/Desktop/bad_vmaptar_example/dpf2_dx_final.GMA",dpf_dx);
//GMA_float_save("/Users/seongsu/Desktop/bad_vmaptar_example/dpf2_dy_final.GMA",dpf_dy);
//GMA_int32_save("../MIMC2_C_GMA/dpf2_cluster.GMA",dpf0);
printf("dpf1 adjustment successful. finalizing the process\n");
//de-allocate array
GMA_float_destroy(dx_buffer);
GMA_float_destroy(dy_buffer);
//GMA_float_destroy(vec_ruv_w_scale_mag);
}
//Compute an initial displacement field (dpf_dx/dpf_dy) and cluster-ID map (dpf0)
//as the seed for pseudosmoothing. Cells whose dpf0 entry is >=0 already carry a
//valid cluster and are reconstructed directly from mvn_dp; cells with dpf0<0 but
//a non-empty candidate-cluster list are filled in iteratively from neighboring
//displacements using a direction-weighted scale interpolation oriented by the
//a-priori velocity (xyuvav), then snapped to the closest candidate cluster.
//Parameters:
//  dpf0          in/out cluster-ID grid (dimy_vmap x dimx_vmap); <0 = unresolved
//  dpf_dx,dpf_dy in/out displacement grids [px]; reconstructed and filled here
//  ruv_neighbor  relative (du,dv) offsets defining the neighbor stencil
//  mvn_dp        per-grid candidate cluster table; cols 0/1 = dx/dy [px]
//  xyuvav        per-grid a-priori data; cols 4/5 = vx/vy (presumably m/yr - TODO confirm)
//Globals used: dimx_vmap, dimy_vmap, dt, param_mimc2.mpp
void get_dpf1_original(GMA_int32 *dpf0, GMA_float *dpf_dx, GMA_float *dpf_dy, GMA_int32 *ruv_neighbor, GMA_float **mvn_dp, GMA_double *xyuvav) //get the initial dp for pseudosmoothing
{
    int32_t cntu,cntv,cnt_grid,cnt_neighbor,cnt_cluster,id_cluster;
    int32_t NOI,num_processed,num_grid_neighbor,num_grid_unprocessed;
    int32_t uvoi[2];
    GMA_float *dx_buffer, *dy_buffer;
    GMA_float *vec_ruv_w_scale_mag;
    float dpe[2],dxy_neighbor[2],dxy[2],apv_neighbor[2];
    float N_A_N=sqrt(-1.0);            //quiet NaN sentinel for "no value yet"
    float sq_dist,sq_dist_closest;     //(removed write-only local sum_mag)
    char buffer_filename[2048];
    dx_buffer=GMA_float_create(dimy_vmap,dimx_vmap);
    dy_buffer=GMA_float_create(dimy_vmap,dimx_vmap);
    //reconstruct dpf_dx and dpf_dy. Also initialize the dx_buffer and dy_buffer
    for(cntv=0;cntv<dimy_vmap;cntv++)
    {
        for(cntu=0;cntu<dimx_vmap;cntu++)
        {
            cnt_grid=cntv*dimx_vmap+cntu;
            if(dpf0->val[cntv][cntu]>=0)
            {
                dpf_dx->val[cntv][cntu]=mvn_dp[cnt_grid]->val[dpf0->val[cntv][cntu]][0];
                dpf_dy->val[cntv][cntu]=mvn_dp[cnt_grid]->val[dpf0->val[cntv][cntu]][1];
            }
            else
            {
                dpf_dx->val[cntv][cntu]=N_A_N;
                dpf_dy->val[cntv][cntu]=N_A_N;
            }
            dx_buffer->val[cntv][cntu]=N_A_N;
            dy_buffer->val[cntv][cntu]=N_A_N;
        }
    }
    //debugging code
    //NOTE(review): hardcoded user-specific absolute paths - debug leftovers; confirm before shipping
    GMA_float_save("/Users/seongsu/Desktop/bad_vmaptar_example/dpf0_gx.gma",dpf_dx);
    GMA_float_save("/Users/seongsu/Desktop/bad_vmaptar_example/dpf0_gy.gma",dpf_dy);
    NOI=0;
    num_grid_unprocessed=1;
    float thres_weight=1.0;
    //progressively relax the direction-agreement threshold until every
    //resolvable grid is interpolated or the threshold floor (0.5) is reached
    while(num_grid_unprocessed!=0 && thres_weight>=0.5)
    {
        thres_weight-=0.02;
        num_processed=1;
        while(num_processed!=0) //keep sweeping while any cell was filled in the last pass
        {
            NOI++;
            num_processed=0;
            vec_ruv_w_scale_mag=GMA_float_create(ruv_neighbor->nrows,4);
            for(cntv=0;cntv<dimy_vmap;cntv++)
            { //loop around the array and process if the visited grid needs to
                for(cntu=0;cntu<dimx_vmap;cntu++)
                {
                    cnt_grid=cntv*dimx_vmap+cntu;
                    if(isnan(dpf_dx->val[cntv][cntu]) && isnan(dpf_dy->val[cntv][cntu]) && mvn_dp[cnt_grid]->nrows!=0)
                    { //dx validity check, dy validity check, no-matching check
                        num_grid_neighbor=0;
                        //a-priori displacement [px] from velocity: v/365*dt/mpp
                        dpe[0]=xyuvav->val[cnt_grid][4]/365*dt/param_mimc2.mpp;
                        dpe[1]=-xyuvav->val[cnt_grid][5]/365*dt/param_mimc2.mpp;
                        for(cnt_neighbor=0;cnt_neighbor<ruv_neighbor->nrows;cnt_neighbor++)
                        {
                            uvoi[0]=cntu+ruv_neighbor->val[cnt_neighbor][0];
                            uvoi[1]=cntv+ruv_neighbor->val[cnt_neighbor][1];
                            if(uvoi[0]>=0 && uvoi[0]<dimx_vmap && uvoi[1]>=0 && uvoi[1]<dimy_vmap)//boundary check
                            {
                                dxy_neighbor[0]=dpf_dx->val[uvoi[1]][uvoi[0]];
                                dxy_neighbor[1]=dpf_dy->val[uvoi[1]][uvoi[0]];
                                if(!isnan(dxy_neighbor[0]) && !isnan(dxy_neighbor[1]))
                                {
                                    apv_neighbor[0]=(float)(xyuvav->val[uvoi[1]*dimx_vmap+uvoi[0]][4])/365*dt/param_mimc2.mpp;
                                    apv_neighbor[1]=-(float)(xyuvav->val[uvoi[1]*dimx_vmap+uvoi[0]][5])/365*dt/param_mimc2.mpp;
                                    vec_ruv_w_scale_mag->val[num_grid_neighbor][0]=(float)ruv_neighbor->val[cnt_neighbor][0];//relative u coordinate
                                    vec_ruv_w_scale_mag->val[num_grid_neighbor][1]=(float)ruv_neighbor->val[cnt_neighbor][1];//relative v coordinate
                                    vec_ruv_w_scale_mag->val[num_grid_neighbor][3]=sqrt((dxy_neighbor[0]*dxy_neighbor[0]+dxy_neighbor[1]*dxy_neighbor[1])/(apv_neighbor[0]*apv_neighbor[0]+apv_neighbor[1]*apv_neighbor[1]));//scale factor
                                    //debug probe for grid (49,57), i.e. 0-based (48,56)
                                    if(uvoi[0]==48 && uvoi[1]==56) //BUGFIX: was "uvoi[0]==48 && uvoi[0]==56" - always false
                                    {
                                        printf("%f\t",sqrt((dxy_neighbor[0]*dxy_neighbor[0]+dxy_neighbor[1]*dxy_neighbor[1])/(apv_neighbor[0]*apv_neighbor[0]+apv_neighbor[1]*apv_neighbor[1])));
                                    }
                                    num_grid_neighbor++;
                                }
                            }
                        }
                        if(num_grid_neighbor>=5)
                        {//Enough neighboring samples. perform the interpolation
                            //normalize dpe
                            float mag_dpe=sqrt(dpe[0]*dpe[0]+dpe[1]*dpe[1]);
                            dpe[0]/=mag_dpe;
                            dpe[1]/=mag_dpe;
                            float w_min=1E+37;
                            float w_max=-1E+37;
                            int32_t id_w_max=0;
                            int32_t id_w_min=0;
                            for(cnt_neighbor=0;cnt_neighbor<num_grid_neighbor;cnt_neighbor++)
                            {
                                //normalize the relative offset and weight by |dot product|
                                //with the (normalized) a-priori direction
                                dxy[0]=vec_ruv_w_scale_mag->val[cnt_neighbor][0];
                                dxy[1]=vec_ruv_w_scale_mag->val[cnt_neighbor][1];
                                float mag_dxy=sqrt(dxy[0]*dxy[0]+dxy[1]*dxy[1]);
                                dxy[0]/=mag_dxy;
                                dxy[1]/=mag_dxy;
                                //cut off if the inner product is less than the threshold
                                //TODO: make the threshold adjustable
                                float w_candidate=dpe[0]*dxy[0]+dpe[1]*dxy[1];
                                if(w_candidate<0)
                                {
                                    w_candidate=-w_candidate;
                                }
                                if(w_candidate >= thres_weight ) //dot product
                                {
                                    vec_ruv_w_scale_mag->val[cnt_neighbor][2]=w_candidate;
                                    //check the min/max of the scale factors among accepted neighbors
                                    if(vec_ruv_w_scale_mag->val[cnt_neighbor][3]>w_max)
                                    {
                                        w_max=vec_ruv_w_scale_mag->val[cnt_neighbor][3];
                                        id_w_max=cnt_neighbor;
                                    }
                                    if(vec_ruv_w_scale_mag->val[cnt_neighbor][3]<w_min)
                                    {
                                        w_min=vec_ruv_w_scale_mag->val[cnt_neighbor][3];
                                        id_w_min=cnt_neighbor;
                                    }
                                }
                                else
                                {
                                    vec_ruv_w_scale_mag->val[cnt_neighbor][2]=0.0;
                                }
                            }
                            //cut out the maximum and minimum values (trimmed weighted mean)
                            vec_ruv_w_scale_mag->val[id_w_max][2]=0.0;
                            vec_ruv_w_scale_mag->val[id_w_min][2]=0.0;
                            float sum_mag_times_w=0,sum_w=0;
                            for(cnt_neighbor=0;cnt_neighbor<num_grid_neighbor;cnt_neighbor++)
                            {
                                sum_mag_times_w+=vec_ruv_w_scale_mag->val[cnt_neighbor][2]*vec_ruv_w_scale_mag->val[cnt_neighbor][3];
                                sum_w+=vec_ruv_w_scale_mag->val[cnt_neighbor][2];
                            }
                            if(sum_w>=1.0) //only accept when enough total weight survived the trimming
                            {
                                float factor_mag=sum_mag_times_w/sum_w;
                                dx_buffer->val[cntv][cntu]=dpe[0]*factor_mag*mag_dpe;
                                dy_buffer->val[cntv][cntu]=dpe[1]*factor_mag*mag_dpe;
                                num_processed++;
                            }
                        }//if(num_grid_neighbor>=5)
                    }//if(isnan(...))
                }//for(cntu=0;cntu<dimx_vmap;cntu++)
            }//for(cntv=0;cntv<dimy_vmap;cntv++)
            GMA_float_destroy(vec_ruv_w_scale_mag);
            //put the interpolated values in the buffer to dpf_dx and dpf_dy
            //(double-buffered so a sweep never reads values it wrote itself)
            for(cntv=0;cntv<dimy_vmap;cntv++)
            {
                for(cntu=0;cntu<dimx_vmap;cntu++)
                {
                    if(!isnan(dx_buffer->val[cntv][cntu]) && !isnan(dy_buffer->val[cntv][cntu]))
                    {
                        dpf_dx->val[cntv][cntu]=dx_buffer->val[cntv][cntu];
                        dpf_dy->val[cntv][cntu]=dy_buffer->val[cntv][cntu];
                        dx_buffer->val[cntv][cntu]=N_A_N;
                        dy_buffer->val[cntv][cntu]=N_A_N;
                    }
                }
            }
            //NOTE(review): hardcoded debug dump paths - confirm before shipping
            sprintf(buffer_filename,"/Users/seongsu/Desktop/bad_vmaptar_example/dpf2_dx_%02d.GMA",NOI);
            GMA_float_save(buffer_filename,dpf_dx);
            sprintf(buffer_filename,"/Users/seongsu/Desktop/bad_vmaptar_example/dpf2_dy_%02d.GMA",NOI);
            GMA_float_save(buffer_filename,dpf_dy);
        }//while(num_processed!=0)
        //scan through the dpf0 and see if there is any grid that does not have corresponding clusters
        num_grid_unprocessed=0;
        for(cntv=0;cntv<dimy_vmap;cntv++)
        {
            for(cntu=0;cntu<dimx_vmap;cntu++)
            {
                cnt_grid=cntv*dimx_vmap+cntu;
                if((isnan(dpf_dx->val[cntv][cntu]) || isnan(dpf_dy->val[cntv][cntu])) && mvn_dp[cnt_grid]->nrows!=0 )
                {
                    num_grid_unprocessed++;
                }
            }
        }
        printf("NOI=%d, weight threshold=%.2f, #remaining=%d\n",NOI,thres_weight,num_grid_unprocessed);
    }//while(num_grid_unprocessed!=0)
    printf("dpf2 approximation successful. proceeding to final adjustment\n");
    //put mean cluster dps and IDs closest to the interpolated displacements
    for(cntv=0;cntv<dimy_vmap;cntv++)
    {
        for(cntu=0;cntu<dimx_vmap;cntu++)
        {
            cnt_grid=cntv*dimx_vmap+cntu;
            if(dpf0->val[cntv][cntu]<0 && mvn_dp[cnt_grid]->nrows!=0)
            {
                sq_dist_closest=1E+37;
                id_cluster=0;
                for(cnt_cluster=0;cnt_cluster<mvn_dp[cnt_grid]->nrows;cnt_cluster++)
                {
                    dxy[0]=dpf_dx->val[cntv][cntu]-mvn_dp[cnt_grid]->val[cnt_cluster][0];
                    dxy[1]=dpf_dy->val[cntv][cntu]-mvn_dp[cnt_grid]->val[cnt_cluster][1];
                    sq_dist=dxy[0]*dxy[0]+dxy[1]*dxy[1];
                    if(sq_dist<sq_dist_closest)
                    {
                        sq_dist_closest=sq_dist;
                        id_cluster=cnt_cluster;
                    }
                }
                dpf0->val[cntv][cntu]=id_cluster;
                dpf_dx->val[cntv][cntu]=mvn_dp[cnt_grid]->val[id_cluster][0];
                dpf_dy->val[cntv][cntu]=mvn_dp[cnt_grid]->val[id_cluster][1];
            }
        }
    }
    printf("dpf1 adjustment successful. finalizing the process\n");
    //de-allocate array
    GMA_float_destroy(dx_buffer);
    GMA_float_destroy(dy_buffer);
}
//Iteratively "pseudo-smooth" the displacement field: for every grid whose
//currently selected cluster has low quality (mvn_dp[..][4] < 0.6), fit a
//weighted quadratic surface through the neighboring displacements (quadfit2)
//and, if the surface prediction is closer to a different candidate cluster,
//re-assign the grid to that cluster. Iterates until no cell changes, max_iter
//sweeps are reached, or the investigate-mask starts oscillating (cycle).
//Parameters:
//  dpf           in/out cluster-ID grid (dimy_vmap x dimx_vmap)
//  dpf_dx,dpf_dy in/out displacement grids [px]
//  ruv_neighbor  relative (du,dv) neighbor stencil offsets
//  mvn_dp        per-grid candidate cluster table; cols 0/1 = dx/dy, col 4 = quality
//  xyuvav        per-grid a-priori data; cols 4/5 = vx/vy, used to orient the weights
//Globals used: dimx_vmap, dimy_vmap
void get_dpf_pseudosmoothing(GMA_int32 *dpf,GMA_float *dpf_dx, GMA_float *dpf_dy,GMA_int32 *ruv_neighbor,GMA_float **mvn_dp, GMA_double *xyuvav)
{
    GMA_uint8 *mask_investigate, *mask_investigate_next, **mask_investigate_layer;
    GMA_uint8 **stack_mask_investigate;
    GMA_int32 *uv_neighbor;
    GMA_double *uvoi;
    GMA_double *w, *duv_neighbor, *duv_interp;
    int32_t NOI=0, max_iter=100;
    int32_t cntu,cntv,cntn,cntc;
    int32_t idx_vec_corr, id_cluster;
    int32_t num_neighbor;
    double vxyoi[2],eigvel[2],ITM[4],duv_cluster[2],duv_grid[2],duv_candidate[2];
    char flag_any_modification, flag_fluctuation;
    double sq_dist, sq_dist_min;
    int32_t num_cluster,id_cluster_closest;
    float N_A_N=sqrt(-1.0);
    //anisotropic weighting "eigen-velocities" (flow-parallel vs flow-perpendicular)
    eigvel[0]=1500.0/300.0;
    eigvel[1]=eigvel[0]/3.0;
    //determine the initial values of mask_investigate and mask_investigate_next
    printf("Initializing the layer array\n");
    mask_investigate_layer=(GMA_uint8**)malloc(2*sizeof(GMA_uint8*));
    mask_investigate_layer[0]=GMA_uint8_create(dimy_vmap,dimx_vmap);
    mask_investigate_layer[1]=GMA_uint8_create(dimy_vmap,dimx_vmap);
    mask_investigate=mask_investigate_layer[0];
    mask_investigate_next=mask_investigate_layer[1];
    GMA_float **dxy_ps_buffer;
    GMA_int32 *dpf_ps_buffer;
    uv_neighbor=GMA_int32_create(ruv_neighbor->nrows,ruv_neighbor->ncols);
    duv_neighbor=GMA_double_create(ruv_neighbor->nrows,ruv_neighbor->ncols);
    uvoi=GMA_double_create(1,2);
    w=GMA_double_create(ruv_neighbor->nrows,1);
    duv_interp=GMA_double_create(1,2);
    dxy_ps_buffer=malloc(2*sizeof(GMA_float*));
    dxy_ps_buffer[0]=GMA_float_create(dimy_vmap,dimx_vmap);
    dxy_ps_buffer[1]=GMA_float_create(dimy_vmap,dimx_vmap);
    dpf_ps_buffer=GMA_int32_create(dimy_vmap,dimx_vmap);
    //TODO: consider parallelizing the for loop below
    printf("Determining the grids to investigate, while initializing dxy_ps_buffer\n");
    for(cntv=0;cntv<dimy_vmap;cntv++)
    {
        for(cntu=0;cntu<dimx_vmap;cntu++)
        {
            idx_vec_corr=cntv*dimx_vmap+cntu;
            id_cluster=dpf->val[cntv][cntu];
            if(id_cluster<0)
            {
                mask_investigate->val[cntv][cntu]=0; //no cluster assigned - nothing to refine
            }
            else
            {
                //only low-quality selections (<0.6) are revisited
                mask_investigate->val[cntv][cntu]=(mvn_dp[idx_vec_corr]->val[id_cluster][4]>=0.6)?0:1;
            }
            dxy_ps_buffer[0]->val[cntv][cntu]=N_A_N;
            dxy_ps_buffer[1]->val[cntv][cntu]=N_A_N;
            dpf_ps_buffer->val[cntv][cntu]=-1;
        }
    }
    printf("Allocating mask stack\n");
    //allocate stack_mask_investigate
    //BUGFIX: +1 entry - the stack is indexed with NOI, which can reach max_iter
    stack_mask_investigate=(GMA_uint8 **)malloc((max_iter+1)*sizeof(GMA_uint8*));
    //copy the initial mask_investigate
    stack_mask_investigate[0]=GMA_uint8_create(dimy_vmap,dimx_vmap);
    for(cntv=0;cntv<dimy_vmap;cntv++)
    {
        for(cntu=0;cntu<dimx_vmap;cntu++)
        {
            stack_mask_investigate[0]->val[cntv][cntu]=mask_investigate->val[cntv][cntu];
        }
    }
    printf("Initiating while loop\n");
    flag_any_modification=1;
    flag_fluctuation=0;
    //BUGFIX: condition was "NOI<=100 && ... && ~flag_fluctuation". The bitwise
    //NOT of a 0/1 flag is always nonzero, so the fluctuation term could never
    //stop the loop, and NOI<=100 allowed stack_mask_investigate to be indexed
    //past its allocation.
    while(NOI<max_iter && flag_any_modification && !flag_fluctuation)
    {
        //ping-pong between the two mask layers
        mask_investigate=mask_investigate_layer[NOI%2];
        mask_investigate_next=mask_investigate_layer[(NOI+1)%2];
        flag_any_modification=0;
        NOI++;
        //clean up mask_investigate_next
        for(cntv=0;cntv<dimy_vmap;cntv++)
        {
            for(cntu=0;cntu<dimx_vmap;cntu++)
            {
                mask_investigate_next->val[cntv][cntu]=0;
            }
        }
        //loop through the mask_investigate
        for(cntv=0;cntv<dimy_vmap;cntv++)
        {
            for(cntu=0;cntu<dimx_vmap;cntu++)
            {
                if(mask_investigate->val[cntv][cntu]==0)
                {
                    continue; //this grid does not need to be investigated
                }
                //The visited grid NEEDS to be investigated:
                //collect the valid neighbors' relative offsets and displacements
                num_neighbor=0;
                for(cntn=0;cntn<ruv_neighbor->nrows;cntn++)
                {
                    int32_t uvoi_neighbor[2];
                    uvoi_neighbor[0]=cntu+ruv_neighbor->val[cntn][0];
                    uvoi_neighbor[1]=cntv+ruv_neighbor->val[cntn][1];
                    if(uvoi_neighbor[0]>=0 && uvoi_neighbor[0]<dimx_vmap && //boundary check
                       uvoi_neighbor[1]>=0 && uvoi_neighbor[1]<dimy_vmap &&
                       !isnan(dpf_dx->val[uvoi_neighbor[1]][uvoi_neighbor[0]]) &&
                       !isnan(dpf_dy->val[uvoi_neighbor[1]][uvoi_neighbor[0]])) //validity check of displacement matrix
                    {
                        uv_neighbor->val[num_neighbor][0]=ruv_neighbor->val[cntn][0];
                        uv_neighbor->val[num_neighbor][1]=ruv_neighbor->val[cntn][1];
                        duv_neighbor->val[num_neighbor][0]=dpf_dx->val[uvoi_neighbor[1]][uvoi_neighbor[0]];
                        duv_neighbor->val[num_neighbor][1]=dpf_dy->val[uvoi_neighbor[1]][uvoi_neighbor[0]];
                        num_neighbor++;
                    }
                }
                //temporarily shrink the work arrays to the valid-neighbor count
                //(restored before destruction at the end of this function)
                uv_neighbor->nrows=num_neighbor;
                duv_neighbor->nrows=num_neighbor;
                w->nrows=num_neighbor;
                if(num_neighbor>=10) // Enough number of neighboring grids
                {
                    uvoi->val[0][0]=0.0;
                    uvoi->val[0][1]=0.0;
                    //calculate weight
                    idx_vec_corr=cntv*dimx_vmap+cntu;
                    //retrieve a priori from xyuvav
                    vxyoi[0]=xyuvav->val[idx_vec_corr][4];
                    vxyoi[1]=xyuvav->val[idx_vec_corr][5];
                    //inverse of the flow-aligned anisotropic weighting matrix;
                    //ITM[2] is never set because the matrix is symmetric (ITM[2]==ITM[1])
                    ITM[0]=(eigvel[1]*vxyoi[0]*vxyoi[0]+eigvel[0]*vxyoi[1]*vxyoi[1])/((eigvel[0]*eigvel[1])*(vxyoi[0]*vxyoi[0]+vxyoi[1]*vxyoi[1]));
                    ITM[1]=((eigvel[0]-eigvel[1])*vxyoi[0]*vxyoi[1])/((eigvel[0]*eigvel[1])*(vxyoi[0]*vxyoi[0]+vxyoi[1]*vxyoi[1]));
                    ITM[3]=(eigvel[1]*vxyoi[1]*vxyoi[1] +eigvel[0]*vxyoi[0]*vxyoi[0])/((eigvel[0]*eigvel[1])*(vxyoi[0]*vxyoi[0]+vxyoi[1]*vxyoi[1]));
                    for(cntn=0;cntn<num_neighbor;cntn++)
                    {
                        w->val[cntn][0]=exp(-(ITM[0]*uv_neighbor->val[cntn][0]*uv_neighbor->val[cntn][0]
                                            +2*ITM[1]*uv_neighbor->val[cntn][0]*uv_neighbor->val[cntn][1]
                                            +ITM[3]*uv_neighbor->val[cntn][1]*uv_neighbor->val[cntn][1]));
                        //TODO: adjust the coefficients
                        //TODO: Think about making use of the symmetric properties of the weight vector
                    }
                    //End calculating weight
                    //calculate dui and dvi (weighted quadratic surface fit at the origin)
                    quadfit2(uv_neighbor,duv_neighbor,w,uvoi,duv_interp);
                    //compare duv_interp with the mean values of the corresponding clusters
                    id_cluster=dpf->val[cntv][cntu];
                    num_cluster=mvn_dp[idx_vec_corr]->nrows;
                    sq_dist_min=1E+37;
                    int8_t flag_update_grid=0;
                    for(cntc=0;cntc<num_cluster;cntc++)
                    {
                        duv_cluster[0]=mvn_dp[idx_vec_corr]->val[cntc][0];
                        duv_cluster[1]=mvn_dp[idx_vec_corr]->val[cntc][1];
                        sq_dist=(duv_interp->val[0][0]-duv_cluster[0])*(duv_interp->val[0][0]-duv_cluster[0])+(duv_interp->val[0][1]-duv_cluster[1])*(duv_interp->val[0][1]-duv_cluster[1]);
                        if(sq_dist<sq_dist_min)
                        {
                            flag_update_grid=1;
                            sq_dist_min=sq_dist;
                            id_cluster_closest=cntc;
                        }
                    }
                    //BUGFIX: duv_grid/duv_candidate were previously read even when no
                    //cluster was found (flag_update_grid==0), i.e. uninitialized memory.
                    if(!flag_update_grid)
                    {
                        continue;
                    }
                    duv_grid[0]=mvn_dp[idx_vec_corr]->val[id_cluster][0];
                    duv_grid[1]=mvn_dp[idx_vec_corr]->val[id_cluster][1];
                    duv_candidate[0]=mvn_dp[idx_vec_corr]->val[id_cluster_closest][0];
                    duv_candidate[1]=mvn_dp[idx_vec_corr]->val[id_cluster_closest][1];
                    if((duv_grid[0]-duv_candidate[0])*(duv_grid[0]-duv_candidate[0]) +(duv_grid[1]-duv_candidate[1])*(duv_grid[1]-duv_candidate[1]) < 0.0001 ) //TODO: make the threshold adjustable
                    {
                        continue; //candidate is effectively the current selection
                    }
                    //stage the re-assignment; applied after the full sweep below
                    dxy_ps_buffer[0]->val[cntv][cntu]=duv_candidate[0];
                    dxy_ps_buffer[1]->val[cntv][cntu]=duv_candidate[1];
                    dpf_ps_buffer->val[cntv][cntu]=id_cluster_closest;
                    flag_any_modification=1;
                    //mark the neighboring location to investigate in the next iteration
                    for(cntn=0;cntn<num_neighbor;cntn++)
                    {
                        if(stack_mask_investigate[0]->val[cntv+uv_neighbor->val[cntn][1]][cntu+uv_neighbor->val[cntn][0]])
                        //NOTE: The 1st layer of this stack indicates dpf0 and non-matching results grids
                        {
                            mask_investigate_next->val[cntv+uv_neighbor->val[cntn][1]][cntu+uv_neighbor->val[cntn][0]]=1;
                        }
                    }
                }
            }//for(cntu=0;cntu<dimx_vmap;cntu++)
        }//for(cntv=0;cntv<dimy_vmap;cntv++)
        //update and reset dpf_dx, dpf_dy and dpf
        for(cntu=0;cntu<dimx_vmap;cntu++)
        {
            for(cntv=0;cntv<dimy_vmap;cntv++)
            {
                if(dpf_ps_buffer->val[cntv][cntu]>=0)
                {
                    dpf_dx->val[cntv][cntu]=dxy_ps_buffer[0]->val[cntv][cntu];
                    dpf_dy->val[cntv][cntu]=dxy_ps_buffer[1]->val[cntv][cntu];
                    dpf->val[cntv][cntu]=dpf_ps_buffer->val[cntv][cntu];
                    dxy_ps_buffer[0]->val[cntv][cntu]=N_A_N;
                    dxy_ps_buffer[1]->val[cntv][cntu]=N_A_N;
                    dpf_ps_buffer->val[cntv][cntu]=-1;
                }
            }
        }
        //check the fluctuation: if the next mask equals any earlier mask the
        //iteration has entered a cycle and would never converge
        for(cntn=NOI-1;cntn>=0;cntn--)
        {
            char flag_difference_found=0;
            for(cntv=0;cntv<dimy_vmap;cntv++)
            {
                for(cntu=0;cntu<dimx_vmap;cntu++)
                {
                    if(stack_mask_investigate[cntn]->val[cntv][cntu]!=mask_investigate_next->val[cntv][cntu])
                    {
                        flag_difference_found=1;
                        break;
                    }
                }
                if(flag_difference_found)
                {
                    break;
                }
            }
            if(flag_difference_found==0)
            {
                printf("Fluctuation detected: NOI=%d, cntn=%d\n",NOI,cntn);
                flag_fluctuation=1;
                break;
            }
        }
        if(flag_fluctuation)
        {
            printf("Fluctuation was detected - Terminating the pseudosmoothing.\n");
            NOI--;
            break;
        }
        else
        {
            //stack up mask_investigate_next to stack
            stack_mask_investigate[NOI]=GMA_uint8_create(dimy_vmap,dimx_vmap);
            int32_t num_grid_process=0;
            for(cntv=0;cntv<dimy_vmap;cntv++)
            {
                for(cntu=0;cntu<dimx_vmap;cntu++)
                {
                    stack_mask_investigate[NOI]->val[cntv][cntu]=mask_investigate_next->val[cntv][cntu];
                    if(mask_investigate_next->val[cntv][cntu])
                    {
                        num_grid_process++;
                    }
                }
            }
            printf("NOI=%d, # grids=%d\n",NOI,num_grid_process);
        }
    }//while(NOI<max_iter && flag_any_modification && !flag_fluctuation)
    //de-allocate the arrays
    for(cntn=0;cntn<=NOI;cntn++)
    {
        GMA_uint8_destroy(stack_mask_investigate[cntn]);
    }
    free(stack_mask_investigate);
    GMA_uint8_destroy(mask_investigate_layer[0]);
    GMA_uint8_destroy(mask_investigate_layer[1]);
    free(mask_investigate_layer); //BUGFIX: the layer pointer array itself was leaked
    uv_neighbor->nrows=ruv_neighbor->nrows; //revert the original size of the arrays that were altered during the while loop
    duv_neighbor->nrows=ruv_neighbor->nrows;
    w->nrows=ruv_neighbor->nrows;
    GMA_int32_destroy(uv_neighbor);
    GMA_double_destroy(uvoi);
    GMA_double_destroy(duv_neighbor);
    GMA_double_destroy(w);
    GMA_double_destroy(duv_interp);
    GMA_float_destroy(dxy_ps_buffer[0]);
    GMA_float_destroy(dxy_ps_buffer[1]);
    free(dxy_ps_buffer);
    GMA_int32_destroy(dpf_ps_buffer); //BUGFIX: dpf_ps_buffer was leaked
}
//Weighted least-squares fit of a bivariate quadratic surface
//z = c0*x^2 + c1*x*y + c2*y^2 + c3*x + c4*y + c5
//through the observations (xy, z) with per-observation weights w, then
//evaluation of the fitted surface at the query points xyi, one output
//column of "out" per column of z.
//  xy  : num_obs x 2 integer sample coordinates
//  z   : num_obs x k observed values (k = out->ncols)
//  w   : num_obs x 1 weights
//  xyi : query coordinates, one row per row of out
//  out : interpolated values (out->nrows x k)
void quadfit2(GMA_int32 *xy, GMA_double *z, GMA_double *w, GMA_double *xyi, GMA_double *out)
{
    int32_t n_obs=xy->nrows;
    GMA_double *A=GMA_double_create(n_obs,6);
    GMA_double *N=GMA_double_create(6,6);
    GMA_double *IN=GMA_double_create(6,6);
    GMA_double *atwb=GMA_double_create(6,1);
    double basis[6];
    double coeff[6];
    int32_t r,c,k,col;
    //design matrix: one row [x^2, xy, y^2, x, y, 1] per observation
    for(r=0;r<n_obs;r++)
    {
        const double x=(double)xy->val[r][0];
        const double y=(double)xy->val[r][1];
        A->val[r][0]=x*x;
        A->val[r][1]=x*y;
        A->val[r][2]=y*y;
        A->val[r][3]=x;
        A->val[r][4]=y;
        A->val[r][5]=1;
    }
    //normal matrix N = A' W A
    for(r=0;r<6;r++)
    {
        for(c=0;c<6;c++)
        {
            double acc=0;
            for(k=0;k<n_obs;k++)
            {
                acc+=A->val[k][r]*w->val[k][0]*A->val[k][c];
            }
            N->val[r][c]=acc;
        }
    }
    //invert the normal matrix once; reused for every output column
    GMA_double_inv(N,IN);
    for(col=0;col<out->ncols;col++)
    {
        //right-hand side A' W z for this output column
        for(r=0;r<6;r++)
        {
            double acc=0;
            for(k=0;k<n_obs;k++)
            {
                acc+=A->val[k][r]*w->val[k][0]*z->val[k][col];
            }
            atwb->val[r][0]=acc;
        }
        //coefficients = inv(N) * atwb
        for(r=0;r<6;r++)
        {
            double acc=0;
            for(c=0;c<6;c++)
            {
                acc+=IN->val[r][c]*atwb->val[c][0];
            }
            coeff[r]=acc;
        }
        //evaluate the fitted surface at every query point
        for(r=0;r<out->nrows;r++)
        {
            const double xi=xyi->val[r][0];
            const double yi=xyi->val[r][1];
            basis[0]=xi*xi;
            basis[1]=xi*yi;
            basis[2]=yi*yi;
            basis[3]=xi;
            basis[4]=yi;
            basis[5]=1;
            double acc=0;
            for(c=0;c<6;c++)
            {
                acc+=basis[c]*coeff[c];
            }
            out->val[r][col]=acc;
        }
    }
    GMA_double_destroy(A);
    GMA_double_destroy(N);
    GMA_double_destroy(IN);
    GMA_double_destroy(atwb);
}
//Dense matrix product: out = a * b.
//Dimensions are taken on trust: a is (nrows x ncols), b must have a->ncols
//rows, and out must be (a->nrows x b->ncols); no bounds are checked here.
void GMA_double_mul(GMA_double *a, GMA_double *b, GMA_double *out)
{
    int32_t row, col, k;
    const int32_t n_rows=a->nrows;
    const int32_t n_cols=b->ncols;
    const int32_t n_inner=a->ncols;
    for(row=0;row<n_rows;row++)
    {
        for(col=0;col<n_cols;col++)
        {
            //accumulate the dot product of row "row" of a with column "col" of b
            double acc=0;
            for(k=0;k<n_inner;k++)
            {
                acc+=a->val[row][k]*b->val[k][col];
            }
            out->val[row][col]=acc;
        }
    }
}
//Invert the square matrix a into I using Gauss-Jordan elimination on a working
//copy (forward elimination, backward elimination, then row scaling); a itself
//is left untouched.
//NOTE: no pivoting is performed - a zero (or tiny) diagonal entry produces
//inf/NaN output. Callers must supply a square, well-conditioned matrix.
void GMA_double_inv(GMA_double *a, GMA_double *I)
{
    int32_t cnt1,cnt2,cnt3;
    double pivot,coeff;
    GMA_double *b=GMA_double_create(a->nrows,a->ncols);
    //duplicate the matrix a so the input is not destroyed by the elimination
    for(cnt1=0;cnt1<a->nrows;cnt1++)
    {
        for(cnt2=0;cnt2<a->ncols;cnt2++)
        {
            b->val[cnt1][cnt2]=a->val[cnt1][cnt2];
        }
    }
    //initialize I to the identity
    for(cnt1=0;cnt1<b->nrows;cnt1++)
    {
        for(cnt2=0;cnt2<b->ncols;cnt2++) //BUGFIX: column loop was bounded by nrows
        {
            I->val[cnt1][cnt2]=(cnt1==cnt2)?1:0;
        }
    }
    //Gaussian elimination - forward (zero out below the diagonal)
    for(cnt1=0;cnt1<b->nrows-1;cnt1++)
    {
        pivot=b->val[cnt1][cnt1];
        for(cnt2=cnt1+1;cnt2<b->nrows;cnt2++) //BUGFIX: row loop was bounded by ncols
        {
            coeff=b->val[cnt2][cnt1]/pivot;
            for(cnt3=0;cnt3<b->ncols;cnt3++) //BUGFIX: column loop was bounded by nrows
            {
                b->val[cnt2][cnt3]-=b->val[cnt1][cnt3]*coeff;
                I->val[cnt2][cnt3]-=I->val[cnt1][cnt3]*coeff;
            }
        }
    }
    //Backward elimination (zero out above the diagonal)
    for(cnt1=b->nrows-1;cnt1>=0;cnt1--)
    {
        pivot=b->val[cnt1][cnt1];
        for(cnt2=cnt1-1;cnt2>=0;cnt2--)
        {
            coeff=b->val[cnt2][cnt1]/pivot;
            for(cnt3=b->ncols-1;cnt3>=0;cnt3--) //BUGFIX: column loop was bounded by nrows
            {
                b->val[cnt2][cnt3]-=b->val[cnt1][cnt3]*coeff;
                I->val[cnt2][cnt3]-=I->val[cnt1][cnt3]*coeff;
            }
        }
    }
    //scaling: divide each row of I by the remaining diagonal entry of b
    for(cnt1=0;cnt1<b->nrows;cnt1++)
    {
        for(cnt2=0;cnt2<b->ncols;cnt2++) //BUGFIX: column loop was bounded by nrows
        {
            I->val[cnt1][cnt2]/=b->val[cnt1][cnt1];
        }
    }
    GMA_double_destroy(b);
}
//Expand the per-grid cluster selection (dpf) into five output planes by
//copying columns 0..4 of the selected cluster's row in mvn_dp:
//vxy_exy_qual[0..4] = {vx, vy, ex, ey, quality} per grid cell
//(column meanings inferred from the array name - TODO confirm).
//Cells with no selected cluster (dpf < 0) are left untouched.
//Globals used: dimx_vmap, dimy_vmap
void convert_dpf_to_vxy_exy_qual(GMA_int32 *dpf, GMA_float **mvn_dp, GMA_float **vxy_exy_qual)
{
    int32_t cntv,cntu,idx_vec_corr;
    int32_t id_cluster;
    int32_t cnt_plane;
    for(cntv=0;cntv<dimy_vmap;cntv++)
    {
        for(cntu=0;cntu<dimx_vmap;cntu++) //BUGFIX: inner bound was dimy_vmap (copy-paste error)
        {
            idx_vec_corr=cntv*dimx_vmap+cntu;
            id_cluster=dpf->val[cntv][cntu];
            if(id_cluster<0)
            {
                continue; //no cluster assigned - avoid indexing mvn_dp with a negative row
            }
            for(cnt_plane=0;cnt_plane<5;cnt_plane++)
            {
                vxy_exy_qual[cnt_plane]->val[cntv][cntu]=mvn_dp[idx_vec_corr]->val[id_cluster][cnt_plane];
            }
        }
    }
}
/* 2-D convolution of `in` with `kernel`, writing into the interior of `out`
 * (the ocwx/ocwy half-kernel border is never written). Input pixels whose DN
 * rounds to 0 are treated as invalid and poison the sum with NaN. After the
 * convolution, the minimum DN of the valid interior is found, NaN outputs are
 * replaced by 0, and all remaining outputs are shifted so the minimum maps
 * to 1 (keeping 0 reserved for "invalid"). */
void GMA_float_conv2(GMA_float *in, GMA_float *kernel, GMA_float *out)
{
    int32_t cnt1, cnt2, cnt3, cnt4;
    int32_t dimx_in, dimy_in, dimx_k, dimy_k;
    int32_t ocwx, ocwy;
    float sum_dn, dn_in, dn_min;
    /* BUGFIX: use the standard NAN macro; sqrt(-1) relied on a domain-error
     * result (and raises EDOM) to produce the same NaN. */
    const float N_A_N = NAN;
    dimx_in = (int32_t)in->ncols;
    dimy_in = (int32_t)in->nrows;
    dimx_k = (int32_t)kernel->ncols;
    dimy_k = (int32_t)kernel->nrows;
    /* half-kernel (untouched border) widths; unused numel_k was removed */
    ocwx = dimx_k / 2;
    ocwy = dimy_k / 2;
    /* convolve the interior; DN-0 inputs propagate NaN into the output */
    for (cnt1 = ocwy; cnt1 < dimy_in - ocwy; cnt1++)
    {
        for (cnt2 = ocwx; cnt2 < dimx_in - ocwx; cnt2++)
        {
            sum_dn = 0;
            for (cnt3 = 0; cnt3 < dimy_k; cnt3++)
            {
                for (cnt4 = 0; cnt4 < dimx_k; cnt4++)
                {
                    dn_in = (int32_t)(in->val[cnt1 + cnt3 - ocwy][cnt2 + cnt4 - ocwx] + 0.5) ? in->val[cnt1 + cnt3 - ocwy][cnt2 + cnt4 - ocwx] : N_A_N;
                    sum_dn += dn_in * kernel->val[cnt3][cnt4];
                }
            }
            out->val[cnt1][cnt2] = sum_dn;
        }
    }
    /* find the minimum DN.
     * BUGFIX: scan only the interior written above; the old loops covered the
     * whole image and read the never-initialized border of `out`. */
    dn_min = 1e+37;
    for (cnt1 = ocwy; cnt1 < dimy_in - ocwy; cnt1++)
        for (cnt2 = ocwx; cnt2 < dimx_in - ocwx; cnt2++)
            if (out->val[cnt1][cnt2] < dn_min)
                dn_min = out->val[cnt1][cnt2];
    /* shift the DN and substitute NaN values with 0.
     * BUGFIX: the column loop previously ran to dimx_in, past the convolved
     * region, touching the uninitialized right border. */
    for (cnt1 = ocwy; cnt1 < dimy_in - ocwy; cnt1++)
    {
        for (cnt2 = ocwx; cnt2 < dimx_in - ocwx; cnt2++)
        {
            if (isnan(out->val[cnt1][cnt2]))
                out->val[cnt1][cnt2] = 0;
            else
                out->val[cnt1][cnt2] -= dn_min - 1;
        }
    }
}
/* Return the median (truncated to int32_t) of column `column_of_interest`
 * of `in`. Completes the previously-unfinished TODO stub, which computed the
 * target rank and then fell off the end without returning (UB).
 * The column is copied into scratch storage, sorted ascending with an
 * insertion sort, and the element at the 1-based rank round(nrows/2) is
 * returned; 0 is returned on an empty matrix or allocation failure.
 * NOTE(review): assumes stdlib.h is available for malloc/free, as the rest
 * of this file already allocates matrices (GMA_double_create) -- confirm. */
int32_t GMA_float_find_median(GMA_float *in, int32_t column_of_interest)
{
    int32_t out;
    int32_t cnt1, cnt2;
    const int32_t n = (int32_t)in->nrows;
    int32_t idx_order_median = (int32_t)((float)(in->nrows) / 2 + 0.5);
    float key;
    float *buf;
    if (n <= 0)
        return 0; /* nothing to take a median of */
    buf = (float *)malloc(sizeof(float) * (size_t)n);
    if (buf == NULL)
        return 0; /* allocation failure: degrade gracefully */
    /* copy the column of interest into scratch storage */
    for (cnt1 = 0; cnt1 < n; cnt1++)
        buf[cnt1] = in->val[cnt1][column_of_interest];
    /* insertion sort, ascending */
    for (cnt1 = 1; cnt1 < n; cnt1++)
    {
        key = buf[cnt1];
        for (cnt2 = cnt1 - 1; cnt2 >= 0 && buf[cnt2] > key; cnt2--)
            buf[cnt2 + 1] = buf[cnt2];
        buf[cnt2 + 1] = key;
    }
    /* idx_order_median is a 1-based rank; clamp defensively */
    if (idx_order_median < 1) idx_order_median = 1;
    if (idx_order_median > n) idx_order_median = n;
    out = (int32_t)buf[idx_order_median - 1];
    free(buf);
    return out;
}
cg_aux.h | #ifndef __CG_AUX_H__
#define __CG_AUX_H__
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <mpi.h>
#include <omp.h>
/* NBFPE: presumably the number of floating-point-expansion bins used for
 * reproducible summation -- TODO confirm against the solver sources. */
#define NBFPE 8
/* Double-precision aliases for the scalar math helpers used by the solver. */
#define FP_SQRT sqrt
#define FP_RAND drand48
#define FP_SEED srand48
#define FP_ABS fabs
#define FP_EXP frexp
#define FP_LOG10 log10
#define FP_POW pow
#define FP_SCANSPEC scan_dconspec
#include "mkl.h"
/* Thin wrappers mapping the solver's BLAS-1 calls onto MKL cblas double
 * precision routines (copy / dot / axpy / scal). */
#define BLAS_cp(n, dx, incx, dy, incy) cblas_dcopy(n, dx, incx, dy, incy)
#define BLAS_dot(n, dx, incx, dy, incy) cblas_ddot(n, dx, incx, dy, incy)
#define BLAS_axpy(n, da, dx, incx, dy, incy) cblas_daxpy(n, da, dx, incx, dy, incy)
#define BLAS_scal(n, da, dx, incx) cblas_dscal(n, da, dx, incx)
/* Blocked BLAS-1 kernels: operate on length-m vectors in blocks of bm. */
void bblas_dcopy(int bm, int m, double *X, double *Y);
void bblas_ddot(int bm, int m, double *X, double *Y, double *result);
void bblas_daxpy(int bm, int m, double f, double *X, double *Y);
void bblas_dscal(int bm, int m, double f, double *X);
/* Element-wise vector product, blocked; dst presumably = src1 .* src2 --
 * confirm in the implementation. */
void VvecDoublesTasks(int bm, int m, double *src1, double *src2, double *dst);
/* Single-block task bodies; the commented pragmas document the intended
 * OpenMP task dependences for each kernel. */
//#pragma omp task depend(in:X[initx:initx+bm-1]) depend(out:Y[inity:inity+bm-1])
void __t_copy(int bm, int m, double *X, double *Y, int initx, int inity);
//#pragma omp task depend(in:X[initx:initx+bm-1], Y[inity:inity+bm-1]) out(result)//concurrent(result[0:bn-1])
void __t_dot(int bm, int m, double *X, double *Y, int initx, int inity, double *result);
//#pragma omp task depend(in:X[0:bm-1], f) out(Y[0:bm-1])
void __t_axpy(int bm, int m, double f, double *X, double *Y);
//#pragma omp task depend(inout:X[0:bm-1],f)
void __t_scal(int bm, int m, double f, double *X);
#endif //__CG_AUX_H__
|
convolutiondepthwise_3x3_packn.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Depthwise 3x3 convolution, stride 1, over packn-packed fp32 data using
// RISC-V Vector intrinsics. One channel group is processed per omp iteration;
// the inner loops compute a 2-row x 2-column tile of output vectors at a
// time, with 1-wide / 1-tall remainder loops.
// NOTE(review): r0..r3 are read without bounds checks, so bottom_blob is
// presumably already padded for the 3x3 window -- confirm against the caller.
static void convdw3x3s1_packn_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
// packn = number of fp32 lanes per packed element (vector length bytes / 4)
const int packn = csrr_vlenb() / 4;
const word_type vl = vsetvl_e32m1(packn);
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat out = top_blob.channel(g);
// per-group bias vector; all-zero when no bias tensor was supplied
vfloat32m1_t _bias0 = bias ? vle32_v_f32m1(bias + g * packn, vl) : vfmv_v_f_f32m1(0.f, vl);
const float* k0 = kernel.row(g);
float* outptr0 = out.row(0);
float* outptr1 = out.row(1);
const Mat img0 = bottom_blob.channel(g);
// four consecutive input rows feed the two output rows of each tile
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
const float* r3 = img0.row(3);
// preload the nine 3x3 kernel taps for this group
vfloat32m1_t _k00 = vle32_v_f32m1(k0, vl);
vfloat32m1_t _k01 = vle32_v_f32m1(k0 + packn, vl);
vfloat32m1_t _k02 = vle32_v_f32m1(k0 + packn * 2, vl);
vfloat32m1_t _k10 = vle32_v_f32m1(k0 + packn * 3, vl);
vfloat32m1_t _k11 = vle32_v_f32m1(k0 + packn * 4, vl);
vfloat32m1_t _k12 = vle32_v_f32m1(k0 + packn * 5, vl);
vfloat32m1_t _k20 = vle32_v_f32m1(k0 + packn * 6, vl);
vfloat32m1_t _k21 = vle32_v_f32m1(k0 + packn * 7, vl);
vfloat32m1_t _k22 = vle32_v_f32m1(k0 + packn * 8, vl);
int i = 0;
// main loop: two output rows per iteration
for (; i + 1 < outh; i += 2)
{
int j = 0;
// 2x2 output tile: _sumRC = output row R, column C
for (; j + 1 < outw; j += 2)
{
vfloat32m1_t _sum00 = _bias0;
vfloat32m1_t _sum01 = _bias0;
vfloat32m1_t _sum10 = _bias0;
vfloat32m1_t _sum11 = _bias0;
vfloat32m1_t _r00 = vle32_v_f32m1(r0, vl);
vfloat32m1_t _r01 = vle32_v_f32m1(r0 + packn, vl);
vfloat32m1_t _r02 = vle32_v_f32m1(r0 + packn * 2, vl);
vfloat32m1_t _r03 = vle32_v_f32m1(r0 + packn * 3, vl);
_sum00 = vfmacc_vv_f32m1(_sum00, _k00, _r00, vl);
_sum00 = vfmacc_vv_f32m1(_sum00, _k01, _r01, vl);
_sum00 = vfmacc_vv_f32m1(_sum00, _k02, _r02, vl);
_sum01 = vfmacc_vv_f32m1(_sum01, _k00, _r01, vl);
_sum01 = vfmacc_vv_f32m1(_sum01, _k01, _r02, vl);
_sum01 = vfmacc_vv_f32m1(_sum01, _k02, _r03, vl);
// row r1 contributes to both output rows (kernel rows 1 and 0)
vfloat32m1_t _r10 = vle32_v_f32m1(r1, vl);
vfloat32m1_t _r11 = vle32_v_f32m1(r1 + packn, vl);
vfloat32m1_t _r12 = vle32_v_f32m1(r1 + packn * 2, vl);
vfloat32m1_t _r13 = vle32_v_f32m1(r1 + packn * 3, vl);
_sum00 = vfmacc_vv_f32m1(_sum00, _k10, _r10, vl);
_sum00 = vfmacc_vv_f32m1(_sum00, _k11, _r11, vl);
_sum00 = vfmacc_vv_f32m1(_sum00, _k12, _r12, vl);
_sum01 = vfmacc_vv_f32m1(_sum01, _k10, _r11, vl);
_sum01 = vfmacc_vv_f32m1(_sum01, _k11, _r12, vl);
_sum01 = vfmacc_vv_f32m1(_sum01, _k12, _r13, vl);
_sum10 = vfmacc_vv_f32m1(_sum10, _k00, _r10, vl);
_sum10 = vfmacc_vv_f32m1(_sum10, _k01, _r11, vl);
_sum10 = vfmacc_vv_f32m1(_sum10, _k02, _r12, vl);
_sum11 = vfmacc_vv_f32m1(_sum11, _k00, _r11, vl);
_sum11 = vfmacc_vv_f32m1(_sum11, _k01, _r12, vl);
_sum11 = vfmacc_vv_f32m1(_sum11, _k02, _r13, vl);
// row r2 likewise contributes to both output rows (kernel rows 2 and 1)
vfloat32m1_t _r20 = vle32_v_f32m1(r2, vl);
vfloat32m1_t _r21 = vle32_v_f32m1(r2 + packn, vl);
vfloat32m1_t _r22 = vle32_v_f32m1(r2 + packn * 2, vl);
vfloat32m1_t _r23 = vle32_v_f32m1(r2 + packn * 3, vl);
_sum00 = vfmacc_vv_f32m1(_sum00, _k20, _r20, vl);
_sum00 = vfmacc_vv_f32m1(_sum00, _k21, _r21, vl);
_sum00 = vfmacc_vv_f32m1(_sum00, _k22, _r22, vl);
_sum01 = vfmacc_vv_f32m1(_sum01, _k20, _r21, vl);
_sum01 = vfmacc_vv_f32m1(_sum01, _k21, _r22, vl);
_sum01 = vfmacc_vv_f32m1(_sum01, _k22, _r23, vl);
_sum10 = vfmacc_vv_f32m1(_sum10, _k10, _r20, vl);
_sum10 = vfmacc_vv_f32m1(_sum10, _k11, _r21, vl);
_sum10 = vfmacc_vv_f32m1(_sum10, _k12, _r22, vl);
_sum11 = vfmacc_vv_f32m1(_sum11, _k10, _r21, vl);
_sum11 = vfmacc_vv_f32m1(_sum11, _k11, _r22, vl);
_sum11 = vfmacc_vv_f32m1(_sum11, _k12, _r23, vl);
// row r3 only feeds the second output row (kernel row 2)
vfloat32m1_t _r30 = vle32_v_f32m1(r3, vl);
vfloat32m1_t _r31 = vle32_v_f32m1(r3 + packn, vl);
vfloat32m1_t _r32 = vle32_v_f32m1(r3 + packn * 2, vl);
vfloat32m1_t _r33 = vle32_v_f32m1(r3 + packn * 3, vl);
_sum10 = vfmacc_vv_f32m1(_sum10, _k20, _r30, vl);
_sum10 = vfmacc_vv_f32m1(_sum10, _k21, _r31, vl);
_sum10 = vfmacc_vv_f32m1(_sum10, _k22, _r32, vl);
_sum11 = vfmacc_vv_f32m1(_sum11, _k20, _r31, vl);
_sum11 = vfmacc_vv_f32m1(_sum11, _k21, _r32, vl);
_sum11 = vfmacc_vv_f32m1(_sum11, _k22, _r33, vl);
vse32_v_f32m1(outptr0, _sum00, vl);
vse32_v_f32m1(outptr0 + packn, _sum01, vl);
vse32_v_f32m1(outptr1, _sum10, vl);
vse32_v_f32m1(outptr1 + packn, _sum11, vl);
outptr0 += packn * 2;
outptr1 += packn * 2;
r0 += packn * 2;
r1 += packn * 2;
r2 += packn * 2;
r3 += packn * 2;
}
// column remainder: one output column, still two output rows
for (; j < outw; j++)
{
vfloat32m1_t _sum0 = _bias0;
vfloat32m1_t _sum1 = _bias0;
vfloat32m1_t _r00 = vle32_v_f32m1(r0, vl);
vfloat32m1_t _r01 = vle32_v_f32m1(r0 + packn, vl);
vfloat32m1_t _r02 = vle32_v_f32m1(r0 + packn * 2, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k00, _r00, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k01, _r01, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k02, _r02, vl);
vfloat32m1_t _r10 = vle32_v_f32m1(r1, vl);
vfloat32m1_t _r11 = vle32_v_f32m1(r1 + packn, vl);
vfloat32m1_t _r12 = vle32_v_f32m1(r1 + packn * 2, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k10, _r10, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k11, _r11, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k12, _r12, vl);
_sum1 = vfmacc_vv_f32m1(_sum1, _k00, _r10, vl);
_sum1 = vfmacc_vv_f32m1(_sum1, _k01, _r11, vl);
_sum1 = vfmacc_vv_f32m1(_sum1, _k02, _r12, vl);
vfloat32m1_t _r20 = vle32_v_f32m1(r2, vl);
vfloat32m1_t _r21 = vle32_v_f32m1(r2 + packn, vl);
vfloat32m1_t _r22 = vle32_v_f32m1(r2 + packn * 2, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k20, _r20, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k21, _r21, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k22, _r22, vl);
_sum1 = vfmacc_vv_f32m1(_sum1, _k10, _r20, vl);
_sum1 = vfmacc_vv_f32m1(_sum1, _k11, _r21, vl);
_sum1 = vfmacc_vv_f32m1(_sum1, _k12, _r22, vl);
vfloat32m1_t _r30 = vle32_v_f32m1(r3, vl);
vfloat32m1_t _r31 = vle32_v_f32m1(r3 + packn, vl);
vfloat32m1_t _r32 = vle32_v_f32m1(r3 + packn * 2, vl);
_sum1 = vfmacc_vv_f32m1(_sum1, _k20, _r30, vl);
_sum1 = vfmacc_vv_f32m1(_sum1, _k21, _r31, vl);
_sum1 = vfmacc_vv_f32m1(_sum1, _k22, _r32, vl);
vse32_v_f32m1(outptr0, _sum0, vl);
vse32_v_f32m1(outptr1, _sum1, vl);
outptr0 += packn;
outptr1 += packn;
r0 += packn;
r1 += packn;
r2 += packn;
r3 += packn;
}
// skip the 2-column window overhang plus one whole input row (input rows
// advance by two per tile row); output pointers hop over the row that
// outptr1 already wrote
r0 += 2 * packn + w * packn;
r1 += 2 * packn + w * packn;
r2 += 2 * packn + w * packn;
r3 += 2 * packn + w * packn;
outptr0 += outw * packn;
outptr1 += outw * packn;
}
// row remainder: a single final output row (uses r0..r2 only)
for (; i < outh; i++)
{
int j = 0;
for (; j + 1 < outw; j += 2)
{
vfloat32m1_t _sum00 = _bias0;
vfloat32m1_t _sum01 = _bias0;
vfloat32m1_t _r00 = vle32_v_f32m1(r0, vl);
vfloat32m1_t _r01 = vle32_v_f32m1(r0 + packn, vl);
vfloat32m1_t _r02 = vle32_v_f32m1(r0 + packn * 2, vl);
vfloat32m1_t _r03 = vle32_v_f32m1(r0 + packn * 3, vl);
_sum00 = vfmacc_vv_f32m1(_sum00, _k00, _r00, vl);
_sum00 = vfmacc_vv_f32m1(_sum00, _k01, _r01, vl);
_sum00 = vfmacc_vv_f32m1(_sum00, _k02, _r02, vl);
_sum01 = vfmacc_vv_f32m1(_sum01, _k00, _r01, vl);
_sum01 = vfmacc_vv_f32m1(_sum01, _k01, _r02, vl);
_sum01 = vfmacc_vv_f32m1(_sum01, _k02, _r03, vl);
vfloat32m1_t _r10 = vle32_v_f32m1(r1, vl);
vfloat32m1_t _r11 = vle32_v_f32m1(r1 + packn, vl);
vfloat32m1_t _r12 = vle32_v_f32m1(r1 + packn * 2, vl);
vfloat32m1_t _r13 = vle32_v_f32m1(r1 + packn * 3, vl);
_sum00 = vfmacc_vv_f32m1(_sum00, _k10, _r10, vl);
_sum00 = vfmacc_vv_f32m1(_sum00, _k11, _r11, vl);
_sum00 = vfmacc_vv_f32m1(_sum00, _k12, _r12, vl);
_sum01 = vfmacc_vv_f32m1(_sum01, _k10, _r11, vl);
_sum01 = vfmacc_vv_f32m1(_sum01, _k11, _r12, vl);
_sum01 = vfmacc_vv_f32m1(_sum01, _k12, _r13, vl);
vfloat32m1_t _r20 = vle32_v_f32m1(r2, vl);
vfloat32m1_t _r21 = vle32_v_f32m1(r2 + packn, vl);
vfloat32m1_t _r22 = vle32_v_f32m1(r2 + packn * 2, vl);
vfloat32m1_t _r23 = vle32_v_f32m1(r2 + packn * 3, vl);
_sum00 = vfmacc_vv_f32m1(_sum00, _k20, _r20, vl);
_sum00 = vfmacc_vv_f32m1(_sum00, _k21, _r21, vl);
_sum00 = vfmacc_vv_f32m1(_sum00, _k22, _r22, vl);
_sum01 = vfmacc_vv_f32m1(_sum01, _k20, _r21, vl);
_sum01 = vfmacc_vv_f32m1(_sum01, _k21, _r22, vl);
_sum01 = vfmacc_vv_f32m1(_sum01, _k22, _r23, vl);
vse32_v_f32m1(outptr0, _sum00, vl);
vse32_v_f32m1(outptr0 + packn, _sum01, vl);
outptr0 += packn * 2;
r0 += packn * 2;
r1 += packn * 2;
r2 += packn * 2;
}
for (; j < outw; j++)
{
vfloat32m1_t _sum0 = _bias0;
vfloat32m1_t _r00 = vle32_v_f32m1(r0, vl);
vfloat32m1_t _r01 = vle32_v_f32m1(r0 + packn, vl);
vfloat32m1_t _r02 = vle32_v_f32m1(r0 + packn * 2, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k00, _r00, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k01, _r01, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k02, _r02, vl);
vfloat32m1_t _r10 = vle32_v_f32m1(r1, vl);
vfloat32m1_t _r11 = vle32_v_f32m1(r1 + packn, vl);
vfloat32m1_t _r12 = vle32_v_f32m1(r1 + packn * 2, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k10, _r10, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k11, _r11, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k12, _r12, vl);
vfloat32m1_t _r20 = vle32_v_f32m1(r2, vl);
vfloat32m1_t _r21 = vle32_v_f32m1(r2 + packn, vl);
vfloat32m1_t _r22 = vle32_v_f32m1(r2 + packn * 2, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k20, _r20, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k21, _r21, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k22, _r22, vl);
vse32_v_f32m1(outptr0, _sum0, vl);
outptr0 += packn;
r0 += packn;
r1 += packn;
r2 += packn;
}
// skip the 2-column window overhang to reach the next input row
r0 += 2 * packn;
r1 += 2 * packn;
r2 += 2 * packn;
}
}
}
// Depthwise 3x3 convolution, stride 2, over packn-packed fp32 data using
// RISC-V Vector intrinsics. One channel group per omp iteration; the inner
// loop produces two output columns per iteration (consuming four input
// columns), with a single-column remainder loop.
// NOTE(review): r0..r2 are read without bounds checks, so bottom_blob is
// presumably already padded for the 3x3 window -- confirm against the caller.
static void convdw3x3s2_packn_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
// packn = number of fp32 lanes per packed element (vector length bytes / 4)
const int packn = csrr_vlenb() / 4;
const word_type vl = vsetvl_e32m1(packn);
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
// bytes-free step (in packed elements) from the end of one consumed input
// row to the start of the next: leftover columns plus one skipped row
const int tailstep = (w - 2 * outw + w) * packn;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat out = top_blob.channel(g);
// per-group bias vector; all-zero when no bias tensor was supplied
vfloat32m1_t _bias0 = bias ? vle32_v_f32m1(bias + g * packn, vl) : vfmv_v_f_f32m1(0.f, vl);
const float* k0 = kernel.row(g);
float* outptr0 = out;
const Mat img0 = bottom_blob.channel(g);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
// preload the nine 3x3 kernel taps for this group
vfloat32m1_t _k00 = vle32_v_f32m1(k0, vl);
vfloat32m1_t _k01 = vle32_v_f32m1(k0 + packn, vl);
vfloat32m1_t _k02 = vle32_v_f32m1(k0 + packn * 2, vl);
vfloat32m1_t _k10 = vle32_v_f32m1(k0 + packn * 3, vl);
vfloat32m1_t _k11 = vle32_v_f32m1(k0 + packn * 4, vl);
vfloat32m1_t _k12 = vle32_v_f32m1(k0 + packn * 5, vl);
vfloat32m1_t _k20 = vle32_v_f32m1(k0 + packn * 6, vl);
vfloat32m1_t _k21 = vle32_v_f32m1(k0 + packn * 7, vl);
vfloat32m1_t _k22 = vle32_v_f32m1(k0 + packn * 8, vl);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
// two output columns per iteration; windows start at input cols 0 and 2
for (; j + 1 < outw; j += 2)
{
vfloat32m1_t _sum00 = _bias0;
vfloat32m1_t _sum01 = _bias0;
vfloat32m1_t _r00 = vle32_v_f32m1(r0, vl);
vfloat32m1_t _r01 = vle32_v_f32m1(r0 + packn, vl);
vfloat32m1_t _r02 = vle32_v_f32m1(r0 + packn * 2, vl);
vfloat32m1_t _r03 = vle32_v_f32m1(r0 + packn * 3, vl);
vfloat32m1_t _r04 = vle32_v_f32m1(r0 + packn * 4, vl);
_sum00 = vfmacc_vv_f32m1(_sum00, _k00, _r00, vl);
_sum00 = vfmacc_vv_f32m1(_sum00, _k01, _r01, vl);
_sum00 = vfmacc_vv_f32m1(_sum00, _k02, _r02, vl);
_sum01 = vfmacc_vv_f32m1(_sum01, _k00, _r02, vl);
_sum01 = vfmacc_vv_f32m1(_sum01, _k01, _r03, vl);
_sum01 = vfmacc_vv_f32m1(_sum01, _k02, _r04, vl);
vfloat32m1_t _r10 = vle32_v_f32m1(r1, vl);
vfloat32m1_t _r11 = vle32_v_f32m1(r1 + packn, vl);
vfloat32m1_t _r12 = vle32_v_f32m1(r1 + packn * 2, vl);
vfloat32m1_t _r13 = vle32_v_f32m1(r1 + packn * 3, vl);
vfloat32m1_t _r14 = vle32_v_f32m1(r1 + packn * 4, vl);
_sum00 = vfmacc_vv_f32m1(_sum00, _k10, _r10, vl);
_sum00 = vfmacc_vv_f32m1(_sum00, _k11, _r11, vl);
_sum00 = vfmacc_vv_f32m1(_sum00, _k12, _r12, vl);
_sum01 = vfmacc_vv_f32m1(_sum01, _k10, _r12, vl);
_sum01 = vfmacc_vv_f32m1(_sum01, _k11, _r13, vl);
_sum01 = vfmacc_vv_f32m1(_sum01, _k12, _r14, vl);
vfloat32m1_t _r20 = vle32_v_f32m1(r2, vl);
vfloat32m1_t _r21 = vle32_v_f32m1(r2 + packn, vl);
vfloat32m1_t _r22 = vle32_v_f32m1(r2 + packn * 2, vl);
vfloat32m1_t _r23 = vle32_v_f32m1(r2 + packn * 3, vl);
vfloat32m1_t _r24 = vle32_v_f32m1(r2 + packn * 4, vl);
_sum00 = vfmacc_vv_f32m1(_sum00, _k20, _r20, vl);
_sum00 = vfmacc_vv_f32m1(_sum00, _k21, _r21, vl);
_sum00 = vfmacc_vv_f32m1(_sum00, _k22, _r22, vl);
_sum01 = vfmacc_vv_f32m1(_sum01, _k20, _r22, vl);
_sum01 = vfmacc_vv_f32m1(_sum01, _k21, _r23, vl);
_sum01 = vfmacc_vv_f32m1(_sum01, _k22, _r24, vl);
vse32_v_f32m1(outptr0, _sum00, vl);
vse32_v_f32m1(outptr0 + packn, _sum01, vl);
outptr0 += packn * 2;
// stride 2, two outputs -> advance input by four packed columns
r0 += packn * 4;
r1 += packn * 4;
r2 += packn * 4;
}
// single-column remainder
for (; j < outw; j++)
{
vfloat32m1_t _sum0 = _bias0;
vfloat32m1_t _r00 = vle32_v_f32m1(r0, vl);
vfloat32m1_t _r01 = vle32_v_f32m1(r0 + packn, vl);
vfloat32m1_t _r02 = vle32_v_f32m1(r0 + packn * 2, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k00, _r00, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k01, _r01, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k02, _r02, vl);
vfloat32m1_t _r10 = vle32_v_f32m1(r1, vl);
vfloat32m1_t _r11 = vle32_v_f32m1(r1 + packn, vl);
vfloat32m1_t _r12 = vle32_v_f32m1(r1 + packn * 2, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k10, _r10, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k11, _r11, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k12, _r12, vl);
vfloat32m1_t _r20 = vle32_v_f32m1(r2, vl);
vfloat32m1_t _r21 = vle32_v_f32m1(r2 + packn, vl);
vfloat32m1_t _r22 = vle32_v_f32m1(r2 + packn * 2, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k20, _r20, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k21, _r21, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k22, _r22, vl);
vse32_v_f32m1(outptr0, _sum0, vl);
outptr0 += packn;
r0 += packn * 2;
r1 += packn * 2;
r2 += packn * 2;
}
// jump over the skipped stride-2 columns plus the next (skipped) input row
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
}
}
|
lsearch.h | #pragma once
#ifndef FGC_LOCAL_SEARCH_H__
#define FGC_LOCAL_SEARCH_H__
#include "diskmat/diskmat.h"
#include "minicore/util/oracle.h"
#include "minicore/optim/kcenter.h"
#include "discreture/include/discreture.hpp"
#include "libsimdsampling/argminmax.h"
#include <atomic>
/*
* In this file, we use the local search heuristic for k-median.
* Originally described in "Local Search Heuristics for k-median and Facility Location Problems",
* Vijay Arya, Naveen Garg, Rohit Khandekar, Adam Meyerson, Kamesh Munagala, Vinayaka Pandit
* (http://theory.stanford.edu/~kamesh/lsearch.pdf)
*/
namespace minicore {
namespace graph {
template<typename MatType, typename IType=std::uint32_t, size_t N=16>
struct ExhaustiveSearcher {
    // Brute-force k-median solver: tries every (nrows choose k) subset of the
    // rows of mat_ as the center set and keeps the cheapest one.
    using value_type = typename MatType::ElementType;
    const MatType &mat_;                  // distance matrix: rows = candidate centers, columns = points
    blaze::SmallArray<IType, N> bestsol_; // best center subset found so far
    double current_cost_;                 // cost of bestsol_
    const unsigned k_;                    // number of centers to select
    // BUGFIX: bestsol_ was previously initialized with k_, but members are
    // constructed in declaration order and k_ is declared *after* bestsol_,
    // so k_ was read uninitialized. Use the constructor parameter instead.
    ExhaustiveSearcher(const MatType &mat, unsigned k): mat_(mat), bestsol_(k), current_cost_(std::numeric_limits<double>::max()), k_(k) {}
    // Enumerate all k-subsets of rows, tracking the minimum k-median
    // objective (sum over columns of the column-wise minimum distance).
    void run() {
        blaze::SmallArray<IType, N> csol(k_);
        const size_t nr = mat_.rows();
        size_t nchecked = 0;
        for(auto &&comb: discreture::combinations(nr, k_)) {
            const double cost = blaze::sum(blaze::min<blaze::columnwise>(rows(mat_, comb.data(), comb.size())));
            ++nchecked;
            // progress report at powers of two
            if((nchecked & (nchecked - 1)) == 0)
                std::fprintf(stderr, "iteration %zu completed\n", nchecked);
            if(cost < current_cost_) {
                std::fprintf(stderr, "Swapping to new center set with new cost = %g on iteration %zu\n", cost, nchecked);
                current_cost_ = cost;
                std::copy(comb.data(), comb.data() + comb.size(), bestsol_.data());
            }
        }
        std::fprintf(stderr, "Best result: %g. Total number of combinations checked: %zu\n", current_cost_, nchecked);
    }
};
template<typename MatType, typename IType=std::uint32_t>
auto make_kmed_esearcher(const MatType &mat, unsigned k) {
return ExhaustiveSearcher<MatType, IType>(mat, k);
}
// Arya et al. style local-search k-median optimizer over a precomputed
// distance matrix (rows = candidate centers, columns = points).
template<typename MatType, typename IType>
struct LocalKMedSearcher {
using value_type = typename MatType::ElementType;
static_assert(std::is_integral_v<IType>, "IType must be integral")
;
const MatType &mat_;
// current solution: the set of selected center (row) indices
shared::flat_hash_set<IType> sol_;
// per-point index of its assigned (nearest known) center
blaze::DynamicVector<IType> assignments_;
// per-point distance to its assigned center (lazily maintained)
blaze::DynamicVector<typename MatType::ElementType, blaze::rowVector> current_costs_;
double current_cost_;
double eps_, initial_cost_, init_cost_div_;
IType k_;
// cached matrix dimensions (rows, columns)
const size_t nr_, nc_;
// minimum cost improvement required to accept a swap
double diffthresh_;
// candidate scan order over rows; shuffled per swap when shuffle_ is set
blaze::DynamicVector<IType> ordering_;
uint32_t shuffle_:1;
// Set to 0 to avoid lazy search, 1 to only do local search, and 2 to do lazy search and then use exhaustive
uint32_t lazy_eval_:15;
uint32_t max_swap_n_:16;
// if(max_swap_n_ > 1), after exhaustive single-swap optimization, enables multiswap search.
// TODO: enable searches for multiswaps.
// Constructors
LocalKMedSearcher(const LocalKMedSearcher &o) = default;
// Byte-wise move: zero *this, then swap raw bytes with o, leaving o all-zero.
// BUGFIX: (1) the byte pointers were reinterpret_cast to *const* uint8_t*,
// which std::memset and the written ranges of std::swap_ranges cannot accept,
// so the ctor failed to compile when instantiated; (2) the reference member
// mat_ was never initialized, which makes any constructor ill-formed -- bind
// it to o's matrix (the byte swap then reproduces the same value).
// NOTE(review): byte-swapping non-trivial members (flat_hash_set,
// DynamicVector) is technically UB; this preserves the original intent but a
// member-wise move would be the robust fix.
LocalKMedSearcher(LocalKMedSearcher &&o): mat_(o.mat_) {
    auto ptr = reinterpret_cast<uint8_t *>(this);
    std::memset(ptr, 0, sizeof(*this));
    std::swap_ranges(ptr, ptr + sizeof(*this), reinterpret_cast<uint8_t *>(std::addressof(o)));
}
// Construct a searcher over `mat` for `k` centers.
// eps     : relative improvement threshold for accepting swaps (see run()).
// seed    : RNG seed for the initial center selection.
// wc      : optional index container used for kcenter-style seeding on
//           asymmetric matrices (see reseed()).
// initdiv : divisor for the initial cost normalization; defaults to the
//           number of columns (points) when 0.
template<typename IndexContainer=std::vector<uint32_t>>
LocalKMedSearcher(const MatType &mat, unsigned k, double eps=1e-8, uint64_t seed=0,
const IndexContainer *wc=nullptr, double initdiv=0.):
mat_(mat), assignments_(mat.columns(), 0),
// center_indices_(k),
//costs_(mat.columns(), std::numeric_limits<value_type>::max()),
//counts_(k),
current_cost_(std::numeric_limits<value_type>::max()),
eps_(eps),
k_(k), nr_(mat.rows()), nc_(mat.columns()),
ordering_(mat.rows()), shuffle_(true), lazy_eval_(2), max_swap_n_(1)
{
// candidate scan order starts as the identity permutation
std::iota(ordering_.begin(), ordering_.end(), 0);
// decltype is unevaluated, so dereferencing wc here is safe even when nullptr
static_assert(std::is_integral_v<std::decay_t<decltype(wc->operator[](0))>>, "index container must contain integral values");
sol_.reserve(k);
init_cost_div_ = initdiv ? initdiv: double(mat.columns());
reseed(seed, true, wc);
}
// Overwrite the current solution with the centers in [start, end) and zero
// every point's assignment (assignments are recomputed by assign()).
template<typename It>
void assign_centers(It start, It end) {
    sol_.clear();
    for(It cur = start; cur != end; ++cur)
        sol_.insert(*cur);
    assignments_ = 0;
}
template<typename RNG>
std::pair<shared::flat_hash_set<IType>, value_type> my_kcenter(RNG &rng, int extratries=2) {
shared::flat_hash_set<IType> ret;
ret.clear();
ret.insert(rng() % mat_.rows());
auto cid = *ret.begin();
constexpr bool rowitude = blz::IsRowMajorMatrix_v<MatType> ? blz::rowVector: blz::columnVector;
blz::DV<blz::ElementType_t<MatType>, rowitude> costs = row(mat_, cid);
using FT = value_type;
using IT = IType;
using PT = std::pair<FT, IT>;
auto fill_set = [&](auto &set) -> value_type {
while(set.size() < std::min(size_t(k_), mat_.rows())) {
PT argmaxcost;
argmaxcost.second = reservoir_simd::argmax(costs);
argmaxcost.first = costs[argmaxcost.second];
assert(set.find(argmaxcost.second) == set.end());
set.insert(argmaxcost.second);
costs = blz::min(row(mat_, argmaxcost.second), costs);
}
return blz::sum(costs);
};
value_type retcost = fill_set(ret), nextcost;
while(extratries > 0) {
IType cid = rng() % mat_.rows();
shared::flat_hash_set<IType> tmpset{cid};
costs = row(mat_, cid);
if((nextcost = fill_set(tmpset)) < retcost)
std::tie(nextcost, tmpset) = std::move(std::tie(retcost, ret));
}
return std::make_pair(ret, retcost);
}
// Reset the solution and pick a fresh initial center set.
// Strategy: if there are at most k rows, take them all; else if the matrix
// is square and do_kcenter is set, use greedy kcenter seeding (my_kcenter);
// else if an index container wc covering all columns is supplied, run a
// kcenter-like approximation on the induced submatrix; otherwise fall back
// to uniform random centers.
template<typename IndexContainer=std::vector<uint32_t>>
void reseed(uint64_t seed, bool do_kcenter=false, const IndexContainer *wc=nullptr, unsigned extra_kc=0) {
assignments_ = 0;
current_cost_ = std::numeric_limits<value_type>::max();
wy::WyRand<IType, 2> rng(seed);
sol_.clear();
// degenerate case: every row becomes a center
if(mat_.rows() <= k_) {
for(unsigned i = 0; i < mat_.rows(); ++i)
sol_.insert(i);
} else if(do_kcenter && mat_.rows() == mat_.columns()) {
std::fprintf(stderr, "Using kcenter\n");
std::tie(sol_, current_cost_) = my_kcenter(rng, extra_kc)
;
#ifndef NDEBUG
std::fprintf(stderr, "k_: %u. sol size: %zu. rows: %zu. columns: %zu. %d kcenter tries.\n", k_, sol_.size(),
mat_.rows(), mat_.columns(), extra_kc);
#endif
assert(sol_.size() == k_ || sol_.size() == mat_.rows());
} else {
if(!do_kcenter || wc == nullptr || wc->size() != mat_.columns()) {
// fallback: uniform random distinct centers
while(sol_.size() < k_)
sol_.insert(rng() % mat_.rows());
} else {
//std::fprintf(stderr, "Using submatrix to perform kcenter approximation on an asymmetric matrix. rows/cols before: %zu, %zu\n", mat_.rows(), mat_.columns());
blaze::DynamicMatrix<value_type> subm = blaze::rows(mat_, wc->data(), wc->size());
//std::cerr << subm << '\n';
//std::fprintf(stderr, "subm rows: %zu\n", subm.rows());
uint32_t first = rng() % subm.rows();
std::vector<uint32_t> approx{first};
blaze::DynamicVector<value_type, blaze::rowVector> mincosts = row(subm, first);
std::vector<uint32_t> remaining(subm.rows());
std::iota(remaining.begin(), remaining.end(), 0u);
while(approx.size() < std::min(subm.rows(), size_t(k_))) {
//std::fputc('\n', stderr);
double maxcost = -1.;
unsigned maxind = -1;
for(unsigned i = 0; i < remaining.size(); ++i) {
auto ri = remaining[i];
if(std::find(approx.begin(), approx.end(), ri) != approx.end()) continue;
auto r = row(subm, ri);
// NOTE(review): the candidate is scored by the max over its whole row,
// while mincosts (distance to current centers) is maintained below but
// never consulted for selection -- this looks suspicious for a
// farthest-point traversal; confirm the intent.
auto cost = blaze::max(r);
if(cost > maxcost) maxcost = cost, maxind = i;
}
auto nextind = remaining[maxind];
approx.push_back(nextind);
// swap-and-pop removal of the chosen candidate
std::swap(remaining[maxind], remaining.back());
remaining.pop_back();
mincosts = blaze::min(mincosts, row(subm, nextind));
}
// translate submatrix indices back to original row ids
for(auto i: approx)
sol_.insert(wc->at(i));
while(sol_.size() < k_) {
// Add random entries until desired sizeA
sol_.insert(rng() % mat_.rows());
}
//std::fprintf(stderr, "used submatrix. sol size: %zu\n", sol_.size());
}
}
}
template<typename Container>
double cost_for_sol(const Container &c) const {
double ret = 0.;
OMP_PRAGMA("omp parallel for reduction(+:ret)")
for(size_t i = 0; i < mat_.columns(); ++i) {
auto col = column(mat_, i);
auto it = c.begin();
value_type minv = col[*it];
while(++it != c.end())
minv = std::min(col[*it], minv);
ret += minv;
}
return ret;
}
// Setup/Utilities
// Recompute per-point assignments and cached costs from the current center
// set sol_, then refresh current_cost_ and the initial-cost normalizer.
void assign() {
assert(assignments_.size() == nc_);
std::fprintf(stderr, "rows: %zu. cols: %zu. sol size: %zu. k: %u\n",
mat_.rows(), mat_.columns(), sol_.size(), k_);
assert(sol_.size() == k_ || sol_.size() == mat_.rows());
auto it = sol_.begin();
const auto eit = sol_.end();
// seed every point with the first center, then relax with the rest
assignments_ = *it;
current_costs_ = row(mat_, *it);
while(++it != eit) {
auto center = *it;
auto r = row(mat_, center);
OMP_PFOR
for(size_t ci = 0; ci < nc_; ++ci) {
auto asn = assignments_[ci];
// reassign point ci if this center is strictly closer
if(const auto newcost = r[ci];
newcost < mat_(asn, ci))
{
current_costs_[ci] = newcost;
assignments_[ci] = center;
}
}
}
DBG_ONLY(std::fprintf(stderr, "Set assignments for size %zu\n", assignments_.size());)
current_cost_ = cost_for_sol(sol_);
DBG_ONLY(std::fprintf(stderr, "Got costs for size %zu with centers size = %zu\n", assignments_.size(), sol_.size());)
// basis for the swap-acceptance threshold computed in run()
initial_cost_ = current_cost_ / 2 / init_cost_div_;
}
// Exact cost improvement (positive = better) obtained by replacing
// `oldcenter` with `newcenter` in the current solution.
double evaluate_swap(IType newcenter, IType oldcenter, bool single_threaded=false) const {
    blaze::SmallArray<IType, 16> candidate(sol_.begin(), sol_.end());
    *std::find(candidate.begin(), candidate.end(), oldcenter) = newcenter;
    double candidate_cost;
    if(single_threaded)
        candidate_cost = blaze::serial(blaze::sum(blaze::serial(blaze::min<blaze::columnwise>(rows(mat_, candidate)))));
    else
        candidate_cost = blaze::sum(blaze::min<blaze::columnwise>(rows(mat_, candidate)));
    return current_cost_ - candidate_cost;
}
template<size_t N, typename IndexType>
double evaluate_multiswap(const IndexType *newcenter, const IndexType *oldcenter, bool single_threaded=false) const {
blaze::SmallArray<IType, 16> as(sol_.begin(), sol_.end());
shared::sort(as.begin(), as.end());
for(size_t i = 0; i < N; ++i) {
*std::find(as.begin(), as.end(), oldcenter[i]) = newcenter[i];
}
double cost;
if(single_threaded) {
cost = blaze::serial(blaze::sum(blaze::serial(blaze::min<blaze::columnwise>(rows(mat_, as)))));
} else
cost = blaze::sum(blaze::min<blaze::columnwise>(rows(mat_, as)));
return current_cost_ - cost;
}
template<typename IndexType>
double evaluate_multiswap_rt(const IndexType *newcenter, const IndexType *oldcenter, size_t N, bool single_threaded=false) const {
switch(N) {
case 2: return evaluate_multiswap<2>(newcenter, oldcenter, single_threaded);
case 3: return evaluate_multiswap<3>(newcenter, oldcenter, single_threaded);
}
blaze::SmallArray<IType, 16> as(sol_.begin(), sol_.end());
for(size_t i = 0; i < N; ++i) {
*std::find(as.begin(), as.end(), oldcenter[i]) = newcenter[i];
}
shared::sort(as.begin(), as.end());
double cost;
if(single_threaded) {
cost = blaze::serial(blaze::sum(blaze::serial(blaze::min<blaze::columnwise>(rows(mat_, as)))));
} else
cost = blaze::sum(blaze::min<blaze::columnwise>(rows(mat_, as)));
return current_cost_ - cost;
}
// Lazily estimate the cost improvement of swapping the N centers in
// `oldcenters` for those in `newcenters`, using the cached per-point costs
// instead of recomputing the full objective. Only points that either improve
// under a new center, or were assigned (cost-wise) to a removed center, are
// re-evaluated.
template<size_t N>
double lazy_evaluate_multiswap(const IType *newcenters, const IType *oldcenters) const {
// Instead of performing the full recalculation, do lazy calculation.
//
// tmp = current centers minus the ones being swapped out
std::vector<IType> tmp(sol_.begin(), sol_.end());
for(unsigned i = 0; i < N; ++i)
tmp.erase(std::find(tmp.begin(), tmp.end(), oldcenters[i]));
std::sort(tmp.begin(), tmp.end());
// Instead of performing the full recalculation, do lazy calculation.
if(current_costs_.size() != nc_) { // If not calculated, calculate
auto it = sol_.begin();
OMP_CRITICAL
{
current_costs_ = row(mat_, *it BLAZE_CHECK_DEBUG);
}
while(++it != sol_.end()) {
current_costs_ = blaze::min(current_costs_, row(mat_, *it BLAZE_CHECK_DEBUG));
}
}
// per-point best distance to the incoming / outgoing center groups
blaze::DynamicVector<typename MatType::ElementType, blaze::rowVector> newptr = blaze::min<blaze::rowwise>(rows(mat_, newcenters, N));
blaze::DynamicVector<typename MatType::ElementType, blaze::rowVector> oldptr = blaze::min<blaze::rowwise>(rows(mat_, oldcenters, N));
double diff = 0.;
#ifdef _OPENMP
_Pragma("omp parallel for reduction(+:diff)")
#endif
for(size_t i = 0; i < nc_; ++i) {
auto ccost = current_costs_[i];
if(newptr[i] < ccost) {
// a new center strictly improves this point
auto sub = ccost - newptr[i];
diff += sub;
} else if(ccost == oldptr[i]) {
// point's best cost came from a removed center: re-derive its best
// over the surviving centers and the incoming ones
auto oldbest = blaze::min(blaze::elements(blaze::column(mat_, i), tmp.data(), tmp.size()));
auto sub = ccost - std::min(oldbest, newptr[i]);
diff += sub;
}
}
return diff;
}
// Getters
auto k() const {
return k_;
}
// Local search driven by the cached per-point costs: for each current center,
// scan candidate replacements (in shuffled order when shuffle_ is set),
// lazily estimate the improvement, verify promising swaps exhaustively, and
// restart the scan after each accepted swap. Terminates when no swap beats
// diffthresh_.
void run_lazy() {
#if 0
shared::flat_hash_map<IType, std::vector<IType>> current_assignments;
for(size_t i = 0; i < assignments_.size(); ++i) {
current_assignments[assignments_[i]].push_back(i);
}
#endif
size_t total = 0;
std::vector<IType> newindices(sol_.begin(), sol_.end());
next:
for(const auto oldcenter: sol_) {
newindices.assign(sol_.begin(), sol_.end());
// move the center being swapped out to the back; its slot is overwritten
// with each candidate below
std::swap(*std::find(newindices.begin(), newindices.end(), oldcenter), newindices.back());
if(shuffle_) {
// deterministic shuffle keyed on the swap counter
wy::WyRand<uint64_t, 2> rng(total);
std::shuffle(ordering_.begin(), ordering_.end(), rng);
}
// Make a vector with the original solution, but replace the old value with the new value
for(size_t pi = 0; pi < nr_; ++pi) {
auto potential_index = ordering_[pi];
if(sol_.find(potential_index) != sol_.end() || potential_index == oldcenter) continue;
newindices.back() = potential_index;
assert(std::find(newindices.begin(), newindices.end(), oldcenter) == newindices.end());
double val = 0.;
auto newptr = row(mat_, potential_index);
#ifdef _OPENMP
#pragma omp parallel for reduction(+:val)
#endif
// lazy improvement estimate: only points improved by the candidate or
// currently assigned to the outgoing center contribute
for(size_t i = 0; i < nc_; ++i) {
auto oldcost = current_costs_[i];
if(newptr[i] < oldcost) {
auto diff = oldcost - newptr[i];
val += diff;
} else if(assignments_[i] == oldcenter) {
auto mincost = blaze::min(blaze::elements(blaze::column(mat_, i), newindices.data(), newindices.size()));
auto diff = oldcost - mincost;
val += diff;
}
}
assert(sol_.size() == k_);
// Only calculate exhaustively if the lazy form returns yes.
if(val > diffthresh_ && (val = evaluate_swap(potential_index, oldcenter)) > diffthresh_) {
assert(sol_.size() == k_);
sol_.erase(oldcenter)
;
sol_.insert(potential_index);
assert(sol_.size() == k_);
assign();
//current_cost_ = blaze::sum(current_costs_);
++total;
std::fprintf(stderr, "Swap number %zu updated with delta %.12g to new cost with cost %0.12g\n", total, val, current_cost_);
goto next;
}
}
}
std::fprintf(stderr, "Finished in %zu swaps by exhausting all potential improvements. Final cost: %f\n",
total, current_cost_);
}
// Multi-swap local search: attempts to improve the solution by exchanging
// `nswap` centers at a time, restarting the scan after each accepted swap.
// NOTE(review): this routine looks unfinished (cf. the TODO on max_swap_n_):
// `swap_in` is sized but never populated (its elements are default/zero),
// `inargs`/`outargs` are unused, and the values handed to
// evaluate_multiswap_rt are combination *indices* (0..size-1) rather than the
// center ids held in csol/swap_in. Confirm before relying on nswap > 1.
void run_multi(unsigned nswap=1) {
if(mat_.rows() <= k_) return;
// single swaps are handled by the main run() loop
if(nswap == 1) {
run();
return;
}
if(nswap >= k_) throw std::runtime_error("nswap >= k_");
assign();
const double diffthresh = initial_cost_ / k_ * eps_;
diffthresh_ = diffthresh;
next:
{
blaze::DynamicVector<IType> csol(sol_.size());
std::copy(sol_.begin(), sol_.end(), csol.data());
blaze::DynamicVector<IType> swap_in(nc_ - sol_.size());
blaze::DynamicVector<IType> inargs(nswap), outargs(nswap);
// enumerate all nswap-subsets of outgoing and incoming candidates
for(auto &&swap_out_comb: discreture::combinations(csol.size(), nswap)) {
for(auto &&swap_in_comb: discreture::combinations(swap_in.size(), nswap)) {
auto v = evaluate_multiswap_rt(swap_in_comb.data(), swap_out_comb.data(), nswap);
if(v >= diffthresh_) {
// accept: apply the swap and restart the enumeration
for(auto v: swap_out_comb) sol_.erase(v);
sol_.insert(swap_in_comb.begin(), swap_in_comb.end());
current_cost_ -= v;
goto next;
}
}
}
}
}
// Main entry point: computes assignments and the swap-acceptance threshold,
// then runs the configured search phases -- lazy search (lazy_eval_ >= 1,
// returning early unless lazy_eval_ > 1 means "lazy only"... note the early
// return fires when lazy_eval_ > 1), then exhaustive single-swap search, then
// optional multiswap search when max_swap_n_ > 1.
void run() {
assign();
// accept a swap only if it improves cost by at least this much
const double diffthresh = initial_cost_ / k_ * eps_;
diffthresh_ = diffthresh;
if(mat_.rows() <= k_) return;
if(lazy_eval_) {
run_lazy();
if(lazy_eval_ > 1)
return;
}
//const double diffthresh = 0.;
std::fprintf(stderr, "diffthresh: %f\n", diffthresh);
size_t total = 0;
next:
// exhaustive single-swap search: restart the scan after every accepted swap
for(const auto oldcenter: sol_) {
if(shuffle_) {
// deterministic shuffle keyed on the swap counter
wy::WyRand<uint64_t, 2> rng(total);
std::shuffle(ordering_.begin(), ordering_.end(), rng);
}
std::vector<IType> newindices(sol_.begin(), sol_.end());
for(size_t pi = 0; pi < nr_; ++pi) {
size_t potential_index = ordering_[pi];
if(sol_.find(potential_index) != sol_.end()) continue;
if(const auto val = evaluate_swap(potential_index, oldcenter, true);
val > diffthresh) {
#ifndef NDEBUG
std::fprintf(stderr, "Swapping %zu for %u. Swap number %zu. Current cost: %g. Improvement: %g. Threshold: %g.\n", potential_index, oldcenter, total + 1, current_cost_, val, diffthresh);
#endif
sol_.erase(oldcenter);
sol_.insert(potential_index);
++total;
current_cost_ -= val;
std::fprintf(stderr, "Swap number %zu with cost %0.12g\n", total, current_cost_);
goto next;
}
}
}
std::fprintf(stderr, "Finished in %zu swaps by exhausting all potential improvements. Final cost: %f\n",
total, current_cost_);
if(max_swap_n_ > 1) {
std::fprintf(stderr, "max_swap_n_ %u set. Searching multiswaps\n", max_swap_n_);
run_multi(max_swap_n_);
}
}
// Brute-force sanity check: starting from the current solution, tries every
// (solution slot, candidate row) single replacement exhaustively, greedily
// keeps any strictly better solution, and repeats until no replacement
// improves the cost. Updates current_cost_ with the best cost found.
void exhaustive_manual_check() {
    const std::vector<IType> csol(sol_.begin(), sol_.end());  // frozen starting solution
    std::vector<IType> wsol = csol, fsol = csol;  // working / best-so-far solutions
    double ccost = current_cost_;
#ifndef NDEBUG
    double ocost = current_cost_;
#endif
    size_t extra_rounds = 0;
    bool improvement_made;
    start:
    improvement_made = false;
    for(size_t si = 0; si < k_; ++si) {
        for(size_t ci = 0; ci < nr_; ++ci) {
            if(std::find(wsol.begin(), wsol.end(), ci) != wsol.end()) continue;  // already selected
            wsol[si] = ci;  // tentatively put row ci into slot si
            // Cost of the candidate: each point charged its closest selected row.
            const double cost = blaze::sum(blaze::min<blaze::columnwise>(rows(mat_, wsol)));
            if(cost < ccost) {
                std::fprintf(stderr, "Found a better one: %g vs %g (%g)\n", cost, ccost, ccost - cost);
                ccost = cost;
                fsol = wsol;
                wsol = fsol;  // NOTE(review): no-op immediately after fsol = wsol — confirm intent
                improvement_made = true;
                ++extra_rounds;
                goto start;  // restart the scan from the improved solution
            }
        }
        // NOTE(review): restores from the ORIGINAL csol, not the best-so-far
        // fsol — a slot improved in an earlier round can be reverted here
        // once its rescan finds nothing better; verify this is intended.
        wsol[si] = csol[si];
    }
    // NOTE(review): any improvement jumps to start above, so this re-check
    // can only see improvement_made == false.
    if(improvement_made) goto start;
    current_cost_ = ccost;
#ifndef NDEBUG
    std::fprintf(stderr, "improved cost for %zu rounds and a total improvemnet of %g\n", extra_rounds, ocost - current_cost_);
    //assert(std::abs(ocost - current_cost_) < ((initial_cost_ / k_ * eps_) + 0.1)); // 1e-5 for numeric stability issues
#endif
}
};
// Factory helper: deduces the matrix type and returns a LocalKMedSearcher
// over it. `wc` is an optional pointer to an index/weight container and
// `initdiv` an initial-division parameter; both are forwarded verbatim to
// the LocalKMedSearcher constructor (see that class for their semantics).
template<typename Mat, typename IType=std::uint32_t, typename IndexContainer=std::vector<uint32_t>>
auto make_kmed_lsearcher(const Mat &mat, unsigned k, double eps=0.01, uint64_t seed=0,
                         const IndexContainer *wc=nullptr, double initdiv=0.) {
    return LocalKMedSearcher<Mat, IType>(mat, k, eps, seed, wc, initdiv);
}
} // graph
using graph::make_kmed_esearcher;
using graph::make_kmed_lsearcher;
using graph::LocalKMedSearcher;
using graph::ExhaustiveSearcher;
} // minicore
#endif /* FGC_LOCAL_SEARCH_H__ */
|
timer.h | #ifndef timer_h
#define timer_h
#include <iomanip>
#include <iostream>
#include <map>
#include <string>
#include <sys/time.h>
#include <unistd.h>
namespace exafmm_t {
  static const int stringLength = 20;                          //!< Length of formatted string
  static const int decimal = 7;                                //!< Decimal precision
  static const int wait = 100;                                 //!< Waiting time between output of different ranks
  static const int dividerLength = stringLength + decimal + 9; //!< Length of output section divider

  long long flop = 0;                    //!< Global FLOP counter, updated via add_flop()
  timeval time;                          //!< Scratch timestamp shared by start()/stop()
  std::map<std::string, timeval> timer;  //!< Event name -> recorded start time

  //! Print a section header padded with dashes to the standard report width.
  void print(std::string s) {
    // if (!VERBOSE | (MPIRANK != 0)) return;
    s += " ";
    std::cout << "--- " << std::setw(stringLength) << std::left
              << std::setfill('-') << s << std::setw(decimal+1) << "-"
              << std::setfill(' ') << std::endl;
  }

  //! Print a labelled value.
  //! \param s     label, left-padded to stringLength.
  //! \param v     value to print.
  //! \param fixed true -> fixed-point with `decimal` digits;
  //!              false -> scientific notation with 1 digit.
  template<typename T>
  void print(std::string s, T v, bool fixed=true) {
    std::cout << std::setw(stringLength) << std::left << s << " : ";
    if(fixed)
      // BUGFIX: the original streamed std::fixed followed by std::scientific;
      // the second manipulator overwrites the floatfield, so values were
      // always printed in scientific notation. Only fixed is intended here.
      std::cout << std::setprecision(decimal) << std::fixed;
    else
      std::cout << std::setprecision(1) << std::scientific;
    std::cout << v << std::endl;
  }

  //! Print a section divider with `s` centered between runs of dashes.
  void print_divider(std::string s) {
    s.insert(0, " ");
    s.append(" ");
    int halfLength = (dividerLength - s.length()) / 2;
    std::cout << std::string(halfLength, '-') << s
              << std::string(dividerLength-halfLength-s.length(), '-') << std::endl;
  }

  //! Atomically add n to the global FLOP counter (safe inside OpenMP regions).
  void add_flop(long long n) {
#pragma omp atomic update
    flop += n;
  }

  //! Record the current wall-clock time as the start of `event`.
  void start(std::string event) {
    gettimeofday(&time, NULL);
    timer[event] = time;
  }

  //! Return seconds elapsed since start(event); optionally print it.
  //! NOTE(review): the shared `time`/`timer` globals make start/stop
  //! non-reentrant across threads — confirm single-threaded usage.
  double stop(std::string event, bool verbose=true) {
    gettimeofday(&time, NULL);
    double eventTime = time.tv_sec - timer[event].tv_sec +
                       (time.tv_usec - timer[event].tv_usec) * 1e-6;
    if (verbose)
      print(event, eventTime);
    return eventTime;
  }
}
#endif
|
OMP-Jacobi-1D-Naive-Parallel.test.c | #include <stdio.h>
#include <omp.h>
#include <time.h>
#include <stdlib.h>
#include <unistd.h>
#include <getopt.h>
#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>
#include <omp.h>
#include <time.h>
#include <stdlib.h>
#include <assert.h>
bool initedJacobi = false;
int globalSeed = -1;
int cores = -1;
int problemSize = -1, T = -1, lowerBound = -1, upperBound = -1;
double* space[2] = { NULL, NULL }; // space[t][x] for (t,x) in { {0,1} X {lowerBound, ... , upperBound} };
// Smaller of two ints.
int min( int a, int b){
    if( b < a ) return b;
    return a;
}
// Larger of two ints.
int max( int a, int b){
    if( b > a ) return b;
    return a;
}
// One-time global setup: fills in defaults for every parameter still at its
// -1 sentinel (so values set earlier by command-line flags win), derives the
// problem bounds, and pins the OpenMP thread count.
void initJacobi(){
    // if init has not already been called (preserve things like global seed.
    if( ! initedJacobi ){
        // note the convention someVar = ( someVar == -1 )? defaultValue : someVar ;
        // this allows us to use the cmd line flags to set variables, AND have an init call.
        // all values are initialized with -1 in global space, so if someVar == -1, then it has
        // not been set, and and be given a default value.

        // seed for random number generator.
        // allows all initSpace calls to generate the same inital values
        globalSeed = (globalSeed== -1)? time(NULL) : globalSeed;

        // problemSpace parameters
        T = (T == -1)? 100 : T;
        problemSize = (problemSize == -1)? 1000000 : problemSize;
        // halo cell at index 0, so the interior is [1, problemSize]
        lowerBound = 1;
        upperBound = lowerBound + problemSize - 1;
        cores = (cores == -1)? omp_get_num_procs() : cores ;
        omp_set_num_threads( cores );

        // set initialization flag so repeated calls are no-ops
        initedJacobi = true;
    }
}
// initialize space array
// (Re)allocates the two ping-pong buffers (interior plus one halo cell at
// each end), seeds rand() with the shared globalSeed so every call produces
// the same initial values, fills the interior with random data, and zeroes
// the halos. Exits the process on allocation failure.
void initSpace(){
    // if space has been previously allocated, free up space.
    if( space[0] != NULL ){
        free( space[0] );
    }
    if( space[1] != NULL ) {
        free( space[1] );
    }
    /*
    // allocate space
    space = (double**) malloc( 2 * sizeof(double*) );
    if( space == NULL ){
        printf( "Could not allocate space array\n" );
        exit(0);
    }
    */
    // allocate time-steps 0 and 1
    space[0] = (double*) malloc( (problemSize + 2) * sizeof(double));
    space[1] = (double*) malloc( (problemSize + 2) * sizeof(double));
    if( space[0] == NULL || space[1] == NULL ){
        printf( "Could not allocate space array\n" );
        exit(0);
    }

    // use global seed to seed the random number gen (will be constant)
    srand(globalSeed);

    // seed the space.
    // NOTE(review): if rand() returns 0 in the denominator this produces
    // inf/nan, which then propagates through the stencil — consider
    // guarding; the serial verification re-runs with the same seed, so the
    // comparison still matches either way.
    int x;
    for( x = lowerBound; x <= upperBound; ++x ){
        space[0][x] = rand() / (double)rand();
    }

    // set halo values (sanity)
    space[0][0] = 0;
    space[0][upperBound+1] = 0;
    space[1][0] = 0;
    space[1][upperBound+1] = 0;
}
// Three-point Jacobi stencil: the written cell becomes the average of the
// read-time cell and its two neighbors.
void stencil( int read, int write, int x ){
    const double* src = space[read];
    space[write][x] = (src[x-1] + src[x] + src[x+1]) / 3;
}
// Thin strtol wrapper: parse a base-10 integer from `string` (no error
// reporting beyond strtol's own behavior).
int parseInt( char* string ){
    long parsed = strtol( string, NULL, 10 );
    return (int) parsed;
}
// Verify the parallel result: snapshot the final grid, re-initialize from
// the same RNG seed, recompute all T timesteps serially, and compare cell
// by cell. Returns true (printing SUCCESS when verbose) iff every interior
// cell matches exactly; the serial rerun performs the identical per-cell
// arithmetic, so exact floating-point equality is the intended check.
bool verifyResult( bool verbose ){
    assert( space[0] != NULL && space[1] != NULL );

    // Copy out the buffer holding the final timestep: t-th step writes
    // buffer t & 1, so the last result lives in space[T & 1].
    double* endSpace = (double*) malloc( (problemSize + 2) * sizeof(double) );
    for( int x = 0; x < problemSize + 2; ++x ){
        endSpace[x] = space[T & 1][x];
    }

    // Rebuild the initial state (initSpace reseeds with the same globalSeed).
    initSpace();
    int read = 0, write = 1;
    for( int t = 1; t <= T; ++t ){
        for( int x = lowerBound; x <= upperBound; ++x ){
            stencil(read, write, x);
        }
        // ping-pong the two buffers
        read = write;
        write = 1 - write;
    }
    bool failed = false;
    for( int x = lowerBound; x <= upperBound; ++x ){
        if( endSpace[x] != space[T & 1][x] ){
            failed = true;
            if( verbose ) printf( "FAILED\n"); //! %f != %f at %d\n", endSpace[x], space[T & 1][x], x );
            break;
        }
    }
    if( verbose && !failed ) printf( "SUCCESS\n" );
    free( endSpace );
    return !failed;
}
// naive parallel iteration test suite
// Runs T Jacobi timesteps, parallelizing the spatial loop of each step with
// OpenMP (fork/join per timestep), and returns the elapsed wall time.
double test_1(){
    int t, x, read = 0, write = 1;
    double start_time = omp_get_wtime();
    for( t = 1; t <= T; ++t ){
        // Cell updates within a timestep are independent, so the x loop is
        // split across threads; the loop's implicit barrier orders timesteps.
        #pragma omp parallel for private( x ) //schedule(dynamic)
        for( x = lowerBound; x <= upperBound; ++x ){
            stencil( read, write, x );
        }
        // swap the ping-pong buffers for the next step
        read = write;
        write = 1 - write;
    }
    double end_time = omp_get_wtime();
    return (end_time - start_time);
}
// Driver: parses command-line options, initializes globals and the grid,
// runs the timed parallel Jacobi sweep, and optionally verifies it against
// a serial recomputation.
// Flags: -n suppress timing output, -c <cores>, -p <problem size>,
//        -T <time steps>, -v verify, -h help.
int main( int argc, char* argv[] ){
    setbuf(stdout, NULL); // set buffer to null, so prints ALWAYS print (for debug purposes mainly)

    bool verify = false;
    bool printtime = true;
    // Command line parsing
    // BUGFIX: getopt() returns int; storing it in a plain char breaks the
    // `!= -1` termination test on platforms where char is unsigned.
    int c;
    while ((c = getopt (argc, argv, "nc:s:p:T:hv")) != -1){
        // NOTE(review): the optstring accepts "s:" but there is no matching
        // case below, so "-s <arg>" falls through to default and exits —
        // confirm whether subset_s handling was dropped intentionally.
        switch( c ) {
            case 'n': // print time
                printtime = false;
                break;

            case 'c': // cores
                cores = parseInt( optarg );
                if( cores <= 0 ){
                    fprintf(stderr, "cores must be greater than 0: %d\n", cores);
                    exit( 0 );
                }
                break;

            case 'p': // problem size
                problemSize = parseInt( optarg );
                if( problemSize <= 0 ){
                    fprintf(stderr, "problemSize must be greater than 0: %d\n", problemSize);
                    exit( 0 );
                }
                break;

            case 'T': // T (time steps)
                T = parseInt( optarg );
                if( T <= 0 ){
                    fprintf(stderr, "T must be greater than 0: %d\n", T);
                    exit( 0 );
                }
                break;

            case 'h': // help
                printf("usage: %s\n-n \t dont print time \n-p <problem size> \t problem size in elements \n-T <time steps>\t number of time steps\n-c <cores>\tnumber of threads\n-h\tthis dialogue\n-v\tverify output\n", argv[0]);
                exit(0);

            case 'v': // verify;
                verify = true;
                break;

            case '?':
                if (optopt == 'p')
                    fprintf (stderr, "Option -%c requires positive int argument: problem size.\n", optopt);
                else if (optopt == 'T')
                    fprintf (stderr, "Option -%c requires positive int argument: T.\n", optopt);
                else if (optopt == 's')
                    fprintf (stderr, "Option -%c requires int argument: subset_s.\n", optopt);
                else if (optopt == 'c')
                    fprintf (stderr, "Option -%c requires int argument: number of cores.\n", optopt);
                else if (isprint (optopt))
                    fprintf (stderr, "Unknown option `-%c'.\n", optopt);
                else
                    fprintf(stderr, "Unknown option character `\\x%x'.\n", optopt);
                exit(0);

            default:
                exit(0);
        }
    }

    initJacobi();
    initSpace();
    double time = test_1();
    if( printtime ){
        printf( "Time: %f\n", time );
    }
    if( verify ){
        verifyResult( true );
    }
}
|
omp_loop_ordered.c | /* vim: set ts=4 sw=4: */
/* Filename : omp_loop_ordered.c
* Description : simple OpenMP model
* Author : SunYoung Kim <sunyzero@gmail.com>
* Notes :
*/
#include <stdio.h>
// Demonstrates OpenMP's `ordered` clause: iterations of the worksharing
// loop may execute in parallel, but the `ordered` block inside runs in
// sequential iteration order. The first printf may interleave arbitrarily;
// the second always appears in order 0..7.
int main()
{
    int i;
    // `parallel` spawns the team; `for ordered` shares the loop among its
    // threads and declares that the body contains an ordered region.
#pragma omp parallel
#pragma omp for ordered
    for (i=0; i<8; i++) {
        printf("[%d] Hello OpenMP\n", i);
#pragma omp ordered
        {
            printf("\t[%d] Hello OpenMP : ordered block.\n", i);
        }
    }
    /* implicit barrier */
    return 0;
}
|
GeneralMatrixMatrix.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 3 of the License, or (at your option) any later version.
//
// Alternatively, you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation; either version 2 of
// the License, or (at your option) any later version.
//
// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.
#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
#define EIGEN_GENERAL_MATRIX_MATRIX_H
namespace Eigen {
namespace internal {
// Forward declaration: run-time cache-blocking parameters and packing
// buffers shared by the level-3 (matrix-matrix) product kernels below.
template<typename _LhsScalar, typename _RhsScalar> class level3_blocking;
/* Specialization for a row-major destination matrix => simple transposition of the product */
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor>
{
  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;

  // res += alpha * lhs * rhs, res row-major. Since C = A*B implies
  // C^T = B^T * A^T, the row-major result is computed by the column-major
  // kernel with the operands swapped, each operand's storage order flipped,
  // the conjugation flags exchanged, and the rows/cols extents swapped.
  static EIGEN_STRONG_INLINE void run(
    Index rows, Index cols, Index depth,
    const LhsScalar* lhs, Index lhsStride,
    const RhsScalar* rhs, Index rhsStride,
    ResScalar* res, Index resStride,
    ResScalar alpha,
    level3_blocking<RhsScalar,LhsScalar>& blocking,
    GemmParallelInfo<Index>* info = 0)
  {
    // transpose the product such that the result is column major
    general_matrix_matrix_product<Index,
      RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
      LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
      ColMajor>
    ::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking,info);
  }
};
/* Specialization for a col-major destination matrix
 * => Blocking algorithm following Goto's paper */
// Computes res += alpha * lhs * rhs with cache blocking along K (kc) and
// M (mc). With OpenMP and a non-null `info`, the rhs panel is packed
// cooperatively by the thread team and synchronized through the
// GemmParallelInfo descriptors; otherwise a purely sequential path runs.
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor>
{
  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;

  static void run(Index rows, Index cols, Index depth,
    const LhsScalar* _lhs, Index lhsStride,
    const RhsScalar* _rhs, Index rhsStride,
    ResScalar* res, Index resStride,
    ResScalar alpha,
    level3_blocking<LhsScalar,RhsScalar>& blocking,
    GemmParallelInfo<Index>* info = 0)
  {
    // Wrap the raw pointers so lhs(i,j)/rhs(i,j) honor the storage order.
    const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride);
    const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> rhs(_rhs,rhsStride);

    typedef gebp_traits<LhsScalar,RhsScalar> Traits;

    Index kc = blocking.kc();                  // cache block size along the K direction
    Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction
    //Index nc = blocking.nc(); // cache block size along the N direction

    // Packing routines and the register-blocked micro-kernel.
    gemm_pack_lhs<LhsScalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
    gemm_pack_rhs<RhsScalar, Index, Traits::nr, RhsStorageOrder> pack_rhs;
    gebp_kernel<LhsScalar, RhsScalar, Index, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;

#ifdef EIGEN_HAS_OPENMP
    if(info)
    {
      // this is the parallel version!
      Index tid = omp_get_thread_num();
      Index threads = omp_get_num_threads();

      // Per-thread lhs pack buffer and gebp workspace; blockB is shared.
      std::size_t sizeA = kc*mc;
      std::size_t sizeW = kc*Traits::WorkSpaceFactor;
      ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, 0);
      ei_declare_aligned_stack_constructed_variable(RhsScalar, w, sizeW, 0);

      RhsScalar* blockB = blocking.blockB();
      eigen_internal_assert(blockB!=0);

      // For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
      for(Index k=0; k<depth; k+=kc)
      {
        const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A'

        // In order to reduce the chance that a thread has to wait for the other,
        // let's start by packing A'.
        pack_lhs(blockA, &lhs(0,k), lhsStride, actual_kc, mc);

        // Pack B_k to B' in a parallel fashion:
        // each thread packs the sub block B_k,j to B'_j where j is the thread id.

        // However, before copying to B'_j, we have to make sure that no other thread is still using it,
        // i.e., we test that info[tid].users equals 0.
        // Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
        // NOTE(review): spin-wait on non-atomic fields; this relies on
        // GemmParallelInfo's member qualifiers for visibility — verify.
        while(info[tid].users!=0) {}
        info[tid].users += threads;

        pack_rhs(blockB+info[tid].rhs_start*actual_kc, &rhs(k,info[tid].rhs_start), rhsStride, actual_kc, info[tid].rhs_length);

        // Notify the other threads that the part B'_j is ready to go.
        info[tid].sync = k;

        // Computes C_i += A' * B' per B'_j
        for(Index shift=0; shift<threads; ++shift)
        {
          Index j = (tid+shift)%threads;

          // At this point we have to make sure that B'_j has been updated by the thread j,
          // we use testAndSetOrdered to mimic a volatile access.
          // However, no need to wait for the B' part which has been updated by the current thread!
          if(shift>0)
            while(info[j].sync!=k) {}

          gebp(res+info[j].rhs_start*resStride, resStride, blockA, blockB+info[j].rhs_start*actual_kc, mc, actual_kc, info[j].rhs_length, alpha, -1,-1,0,0, w);
        }

        // Then keep going as usual with the remaining A'
        for(Index i=mc; i<rows; i+=mc)
        {
          const Index actual_mc = (std::min)(i+mc,rows)-i;

          // pack A_i,k to A'
          pack_lhs(blockA, &lhs(i,k), lhsStride, actual_kc, actual_mc);

          // C_i += A' * B'
          gebp(res+i, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1,-1,0,0, w);
        }

        // Release all the sub blocks B'_j of B' for the current thread,
        // i.e., we simply decrement the number of users by 1
        for(Index j=0; j<threads; ++j)
          #pragma omp atomic
          --(info[j].users);
      }
    }
    else
#endif // EIGEN_HAS_OPENMP
    {
      EIGEN_UNUSED_VARIABLE(info);

      // this is the sequential version!
      std::size_t sizeA = kc*mc;
      std::size_t sizeB = kc*cols;
      std::size_t sizeW = kc*Traits::WorkSpaceFactor;

      // Use the blocking object's buffers when pre-allocated, otherwise
      // stack/heap-allocate scoped ones.
      ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
      ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());
      ei_declare_aligned_stack_constructed_variable(RhsScalar, blockW, sizeW, blocking.blockW());

      // For each horizontal panel of the rhs, and corresponding panel of the lhs...
      // (==GEMM_VAR1)
      for(Index k2=0; k2<depth; k2+=kc)
      {
        const Index actual_kc = (std::min)(k2+kc,depth)-k2;

        // OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
        // => Pack rhs's panel into a sequential chunk of memory (L2 caching)
        // Note that this panel will be read as many times as the number of blocks in the lhs's
        // vertical panel which is, in practice, a very low number.
        pack_rhs(blockB, &rhs(k2,0), rhsStride, actual_kc, cols);

        // For each mc x kc block of the lhs's vertical panel...
        // (==GEPP_VAR1)
        for(Index i2=0; i2<rows; i2+=mc)
        {
          const Index actual_mc = (std::min)(i2+mc,rows)-i2;

          // We pack the lhs's block into a sequential chunk of memory (L1 caching)
          // Note that this block will be read a very high number of times, which is equal to the number of
          // micro vertical panel of the large rhs's panel (e.g., cols/4 times).
          pack_lhs(blockA, &lhs(i2,k2), lhsStride, actual_kc, actual_mc);

          // Everything is packed, we can now call the block * panel kernel:
          gebp(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1, -1, 0, 0, blockW);
        }
      }
    }
  }
};
/*********************************************************************************
*  Specialization of GeneralProduct<> for "large" GEMM, i.e.,
*  implementation of the high level wrapper to general_matrix_matrix_product
**********************************************************************************/

// Trait forwarding: a GemmProduct inherits the traits of its ProductBase.
template<typename Lhs, typename Rhs>
struct traits<GeneralProduct<Lhs,Rhs,GemmProduct> >
 : traits<ProductBase<GeneralProduct<Lhs,Rhs,GemmProduct>, Lhs, Rhs> >
{};
// Callable bundling one GEMM invocation (operands, destination, scale
// factor, blocking) so parallelize_gemm can slice it into row/column
// sub-products. operator()(row, rows, col, cols, info) runs the kernel on
// the given destination sub-block; cols == -1 means "all columns".
template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct gemm_functor
{
  gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, Scalar actualAlpha,
               BlockingType& blocking)
    : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
  {}

  // Called once before the threads start: pre-allocate the shared rhs buffer.
  void initParallelSession() const
  {
    m_blocking.allocateB();
  }

  void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const
  {
    if(cols==-1)
      cols = m_rhs.cols();

    Gemm::run(rows, cols, m_lhs.cols(),
              /*(const Scalar*)*/&m_lhs.coeffRef(row,0), m_lhs.outerStride(),
              /*(const Scalar*)*/&m_rhs.coeffRef(0,col), m_rhs.outerStride(),
              (Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(),
              m_actualAlpha, m_blocking, info);
  }

  protected:
    const Lhs& m_lhs;
    const Rhs& m_rhs;
    Dest& m_dest;
    Scalar m_actualAlpha;
    BlockingType& m_blocking;
};
// Forward declaration; the boolean selects between the fully static
// specialization (all dimensions known at compile time) and the dynamic one.
template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth,
         bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space;
// Run-time cache-blocking parameters and (possibly pre-allocated) packing
// buffers for the level-3 kernels:
//  - m_mc / m_nc / m_kc: block sizes along the M/N/K dimensions,
//  - m_blockA / m_blockB: packed lhs/rhs panels, m_blockW: gebp workspace.
// Buffer ownership/lifetime is handled by the derived gemm_blocking_space.
template<typename _LhsScalar, typename _RhsScalar>
class level3_blocking
{
    typedef _LhsScalar LhsScalar;
    typedef _RhsScalar RhsScalar;

  protected:
    LhsScalar* m_blockA;
    RhsScalar* m_blockB;
    RhsScalar* m_blockW;

    DenseIndex m_mc;
    DenseIndex m_nc;
    DenseIndex m_kc;

  public:
    level3_blocking()
      : m_blockA(0), m_blockB(0), m_blockW(0), m_mc(0), m_nc(0), m_kc(0)
    {}

    inline DenseIndex mc() const { return m_mc; }
    inline DenseIndex nc() const { return m_nc; }
    inline DenseIndex kc() const { return m_kc; }

    inline LhsScalar* blockA() { return m_blockA; }
    inline RhsScalar* blockB() { return m_blockB; }
    inline RhsScalar* blockW() { return m_blockW; }
};
// Fully static specialization: all maximal dimensions are known at compile
// time, so the packing buffers live as aligned static-size members and the
// allocate*() hooks are no-ops. For a row-major destination the problem is
// solved transposed, hence lhs/rhs scalars and rows/cols are swapped.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, true>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
    enum {
      Transpose = StorageOrder==RowMajor,
      ActualRows = Transpose ? MaxCols : MaxRows,
      ActualCols = Transpose ? MaxRows : MaxCols
    };
    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
    typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;
    enum {
      SizeA = ActualRows * MaxDepth,
      SizeB = ActualCols * MaxDepth,
      SizeW = MaxDepth * Traits::WorkSpaceFactor
    };

    // Statically sized, 16-byte aligned packing buffers.
    EIGEN_ALIGN16 LhsScalar m_staticA[SizeA];
    EIGEN_ALIGN16 RhsScalar m_staticB[SizeB];
    EIGEN_ALIGN16 RhsScalar m_staticW[SizeW];

  public:

    gemm_blocking_space(DenseIndex /*rows*/, DenseIndex /*cols*/, DenseIndex /*depth*/)
    {
      this->m_mc = ActualRows;
      this->m_nc = ActualCols;
      this->m_kc = MaxDepth;
      this->m_blockA = m_staticA;
      this->m_blockB = m_staticB;
      this->m_blockW = m_staticW;
    }

    // Buffers are members; nothing to allocate.
    inline void allocateA() {}
    inline void allocateB() {}
    inline void allocateW() {}
    inline void allocateAll() {}
};
// Dynamic specialization: block sizes are computed at run time from the
// actual problem dimensions and the buffers are allocated lazily via the
// allocate*() hooks; the destructor releases whatever was allocated.
// For a row-major destination the problem is solved transposed.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, false>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
    enum {
      Transpose = StorageOrder==RowMajor
    };
    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
    typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;

    // Element counts of the lazily allocated buffers.
    DenseIndex m_sizeA;
    DenseIndex m_sizeB;
    DenseIndex m_sizeW;

  public:

    gemm_blocking_space(DenseIndex rows, DenseIndex cols, DenseIndex depth)
    {
      this->m_mc = Transpose ? cols : rows;
      this->m_nc = Transpose ? rows : cols;
      this->m_kc = depth;

      // Shrink mc/nc/kc to cache-friendly block sizes, then derive the
      // buffer sizes from the final values.
      computeProductBlockingSizes<LhsScalar,RhsScalar>(this->m_kc, this->m_mc, this->m_nc);
      m_sizeA = this->m_mc * this->m_kc;
      m_sizeB = this->m_kc * this->m_nc;
      m_sizeW = this->m_kc*Traits::WorkSpaceFactor;
    }

    void allocateA()
    {
      if(this->m_blockA==0)
        this->m_blockA = aligned_new<LhsScalar>(m_sizeA);
    }

    void allocateB()
    {
      if(this->m_blockB==0)
        this->m_blockB = aligned_new<RhsScalar>(m_sizeB);
    }

    void allocateW()
    {
      if(this->m_blockW==0)
        this->m_blockW = aligned_new<RhsScalar>(m_sizeW);
    }

    void allocateAll()
    {
      allocateA();
      allocateB();
      allocateW();
    }

    // aligned_delete is a no-op on null pointers, so partially allocated
    // states are handled correctly.
    ~gemm_blocking_space()
    {
      aligned_delete(this->m_blockA, m_sizeA);
      aligned_delete(this->m_blockB, m_sizeB);
      aligned_delete(this->m_blockW, m_sizeW);
    }
};
} // end namespace internal
// High-level GEMM product expression: checks scalar compatibility at
// construction and, on evaluation, extracts the actual operands and scale
// factors, sets up the blocking, and dispatches to the (possibly
// parallelized) general_matrix_matrix_product kernel.
template<typename Lhs, typename Rhs>
class GeneralProduct<Lhs, Rhs, GemmProduct>
  : public ProductBase<GeneralProduct<Lhs,Rhs,GemmProduct>, Lhs, Rhs>
{
    enum {
      MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime)
    };
  public:
    EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct)

    typedef typename  Lhs::Scalar LhsScalar;
    typedef typename  Rhs::Scalar RhsScalar;
    typedef           Scalar      ResScalar;

    GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
    {
      // Fail at compile time if LhsScalar * RhsScalar is not a valid mix.
      typedef internal::scalar_product_op<LhsScalar,RhsScalar> BinOp;
      EIGEN_CHECK_BINARY_COMPATIBILIY(BinOp,LhsScalar,RhsScalar);
    }

    // dst += alpha * lhs * rhs
    template<typename Dest> void scaleAndAddTo(Dest& dst, Scalar alpha) const
    {
      eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols());

      // Strip transpose/conjugate/scale wrappers from the operands and fold
      // their scalar factors into alpha.
      typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(m_lhs);
      typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(m_rhs);

      Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs)
                                 * RhsBlasTraits::extractScalarFactor(m_rhs);

      typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar,
              Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType;

      typedef internal::gemm_functor<
        Scalar, Index,
        internal::general_matrix_matrix_product<
          Index,
          LhsScalar, (_ActualLhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
          RhsScalar, (_ActualRhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
          (Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>,
        _ActualLhsType, _ActualRhsType, Dest, BlockingType> GemmFunctor;

      BlockingType blocking(dst.rows(), dst.cols(), lhs.cols());

      // Threading is only worthwhile for sufficiently tall destinations.
      internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>(GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), this->rows(), this->cols(), Dest::Flags&RowMajorBit);
    }
};
} // end namespace Eigen
#endif // EIGEN_GENERAL_MATRIX_MATRIX_H
|
integrate.c | /*
* integrate.c: Example of numerical integration in OpenMP.
*
* (C) 2015 Mikhail Kurnosov <mkurnosov@gmail.com>
*/
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#include <omp.h>
const double PI = 3.14159265358979323846;
const double a = -4.0;
const double b = 4.0;
const int nsteps = 40000000;
/* Wall-clock time in seconds with microsecond resolution (gettimeofday). */
double wtime()
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return (double)tv.tv_sec + (double)tv.tv_usec * 1E-6;
}
double func(double x)
{
return exp(-x * x);
}
/* integrate: Integrates by rectangle method (midpoint rule) */
double integrate(double (*func)(double), double a, double b, int n)
{
double h = (b - a) / n;
double sum = 0.0;
for (int i = 0; i < n; i++)
sum += func(a + h * (i + 0.5));
sum *= h;
return sum;
}
/* Time the sequential integration of func over [a, b]; prints the result
 * with its absolute error against the exact value sqrt(pi) and returns the
 * elapsed wall time. */
double run_serial()
{
    const double t0 = wtime();
    const double res = integrate(func, a, b, nsteps);
    const double elapsed = wtime() - t0;
    printf("Result (serial): %.12f; error %.12f\n", res, fabs(res - sqrt(PI)));
    return elapsed;
}
/* integrate_omp: midpoint-rule quadrature parallelized with OpenMP.
 * Each thread accumulates a private partial sum over its share of the
 * iterations, then merges it into the shared total with a single atomic
 * add per thread (instead of contending on every loop iteration). */
double integrate_omp(double (*func)(double), double a, double b, int n)
{
    double h = (b - a) / n;
    double sum = 0.0;
#pragma omp parallel
    {
        double sumloc = 0.0;
        /* worksharing: the iteration range is split among the team */
#pragma omp for
        for (int i = 0; i < n; i++)
            sumloc += func(a + h * (i + 0.5));
        /* one atomic merge per thread */
#pragma omp atomic
        sum += sumloc;
    }
    sum *= h;
    return sum;
}
/* Time the OpenMP integration of func over [a, b]; prints the result with
 * its absolute error against the exact value sqrt(pi) and returns the
 * elapsed wall time. */
double run_parallel()
{
    const double t0 = wtime();
    const double res = integrate_omp(func, a, b, nsteps);
    const double elapsed = wtime() - t0;
    printf("Result (parallel): %.12f; error %.12f\n", res, fabs(res - sqrt(PI)));
    return elapsed;
}
/* Runs the serial and parallel integrations of exp(-x^2) over [a, b] and
 * reports both timings and the observed speedup. */
int main(int argc, char **argv)
{
    printf("Integration f(x) on [%.12f, %.12f], nsteps = %d\n", a, b, nsteps);
    double tserial = run_serial();
    double tparallel = run_parallel();
    printf("Execution time (serial): %.6f\n", tserial);
    printf("Execution time (parallel): %.6f\n", tparallel);
    printf("Speedup: %.2f\n", tserial / tparallel);
    return 0;
}
|
mpc_contact_criteria.h | // KRATOS ___| | | |
// \___ \ __| __| | | __| __| | | __| _` | |
// | | | | | ( | | | | ( | |
// _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS
//
// License: BSD License
// license: StructuralMechanicsApplication/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_MPC_CONTACT_CRITERIA_H)
#define KRATOS_MPC_CONTACT_CRITERIA_H
/* System includes */
/* External includes */
/* Project includes */
#include "solving_strategies/convergencecriterias/convergence_criteria.h"
#include "utilities/color_utilities.h"
#include "utilities/variable_utils.h"
#include "custom_utilities/contact_utilities.h"
#include "processes/simple_mortar_mapper_wrapper_process.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class MPCContactCriteria
* @ingroup ContactStructuralMechanicsApplication
* @brief Custom convergence criteria for the contact problem
* @author Vicente Mataix Ferrandiz
*/
template<class TSparseSpace, class TDenseSpace>
class MPCContactCriteria
: public ConvergenceCriteria< TSparseSpace, TDenseSpace >
{
public:
///@name Type Definitions
///@{
/// Pointer definition of MPCContactCriteria
KRATOS_CLASS_POINTER_DEFINITION( MPCContactCriteria );
/// The base class definition (and it subclasses)
typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
/// The sparse space used
typedef TSparseSpace SparseSpaceType;
/// The components containers
typedef ModelPart::NodesContainerType NodesArrayType;
typedef ModelPart::ElementsContainerType ElementsArrayType;
typedef ModelPart::ConditionsContainerType ConditionsArrayType;
typedef ModelPart::MasterSlaveConstraintContainerType ConstraintArrayType;
/// The table stream definition TODO: Replace by logger
typedef TableStreamUtility::Pointer TablePrinterPointerType;
/// The index type definition
typedef std::size_t IndexType;
// Geometry definition
typedef Node<3> NodeType;
typedef Geometry<NodeType> GeometryType;
typedef CouplingGeometry<NodeType> CouplingGeometryType;
///@}
///@name Life Cycle
///@{
/// Default constructor: delegates to the base convergence criteria.
explicit MPCContactCriteria()
    : BaseType()
{
}

/// Copy constructor.
MPCContactCriteria( MPCContactCriteria const& rOther )
    : BaseType(rOther)
{
}

/// Destructor.
~MPCContactCriteria() override = default;
///@}
///@name Operators
///@{
/**
 * @brief Criteria that need to be called before getting the solution.
 * @details Saves the previous WEIGHTED_GAP into the step-1 buffer, zeroes
 * CONTACT_FORCE on the "Contact" sub-model part nodes, recomputes the
 * weighted gap, and resets the non-historical NODAL_AREA.
 * @param rModelPart Reference to the ModelPart containing the contact problem.
 * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
 * @param rA System matrix (unused)
 * @param rDx Vector of results (variations on nodal variables)
 * @param rb RHS vector (residual)
 * @return Always true: this pre-check only prepares state, it never vetoes the step
 */
bool PreCriteria(
    ModelPart& rModelPart,
    DofsArrayType& rDofSet,
    const TSystemMatrixType& rA,
    const TSystemVectorType& rDx,
    const TSystemVectorType& rb
    ) override
{
    BaseType::PreCriteria(rModelPart, rDofSet, rA, rDx, rb);

    // Auxiliary zero array used to reset the nodal contact forces
    const array_1d<double, 3> zero_array = ZeroVector(3);

    // We initialize the contact force
    NodesArrayType& r_nodes_array = rModelPart.GetSubModelPart("Contact").Nodes();
    const auto it_node_begin = r_nodes_array.begin();

    // We save the current WEIGHTED_GAP in the buffer (step 1) and reset the CONTACT_FORCE
    #pragma omp parallel for
    for(int i = 0; i < static_cast<int>(r_nodes_array.size()); ++i) {
        auto it_node = it_node_begin + i;
        it_node->SetValue(CONTACT_FORCE, zero_array);
        it_node->FastGetSolutionStepValue(WEIGHTED_GAP, 1) = it_node->FastGetSolutionStepValue(WEIGHTED_GAP);
    }

    // Compute weighted gap
    ComputeWeightedGap(rModelPart);

    // Reset the NODAL_AREA
    VariableUtils().SetNonHistoricalVariableToZero(NODAL_AREA, r_nodes_array);

    return true;
}
/**
* @brief Compute relative and absolute error.
* @param rModelPart Reference to the ModelPart containing the contact problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
* @param rA System matrix (unused)
* @param rDx Vector of results (variations on nodal variables)
* @param rb RHS vector (residual)
* @return true if convergence is achieved, false otherwise
*/
bool PostCriteria(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
const TSystemMatrixType& rA,
const TSystemVectorType& rDx,
const TSystemVectorType& rb
) override
{
// We call the base class
BaseType::PostCriteria(rModelPart, rDofSet, rA, rDx, rb);
// Getting process info
const ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
if (r_process_info[NL_ITERATION_NUMBER] > 0) {
// Tolerance factor used to scale the Young modulus for the traction check
const double reaction_check_stiffness_factor = r_process_info.Has(REACTION_CHECK_STIFFNESS_FACTOR) ? r_process_info.GetValue(REACTION_CHECK_STIFFNESS_FACTOR) : 1.0e-12;
// Compute weighted gap
ComputeWeightedGap(rModelPart);
// Transfer reaction from master to slave
std::size_t sub_contact_counter = 0;
CounterContactModelParts(rModelPart, sub_contact_counter);
// Mapping reaction (note the -1 coefficient: master reactions are mapped with flipped sign)
Parameters mapping_parameters = Parameters(R"({"distance_threshold" : 1.0e24, "update_interface" : false, "origin_variable" : "REACTION", "mapping_coefficient" : -1.0e0})" );
if (r_process_info.Has(DISTANCE_THRESHOLD)) {
mapping_parameters["distance_threshold"].SetDouble(r_process_info[DISTANCE_THRESHOLD]);
}
auto& r_contact_model_part = rModelPart.GetSubModelPart("Contact");
for (std::size_t i_contact = 0; i_contact < sub_contact_counter; ++i_contact) {
auto& r_sub = r_contact_model_part.GetSubModelPart("ContactSub" + std::to_string(i_contact));
auto& r_sub_master = r_sub.GetSubModelPart("MasterSubModelPart" + std::to_string(i_contact));
auto& r_sub_slave = r_sub.GetSubModelPart("SlaveSubModelPart" + std::to_string(i_contact));
SimpleMortarMapperProcessWrapper(r_sub_master, r_sub_slave, mapping_parameters).Execute();
}
// TODO: Add frictional check
// Pick the properties of the last element defining YOUNG_MODULUS (fallback: first element's properties)
Properties::Pointer p_properties = rModelPart.Elements().begin()->pGetProperties();
for (auto& r_elements : rModelPart.Elements()) {
if (r_elements.pGetProperties()->Has(YOUNG_MODULUS)) {
p_properties = r_elements.pGetProperties();
}
}
// Convergence counters: 0 means converged; any node whose state flips bumps them
IndexType is_active_set_converged = 0, is_slip_converged = 0;
// We get the YOUNG_MODULUS and derive the (negative) pressure threshold for the active check
const double young_modulus = p_properties->Has(YOUNG_MODULUS) ? p_properties->GetValue(YOUNG_MODULUS) : 0.0;
const double auxiliar_check = young_modulus > 0.0 ? -(reaction_check_stiffness_factor * young_modulus) : 0.0;
// We check the active/inactive set during the first non-linear iteration or for the general semi-smooth case
NodesArrayType& r_nodes_array = r_contact_model_part.Nodes();
const auto it_node_begin = r_nodes_array.begin();
// If frictionless or mesh tying
if (rModelPart.IsNot(SLIP)) {
#pragma omp parallel for reduction(+:is_active_set_converged)
for(int i = 0; i < static_cast<int>(r_nodes_array.size()); ++i) {
auto it_node = it_node_begin + i;
if (it_node->Is(SLAVE)) {
// The contact force corresponds with the reaction in the normal direction
const array_1d<double, 3>& r_total_force = it_node->FastGetSolutionStepValue(REACTION);
const double nodal_area = it_node->Has(NODAL_AREA) ? it_node->GetValue(NODAL_AREA) : 1.0;
const double gap = it_node->FastGetSolutionStepValue(WEIGHTED_GAP)/nodal_area;
const array_1d<double, 3>& r_normal = it_node->FastGetSolutionStepValue(NORMAL);
const double contact_force = inner_prod(r_total_force, r_normal);
const double contact_pressure = contact_force/it_node->GetValue(NODAL_MAUX);
if (contact_pressure < auxiliar_check || gap < 0.0) { // NOTE: This could be conflictive (< or <=)
// We save the contact force
it_node->SetValue(CONTACT_FORCE, contact_force/it_node->GetValue(NODAL_PAUX) * r_normal);
it_node->SetValue(NORMAL_CONTACT_STRESS, contact_pressure);
if (it_node->IsNot(ACTIVE)) {
it_node->Set(ACTIVE, true);
is_active_set_converged += 1;
}
} else {
if (it_node->Is(ACTIVE)) {
it_node->Set(ACTIVE, false);
is_active_set_converged += 1;
}
}
}
}
} else { // If frictional
#pragma omp parallel for reduction(+:is_active_set_converged, is_slip_converged)
for(int i = 0; i < static_cast<int>(r_nodes_array.size()); ++i) {
auto it_node = it_node_begin + i;
if (it_node->Is(SLAVE)) {
// NOTE: the loop-invariant threshold auxiliar_check computed above is reused here
// (the original redeclared an identical shadow copy per node).
// The contact force corresponds with the reaction in the normal direction
const array_1d<double, 3>& r_total_force = it_node->FastGetSolutionStepValue(REACTION);
const double nodal_area = it_node->Has(NODAL_AREA) ? it_node->GetValue(NODAL_AREA) : 1.0;
const double gap = it_node->FastGetSolutionStepValue(WEIGHTED_GAP)/nodal_area;
const array_1d<double, 3>& r_normal = it_node->FastGetSolutionStepValue(NORMAL);
const double contact_force = inner_prod(r_total_force, r_normal);
const double normal_contact_pressure = contact_force/it_node->GetValue(NODAL_MAUX);
if (normal_contact_pressure < auxiliar_check || gap < 0.0) { // NOTE: This could be conflictive (< or <=)
// We save the contact force
it_node->SetValue(CONTACT_FORCE, r_total_force/it_node->GetValue(NODAL_PAUX));
it_node->SetValue(NORMAL_CONTACT_STRESS, normal_contact_pressure);
if (it_node->IsNot(ACTIVE)) {
it_node->Set(ACTIVE, true);
is_active_set_converged += 1;
}
// Stick/slip check against the Coulomb limit
const double tangential_contact_pressure = norm_2(r_total_force - contact_force * r_normal)/it_node->GetValue(NODAL_MAUX);
const bool is_slip = it_node->Is(SLIP);
const double mu = it_node->GetValue(FRICTION_COEFFICIENT);
if (tangential_contact_pressure <= - mu * contact_force) { // STICK CASE // TODO: Check the <=
it_node->SetValue(TANGENTIAL_CONTACT_STRESS, tangential_contact_pressure);
if (is_slip) {
it_node->Set(SLIP, false);
is_slip_converged += 1;
}
} else { // SLIP CASE
it_node->SetValue(TANGENTIAL_CONTACT_STRESS, - mu * contact_force);
if (!is_slip) {
it_node->Set(SLIP, true);
is_slip_converged += 1;
}
}
} else {
if (it_node->Is(ACTIVE)) {
it_node->Set(ACTIVE, false);
it_node->Reset(SLIP);
is_active_set_converged += 1;
}
}
}
}
}
// We set the constraints active and inactive in function of the active set
ConditionsArrayType& r_conditions_array = rModelPart.GetSubModelPart("ComputingContact").Conditions();
auto it_cond_begin = r_conditions_array.begin();
#pragma omp parallel for
for(int i = 0; i < static_cast<int>(r_conditions_array.size()); ++i) {
auto it_cond = it_cond_begin + i;
// NOTE(review): the Master geometry part is treated as the slave side here — confirm against CouplingGeometryType conventions
const auto& r_slave_geometry = it_cond->GetGeometry().GetGeometryPart(CouplingGeometryType::Master);
std::size_t counter = 0;
for (auto& r_node : r_slave_geometry) {
if (r_node.IsNot(ACTIVE)) {
++counter;
}
}
// In case of traction we deactivate
if (counter == r_slave_geometry.size()) {
it_cond->Set(ACTIVE, false);
// We deactivate the constraints on inactive conditions
if (it_cond->Has(CONSTRAINT_POINTER)) {
auto p_const = it_cond->GetValue(CONSTRAINT_POINTER);
// In case of traction we deactivate
p_const->Set(ACTIVE, false);
} else {
KRATOS_ERROR << "Contact conditions must have defined CONSTRAINT_POINTER" << std::endl;
}
}
}
// We save to the process info if the active set has converged
const bool active_set_converged = (is_active_set_converged == 0);
const bool slip_set_converged = (is_slip_converged == 0);
if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
if (active_set_converged) {
KRATOS_INFO("MPCContactCriteria") << BOLDFONT("\tActive set") << " convergence is " << BOLDFONT(FGRN("achieved")) << std::endl;
} else {
KRATOS_INFO("MPCContactCriteria") << BOLDFONT("\tActive set") << " convergence is " << BOLDFONT(FRED("not achieved")) << std::endl;
}
if (slip_set_converged) {
KRATOS_INFO("MPCContactCriteria") << BOLDFONT("\tSlip set") << " convergence is " << BOLDFONT(FGRN("achieved")) << std::endl;
} else {
KRATOS_INFO("MPCContactCriteria") << BOLDFONT("\tSlip set") << " convergence is " << BOLDFONT(FRED("not achieved")) << std::endl;
}
}
return (active_set_converged && slip_set_converged);
}
return true;
}
/**
* @brief This function initialize the convergence criteria
* @param rModelPart The model part of interest
*/
// Initializes the convergence criteria; no extra state beyond the base class is set up here.
void Initialize(ModelPart& rModelPart) override
{
BaseType::Initialize(rModelPart);
}
///@}
///@name Operations
///@{
///@}
///@name Acces
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Friends
///@{
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
/**
* @brief This method computes the weighted gap in the nodes of the problem
* @param rModelPart Reference to the ModelPart containing the contact problem.
*/
/**
 * @brief This method computes the weighted gap in the nodes of the problem
 * @details WEIGHTED_GAP is always reset; WEIGHTED_SLIP is additionally reset for
 * frictional (SLIP) problems. The original duplicated the WEIGHTED_GAP reset in
 * both branches; the common call is hoisted here.
 * @param rModelPart Reference to the ModelPart containing the contact problem.
 */
void ComputeWeightedGap(ModelPart& rModelPart)
{
    NodesArrayType& r_nodes_array = rModelPart.GetSubModelPart("Contact").Nodes();
    // Set to zero the weighted gap (common to frictional and frictionless cases)
    VariableUtils().SetHistoricalVariableToZero(WEIGHTED_GAP, r_nodes_array);
    if (rModelPart.Is(SLIP)) {
        // Frictional problems also accumulate the tangential slip
        VariableUtils().SetHistoricalVariableToZero(WEIGHTED_SLIP, r_nodes_array);
    }
    // Compute the contribution
    ContactUtilities::ComputeExplicitContributionConditions(rModelPart.GetSubModelPart("ComputingContact"));
}
/**
* @brief This method computes the weighted gap in the nodes of the problem
* @param rModelPart Reference to the ModelPart containing the contact problem.
* @param rCounter Reference to the counter
*/
/**
 * @brief Recursively counts the "ContactSub*" sub model parts (excluding "ComputingContactSub*")
 * @param rModelPart Reference to the model part whose tree is inspected
 * @param rCounter Reference to the running counter, incremented in place
 */
void CounterContactModelParts(
    ModelPart& rModelPart,
    std::size_t& rCounter
    )
{
    for (const auto& r_sub_name : rModelPart.GetSubModelPartNames()) {
        const bool is_contact_sub = r_sub_name.find("ContactSub") != std::string::npos;
        const bool is_computing_sub = r_sub_name.find("ComputingContactSub") != std::string::npos;
        if (is_contact_sub && !is_computing_sub)
            ++rCounter;
        // Descend into the child to count nested contact sub model parts as well
        auto& r_child = rModelPart.GetSubModelPart(r_sub_name);
        if (r_child.IsSubModelPart())
            CounterContactModelParts(r_child, rCounter);
    }
}
///@}
///@name Private Access
///@{
///@}
///@name Serialization
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Unaccessible methods
///@{
///@}
}; // Class MPCContactCriteria
///@name Explicit Specializations
///@{
} // namespace Kratos
#endif /* KRATOS_MPC_CONTACT_CRITERIA_H defined */
|
test.c | #define N 1024
#define _GNU_SOURCE
#include <link.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
// If one of the libomptarget plugins has been loaded, it means we are running
// w/ith libomptarget. libomptarget.so is also used by LOMP, so we need to check
// for libomptarget.rtl.*.
/*
static int isLibomptarget(struct dl_phdr_info *info, size_t size,
void *data) {
if (strstr(info->dlpi_name, "libomptarget.rtl") != NULL) {
*((int *) data) = 1;
return 1;
}
return 0;
}
*/
#pragma omp requires unified_shared_memory
#define TEST_NESTED 1
#define TEST_CONCURRENT 1
#define TEST_CONCURRENT_TF 1
#define TEST_PARALLEL1 1
int a[N], b[N];
// Exercises four concurrent "target nowait" regions, each updating one quarter of a[],
// under different mapping strategies; every section checks a[i] == i+1 afterwards.
int main() {
int i;
int error, totError = 0;
#if TEST_NESTED
// Section 1: nowait targets nested inside an enclosing "target data" region
for (i=0; i<N; i++) a[i] = b[i] = i;
#pragma omp target data map(to:b) map(from: a)
{
#pragma omp target nowait map(to:b) map(from: a)
{
int j;
for(j=0; j<N/4; j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(to:b) map(from: a)
{
int j;
for(j=N/4; j<N/2; j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(to:b) map(from: a)
{
int j;
for(j=N/2; j<3*(N/4); j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(to:b) map(from: a)
{
int j;
for(j=3*(N/4); j<N; j++) a[j] = b[j]+1;
}
#pragma omp taskwait
}
error=0;
for (i=0; i<N; i++) {
if (a[i] != i+1) printf("%d: error %d != %d, error %d\n", i, a[i], i+1, ++error);
}
if (! error) {
printf("  test with nested maps completed successfully\n");
} else {
printf("  test with nested maps completed with %d error(s)\n", error);
totError++;
}
#endif
#if TEST_CONCURRENT_TF
// Section 2: concurrent targets with tofrom maps and no enclosing data region
for (i=0; i<N; i++) a[i] = b[i] = i;
#pragma omp target nowait map(tofrom:a, b)
{
int j;
for(j=0; j<N/4; j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(tofrom:a, b)
{
int j;
for(j=N/4; j<N/2; j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(tofrom:a, b)
{
int j;
for(j=N/2; j<3*(N/4); j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(tofrom:a, b)
{
int j;
for(j=3*(N/4); j<N; j++) a[j] = b[j]+1;
}
#pragma omp taskwait
error=0;
for (i=0; i<N; i++) {
if (a[i] != i+1) printf("%d: error %d != %d, error %d\n", i, a[i], i+1, ++error);
}
if (! error) {
printf("  test with concurrent with to/from maps completed successfully\n");
} else {
printf("  test with concurrent with to/from maps completed with %d error(s)\n", error);
totError++;
}
#endif
#if TEST_CONCURRENT
// This test cannot run correctly with libomptarget because the library does
// not support proper async. Fake the output in this case.
//int libomptargetInUse = 0;
//dl_iterate_phdr(isLibomptarget, &libomptargetInUse);
//if (libomptargetInUse) {
//  printf("  test with concurrent maps completed successfully\n");
//} else {
// Run actual test
// Section 3: concurrent targets with independent to/from maps
for (i=0; i<N; i++) a[i] = b[i] = i;
#pragma omp target nowait map(to:b) map(from: a)
{
int j;
for(j=0; j<N/4; j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(to:b) map(from: a)
{
int j;
for(j=N/4; j<N/2; j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(to:b) map(from: a)
{
int j;
for(j=N/2; j<3*(N/4); j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(to:b) map(from: a)
{
int j;
for(j=3*(N/4); j<N; j++) a[j] = b[j]+1;
}
#pragma omp taskwait
error=0;
for (i=0; i<N; i++) {
if (a[i] != i+1) printf("%d: error %d != %d, error %d\n", i, a[i], i+1, ++error);
}
if (! error) {
printf("  test with concurrent maps completed successfully\n");
} else {
printf("  test with concurrent maps completed with %d error(s)\n", error);
totError++;
}
//}
#endif
#if TEST_PARALLEL1
// Section 4: same as section 1, but issued from a single-thread parallel region
for (i=0; i<N; i++) a[i] = b[i] = i;
#pragma omp parallel num_threads(1)
{
#pragma omp target data map(to:b) map(from: a)
{
#pragma omp target nowait map(to:b) map(from: a)
{
int j;
for(j=0; j<N/4; j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(to:b) map(from: a)
{
int j;
for(j=N/4; j<N/2; j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(to:b) map(from: a)
{
int j;
for(j=N/2; j<3*(N/4); j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(to:b) map(from: a)
{
int j;
for(j=3*(N/4); j<N; j++) a[j] = b[j]+1;
}
#pragma omp taskwait
}
}
error=0;
for (i=0; i<N; i++) {
if (a[i] != i+1) printf("%d: error %d != %d, error %d\n", i, a[i], i+1, ++error);
}
if (! error) {
printf("  test with nested maps and Parallel 1 thread completed successfully\n");
} else {
printf("  test with nested maps and Parallel 1 thread completed with %d error(s)\n", error);
totError++;
}
#endif
printf("completed with %d errors\n", totError);
return totError;
}
|
GB_resize.c | //------------------------------------------------------------------------------
// GB_resize: change the size of a matrix
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
#include "GB_select.h"
#define GB_FREE_ALL GB_PHIX_FREE (A) ;
GrB_Info GB_resize          // change the size of a matrix
(
    GrB_Matrix A,               // matrix to modify
    const GrB_Index nrows_new,  // new number of rows in matrix
    const GrB_Index ncols_new,  // new number of columns in matrix
    GB_Context Context
)
{
    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------
    ASSERT_MATRIX_OK (A, "A to resize", GB0) ;
    //--------------------------------------------------------------------------
    // handle the CSR/CSC format
    //--------------------------------------------------------------------------
    // vlen/vdim are the internal (orientation-independent) dimensions: for CSC,
    // vlen is the row count and vdim the column count; for CSR it is the reverse.
    int64_t vdim_old = A->vdim ;
    int64_t vlen_old = A->vlen ;
    int64_t vlen_new, vdim_new ;
    if (A->is_csc)
    {
        vlen_new = nrows_new ;
        vdim_new = ncols_new ;
    }
    else
    {
        vlen_new = ncols_new ;
        vdim_new = nrows_new ;
    }
    //--------------------------------------------------------------------------
    // determine the max # of threads to use here
    //--------------------------------------------------------------------------
    // GB_selector (RESIZE) will use a different # of threads
    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int nthreads = GB_nthreads (vdim_new - vdim_old, chunk, nthreads_max) ;
    //--------------------------------------------------------------------------
    // delete any lingering zombies and assemble any pending tuples
    //--------------------------------------------------------------------------
    // only do so if either dimension is shrinking, or if pending tuples exist
    // and vdim_old <= 1 and vdim_new > 1, since in that case, Pending->j has
    // not been allocated yet, but would be required in the resized matrix.
    if (vdim_new < vdim_old || vlen_new < vlen_old ||
        (GB_PENDING (A) && vdim_old <= 1 && vdim_new > 1))
    {
        GB_WAIT (A) ;
        ASSERT_MATRIX_OK (A, "A to resize, wait", GB0) ;
    }
    //--------------------------------------------------------------------------
    // check for early conversion to hypersparse
    //--------------------------------------------------------------------------
    // If the # of vectors grows very large, it is costly to reallocate enough
    // space for the non-hypersparse A->p component.  So convert the matrix to
    // hypersparse if that happens.
    GrB_Info info ;
    if (A->nvec_nonempty < 0)
    {
        // nvec_nonempty is lazily maintained; recompute it before the test
        A->nvec_nonempty = GB_nvec_nonempty (A, Context) ;
    }
    if (GB_to_hyper_test (A, A->nvec_nonempty, vdim_new))
    {
        GB_OK (GB_to_hyper (A, Context)) ;
    }
    //--------------------------------------------------------------------------
    // resize the number of sparse vectors
    //--------------------------------------------------------------------------
    bool ok = true ;
    int64_t *GB_RESTRICT Ah = A->h ;
    int64_t *GB_RESTRICT Ap = A->p ;
    A->vdim = vdim_new ;
    if (A->is_hyper)
    {
        //----------------------------------------------------------------------
        // A is hypersparse: decrease size of A->p and A->h only if needed
        //----------------------------------------------------------------------
        if (vdim_new < A->plen)
        {
            // reduce the size of A->p and A->h; this cannot fail
            info = GB_hyper_realloc (A, vdim_new, Context) ;
            ASSERT (info == GrB_SUCCESS) ;
            Ap = A->p ;
            Ah = A->h ;
        }
        if (vdim_new < vdim_old)
        {
            // descrease A->nvec to delete the vectors outside the range
            // 0...vdim_new-1.
            int64_t pleft = 0 ;
            int64_t pright = GB_IMIN (A->nvec, vdim_new) - 1 ;
            bool found ;
            // binary search for the first vector index >= vdim_new; pleft
            // becomes the new number of (kept) vectors
            GB_SPLIT_BINARY_SEARCH (vdim_new, Ah, pleft, pright, found) ;
            A->nvec = pleft ;
        }
    }
    else
    {
        //----------------------------------------------------------------------
        // A is not hypersparse: change size of A->p to match the new vdim
        //----------------------------------------------------------------------
        if (vdim_new != vdim_old)
        {
            // change the size of A->p
            GB_REALLOC_MEMORY (A->p, vdim_new+1, vdim_old+1, sizeof (int64_t),
                &ok) ;
            if (!ok)
            {
                // out of memory
                GB_FREE_ALL ;
                return (GB_OUT_OF_MEMORY) ;
            }
            Ap = A->p ;
            A->plen = vdim_new ;
        }
        if (vdim_new > vdim_old)
        {
            // number of vectors is increasing, extend the vector pointers
            // (new trailing vectors are empty: their pointers all equal anz)
            int64_t anz = GB_NNZ (A) ;
            int64_t j ;
            #pragma omp parallel for num_threads(nthreads) schedule(static)
            for (j = vdim_old + 1 ; j <= vdim_new ; j++)
            {
                Ap [j] = anz ;
            }
            // A->nvec_nonempty does not change
        }
        A->nvec = vdim_new ;
    }
    if (vdim_new < vdim_old)
    {
        // number of vectors is decreasing, need to count the new number of
        // non-empty vectors, unless it is done during pruning, just below.
        A->nvec_nonempty = -1 ;     // compute when needed
    }
    //--------------------------------------------------------------------------
    // resize the length of each vector
    //--------------------------------------------------------------------------
    // if vlen is shrinking, delete entries outside the new matrix
    if (vlen_new < vlen_old)
    {
        GB_OK (GB_selector (NULL, GB_RESIZE_opcode, NULL, false, A, vlen_new-1,
            NULL, Context)) ;
    }
    //--------------------------------------------------------------------------
    // vlen has been resized
    //--------------------------------------------------------------------------
    A->vlen = vlen_new ;
    ASSERT_MATRIX_OK (A, "A vlen resized", GB0) ;
    //--------------------------------------------------------------------------
    // check for conversion to hypersparse or to non-hypersparse
    //--------------------------------------------------------------------------
    return (GB_to_hyper_conform (A, Context)) ;
}
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Store the difference x - y in *result.
 * Note: *y is used as scratch space and may be modified (borrow/carry
 * normalization), exactly as in the classic GNU libc example.
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow one second so that x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Carry whole seconds out of the microsecond difference. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* After normalization tv_usec is certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
/* Driver for the order-4 (25-point) 3D stencil benchmark.
 * Fixes vs. original: Nx..Nt are now zero-initialized (the original read them
 * uninitialized when fewer than 4 args were given); the first malloc assigned
 * to roc2 was leaked by an immediate reassignment; the init loops started at
 * index 1 and never touched A[1], although the stencil reads index 0 and both
 * time levels; A and tile_size were never freed. */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  /* Safe defaults: with missing arguments all loops are empty instead of UB. */
  int Nx = 0, Ny = 0, Nz = 0, Nt = 0;
  if (argc > 3) {
    Nx = atoi(argv[1])+8;   /* +8 accounts for the 4-cell halo on each side */
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* Two time levels A[0]/A[1], plus the coefficient field roc2. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  double ***roc2;
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  roc2 = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    roc2[i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
      roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 32;
  tile_size[3] = 32;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;

  /* Initialize the full grid (including index 0 and both time levels) so the
   * stencil never reads indeterminate values. */
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = 0.0;
        roc2[i][j][k] = 2.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  const double coef0 = -0.28472;
  const double coef1 = 0.16000;
  const double coef2 = -0.02000;
  const double coef3 = 0.00254;
  const double coef4 = -0.00018;

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz-4; i++) {
        for (j = 4; j < Ny-4; j++) {
          for (k = 4; k < Nx-4; k++) {
            A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
                coef0* A[t%2][i  ][j  ][k  ] +
                coef1*(A[t%2][i-1][j  ][k  ] + A[t%2][i+1][j  ][k  ] +
                       A[t%2][i  ][j-1][k  ] + A[t%2][i  ][j+1][k  ] +
                       A[t%2][i  ][j  ][k-1] + A[t%2][i  ][j  ][k+1]) +
                coef2*(A[t%2][i-2][j  ][k  ] + A[t%2][i+2][j  ][k  ] +
                       A[t%2][i  ][j-2][k  ] + A[t%2][i  ][j+2][k  ] +
                       A[t%2][i  ][j  ][k-2] + A[t%2][i  ][j  ][k+2]) +
                coef3*(A[t%2][i-3][j  ][k  ] + A[t%2][i+3][j  ][k  ] +
                       A[t%2][i  ][j-3][k  ] + A[t%2][i  ][j+3][k  ] +
                       A[t%2][i  ][j  ][k-3] + A[t%2][i  ][j  ][k+3]) +
                coef4*(A[t%2][i-4][j  ][k  ] + A[t%2][i+4][j  ][k  ] +
                       A[t%2][i  ][j-4][k  ] + A[t%2][i  ][j+4][k  ] +
                       A[t%2][i  ][j  ][k-4] + A[t%2][i  ][j  ][k+4]) );
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (including the top-level pointers and the tile list)
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
      free(roc2[i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    free(roc2[i]);
  }
  free(A[0]);
  free(A[1]);
  free(roc2);
  free(A);
  free(tile_size);
  return 0;
}
|
4269.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "covariance.h"
/* Array initialization. */
static
/* Fills data[i][j] = i*j/M and sets *float_n = 1.2.
 * NOTE(review): the loops iterate over the compile-time bounds M/N, not the
 * runtime parameters m/n — assumes m == M and n == N; confirm with callers. */
void init_array (int m, int n,
DATA_TYPE *float_n,
DATA_TYPE POLYBENCH_2D(data,M,N,m,n))
{
int i, j;
*float_n = 1.2;
for (i = 0; i < M; i++)
for (j = 0; j < N; j++)
data[i][j] = ((DATA_TYPE) i*j) / M;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
/* Dumps the m x m symmetric matrix to stderr (DCE guard / correctness check),
 * inserting a newline every 20 printed entries. */
void print_array(int m,
DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m))
{
int row, col;
for (row = 0; row < m; row++) {
for (col = 0; col < m; col++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[row][col]);
if ((row * m + col) % 20 == 0) fprintf (stderr, "\n");
}
}
fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
/* Computes the m x m covariance matrix of the n x m data matrix:
 * per-column means, mean-centering, then symmat = data^T * data.
 * NOTE(review): `schedule(dynamic, 4)` is not a conforming clause on the
 * `distribute` construct (only dist_schedule is); some compilers reject or
 * ignore it — confirm the intended construct (e.g. `distribute parallel for`). */
void kernel_covariance(int m, int n,
DATA_TYPE float_n,
DATA_TYPE POLYBENCH_2D(data,M,N,m,n),
DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m),
DATA_TYPE POLYBENCH_1D(mean,M,m))
{
int i, j, j1, j2;
#pragma scop
/* Determine mean of column vectors of input data matrix */
{
#pragma omp target teams distribute thread_limit(256) schedule(dynamic, 4)
for (j = 0; j < _PB_M; j++)
{
mean[j] = 0.0;
for (i = 0; i < _PB_N; i++)
mean[j] += data[i][j];
mean[j] /= float_n;
}
/* Center the column vectors. */
#pragma omp target teams distribute thread_limit(256) schedule(dynamic, 4)
for (i = 0; i < _PB_N; i++)
{
for (j = 0; j < _PB_M; j++)
{
data[i][j] -= mean[j];
}
}
/* Calculate the m * m covariance matrix. */
#pragma omp target teams distribute thread_limit(256) schedule(dynamic, 4)
for (j1 = 0; j1 < _PB_M; j1++)
{
/* Only the upper triangle is computed; mirror into the lower half. */
for (j2 = j1; j2 < _PB_M; j2++)
{
symmat[j1][j2] = 0.0;
for (i = 0; i < _PB_N; i++)
symmat[j1][j2] += data[i][j1] * data[i][j2];
symmat[j2][j1] = symmat[j1][j2];
}
}
}
#pragma endscop
}
// Benchmark driver: allocate, initialize, time the kernel, print live-out data.
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int n = N;
int m = M;
/* Variable declaration/allocation. */
DATA_TYPE float_n;
POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n);
POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m);
POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m);
/* Initialize array(s). */
init_array (m, n, &float_n, POLYBENCH_ARRAY(data));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_covariance (m, n, float_n,
POLYBENCH_ARRAY(data),
POLYBENCH_ARRAY(symmat),
POLYBENCH_ARRAY(mean));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(data);
POLYBENCH_FREE_ARRAY(symmat);
POLYBENCH_FREE_ARRAY(mean);
return 0;
}
|
zpotrf.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> s d c
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_potrf
*
* Performs the Cholesky factorization of a Hermitian positive definite
* matrix A. The factorization has the form
*
* \f[ A = L \times L^H, \f]
* or
* \f[ A = U^H \times U, \f]
*
* where U is an upper triangular matrix and L is a lower triangular matrix.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in] n
* The order of the matrix A. n >= 0.
*
* @param[in,out] pA
* On entry, the Hermitian positive definite matrix A.
* If uplo = PlasmaUpper, the leading N-by-N upper triangular part of A
* contains the upper triangular part of the matrix A, and the strictly
* lower triangular part of A is not referenced.
* If uplo = PlasmaLower, the leading N-by-N lower triangular part of A
* contains the lower triangular part of the matrix A, and the strictly
* upper triangular part of A is not referenced.
* On exit, if return value = 0, the factor U or L from the Cholesky
* factorization A = U^H*U or A = L*L^H.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,n).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
* @retval > 0 if i, the leading minor of order i of A is not
* positive definite, so the factorization could not
* be completed, and the solution has not been computed.
*
*******************************************************************************
*
* @sa plasma_omp_zpotrf
* @sa plasma_cpotrf
* @sa plasma_dpotrf
* @sa plasma_spotrf
*
******************************************************************************/
int plasma_zpotrf(plasma_enum_t uplo,
                  int n,
                  plasma_complex64_t *pA, int lda)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }
    // Check input arguments.
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        return -1;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (lda < imax(1, n)) {
        plasma_error("illegal value of lda");
        return -4;
    }
    // quick return
    if (imax(n, 0) == 0)
        return PlasmaSuccess;
    // Set tiling parameters.
    int nb = plasma->nb;
    // Create tile matrix.
    plasma_desc_t A;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        n, n, 0, 0, n, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    // Create sequence.
    plasma_sequence_t *sequence = NULL;
    retval = plasma_sequence_create(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_create() failed");
        // Fix: release the tile descriptor allocated above; the original
        // leaked it on this error path.
        plasma_desc_destroy(&A);
        return retval;
    }
    // Initialize request.
    plasma_request_t request = PlasmaRequestInitializer;
    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_zge2desc(pA, lda, A, sequence, &request);
        // Call the tile async function.
        plasma_omp_zpotrf(uplo, A, sequence, &request);
        // Translate back to LAPACK layout.
        plasma_omp_zdesc2ge(A, pA, lda, sequence, &request);
    }
    // implicit synchronization
    // Free matrix A in tile layout.
    plasma_desc_destroy(&A);
    // Return status.
    int status = sequence->status;
    plasma_sequence_destroy(sequence);
    return status;
}
/***************************************************************************//**
*
* @ingroup plasma_potrf
*
* Performs the Cholesky factorization of a Hermitian positive definite
* matrix.
* Non-blocking tile version of plasma_zpotrf().
* May return before the computation is finished.
* Operates on matrices stored by tiles.
* All matrices are passed through descriptors.
* All dimensions are taken from the descriptors.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in] A
* On entry, the Hermitian positive definite matrix A.
* If uplo = PlasmaUpper, the leading N-by-N upper triangular part of A
* contains the upper triangular part of the matrix A, and the strictly
* lower triangular part of A is not referenced.
* If uplo = PlasmaLower, the leading N-by-N lower triangular part of A
* contains the lower triangular part of the matrix A, and the strictly
* upper triangular part of A is not referenced.
* On exit, if return value = 0, the factor U or L from the Cholesky
* factorization A = U^H*U or A = L*L^H.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes). Check
* the sequence->status for errors.
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_zpotrf
* @sa plasma_omp_zpotrf
* @sa plasma_omp_cpotrf
* @sa plasma_omp_dpotrf
* @sa plasma_omp_spotrf
*
******************************************************************************/
void plasma_omp_zpotrf(plasma_enum_t uplo, plasma_desc_t A,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // BUGFIX: validate sequence/request before anything else.  The original
    // code called plasma_request_fail(sequence, request, ...) in earlier
    // error branches — and even inside the sequence==NULL branch itself —
    // which dereferences a NULL pointer when either argument is NULL.
    if (sequence == NULL) {
        plasma_fatal_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_fatal_error("NULL request");
        return;
    }

    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (A.m == 0)
        return;

    // Call the parallel function.
    plasma_pzpotrf(uplo, A, sequence, request);
}
|
convolution_1x1_bf16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Repack 1x1 convolution weights into the interleaved bf16 layout consumed
// by the sgemm kernel: output channels are grouped in blocks of 8 (aarch64
// only), then 4, then 1, and within each block the weights for one input
// channel are stored contiguously.
static void conv1x1s1_sgemm_transform_kernel_bf16s_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch)
{
    const float* kernel = _kernel;

    // interleave
#if __ARM_NEON && __aarch64__
    kernel_tm.create(4*8, inch/4 + inch%4, outch/8 + (outch%8)/4 + outch%4, (size_t)2u, 1);
#else
    kernel_tm.create(4*4, inch/4 + inch%4, outch/4 + outch%4, (size_t)2u, 1);
#endif // __ARM_NEON && __aarch64__

    int p = 0;
#if __ARM_NEON && __aarch64__
    // Blocks of 8 output channels.
    for (; p+7<outch; p+=8)
    {
        unsigned short* ktmp = kernel_tm.channel(p/8);

        const float* src[8];
        for (int j=0; j<8; j++)
            src[j] = kernel + (p+j)*inch;

        for (int q=0; q<inch; q++)
        {
            // 8 weights (one per output channel) for input channel q.
            for (int j=0; j<8; j++)
                ktmp[j] = float32_to_bfloat16(src[j][q]);
            ktmp += 8;
        }
    }
#endif // __ARM_NEON && __aarch64__
    // Blocks of 4 output channels.
    for (; p+3<outch; p+=4)
    {
#if __ARM_NEON && __aarch64__
        unsigned short* ktmp = kernel_tm.channel(p/8 + (p%8)/4);
#else
        unsigned short* ktmp = kernel_tm.channel(p/4);
#endif // __ARM_NEON && __aarch64__

        const float* src[4];
        for (int j=0; j<4; j++)
            src[j] = kernel + (p+j)*inch;

        for (int q=0; q<inch; q++)
        {
            // 4 weights (one per output channel) for input channel q.
            for (int j=0; j<4; j++)
                ktmp[j] = float32_to_bfloat16(src[j][q]);
            ktmp += 4;
        }
    }
    // Remaining single output channels: straight bf16 copy of the row.
    for (; p<outch; p++)
    {
#if __ARM_NEON && __aarch64__
        unsigned short* ktmp = kernel_tm.channel(p/8 + (p%8)/4 + p%4);
#else
        unsigned short* ktmp = kernel_tm.channel(p/4 + p%4);
#endif // __ARM_NEON && __aarch64__

        const float* krow = kernel + p*inch;
        for (int q=0; q<inch; q++)
            ktmp[q] = float32_to_bfloat16(krow[q]);
    }
}
static void conv1x1s1_sgemm_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outch = top_blob.c;
const int size = w * h;
const float* bias = _bias;
// interleave
Mat tmp(8*4, inch/4+inch%4, size/8 + (size%8)/4 + size%4, 2u, opt.workspace_allocator);
{
int nn_size = size >> 3;
int remain_size_start = nn_size << 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii=0; ii<nn_size; ii++)
{
int i = ii * 8;
const unsigned short* img0 = bottom_blob.channel(0);
img0 += i;
unsigned short* tmpptr = tmp.channel(i/8);
for (int q=0; q<inch; q++)
{
#if __ARM_NEON
#if __aarch64__
vst1q_u16(tmpptr, vld1q_u16(img0));
tmpptr += 8;
img0 += bottom_blob.cstep;
#else
asm volatile(
"pld [%0, #128] \n"
"vld1.u16 {d0-d1}, [%0 :64] \n"
"vst1.u16 {d0-d1}, [%1 :64]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0"
);
img0 += bottom_blob.cstep;
#endif // __aarch64__
#else
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr[2] = img0[2];
tmpptr[3] = img0[3];
tmpptr[4] = img0[4];
tmpptr[5] = img0[5];
tmpptr[6] = img0[6];
tmpptr[7] = img0[7];
tmpptr += 8;
img0 += bottom_blob.cstep;
#endif // __ARM_NEON
}
}
nn_size = (size - remain_size_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii=0; ii<nn_size; ii++)
{
int i = remain_size_start + ii * 4;
const unsigned short* img0 = bottom_blob.channel(0);
img0 += i;
unsigned short* tmpptr = tmp.channel(i/8 + (i%8)/4);
for (int q=0; q<inch; q++)
{
#if __ARM_NEON
#if __aarch64__
vst1_u16(tmpptr, vld1_u16(img0));
tmpptr += 4;
img0 += bottom_blob.cstep;
#else
asm volatile(
"pld [%0, #64] \n"
"vld1.u16 {d0}, [%0 :64] \n"
"vst1.u16 {d0}, [%1 :64]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "d0"
);
img0 += bottom_blob.cstep;
#endif // __aarch64__
#else
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr[2] = img0[2];
tmpptr[3] = img0[3];
tmpptr += 4;
img0 += bottom_blob.cstep;
#endif // __ARM_NEON
}
}
remain_size_start += nn_size << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i=remain_size_start; i<size; i++)
{
const unsigned short* img0 = bottom_blob.channel(0);
img0 += i;
unsigned short* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4);
for (int q=0; q<inch; q++)
{
tmpptr[0] = img0[0];
tmpptr++;
img0 += bottom_blob.cstep;
}
}
}
int nn_outch = 0;
int remain_outch_start = 0;
#if __ARM_NEON && __aarch64__
nn_outch = outch >> 3;
remain_outch_start = nn_outch << 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 8;
unsigned short* outptr0 = top_blob.channel(p);
unsigned short* outptr1 = top_blob.channel(p+1);
unsigned short* outptr2 = top_blob.channel(p+2);
unsigned short* outptr3 = top_blob.channel(p+3);
unsigned short* outptr4 = top_blob.channel(p+4);
unsigned short* outptr5 = top_blob.channel(p+5);
unsigned short* outptr6 = top_blob.channel(p+6);
unsigned short* outptr7 = top_blob.channel(p+7);
const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
const float* biasptr = bias ? bias + p : zeros;
int i = 0;
for (; i+7<size; i+=8)
{
const unsigned short* tmpptr = tmp.channel(i/8);
const unsigned short* kptr = kernel.channel(p/8);
asm volatile(
"ld1 {v0.4s, v1.4s}, [%20] \n"
"dup v16.4s, v0.s[0] \n"
"dup v17.4s, v0.s[0] \n"
"dup v18.4s, v0.s[1] \n"
"dup v19.4s, v0.s[1] \n"
"dup v20.4s, v0.s[2] \n"
"dup v21.4s, v0.s[2] \n"
"dup v22.4s, v0.s[3] \n"
"dup v23.4s, v0.s[3] \n"
"dup v24.4s, v1.s[0] \n"
"dup v25.4s, v1.s[0] \n"
"dup v26.4s, v1.s[1] \n"
"dup v27.4s, v1.s[1] \n"
"dup v28.4s, v1.s[2] \n"
"dup v29.4s, v1.s[2] \n"
"dup v30.4s, v1.s[3] \n"
"dup v31.4s, v1.s[3] \n"
// inch loop
"lsr w4, %w21, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%8, #256] \n"
"ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%8], #32 \n"
"shll v8.4s, v8.4h, #16 \n"
"shll v9.4s, v9.4h, #16 \n"
"shll v10.4s, v10.4h, #16 \n"
"shll v11.4s, v11.4h, #16 \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%9], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v18.4s, v8.4s, v0.s[1] \n"
"fmla v20.4s, v8.4s, v0.s[2] \n"
"fmla v22.4s, v8.4s, v0.s[3] \n"
"fmla v17.4s, v9.4s, v0.s[0] \n"
"fmla v19.4s, v9.4s, v0.s[1] \n"
"fmla v21.4s, v9.4s, v0.s[2] \n"
"fmla v23.4s, v9.4s, v0.s[3] \n"
"fmla v24.4s, v8.4s, v1.s[0] \n"
"fmla v26.4s, v8.4s, v1.s[1] \n"
"fmla v28.4s, v8.4s, v1.s[2] \n"
"fmla v30.4s, v8.4s, v1.s[3] \n"
"fmla v25.4s, v9.4s, v1.s[0] \n"
"fmla v27.4s, v9.4s, v1.s[1] \n"
"fmla v29.4s, v9.4s, v1.s[2] \n"
"fmla v31.4s, v9.4s, v1.s[3] \n"
"prfm pldl1keep, [%8, #256] \n"
"ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%8], #32 \n"
"shll v12.4s, v12.4h, #16 \n"
"shll v13.4s, v13.4h, #16 \n"
"shll v14.4s, v14.4h, #16 \n"
"shll v15.4s, v15.4h, #16 \n"
"fmla v16.4s, v10.4s, v2.s[0] \n"
"fmla v18.4s, v10.4s, v2.s[1] \n"
"fmla v20.4s, v10.4s, v2.s[2] \n"
"fmla v22.4s, v10.4s, v2.s[3] \n"
"fmla v17.4s, v11.4s, v2.s[0] \n"
"fmla v19.4s, v11.4s, v2.s[1] \n"
"fmla v21.4s, v11.4s, v2.s[2] \n"
"fmla v23.4s, v11.4s, v2.s[3] \n"
"fmla v24.4s, v10.4s, v3.s[0] \n"
"fmla v26.4s, v10.4s, v3.s[1] \n"
"fmla v28.4s, v10.4s, v3.s[2] \n"
"fmla v30.4s, v10.4s, v3.s[3] \n"
"fmla v25.4s, v11.4s, v3.s[0] \n"
"fmla v27.4s, v11.4s, v3.s[1] \n"
"fmla v29.4s, v11.4s, v3.s[2] \n"
"fmla v31.4s, v11.4s, v3.s[3] \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%9], #32 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v16.4s, v12.4s, v4.s[0] \n"
"fmla v18.4s, v12.4s, v4.s[1] \n"
"fmla v20.4s, v12.4s, v4.s[2] \n"
"fmla v22.4s, v12.4s, v4.s[3] \n"
"fmla v17.4s, v13.4s, v4.s[0] \n"
"fmla v19.4s, v13.4s, v4.s[1] \n"
"fmla v21.4s, v13.4s, v4.s[2] \n"
"fmla v23.4s, v13.4s, v4.s[3] \n"
"fmla v24.4s, v12.4s, v5.s[0] \n"
"fmla v26.4s, v12.4s, v5.s[1] \n"
"fmla v28.4s, v12.4s, v5.s[2] \n"
"fmla v30.4s, v12.4s, v5.s[3] \n"
"fmla v25.4s, v13.4s, v5.s[0] \n"
"fmla v27.4s, v13.4s, v5.s[1] \n"
"fmla v29.4s, v13.4s, v5.s[2] \n"
"fmla v31.4s, v13.4s, v5.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v16.4s, v14.4s, v6.s[0] \n"
"fmla v18.4s, v14.4s, v6.s[1] \n"
"fmla v20.4s, v14.4s, v6.s[2] \n"
"fmla v22.4s, v14.4s, v6.s[3] \n"
"fmla v17.4s, v15.4s, v6.s[0] \n"
"fmla v19.4s, v15.4s, v6.s[1] \n"
"fmla v21.4s, v15.4s, v6.s[2] \n"
"fmla v23.4s, v15.4s, v6.s[3] \n"
"fmla v24.4s, v14.4s, v7.s[0] \n"
"fmla v26.4s, v14.4s, v7.s[1] \n"
"fmla v28.4s, v14.4s, v7.s[2] \n"
"fmla v30.4s, v14.4s, v7.s[3] \n"
"fmla v25.4s, v15.4s, v7.s[0] \n"
"fmla v27.4s, v15.4s, v7.s[1] \n"
"fmla v29.4s, v15.4s, v7.s[2] \n"
"fmla v31.4s, v15.4s, v7.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w21, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%8, #128] \n"
"ld1 {v8.4h, v9.4h}, [%8], #16 \n"
"shll v8.4s, v8.4h, #16 \n"
"shll v9.4s, v9.4h, #16 \n"
"prfm pldl1keep, [%9, #128] \n"
"ld1 {v0.4h, v1.4h}, [%9], #16 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v18.4s, v8.4s, v0.s[1] \n"
"fmla v20.4s, v8.4s, v0.s[2] \n"
"fmla v22.4s, v8.4s, v0.s[3] \n"
"fmla v17.4s, v9.4s, v0.s[0] \n"
"fmla v19.4s, v9.4s, v0.s[1] \n"
"fmla v21.4s, v9.4s, v0.s[2] \n"
"fmla v23.4s, v9.4s, v0.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v24.4s, v8.4s, v1.s[0] \n"
"fmla v26.4s, v8.4s, v1.s[1] \n"
"fmla v28.4s, v8.4s, v1.s[2] \n"
"fmla v30.4s, v8.4s, v1.s[3] \n"
"fmla v25.4s, v9.4s, v1.s[0] \n"
"fmla v27.4s, v9.4s, v1.s[1] \n"
"fmla v29.4s, v9.4s, v1.s[2] \n"
"fmla v31.4s, v9.4s, v1.s[3] \n"
"bne 2b \n"
"3: \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"shrn v18.4h, v18.4s, #16 \n"
"shrn v19.4h, v19.4s, #16 \n"
"shrn v20.4h, v20.4s, #16 \n"
"shrn v21.4h, v21.4s, #16 \n"
"shrn v22.4h, v22.4s, #16 \n"
"shrn v23.4h, v23.4s, #16 \n"
"shrn v24.4h, v24.4s, #16 \n"
"shrn v25.4h, v25.4s, #16 \n"
"shrn v26.4h, v26.4s, #16 \n"
"shrn v27.4h, v27.4s, #16 \n"
"shrn v28.4h, v28.4s, #16 \n"
"shrn v29.4h, v29.4s, #16 \n"
"shrn v30.4h, v30.4s, #16 \n"
"shrn v31.4h, v31.4s, #16 \n"
"st1 {v16.4h, v17.4h}, [%0], #16 \n"
"st1 {v18.4h, v19.4h}, [%1], #16 \n"
"st1 {v20.4h, v21.4h}, [%2], #16 \n"
"st1 {v22.4h, v23.4h}, [%3], #16 \n"
"st1 {v24.4h, v25.4h}, [%4], #16 \n"
"st1 {v26.4h, v27.4h}, [%5], #16 \n"
"st1 {v28.4h, v29.4h}, [%6], #16 \n"
"st1 {v30.4h, v31.4h}, [%7], #16 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(outptr4), // %4
"=r"(outptr5), // %5
"=r"(outptr6), // %6
"=r"(outptr7), // %7
"=r"(tmpptr), // %8
"=r"(kptr) // %9
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(outptr4),
"5"(outptr5),
"6"(outptr6),
"7"(outptr7),
"8"(tmpptr),
"9"(kptr),
"r"(biasptr), // %20
"r"(inch) // %21
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
for (; i+3<size; i+=4)
{
const unsigned short* tmpptr = tmp.channel(i/8 + (i%8)/4);
const unsigned short* kptr = kernel.channel(p/8);
asm volatile(
"ld1 {v0.4s, v1.4s}, [%20] \n"
"dup v16.4s, v0.s[0] \n"
"dup v17.4s, v0.s[1] \n"
"dup v18.4s, v0.s[2] \n"
"dup v19.4s, v0.s[3] \n"
"dup v20.4s, v1.s[0] \n"
"dup v21.4s, v1.s[1] \n"
"dup v22.4s, v1.s[2] \n"
"dup v23.4s, v1.s[3] \n"
// inch loop
"lsr w4, %w21, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%8, #256] \n"
"ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%8], #32 \n"
"shll v8.4s, v8.4h, #16 \n"
"shll v9.4s, v9.4h, #16 \n"
"shll v10.4s, v10.4h, #16 \n"
"shll v11.4s, v11.4h, #16 \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%9], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v0.s[1] \n"
"fmla v18.4s, v8.4s, v0.s[2] \n"
"fmla v19.4s, v8.4s, v0.s[3] \n"
"fmla v20.4s, v8.4s, v1.s[0] \n"
"fmla v21.4s, v8.4s, v1.s[1] \n"
"fmla v22.4s, v8.4s, v1.s[2] \n"
"fmla v23.4s, v8.4s, v1.s[3] \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%9], #32 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v16.4s, v9.4s, v2.s[0] \n"
"fmla v17.4s, v9.4s, v2.s[1] \n"
"fmla v18.4s, v9.4s, v2.s[2] \n"
"fmla v19.4s, v9.4s, v2.s[3] \n"
"fmla v20.4s, v9.4s, v3.s[0] \n"
"fmla v21.4s, v9.4s, v3.s[1] \n"
"fmla v22.4s, v9.4s, v3.s[2] \n"
"fmla v23.4s, v9.4s, v3.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v16.4s, v10.4s, v4.s[0] \n"
"fmla v17.4s, v10.4s, v4.s[1] \n"
"fmla v18.4s, v10.4s, v4.s[2] \n"
"fmla v19.4s, v10.4s, v4.s[3] \n"
"fmla v20.4s, v10.4s, v5.s[0] \n"
"fmla v21.4s, v10.4s, v5.s[1] \n"
"fmla v22.4s, v10.4s, v5.s[2] \n"
"fmla v23.4s, v10.4s, v5.s[3] \n"
"fmla v16.4s, v11.4s, v6.s[0] \n"
"fmla v17.4s, v11.4s, v6.s[1] \n"
"fmla v18.4s, v11.4s, v6.s[2] \n"
"fmla v19.4s, v11.4s, v6.s[3] \n"
"fmla v20.4s, v11.4s, v7.s[0] \n"
"fmla v21.4s, v11.4s, v7.s[1] \n"
"fmla v22.4s, v11.4s, v7.s[2] \n"
"fmla v23.4s, v11.4s, v7.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w21, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%8, #64] \n"
"ld1 {v8.4h}, [%8], #8 \n"
"shll v8.4s, v8.4h, #16 \n"
"prfm pldl1keep, [%9, #128] \n"
"ld1 {v0.4h, v1.4h}, [%9], #16 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v0.s[1] \n"
"fmla v18.4s, v8.4s, v0.s[2] \n"
"fmla v19.4s, v8.4s, v0.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v20.4s, v8.4s, v1.s[0] \n"
"fmla v21.4s, v8.4s, v1.s[1] \n"
"fmla v22.4s, v8.4s, v1.s[2] \n"
"fmla v23.4s, v8.4s, v1.s[3] \n"
"bne 2b \n"
"3: \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"shrn v18.4h, v18.4s, #16 \n"
"shrn v19.4h, v19.4s, #16 \n"
"shrn v20.4h, v20.4s, #16 \n"
"shrn v21.4h, v21.4s, #16 \n"
"shrn v22.4h, v22.4s, #16 \n"
"shrn v23.4h, v23.4s, #16 \n"
"st1 {v16.4h}, [%0], #8 \n"
"st1 {v17.4h}, [%1], #8 \n"
"st1 {v18.4h}, [%2], #8 \n"
"st1 {v19.4h}, [%3], #8 \n"
"st1 {v20.4h}, [%4], #8 \n"
"st1 {v21.4h}, [%5], #8 \n"
"st1 {v22.4h}, [%6], #8 \n"
"st1 {v23.4h}, [%7], #8 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(outptr4), // %4
"=r"(outptr5), // %5
"=r"(outptr6), // %6
"=r"(outptr7), // %7
"=r"(tmpptr), // %8
"=r"(kptr) // %9
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(outptr4),
"5"(outptr5),
"6"(outptr6),
"7"(outptr7),
"8"(tmpptr),
"9"(kptr),
"r"(biasptr), // %20
"r"(inch) // %21
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"
);
}
for (; i<size; i++)
{
const unsigned short* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4);
const unsigned short* kptr = kernel.channel(p/8);
asm volatile(
"ld1 {v24.4s, v25.4s}, [%20] \n"
// inch loop
"lsr w4, %w21, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"0: \n"
"prfm pldl1keep, [%8, #64] \n"
"ld1 {v8.4h}, [%8], #8 \n"
"shll v8.4s, v8.4h, #16 \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%9], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v16.4s, v0.4s, v8.s[0] \n"
"fmla v17.4s, v1.4s, v8.s[0] \n"
"fmla v18.4s, v2.4s, v8.s[1] \n"
"fmla v19.4s, v3.4s, v8.s[1] \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%9], #32 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"subs w4, w4, #1 \n"
"fmla v20.4s, v4.4s, v8.s[2] \n"
"fmla v21.4s, v5.4s, v8.s[2] \n"
"fmla v22.4s, v6.4s, v8.s[3] \n"
"fmla v23.4s, v7.4s, v8.s[3] \n"
"bne 0b \n"
"fadd v16.4s, v16.4s, v18.4s \n"
"fadd v17.4s, v17.4s, v19.4s \n"
"fadd v20.4s, v20.4s, v22.4s \n"
"fadd v21.4s, v21.4s, v23.4s \n"
"fadd v16.4s, v16.4s, v20.4s \n"
"fadd v17.4s, v17.4s, v21.4s \n"
"fadd v24.4s, v24.4s, v16.4s \n"
"fadd v25.4s, v25.4s, v17.4s \n"
"1: \n"
// remain loop
"and w4, %w21, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%8, #16] \n"
"ld1r {v8.4h}, [%8], #2 \n"
"shll v8.4s, v8.4h, #16 \n"
"prfm pldl1keep, [%9, #128] \n"
"ld1 {v0.4h, v1.4h}, [%9], #16 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"subs w4, w4, #1 \n"
"fmla v24.4s, v8.4s, v0.4s \n"
"fmla v25.4s, v8.4s, v1.4s \n"
"bne 2b \n"
"3: \n"
"shrn v24.4h, v24.4s, #16 \n"
"shrn v25.4h, v25.4s, #16 \n"
"st1 {v24.h}[0],[%0], #2 \n"
"st1 {v24.h}[1],[%1], #2 \n"
"st1 {v24.h}[2],[%2], #2 \n"
"st1 {v24.h}[3],[%3], #2 \n"
"st1 {v25.h}[0],[%4], #2 \n"
"st1 {v25.h}[1],[%5], #2 \n"
"st1 {v25.h}[2],[%6], #2 \n"
"st1 {v25.h}[3],[%7], #2 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(outptr4), // %4
"=r"(outptr5), // %5
"=r"(outptr6), // %6
"=r"(outptr7), // %7
"=r"(tmpptr), // %8
"=r"(kptr) // %9
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(outptr4),
"5"(outptr5),
"6"(outptr6),
"7"(outptr7),
"8"(tmpptr),
"9"(kptr),
"r"(biasptr), // %20
"r"(inch) // %21
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25"
);
}
}
#endif // __ARM_NEON && __aarch64__
nn_outch = (outch - remain_outch_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
unsigned short* outptr0 = top_blob.channel(p);
unsigned short* outptr1 = top_blob.channel(p+1);
unsigned short* outptr2 = top_blob.channel(p+2);
unsigned short* outptr3 = top_blob.channel(p+3);
const float zeros[4] = {0.f, 0.f, 0.f, 0.f};
const float* biasptr = bias ? bias + p : zeros;
int i = 0;
for (; i+7<size; i+=8)
{
const unsigned short* tmpptr = tmp.channel(i/8);
#if __ARM_NEON && __aarch64__
const unsigned short* kptr = kernel.channel(p/8 + (p%8)/4);
#else
const unsigned short* kptr = kernel.channel(p/4);
#endif // __ARM_NEON && __aarch64__
#if __ARM_NEON
#if __aarch64__
asm volatile(
"ld1 {v0.4s}, [%12] \n"
"dup v8.4s, v0.s[0] \n"
"dup v9.4s, v0.s[0] \n"
"dup v10.4s, v0.s[1] \n"
"dup v11.4s, v0.s[1] \n"
"dup v12.4s, v0.s[2] \n"
"dup v13.4s, v0.s[2] \n"
"dup v14.4s, v0.s[3] \n"
"dup v15.4s, v0.s[3] \n"
// inch loop
"lsr w4, %w13, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%4], #32 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%5], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v10.4s, v4.4s, v0.s[1] \n"
"fmla v12.4s, v4.4s, v0.s[2] \n"
"fmla v14.4s, v4.4s, v0.s[3] \n"
"fmla v9.4s, v5.4s, v0.s[0] \n"
"fmla v11.4s, v5.4s, v0.s[1] \n"
"fmla v13.4s, v5.4s, v0.s[2] \n"
"fmla v15.4s, v5.4s, v0.s[3] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%4], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v8.4s, v6.4s, v1.s[0] \n"
"fmla v10.4s, v6.4s, v1.s[1] \n"
"fmla v12.4s, v6.4s, v1.s[2] \n"
"fmla v14.4s, v6.4s, v1.s[3] \n"
"fmla v9.4s, v7.4s, v1.s[0] \n"
"fmla v11.4s, v7.4s, v1.s[1] \n"
"fmla v13.4s, v7.4s, v1.s[2] \n"
"fmla v15.4s, v7.4s, v1.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v16.4s, v2.s[0] \n"
"fmla v10.4s, v16.4s, v2.s[1] \n"
"fmla v12.4s, v16.4s, v2.s[2] \n"
"fmla v14.4s, v16.4s, v2.s[3] \n"
"fmla v9.4s, v17.4s, v2.s[0] \n"
"fmla v11.4s, v17.4s, v2.s[1] \n"
"fmla v13.4s, v17.4s, v2.s[2] \n"
"fmla v15.4s, v17.4s, v2.s[3] \n"
"fmla v8.4s, v18.4s, v3.s[0] \n"
"fmla v10.4s, v18.4s, v3.s[1] \n"
"fmla v12.4s, v18.4s, v3.s[2] \n"
"fmla v14.4s, v18.4s, v3.s[3] \n"
"fmla v9.4s, v19.4s, v3.s[0] \n"
"fmla v11.4s, v19.4s, v3.s[1] \n"
"fmla v13.4s, v19.4s, v3.s[2] \n"
"fmla v15.4s, v19.4s, v3.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w13, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v4.4h, v5.4h}, [%4], #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"prfm pldl1keep, [%5, #64] \n"
"ld1 {v0.4h}, [%5], #8 \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v10.4s, v4.4s, v0.s[1] \n"
"fmla v12.4s, v4.4s, v0.s[2] \n"
"fmla v14.4s, v4.4s, v0.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v9.4s, v5.4s, v0.s[0] \n"
"fmla v11.4s, v5.4s, v0.s[1] \n"
"fmla v13.4s, v5.4s, v0.s[2] \n"
"fmla v15.4s, v5.4s, v0.s[3] \n"
"bne 2b \n"
"3: \n"
"shrn v8.4h, v8.4s, #16 \n"
"shrn v9.4h, v9.4s, #16 \n"
"shrn v10.4h, v10.4s, #16 \n"
"shrn v11.4h, v11.4s, #16 \n"
"shrn v12.4h, v12.4s, #16 \n"
"shrn v13.4h, v13.4s, #16 \n"
"shrn v14.4h, v14.4s, #16 \n"
"shrn v15.4h, v15.4s, #16 \n"
"st1 {v8.4h, v9.4h}, [%0], #16 \n"
"st1 {v10.4h, v11.4h}, [%1], #16 \n"
"st1 {v12.4h, v13.4h}, [%2], #16 \n"
"st1 {v14.4h, v15.4h}, [%3], #16 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(biasptr), // %12
"r"(inch) // %13
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"
);
#else // __aarch64__
asm volatile(
"vld1.f32 {d0-d1}, [%12] \n"
"vdup.f32 q8, d0[0] \n"
"vdup.f32 q9, d0[0] \n"
"vdup.f32 q10, d0[1] \n"
"vdup.f32 q11, d0[1] \n"
"vdup.f32 q12, d1[0] \n"
"vdup.f32 q13, d1[0] \n"
"vdup.f32 q14, d1[1] \n"
"vdup.f32 q15, d1[1] \n"
// inch loop
"lsr r4, %13, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"
"pld [%4, #256] \n"
"vld1.u16 {d12-d15}, [%4 :64]! \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"pld [%5, #256] \n"
"vld1.u16 {d4-d7}, [%5 :64]! \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q10, q4, d0[1] \n"
"vmla.f32 q12, q4, d1[0] \n"
"vmla.f32 q14, q4, d1[1] \n"
"vmla.f32 q9, q5, d0[0] \n"
"vmla.f32 q11, q5, d0[1] \n"
"vmla.f32 q13, q5, d1[0] \n"
"vmla.f32 q15, q5, d1[1] \n"
"vmla.f32 q8, q6, d2[0] \n"
"vmla.f32 q10, q6, d2[1] \n"
"vmla.f32 q12, q6, d3[0] \n"
"vmla.f32 q14, q6, d3[1] \n"
"vmla.f32 q9, q7, d2[0] \n"
"vmla.f32 q11, q7, d2[1] \n"
"vmla.f32 q13, q7, d3[0] \n"
"vmla.f32 q15, q7, d3[1] \n"
"pld [%4, #256] \n"
"vld1.u16 {d12-d15}, [%4 :64]! \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q8, q4, d4[0] \n"
"vmla.f32 q10, q4, d4[1] \n"
"vmla.f32 q12, q4, d5[0] \n"
"vmla.f32 q14, q4, d5[1] \n"
"vmla.f32 q9, q5, d4[0] \n"
"vmla.f32 q11, q5, d4[1] \n"
"vmla.f32 q13, q5, d5[0] \n"
"vmla.f32 q15, q5, d5[1] \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q6, d6[0] \n"
"vmla.f32 q10, q6, d6[1] \n"
"vmla.f32 q12, q6, d7[0] \n"
"vmla.f32 q14, q6, d7[1] \n"
"vmla.f32 q9, q7, d6[0] \n"
"vmla.f32 q11, q7, d6[1] \n"
"vmla.f32 q13, q7, d7[0] \n"
"vmla.f32 q15, q7, d7[1] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and r4, %13, #3 \n"// r4 = remain = inch & 3;
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%4, #128] \n"
"vld1.u16 {d10-d11}, [%4 :64]! \n"
"vshll.u16 q4, d10, #16 \n"
"vshll.u16 q5, d11, #16 \n"
"pld [%5, #64] \n"
"vld1.u16 {d1}, [%5 :64]! \n"
"vshll.u16 q0, d1, #16 \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q10, q4, d0[1] \n"
"vmla.f32 q12, q4, d1[0] \n"
"vmla.f32 q14, q4, d1[1] \n"
"subs r4, r4, #1 \n"
"vmla.f32 q9, q5, d0[0] \n"
"vmla.f32 q11, q5, d0[1] \n"
"vmla.f32 q13, q5, d1[0] \n"
"vmla.f32 q15, q5, d1[1] \n"
"bne 2b \n"
"3: \n"
"vshrn.u32 d16, q8, #16 \n"
"vshrn.u32 d17, q9, #16 \n"
"vshrn.u32 d20, q10, #16 \n"
"vshrn.u32 d21, q11, #16 \n"
"vshrn.u32 d24, q12, #16 \n"
"vshrn.u32 d25, q13, #16 \n"
"vshrn.u32 d28, q14, #16 \n"
"vshrn.u32 d29, q15, #16 \n"
"vst1.u16 {d16-d17}, [%0 :64]! \n"
"vst1.u16 {d20-d21}, [%1 :64]! \n"
"vst1.u16 {d24-d25}, [%2 :64]! \n"
"vst1.u16 {d28-d29}, [%3 :64]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(biasptr), // %12
"r"(inch) // %13
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
#else
float sum0_0 = biasptr[0];
float sum0_1 = biasptr[0];
float sum0_2 = biasptr[0];
float sum0_3 = biasptr[0];
float sum0_4 = biasptr[0];
float sum0_5 = biasptr[0];
float sum0_6 = biasptr[0];
float sum0_7 = biasptr[0];
float sum1_0 = biasptr[1];
float sum1_1 = biasptr[1];
float sum1_2 = biasptr[1];
float sum1_3 = biasptr[1];
float sum1_4 = biasptr[1];
float sum1_5 = biasptr[1];
float sum1_6 = biasptr[1];
float sum1_7 = biasptr[1];
float sum2_0 = biasptr[2];
float sum2_1 = biasptr[2];
float sum2_2 = biasptr[2];
float sum2_3 = biasptr[2];
float sum2_4 = biasptr[2];
float sum2_5 = biasptr[2];
float sum2_6 = biasptr[2];
float sum2_7 = biasptr[2];
float sum3_0 = biasptr[3];
float sum3_1 = biasptr[3];
float sum3_2 = biasptr[3];
float sum3_3 = biasptr[3];
float sum3_4 = biasptr[3];
float sum3_5 = biasptr[3];
float sum3_6 = biasptr[3];
float sum3_7 = biasptr[3];
for (int q=0; q<inch; q++)
{
sum0_0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[0]);
sum0_1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr[0]);
sum0_2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr[0]);
sum0_3 += bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr[0]);
sum0_4 += bfloat16_to_float32(tmpptr[4]) * bfloat16_to_float32(kptr[0]);
sum0_5 += bfloat16_to_float32(tmpptr[5]) * bfloat16_to_float32(kptr[0]);
sum0_6 += bfloat16_to_float32(tmpptr[6]) * bfloat16_to_float32(kptr[0]);
sum0_7 += bfloat16_to_float32(tmpptr[7]) * bfloat16_to_float32(kptr[0]);
sum1_0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[1]);
sum1_1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr[1]);
sum1_2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr[1]);
sum1_3 += bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr[1]);
sum1_4 += bfloat16_to_float32(tmpptr[4]) * bfloat16_to_float32(kptr[1]);
sum1_5 += bfloat16_to_float32(tmpptr[5]) * bfloat16_to_float32(kptr[1]);
sum1_6 += bfloat16_to_float32(tmpptr[6]) * bfloat16_to_float32(kptr[1]);
sum1_7 += bfloat16_to_float32(tmpptr[7]) * bfloat16_to_float32(kptr[1]);
sum2_0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[2]);
sum2_1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr[2]);
sum2_2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr[2]);
sum2_3 += bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr[2]);
sum2_4 += bfloat16_to_float32(tmpptr[4]) * bfloat16_to_float32(kptr[2]);
sum2_5 += bfloat16_to_float32(tmpptr[5]) * bfloat16_to_float32(kptr[2]);
sum2_6 += bfloat16_to_float32(tmpptr[6]) * bfloat16_to_float32(kptr[2]);
sum2_7 += bfloat16_to_float32(tmpptr[7]) * bfloat16_to_float32(kptr[2]);
sum3_0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[3]);
sum3_1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr[3]);
sum3_2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr[3]);
sum3_3 += bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr[3]);
sum3_4 += bfloat16_to_float32(tmpptr[4]) * bfloat16_to_float32(kptr[3]);
sum3_5 += bfloat16_to_float32(tmpptr[5]) * bfloat16_to_float32(kptr[3]);
sum3_6 += bfloat16_to_float32(tmpptr[6]) * bfloat16_to_float32(kptr[3]);
sum3_7 += bfloat16_to_float32(tmpptr[7]) * bfloat16_to_float32(kptr[3]);
tmpptr += 8;
kptr += 4;
}
outptr0[0] = float32_to_bfloat16(sum0_0);
outptr0[1] = float32_to_bfloat16(sum0_1);
outptr0[2] = float32_to_bfloat16(sum0_2);
outptr0[3] = float32_to_bfloat16(sum0_3);
outptr0[4] = float32_to_bfloat16(sum0_4);
outptr0[5] = float32_to_bfloat16(sum0_5);
outptr0[6] = float32_to_bfloat16(sum0_6);
outptr0[7] = float32_to_bfloat16(sum0_7);
outptr1[0] = float32_to_bfloat16(sum1_0);
outptr1[1] = float32_to_bfloat16(sum1_1);
outptr1[2] = float32_to_bfloat16(sum1_2);
outptr1[3] = float32_to_bfloat16(sum1_3);
outptr1[4] = float32_to_bfloat16(sum1_4);
outptr1[5] = float32_to_bfloat16(sum1_5);
outptr1[6] = float32_to_bfloat16(sum1_6);
outptr1[7] = float32_to_bfloat16(sum1_7);
outptr2[0] = float32_to_bfloat16(sum2_0);
outptr2[1] = float32_to_bfloat16(sum2_1);
outptr2[2] = float32_to_bfloat16(sum2_2);
outptr2[3] = float32_to_bfloat16(sum2_3);
outptr2[4] = float32_to_bfloat16(sum2_4);
outptr2[5] = float32_to_bfloat16(sum2_5);
outptr2[6] = float32_to_bfloat16(sum2_6);
outptr2[7] = float32_to_bfloat16(sum2_7);
outptr3[0] = float32_to_bfloat16(sum3_0);
outptr3[1] = float32_to_bfloat16(sum3_1);
outptr3[2] = float32_to_bfloat16(sum3_2);
outptr3[3] = float32_to_bfloat16(sum3_3);
outptr3[4] = float32_to_bfloat16(sum3_4);
outptr3[5] = float32_to_bfloat16(sum3_5);
outptr3[6] = float32_to_bfloat16(sum3_6);
outptr3[7] = float32_to_bfloat16(sum3_7);
outptr0 += 8;
outptr1 += 8;
outptr2 += 8;
outptr3 += 8;
#endif // __ARM_NEON
}
for (; i+3<size; i+=4)
{
const unsigned short* tmpptr = tmp.channel(i/8 + (i%8)/4);
#if __ARM_NEON && __aarch64__
const unsigned short* kptr = kernel.channel(p/8 + (p%8)/4);
#else
const unsigned short* kptr = kernel.channel(p/4);
#endif // __ARM_NEON && __aarch64__
#if __ARM_NEON
#if __aarch64__
asm volatile(
"ld1 {v0.4s}, [%12] \n"
"dup v8.4s, v0.s[0] \n"
"dup v9.4s, v0.s[1] \n"
"dup v10.4s, v0.s[2] \n"
"dup v11.4s, v0.s[3] \n"
// inch loop
"lsr w4, %w13, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%4], #32 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%5], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"fmla v8.4s, v5.4s, v1.s[0] \n"
"fmla v9.4s, v5.4s, v1.s[1] \n"
"fmla v10.4s, v5.4s, v1.s[2] \n"
"fmla v11.4s, v5.4s, v1.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v6.4s, v2.s[0] \n"
"fmla v9.4s, v6.4s, v2.s[1] \n"
"fmla v10.4s, v6.4s, v2.s[2] \n"
"fmla v11.4s, v6.4s, v2.s[3] \n"
"fmla v8.4s, v7.4s, v3.s[0] \n"
"fmla v9.4s, v7.4s, v3.s[1] \n"
"fmla v10.4s, v7.4s, v3.s[2] \n"
"fmla v11.4s, v7.4s, v3.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w13, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%4, #64] \n"
"ld1 {v4.4h}, [%4], #8 \n"
"shll v4.4s, v4.4h, #16 \n"
"prfm pldl1keep, [%5, #64] \n"
"ld1 {v0.4h}, [%5], #8 \n"
"shll v0.4s, v0.4h, #16 \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"bne 2b \n"
"3: \n"
"shrn v8.4h, v8.4s, #16 \n"
"shrn v9.4h, v9.4s, #16 \n"
"shrn v10.4h, v10.4s, #16 \n"
"shrn v11.4h, v11.4s, #16 \n"
"st1 {v8.4h}, [%0], #8 \n"
"st1 {v9.4h}, [%1], #8 \n"
"st1 {v10.4h}, [%2], #8 \n"
"st1 {v11.4h}, [%3], #8 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(biasptr), // %12
"r"(inch) // %13
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"
);
#else // __aarch64__
asm volatile(
"vld1.f32 {d0-d1}, [%12] \n"
"vdup.f32 q8, d0[0] \n"
"vdup.f32 q9, d0[1] \n"
"vdup.f32 q10, d1[0] \n"
"vdup.f32 q11, d1[1] \n"
// inch loop
"lsr r4, %13, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"
"pld [%4, #256] \n"
"vld1.u16 {d12-d15}, [%4 :64]! \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"pld [%5, #256] \n"
"vld1.u16 {d4-d7}, [%5 :64]! \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d0[1] \n"
"vmla.f32 q10, q4, d1[0] \n"
"vmla.f32 q11, q4, d1[1] \n"
"vmla.f32 q8, q5, d2[0] \n"
"vmla.f32 q9, q5, d2[1] \n"
"vmla.f32 q10, q5, d3[0] \n"
"vmla.f32 q11, q5, d3[1] \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q6, d4[0] \n"
"vmla.f32 q9, q6, d4[1] \n"
"vmla.f32 q10, q6, d5[0] \n"
"vmla.f32 q11, q6, d5[1] \n"
"vmla.f32 q8, q7, d6[0] \n"
"vmla.f32 q9, q7, d6[1] \n"
"vmla.f32 q10, q7, d7[0] \n"
"vmla.f32 q11, q7, d7[1] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and r4, %13, #3 \n"// r4 = remain = inch & 3;
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%4, #64] \n"
"vld1.u16 {d9}, [%4 :64]! \n"
"vshll.u16 q4, d9, #16 \n"
"pld [%5, #64] \n"
"vld1.u16 {d1}, [%5 :64]! \n"
"vshll.u16 q0, d1, #16 \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d0[1] \n"
"vmla.f32 q10, q4, d1[0] \n"
"vmla.f32 q11, q4, d1[1] \n"
"bne 2b \n"
"3: \n"
"vshrn.u32 d16, q8, #16 \n"
"vshrn.u32 d18, q9, #16 \n"
"vshrn.u32 d20, q10, #16 \n"
"vshrn.u32 d22, q11, #16 \n"
"vst1.u16 {d16}, [%0 :64]! \n"
"vst1.u16 {d18}, [%1 :64]! \n"
"vst1.u16 {d20}, [%2 :64]! \n"
"vst1.u16 {d22}, [%3 :64]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(biasptr), // %12
"r"(inch) // %13
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"
);
#endif // __aarch64__
#else
float sum0_0 = biasptr[0];
float sum0_1 = biasptr[0];
float sum0_2 = biasptr[0];
float sum0_3 = biasptr[0];
float sum1_0 = biasptr[1];
float sum1_1 = biasptr[1];
float sum1_2 = biasptr[1];
float sum1_3 = biasptr[1];
float sum2_0 = biasptr[2];
float sum2_1 = biasptr[2];
float sum2_2 = biasptr[2];
float sum2_3 = biasptr[2];
float sum3_0 = biasptr[3];
float sum3_1 = biasptr[3];
float sum3_2 = biasptr[3];
float sum3_3 = biasptr[3];
for (int q=0; q<inch; q++)
{
sum0_0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[0]);
sum0_1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr[0]);
sum0_2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr[0]);
sum0_3 += bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr[0]);
sum1_0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[1]);
sum1_1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr[1]);
sum1_2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr[1]);
sum1_3 += bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr[1]);
sum2_0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[2]);
sum2_1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr[2]);
sum2_2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr[2]);
sum2_3 += bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr[2]);
sum3_0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[3]);
sum3_1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr[3]);
sum3_2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr[3]);
sum3_3 += bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr[3]);
tmpptr += 4;
kptr += 4;
}
outptr0[0] = float32_to_bfloat16(sum0_0);
outptr0[1] = float32_to_bfloat16(sum0_1);
outptr0[2] = float32_to_bfloat16(sum0_2);
outptr0[3] = float32_to_bfloat16(sum0_3);
outptr1[0] = float32_to_bfloat16(sum1_0);
outptr1[1] = float32_to_bfloat16(sum1_1);
outptr1[2] = float32_to_bfloat16(sum1_2);
outptr1[3] = float32_to_bfloat16(sum1_3);
outptr2[0] = float32_to_bfloat16(sum2_0);
outptr2[1] = float32_to_bfloat16(sum2_1);
outptr2[2] = float32_to_bfloat16(sum2_2);
outptr2[3] = float32_to_bfloat16(sum2_3);
outptr3[0] = float32_to_bfloat16(sum3_0);
outptr3[1] = float32_to_bfloat16(sum3_1);
outptr3[2] = float32_to_bfloat16(sum3_2);
outptr3[3] = float32_to_bfloat16(sum3_3);
outptr0 += 4;
outptr1 += 4;
outptr2 += 4;
outptr3 += 4;
#endif // __ARM_NEON
}
for (; i<size; i++)
{
const unsigned short* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4);
#if __ARM_NEON && __aarch64__
const unsigned short* kptr = kernel.channel(p/8 + (p%8)/4);
#else
const unsigned short* kptr = kernel.channel(p/4);
#endif // __ARM_NEON && __aarch64__
#if __ARM_NEON
#if __aarch64__
asm volatile(
"ld1 {v12.4s}, [%12] \n"
// inch loop
"lsr w4, %w13, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"0: \n"
"prfm pldl1keep, [%4, #64] \n"
"ld1 {v4.4h}, [%4], #8 \n"
"shll v4.4s, v4.4h, #16 \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%5], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v9.4s, v1.4s, v4.s[1] \n"
"fmla v10.4s, v2.4s, v4.s[2] \n"
"fmla v11.4s, v3.4s, v4.s[3] \n"
"bne 0b \n"
"fadd v8.4s, v8.4s, v9.4s \n"
"fadd v10.4s, v10.4s, v11.4s \n"
"fadd v8.4s, v8.4s, v10.4s \n"
"fadd v12.4s, v12.4s, v8.4s \n"
"1: \n"
// remain loop
"and w4, %w13, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%4, #16] \n"
"ld1r {v4.4h}, [%4], #2 \n"
"shll v4.4s, v4.4h, #16 \n"
"prfm pldl1keep, [%5, #64] \n"
"ld1 {v0.4h}, [%5], #8 \n"
"shll v0.4s, v0.4h, #16 \n"
"subs w4, w4, #1 \n"
"fmla v12.4s, v4.4s, v0.4s \n"
"bne 2b \n"
"3: \n"
"shrn v12.4h, v12.4s, #16 \n"
"st1 {v12.h}[0], [%0], #2 \n"
"st1 {v12.h}[1], [%1], #2 \n"
"st1 {v12.h}[2], [%2], #2 \n"
"st1 {v12.h}[3], [%3], #2 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(biasptr), // %12
"r"(inch) // %13
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11", "v12"
);
#else // __aarch64__
asm volatile(
"vld1.f32 {d24-d25}, [%12] \n"
// inch loop
"lsr r4, %13, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"veor q8, q8, q8 \n"
"veor q9, q9, q9 \n"
"veor q10, q10, q10 \n"
"veor q11, q11, q11 \n"
"0: \n"
"pld [%4, #64] \n"
"vld1.u16 {d9}, [%4 :64]! \n"
"vshll.u16 q4, d9, #16 \n"
"pld [%5, #256] \n"
"vld1.u16 {d4-d7}, [%5 :64]! \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q0, d8[0] \n"
"vmla.f32 q9, q1, d8[1] \n"
"vmla.f32 q10, q2, d9[0] \n"
"vmla.f32 q11, q3, d9[1] \n"
"bne 0b \n"
"vadd.f32 q8, q8, q9 \n"
"vadd.f32 q10, q10, q11 \n"
"vadd.f32 q8, q8, q10 \n"
"vadd.f32 q12, q12, q8 \n"
"1: \n"
// remain loop
"and r4, %13, #3 \n"// r4 = remain = inch & 3;
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%4, #16] \n"
"vld1.u16 {d9[]}, [%4]! \n"
"vshll.u16 q4, d9, #16 \n"
"pld [%5, #64] \n"
"vld1.u16 {d1}, [%5 :64]! \n"
"vshll.u16 q0, d1, #16 \n"
"subs r4, r4, #1 \n"
"vmla.f32 q12, q4, q0 \n"
"bne 2b \n"
"3: \n"
"vshrn.u32 d24, q12, #16 \n"
"vst1.u16 {d24[0]}, [%0]! \n"
"vst1.u16 {d24[1]}, [%1]! \n"
"vst1.u16 {d24[2]}, [%2]! \n"
"vst1.u16 {d24[3]}, [%3]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(biasptr), // %12
"r"(inch) // %13
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11", "q12"
);
#endif // __aarch64__
#else
float sum0 = biasptr[0];
float sum1 = biasptr[1];
float sum2 = biasptr[2];
float sum3 = biasptr[3];
for (int q=0; q<inch; q++)
{
sum0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[0]);
sum1 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[1]);
sum2 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[2]);
sum3 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[3]);
tmpptr++;
kptr += 4;
}
outptr0[0] = float32_to_bfloat16(sum0);
outptr1[0] = float32_to_bfloat16(sum1);
outptr2[0] = float32_to_bfloat16(sum2);
outptr3[0] = float32_to_bfloat16(sum3);
outptr0++;
outptr1++;
outptr2++;
outptr3++;
#endif // __ARM_NEON
}
}
remain_outch_start += nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=remain_outch_start; p<outch; p++)
{
Mat out0 = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
unsigned short* outptr0 = out0;
int i = 0;
for (; i+7<size; i+=8)
{
const unsigned short* tmpptr = tmp.channel(i/8);
#if __ARM_NEON && __aarch64__
const unsigned short* kptr = kernel.channel(p/8 + (p%8)/4 + p%4);
#else
const unsigned short* kptr = kernel.channel(p/4 + p%4);
#endif // __ARM_NEON && __aarch64__
#if __ARM_NEON
#if __aarch64__
asm volatile(
"dup v8.4s, %w6 \n"
"dup v9.4s, %w6 \n"
// inch loop
"lsr w4, %w7, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%1], #32 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"prfm pldl1keep, [%2, #64] \n"
"ld1 {v0.4h}, [%2], #8 \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v5.4s, v0.s[0] \n"
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%1], #32 \n"
"shll v12.4s, v12.4h, #16 \n"
"shll v13.4s, v13.4h, #16 \n"
"shll v14.4s, v14.4h, #16 \n"
"shll v15.4s, v15.4h, #16 \n"
"fmla v8.4s, v6.4s, v0.s[1] \n"
"fmla v9.4s, v7.4s, v0.s[1] \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v12.4s, v0.s[2] \n"
"fmla v9.4s, v13.4s, v0.s[2] \n"
"fmla v8.4s, v14.4s, v0.s[3] \n"
"fmla v9.4s, v15.4s, v0.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w7, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v4.4h, v5.4h}, [%1], #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"prfm pldl1keep, [%2, #16] \n"
"ld1r {v0.4h}, [%2], #2 \n"
"shll v0.4s, v0.4h, #16 \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v4.4s, v0.4s \n"
"fmla v9.4s, v5.4s, v0.4s \n"
"bne 2b \n"
"3: \n"
"shrn v8.4h, v8.4s, #16 \n"
"shrn v9.4h, v9.4s, #16 \n"
"st1 {v8.4h, v9.4h}, [%0], #16 \n"
: "=r"(outptr0), // %0
"=r"(tmpptr), // %1
"=r"(kptr) // %2
: "0"(outptr0),
"1"(tmpptr),
"2"(kptr),
"r"(bias0), // %6
"r"(inch) // %7
: "cc", "memory", "x4", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v12", "v13", "v14", "v15"
);
#else // __aarch64__
asm volatile(
"vdup.f32 q8, %6 \n"
"vdup.f32 q9, %6 \n"
// inch loop
"lsr r4, %7, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"
"pld [%1, #256] \n"
"vld1.u16 {d12-d15}, [%1 :64]! \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"pld [%2, #64] \n"
"vld1.u16 {d1}, [%2 :64]! \n"
"vshll.u16 q0, d1, #16 \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q5, d0[0] \n"
"pld [%1, #256] \n"
"vld1.u16 {d28-d31}, [%1 :64]! \n"
"vshll.u16 q12, d28, #16 \n"
"vshll.u16 q13, d29, #16 \n"
"vshll.u16 q14, d30, #16 \n"
"vshll.u16 q15, d31, #16 \n"
"vmla.f32 q8, q6, d0[1] \n"
"vmla.f32 q9, q7, d0[1] \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q12, d1[0] \n"
"vmla.f32 q9, q13, d1[0] \n"
"vmla.f32 q8, q14, d1[1] \n"
"vmla.f32 q9, q15, d1[1] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and r4, %7, #3 \n"// r4 = remain = inch & 3;
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%1, #128] \n"
"vld1.u16 {d10-d11}, [%1 :64]! \n"
"vshll.u16 q4, d10, #16 \n"
"vshll.u16 q5, d11, #16 \n"
"pld [%2, #16] \n"
"vld1.u16 {d1[]}, [%2]! \n"
"vshll.u16 q0, d1, #16 \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q4, q0 \n"
"vmla.f32 q9, q5, q0 \n"
"bne 2b \n"
"3: \n"
"vshrn.u32 d16, q8, #16 \n"
"vshrn.u32 d17, q9, #16 \n"
"vst1.u16 {d16-d17}, [%0 :64]! \n"
: "=r"(outptr0), // %0
"=r"(tmpptr), // %1
"=r"(kptr) // %2
: "0"(outptr0),
"1"(tmpptr),
"2"(kptr),
"r"(bias0), // %6
"r"(inch) // %7
: "cc", "memory", "r4", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
#else
float sum0 = bias0;
float sum1 = bias0;
float sum2 = bias0;
float sum3 = bias0;
float sum4 = bias0;
float sum5 = bias0;
float sum6 = bias0;
float sum7 = bias0;
for (int q=0; q<inch; q++)
{
sum0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[0]);
sum1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr[0]);
sum2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr[0]);
sum3 += bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr[0]);
sum4 += bfloat16_to_float32(tmpptr[4]) * bfloat16_to_float32(kptr[0]);
sum5 += bfloat16_to_float32(tmpptr[5]) * bfloat16_to_float32(kptr[0]);
sum6 += bfloat16_to_float32(tmpptr[6]) * bfloat16_to_float32(kptr[0]);
sum7 += bfloat16_to_float32(tmpptr[7]) * bfloat16_to_float32(kptr[0]);
tmpptr += 8;
kptr++;
}
outptr0[0] = float32_to_bfloat16(sum0);
outptr0[1] = float32_to_bfloat16(sum1);
outptr0[2] = float32_to_bfloat16(sum2);
outptr0[3] = float32_to_bfloat16(sum3);
outptr0[4] = float32_to_bfloat16(sum4);
outptr0[5] = float32_to_bfloat16(sum5);
outptr0[6] = float32_to_bfloat16(sum6);
outptr0[7] = float32_to_bfloat16(sum7);
outptr0 += 8;
#endif // __ARM_NEON
}
for (; i+3<size; i+=4)
{
const unsigned short* tmpptr = tmp.channel(i/8 + (i%8)/4);
#if __ARM_NEON && __aarch64__
const unsigned short* kptr = kernel.channel(p/8 + (p%8)/4 + p%4);
#else
const unsigned short* kptr = kernel.channel(p/4 + p%4);
#endif // __ARM_NEON && __aarch64__
#if __ARM_NEON
#if __aarch64__
asm volatile(
"dup v8.4s, %w6 \n"
// inch loop
"lsr w4, %w7, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%1], #32 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"prfm pldl1keep, [%2, #64] \n"
"ld1 {v0.4h}, [%2], #8 \n"
"shll v0.4s, v0.4h, #16 \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v8.4s, v5.4s, v0.s[1] \n"
"fmla v8.4s, v6.4s, v0.s[2] \n"
"fmla v8.4s, v7.4s, v0.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w7, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%1, #64] \n"
"ld1 {v4.4h}, [%1], #8 \n"
"shll v4.4s, v4.4h, #16 \n"
"prfm pldl1keep, [%2, #16] \n"
"ld1r {v0.4h}, [%2], #2 \n"
"shll v0.4s, v0.4h, #16 \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v4.4s, v0.4s \n"
"bne 2b \n"
"3: \n"
"shrn v8.4h, v8.4s, #16 \n"
"st1 {v8.4h}, [%0], #8 \n"
: "=r"(outptr0), // %0
"=r"(tmpptr), // %1
"=r"(kptr) // %2
: "0"(outptr0),
"1"(tmpptr),
"2"(kptr),
"r"(bias0), // %6
"r"(inch) // %7
: "cc", "memory", "x4", "v0", "v4", "v5", "v6", "v7", "v8"
);
#else // __aarch64__
asm volatile(
"vdup.f32 q8, %6 \n"
// inch loop
"lsr r4, %7, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"
"pld [%1, #256] \n"
"vld1.u16 {d12-d15}, [%1 :64]! \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"pld [%2, #64] \n"
"vld1.u16 {d1}, [%2]! \n"
"vshll.u16 q0, d1, #16 \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q8, q5, d0[1] \n"
"vmla.f32 q8, q6, d1[0] \n"
"vmla.f32 q8, q7, d1[1] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and r4, %7, #3 \n"// r4 = remain = inch & 3;
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%1, #64] \n"
"vld1.u16 {d9}, [%1 :64]! \n"
"vshll.u16 q4, d9, #16 \n"
"pld [%2, #16] \n"
"vld1.u16 {d1[]}, [%2]! \n"
"vshll.u16 q0, d1, #16 \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q4, q0 \n"
"bne 2b \n"
"3: \n"
"vshrn.u32 d16, q8, #16 \n"
"vst1.u16 {d16}, [%0 :64]! \n"
: "=r"(outptr0), // %0
"=r"(tmpptr), // %1
"=r"(kptr) // %2
: "0"(outptr0),
"1"(tmpptr),
"2"(kptr),
"r"(bias0), // %6
"r"(inch) // %7
: "cc", "memory", "r4", "q0", "q4", "q5", "q6", "q7", "q8"
);
#endif // __aarch64__
#else
float sum0 = bias0;
float sum1 = bias0;
float sum2 = bias0;
float sum3 = bias0;
for (int q=0; q<inch; q++)
{
sum0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[0]);
sum1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr[0]);
sum2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr[0]);
sum3 += bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr[0]);
tmpptr += 4;
kptr++;
}
outptr0[0] = float32_to_bfloat16(sum0);
outptr0[1] = float32_to_bfloat16(sum1);
outptr0[2] = float32_to_bfloat16(sum2);
outptr0[3] = float32_to_bfloat16(sum3);
outptr0 += 4;
#endif // __ARM_NEON
}
// Tail loop: one remaining output column at a time for this output channel.
for (; i<size; i++)
{
// tmp is packed in tiles of 8, then 4, then 1 column — index past the 8- and 4-wide tiles.
const unsigned short* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4);
#if __ARM_NEON && __aarch64__
// Kernel data was repacked in groups of 8/4/1 output channels on aarch64.
const unsigned short* kptr = kernel.channel(p/8 + (p%8)/4 + p%4);
#else
// On 32-bit ARM (and scalar) the repack granularity is 4/1 output channels.
const unsigned short* kptr = kernel.channel(p/4 + p%4);
#endif // __ARM_NEON && __aarch64__
int q = 0;
#if __ARM_NEON
// Vectorized dot product over the input channels, 4 at a time.
float32x4_t _sum0 = vdupq_n_f32(0.f);
for (; q+3<inch; q+=4)
{
// bf16 -> f32: widen the 16-bit values into the high half of 32-bit lanes
// (left shift by 16), then reinterpret the bits as float — same conversion
// the scalar bfloat16_to_float32() helper performs.
float32x4_t _p0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(tmpptr), 16));
tmpptr += 4;
float32x4_t _k0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(kptr), 16));
kptr += 4;
#if __aarch64__
// Fused multiply-add on A64; A32 NEON uses the (non-fused) vmla form.
_sum0 = vfmaq_f32(_sum0, _p0, _k0);
#else
_sum0 = vmlaq_f32(_sum0, _p0, _k0);
#endif
}
#if __aarch64__
// Horizontal reduction of the 4 partial sums, plus the channel bias.
float sum0 = bias0 + vaddvq_f32(_sum0);
#else
// A32 has no vaddvq: reduce via pairwise adds on the two 64-bit halves.
float32x2_t _ss = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0));
float sum0 = bias0 + vget_lane_f32(vpadd_f32(_ss, _ss), 0);
#endif
#else
float sum0 = bias0;
#endif // __ARM_NEON
// Scalar tail for the remaining (inch % 4) input channels (or all of them
// when NEON is unavailable).
for (; q<inch; q++)
{
sum0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[0]);
tmpptr++;
kptr++;
}
// Store the result back in bf16 and advance the output pointer.
outptr0[0] = float32_to_bfloat16(sum0);
outptr0++;
}
}
// // NOTE sgemm
// for (; p<outch; p++)
// {
// Mat out0 = top_blob.channel(p);
//
// const float bias0 = bias ? bias[p] : 0.f;
//
// float* outptr0 = out0;
//
// for (int i=0; i<size; i++)
// {
// float sum = bias0;
//
// const float* kptr = _kernel.channel(p/8 + p%8);
//
// for (int q=0; q<inch; q++)
// {
// const float* img0 = bottom_blob.channel(q);
//
// sum += img0[i] * kptr[0];
// kptr ++;
// }
//
// outptr0[i] = sum;
// }
// }
}
|
morphology.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M M OOO RRRR PPPP H H OOO L OOO GGGG Y Y %
% MM MM O O R R P P H H O O L O O G Y Y %
% M M M O O RRRR PPPP HHHHH O O L O O G GGG Y %
% M M O O R R P H H O O L O O G G Y %
% M M OOO R R P H H OOO LLLLL OOO GGG Y %
% %
% %
% MagickCore Morphology Methods %
% %
% Software Design %
% Anthony Thyssen %
% January 2010 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Morphology is the application of various kernels, of any size or shape, to an
% image in various ways (typically binary, but not always).
%
% Convolution (weighted sum or average) is just one specific type of
% morphology. Just one that is very common for image blurring and sharpening
% effects. Not only 2D Gaussian blurring, but also 2-pass 1D Blurring.
%
% This module provides not only a general morphology function, and the ability
% to apply more advanced or iterative morphologies, but also functions for the
% generation of many different types of kernel arrays from user supplied
% arguments. Perhaps even the generation of a kernel from a small image.
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/cache-view.h"
#include "magick/color-private.h"
#include "magick/channel.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/hashmap.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/memory-private.h"
#include "magick/monitor-private.h"
#include "magick/morphology.h"
#include "magick/morphology-private.h"
#include "magick/option.h"
#include "magick/pixel-private.h"
#include "magick/prepress.h"
#include "magick/quantize.h"
#include "magick/registry.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/splay-tree.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/utility.h"
/*
Other global definitions used by module.
*/
#define Minimize(assign,value) assign=MagickMin(assign,value)
#define Maximize(assign,value) assign=MagickMax(assign,value)
/* Integer Factorial Function - for a Binomial kernel */
#if 1
/* Integer factorial: returns n! computed iteratively (fact(0) == fact(1) == 1).
   Used to build Binomial kernel coefficients; only valid while n! fits in a
   size_t. */
static inline size_t fact(size_t n)
{
  size_t
    result,
    term;

  result=1;
  for (term=2; term <= n; term++)
    result*=term;
  return(result);
}
#elif 1 /* glibc floating point alternatives */
#define fact(n) ((size_t)tgamma((double)n+1))
#else
#define fact(n) ((size_t)lgamma((double)n+1))
#endif
/* Currently these are only internal to this module */
static void
CalcKernelMetaData(KernelInfo *),
ExpandMirrorKernelInfo(KernelInfo *),
ExpandRotateKernelInfo(KernelInfo *, const double),
RotateKernelInfo(KernelInfo *, double);
/* Quick function to find last kernel in a kernel list */
/* Return the final kernel in a linked kernel list by walking the 'next'
   chain until it terminates.  Assumes 'kernel' is non-NULL. */
static inline KernelInfo *LastKernelInfo(KernelInfo *kernel)
{
  KernelInfo
    *last;

  for (last=kernel; last->next != (KernelInfo *) NULL; last=last->next) ;
  return(last);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelInfo() takes the given string (generally supplied by the
% user) and converts it into a Morphology/Convolution Kernel. This allows
% users to specify a kernel from a number of pre-defined kernels, or to fully
% specify their own kernel for a specific Convolution or Morphology
% Operation.
%
% The kernel so generated can be any rectangular array of floating point
% values (doubles) with the 'control point' or 'pixel being affected'
% anywhere within that array of values.
%
% Previously IM was restricted to a square of odd size using the exact
% center as origin, this is no longer the case, and any rectangular kernel
% with any value being declared the origin. This in turn allows the use of
% highly asymmetrical kernels.
%
% The floating point values in the kernel can also include a special value
% known as 'nan' or 'not a number' to indicate that this value is not part
% of the kernel array. This allows you to shape the kernel within its
% rectangular area. That is 'nan' values provide a 'mask' for the kernel
% shape. However at least one non-nan value must be provided for correct
% working of a kernel.
%
% The returned kernel should be freed using the DestroyKernelInfo method
% when you are finished with it. Do not free this memory yourself.
%
% Input kernel definition strings can consist of any of three types.
%
% "name:args[[@><]"
% Select from one of the built in kernels, using the name and
% geometry arguments supplied. See AcquireKernelBuiltIn()
%
% "WxH[+X+Y][@><]:num, num, num ..."
% a kernel of size W by H, with W*H floating point numbers following.
% the 'center' can be optionally be defined at +X+Y (such that +0+0
% is top left corner). If not defined the pixel in the center, for
% odd sizes, or to the immediate top or left of center for even sizes
% is automatically selected.
%
% "num, num, num, num, ..."
% list of floating point numbers defining an 'old style' odd sized
% square kernel. At least 9 values should be provided for a 3x3
% square kernel, 25 for a 5x5 square kernel, 49 for 7x7, etc.
% Values can be space or comma separated. This is not recommended.
%
% You can define a 'list of kernels' which can be used by some morphology
% operators. A list is defined as a semi-colon separated list of kernels.
%
% " kernel ; kernel ; kernel ; "
%
% Any extra ';' characters, at start, end or between kernel definitions are
% simply ignored.
%
% The special flags will expand a single kernel, into a list of rotated
% kernels. A '@' flag will expand a 3x3 kernel into a list of 45-degree
% cyclic rotations, while a '>' will generate a list of 90-degree rotations.
% The '<' also expands using 90-degree rotates, but giving a 180-degree
% reflected kernel before the +/- 90-degree rotations, which can be important
% for Thinning operations.
%
% Note that 'name' kernels will start with an alphabetic character while the
% new kernel specification has a ':' character in its specification string.
% If neither is the case, it is assumed an old style of a simple list of
% numbers generating a odd-sized square kernel has been given.
%
% The format of the AcquireKernelInfo method is:
%
% KernelInfo *AcquireKernelInfo(const char *kernel_string)
%
% A description of each parameter follows:
%
% o kernel_string: the Morphology/Convolution kernel wanted.
%
*/
/* This was separated so that it could be used as a separate
** array input handling function, such as for -color-matrix
**
** Parses a user-defined kernel string ("WxH[+X+Y]:num,num,..." or an
** old-style odd-square list of numbers) into a newly allocated KernelInfo.
** Returns NULL on allocation failure, and destroys the partial kernel
** (returning NULL) on any parse error.
*/
static KernelInfo *ParseKernelArray(const char *kernel_string)
{
  KernelInfo
    *kernel;
  char
    token[MaxTextExtent];
  const char
    *p,
    *end;
  ssize_t
    i;
  double
    nan = sqrt((double)-1.0); /* Special Value : Not A Number */
  MagickStatusType
    flags;
  GeometryInfo
    args;
  kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
  if (kernel == (KernelInfo *) NULL)
    return(kernel);
  (void) memset(kernel,0,sizeof(*kernel));
  kernel->minimum = kernel->maximum = kernel->angle = 0.0;
  kernel->negative_range = kernel->positive_range = 0.0;
  kernel->type = UserDefinedKernel;
  kernel->next = (KernelInfo *) NULL;
  kernel->signature = MagickCoreSignature;
  if (kernel_string == (const char *) NULL)
    return(kernel);
  /* find end of this specific kernel definition string */
  end = strchr(kernel_string, ';');
  if ( end == (char *) NULL )
    end = strchr(kernel_string, '\0');
  /* clear flags - for Expanding kernel lists through rotations */
  flags = NoValue;
  /* Has a ':' in argument - New user kernel specification
     FUTURE: this split on ':' could be done by StringToken()
  */
  p = strchr(kernel_string, ':');
  if ( p != (char *) NULL && p < end)
    {
    /* BUGFIX: the geometry prefix must fit in the fixed-size 'token'
    ** buffer; without this check the memcpy below can overflow the
    ** stack for an overlong user-supplied kernel string. */
    if ((size_t) (p-kernel_string) >= MaxTextExtent)
      return(DestroyKernelInfo(kernel));
    /* ParseGeometry() needs the geometry separated! -- Arrgghh */
    memcpy(token, kernel_string, (size_t) (p-kernel_string));
    token[p-kernel_string] = '\0';
    SetGeometryInfo(&args);
    flags = ParseGeometry(token, &args);
    /* Size handling and checks of geometry settings */
    if ( (flags & WidthValue) == 0 ) /* if no width then */
      args.rho = args.sigma; /* then width = height */
    if ( args.rho < 1.0 ) /* if width too small */
      args.rho = 1.0; /* then width = 1 */
    if ( args.sigma < 1.0 ) /* if height too small */
      args.sigma = args.rho; /* then height = width */
    kernel->width = (size_t)args.rho;
    kernel->height = (size_t)args.sigma;
    /* Offset Handling and Checks */
    if ( args.xi < 0.0 || args.psi < 0.0 )
      return(DestroyKernelInfo(kernel));
    kernel->x = ((flags & XValue)!=0) ? (ssize_t)args.xi
      : (ssize_t) (kernel->width-1)/2;
    kernel->y = ((flags & YValue)!=0) ? (ssize_t)args.psi
      : (ssize_t) (kernel->height-1)/2;
    if ( kernel->x >= (ssize_t) kernel->width ||
         kernel->y >= (ssize_t) kernel->height )
      return(DestroyKernelInfo(kernel));
    p++; /* advance beyond the ':' */
    }
  else
    { /* ELSE - Old old specification, forming odd-square kernel */
    /* count up number of values given */
    p=(const char *) kernel_string;
    while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
      p++; /* ignore "'" chars for convolve filter usage - Cristy */
    for (i=0; p < end; i++)
    {
      (void) GetNextToken(p,&p,MaxTextExtent,token);
      if (*token == ',')
        (void) GetNextToken(p,&p,MaxTextExtent,token);
    }
    /* set the size of the kernel - old sized square */
    kernel->width = kernel->height= (size_t) sqrt((double) i+1.0);
    kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
    p=(const char *) kernel_string;
    while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
      p++; /* ignore "'" chars for convolve filter usage - Cristy */
    }
  /* Read in the kernel values from rest of input string argument */
  kernel->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(
    kernel->width,kernel->height*sizeof(*kernel->values)));
  if (kernel->values == (double *) NULL)
    return(DestroyKernelInfo(kernel));
  kernel->minimum=MagickMaximumValue;
  kernel->maximum=(-MagickMaximumValue);
  kernel->negative_range = kernel->positive_range = 0.0;
  for (i=0; (i < (ssize_t) (kernel->width*kernel->height)) && (p < end); i++)
  {
    (void) GetNextToken(p,&p,MaxTextExtent,token);
    if (*token == ',')
      (void) GetNextToken(p,&p,MaxTextExtent,token);
    if ( LocaleCompare("nan",token) == 0
         || LocaleCompare("-",token) == 0 ) {
      kernel->values[i] = nan; /* this value is not part of neighbourhood */
    }
    else {
      kernel->values[i] = StringToDouble(token,(char **) NULL);
      ( kernel->values[i] < 0)
        ? ( kernel->negative_range += kernel->values[i] )
        : ( kernel->positive_range += kernel->values[i] );
      Minimize(kernel->minimum, kernel->values[i]);
      Maximize(kernel->maximum, kernel->values[i]);
    }
  }
  /* sanity check -- no more values in kernel definition */
  (void) GetNextToken(p,&p,MaxTextExtent,token);
  if ( *token != '\0' && *token != ';' && *token != '\'' )
    return(DestroyKernelInfo(kernel));
#if 0
  /* this was the old method of handling a incomplete kernel */
  if ( i < (ssize_t) (kernel->width*kernel->height) ) {
    Minimize(kernel->minimum, kernel->values[i]);
    Maximize(kernel->maximum, kernel->values[i]);
    for ( ; i < (ssize_t) (kernel->width*kernel->height); i++)
      kernel->values[i]=0.0;
  }
#else
  /* Number of values for kernel was not enough - Report Error */
  if ( i < (ssize_t) (kernel->width*kernel->height) )
    return(DestroyKernelInfo(kernel));
#endif
  /* check that we received at least one real (non-nan) value! */
  if (kernel->minimum == MagickMaximumValue)
    return(DestroyKernelInfo(kernel));
  if ( (flags & AreaValue) != 0 ) /* '@' symbol in kernel size */
    ExpandRotateKernelInfo(kernel, 45.0); /* cyclic rotate 3x3 kernels */
  else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
    ExpandRotateKernelInfo(kernel, 90.0); /* 90 degree rotate of kernel */
  else if ( (flags & LessValue) != 0 ) /* '<' symbol in kernel args */
    ExpandMirrorKernelInfo(kernel); /* 90 degree mirror rotate */
  return(kernel);
}
static KernelInfo *ParseKernelName(const char *kernel_string)
{
char
token[MaxTextExtent];
const char
*p,
*end;
GeometryInfo
args;
KernelInfo
*kernel;
MagickStatusType
flags;
ssize_t
type;
/* Parse special 'named' kernel */
(void) GetNextToken(kernel_string,&p,MaxTextExtent,token);
type=ParseCommandOption(MagickKernelOptions,MagickFalse,token);
if ( type < 0 || type == UserDefinedKernel )
return((KernelInfo *) NULL); /* not a valid named kernel */
while (((isspace((int) ((unsigned char) *p)) != 0) ||
(*p == ',') || (*p == ':' )) && (*p != '\0') && (*p != ';'))
p++;
end = strchr(p, ';'); /* end of this kernel defintion */
if ( end == (char *) NULL )
end = strchr(p, '\0');
/* ParseGeometry() needs the geometry separated! -- Arrgghh */
memcpy(token, p, (size_t) (end-p));
token[end-p] = '\0';
SetGeometryInfo(&args);
flags = ParseGeometry(token, &args);
#if 0
/* For Debugging Geometry Input */
(void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n",
flags, args.rho, args.sigma, args.xi, args.psi );
#endif
/* special handling of missing values in input string */
switch( type ) {
/* Shape Kernel Defaults */
case UnityKernel:
if ( (flags & WidthValue) == 0 )
args.rho = 1.0; /* Default scale = 1.0, zero is valid */
break;
case SquareKernel:
case DiamondKernel:
case OctagonKernel:
case DiskKernel:
case PlusKernel:
case CrossKernel:
if ( (flags & HeightValue) == 0 )
args.sigma = 1.0; /* Default scale = 1.0, zero is valid */
break;
case RingKernel:
if ( (flags & XValue) == 0 )
args.xi = 1.0; /* Default scale = 1.0, zero is valid */
break;
case RectangleKernel: /* Rectangle - set size defaults */
if ( (flags & WidthValue) == 0 ) /* if no width then */
args.rho = args.sigma; /* then width = height */
if ( args.rho < 1.0 ) /* if width too small */
args.rho = 3; /* then width = 3 */
if ( args.sigma < 1.0 ) /* if height too small */
args.sigma = args.rho; /* then height = width */
if ( (flags & XValue) == 0 ) /* center offset if not defined */
args.xi = (double)(((ssize_t)args.rho-1)/2);
if ( (flags & YValue) == 0 )
args.psi = (double)(((ssize_t)args.sigma-1)/2);
break;
/* Distance Kernel Defaults */
case ChebyshevKernel:
case ManhattanKernel:
case OctagonalKernel:
case EuclideanKernel:
if ( (flags & HeightValue) == 0 ) /* no distance scale */
args.sigma = 100.0; /* default distance scaling */
else if ( (flags & AspectValue ) != 0 ) /* '!' flag */
args.sigma = QuantumRange/(args.sigma+1); /* maximum pixel distance */
else if ( (flags & PercentValue ) != 0 ) /* '%' flag */
args.sigma *= QuantumRange/100.0; /* percentage of color range */
break;
default:
break;
}
kernel = AcquireKernelBuiltIn((KernelInfoType)type, &args);
if ( kernel == (KernelInfo *) NULL )
return(kernel);
/* global expand to rotated kernel list - only for single kernels */
if ( kernel->next == (KernelInfo *) NULL ) {
if ( (flags & AreaValue) != 0 ) /* '@' symbol in kernel args */
ExpandRotateKernelInfo(kernel, 45.0);
else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
ExpandRotateKernelInfo(kernel, 90.0);
else if ( (flags & LessValue) != 0 ) /* '<' symbol in kernel args */
ExpandMirrorKernelInfo(kernel);
}
return(kernel);
}
/* Public entry point: parses a (possibly semicolon-separated) list of
** kernel specifications into a linked KernelInfo list. A leading '@'
** makes the rest of the string a filename whose contents are parsed
** instead. Returns NULL on any parse or read failure.
*/
MagickExport KernelInfo *AcquireKernelInfo(const char *kernel_string)
{
  KernelInfo
    *kernel,
    *new_kernel;
  char
    *kernel_cache,
    token[MaxTextExtent];
  const char
    *p;
  if (kernel_string == (const char *) NULL)
    return(ParseKernelArray(kernel_string));
  p=kernel_string;
  kernel_cache=(char *) NULL;
  if (*kernel_string == '@')
    {
      /* '@filename' - read the kernel specification from the named file */
      ExceptionInfo *exception=AcquireExceptionInfo();
      kernel_cache=FileToString(kernel_string+1,~0UL,exception);
      exception=DestroyExceptionInfo(exception);
      if (kernel_cache == (char *) NULL)
        return((KernelInfo *) NULL);
      p=(const char *) kernel_cache;
    }
  kernel=NULL;
  while (GetNextToken(p,(const char **) NULL,MaxTextExtent,token), *token != '\0')
  {
    /* ignore extra or multiple ';' kernel separators */
    if (*token != ';')
      {
        /* tokens starting with alpha is a Named kernel */
        if (isalpha((int) ((unsigned char) *token)) != 0)
          new_kernel=ParseKernelName(p);
        else /* otherwise a user defined kernel array */
          new_kernel=ParseKernelArray(p);
        if (new_kernel == (KernelInfo *) NULL)
          {
            /* parse failure: release the partial kernel list.
            ** BUGFIX: also free the file cache, which was previously
            ** leaked on this early-return error path. */
            if (kernel != (KernelInfo *) NULL)
              kernel=DestroyKernelInfo(kernel);
            if (kernel_cache != (char *) NULL)
              kernel_cache=DestroyString(kernel_cache);
            return((KernelInfo *) NULL);
          }
        /* initialise or append the kernel list */
        if (kernel == (KernelInfo *) NULL)
          kernel=new_kernel;
        else
          LastKernelInfo(kernel)->next=new_kernel;
      }
    /* look for the next kernel in list */
    p=strchr(p,';');
    if (p == (char *) NULL)
      break;
    p++;
  }
  if (kernel_cache != (char *) NULL)
    kernel_cache=DestroyString(kernel_cache);
  return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e K e r n e l B u i l t I n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelBuiltIn() returns one of the 'named' built-in types of
% kernels used for special purposes such as gaussian blurring, skeleton
% pruning, and edge distance determination.
%
% They take a KernelType, and a set of geometry style arguments, which were
% typically decoded from a user supplied string, or from a more complex
% Morphology Method that was requested.
%
% The format of the AcquireKernelBuiltIn method is:
%
% KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
% const GeometryInfo args)
%
% A description of each parameter follows:
%
% o type: the pre-defined type of kernel wanted
%
% o args: arguments defining or modifying the kernel
%
% Convolution Kernels
%
% Unity
% A No-Op or Scaling single element kernel.
%
% Gaussian:{radius},{sigma}
% Generate a two-dimensional gaussian kernel, as used by -gaussian.
% The sigma for the curve is required. The resulting kernel is
% normalized,
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% NOTE: that the 'radius' is optional, but if provided can limit (clip)
% the final size of the resulting kernel to a square 2*radius+1 in size.
% The radius should be at least 2 times that of the sigma value, or
% severe clipping and aliasing may result. If not given or set to 0 the
% radius will be determined so as to produce the best minimal error
% result, which is usually much larger than is normally needed.
%
% LoG:{radius},{sigma}
% "Laplacian of a Gaussian" or "Mexician Hat" Kernel.
% The supposed ideal edge detection, zero-summing kernel.
%
% An alternative to this kernel is to use a "DoG" with a sigma ratio of
% approx 1.6 (according to wikipedia).
%
% DoG:{radius},{sigma1},{sigma2}
% "Difference of Gaussians" Kernel.
% As "Gaussian" but with a gaussian produced by 'sigma2' subtracted
% from the gaussian produced by 'sigma1'. Typically sigma2 > sigma1.
% The result is a zero-summing kernel.
%
% Blur:{radius},{sigma}[,{angle}]
% Generates a 1 dimensional or linear gaussian blur, at the angle given
% (current restricted to orthogonal angles). If a 'radius' is given the
% kernel is clipped to a width of 2*radius+1. Kernel can be rotated
% by a 90 degree angle.
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% Note that two convolutions with two "Blur" kernels perpendicular to
% each other, is equivalent to a far larger "Gaussian" kernel with the
% same sigma value, However it is much faster to apply. This is how the
% "-blur" operator actually works.
%
% Comet:{width},{sigma},{angle}
% Blur in one direction only, much like how a bright object leaves
% a comet like trail. The Kernel is actually half a gaussian curve,
% Adding two such blurs in opposite directions produces a Blur Kernel.
% Angle can be rotated in multiples of 90 degrees.
%
% Note that the first argument is the width of the kernel and not the
% radius of the kernel.
%
% Binomial:[{radius}]
% Generate a discrete kernel using a 2 dimensional Pascal's Triangle
% of values. Used for special forms of image filters
%
% # Still to be implemented...
% #
% # Filter2D
% # Filter1D
% # Set kernel values using a resize filter, and given scale (sigma)
% # Cylindrical or Linear. Is this possible with an image?
% #
%
% Named Constant Convolution Kernels
%
% All these are unscaled, zero-summing kernels by default. As such for
% non-HDRI version of ImageMagick some form of normalization, user scaling,
% and biasing the results is recommended, to prevent the resulting image
% being 'clipped'.
%
% The 3x3 kernels (most of these) can be circularly rotated in multiples of
% 45 degrees to generate the 8 angled varients of each of the kernels.
%
% Laplacian:{type}
% Discrete Lapacian Kernels, (without normalization)
% Type 0 : 3x3 with center:8 surounded by -1 (8 neighbourhood)
% Type 1 : 3x3 with center:4 edge:-1 corner:0 (4 neighbourhood)
% Type 2 : 3x3 with center:4 edge:1 corner:-2
% Type 3 : 3x3 with center:4 edge:-2 corner:1
% Type 5 : 5x5 laplacian
% Type 7 : 7x7 laplacian
% Type 15 : 5x5 LoG (sigma approx 1.4)
% Type 19 : 9x9 LoG (sigma approx 1.4)
%
% Sobel:{angle}
% Sobel 'Edge' convolution kernel (3x3)
% | -1, 0, 1 |
% | -2, 0, 2 |
% | -1, 0, 1 |
%
% Roberts:{angle}
% Roberts convolution kernel (3x3)
% | 0, 0, 0 |
% | -1, 1, 0 |
% | 0, 0, 0 |
%
% Prewitt:{angle}
% Prewitt Edge convolution kernel (3x3)
% | -1, 0, 1 |
% | -1, 0, 1 |
% | -1, 0, 1 |
%
% Compass:{angle}
% Prewitt's "Compass" convolution kernel (3x3)
% | -1, 1, 1 |
% | -1,-2, 1 |
% | -1, 1, 1 |
%
% Kirsch:{angle}
% Kirsch's "Compass" convolution kernel (3x3)
% | -3,-3, 5 |
% | -3, 0, 5 |
% | -3,-3, 5 |
%
% FreiChen:{angle}
% Frei-Chen Edge Detector is based on a kernel that is similar to
% the Sobel Kernel, but is designed to be isotropic. That is it takes
% into account the distance of the diagonal in the kernel.
%
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) |
% | 1, 0, -1 |
%
% FreiChen:{type},{angle}
%
% Frei-Chen Pre-weighted kernels...
%
% Type 0: default un-nomalized version shown above.
%
% Type 1: Orthogonal Kernel (same as type 11 below)
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 2: Diagonal form of Kernel...
% | 1, sqrt(2), 0 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 0, -sqrt(2) -1 |
%
% However this kernel is also at the heart of the FreiChen Edge Detection
% Process which uses a set of 9 specially weighted kernels. These 9
% kernels should not be normalized, but directly applied to the image. The
% results is then added together, to produce the intensity of an edge in
% a specific direction. The square root of the pixel value can then be
% taken as the cosine of the edge, and at least 2 such runs at 90 degrees
% from each other, both the direction and the strength of the edge can be
% determined.
%
% Type 10: All 9 of the following pre-weighted kernels...
%
% Type 11: | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 12: | 1, sqrt(2), 1 |
% | 0, 0, 0 | / 2*sqrt(2)
% | 1, sqrt(2), 1 |
%
% Type 13: | sqrt(2), -1, 0 |
% | -1, 0, 1 | / 2*sqrt(2)
% | 0, 1, -sqrt(2) |
%
% Type 14: | 0, 1, -sqrt(2) |
% | -1, 0, 1 | / 2*sqrt(2)
% | sqrt(2), -1, 0 |
%
% Type 15: | 0, -1, 0 |
% | 1, 0, 1 | / 2
% | 0, -1, 0 |
%
% Type 16: | 1, 0, -1 |
% | 0, 0, 0 | / 2
% | -1, 0, 1 |
%
% Type 17: | 1, -2, 1 |
% | -2, 4, -2 | / 6
% | -1, -2, 1 |
%
% Type 18: | -2, 1, -2 |
% | 1, 4, 1 | / 6
% | -2, 1, -2 |
%
% Type 19: | 1, 1, 1 |
% | 1, 1, 1 | / 3
% | 1, 1, 1 |
%
% The first 4 are for edge detection, the next 4 are for line detection
% and the last is to add a average component to the results.
%
% Using a special type of '-1' will return all 9 pre-weighted kernels
% as a multi-kernel list, so that you can use them directly (without
% normalization) with the special "-set option:morphology:compose Plus"
% setting to apply the full FreiChen Edge Detection Technique.
%
% If 'type' is large it will be taken to be an actual rotation angle for
% the default FreiChen (type 0) kernel. As such FreiChen:45 will look
% like a Sobel:45 but with 'sqrt(2)' instead of '2' values.
%
% WARNING: The above was layed out as per
% http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf
% But rotated 90 degrees so direction is from left rather than the top.
% I have yet to find any secondary confirmation of the above. The only
% other source found was actual source code at
% http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf
% Neither paper defines the kernels in a way that looks logical or
% correct when taken as a whole.
%
% Boolean Kernels
%
% Diamond:[{radius}[,{scale}]]
% Generate a diamond shaped kernel with given radius to the points.
% Kernel size will again be radius*2+1 square and defaults to radius 1,
% generating a 3x3 kernel that is slightly larger than a square.
%
% Square:[{radius}[,{scale}]]
% Generate a square shaped kernel of size radius*2+1, and defaulting
% to a 3x3 (radius 1).
%
% Octagon:[{radius}[,{scale}]]
% Generate octagonal shaped kernel of given radius and constant scale.
% Default radius is 3 producing a 7x7 kernel. A radius of 1 will result
% in "Diamond" kernel.
%
% Disk:[{radius}[,{scale}]]
% Generate a binary disk, thresholded at the radius given, the radius
% may be a float-point value. Final Kernel size is floor(radius)*2+1
% square. A radius of 5.3 is the default.
%
% NOTE: That a low radii Disk kernels produce the same results as
% many of the previously defined kernels, but differ greatly at larger
% radii. Here is a table of equivalences...
% "Disk:1" => "Diamond", "Octagon:1", or "Cross:1"
% "Disk:1.5" => "Square"
% "Disk:2" => "Diamond:2"
% "Disk:2.5" => "Octagon"
% "Disk:2.9" => "Square:2"
% "Disk:3.5" => "Octagon:3"
% "Disk:4.5" => "Octagon:4"
% "Disk:5.4" => "Octagon:5"
% "Disk:6.4" => "Octagon:6"
% All other Disk shapes are unique to this kernel, but because a "Disk"
% is more circular when using a larger radius, using a larger radius is
% preferred over iterating the morphological operation.
%
% Rectangle:{geometry}
% Simply generate a rectangle of 1's with the size given. You can also
% specify the location of the 'control point', otherwise the closest
% pixel to the center of the rectangle is selected.
%
% Properly centered and odd sized rectangles work the best.
%
% Symbol Dilation Kernels
%
% These kernel is not a good general morphological kernel, but is used
% more for highlighting and marking any single pixels in an image using,
% a "Dilate" method as appropriate.
%
% For the same reasons iterating these kernels does not produce the
% same result as using a larger radius for the symbol.
%
% Plus:[{radius}[,{scale}]]
% Cross:[{radius}[,{scale}]]
% Generate a kernel in the shape of a 'plus' or a 'cross' with
% a each arm the length of the given radius (default 2).
%
% NOTE: "plus:1" is equivalent to a "Diamond" kernel.
%
% Ring:{radius1},{radius2}[,{scale}]
% A ring of the values given that falls between the two radii.
% Defaults to a ring of approximately 3 radius in a 7x7 kernel.
% This is the 'edge' pixels of the default "Disk" kernel,
% More specifically, "Ring" -> "Ring:2.5,3.5,1.0"
%
% Hit and Miss Kernels
%
% Peak:radius1,radius2
% Find any peak larger than the pixels the fall between the two radii.
% The default ring of pixels is as per "Ring".
% Edges
% Find flat orthogonal edges of a binary shape
% Corners
% Find 90 degree corners of a binary shape
% Diagonals:type
% A special kernel to thin the 'outside' of diagonals
% LineEnds:type
% Find end points of lines (for pruning a skeleton)
% Two types of lines ends (default to both) can be searched for
% Type 0: All line ends
% Type 1: single kernel for 4-connected line ends
% Type 2: single kernel for simple line ends
% LineJunctions
% Find three line junctions (within a skeleton)
% Type 0: all line junctions
% Type 1: Y Junction kernel
% Type 2: Diagonal T Junction kernel
% Type 3: Orthogonal T Junction kernel
% Type 4: Diagonal X Junction kernel
% Type 5: Orthogonal + Junction kernel
% Ridges:type
% Find single pixel ridges or thin lines
% Type 1: Fine single pixel thick lines and ridges
% Type 2: Find two pixel thick lines and ridges
% ConvexHull
% Octagonal Thickening Kernel, to generate convex hulls of 45 degrees
% Skeleton:type
% Traditional skeleton generating kernels.
% Type 1: Traditional Skeleton kernel (4 connected skeleton)
% Type 2: HIPR2 Skeleton kernel (8 connected skeleton)
% Type 3: Thinning skeleton based on a research paper by
% Dan S. Bloomberg (Default Type)
% ThinSE:type
% A huge variety of Thinning Kernels designed to preserve connectivity.
% many other kernel sets use these kernels as source definitions.
% Type numbers are 41-49, 81-89, 481, and 482 which are based on
% the super and sub notations used in the source research paper.
%
% Distance Measuring Kernels
%
% Different types of distance measuring methods, which are used with the
% a 'Distance' morphology method for generating a gradient based on
% distance from an edge of a binary shape, though there is a technique
% for handling a anti-aliased shape.
%
% See the 'Distance' Morphological Method, for information of how it is
% applied.
%
% Chebyshev:[{radius}][x{scale}[%!]]
% Chebyshev Distance (also known as Tchebychev or Chessboard distance)
% is a value of one to any neighbour, orthogonal or diagonal. One way
% of thinking of it is the number of squares a 'King' or 'Queen' in
% chess needs to traverse to reach any other position on a chess board.
% It results in a 'square' like distance function, but one where
% diagonals are given a value that is closer than expected.
%
% Manhattan:[{radius}][x{scale}[%!]]
% Manhattan Distance (also known as Rectilinear, City Block, or the Taxi
% Cab distance metric), it is the distance needed when you can only
% travel in horizontal or vertical directions only. It is the
% distance a 'Rook' in chess would have to travel, and results in a
% diamond like distances, where diagonals are further than expected.
%
% Octagonal:[{radius}][x{scale}[%!]]
% An interleaving of Manhattan and Chebyshev metrics producing an
% increasing octagonally shaped distance. Distances matches those of
% the "Octagon" shaped kernel of the same radius. The minimum radius
% and default is 2, producing a 5x5 kernel.
%
% Euclidean:[{radius}][x{scale}[%!]]
% Euclidean distance is the 'direct' or 'as the crow flies' distance.
% However by default the kernel size only has a radius of 1, which
% limits the distance to 'Knight' like moves, with only orthogonal and
% diagonal measurements being correct. As such for the default kernel
% you will get octagonal like distance function.
%
% However using a larger radius such as "Euclidean:4" you will get a
% much smoother distance gradient from the edge of the shape. Especially
% if the image is pre-processed to include any anti-aliasing pixels.
% Of course a larger kernel is slower to use, and not always needed.
%
% The first three Distance Measuring Kernels will only generate distances
% of exact multiples of {scale} in binary images. As such you can use a
% scale of 1 without losing any information. However you also need some
% scaling when handling non-binary anti-aliased shapes.
%
% The "Euclidean" Distance Kernel however does generate a non-integer
% fractional results, and as such scaling is vital even for binary shapes.
%
*/
MagickExport KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
const GeometryInfo *args)
{
KernelInfo
*kernel;
ssize_t
i;
ssize_t
u,
v;
double
nan = sqrt((double)-1.0); /* Special Value : Not A Number */
/* Generate a new empty kernel if needed */
kernel=(KernelInfo *) NULL;
switch(type) {
case UndefinedKernel: /* These should not call this function */
case UserDefinedKernel:
assert("Should not call this function" != (char *) NULL);
break;
case LaplacianKernel: /* Named Descrete Convolution Kernels */
case SobelKernel: /* these are defined using other kernels */
case RobertsKernel:
case PrewittKernel:
case CompassKernel:
case KirschKernel:
case FreiChenKernel:
case EdgesKernel: /* Hit and Miss kernels */
case CornersKernel:
case DiagonalsKernel:
case LineEndsKernel:
case LineJunctionsKernel:
case RidgesKernel:
case ConvexHullKernel:
case SkeletonKernel:
case ThinSEKernel:
break; /* A pre-generated kernel is not needed */
#if 0
/* set to 1 to do a compile-time check that we haven't missed anything */
case UnityKernel:
case GaussianKernel:
case DoGKernel:
case LoGKernel:
case BlurKernel:
case CometKernel:
case BinomialKernel:
case DiamondKernel:
case SquareKernel:
case RectangleKernel:
case OctagonKernel:
case DiskKernel:
case PlusKernel:
case CrossKernel:
case RingKernel:
case PeaksKernel:
case ChebyshevKernel:
case ManhattanKernel:
case OctangonalKernel:
case EuclideanKernel:
#else
default:
#endif
/* Generate the base Kernel Structure */
kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
if (kernel == (KernelInfo *) NULL)
return(kernel);
(void) memset(kernel,0,sizeof(*kernel));
kernel->minimum = kernel->maximum = kernel->angle = 0.0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->type = type;
kernel->next = (KernelInfo *) NULL;
kernel->signature = MagickCoreSignature;
break;
}
switch(type) {
/*
Convolution Kernels
*/
case UnityKernel:
{
kernel->height = kernel->width = (size_t) 1;
kernel->x = kernel->y = (ssize_t) 0;
kernel->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(1,
sizeof(*kernel->values)));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
kernel->maximum = kernel->values[0] = args->rho;
break;
}
break;
case GaussianKernel:
case DoGKernel:
case LoGKernel:
{ double
sigma = fabs(args->sigma),
sigma2 = fabs(args->xi),
A, B, R;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else if ( (type != DoGKernel) || (sigma >= sigma2) )
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma);
else
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma2);
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(
kernel->width,kernel->height*sizeof(*kernel->values)));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* WARNING: The following generates a 'sampled gaussian' kernel.
* What we really want is a 'discrete gaussian' kernel.
*
* How to do this is I don't know, but appears to be basied on the
* Error Function 'erf()' (intergral of a gaussian)
*/
if ( type == GaussianKernel || type == DoGKernel )
{ /* Calculate a Gaussian, OR positive half of a DoG */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
if ( type == DoGKernel )
{ /* Subtract a Negative Gaussian for "Difference of Gaussian" */
if ( sigma2 > MagickEpsilon )
{ sigma = sigma2; /* simplify loop expressions */
A = 1.0/(2.0*sigma*sigma);
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] -= exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
kernel->values[kernel->x+kernel->y*kernel->width] -= 1.0;
}
if ( type == LoGKernel )
{ /* Calculate a Laplacian of a Gaussian - Or Mexician Hat */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(MagickPI*sigma*sigma*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ R = ((double)(u*u+v*v))*A;
kernel->values[i] = (1-R)*exp(-R)*B;
}
}
else /* special case - generate a unity kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
/* Note the above kernels may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigma's (> 0.1) the central value becomes larger than one, and thus
** producing a very bright kernel.
**
** Normalization will still be needed.
*/
/* Normalize the 2D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
break;
}
case BlurKernel:
{ double
sigma = fabs(args->sigma),
alpha, beta;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else
kernel->width = GetOptimalKernelWidth1D(args->rho,sigma);
kernel->height = 1;
kernel->x = (ssize_t) (kernel->width-1)/2;
kernel->y = 0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
#if 1
#define KernelRank 3
/* Formula derived from GetBlurKernel() in "effect.c" (plus bug fix).
** It generates a gaussian 3 times the width, and compresses it into
** the expected range. This produces a closer normalization of the
** resulting kernel, especially for very low sigma values.
** As such while wierd it is prefered.
**
** I am told this method originally came from Photoshop.
**
** A properly normalized curve is generated (apart from edge clipping)
** even though we later normalize the result (for edge clipping)
** to allow the correct generation of a "Difference of Blurs".
*/
/* initialize */
v = (ssize_t) (kernel->width*KernelRank-1)/2; /* start/end points to fit range */
(void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
/* Calculate a Positive 1D Gaussian */
if ( sigma > MagickEpsilon )
{ sigma *= KernelRank; /* simplify loop expressions */
alpha = 1.0/(2.0*sigma*sigma);
beta= (double) (1.0/(MagickSQ2PI*sigma ));
for ( u=-v; u <= v; u++) {
kernel->values[(u+v)/KernelRank] +=
exp(-((double)(u*u))*alpha)*beta;
}
}
else /* special case - generate a unity kernel */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
#else
/* Direct calculation without curve averaging
This is equivelent to a KernelRank of 1 */
/* Calculate a Positive Gaussian */
if ( sigma > MagickEpsilon )
{ alpha = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
beta = 1.0/(MagickSQ2PI*sigma);
for ( i=0, u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u))*alpha)*beta;
}
else /* special case - generate a unity kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
#endif
/* Note the above kernel may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigma's (< 0.1) the central value becomes larger than one, as a
** result of not generating a actual 'discrete' kernel, and thus
** producing a very bright 'impulse'.
**
** Because of these two factors Normalization is required!
*/
/* Normalize the 1D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
/* rotate the 1D kernel by given angle */
RotateKernelInfo(kernel, args->xi );
break;
}
case CometKernel:
{ double
sigma = fabs(args->sigma),
A;
if ( args->rho < 1.0 )
kernel->width = (GetOptimalKernelWidth1D(args->rho,sigma)-1)/2+1;
else
kernel->width = (size_t)args->rho;
kernel->x = kernel->y = 0;
kernel->height = 1;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* A comet blur is half a 1D gaussian curve, so that the object is
** blurred in one direction only. This may not be quite the right
** curve to use so may change in the future. The function must be
** normalised after generation, which also resolves any clipping.
**
** As we are normalizing and not subtracting gaussians,
** there is no need for a divisor in the gaussian formula
**
** It is less complex.
*/
if ( sigma > MagickEpsilon )
{
#if 1
#define KernelRank 3
v = (ssize_t) kernel->width*KernelRank; /* start/end points */
(void) memset(kernel->values,0, (size_t)
kernel->width*sizeof(*kernel->values));
sigma *= KernelRank; /* simplify the loop expression */
A = 1.0/(2.0*sigma*sigma);
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( u=0; u < v; u++) {
kernel->values[u/KernelRank] +=
exp(-((double)(u*u))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
}
for (i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range += kernel->values[i];
#else
A = 1.0/(2.0*sigma*sigma); /* simplify the loop expression */
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range +=
kernel->values[i] = exp(-((double)(i*i))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
#endif
}
else /* special case - generate a unity kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
}
kernel->minimum = 0.0;
kernel->maximum = kernel->values[0];
kernel->negative_range = 0.0;
ScaleKernelInfo(kernel, 1.0, NormalizeValue); /* Normalize */
RotateKernelInfo(kernel, args->xi); /* Rotate by angle */
break;
}
case BinomialKernel:
{
size_t
order_f;
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
order_f = fact(kernel->width-1);
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values within diamond area to scale given */
for ( i=0, v=0; v < (ssize_t)kernel->height; v++)
{ size_t
alpha = order_f / ( fact((size_t) v) * fact(kernel->height-v-1) );
for ( u=0; u < (ssize_t)kernel->width; u++, i++)
kernel->positive_range += kernel->values[i] = (double)
(alpha * order_f / ( fact((size_t) u) * fact(kernel->height-u-1) ));
}
kernel->minimum = 1.0;
kernel->maximum = kernel->values[kernel->x+kernel->y*kernel->width];
kernel->negative_range = 0.0;
break;
}
/*
Convolution Kernels - Well Known Named Constant Kernels
*/
case LaplacianKernel:
{ switch ( (int) args->rho ) {
case 0:
default: /* laplacian square filter -- default */
kernel=ParseKernelArray("3: -1,-1,-1 -1,8,-1 -1,-1,-1");
break;
case 1: /* laplacian diamond filter */
kernel=ParseKernelArray("3: 0,-1,0 -1,4,-1 0,-1,0");
break;
case 2:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
break;
case 3:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 1,-2,1");
break;
case 5: /* a 5x5 laplacian */
kernel=ParseKernelArray(
"5: -4,-1,0,-1,-4 -1,2,3,2,-1 0,3,4,3,0 -1,2,3,2,-1 -4,-1,0,-1,-4");
break;
case 7: /* a 7x7 laplacian */
kernel=ParseKernelArray(
"7:-10,-5,-2,-1,-2,-5,-10 -5,0,3,4,3,0,-5 -2,3,6,7,6,3,-2 -1,4,7,8,7,4,-1 -2,3,6,7,6,3,-2 -5,0,3,4,3,0,-5 -10,-5,-2,-1,-2,-5,-10" );
break;
case 15: /* a 5x5 LoG (sigma approx 1.4) */
kernel=ParseKernelArray(
"5: 0,0,-1,0,0 0,-1,-2,-1,0 -1,-2,16,-2,-1 0,-1,-2,-1,0 0,0,-1,0,0");
break;
case 19: /* a 9x9 LoG (sigma approx 1.4) */
/* http://www.cscjournals.org/csc/manuscript/Journals/IJIP/volume3/Issue1/IJIP-15.pdf */
kernel=ParseKernelArray(
"9: 0,-1,-1,-2,-2,-2,-1,-1,0 -1,-2,-4,-5,-5,-5,-4,-2,-1 -1,-4,-5,-3,-0,-3,-5,-4,-1 -2,-5,-3,12,24,12,-3,-5,-2 -2,-5,-0,24,40,24,-0,-5,-2 -2,-5,-3,12,24,12,-3,-5,-2 -1,-4,-5,-3,-0,-3,-5,-4,-1 -1,-2,-4,-5,-5,-5,-4,-2,-1 0,-1,-1,-2,-2,-2,-1,-1,0");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
break;
}
case SobelKernel:
{ /* Simple Sobel Kernel */
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case RobertsKernel:
{
kernel=ParseKernelArray("3: 0,0,0 1,-1,0 0,0,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case PrewittKernel:
{
kernel=ParseKernelArray("3: 1,0,-1 1,0,-1 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case CompassKernel:
{
kernel=ParseKernelArray("3: 1,1,-1 1,-2,-1 1,1,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case KirschKernel:
{
kernel=ParseKernelArray("3: 5,-3,-3 5,0,-3 5,-3,-3");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case FreiChenKernel:
/* Direction is set to be left to right positive */
/* http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf -- RIGHT? */
/* http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf -- WRONG? */
{ switch ( (int) args->rho ) {
default:
case 0:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +MagickSQ2;
kernel->values[5] = -MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
break;
case 2:
kernel=ParseKernelArray("3: 1,2,0 2,0,-2 0,-2,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = kernel->values[3]= +MagickSQ2;
kernel->values[5] = kernel->values[7]= -MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 10:
kernel=AcquireKernelInfo("FreiChen:11;FreiChen:12;FreiChen:13;FreiChen:14;FreiChen:15;FreiChen:16;FreiChen:17;FreiChen:18;FreiChen:19");
if (kernel == (KernelInfo *) NULL)
return(kernel);
break;
case 1:
case 11:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +MagickSQ2;
kernel->values[5] = -MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 12:
kernel=ParseKernelArray("3: 1,2,1 0,0,0 1,2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = +MagickSQ2;
kernel->values[7] = +MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 13:
kernel=ParseKernelArray("3: 2,-1,0 -1,0,1 0,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[0] = +MagickSQ2;
kernel->values[8] = -MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 14:
kernel=ParseKernelArray("3: 0,1,-2 -1,0,1 2,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[2] = -MagickSQ2;
kernel->values[6] = +MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 15:
kernel=ParseKernelArray("3: 0,-1,0 1,0,1 0,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 16:
kernel=ParseKernelArray("3: 1,0,-1 0,0,0 -1,0,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 17:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 -1,-2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 18:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 19:
kernel=ParseKernelArray("3: 1,1,1 1,1,1 1,1,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/3.0, NoValue);
break;
}
if ( fabs(args->sigma) >= MagickEpsilon )
/* Rotate by correctly supplied 'angle' */
RotateKernelInfo(kernel, args->sigma);
else if ( args->rho > 30.0 || args->rho < -30.0 )
/* Rotate by out of bounds 'type' */
RotateKernelInfo(kernel, args->rho);
break;
}
/*
Boolean or Shaped Kernels
*/
case DiamondKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values within diamond area to scale given */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <= (long) kernel->x)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case SquareKernel:
case RectangleKernel:
{ double
scale;
if ( type == SquareKernel )
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = (size_t) (2*args->rho+1);
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
scale = args->sigma;
}
else {
/* NOTE: user defaults set in "AcquireKernelInfo()" */
if ( args->rho < 1.0 || args->sigma < 1.0 )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->width = (size_t)args->rho;
kernel->height = (size_t)args->sigma;
if ( args->xi < 0.0 || args->xi > (double)kernel->width ||
args->psi < 0.0 || args->psi > (double)kernel->height )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->x = (ssize_t) args->xi;
kernel->y = (ssize_t) args->psi;
scale = 1.0;
}
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values to scale given */
u=(ssize_t) (kernel->width*kernel->height);
for ( i=0; i < u; i++)
kernel->values[i] = scale;
kernel->minimum = kernel->maximum = scale; /* a flat shape */
kernel->positive_range = scale*u;
break;
}
case OctagonKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <=
((long)kernel->x + (long)(kernel->x/2)) )
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case DiskKernel:
{
ssize_t
limit = (ssize_t)(args->rho*args->rho);
if (args->rho < 0.4) /* default radius approx 4.3 */
kernel->width = kernel->height = 9L, limit = 18L;
else
kernel->width = kernel->height = (size_t)fabs(args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ((u*u+v*v) <= limit)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case PlusKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along axises to given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == 0 || v == 0) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
case CrossKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along axises to given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == v || u == -v) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
/*
HitAndMiss Kernels
*/
case RingKernel:
case PeaksKernel:
{
ssize_t
limit1,
limit2,
scale;
if (args->rho < args->sigma)
{
kernel->width = ((size_t)args->sigma)*2+1;
limit1 = (ssize_t)(args->rho*args->rho);
limit2 = (ssize_t)(args->sigma*args->sigma);
}
else
{
kernel->width = ((size_t)args->rho)*2+1;
limit1 = (ssize_t)(args->sigma*args->sigma);
limit2 = (ssize_t)(args->rho*args->rho);
}
if ( limit2 <= 0 )
kernel->width = 7L, limit1 = 7L, limit2 = 11L;
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set a ring of points of 'scale' ( 0.0 for PeaksKernel ) */
scale = (ssize_t) (( type == PeaksKernel) ? 0.0 : args->xi);
for ( i=0, v= -kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ ssize_t radius=u*u+v*v;
if (limit1 < radius && radius <= limit2)
kernel->positive_range += kernel->values[i] = (double) scale;
else
kernel->values[i] = nan;
}
kernel->minimum = kernel->maximum = (double) scale;
if ( type == PeaksKernel ) {
/* set the central point in the middle */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
kernel->maximum = 1.0;
}
break;
}
case EdgesKernel:
{
kernel=AcquireKernelInfo("ThinSE:482");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandMirrorKernelInfo(kernel); /* mirror expansion of kernels */
break;
}
case CornersKernel:
{
kernel=AcquireKernelInfo("ThinSE:87");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* Expand 90 degree rotations */
break;
}
case DiagonalsKernel:
{
switch ( (int) args->rho ) {
case 0:
default:
{ KernelInfo
*new_kernel;
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
new_kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
ExpandMirrorKernelInfo(kernel);
return(kernel);
}
case 1:
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
break;
case 2:
kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineEndsKernel:
{ /* Kernels for finding the end of thin lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all end of lines */
return(AcquireKernelInfo("LineEnds:1>;LineEnds:2>"));
case 1:
/* kernel for 4-connected line ends - no rotation */
kernel=ParseKernelArray("3: 0,0,- 0,1,1 0,0,-");
break;
case 2:
/* kernel to add for 8-connected lines - no rotation */
kernel=ParseKernelArray("3: 0,0,0 0,1,0 0,0,1");
break;
case 3:
/* kernel to add for orthogonal line ends - does not find corners */
kernel=ParseKernelArray("3: 0,0,0 0,1,1 0,0,0");
break;
case 4:
/* traditional line end - fails on last T end */
kernel=ParseKernelArray("3: 0,0,0 0,1,- 0,0,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineJunctionsKernel:
{ /* kernels for finding the junctions of multiple lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all line junctions */
return(AcquireKernelInfo("LineJunctions:1@;LineJunctions:2>"));
case 1:
/* Y Junction */
kernel=ParseKernelArray("3: 1,-,1 -,1,- -,1,-");
break;
case 2:
/* Diagonal T Junctions */
kernel=ParseKernelArray("3: 1,-,- -,1,- 1,-,1");
break;
case 3:
/* Orthogonal T Junctions */
kernel=ParseKernelArray("3: -,-,- 1,1,1 -,1,-");
break;
case 4:
/* Diagonal X Junctions */
kernel=ParseKernelArray("3: 1,-,1 -,1,- 1,-,1");
break;
case 5:
/* Orthogonal X Junctions - minimal diamond kernel */
kernel=ParseKernelArray("3: -,1,- 1,1,1 -,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case RidgesKernel:
{ /* Ridges - Ridge finding kernels */
KernelInfo
*new_kernel;
switch ( (int) args->rho ) {
case 1:
default:
kernel=ParseKernelArray("3x1:0,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 2 rotated kernels (symmetrical) */
break;
case 2:
kernel=ParseKernelArray("4x1:0,1,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotated kernels */
/* Kernels to find a stepped 'thick' line, 4 rotates + mirrors */
/* Unfortunately we can not yet rotate a non-square kernel */
/* But then we can't flip a non-symmetrical kernel either */
new_kernel=ParseKernelArray("4x3+1+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+1+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
break;
}
break;
}
case ConvexHullKernel:
{
KernelInfo
*new_kernel;
/* first set of 8 kernels */
kernel=ParseKernelArray("3: 1,1,- 1,0,- 1,-,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0);
/* append the mirror versions too - no flip function yet */
new_kernel=ParseKernelArray("3: 1,1,1 1,0,- -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
ExpandRotateKernelInfo(new_kernel, 90.0);
LastKernelInfo(kernel)->next = new_kernel;
break;
}
case SkeletonKernel:
{
switch ( (int) args->rho ) {
case 1:
default:
/* Traditional Skeleton...
** A cyclically rotated single kernel
*/
kernel=AcquireKernelInfo("ThinSE:482");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 45.0); /* 8 rotations */
break;
case 2:
/* HIPR Variation of the cyclic skeleton
** Corners of the traditional method made more forgiving,
** but the retain the same cyclic order.
*/
kernel=AcquireKernelInfo("ThinSE:482; ThinSE:87x90;");
if (kernel == (KernelInfo *) NULL)
return(kernel);
if (kernel->next == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
kernel->type = type;
kernel->next->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotations of the 2 kernels */
break;
case 3:
/* Dan Bloomberg Skeleton, from his paper on 3x3 thinning SE's
** "Connectivity-Preserving Morphological Image Transformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
*/
kernel=AcquireKernelInfo(
"ThinSE:41; ThinSE:42; ThinSE:43");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->next->type = type;
kernel->next->next->type = type;
ExpandMirrorKernelInfo(kernel); /* 12 kernels total */
break;
}
break;
}
case ThinSEKernel:
{ /* Special kernels for general thinning, while preserving connections
** "Connectivity-Preserving Morphological Image Transformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
** And
** http://tpgit.github.com/Leptonica/ccthin_8c_source.html
**
** Note kernels do not specify the origin pixel, allowing them
** to be used for both thickening and thinning operations.
*/
switch ( (int) args->rho ) {
/* SE for 4-connected thinning */
case 41: /* SE_4_1 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,-,1");
break;
case 42: /* SE_4_2 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,0,-");
break;
case 43: /* SE_4_3 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,-,1");
break;
case 44: /* SE_4_4 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,-");
break;
case 45: /* SE_4_5 */
kernel=ParseKernelArray("3: -,0,1 0,-,1 -,0,-");
break;
case 46: /* SE_4_6 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,1");
break;
case 47: /* SE_4_7 */
kernel=ParseKernelArray("3: -,1,1 0,-,1 -,0,-");
break;
case 48: /* SE_4_8 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 0,-,1");
break;
case 49: /* SE_4_9 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 -,-,1");
break;
/* SE for 8-connected thinning - negatives of the above */
case 81: /* SE_8_0 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 -,1,-");
break;
case 82: /* SE_8_2 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,-,-");
break;
case 83: /* SE_8_3 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 -,1,-");
break;
case 84: /* SE_8_4 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,-");
break;
case 85: /* SE_8_5 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,-");
break;
case 86: /* SE_8_6 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,1");
break;
case 87: /* SE_8_7 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,0,-");
break;
case 88: /* SE_8_8 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,1,-");
break;
case 89: /* SE_8_9 */
kernel=ParseKernelArray("3: 0,1,- 0,-,1 -,1,-");
break;
/* Special combined SE kernels */
case 423: /* SE_4_2 , SE_4_3 Combined Kernel */
kernel=ParseKernelArray("3: -,-,1 0,-,- -,0,-");
break;
case 823: /* SE_8_2 , SE_8_3 Combined Kernel */
kernel=ParseKernelArray("3: -,1,- -,-,1 0,-,-");
break;
case 481: /* SE_48_1 - General Connected Corner Kernel */
kernel=ParseKernelArray("3: -,1,1 0,-,1 0,0,-");
break;
default:
case 482: /* SE_48_2 - General Edge Kernel */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,1");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
/*
Distance Measuring Kernels
*/
case ChebyshevKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*MagickMax(fabs((double)u),fabs((double)v)) );
kernel->maximum = kernel->values[0];
break;
}
case ManhattanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*(labs((long) u)+labs((long) v)) );
kernel->maximum = kernel->values[0];
break;
}
case OctagonalKernel:
{
if (args->rho < 2.0)
kernel->width = kernel->height = 5; /* default/minimum radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{
double
r1 = MagickMax(fabs((double)u),fabs((double)v)),
r2 = floor((double)(labs((long)u)+labs((long)v)+1)/1.5);
kernel->positive_range += kernel->values[i] =
args->sigma*MagickMax(r1,r2);
}
kernel->maximum = kernel->values[0];
break;
}
case EuclideanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*sqrt((double)(u*u+v*v)) );
kernel->maximum = kernel->values[0];
break;
}
default:
{
/* No-Op Kernel - Basically just a single pixel on its own */
kernel=ParseKernelArray("1:1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = UndefinedKernel;
break;
}
break;
}
return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneKernelInfo() creates a new clone of the given Kernel List so that it
% can be modified without affecting the original.  The cloned kernel should
% be destroyed using DestroyKernelInfo() when no longer needed.
%
% The format of the CloneKernelInfo method is:
%
% KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be cloned
%
*/
/* CloneKernelInfo() deep-copies the given kernel and, recursively, every
** kernel chained after it via 'next', so the clone can be modified without
** affecting the original.  Returns NULL on any allocation failure, in which
** case any partially built clone is destroyed.
*/
MagickExport KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
{
  ssize_t
    i;

  KernelInfo
    *new_kernel;

  assert(kernel != (KernelInfo *) NULL);
  new_kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
  if (new_kernel == (KernelInfo *) NULL)
    return(new_kernel);
  *new_kernel=(*kernel); /* copy values in structure */
  /* Detach the 'next' pointer inherited from the shallow structure copy.
  ** Without this, a failure of the allocation below would make
  ** DestroyKernelInfo(new_kernel) recursively destroy the *source*
  ** kernel's chain, corrupting the caller's list. */
  new_kernel->next=(KernelInfo *) NULL;
  /* replace the values with a copy of the values */
  new_kernel->values=(double *) AcquireAlignedMemory(kernel->width,
    kernel->height*sizeof(*kernel->values));
  if (new_kernel->values == (double *) NULL)
    return(DestroyKernelInfo(new_kernel));
  for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
    new_kernel->values[i]=kernel->values[i];
  /* Also clone the next kernel in the kernel list */
  if ( kernel->next != (KernelInfo *) NULL ) {
    new_kernel->next = CloneKernelInfo(kernel->next);
    if ( new_kernel->next == (KernelInfo *) NULL )
      return(DestroyKernelInfo(new_kernel));
  }
  return(new_kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyKernelInfo() frees the memory used by a Convolution/Morphology
% kernel.
%
% The format of the DestroyKernelInfo method is:
%
% KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be destroyed
%
*/
/* DestroyKernelInfo() frees the memory used by the given kernel and every
** kernel chained after it via 'next'.  Always returns NULL so callers can
** write: kernel=DestroyKernelInfo(kernel);
**
** The list is walked iteratively rather than recursively: an expanded
** rotated/mirrored kernel chain can be arbitrarily long, and deep
** recursion risks stack exhaustion.
*/
MagickExport KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
{
  assert(kernel != (KernelInfo *) NULL);
  while (kernel != (KernelInfo *) NULL)
  {
    KernelInfo
      *next;

    next=kernel->next;  /* save before freeing the node itself */
    kernel->values=(double *) RelinquishAlignedMemory(kernel->values);
    kernel=(KernelInfo *) RelinquishMagickMemory(kernel);
    kernel=next;
  }
  return((KernelInfo *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d M i r r o r K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandMirrorKernelInfo() takes a single kernel, and expands it into a
% sequence of 90-degree rotated kernels but providing a reflected 180
% rotation, before the -/+ 90-degree rotations.
%
% This special rotation order produces a better, more symmetrical thinning of
% objects.
%
% The format of the ExpandMirrorKernelInfo method is:
%
% void ExpandMirrorKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% This function is only internal to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
#if 0
/* Disabled (never compiled): sketch of an in-place horizontal reflection
** (a 'Flop') of a kernel, done by reversing each row of values and
** mirroring the origin's x offset.
**
** NOTE(review): the final statement references an undeclared variable
** 'angle', so this would not compile if the '#if 0' were removed -- it
** is kept only as a reference sketch.
*/
static void FlopKernelInfo(KernelInfo *kernel)
    { /* Do a Flop by reversing each row. */
      size_t
        y;
      ssize_t
        x,r;
      double
        *k,t;

      /* swap values pairwise from the two ends of every row */
      for ( y=0, k=kernel->values; y < kernel->height; y++, k+=kernel->width)
        for ( x=0, r=kernel->width-1; x<kernel->width/2; x++, r--)
          t=k[x],  k[x]=k[r],  k[r]=t;

      /* mirror the origin offset within the row */
      kernel->x = kernel->width - kernel->x - 1;
      angle = fmod(angle+180.0, 360.0);
    }
#endif
static void ExpandMirrorKernelInfo(KernelInfo *kernel)
{
  /* Expand a single kernel into the 4-way mirrored sequence by appending
  ** rotated clones of the previous kernel in the list: a reflected 180
  ** (flip), then a 90 (transpose), then another 180 (flop).  On a clone
  ** failure the list built so far is simply left as-is.
  */
  static const double
    angles[3] = { 180.0, 90.0, 180.0 };

  KernelInfo
    *clone,
    *last;

  ssize_t
    i;

  last=kernel;
  for (i=0; i < 3; i++)
  {
    clone=CloneKernelInfo(last);
    if (clone == (KernelInfo *) NULL)
      return;
    RotateKernelInfo(clone,angles[i]);
    LastKernelInfo(last)->next=clone;
    last=clone;
  }
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandRotateKernelInfo() takes a kernel list, and expands it by rotating
% incrementally by the angle given, until the kernel repeats.
%
% WARNING: 45-degree rotations only work for 3x3 kernels,
% while 90-degree rotations only work for linear and square kernels.
%
% The format of the ExpandRotateKernelInfo method is:
%
% void ExpandRotateKernelInfo(KernelInfo *kernel,double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is only internal to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
/* Internal routine: MagickTrue when two kernels have identical geometry,
** origin, and values.  NaN entries are treated as equal to NaN entries. */
static MagickBooleanType SameKernelInfo(const KernelInfo *kernel1,
  const KernelInfo *kernel2)
{
  size_t
    i;

  /* geometry and origin must match exactly */
  if ((kernel1->width != kernel2->width) ||
      (kernel1->height != kernel2->height) ||
      (kernel1->x != kernel2->x) ||
      (kernel1->y != kernel2->y))
    return MagickFalse;
  /* every value must match: NaN pairs with NaN, numbers within epsilon */
  for (i=0; i < (kernel1->width*kernel1->height); i++)
  {
    double
      u,
      v;

    u=kernel1->values[i];
    v=kernel2->values[i];
    if ( IsNaN(u) != IsNaN(v) )
      return MagickFalse;
    if ( fabs(u-v) >= MagickEpsilon )
      return MagickFalse;
  }
  return MagickTrue;
}
static void ExpandRotateKernelInfo(KernelInfo *kernel,const double angle)
{
  /* Keep appending rotated clones of the last kernel until the rotation
  ** wraps around to match the original kernel again; that final duplicate
  ** clone is then discarded.  A clone failure simply stops the expansion.
  */
  KernelInfo
    *clone_info,
    *last;

  clone_info=(KernelInfo *) NULL;
  last=kernel;
  for ( ; ; )
  {
    clone_info=CloneKernelInfo(last);
    if (clone_info == (KernelInfo *) NULL)
      break;
    RotateKernelInfo(clone_info,angle);
    if (SameKernelInfo(kernel,clone_info) != MagickFalse)
      break;                 /* rotation has come full circle */
    LastKernelInfo(last)->next=clone_info;
    last=clone_info;
  }
  if (clone_info != (KernelInfo *) NULL)
    clone_info=DestroyKernelInfo(clone_info);  /* kernel repeated - junk */
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+     C a l c K e r n e l M e t a D a t a                                     %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CalcKernelMetaData() recalculates the KernelInfo meta-data of this kernel
% only, using the kernel values.  This should only be used if it is not
% possible to
% calculate that meta-data in some easier way.
%
% It is important that the meta-data is correct before ScaleKernelInfo() is
% used to perform kernel normalization.
%
% The format of the CalcKernelMetaData method is:
%
% void CalcKernelMetaData(KernelInfo *kernel, const double scale )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% WARNING: Minimum and Maximum values are assumed to include zero, even if
% zero is not part of the kernel (as in Gaussian Derived kernels). This
% however is not true for flat-shaped morphological kernels.
%
% WARNING: Only the specific kernel pointed to is modified, not a list of
% multiple kernels.
%
% This is an internal function and not expected to be useful outside this
% module. This could change however.
*/
static void CalcKernelMetaData(KernelInfo *kernel)
{
  /* Recompute minimum/maximum and negative/positive range meta-data of
  ** this one kernel from its values.  Zero is deliberately included in
  ** the min/max (both start at 0.0), and near-zero values are snapped
  ** to exactly zero first.
  */
  size_t
    i;

  kernel->minimum = kernel->maximum = 0.0;
  kernel->negative_range = kernel->positive_range = 0.0;
  for (i=0; i < (kernel->width*kernel->height); i++)
  {
    double
      value;

    value=kernel->values[i];
    if ( fabs(value) < MagickEpsilon )
      value = kernel->values[i] = 0.0;   /* snap tiny values to zero */
    if ( value < 0 )
      kernel->negative_range += value;
    else
      kernel->positive_range += value;
    Minimize(kernel->minimum, value);
    Maximize(kernel->maximum, value);
  }
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y A p p l y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyApply() applies a morphological method, multiple times using
% a list of multiple kernels. This is the method that should be called by
% other 'operators' that internally use morphology operations as part of
% their processing.
%
% It is basically equivalent to as MorphologyImage() (see below) but
% without any user controls. This allows internal programs to use this
% function, to actually perform a specific task without possible interference
% by any API user supplied settings.
%
% It is MorphologyImage() task to extract any such user controls, and
% pass them to this function for processing.
%
% More specifically, all given kernels should already be scaled, normalised,
% and blended appropriately before being passed to this routine. The
% appropriate bias, and compose (typically 'UndefinedComposeOp') given.
%
% The format of the MorphologyApply method is:
%
% Image *MorphologyApply(const Image *image,MorphologyMethod method,
% const ChannelType channel, const ssize_t iterations,
% const KernelInfo *kernel, const CompositeMethod compose,
% const double bias, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the source image
%
% o method: the morphology method to be applied.
%
% o channel: the channels to which the operations are applied
% The channel 'sync' flag determines if 'alpha weighting' is
% applied for convolution style operations.
%
% o iterations: apply the operation this many times (or no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o channel: the channel type.
%
% o kernel: An array of double representing the morphology kernel.
%
% o compose: How to handle or merge multi-kernel results.
% If 'UndefinedCompositeOp' use default for the Morphology method.
% If 'NoCompositeOp' force image to be re-iterated by each kernel.
% Otherwise merge the results using the compose method given.
%
% o bias: Convolution Output Bias.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Apply a Morphology Primitive to an image using the given kernel.
** Two pre-created images must be provided, and no image is created.
** It returns the number of pixels that changed between the images
** for result convergence determination.
*/
static ssize_t MorphologyPrimitive(const Image *image, Image *result_image,
  const MorphologyMethod method, const ChannelType channel,
  const KernelInfo *kernel,const double bias,ExceptionInfo *exception)
{
#define MorphologyTag  "Morphology/Image"

  CacheView
    *p_view,
    *q_view;

  ssize_t
    i;

  size_t
    *changes,
    changed,
    virt_width;

  ssize_t
    y,
    offx,
    offy;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(result_image != (Image *) NULL);
  assert(result_image->signature == MagickCoreSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=MagickTrue;
  progress=0;
  p_view=AcquireVirtualCacheView(image,exception);
  q_view=AcquireAuthenticCacheView(result_image,exception);
  virt_width=image->columns+kernel->width-1;
  /* Some methods (including convolve) need to use a reflected kernel.
   * Adjust 'origin' offsets to loop though kernel as a reflection.
   */
  offx = kernel->x;
  offy = kernel->y;
  switch(method) {
    case ConvolveMorphology:
    case DilateMorphology:
    case DilateIntensityMorphology:
    case IterativeDistanceMorphology:
      /* kernel needs to used with reflection about origin */
      offx = (ssize_t) kernel->width-offx-1;
      offy = (ssize_t) kernel->height-offy-1;
      break;
    case ErodeMorphology:
    case ErodeIntensityMorphology:
    case HitAndMissMorphology:
    case ThinningMorphology:
    case ThickenMorphology:
      /* kernel is used as is, without reflection */
      break;
    default:
      assert("Not a Primitive Morphology Method" != (char *) NULL);
      break;
  }
  /* per-thread change counters, reduced into 'changed' at the end */
  changed=0;
  changes=(size_t *) AcquireQuantumMemory(GetOpenMPMaximumThreads(),
    sizeof(*changes));
  if (changes == (size_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++)
    changes[i]=0;

  if ( method == ConvolveMorphology && kernel->width == 1 )
  { /* Special handling (for speed) of vertical (blur) kernels.
    ** This performs its handling in columns rather than in rows.
    ** This is only done for convolve as it is the only method that
    ** generates very large 1-D vertical kernels (such as a 'BlurKernel')
    **
    ** Timing tests (on single CPU laptop)
    ** Using a vertical 1-d Blue with normal row-by-row (below)
    **   time convert logo: -morphology Convolve Blur:0x10+90 null:
    **     0.807u
    ** Using this column method
    **   time convert logo: -morphology Convolve Blur:0x10+90 null:
    **     0.620u
    **
    ** Anthony Thyssen, 14 June 2010
    */
    ssize_t
      x;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) shared(progress,status) \
      magick_number_threads(image,result_image,image->columns,1)
#endif
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      const int
        id = GetOpenMPThreadId();

      const PixelPacket
        *magick_restrict p;

      const IndexPacket
        *magick_restrict p_indexes;

      PixelPacket
        *magick_restrict q;

      IndexPacket
        *magick_restrict q_indexes;

      ssize_t
        y;

      ssize_t
        r;

      if (status == MagickFalse)
        continue;
      p=GetCacheViewVirtualPixels(p_view,x,-offy,1,image->rows+kernel->height-1,
        exception);
      q=GetCacheViewAuthenticPixels(q_view,x,0,1,result_image->rows,exception);
      if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
        {
          status=MagickFalse;
          continue;
        }
      p_indexes=GetCacheViewVirtualIndexQueue(p_view);
      q_indexes=GetCacheViewAuthenticIndexQueue(q_view);
      /* offset to origin in 'p'. while 'q' points to it directly */
      r = offy;

      for (y=0; y < (ssize_t) image->rows; y++)
      {
        DoublePixelPacket
          result;

        ssize_t
          v;

        const double
          *magick_restrict k;

        const PixelPacket
          *magick_restrict k_pixels;

        const IndexPacket
          *magick_restrict k_indexes;

        /* Copy input image to the output image for unused channels
         * This removes need for 'cloning' a new image every iteration
         */
        *q = p[r];
        if (image->colorspace == CMYKColorspace)
          SetPixelIndex(q_indexes+y,GetPixelIndex(p_indexes+y+r));

        /* Set the bias of the weighted average output */
        result.red     =
        result.green   =
        result.blue    =
        result.opacity =
        result.index   = bias;

        /* Weighted Average of pixels using reflected kernel
        **
        ** NOTE for correct working of this operation for asymmetrical
        ** kernels, the kernel needs to be applied in its reflected form.
        ** That is its values needs to be reversed.
        */
        k = &kernel->values[ kernel->height-1 ];
        k_pixels = p;
        k_indexes = p_indexes+y;
        if ( ((channel & SyncChannels) == 0 ) ||
             (image->matte == MagickFalse) )
          { /* No 'Sync' involved.
            ** Convolution is simple greyscale channel operation
            */
            for (v=0; v < (ssize_t) kernel->height; v++) {
              if ( IsNaN(*k) ) continue;
              result.red     += (*k)*GetPixelRed(k_pixels);
              result.green   += (*k)*GetPixelGreen(k_pixels);
              result.blue    += (*k)*GetPixelBlue(k_pixels);
              result.opacity += (*k)*GetPixelOpacity(k_pixels);
              if ( image->colorspace == CMYKColorspace)
                result.index += (*k)*(*k_indexes);
              k--;
              k_pixels++;
              k_indexes++;
            }
            if ((channel & RedChannel) != 0)
              SetPixelRed(q,ClampToQuantum(result.red));
            if ((channel & GreenChannel) != 0)
              SetPixelGreen(q,ClampToQuantum(result.green));
            if ((channel & BlueChannel) != 0)
              SetPixelBlue(q,ClampToQuantum(result.blue));
            if (((channel & OpacityChannel) != 0) &&
                (image->matte != MagickFalse))
              SetPixelOpacity(q,ClampToQuantum(result.opacity));
            if (((channel & IndexChannel) != 0) &&
                (image->colorspace == CMYKColorspace))
              SetPixelIndex(q_indexes+y,ClampToQuantum(result.index));
          }
        else
          { /* Channel 'Sync' Flag, and Alpha Channel enabled.
            ** Weight the color channels with Alpha Channel so that
            ** transparent pixels are not part of the results.
            */
            double
              gamma;  /* divisor, sum of color alpha weighting */

            MagickRealType
              alpha;  /* alpha weighting for colors : alpha  */

            size_t
              count;  /* alpha values collected, number kernel values */

            count=0;
            gamma=0.0;
            for (v=0; v < (ssize_t) kernel->height; v++) {
              if ( IsNaN(*k) ) continue;
              alpha=QuantumScale*(QuantumRange-GetPixelOpacity(k_pixels));
              count++;           /* number of alpha values collected */
              alpha*=(*k);       /* include kernel weighting now */
              gamma += alpha;    /* normalize alpha weights only */
              result.red     += alpha*GetPixelRed(k_pixels);
              result.green   += alpha*GetPixelGreen(k_pixels);
              result.blue    += alpha*GetPixelBlue(k_pixels);
              result.opacity += (*k)*GetPixelOpacity(k_pixels);
              if ( image->colorspace == CMYKColorspace)
                result.index += alpha*(*k_indexes);
              k--;
              k_pixels++;
              k_indexes++;
            }
            /* Sync'ed channels, all channels are modified */
            gamma=PerceptibleReciprocal(gamma);
            if (count != 0)
              gamma*=(double) kernel->height/count;
            SetPixelRed(q,ClampToQuantum(gamma*result.red));
            SetPixelGreen(q,ClampToQuantum(gamma*result.green));
            SetPixelBlue(q,ClampToQuantum(gamma*result.blue));
            SetPixelOpacity(q,ClampToQuantum(result.opacity));
            if (image->colorspace == CMYKColorspace)
              SetPixelIndex(q_indexes+y,ClampToQuantum(gamma*result.index));
          }

        /* Count up changed pixels */
        if (   ( p[r].red != GetPixelRed(q))
            || ( p[r].green != GetPixelGreen(q))
            || ( p[r].blue != GetPixelBlue(q))
            || ( (image->matte != MagickFalse) &&
                 (p[r].opacity != GetPixelOpacity(q)))
            || ( (image->colorspace == CMYKColorspace) &&
                 (GetPixelIndex(p_indexes+y+r) != GetPixelIndex(q_indexes+y))) )
          changes[id]++;
        p++;
        q++;
      } /* y */
      if ( SyncCacheViewAuthenticPixels(q_view,exception) == MagickFalse)
        status=MagickFalse;
      if (image->progress_monitor != (MagickProgressMonitor) NULL)
        {
          MagickBooleanType
            proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp atomic
#endif
          progress++;
          /* NOTE(review): this loop iterates over columns, yet progress is
          ** measured against image->rows -- confirm this is intended. */
          proceed=SetImageProgress(image,MorphologyTag,progress,image->rows);
          if (proceed == MagickFalse)
            status=MagickFalse;
        }
    } /* x */
    result_image->type=image->type;
    q_view=DestroyCacheView(q_view);
    p_view=DestroyCacheView(p_view);
    for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++)
      changed+=changes[i];
    changes=(size_t *) RelinquishMagickMemory(changes);
    /* FIX: on failure return -1 (error) like the general path below;
    ** previously this returned 0, which callers could mistake for
    ** "converged, no pixels changed". */
    return(status ? (ssize_t) changed : -1);
  }

  /*
  ** Normal handling of horizontal or rectangular kernels (row by row)
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,result_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    const PixelPacket
      *magick_restrict p;

    const IndexPacket
      *magick_restrict p_indexes;

    PixelPacket
      *magick_restrict q;

    IndexPacket
      *magick_restrict q_indexes;

    ssize_t
      x;

    size_t
      r;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(p_view, -offx, y-offy, virt_width,
      kernel->height, exception);
    q=GetCacheViewAuthenticPixels(q_view,0,y,result_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    p_indexes=GetCacheViewVirtualIndexQueue(p_view);
    q_indexes=GetCacheViewAuthenticIndexQueue(q_view);

    /* offset to origin in 'p'. while 'q' points to it directly */
    r = virt_width*offy + offx;

    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        v;

      ssize_t
        u;

      const double
        *magick_restrict k;

      const PixelPacket
        *magick_restrict k_pixels;

      const IndexPacket
        *magick_restrict k_indexes;

      DoublePixelPacket
        result,
        min,
        max;

      /* Copy input image to the output image for unused channels
       * This removes need for 'cloning' a new image every iteration
       */
      *q = p[r];
      if (image->colorspace == CMYKColorspace)
        SetPixelIndex(q_indexes+x,GetPixelIndex(p_indexes+x+r));

      /* Defaults */
      min.red     =
      min.green   =
      min.blue    =
      min.opacity =
      min.index   = (double) QuantumRange;
      max.red     =
      max.green   =
      max.blue    =
      max.opacity =
      max.index   = 0.0;
      /* default result is the original pixel value */
      result.red     = (double) p[r].red;
      result.green   = (double) p[r].green;
      result.blue    = (double) p[r].blue;
      result.opacity = QuantumRange - (double) p[r].opacity;
      result.index   = 0.0;
      if ( image->colorspace == CMYKColorspace)
        result.index = (double) GetPixelIndex(p_indexes+x+r);

      switch (method) {
        case ConvolveMorphology:
          /* Set the bias of the weighted average output */
          result.red     =
          result.green   =
          result.blue    =
          result.opacity =
          result.index   = bias;
          break;
        case DilateIntensityMorphology:
        case ErodeIntensityMorphology:
          /* use a boolean flag indicating when first match found */
          result.red = 0.0;  /* result is not used otherwise */
          break;
        default:
          break;
      }

      switch ( method ) {
        case ConvolveMorphology:
            /* Weighted Average of pixels using reflected kernel
            **
            ** NOTE for correct working of this operation for asymmetrical
            ** kernels, the kernel needs to be applied in its reflected form.
            ** That is its values needs to be reversed.
            **
            ** Correlation is actually the same as this but without reflecting
            ** the kernel, and thus 'lower-level' that Convolution.  However
            ** as Convolution is the more common method used, and it does not
            ** really cost us much in terms of processing to use a reflected
            ** kernel, so it is Convolution that is implemented.
            **
            ** Correlation will have its kernel reflected before calling
            ** this function to do a Convolve.
            **
            ** For more details of Correlation vs Convolution see
            **   http://www.cs.umd.edu/~djacobs/CMSC426/Convolution.pdf
            */
            k = &kernel->values[ kernel->width*kernel->height-1 ];
            k_pixels = p;
            k_indexes = p_indexes+x;
            if ( ((channel & SyncChannels) == 0 ) ||
                 (image->matte == MagickFalse) )
              { /* No 'Sync' involved.
                ** Convolution is simple greyscale channel operation
                */
                for (v=0; v < (ssize_t) kernel->height; v++) {
                  for (u=0; u < (ssize_t) kernel->width; u++, k--) {
                    if ( IsNaN(*k) ) continue;
                    result.red     += (*k)*k_pixels[u].red;
                    result.green   += (*k)*k_pixels[u].green;
                    result.blue    += (*k)*k_pixels[u].blue;
                    result.opacity += (*k)*k_pixels[u].opacity;
                    if ( image->colorspace == CMYKColorspace)
                      result.index += (*k)*GetPixelIndex(k_indexes+u);
                  }
                  k_pixels += virt_width;
                  k_indexes += virt_width;
                }
                if ((channel & RedChannel) != 0)
                  SetPixelRed(q,ClampToQuantum((MagickRealType) result.red));
                if ((channel & GreenChannel) != 0)
                  SetPixelGreen(q,ClampToQuantum((MagickRealType) result.green));
                if ((channel & BlueChannel) != 0)
                  SetPixelBlue(q,ClampToQuantum((MagickRealType) result.blue));
                if (((channel & OpacityChannel) != 0) &&
                    (image->matte != MagickFalse))
                  SetPixelOpacity(q,ClampToQuantum((MagickRealType) result.opacity));
                if (((channel & IndexChannel) != 0) &&
                    (image->colorspace == CMYKColorspace))
                  SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
              }
            else
              { /* Channel 'Sync' Flag, and Alpha Channel enabled.
                ** Weight the color channels with Alpha Channel so that
                ** transparent pixels are not part of the results.
                */
                double
                  alpha,  /* alpha weighting for colors : alpha  */
                  gamma;  /* divisor, sum of color alpha weighting */

                size_t
                  count;  /* alpha values collected, number kernel values */

                count=0;
                gamma=0.0;
                for (v=0; v < (ssize_t) kernel->height; v++) {
                  for (u=0; u < (ssize_t) kernel->width; u++, k--) {
                    if ( IsNaN(*k) ) continue;
                    alpha=QuantumScale*(QuantumRange-k_pixels[u].opacity);
                    count++;           /* number of alpha values collected */
                    alpha*=(*k);       /* include kernel weighting now */
                    gamma += alpha;    /* normalize alpha weights only */
                    result.red     += alpha*k_pixels[u].red;
                    result.green   += alpha*k_pixels[u].green;
                    result.blue    += alpha*k_pixels[u].blue;
                    result.opacity += (*k)*k_pixels[u].opacity;
                    if ( image->colorspace == CMYKColorspace)
                      result.index+=alpha*GetPixelIndex(k_indexes+u);
                  }
                  k_pixels += virt_width;
                  k_indexes += virt_width;
                }
                /* Sync'ed channels, all channels are modified */
                gamma=PerceptibleReciprocal(gamma);
                if (count != 0)
                  gamma*=(double) kernel->height*kernel->width/count;
                SetPixelRed(q,ClampToQuantum((MagickRealType) (gamma*result.red)));
                SetPixelGreen(q,ClampToQuantum((MagickRealType) (gamma*result.green)));
                SetPixelBlue(q,ClampToQuantum((MagickRealType) (gamma*result.blue)));
                SetPixelOpacity(q,ClampToQuantum(result.opacity));
                if (image->colorspace == CMYKColorspace)
                  SetPixelIndex(q_indexes+x,ClampToQuantum((MagickRealType) (gamma*
                    result.index)));
              }
            break;

        case ErodeMorphology:
            /* Minimum Value within kernel neighbourhood
            **
            ** NOTE that the kernel is not reflected for this operation!
            **
            ** NOTE: in normal Greyscale Morphology, the kernel value should
            ** be added to the real value, this is currently not done, due to
            ** the nature of the boolean kernels being used.
            */
            k = kernel->values;
            k_pixels = p;
            k_indexes = p_indexes+x;
            for (v=0; v < (ssize_t) kernel->height; v++) {
              for (u=0; u < (ssize_t) kernel->width; u++, k++) {
                if ( IsNaN(*k) || (*k) < 0.5 ) continue;
                Minimize(min.red,     (double) k_pixels[u].red);
                Minimize(min.green,   (double) k_pixels[u].green);
                Minimize(min.blue,    (double) k_pixels[u].blue);
                Minimize(min.opacity,
                            QuantumRange-(double) k_pixels[u].opacity);
                if ( image->colorspace == CMYKColorspace)
                  Minimize(min.index,(double) GetPixelIndex(k_indexes+u));
              }
              k_pixels += virt_width;
              k_indexes += virt_width;
            }
            break;

        case DilateMorphology:
            /* Maximum Value within kernel neighbourhood
            **
            ** NOTE for correct working of this operation for asymmetrical
            ** kernels, the kernel needs to be applied in its reflected form.
            ** That is its values needs to be reversed.
            **
            ** NOTE: in normal Greyscale Morphology, the kernel value should
            ** be added to the real value, this is currently not done, due to
            ** the nature of the boolean kernels being used.
            **
            */
            k = &kernel->values[ kernel->width*kernel->height-1 ];
            k_pixels = p;
            k_indexes = p_indexes+x;
            for (v=0; v < (ssize_t) kernel->height; v++) {
              for (u=0; u < (ssize_t) kernel->width; u++, k--) {
                if ( IsNaN(*k) || (*k) < 0.5 ) continue;
                Maximize(max.red,     (double) k_pixels[u].red);
                Maximize(max.green,   (double) k_pixels[u].green);
                Maximize(max.blue,    (double) k_pixels[u].blue);
                Maximize(max.opacity,
                            QuantumRange-(double) k_pixels[u].opacity);
                if ( image->colorspace == CMYKColorspace)
                  Maximize(max.index,   (double) GetPixelIndex(
                    k_indexes+u));
              }
              k_pixels += virt_width;
              k_indexes += virt_width;
            }
            break;

        case HitAndMissMorphology:
        case ThinningMorphology:
        case ThickenMorphology:
            /* Minimum of Foreground Pixel minus Maximum of Background Pixels
            **
            ** NOTE that the kernel is not reflected for this operation,
            ** and consists of both foreground and background pixel
            ** neighbourhoods, 0.0 for background, and 1.0 for foreground
            ** with either Nan or 0.5 values for don't care.
            **
            ** Note that this will never produce a meaningless negative
            ** result.  Such results can cause Thinning/Thicken to not work
            ** correctly when used against a greyscale image.
            */
            k = kernel->values;
            k_pixels = p;
            k_indexes = p_indexes+x;
            for (v=0; v < (ssize_t) kernel->height; v++) {
              for (u=0; u < (ssize_t) kernel->width; u++, k++) {
                if ( IsNaN(*k) ) continue;
                if ( (*k) > 0.7 )
                { /* minimim of foreground pixels */
                  Minimize(min.red,     (double) k_pixels[u].red);
                  Minimize(min.green,   (double) k_pixels[u].green);
                  Minimize(min.blue,    (double) k_pixels[u].blue);
                  Minimize(min.opacity,
                              QuantumRange-(double) k_pixels[u].opacity);
                  if ( image->colorspace == CMYKColorspace)
                    Minimize(min.index,(double) GetPixelIndex(
                      k_indexes+u));
                }
                else if ( (*k) < 0.3 )
                { /* maximum of background pixels */
                  Maximize(max.red,     (double) k_pixels[u].red);
                  Maximize(max.green,   (double) k_pixels[u].green);
                  Maximize(max.blue,    (double) k_pixels[u].blue);
                  Maximize(max.opacity,
                              QuantumRange-(double) k_pixels[u].opacity);
                  if ( image->colorspace == CMYKColorspace)
                    Maximize(max.index,   (double) GetPixelIndex(
                      k_indexes+u));
                }
              }
              k_pixels += virt_width;
              k_indexes += virt_width;
            }
            /* Pattern Match if difference is positive */
            min.red     -= max.red;     Maximize( min.red,     0.0 );
            min.green   -= max.green;   Maximize( min.green,   0.0 );
            min.blue    -= max.blue;    Maximize( min.blue,    0.0 );
            min.opacity -= max.opacity; Maximize( min.opacity, 0.0 );
            min.index   -= max.index;   Maximize( min.index,   0.0 );
            break;

        case ErodeIntensityMorphology:
            /* Select Pixel with Minimum Intensity within kernel neighbourhood
            **
            ** WARNING: the intensity test fails for CMYK and does not
            ** take into account the moderating effect of the alpha channel
            ** on the intensity.
            **
            ** NOTE that the kernel is not reflected for this operation!
            */
            k = kernel->values;
            k_pixels = p;
            k_indexes = p_indexes+x;
            for (v=0; v < (ssize_t) kernel->height; v++) {
              for (u=0; u < (ssize_t) kernel->width; u++, k++) {
                if ( IsNaN(*k) || (*k) < 0.5 ) continue;
                if ( result.red == 0.0 ||
                     GetPixelIntensity(image,&(k_pixels[u])) < GetPixelIntensity(result_image,q) ) {
                  /* copy the whole pixel - no channel selection */
                  *q = k_pixels[u];
                  if ( result.red > 0.0 ) changes[id]++;
                  result.red = 1.0;
                }
              }
              k_pixels += virt_width;
              k_indexes += virt_width;
            }
            break;

        case DilateIntensityMorphology:
            /* Select Pixel with Maximum Intensity within kernel neighbourhood
            **
            ** WARNING: the intensity test fails for CMYK and does not
            ** take into account the moderating effect of the alpha channel
            ** on the intensity (yet).
            **
            ** NOTE for correct working of this operation for asymmetrical
            ** kernels, the kernel needs to be applied in its reflected form.
            ** That is its values needs to be reversed.
            */
            k = &kernel->values[ kernel->width*kernel->height-1 ];
            k_pixels = p;
            k_indexes = p_indexes+x;
            for (v=0; v < (ssize_t) kernel->height; v++) {
              for (u=0; u < (ssize_t) kernel->width; u++, k--) {
                if ( IsNaN(*k) || (*k) < 0.5 ) continue; /* boolean kernel */
                if ( result.red == 0.0 ||
                     GetPixelIntensity(image,&(k_pixels[u])) > GetPixelIntensity(result_image,q) ) {
                  /* copy the whole pixel - no channel selection */
                  *q = k_pixels[u];
                  if ( result.red > 0.0 ) changes[id]++;
                  result.red = 1.0;
                }
              }
              k_pixels += virt_width;
              k_indexes += virt_width;
            }
            break;

        case IterativeDistanceMorphology:
            /* Work out an iterative distance from black edge of a white image
            ** shape.  Essentially white values are decreased to the smallest
            ** 'distance from edge' it can find.
            **
            ** It works by adding kernel values to the neighbourhood, and
            ** select the minimum value found. The kernel is rotated before
            ** use, so kernel distances match resulting distances, when a user
            ** provided asymmetric kernel is applied.
            **
            **
            ** This code is almost identical to True GrayScale Morphology But
            ** not quite.
            **
            ** GreyDilate  Kernel values added, maximum value found Kernel is
            ** rotated before use.
            **
            ** GrayErode:  Kernel values subtracted and minimum value found No
            ** kernel rotation used.
            **
            ** Note the Iterative Distance method is essentially a
            ** GrayErode, but with negative kernel values, and kernel
            ** rotation applied.
            */
            k = &kernel->values[ kernel->width*kernel->height-1 ];
            k_pixels = p;
            k_indexes = p_indexes+x;
            for (v=0; v < (ssize_t) kernel->height; v++) {
              for (u=0; u < (ssize_t) kernel->width; u++, k--) {
                if ( IsNaN(*k) ) continue;
                Minimize(result.red,     (*k)+k_pixels[u].red);
                Minimize(result.green,   (*k)+k_pixels[u].green);
                Minimize(result.blue,    (*k)+k_pixels[u].blue);
                Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
                if ( image->colorspace == CMYKColorspace)
                  Minimize(result.index,(*k)+GetPixelIndex(k_indexes+u));
              }
              k_pixels += virt_width;
              k_indexes += virt_width;
            }
            break;

        case UndefinedMorphology:
        default:
            break; /* Do nothing */
      }
      /* Final mathematics of results (combine with original image?)
      **
      ** NOTE: Difference Morphology operators Edge* and *Hat could also
      ** be done here but works better with iteration as a image difference
      ** in the controlling function (below).  Thicken and Thinning however
      ** should be done here so thay can be iterated correctly.
      */
      switch ( method ) {
        case HitAndMissMorphology:
        case ErodeMorphology:
          result = min;    /* minimum of neighbourhood */
          break;
        case DilateMorphology:
          result = max;    /* maximum of neighbourhood */
          break;
        case ThinningMorphology:
          /* subtract pattern match from original */
          result.red     -= min.red;
          result.green   -= min.green;
          result.blue    -= min.blue;
          result.opacity -= min.opacity;
          result.index   -= min.index;
          break;
        case ThickenMorphology:
          /* Add the pattern matchs to the original */
          result.red     += min.red;
          result.green   += min.green;
          result.blue    += min.blue;
          result.opacity += min.opacity;
          result.index   += min.index;
          break;
        default:
          /* result directly calculated or assigned */
          break;
      }
      /* Assign the resulting pixel values - Clamping Result */
      switch ( method ) {
        case UndefinedMorphology:
        case ConvolveMorphology:
        case DilateIntensityMorphology:
        case ErodeIntensityMorphology:
          break;  /* full pixel was directly assigned - not a channel method */
        default:
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(result.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(result.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(result.blue));
          if ((channel & OpacityChannel) != 0
              && image->matte != MagickFalse )
            SetPixelAlpha(q,ClampToQuantum(result.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
          break;
      }
      /* Count up changed pixels */
      if (   ( p[r].red != GetPixelRed(q) )
          || ( p[r].green != GetPixelGreen(q) )
          || ( p[r].blue != GetPixelBlue(q) )
          || ( (image->matte != MagickFalse) &&
               (p[r].opacity != GetPixelOpacity(q)))
          || ( (image->colorspace == CMYKColorspace) &&
               (GetPixelIndex(p_indexes+x+r) != GetPixelIndex(q_indexes+x))) )
        changes[id]++;
      p++;
      q++;
    } /* x */
    if ( SyncCacheViewAuthenticPixels(q_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,MorphologyTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  } /* y */
  q_view=DestroyCacheView(q_view);
  p_view=DestroyCacheView(p_view);
  for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++)
    changed+=changes[i];
  changes=(size_t *) RelinquishMagickMemory(changes);
  /* negative return signals an error to the caller; otherwise the number
  ** of pixels changed is returned for convergence testing */
  return(status ? (ssize_t) changed : -1);
}
/* This is almost identical to the MorphologyPrimitive() function above,
** but will apply the primitive directly to the actual image using two
** passes, once in each direction, with the results of the previous (and
** current) row being re-used.
**
** That is after each row is 'Sync'ed' into the image, the next row will
** make use of those values as part of the calculation of the next row.
** It then repeats, but going in the opposite (bottom-up) direction.
**
** Because of this 're-use of results' this function can not make use
** of multi-threaded, parallel processing.
*/
static ssize_t MorphologyPrimitiveDirect(Image *image,
  const MorphologyMethod method, const ChannelType channel,
  const KernelInfo *kernel,ExceptionInfo *exception)
{
  /* In-place, two-pass (top-down then bottom-up) application of an
  ** iterative morphology primitive (Distance or Voronoi) to 'image'.
  ** Returns the number of pixels changed, or -1 on error.
  ** Must remain single-threaded: each row depends on the row just
  ** written during the same pass.
  */
  CacheView
    *auth_view,
    *virt_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y, offx, offy;

  size_t
    changed,
    virt_width;

  status=MagickTrue;
  changed=0;
  progress=0;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);

  /* Some methods (including convolve) need to use a reflected kernel.
   * Adjust 'origin' offsets so the loops walk the kernel as a reflection.
   */
  offx = kernel->x;
  offy = kernel->y;
  switch(method) {
    case DistanceMorphology:
    case VoronoiMorphology:
      /* kernel needs to be used with reflection about origin */
      offx = (ssize_t) kernel->width-offx-1;
      offy = (ssize_t) kernel->height-offy-1;
      break;
#if 0
    case ?????Morphology:
      /* kernel is used as is, without reflection */
      break;
#endif
    default:
      /* any other method is a programming error for this function */
      assert("Not a PrimativeDirect Morphology Method" != (char *) NULL);
      break;
  }

  /* DO NOT THREAD THIS CODE! */
  /* two views into same image (virtual, and actual/authentic) */
  virt_view=AcquireVirtualCacheView(image,exception);
  auth_view=AcquireAuthenticCacheView(image,exception);
  /* width of a virtual row: image width padded by kernel overhang */
  virt_width=image->columns+kernel->width-1;

  /* Pass 1: top-down through the image */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const PixelPacket
      *magick_restrict p;

    const IndexPacket
      *magick_restrict p_indexes;

    PixelPacket
      *magick_restrict q;

    IndexPacket
      *magick_restrict q_indexes;

    ssize_t
      x;

    ssize_t
      r;

    /* NOTE read virtual pixels, and authentic pixels, from the same image!
    ** we read using virtual to get virtual pixel handling, but write back
    ** into the same image.
    **
    ** Only top half of kernel is processed as we do a single pass downward
    ** through the image iterating the distance function as we go.
    */
    if (status == MagickFalse)
      break;
    p=GetCacheViewVirtualPixels(virt_view, -offx, y-offy, virt_width, (size_t) offy+1,
      exception);
    q=GetCacheViewAuthenticPixels(auth_view, 0, y, image->columns, 1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      status=MagickFalse;
    if (status == MagickFalse)
      break;
    p_indexes=GetCacheViewVirtualIndexQueue(virt_view);
    q_indexes=GetCacheViewAuthenticIndexQueue(auth_view);

    /* offset to origin in 'p'. while 'q' points to it directly */
    r = (ssize_t) virt_width*offy + offx;

    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        v;

      ssize_t
        u;

      const double
        *magick_restrict k;

      const PixelPacket
        *magick_restrict k_pixels;

      const IndexPacket
        *magick_restrict k_indexes;

      MagickPixelPacket
        result;

      /* Starting Defaults: current pixel value (distance methods treat
      ** opacity as an inverted "distance" channel) */
      GetMagickPixelPacket(image,&result);
      SetMagickPixelPacket(image,q,q_indexes,&result);
      if ( method != VoronoiMorphology )
        result.opacity = QuantumRange - result.opacity;

      switch ( method ) {
        case DistanceMorphology:
          /* Add kernel Value and select the minimum value found.
          ** 'k' walks the (reflected) kernel backward from its last value. */
          k = &kernel->values[ kernel->width*kernel->height-1 ];
          k_pixels = p;
          k_indexes = p_indexes+x;
          for (v=0; v <= (ssize_t) offy; v++) {
            for (u=0; u < (ssize_t) kernel->width; u++, k--) {
              if ( IsNaN(*k) ) continue;
              Minimize(result.red, (*k)+k_pixels[u].red);
              Minimize(result.green, (*k)+k_pixels[u].green);
              Minimize(result.blue, (*k)+k_pixels[u].blue);
              Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
              if ( image->colorspace == CMYKColorspace)
                Minimize(result.index, (*k)+GetPixelIndex(k_indexes+u));
            }
            k_pixels += virt_width;
            k_indexes += virt_width;
          }
          /* repeat with the just processed pixels of this row (to the
          ** left of the origin), read from the authentic row 'q' */
          k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
          k_pixels = q-offx;
          k_indexes = q_indexes-offx;
          for (u=0; u < (ssize_t) offx; u++, k--) {
            if ( x+u-offx < 0 ) continue; /* off the edge! */
            if ( IsNaN(*k) ) continue;
            Minimize(result.red, (*k)+k_pixels[u].red);
            Minimize(result.green, (*k)+k_pixels[u].green);
            Minimize(result.blue, (*k)+k_pixels[u].blue);
            Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
            if ( image->colorspace == CMYKColorspace)
              Minimize(result.index, (*k)+GetPixelIndex(k_indexes+u));
          }
          break;
        case VoronoiMorphology:
          /* Apply Distance to 'Matte' channel, while copying the color
          ** values of the closest pixel.
          **
          ** This is experimental, and really the 'alpha' component should
          ** be completely separate 'masking' channel so that alpha can
          ** also be used as part of the results.
          */
          k = &kernel->values[ kernel->width*kernel->height-1 ];
          k_pixels = p;
          k_indexes = p_indexes+x;
          for (v=0; v <= (ssize_t) offy; v++) {
            for (u=0; u < (ssize_t) kernel->width; u++, k--) {
              if ( IsNaN(*k) ) continue;
              if( result.opacity > (*k)+k_pixels[u].opacity )
                {
                  /* found a closer seed: copy its color, keep new distance */
                  SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
                    &result);
                  result.opacity += *k;
                }
            }
            k_pixels += virt_width;
            k_indexes += virt_width;
          }
          /* repeat with the just processed pixels of this row */
          k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
          k_pixels = q-offx;
          k_indexes = q_indexes-offx;
          for (u=0; u < (ssize_t) offx; u++, k--) {
            if ( x+u-offx < 0 ) continue; /* off the edge! */
            if ( IsNaN(*k) ) continue;
            if( result.opacity > (*k)+k_pixels[u].opacity )
              {
                SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
                  &result);
                result.opacity += *k;
              }
          }
          break;
        default:
          /* result directly calculated or assigned */
          break;
      }

      /* Assign the resulting pixel values - Clamping Result */
      switch ( method ) {
        case VoronoiMorphology:
          SetPixelPacket(image,&result,q,q_indexes);
          break;
        default:
          /* distance was stored inverted in opacity; write back per-channel */
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(result.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(result.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(result.blue));
          if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
            SetPixelAlpha(q,ClampToQuantum(result.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
          break;
      }

      /* Count up changed pixels (compare against the virtual origin p[r]) */
      if ( ( p[r].red != GetPixelRed(q) )
        || ( p[r].green != GetPixelGreen(q) )
        || ( p[r].blue != GetPixelBlue(q) )
        || ( (image->matte != MagickFalse) &&
             (p[r].opacity != GetPixelOpacity(q)))
        || ( (image->colorspace == CMYKColorspace) &&
             (GetPixelIndex(p_indexes+x+r) != GetPixelIndex(q_indexes+x))) )
        changed++; /* The pixel was changed in some way! */

      p++; /* increment pixel buffers */
      q++;
    } /* x */

    /* Sync is what makes this row's results visible to the next row */
    if ( SyncCacheViewAuthenticPixels(auth_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        if (SetImageProgress(image,MorphologyTag,progress,image->rows) == MagickFalse )
          status=MagickFalse;
      }
  } /* y */

  /* Pass 2: do the reversed (bottom-up, right-to-left) pass through image */
  for (y=(ssize_t)image->rows-1; y >= 0; y--)
  {
    const PixelPacket
      *magick_restrict p;

    const IndexPacket
      *magick_restrict p_indexes;

    PixelPacket
      *magick_restrict q;

    IndexPacket
      *magick_restrict q_indexes;

    ssize_t
      x;

    ssize_t
      r;

    if (status == MagickFalse)
      break;
    /* NOTE read virtual pixels, and authentic pixels, from the same image!
    ** we read using virtual to get virtual pixel handling, but write back
    ** into the same image.
    **
    ** Only the bottom half of the kernel will be processed as we go
    ** up the image.
    */
    p=GetCacheViewVirtualPixels(virt_view, -offx, y, virt_width, (size_t) kernel->y+1,
      exception);
    q=GetCacheViewAuthenticPixels(auth_view, 0, y, image->columns, 1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      status=MagickFalse;
    if (status == MagickFalse)
      break;
    p_indexes=GetCacheViewVirtualIndexQueue(virt_view);
    q_indexes=GetCacheViewAuthenticIndexQueue(auth_view);

    /* adjust positions to end of row */
    p += image->columns-1;
    q += image->columns-1;

    /* offset to origin in 'p'. while 'q' points to it directly */
    r = offx;

    for (x=(ssize_t)image->columns-1; x >= 0; x--)
    {
      ssize_t
        v;

      ssize_t
        u;

      const double
        *magick_restrict k;

      const PixelPacket
        *magick_restrict k_pixels;

      const IndexPacket
        *magick_restrict k_indexes;

      MagickPixelPacket
        result;

      /* Default - previously modified pixel (result of pass 1) */
      GetMagickPixelPacket(image,&result);
      SetMagickPixelPacket(image,q,q_indexes,&result);
      if ( method != VoronoiMorphology )
        result.opacity = QuantumRange - result.opacity;

      switch ( method ) {
        case DistanceMorphology:
          /* Add kernel Value and select the minimum value found.
          ** Bottom half of the kernel this time. */
          k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
          k_pixels = p;
          k_indexes = p_indexes+x;
          for (v=offy; v < (ssize_t) kernel->height; v++) {
            for (u=0; u < (ssize_t) kernel->width; u++, k--) {
              if ( IsNaN(*k) ) continue;
              Minimize(result.red, (*k)+k_pixels[u].red);
              Minimize(result.green, (*k)+k_pixels[u].green);
              Minimize(result.blue, (*k)+k_pixels[u].blue);
              Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
              if ( image->colorspace == CMYKColorspace)
                Minimize(result.index,(*k)+GetPixelIndex(k_indexes+u));
            }
            k_pixels += virt_width;
            k_indexes += virt_width;
          }
          /* repeat with the just processed pixels of this row (to the
          ** right of the origin this time) */
          k = &kernel->values[ kernel->width*(kernel->y)+kernel->x-1 ];
          k_pixels = q-offx;
          k_indexes = q_indexes-offx;
          for (u=offx+1; u < (ssize_t) kernel->width; u++, k--) {
            if ( (x+u-offx) >= (ssize_t)image->columns ) continue;
            if ( IsNaN(*k) ) continue;
            Minimize(result.red, (*k)+k_pixels[u].red);
            Minimize(result.green, (*k)+k_pixels[u].green);
            Minimize(result.blue, (*k)+k_pixels[u].blue);
            Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
            if ( image->colorspace == CMYKColorspace)
              Minimize(result.index, (*k)+GetPixelIndex(k_indexes+u));
          }
          break;
        case VoronoiMorphology:
          /* Apply Distance to 'Matte' channel, copying the closest color.
          **
          ** This is experimental, and really the 'alpha' component should
          ** be completely separate 'masking' channel.
          */
          k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
          k_pixels = p;
          k_indexes = p_indexes+x;
          for (v=offy; v < (ssize_t) kernel->height; v++) {
            for (u=0; u < (ssize_t) kernel->width; u++, k--) {
              if ( IsNaN(*k) ) continue;
              if( result.opacity > (*k)+k_pixels[u].opacity )
                {
                  SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
                    &result);
                  result.opacity += *k;
                }
            }
            k_pixels += virt_width;
            k_indexes += virt_width;
          }
          /* repeat with the just processed pixels of this row */
          k = &kernel->values[ kernel->width*(kernel->y)+kernel->x-1 ];
          k_pixels = q-offx;
          k_indexes = q_indexes-offx;
          for (u=offx+1; u < (ssize_t) kernel->width; u++, k--) {
            if ( (x+u-offx) >= (ssize_t)image->columns ) continue;
            if ( IsNaN(*k) ) continue;
            if( result.opacity > (*k)+k_pixels[u].opacity )
              {
                SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
                  &result);
                result.opacity += *k;
              }
          }
          break;
        default:
          /* result directly calculated or assigned */
          break;
      }

      /* Assign the resulting pixel values - Clamping Result */
      switch ( method ) {
        case VoronoiMorphology:
          SetPixelPacket(image,&result,q,q_indexes);
          break;
        default:
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(result.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(result.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(result.blue));
          if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
            SetPixelAlpha(q,ClampToQuantum(result.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
          break;
      }

      /* Count up changed pixels */
      if ( ( p[r].red != GetPixelRed(q) )
        || ( p[r].green != GetPixelGreen(q) )
        || ( p[r].blue != GetPixelBlue(q) )
        || ( (image->matte != MagickFalse) &&
             (p[r].opacity != GetPixelOpacity(q)))
        || ( (image->colorspace == CMYKColorspace) &&
             (GetPixelIndex(p_indexes+x+r) != GetPixelIndex(q_indexes+x))) )
        changed++; /* The pixel was changed in some way! */

      p--; /* go backward through pixel buffers */
      q--;
    } /* x */

    if ( SyncCacheViewAuthenticPixels(auth_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        if ( SetImageProgress(image,MorphologyTag,progress,image->rows) == MagickFalse )
          status=MagickFalse;
      }
  } /* y */

  auth_view=DestroyCacheView(auth_view);
  virt_view=DestroyCacheView(virt_view);
  return(status ? (ssize_t) changed : -1);
}
/* Apply a Morphology by calling one of the above low level primitive
** application functions. This function handles any iteration loops,
** composition or re-iteration of results, and compound morphology methods
** that is based on multiple low-level (staged) morphology methods.
**
** Basically this provides the complex glue between the requested morphology
** method and the raw low-level implementation (above).
*/
MagickExport Image *MorphologyApply(const Image *image, const ChannelType
  channel,const MorphologyMethod method, const ssize_t iterations,
  const KernelInfo *kernel, const CompositeOperator compose,
  const double bias, ExceptionInfo *exception)
{
  /* Drives the low-level morphology primitives: handles kernel iteration,
  ** multi-kernel lists, compound (multi-stage) methods, result composition
  ** and the special direct-modify methods (Distance/Voronoi).
  ** Returns a new image, or NULL on error. */
  CompositeOperator
    curr_compose;

  Image
    *curr_image,    /* Image we are working with or iterating */
    *work_image,    /* secondary image for primitive iteration */
    *save_image,    /* saved image - for 'edge' method only */
    *rslt_image;    /* resultant image - after multi-kernel handling */

  KernelInfo
    *reflected_kernel, /* A reflected copy of the kernel (if needed) */
    *norm_kernel,      /* the current normal un-reflected kernel */
    *rflt_kernel,      /* the current reflected kernel (if needed) */
    *this_kernel;      /* the kernel being applied */

  MorphologyMethod
    primitive;      /* the current morphology primitive being applied */

  CompositeOperator
    rslt_compose;   /* multi-kernel compose method for results to use */

  MagickBooleanType
    special,        /* do we use a direct modify function? */
    verbose;        /* verbose output of results */

  size_t
    method_loop,    /* Loop 1: number of compound method iterations (norm 1) */
    method_limit,   /* maximum number of compound method iterations */
    kernel_number,  /* Loop 2: the kernel number being applied */
    stage_loop,     /* Loop 3: primitive loop for compound morphology */
    stage_limit,    /* how many primitives are in this compound */
    kernel_loop,    /* Loop 4: iterate the kernel over image */
    kernel_limit,   /* number of times to iterate kernel */
    count,          /* total count of primitive steps applied */
    kernel_changed, /* total count of changed using iterated kernel */
    method_changed; /* total count of changed over method iteration */

  ssize_t
    changed;        /* number pixels changed by last primitive operation */

  char
    v_info[MaxTextExtent];

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);

  count = 0;      /* number of low-level morphology primitives performed */
  if ( iterations == 0 )
    return((Image *) NULL);   /* null operation - nothing to do! */

  kernel_limit = (size_t) iterations;
  if ( iterations < 0 )  /* negative iterations = infinite (well almost) */
    kernel_limit = image->columns>image->rows ? image->columns : image->rows;

  verbose = IsMagickTrue(GetImageArtifact(image,"debug"));

  /* initialise for cleanup */
  curr_image = (Image *) image;
  curr_compose = image->compose;
  (void) curr_compose;   /* suppress unused-variable warning */
  work_image = save_image = rslt_image = (Image *) NULL;
  reflected_kernel = (KernelInfo *) NULL;

  /* Initialize specific methods
   * + which loop should use the given iterations
   * + how many primitives make up the compound morphology
   * + multi-kernel compose method to use (by default)
   */
  method_limit = 1;       /* just do method once, unless otherwise set */
  stage_limit = 1;        /* assume method is not a compound */
  special = MagickFalse;  /* assume it is NOT a direct modify primitive */
  rslt_compose = compose; /* and we are composing multi-kernels as given */
  switch( method ) {
    case SmoothMorphology:  /* 4 primitive compound morphology */
      stage_limit = 4;
      break;
    case OpenMorphology:    /* 2 primitive compound morphology */
    case OpenIntensityMorphology:
    case TopHatMorphology:
    case CloseMorphology:
    case CloseIntensityMorphology:
    case BottomHatMorphology:
    case EdgeMorphology:
      stage_limit = 2;
      break;
    case HitAndMissMorphology:
      rslt_compose = LightenCompositeOp;  /* Union of multi-kernel results */
      /* FALL THUR */
    case ThinningMorphology:
    case ThickenMorphology:
      method_limit = kernel_limit;  /* iterate the whole method */
      kernel_limit = 1;             /* do not do kernel iteration  */
      break;
    case DistanceMorphology:
    case VoronoiMorphology:
      special = MagickTrue;         /* use special direct primitive */
      break;
    default:
      break;
  }

  /* Apply special methods with special requirements
  ** For example, single run only, or post-processing requirements
  */
  if ( special != MagickFalse )
    {
      /* direct-modify methods work on a clone of the input image */
      rslt_image=CloneImage(image,0,0,MagickTrue,exception);
      if (rslt_image == (Image *) NULL)
        goto error_cleanup;
      if (SetImageStorageClass(rslt_image,DirectClass) == MagickFalse)
        {
          InheritException(exception,&rslt_image->exception);
          goto error_cleanup;
        }
      changed = MorphologyPrimitiveDirect(rslt_image, method,
        channel, kernel, exception);

      if ( verbose != MagickFalse )
        (void) (void) FormatLocaleFile(stderr,
          "%s:%.20g.%.20g #%.20g => Changed %.20g\n",
          CommandOptionToMnemonic(MagickMorphologyOptions, method),
          1.0,0.0,1.0, (double) changed);

      if ( changed < 0 )
        goto error_cleanup;

      if ( method == VoronoiMorphology ) {
        /* Preserve the alpha channel of input image - but turned off */
        (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel);
        (void) CompositeImageChannel(rslt_image, DefaultChannels,
          CopyOpacityCompositeOp, image, 0, 0);
        (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel);
      }
      goto exit_cleanup;
    }

  /* Handle user (caller) specified multi-kernel composition method */
  if ( compose != UndefinedCompositeOp )
    rslt_compose = compose;  /* override default composition for method */
  if ( rslt_compose == UndefinedCompositeOp )
    rslt_compose = NoCompositeOp; /* still not defined! Then re-iterate */

  /* Some methods require a reflected kernel to use with primitives.
   * Create the reflected kernel for those methods. */
  switch ( method ) {
    case CorrelateMorphology:
    case CloseMorphology:
    case CloseIntensityMorphology:
    case BottomHatMorphology:
    case SmoothMorphology:
      reflected_kernel = CloneKernelInfo(kernel);
      if (reflected_kernel == (KernelInfo *) NULL)
        goto error_cleanup;
      RotateKernelInfo(reflected_kernel,180);
      break;
    default:
      break;
  }

  /* Loops around more primitive morphology methods
  **  erode, dilate, open, close, smooth, edge, etc...
  */
  /* Loop 1:  iterate the compound method */
  method_loop = 0;
  method_changed = 1;
  while ( method_loop < method_limit && method_changed > 0 ) {
    method_loop++;
    method_changed = 0;

    /* Loop 2:  iterate over each kernel in a multi-kernel list */
    norm_kernel = (KernelInfo *) kernel;
    this_kernel = (KernelInfo *) kernel;
    rflt_kernel = reflected_kernel;
    kernel_number = 0;
    while ( norm_kernel != NULL ) {

      /* Loop 3: Compound Morphology Staging - Select Primitive to apply */
      stage_loop = 0;          /* the compound morphology stage number */
      while ( stage_loop < stage_limit ) {
        stage_loop++;   /* The stage of the compound morphology */

        /* Select primitive morphology for this stage of compound method */
        this_kernel = norm_kernel; /* default use unreflected kernel */
        primitive = method;        /* Assume method is a primitive */
        switch( method ) {
          case ErodeMorphology:      /* just erode */
          case EdgeInMorphology:     /* erode and image difference */
            primitive = ErodeMorphology;
            break;
          case DilateMorphology:     /* just dilate */
          case EdgeOutMorphology:    /* dilate and image difference */
            primitive = DilateMorphology;
            break;
          case OpenMorphology:       /* erode then dilate */
          case TopHatMorphology:     /* open and image difference */
            primitive = ErodeMorphology;
            if ( stage_loop == 2 )
              primitive = DilateMorphology;
            break;
          case OpenIntensityMorphology:
            primitive = ErodeIntensityMorphology;
            if ( stage_loop == 2 )
              primitive = DilateIntensityMorphology;
            break;
          case CloseMorphology:      /* dilate, then erode */
          case BottomHatMorphology:  /* close and image difference */
            this_kernel = rflt_kernel; /* use the reflected kernel */
            primitive = DilateMorphology;
            if ( stage_loop == 2 )
              primitive = ErodeMorphology;
            break;
          case CloseIntensityMorphology:
            this_kernel = rflt_kernel; /* use the reflected kernel */
            primitive = DilateIntensityMorphology;
            if ( stage_loop == 2 )
              primitive = ErodeIntensityMorphology;
            break;
          case SmoothMorphology:         /* open, close */
            switch ( stage_loop ) {
              case 1: /* start an open method, which starts with Erode */
                primitive = ErodeMorphology;
                break;
              case 2:  /* now Dilate the Erode */
                primitive = DilateMorphology;
                break;
              case 3:  /* Reflect kernel a close */
                this_kernel = rflt_kernel; /* use the reflected kernel */
                primitive = DilateMorphology;
                break;
              case 4:  /* Finish the Close */
                this_kernel = rflt_kernel; /* use the reflected kernel */
                primitive = ErodeMorphology;
                break;
            }
            break;
          case EdgeMorphology:        /* dilate and erode difference */
            primitive = DilateMorphology;
            if ( stage_loop == 2 ) {
              save_image = curr_image;      /* save the image difference */
              curr_image = (Image *) image; /* continue with original image */
              primitive = ErodeMorphology;
            }
            break;
          case CorrelateMorphology:
            /* A Correlation is a Convolution with a reflected kernel.
            ** However a Convolution is a weighted sum using a reflected
            ** kernel.  It may seem strange to convert a Correlation into a
            ** Convolution as the Correlation is the simpler method, but
            ** Convolution is much more commonly used, and it makes sense to
            ** implement it directly so as to avoid the need to duplicate the
            ** kernel when it is not required (which is typically the
            ** default).
            */
            this_kernel = rflt_kernel; /* use the reflected kernel */
            primitive = ConvolveMorphology;
            break;
          default:
            break;
        }
        assert( this_kernel != (KernelInfo *) NULL );

        /* Extra information for debugging compound operations */
        if ( verbose != MagickFalse ) {
          if ( stage_limit > 1 )
            (void) FormatLocaleString(v_info,MaxTextExtent,"%s:%.20g.%.20g -> ",
              CommandOptionToMnemonic(MagickMorphologyOptions,method),(double)
              method_loop,(double) stage_loop);
          else if ( primitive != method )
            (void) FormatLocaleString(v_info, MaxTextExtent, "%s:%.20g -> ",
              CommandOptionToMnemonic(MagickMorphologyOptions, method),(double)
              method_loop);
          else
            v_info[0] = '\0';
        }

        /* Loop 4: Iterate the kernel with primitive */
        kernel_loop = 0;
        kernel_changed = 0;
        changed = 1;
        while ( kernel_loop < kernel_limit && changed > 0 ) {
          kernel_loop++;     /* the iteration of this kernel */

          /* Create a clone as the destination image, if not yet defined */
          if ( work_image == (Image *) NULL )
            {
              work_image=CloneImage(image,0,0,MagickTrue,exception);
              if (work_image == (Image *) NULL)
                goto error_cleanup;
              if (SetImageStorageClass(work_image,DirectClass) == MagickFalse)
                {
                  InheritException(exception,&work_image->exception);
                  goto error_cleanup;
                }
              /* work_image->type=image->type; ??? */
            }

          /* APPLY THE MORPHOLOGICAL PRIMITIVE (curr -> work) */
          count++;
          changed = MorphologyPrimitive(curr_image, work_image, primitive,
            channel, this_kernel, bias, exception);

          if ( verbose != MagickFalse ) {
            if ( kernel_loop > 1 )
              (void) FormatLocaleFile(stderr, "\n"); /* add end-of-line from previous */
            (void) (void) FormatLocaleFile(stderr,
              "%s%s%s:%.20g.%.20g #%.20g => Changed %.20g",
              v_info,CommandOptionToMnemonic(MagickMorphologyOptions,
              primitive),(this_kernel == rflt_kernel ) ? "*" : "",
              (double) (method_loop+kernel_loop-1),(double) kernel_number,
              (double) count,(double) changed);
          }
          if ( changed < 0 )
            goto error_cleanup;
          kernel_changed += changed;
          method_changed += changed;

          /* prepare next loop */
          { Image *tmp = work_image;   /* swap images for iteration */
            work_image = curr_image;
            curr_image = tmp;
          }
          if ( work_image == image )
            work_image = (Image *) NULL; /* replace input 'image' */

        } /* End Loop 4: Iterate the kernel with primitive */

        if ( verbose != MagickFalse && kernel_changed != (size_t)changed )
          (void) FormatLocaleFile(stderr, "   Total %.20g",(double) kernel_changed);
        if ( verbose != MagickFalse && stage_loop < stage_limit )
          (void) FormatLocaleFile(stderr, "\n"); /* add end-of-line before looping */

#if 0
    (void) FormatLocaleFile(stderr, "--E-- image=0x%lx\n", (unsigned long)image);
    (void) FormatLocaleFile(stderr, "      curr =0x%lx\n", (unsigned long)curr_image);
    (void) FormatLocaleFile(stderr, "      work =0x%lx\n", (unsigned long)work_image);
    (void) FormatLocaleFile(stderr, "      save =0x%lx\n", (unsigned long)save_image);
    (void) FormatLocaleFile(stderr, "      union=0x%lx\n", (unsigned long)rslt_image);
#endif

      } /* End Loop 3: Primitive (staging) Loop for Compound Methods */

      /*  Final Post-processing for some Compound Methods
      **
      ** The removal of any 'Sync' channel flag in the Image Composition
      ** below ensures the mathematical compose method is applied in a
      ** purely mathematical way, and only to the selected channels.
      ** Turn off SVG composition 'alpha blending'.
      */
      switch( method ) {
        case EdgeOutMorphology:
        case EdgeInMorphology:
        case TopHatMorphology:
        case BottomHatMorphology:
          if ( verbose != MagickFalse )
            (void) FormatLocaleFile(stderr,
              "\n%s: Difference with original image",
              CommandOptionToMnemonic(MagickMorphologyOptions, method));
          (void) CompositeImageChannel(curr_image,(ChannelType)
            (channel & ~SyncChannels),DifferenceCompositeOp,image,0,0);
          break;
        case EdgeMorphology:
          if ( verbose != MagickFalse )
            (void) FormatLocaleFile(stderr,
              "\n%s: Difference of Dilate and Erode",
              CommandOptionToMnemonic(MagickMorphologyOptions, method));
          (void) CompositeImageChannel(curr_image,(ChannelType)
            (channel & ~SyncChannels),DifferenceCompositeOp,save_image,0,0);
          save_image = DestroyImage(save_image); /* finished with save image */
          break;
        default:
          break;
      }

      /* multi-kernel handling:  re-iterate, or compose results */
      if ( kernel->next == (KernelInfo *) NULL )
        rslt_image = curr_image;   /* just return the resulting image */
      else if ( rslt_compose == NoCompositeOp )
        { if ( verbose != MagickFalse ) {
            if ( this_kernel->next != (KernelInfo *) NULL )
              (void) FormatLocaleFile(stderr, " (re-iterate)");
            else
              (void) FormatLocaleFile(stderr, " (done)");
          }
          rslt_image = curr_image; /* return result, and re-iterate */
        }
      else if ( rslt_image == (Image *) NULL)
        { if ( verbose != MagickFalse )
            (void) FormatLocaleFile(stderr, " (save for compose)");
          rslt_image = curr_image;
          curr_image = (Image *) image;  /* continue with original image */
        }
      else
        { /* Add the new 'current' result to the composition
          **
          ** The removal of any 'Sync' channel flag in the Image Composition
          ** below ensures the mathematical compose method is applied in a
          ** purely mathematical way, and only to the selected channels.
          ** IE: Turn off SVG composition 'alpha blending'.
          */
          if ( verbose != MagickFalse )
            (void) FormatLocaleFile(stderr, " (compose \"%s\")",
              CommandOptionToMnemonic(MagickComposeOptions, rslt_compose) );
          (void) CompositeImageChannel(rslt_image,
            (ChannelType) (channel & ~SyncChannels), rslt_compose,
            curr_image, 0, 0);
          curr_image = DestroyImage(curr_image);
          curr_image = (Image *) image;  /* continue with original image */
        }
      if ( verbose != MagickFalse )
        (void) FormatLocaleFile(stderr, "\n");

      /* loop to the next kernel in a multi-kernel list */
      norm_kernel = norm_kernel->next;
      if ( rflt_kernel != (KernelInfo *) NULL )
        rflt_kernel = rflt_kernel->next;
      kernel_number++;
    } /* End Loop 2: Loop over each kernel */

  } /* End Loop 1: compound method iteration */

  goto exit_cleanup;

  /* Yes goto's are bad, but it makes cleanup lot more efficient */
error_cleanup:
  if ( curr_image == rslt_image )
    curr_image = (Image *) NULL;
  if ( rslt_image != (Image *) NULL )
    rslt_image = DestroyImage(rslt_image);
exit_cleanup:
  if ( curr_image == rslt_image || curr_image == image )
    curr_image = (Image *) NULL;
  if ( curr_image != (Image *) NULL )
    curr_image = DestroyImage(curr_image);
  if ( work_image != (Image *) NULL )
    work_image = DestroyImage(work_image);
  if ( save_image != (Image *) NULL )
    save_image = DestroyImage(save_image);
  if ( reflected_kernel != (KernelInfo *) NULL )
    reflected_kernel = DestroyKernelInfo(reflected_kernel);
  return(rslt_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y I m a g e C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyImageChannel() applies a user supplied kernel to the image
% according to the given morphology method.
%
% This function applies any and all user defined settings before calling
% the above internal function MorphologyApply().
%
% User defined settings include...
% * Output Bias for Convolution and correlation ("-bias"
or "-define convolve:bias=??")
% * Kernel Scale/normalize settings ("-set 'option:convolve:scale'")
% This can also includes the addition of a scaled unity kernel.
% * Show Kernel being applied ("-set option:showKernel 1")
%
% The format of the MorphologyImage method is:
%
% Image *MorphologyImage(const Image *image,MorphologyMethod method,
% const ssize_t iterations,KernelInfo *kernel,ExceptionInfo *exception)
%
% Image *MorphologyImageChannel(const Image *image, const ChannelType
% channel,MorphologyMethod method,const ssize_t iterations,
% KernelInfo *kernel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o method: the morphology method to be applied.
%
% o iterations: apply the operation this many times (or no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o channel: the channel type.
%
% o kernel: An array of double representing the morphology kernel.
% Warning: kernel may be normalized for the Convolve method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MorphologyImage(const Image *image,
  const MorphologyMethod method,const ssize_t iterations,
  const KernelInfo *kernel,ExceptionInfo *exception)
{
  /* Convenience wrapper: apply the morphology method to the default
  ** channel set.  All work is delegated to MorphologyImageChannel(). */
  return(MorphologyImageChannel(image,DefaultChannels,method,iterations,
    kernel,exception));
}
MagickExport Image *MorphologyImageChannel(const Image *image,
  const ChannelType channel,const MorphologyMethod method,
  const ssize_t iterations,const KernelInfo *kernel,ExceptionInfo *exception)
{
  /* Apply user-level settings (bias, kernel scaling, showKernel, and the
  ** multi-kernel compose override) then hand off to MorphologyApply().
  ** Returns the morphed image, or NULL on error. */
  KernelInfo
    *curr_kernel;

  CompositeOperator
    compose;

  double
    bias;

  Image
    *morphology_image;

  /* Apply Convolve/Correlate Normalization and Scaling Factors.
   * This is done BEFORE the ShowKernelInfo() function is called so that
   * users can see the results of the 'option:convolve:scale' option.
   */
  curr_kernel = (KernelInfo *) kernel;
  bias=image->bias;
  if ((method == ConvolveMorphology) || (method == CorrelateMorphology))
    {
      const char
        *artifact;

      /* user-specified output bias for the weighted sum */
      artifact = GetImageArtifact(image,"convolve:bias");
      if (artifact != (const char *) NULL)
        bias=StringToDoubleInterval(artifact,(double) QuantumRange+1.0);

      /* user-specified kernel scaling: clone first so the caller's
       * (const) kernel is never modified */
      artifact = GetImageArtifact(image,"convolve:scale");
      if ( artifact != (const char *) NULL ) {
        if ( curr_kernel == kernel )
          curr_kernel = CloneKernelInfo(kernel);
        if (curr_kernel == (KernelInfo *) NULL)
          return((Image *) NULL);  /* clone failed; nothing to destroy --
                                      DestroyKernelInfo(NULL) would assert */
        ScaleGeometryKernelInfo(curr_kernel, artifact);
      }
    }

  /* display the (normalized) kernel via stderr */
  if ( IsMagickTrue(GetImageArtifact(image,"showKernel"))
    || IsMagickTrue(GetImageArtifact(image,"convolve:showKernel"))
    || IsMagickTrue(GetImageArtifact(image,"morphology:showKernel")) )
    ShowKernelInfo(curr_kernel);

  /* Override the default handling of multi-kernel morphology results
   * If 'Undefined' use the default method
   * If 'None' (default for 'Convolve') re-iterate previous result
   * Otherwise merge resulting images using compose method given.
   * Default for 'HitAndMiss' is 'Lighten'.
   */
  { const char
      *artifact;

    compose = UndefinedCompositeOp;  /* use default for method */
    artifact = GetImageArtifact(image,"morphology:compose");
    if ( artifact != (const char *) NULL)
      compose = (CompositeOperator) ParseCommandOption(
        MagickComposeOptions,MagickFalse,artifact);
  }

  /* Apply the Morphology */
  morphology_image = MorphologyApply(image, channel, method, iterations,
    curr_kernel, compose, bias, exception);

  /* Cleanup and Exit: destroy the scaled clone, never the caller's kernel */
  if ( curr_kernel != kernel )
    curr_kernel=DestroyKernelInfo(curr_kernel);
  return(morphology_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotateKernelInfo() rotates the kernel by the angle given.
%
% Currently it is restricted to 90 degree angles, of either 1D kernels
% or square kernels. And 'circular' rotations of 45 degrees for 3x3 kernels.
% It will ignore useless rotations for specific 'named' built-in kernels.
%
% The format of the RotateKernelInfo method is:
%
% void RotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is currently internal to this module only, but can be exported
% to other modules if needed.
*/
/* Rotate the kernel (and every kernel following it in the list) by the
 * given angle, in place.  Only 45-degree steps (3x3 kernels) and 90-degree
 * steps (1D or square kernels) are actually performed; see the header
 * comment above for the full contract. */
static void RotateKernelInfo(KernelInfo *kernel, double angle)
{
  /* angle the lower kernels first */
  if ( kernel->next != (KernelInfo *) NULL)
    RotateKernelInfo(kernel->next, angle);

  /* WARNING: Currently assumes the kernel (rightly) is horizontally symmetrical
  **
  ** TODO: expand beyond simple 90 degree rotates, flips and flops
  */

  /* Modulus the angle into [0, 360) */
  angle = fmod(angle, 360.0);
  if ( angle < 0 )
    angle += 360.0;

  if ( 337.5 < angle || angle <= 22.5 )
    return; /* Near zero angle - no change! - At least not at this time */

  /* Handle special cases */
  switch (kernel->type) {
    /* These built-in kernels are cylindrical kernels, rotating is useless */
    case GaussianKernel:
    case DoGKernel:
    case LoGKernel:
    case DiskKernel:
    case PeaksKernel:
    case LaplacianKernel:
    case ChebyshevKernel:
    case ManhattanKernel:
    case EuclideanKernel:
      return;

    /* These may be rotatable at non-90 angles in the future */
    /* but simply rotating them in multiples of 90 degrees is useless */
    case SquareKernel:
    case DiamondKernel:
    case PlusKernel:
    case CrossKernel:
      return;

    /* These only allows a +/-90 degree rotation (by transpose) */
    /* A 180 degree rotation is useless */
    case BlurKernel:
      if ( 135.0 < angle && angle <= 225.0 )
        return;
      if ( 225.0 < angle && angle <= 315.0 )
        angle -= 180;
      break;

    default:
      break;
  }

  /* Attempt rotations by 45 degrees -- 3x3 kernels only */
  if ( 22.5 < fmod(angle,90.0) && fmod(angle,90.0) <= 67.5 )
    {
      if ( kernel->width == 3 && kernel->height == 3 )
        { /* Rotate a 3x3 square by 45 degree angle:
           * cycle the 8 outer elements one step around the centre */
          double t = kernel->values[0];
          kernel->values[0] = kernel->values[3];
          kernel->values[3] = kernel->values[6];
          kernel->values[6] = kernel->values[7];
          kernel->values[7] = kernel->values[8];
          kernel->values[8] = kernel->values[5];
          kernel->values[5] = kernel->values[2];
          kernel->values[2] = kernel->values[1];
          kernel->values[1] = t;
          /* rotate non-centered origin */
          if ( kernel->x != 1 || kernel->y != 1 ) {
            ssize_t x,y;
            /* work with the origin's offset from the centre cell */
            x = (ssize_t) kernel->x-1;
            y = (ssize_t) kernel->y-1;
            if ( x == y ) x = 0;
            else if ( x == 0 ) x = -y;
            else if ( x == -y ) y = 0;
            else if ( y == 0 ) y = x;
            kernel->x = (ssize_t) x+1;
            kernel->y = (ssize_t) y+1;
          }
          angle = fmod(angle+315.0, 360.0); /* angle reduced 45 degrees */
          kernel->angle = fmod(kernel->angle+45.0, 360.0);
        }
      else
        perror("Unable to rotate non-3x3 kernel by 45 degrees");
    }
  if ( 45.0 < fmod(angle, 180.0) && fmod(angle,180.0) <= 135.0 )
    {
      if ( kernel->width == 1 || kernel->height == 1 )
        { /* Do a transpose of a 1 dimensional kernel,
          ** which results in a fast 90 degree rotation of some type.
          */
          ssize_t
            t;
          t = (ssize_t) kernel->width;
          kernel->width = kernel->height;
          kernel->height = (size_t) t;
          t = kernel->x;
          kernel->x = kernel->y;
          kernel->y = t;
          /* record which direction the quarter turn effectively went */
          if ( kernel->width == 1 ) {
            angle = fmod(angle+270.0, 360.0); /* angle reduced 90 degrees */
            kernel->angle = fmod(kernel->angle+90.0, 360.0);
          } else {
            angle = fmod(angle+90.0, 360.0); /* angle increased 90 degrees */
            kernel->angle = fmod(kernel->angle+270.0, 360.0);
          }
        }
      else if ( kernel->width == kernel->height )
        { /* Rotate a square array of values by 90 degrees */
          { size_t
              i,j,x,y;

            double
              *k,t;

            /* four-way cyclic exchange of mirrored elements, ring by ring */
            k=kernel->values;
            for( i=0, x=kernel->width-1; i<=x; i++, x--)
              for( j=0, y=kernel->height-1; j<y; j++, y--)
                { t = k[i+j*kernel->width];
                  k[i+j*kernel->width] = k[j+x*kernel->width];
                  k[j+x*kernel->width] = k[x+y*kernel->width];
                  k[x+y*kernel->width] = k[y+i*kernel->width];
                  k[y+i*kernel->width] = t;
                }
          }
          /* rotate the origin - relative to center of array */
          { ssize_t x,y;
            x = (ssize_t) (kernel->x*2-kernel->width+1);
            y = (ssize_t) (kernel->y*2-kernel->height+1);
            kernel->x = (ssize_t) ( -y +(ssize_t) kernel->width-1)/2;
            kernel->y = (ssize_t) ( +x +(ssize_t) kernel->height-1)/2;
          }
          angle = fmod(angle+270.0, 360.0); /* angle reduced 90 degrees */
          kernel->angle = fmod(kernel->angle+90.0, 360.0);
        }
      else
        perror("Unable to rotate a non-square, non-linear kernel 90 degrees");
    }
  if ( 135.0 < angle && angle <= 225.0 )
    {
      /* For a 180 degree rotation - also known as a reflection
       * This is actually a very very common operation!
       * Basically all that is needed is a reversal of the kernel data!
       * And a reflection of the origin
       */
      double
        t;

      double
        *k;

      size_t
        i,
        j;

      k=kernel->values;
      for ( i=0, j=kernel->width*kernel->height-1; i<j; i++, j--)
        t=k[i], k[i]=k[j], k[j]=t;

      kernel->x = (ssize_t) kernel->width - kernel->x - 1;
      kernel->y = (ssize_t) kernel->height - kernel->y - 1;
      angle = fmod(angle-180.0, 360.0); /* angle+180 degrees */
      kernel->angle = fmod(kernel->angle+180.0, 360.0);
    }
  /* At this point angle should at least between -45 (315) and +45 degrees
   * In the future some form of non-orthogonal angled rotates could be
   * performed here, possibly with a linear kernel restriction.
   */

  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e G e o m e t r y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleGeometryKernelInfo() takes a geometry argument string, typically
% provided as a "-set option:convolve:scale {geometry}" user setting,
% and modifies the kernel according to the parsed arguments of that setting.
%
% The first argument (and any normalization flags) are passed to
% ScaleKernelInfo() to scale/normalize the kernel. The second argument
% is then passed to UnityAddKernelInfo() to add a scaled unity kernel
% into the scaled/normalized kernel.
%
% The format of the ScaleGeometryKernelInfo method is:
%
% void ScaleGeometryKernelInfo(KernelInfo *kernel, const char *geometry)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% o geometry:
% The geometry string to parse, typically from the user provided
% "-set option:convolve:scale {geometry}" setting.
%
*/
MagickExport void ScaleGeometryKernelInfo (KernelInfo *kernel,
  const char *geometry)
{
  GeometryFlags
    flags;

  GeometryInfo
    args;

  /* Parse the user supplied "convolve:scale" geometry string */
  SetGeometryInfo(&args);
  flags = (GeometryFlags) ParseGeometry(geometry, &args);

#if 0
  /* For Debugging Geometry Input */
  (void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n",
    flags, args.rho, args.sigma, args.xi, args.psi );
#endif

  /* A '%' flag turns both arguments into fractions */
  if ( (flags & PercentValue) != 0 )
    {
      args.rho *= 0.01;
      args.sigma *= 0.01;
    }

  /* Defaults for arguments the user left out */
  if ( (flags & RhoValue) == 0 )
    args.rho = 1.0;
  if ( (flags & SigmaValue) == 0 )
    args.sigma = 0.0;

  /* First argument (plus any '!'/'^' flags): scale/normalize the kernel */
  ScaleKernelInfo(kernel, args.rho, flags);

  /* Second argument: blend a scaled unity kernel into the result */
  if ( (flags & SigmaValue) != 0 )
    UnityAddKernelInfo(kernel, args.sigma);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleKernelInfo() scales the given kernel list by the given amount, with or
% without normalization of the sum of the kernel values (as per given flags).
%
% By default (no flags given) the values within the kernel is scaled
% directly using given scaling factor without change.
%
% If either of the two 'normalize_flags' are given the kernel will first be
% normalized and then further scaled by the scaling factor value given.
%
% Kernel normalization ('normalize_flags' given) is designed to ensure that
% any use of the kernel scaling factor with 'Convolve' or 'Correlate'
% morphology methods will fall into -1.0 to +1.0 range. Note that for
% non-HDRI versions of IM this may cause images to have any negative results
% clipped, unless some 'bias' is used.
%
% More specifically. Kernels which only contain positive values (such as a
% 'Gaussian' kernel) will be scaled so that those values sum to +1.0,
% ensuring a 0.0 to +1.0 output range for non-HDRI images.
%
% For Kernels that contain some negative values, (such as 'Sharpen' kernels)
% the kernel will be scaled by the absolute of the sum of kernel values, so
% that it will generally fall within the +/- 1.0 range.
%
% For kernels whose values sum to zero, (such as 'Laplacian' kernels) kernel
% will be scaled by just the sum of the positive values, so that its output
% range will again fall into the +/- 1.0 range.
%
% For special kernels designed for locating shapes using 'Correlate', (often
% only containing +1 and -1 values, representing foreground/background
% matching) a special normalization method is provided to scale the positive
% values separately to those of the negative values, so the kernel will be
% forced to become a zero-sum kernel better suited to such searches.
%
% WARNING: Correct normalization of the kernel assumes that the '*_range'
% attributes within the kernel structure have been correctly set during the
% kernels creation.
%
% NOTE: The values used for 'normalize_flags' have been selected specifically
% to match the use of geometry options, so that '!' means NormalizeValue, '^'
% means CorrelateNormalizeValue. All other GeometryFlags values are ignored.
%
% The format of the ScaleKernelInfo method is:
%
% void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor,
% const MagickStatusType normalize_flags )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scaling_factor:
% multiply all values (after normalization) by this factor if not
% zero. If the kernel is normalized regardless of any flags.
%
% o normalize_flags:
% GeometryFlags defining normalization method to use.
% specifically: NormalizeValue, CorrelateNormalizeValue,
% and/or PercentValue
%
*/
/* Scale (and optionally normalize) every kernel in the list by the given
 * factor.  See the header comment above for the normalization semantics. */
MagickExport void ScaleKernelInfo(KernelInfo *kernel,
  const double scaling_factor,const GeometryFlags normalize_flags)
{
  ssize_t
    i;

  double
    pos_scale,
    neg_scale;

  /* do the other kernels in a multi-kernel list first */
  if ( kernel->next != (KernelInfo *) NULL)
    ScaleKernelInfo(kernel->next, scaling_factor, normalize_flags);

  /* Normalization of Kernel */
  pos_scale = 1.0;
  if ( (normalize_flags&NormalizeValue) != 0 ) {
    if ( fabs(kernel->positive_range + kernel->negative_range) >= MagickEpsilon )
      /* non-zero-summing kernel (generally positive) */
      pos_scale = fabs(kernel->positive_range + kernel->negative_range);
    else
      /* zero-summing kernel */
      pos_scale = kernel->positive_range;
  }
  /* Force kernel into a normalized zero-summing kernel */
  if ( (normalize_flags&CorrelateNormalizeValue) != 0 ) {
    pos_scale = ( fabs(kernel->positive_range) >= MagickEpsilon )
      ? kernel->positive_range : 1.0;
    neg_scale = ( fabs(kernel->negative_range) >= MagickEpsilon )
      ? -kernel->negative_range : 1.0;
  }
  else
    neg_scale = pos_scale;

  /* finalize scaling_factor for positive and negative components */
  pos_scale = scaling_factor/pos_scale;
  neg_scale = scaling_factor/neg_scale;

  /* scale values; 'nan' (shape) elements are left untouched */
  for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
    if ( ! IsNaN(kernel->values[i]) )
      kernel->values[i] *= (kernel->values[i] >= 0) ? pos_scale : neg_scale;

  /* convolution output range */
  kernel->positive_range *= pos_scale;
  kernel->negative_range *= neg_scale;
  /* maximum and minimum values in kernel */
  kernel->maximum *= (kernel->maximum >= 0.0) ? pos_scale : neg_scale;
  kernel->minimum *= (kernel->minimum >= 0.0) ? pos_scale : neg_scale;

  /* swap kernel settings if user's scaling factor is negative */
  if ( scaling_factor < MagickEpsilon ) {
    double t;
    t = kernel->positive_range;
    kernel->positive_range = kernel->negative_range;
    kernel->negative_range = t;
    t = kernel->maximum;
    kernel->maximum = kernel->minimum;
    kernel->minimum = t;  /* BUG FIX: was '= 1', discarding the old maximum
                           * and leaving min/max inconsistent after negation */
  }

  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h o w K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShowKernelInfo() outputs the details of the given kernel definition to
% standard error, generally due to a users 'showKernel' option request.
%
% The format of the ShowKernelInfo method is:
%
% void ShowKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
/* Print every kernel in the (possibly multi-) kernel list to standard
 * error in a human readable form: type, size, origin, value range and the
 * full value matrix. */
MagickExport void ShowKernelInfo(const KernelInfo *kernel)
{
  const KernelInfo
    *k;

  size_t
    c, i, u, v;

  /* walk the multi-kernel list, counting kernels as we go */
  for (c=0, k=kernel; k != (KernelInfo *) NULL; c++, k=k->next ) {

    (void) FormatLocaleFile(stderr, "Kernel");
    /* number the kernels only when there is more than one in the list */
    if ( kernel->next != (KernelInfo *) NULL )
      (void) FormatLocaleFile(stderr, " #%lu", (unsigned long) c );
    (void) FormatLocaleFile(stderr, " \"%s",
      CommandOptionToMnemonic(MagickKernelOptions, k->type) );
    if ( fabs(k->angle) >= MagickEpsilon )
      (void) FormatLocaleFile(stderr, "@%lg", k->angle);
    (void) FormatLocaleFile(stderr, "\" of size %lux%lu%+ld%+ld",(unsigned long)
      k->width,(unsigned long) k->height,(long) k->x,(long) k->y);
    (void) FormatLocaleFile(stderr,
      " with values from %.*lg to %.*lg\n",
      GetMagickPrecision(), k->minimum,
      GetMagickPrecision(), k->maximum);
    /* classify the output range: zero-summing, normalized, or plain sum */
    (void) FormatLocaleFile(stderr, "Forming a output range from %.*lg to %.*lg",
      GetMagickPrecision(), k->negative_range,
      GetMagickPrecision(), k->positive_range);
    if ( fabs(k->positive_range+k->negative_range) < MagickEpsilon )
      (void) FormatLocaleFile(stderr, " (Zero-Summing)\n");
    else if ( fabs(k->positive_range+k->negative_range-1.0) < MagickEpsilon )
      (void) FormatLocaleFile(stderr, " (Normalized)\n");
    else
      (void) FormatLocaleFile(stderr, " (Sum %.*lg)\n",
        GetMagickPrecision(), k->positive_range+k->negative_range);
    /* dump the value matrix row by row; 'nan' marks shape (don't-care) cells */
    for (i=v=0; v < k->height; v++) {
      (void) FormatLocaleFile(stderr, "%2lu:", (unsigned long) v );
      for (u=0; u < k->width; u++, i++)
        if ( IsNaN(k->values[i]) )
          (void) FormatLocaleFile(stderr," %*s", GetMagickPrecision()+3, "nan");
        else
          (void) FormatLocaleFile(stderr," %*.*lg", GetMagickPrecision()+3,
            GetMagickPrecision(), k->values[i]);
      (void) FormatLocaleFile(stderr,"\n");
    }
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n i t y A d d K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnityAddKernelInfo() Adds a given amount of the 'Unity' Convolution Kernel
% to the given pre-scaled and normalized Kernel. This in effect adds that
% amount of the original image into the resulting convolution kernel. This
% value is usually provided by the user as a percentage value in the
% 'convolve:scale' setting.
%
% The resulting effect is to convert the defined kernels into blended
% soft-blurs, unsharp kernels or into sharpening kernels.
%
% The format of the UnityAddKernelInfo method is:
%
% void UnityAddKernelInfo(KernelInfo *kernel, const double scale )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scale:
% scaling factor for the unity kernel to be added to
% the given kernel.
%
*/
MagickExport void UnityAddKernelInfo(KernelInfo *kernel,
  const double scale)
{
  KernelInfo
    *k;

  /* Blend a scaled 'Unity' kernel into every kernel of the list: add the
   * scale to each kernel's origin element, then refresh the cached
   * range/min/max meta-data to match the new values. */
  for (k=kernel; k != (KernelInfo *) NULL; k=k->next)
    {
      k->values[k->x+k->y*k->width] += scale;
      CalcKernelMetaData(k);
    }
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Z e r o K e r n e l N a n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ZeroKernelNans() replaces any special 'nan' value that may be present in
% the kernel with a zero value. This is typically done when the kernel will
% be used in special hardware (GPU) convolution processors, to simplify
% matters.
%
% The format of the ZeroKernelNans method is:
%
% void ZeroKernelNans (KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
MagickExport void ZeroKernelNans(KernelInfo *kernel)
{
  KernelInfo
    *k;

  size_t
    i;

  /* Walk the whole multi-kernel list, replacing every special 'nan'
   * (shape/don't-care) element with an explicit zero value. */
  for (k=kernel; k != (KernelInfo *) NULL; k=k->next)
    for (i=0; i < (k->width*k->height); i++)
      if ( IsNaN(k->values[i]) )
        k->values[i] = 0.0;
  return;
}
|
texture_extensions.c | // The MIT License
//
// Copyright (c) 2021, 2022 Marcus Der marcusder@hotmail.com
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
#include<limits.h> /* SHRT_MAX, used by clearThreadData() */
#include<math.h>
#include<omp.h>
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
/* True when pixel (x,y) lies outside a w x h image.
 * All macro parameters are parenthesized so argument expressions with
 * lower precedence (e.g. a ternary) expand correctly. */
#define OUTSIDE_IMAGE(x,y,w,h) ((x) < 0 || (x) >= (w) || (y) < 0 || (y) >= (h))
/* Row-major index of pixel (x,y) in an image w pixels wide.
 * BUG FIX: 'x' was previously unparenthesized, so e.g. a ?: expression
 * passed as x would bind to the whole sum instead of just the column. */
#define IMAGE_INDEX(x,y,w) ((y) * (w) + (x))
/* Clamp val into the inclusive range [min,max]; arguments are evaluated
 * more than once, so pass side-effect-free expressions only. */
#define CLAMP(val,min,max) (((val) < (min)) ? (min) : (((val) > (max)) ? (max) : (val)))
typedef struct LineData {
float xStart;
float yStart;
float xEnd;
float yEnd;
float thickness;
float blur;
} LineData;
typedef struct IslandLines {
int numLines;
int islandIdx;
LineData* lineData;
} IslandLines;
typedef struct Island {
int numTriangles;
float* triangles;
} Island;
typedef struct ImageData {
int width;
int height;
float* scratch;
} ImageData;
typedef struct ShortPair {
short x;
short y;
} ShortPair;
typedef struct ThreadData {
float* scratch;
ShortPair topLeft;
ShortPair bottomRight;
} ThreadData;
typedef struct BitmaskData {
unsigned long long* bitmask;
unsigned long long* dilatedBitmask;
} BitmaskData;
typedef struct EdgeAwareData {
unsigned long long* bitmask;
unsigned long long maskIdx;
void (*writeFunc)(int, int, int, int, void*, void*);
void* argData;
} EdgeAwareData;
typedef struct EmbeddedWriteData {
void (*writeFunc)(int, int, int, int, void*, void*);
void* argData;
} EmbeddedWriteData;
typedef struct BrushData {
void* brush;
short brushWidth;
short brushHeight;
} BrushData;
void clearThreadData(ThreadData* data);
ThreadData* createThreadData(int numThreads, ImageData* imgData) {
	/* Allocate one ThreadData per worker, each with its own zeroed
	 * width*height float scratch plane and an empty dirty region. */
	const int width = imgData->width;
	const int height = imgData->height;
	ThreadData* threads = (ThreadData*) malloc(numThreads * sizeof(ThreadData));
	for (int t = 0; t < numThreads; t++) {
		// worst case scenario is two perfectly vertical lines, which this holds exactly
		threads[t].scratch = (float*) calloc(width * height, sizeof(float));
		clearThreadData(&threads[t]);
	}
	return threads;
}
BitmaskData* createBitmaskData(int width, int height) {
unsigned long long* bitmask = malloc(width * height * sizeof(unsigned long long));
unsigned long long* dilatedBitmask = malloc(width * height * sizeof(unsigned long long));
BitmaskData* data = malloc(sizeof(BitmaskData));
data->bitmask=bitmask;
data->dilatedBitmask=dilatedBitmask;
return data;
}
IslandLines* convertLineData(float* data, int numLines) {
IslandLines* lines = (IslandLines*) malloc(sizeof(IslandLines) * numLines);
int idx = 0;
for(int i = 0; i < numLines; i++) {
int numData = (int) (data[idx++] + 0.5f);
int maskIdx = ((int) (data[idx++] + 0.5f)) % 64;
lines[i] = (IslandLines) {numData, maskIdx, (LineData*)(data + idx)}; // evil
idx += numData * 6;
}
return lines;
}
/* Decode the packed float stream into Island descriptors.
 * Stream layout per island: [numTriangles, numTriangles*6 vertex floats];
 * the triangle pointers alias the input array in place (not copied). */
Island* convertIslandData(float* islandData, int numIslands) {
	Island* islands = (Island*) malloc(sizeof(Island) * numIslands);
	int idx = 0;
	for (int i = 0; i < numIslands; i++) {
		/* BUG FIX: round the float-encoded count like convertLineData()
		 * does (+0.5f); plain truncation drops a triangle whenever the
		 * count deserialises as e.g. 2.9999997f instead of 3.0f. */
		int numTriangles = (int) (islandData[idx++] + 0.5f);
		islands[i] = (Island) {numTriangles, islandData + idx};
		idx += numTriangles * 6; /* 3 vertices x (x,y) per triangle */
	}
	return islands;
}
ImageData* createImageData(int width, int height) {
	/* Allocate the image descriptor together with a zero-filled
	 * width*height float scratch plane. */
	ImageData* img = (ImageData*) malloc(sizeof(ImageData));
	img->width = width;
	img->height = height;
	img->scratch = (float*) calloc(width * height, sizeof(float));
	return img;
}
BrushData* createBrushDataFloat(float thickness) {
	/* Build a round, anti-aliased brush of radius `thickness`: each cell
	 * holds how far inside the circle it sits, clamped to [0,1]. */
	BrushData* data = (BrushData*) malloc(sizeof(BrushData));
	int side = floor(thickness) * 2 + 1; // odd, so a centre pixel exists
	data->brushWidth = side;
	data->brushHeight = side;
	float* brush = (float*) malloc(sizeof(float) * side * side);
	data->brush = brush;
	const int cx = side / 2;
	const int cy = side / 2;
	for (int y = 0; y < side; y++) {
		for (int x = 0; x < side; x++) {
			const int dy = y - cy;
			const int dx = x - cx;
			const float inside = thickness - sqrtf(dy*dy + dx*dx);
			brush[IMAGE_INDEX(x, y, side)] = CLAMP(inside, 0.0f, 1.0f);
		}
	}
	return data;
}
void freeBrushData(BrushData* data) {
	/* Release the brush bitmap first, then the descriptor that owns it. */
	float* pixels = (float*) data->brush;
	free(pixels);
	free(data);
}
void writeFourFloat(int x, int y, int w, int h, void* _data, void* _dst) {
	/* Copy a 4-float (RGBA) value from _data into the 4-channel image _dst. */
	float* dst = (float*)_dst;
	float* rgba = (float*)_data;
	const int base = IMAGE_INDEX(x,y,w) * 4;
	for (int c = 0; c < 4; c++)
		dst[base + c] = rgba[c];
}
void writeSingleFloat(int x, int y, int w, int h, void* _data, void* _dst) {
	/* Store one float at pixel (x,y) of a single-channel float image. */
	((float*)_dst)[IMAGE_INDEX(x,y,w)] = *(float*)_data;
}
void writeSingleChar(int x, int y, int w, int h, void* _data, void* _dst) {
	/* Store one byte at pixel (x,y) of a single-channel byte image. */
	((char*)_dst)[IMAGE_INDEX(x,y,w)] = *(char*)_data;
}
void write3x3Plus(int x, int y, int w, int h, void* _data, void* _dst) {
	/* Stamp a plus-shaped (centre + 4-connected) neighbourhood, forwarding
	 * each in-bounds pixel to the embedded writer. */
	EmbeddedWriteData* embed = (EmbeddedWriteData*)_data;
	const int dx[5] = {0,0,-1,1,0};
	const int dy[5] = {0,-1,0,0,1};
	for (int n = 0; n < 5; n++) {
		const int px = x + dx[n];
		const int py = y + dy[n];
		if (!OUTSIDE_IMAGE(px,py,w,h))
			embed->writeFunc(px, py, w, h, embed->argData, _dst);
	}
}
void orSingleULL(int x, int y, int w, int h, void* _data, void* _dst) {
	/* OR a 64-bit mask word into the bitmask image at pixel (x,y). */
	unsigned long long* mask = (unsigned long long*)_dst;
	mask[IMAGE_INDEX(x,y,w)] |= *(unsigned long long*)_data;
}
void writeEdgeAware(int x, int y, int w, int h, void* _data, void* _dst) {
	/* For each 4-connected neighbour of (x,y) that does not carry any
	 * OTHER island's bit, write to it only when one of ITS neighbours is
	 * outside the image or outside this island -- i.e. only pixels on the
	 * island's edge receive the write. Runs up to 16 times per pixel. */
	const int offsetX[4] = {0,-1,1,0};
	const int offsetY[4] = {-1,0,0,1};
	EdgeAwareData* data = (EdgeAwareData*)_data;
	unsigned long long* bitmask = data->bitmask;
	unsigned long long maskIdx = data->maskIdx;
	unsigned long long invMaskIdx = ~maskIdx;
	void (*writeFunc)(int, int, int, int, void*, void*) = data->writeFunc;
	int j, k;
	for(j=0;j<4;j++) {
		const int lx = x+offsetX[j];
		const int ly = y+offsetY[j];
		if(OUTSIDE_IMAGE(lx,ly,w,h)) {
			continue;
		}
		/* skip pixels claimed by any other island */
		const int idx = IMAGE_INDEX(lx,ly,w);
		if(bitmask[idx] & invMaskIdx) {
			continue;
		}
		for(k=0;k<4;k++) {
			const int lx2 = lx+offsetX[k];
			const int ly2 = ly+offsetY[k];
			if(OUTSIDE_IMAGE(lx2,ly2,w,h)) {
				/* BUG FIX: width/height were passed swapped (h, w) here and
				 * below, corrupting every write index on non-square images
				 * (the old "TODO: this sometimes doesn't work?"). */
				(*writeFunc)(lx, ly, w, h, data->argData, _dst);
				break;
			}
			const int idx2 = IMAGE_INDEX(lx2,ly2,w);
			if(!(bitmask[idx2] & maskIdx)) {
				(*writeFunc)(lx, ly, w, h, data->argData, _dst);
				break;
			}
		}
	}
}
void writeSingleFloatBrush(int x, int y, int w, int h, void* _data, void*_dst) {
BrushData* data = (BrushData*) _data;
int brushWidth = data->brushWidth;
int brushHeight = data->brushHeight;
int hw = brushWidth / 2;
int hh = brushHeight / 2;
float* dst = (float*)_dst;
for( int y2 = 0;y2 < brushHeight; y2++) {
for(int x2 = 0;x2 < brushWidth; x2++) {
int brushX = x2 - hw;
int brushY = y2 - hh;
int cx = x + brushX;
int cy = y + brushY;
if(OUTSIDE_IMAGE(cx,cy,w,h)) {
continue;
}
int brushIdx = IMAGE_INDEX(x2,y2,brushWidth);
int imageIdx = IMAGE_INDEX(cx,cy,w);
float v1 = ((float*)data->brush)[brushIdx];
float v2 = dst[imageIdx];
dst[imageIdx] = __max(v1,v2);
}
}
}
void freeStructData(BitmaskData* bitmask, IslandLines* lines1, IslandLines* lines2, Island* islands, ThreadData* threadData, int numThreads, ImageData* imageData) {
	/* Release everything built for one render pass. The IslandLines and
	 * Island arrays only alias caller-owned float data, so only the
	 * arrays themselves are freed. */
	for (int t = 0; t < numThreads; t++)
		free(threadData[t].scratch);
	free(threadData);
	free(imageData->scratch);
	free(imageData);
	free(bitmask->bitmask);
	free(bitmask->dilatedBitmask);
	free(bitmask);
	free(lines1);
	free(lines2);
	free(islands);
}
void setPixel(float* dst, int x, int y, int w, float r, float g, float b, float a) {
	/* Write one RGBA value into a 4-channel float image of width w. */
	float* px = dst + ((y * w + x) << 2);
	px[0] = r;
	px[1] = g;
	px[2] = b;
	px[3] = a;
}
int pixelSet(float* dst, int x, int y, int w, int h) {
	/* Non-zero when (x,y) is inside the image and the first (red) channel
	 * of its 4-float pixel is set; out-of-bounds pixels count as unset. */
	if (OUTSIDE_IMAGE(x,y,w,h))
		return 0;
	return dst[IMAGE_INDEX(x,y,w) << 2] != 0;
}
int pixelSetMask(char* buf, int x, int y, int w, int h) {
	/* Non-zero when (x,y) is inside the mask image and its byte is set;
	 * out-of-bounds pixels count as unset. */
	if (OUTSIDE_IMAGE(x,y,w,h))
		return 0;
	return buf[IMAGE_INDEX(x,y,w)] != 0;
}
int pixelSetMaskBoundary(char* buf, int x, int y, int w, int h) {
	/* Like pixelSetMask, but pixels OUTSIDE the image count as set, so
	 * the image border behaves as a solid boundary. */
	if (OUTSIDE_IMAGE(x,y,w,h))
		return 1;
	return buf[IMAGE_INDEX(x,y,w)] != 0;
}
float gaussian(float x, float fac) {
return 1 / (sqrt(2.0*3.1415926)) * exp(-2*(x*x)/(fac*fac));
}
float* buildKernel(int kw, float blur) {
	/* Build a kw-tap 1D blur kernel (gaussian falloff around the centre
	 * tap), normalised so the taps sum to 1. Caller frees the result. */
	float* kernel = (float*) calloc(kw, sizeof(float));
	const int centre = kw / 2;
	float total = 0;
	for (int i = 0; i < kw; i++) {
		const float weight = gaussian(centre - i, blur);
		total += weight;
		kernel[i] = weight;
	}
	for (int i = 0; i < kw; i++)
		kernel[i] /= total;
	return kernel;
}
int reflect(int M, int x) {
	/* Mirror an out-of-range index back into [0, M):
	 * -1 -> 0, -2 -> 1, M -> M-1, M+1 -> M-2; in-range x is unchanged. */
	if (x >= 0 && x < M)
		return x;
	return (x < 0) ? (-x - 1) : (2 * M - x - 1);
}
void drawLine(void* dst, int x0, int y0, int x1, int y1, int imgWidth, int imgHeight, void* data, void (*writeFunc)(int, int, int, int, void*, void*)) {
// https://en.wikipedia.org/wiki/Bresenham%27s_line_algorithm
int dx = abs(x1-x0);
int sx = x0<x1 ? 1 : -1;
int dy = -abs(y1-y0);
int sy = y0<y1 ? 1 : -1;
int err = dx + dy;
int e2;
while(1) {
(*writeFunc)(x0,y0, imgWidth, imgHeight, data, dst);
if(x0 == x1 && y0 == y1) {
break;
}
e2 = 2 * err;
if(e2 >= dy) {
err += dy;
x0 += sx;
}
if(e2 <= dx) {
err += dx;
y0 += sy;
}
}
}
void fillBottomFlatTriangle(void* dst, int x0, int y0, int x1, int y1, int x2, int y2, int imgWidth, int imgHeight,
	void* data, void (*writeFunc)(int, int, int, int, void*, void*)) {
	/* Scanline-fill a triangle whose flat edge is at the bottom
	 * (y1 == y2), sweeping down from the apex (x0,y0). */
	const float slopeA = (float)(x1 - x0) / (float)(y1 - y0);
	const float slopeB = (float)(x2 - x0) / (float)(y2 - y0);
	float edgeA = x0;
	float edgeB = x0;
	for (int scan = y0; scan <= y2; scan++) {
		drawLine(dst, edgeA, scan, edgeB, scan, imgWidth, imgHeight, data, writeFunc);
		edgeA += slopeA;
		edgeB += slopeB;
	}
}
void fillTopFlatTriangle(void* dst, int x0, int y0, int x1, int y1, int x2, int y2, int imgWidth, int imgHeight,
	void* data, void (*writeFunc)(int, int, int, int, void*, void*)) {
	/* Scanline-fill a triangle whose flat edge is at the top (y0 == y1),
	 * sweeping up from the bottom vertex (x2,y2). */
	const float slopeA = (float)(x2 - x0) / (float)(y2 - y0);
	const float slopeB = (float)(x2 - x1) / (float)(y2 - y1);
	float edgeA = x2;
	float edgeB = x2;
	for (int scan = y2; scan >= y0; scan--) {
		drawLine(dst, edgeA, scan, edgeB, scan, imgWidth, imgHeight, data, writeFunc);
		edgeA -= slopeA;
		edgeB -= slopeB;
	}
}
void drawTriangle( void* dst, int x0, int y0, int x1, int y1, int x2, int y2, int imgWidth, int imgHeight,
	void* data, void (*writeFunc)(int, int, int, int, void*, void*)) {
	/* Rasterise a filled triangle by splitting it into a bottom-flat and a
	 * top-flat half, each filled with horizontal scanlines via drawLine.
	 * http://www.sunshine2k.de/coding/java/TriangleRasterization/TriangleRasterization.html */
	int y[3];
	int x[3];
	if(y0==y1 && y1==y2) {
		return; // degenerate: zero height, nothing to fill
	}
	// quick hard coded sort: vertices ascending by y, x following its vertex
	if(y0 <= y1 && y0 <= y2) { // y0 smallest
		if(y1 <=y2) {
			y[0] = y0; y[1] = y1; y[2] = y2; x[0] = x0; x[1] = x1; x[2] = x2;
		} else {
			y[0] = y0; y[1] = y2; y[2] = y1; x[0] = x0; x[1] = x2; x[2] = x1;
		}
	} else if(y1 <= y0 && y1 <= y2) { // y1 smallest
		if(y0<=y2) {
			y[0] = y1; y[1] = y0; y[2] = y2; x[0] = x1; x[1] = x0; x[2] = x2;
		} else {
			y[0] = y1; y[1] = y2; y[2] = y0; x[0] = x1; x[1] = x2; x[2] = x0;
		}
	} else { // y2 smallest
		if(y0 <= y1) {
			y[0] = y2; y[1] = y0; y[2] = y1; x[0] = x2; x[1] = x0; x[2] = x1;
		} else {
			y[0] = y2; y[1] = y1; y[2] = y0; x[0] = x2; x[1] = x1; x[2] = x0;
		}
	}
	if(y[1] == y[2]) {
		// already bottom-flat
		fillBottomFlatTriangle(dst,x[0],y[0],x[1],y[1],x[2],y[2],imgWidth, imgHeight, data, writeFunc);
	} else if(y[0] == y[1]) {
		// already top-flat
		fillTopFlatTriangle(dst,x[0],y[0],x[1],y[1],x[2],y[2],imgWidth, imgHeight, data, writeFunc);
	} else {
		// general case: split at the middle vertex's scanline, with x3
		// interpolated on the long edge from vertex 0 to vertex 2
		int x3 = (int)(x[0] + ((float)(y[1] - y[0]) / (float)(y[2] - y[0])) * (x[2] - x[0]));
		int y3 = y[1];
		fillBottomFlatTriangle(dst,x[0],y[0],x[1],y[1],x3,y3,imgWidth, imgHeight, data, writeFunc);
		fillTopFlatTriangle(dst,x[1],y[1],x3,y3,x[2],y[2],imgWidth, imgHeight, data, writeFunc);
	}
}
void clearThreadData(ThreadData* threadData) {
	/* Reset the dirty region to an "empty" state: topLeft pushed to the
	 * far maximum and bottomRight to the origin, so the first drawn pixel
	 * establishes real bounds. */
	threadData->topLeft = (ShortPair) {SHRT_MAX, SHRT_MAX};
	threadData->bottomRight = (ShortPair) {0, 0};
}
void constrainArea(ImageData* imageData, ThreadData* threadData) {
	/* Clamp a thread's dirty-region bounds to the image rectangle.
	 * NOTE(review): despite the field names, topLeft.y holds the LARGER y
	 * (ymax) and bottomRight.y the smaller (ymin) -- this matches how
	 * copyAndClearThreadScratch reads them back. The min bounds are
	 * clamped to [0, size-1] (inclusive) while the max bounds are clamped
	 * to [0, size] -- presumably because the consumers loop half-open
	 * (y < ymax, x < xmax); confirm against callers. */
	int width = imageData->width;
	int height = imageData->height;
	ShortPair topLeft = threadData->topLeft;
	ShortPair bottomRight = threadData->bottomRight;
	int xmin = CLAMP(topLeft.x,0,width - 1);     // inclusive lower x bound
	int xmax = CLAMP(bottomRight.x,0,width);     // exclusive upper x bound
	int ymin = CLAMP(bottomRight.y,0,height - 1); // inclusive lower y bound
	int ymax = CLAMP(topLeft.y,0,height);        // exclusive upper y bound
	threadData->topLeft.x = xmin;
	threadData->bottomRight.x = xmax;
	threadData->topLeft.y = ymax;
	threadData->bottomRight.y = ymin;
}
void copyAndClearThreadScratch(BitmaskData* bitmaskData, unsigned long long maskIdx, ThreadData* threadData, ImageData* info, float multiplier) {
	/* Composite the thread's private scratch into the shared image scratch
	 * (per-pixel maximum, clamped to [0,1]), restricted to pixels inside
	 * this island's dilated ownership mask, then zero the thread scratch.
	 * Note: the parameter previously shadowed the BitmaskData type name. */
	const int width = info->width;
	float* dst = info->scratch;
	float* src = threadData->scratch;
	unsigned long long* dilated = bitmaskData->dilatedBitmask;
	constrainArea(info, threadData);
	const int xmin = threadData->topLeft.x;
	const int xmax = threadData->bottomRight.x;
	const int ymin = threadData->bottomRight.y;
	const int ymax = threadData->topLeft.y;
	for (int y = ymin; y < ymax; y++) {
		for (int x = xmin; x < xmax; x++) {
			const int idx = IMAGE_INDEX(x,y,width);
			if (dilated[idx] & maskIdx) {
				// #pragma omp atomic write
				const float candidate = src[idx] * multiplier;
				const float blended = __max(dst[idx], candidate);
				dst[idx] = CLAMP(blended, 0.0, 1.0);
			}
			src[idx] = 0;
		}
	}
}
void fixMaskHoles(unsigned long long* mask, ImageData* data) {
	/* Close single-pixel holes: OR into each pixel the island bits shared
	 * by ALL of its in-bounds 4-connected neighbours, so a pixel fully
	 * surrounded by one island becomes part of it. */
	int width = data->width;
	int height = data->height;
	const int offsetX[4] = {0,-1,1,0};
	const int offsetY[4] = {-1,0,0,1};
	int j;
	for(int y =0;y<height;y++) {
		for(int x=0;x<width;x++) {
			unsigned long long test = -1; // all 1s
			for(j=0;j<4;j++) {
				const int nx = x+offsetX[j];
				const int ny = y+offsetY[j];
				if(!OUTSIDE_IMAGE(nx,ny,width,height)) {
					/* BUG FIX: previously sampled mask[(x,y)] itself instead
					 * of the neighbour, so 'test' degenerated to the pixel's
					 * own bits and the OR below was a no-op. */
					test &= mask[IMAGE_INDEX(nx,ny,width)];
				}
			}
			/* NOTE(review): updates in place, so bits filled earlier in the
			 * scan can propagate within a single pass -- confirm intended. */
			mask[IMAGE_INDEX(x,y,width)] |= test;
		}
	}
}
void generateBitmask(BitmaskData* bitmaskData, ImageData* data, Island* islands, int startIdx, int endIdx) {
	/* Rasterise islands [startIdx, endIdx) into per-pixel 64-bit ownership
	 * masks -- bit (i - startIdx) set wherever island i covers a pixel --
	 * then build a 3x3-dilated copy so later strokes may bleed one pixel
	 * outwards without crossing into other islands. */
	// clear the bitmask
	unsigned long long* bitmask = bitmaskData->bitmask;
	unsigned long long* dilatedBitmask = bitmaskData->dilatedBitmask;
	memset(bitmask,0,data->width * data->height * sizeof(unsigned long long));
	memset(dilatedBitmask,0,data->width * data->height * sizeof(unsigned long long));
	float fwidth = (float)data->width;
	float fheight = (float)data->height;
	const float subtract = 0.0f;
	//#pragma omp parallel for
	for( int i=startIdx; i<endIdx; i++ ) {
		Island island = islands[i];
		int num = island.numTriangles * 6; // 6 floats (3 xy vertices) per triangle
		unsigned long long val[1] = {1ULL<<(i-startIdx)}; // this island's mask bit
		EmbeddedWriteData writeData;
		writeData.writeFunc = orSingleULL;
		writeData.argData = val;
		for(int k = 0;k<num;k+=6) {
			// vertices are normalised [0,1]; abs() guards tiny negative values
			int x0 = abs((int)(island.triangles[k] * fwidth - subtract));
			int y0 = abs((int)(island.triangles[k + 1] * fheight - subtract));
			int x1 = abs((int)(island.triangles[k + 2] * fwidth - subtract));
			int y1 = abs((int)(island.triangles[k + 3] * fheight - subtract));
			int x2 = abs((int)(island.triangles[k + 4] * fwidth - subtract));
			int y2 = abs((int)(island.triangles[k + 5] * fheight - subtract));
			// fill the interior, then thicken each edge with a 3x3 plus stamp
			drawTriangle( (void*)bitmask, x0, y0, x1, y1, x2, y2, data->width, data->height, val, orSingleULL );
			drawLine( (void*)bitmask, x0, y0, x1, y1, data->width, data->height, &writeData, write3x3Plus );
			drawLine( (void*)bitmask, x1, y1, x2, y2, data->width, data->height, &writeData, write3x3Plus );
			drawLine( (void*)bitmask, x2, y2, x0, y0, data->width, data->height, &writeData, write3x3Plus );
		}
	}
	// fixMaskHoles(bitmask, data);
	// generate dilated bitmask
	int width = data->width;
	int height = data->height;
	#pragma omp parallel for collapse(2)
	for ( int x=0; x<width; x++ ) {
		for ( int y=0; y<height; y++ ) {
			int idx = IMAGE_INDEX(x,y,width);
			// only allow it to bleed outwards to pixels that are not already occupied
			if(bitmask[idx]) {
				dilatedBitmask[idx] = bitmask[idx];
			} else {
				// unowned pixel: OR together all owners in its 3x3 neighbourhood
				unsigned long long val = 0;
				for(int j=0;j<9;j++) {
					int ox = x+j % 3 - 1;
					int oy = y+j / 3 - 1;
					if(OUTSIDE_IMAGE(ox,oy,width,height)) {
						continue;
					}
					int idx = IMAGE_INDEX(ox,oy,width); // NOTE: shadows outer idx
					val |= bitmask[idx];
				}
				dilatedBitmask[idx] = val;
			}
		}
	}
}
// Preliminary pass: rasterizes the segment into threadData->scratch, writing
// only where the island bitmask matches maskIdx (edge-aware), and records the
// segment's dirty bounding box (expanded by 2px) on threadData.
void drawLineSegmentEdgeAware(unsigned long long* bitmask, unsigned long long maskIdx, LineData* lineData, ImageData* imageData, ThreadData* threadData) {
    const float w = imageData->width;
    const float h = imageData->height;
    const float nudge = 0.0f;
    const float white[1] = { 1.0f };
    LineData seg = *lineData;
    // convert normalized endpoints to pixel coordinates
    int ax = abs((int)(seg.xStart * w - nudge));
    int ay = abs((int)(seg.yStart * h - nudge));
    int bx = abs((int)(seg.xEnd * w - nudge));
    int by = abs((int)(seg.yEnd * h - nudge));
    // dirty rectangle: y grows upward here (topLeft.y is the max)
    threadData->topLeft.x = __min(ax, bx) - 2;
    threadData->topLeft.y = __max(ay, by) + 2;
    threadData->bottomRight.x = __max(ax, bx) + 2;
    threadData->bottomRight.y = __min(ay, by) - 2;
    EdgeAwareData edge;
    edge.bitmask = bitmask;
    edge.maskIdx = maskIdx;
    edge.writeFunc = writeSingleFloat;
    edge.argData = (void*)white;
    drawLine( threadData->scratch, ax, ay, bx, by, imageData->width, imageData->height, (void*)(&edge), writeEdgeAware );
}
// Draws the segment into threadData->scratch with a round brush of the
// segment's thickness, growing the recorded dirty bounds by ceil(thickness).
void drawLineSegmentThickness(LineData* lineData, ImageData* imageData, ThreadData* threadData) {
    const float w = imageData->width;
    const float h = imageData->height;
    const float nudge = 0.0f;
    LineData seg = *lineData;
    int ax = abs((int)(seg.xStart * w - nudge));
    int ay = abs((int)(seg.yStart * h - nudge));
    int bx = abs((int)(seg.xEnd * w - nudge));
    int by = abs((int)(seg.yEnd * h - nudge));
    float thickness = seg.thickness;
    BrushData* brush = createBrushDataFloat(thickness);
    int pad = ceil(thickness);
    // widen the dirty rectangle so the brush footprint stays inside it
    threadData->topLeft.x -= pad;
    threadData->topLeft.y += pad;
    threadData->bottomRight.x += pad;
    threadData->bottomRight.y -= pad;
    drawLine( threadData->scratch, ax, ay, bx, by, imageData->width, imageData->height, (void*)brush, writeSingleFloatBrush );
    freeBrushData(brush);
}
// Applies a separable Gaussian blur of radius lineData->blur to the dirty
// region of threadData->scratch. No-op when blur == 0.
void blurLineSegment(LineData* lineData, ImageData* imageData, ThreadData* threadData) {
    float blur = lineData->blur;
    if(blur == 0.0) {
        return;
    }
    int ceilBlur = ceil(blur);
    int width = imageData->width;
    int height = imageData->height;
    float* scratch = threadData->scratch;
    // Grow the dirty bounds by the blur radius (topLeft.y is the max y here),
    // then clamp the rectangle to the image.
    threadData->topLeft.x -= ceilBlur;
    threadData->topLeft.y += ceilBlur;
    threadData->bottomRight.x += ceilBlur;
    threadData->bottomRight.y -= ceilBlur;
    constrainArea(imageData, threadData);
    int baseX = threadData->topLeft.x;
    int baseY = threadData->bottomRight.y;
    int areaWidth = threadData->bottomRight.x - threadData->topLeft.x;
    int areaHeight = threadData->topLeft.y - threadData->bottomRight.y;
    // Odd kernel width spanning roughly +/- (blur + 2) pixels around the center.
    int kw = (int)(blur + 2) * 2 + 1;
    int kc = kw / 2; // center of the kernel
    float* kernel = buildKernel(kw, blur);
    float* temp = (float*) malloc(areaWidth * areaHeight * sizeof(float));
    // Vertical pass: reads the full-size scratch (reflecting at image borders),
    // writes into the area-sized temp buffer.
    // y direction (write to temp)
    for(int y = 0; y<areaHeight; y++) {
        for(int x = 0; x<areaWidth; x++) {
            float sum = 0;
            int yReal = y + baseY;
            int xReal = x + baseX;
            for(int i = -kc;i <= kc; i++ ) {
                int y1 = reflect(height, yReal + i);
                sum += kernel[i+kc] * scratch[IMAGE_INDEX(xReal,y1,width)];
            }
            temp[IMAGE_INDEX(x,y,areaWidth)] = sum;
        }
    }
    // Horizontal pass: reads temp (reflecting within the area, since temp only
    // covers the dirty rectangle), writes back into scratch at real coordinates.
    // x direction (write to scratch)
    for(int y = 0; y<areaHeight; y++) {
        for(int x = 0; x<areaWidth; x++) {
            float sum = 0;
            int yReal = y + baseY;
            int xReal = x + baseX;
            for(int i = -kc;i<=kc;i++) {
                int x1 = reflect(areaWidth, x + i);
                // NOTE(review): reflect() presumably already maps x1 into
                // [0, areaWidth) — this bounds check looks defensive; confirm.
                if(OUTSIDE_IMAGE(x1,y,areaWidth,areaHeight)) {
                    continue;
                }
                sum += kernel[i+kc] * temp[IMAGE_INDEX(x1,y,areaWidth)];
            }
            scratch[IMAGE_INDEX(xReal, yReal, width)] = sum;
        }
    }
    free(temp);
    free(kernel);
}
// Draws one line segment into the per-thread scratch in three passes:
// 1) edge-aware seed pass restricted to this island's bits in the bitmask,
// 2) thick brush stroke, 3) optional Gaussian blur.
void drawLineSegment(BitmaskData* bitmaskData, unsigned long long maskIdx, LineData* lineData, ImageData* imageData, ThreadData* threadData) {
    // preliminary pass to get the edges that the line would miss
    drawLineSegmentEdgeAware(bitmaskData->bitmask, maskIdx, lineData, imageData, threadData);
    drawLineSegmentThickness(lineData, imageData, threadData);
    blurLineSegment(lineData, imageData, threadData);
}
// Draws every line of 'lines' into the per-thread scratch and composites each
// one via copyAndClearThreadScratch. Assumes lines->islandIdx < 64 (one bit of
// the current 64-island batch mask).
// NOTE(review): the 'dst' parameter is unused in this body — output appears to
// flow through copyAndClearThreadScratch instead; confirm whether 'dst' can be
// dropped at the call sites.
void drawLineSegments(float* dst, BitmaskData* bitmaskData, IslandLines* lines, ImageData* imageData, ThreadData* threadData, float multiplier) {
    int iters = lines->numLines;
    unsigned long long islandIdx = 1ULL<<(lines->islandIdx);
    for(int i = 0;i<iters;i++) {
        LineData* line = lines->lineData + i;
        drawLineSegment(bitmaskData, islandIdx, line, imageData, threadData);
        copyAndClearThreadScratch(bitmaskData, islandIdx, threadData, imageData, multiplier);
        clearThreadData(threadData);
    }
}
// Expands the single-channel scratch buffer into RGBA 'dst': RGB is forced to
// white (saves storing three constant channels), alpha comes from scratch.
void copyTempToDst(ImageData* imageData, float* dst) {
    const int pixelCount = imageData->width * imageData->height;
    const float* src = imageData->scratch;
    for (int p = 0; p < pixelCount; p++) {
        float* out = dst + p * 4;
        // hard coded white to save memory.
        out[0] = 1.0f;
        out[1] = 1.0f;
        out[2] = 1.0f;
        out[3] = src[p];
    }
}
// Debug helper: rasterizes islands [startIdx, endIdx) straight into the
// four-float RGBA 'dst' image as solid white triangles.
void generateBitmaskTest(float* dst, ImageData* data, Island* islands, int startIdx, int endIdx) {
    const float w = (float)data->width;
    const float h = (float)data->height;
    //#pragma omp parallel for
    for (int idx = startIdx; idx < endIdx; idx++) {
        Island island = islands[idx];
        float white[4] = { 1, 1, 1, 1 };
        int count = island.numTriangles * 6;
        for (int t = 0; t < count; t += 6) {
            int ax = abs((int)round(island.triangles[t] * w));
            int ay = abs((int)round(island.triangles[t + 1] * h));
            int bx = abs((int)round(island.triangles[t + 2] * w));
            int by = abs((int)round(island.triangles[t + 3] * h));
            int cx = abs((int)round(island.triangles[t + 4] * w));
            int cy = abs((int)round(island.triangles[t + 5] * h));
            drawTriangle( (void*)dst, ax, ay, bx, by, cx, cy, data->width, data->height, white, &writeFourFloat );
        }
    }
}
// Top-level driver: converts raw line/island data, then renders the two line
// sets for every island. Islands are processed in batches of 64 so that each
// island in a batch owns one bit of the coverage mask built by generateBitmask.
void generateEdgeHighlights( float** lineData, float* tuvData, float* multipliers, int numEntries, int width, int height, float* dst ) {
    int threads = omp_get_max_threads();
    IslandLines* lines1 = convertLineData(lineData[0], numEntries);
    IslandLines* lines2 = convertLineData(lineData[1], numEntries);
    Island* islands = convertIslandData(tuvData, numEntries);
    ImageData* imageData = createImageData(width, height);
    ThreadData* threadData = createThreadData(threads, imageData);
    BitmaskData* bitmaskData = createBitmaskData(width, height);
    for(int i =0;i<numEntries;i+=64) {
        int i2 = __min(i + 64, numEntries);
        // rebuild the 64-bit coverage mask for this batch of islands
        generateBitmask(bitmaskData, imageData, islands, i, i2);
        // each OpenMP thread draws into its own ThreadData scratch buffer
        #pragma omp parallel for
        for( int k = i;k < i2;k++ ) {
            ThreadData* d = threadData + omp_get_thread_num();
            drawLineSegments(dst, bitmaskData, lines1 + k, imageData, d, multipliers[0]);
            drawLineSegments(dst, bitmaskData, lines2 + k, imageData, d, multipliers[1]);
        }
    }
    // expand the accumulated single-channel scratch into RGBA dst
    copyTempToDst(imageData, dst);
    freeStructData(bitmaskData, lines1, lines2, islands, threadData, threads, imageData);
}
// tuvData holds triangulated UVs that act as a mask for which pixels may be
// written; uvData holds the line segments the distance field grows from.
// Runs a multi-source BFS outward from the rasterized UV lines, then maps each
// pixel's BFS depth to a grayscale value in 'dst' (1.0 at the lines, fading
// toward 'target'/255 at the farthest depth). '*retVal' receives the average
// BFS distance over all visited pixels, scaled by 4.
void generateDistanceField( float* uvData, int uvLen, float* tuvData, int tuvLen, int width, int height, int target, float* dst, float* retVal ) {
    // Degenerate input: nothing to draw. Note dst and *retVal are left untouched.
    if(uvLen == 0 || tuvLen == 0 || width == 0 || height == 0) {
        return;
    }
    // first, draw a mask for what pixels may be written to
    float fwidth = (float) width;
    float fheight = (float) height;
    const float subtract = 0.0f;
    char one[1] = {1};
    char* mask = calloc(width * height, sizeof(char));
    char* tempBuffer = calloc(width * height, sizeof(char));
    EmbeddedWriteData writeData;
    writeData.writeFunc = writeSingleChar;
    writeData.argData = one;
    // NOTE(review): concurrent iterations may touch the same mask pixels, but
    // every write stores the same value (1), so the race is presumably benign.
    #pragma omp parallel for
    for( int i=0; i<tuvLen; i+=6 ) {
        int x0 = abs((int)round(tuvData[i] * fwidth - subtract));
        int y0 = abs((int)round(tuvData[i + 1] * fheight - subtract));
        int x1 = abs((int)round(tuvData[i + 2] * fwidth - subtract));
        int y1 = abs((int)round(tuvData[i + 3] * fheight - subtract));
        int x2 = abs((int)round(tuvData[i + 4] * fwidth - subtract));
        int y2 = abs((int)round(tuvData[i + 5] * fheight - subtract));
        // fill the triangle and thicken its edges so the mask has no pinholes
        drawTriangle( (void*)mask, x0, y0, x1, y1, x2, y2, width, height, one, writeSingleChar );
        drawLine( (void*)mask, x0, y0, x1, y1, width, height, &writeData, write3x3Plus );
        drawLine( (void*)mask, x1, y1, x2, y2, width, height, &writeData, write3x3Plus );
        drawLine( (void*)mask, x2, y2, x0, y0, width, height, &writeData, write3x3Plus );
    }
    // next, draw the UV lines to the image
    int distSum = 0;
    int distPixels = 0;
    char* seenPixels = (char*)calloc(width * height, sizeof(char));
    short* mapping = (short*)calloc(width * height, sizeof(short));
    // allocate enough space for each buffer to hold the entire image
    // each list holds several x,y pairs that represent pixels to be checked next
    short* openList = (short*)malloc(width * height * 2 * sizeof(short));
    short* swapList = (short*)malloc(width * height * 2 * sizeof(short));
    int openLen, swapLen;
    // draw all UV lines (the BFS seed set) into tempBuffer
    #pragma omp parallel for
    for( int i=0; i<uvLen; i+=4 ) {
        const float subtract = 0.5;
        int x0 = abs((int)round(uvData[i] * fwidth - subtract));
        int y0 = abs((int)round(uvData[i + 1] * fheight - subtract));
        int x1 = abs((int)round(uvData[i + 2] * fwidth - subtract));
        int y1 = abs((int)round(uvData[i + 3] * fheight - subtract));
        drawLine( tempBuffer, x0, y0, x1, y1, width, height, one, writeSingleChar );
    }
    // fill our openList with every seed pixel
    openLen = 0;
    for ( int x=0;x<width;x++) {
        for ( int y=0;y<height;y++) {
            int idx = IMAGE_INDEX(x,y,width);
            if(tempBuffer[idx]) {
                // mark pixels as seen
                seenPixels[idx] = 1;
                // write to openList
                openList[openLen] = x;
                openList[openLen + 1] = y;
                openLen += 2;
            }
        }
    }
    // Breadth-first flood fill. currentValue is the BFS depth; it only advances
    // while the frontier still intersects the writable mask (seenAny), so
    // regions entirely outside the mask do not inflate the gradient range.
    int currentValue = 0;
    int seenAny = 1;
    while(openLen != 0) {
        swapLen = 0;
        if(seenAny) {
            currentValue++;
            seenAny = 0;
        }
        // multiplied by 2 but doesn't matter
        distSum += currentValue * openLen;
        distPixels += openLen;
        for( int k=0; k<openLen; k+=2 ) {
            short xx = openList[k];
            short yy = openList[k + 1];
            // enqueue all unvisited 3x3 neighbors
            for(int j=0;j<9;j++) {
                int ox = xx+j % 3 - 1;
                int oy = yy+j / 3 - 1;
                if(pixelSetMaskBoundary( seenPixels, ox, oy, width, height )) { // already processed or queued for processing
                    continue;
                }
                int idx = IMAGE_INDEX(ox, oy, width);
                char maskVal = mask[idx];
                seenAny |= maskVal;
                mapping[idx] = currentValue;
                seenPixels[idx] = 1;
                swapList[swapLen] = ox;
                swapList[swapLen + 1] = oy;
                swapLen += 2;
            }
        }
        // swap our lists
        short* temp = openList;
        openList = swapList;
        swapList = temp;
        openLen = swapLen;
    }
    // NOTE(review): if no seed pixels landed in the image (openLen stayed 0),
    // currentValue and distPixels are 0 and the divisions below divide by zero —
    // presumably inputs always contain at least one in-bounds UV line; confirm.
    float pixelDiff = (float)(255 - target) / 255.0 / (float) currentValue;
    // map each pixel to it's corresponding value
    #pragma omp parallel for collapse(2)
    for(int y=0; y<height; y++) {
        for(int x=0; x<width; x++) {
            float val = 1.0 - pixelDiff * (float)mapping[y * width + x];
            setPixel( dst, x, y, width, val, val, val, 1.0);
        }
    }
    free(openList);
    free(swapList);
    free(seenPixels);
    free(mapping);
    free(tempBuffer);
    free(mask);
    *retVal = (float) distSum / (float) distPixels * 4;
}
msh_hash_grid.h | /*
==============================================================================
MSH_HASH_GRID.H v0.5
A single header library for low-dimensional(2d and 3d) range and nearest neighbor queries.
To use the library you simply add:
#define MSH_HASH_GRID_IMPLEMENTATION
#include "msh_hash_grid.h"
==============================================================================
API DOCUMENTATION
This library focuses on the radius search, which is done by first creating
hash grid from your input pointset, and then calling search procedure.
It is important to note that this library produces search structures that are initialization
dependent - the radius supplied during initialization should be close to the radius used in
search queries.
Customization
-------------
'msh_hash_grid.h' performs dynamic memory allocation. You have an option to provide alternate memory
allocation functions, by defining following macros prior to inclusion of this file:
- MSH_HG_MALLOC
- MSH_HG_MEMSET
- MSH_HG_CALLOC
- MSH_HG_REALLOC
- MSH_HG_FREE
msh_hash_grid_init_2d
---------------------
void msh_hash_grid_init_2d( msh_hash_grid_t* hg,
const float* pts, const int32_t n_pts, const float radius );
Initializes the 2d hash grid 'hg' using the data passed in 'pts' where the cell size is
selected to best serve queries with 'radius' search distance. 'pts' is expected to
be a contiguous array of 2d point coordinates.
msh_hash_grid_init_3d
---------------------
void msh_hash_grid_init_3d( msh_hash_grid_t* hg,
const float* pts, const int32_t n_pts, const float radius );
Initializes the 3d hash grid 'hg' using the data passed in 'pts' where the cell size is
selected to best serve queries with 'radius' search distance. 'pts' is expected to
be a contiguous array of 3d point coordinates.
msh_hash_grid_term
---------------------
void msh_hash_grid_term( msh_hash_grid_t* hg );
Terminates storage for grid 'hg'. 'hg' should not be used after this call.
msh_hash_grid_radius_search
---------------------
size_t msh_hash_grid_radius_search( const msh_hash_grid_t* hg,
msh_hash_grid_search_desc_t* search_desc );
Performs radius search using 'hg' as acceleration structure, with search queries described
in 'search_desc'. Returns the total number of neighbors found. The members of
'msh_hash_grid_search_desc_t' are:
float* query_pts - INPUT: array of query points. Provided and owned by the user
size_t n_query_pts - INPUT: size of query points array. Provided by the user.
float radius - OPTION: radius within which we wish to find neighbors for each query
int sort - OPTION: should the results be sorted from closest to farthest
size_t max_n_neigh/k - OPTION: maximum number of neighbors allowed for each query.
float* distances_sq - OUTPUT: max_n_neigh * n_query_pts matrix of squared distances to neighbors
of query pts that are within radius. Each row contains up
to max_n_neigh neighbors for i-th query pts. This array is allocated
internally by library, but ownership is then passed to the user.
int32_t* indices - OUTPUT: max_n_neigh * n_query_pts array of indices to neighbors of query
pts that are within radius. Each row contains up
to max_n_neigh neighbors for i-th query pts. This array is allocated
internally by library, but ownership is then passed to the user.
size_t* n_neighbors - OUTPUT: n_query_pts array holding the number of neighbors found for
each of query pts. Note that for i-th points we could find less
than max_n_neighbors. This array should be used when iterating over
indices and distances_sq matrices.
Note that when doing searches, 'max_n_neigh' parameter is important - set it too low and
you will not find all neighbors within the radius (the k returned will still be the k closest though).
Set it too high, and there will be a decent amount of memory wasting and cache misses.
msh_hash_grid_knn_search
---------------------
size_t msh_hash_grid_knn_search( const msh_hash_grid_t* hg,
msh_hash_grid_search_desc_t* search_desc );
Exactly the same as 'msh_hash_grid_radius_search', except search will be performed until
'k' (specified in 'search_desc') neighbors will be found. Depending on how large 'k' is,
these queries might not be very fast.
==============================================================================
DEPENDENCIES
This file requires following c stdlib headers:
- <stdlib.h>
- <stdint.h>
- <string.h>
- <stdio.h>
- <stdbool.h>
- <stddef.h>
Note that this file will not pull them in. This is to prevent pulling the same
files multiple times, especially within single compilation unit.
To actually include the headers, simply define following before including the library:
#define MSH_HASH_GRID_INCLUDE_HEADERS
==============================================================================
AUTHORS:
Maciej Halber
CREDITS:
Map implementation based on bitwise by Per Vognsen
Dynamic array based on stb.h by Sean T. Barrett
Licensing information can be found at the end of the file.
==============================================================================
TODOs:
[ ] Replace openMP with a custom threading/scheduler implementation
[ ] Compatibility function
[ ] Allow user to specify compatibility function instead of just L2 norm
[ ] Allow user to provide some extra user data like normals for computing the distances
[ ] Write ICP example + visualization to test this. Question how well that helps icp converge
[ ] Assert proof
[ ] Docs
[x] Fix issue when _init function cannot be used if no implementation is declared.
[x] Optimization - in both knn and radius I need a better way to determine whether I can early out
[x] Optimization - see if I can simplify the radius search function for small search radii.
--> Very small gains given the increase in complexity.
[x] Optimization - spatial locality - sort linear data on bin idx or morton curves
--> Does not seem to produce improvement. Something else must be dominating the times
--> Maybe morton curves will be better
[x] Fix knn search
[x] Multithread knn
[x] Heap implementation for knn radius
[x] Use <algorithm> first
[x] Implement own version and compare
[x] Multithreading
[x] API for supplying more than a single point
[x] OpenMP optional support ( if -fopenmp was supplied, sequential otherwise)
[x] Params struct for searching
[x] Add 2d support on API level
==============================================================================
*/
#ifndef MSH_HASH_GRID_H
#define MSH_HASH_GRID_H
#if defined(MSH_HASH_GRID_INCLUDE_HEADERS)
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>
#endif
#ifndef MSH_HG_MALLOC
#define MSH_HG_MALLOC(x) malloc((x))
#endif
#ifndef MSH_HG_MEMSET
#define MSH_HG_MEMSET(x,y,z) memset((x), (y), (z))
#endif
#ifndef MSH_HG_CALLOC
#define MSH_HG_CALLOC(x,y) calloc((x), (y))
#endif
#ifndef MSH_HG_REALLOC
#define MSH_HG_REALLOC(x,y) realloc((x), (y))
#endif
#ifndef MSH_HG_FREE
#define MSH_HG_FREE(x) free((x))
#endif
#if defined(_OPENMP)
#include <omp.h>
#endif
#ifdef __cplusplus
extern "C" {
#endif
typedef struct msh_hash_grid msh_hash_grid_t;

// Describes a batch of search queries and receives the results.
// See the API documentation block above for allocation/ownership details.
typedef struct msh_hash_grid_search_desc
{
  float* query_pts;     // INPUT: packed query point coordinates (2d or 3d)
  size_t n_query_pts;   // INPUT: number of query points
  float* distances_sq;  // OUTPUT: squared distances, max_n_neigh per query
  int32_t* indices;     // OUTPUT: neighbor indices, max_n_neigh per query
  size_t* n_neighbors;  // OUTPUT: neighbors actually found per query
  float radius;         // OPTION: search radius (radius search)
  union
  {
    size_t k;           // OPTION: neighbor count target (knn search)
    size_t max_n_neigh; // OPTION: per-query neighbor cap (radius search)
  };
  int sort;             // OPTION: nonzero -> results sorted closest-to-farthest
#ifdef MSH_JOBS
  msh_jobs_ctx_t* work_ctx;
#endif
} msh_hash_grid_search_desc_t;

// Public API — see the documentation block at the top of this file.
void msh_hash_grid_init_2d( msh_hash_grid_t* hg,
                            const float* pts, const int32_t n_pts, const float radius );
void msh_hash_grid_init_3d( msh_hash_grid_t* hg,
                            const float* pts, const int32_t n_pts, const float radius );
void msh_hash_grid_term( msh_hash_grid_t* hg );
size_t msh_hash_grid_radius_search( const msh_hash_grid_t* hg,
                                    msh_hash_grid_search_desc_t* search_desc );
size_t msh_hash_grid_knn_search( const msh_hash_grid_t* hg,
                                 msh_hash_grid_search_desc_t* search_desc );

// Plain 3d vector.
typedef struct msh_hg_v3
{
  float x, y, z;
} msh_hg_v3_t;

// 3d point plus the index of the input point it came from.
typedef struct msh_hg_v3i
{
  float x, y, z;
  int32_t i;
} msh_hg_v3i_t;

typedef struct msh_hg_bin_data msh_hg__bin_data_t;
typedef struct msh_hg_bin_info msh_hg__bin_info_t;
typedef struct msh_hg_map msh_hg_map_t;

// The hash grid acceleration structure itself.
typedef struct msh_hash_grid
{
  size_t width;                // grid resolution in cells along x
  size_t height;               // grid resolution in cells along y
  size_t depth;                // grid resolution in cells along z
  double cell_size;
  msh_hg_v3_t min_pt;          // padded bounding box of the input points
  msh_hg_v3_t max_pt;
  msh_hg_map_t* bin_table;     // maps linear bin index -> slot in 'offsets'
  msh_hg_v3i_t* data_buffer;   // points stored contiguously, bin by bin
  msh_hg__bin_info_t* offsets; // per-bin (offset, length) into data_buffer
  int32_t _slab_size;          // width * height (cells per z-slab)
  double _inv_cell_size;
  uint8_t _pts_dim;            // 2 or 3
  uint16_t _num_threads;
  int32_t _dont_use_omp;
  uint32_t max_n_pts_in_bin;
  size_t _n_pts;
} msh_hash_grid_t;

// Open-addressing hash map (uint64 -> uint64); see implementation below.
typedef struct msh_hg_map
{
  uint64_t* keys;
  uint64_t* vals;
  size_t _len;
  size_t _cap;
} msh_hg_map_t;

// Temporary per-bin point list used while building the grid.
typedef struct msh_hg_bin_data
{
  int32_t n_pts;
  msh_hg_v3i_t* data;
} msh_hg__bin_data_t;

// Location of a bin's points inside the flat data_buffer.
typedef struct msh_hg_bin_info
{
  uint32_t offset;
  uint32_t length;
} msh_hg__bin_info_t;
#ifdef __cplusplus
}
#endif
#endif /* MSH_HASH_GRID_H */
#ifdef MSH_HASH_GRID_IMPLEMENTATION
////////////////////////////////////////////////////////////////////////////////////////////////////
// IMPLEMENTATION
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
// THIS IS A SIMPLIFIED VERSION OF MSH_STD_ARRAY
#ifdef __cplusplus
extern "C" {
#endif
// Minimal stretchy-buffer: a length/capacity header is stored immediately
// before the user-visible element pointer (simplified msh_std array).
typedef struct msh_hg_array_header
{
  size_t len;
  size_t cap;
} msh_hg_array_hdr_t;

#define msh_hg_array(T) T*

void* msh_hg__array_grow(const void *array, size_t new_len, size_t elem_size);

// Capacity growth policy used on reallocation.
#define msh_hg_array__grow_formula(x) ((2*(x)+5))
// Recovers the hidden header from the element pointer.
#define msh_hg_array__hdr(a) ((msh_hg_array_hdr_t *)((char *)(a) - sizeof(msh_hg_array_hdr_t)))
#define msh_hg_array_len(a) ((a) ? (msh_hg_array__hdr((a))->len) : 0)
#define msh_hg_array_cap(a) ((a) ? (msh_hg_array__hdr((a))->cap) : 0)
#define msh_hg_array_front(a) ((a) ? (a) : NULL)
#define msh_hg_array_back(a) (msh_hg_array_len((a)) ? ((a) + msh_hg_array_len((a)) - 1 ) : NULL)
#define msh_hg_array_free(a) ((a) ? (MSH_HG_FREE(msh_hg_array__hdr(a)), (a) = NULL) : 0 )
// Ensures capacity for n elements (may move the buffer, hence the *(void**) write-back).
#define msh_hg_array_fit(a, n) ((n) <= msh_hg_array_cap(a) ? (0) : ( *(void**)&(a) = msh_hg__array_grow((a), (n), sizeof(*(a))) ))
#define msh_hg_array_push(a, ...) (msh_hg_array_fit((a), 1 + msh_hg_array_len((a))), (a)[msh_hg_array__hdr(a)->len++] = (__VA_ARGS__))

// NOTE: classic double-evaluation macros — arguments must be side-effect free.
#define MSH_HG_MAX(a, b) ((a) > (b) ? (a) : (b))
#define MSH_HG_MIN(a, b) ((a) <= (b) ? (a) : (b))
#define MSH_HG_MAX3(a, b, c) MSH_HG_MAX(MSH_HG_MAX(a,b), MSH_HG_MAX(b,c))
#ifdef __cplusplus
}
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// THIS IS A COPY OF MSH_MAP
// Hash map (uint64 -> uint64) used as the bin lookup table.
uint64_t msh_hg_hash_uint64( uint64_t x );
void msh_hg_map_init( msh_hg_map_t* map, uint32_t cap );
void msh_hg_map_free( msh_hg_map_t* map );
size_t msh_hg_map_len( msh_hg_map_t* map );
size_t msh_hg_map_cap( msh_hg_map_t* map );
void msh_hg_map_insert( msh_hg_map_t* map, uint64_t key, uint64_t val );
uint64_t* msh_hg_map_get( const msh_hg_map_t* map, uint64_t key );

// NOTE(maciej): This is not very comprehensive as far as platform detection macros go.
#if defined(_MSC_VER)
#define MSH_HG_INLINE __forceinline
#else
#define MSH_HG_INLINE __attribute__((always_inline, unused)) inline
#endif
// Component-wise sum of two 3d vectors.
MSH_HG_INLINE msh_hg_v3_t
msh_hg__vec3_add( msh_hg_v3_t a, msh_hg_v3_t b )
{
  msh_hg_v3_t sum;
  sum.x = a.x + b.x;
  sum.y = a.y + b.y;
  sum.z = a.z + b.z;
  return sum;
}
// Component-wise difference of two 3d vectors (a - b).
MSH_HG_INLINE msh_hg_v3_t
msh_hg__vec3_sub( msh_hg_v3_t a, msh_hg_v3_t b )
{
  msh_hg_v3_t diff;
  diff.x = a.x - b.x;
  diff.y = a.y - b.y;
  diff.z = a.z - b.z;
  return diff;
}
// Flattens 3d cell coordinates into a linear bin index
// (x varies fastest, z slowest: one _slab_size = width * height cells per z).
MSH_HG_INLINE uint64_t
msh_hash_grid__bin_pt( const msh_hash_grid_t* hg, uint64_t ix, uint64_t iy, uint64_t iz )
{
  return ix + iy * hg->width + iz * hg->_slab_size;
}
// qsort comparator for uint64_t keys, ascending.
// BUG FIX: the previous implementation returned the unsigned difference
// ( *(uint64_t*)a - *(uint64_t*)b ) truncated to int, which yields the wrong
// sign — or spuriously 0 — whenever the difference does not fit in 32 bits
// (e.g. 0 vs 2^32 compared as equal). Use explicit comparisons instead, the
// canonical overflow-safe comparator form.
int uint64_compare( const void * a, const void * b )
{
  const uint64_t va = *(const uint64_t*)a;
  const uint64_t vb = *(const uint64_t*)b;
  return (va > vb) - (va < vb);
}
// Builds the hash grid over 'n_pts' points of dimensionality 'dim' (2 or 3).
// Cell size is derived from 'radius' when positive, otherwise from the bbox.
// After init, every occupied bin's points live contiguously in data_buffer,
// addressed through bin_table + offsets.
void
msh_hash_grid__init( msh_hash_grid_t* hg,
                     const float* pts, const int32_t n_pts, const int32_t dim,
                     const float radius )
{
  assert( dim == 2 || dim == 3 );
  if( hg->_num_threads == 0 )
  {
#if defined(_OPENMP)
    #pragma omp parallel
    {
      hg->_num_threads = omp_get_num_threads();
    }
#else
    hg->_num_threads = 1;
#endif
  }
#if defined(_OPENMP)
  if( hg->_num_threads == 1 ) { hg->_dont_use_omp = 1; }
#endif
  hg->_pts_dim = dim;

  // Compute bbox (2d points get z = 0)
  hg->min_pt = (msh_hg_v3_t){ .x = 1e9, .y = 1e9, .z = 1e9 };
  hg->max_pt = (msh_hg_v3_t){ .x = -1e9, .y = -1e9, .z = -1e9 };
  for( int i = 0; i < n_pts; ++i )
  {
    const float* pt_ptr = &pts[ dim * i ];
    msh_hg_v3_t pt;
    if( dim == 2 ) { pt = (msh_hg_v3_t){ .x = pt_ptr[0], .y = pt_ptr[1], .z = 0 }; }
    else           { pt = (msh_hg_v3_t){ .x = pt_ptr[0], .y = pt_ptr[1], .z = pt_ptr[2] }; };
    hg->min_pt.x = (hg->min_pt.x > pt.x) ? pt.x : hg->min_pt.x;
    hg->min_pt.y = (hg->min_pt.y > pt.y) ? pt.y : hg->min_pt.y;
    hg->min_pt.z = (hg->min_pt.z > pt.z) ? pt.z : hg->min_pt.z;
    hg->max_pt.x = (hg->max_pt.x < pt.x) ? pt.x : hg->max_pt.x;
    hg->max_pt.y = (hg->max_pt.y < pt.y) ? pt.y : hg->max_pt.y;
    hg->max_pt.z = (hg->max_pt.z < pt.z) ? pt.z : hg->max_pt.z;
  }
  // Pad the bbox so boundary points land strictly inside the grid.
  hg->max_pt.x += 0.0001f; hg->max_pt.y += 0.0001f; hg->max_pt.z += 0.0001f;
  hg->min_pt.x -= 0.0001f; hg->min_pt.y -= 0.0001f; hg->min_pt.z -= 0.0001f;

  // Calculate dimensions
  float dim_x = (hg->max_pt.x - hg->min_pt.x);
  float dim_y = (hg->max_pt.y - hg->min_pt.y);
  float dim_z = (hg->max_pt.z - hg->min_pt.z);
  float max_dim = MSH_HG_MAX3( dim_x, dim_y, dim_z );

  // Calculate cell size: diameter of the expected query sphere, or a bbox-based default.
  if( radius > 0.0 ) { hg->cell_size = 2.0 * radius; }
  else               { hg->cell_size = max_dim / (32 * sqrtf(3.0f)); }
  hg->width  = (int)(dim_x / hg->cell_size + 1.0);
  hg->height = (int)(dim_y / hg->cell_size + 1.0) ;
  hg->depth  = (int)(dim_z / hg->cell_size + 1.0) ;
  hg->_inv_cell_size = 1.0f/ hg->cell_size;
  hg->_slab_size = hg->height * hg->width;
  hg->_n_pts = 0;

  // Create hash table and bucket every point into its bin.
  hg->bin_table = (msh_hg_map_t*)MSH_HG_CALLOC( 1, sizeof(msh_hg_map_t) );
  msh_hg_map_init( hg->bin_table, 128 );
  msh_hg_array( msh_hg__bin_data_t ) bin_table_data = {0};
  uint64_t n_bins = 0;
  for( int i = 0 ; i < n_pts; ++i )
  {
    const float* pt_ptr = &pts[ dim * i ];
    msh_hg_v3i_t pt_data;
    if( dim == 2 )
    {
      pt_data = (msh_hg_v3i_t){ .x = pt_ptr[0], .y = pt_ptr[1], .z = 0, .i = i };
    }
    else
    {
      pt_data = (msh_hg_v3i_t){ .x = pt_ptr[0], .y = pt_ptr[1], .z = pt_ptr[2], .i = i };
    }
    uint64_t ix = (uint64_t)( ( pt_data.x - hg->min_pt.x ) * hg->_inv_cell_size );
    uint64_t iy = (uint64_t)( ( pt_data.y - hg->min_pt.y ) * hg->_inv_cell_size );
    uint64_t iz = (uint64_t)( ( pt_data.z - hg->min_pt.z ) * hg->_inv_cell_size );
    uint64_t bin_idx = msh_hash_grid__bin_pt( hg, ix, iy, iz );
    uint64_t* bin_table_idx = msh_hg_map_get( hg->bin_table, bin_idx );
    if( bin_table_idx )
    {
      bin_table_data[*bin_table_idx].n_pts += 1;
      msh_hg_array_push( bin_table_data[*bin_table_idx].data, pt_data );
    }
    else
    {
      msh_hg_map_insert( hg->bin_table, bin_idx, n_bins );
      msh_hg__bin_data_t new_bin = {0};
      new_bin.n_pts = 1;
      msh_hg_array_push( new_bin.data, pt_data );
      msh_hg_array_push( bin_table_data, new_bin );
      n_bins++;
    }
  }

  // Prepare storage for linear data
  hg->offsets = (msh_hg__bin_info_t*)MSH_HG_MALLOC( n_bins * sizeof(msh_hg__bin_info_t) );
  hg->data_buffer = (msh_hg_v3i_t*)MSH_HG_MALLOC( n_pts * sizeof( msh_hg_v3i_t ) );
  MSH_HG_MEMSET( hg->offsets, 0, n_bins * sizeof(msh_hg__bin_info_t) );

  // Gather indices of bins that have data in them from hash table.
  // FIX: this array previously used msh_array/msh_array_push from msh_std —
  // inconsistent with the msh_hg_array_len() call on it below and an undeclared
  // dependency for this standalone header. Use the local msh_hg_array instead.
  msh_hg_array( uint64_t ) filled_bin_indices = {0};
  for( size_t i = 0; i < msh_hg_map_cap(hg->bin_table); ++i )
  {
    // Remember that msh_hg_map internally increments the index, so we need to decrement it here.
    if( hg->bin_table->keys[i] )
    {
      msh_hg_array_push( filled_bin_indices, hg->bin_table->keys[i] - 1);
    }
  }
  qsort( filled_bin_indices, msh_hg_array_len( filled_bin_indices ), sizeof(uint64_t), uint64_compare );

  // Now lay the data into an array based on the sorted keys (following fill order)
  // TODO(maciej): Morton ordering?
  hg->max_n_pts_in_bin = 0;
  uint32_t offset = 0;
  for( size_t i = 0; i < msh_hg_array_len(filled_bin_indices); ++i )
  {
    uint64_t* bin_index = msh_hg_map_get( hg->bin_table, filled_bin_indices[i] );
    assert( bin_index );
    msh_hg__bin_data_t* bin = &bin_table_data[ *bin_index ];
    assert( bin );
    uint32_t n_bin_pts = bin->n_pts;
    hg->_n_pts += n_bin_pts;
    hg->max_n_pts_in_bin = MSH_HG_MAX( n_bin_pts, hg->max_n_pts_in_bin );
    for( uint32_t j = 0; j < n_bin_pts; ++j )
    {
      hg->data_buffer[ offset + j ] = bin->data[j] ;
    }
    hg->offsets[ *bin_index ] = (msh_hg__bin_info_t) { .offset = offset, .length = n_bin_pts };
    offset += n_bin_pts;
  }

  // Clean-up temporary data
  for( size_t i = 0; i < n_bins; ++i )
  {
    msh_hg_array_free( bin_table_data[i].data );
  }
  msh_hg_array_free( bin_table_data );
  // FIX: filled_bin_indices was previously leaked.
  msh_hg_array_free( filled_bin_indices );
}
// Convenience wrapper: builds a hash grid over packed 2d points.
// See msh_hash_grid__init for the cell-size selection logic.
void
msh_hash_grid_init_2d( msh_hash_grid_t* hg,
                       const float* pts, const int32_t n_pts, const float radius)
{
  const int32_t dimensionality = 2;
  msh_hash_grid__init( hg, pts, n_pts, dimensionality, radius );
}
// Convenience wrapper: builds a hash grid over packed 3d points.
// See msh_hash_grid__init for the cell-size selection logic.
void
msh_hash_grid_init_3d( msh_hash_grid_t* hg,
                       const float* pts, const int32_t n_pts, const float radius)
{
  const int32_t dimensionality = 3;
  msh_hash_grid__init( hg, pts, n_pts, dimensionality, radius );
}
// Releases all storage owned by 'hg' and resets it to an empty state.
// 'hg' must not be used afterwards (other than re-initializing it).
void
msh_hash_grid_term( msh_hash_grid_t* hg )
{
  hg->width = 0;
  hg->height = 0;
  hg->depth = 0;
  hg->cell_size = 0.0;
  hg->min_pt = (msh_hg_v3_t){ 0.0f, 0.0f, 0.0f };
  hg->max_pt = (msh_hg_v3_t){ 0.0f, 0.0f, 0.0f };
  hg->_slab_size = 0;       // FIX: was assigned 0.0f to an int32_t field
  hg->_inv_cell_size = 0.0;
  MSH_HG_FREE( hg->data_buffer ); hg->data_buffer = NULL;
  MSH_HG_FREE( hg->offsets );     hg->offsets = NULL;
  if( hg->bin_table )
  {
    // FIX: release the map's internal keys/vals arrays (allocated by
    // msh_hg_map_init) before freeing the map struct itself — previously
    // only the struct was freed, leaking both arrays.
    msh_hg_map_free( hg->bin_table );
    MSH_HG_FREE( hg->bin_table );
    hg->bin_table = NULL;
  }
}
// NOTE(maciej): This implementation is a special case modification of a templated
// sort by Sean T. Barret from stb.h. We simply want to allow sorting both the indices
// and distances if user requested returning sorted results.
// Insertion sort ascending by distance, permuting 'indices' in lockstep with
// 'dists'. Used as the finishing pass after the quicksort.
void
msh_hash_grid__ins_sort( float *dists, int32_t* indices, int n )
{
  for( int cur = 1; cur < n; ++cur )
  {
    const float   pending_dist = dists[cur];
    const int32_t pending_idx  = indices[cur];
    int slot = cur;
    // shift larger entries right until the insertion slot is found
    while( slot > 0 && pending_dist < dists[slot-1] )
    {
      dists[slot]   = dists[slot-1];
      indices[slot] = indices[slot-1];
      --slot;
    }
    if( slot != cur )
    {
      dists[slot]   = pending_dist;
      indices[slot] = pending_idx;
    }
  }
}
// Quicksort on 'dists' with 'indices' permuted in lockstep (adapted from the
// stb.h templated sort). Recurses on the smaller partition and iterates on the
// larger to bound stack depth; partitions of <= 12 elements are left unsorted
// for the insertion-sort finishing pass in msh_hash_grid__sort.
void
msh_hash_grid__quick_sort( float *dists, int32_t* indices, int n )
{
  // threshold for transitioning to insertion sort
  while( n > 12 )
  {
    float da, db, dt;
    int32_t it = 0;
    int32_t c01, c12, c, m, i, j;
    // compute median of three
    m = n >> 1;
    da = dists[0];
    db = dists[m];
    c = da < db;
    c01 = c;
    da = dists[m];
    db = dists[n-1];
    c = da < db;
    c12 = c;
    // if 0 >= mid >= end, or 0 < mid < end, then use mid
    if( c01 != c12 )
    {
      // otherwise, we'll need to swap something else to middle
      int32_t z;
      da = dists[0];
      db = dists[n-1];
      c = da < db;
      // 0>mid && mid<n: 0>n => n; 0<n => 0
      // 0<mid && mid>n: 0>n => 0; 0<n => n
      z = (c == c12) ? 0 : n-1;
      dt = dists[z];
      dists[z] = dists[m];
      dists[m] = dt;
      it = indices[z];
      indices[z] = indices[m];
      indices[m] = it;
    }
    // now dists[m] is the median-of-three swap it to the beginning so it won't move around
    dt = dists[0];
    dists[0] = dists[m];
    dists[m] = dt;
    it = indices[0];
    indices[0] = indices[m];
    indices[m] = it;
    // partition loop
    i=1;
    j=n-1;
    for(;;)
    {
      // handling of equality is crucial here for sentinels & efficiency with duplicates
      db = dists[0];
      for( ;;++i )
      {
        da = dists[i];
        c = da < db;
        if (!c) break;
      }
      da = dists[0];
      for( ;;--j ) {
        db = dists[j];
        c = da < db;
        if (!c) break;
      }
      // make sure we haven't crossed
      if( i >= j ) { break; }
      // swap the out-of-place pair (distance and index together)
      dt = dists[i];
      dists[i] = dists[j];
      dists[j] = dt;
      it = indices[i];
      indices[i] = indices[j];
      indices[j] = it;
      ++i;
      --j;
    }
    // recurse on smaller side, iterate on larger
    if( j < (n-i) )
    {
      msh_hash_grid__quick_sort( dists, indices, j );
      dists = dists + i;
      indices = indices + i;
      n = n - i;
    }
    else
    {
      msh_hash_grid__quick_sort( dists + i, indices + i, n - i );
      n = j;
    }
  }
}
// Sorts neighbor (distance, index) pairs ascending by distance: quicksort
// down to small partitions, then one insertion-sort pass to finish them.
void
msh_hash_grid__sort( float* dists, int32_t* indices, int n )
{
  msh_hash_grid__quick_sort( dists, indices, n );
  msh_hash_grid__ins_sort( dists, indices, n );
}
// Heap implementation with a twist that we swap array of indices based on the distance heap
// Sift-down for a max-heap keyed on 'dists'; 'ind' is permuted in lockstep.
// Restores the heap property for the subtree rooted at 'cur', assuming both
// child subtrees are already heaps. Iterative form of the original recursion.
void
msh_hash_grid__heapify( float *dists, int32_t* ind, size_t len, size_t cur )
{
  for( ;; )
  {
    const size_t left  = (cur<<1) + 1;
    const size_t right = (cur<<1) + 2;
    size_t largest = cur;
    if( (left  < len) && (dists[left]  > dists[largest]) ) { largest = left; }
    if( (right < len) && (dists[right] > dists[largest]) ) { largest = right; }
    if( largest == cur ) { return; }  // heap property holds — done
    float tmp_dist = dists[cur];
    dists[cur] = dists[largest];
    dists[largest] = tmp_dist;
    int32_t tmp_idx = ind[cur];
    ind[cur] = ind[largest];
    ind[largest] = tmp_idx;
    cur = largest;  // continue sifting down
  }
}
// Builds a max-heap in place over the first 'len' (dist, index) pairs by
// sifting down from the last internal node up to the root.
void msh_hash_grid__heap_make( real32_t* dists, int32_t* ind, size_t len )
{
  for( int64_t node = (int64_t)(len >> 1); node >= 0; --node )
  {
    msh_hash_grid__heapify( dists, ind, len, (size_t)node );
  }
}
// Swaps the max element into slot len-1 and restores the heap over the first
// len-1 entries. The caller is responsible for shrinking its logical length.
void msh_hash_grid__heap_pop( real32_t* dists, int32_t* ind, size_t len )
{
  float max_dist = dists[0];
  dists[0] = dists[len-1];
  dists[len-1] = max_dist;
  // BUG FIX: the index was previously stashed in a 'float', which silently
  // loses precision for point indices above 2^24. Keep it as int32_t.
  int32_t max_idx = ind[0];
  ind[0] = ind[len-1];
  ind[len-1] = max_idx;
  len--;
  if( len > 0 ){ msh_hash_grid__heapify( dists, ind, len, 0 ); }
}
// Sift the freshly appended last element (slot len-1) up the max-heap.
// "Hole" formulation: parents slide down until the correct slot for the new
// (distance, index) pair is found, then the pair is written exactly once.
void msh_hash_grid__heap_push( real32_t* dists, int32_t* ind, size_t len )
{
  int64_t hole = (int64_t)len - 1;
  const float new_dist = dists[hole];
  const int32_t new_idx = ind[hole];
  while( hole > 0 )
  {
    const int64_t parent = (hole - 1) >> 1;
    if( dists[parent] >= new_dist ) { break; } // parent already dominates
    dists[hole] = dists[parent];
    ind[hole] = ind[parent];
    hole = parent;
  }
  dists[hole] = new_dist;
  ind[hole] = new_idx;
}
// Fixed-capacity gatherer for the hits of one query point.  'dists' and
// 'indices' are caller-owned parallel arrays of at least 'cap' entries.
// Pairs are appended unordered until 'cap' is reached; on the push that
// fills the storage the arrays are converted to a binary max-heap keyed on
// distance ('is_heap' flips to 1), so the worst kept distance is O(1) at
// dists[0] and is cached in 'max_dist' for early-out tests.
typedef struct msh_hash_grid_dist_storage
{
size_t cap;
size_t len;
real32_t max_dist;
real32_t* dists;
int32_t* indices;
int32_t is_heap;
} msh_hash_grid_dist_storage_t;
// Reset the storage to an empty, non-heap state over the caller-provided
// parallel arrays 'dists'/'indices' (each must hold at least 'k' entries).
void
msh_hash_grid_dist_storage_init( msh_hash_grid_dist_storage_t* q,
const int k, float* dists, int32_t* indices )
{
  q->dists = dists;
  q->indices = indices;
  q->cap = k;
  q->len = 0;
  q->is_heap = 0;
  // Sentinel below any representable distance; any real push will raise it.
  q->max_dist = -MSH_F32_MAX;
}
// Debug/profiling counters (not referenced anywhere in this chunk).
// NOTE(review): these are non-static globals inside a single-header
// implementation — compiling the implementation into more than one
// translation unit will produce duplicate symbols; consider 'static' or
// removal if they are leftovers.
uint32_t query_counter = 0;
uint32_t skip_counter = 0;
uint32_t valid_counter = 0;
// Offer (dist, idx) to the storage.  Below capacity the pair is simply
// appended; at capacity it replaces the current farthest entry iff it is
// strictly closer.  The backing arrays become a max-heap exactly once — on
// the push that fills the storage — and stay heap-ordered afterwards.
MSH_HG_INLINE void
msh_hash_grid_dist_storage_push( msh_hash_grid_dist_storage_t* q,
const float dist, const int32_t idx )
{
// Full and no closer than the current worst: reject immediately.
if( q->len >= q->cap && dist >= q->max_dist ) { return; }
if( q->len >= q->cap )
{
// remove farthest if at capacity
msh_hash_grid__heap_pop( q->dists, q->indices, q->len );
q->len--;
}
// add new element
q->dists[ q->len ] = dist;
q->indices[ q->len ] = idx;
q->len++;
// If already heapified, restore the heap property for the appended slot.
if( q->is_heap) { msh_hash_grid__heap_push( q->dists, q->indices, q->len ); }
// First time capacity is reached: heapify the whole array and switch modes.
if( q->len >= q->cap && !q->is_heap )
{
msh_hash_grid__heap_make( q->dists, q->indices, q->len );
q->is_heap = 1;
}
// Track the farthest kept distance: heap root once heapified, running max before.
if( q->is_heap ) { q->max_dist = q->dists[0]; }
else if ( q->max_dist <= dist ) { q->max_dist = dist; }
}
// Test every point stored in bin 'bin_idx' against the query 'pt' and push
// those within radius_sq (squared distance) into the storage 's'.  Bins that
// were never populated have no entry in the bin table and are skipped.
void
msh_hash_grid__find_neighbors_in_bin( const msh_hash_grid_t* hg, const uint64_t bin_idx,
const float radius_sq, const float* pt,
msh_hash_grid_dist_storage_t* s )
{
// issue this whole things stops working if we use doubles.
uint64_t* bin_table_idx = msh_hg_map_get( hg->bin_table, bin_idx );
if( !bin_table_idx ) { return; }
// Offset/length of this bin's slice inside the flat data buffer.
msh_hg__bin_info_t bi = hg->offsets[ *bin_table_idx ];
uint32_t n_pts = bi.length;
const msh_hg_v3i_t* data = &hg->data_buffer[bi.offset];
float px = pt[0];
float py = pt[1];
// 2D grids store points with z == 0, so compare against 0 as well.
float pz = (hg->_pts_dim == 2 ) ? 0.0 : pt[2];
for( uint32_t i = 0; i < n_pts; ++i )
{
// TODO(maciej): Maybe SSE?
float dix = data[i].x;
float diy = data[i].y;
float diz = data[i].z;
int32_t dii = data[i].i;
float vx = dix - px;
float vy = diy - py;
float vz = diz - pz;
float dist_sq = vx * vx + vy * vy + vz * vz;
if( dist_sq < radius_sq )
{
msh_hash_grid_dist_storage_push( s, dist_sq, dii );
}
}
}
// Radius search for query points [start_idx, end_idx) of hg_sd->query_pts.
// For each query: collect every grid bin overlapping the axis-aligned box
// query +/- radius together with a lower bound on its squared distance to
// the query, visit bins nearest-first, and stop once the storage is full and
// the next bin cannot contain anything closer than the current worst hit.
// Results land in the caller-provided hg_sd arrays, one row of
// hg_sd->max_n_neigh entries per query.  Returns the total neighbor count.
uint32_t
msh_hash_grid__radius_search( const msh_hash_grid_t* hg,
                              msh_hash_grid_search_desc_t* hg_sd,
                              uint32_t start_idx, uint32_t end_idx )
{
  if( !hg || !hg_sd ) { return 0; }
  // At most MAX_BIN_COUNT candidate bins are gathered per query; on overflow
  // we bail out of bin collection and search only what was gathered.
  enum { MAX_BIN_COUNT = 256 };
  int32_t bin_indices[ MAX_BIN_COUNT ];
  float bin_dists_sq[ MAX_BIN_COUNT ];
  float radius = hg_sd->radius;
  double radius_sq = (double)radius * (double)radius;
  size_t row_size = hg_sd->max_n_neigh;
  msh_hash_grid_dist_storage_t storage;
  uint32_t total_num_neighbors = 0;
  for( uint32_t pt_idx = start_idx; pt_idx < end_idx; ++pt_idx )
  {
    // Per-query output rows inside the caller's flat arrays.
    float* query_pt = hg_sd->query_pts + (hg->_pts_dim * pt_idx);
    size_t* n_neighbors = hg_sd->n_neighbors ? (hg_sd->n_neighbors + pt_idx) : NULL;
    float* dists_sq = hg_sd->distances_sq + (pt_idx * row_size);
    int32_t* indices = hg_sd->indices + (pt_idx * row_size);
    // Prep the storage for the next point
    msh_hash_grid_dist_storage_init( &storage, row_size, dists_sq, indices );
    // Normalize query pt with respect to grid origin (2D points live at z=0).
    msh_hg_v3_t q;
    if( hg->_pts_dim == 2 )
    {
      q = (msh_hg_v3_t) { query_pt[0] - hg->min_pt.x,
                          query_pt[1] - hg->min_pt.y,
                          0.0 };
    }
    else
    {
      q = (msh_hg_v3_t) { query_pt[0] - hg->min_pt.x,
                          query_pt[1] - hg->min_pt.y,
                          query_pt[2] - hg->min_pt.z };
    }
    // Get base bin idx for query pt
    int64_t ix = (int64_t)( q.x * hg->_inv_cell_size );
    int64_t iy = (int64_t)( q.y * hg->_inv_cell_size );
    int64_t iz = (int64_t)( q.z * hg->_inv_cell_size );
    // Signed bin-offset ranges covered by +/- radius along each axis.
    int64_t px = (int64_t)( (q.x + radius) * hg->_inv_cell_size );
    int64_t nx = (int64_t)( (q.x - radius) * hg->_inv_cell_size );
    int64_t opx = px - ix;
    int64_t onx = nx - ix;
    int64_t py = (int64_t)( (q.y + radius) * hg->_inv_cell_size );
    int64_t ny = (int64_t)( (q.y - radius) * hg->_inv_cell_size );
    int64_t opy = py - iy;
    int64_t ony = ny - iy;
    int64_t pz = (int64_t)( (q.z + radius) * hg->_inv_cell_size );
    int64_t nz = (int64_t)( (q.z - radius) * hg->_inv_cell_size );
    int64_t opz = pz - iz;
    int64_t onz = nz - iz;
    uint32_t n_visited_bins = 0;
    float dx, dy, dz;
    int64_t cx, cy, cz;
    for( int64_t oz = onz; oz <= opz; ++oz )
    {
      cz = (int64_t)iz + oz;
      if( cz < 0 || cz >= (int64_t)hg->depth ) { continue; }
      uint64_t idx_z = cz * hg->_slab_size;
      // dz/dy/dx: distance from the query to the nearest face of the bin
      // along that axis (0 when the query lies inside the bin's slab) —
      // together they form a lower bound on the bin's squared distance.
      if( oz < 0 ) { dz = q.z - (cz + 1) * hg->cell_size; }
      else if( oz > 0 ) { dz = cz * hg->cell_size - q.z; }
      else { dz = 0.0f; }
      for( int64_t oy = ony; oy <= opy; ++oy )
      {
        cy = iy + oy;
        if( cy < 0 || cy >= (int64_t)hg->height ) { continue; }
        uint64_t idx_y = cy * hg->width;
        if( oy < 0 ) { dy = q.y - (cy + 1) * hg->cell_size; }
        else if( oy > 0 ) { dy = cy * hg->cell_size - q.y; }
        else { dy = 0.0f; }
        for( int64_t ox = onx; ox <= opx; ++ox )
        {
          cx = ix + ox;
          if( cx < 0 || cx >= (int64_t)hg->width ) { continue; }
          if( n_visited_bins >= MAX_BIN_COUNT ) { goto msh_hash_grid_lbl__find_neighbors2; }
          bin_indices[n_visited_bins] = idx_z + idx_y + cx;
          if( ox < 0 ) { dx = q.x - (cx + 1) * hg->cell_size; }
          else if( ox > 0 ) { dx = cx * hg->cell_size - q.x; }
          else { dx = 0.0f; }
          bin_dists_sq[n_visited_bins] = dz * dz + dy * dy + dx * dx;
          n_visited_bins++;
        }
      }
    }
msh_hash_grid_lbl__find_neighbors2:
    // Visit bins nearest-first so we can stop as soon as the next bin's lower
    // bound exceeds the farthest kept hit.
    msh_hash_grid__sort( bin_dists_sq, bin_indices, n_visited_bins );
    for( uint32_t i = 0; i < n_visited_bins; ++i )
    {
      msh_hash_grid__find_neighbors_in_bin( hg, bin_indices[i], radius_sq, query_pt, &storage );
      if( storage.len >= row_size &&
          storage.max_dist <= bin_dists_sq[i] )
      {
        break;
      }
    }
    if( hg_sd->sort ) { msh_hash_grid__sort( dists_sq, indices, storage.len ); }
    // FIX: dropped the dead post-increment — n_neighbors is recomputed from
    // pt_idx every iteration, so advancing the local pointer did nothing.
    if( n_neighbors ) { *n_neighbors = storage.len; }
    total_num_neighbors += storage.len;
  }
  // FIX: removed stray double semicolon after the return statement.
  return total_num_neighbors;
}
#ifdef MSH_JOBS
// Parameter pack for one msh_jobs task: search a [start_idx, end_idx) slice
// of the query points and accumulate into the shared counter.
typedef struct msh_hash_grid__work_opts
{
const msh_hash_grid_t* hg;
msh_hash_grid_search_desc_t* hg_sd;
uint32_t start_idx;
uint32_t end_idx;
uint32_t volatile *total_num_neighbors;
} msh_hash_grid__work_opts_t;
// Job entry point: runs the single-threaded radius search on its slice and
// atomically adds the slice's neighbor count to the shared total.  Slices do
// not overlap, so the per-query output rows never race.
MSH_JOBS_JOB_SIGNATURE(msh_hash_grid__run_radius_search)
{
msh_hash_grid__work_opts_t opts = *((msh_hash_grid__work_opts_t*)params);
uint32_t cur_num_neighbors = msh_hash_grid__radius_search( opts.hg, opts.hg_sd, opts.start_idx, opts.end_idx );
msh_jobs_atomic_add( opts.total_num_neighbors, cur_num_neighbors );
return 0;
}
#endif
// Public radius search, msh_jobs-parallel flavor.  Falls back to the
// single-threaded scan when MSH_JOBS is disabled, no work context is set, or
// the query count is too small to amortize task overhead.  Otherwise the
// queries are split into up to MSH_HASH_GRID__N_TASKS contiguous slices that
// are pushed onto the job system.
size_t
msh_hash_grid_radius_search2( const msh_hash_grid_t* hg,
msh_hash_grid_search_desc_t* hg_sd )
{
assert( hg_sd->query_pts );
assert( hg_sd->distances_sq );
assert( hg_sd->indices );
assert( hg_sd->radius > 0.0 );
assert( hg_sd->n_query_pts > 0 );
assert( hg_sd->max_n_neigh > 0 );
#ifdef MSH_JOBS
uint32_t single_thread_limit = 64;
if( !hg_sd->work_ctx || hg_sd->n_query_pts < single_thread_limit )
{
return msh_hash_grid__radius_search( hg, hg_sd, 0, hg_sd->n_query_pts );
}
else
{
#if 1
uint32_t volatile total_num_neighbors = 0;
enum{ MSH_HASH_GRID__N_TASKS = 128 };
// work_array must outlive the jobs; it does, since we block on
// msh_jobs_complete_all_work() before leaving this scope.
msh_hash_grid__work_opts_t work_array[MSH_HASH_GRID__N_TASKS];
uint32_t n_pts_per_task = hg_sd->n_query_pts / MSH_HASH_GRID__N_TASKS + 1;
n_pts_per_task = n_pts_per_task < single_thread_limit ? single_thread_limit : n_pts_per_task;
for( uint32_t work_idx = 0; work_idx < MSH_HASH_GRID__N_TASKS; ++work_idx )
{
uint32_t start_idx = work_idx * n_pts_per_task;
uint32_t end_idx = msh_min( (work_idx + 1) * n_pts_per_task, hg_sd->n_query_pts );
// NOTE(review): when start_idx == n_query_pts this still pushes an empty
// [n, n) job; '>=' would skip it.  Harmless, but wasteful — confirm intent.
if( start_idx > hg_sd->n_query_pts ) { break; }
msh_hash_grid__work_opts_t* work_entry = work_array + work_idx;
work_entry->hg = hg;
work_entry->hg_sd = hg_sd;
work_entry->total_num_neighbors = &total_num_neighbors;
work_entry->start_idx = start_idx;
work_entry->end_idx = end_idx;
msh_jobs_push_work( hg_sd->work_ctx, msh_hash_grid__run_radius_search, work_entry );
}
msh_jobs_complete_all_work( hg_sd->work_ctx );
return total_num_neighbors;
#else
// NOTE(maciej): This does not work completely, as it sometimes overflows the queue
uint32_t volatile total_num_neighbors = 0;
uint32_t start_idx = 0;
int64_t pts_left = hg_sd->n_query_pts;
// NOTE(maciej): We could always just generate 16 jobs or smth like that to avoid malloc. Will need to test.
uint32_t work_idx = 0;
uint32_t n_tasks = pts_left / single_thread_limit + 1;
msh_hash_grid__work_opts_t* work_array = malloc( n_tasks * sizeof(msh_hash_grid__work_opts_t) );
while( pts_left > 0 )
{
uint32_t count = (pts_left >= single_thread_limit) ? single_thread_limit : pts_left;
msh_hash_grid__work_opts_t* work_entry = work_array + work_idx++;
work_entry->hg = hg;
work_entry->hg_sd = hg_sd;
work_entry->total_num_neighbors = &total_num_neighbors;
work_entry->start_idx = start_idx;
work_entry->end_idx = start_idx + count;
msh_jobs_push_work( hg_sd->work_ctx, msh_hash_grid__run_radius_search, work_entry );
pts_left -= single_thread_limit;
start_idx += single_thread_limit;
}
msh_jobs_complete_all_work( hg_sd->work_ctx );
free( work_array );
return total_num_neighbors;
#endif
}
#else
return msh_hash_grid__radius_search( hg, hg_sd, 0, hg_sd->n_query_pts );
#endif
}
// Public radius search, OpenMP flavor.  Same bin-gathering / nearest-first /
// early-out algorithm as msh_hash_grid__radius_search, but the query range
// is statically split across threads.  Note the deliberate brace trick: the
// '#if defined(_OPENMP)' arm opens an omp parallel block and the '#else' arm
// opens a serial for-loop — both are closed by the same brace near the end.
size_t msh_hash_grid_radius_search( const msh_hash_grid_t* hg,
msh_hash_grid_search_desc_t* hg_sd )
{
assert( hg_sd->query_pts );
assert( hg_sd->distances_sq );
assert( hg_sd->indices );
assert( hg_sd->radius > 0.0 );
assert( hg_sd->n_query_pts > 0 );
assert( hg_sd->max_n_neigh > 0 );
// Unpack the some useful data from structs
enum { MAX_BIN_COUNT = 512, MAX_THREAD_COUNT = 512 };
uint32_t n_query_pts = hg_sd->n_query_pts;
size_t row_size = hg_sd->max_n_neigh;
double radius = hg_sd->radius;
uint64_t slab_size = hg->_slab_size;
double cs = hg->cell_size;
double ics = hg->_inv_cell_size;
int64_t w = hg->width;
int64_t h = hg->height;
int64_t d = hg->depth;
double radius_sq = radius * radius;
uint32_t n_pts_per_thread = n_query_pts;
uint32_t total_num_neighbors = 0;
// Per-thread tallies; reduced serially after the parallel region ends.
uint32_t num_neighbors_per_thread[MAX_THREAD_COUNT] = {0};
uint32_t num_threads = hg->_num_threads;
assert( num_threads <= MAX_THREAD_COUNT );
#if defined(_OPENMP)
#pragma omp parallel if (!hg->_dont_use_omp)
{
// NOTE(review): num_threads and n_pts_per_thread are shared and written by
// every thread inside the parallel region — a data race in principle,
// benign only because all threads write the same values; confirm/fix.
if( n_query_pts < num_threads ) { num_threads = n_query_pts; }
n_pts_per_thread = ceilf((float)n_query_pts / num_threads);
uint32_t thread_idx = omp_get_thread_num();
#else
for( uint32_t thread_idx = 0; thread_idx < num_threads; ++thread_idx )
{
#endif
if( thread_idx < num_threads )
{
// This thread handles queries [low_lim, high_lim).
uint32_t low_lim = thread_idx * n_pts_per_thread;
uint32_t high_lim = MSH_HG_MIN((thread_idx + 1) * n_pts_per_thread, n_query_pts);
uint32_t cur_n_pts = high_lim - low_lim;
float* query_pt = hg_sd->query_pts + low_lim * hg->_pts_dim;
size_t* n_neighbors = hg_sd->n_neighbors ? (hg_sd->n_neighbors + low_lim) : NULL;
float* dists_sq = hg_sd->distances_sq + (low_lim * row_size);
int32_t* indices = hg_sd->indices + (low_lim * row_size);
int32_t bin_indices[ MAX_BIN_COUNT ];
float bin_dists_sq[ MAX_BIN_COUNT ];
msh_hash_grid_dist_storage_t storage;
for( uint32_t pt_idx = 0; pt_idx < cur_n_pts; ++pt_idx )
{
// Prep the storage for the next point
msh_hash_grid_dist_storage_init( &storage, row_size, dists_sq, indices );
// Normalize query pt with respect to grid
msh_hg_v3_t q;
if( hg->_pts_dim == 2 )
{
q = (msh_hg_v3_t) { query_pt[0] - hg->min_pt.x,
query_pt[1] - hg->min_pt.y,
0.0 };
}
else
{
q = (msh_hg_v3_t) { query_pt[0] - hg->min_pt.x,
query_pt[1] - hg->min_pt.y,
query_pt[2] - hg->min_pt.z };
}
// Get base bin idx for query pt
int64_t ix = (int64_t)( q.x * ics );
int64_t iy = (int64_t)( q.y * ics );
int64_t iz = (int64_t)( q.z * ics );
// Decide where to look: signed bin-offset ranges covered by +/- radius.
int64_t px = (int64_t)( (q.x + radius) * ics );
int64_t nx = (int64_t)( (q.x - radius) * ics );
int64_t opx = px - ix;
int64_t onx = nx - ix;
int64_t py = (int64_t)( (q.y + radius) * ics );
int64_t ny = (int64_t)( (q.y - radius) * ics );
int64_t opy = py - iy;
int64_t ony = ny - iy;
int64_t pz = (int64_t)( (q.z + radius) * ics );
int64_t nz = (int64_t)( (q.z - radius) * ics );
int64_t opz = pz - iz;
int64_t onz = nz - iz;
uint32_t n_visited_bins = 0;
float dx, dy, dz;
int64_t cx, cy, cz;
for( int64_t oz = onz; oz <= opz; ++oz )
{
cz = (int64_t)iz + oz;
if( cz < 0 || cz >= d ) { continue; }
uint64_t idx_z = cz * slab_size;
// dz/dy/dx: axis distance from query to the nearest face of the candidate
// bin (0 inside its slab) — combined they lower-bound the bin's distance.
if( oz < 0 ) { dz = q.z - (cz + 1) * cs; }
else if( oz > 0 ) { dz = cz * cs - q.z; }
else { dz = 0.0f; }
for( int64_t oy = ony; oy <= opy; ++oy )
{
cy = iy + oy;
if( cy < 0 || cy >= h ) { continue; }
uint64_t idx_y = cy * w;
if( oy < 0 ) { dy = q.y - (cy + 1) * cs; }
else if( oy > 0 ) { dy = cy * cs - q.y; }
else { dy = 0.0f; }
for( int64_t ox = onx; ox <= opx; ++ox )
{
cx = ix + ox;
if( cx < 0 || cx >= w ) { continue; }
// assert( n_visited_bins < MAX_BIN_COUNT );
if( n_visited_bins >= MAX_BIN_COUNT ) { goto msh_hash_grid_lbl__find_neighbors; }
bin_indices[n_visited_bins] = idx_z + idx_y + cx;
if( ox < 0 ) { dx = q.x - (cx + 1) * cs; }
else if( ox > 0 ) { dx = cx * cs - q.x; }
else { dx = 0.0f; }
bin_dists_sq[n_visited_bins] = dz * dz + dy * dy + dx * dx;
n_visited_bins++;
}
}
}
msh_hash_grid_lbl__find_neighbors:
// Visit bins nearest-first; stop once the next bin's lower bound cannot
// beat the farthest hit kept in the (full) storage.
msh_hash_grid__sort( bin_dists_sq, bin_indices, n_visited_bins );
for( uint32_t i = 0; i < n_visited_bins; ++i )
{
msh_hash_grid__find_neighbors_in_bin( hg, bin_indices[i], radius_sq, query_pt, &storage );
if( storage.len >= row_size &&
storage.max_dist <= bin_dists_sq[i] )
{
break;
}
}
if( hg_sd->sort ) { msh_hash_grid__sort( dists_sq, indices, storage.len ); }
if( n_neighbors ) { (*n_neighbors++) = storage.len; }
num_neighbors_per_thread[thread_idx] += storage.len;
// Advance pointers
dists_sq += row_size;
indices += row_size;
query_pt += hg->_pts_dim;
}
}
}
for( uint32_t i = 0 ; i < num_threads; ++i )
{
total_num_neighbors += num_neighbors_per_thread[i];
}
return total_num_neighbors;
}
// Push every point of bin 'bin_idx' into the storage 's' with its squared
// distance to 'pt' — no radius test here; the storage itself keeps only the
// closest 'cap' entries (used by the knn search).
MSH_HG_INLINE void
msh_hash_grid__add_bin_contents( const msh_hash_grid_t* hg, const uint64_t bin_idx,
const float* pt, msh_hash_grid_dist_storage_t* s )
{
uint64_t* bin_table_idx = msh_hg_map_get( hg->bin_table, bin_idx );
if( !bin_table_idx ) { return; } // bin never populated
msh_hg__bin_info_t bi = hg->offsets[*bin_table_idx];
int n_pts = bi.length;
const msh_hg_v3i_t* data = &hg->data_buffer[bi.offset];
for( int32_t i = 0; i < n_pts; ++i )
{
msh_hg_v3_t v;
// 2D grids: ignore z entirely (stored points have z == 0).
if( hg->_pts_dim == 2 )
{
v = (msh_hg_v3_t){ data[i].x - pt[0], data[i].y - pt[1], 0.0 };
}
else
{
v = (msh_hg_v3_t){ data[i].x - pt[0], data[i].y - pt[1], data[i].z - pt[2] };
}
float dist_sq = v.x * v.x + v.y * v.y + v.z * v.z;
msh_hash_grid_dist_storage_push( s, dist_sq, data[i].i );
}
}
// Public k-nearest-neighbor search, OpenMP flavor.  For each query, expands
// cubic shells ('layer') of bins around the query's base bin, pushing bin
// contents into a k-capacity max-heap storage; once k hits exist it searches
// one extra shell (a nearer point can still sit in an adjacent bin) and
// stops.  Same #if/#else brace trick as msh_hash_grid_radius_search: the
// OpenMP arm opens a parallel block, the serial arm opens a for-loop.
// NOTE(review): if the whole grid contains fewer than k points, storage.len
// can never reach k and the while(true) below never sets should_break —
// layers grow forever and every bin test 'continue's: a potential infinite
// loop.  Confirm callers guarantee k <= total point count, or bound 'layer'
// by the grid dimensions.
size_t msh_hash_grid_knn_search( const msh_hash_grid_t* hg,
msh_hash_grid_search_desc_t* hg_sd )
{
assert( hg_sd->query_pts );
assert( hg_sd->distances_sq );
assert( hg_sd->indices );
assert( hg_sd->n_query_pts > 0 );
assert( hg_sd->max_n_neigh > 0 );
assert( hg_sd->k > 0 );
// Unpack the some useful data from structs
enum { MAX_BIN_COUNT = 128, MAX_THREAD_COUNT = 128 };
uint32_t n_query_pts = hg_sd->n_query_pts;
uint32_t k = hg_sd->k;
uint64_t slab_size = hg->_slab_size;
int8_t sort = hg_sd->sort;
float cs = hg->cell_size;
int64_t w = hg->width;
int64_t h = hg->height;
int64_t d = hg->depth;
uint32_t n_pts_per_thread = n_query_pts;
uint32_t total_num_neighbors = 0;
// Per-thread tallies; reduced serially after the parallel region.
uint32_t num_neighbors_per_thread[MAX_THREAD_COUNT] = {0};
uint32_t num_threads = hg->_num_threads;
assert( num_threads <= MAX_THREAD_COUNT );
#if defined(_OPENMP)
#pragma omp parallel if (!hg->_dont_use_omp)
{
// NOTE(review): shared 'num_threads' written by every thread (race); also
// n_pts_per_thread is NOT divided by num_threads here (unlike the radius
// search), so only thread 0 appears to do work under OpenMP — confirm.
if( n_query_pts < num_threads ) { num_threads = n_query_pts; }
uint32_t thread_idx = omp_get_thread_num();
#else
for( uint32_t thread_idx = 0; thread_idx < num_threads; ++thread_idx )
{
#endif
if( thread_idx < num_threads )
{
// This thread handles queries [low_lim, high_lim); k entries per row.
uint32_t low_lim = thread_idx * n_pts_per_thread;
uint32_t high_lim = MSH_HG_MIN((thread_idx + 1) * n_pts_per_thread, n_query_pts);
uint32_t cur_n_pts = high_lim - low_lim;
float *query_pt = hg_sd->query_pts + low_lim * hg->_pts_dim;
size_t* n_neighbors = hg_sd->n_neighbors ? (hg_sd->n_neighbors + low_lim) : NULL;
float* dists_sq = hg_sd->distances_sq + (low_lim * k);
int32_t* indices = hg_sd->indices + (low_lim * k);
int32_t bin_indices[ MAX_BIN_COUNT ];
msh_hash_grid_dist_storage_t storage;
for( uint32_t pt_idx = 0; pt_idx < cur_n_pts; ++pt_idx )
{
// Prep the storage for the next point
msh_hash_grid_dist_storage_init( &storage, k, dists_sq, indices );
// Normalize query pt with respect to grid
float dx, dy, dz;
int64_t cx, cy, cz;
int32_t layer = 0;
int8_t should_break = 0;
msh_hg_v3_t pt_prime;
if( hg->_pts_dim == 2 )
{
pt_prime = (msh_hg_v3_t) { query_pt[0] - hg->min_pt.x,
query_pt[1] - hg->min_pt.y,
0.0 };
}
else
{
pt_prime = (msh_hg_v3_t) { query_pt[0] - hg->min_pt.x,
query_pt[1] - hg->min_pt.y,
query_pt[2] - hg->min_pt.z };
}
// get base bin for query
uint64_t ix = (uint64_t)( (pt_prime.x) * hg->_inv_cell_size );
uint64_t iy = (uint64_t)( (pt_prime.y) * hg->_inv_cell_size );
uint64_t iz = (uint64_t)( (pt_prime.z) * hg->_inv_cell_size );
while( true )
{
int32_t inc_x = 1;
uint32_t n_visited_bins = 0;
// Walk the hollow cubic shell at distance 'layer' from the base bin.
for( int64_t oz = -layer; oz <= layer; oz++ )
{
cz = iz + oz;
if( cz < 0 || cz >= d ) continue;
uint64_t idx_z = cz * slab_size;
// dz/dy/dx: axis distance from query to the bin's nearest face; together
// they lower-bound the bin's squared distance for the skip test below.
if( oz < 0 ) { dz = pt_prime.z - (cz + 1) * cs; }
else if( oz > 0 ) { dz = cz * cs - pt_prime.z; }
else { dz = 0.0f; }
for( int64_t oy = -layer; oy <= layer; oy++ )
{
cy = iy + oy;
if( cy < 0 || cy >= h ) continue;
uint64_t idx_y = cy * w;
if( oy < 0 ) { dy = pt_prime.y - (cy + 1) * cs; }
else if( oy > 0 ) { dy = cy * cs - pt_prime.y; }
else { dy = 0.0f; }
// Interior rows of the shell: only the two x-extremes are new this layer.
if( abs(oy) != layer && abs(oz) != layer ) { inc_x = 2 * layer; }
else { inc_x = 1; }
for( int64_t ox = -layer; ox <= layer; ox += inc_x )
{
cx = ix + ox;
if( cx < 0 || cx >= w ) continue;
if( ox < 0 ) { dx = pt_prime.x - (cx + 1) * cs; }
else if( ox > 0 ) { dx = cx * cs - pt_prime.x; }
else { dx = 0.0f; }
float dist_sq = dz * dz + dy * dy + dx * dx;
// Skip bins that provably cannot improve a full k-heap.
if( storage.len >= k &&
dist_sq > storage.max_dist ) { continue; }
assert( n_visited_bins < MAX_BIN_COUNT );
bin_indices[n_visited_bins] = idx_z + idx_y + cx;
n_visited_bins++;
}
}
}
for( uint32_t bin_idx = 0; bin_idx < n_visited_bins; ++bin_idx )
{
msh_hash_grid__add_bin_contents( hg, bin_indices[bin_idx], query_pt, &storage );
}
layer++;
// One extra shell is searched after k hits are gathered before breaking.
if( should_break ) { break; }
if( storage.len >= k ) { should_break = true; }
}
if( n_neighbors ) { (*n_neighbors++) = storage.len; }
num_neighbors_per_thread[thread_idx] += storage.len;
if( sort ) { msh_hash_grid__sort( dists_sq, indices, storage.len ); }
// Advance pointers
dists_sq += k;
indices += k;
query_pt += hg->_pts_dim;
}
}
}
for( uint32_t i = 0 ; i < num_threads; ++i )
{
total_num_neighbors += num_neighbors_per_thread[i];
}
return total_num_neighbors;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// msh_array / msh_hg_map implementation
////////////////////////////////////////////////////////////////////////////////////////////////////
// stb-style stretchy-buffer growth: the payload pointer returned sits just
// after a hidden msh_hg_array_hdr_t header.  Capacity grows by the library's
// growth formula but never below new_len or 16 elements.
// NOTE(review): MSH_HG_REALLOC / MSH_HG_MALLOC results are not checked — on
// allocation failure new_hdr->cap dereferences NULL.  Confirm the msh
// convention is to let this crash.
void*
msh_hg__array_grow(const void *array, size_t new_len, size_t elem_size) {
size_t old_cap = msh_hg_array_cap( array );
size_t new_cap = (size_t)msh_hg_array__grow_formula( old_cap );
new_cap = (size_t)MSH_HG_MAX( new_cap, MSH_HG_MAX(new_len, 16) );
size_t new_size = sizeof(msh_hg_array_hdr_t) + new_cap * elem_size;
msh_hg_array_hdr_t *new_hdr = NULL;
if( array ) {
new_hdr = (msh_hg_array_hdr_t*)MSH_HG_REALLOC( msh_hg_array__hdr( array ), new_size );
} else {
// Fresh array: length starts at zero (realloc path preserves old header).
new_hdr = (msh_hg_array_hdr_t*)MSH_HG_MALLOC( new_size );
new_hdr->len = 0;
}
new_hdr->cap = new_cap;
return (void*)((char*)new_hdr + sizeof(msh_hg_array_hdr_t));
}
// 64-bit integer mixer used to spread bin keys across the hash table — the
// first multiply/xor-shift steps of MurmurHash3's fmix64 finalizer (constant
// 0xff51afd7ed558ccd), truncated to a single round.
MSH_HG_INLINE uint64_t
msh_hg_hash_uint64(uint64_t x)
{
x *= 0xff51afd7ed558ccd;
x ^= x >> 32;
return x;
}
// Round v up to the next power of two (v itself if already one); returns 1
// for v == 0.  Classic bit-smear: after v-1 has all bits below its highest
// set bit filled in, v+1 lands on the next power of two.  The shift sequence
// 1,2,4,8,16 covers all 32 bits of the input.
size_t
msh_hg_map__pow2ceil( uint32_t v )
{
  v--;
  for( uint32_t shift = 1; shift < 32; shift <<= 1 )
  {
    v |= v >> shift;
  }
  v++;
  v += ( v == 0 ); // v == 0 happens for input 0 (and on uint32 overflow)
  return v;
}
// Initialize an open-addressing hash map with capacity rounded up to a power
// of two (the probe loops mask with cap-1, which requires this).  Keys are
// calloc'ed so slot value 0 means "empty" — stored keys are offset by +1.
// Asserts the map is zeroed/fresh; call msh_hg_map_free() before re-init.
void
msh_hg_map_init( msh_hg_map_t* map, uint32_t cap )
{
assert( !map->keys && !map->vals );
cap = msh_hg_map__pow2ceil( cap );
map->keys = (uint64_t*)MSH_HG_CALLOC( cap, sizeof(uint64_t) );
map->vals = (uint64_t*)MSH_HG_MALLOC( cap * sizeof(uint64_t) );
map->_len = 0;
map->_cap = cap;
}
// Rehash into a table of new_cap (>= 16) slots, reinserting every occupied
// slot.  Stored keys carry the +1 offset (0 == empty), hence the -1 when
// re-inserting through the public API (which re-applies the offset).
// NOTE(review): uses 'msh_max' here but 'MSH_HG_MAX' elsewhere — presumably
// an alias supplied by the parent msh library; confirm.
void
msh_hg_map__grow( msh_hg_map_t *map, size_t new_cap) {
new_cap = msh_max( new_cap, 16 );
msh_hg_map_t new_map;
new_map.keys = (uint64_t*)MSH_HG_CALLOC( new_cap, sizeof(uint64_t) );
new_map.vals = (uint64_t*)MSH_HG_MALLOC( new_cap * sizeof(uint64_t) );
new_map._len = 0;
new_map._cap = new_cap;
for( size_t i = 0; i < map->_cap; i++ )
{
if( map->keys[i] )
{
msh_hg_map_insert( &new_map, map->keys[i] - 1, map->vals[i] );
}
}
MSH_HG_FREE( (void *)map->keys );
MSH_HG_FREE( map->vals );
*map = new_map;
}
// Number of key/value pairs currently stored.
size_t
msh_hg_map_len( msh_hg_map_t* map )
{
return map->_len;
}
// Current slot capacity of the table (always a power of two).
size_t
msh_hg_map_cap( msh_hg_map_t* map )
{
return map->_cap;
}
// Insert or overwrite key -> val.  Keys are stored internally as key+1 so a
// zero slot marks "empty".  The table is grown whenever load would reach
// 50%, which also guarantees the linear-probe loop below always finds an
// empty or matching slot and terminates.
void
msh_hg_map_insert( msh_hg_map_t* map, uint64_t key, uint64_t val )
{
key += 1;
if( 2 * map->_len >= map->_cap) { msh_hg_map__grow( map, 2 * map->_cap ); }
assert( 2 * map->_len < map->_cap );
// Linear probing from the key itself; _cap is a power of two, so the mask
// wraps the index.  (The key is used directly — hashing happens elsewhere.)
size_t i = (size_t)key;
for (;;)
{
i &= map->_cap - 1;
if( !map->keys[i] )
{
// empty slot: claim it
map->_len++;
map->keys[i] = key;
map->vals[i] = val;
return;
}
else if( map->keys[i] == key )
{
// existing key: overwrite value
map->vals[i] = val;
return;
}
i++;
}
}
// Lookup: returns a pointer into the table's value storage, or NULL when the
// key is absent.  The pointer is invalidated by any subsequent insert that
// triggers a grow.  Load factor is kept below 50% (see insert), so the probe
// always reaches an empty slot and terminates.
uint64_t*
msh_hg_map_get( const msh_hg_map_t* map, uint64_t key )
{
if (map->_len == 0) { return NULL; }
key += 1; // stored keys carry the +1 "non-empty" offset
size_t i = (size_t)key;
assert(map->_len < map->_cap);
for (;;) {
i &= map->_cap - 1;
if( map->keys[i] == key )
{
return &map->vals[i];
}
else if( !map->keys[i] )
{
return NULL; // hit an empty slot: key not present
}
i++;
}
}
// Release the map's key/value storage and reset it to the zero state.
// FIX: also NULL the freed pointers — msh_hg_map_init() asserts
// (!keys && !vals), so leaving dangling pointers here made a legitimate
// free-then-init sequence trip the assert (and risked double-free if
// msh_hg_map_free were called twice).
void
msh_hg_map_free( msh_hg_map_t* map )
{
MSH_HG_FREE( map->keys );
MSH_HG_FREE( map->vals );
map->keys = NULL;
map->vals = NULL;
map->_cap = 0;
map->_len = 0;
}
#endif /* MSH_HASH_GRID_IMPLEMENTATION */
/*
------------------------------------------------------------------------------
This software is available under 2 licenses - you may choose the one you like.
------------------------------------------------------------------------------
ALTERNATIVE A - MIT License
Copyright (c) 2018 Maciej Halber
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
------------------------------------------------------------------------------
ALTERNATIVE B - Public Domain (www.unlicense.org)
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or distribute this
software, either in source code form or as a compiled binary, for any purpose,
commercial or non-commercial, and by any means.
In jurisdictions that recognize copyright laws, the author or authors of this
software dedicate any and all copyright interest in the software to the public
domain. We make this dedication for the benefit of the public at large and to
the detriment of our heirs and successors. We intend this dedication to be an
overt act of relinquishment in perpetuity of all present and future rights to
this software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
------------------------------------------------------------------------------
*/ |
callback_openmp.c | // RUN: %clang_cc1 -triple i386-unknown-unknown -fopenmp %s -emit-llvm -o - -disable-llvm-optzns | FileCheck %s
// CHECK: declare !callback ![[cid:[0-9]+]] void @__kmpc_fork_call
// CHECK: declare !callback ![[cid]] void @__kmpc_fork_teams
// CHECK: ![[cid]] = !{![[cidb:[0-9]+]]}
// CHECK: ![[cidb]] = !{i64 2, i64 -1, i64 -1, i1 true}
void work1(int, int);
void work2(int, int);
void work12(int, int);
// Exercises the three outlined-region entry points the CHECK lines above
// verify carry !callback metadata:
//   #pragma omp parallel      -> __kmpc_fork_call
//   #pragma omp parallel for  -> __kmpc_fork_call
//   #pragma omp target teams  -> __kmpc_fork_teams
void foo(int q) {
int p = 2;
#pragma omp parallel firstprivate(q, p)
work1(p, q);
#pragma omp parallel for firstprivate(p, q)
for (int i = 0; i < q; i++)
work2(i, p);
#pragma omp target teams firstprivate(p)
work12(p, p);
}
|
StaticRendering.h | #pragma once
#include "ObjectExtensions.h"
#pragma region Static_Render
// Set up a 45-degree perspective projection and viewport for a w1 x h1 area.
// FIXES: (1) the previous function-local omp_lock_t provided no mutual
// exclusion (each call owned its own lock) and has been removed; (2) the
// '#pragma omp atomic write' on a just-declared local did nothing; (3) the
// divide-by-zero guard promised by the original comment is now actually
// implemented.  GL calls must run on the thread owning the current context.
void setProjection(GLint w1, GLint h1)
{
// Prevent a divide by zero, when window is too short
// (you cant make a window of zero width).
if (h1 == 0) { h1 = 1; }
GLfloat ratio = 1.0f * w1 / h1;
// Reset the coordinate system before modifying
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
// Set the viewport to be the entire window
glViewport(0, 0, w1, h1);
// Set the clipping volume
gluPerspective(45, ratio, 0.1, 1000);
glMatrixMode(GL_MODELVIEW);
}
// GLUT reshape callback: records the new window size in the shared
// _openGLMV_ state and repositions/resizes the top-down subwindow, then
// rebuilds its projection.
// NOTE(review): the atomic pragmas only make the two stores atomic; reads of
// these fields elsewhere are unsynchronized — confirm another thread really
// races on them, otherwise the pragmas are noise.
void changeSize(GLint w1, GLint h1)
{
// guard against a zero-height window before ratios are derived downstream
if (h1 == 0) { h1 = 1; }
#pragma omp atomic write
_openGLMV_.width = w1;
#pragma omp atomic write
_openGLMV_.height = h1;
// set topdownWindow as the active window
glutSetWindow(_openGLMV_.TopDownWindow);
// resize and reposition the sub window
glutPositionWindow(_openGLMV_.border, (_openGLMV_.height + _openGLMV_.border) / 40);
glutReshapeWindow(_openGLMV_.width - 20, _openGLMV_.height - 20);
setProjection(_openGLMV_.width / 2 - _openGLMV_.border * 3 / 2, _openGLMV_.height / 2 - _openGLMV_.border * 3 / 2);
}
#pragma endregion Static_Render
// --------------------------------------------------------------------------------
// STATIC RENDERING OBJECTS
// --------------------------------------------------------------------------------
//Static Drawcube object for rendering.
#pragma region Static_Objects
//Parallelize
void drawRadCircle()
{
#pragma omp atomic write
GLfloat pi_short = 3.14159f;
#pragma omp atomic write
GLfloat rad_short = 1000.0;
glBegin(GL_POINTS);
#pragma omp parallel for ordered schedule(dynamic)
for (int i = 0; i < (int)rad_short; ++i)
{
glVertex3f(cos(pi_short*i / rad_short), 0.0, sin(pi_short*i / rad_short));
}
glEnd();
}
// Draw a small green sphere 0.75 units up from the current origin.  Note:
// the glTranslatef persists in the modelview matrix (no push/pop), matching
// the other draw helpers in this file.
// FIX: removed the function-local omp lock — a lock created, taken and
// destroyed inside one call can never be contended and protected nothing.
void drawCircle()
{
glColor3f(0.0, 1.0, 0.0); // <R,G,B>
//Circle
glTranslatef(0.0f, 0.75f, 0.0f);
glutSolidSphere(0.10f, 10, 10);
}
// Draw a small low-tessellation sphere ("tennis ball") at a fixed offset.
// Note: the glTranslatef persists in the modelview matrix (no push/pop),
// matching the other draw helpers in this file.
void drawTennisBall()
{
//Circle
glTranslatef(1.6f, 0.0f, 2.6f);
glutSolidSphere(0.10f, 5, 5);
}
//Static Drawcube object for rendering.
void drawRock()
{
omp_lock_t mtxLock;
omp_init_lock(&mtxLock);
omp_set_lock(&mtxLock);
//Generic cube size
GLdouble cube_size;
#pragma omp atomic write
cube_size = 0.20;
omp_unset_lock(&mtxLock);
// Red side - TOP
omp_set_lock(&mtxLock);
glBegin(GL_POLYGON);
glVertex3f(cube_size, cube_size, cube_size);
glVertex3f(cube_size, cube_size, -cube_size);
glVertex3f(-cube_size, cube_size, -cube_size);
glVertex3f(-cube_size, cube_size, cube_size);
glEnd();
omp_unset_lock(&mtxLock);
omp_destroy_lock(&mtxLock);
}
#pragma endregion Static_Objects
//END OF STATIC OBJECTS
//----------------------------------------------------------------------------------
|
convolutiondepthwise_5x5.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void convdw5x5s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
const float* kernel = _kernel;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat out = top_blob.channel(g);
const float bias0 = bias ? bias[g] : 0.f;
const float* kernel0 = kernel + g * 25;
float* outptr = out;
float* outptr2 = outptr + outw;
const float* img0 = bottom_blob.channel(g);
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w * 2;
const float* r3 = img0 + w * 3;
const float* r4 = img0 + w * 4;
const float* r5 = img0 + w * 5;
const float* k0 = kernel0;
const float* k1 = kernel0 + 5;
const float* k2 = kernel0 + 10;
const float* k3 = kernel0 + 15;
const float* k4 = kernel0 + 20;
#if __ARM_NEON
float32x4_t _k0123 = vld1q_f32(kernel0);
float32x4_t _k4567 = vld1q_f32(kernel0 + 4);
float32x4_t _k891011 = vld1q_f32(kernel0 + 8);
float32x4_t _k12131415 = vld1q_f32(kernel0 + 12);
float32x4_t _k16171819 = vld1q_f32(kernel0 + 16);
float32x4_t _k20212223 = vld1q_f32(kernel0 + 20);
float32x4_t _k24242424 = vdupq_n_f32(kernel0[24]);
float32x4_t _bias0 = vdupq_n_f32(bias0);
#endif // __ARM_NEON
int i = 0;
for (; i + 1 < outh; i += 2)
{
#if __ARM_NEON
#if __aarch64__
int nn = outw >> 3;
int remain = outw & 7;
#else
int nn = outw >> 2;
int remain = outw & 3;
#endif // __aarch64__
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
// r1
"prfm pldl1keep, [%4, #384] \n"
"ld1 {v16.4s, v17.4s, v18.4s}, [%4] \n" // v16 v17 v18 = r10 r14 r18
"mov v8.16b, %25.16b \n" // v8 = _bias0
"mov v9.16b, %25.16b \n" // v9 = _bias0
"0: \n"
"mov v10.16b, %25.16b \n" // v10 = _bias0
"mov v11.16b, %25.16b \n" // v11 = _bias0
"fmla v8.4s, v16.4s, %19.s[1] \n"
"fmla v10.4s, v16.4s, %18.s[0] \n"
"ext v19.16b, v16.16b, v17.16b, #4 \n" // r11
"fmla v9.4s, v17.4s, %19.s[1] \n"
"fmla v11.4s, v17.4s, %18.s[0] \n"
"ext v20.16b, v17.16b, v18.16b, #4 \n" // r15
"fmla v8.4s, v17.4s, %20.s[1] \n"
"fmla v10.4s, v17.4s, %19.s[0] \n"
"ext v21.16b, v16.16b, v17.16b, #8 \n" // r12
"fmla v9.4s, v18.4s, %20.s[1] \n"
"fmla v11.4s, v18.4s, %19.s[0] \n"
"ext v22.16b, v17.16b, v18.16b, #8 \n" // r16
"fmla v8.4s, v19.4s, %19.s[2] \n"
"fmla v10.4s, v19.4s, %18.s[1] \n"
"ext v19.16b, v16.16b, v17.16b, #12 \n" // r13
"fmla v9.4s, v20.4s, %19.s[2] \n"
"fmla v11.4s, v20.4s, %18.s[1] \n"
"ext v20.16b, v17.16b, v18.16b, #12 \n" // r17
"fmla v8.4s, v21.4s, %19.s[3] \n"
"fmla v10.4s, v21.4s, %18.s[2] \n"
"add %4, %4, #32 \n"
"fmla v9.4s, v22.4s, %19.s[3] \n"
"fmla v11.4s, v22.4s, %18.s[2] \n"
// r2
"prfm pldl1keep, [%5, #384] \n"
"ld1 {v12.4s, v13.4s, v14.4s}, [%5] \n" // v12 v13 v14 = r20 r24 r28
"fmla v8.4s, v19.4s, %20.s[0] \n"
"fmla v10.4s, v19.4s, %18.s[3] \n"
"fmla v9.4s, v20.4s, %20.s[0] \n"
"fmla v11.4s, v20.4s, %18.s[3] \n"
"add %5, %5, #32 \n"
"fmla v8.4s, v12.4s, %20.s[2] \n"
"fmla v10.4s, v12.4s, %19.s[1] \n"
"ext v21.16b, v12.16b, v13.16b, #4 \n" // r21
"fmla v9.4s, v13.4s, %20.s[2] \n"
"fmla v11.4s, v13.4s, %19.s[1] \n"
"ext v22.16b, v13.16b, v14.16b, #4 \n" // r25
"fmla v8.4s, v13.4s, %21.s[2] \n"
"fmla v10.4s, v13.4s, %20.s[1] \n"
"ext v19.16b, v12.16b, v13.16b, #8 \n" // r22
"fmla v9.4s, v14.4s, %21.s[2] \n"
"fmla v11.4s, v14.4s, %20.s[1] \n"
"ext v20.16b, v13.16b, v14.16b, #8 \n" // r26
"fmla v8.4s, v21.4s, %20.s[3] \n"
"fmla v10.4s, v21.4s, %19.s[2] \n"
"ext v21.16b, v12.16b, v13.16b, #12 \n" // r23
"fmla v9.4s, v22.4s, %20.s[3] \n"
"fmla v11.4s, v22.4s, %19.s[2] \n"
"ext v22.16b, v13.16b, v14.16b, #12 \n" // r27
"fmla v8.4s, v19.4s, %21.s[0] \n"
"fmla v10.4s, v19.4s, %19.s[3] \n"
"fmla v9.4s, v20.4s, %21.s[0] \n"
"fmla v11.4s, v20.4s, %19.s[3] \n"
// r3
"prfm pldl1keep, [%6, #384] \n"
"ld1 {v16.4s, v17.4s, v18.4s}, [%6] \n" // v16 v17 v18 = r30 r34 r38
"fmla v8.4s, v21.4s, %21.s[1] \n"
"fmla v10.4s, v21.4s, %20.s[0] \n"
"fmla v9.4s, v22.4s, %21.s[1] \n"
"fmla v11.4s, v22.4s, %20.s[0] \n"
"add %6, %6, #32 \n"
"fmla v8.4s, v16.4s, %21.s[3] \n"
"fmla v10.4s, v16.4s, %20.s[2] \n"
"ext v19.16b, v16.16b, v17.16b, #4 \n" // r31
"fmla v9.4s, v17.4s, %21.s[3] \n"
"fmla v11.4s, v17.4s, %20.s[2] \n"
"ext v20.16b, v17.16b, v18.16b, #4 \n" // r35
"fmla v8.4s, v17.4s, %22.s[3] \n"
"fmla v10.4s, v17.4s, %21.s[2] \n"
"ext v21.16b, v16.16b, v17.16b, #8 \n" // r32
"fmla v9.4s, v18.4s, %22.s[3] \n"
"fmla v11.4s, v18.4s, %21.s[2] \n"
"ext v22.16b, v17.16b, v18.16b, #8 \n" // r36
"fmla v8.4s, v19.4s, %22.s[0] \n"
"fmla v10.4s, v19.4s, %20.s[3] \n"
"ext v19.16b, v16.16b, v17.16b, #12 \n" // r33
"fmla v9.4s, v20.4s, %22.s[0] \n"
"fmla v11.4s, v20.4s, %20.s[3] \n"
"ext v20.16b, v17.16b, v18.16b, #12 \n" // r37
"fmla v8.4s, v21.4s, %22.s[1] \n"
"fmla v10.4s, v21.4s, %21.s[0] \n"
"fmla v9.4s, v22.4s, %22.s[1] \n"
"fmla v11.4s, v22.4s, %21.s[0] \n"
// r4
"prfm pldl1keep, [%7, #384] \n"
"ld1 {v12.4s, v13.4s, v14.4s}, [%7] \n" // v12 v13 v14 = r40 r44 r48
"fmla v8.4s, v19.4s, %22.s[2] \n"
"fmla v10.4s, v19.4s, %21.s[1] \n"
"add %7, %7, #32 \n"
"fmla v9.4s, v20.4s, %22.s[2] \n"
"fmla v11.4s, v20.4s, %21.s[1] \n"
"ext v21.16b, v12.16b, v13.16b, #4 \n" // r41
"fmla v8.4s, v12.4s, %23.s[0] \n"
"fmla v10.4s, v12.4s, %21.s[3] \n"
"ext v22.16b, v13.16b, v14.16b, #4 \n" // r45
"fmla v9.4s, v13.4s, %23.s[0] \n"
"fmla v11.4s, v13.4s, %21.s[3] \n"
"ext v19.16b, v12.16b, v13.16b, #8 \n" // r42
"fmla v8.4s, v13.4s, %24.s[0] \n"
"fmla v10.4s, v13.4s, %22.s[3] \n"
"ext v20.16b, v13.16b, v14.16b, #8 \n" // r46
"fmla v9.4s, v14.4s, %24.s[0] \n"
"fmla v11.4s, v14.4s, %22.s[3] \n"
// r0 and r5
"prfm pldl1keep, [%3, #384] \n"
"ld1 {v16.4s, v17.4s, v18.4s}, [%3] \n" // v16 v17 v18 = r00 r04 r08
"fmla v8.4s, v21.4s, %23.s[1] \n"
"fmla v10.4s, v21.4s, %22.s[0] \n"
"ext v21.16b, v12.16b, v13.16b, #12 \n" // r43
"fmla v9.4s, v22.4s, %23.s[1] \n"
"fmla v11.4s, v22.4s, %22.s[0] \n"
"ext v22.16b, v13.16b, v14.16b, #12 \n" // r47
"fmla v8.4s, v19.4s, %23.s[2] \n"
"fmla v10.4s, v19.4s, %22.s[1] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v12.4s, v13.4s, v14.4s}, [%8] \n" // v12 v13 v14 = r50 r54 r58
"fmla v9.4s, v20.4s, %23.s[2] \n"
"fmla v11.4s, v20.4s, %22.s[1] \n"
"ext v19.16b, v16.16b, v17.16b, #4 \n" // r01
"fmla v8.4s, v21.4s, %23.s[3] \n"
"fmla v10.4s, v21.4s, %22.s[2] \n"
"ext v23.16b, v12.16b, v13.16b, #4 \n" // r51
"fmla v9.4s, v22.4s, %23.s[3] \n"
"fmla v11.4s, v22.4s, %22.s[2] \n"
"ext v20.16b, v17.16b, v18.16b, #4 \n" // r05
"fmla v8.4s, v16.4s, %18.s[0] \n"
"fmla v10.4s, v12.4s, %23.s[0] \n"
"ext v24.16b, v13.16b, v14.16b, #4 \n" // r55
"fmla v9.4s, v17.4s, %18.s[0] \n"
"fmla v11.4s, v13.4s, %23.s[0] \n"
"ext v21.16b, v16.16b, v17.16b, #8 \n" // r02
"fmla v8.4s, v17.4s, %19.s[0] \n"
"fmla v10.4s, v13.4s, %24.s[0] \n"
"ext v25.16b, v12.16b, v13.16b, #8 \n" // r52
"fmla v9.4s, v18.4s, %19.s[0] \n"
"fmla v11.4s, v14.4s, %24.s[0] \n"
"ext v22.16b, v17.16b, v18.16b, #8 \n" // r06
"fmla v8.4s, v19.4s, %18.s[1] \n"
"fmla v10.4s, v23.4s, %23.s[1] \n"
"ext v26.16b, v13.16b, v14.16b, #8 \n" // r56
"fmla v9.4s, v20.4s, %18.s[1] \n"
"fmla v11.4s, v24.4s, %23.s[1] \n"
"ext v19.16b, v16.16b, v17.16b, #12 \n" // r03
"fmla v8.4s, v21.4s, %18.s[2] \n"
"fmla v10.4s, v25.4s, %23.s[2] \n"
"ext v23.16b, v12.16b, v13.16b, #12 \n" // r53
"fmla v9.4s, v22.4s, %18.s[2] \n"
"fmla v11.4s, v26.4s, %23.s[2] \n"
"ext v20.16b, v17.16b, v18.16b, #12 \n" // r07
"fmla v8.4s, v19.4s, %18.s[3] \n"
"fmla v10.4s, v23.4s, %23.s[3] \n"
"ext v24.16b, v13.16b, v14.16b, #12 \n" // r57
"fmla v9.4s, v20.4s, %18.s[3] \n"
"add %3, %3, #32 \n"
"fmla v11.4s, v24.4s, %23.s[3] \n"
"add %8, %8, #32 \n"
// r1
"prfm pldl1keep, [%4, #384] \n"
"ld1 {v16.4s, v17.4s, v18.4s}, [%4] \n" // v16 v17 v18 = r10 r14 r18
"subs %w0, %w0, #1 \n"
"st1 {v8.4s, v9.4s}, [%1], #32 \n"
"mov v8.16b, %25.16b \n" // v8 = _bias0
"mov v9.16b, %25.16b \n" // v9 = _bias0
"st1 {v10.4s, v11.4s}, [%2], #32 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(outptr2), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2), // %5
"=r"(r3), // %6
"=r"(r4), // %7
"=r"(r5) // %8
: "0"(nn),
"1"(outptr),
"2"(outptr2),
"3"(r0),
"4"(r1),
"5"(r2),
"6"(r3),
"7"(r4),
"8"(r5),
"w"(_k0123), // %18
"w"(_k4567), // %19
"w"(_k891011), // %20
"w"(_k12131415), // %21
"w"(_k16171819), // %22
"w"(_k20212223), // %23
"w"(_k24242424), // %24
"w"(_bias0) // %25
: "cc", "memory", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26");
}
if (remain >= 4)
{
remain -= 4;
asm volatile(
// r1
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v12.4s, v13.4s}, [%3] \n" // v12 v13 = r10 r14
"mov v8.16b, %23.16b \n" // v8 = _bias0
"mov v9.16b, %23.16b \n" // v9 = _bias0
"fmul v10.4s, v12.4s, %17.s[1] \n"
"fmul v11.4s, v12.4s, %16.s[0] \n"
"ext v21.16b, v12.16b, v13.16b, #4 \n" // r11
"fmla v8.4s, v13.4s, %18.s[1] \n"
"fmla v9.4s, v13.4s, %17.s[0] \n"
"ext v22.16b, v12.16b, v13.16b, #8 \n" // r12
"fmla v10.4s, v21.4s, %17.s[2] \n"
"fmla v11.4s, v21.4s, %16.s[1] \n"
"ext v23.16b, v12.16b, v13.16b, #12 \n" // r13
"fmla v8.4s, v22.4s, %17.s[3] \n"
"fmla v9.4s, v22.4s, %16.s[2] \n"
// r2
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v16.4s, v17.4s}, [%4] \n" // v16 v17 = r20 r24
"fmla v10.4s, v23.4s, %18.s[0] \n"
"fmla v11.4s, v23.4s, %16.s[3] \n"
"add %4, %4, #16 \n"
"fmla v8.4s, v16.4s, %18.s[2] \n"
"fmla v9.4s, v16.4s, %17.s[1] \n"
"ext v18.16b, v16.16b, v17.16b, #4 \n" // r21
"fmla v10.4s, v17.4s, %19.s[2] \n"
"fmla v11.4s, v17.4s, %18.s[1] \n"
"ext v19.16b, v16.16b, v17.16b, #8 \n" // r22
"fmla v8.4s, v18.4s, %18.s[3] \n"
"fmla v9.4s, v18.4s, %17.s[2] \n"
"ext v20.16b, v16.16b, v17.16b, #12 \n" // r23
"fmla v10.4s, v19.4s, %19.s[0] \n"
"fmla v11.4s, v19.4s, %17.s[3] \n"
// r3
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v12.4s, v13.4s}, [%5] \n" // v12 v13 = r30 r34
"fmla v8.4s, v20.4s, %19.s[1] \n"
"fmla v9.4s, v20.4s, %18.s[0] \n"
"add %5, %5, #16 \n"
"fmla v10.4s, v12.4s, %19.s[3] \n"
"fmla v11.4s, v12.4s, %18.s[2] \n"
"ext v21.16b, v12.16b, v13.16b, #4 \n" // r31
"fmla v8.4s, v13.4s, %20.s[3] \n"
"fmla v9.4s, v13.4s, %19.s[2] \n"
"ext v22.16b, v12.16b, v13.16b, #8 \n" // r32
"fmla v10.4s, v21.4s, %20.s[0] \n"
"fmla v11.4s, v21.4s, %18.s[3] \n"
"ext v23.16b, v12.16b, v13.16b, #12 \n" // r33
"fmla v8.4s, v22.4s, %20.s[1] \n"
"fmla v9.4s, v22.4s, %19.s[0] \n"
// r4
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4s, v17.4s}, [%6] \n" // v16 v17 = r40 r44
"fmla v10.4s, v23.4s, %20.s[2] \n"
"fmla v11.4s, v23.4s, %19.s[1] \n"
"add %6, %6, #16 \n"
"fmla v8.4s, v16.4s, %21.s[0] \n"
"fmla v9.4s, v16.4s, %19.s[3] \n"
"ext v18.16b, v16.16b, v17.16b, #4 \n" // r41
"fmla v10.4s, v17.4s, %22.s[0] \n"
"fmla v11.4s, v17.4s, %20.s[3] \n"
"ext v19.16b, v16.16b, v17.16b, #8 \n" // r42
"fmla v8.4s, v18.4s, %21.s[1] \n"
"fmla v9.4s, v18.4s, %20.s[0] \n"
"ext v20.16b, v16.16b, v17.16b, #12 \n" // r43
"fmla v10.4s, v19.4s, %21.s[2] \n"
"fmla v11.4s, v19.4s, %20.s[1] \n"
// r0
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v16.4s, v17.4s}, [%2] \n" // v16 v17 = r00 r04
"fmla v8.4s, v20.4s, %21.s[3] \n"
"fmla v9.4s, v20.4s, %20.s[2] \n"
// r5
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v12.4s, v13.4s}, [%7] \n" // v12 v13 = r50 r54
"fmla v10.4s, v16.4s, %16.s[0] \n"
"fmla v11.4s, v12.4s, %21.s[0] \n"
"ext v18.16b, v16.16b, v17.16b, #4 \n" // r01
"fmla v8.4s, v17.4s, %17.s[0] \n"
"ext v21.16b, v12.16b, v13.16b, #4 \n" // r51
"fmla v9.4s, v13.4s, %22.s[0] \n"
"ext v19.16b, v16.16b, v17.16b, #8 \n" // r02
"fmla v10.4s, v18.4s, %16.s[1] \n"
"ext v22.16b, v12.16b, v13.16b, #8 \n" // r52
"fmla v11.4s, v21.4s, %21.s[1] \n"
"ext v20.16b, v16.16b, v17.16b, #12 \n" // r03
"fmla v8.4s, v19.4s, %16.s[2] \n"
"ext v23.16b, v12.16b, v13.16b, #12 \n" // r53
"fmla v9.4s, v22.4s, %21.s[2] \n"
"add %3, %3, #16 \n"
"fmla v10.4s, v20.4s, %16.s[3] \n"
"fmla v11.4s, v23.4s, %21.s[3] \n"
"add %2, %2, #16 \n"
"fadd v8.4s, v8.4s, v10.4s \n"
"fadd v9.4s, v9.4s, v11.4s \n"
"add %7, %7, #16 \n"
"st1 {v8.4s}, [%0], #16 \n"
"st1 {v9.4s}, [%1], #16 \n"
: "=r"(outptr), // %0
"=r"(outptr2), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(r5) // %7
: "0"(outptr),
"1"(outptr2),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(r5),
"w"(_k0123), // %16
"w"(_k4567), // %17
"w"(_k891011), // %18
"w"(_k12131415), // %19
"w"(_k16171819), // %20
"w"(_k20212223), // %21
"w"(_k24242424), // %22
"w"(_bias0) // %23
: "cc", "memory", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
}
#else
if (nn > 0)
{
asm volatile(
// r1
"pld [%4, #256] \n"
"vld1.f32 {d28-d31}, [%4] \n" // q14 q15 = r10 r14
"vmov q8, %q25 \n" // q8 = _bias0
"0: \n"
"vmov q9, %q25 \n" // q9 = _bias0
"vmla.f32 q8, q14, %e19[1] \n"
"vmla.f32 q9, q14, %e18[0] \n"
"vext.32 q12, q14, q15, #1 \n" // r11
"vmla.f32 q8, q15, %e20[1] \n"
"vmla.f32 q9, q15, %e19[0] \n"
"vext.32 q13, q14, q15, #2 \n" // r12
"vmla.f32 q8, q12, %f19[0] \n"
"vmla.f32 q9, q12, %e18[1] \n"
"vext.32 q12, q14, q15, #3 \n" // r13
"vmla.f32 q8, q13, %f19[1] \n"
"vmla.f32 q9, q13, %f18[0] \n"
// r2
"pld [%5, #256] \n"
"vld1.f32 {d20-d23}, [%5] \n" // q10 q11 = r20 r24
"vmla.f32 q8, q12, %e20[0] \n"
"vmla.f32 q9, q12, %f18[1] \n"
"add %5, #16 \n"
"vmla.f32 q8, q10, %f20[0] \n"
"vmla.f32 q9, q10, %e19[1] \n"
"vext.32 q12, q10, q11, #1 \n" // r21
"vmla.f32 q8, q11, %f21[0] \n"
"vmla.f32 q9, q11, %e20[1] \n"
"vext.32 q13, q10, q11, #2 \n" // r22
"vmla.f32 q8, q12, %f20[1] \n"
"vmla.f32 q9, q12, %f19[0] \n"
"vext.32 q12, q10, q11, #3 \n" // r23
"vmla.f32 q8, q13, %e21[0] \n"
"vmla.f32 q9, q13, %f19[1] \n"
// r3
"pld [%6, #256] \n"
"vld1.f32 {d28-d31}, [%6] \n" // q14 q15 = r30 r34
"vmla.f32 q8, q12, %e21[1] \n"
"vmla.f32 q9, q12, %e20[0] \n"
"add %6, #16 \n"
"vmla.f32 q8, q14, %f21[1] \n"
"vmla.f32 q9, q14, %f20[0] \n"
"vext.32 q12, q14, q15, #1 \n" // r31
"vmla.f32 q8, q15, %f22[1] \n"
"vmla.f32 q9, q15, %f21[0] \n"
"vext.32 q13, q14, q15, #2 \n" // r32
"vmla.f32 q8, q12, %e22[0] \n"
"vmla.f32 q9, q12, %f20[1] \n"
"vext.32 q12, q14, q15, #3 \n" // r33
"vmla.f32 q8, q13, %e22[1] \n"
"vmla.f32 q9, q13, %e21[0] \n"
// r4
"pld [%7, #256] \n"
"vld1.f32 {d20-d23}, [%7] \n" // q10 q11 = r40 r44
"vmla.f32 q8, q12, %f22[0] \n"
"vmla.f32 q9, q12, %e21[1] \n"
"add %7, #16 \n"
"vmla.f32 q8, q10, %e23[0] \n"
"vmla.f32 q9, q10, %f21[1] \n"
"vext.32 q12, q10, q11, #1 \n" // r41
"vmla.f32 q8, q11, %e24[0] \n"
"vmla.f32 q9, q11, %f22[1] \n"
"vext.32 q13, q10, q11, #2 \n" // r42
"vmla.f32 q8, q12, %e23[1] \n"
"vmla.f32 q9, q12, %e22[0] \n"
"vext.32 q12, q10, q11, #3 \n" // r43
"vmla.f32 q8, q13, %f23[0] \n"
"vmla.f32 q9, q13, %e22[1] \n"
// r0 and r5
"pld [%3, #256] \n"
"vld1.f32 {d20-d23}, [%3] \n" // q10 q11 = r00 r04
"vmla.f32 q8, q12, %f23[1] \n"
"vmla.f32 q9, q12, %f22[0] \n"
// r5
"pld [%8, #256] \n"
"vld1.f32 {d28-d31}, [%8] \n" // q14 q15 = r50 r54
"vmla.f32 q8, q10, %e18[0] \n"
"vmla.f32 q9, q14, %e23[0] \n"
"vext.32 q12, q10, q11, #1 \n" // r01
"vmla.f32 q8, q11, %e19[0] \n"
"vmla.f32 q9, q15, %e24[0] \n"
"vext.32 q13, q14, q15, #1 \n" // r51
"vmla.f32 q8, q12, %e18[1] \n"
"vext.32 q12, q10, q11, #2 \n" // r02
"vmla.f32 q9, q13, %e23[1] \n"
"vext.32 q13, q14, q15, #2 \n" // r52
"vmla.f32 q8, q12, %f18[0] \n"
"vext.32 q12, q10, q11, #3 \n" // r03
"vmla.f32 q9, q13, %f23[0] \n"
"vext.32 q13, q14, q15, #3 \n" // r53
"vmla.f32 q8, q12, %f18[1] \n"
"add %3, #16 \n"
"vmla.f32 q9, q13, %f23[1] \n"
"add %4, #16 \n"
// r1
"pld [%4, #256] \n"
"vld1.f32 {d28-d31}, [%4] \n" // q14 q15 = r10 r14
"add %8, #16 \n"
"vst1.f32 {d16-d17}, [%1]! \n"
"vmov q8, %q25 \n" // q8 = _bias0
"subs %0, #1 \n"
"vst1.f32 {d18-d19}, [%2]! \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(outptr2), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2), // %5
"=r"(r3), // %6
"=r"(r4), // %7
"=r"(r5) // %8
: "0"(nn),
"1"(outptr),
"2"(outptr2),
"3"(r0),
"4"(r1),
"5"(r2),
"6"(r3),
"7"(r4),
"8"(r5),
"w"(_k0123), // %18
"w"(_k4567), // %19
"w"(_k891011), // %20
"w"(_k12131415), // %21
"w"(_k16171819), // %22
"w"(_k20212223), // %23
"w"(_k24242424), // %24
"w"(_bias0) // %25
: "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain > 0; remain--)
{
float sum = bias0;
float sum2 = bias0;
#if __ARM_NEON
// TODO neon assembly optimize
float32x4_t _r1 = vld1q_f32(r1);
float32x4_t _k1 = vld1q_f32(k1);
float32x4_t _sum = vmulq_f32(_r1, _k1);
float32x4_t _sum2 = vmulq_f32(_r1, _k0123);
float32x4_t _r2 = vld1q_f32(r2);
float32x4_t _k2 = vld1q_f32(k2);
_sum = vmlaq_f32(_sum, _r2, _k2);
_sum2 = vmlaq_f32(_sum2, _r2, _k1);
float32x4_t _r3 = vld1q_f32(r3);
float32x4_t _k3 = vld1q_f32(k3);
_sum = vmlaq_f32(_sum, _r3, _k3);
_sum2 = vmlaq_f32(_sum2, _r3, _k2);
float32x4_t _r4 = vld1q_f32(r4);
_sum = vmlaq_f32(_sum, _r4, _k20212223);
_sum2 = vmlaq_f32(_sum2, _r4, _k3);
float32x4_t _r0 = vld1q_f32(r0);
_sum = vmlaq_f32(_sum, _r0, _k0123);
float32x4_t _r5 = vld1q_f32(r5);
_sum2 = vmlaq_f32(_sum2, _r5, _k20212223);
float32x4_t _k_t4 = {};
_k_t4 = vsetq_lane_f32(k0[4], _k_t4, 0);
_k_t4 = vsetq_lane_f32(k1[4], _k_t4, 1);
_k_t4 = vsetq_lane_f32(k2[4], _k_t4, 2);
_k_t4 = vsetq_lane_f32(k3[4], _k_t4, 3);
float32x4_t _r_t4 = {};
_r_t4 = vsetq_lane_f32(r0[4], _r_t4, 0);
_r_t4 = vsetq_lane_f32(r1[4], _r_t4, 1);
_r_t4 = vsetq_lane_f32(r2[4], _r_t4, 2);
_r_t4 = vsetq_lane_f32(r3[4], _r_t4, 3);
_sum = vmlaq_f32(_sum, _r_t4, _k_t4);
sum += r4[4] * k4[4];
_r_t4 = vextq_f32(_r_t4, _r_t4, 1);
_r_t4 = vsetq_lane_f32(r4[4], _r_t4, 3);
_sum2 = vmlaq_f32(_sum2, _r_t4, _k_t4);
sum2 += r5[4] * k4[4];
float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
float32x2_t _ss2 = vadd_f32(vget_low_f32(_sum2), vget_high_f32(_sum2));
float32x2_t _ss_ss2 = vpadd_f32(_ss, _ss2);
sum += vget_lane_f32(_ss_ss2, 0);
sum2 += vget_lane_f32(_ss_ss2, 1);
#else
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r0[3] * k0[3];
sum += r0[4] * k0[4];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r1[3] * k1[3];
sum += r1[4] * k1[4];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum += r2[3] * k2[3];
sum += r2[4] * k2[4];
sum += r3[0] * k3[0];
sum += r3[1] * k3[1];
sum += r3[2] * k3[2];
sum += r3[3] * k3[3];
sum += r3[4] * k3[4];
sum += r4[0] * k4[0];
sum += r4[1] * k4[1];
sum += r4[2] * k4[2];
sum += r4[3] * k4[3];
sum += r4[4] * k4[4];
sum2 += r1[0] * k0[0];
sum2 += r1[1] * k0[1];
sum2 += r1[2] * k0[2];
sum2 += r1[3] * k0[3];
sum2 += r1[4] * k0[4];
sum2 += r2[0] * k1[0];
sum2 += r2[1] * k1[1];
sum2 += r2[2] * k1[2];
sum2 += r2[3] * k1[3];
sum2 += r2[4] * k1[4];
sum2 += r3[0] * k2[0];
sum2 += r3[1] * k2[1];
sum2 += r3[2] * k2[2];
sum2 += r3[3] * k2[3];
sum2 += r3[4] * k2[4];
sum2 += r4[0] * k3[0];
sum2 += r4[1] * k3[1];
sum2 += r4[2] * k3[2];
sum2 += r4[3] * k3[3];
sum2 += r4[4] * k3[4];
sum2 += r5[0] * k4[0];
sum2 += r5[1] * k4[1];
sum2 += r5[2] * k4[2];
sum2 += r5[3] * k4[3];
sum2 += r5[4] * k4[4];
#endif // __ARM_NEON
*outptr = sum;
*outptr2 = sum2;
r0++;
r1++;
r2++;
r3++;
r4++;
r5++;
outptr++;
outptr2++;
}
r0 += 4 + w;
r1 += 4 + w;
r2 += 4 + w;
r3 += 4 + w;
r4 += 4 + w;
r5 += 4 + w;
outptr += outw;
outptr2 += outw;
}
for (; i < outh; i++)
{
#if __ARM_NEON
#if __aarch64__
int nn = outw >> 3;
int remain = outw & 7;
#else
int nn = outw >> 2;
int remain = outw & 3;
#endif // __aarch64__
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
// v10 v11
// r0
"prfm pldl1keep, [%2, #384] \n"
"ld1 {v16.4s, v17.4s, v18.4s}, [%2] \n" // v16 v17 v18 = r00 r04 r08
"mov v8.16b, %21.16b \n" // v8 = _bias0
"mov v9.16b, %21.16b \n" // v9 = _bias0
"0: \n"
"fmul v10.4s, v16.4s, %14.s[0] \n"
"ext v19.16b, v16.16b, v17.16b, #4 \n" // r01
"fmul v11.4s, v17.4s, %14.s[0] \n"
"ext v20.16b, v17.16b, v18.16b, #4 \n" // r05
"fmla v8.4s, v17.4s, %15.s[0] \n"
"ext v21.16b, v16.16b, v17.16b, #8 \n" // r02
"fmla v9.4s, v18.4s, %15.s[0] \n"
"ext v22.16b, v17.16b, v18.16b, #8 \n" // r06
"fmla v10.4s, v19.4s, %14.s[1] \n"
"ext v19.16b, v16.16b, v17.16b, #12 \n" // r03
"fmla v11.4s, v20.4s, %14.s[1] \n"
"ext v20.16b, v17.16b, v18.16b, #12 \n" // r07
"fmla v8.4s, v21.4s, %14.s[2] \n"
"fmla v9.4s, v22.4s, %14.s[2] \n"
// r1
"prfm pldl1keep, [%3, #384] \n"
"ld1 {v12.4s, v13.4s, v14.4s}, [%3] \n" // v12 v13 v14 = r10 r14 r18
"fmla v10.4s, v19.4s, %14.s[3] \n"
"fmla v11.4s, v20.4s, %14.s[3] \n"
"fmla v8.4s, v12.4s, %15.s[1] \n"
"ext v19.16b, v12.16b, v13.16b, #4 \n" // r11
"fmla v9.4s, v13.4s, %15.s[1] \n"
"ext v20.16b, v13.16b, v14.16b, #4 \n" // r15
"fmla v10.4s, v13.4s, %16.s[1] \n"
"ext v21.16b, v12.16b, v13.16b, #8 \n" // r12
"fmla v11.4s, v14.4s, %16.s[1] \n"
"ext v22.16b, v13.16b, v14.16b, #8 \n" // r16
"fmla v8.4s, v19.4s, %15.s[2] \n"
"ext v19.16b, v12.16b, v13.16b, #12 \n" // r13
"fmla v9.4s, v20.4s, %15.s[2] \n"
"ext v20.16b, v13.16b, v14.16b, #12 \n" // r17
"fmla v10.4s, v21.4s, %15.s[3] \n"
"fmla v11.4s, v22.4s, %15.s[3] \n"
// r2
"prfm pldl1keep, [%4, #384] \n"
"ld1 {v16.4s, v17.4s, v18.4s}, [%4] \n" // v16 v17 v18 = r20 r24 r28
"fmla v8.4s, v19.4s, %16.s[0] \n"
"fmla v9.4s, v20.4s, %16.s[0] \n"
"fmla v10.4s, v16.4s, %16.s[2] \n"
"ext v19.16b, v16.16b, v17.16b, #4 \n" // r21
"fmla v11.4s, v17.4s, %16.s[2] \n"
"ext v20.16b, v17.16b, v18.16b, #4 \n" // r25
"fmla v8.4s, v17.4s, %17.s[2] \n"
"ext v21.16b, v16.16b, v17.16b, #8 \n" // r22
"fmla v9.4s, v18.4s, %17.s[2] \n"
"ext v22.16b, v17.16b, v18.16b, #8 \n" // r26
"fmla v10.4s, v19.4s, %16.s[3] \n"
"ext v19.16b, v16.16b, v17.16b, #12 \n" // r23
"fmla v11.4s, v20.4s, %16.s[3] \n"
"ext v20.16b, v17.16b, v18.16b, #12 \n" // r27
"fmla v8.4s, v21.4s, %17.s[0] \n"
"fmla v9.4s, v22.4s, %17.s[0] \n"
// r3
"prfm pldl1keep, [%5, #384] \n"
"ld1 {v12.4s, v13.4s, v14.4s}, [%5] \n" // v12 v13 v14 = r30 r34 r38
"fmla v10.4s, v19.4s, %17.s[1] \n"
"fmla v11.4s, v20.4s, %17.s[1] \n"
"fmla v8.4s, v12.4s, %17.s[3] \n"
"ext v19.16b, v12.16b, v13.16b, #4 \n" // r31
"fmla v9.4s, v13.4s, %17.s[3] \n"
"ext v20.16b, v13.16b, v14.16b, #4 \n" // r35
"fmla v10.4s, v13.4s, %18.s[3] \n"
"ext v21.16b, v12.16b, v13.16b, #8 \n" // r32
"fmla v11.4s, v14.4s, %18.s[3] \n"
"ext v22.16b, v13.16b, v14.16b, #8 \n" // r36
"fmla v8.4s, v19.4s, %18.s[0] \n"
"ext v19.16b, v12.16b, v13.16b, #12 \n" // r33
"fmla v9.4s, v20.4s, %18.s[0] \n"
"ext v20.16b, v13.16b, v14.16b, #12 \n" // r37
"fmla v10.4s, v21.4s, %18.s[1] \n"
"fmla v11.4s, v22.4s, %18.s[1] \n"
// r4
"prfm pldl1keep, [%6, #384] \n"
"ld1 {v16.4s, v17.4s, v18.4s}, [%6] \n" // v16 v17 v18 = r40 r44 r48
"fmla v8.4s, v19.4s, %18.s[2] \n"
"fmla v9.4s, v20.4s, %18.s[2] \n"
"fmla v10.4s, v16.4s, %19.s[0] \n"
"ext v19.16b, v16.16b, v17.16b, #4 \n" // r41
"fmla v11.4s, v17.4s, %19.s[0] \n"
"ext v20.16b, v17.16b, v18.16b, #4 \n" // r45
"fmla v8.4s, v17.4s, %20.s[0] \n"
"ext v21.16b, v16.16b, v17.16b, #8 \n" // r42
"fmla v9.4s, v18.4s, %20.s[0] \n"
"ext v22.16b, v17.16b, v18.16b, #8 \n" // r46
"fmla v10.4s, v19.4s, %19.s[1] \n"
"ext v19.16b, v16.16b, v17.16b, #12 \n" // r43
"fmla v11.4s, v20.4s, %19.s[1] \n"
"ext v20.16b, v17.16b, v18.16b, #12 \n" // r47
"fmla v8.4s, v21.4s, %19.s[2] \n"
"add %2, %2, #32 \n"
"fmla v9.4s, v22.4s, %19.s[2] \n"
"add %3, %3, #32 \n"
"fmla v10.4s, v19.4s, %19.s[3] \n"
"add %4, %4, #32 \n"
"fmla v11.4s, v20.4s, %19.s[3] \n"
// r0
"prfm pldl1keep, [%2, #384] \n"
"ld1 {v16.4s, v17.4s, v18.4s}, [%2] \n" // v16 v17 v18 = r00 r04 r08
"add %5, %5, #32 \n"
"fadd v10.4s, v8.4s, v10.4s \n"
"add %6, %6, #32 \n"
"fadd v11.4s, v9.4s, v11.4s \n"
"mov v8.16b, %21.16b \n" // v8 = _bias0
"mov v9.16b, %21.16b \n" // v9 = _bias0
"subs %w0, %w0, #1 \n"
"st1 {v10.4s, v11.4s}, [%1], #32 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4) // %6
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"w"(_k0123), // %14
"w"(_k4567), // %15
"w"(_k891011), // %16
"w"(_k12131415), // %17
"w"(_k16171819), // %18
"w"(_k20212223), // %19
"w"(_k24242424), // %20
"w"(_bias0) // %21
: "cc", "memory", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v16", "v17", "v18", "v19", "v20", "v21", "v22");
}
if (remain >= 4)
{
remain -= 4;
asm volatile(
// r0
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v16.4s, v17.4s}, [%1] \n" // v16 v17 = r00 r04
"mov v8.16b, %19.16b \n" // v8 = _bias0
"add %1, %1, #16 \n"
"fmul v9.4s, v16.4s, %12.s[0] \n"
"ext v18.16b, v16.16b, v17.16b, #4 \n" // r01
"fmla v8.4s, v17.4s, %13.s[0] \n"
"ext v19.16b, v16.16b, v17.16b, #8 \n" // r02
"fmla v9.4s, v18.4s, %12.s[1] \n"
"ext v20.16b, v16.16b, v17.16b, #12 \n" // r03
"fmla v8.4s, v19.4s, %12.s[2] \n"
// r1
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v10.4s, v11.4s}, [%2] \n" // v10 v11 = r10 r14
"fmla v9.4s, v20.4s, %12.s[3] \n"
"add %2, %2, #16 \n"
"fmla v8.4s, v10.4s, %13.s[1] \n"
"ext v12.16b, v10.16b, v11.16b, #4 \n" // r11
"fmla v9.4s, v11.4s, %14.s[1] \n"
"ext v13.16b, v10.16b, v11.16b, #8 \n" // r12
"fmla v8.4s, v12.4s, %13.s[2] \n"
"ext v14.16b, v10.16b, v11.16b, #12 \n" // r13
"fmla v9.4s, v13.4s, %13.s[3] \n"
// r2
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v16.4s, v17.4s}, [%3] \n" // v16 v17 = r20 r24
"fmla v8.4s, v14.4s, %14.s[0] \n"
"add %3, %3, #16 \n"
"fmla v9.4s, v16.4s, %14.s[2] \n"
"ext v18.16b, v16.16b, v17.16b, #4 \n" // r21
"fmla v8.4s, v17.4s, %15.s[2] \n"
"ext v19.16b, v16.16b, v17.16b, #8 \n" // r22
"fmla v9.4s, v18.4s, %14.s[3] \n"
"ext v20.16b, v16.16b, v17.16b, #12 \n" // r23
"fmla v8.4s, v19.4s, %15.s[0] \n"
// r3
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v10.4s, v11.4s}, [%4] \n" // v10 v11 = r30 r34
"fmla v9.4s, v20.4s, %15.s[1] \n"
"add %4, %4, #16 \n"
"fmla v8.4s, v10.4s, %15.s[3] \n"
"ext v12.16b, v10.16b, v11.16b, #4 \n" // r31
"fmla v9.4s, v11.4s, %16.s[3] \n"
"ext v13.16b, v10.16b, v11.16b, #8 \n" // r32
"fmla v8.4s, v12.4s, %16.s[0] \n"
"ext v14.16b, v10.16b, v11.16b, #12 \n" // r33
"fmla v9.4s, v13.4s, %16.s[1] \n"
// r4
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v16.4s, v17.4s}, [%5] \n" // v16 v17 = r40 r44
"fmla v8.4s, v14.4s, %16.s[2] \n"
"add %5, %5, #16 \n"
"fmla v9.4s, v16.4s, %17.s[0] \n"
"ext v18.16b, v16.16b, v17.16b, #4 \n" // r41
"fmla v8.4s, v17.4s, %18.s[0] \n"
"ext v19.16b, v16.16b, v17.16b, #8 \n" // r42
"fmla v9.4s, v18.4s, %17.s[1] \n"
"ext v20.16b, v16.16b, v17.16b, #12 \n" // r43
"fmla v8.4s, v19.4s, %17.s[2] \n"
"fmla v9.4s, v20.4s, %17.s[3] \n"
"fadd v8.4s, v8.4s, v9.4s \n"
"st1 {v8.4s}, [%0], #16 \n"
: "=r"(outptr), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4) // %5
: "0"(outptr),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"w"(_k0123), // %12
"w"(_k4567), // %13
"w"(_k891011), // %14
"w"(_k12131415), // %15
"w"(_k16171819), // %16
"w"(_k20212223), // %17
"w"(_k24242424), // %18
"w"(_bias0) // %19
: "cc", "memory", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v16", "v17", "v18", "v19", "v20");
}
#else
if (nn > 0)
{
asm volatile(
// r0
"pld [%2, #256] \n"
"vld1.f32 {d20-d23}, [%2] \n" // q10 q11 = r00 r04
"vmov q8, %q21 \n" // q8 = _bias0
"0: \n"
"vmul.f32 q9, q10, %e14[0] \n"
"vext.32 q12, q10, q11, #1 \n" // r01
"vmla.f32 q8, q11, %e15[0] \n"
"vext.32 q13, q10, q11, #2 \n" // r02
"vmla.f32 q9, q12, %e14[1] \n"
"vext.32 q12, q10, q11, #3 \n" // r03
"vmla.f32 q8, q13, %f14[0] \n"
// r1
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3] \n" // q14 q15 = r10 r14
"vmla.f32 q9, q12, %f14[1] \n"
"add %3, #16 \n"
"vmla.f32 q8, q14, %e15[1] \n"
"vext.32 q12, q14, q15, #1 \n" // r11
"vmla.f32 q9, q15, %e16[1] \n"
"vext.32 q13, q14, q15, #2 \n" // r12
"vmla.f32 q8, q12, %f15[0] \n"
"vext.32 q12, q14, q15, #3 \n" // r13
"vmla.f32 q9, q13, %f15[1] \n"
// r2
"pld [%4, #256] \n"
"vld1.f32 {d20-d23}, [%4] \n" // q10 q11 = r20 r24
"vmla.f32 q8, q12, %e16[0] \n"
"add %4, #16 \n"
"vmla.f32 q9, q10, %f16[0] \n"
"vext.32 q12, q10, q11, #1 \n" // r21
"vmla.f32 q8, q11, %f17[0] \n"
"vext.32 q13, q10, q11, #2 \n" // r22
"vmla.f32 q9, q12, %f16[1] \n"
"vext.32 q12, q10, q11, #3 \n" // r23
"vmla.f32 q8, q13, %e17[0] \n"
// r3
"pld [%5, #256] \n"
"vld1.f32 {d28-d31}, [%5] \n" // q14 q15 = r30 r34
"vmla.f32 q9, q12, %e17[1] \n"
"add %5, #16 \n"
"vmla.f32 q8, q14, %f17[1] \n"
"vext.32 q12, q14, q15, #1 \n" // r31
"vmla.f32 q9, q15, %f18[1] \n"
"vext.32 q13, q14, q15, #2 \n" // r32
"vmla.f32 q8, q12, %e18[0] \n"
"vext.32 q12, q14, q15, #3 \n" // r33
"vmla.f32 q9, q13, %e18[1] \n"
// r4
"pld [%6, #256] \n"
"vld1.f32 {d20-d23}, [%6] \n" // q10 q11 = r40 r44
"vmla.f32 q8, q12, %f18[0] \n"
"add %6, #16 \n"
"vmla.f32 q9, q10, %e19[0] \n"
"vext.32 q12, q10, q11, #1 \n" // r41
"vmla.f32 q8, q11, %e20[0] \n"
"vext.32 q13, q10, q11, #2 \n" // r42
"vmla.f32 q9, q12, %e19[1] \n"
"vext.32 q12, q10, q11, #3 \n" // r43
"vmla.f32 q8, q13, %f19[0] \n"
"add %2, #16 \n"
"vmla.f32 q9, q12, %f19[1] \n"
// r0
"pld [%2, #256] \n"
"vld1.f32 {d20-d23}, [%2] \n" // q10 q11 = r00 r04
"vadd.f32 q9, q9, q8 \n"
"vmov q8, %q21 \n" // q8 = _bias0
"subs %0, #1 \n"
"vst1.f32 {d18-d19}, [%1]! \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4) // %6
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"w"(_k0123), // %14
"w"(_k4567), // %15
"w"(_k891011), // %16
"w"(_k12131415), // %17
"w"(_k16171819), // %18
"w"(_k20212223), // %19
"w"(_k24242424), // %20
"w"(_bias0) // %21
: "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain > 0; remain--)
{
#if __ARM_NEON
#if __aarch64__
// TODO neon assembly optimize
float sum = bias0;
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _sum = vmulq_f32(_r0, _k0123);
float32x4_t _r1 = vld1q_f32(r1);
_sum = vmlaq_f32(_sum, _r1, vld1q_f32(k1));
float32x4_t _r2 = vld1q_f32(r2);
_sum = vmlaq_f32(_sum, _r2, vld1q_f32(k2));
float32x4_t _r3 = vld1q_f32(r3);
_sum = vmlaq_f32(_sum, _r3, vld1q_f32(k3));
float32x4_t _r4 = vld1q_f32(r4);
_sum = vmlaq_f32(_sum, _r4, _k20212223);
float32x4_t _k_t4 = {};
_k_t4 = vsetq_lane_f32(k0[4], _k_t4, 0);
_k_t4 = vsetq_lane_f32(k1[4], _k_t4, 1);
_k_t4 = vsetq_lane_f32(k2[4], _k_t4, 2);
_k_t4 = vsetq_lane_f32(k3[4], _k_t4, 3);
float32x4_t _r_t4 = {};
_r_t4 = vsetq_lane_f32(r0[4], _r_t4, 0);
_r_t4 = vsetq_lane_f32(r1[4], _r_t4, 1);
_r_t4 = vsetq_lane_f32(r2[4], _r_t4, 2);
_r_t4 = vsetq_lane_f32(r3[4], _r_t4, 3);
_sum = vmlaq_f32(_sum, _r_t4, _k_t4);
sum += r4[4] * k4[4];
float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
_ss = vpadd_f32(_ss, _ss);
sum += vget_lane_f32(_ss, 0);
*outptr = sum;
r0++;
r1++;
r2++;
r3++;
r4++;
outptr++;
#else
// TODO neon assembly optimize
asm volatile(
"veor q14, q14 \n"
"vext.32 q14, %q19, q14, #3 \n" // q14 = bias0 0 0 0
"vld1.f32 {d16-d17}, [%1] \n" // q8 = r00 r01 r02 r03
"vld1.f32 {d18-d19}, [%2] \n" // q9 = r10 r11 r12 r13(X)
"add r4, %1, #16 \n"
"vld1.f32 {d19[1]}, [r4] \n"
"vext.32 q9, q9, q9, #3 \n" // q9 = r04 r10 r11 r12
"vmla.f32 q14, q8, %q12 \n"
"add r4, %2, #12 \n"
"vld1.f32 {d20}, [r4] \n" // d20 = r13 r14
"vld1.f32 {d21}, [%3] \n" // d21 = r20 r21
"vmla.f32 q14, q9, %q13 \n"
"add r4, %3, #8 \n"
"vld1.f32 {d22-d23}, [r4] \n" // q11 = r22 r23 r24 X
"vld1.f32 {d23[1]}, [%4] \n" // q11 = r22 r23 r24 r30
"vmla.f32 q14, q10, %q14 \n"
"add r4, %4, #4 \n"
"vld1.f32 {d24-d25}, [r4] \n" // q12 = r31 r32 r33 r34
"vmla.f32 q14, q11, %q15 \n"
"vld1.f32 {d26-d27}, [%5] \n" // q13 = r40 r41 r42 r43
"vmla.f32 q14, q12, %q16 \n"
"veor d30, d30 \n"
"add r4, %5, #16 \n"
"vld1.f32 {d30[0]}, [r4] \n" // d30 = r44 0
"vmla.f32 q14, q13, %q17 \n"
"vmla.f32 d28, d30, %e18 \n"
"add %1, #4 \n"
// h-sum
"vadd.f32 d28, d28, d29 \n"
"add %2, #4 \n"
"add %3, #4 \n"
"vpadd.f32 d28, d28, d28 \n"
"add %4, #4 \n"
"add %5, #4 \n"
"vst1.f32 {d28[0]}, [%0]! \n"
: "=r"(outptr), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4) // %5
: "0"(outptr),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"w"(_k0123), // %12
"w"(_k4567), // %13
"w"(_k891011), // %14
"w"(_k12131415), // %15
"w"(_k16171819), // %16
"w"(_k20212223), // %17
"w"(_k24242424), // %18
"w"(_bias0) // %19
: "cc", "memory", "r4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
#else
float sum = bias0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r0[3] * k0[3];
sum += r0[4] * k0[4];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r1[3] * k1[3];
sum += r1[4] * k1[4];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum += r2[3] * k2[3];
sum += r2[4] * k2[4];
sum += r3[0] * k3[0];
sum += r3[1] * k3[1];
sum += r3[2] * k3[2];
sum += r3[3] * k3[3];
sum += r3[4] * k3[4];
sum += r4[0] * k4[0];
sum += r4[1] * k4[1];
sum += r4[2] * k4[2];
sum += r4[3] * k4[3];
sum += r4[4] * k4[4];
*outptr = sum;
r0++;
r1++;
r2++;
r3++;
r4++;
outptr++;
#endif
}
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
r4 += 4;
}
}
}
static void convdw5x5s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
//int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
//int outch = top_blob.c;
const int tailstep = w - 2 * outw + w;
const int group = bottom_blob.c;
const float* kernel = _kernel;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat out = top_blob.channel(g);
const float bias0 = bias ? bias[g] : 0.f;
const float* kernel0 = kernel + g * 25;
float* outptr = out;
const float* img0 = bottom_blob.channel(g);
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w * 2;
const float* r3 = img0 + w * 3;
const float* r4 = img0 + w * 4;
const float* k0 = kernel0;
const float* k1 = kernel0 + 5;
const float* k2 = kernel0 + 10;
const float* k3 = kernel0 + 15;
const float* k4 = kernel0 + 20;
#if __ARM_NEON
float32x4_t _k0123 = vld1q_f32(kernel0);
float32x4_t _k4567 = vld1q_f32(kernel0 + 4);
float32x4_t _k891011 = vld1q_f32(kernel0 + 8);
float32x4_t _k12131415 = vld1q_f32(kernel0 + 12);
float32x4_t _k16171819 = vld1q_f32(kernel0 + 16);
float32x4_t _k20212223 = vld1q_f32(kernel0 + 20);
float32x4_t _k24242424 = vdupq_n_f32(kernel0[24]);
float32x4_t _bias0 = vdupq_n_f32(bias0);
#endif // __ARM_NEON
int i = 0;
// NOTE unroll outh 2 results somewhat speed drop :| (about -4%)
// so we do not implement it here
for (; i < outh; i++)
{
#if __ARM_NEON
#if __aarch64__
int nn = outw >> 3;
int remain = outw & 7;
#else
int nn = outw >> 2;
int remain = outw & 3;
#endif // __aarch64__
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
// r0
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v16.4s, v17.4s}, [%2], #32 \n" // v16 v17 = r00 r01
"mov v8.16b, %21.16b \n" // v8 = _bias0
"mov v9.16b, %21.16b \n" // v9 = _bias0
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v18.4s, v19.4s}, [%2], #32 \n" // v18 v19 = r08 r09
"0: \n"
"fmul v10.4s, v16.4s, %14.s[0] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v20.4s, v21.4s}, [%2] \n" // v20 v21 = r016 r017
"fmul v11.4s, v18.4s, %14.s[0] \n"
"ext v22.16b, v16.16b, v18.16b, #4 \n" // v22 = r02
"fmla v8.4s, v17.4s, %14.s[1] \n"
"ext v25.16b, v18.16b, v20.16b, #4 \n" // v25 = r010
"fmla v9.4s, v19.4s, %14.s[1] \n"
"ext v23.16b, v17.16b, v19.16b, #4 \n" // v23 = r03
"fmla v10.4s, v22.4s, %14.s[2] \n"
"ext v26.16b, v19.16b, v21.16b, #4 \n" // v26 = r011
"fmla v11.4s, v25.4s, %14.s[2] \n"
"ext v24.16b, v16.16b, v18.16b, #8 \n" // v24 = r04
"fmla v8.4s, v23.4s, %14.s[3] \n"
"ext v27.16b, v18.16b, v20.16b, #8 \n" // v27 = r012
"fmla v9.4s, v26.4s, %14.s[3] \n"
// r1
"prfm pldl1keep, [%3, #256] \n"
"ld2 {v12.4s, v13.4s}, [%3], #32 \n" // v12 v13 = r10 r11
"fmla v10.4s, v24.4s, %15.s[0] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld2 {v14.4s, v15.4s}, [%3], #32 \n" // v14 v15 = r18 r19
"fmla v11.4s, v27.4s, %15.s[0] \n"
"fmla v8.4s, v12.4s, %15.s[1] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld2 {v20.4s, v21.4s}, [%3] \n" // v20 v21 = r116 r117
"fmla v9.4s, v14.4s, %15.s[1] \n"
"ext v22.16b, v12.16b, v14.16b, #4 \n" // v22 = r12
"fmla v10.4s, v13.4s, %15.s[2] \n"
"ext v25.16b, v14.16b, v20.16b, #4 \n" // v25 = r110
"fmla v11.4s, v15.4s, %15.s[2] \n"
"ext v23.16b, v13.16b, v15.16b, #4 \n" // v23 = r13
"fmla v8.4s, v22.4s, %15.s[3] \n"
"ext v26.16b, v15.16b, v21.16b, #4 \n" // v26 = r111
"fmla v9.4s, v25.4s, %15.s[3] \n"
"ext v24.16b, v12.16b, v14.16b, #8 \n" // v24 = r14
"fmla v10.4s, v23.4s, %16.s[0] \n"
"ext v27.16b, v14.16b, v20.16b, #8 \n" // v27 = r112
"fmla v11.4s, v26.4s, %16.s[0] \n"
// r2
"prfm pldl1keep, [%4, #256] \n"
"ld2 {v16.4s, v17.4s}, [%4], #32 \n" // v16 v17 = r20 r21
"fmla v8.4s, v24.4s, %16.s[1] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld2 {v18.4s, v19.4s}, [%4], #32 \n" // v18 v19 = r28 r29
"fmla v9.4s, v27.4s, %16.s[1] \n"
"fmla v10.4s, v16.4s, %16.s[2] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld2 {v20.4s, v21.4s}, [%4] \n" // v20 v21 = r216 r217
"fmla v11.4s, v18.4s, %16.s[2] \n"
"ext v22.16b, v16.16b, v18.16b, #4 \n" // v22 = r22
"fmla v8.4s, v17.4s, %16.s[3] \n"
"ext v25.16b, v18.16b, v20.16b, #4 \n" // v25 = r210
"fmla v9.4s, v19.4s, %16.s[3] \n"
"ext v23.16b, v17.16b, v19.16b, #4 \n" // v23 = r23
"fmla v10.4s, v22.4s, %17.s[0] \n"
"ext v26.16b, v19.16b, v21.16b, #4 \n" // v26 = r211
"fmla v11.4s, v25.4s, %17.s[0] \n"
"ext v24.16b, v16.16b, v18.16b, #8 \n" // v24 = r24
"fmla v8.4s, v23.4s, %17.s[1] \n"
"ext v27.16b, v18.16b, v20.16b, #8 \n" // v27 = r212
"fmla v9.4s, v26.4s, %17.s[1] \n"
// r3
"prfm pldl1keep, [%5, #256] \n"
"ld2 {v12.4s, v13.4s}, [%5], #32 \n" // v12 v13 = r30 r31
"fmla v10.4s, v24.4s, %17.s[2] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld2 {v14.4s, v15.4s}, [%5], #32 \n" // v14 v15 = r38 r39
"fmla v11.4s, v27.4s, %17.s[2] \n"
"fmla v8.4s, v12.4s, %17.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld2 {v20.4s, v21.4s}, [%5] \n" // v20 v21 = r316 r317
"fmla v9.4s, v14.4s, %17.s[3] \n"
"ext v22.16b, v12.16b, v14.16b, #4 \n" // v22 = r32
"fmla v10.4s, v13.4s, %18.s[0] \n"
"ext v25.16b, v14.16b, v20.16b, #4 \n" // v25 = r310
"fmla v11.4s, v15.4s, %18.s[0] \n"
"ext v23.16b, v13.16b, v15.16b, #4 \n" // v23 = r33
"fmla v8.4s, v22.4s, %18.s[1] \n"
"ext v26.16b, v15.16b, v21.16b, #4 \n" // v26 = r311
"fmla v9.4s, v25.4s, %18.s[1] \n"
"ext v24.16b, v12.16b, v14.16b, #8 \n" // v24 = r34
"fmla v10.4s, v23.4s, %18.s[2] \n"
"ext v27.16b, v14.16b, v20.16b, #8 \n" // v27 = r312
"fmla v11.4s, v26.4s, %18.s[2] \n"
// r4
"prfm pldl1keep, [%6, #256] \n"
"ld2 {v16.4s, v17.4s}, [%6], #32 \n" // v16 v17 = r40 r41
"fmla v8.4s, v24.4s, %18.s[3] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld2 {v18.4s, v19.4s}, [%6], #32 \n" // v18 v19 = r48 r49
"fmla v9.4s, v27.4s, %18.s[3] \n"
"fmla v10.4s, v16.4s, %19.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld2 {v20.4s, v21.4s}, [%6] \n" // v20 v21 = r416 r417
"fmla v11.4s, v18.4s, %19.s[0] \n"
"ext v22.16b, v16.16b, v18.16b, #4 \n" // v22 = r42
"fmla v8.4s, v17.4s, %19.s[1] \n"
"ext v25.16b, v18.16b, v20.16b, #4 \n" // v25 = r410
"fmla v9.4s, v19.4s, %19.s[1] \n"
"ext v23.16b, v17.16b, v19.16b, #4 \n" // v23 = r43
"fmla v10.4s, v22.4s, %19.s[2] \n"
"ext v26.16b, v19.16b, v21.16b, #4 \n" // v26 = r411
"fmla v11.4s, v25.4s, %19.s[2] \n"
"ext v24.16b, v16.16b, v18.16b, #8 \n" // v24 = r44
"fmla v8.4s, v23.4s, %19.s[3] \n"
"ext v27.16b, v18.16b, v20.16b, #8 \n" // v27 = r412
"fmla v9.4s, v26.4s, %19.s[3] \n"
"fmla v10.4s, v24.4s, %20.s[0] \n"
// r0
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v16.4s, v17.4s}, [%2], #32 \n" // v16 v17 = r00 r01
"fmla v11.4s, v27.4s, %20.s[0] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v18.4s, v19.4s}, [%2], #32 \n" // v18 v19 = r08 r09
"fadd v10.4s, v8.4s, v10.4s \n"
"fadd v11.4s, v9.4s, v11.4s \n"
"subs %w0, %w0, #1 \n"
"mov v8.16b, %21.16b \n" // v8 = _bias0
"mov v9.16b, %21.16b \n" // v9 = _bias0
"st1 {v10.4s, v11.4s}, [%1], #32 \n"
"bne 0b \n"
"sub %2, %2, #64 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4) // %6
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"w"(_k0123), // %14
"w"(_k4567), // %15
"w"(_k891011), // %16
"w"(_k12131415), // %17
"w"(_k16171819), // %18
"w"(_k20212223), // %19
"w"(_k24242424), // %20
"w"(_bias0) // %21
: "cc", "memory", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
}
#else
if (nn > 0)
{
asm volatile(
// r0
"pld [%2, #256] \n"
"vld2.f32 {d20-d23}, [%2]! \n" // q10 q11 = r00 r01
"vmov q8, %q21 \n"
"pld [%2, #128] \n"
"vld2.f32 {d24-d25}, [%2] \n" // q12 = r08 x x
"0: \n"
"vmul.f32 q9, q10, %e14[0] \n"
"vmov d26, d25 \n" // q13 = r09 x x
"vext.32 q14, q10, q12, #1 \n" // q14 = r02
"vmla.f32 q8, q11, %e14[1] \n"
"vext.32 q15, q11, q13, #1 \n" // q15 = r03
"vmla.f32 q9, q14, %f14[0] \n"
"vext.32 q14, q10, q12, #2 \n" // q14 = r04
"vmla.f32 q8, q15, %f14[1] \n"
// r1
"pld [%3, #256] \n"
"vld2.f32 {d20-d23}, [%3]! \n" // q10 q11 = r10 r11
"vmla.f32 q9, q14, %e15[0] \n"
"pld [%3, #128] \n"
"vld2.f32 {d24-d25}, [%3] \n" // q12 = r18 x x
"vmla.f32 q8, q10, %e15[1] \n"
"vmov d26, d25 \n" // q13 = r19 x x
"vext.32 q14, q10, q12, #1 \n" // q14 = r12
"vmla.f32 q9, q11, %f15[0] \n"
"vext.32 q15, q11, q13, #1 \n" // q15 = r13
"vmla.f32 q8, q14, %f15[1] \n"
"vext.32 q14, q10, q12, #2 \n" // q14 = r14
"vmla.f32 q9, q15, %e16[0] \n"
// r2
"pld [%4, #256] \n"
"vld2.f32 {d20-d23}, [%4]! \n" // q10 q11 = r20 r21
"vmla.f32 q8, q14, %e16[1] \n"
"pld [%4, #128] \n"
"vld2.f32 {d24-d25}, [%4] \n" // q12 = r28 x x
"vmla.f32 q9, q10, %f16[0] \n"
"vmov d26, d25 \n" // q13 = r29 x x
"vext.32 q14, q10, q12, #1 \n" // q14 = r22
"vmla.f32 q8, q11, %f16[1] \n"
"vext.32 q15, q11, q13, #1 \n" // q15 = r23
"vmla.f32 q9, q14, %e17[0] \n"
"vext.32 q14, q10, q12, #2 \n" // q14 = r24
"vmla.f32 q8, q15, %e17[1] \n"
// r3
"pld [%5, #256] \n"
"vld2.f32 {d20-d23}, [%5]! \n" // q10 q11 = r30 r31
"vmla.f32 q9, q14, %f17[0] \n"
"pld [%5, #128] \n"
"vld2.f32 {d24-d25}, [%5] \n" // q12 = r38 x x
"vmla.f32 q8, q10, %f17[1] \n"
"vmov d26, d25 \n" // q13 = r39 x x
"vext.32 q14, q10, q12, #1 \n" // q14 = r32
"vmla.f32 q9, q11, %e18[0] \n"
"vext.32 q15, q11, q13, #1 \n" // q15 = r33
"vmla.f32 q8, q14, %e18[1] \n"
"vext.32 q14, q10, q12, #2 \n" // q14 = r34
"vmla.f32 q9, q15, %f18[0] \n"
// r4
"pld [%6, #256] \n"
"vld2.f32 {d20-d23}, [%6]! \n" // q10 q11 = r40 r41
"vmla.f32 q8, q14, %f18[1] \n"
"pld [%6, #128] \n"
"vld2.f32 {d24-d25}, [%6] \n" // q12 = r48 x x
"vmla.f32 q9, q10, %e19[0] \n"
"vmov d26, d25 \n" // q13 = r49 x x
"vext.32 q14, q10, q12, #1 \n" // q14 = r42
"vmla.f32 q8, q11, %e19[1] \n"
"vext.32 q15, q11, q13, #1 \n" // q15 = r43
"vmla.f32 q9, q14, %f19[0] \n"
"vext.32 q14, q10, q12, #2 \n" // q14 = r44
"vmla.f32 q8, q15, %f19[1] \n"
// r0
"pld [%2, #256] \n"
"vld2.f32 {d20-d23}, [%2]! \n" // q10 q11 = r00 r01
"vmla.f32 q9, q14, %e20[0] \n"
"pld [%2, #128] \n"
"vld2.f32 {d24-d25}, [%2] \n" // q12 = r08 x x
"vadd.f32 q9, q8, q9 \n"
"vmov q8, %q21 \n"
"subs %0, #1 \n"
"vst1.f32 {d18-d19}, [%1]! \n"
"bne 0b \n"
"sub %2, #32 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4) // %6
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"w"(_k0123), // %14
"w"(_k4567), // %15
"w"(_k891011), // %16
"w"(_k12131415), // %17
"w"(_k16171819), // %18
"w"(_k20212223), // %19
"w"(_k24242424), // %20
"w"(_bias0) // %21
: "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain > 0; remain--)
{
float sum = bias0;
#if __ARM_NEON
// TODO neon assembly optimize
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _sum = vmulq_f32(_r0, _k0123);
float32x4_t _r1 = vld1q_f32(r1);
_sum = vmlaq_f32(_sum, _r1, vld1q_f32(k1));
float32x4_t _r2 = vld1q_f32(r2);
_sum = vmlaq_f32(_sum, _r2, vld1q_f32(k2));
float32x4_t _r3 = vld1q_f32(r3);
_sum = vmlaq_f32(_sum, _r3, vld1q_f32(k3));
float32x4_t _r4 = vld1q_f32(r4);
_sum = vmlaq_f32(_sum, _r4, _k20212223);
sum += r0[4] * k0[4];
sum += r1[4] * k1[4];
sum += r2[4] * k2[4];
sum += r3[4] * k3[4];
sum += r4[4] * k4[4];
float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
_ss = vpadd_f32(_ss, _ss);
sum += vget_lane_f32(_ss, 0);
#else
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r0[3] * k0[3];
sum += r0[4] * k0[4];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r1[3] * k1[3];
sum += r1[4] * k1[4];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum += r2[3] * k2[3];
sum += r2[4] * k2[4];
sum += r3[0] * k3[0];
sum += r3[1] * k3[1];
sum += r3[2] * k3[2];
sum += r3[3] * k3[3];
sum += r3[4] * k3[4];
sum += r4[0] * k4[0];
sum += r4[1] * k4[1];
sum += r4[2] * k4[2];
sum += r4[3] * k4[3];
sum += r4[4] * k4[4];
#endif
*outptr = sum;
r0 += 2;
r1 += 2;
r2 += 2;
r3 += 2;
r4 += 2;
outptr++;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
r3 += tailstep;
r4 += tailstep;
}
}
}
|
ssha512_fmt_plug.c | /*
* ssha512 support for LDAP style password storage
*
* This software is Copyright (c) 2013 magnum, and it is hereby released to the
* general public under the following terms: Redistribution and use in source
* and binary forms, with or without modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_saltedsha2;
#elif FMT_REGISTERS_H
john_register_one(&fmt_saltedsha2);
#else
#define MAX_SALT_LEN 16 // bytes, the base64 representation is longer
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#define OMP_SCALE 2048 // i7 not using HT
#endif
#include "misc.h"
#include "formats.h"
#include "arch.h"
#include "options.h"
#include "johnswap.h"
#include "common.h"
#include "sha2.h"
#include "base64.h"
#include "memdbg.h"
#define FORMAT_LABEL "SSHA512"
#define FORMAT_NAME "LDAP"
#define ALGORITHM_NAME "32/" ARCH_BITS_STR " " SHA2_LIB
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH (55-MAX_SALT_LEN)
#define BINARY_SIZE (512 / 8)
#define BINARY_ALIGN 4
#define SALT_SIZE (MAX_SALT_LEN + sizeof(unsigned int))
#define SALT_ALIGN 4
#define CIPHERTEXT_LENGTH ((BINARY_SIZE + 1 + MAX_SALT_LEN + 2) / 3 * 4)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define NSLDAP_MAGIC "{SSHA512}"
#define NSLDAP_MAGIC_LENGTH (sizeof(NSLDAP_MAGIC) - 1)
#define BASE64_ALPHABET \
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
struct s_salt
{
unsigned int len;
union {
unsigned char c[MAX_SALT_LEN];
ARCH_WORD_32 w32;
} data;
};
static struct s_salt *saved_salt;
static struct fmt_tests tests[] = {
{"{SSHA512}SCMmLlStPIxVtJc8Y6REiGTMsgSEFF7xVQFoYZYg39H0nEeDuK/fWxxNZCdSYlRgJK3U3q0lYTka3Nre2CjXzeNUjbvHabYP", "password"},
{"{SSHA512}WucBQuH6NyeRYMz6gHQddkJLwzTUXaf8Ag0n9YM0drMFHG9XCO+FllvvwjXmo5/yFPvs+n1JVvJmdsvX5XHYvSUn9Xw=", "test123"},
{"{SSHA512}uURShqzuCx/8BKVrc4HkTpYnv2eVfwEzg+Zi2AbsTQaIV7Xo6pDhRAZnp70h5P8MC6XyotrB2f27aLhhRj4GYrkJSFmbKmuF", "testpass"},
{NULL}
};
static unsigned char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *saved_len;
static ARCH_WORD_32 (*crypt_key)[BINARY_SIZE / 4];
/* Session setup: size the candidate-key buffers (scaled for OpenMP)
   and allocate the plaintext, length and digest arrays that
   set_key()/crypt_all() fill in. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int threads = omp_get_max_threads();

	self->params.min_keys_per_crypt *= threads;
	threads *= OMP_SCALE;
	self->params.max_keys_per_crypt *= threads;
#endif
	saved_key = mem_calloc_tiny(sizeof(*saved_key) *
	    self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	saved_len = mem_calloc_tiny(sizeof(*saved_len) *
	    self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	crypt_key = mem_calloc_tiny(sizeof(*crypt_key) *
	    self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}
/* Decode the base64 payload of a "{SSHA512}..." ciphertext into a
   lazily-allocated static buffer and return the raw digest+salt bytes.
   Only the first BINARY_SIZE bytes (the SHA-512 digest) are consumed
   by callers; the buffer is zeroed there before each decode. */
static void * binary(char *ciphertext) {
	static char *out;

	if (!out)
		out = mem_alloc_tiny(BINARY_SIZE + 1 + SALT_SIZE, MEM_ALIGN_WORD);
	memset(out, 0, BINARY_SIZE);
	ciphertext += NSLDAP_MAGIC_LENGTH;
	base64_decode(ciphertext, strlen(ciphertext), out);
	return (void*)out;
}
/* Accept a hash iff it starts with the "{SSHA512}" tag and the rest is
   well-formed base64: enough data characters to cover a full SHA-512
   digest, only base64/'=' characters, a multiple of 4 long, and no
   longer than a digest plus a maximum-length salt. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	int b64len, fulllen;

	if (strncasecmp(ciphertext, NSLDAP_MAGIC, NSLDAP_MAGIC_LENGTH) != 0)
		return 0;
	ciphertext += NSLDAP_MAGIC_LENGTH;

	/* Data characters must at least encode BINARY_SIZE+1 bytes. */
	b64len = strspn(ciphertext, BASE64_ALPHABET);
	if (b64len < (BINARY_SIZE+1+2)/3*4-2)
		return 0;

	/* Nothing but base64 and padding, 4-aligned, bounded length. */
	fulllen = strspn(ciphertext, BASE64_ALPHABET "=");
	if (fulllen != strlen(ciphertext))
		return 0;
	if ((fulllen & 3) || fulllen > CIPHERTEXT_LENGTH)
		return 0;

	return 1;
}
/* Stash one candidate plaintext (including its NUL) and remember its
   length so crypt_all() need not call strlen again. */
static void set_key(char *key, int index)
{
	const int length = strlen(key);

	saved_len[index] = length;
	memcpy(saved_key[index], key, length + 1);
}
/*
 * Extract the binary salt from a "{SSHA512}<base64>" ciphertext.
 * The decoded blob is the SHA-512 digest (BINARY_SIZE bytes) followed
 * by the salt; each trailing '=' pad character means one byte less of
 * decoded data. Returns a pointer to a static struct s_salt (standard
 * format-plugin convention: get_salt() runs single-threaded).
 */
static void * get_salt(char * ciphertext)
{
	static struct s_salt cursalt;
	char *p;
	char realcipher[CIPHERTEXT_LENGTH];
	int len;

	ciphertext += NSLDAP_MAGIC_LENGTH;
	memset(realcipher, 0, sizeof(realcipher));
	memset(&cursalt, 0, sizeof(struct s_salt));
	len = strlen(ciphertext);
	base64_decode(ciphertext, len, realcipher);

	/* Decoded length minus digest size gives the raw salt length;
	   subtract one byte per '=' padding character. */
	cursalt.len = (len + 3) / 4 * 3 - BINARY_SIZE;
	p = &ciphertext[len];
	while (*--p == '=')
		cursalt.len--;
	/* Bug fix: a maximum-length ciphertext (CIPHERTEXT_LENGTH base64
	   chars with no padding) decodes to one byte more than data.c can
	   hold (e.g. 81 - 64 = 17 > 16), so the memcpy below would
	   overflow the static salt buffer. Clamp to the buffer size. */
	if (cursalt.len > sizeof(cursalt.data.c))
		cursalt.len = sizeof(cursalt.data.c);
	memcpy(cursalt.data.c, realcipher + BINARY_SIZE, cursalt.len);
	return &cursalt;
}
/* Return the stored candidate plaintext for this index. */
static char *get_key(int index) {
	return (char *)&saved_key[index][0];
}
static int cmp_all(void *binary, int count) {
int index;
for (index = 0; index < count; index++)
if (((ARCH_WORD_32*)binary)[0] == crypt_key[index][0])
return 1;
return 0;
}
/* Full digest comparison for one candidate. */
static int cmp_one(void *binary, int index)
{
	return memcmp(crypt_key[index], binary, BINARY_SIZE) == 0;
}
/* Nothing left to verify: cmp_one() already compared the full binary. */
static int cmp_exact(char *source, int count)
{
	return 1;
}
/* Remember the salt to be appended to every key in crypt_all(). */
static void set_salt(void *salt) {
	saved_salt = (struct s_salt *)salt;
}
/* Compute SHA-512(password || salt) for every queued candidate and
   store each digest in crypt_key[index]. Candidates are independent,
   so the loop is parallelized across OpenMP threads when available.
   Returns the number of digests computed. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
int count = *pcount;
int index;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index++) {
SHA512_CTX ctx;
/* LDAP SSHA scheme: hash the plaintext first, then the raw salt. */
SHA512_Init(&ctx);
SHA512_Update(&ctx, saved_key[index], saved_len[index]);
SHA512_Update(&ctx, saved_salt->data.c, saved_salt->len);
SHA512_Final((unsigned char*)crypt_key[index], &ctx);
}
return count;
}
/* Hash-table bucketing helpers: return the low 4..27 bits of the first
   32-bit word of a computed digest, one function per table size. */
static int get_hash_0(int index) { return ((ARCH_WORD_32*)crypt_key[index])[0] & 0xf; }
static int get_hash_1(int index) { return ((ARCH_WORD_32*)crypt_key[index])[0] & 0xff; }
static int get_hash_2(int index) { return ((ARCH_WORD_32*)crypt_key[index])[0] & 0xfff; }
static int get_hash_3(int index) { return ((ARCH_WORD_32*)crypt_key[index])[0] & 0xffff; }
static int get_hash_4(int index) { return ((ARCH_WORD_32*)crypt_key[index])[0] & 0xfffff; }
static int get_hash_5(int index) { return ((ARCH_WORD_32*)crypt_key[index])[0] & 0xffffff; }
static int get_hash_6(int index) { return ((ARCH_WORD_32*)crypt_key[index])[0] & 0x7ffffff; }
/* Bucket a salt by the first 32-bit word of its raw bytes. */
static int salt_hash(void *salt)
{
	return ((struct s_salt *)salt)->data.w32 & (SALT_HASH_SIZE - 1);
}
/* Format registration: parameters first, then the method table wiring
   the functions above into John's generic cracking loop. */
struct fmt_main fmt_saltedsha2 = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
tests
}, {
init,
fmt_default_done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
binary,
get_salt,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
fmt_default_source,
/* binary_hash_[0-6]: defaults operate on the stored binary. */
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
/* get_hash_[0-6]: same masks applied to the computed digests. */
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
gimple.h | /* Gimple IR definitions.
Copyright (C) 2007-2013 Free Software Foundation, Inc.
Contributed by Aldy Hernandez <aldyh@redhat.com>
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#ifndef GCC_GIMPLE_H
#define GCC_GIMPLE_H
#include "pointer-set.h"
#include "vec.h"
#include "ggc.h"
#include "basic-block.h"
#include "tree.h"
#include "tree-ssa-operands.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
typedef gimple gimple_seq_node;
/* For each block, the PHI nodes that need to be rewritten are stored into
these vectors. */
typedef vec<gimple> gimple_vec;
enum gimple_code {
#define DEFGSCODE(SYM, STRING, STRUCT) SYM,
#include "gimple.def"
#undef DEFGSCODE
LAST_AND_UNUSED_GIMPLE_CODE
};
extern const char *const gimple_code_name[];
extern const unsigned char gimple_rhs_class_table[];
/* Error out if a gimple tuple is addressed incorrectly. */
#if defined ENABLE_GIMPLE_CHECKING
#define gcc_gimple_checking_assert(EXPR) gcc_assert (EXPR)
extern void gimple_check_failed (const_gimple, const char *, int, \
const char *, enum gimple_code, \
enum tree_code) ATTRIBUTE_NORETURN;
#define GIMPLE_CHECK(GS, CODE) \
do { \
const_gimple __gs = (GS); \
if (gimple_code (__gs) != (CODE)) \
gimple_check_failed (__gs, __FILE__, __LINE__, __FUNCTION__, \
(CODE), ERROR_MARK); \
} while (0)
#else /* not ENABLE_GIMPLE_CHECKING */
#define gcc_gimple_checking_assert(EXPR) ((void)(0 && (EXPR)))
#define GIMPLE_CHECK(GS, CODE) (void)0
#endif
/* Class of GIMPLE expressions suitable for the RHS of assignments. See
get_gimple_rhs_class. */
enum gimple_rhs_class
{
GIMPLE_INVALID_RHS, /* The expression cannot be used on the RHS. */
GIMPLE_TERNARY_RHS, /* The expression is a ternary operation. */
GIMPLE_BINARY_RHS, /* The expression is a binary operation. */
GIMPLE_UNARY_RHS, /* The expression is a unary operation. */
GIMPLE_SINGLE_RHS /* The expression is a single object (an SSA
name, a _DECL, a _REF, etc.). */
};
/* Specific flags for individual GIMPLE statements. These flags are
always stored in gimple_statement_base.subcode and they may only be
defined for statement codes that do not use sub-codes.
Values for the masks can overlap as long as the overlapping values
are never used in the same statement class.
The maximum mask value that can be defined is 1 << 15 (i.e., each
statement code can hold up to 16 bitflags).
Keep this list sorted. */
enum gf_mask {
/* GIMPLE_ASM flags. */
GF_ASM_INPUT = 1 << 0,
GF_ASM_VOLATILE = 1 << 1,
/* GIMPLE_CALL flags.  */
GF_CALL_FROM_THUNK = 1 << 0,
GF_CALL_RETURN_SLOT_OPT = 1 << 1,
GF_CALL_TAILCALL = 1 << 2,
GF_CALL_VA_ARG_PACK = 1 << 3,
GF_CALL_NOTHROW = 1 << 4,
GF_CALL_ALLOCA_FOR_VAR = 1 << 5,
/* Call is to an internal function (no fndecl); see the u union in
   gimple_statement_call, whose GTY desc keys on this bit. */
GF_CALL_INTERNAL = 1 << 6,
GF_OMP_PARALLEL_COMBINED = 1 << 0,
/* True on an GIMPLE_OMP_RETURN statement if the return does not require
a thread synchronization via some sort of barrier. The exact barrier
that would otherwise be emitted is dependent on the OMP statement with
which this return is associated. */
GF_OMP_RETURN_NOWAIT = 1 << 0,
GF_OMP_SECTION_LAST = 1 << 0,
GF_OMP_ATOMIC_NEED_VALUE = 1 << 0,
/* GIMPLE_PREDICT: prediction is that the branch is taken. */
GF_PREDICT_TAKEN = 1 << 15
};
/* Currently, there are only two types of gimple debug stmt. Others are
envisioned, for example, to enable the generation of is_stmt notes
in line number information, to mark sequence points, etc. This
subcode is to be used to tell them apart. */
enum gimple_debug_subcode {
GIMPLE_DEBUG_BIND = 0,
GIMPLE_DEBUG_SOURCE_BIND = 1
};
/* Masks for selecting a pass local flag (PLF) to work on. These
masks are used by gimple_set_plf and gimple_plf. */
enum plf_mask {
GF_PLF_1 = 1 << 0,
GF_PLF_2 = 1 << 1
};
/* Iterator object for GIMPLE statement sequences. */
typedef struct
{
/* Sequence node holding the current statement. */
gimple_seq_node ptr;
/* Sequence and basic block holding the statement. These fields
are necessary to handle edge cases such as when statement is
added to an empty basic block or when the last statement of a
block/sequence is removed. */
gimple_seq *seq;
basic_block bb;
} gimple_stmt_iterator;
/* Data structure definitions for GIMPLE tuples. NOTE: word markers
are for 64 bit hosts. */
struct GTY((chain_next ("%h.next"))) gimple_statement_base {
/* [ WORD 1 ]
Main identifying code for a tuple. */
ENUM_BITFIELD(gimple_code) code : 8;
/* Nonzero if a warning should not be emitted on this tuple. */
unsigned int no_warning : 1;
/* Nonzero if this tuple has been visited. Passes are responsible
for clearing this bit before using it. */
unsigned int visited : 1;
/* Nonzero if this tuple represents a non-temporal move. */
unsigned int nontemporal_move : 1;
/* Pass local flags. These flags are free for any pass to use as
they see fit. Passes should not assume that these flags contain
any useful value when the pass starts. Any initial state that
the pass requires should be set on entry to the pass. See
gimple_set_plf and gimple_plf for usage. */
unsigned int plf : 2;
/* Nonzero if this statement has been modified and needs to have its
operands rescanned. */
unsigned modified : 1;
/* Nonzero if this statement contains volatile operands. */
unsigned has_volatile_ops : 1;
/* Padding to get subcode to 16 bit alignment. */
unsigned pad : 1;
/* The SUBCODE field can be used for tuple-specific flags for tuples
that do not require subcodes. Note that SUBCODE should be at
least as wide as tree codes, as several tuples store tree codes
in there. */
unsigned int subcode : 16;
/* UID of this statement. This is used by passes that want to
assign IDs to statements. It must be assigned and used by each
pass. By default it should be assumed to contain garbage. */
unsigned uid;
/* [ WORD 2 ]
Locus information for debug info. */
location_t location;
/* Number of operands in this tuple. */
unsigned num_ops;
/* [ WORD 3 ]
Basic block holding this statement. */
basic_block bb;
/* [ WORD 4-5 ]
Linked lists of gimple statements. The next pointers form
a NULL terminated list, the prev pointers are a cyclic list.
A gimple statement is hence also a double-ended list of
statements, with the pointer itself being the first element,
and the prev pointer being the last. */
gimple next;
gimple GTY((skip)) prev;
};
/* Base structure for tuples with operands. */
struct GTY(()) gimple_statement_with_ops_base
{
/* [ WORD 1-6 ] */
struct gimple_statement_base gsbase;
/* [ WORD 7 ]
SSA operand vectors. NOTE: It should be possible to
amalgamate these vectors with the operand vector OP. However,
the SSA operand vectors are organized differently and contain
more information (like immediate use chaining). */
struct use_optype_d GTY((skip (""))) *use_ops;
};
/* Statements that take register operands. */
struct GTY(()) gimple_statement_with_ops
{
/* [ WORD 1-7 ] */
struct gimple_statement_with_ops_base opbase;
/* [ WORD 8 ]
Operand vector. NOTE! This must always be the last field
of this structure. In particular, this means that this
structure cannot be embedded inside another one. */
tree GTY((length ("%h.opbase.gsbase.num_ops"))) op[1];
};
/* Base for statements that take both memory and register operands. */
struct GTY(()) gimple_statement_with_memory_ops_base
{
/* [ WORD 1-7 ] */
struct gimple_statement_with_ops_base opbase;
/* [ WORD 8-9 ]
Virtual operands for this statement. The GC will pick them
up via the ssa_names array. */
tree GTY((skip (""))) vdef;
tree GTY((skip (""))) vuse;
};
/* Statements that take both memory and register operands. */
struct GTY(()) gimple_statement_with_memory_ops
{
/* [ WORD 1-9 ] */
struct gimple_statement_with_memory_ops_base membase;
/* [ WORD 10 ]
Operand vector. NOTE! This must always be the last field
of this structure. In particular, this means that this
structure cannot be embedded inside another one. */
tree GTY((length ("%h.membase.opbase.gsbase.num_ops"))) op[1];
};
/* Call statements that take both memory and register operands. */
struct GTY(()) gimple_statement_call
{
/* [ WORD 1-9 ] */
struct gimple_statement_with_memory_ops_base membase;
/* [ WORD 10-13 ] */
struct pt_solution call_used;
struct pt_solution call_clobbered;
/* [ WORD 14 ] */
union GTY ((desc ("%1.membase.opbase.gsbase.subcode & GF_CALL_INTERNAL"))) {
tree GTY ((tag ("0"))) fntype;
enum internal_fn GTY ((tag ("GF_CALL_INTERNAL"))) internal_fn;
} u;
/* [ WORD 15 ]
Operand vector. NOTE! This must always be the last field
of this structure. In particular, this means that this
structure cannot be embedded inside another one. */
tree GTY((length ("%h.membase.opbase.gsbase.num_ops"))) op[1];
};
/* OpenMP statements (#pragma omp). */
struct GTY(()) gimple_statement_omp {
/* [ WORD 1-6 ] */
struct gimple_statement_base gsbase;
/* [ WORD 7 ] */
gimple_seq body;
};
/* GIMPLE_BIND */
struct GTY(()) gimple_statement_bind {
/* [ WORD 1-6 ] */
struct gimple_statement_base gsbase;
/* [ WORD 7 ]
Variables declared in this scope. */
tree vars;
/* [ WORD 8 ]
This is different than the BLOCK field in gimple_statement_base,
which is analogous to TREE_BLOCK (i.e., the lexical block holding
this statement). This field is the equivalent of BIND_EXPR_BLOCK
in tree land (i.e., the lexical scope defined by this bind). See
gimple-low.c. */
tree block;
/* [ WORD 9 ] */
gimple_seq body;
};
/* GIMPLE_CATCH */
struct GTY(()) gimple_statement_catch {
/* [ WORD 1-6 ] */
struct gimple_statement_base gsbase;
/* [ WORD 7 ] */
tree types;
/* [ WORD 8 ] */
gimple_seq handler;
};
/* GIMPLE_EH_FILTER */
struct GTY(()) gimple_statement_eh_filter {
/* [ WORD 1-6 ] */
struct gimple_statement_base gsbase;
/* [ WORD 7 ]
Filter types. */
tree types;
/* [ WORD 8 ]
Failure actions. */
gimple_seq failure;
};
/* GIMPLE_EH_ELSE */
struct GTY(()) gimple_statement_eh_else {
/* [ WORD 1-6 ] */
struct gimple_statement_base gsbase;
/* [ WORD 7,8 ] */
gimple_seq n_body, e_body;
};
/* GIMPLE_EH_MUST_NOT_THROW */
struct GTY(()) gimple_statement_eh_mnt {
/* [ WORD 1-6 ] */
struct gimple_statement_base gsbase;
/* [ WORD 7 ] Abort function decl. */
tree fndecl;
};
/* GIMPLE_PHI */
struct GTY(()) gimple_statement_phi {
/* [ WORD 1-6 ] */
struct gimple_statement_base gsbase;
/* [ WORD 7 ]
nargs is the number of argument slots in use (it also drives the GC
length of ARGS below); capacity is presumably the number of slots
allocated -- confirm against the PHI growing code in gimple.c. */
unsigned capacity;
unsigned nargs;
/* [ WORD 8 ] */
tree result;
/* [ WORD 9 ] */
struct phi_arg_d GTY ((length ("%h.nargs"))) args[1];
};
/* GIMPLE_RESX, GIMPLE_EH_DISPATCH */
struct GTY(()) gimple_statement_eh_ctrl
{
/* [ WORD 1-6 ] */
struct gimple_statement_base gsbase;
/* [ WORD 7 ]
Exception region number. */
int region;
};
/* GIMPLE_TRY */
struct GTY(()) gimple_statement_try {
/* [ WORD 1-6 ] */
struct gimple_statement_base gsbase;
/* [ WORD 7 ]
Expression to evaluate. */
gimple_seq eval;
/* [ WORD 8 ]
Cleanup expression. */
gimple_seq cleanup;
};
/* Kind of GIMPLE_TRY statements. */
enum gimple_try_flags
{
/* A try/catch. */
GIMPLE_TRY_CATCH = 1 << 0,
/* A try/finally. */
GIMPLE_TRY_FINALLY = 1 << 1,
GIMPLE_TRY_KIND = GIMPLE_TRY_CATCH | GIMPLE_TRY_FINALLY,
/* Analogous to TRY_CATCH_IS_CLEANUP. */
GIMPLE_TRY_CATCH_IS_CLEANUP = 1 << 2
};
/* GIMPLE_WITH_CLEANUP_EXPR */
struct GTY(()) gimple_statement_wce {
/* [ WORD 1-6 ] */
struct gimple_statement_base gsbase;
/* Subcode: CLEANUP_EH_ONLY. True if the cleanup should only be
executed if an exception is thrown, not on normal exit of its
scope. This flag is analogous to the CLEANUP_EH_ONLY flag
in TARGET_EXPRs. */
/* [ WORD 7 ]
Cleanup expression. */
gimple_seq cleanup;
};
/* GIMPLE_ASM */
struct GTY(()) gimple_statement_asm
{
/* [ WORD 1-9 ] */
struct gimple_statement_with_memory_ops_base membase;
/* [ WORD 10 ]
__asm__ statement. */
const char *string;
/* [ WORD 11 ]
Number of inputs, outputs, clobbers, labels. */
unsigned char ni;
unsigned char no;
unsigned char nc;
unsigned char nl;
/* [ WORD 12 ]
Operand vector. NOTE! This must always be the last field
of this structure. In particular, this means that this
structure cannot be embedded inside another one. */
tree GTY((length ("%h.membase.opbase.gsbase.num_ops"))) op[1];
};
/* GIMPLE_OMP_CRITICAL */
struct GTY(()) gimple_statement_omp_critical {
/* [ WORD 1-7 ] */
struct gimple_statement_omp omp;
/* [ WORD 8 ]
Critical section name. */
tree name;
};
struct GTY(()) gimple_omp_for_iter {
/* Condition code. */
enum tree_code cond;
/* Index variable. */
tree index;
/* Initial value. */
tree initial;
/* Final value. */
tree final;
/* Increment. */
tree incr;
};
/* GIMPLE_OMP_FOR */
struct GTY(()) gimple_statement_omp_for {
/* [ WORD 1-7 ] */
struct gimple_statement_omp omp;
/* [ WORD 8 ] */
tree clauses;
/* [ WORD 9 ]
Number of elements in iter array. */
size_t collapse;
/* [ WORD 10 ] */
struct gimple_omp_for_iter * GTY((length ("%h.collapse"))) iter;
/* [ WORD 11 ]
Pre-body evaluated before the loop body begins. */
gimple_seq pre_body;
};
/* GIMPLE_OMP_PARALLEL */
struct GTY(()) gimple_statement_omp_parallel {
/* [ WORD 1-7 ] */
struct gimple_statement_omp omp;
/* [ WORD 8 ]
Clauses. */
tree clauses;
/* [ WORD 9 ]
Child function holding the body of the parallel region. */
tree child_fn;
/* [ WORD 10 ]
Shared data argument. */
tree data_arg;
};
/* GIMPLE_OMP_TASK */
struct GTY(()) gimple_statement_omp_task {
/* [ WORD 1-10 ] */
struct gimple_statement_omp_parallel par;
/* [ WORD 11 ]
Child function holding firstprivate initialization if needed. */
tree copy_fn;
/* [ WORD 12-13 ]
Size and alignment in bytes of the argument data block. */
tree arg_size;
tree arg_align;
};
/* GIMPLE_OMP_SECTION */
/* Uses struct gimple_statement_omp. */
/* GIMPLE_OMP_SECTIONS */
struct GTY(()) gimple_statement_omp_sections {
/* [ WORD 1-7 ] */
struct gimple_statement_omp omp;
/* [ WORD 8 ] */
tree clauses;
/* [ WORD 9 ]
The control variable used for deciding which of the sections to
execute. */
tree control;
};
/* GIMPLE_OMP_CONTINUE.
Note: This does not inherit from gimple_statement_omp, because we
do not need the body field. */
struct GTY(()) gimple_statement_omp_continue {
/* [ WORD 1-6 ] */
struct gimple_statement_base gsbase;
/* [ WORD 7 ] */
tree control_def;
/* [ WORD 8 ] */
tree control_use;
};
/* GIMPLE_OMP_SINGLE */
struct GTY(()) gimple_statement_omp_single {
/* [ WORD 1-7 ] */
struct gimple_statement_omp omp;
/* [ WORD 8 ]
(The embedded gimple_statement_omp already spans words 1-7, so
CLAUSES is word 8; the original "[ WORD 7 ]" marker was stale.) */
tree clauses;
};
/* GIMPLE_OMP_ATOMIC_LOAD.
Note: This is based on gimple_statement_base, not g_s_omp, because g_s_omp
contains a sequence, which we don't need here. */
struct GTY(()) gimple_statement_omp_atomic_load {
/* [ WORD 1-6 ] */
struct gimple_statement_base gsbase;
/* [ WORD 7-8 ] */
tree rhs, lhs;
};
/* GIMPLE_OMP_ATOMIC_STORE.
See note on GIMPLE_OMP_ATOMIC_LOAD. */
struct GTY(()) gimple_statement_omp_atomic_store {
/* [ WORD 1-6 ] */
struct gimple_statement_base gsbase;
/* [ WORD 7 ] */
tree val;
};
/* GIMPLE_TRANSACTION. */
/* Bits to be stored in the GIMPLE_TRANSACTION subcode. */
/* The __transaction_atomic was declared [[outer]] or it is
__transaction_relaxed. */
#define GTMA_IS_OUTER (1u << 0)
#define GTMA_IS_RELAXED (1u << 1)
#define GTMA_DECLARATION_MASK (GTMA_IS_OUTER | GTMA_IS_RELAXED)
/* The transaction is seen to not have an abort. */
#define GTMA_HAVE_ABORT (1u << 2)
/* The transaction is seen to have loads or stores. */
#define GTMA_HAVE_LOAD (1u << 3)
#define GTMA_HAVE_STORE (1u << 4)
/* The transaction MAY enter serial irrevocable mode in its dynamic scope. */
#define GTMA_MAY_ENTER_IRREVOCABLE (1u << 5)
/* The transaction WILL enter serial irrevocable mode.
An irrevocable block post-dominates the entire transaction, such
that all invocations of the transaction will go serial-irrevocable.
In such case, we don't bother instrumenting the transaction, and
tell the runtime that it should begin the transaction in
serial-irrevocable mode. */
#define GTMA_DOES_GO_IRREVOCABLE (1u << 6)
/* The transaction contains no instrumentation code whatsoever, most
likely because it is guaranteed to go irrevocable upon entry. */
#define GTMA_HAS_NO_INSTRUMENTATION (1u << 7)
/* A GIMPLE_TRANSACTION statement; its GTMA_* flags live in the subcode.  */
struct GTY(()) gimple_statement_transaction
{
/* [ WORD 1-9 ] */
struct gimple_statement_with_memory_ops_base gsbase;
/* [ WORD 10 ]  Statements inside the transaction.  */
gimple_seq body;
/* [ WORD 11 ]  Label for the transaction — TODO confirm exact role
   (abort/over label) against gimple_build_transaction users.  */
tree label;
};
/* Enumerate the GSS_* structure codes: DEFGSSTRUCT expands each entry of
   gsstruct.def to its enumerator symbol.  */
#define DEFGSSTRUCT(SYM, STRUCT, HAS_TREE_OP) SYM,
enum gimple_statement_structure_enum {
#include "gsstruct.def"
LAST_GSS_ENUM
};
#undef DEFGSSTRUCT
/* Define the overall contents of a gimple tuple. It may be any of the
structures declared above for various types of tuples.
The GTY "desc"/"tag" annotations tell the garbage collector which member
is live (selected by gimple_statement_structure); "variable_size" because
several tuples carry a trailing operand vector.  */
union GTY ((desc ("gimple_statement_structure (&%h)"),
chain_next ("%h.gsbase.next"), variable_size)) gimple_statement_d {
struct gimple_statement_base GTY ((tag ("GSS_BASE"))) gsbase;
struct gimple_statement_with_ops GTY ((tag ("GSS_WITH_OPS"))) gsops;
struct gimple_statement_with_memory_ops_base GTY ((tag ("GSS_WITH_MEM_OPS_BASE"))) gsmembase;
struct gimple_statement_with_memory_ops GTY ((tag ("GSS_WITH_MEM_OPS"))) gsmem;
struct gimple_statement_call GTY ((tag ("GSS_CALL"))) gimple_call;
struct gimple_statement_omp GTY ((tag ("GSS_OMP"))) omp;
struct gimple_statement_bind GTY ((tag ("GSS_BIND"))) gimple_bind;
struct gimple_statement_catch GTY ((tag ("GSS_CATCH"))) gimple_catch;
struct gimple_statement_eh_filter GTY ((tag ("GSS_EH_FILTER"))) gimple_eh_filter;
struct gimple_statement_eh_mnt GTY ((tag ("GSS_EH_MNT"))) gimple_eh_mnt;
struct gimple_statement_eh_else GTY ((tag ("GSS_EH_ELSE"))) gimple_eh_else;
struct gimple_statement_phi GTY ((tag ("GSS_PHI"))) gimple_phi;
struct gimple_statement_eh_ctrl GTY ((tag ("GSS_EH_CTRL"))) gimple_eh_ctrl;
struct gimple_statement_try GTY ((tag ("GSS_TRY"))) gimple_try;
struct gimple_statement_wce GTY ((tag ("GSS_WCE"))) gimple_wce;
struct gimple_statement_asm GTY ((tag ("GSS_ASM"))) gimple_asm;
struct gimple_statement_omp_critical GTY ((tag ("GSS_OMP_CRITICAL"))) gimple_omp_critical;
struct gimple_statement_omp_for GTY ((tag ("GSS_OMP_FOR"))) gimple_omp_for;
struct gimple_statement_omp_parallel GTY ((tag ("GSS_OMP_PARALLEL"))) gimple_omp_parallel;
struct gimple_statement_omp_task GTY ((tag ("GSS_OMP_TASK"))) gimple_omp_task;
struct gimple_statement_omp_sections GTY ((tag ("GSS_OMP_SECTIONS"))) gimple_omp_sections;
struct gimple_statement_omp_single GTY ((tag ("GSS_OMP_SINGLE"))) gimple_omp_single;
struct gimple_statement_omp_continue GTY ((tag ("GSS_OMP_CONTINUE"))) gimple_omp_continue;
struct gimple_statement_omp_atomic_load GTY ((tag ("GSS_OMP_ATOMIC_LOAD"))) gimple_omp_atomic_load;
struct gimple_statement_omp_atomic_store GTY ((tag ("GSS_OMP_ATOMIC_STORE"))) gimple_omp_atomic_store;
struct gimple_statement_transaction GTY((tag ("GSS_TRANSACTION"))) gimple_transaction;
};
/* In gimple.c. */
/* Offset in bytes to the location of the operand vector.
Zero if there is no operand vector for this tuple structure. */
extern size_t const gimple_ops_offset_[];
/* Map GIMPLE codes to GSS codes. */
extern enum gimple_statement_structure_enum const gss_for_code_[];
/* This variable holds the currently expanded gimple statement for purposes
of communicating the profile info to the builtin expanders. */
extern gimple currently_expanding_gimple_stmt;
gimple gimple_build_return (tree);
gimple gimple_build_assign_stat (tree, tree MEM_STAT_DECL);
#define gimple_build_assign(l,r) gimple_build_assign_stat (l, r MEM_STAT_INFO)
void extract_ops_from_tree_1 (tree, enum tree_code *, tree *, tree *, tree *);
gimple
gimple_build_assign_with_ops (enum tree_code, tree,
tree, tree CXX_MEM_STAT_INFO);
gimple
gimple_build_assign_with_ops (enum tree_code, tree,
tree, tree, tree CXX_MEM_STAT_INFO);
gimple gimple_build_debug_bind_stat (tree, tree, gimple MEM_STAT_DECL);
#define gimple_build_debug_bind(var,val,stmt) \
gimple_build_debug_bind_stat ((var), (val), (stmt) MEM_STAT_INFO)
gimple gimple_build_debug_source_bind_stat (tree, tree, gimple MEM_STAT_DECL);
#define gimple_build_debug_source_bind(var,val,stmt) \
gimple_build_debug_source_bind_stat ((var), (val), (stmt) MEM_STAT_INFO)
gimple gimple_build_call_vec (tree, vec<tree> );
gimple gimple_build_call (tree, unsigned, ...);
gimple gimple_build_call_valist (tree, unsigned, va_list);
gimple gimple_build_call_internal (enum internal_fn, unsigned, ...);
gimple gimple_build_call_internal_vec (enum internal_fn, vec<tree> );
gimple gimple_build_call_from_tree (tree);
gimple gimplify_assign (tree, tree, gimple_seq *);
gimple gimple_build_cond (enum tree_code, tree, tree, tree, tree);
gimple gimple_build_label (tree label);
gimple gimple_build_goto (tree dest);
gimple gimple_build_nop (void);
gimple gimple_build_bind (tree, gimple_seq, tree);
gimple gimple_build_asm_vec (const char *, vec<tree, va_gc> *,
vec<tree, va_gc> *, vec<tree, va_gc> *,
vec<tree, va_gc> *);
gimple gimple_build_catch (tree, gimple_seq);
gimple gimple_build_eh_filter (tree, gimple_seq);
gimple gimple_build_eh_must_not_throw (tree);
gimple gimple_build_eh_else (gimple_seq, gimple_seq);
gimple gimple_build_try (gimple_seq, gimple_seq, enum gimple_try_flags);
gimple gimple_build_wce (gimple_seq);
gimple gimple_build_resx (int);
gimple gimple_build_eh_dispatch (int);
gimple gimple_build_switch_nlabels (unsigned, tree, tree);
gimple gimple_build_switch (tree, tree, vec<tree> );
gimple gimple_build_omp_parallel (gimple_seq, tree, tree, tree);
gimple gimple_build_omp_task (gimple_seq, tree, tree, tree, tree, tree, tree);
gimple gimple_build_omp_for (gimple_seq, tree, size_t, gimple_seq);
gimple gimple_build_omp_critical (gimple_seq, tree);
gimple gimple_build_omp_section (gimple_seq);
gimple gimple_build_omp_continue (tree, tree);
gimple gimple_build_omp_master (gimple_seq);
gimple gimple_build_omp_return (bool);
gimple gimple_build_omp_ordered (gimple_seq);
gimple gimple_build_omp_sections (gimple_seq, tree);
gimple gimple_build_omp_sections_switch (void);
gimple gimple_build_omp_single (gimple_seq, tree);
gimple gimple_build_cdt (tree, tree);
gimple gimple_build_omp_atomic_load (tree, tree);
gimple gimple_build_omp_atomic_store (tree);
gimple gimple_build_transaction (gimple_seq, tree);
gimple gimple_build_predict (enum br_predictor, enum prediction);
enum gimple_statement_structure_enum gss_for_assign (enum tree_code);
void sort_case_labels (vec<tree> );
void preprocess_case_label_vec_for_gimple (vec<tree> , tree, tree *);
void gimple_set_body (tree, gimple_seq);
gimple_seq gimple_body (tree);
bool gimple_has_body_p (tree);
gimple_seq gimple_seq_alloc (void);
void gimple_seq_free (gimple_seq);
void gimple_seq_add_seq (gimple_seq *, gimple_seq);
gimple_seq gimple_seq_copy (gimple_seq);
bool gimple_call_same_target_p (const_gimple, const_gimple);
int gimple_call_flags (const_gimple);
int gimple_call_return_flags (const_gimple);
int gimple_call_arg_flags (const_gimple, unsigned);
void gimple_call_reset_alias_info (gimple);
bool gimple_assign_copy_p (gimple);
bool gimple_assign_ssa_name_copy_p (gimple);
bool gimple_assign_unary_nop_p (gimple);
void gimple_set_bb (gimple, basic_block);
void gimple_assign_set_rhs_from_tree (gimple_stmt_iterator *, tree);
void gimple_assign_set_rhs_with_ops_1 (gimple_stmt_iterator *, enum tree_code,
tree, tree, tree);
tree gimple_get_lhs (const_gimple);
void gimple_set_lhs (gimple, tree);
void gimple_replace_lhs (gimple, tree);
gimple gimple_copy (gimple);
void gimple_cond_get_ops_from_tree (tree, enum tree_code *, tree *, tree *);
gimple gimple_build_cond_from_tree (tree, tree, tree);
void gimple_cond_set_condition_from_tree (gimple, tree);
bool gimple_has_side_effects (const_gimple);
bool gimple_could_trap_p (gimple);
bool gimple_could_trap_p_1 (gimple, bool, bool);
bool gimple_assign_rhs_could_trap_p (gimple);
void gimple_regimplify_operands (gimple, gimple_stmt_iterator *);
bool empty_body_p (gimple_seq);
unsigned get_gimple_rhs_num_ops (enum tree_code);
#define gimple_alloc(c, n) gimple_alloc_stat (c, n MEM_STAT_INFO)
gimple gimple_alloc_stat (enum gimple_code, unsigned MEM_STAT_DECL);
const char *gimple_decl_printable_name (tree, int);
tree gimple_get_virt_method_for_binfo (HOST_WIDE_INT, tree);
tree gimple_extract_devirt_binfo_from_cst (tree);
/* Returns true iff T is a scalar register variable. */
extern bool is_gimple_reg (tree);
/* Returns true iff T is any sort of variable. */
extern bool is_gimple_variable (tree);
/* Returns true iff T is any sort of symbol. */
extern bool is_gimple_id (tree);
/* Returns true iff T is a variable or an INDIRECT_REF (of a variable). */
extern bool is_gimple_min_lval (tree);
/* Returns true iff T is something whose address can be taken. */
extern bool is_gimple_addressable (tree);
/* Returns true iff T is any valid GIMPLE lvalue. */
extern bool is_gimple_lvalue (tree);
/* Returns true iff T is a GIMPLE address. */
bool is_gimple_address (const_tree);
/* Returns true iff T is a GIMPLE invariant address. */
bool is_gimple_invariant_address (const_tree);
/* Returns true iff T is a GIMPLE invariant address at interprocedural
level. */
bool is_gimple_ip_invariant_address (const_tree);
/* Returns true iff T is a valid GIMPLE constant. */
bool is_gimple_constant (const_tree);
/* Returns true iff T is a GIMPLE restricted function invariant. */
extern bool is_gimple_min_invariant (const_tree);
/* Returns true iff T is a GIMPLE restricted interprocedural invariant. */
extern bool is_gimple_ip_invariant (const_tree);
/* Returns true iff T is a GIMPLE rvalue. */
extern bool is_gimple_val (tree);
/* Returns true iff T is a GIMPLE asm statement input. */
extern bool is_gimple_asm_val (tree);
/* Returns true iff T is a valid address operand of a MEM_REF. */
bool is_gimple_mem_ref_addr (tree);
/* Returns true iff T is a valid if-statement condition. */
extern bool is_gimple_condexpr (tree);
/* Returns true iff T is a valid call address expression. */
extern bool is_gimple_call_addr (tree);
/* Return TRUE iff stmt is a call to a built-in function. */
extern bool is_gimple_builtin_call (gimple stmt);
extern void recalculate_side_effects (tree);
extern bool gimple_compare_field_offset (tree, tree);
extern tree gimple_register_canonical_type (tree);
extern void print_gimple_types_stats (const char *);
extern void free_gimple_type_tables (void);
extern tree gimple_unsigned_type (tree);
extern tree gimple_signed_type (tree);
extern alias_set_type gimple_get_alias_set (tree);
extern void count_uses_and_derefs (tree, gimple, unsigned *, unsigned *,
unsigned *);
typedef bool (*walk_stmt_load_store_addr_fn) (gimple, tree, tree, void *);
extern bool walk_stmt_load_store_addr_ops (gimple, void *,
walk_stmt_load_store_addr_fn,
walk_stmt_load_store_addr_fn,
walk_stmt_load_store_addr_fn);
extern bool walk_stmt_load_store_ops (gimple, void *,
walk_stmt_load_store_addr_fn,
walk_stmt_load_store_addr_fn);
extern bool gimple_ior_addresses_taken (bitmap, gimple);
extern bool gimple_call_builtin_p (gimple, enum built_in_class);
extern bool gimple_call_builtin_p (gimple, enum built_in_function);
extern bool gimple_asm_clobbers_memory_p (const_gimple);
/* In gimplify.c */
extern tree create_tmp_var_raw (tree, const char *);
extern tree create_tmp_var_name (const char *);
extern tree create_tmp_var (tree, const char *);
extern tree create_tmp_reg (tree, const char *);
extern tree get_initialized_tmp_var (tree, gimple_seq *, gimple_seq *);
extern tree get_formal_tmp_var (tree, gimple_seq *);
extern void declare_vars (tree, gimple, bool);
extern void annotate_all_with_location (gimple_seq, location_t);
/* Validation of GIMPLE expressions. Note that these predicates only check
the basic form of the expression, they don't recurse to make sure that
underlying nodes are also of the right form. */
/* Predicate checking the basic form of a tree node (see comment above).  */
typedef bool (*gimple_predicate)(tree);
/* FIXME we should deduce this from the predicate. */
enum fallback {
fb_none = 0, /* Do not generate a temporary. */
fb_rvalue = 1, /* Generate an rvalue to hold the result of a
gimplified expression. */
fb_lvalue = 2, /* Generate an lvalue to hold the result of a
gimplified expression. */
fb_mayfail = 4, /* Gimplification may fail. Error issued
afterwards. */
fb_either= fb_rvalue | fb_lvalue
};
/* Bitmask of the enum fallback values above.  */
typedef int fallback_t;
/* Result of one gimplification step; negative values are failures.  */
enum gimplify_status {
GS_ERROR = -2, /* Something Bad Seen. */
GS_UNHANDLED = -1, /* A langhook result for "I dunno". */
GS_OK = 0, /* We did something, maybe more to do. */
GS_ALL_DONE = 1 /* The expression is fully gimplified. */
};
/* Per-function gimplification state; contexts form a stack linked through
   prev_context (see push_gimplify_context/pop_gimplify_context).  */
struct gimplify_ctx
{
struct gimplify_ctx *prev_context;
/* Stack of enclosing GIMPLE_BIND statements.  */
vec<gimple> bind_expr_stack;
tree temps;
gimple_seq conditional_cleanups;
tree exit_label;
tree return_temp;
vec<tree> case_labels;
/* The formal temporary table. Should this be persistent? */
htab_t temp_htab;
int conditions;
bool save_stack;
bool into_ssa;
bool allow_rhs_cond_expr;
bool in_cleanup_point_expr;
};
/* Return true if gimplify_one_sizepos doesn't need to gimplify
expr (when in TYPE_SIZE{,_UNIT} and similar type/decl size/bitsize
fields). */
static inline bool
is_gimple_sizepos (tree expr)
{
/* gimplify_one_sizepos doesn't need to do anything if the value isn't there,
is constant, or contains a PLACEHOLDER_EXPR. We also don't want to do
anything if it's already a VAR_DECL. If it's a VAR_DECL from another
function, the gimplifier will want to replace it with a new variable,
but that will cause problems if this type is from outside the function.
It's OK to have that here. */
return (expr == NULL_TREE
|| TREE_CONSTANT (expr)
|| TREE_CODE (expr) == VAR_DECL
|| CONTAINS_PLACEHOLDER_P (expr));
}
extern enum gimplify_status gimplify_expr (tree *, gimple_seq *, gimple_seq *,
bool (*) (tree), fallback_t);
extern void gimplify_type_sizes (tree, gimple_seq *);
extern void gimplify_one_sizepos (tree *, gimple_seq *);
enum gimplify_status gimplify_self_mod_expr (tree *, gimple_seq *, gimple_seq *,
bool, tree);
extern bool gimplify_stmt (tree *, gimple_seq *);
extern gimple gimplify_body (tree, bool);
extern void push_gimplify_context (struct gimplify_ctx *);
extern void pop_gimplify_context (gimple);
extern void gimplify_and_add (tree, gimple_seq *);
/* Miscellaneous helpers. */
extern void gimple_add_tmp_var (tree);
extern gimple gimple_current_bind_expr (void);
extern vec<gimple> gimple_bind_expr_stack (void);
extern tree voidify_wrapper_expr (tree, tree);
extern tree build_and_jump (tree *);
extern tree force_labels_r (tree *, int *, void *);
extern enum gimplify_status gimplify_va_arg_expr (tree *, gimple_seq *,
gimple_seq *);
struct gimplify_omp_ctx;
extern void omp_firstprivatize_variable (struct gimplify_omp_ctx *, tree);
extern tree gimple_boolify (tree);
extern gimple_predicate rhs_predicate_for (tree);
extern tree canonicalize_cond_expr_cond (tree);
/* In omp-low.c. */
extern tree omp_reduction_init (tree, tree);
/* In trans-mem.c. */
extern void diagnose_tm_safe_errors (tree);
extern void compute_transaction_bits (void);
/* In tree-nested.c. */
extern void lower_nested_functions (tree);
extern void insert_field_into_struct (tree, tree);
/* In gimplify.c. */
extern void gimplify_function_tree (tree);
/* In cfgexpand.c. */
extern tree gimple_assign_rhs_to_tree (gimple);
/* In builtins.c */
extern bool validate_gimple_arglist (const_gimple, ...);
/* In tree-ssa.c */
extern bool tree_ssa_useless_type_conversion (tree);
extern tree tree_ssa_strip_useless_type_conversions (tree);
extern bool useless_type_conversion_p (tree, tree);
extern bool types_compatible_p (tree, tree);
/* In tree-ssa-coalesce.c */
extern bool gimple_can_coalesce_p (tree, tree);
/* Return the first node in GIMPLE sequence S.  A sequence is represented
   by (a pointer to) its first statement node, so this is the identity.  */
static inline gimple_seq_node
gimple_seq_first (gimple_seq s)
{
return s;
}
/* Return the first statement in GIMPLE sequence S. */
static inline gimple
gimple_seq_first_stmt (gimple_seq s)
{
gimple_seq_node n = gimple_seq_first (s);
return n;
}
/* Return the last node in GIMPLE sequence S.  The first node's prev link
   points at the last node (see gimple_init_singleton), so no walk is
   needed; NULL for the empty sequence.  */
static inline gimple_seq_node
gimple_seq_last (gimple_seq s)
{
return s ? s->gsbase.prev : NULL;
}
/* Return the last statement in GIMPLE sequence S. */
static inline gimple
gimple_seq_last_stmt (gimple_seq s)
{
gimple_seq_node n = gimple_seq_last (s);
return n;
}
/* Set the last node in GIMPLE sequence *PS to LAST (stored in the first
   node's prev link; see gimple_seq_last).  */
static inline void
gimple_seq_set_last (gimple_seq *ps, gimple_seq_node last)
{
(*ps)->gsbase.prev = last;
}
/* Set the first node in GIMPLE sequence *PS to FIRST. */
static inline void
gimple_seq_set_first (gimple_seq *ps, gimple_seq_node first)
{
*ps = first;
}
/* Return true if GIMPLE sequence S is empty. */
static inline bool
gimple_seq_empty_p (gimple_seq s)
{
return s == NULL;
}
void gimple_seq_add_stmt (gimple_seq *, gimple);
/* Link gimple statement GS to the end of the sequence *SEQ_P. If
*SEQ_P is NULL, a new sequence is allocated. This function is
similar to gimple_seq_add_stmt, but does not scan the operands.
During gimplification, we need to manipulate statement sequences
before the def/use vectors have been constructed. */
void gimple_seq_add_stmt_without_update (gimple_seq *, gimple);
/* Allocate a new sequence and initialize its first element with STMT. */
static inline gimple_seq
gimple_seq_alloc_with_stmt (gimple stmt)
{
gimple_seq seq = NULL;
gimple_seq_add_stmt (&seq, stmt);
return seq;
}
/* Returns the sequence of statements in BB, or NULL if the block has
   already been converted to RTL (BB_RTL set).  */
static inline gimple_seq
bb_seq (const_basic_block bb)
{
return (!(bb->flags & BB_RTL)) ? bb->il.gimple.seq : NULL;
}
/* Returns the address of BB's statement sequence, or NULL for RTL blocks.  */
static inline gimple_seq *
bb_seq_addr (basic_block bb)
{
return (!(bb->flags & BB_RTL)) ? &bb->il.gimple.seq : NULL;
}
/* Sets the sequence of statements in BB to SEQ.  BB must not be in RTL
   form (checked).  */
static inline void
set_bb_seq (basic_block bb, gimple_seq seq)
{
gcc_checking_assert (!(bb->flags & BB_RTL));
bb->il.gimple.seq = seq;
}
/* Return the code for GIMPLE statement G. */
static inline enum gimple_code
gimple_code (const_gimple g)
{
return g->gsbase.code;
}
/* Return the GSS code used by a GIMPLE code (table lookup; see
   gss_for_code_ above). */
static inline enum gimple_statement_structure_enum
gss_for_code (enum gimple_code code)
{
gcc_gimple_checking_assert ((unsigned int)code < LAST_AND_UNUSED_GIMPLE_CODE);
return gss_for_code_[code];
}
/* Return which GSS code is used by GS. */
static inline enum gimple_statement_structure_enum
gimple_statement_structure (gimple gs)
{
return gss_for_code (gimple_code (gs));
}
/* Return true if statement G has sub-statements.  This is only true for
   High GIMPLE statements: the codes listed here are exactly the container
   statements that hold nested statement sequences.  */
static inline bool
gimple_has_substatements (gimple g)
{
enum gimple_code code = gimple_code (g);
return (code == GIMPLE_BIND
|| code == GIMPLE_CATCH
|| code == GIMPLE_EH_FILTER
|| code == GIMPLE_EH_ELSE
|| code == GIMPLE_TRY
|| code == GIMPLE_OMP_FOR
|| code == GIMPLE_OMP_MASTER
|| code == GIMPLE_OMP_ORDERED
|| code == GIMPLE_OMP_SECTION
|| code == GIMPLE_OMP_PARALLEL
|| code == GIMPLE_OMP_TASK
|| code == GIMPLE_OMP_SECTIONS
|| code == GIMPLE_OMP_SINGLE
|| code == GIMPLE_OMP_CRITICAL
|| code == GIMPLE_WITH_CLEANUP_EXPR
|| code == GIMPLE_TRANSACTION);
}
/* Return the basic block holding statement G. */
static inline basic_block
gimple_bb (const_gimple g)
{
return g->gsbase.bb;
}
/* Return the lexical scope block holding statement G.  The block is
   packed inside the statement's location word.  */
static inline tree
gimple_block (const_gimple g)
{
return LOCATION_BLOCK (g->gsbase.location);
}
/* Set BLOCK to be the lexical scope block holding statement G.  A NULL
   BLOCK strips the block from the location, keeping only the locus.  */
static inline void
gimple_set_block (gimple g, tree block)
{
if (block)
g->gsbase.location =
COMBINE_LOCATION_DATA (line_table, g->gsbase.location, block);
else
g->gsbase.location = LOCATION_LOCUS (g->gsbase.location);
}
/* Return location information for statement G. */
static inline location_t
gimple_location (const_gimple g)
{
return g->gsbase.location;
}
/* Return pointer to location information for statement G. */
static inline const location_t *
gimple_location_ptr (const_gimple g)
{
return &g->gsbase.location;
}
/* Set location information for statement G. */
static inline void
gimple_set_location (gimple g, location_t location)
{
g->gsbase.location = location;
}
/* Return true if G contains location information. */
static inline bool
gimple_has_location (const_gimple g)
{
return LOCATION_LOCUS (gimple_location (g)) != UNKNOWN_LOCATION;
}
/* Return the file name of the location of STMT. */
static inline const char *
gimple_filename (const_gimple stmt)
{
return LOCATION_FILE (gimple_location (stmt));
}
/* Return the line number of the location of STMT. */
static inline int
gimple_lineno (const_gimple stmt)
{
return LOCATION_LINE (gimple_location (stmt));
}
/* Determine whether SEQ is a singleton, i.e. non-empty with its first
   node also being its last.  */
static inline bool
gimple_seq_singleton_p (gimple_seq seq)
{
gimple_seq_node first = gimple_seq_first (seq);
return first != NULL && first == gimple_seq_last (seq);
}
/* Return true if no warnings should be emitted for statement STMT. */
static inline bool
gimple_no_warning_p (const_gimple stmt)
{
return stmt->gsbase.no_warning;
}
/* Set the no_warning flag of STMT to NO_WARNING. */
static inline void
gimple_set_no_warning (gimple stmt, bool no_warning)
{
stmt->gsbase.no_warning = (unsigned) no_warning;
}
/* Set the visited status on statement STMT to VISITED_P. */
static inline void
gimple_set_visited (gimple stmt, bool visited_p)
{
stmt->gsbase.visited = (unsigned) visited_p;
}
/* Return the visited status for statement STMT. */
static inline bool
gimple_visited_p (gimple stmt)
{
return stmt->gsbase.visited;
}
/* Set pass local flag PLF on statement STMT to VAL_P. */
static inline void
gimple_set_plf (gimple stmt, enum plf_mask plf, bool val_p)
{
if (val_p)
stmt->gsbase.plf |= (unsigned int) plf;
else
stmt->gsbase.plf &= ~((unsigned int) plf);
}
/* Return the value of pass local flag PLF on statement STMT.  Non-zero
   (the masked bits, not normalized to 0/1) when the flag is set.  */
static inline unsigned int
gimple_plf (gimple stmt, enum plf_mask plf)
{
return stmt->gsbase.plf & ((unsigned int) plf);
}
/* Set the UID of statement. */
static inline void
gimple_set_uid (gimple g, unsigned uid)
{
g->gsbase.uid = uid;
}
/* Return the UID of statement. */
static inline unsigned
gimple_uid (const_gimple g)
{
return g->gsbase.uid;
}
/* Make statement G a singleton sequence: next is NULL and prev points
   back at G itself, matching the invariant used by gimple_seq_last.  */
static inline void
gimple_init_singleton (gimple g)
{
g->gsbase.next = NULL;
g->gsbase.prev = g;
}
/* Return true if GIMPLE statement G has register or memory operands.
   Relies on the gimple_code enum ordering: codes in [GIMPLE_COND,
   GIMPLE_RETURN] are exactly the with-ops tuples.  */
static inline bool
gimple_has_ops (const_gimple g)
{
return gimple_code (g) >= GIMPLE_COND && gimple_code (g) <= GIMPLE_RETURN;
}
/* Return true if GIMPLE statement G has memory operands.  Also relies on
   enum ordering: [GIMPLE_ASSIGN, GIMPLE_RETURN].  */
static inline bool
gimple_has_mem_ops (const_gimple g)
{
return gimple_code (g) >= GIMPLE_ASSIGN && gimple_code (g) <= GIMPLE_RETURN;
}
/* Return the set of USE operands for statement G, or NULL if G has no
   operands.  */
static inline struct use_optype_d *
gimple_use_ops (const_gimple g)
{
if (!gimple_has_ops (g))
return NULL;
return g->gsops.opbase.use_ops;
}
/* Set USE to be the set of USE operands for statement G. */
static inline void
gimple_set_use_ops (gimple g, struct use_optype_d *use)
{
gcc_gimple_checking_assert (gimple_has_ops (g));
g->gsops.opbase.use_ops = use;
}
/* Return the VUSE use-operand for statement G, i.e. the use_ops entry
   whose use pointer is the statement's vuse field; NULL_USE_OPERAND_P
   if there is none.  */
static inline use_operand_p
gimple_vuse_op (const_gimple g)
{
struct use_optype_d *ops;
if (!gimple_has_mem_ops (g))
return NULL_USE_OPERAND_P;
ops = g->gsops.opbase.use_ops;
if (ops
&& USE_OP_PTR (ops)->use == &g->gsmembase.vuse)
return USE_OP_PTR (ops);
return NULL_USE_OPERAND_P;
}
/* Return the VDEF def-operand for statement G, or NULL_DEF_OPERAND_P
   if G has no memory operands or no vdef.  */
static inline def_operand_p
gimple_vdef_op (gimple g)
{
if (!gimple_has_mem_ops (g))
return NULL_DEF_OPERAND_P;
if (g->gsmembase.vdef)
return &g->gsmembase.vdef;
return NULL_DEF_OPERAND_P;
}
/* Return the single VUSE operand of the statement G. */
static inline tree
gimple_vuse (const_gimple g)
{
if (!gimple_has_mem_ops (g))
return NULL_TREE;
return g->gsmembase.vuse;
}
/* Return the single VDEF operand of the statement G. */
static inline tree
gimple_vdef (const_gimple g)
{
if (!gimple_has_mem_ops (g))
return NULL_TREE;
return g->gsmembase.vdef;
}
/* Return a pointer to the single VUSE operand of the statement G. */
static inline tree *
gimple_vuse_ptr (gimple g)
{
if (!gimple_has_mem_ops (g))
return NULL;
return &g->gsmembase.vuse;
}
/* Return a pointer to the single VDEF operand of the statement G. */
static inline tree *
gimple_vdef_ptr (gimple g)
{
if (!gimple_has_mem_ops (g))
return NULL;
return &g->gsmembase.vdef;
}
/* Set the single VUSE operand of the statement G. */
static inline void
gimple_set_vuse (gimple g, tree vuse)
{
gcc_gimple_checking_assert (gimple_has_mem_ops (g));
g->gsmembase.vuse = vuse;
}
/* Set the single VDEF operand of the statement G. */
static inline void
gimple_set_vdef (gimple g, tree vdef)
{
gcc_gimple_checking_assert (gimple_has_mem_ops (g));
g->gsmembase.vdef = vdef;
}
/* Return true if statement G has operands and the modified field has
been set. */
static inline bool
gimple_modified_p (const_gimple g)
{
return (gimple_has_ops (g)) ? (bool) g->gsbase.modified : false;
}
/* Set the MODIFIED flag to MODIFIEDP, iff the gimple statement G has
a MODIFIED field.  Silently a no-op for statements without operands.  */
static inline void
gimple_set_modified (gimple s, bool modifiedp)
{
if (gimple_has_ops (s))
s->gsbase.modified = (unsigned) modifiedp;
}
/* Return the tree code for the expression computed by STMT. This is
only valid for GIMPLE_COND, GIMPLE_CALL and GIMPLE_ASSIGN. For
GIMPLE_CALL, return CALL_EXPR as the expression code for
consistency. This is useful when the caller needs to deal with the
three kinds of computation that GIMPLE supports. */
static inline enum tree_code
gimple_expr_code (const_gimple stmt)
{
enum gimple_code code = gimple_code (stmt);
if (code == GIMPLE_ASSIGN || code == GIMPLE_COND)
return (enum tree_code) stmt->gsbase.subcode;
else
{
gcc_gimple_checking_assert (code == GIMPLE_CALL);
return CALL_EXPR;
}
}
/* Mark statement S as modified, and update it (rescans operands via
   update_stmt_operands).  No-op for statements without operands.  */
static inline void
update_stmt (gimple s)
{
if (gimple_has_ops (s))
{
gimple_set_modified (s, true);
update_stmt_operands (s);
}
}
/* Update statement S if it has been optimized. */
static inline void
update_stmt_if_modified (gimple s)
{
if (gimple_modified_p (s))
update_stmt_operands (s);
}
/* Return true if statement STMT contains volatile operands. */
static inline bool
gimple_has_volatile_ops (const_gimple stmt)
{
if (gimple_has_mem_ops (stmt))
return stmt->gsbase.has_volatile_ops;
else
return false;
}
/* Set the HAS_VOLATILE_OPS flag to VOLATILEP.  No-op for statements
   without memory operands.  */
static inline void
gimple_set_has_volatile_ops (gimple stmt, bool volatilep)
{
if (gimple_has_mem_ops (stmt))
stmt->gsbase.has_volatile_ops = (unsigned) volatilep;
}
/* Return true if BB is in a transaction (only possible when TM is
   enabled via flag_tm).  */
static inline bool
block_in_transaction (basic_block bb)
{
return flag_tm && bb->flags & BB_IN_TRANSACTION;
}
/* Return true if STMT is in a transaction. */
static inline bool
gimple_in_transaction (gimple stmt)
{
return block_in_transaction (gimple_bb (stmt));
}
/* Return true if statement STMT may access memory (has memory operands
   and a non-NULL vuse).  */
static inline bool
gimple_references_memory_p (gimple stmt)
{
return gimple_has_mem_ops (stmt) && gimple_vuse (stmt);
}
/* Return the subcode for OMP statement S.  Valid only for codes in
   [GIMPLE_OMP_ATOMIC_LOAD, GIMPLE_OMP_SINGLE] (checked).  */
static inline unsigned
gimple_omp_subcode (const_gimple s)
{
gcc_gimple_checking_assert (gimple_code (s) >= GIMPLE_OMP_ATOMIC_LOAD
&& gimple_code (s) <= GIMPLE_OMP_SINGLE);
return s->gsbase.subcode;
}
/* Set the subcode for OMP statement S to SUBCODE. */
static inline void
gimple_omp_set_subcode (gimple s, unsigned int subcode)
{
/* We only have 16 bits for the subcode. Assert that we are not
overflowing it. */
gcc_gimple_checking_assert (subcode < (1 << 16));
s->gsbase.subcode = subcode;
}
/* Set the nowait flag on OMP_RETURN statement S. */
static inline void
gimple_omp_return_set_nowait (gimple s)
{
GIMPLE_CHECK (s, GIMPLE_OMP_RETURN);
s->gsbase.subcode |= GF_OMP_RETURN_NOWAIT;
}
/* Return true if OMP return statement G has the GF_OMP_RETURN_NOWAIT
flag set. */
static inline bool
gimple_omp_return_nowait_p (const_gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_RETURN);
return (gimple_omp_subcode (g) & GF_OMP_RETURN_NOWAIT) != 0;
}
/* Return true if OMP section statement G has the GF_OMP_SECTION_LAST
flag set. */
static inline bool
gimple_omp_section_last_p (const_gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_SECTION);
return (gimple_omp_subcode (g) & GF_OMP_SECTION_LAST) != 0;
}
/* Set the GF_OMP_SECTION_LAST flag on G. */
static inline void
gimple_omp_section_set_last (gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_SECTION);
g->gsbase.subcode |= GF_OMP_SECTION_LAST;
}
/* Return true if OMP parallel statement G has the
GF_OMP_PARALLEL_COMBINED flag set. */
static inline bool
gimple_omp_parallel_combined_p (const_gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_PARALLEL);
return (gimple_omp_subcode (g) & GF_OMP_PARALLEL_COMBINED) != 0;
}
/* Set the GF_OMP_PARALLEL_COMBINED field in G depending on the boolean
value of COMBINED_P. */
static inline void
gimple_omp_parallel_set_combined_p (gimple g, bool combined_p)
{
GIMPLE_CHECK (g, GIMPLE_OMP_PARALLEL);
if (combined_p)
g->gsbase.subcode |= GF_OMP_PARALLEL_COMBINED;
else
g->gsbase.subcode &= ~GF_OMP_PARALLEL_COMBINED;
}
/* Return true if OMP atomic load/store statement G has the
GF_OMP_ATOMIC_NEED_VALUE flag set.  G must be an atomic load or an
atomic store (checked).  */
static inline bool
gimple_omp_atomic_need_value_p (const_gimple g)
{
if (gimple_code (g) != GIMPLE_OMP_ATOMIC_LOAD)
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
return (gimple_omp_subcode (g) & GF_OMP_ATOMIC_NEED_VALUE) != 0;
}
/* Set the GF_OMP_ATOMIC_NEED_VALUE flag on G (atomic load or store).  */
static inline void
gimple_omp_atomic_set_need_value (gimple g)
{
if (gimple_code (g) != GIMPLE_OMP_ATOMIC_LOAD)
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
g->gsbase.subcode |= GF_OMP_ATOMIC_NEED_VALUE;
}
/* Return the number of operands for statement GS. */
static inline unsigned
gimple_num_ops (const_gimple gs)
{
return gs->gsbase.num_ops;
}
/* Set the number of operands for statement GS. */
static inline void
gimple_set_num_ops (gimple gs, unsigned num_ops)
{
gs->gsbase.num_ops = num_ops;
}
/* Return the array of operands for statement GS. */
static inline tree *
gimple_ops (gimple gs)
{
size_t off;
/* All the tuples have their operand vector at the very bottom
of the structure. Note that those structures that do not
have an operand vector have a zero offset. */
off = gimple_ops_offset_[gimple_statement_structure (gs)];
gcc_gimple_checking_assert (off != 0);
return (tree *) ((char *) gs + off);
}
/* Return operand I for statement GS, or NULL_TREE if GS has no
   operand vector.  */
static inline tree
gimple_op (const_gimple gs, unsigned i)
{
if (gimple_has_ops (gs))
{
gcc_gimple_checking_assert (i < gimple_num_ops (gs));
return gimple_ops (CONST_CAST_GIMPLE (gs))[i];
}
else
return NULL_TREE;
}
/* Return a pointer to operand I for statement GS, or NULL if GS has no
   operand vector.  */
static inline tree *
gimple_op_ptr (const_gimple gs, unsigned i)
{
if (gimple_has_ops (gs))
{
gcc_gimple_checking_assert (i < gimple_num_ops (gs));
return gimple_ops (CONST_CAST_GIMPLE (gs)) + i;
}
else
return NULL;
}
/* Set operand I of statement GS to OP. */
static inline void
gimple_set_op (gimple gs, unsigned i, tree op)
{
gcc_gimple_checking_assert (gimple_has_ops (gs) && i < gimple_num_ops (gs));
/* Note. It may be tempting to assert that OP matches
is_gimple_operand, but that would be wrong. Different tuples
accept slightly different sets of tree operands. Each caller
should perform its own validation. */
gimple_ops (gs)[i] = op;
}
/* Return true if GS is a GIMPLE_ASSIGN. */
static inline bool
is_gimple_assign (const_gimple gs)
{
return gimple_code (gs) == GIMPLE_ASSIGN;
}
/* Determine if expression CODE is one of the valid expressions that can
be used on the RHS of GIMPLE assignments (table lookup). */
static inline enum gimple_rhs_class
get_gimple_rhs_class (enum tree_code code)
{
return (enum gimple_rhs_class) gimple_rhs_class_table[(int) code];
}
/* Return the LHS of assignment statement GS (operand 0). */
static inline tree
gimple_assign_lhs (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
return gimple_op (gs, 0);
}
/* Return a pointer to the LHS of assignment statement GS. */
static inline tree *
gimple_assign_lhs_ptr (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
return gimple_op_ptr (gs, 0);
}
/* Set LHS to be the LHS operand of assignment statement GS.  If LHS is
   an SSA name, also make GS its defining statement.  */
static inline void
gimple_assign_set_lhs (gimple gs, tree lhs)
{
GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
gimple_set_op (gs, 0, lhs);
if (lhs && TREE_CODE (lhs) == SSA_NAME)
SSA_NAME_DEF_STMT (lhs) = gs;
}
/* Return the first operand on the RHS of assignment statement GS
   (operand 1). */
static inline tree
gimple_assign_rhs1 (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
return gimple_op (gs, 1);
}
/* Return a pointer to the first operand on the RHS of assignment
statement GS. */
static inline tree *
gimple_assign_rhs1_ptr (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
return gimple_op_ptr (gs, 1);
}
/* Set RHS to be the first operand on the RHS of assignment statement GS. */
static inline void
gimple_assign_set_rhs1 (gimple gs, tree rhs)
{
GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
gimple_set_op (gs, 1, rhs);
}
/* Return the second operand on the RHS of assignment statement GS.
If GS does not have a second RHS operand, NULL is returned instead. */
static inline tree
gimple_assign_rhs2 (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
if (gimple_num_ops (gs) >= 3)
return gimple_op (gs, 2);
else
return NULL_TREE;
}
/* Return a pointer to the second operand on the RHS of assignment
statement GS. */
static inline tree *
gimple_assign_rhs2_ptr (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
return gimple_op_ptr (gs, 2);
}
/* Set RHS to be the second operand on the RHS of assignment statement GS. */
static inline void
gimple_assign_set_rhs2 (gimple gs, tree rhs)
{
GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
gimple_set_op (gs, 2, rhs);
}
/* Return the third operand on the RHS of assignment statement GS.
If GS does not have a third RHS operand, NULL is returned instead. */
static inline tree
gimple_assign_rhs3 (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
if (gimple_num_ops (gs) >= 4)
return gimple_op (gs, 3);
else
return NULL_TREE;
}
/* Return a pointer to the third operand on the RHS of assignment
statement GS. */
static inline tree *
gimple_assign_rhs3_ptr (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
return gimple_op_ptr (gs, 3);
}
/* Set RHS to be the third operand on the RHS of assignment statement GS. */
static inline void
gimple_assign_set_rhs3 (gimple gs, tree rhs)
{
GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
gimple_set_op (gs, 3, rhs);
}
/* A wrapper around gimple_assign_set_rhs_with_ops_1, for callers which expect
   to see only a maximum of two operands.  */
static inline void
gimple_assign_set_rhs_with_ops (gimple_stmt_iterator *gsi, enum tree_code code,
				tree op1, tree op2)
{
  gimple_assign_set_rhs_with_ops_1 (gsi, code, op1, op2, NULL);
}

/* A wrapper around extract_ops_from_tree_1, for callers which expect
   to see only a maximum of two operands.  */
static inline void
extract_ops_from_tree (tree expr, enum tree_code *code, tree *op0,
		       tree *op1)
{
  tree op2;
  extract_ops_from_tree_1 (expr, code, op0, op1, &op2);
  /* The two-operand form must not be used on an expression that
     actually yields a third operand.  */
  gcc_assert (op2 == NULL_TREE);
}
/* Returns true if GS is a nontemporal move.  */
static inline bool
gimple_assign_nontemporal_move_p (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gs->gsbase.nontemporal_move;
}

/* Sets nontemporal move flag of GS to NONTEMPORAL.  */
static inline void
gimple_assign_set_nontemporal_move (gimple gs, bool nontemporal)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  gs->gsbase.nontemporal_move = nontemporal;
}

/* Return the code of the expression computed on the rhs of assignment
   statement GS.  In case that the RHS is a single object, returns the
   tree code of the object.  */
static inline enum tree_code
gimple_assign_rhs_code (const_gimple gs)
{
  enum tree_code code;
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);

  code = (enum tree_code) gs->gsbase.subcode;
  /* While we initially set subcode to the TREE_CODE of the rhs for
     GIMPLE_SINGLE_RHS assigns we do not update that subcode to stay
     in sync when we rewrite stmts into SSA form or do SSA propagations.
     Hence re-read the code from rhs1 for single-RHS assignments.  */
  if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
    code = TREE_CODE (gimple_assign_rhs1 (gs));

  return code;
}

/* Set CODE to be the code for the expression computed on the RHS of
   assignment S.  */
static inline void
gimple_assign_set_rhs_code (gimple s, enum tree_code code)
{
  GIMPLE_CHECK (s, GIMPLE_ASSIGN);
  s->gsbase.subcode = code;
}

/* Return the gimple rhs class of the code of the expression computed on
   the rhs of assignment statement GS.
   This will never return GIMPLE_INVALID_RHS.  */
static inline enum gimple_rhs_class
gimple_assign_rhs_class (const_gimple gs)
{
  return get_gimple_rhs_class (gimple_assign_rhs_code (gs));
}

/* Return true if GS is an assignment with a singleton RHS, i.e.,
   there is no operator associated with the assignment itself.
   Unlike gimple_assign_copy_p, this predicate returns true for
   any RHS operand, including those that perform an operation
   and do not have the semantics of a copy, such as COND_EXPR.  */
static inline bool
gimple_assign_single_p (gimple gs)
{
  return (is_gimple_assign (gs)
          && gimple_assign_rhs_class (gs) == GIMPLE_SINGLE_RHS);
}

/* Return true if GS performs a store to its lhs, i.e. the LHS exists
   and is not a register.  */
static inline bool
gimple_store_p (gimple gs)
{
  tree lhs = gimple_get_lhs (gs);
  return lhs && !is_gimple_reg (lhs);
}
/* Return true if GS is an assignment that loads from its rhs1.  */
static inline bool
gimple_assign_load_p (gimple gs)
{
  tree rhs;

  /* Only single-RHS assignments can be loads.  */
  if (!gimple_assign_single_p (gs))
    return false;

  rhs = gimple_assign_rhs1 (gs);

  /* WITH_SIZE_EXPR always wraps a memory access.  */
  if (TREE_CODE (rhs) == WITH_SIZE_EXPR)
    return true;

  /* Otherwise look through component references to the base object.  */
  rhs = get_base_address (rhs);
  return (DECL_P (rhs)
          || TREE_CODE (rhs) == MEM_REF || TREE_CODE (rhs) == TARGET_MEM_REF);
}

/* Return true if S is a type-cast assignment.  */
static inline bool
gimple_assign_cast_p (gimple s)
{
  if (is_gimple_assign (s))
    {
      enum tree_code sc = gimple_assign_rhs_code (s);
      return CONVERT_EXPR_CODE_P (sc)
             || sc == VIEW_CONVERT_EXPR
             || sc == FIX_TRUNC_EXPR;
    }

  return false;
}

/* Return true if S is a clobber statement.  */
static inline bool
gimple_clobber_p (gimple s)
{
  return gimple_assign_single_p (s)
         && TREE_CLOBBER_P (gimple_assign_rhs1 (s));
}
/* Return true if GS is a GIMPLE_CALL.  */
static inline bool
is_gimple_call (const_gimple gs)
{
  return gimple_code (gs) == GIMPLE_CALL;
}

/* Return the LHS of call statement GS.  Operand 0 of a GIMPLE_CALL is
   the LHS, operand 1 the callee, operand 2 the static chain; the
   arguments start at operand 3.  */
static inline tree
gimple_call_lhs (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op (gs, 0);
}

/* Return a pointer to the LHS of call statement GS.  */
static inline tree *
gimple_call_lhs_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op_ptr (gs, 0);
}

/* Set LHS to be the LHS operand of call statement GS.  */
static inline void
gimple_call_set_lhs (gimple gs, tree lhs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gimple_set_op (gs, 0, lhs);
  /* Keep the SSA web up to date: an SSA name is defined by the
     statement that assigns to it.  */
  if (lhs && TREE_CODE (lhs) == SSA_NAME)
    SSA_NAME_DEF_STMT (lhs) = gs;
}

/* Return true if call GS calls an internal-only function, as enumerated
   by internal_fn.  */
static inline bool
gimple_call_internal_p (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return (gs->gsbase.subcode & GF_CALL_INTERNAL) != 0;
}

/* Return the target of internal call GS.  */
static inline enum internal_fn
gimple_call_internal_fn (const_gimple gs)
{
  gcc_gimple_checking_assert (gimple_call_internal_p (gs));
  return gs->gimple_call.u.internal_fn;
}

/* Return the function type of the function called by GS.  Internal
   calls have no function type, so NULL_TREE is returned for them
   (the u union holds internal_fn instead of fntype in that case).  */
static inline tree
gimple_call_fntype (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  if (gimple_call_internal_p (gs))
    return NULL_TREE;
  return gs->gimple_call.u.fntype;
}

/* Set the type of the function called by GS to FNTYPE.  */
static inline void
gimple_call_set_fntype (gimple gs, tree fntype)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gcc_gimple_checking_assert (!gimple_call_internal_p (gs));
  gs->gimple_call.u.fntype = fntype;
}
/* Return the tree node representing the function called by call
   statement GS.  This is usually an ADDR_EXPR of a FUNCTION_DECL or a
   function pointer SSA name.  */
static inline tree
gimple_call_fn (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op (gs, 1);
}

/* Return a pointer to the tree node representing the function called by call
   statement GS.  */
static inline tree *
gimple_call_fn_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op_ptr (gs, 1);
}

/* Set FN to be the function called by call statement GS.  */
static inline void
gimple_call_set_fn (gimple gs, tree fn)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gcc_gimple_checking_assert (!gimple_call_internal_p (gs));
  gimple_set_op (gs, 1, fn);
}

/* Set FNDECL to be the function called by call statement GS.  The decl
   is wrapped in an ADDR_EXPR carrying the call's source location.  */
static inline void
gimple_call_set_fndecl (gimple gs, tree decl)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gcc_gimple_checking_assert (!gimple_call_internal_p (gs));
  gimple_set_op (gs, 1, build_fold_addr_expr_loc (gimple_location (gs), decl));
}

/* Set internal function FN to be the function called by call statement GS.  */
static inline void
gimple_call_set_internal_fn (gimple gs, enum internal_fn fn)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gcc_gimple_checking_assert (gimple_call_internal_p (gs));
  gs->gimple_call.u.internal_fn = fn;
}

/* Given a valid GIMPLE_CALL function address return the FUNCTION_DECL
   associated with the callee if known.  Otherwise return NULL_TREE.  */
static inline tree
gimple_call_addr_fndecl (const_tree fn)
{
  if (fn && TREE_CODE (fn) == ADDR_EXPR)
    {
      tree fndecl = TREE_OPERAND (fn, 0);
      /* Look through a zero-offset MEM_REF of the decl's address,
         i.e. &MEM[&decl, 0] -> decl.  */
      if (TREE_CODE (fndecl) == MEM_REF
          && TREE_CODE (TREE_OPERAND (fndecl, 0)) == ADDR_EXPR
          && integer_zerop (TREE_OPERAND (fndecl, 1)))
        fndecl = TREE_OPERAND (TREE_OPERAND (fndecl, 0), 0);
      if (TREE_CODE (fndecl) == FUNCTION_DECL)
        return fndecl;
    }
  return NULL_TREE;
}

/* If a given GIMPLE_CALL's callee is a FUNCTION_DECL, return it.
   Otherwise return NULL.  This function is analogous to
   get_callee_fndecl in tree land.  */
static inline tree
gimple_call_fndecl (const_gimple gs)
{
  return gimple_call_addr_fndecl (gimple_call_fn (gs));
}

/* Return the type returned by call statement GS.  */
static inline tree
gimple_call_return_type (const_gimple gs)
{
  tree type = gimple_call_fntype (gs);

  /* No fntype means an internal call; derive the type from the LHS.
     NOTE(review): this assumes such calls have a non-NULL LHS.  */
  if (type == NULL_TREE)
    return TREE_TYPE (gimple_call_lhs (gs));

  /* The type returned by a function is the type of its
     function type.  */
  return TREE_TYPE (type);
}
/* Return the static chain for call statement GS (operand 2).  */
static inline tree
gimple_call_chain (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op (gs, 2);
}

/* Return a pointer to the static chain for call statement GS.  */
static inline tree *
gimple_call_chain_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op_ptr (gs, 2);
}

/* Set CHAIN to be the static chain for call statement GS.  */
static inline void
gimple_call_set_chain (gimple gs, tree chain)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gimple_set_op (gs, 2, chain);
}

/* Return the number of arguments used by call statement GS.  The
   first three operands (LHS, callee, static chain) are not
   arguments.  */
static inline unsigned
gimple_call_num_args (const_gimple gs)
{
  unsigned num_ops;
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  num_ops = gimple_num_ops (gs);
  return num_ops - 3;
}

/* Return the argument at position INDEX for call statement GS.  The
   "+ 3" skips the LHS, callee and static chain operands.  */
static inline tree
gimple_call_arg (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op (gs, index + 3);
}

/* Return a pointer to the argument at position INDEX for call
   statement GS.  */
static inline tree *
gimple_call_arg_ptr (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op_ptr (gs, index + 3);
}

/* Set ARG to be the argument at position INDEX for call statement GS.  */
static inline void
gimple_call_set_arg (gimple gs, unsigned index, tree arg)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gimple_set_op (gs, index + 3, arg);
}
/* If TAIL_P is true, mark call statement S as being a tail call
   (i.e., a call just before the exit of a function).  These calls are
   candidate for tail call optimization.  */
static inline void
gimple_call_set_tail (gimple s, bool tail_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (tail_p)
    s->gsbase.subcode |= GF_CALL_TAILCALL;
  else
    s->gsbase.subcode &= ~GF_CALL_TAILCALL;
}

/* Return true if GIMPLE_CALL S is marked as a tail call.  */
static inline bool
gimple_call_tail_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (s->gsbase.subcode & GF_CALL_TAILCALL) != 0;
}

/* If RETURN_SLOT_OPT_P is true mark GIMPLE_CALL S as valid for return
   slot optimization.  This transformation uses the target of the call
   expansion as the return slot for calls that return in memory.  */
static inline void
gimple_call_set_return_slot_opt (gimple s, bool return_slot_opt_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (return_slot_opt_p)
    s->gsbase.subcode |= GF_CALL_RETURN_SLOT_OPT;
  else
    s->gsbase.subcode &= ~GF_CALL_RETURN_SLOT_OPT;
}

/* Return true if S is marked for return slot optimization.  */
static inline bool
gimple_call_return_slot_opt_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (s->gsbase.subcode & GF_CALL_RETURN_SLOT_OPT) != 0;
}

/* If FROM_THUNK_P is true, mark GIMPLE_CALL S as being the jump from a
   thunk to the thunked-to function.  */
static inline void
gimple_call_set_from_thunk (gimple s, bool from_thunk_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (from_thunk_p)
    s->gsbase.subcode |= GF_CALL_FROM_THUNK;
  else
    s->gsbase.subcode &= ~GF_CALL_FROM_THUNK;
}

/* Return true if GIMPLE_CALL S is a jump from a thunk.  */
static inline bool
gimple_call_from_thunk_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (s->gsbase.subcode & GF_CALL_FROM_THUNK) != 0;
}

/* If PASS_ARG_PACK_P is true, GIMPLE_CALL S is a stdarg call that needs the
   argument pack in its argument list.  */
static inline void
gimple_call_set_va_arg_pack (gimple s, bool pass_arg_pack_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (pass_arg_pack_p)
    s->gsbase.subcode |= GF_CALL_VA_ARG_PACK;
  else
    s->gsbase.subcode &= ~GF_CALL_VA_ARG_PACK;
}

/* Return true if GIMPLE_CALL S is a stdarg call that needs the
   argument pack in its argument list.  */
static inline bool
gimple_call_va_arg_pack_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (s->gsbase.subcode & GF_CALL_VA_ARG_PACK) != 0;
}

/* Return true if S is a noreturn call.  Unlike the flag accessors
   above, this derives the answer from the call's ECF flags.  */
static inline bool
gimple_call_noreturn_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (gimple_call_flags (s) & ECF_NORETURN) != 0;
}

/* If NOTHROW_P is true, GIMPLE_CALL S is a call that is known to not throw
   even if the called function can throw in other cases.  */
static inline void
gimple_call_set_nothrow (gimple s, bool nothrow_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (nothrow_p)
    s->gsbase.subcode |= GF_CALL_NOTHROW;
  else
    s->gsbase.subcode &= ~GF_CALL_NOTHROW;
}

/* Return true if S is a nothrow call.  */
static inline bool
gimple_call_nothrow_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (gimple_call_flags (s) & ECF_NOTHROW) != 0;
}

/* If FOR_VAR is true, GIMPLE_CALL S is a call to builtin_alloca that
   is known to be emitted for VLA objects.  Those are wrapped by
   stack_save/stack_restore calls and hence can't lead to unbounded
   stack growth even when they occur in loops.  */
static inline void
gimple_call_set_alloca_for_var (gimple s, bool for_var)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (for_var)
    s->gsbase.subcode |= GF_CALL_ALLOCA_FOR_VAR;
  else
    s->gsbase.subcode &= ~GF_CALL_ALLOCA_FOR_VAR;
}

/* Return true if S is a call to builtin_alloca emitted for VLA objects.  */
static inline bool
gimple_call_alloca_for_var_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (s->gsbase.subcode & GF_CALL_ALLOCA_FOR_VAR) != 0;
}

/* Copy all the GF_CALL_* flags from ORIG_CALL to DEST_CALL.  */
static inline void
gimple_call_copy_flags (gimple dest_call, gimple orig_call)
{
  GIMPLE_CHECK (dest_call, GIMPLE_CALL);
  GIMPLE_CHECK (orig_call, GIMPLE_CALL);
  dest_call->gsbase.subcode = orig_call->gsbase.subcode;
}
/* Return a pointer to the points-to solution for the set of call-used
   variables of the call CALL.  */
static inline struct pt_solution *
gimple_call_use_set (gimple call)
{
  GIMPLE_CHECK (call, GIMPLE_CALL);
  return &call->gimple_call.call_used;
}

/* Return a pointer to the points-to solution for the set of
   call-clobbered variables of the call CALL.  */
static inline struct pt_solution *
gimple_call_clobber_set (gimple call)
{
  GIMPLE_CHECK (call, GIMPLE_CALL);
  return &call->gimple_call.call_clobbered;
}

/* Returns true if this is a GIMPLE_ASSIGN or a GIMPLE_CALL with a
   non-NULL lhs.  */
static inline bool
gimple_has_lhs (gimple stmt)
{
  return (is_gimple_assign (stmt)
          || (is_gimple_call (stmt)
              && gimple_call_lhs (stmt) != NULL_TREE));
}
/* Return the code of the predicate computed by conditional statement GS.
   The comparison code is stored in the statement subcode.  */
static inline enum tree_code
gimple_cond_code (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return (enum tree_code) gs->gsbase.subcode;
}

/* Set CODE to be the predicate code for the conditional statement GS.  */
static inline void
gimple_cond_set_code (gimple gs, enum tree_code code)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  gs->gsbase.subcode = code;
}

/* Return the LHS of the predicate computed by conditional statement GS.
   A GIMPLE_COND's operands are: 0 = LHS, 1 = RHS, 2 = true label,
   3 = false label.  */
static inline tree
gimple_cond_lhs (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op (gs, 0);
}

/* Return the pointer to the LHS of the predicate computed by conditional
   statement GS.  */
static inline tree *
gimple_cond_lhs_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op_ptr (gs, 0);
}

/* Set LHS to be the LHS operand of the predicate computed by
   conditional statement GS.  */
static inline void
gimple_cond_set_lhs (gimple gs, tree lhs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  gimple_set_op (gs, 0, lhs);
}

/* Return the RHS operand of the predicate computed by conditional GS.  */
static inline tree
gimple_cond_rhs (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op (gs, 1);
}

/* Return the pointer to the RHS operand of the predicate computed by
   conditional GS.  */
static inline tree *
gimple_cond_rhs_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op_ptr (gs, 1);
}

/* Set RHS to be the RHS operand of the predicate computed by
   conditional statement GS.  */
static inline void
gimple_cond_set_rhs (gimple gs, tree rhs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  gimple_set_op (gs, 1, rhs);
}

/* Return the label used by conditional statement GS when its
   predicate evaluates to true.  */
static inline tree
gimple_cond_true_label (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op (gs, 2);
}

/* Set LABEL to be the label used by conditional statement GS when its
   predicate evaluates to true.  */
static inline void
gimple_cond_set_true_label (gimple gs, tree label)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  gimple_set_op (gs, 2, label);
}

/* Set LABEL to be the label used by conditional statement GS when its
   predicate evaluates to false.  */
static inline void
gimple_cond_set_false_label (gimple gs, tree label)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  gimple_set_op (gs, 3, label);
}

/* Return the label used by conditional statement GS when its
   predicate evaluates to false.  */
static inline tree
gimple_cond_false_label (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op (gs, 3);
}

/* Set the conditional GS to be of the form 'if (1 == 0)',
   i.e. an always-false condition.  */
static inline void
gimple_cond_make_false (gimple gs)
{
  gimple_cond_set_lhs (gs, boolean_true_node);
  gimple_cond_set_rhs (gs, boolean_false_node);
  gs->gsbase.subcode = EQ_EXPR;
}

/* Set the conditional GS to be of the form 'if (1 == 1)',
   i.e. an always-true condition.  */
static inline void
gimple_cond_make_true (gimple gs)
{
  gimple_cond_set_lhs (gs, boolean_true_node);
  gimple_cond_set_rhs (gs, boolean_true_node);
  gs->gsbase.subcode = EQ_EXPR;
}
/* Check if conditional statement GS is of the form 'if (1 == 1)',
   'if (0 == 0)', 'if (1 != 0)' or 'if (0 != 1)', i.e. a trivially
   true condition over boolean constants.  */
static inline bool
gimple_cond_true_p (const_gimple gs)
{
  tree lhs = gimple_cond_lhs (gs);
  tree rhs = gimple_cond_rhs (gs);
  enum tree_code code = gimple_cond_code (gs);

  /* Both operands must be boolean constants.  */
  if (lhs != boolean_true_node && lhs != boolean_false_node)
    return false;
  if (rhs != boolean_true_node && rhs != boolean_false_node)
    return false;

  if (code == EQ_EXPR)
    return lhs == rhs;
  if (code == NE_EXPR)
    return lhs != rhs;

  return false;
}

/* Check if conditional statement GS is of the form 'if (1 != 1)',
   'if (0 != 0)', 'if (1 == 0)' or 'if (0 == 1)', i.e. a trivially
   false condition over boolean constants.  */
static inline bool
gimple_cond_false_p (const_gimple gs)
{
  tree lhs = gimple_cond_lhs (gs);
  tree rhs = gimple_cond_rhs (gs);
  enum tree_code code = gimple_cond_code (gs);

  /* Both operands must be boolean constants.  */
  if (lhs != boolean_true_node && lhs != boolean_false_node)
    return false;
  if (rhs != boolean_true_node && rhs != boolean_false_node)
    return false;

  if (code == EQ_EXPR)
    return lhs != rhs;
  if (code == NE_EXPR)
    return lhs == rhs;

  return false;
}

/* Check if conditional statement GS is of the form 'if (var != 0)' or
   'if (var == 1)'.  */
static inline bool
gimple_cond_single_var_p (gimple gs)
{
  enum tree_code code = gimple_cond_code (gs);
  tree rhs = gimple_cond_rhs (gs);

  return (code == NE_EXPR && rhs == boolean_false_node)
         || (code == EQ_EXPR && rhs == boolean_true_node);
}
/* Set the code, LHS and RHS of GIMPLE_COND STMT from CODE, LHS and RHS.  */
static inline void
gimple_cond_set_condition (gimple stmt, enum tree_code code, tree lhs, tree rhs)
{
  gimple_cond_set_code (stmt, code);
  gimple_cond_set_lhs (stmt, lhs);
  gimple_cond_set_rhs (stmt, rhs);
}

/* Return the LABEL_DECL node used by GIMPLE_LABEL statement GS.  */
static inline tree
gimple_label_label (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_LABEL);
  return gimple_op (gs, 0);
}

/* Set LABEL to be the LABEL_DECL node used by GIMPLE_LABEL statement
   GS.  */
static inline void
gimple_label_set_label (gimple gs, tree label)
{
  GIMPLE_CHECK (gs, GIMPLE_LABEL);
  gimple_set_op (gs, 0, label);
}

/* Return the destination of the unconditional jump GS.  */
static inline tree
gimple_goto_dest (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_GOTO);
  return gimple_op (gs, 0);
}

/* Set DEST to be the destination of the unconditional jump GS.  */
static inline void
gimple_goto_set_dest (gimple gs, tree dest)
{
  GIMPLE_CHECK (gs, GIMPLE_GOTO);
  gimple_set_op (gs, 0, dest);
}
/* Return the variables declared in the GIMPLE_BIND statement GS.  */
static inline tree
gimple_bind_vars (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  return gs->gimple_bind.vars;
}

/* Set VARS to be the set of variables declared in the GIMPLE_BIND
   statement GS.  */
static inline void
gimple_bind_set_vars (gimple gs, tree vars)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  gs->gimple_bind.vars = vars;
}

/* Append VARS to the set of variables declared in the GIMPLE_BIND
   statement GS.  */
static inline void
gimple_bind_append_vars (gimple gs, tree vars)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  gs->gimple_bind.vars = chainon (gs->gimple_bind.vars, vars);
}

/* Return a pointer to the GIMPLE sequence contained in the GIMPLE_BIND
   statement GS.  */
static inline gimple_seq *
gimple_bind_body_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  return &gs->gimple_bind.body;
}

/* Return the GIMPLE sequence contained in the GIMPLE_BIND statement GS.  */
static inline gimple_seq
gimple_bind_body (gimple gs)
{
  return *gimple_bind_body_ptr (gs);
}

/* Set SEQ to be the GIMPLE sequence contained in the GIMPLE_BIND
   statement GS.  */
static inline void
gimple_bind_set_body (gimple gs, gimple_seq seq)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  gs->gimple_bind.body = seq;
}

/* Append a statement to the end of a GIMPLE_BIND's body.  */
static inline void
gimple_bind_add_stmt (gimple gs, gimple stmt)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  gimple_seq_add_stmt (&gs->gimple_bind.body, stmt);
}

/* Append a sequence of statements to the end of a GIMPLE_BIND's body.  */
static inline void
gimple_bind_add_seq (gimple gs, gimple_seq seq)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  gimple_seq_add_seq (&gs->gimple_bind.body, seq);
}

/* Return the TREE_BLOCK node associated with GIMPLE_BIND statement
   GS.  This is analogous to the BIND_EXPR_BLOCK field in trees.  */
static inline tree
gimple_bind_block (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  return gs->gimple_bind.block;
}

/* Set BLOCK to be the TREE_BLOCK node associated with GIMPLE_BIND
   statement GS.  */
static inline void
gimple_bind_set_block (gimple gs, tree block)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  gcc_gimple_checking_assert (block == NULL_TREE
                              || TREE_CODE (block) == BLOCK);
  gs->gimple_bind.block = block;
}
/* GIMPLE_ASM operand layout: outputs first, then inputs, then
   clobbers, then labels.  The offsets in the accessors below follow
   that order.  */

/* Return the number of input operands for GIMPLE_ASM GS.  */
static inline unsigned
gimple_asm_ninputs (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  return gs->gimple_asm.ni;
}

/* Return the number of output operands for GIMPLE_ASM GS.  */
static inline unsigned
gimple_asm_noutputs (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  return gs->gimple_asm.no;
}

/* Return the number of clobber operands for GIMPLE_ASM GS.  */
static inline unsigned
gimple_asm_nclobbers (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  return gs->gimple_asm.nc;
}

/* Return the number of label operands for GIMPLE_ASM GS.  */
static inline unsigned
gimple_asm_nlabels (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  return gs->gimple_asm.nl;
}

/* Return input operand INDEX of GIMPLE_ASM GS.  Inputs are stored
   after the outputs.  */
static inline tree
gimple_asm_input_op (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  gcc_gimple_checking_assert (index < gs->gimple_asm.ni);
  return gimple_op (gs, index + gs->gimple_asm.no);
}

/* Return a pointer to input operand INDEX of GIMPLE_ASM GS.  */
static inline tree *
gimple_asm_input_op_ptr (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  gcc_gimple_checking_assert (index < gs->gimple_asm.ni);
  return gimple_op_ptr (gs, index + gs->gimple_asm.no);
}

/* Set IN_OP to be input operand INDEX in GIMPLE_ASM GS.  Operands are
   TREE_LIST nodes (constraint string / operand pairs).  */
static inline void
gimple_asm_set_input_op (gimple gs, unsigned index, tree in_op)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  gcc_gimple_checking_assert (index < gs->gimple_asm.ni
                              && TREE_CODE (in_op) == TREE_LIST);
  gimple_set_op (gs, index + gs->gimple_asm.no, in_op);
}

/* Return output operand INDEX of GIMPLE_ASM GS.  Outputs come first
   in the operand vector.  */
static inline tree
gimple_asm_output_op (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  gcc_gimple_checking_assert (index < gs->gimple_asm.no);
  return gimple_op (gs, index);
}

/* Return a pointer to output operand INDEX of GIMPLE_ASM GS.  */
static inline tree *
gimple_asm_output_op_ptr (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  gcc_gimple_checking_assert (index < gs->gimple_asm.no);
  return gimple_op_ptr (gs, index);
}

/* Set OUT_OP to be output operand INDEX in GIMPLE_ASM GS.  */
static inline void
gimple_asm_set_output_op (gimple gs, unsigned index, tree out_op)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  gcc_gimple_checking_assert (index < gs->gimple_asm.no
                              && TREE_CODE (out_op) == TREE_LIST);
  gimple_set_op (gs, index, out_op);
}

/* Return clobber operand INDEX of GIMPLE_ASM GS.  Clobbers are stored
   after the outputs and inputs.  */
static inline tree
gimple_asm_clobber_op (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  gcc_gimple_checking_assert (index < gs->gimple_asm.nc);
  return gimple_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.no);
}

/* Set CLOBBER_OP to be clobber operand INDEX in GIMPLE_ASM GS.  */
static inline void
gimple_asm_set_clobber_op (gimple gs, unsigned index, tree clobber_op)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  gcc_gimple_checking_assert (index < gs->gimple_asm.nc
                              && TREE_CODE (clobber_op) == TREE_LIST);
  gimple_set_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.no, clobber_op);
}
/* Return label operand INDEX of GIMPLE_ASM GS.

   Labels are stored after the output, input and clobber operands, so
   the offset must account for all three counts.  The previous
   computation omitted the output count (no); that was harmless only
   because an "asm goto" cannot currently have outputs (no == 0
   whenever nl > 0), but it was wrong in general and inconsistent with
   gimple_asm_clobber_op, which adds both ni and no.  */
static inline tree
gimple_asm_label_op (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  gcc_gimple_checking_assert (index < gs->gimple_asm.nl);
  return gimple_op (gs, index + gs->gimple_asm.no + gs->gimple_asm.ni
                        + gs->gimple_asm.nc);
}

/* Set LABEL_OP to be label operand INDEX in GIMPLE_ASM GS.  The
   offset mirrors gimple_asm_label_op so getter and setter agree.  */
static inline void
gimple_asm_set_label_op (gimple gs, unsigned index, tree label_op)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  gcc_gimple_checking_assert (index < gs->gimple_asm.nl
                              && TREE_CODE (label_op) == TREE_LIST);
  gimple_set_op (gs, index + gs->gimple_asm.no + gs->gimple_asm.ni
                     + gs->gimple_asm.nc, label_op);
}
/* Return the string representing the assembly instruction in
   GIMPLE_ASM GS.  */
static inline const char *
gimple_asm_string (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  return gs->gimple_asm.string;
}

/* Return true if GS is an asm statement marked volatile.  */
static inline bool
gimple_asm_volatile_p (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  return (gs->gsbase.subcode & GF_ASM_VOLATILE) != 0;
}

/* If VOLATILE_P is true, mark asm statement GS as volatile.  */
static inline void
gimple_asm_set_volatile (gimple gs, bool volatile_p)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  if (volatile_p)
    gs->gsbase.subcode |= GF_ASM_VOLATILE;
  else
    gs->gsbase.subcode &= ~GF_ASM_VOLATILE;
}

/* If INPUT_P is true, mark asm GS as an ASM_INPUT.  */
static inline void
gimple_asm_set_input (gimple gs, bool input_p)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  if (input_p)
    gs->gsbase.subcode |= GF_ASM_INPUT;
  else
    gs->gsbase.subcode &= ~GF_ASM_INPUT;
}

/* Return true if asm GS is an ASM_INPUT.  */
static inline bool
gimple_asm_input_p (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  return (gs->gsbase.subcode & GF_ASM_INPUT) != 0;
}
/* Return the types handled by GIMPLE_CATCH statement GS.  */
static inline tree
gimple_catch_types (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CATCH);
  return gs->gimple_catch.types;
}

/* Return a pointer to the types handled by GIMPLE_CATCH statement GS.  */
static inline tree *
gimple_catch_types_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CATCH);
  return &gs->gimple_catch.types;
}

/* Return a pointer to the GIMPLE sequence representing the body of
   the handler of GIMPLE_CATCH statement GS.  */
static inline gimple_seq *
gimple_catch_handler_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CATCH);
  return &gs->gimple_catch.handler;
}

/* Return the GIMPLE sequence representing the body of the handler of
   GIMPLE_CATCH statement GS.  */
static inline gimple_seq
gimple_catch_handler (gimple gs)
{
  return *gimple_catch_handler_ptr (gs);
}

/* Set T to be the set of types handled by GIMPLE_CATCH GS.  */
static inline void
gimple_catch_set_types (gimple gs, tree t)
{
  GIMPLE_CHECK (gs, GIMPLE_CATCH);
  gs->gimple_catch.types = t;
}

/* Set HANDLER to be the body of GIMPLE_CATCH GS.  */
static inline void
gimple_catch_set_handler (gimple gs, gimple_seq handler)
{
  GIMPLE_CHECK (gs, GIMPLE_CATCH);
  gs->gimple_catch.handler = handler;
}

/* Return the types handled by GIMPLE_EH_FILTER statement GS.  */
static inline tree
gimple_eh_filter_types (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
  return gs->gimple_eh_filter.types;
}

/* Return a pointer to the types handled by GIMPLE_EH_FILTER statement
   GS.  */
static inline tree *
gimple_eh_filter_types_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
  return &gs->gimple_eh_filter.types;
}

/* Return a pointer to the sequence of statements to execute when the
   GIMPLE_EH_FILTER statement fails.  */
static inline gimple_seq *
gimple_eh_filter_failure_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
  return &gs->gimple_eh_filter.failure;
}

/* Return the sequence of statements to execute when the
   GIMPLE_EH_FILTER statement fails.  */
static inline gimple_seq
gimple_eh_filter_failure (gimple gs)
{
  return *gimple_eh_filter_failure_ptr (gs);
}

/* Set TYPES to be the set of types handled by GIMPLE_EH_FILTER GS.  */
static inline void
gimple_eh_filter_set_types (gimple gs, tree types)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
  gs->gimple_eh_filter.types = types;
}

/* Set FAILURE to be the sequence of statements to execute on failure
   for GIMPLE_EH_FILTER GS.  */
static inline void
gimple_eh_filter_set_failure (gimple gs, gimple_seq failure)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
  gs->gimple_eh_filter.failure = failure;
}

/* Get the function decl to be called by the MUST_NOT_THROW region.  */
static inline tree
gimple_eh_must_not_throw_fndecl (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_MUST_NOT_THROW);
  return gs->gimple_eh_mnt.fndecl;
}

/* Set the function decl to be called by GS to DECL.  */
static inline void
gimple_eh_must_not_throw_set_fndecl (gimple gs, tree decl)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_MUST_NOT_THROW);
  gs->gimple_eh_mnt.fndecl = decl;
}
/* GIMPLE_EH_ELSE accessors.  */

/* Return a pointer to the normal-path body of GIMPLE_EH_ELSE GS.  */
static inline gimple_seq *
gimple_eh_else_n_body_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_ELSE);
  return &gs->gimple_eh_else.n_body;
}

/* Return the normal-path body of GIMPLE_EH_ELSE GS.  */
static inline gimple_seq
gimple_eh_else_n_body (gimple gs)
{
  return *gimple_eh_else_n_body_ptr (gs);
}

/* Return a pointer to the exception-path body of GIMPLE_EH_ELSE GS.  */
static inline gimple_seq *
gimple_eh_else_e_body_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_ELSE);
  return &gs->gimple_eh_else.e_body;
}

/* Return the exception-path body of GIMPLE_EH_ELSE GS.  */
static inline gimple_seq
gimple_eh_else_e_body (gimple gs)
{
  return *gimple_eh_else_e_body_ptr (gs);
}

/* Set SEQ to be the normal-path body of GIMPLE_EH_ELSE GS.  */
static inline void
gimple_eh_else_set_n_body (gimple gs, gimple_seq seq)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_ELSE);
  gs->gimple_eh_else.n_body = seq;
}

/* Set SEQ to be the exception-path body of GIMPLE_EH_ELSE GS.  */
static inline void
gimple_eh_else_set_e_body (gimple gs, gimple_seq seq)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_ELSE);
  gs->gimple_eh_else.e_body = seq;
}

/* GIMPLE_TRY accessors.  */

/* Return the kind of try block represented by GIMPLE_TRY GS.  This is
   either GIMPLE_TRY_CATCH or GIMPLE_TRY_FINALLY.  */
static inline enum gimple_try_flags
gimple_try_kind (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_TRY);
  return (enum gimple_try_flags) (gs->gsbase.subcode & GIMPLE_TRY_KIND);
}

/* Set the kind of try block represented by GIMPLE_TRY GS.  Note that
   this resets the whole subcode, dropping any other flag bits.  */
static inline void
gimple_try_set_kind (gimple gs, enum gimple_try_flags kind)
{
  GIMPLE_CHECK (gs, GIMPLE_TRY);
  gcc_gimple_checking_assert (kind == GIMPLE_TRY_CATCH
                              || kind == GIMPLE_TRY_FINALLY);
  if (gimple_try_kind (gs) != kind)
    gs->gsbase.subcode = (unsigned int) kind;
}

/* Return the GIMPLE_TRY_CATCH_IS_CLEANUP flag.  */
static inline bool
gimple_try_catch_is_cleanup (const_gimple gs)
{
  gcc_gimple_checking_assert (gimple_try_kind (gs) == GIMPLE_TRY_CATCH);
  return (gs->gsbase.subcode & GIMPLE_TRY_CATCH_IS_CLEANUP) != 0;
}

/* Return a pointer to the sequence of statements used as the
   body for GIMPLE_TRY GS.  */
static inline gimple_seq *
gimple_try_eval_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_TRY);
  return &gs->gimple_try.eval;
}

/* Return the sequence of statements used as the body for GIMPLE_TRY GS.  */
static inline gimple_seq
gimple_try_eval (gimple gs)
{
  return *gimple_try_eval_ptr (gs);
}

/* Return a pointer to the sequence of statements used as the cleanup body for
   GIMPLE_TRY GS.  */
static inline gimple_seq *
gimple_try_cleanup_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_TRY);
  return &gs->gimple_try.cleanup;
}

/* Return the sequence of statements used as the cleanup body for
   GIMPLE_TRY GS.  */
static inline gimple_seq
gimple_try_cleanup (gimple gs)
{
  return *gimple_try_cleanup_ptr (gs);
}
/* Set the GIMPLE_TRY_CATCH_IS_CLEANUP flag. */
static inline void
gimple_try_set_catch_is_cleanup (gimple g, bool catch_is_cleanup)
{
  unsigned int subcode;
  /* Only meaningful on a GIMPLE_TRY_CATCH block.  */
  gcc_gimple_checking_assert (gimple_try_kind (g) == GIMPLE_TRY_CATCH);
  /* Clear the flag bit, then re-set it if requested, and store once.  */
  subcode = g->gsbase.subcode & ~GIMPLE_TRY_CATCH_IS_CLEANUP;
  if (catch_is_cleanup)
    subcode |= GIMPLE_TRY_CATCH_IS_CLEANUP;
  g->gsbase.subcode = subcode;
}
/* Set EVAL to be the sequence of statements to use as the body for
GIMPLE_TRY GS. */
static inline void
gimple_try_set_eval (gimple gs, gimple_seq eval)
{
GIMPLE_CHECK (gs, GIMPLE_TRY);
gs->gimple_try.eval = eval;
}
/* Set CLEANUP to be the sequence of statements to use as the cleanup
body for GIMPLE_TRY GS. */
static inline void
gimple_try_set_cleanup (gimple gs, gimple_seq cleanup)
{
GIMPLE_CHECK (gs, GIMPLE_TRY);
gs->gimple_try.cleanup = cleanup;
}
/* Return a pointer to the cleanup sequence for cleanup statement GS. */
static inline gimple_seq *
gimple_wce_cleanup_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
return &gs->gimple_wce.cleanup;
}
/* Return the cleanup sequence for cleanup statement GS. */
static inline gimple_seq
gimple_wce_cleanup (gimple gs)
{
return *gimple_wce_cleanup_ptr (gs);
}
/* Set CLEANUP to be the cleanup sequence for GS. */
static inline void
gimple_wce_set_cleanup (gimple gs, gimple_seq cleanup)
{
GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
gs->gimple_wce.cleanup = cleanup;
}
/* Return the CLEANUP_EH_ONLY flag for a WCE tuple. */
static inline bool
gimple_wce_cleanup_eh_only (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
return gs->gsbase.subcode != 0;
}
/* Set the CLEANUP_EH_ONLY flag for a WCE tuple. */
static inline void
gimple_wce_set_cleanup_eh_only (gimple gs, bool eh_only_p)
{
GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
gs->gsbase.subcode = (unsigned int) eh_only_p;
}
/* Return the maximum number of arguments supported by GIMPLE_PHI GS. */
static inline unsigned
gimple_phi_capacity (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_PHI);
return gs->gimple_phi.capacity;
}
/* Return the number of arguments in GIMPLE_PHI GS. This must always
be exactly the number of incoming edges for the basic block holding
GS. */
static inline unsigned
gimple_phi_num_args (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_PHI);
return gs->gimple_phi.nargs;
}
/* Return the SSA name created by GIMPLE_PHI GS. */
static inline tree
gimple_phi_result (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_PHI);
return gs->gimple_phi.result;
}
/* Return a pointer to the SSA name created by GIMPLE_PHI GS. */
static inline tree *
gimple_phi_result_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_PHI);
return &gs->gimple_phi.result;
}
/* Set RESULT to be the SSA name created by GIMPLE_PHI GS. */
static inline void
gimple_phi_set_result (gimple gs, tree result)
{
GIMPLE_CHECK (gs, GIMPLE_PHI);
gs->gimple_phi.result = result;
if (result && TREE_CODE (result) == SSA_NAME)
SSA_NAME_DEF_STMT (result) = gs;
}
/* Return the PHI argument corresponding to incoming edge INDEX for
GIMPLE_PHI GS. */
static inline struct phi_arg_d *
gimple_phi_arg (gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_PHI);
  /* args[] holds CAPACITY slots, so the last valid index is
     CAPACITY - 1.  The previous `<=' check was an off-by-one that
     allowed taking the address of args[capacity], one past the end.  */
  gcc_gimple_checking_assert (index < gs->gimple_phi.capacity);
  return &(gs->gimple_phi.args[index]);
}
/* Set PHIARG to be the argument corresponding to incoming edge INDEX
for GIMPLE_PHI GS. */
static inline void
gimple_phi_set_arg (gimple gs, unsigned index, struct phi_arg_d * phiarg)
{
  GIMPLE_CHECK (gs, GIMPLE_PHI);
  /* Only existing arguments (0 .. nargs - 1) may be overwritten; the
     previous `<=' check was an off-by-one permitting a write one slot
     past the current argument list.  */
  gcc_gimple_checking_assert (index < gs->gimple_phi.nargs);
  gs->gimple_phi.args[index] = *phiarg;
}
/* Return the region number for GIMPLE_RESX GS. */
static inline int
gimple_resx_region (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_RESX);
return gs->gimple_eh_ctrl.region;
}
/* Set REGION to be the region number for GIMPLE_RESX GS. */
static inline void
gimple_resx_set_region (gimple gs, int region)
{
GIMPLE_CHECK (gs, GIMPLE_RESX);
gs->gimple_eh_ctrl.region = region;
}
/* Return the region number for GIMPLE_EH_DISPATCH GS. */
static inline int
gimple_eh_dispatch_region (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_EH_DISPATCH);
return gs->gimple_eh_ctrl.region;
}
/* Set REGION to be the region number for GIMPLE_EH_DISPATCH GS. */
static inline void
gimple_eh_dispatch_set_region (gimple gs, int region)
{
GIMPLE_CHECK (gs, GIMPLE_EH_DISPATCH);
gs->gimple_eh_ctrl.region = region;
}
/* Return the number of labels associated with the switch statement GS. */
static inline unsigned
gimple_switch_num_labels (const_gimple gs)
{
unsigned num_ops;
GIMPLE_CHECK (gs, GIMPLE_SWITCH);
num_ops = gimple_num_ops (gs);
gcc_gimple_checking_assert (num_ops > 1);
return num_ops - 1;
}
/* Set NLABELS to be the number of labels for the switch statement GS. */
static inline void
gimple_switch_set_num_labels (gimple g, unsigned nlabels)
{
GIMPLE_CHECK (g, GIMPLE_SWITCH);
gimple_set_num_ops (g, nlabels + 1);
}
/* Return the index variable used by the switch statement GS. */
static inline tree
gimple_switch_index (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_SWITCH);
return gimple_op (gs, 0);
}
/* Return a pointer to the index variable for the switch statement GS. */
static inline tree *
gimple_switch_index_ptr (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_SWITCH);
return gimple_op_ptr (gs, 0);
}
/* Set INDEX to be the index variable for switch statement GS. */
static inline void
gimple_switch_set_index (gimple gs, tree index)
{
GIMPLE_CHECK (gs, GIMPLE_SWITCH);
gcc_gimple_checking_assert (SSA_VAR_P (index) || CONSTANT_CLASS_P (index));
gimple_set_op (gs, 0, index);
}
/* Return the label numbered INDEX. The default label is 0, followed by any
labels in a switch statement. */
static inline tree
gimple_switch_label (const_gimple gs, unsigned index)
{
GIMPLE_CHECK (gs, GIMPLE_SWITCH);
gcc_gimple_checking_assert (gimple_num_ops (gs) > index + 1);
return gimple_op (gs, index + 1);
}
/* Set the label number INDEX to LABEL. 0 is always the default label. */
static inline void
gimple_switch_set_label (gimple gs, unsigned index, tree label)
{
GIMPLE_CHECK (gs, GIMPLE_SWITCH);
gcc_gimple_checking_assert (gimple_num_ops (gs) > index + 1
&& (label == NULL_TREE
|| TREE_CODE (label) == CASE_LABEL_EXPR));
gimple_set_op (gs, index + 1, label);
}
/* Return the default label for a switch statement. */
static inline tree
gimple_switch_default_label (const_gimple gs)
{
tree label = gimple_switch_label (gs, 0);
gcc_checking_assert (!CASE_LOW (label) && !CASE_HIGH (label));
return label;
}
/* Set the default label for a switch statement. */
static inline void
gimple_switch_set_default_label (gimple gs, tree label)
{
gcc_checking_assert (!CASE_LOW (label) && !CASE_HIGH (label));
gimple_switch_set_label (gs, 0, label);
}
/* Return true if GS is a GIMPLE_DEBUG statement. */
static inline bool
is_gimple_debug (const_gimple gs)
{
return gimple_code (gs) == GIMPLE_DEBUG;
}
/* Return true if S is a GIMPLE_DEBUG BIND statement. */
static inline bool
gimple_debug_bind_p (const_gimple s)
{
  /* A debug bind is a GIMPLE_DEBUG statement whose subcode is
     GIMPLE_DEBUG_BIND; the short-circuit guards the subcode read.  */
  return is_gimple_debug (s) && s->gsbase.subcode == GIMPLE_DEBUG_BIND;
}
/* Return the variable bound in a GIMPLE_DEBUG bind statement. */
static inline tree
gimple_debug_bind_get_var (gimple dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
return gimple_op (dbg, 0);
}
/* Return the value bound to the variable in a GIMPLE_DEBUG bind
statement. */
static inline tree
gimple_debug_bind_get_value (gimple dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
return gimple_op (dbg, 1);
}
/* Return a pointer to the value bound to the variable in a
GIMPLE_DEBUG bind statement. */
static inline tree *
gimple_debug_bind_get_value_ptr (gimple dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
return gimple_op_ptr (dbg, 1);
}
/* Set the variable bound in a GIMPLE_DEBUG bind statement. */
static inline void
gimple_debug_bind_set_var (gimple dbg, tree var)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
gimple_set_op (dbg, 0, var);
}
/* Set the value bound to the variable in a GIMPLE_DEBUG bind
statement. */
static inline void
gimple_debug_bind_set_value (gimple dbg, tree value)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
gimple_set_op (dbg, 1, value);
}
/* The second operand of a GIMPLE_DEBUG_BIND, when the value was
optimized away. */
#define GIMPLE_DEBUG_BIND_NOVALUE NULL_TREE /* error_mark_node */
/* Remove the value bound to the variable in a GIMPLE_DEBUG bind
statement. */
static inline void
gimple_debug_bind_reset_value (gimple dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
gimple_set_op (dbg, 1, GIMPLE_DEBUG_BIND_NOVALUE);
}
/* Return true if the GIMPLE_DEBUG bind statement is bound to a
value. */
static inline bool
gimple_debug_bind_has_value_p (gimple dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
return gimple_op (dbg, 1) != GIMPLE_DEBUG_BIND_NOVALUE;
}
#undef GIMPLE_DEBUG_BIND_NOVALUE
/* Return true if S is a GIMPLE_DEBUG SOURCE BIND statement. */
static inline bool
gimple_debug_source_bind_p (const_gimple s)
{
  /* A source bind is a GIMPLE_DEBUG statement whose subcode is
     GIMPLE_DEBUG_SOURCE_BIND; the short-circuit guards the subcode
     read.  */
  return is_gimple_debug (s) && s->gsbase.subcode == GIMPLE_DEBUG_SOURCE_BIND;
}
/* Return the variable bound in a GIMPLE_DEBUG source bind statement. */
static inline tree
gimple_debug_source_bind_get_var (gimple dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg));
return gimple_op (dbg, 0);
}
/* Return the value bound to the variable in a GIMPLE_DEBUG source bind
statement. */
static inline tree
gimple_debug_source_bind_get_value (gimple dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg));
return gimple_op (dbg, 1);
}
/* Return a pointer to the value bound to the variable in a
GIMPLE_DEBUG source bind statement. */
static inline tree *
gimple_debug_source_bind_get_value_ptr (gimple dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg));
return gimple_op_ptr (dbg, 1);
}
/* Set the variable bound in a GIMPLE_DEBUG source bind statement. */
static inline void
gimple_debug_source_bind_set_var (gimple dbg, tree var)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg));
gimple_set_op (dbg, 0, var);
}
/* Set the value bound to the variable in a GIMPLE_DEBUG source bind
statement. */
static inline void
gimple_debug_source_bind_set_value (gimple dbg, tree value)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg));
gimple_set_op (dbg, 1, value);
}
/* Return a pointer to the body for the OMP statement GS. */
static inline gimple_seq *
gimple_omp_body_ptr (gimple gs)
{
return &gs->omp.body;
}
/* Return the body for the OMP statement GS. */
static inline gimple_seq
gimple_omp_body (gimple gs)
{
return *gimple_omp_body_ptr (gs);
}
/* Set BODY to be the body for the OMP statement GS. */
static inline void
gimple_omp_set_body (gimple gs, gimple_seq body)
{
gs->omp.body = body;
}
/* Return the name associated with OMP_CRITICAL statement GS. */
static inline tree
gimple_omp_critical_name (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_CRITICAL);
return gs->gimple_omp_critical.name;
}
/* Return a pointer to the name associated with OMP critical statement GS. */
static inline tree *
gimple_omp_critical_name_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_CRITICAL);
return &gs->gimple_omp_critical.name;
}
/* Set NAME to be the name associated with OMP critical statement GS. */
static inline void
gimple_omp_critical_set_name (gimple gs, tree name)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_CRITICAL);
gs->gimple_omp_critical.name = name;
}
/* Return the clauses associated with OMP_FOR GS. */
static inline tree
gimple_omp_for_clauses (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
return gs->gimple_omp_for.clauses;
}
/* Return a pointer to the clauses associated with OMP_FOR GS. */
static inline tree *
gimple_omp_for_clauses_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
return &gs->gimple_omp_for.clauses;
}
/* Set CLAUSES to be the list of clauses associated with OMP_FOR GS. */
static inline void
gimple_omp_for_set_clauses (gimple gs, tree clauses)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gs->gimple_omp_for.clauses = clauses;
}
/* Get the collapse count of OMP_FOR GS. */
static inline size_t
gimple_omp_for_collapse (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
return gs->gimple_omp_for.collapse;
}
/* Return the index variable for OMP_FOR GS. */
static inline tree
gimple_omp_for_index (const_gimple gs, size_t i)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
return gs->gimple_omp_for.iter[i].index;
}
/* Return a pointer to the index variable for OMP_FOR GS. */
static inline tree *
gimple_omp_for_index_ptr (gimple gs, size_t i)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
return &gs->gimple_omp_for.iter[i].index;
}
/* Set INDEX to be the index variable for OMP_FOR GS. */
static inline void
gimple_omp_for_set_index (gimple gs, size_t i, tree index)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
gs->gimple_omp_for.iter[i].index = index;
}
/* Return the initial value for OMP_FOR GS. */
static inline tree
gimple_omp_for_initial (const_gimple gs, size_t i)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
return gs->gimple_omp_for.iter[i].initial;
}
/* Return a pointer to the initial value for OMP_FOR GS. */
static inline tree *
gimple_omp_for_initial_ptr (gimple gs, size_t i)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
return &gs->gimple_omp_for.iter[i].initial;
}
/* Set INITIAL to be the initial value for OMP_FOR GS. */
static inline void
gimple_omp_for_set_initial (gimple gs, size_t i, tree initial)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
gs->gimple_omp_for.iter[i].initial = initial;
}
/* Return the final value for OMP_FOR GS. */
static inline tree
gimple_omp_for_final (const_gimple gs, size_t i)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
return gs->gimple_omp_for.iter[i].final;
}
/* Return a pointer to the final value for OMP_FOR GS. */
static inline tree *
gimple_omp_for_final_ptr (gimple gs, size_t i)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
return &gs->gimple_omp_for.iter[i].final;
}
/* Set FINAL to be the final value for OMP_FOR GS. */
static inline void
gimple_omp_for_set_final (gimple gs, size_t i, tree final)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
gs->gimple_omp_for.iter[i].final = final;
}
/* Return the increment value for OMP_FOR GS. */
static inline tree
gimple_omp_for_incr (const_gimple gs, size_t i)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
return gs->gimple_omp_for.iter[i].incr;
}
/* Return a pointer to the increment value for OMP_FOR GS. */
static inline tree *
gimple_omp_for_incr_ptr (gimple gs, size_t i)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
return &gs->gimple_omp_for.iter[i].incr;
}
/* Set INCR to be the increment value for OMP_FOR GS. */
static inline void
gimple_omp_for_set_incr (gimple gs, size_t i, tree incr)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
gs->gimple_omp_for.iter[i].incr = incr;
}
/* Return a pointer to the sequence of statements to execute before the OMP_FOR
statement GS starts. */
static inline gimple_seq *
gimple_omp_for_pre_body_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
return &gs->gimple_omp_for.pre_body;
}
/* Return the sequence of statements to execute before the OMP_FOR
statement GS starts. */
static inline gimple_seq
gimple_omp_for_pre_body (gimple gs)
{
return *gimple_omp_for_pre_body_ptr (gs);
}
/* Set PRE_BODY to be the sequence of statements to execute before the
OMP_FOR statement GS starts. */
static inline void
gimple_omp_for_set_pre_body (gimple gs, gimple_seq pre_body)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gs->gimple_omp_for.pre_body = pre_body;
}
/* Return the clauses associated with OMP_PARALLEL GS. */
static inline tree
gimple_omp_parallel_clauses (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
return gs->gimple_omp_parallel.clauses;
}
/* Return a pointer to the clauses associated with OMP_PARALLEL GS. */
static inline tree *
gimple_omp_parallel_clauses_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
return &gs->gimple_omp_parallel.clauses;
}
/* Set CLAUSES to be the list of clauses associated with OMP_PARALLEL
GS. */
static inline void
gimple_omp_parallel_set_clauses (gimple gs, tree clauses)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
gs->gimple_omp_parallel.clauses = clauses;
}
/* Return the child function used to hold the body of OMP_PARALLEL GS. */
static inline tree
gimple_omp_parallel_child_fn (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
return gs->gimple_omp_parallel.child_fn;
}
/* Return a pointer to the child function used to hold the body of
OMP_PARALLEL GS. */
static inline tree *
gimple_omp_parallel_child_fn_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
return &gs->gimple_omp_parallel.child_fn;
}
/* Set CHILD_FN to be the child function for OMP_PARALLEL GS. */
static inline void
gimple_omp_parallel_set_child_fn (gimple gs, tree child_fn)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
gs->gimple_omp_parallel.child_fn = child_fn;
}
/* Return the artificial argument used to send variables and values
from the parent to the children threads in OMP_PARALLEL GS. */
static inline tree
gimple_omp_parallel_data_arg (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
return gs->gimple_omp_parallel.data_arg;
}
/* Return a pointer to the data argument for OMP_PARALLEL GS. */
static inline tree *
gimple_omp_parallel_data_arg_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
return &gs->gimple_omp_parallel.data_arg;
}
/* Set DATA_ARG to be the data argument for OMP_PARALLEL GS. */
static inline void
gimple_omp_parallel_set_data_arg (gimple gs, tree data_arg)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
gs->gimple_omp_parallel.data_arg = data_arg;
}
/* Return the clauses associated with OMP_TASK GS. */
static inline tree
gimple_omp_task_clauses (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return gs->gimple_omp_parallel.clauses;
}
/* Return a pointer to the clauses associated with OMP_TASK GS. */
static inline tree *
gimple_omp_task_clauses_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return &gs->gimple_omp_parallel.clauses;
}
/* Set CLAUSES to be the list of clauses associated with OMP_TASK
GS. */
static inline void
gimple_omp_task_set_clauses (gimple gs, tree clauses)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
gs->gimple_omp_parallel.clauses = clauses;
}
/* Return the child function used to hold the body of OMP_TASK GS. */
static inline tree
gimple_omp_task_child_fn (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return gs->gimple_omp_parallel.child_fn;
}
/* Return a pointer to the child function used to hold the body of
OMP_TASK GS. */
static inline tree *
gimple_omp_task_child_fn_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return &gs->gimple_omp_parallel.child_fn;
}
/* Set CHILD_FN to be the child function for OMP_TASK GS. */
static inline void
gimple_omp_task_set_child_fn (gimple gs, tree child_fn)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
gs->gimple_omp_parallel.child_fn = child_fn;
}
/* Return the artificial argument used to send variables and values
from the parent to the children threads in OMP_TASK GS. */
static inline tree
gimple_omp_task_data_arg (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return gs->gimple_omp_parallel.data_arg;
}
/* Return a pointer to the data argument for OMP_TASK GS. */
static inline tree *
gimple_omp_task_data_arg_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return &gs->gimple_omp_parallel.data_arg;
}
/* Set DATA_ARG to be the data argument for OMP_TASK GS. */
static inline void
gimple_omp_task_set_data_arg (gimple gs, tree data_arg)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
gs->gimple_omp_parallel.data_arg = data_arg;
}
/* Return the clauses associated with OMP_PARALLEL or OMP_TASK GS. */
static inline tree
gimple_omp_taskreg_clauses (const_gimple gs)
{
if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return gs->gimple_omp_parallel.clauses;
}
/* Return a pointer to the clauses associated with OMP_TASK GS. */
static inline tree *
gimple_omp_taskreg_clauses_ptr (gimple gs)
{
if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return &gs->gimple_omp_parallel.clauses;
}
/* Set CLAUSES to be the list of clauses associated with OMP_TASK
GS. */
static inline void
gimple_omp_taskreg_set_clauses (gimple gs, tree clauses)
{
if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
gs->gimple_omp_parallel.clauses = clauses;
}
/* Return the child function used to hold the body of OMP_TASK GS. */
static inline tree
gimple_omp_taskreg_child_fn (const_gimple gs)
{
if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return gs->gimple_omp_parallel.child_fn;
}
/* Return a pointer to the child function used to hold the body of
OMP_TASK GS. */
static inline tree *
gimple_omp_taskreg_child_fn_ptr (gimple gs)
{
if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return &gs->gimple_omp_parallel.child_fn;
}
/* Set CHILD_FN to be the child function for OMP_TASK GS. */
static inline void
gimple_omp_taskreg_set_child_fn (gimple gs, tree child_fn)
{
if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
gs->gimple_omp_parallel.child_fn = child_fn;
}
/* Return the artificial argument used to send variables and values
from the parent to the children threads in OMP_TASK GS. */
static inline tree
gimple_omp_taskreg_data_arg (const_gimple gs)
{
if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return gs->gimple_omp_parallel.data_arg;
}
/* Return a pointer to the data argument for OMP_TASK GS. */
static inline tree *
gimple_omp_taskreg_data_arg_ptr (gimple gs)
{
if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return &gs->gimple_omp_parallel.data_arg;
}
/* Set DATA_ARG to be the data argument for OMP_TASK GS. */
static inline void
gimple_omp_taskreg_set_data_arg (gimple gs, tree data_arg)
{
if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
gs->gimple_omp_parallel.data_arg = data_arg;
}
/* Return the copy function used to hold the body of OMP_TASK GS. */
static inline tree
gimple_omp_task_copy_fn (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return gs->gimple_omp_task.copy_fn;
}
/* Return a pointer to the copy function used to hold the body of
OMP_TASK GS. */
static inline tree *
gimple_omp_task_copy_fn_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return &gs->gimple_omp_task.copy_fn;
}
/* Set COPY_FN to be the copy function for OMP_TASK GS. */
static inline void
gimple_omp_task_set_copy_fn (gimple gs, tree copy_fn)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
gs->gimple_omp_task.copy_fn = copy_fn;
}
/* Return size of the data block in bytes in OMP_TASK GS. */
static inline tree
gimple_omp_task_arg_size (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return gs->gimple_omp_task.arg_size;
}
/* Return a pointer to the data block size for OMP_TASK GS. */
static inline tree *
gimple_omp_task_arg_size_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return &gs->gimple_omp_task.arg_size;
}
/* Set ARG_SIZE to be the data block size for OMP_TASK GS. */
static inline void
gimple_omp_task_set_arg_size (gimple gs, tree arg_size)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
gs->gimple_omp_task.arg_size = arg_size;
}
/* Return align of the data block in bytes in OMP_TASK GS. */
static inline tree
gimple_omp_task_arg_align (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return gs->gimple_omp_task.arg_align;
}
/* Return a pointer to the data block align for OMP_TASK GS. */
static inline tree *
gimple_omp_task_arg_align_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return &gs->gimple_omp_task.arg_align;
}
/* Set ARG_ALIGN to be the data block align for OMP_TASK GS. */
static inline void
gimple_omp_task_set_arg_align (gimple gs, tree arg_align)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
gs->gimple_omp_task.arg_align = arg_align;
}
/* Return the clauses associated with OMP_SINGLE GS. */
static inline tree
gimple_omp_single_clauses (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SINGLE);
return gs->gimple_omp_single.clauses;
}
/* Return a pointer to the clauses associated with OMP_SINGLE GS. */
static inline tree *
gimple_omp_single_clauses_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SINGLE);
return &gs->gimple_omp_single.clauses;
}
/* Set CLAUSES to be the clauses associated with OMP_SINGLE GS. */
static inline void
gimple_omp_single_set_clauses (gimple gs, tree clauses)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SINGLE);
gs->gimple_omp_single.clauses = clauses;
}
/* Return the clauses associated with OMP_SECTIONS GS. */
static inline tree
gimple_omp_sections_clauses (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
return gs->gimple_omp_sections.clauses;
}
/* Return a pointer to the clauses associated with OMP_SECTIONS GS. */
static inline tree *
gimple_omp_sections_clauses_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
return &gs->gimple_omp_sections.clauses;
}
/* Set CLAUSES to be the set of clauses associated with OMP_SECTIONS
GS. */
static inline void
gimple_omp_sections_set_clauses (gimple gs, tree clauses)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
gs->gimple_omp_sections.clauses = clauses;
}
/* Return the control variable associated with the GIMPLE_OMP_SECTIONS
in GS. */
static inline tree
gimple_omp_sections_control (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
return gs->gimple_omp_sections.control;
}
/* Return a pointer to the control variable associated with the
   GIMPLE_OMP_SECTIONS in GS. */
static inline tree *
gimple_omp_sections_control_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
return &gs->gimple_omp_sections.control;
}
/* Set CONTROL to be the control variable associated with the
   GIMPLE_OMP_SECTIONS in GS. */
static inline void
gimple_omp_sections_set_control (gimple gs, tree control)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
gs->gimple_omp_sections.control = control;
}
/* Set COND to be the condition code for OMP_FOR GS. */
static inline void
gimple_omp_for_set_cond (gimple gs, size_t i, enum tree_code cond)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_gimple_checking_assert (TREE_CODE_CLASS (cond) == tcc_comparison
&& i < gs->gimple_omp_for.collapse);
gs->gimple_omp_for.iter[i].cond = cond;
}
/* Return the condition code associated with OMP_FOR GS. */
static inline enum tree_code
gimple_omp_for_cond (const_gimple gs, size_t i)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
return gs->gimple_omp_for.iter[i].cond;
}
/* Set the value being stored in an atomic store. */
static inline void
gimple_omp_atomic_store_set_val (gimple g, tree val)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
g->gimple_omp_atomic_store.val = val;
}
/* Return the value being stored in an atomic store. */
static inline tree
gimple_omp_atomic_store_val (const_gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
return g->gimple_omp_atomic_store.val;
}
/* Return a pointer to the value being stored in an atomic store. */
static inline tree *
gimple_omp_atomic_store_val_ptr (gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
return &g->gimple_omp_atomic_store.val;
}
/* Set the LHS of an atomic load. */
static inline void
gimple_omp_atomic_load_set_lhs (gimple g, tree lhs)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
g->gimple_omp_atomic_load.lhs = lhs;
}
/* Get the LHS of an atomic load. */
static inline tree
gimple_omp_atomic_load_lhs (const_gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
return g->gimple_omp_atomic_load.lhs;
}
/* Return a pointer to the LHS of an atomic load. */
static inline tree *
gimple_omp_atomic_load_lhs_ptr (gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
return &g->gimple_omp_atomic_load.lhs;
}
/* Set the RHS of an atomic load. */
static inline void
gimple_omp_atomic_load_set_rhs (gimple g, tree rhs)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
g->gimple_omp_atomic_load.rhs = rhs;
}
/* Get the RHS of an atomic load. */
static inline tree
gimple_omp_atomic_load_rhs (const_gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
return g->gimple_omp_atomic_load.rhs;
}
/* Return a pointer to the RHS of an atomic load. */
static inline tree *
gimple_omp_atomic_load_rhs_ptr (gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
return &g->gimple_omp_atomic_load.rhs;
}
/* Return the definition of the control variable in the
   GIMPLE_OMP_CONTINUE statement G.  */
static inline tree
gimple_omp_continue_control_def (const_gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
return g->gimple_omp_continue.control_def;
}
/* Like gimple_omp_continue_control_def, but return the address of the
   field so it can be updated in place.  */
static inline tree *
gimple_omp_continue_control_def_ptr (gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
return &g->gimple_omp_continue.control_def;
}
/* Set DEF to be the definition of the control variable in the
   GIMPLE_OMP_CONTINUE statement G.  */
static inline void
gimple_omp_continue_set_control_def (gimple g, tree def)
{
GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
g->gimple_omp_continue.control_def = def;
}
/* Return the use of the control variable in the GIMPLE_OMP_CONTINUE
   statement G.  */
static inline tree
gimple_omp_continue_control_use (const_gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
return g->gimple_omp_continue.control_use;
}
/* Like gimple_omp_continue_control_use, but return the address of the
   field so it can be updated in place.  */
static inline tree *
gimple_omp_continue_control_use_ptr (gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
return &g->gimple_omp_continue.control_use;
}
/* Set USE to be the use of the control variable in the
   GIMPLE_OMP_CONTINUE statement G.  */
static inline void
gimple_omp_continue_set_control_use (gimple g, tree use)
{
GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
g->gimple_omp_continue.control_use = use;
}
/* Return a pointer to the body sequence of the GIMPLE_TRANSACTION
   statement GS.  */
static inline gimple_seq *
gimple_transaction_body_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
return &gs->gimple_transaction.body;
}
/* Return the body sequence of the GIMPLE_TRANSACTION statement GS.
   Delegates to gimple_transaction_body_ptr, which performs the
   statement-code check.  */
static inline gimple_seq
gimple_transaction_body (gimple gs)
{
return *gimple_transaction_body_ptr (gs);
}
/* Return the label associated with the GIMPLE_TRANSACTION statement GS.  */
static inline tree
gimple_transaction_label (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
return gs->gimple_transaction.label;
}
/* Return a pointer to the label field of the GIMPLE_TRANSACTION
   statement GS, so it can be updated in place.  */
static inline tree *
gimple_transaction_label_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
return &gs->gimple_transaction.label;
}
/* Return the subcode flags associated with the GIMPLE_TRANSACTION
   statement GS.  The flags are stored in the common statement base.  */
static inline unsigned int
gimple_transaction_subcode (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
return gs->gsbase.subcode;
}
/* Set BODY to be the body sequence of the GIMPLE_TRANSACTION
   statement GS.  */
static inline void
gimple_transaction_set_body (gimple gs, gimple_seq body)
{
GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
gs->gimple_transaction.body = body;
}
/* Set LABEL to be the label of the GIMPLE_TRANSACTION statement GS.  */
static inline void
gimple_transaction_set_label (gimple gs, tree label)
{
GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
gs->gimple_transaction.label = label;
}
/* Set SUBCODE to be the subcode flags of the GIMPLE_TRANSACTION
   statement GS, replacing any previously stored flags.  */
static inline void
gimple_transaction_set_subcode (gimple gs, unsigned int subcode)
{
GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
gs->gsbase.subcode = subcode;
}
/* Return a pointer to the return value of the GIMPLE_RETURN statement
   GS.  The return value is stored as operand 0.  */
static inline tree *
gimple_return_retval_ptr (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_RETURN);
return gimple_op_ptr (gs, 0);
}
/* Return the return value of the GIMPLE_RETURN statement GS.  */
static inline tree
gimple_return_retval (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_RETURN);
return gimple_op (gs, 0);
}
/* Set RETVAL to be the return value (operand 0) of the GIMPLE_RETURN
   statement GS.  */
static inline void
gimple_return_set_retval (gimple gs, tree retval)
{
GIMPLE_CHECK (gs, GIMPLE_RETURN);
gimple_set_op (gs, 0, retval);
}
/* Convenience macro expanding to the full list of OpenMP statement
   codes, for use as the case labels of a switch.  Keep in sync with
   the GIMPLE_OMP_* codes handled by is_gimple_omp below.  */
#define CASE_GIMPLE_OMP \
case GIMPLE_OMP_PARALLEL: \
case GIMPLE_OMP_TASK: \
case GIMPLE_OMP_FOR: \
case GIMPLE_OMP_SECTIONS: \
case GIMPLE_OMP_SECTIONS_SWITCH: \
case GIMPLE_OMP_SINGLE: \
case GIMPLE_OMP_SECTION: \
case GIMPLE_OMP_MASTER: \
case GIMPLE_OMP_ORDERED: \
case GIMPLE_OMP_CRITICAL: \
case GIMPLE_OMP_RETURN: \
case GIMPLE_OMP_ATOMIC_LOAD: \
case GIMPLE_OMP_ATOMIC_STORE: \
case GIMPLE_OMP_CONTINUE
/* Return true when the gimple statement STMT is any of the OpenMP
   statement types enumerated in CASE_GIMPLE_OMP.  */
static inline bool
is_gimple_omp (const_gimple stmt)
{
switch (gimple_code (stmt))
{
CASE_GIMPLE_OMP:
return true;
default:
return false;
}
}
/* Return true if statement G is a GIMPLE_NOP (an empty statement).  */
static inline bool
gimple_nop_p (const_gimple g)
{
return gimple_code (g) == GIMPLE_NOP;
}
/* Return true if statement GS is a GIMPLE_RESX (resumption of
   exception propagation).  */
static inline bool
is_gimple_resx (const_gimple gs)
{
return gimple_code (gs) == GIMPLE_RESX;
}
/* Return the predictor of GIMPLE_PREDICT statement GS.  The subcode
   packs both the predictor and the GF_PREDICT_TAKEN outcome bit; the
   outcome bit is masked out here.  */
static inline enum br_predictor
gimple_predict_predictor (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_PREDICT);
return (enum br_predictor) (gs->gsbase.subcode & ~GF_PREDICT_TAKEN);
}
/* Set the predictor of GIMPLE_PREDICT statement GS to PREDICTOR,
   preserving the GF_PREDICT_TAKEN outcome bit already stored in the
   subcode.  */
static inline void
gimple_predict_set_predictor (gimple gs, enum br_predictor predictor)
{
GIMPLE_CHECK (gs, GIMPLE_PREDICT);
gs->gsbase.subcode = (gs->gsbase.subcode & GF_PREDICT_TAKEN)
| (unsigned) predictor;
}
/* Return the outcome (TAKEN or NOT_TAKEN) of GIMPLE_PREDICT statement
   GS, as encoded by the GF_PREDICT_TAKEN bit of the subcode.  */
static inline enum prediction
gimple_predict_outcome (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_PREDICT);
return (gs->gsbase.subcode & GF_PREDICT_TAKEN) ? TAKEN : NOT_TAKEN;
}
/* Set the outcome of GIMPLE_PREDICT statement GS to OUTCOME by
   setting or clearing the GF_PREDICT_TAKEN bit; the predictor bits
   are left untouched.  */
static inline void
gimple_predict_set_outcome (gimple gs, enum prediction outcome)
{
GIMPLE_CHECK (gs, GIMPLE_PREDICT);
if (outcome == TAKEN)
gs->gsbase.subcode |= GF_PREDICT_TAKEN;
else
gs->gsbase.subcode &= ~GF_PREDICT_TAKEN;
}
/* Return the type of the main expression computed by STMT.  Return
   void_type_node if the statement computes nothing.  Only
   GIMPLE_ASSIGN, GIMPLE_CALL and GIMPLE_COND yield a non-void type;
   conditions are always boolean.  */
static inline tree
gimple_expr_type (const_gimple stmt)
{
enum gimple_code code = gimple_code (stmt);
if (code == GIMPLE_ASSIGN || code == GIMPLE_CALL)
{
tree type;
/* In general we want to pass out a type that can be substituted
   for both the RHS and the LHS types if there is a possibly
   useless conversion involved.  That means returning the
   original RHS type as far as we can reconstruct it.  */
if (code == GIMPLE_CALL)
type = gimple_call_return_type (stmt);
else
switch (gimple_assign_rhs_code (stmt))
{
case POINTER_PLUS_EXPR:
/* For pointer arithmetic the result type is the type of the
   pointer operand, not the offset operand.  */
type = TREE_TYPE (gimple_assign_rhs1 (stmt));
break;
default:
/* As fallback use the type of the LHS.  */
type = TREE_TYPE (gimple_get_lhs (stmt));
break;
}
return type;
}
else if (code == GIMPLE_COND)
return boolean_type_node;
else
return void_type_node;
}
/* Return true if TYPE is a suitable type for a scalar register
   variable, i.e. any non-aggregate type.  */
static inline bool
is_gimple_reg_type (tree type)
{
return !AGGREGATE_TYPE_P (type);
}
/* Return a new iterator pointing to GIMPLE_SEQ's first statement.
   The iterator's basic block is taken from the first statement, or
   NULL when the sequence is empty.  */
static inline gimple_stmt_iterator
gsi_start_1 (gimple_seq *seq)
{
gimple_stmt_iterator i;
i.ptr = gimple_seq_first (*seq);
i.seq = seq;
i.bb = i.ptr ? gimple_bb (i.ptr) : NULL;
return i;
}
/* Convenience wrapper taking the sequence by value.  */
#define gsi_start(x) gsi_start_1(&(x))
/* Return an empty iterator: not attached to any statement, sequence
   or basic block.  gsi_end_p is true for the result.  */
static inline gimple_stmt_iterator
gsi_none (void)
{
gimple_stmt_iterator i;
i.ptr = NULL;
i.seq = NULL;
i.bb = NULL;
return i;
}
/* Return a new iterator pointing to the first statement in basic
   block BB.  Unlike gsi_start_1, the iterator's block is always BB,
   even when the block's sequence is empty.  */
static inline gimple_stmt_iterator
gsi_start_bb (basic_block bb)
{
gimple_stmt_iterator i;
gimple_seq *seq;
seq = bb_seq_addr (bb);
i.ptr = gimple_seq_first (*seq);
i.seq = seq;
i.bb = bb;
return i;
}
/* Return a new iterator initially pointing to GIMPLE_SEQ's last
   statement.  The iterator's basic block comes from that statement,
   or NULL when the sequence is empty.  */
static inline gimple_stmt_iterator
gsi_last_1 (gimple_seq *seq)
{
gimple_stmt_iterator i;
i.ptr = gimple_seq_last (*seq);
i.seq = seq;
i.bb = i.ptr ? gimple_bb (i.ptr) : NULL;
return i;
}
/* Convenience wrapper taking the sequence by value.  */
#define gsi_last(x) gsi_last_1(&(x))
/* Return a new iterator pointing to the last statement in basic block
   BB.  The iterator's block is always BB, even for an empty block.  */
static inline gimple_stmt_iterator
gsi_last_bb (basic_block bb)
{
gimple_stmt_iterator i;
gimple_seq *seq;
seq = bb_seq_addr (bb);
i.ptr = gimple_seq_last (*seq);
i.seq = seq;
i.bb = bb;
return i;
}
/* Return true if I is at the end of its sequence (no current
   statement).  */
static inline bool
gsi_end_p (gimple_stmt_iterator i)
{
return i.ptr == NULL;
}
/* Return true if I is one statement before the end of its sequence,
   i.e. points at the last statement (the one with no successor).  */
static inline bool
gsi_one_before_end_p (gimple_stmt_iterator i)
{
return i.ptr != NULL && i.ptr->gsbase.next == NULL;
}
/* Advance the iterator to the next gimple statement.  Past the last
   statement the iterator becomes NULL (gsi_end_p).  */
static inline void
gsi_next (gimple_stmt_iterator *i)
{
i->ptr = i->ptr->gsbase.next;
}
/* Advance the iterator to the previous gimple statement.  */
static inline void
gsi_prev (gimple_stmt_iterator *i)
{
gimple prev = i->ptr->gsbase.prev;
/* If PREV has a successor, it is a genuine predecessor; otherwise
   PREV appears to be the sequence tail, meaning I was already at the
   first statement, so the iterator is moved past the beginning.
   NOTE(review): this relies on the first statement's prev link
   pointing at the tail — confirm against the gimple_seq layout.  */
if (prev->gsbase.next)
i->ptr = prev;
else
i->ptr = NULL;
}
/* Return the statement the iterator I currently points at, or NULL
   when I is at the end of its sequence.  */
static inline gimple
gsi_stmt (gimple_stmt_iterator i)
{
return i.ptr;
}
/* Return a statement iterator positioned at the first non-label
   statement of basic block BB; if BB contains only labels (or is
   empty), the returned iterator satisfies gsi_end_p.  */
static inline gimple_stmt_iterator
gsi_after_labels (basic_block bb)
{
  gimple_stmt_iterator gsi;
  /* Skip the leading run of GIMPLE_LABEL statements.  */
  for (gsi = gsi_start_bb (bb);
       !gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL;
       gsi_next (&gsi))
    ;
  return gsi;
}
/* Advance the iterator to the next non-debug gimple statement,
   stopping at the end of the sequence if none remains.  */
static inline void
gsi_next_nondebug (gimple_stmt_iterator *i)
{
do
{
gsi_next (i);
}
while (!gsi_end_p (*i) && is_gimple_debug (gsi_stmt (*i)));
}
/* Advance the iterator to the previous non-debug gimple statement,
   stopping past the beginning of the sequence if none remains.  */
static inline void
gsi_prev_nondebug (gimple_stmt_iterator *i)
{
do
{
gsi_prev (i);
}
while (!gsi_end_p (*i) && is_gimple_debug (gsi_stmt (*i)));
}
/* Return a new iterator pointing to the first non-debug statement in
   basic block BB; the result satisfies gsi_end_p when the block holds
   only debug statements (or is empty).  */
static inline gimple_stmt_iterator
gsi_start_nondebug_bb (basic_block bb)
{
  gimple_stmt_iterator it;
  it = gsi_start_bb (bb);
  /* Nothing to skip if the block is empty or already starts with a
     non-debug statement.  */
  if (gsi_end_p (it) || !is_gimple_debug (gsi_stmt (it)))
    return it;
  gsi_next_nondebug (&it);
  return it;
}
/* Return a new iterator pointing to the last non-debug statement in
   basic block BB; the result satisfies gsi_end_p when the block holds
   only debug statements (or is empty).  */
static inline gimple_stmt_iterator
gsi_last_nondebug_bb (basic_block bb)
{
  gimple_stmt_iterator it;
  it = gsi_last_bb (bb);
  /* Nothing to skip if the block is empty or already ends with a
     non-debug statement.  */
  if (gsi_end_p (it) || !is_gimple_debug (gsi_stmt (it)))
    return it;
  gsi_prev_nondebug (&it);
  return it;
}
/* Return the basic block associated with iterator I.  */
static inline basic_block
gsi_bb (gimple_stmt_iterator i)
{
return i.bb;
}
/* Return the statement sequence associated with iterator I.  */
static inline gimple_seq
gsi_seq (gimple_stmt_iterator i)
{
return *i.seq;
}
/* Policies telling the gsi_insert_* routines where to leave the
   iterator after an insertion.  */
enum gsi_iterator_update
{
GSI_NEW_STMT, /* Only valid when single statement is added, move
   iterator to it. */
GSI_SAME_STMT, /* Leave the iterator at the same statement. */
GSI_CONTINUE_LINKING /* Move iterator to whatever position is suitable
   for linking other statements in the same
   direction. */
};
/* In gimple-iterator.c */
gimple_stmt_iterator gsi_start_phis (basic_block);
gimple_seq gsi_split_seq_after (gimple_stmt_iterator);
void gsi_split_seq_before (gimple_stmt_iterator *, gimple_seq *);
void gsi_set_stmt (gimple_stmt_iterator *, gimple);
void gsi_replace (gimple_stmt_iterator *, gimple, bool);
void gsi_replace_with_seq (gimple_stmt_iterator *, gimple_seq, bool);
/* Insertion of a statement or a whole sequence before/after the
   iterator; the _without_update variants skip operand bookkeeping.  */
void gsi_insert_before (gimple_stmt_iterator *, gimple,
   enum gsi_iterator_update);
void gsi_insert_before_without_update (gimple_stmt_iterator *, gimple,
   enum gsi_iterator_update);
void gsi_insert_seq_before (gimple_stmt_iterator *, gimple_seq,
   enum gsi_iterator_update);
void gsi_insert_seq_before_without_update (gimple_stmt_iterator *, gimple_seq,
   enum gsi_iterator_update);
void gsi_insert_after (gimple_stmt_iterator *, gimple,
   enum gsi_iterator_update);
void gsi_insert_after_without_update (gimple_stmt_iterator *, gimple,
   enum gsi_iterator_update);
void gsi_insert_seq_after (gimple_stmt_iterator *, gimple_seq,
   enum gsi_iterator_update);
void gsi_insert_seq_after_without_update (gimple_stmt_iterator *, gimple_seq,
   enum gsi_iterator_update);
/* Removal and movement of statements.  */
bool gsi_remove (gimple_stmt_iterator *, bool);
gimple_stmt_iterator gsi_for_stmt (gimple);
void gsi_move_after (gimple_stmt_iterator *, gimple_stmt_iterator *);
void gsi_move_before (gimple_stmt_iterator *, gimple_stmt_iterator *);
void gsi_move_to_bb_end (gimple_stmt_iterator *, basic_block);
/* Deferred insertion on CFG edges, committed explicitly or
   immediately.  */
void gsi_insert_on_edge (edge, gimple);
void gsi_insert_seq_on_edge (edge, gimple_seq);
basic_block gsi_insert_on_edge_immediate (edge, gimple);
basic_block gsi_insert_seq_on_edge_immediate (edge, gimple_seq);
void gsi_commit_one_edge_insert (edge, basic_block *);
void gsi_commit_edge_inserts (void);
gimple gimple_call_copy_skip_args (gimple, bitmap);
/* Convenience routines to walk all statements of a gimple function.
   Note that this is useful exclusively before the code is converted
   into SSA form.  Once the program is in SSA form, the standard
   operand interface should be used to analyze/modify statements.  */
/* State carried through a statement walk, shared between the walker
   and its callbacks.  */
struct walk_stmt_info
{
/* Points to the current statement being walked. */
gimple_stmt_iterator gsi;
/* Additional data that the callback functions may want to carry
   through the recursion. */
void *info;
/* Pointer map used to mark visited tree nodes when calling
   walk_tree on each operand. If set to NULL, duplicate tree nodes
   will be visited more than once. */
struct pointer_set_t *pset;
/* Operand returned by the callbacks. This is set when calling
   walk_gimple_seq. If the walk_stmt_fn or walk_tree_fn callback
   returns non-NULL, this field will contain the tree returned by
   the last callback. */
tree callback_result;
/* Indicates whether the operand being examined may be replaced
   with something that matches is_gimple_val (if true) or something
   slightly more complicated (if false). "Something" technically
   means the common subset of is_gimple_lvalue and is_gimple_rhs,
   but we never try to form anything more complicated than that, so
   we don't bother checking.
   Also note that CALLBACK should update this flag while walking the
   sub-expressions of a statement. For instance, when walking the
   statement 'foo (&var)', the flag VAL_ONLY will initially be set
   to true, however, when walking &var, the operand of that
   ADDR_EXPR does not need to be a GIMPLE value. */
BOOL_BITFIELD val_only : 1;
/* True if we are currently walking the LHS of an assignment. */
BOOL_BITFIELD is_lhs : 1;
/* Optional. Set to true by the callback functions if they made any
   changes. */
BOOL_BITFIELD changed : 1;
/* True if we're interested in location information. */
BOOL_BITFIELD want_locations : 1;
/* True if we've removed the statement that was processed. */
BOOL_BITFIELD removed_stmt : 1;
};
/* Callback for walk_gimple_stmt. Called for every statement found
   during traversal. The first argument points to the statement to
   walk. The second argument is a flag that the callback sets to
   'true' if it the callback handled all the operands and
   sub-statements of the statement (the default value of this flag is
   'false'). The third argument is an anonymous pointer to data
   to be used by the callback. */
typedef tree (*walk_stmt_fn) (gimple_stmt_iterator *, bool *,
   struct walk_stmt_info *);
/* Walkers over sequences, single statements and statement operands;
   the _mod variant may modify the sequence it walks.  */
gimple walk_gimple_seq (gimple_seq, walk_stmt_fn, walk_tree_fn,
   struct walk_stmt_info *);
gimple walk_gimple_seq_mod (gimple_seq *, walk_stmt_fn, walk_tree_fn,
   struct walk_stmt_info *);
tree walk_gimple_stmt (gimple_stmt_iterator *, walk_stmt_fn, walk_tree_fn,
   struct walk_stmt_info *);
tree walk_gimple_op (gimple, walk_tree_fn, struct walk_stmt_info *);
/* Enum and arrays used for allocation stats. Keep in sync with
   gimple.c:gimple_alloc_kind_names. */
enum gimple_alloc_kind
{
gimple_alloc_kind_assign, /* Assignments. */
gimple_alloc_kind_phi, /* PHI nodes. */
gimple_alloc_kind_cond, /* Conditionals. */
gimple_alloc_kind_rest, /* Everything else. */
gimple_alloc_kind_all
};
/* Per-kind statement allocation counters and byte totals, indexed by
   enum gimple_alloc_kind.  */
extern int gimple_alloc_counts[];
extern int gimple_alloc_sizes[];
/* Return the allocation-statistics bucket for a statement with the
   given CODE.  Assignments, PHIs and conditionals get dedicated
   buckets; everything else is lumped together.  */
static inline enum gimple_alloc_kind
gimple_alloc_kind (enum gimple_code code)
{
  if (code == GIMPLE_ASSIGN)
    return gimple_alloc_kind_assign;
  if (code == GIMPLE_PHI)
    return gimple_alloc_kind_phi;
  if (code == GIMPLE_COND)
    return gimple_alloc_kind_cond;
  return gimple_alloc_kind_rest;
}
/* Print the allocation statistics gathered above.  */
extern void dump_gimple_statistics (void);
/* In gimple-fold.c. */
void gimplify_and_update_call_from_tree (gimple_stmt_iterator *, tree);
tree gimple_fold_builtin (gimple);
bool fold_stmt (gimple_stmt_iterator *);
bool fold_stmt_inplace (gimple_stmt_iterator *);
tree get_symbol_constant_value (tree);
tree canonicalize_constructor_val (tree, tree);
extern tree maybe_fold_and_comparisons (enum tree_code, tree, tree,
   enum tree_code, tree, tree);
extern tree maybe_fold_or_comparisons (enum tree_code, tree, tree,
   enum tree_code, tree, tree);
bool gimple_val_nonnegative_real_p (tree);
#endif /* GCC_GIMPLE_H */
|
ncpdq.c | /* $Header$ */
/* ncpdq -- netCDF pack, re-dimension, query */
/* Purpose: Pack, re-dimension, query single netCDF file and output to a single file */
/* Copyright (C) 1995--present Charlie Zender
This file is part of NCO, the netCDF Operators. NCO is free software.
You may redistribute and/or modify NCO under the terms of the
3-Clause BSD License.
You are permitted to link NCO with the HDF, netCDF, OPeNDAP, and UDUnits
libraries and to distribute the resulting executables under the terms
of the BSD, but in addition obeying the extra stipulations of the
HDF, netCDF, OPeNDAP, and UDUnits licenses.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the 3-Clause BSD License for more details.
The original author of this software, Charlie Zender, seeks to improve
it with your suggestions, contributions, bug-reports, and patches.
Please contact the NCO project at http://nco.sf.net or write to
Charlie Zender
Department of Earth System Science
University of California, Irvine
Irvine, CA 92697-3100 */
/* Usage:
ncpdq -O -D 3 -a lat,lev,lon -v three_dmn_var ~/nco/data/in.nc ~/foo.nc;ncks -P ~/foo.nc
ncpdq -O -D 3 -a lon,lev,lat -v three_dmn_var ~/nco/data/in.nc ~/foo.nc;ncks -P ~/foo.nc
ncpdq -O -D 3 -a lon,time -x -v three_double_dmn ~/nco/data/in.nc ~/foo.nc;ncks -P ~/foo.nc
ncpdq -O -D 3 -P all_new ~/nco/data/in.nc ~/foo.nc
ncpdq -O -D 3 -P all_xst ~/nco/data/in.nc ~/foo.nc
ncpdq -O -D 3 -P xst_new ~/nco/data/in.nc ~/foo.nc
ncpdq -O -D 3 -M dbl_flt ~/nco/data/in.nc ~/foo.nc
ncpdq -O -D 3 -M flt_dbl ~/nco/data/in.nc ~/foo.nc
ncpdq -O -D 3 -P upk ~/nco/data/in.nc ~/foo.nc
ncpdq -O -D 3 -a lon,lat -g g21,g22 ~/nco/data/in_grp_3.nc ~/foo.nc
ncpdq -O -D 3 -g g1 -v v1 --union -G dude -p ~/nco/data in_grp.nc ~/foo.nc */
#ifdef HAVE_CONFIG_H
# include <config.h> /* Autotools tokens */
#endif /* !HAVE_CONFIG_H */
/* Standard C headers */
#include <math.h> /* sin cos cos sin 3.14159 */
#include <stdio.h> /* stderr, FILE, NULL, etc. */
#include <stdlib.h> /* abs, getopt, malloc, strtol */
#include <string.h> /* strcmp() */
#include <time.h> /* machine time */
#ifndef _MSC_VER
# include <unistd.h> /* POSIX stuff */
#endif
#ifndef HAVE_GETOPT_LONG
# include "nco_getopt.h"
#else /* HAVE_GETOPT_LONG */
# ifdef HAVE_GETOPT_H
# include <getopt.h>
# endif /* !HAVE_GETOPT_H */
#endif /* HAVE_GETOPT_LONG */
#ifdef I18N
# include <langinfo.h> /* nl_langinfo() */
# include <libintl.h> /* Internationalization i18n */
# include <locale.h> /* Locale setlocale() */
# define _(sng) gettext (sng)
# define gettext_noop(sng) (sng)
# define N_(sng) gettext_noop(sng)
#endif /* I18N */
/* Supply stub gettext() function in case i18n failed */
#ifndef _LIBINTL_H
# define gettext(foo) foo
#endif /* _LIBINTL_H */
/* 3rd party vendors */
#include <netcdf.h> /* netCDF definitions and C library */
#ifdef ENABLE_MPI
# include <mpi.h> /* MPI definitions */
# include <netcdf_par.h> /* Parallel netCDF definitions */
# include "nco_mpi.h" /* MPI utilities */
#endif /* !ENABLE_MPI */
/* #define MAIN_PROGRAM_FILE MUST precede #include libnco.h */
#define MAIN_PROGRAM_FILE
#include "libnco.h" /* netCDF Operator (NCO) library */
int
main(int argc,char **argv)
{
aed_sct *aed_lst_add_fst=NULL_CEWI;
aed_sct *aed_lst_scl_fct=NULL_CEWI;
char **dmn_rdr_lst_in=NULL_CEWI; /* Option a */
char **fl_lst_abb=NULL; /* Option n */
char **fl_lst_in=NULL_CEWI;
char **gaa_arg=NULL; /* [sng] Global attribute arguments */
char **var_lst_in=NULL_CEWI;
char **grp_lst_in=NULL_CEWI;
char *aux_arg[NC_MAX_DIMS];
char *cmd_ln;
char *cnk_arg[NC_MAX_DIMS];
char *cnk_map_sng=NULL_CEWI; /* [sng] Chunking map */
char *cnk_plc_sng=NULL_CEWI; /* [sng] Chunking policy */
char *fl_in=NULL;
char *fl_out=NULL; /* Option o */
char *fl_out_tmp=NULL_CEWI;
char *fl_pth=NULL; /* Option p */
char *fl_pth_lcl=NULL; /* Option l */
char *lmt_arg[NC_MAX_DIMS];
char *nco_pck_plc_sng=NULL_CEWI; /* [sng] Packing policy Option P */
char *nco_pck_map_sng=NULL_CEWI; /* [sng] Packing map Option M */
char *opt_crr=NULL; /* [sng] String representation of current long-option name */
char *optarg_lcl; /* [sng] Local copy of system optarg */
char *ppc_arg[NC_MAX_VARS]; /* [sng] PPC arguments */
char *sng_cnv_rcd=NULL_CEWI; /* [sng] strtol()/strtoul() return code */
char add_fst_sng[]="add_offset"; /* [sng] Unidata standard string for add offset */
char scl_fct_sng[]="scale_factor"; /* [sng] Unidata standard string for scale factor */
char trv_pth[]="/"; /* [sng] Root path of traversal tree */
const char * const CVS_Id="$Id$";
const char * const CVS_Revision="$Revision$";
const char * const opt_sht_lst="34567Aa:CcD:d:Fg:G:hL:l:M:Oo:P:p:Rrt:v:UxZ-:";
cnk_sct cnk; /* [sct] Chunking structure */
cnv_sct *cnv; /* [sct] Convention structure */
#if defined(__cplusplus) || defined(PGI_CC)
ddra_info_sct ddra_info;
ddra_info.flg_ddra=False;
#else /* !__cplusplus */
ddra_info_sct ddra_info={.flg_ddra=False};
#endif /* !__cplusplus */
dmn_sct **dmn_rdr_trv=NULL; /* [sct] Dimension structures to be re-ordered (from global table) */
extern char *optarg;
extern int optind;
/* Using naked stdin/stdout/stderr in parallel region generates warning
Copy appropriate filehandle to variable scoped shared in parallel clause */
FILE * const fp_stderr=stderr; /* [fl] stderr filehandle CEWI */
FILE * const fp_stdout=stdout; /* [fl] stdout filehandle CEWI */
gpe_sct *gpe=NULL; /* [sng] Group Path Editing (GPE) structure */
int *in_id_arr;
int abb_arg_nbr=0;
int aux_nbr=0; /* [nbr] Number of auxiliary coordinate hyperslabs specified */
int cnk_map=nco_cnk_map_nil; /* [enm] Chunking map */
int cnk_nbr=0; /* [nbr] Number of chunk sizes */
int cnk_plc=nco_cnk_plc_nil; /* [enm] Chunking policy */
int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */
int dmn_rdr_nbr=0; /* [nbr] Number of dimension to re-order */
int dmn_rdr_nbr_trv=0; /* [nbr] Number of dimension to re-order (from global table) */
int dmn_rdr_nbr_in=0; /* [nbr] Original number of dimension to re-order */
int fl_idx=int_CEWI;
int fl_nbr=0;
int fl_in_fmt; /* [enm] Input file format */
int fl_out_fmt=NCO_FORMAT_UNDEFINED; /* [enm] Output file format */
int fll_md_old; /* [enm] Old fill mode */
int gaa_nbr=0; /* [nbr] Number of global attributes to add */
int idx=int_CEWI;
int idx_rdr=int_CEWI;
int in_id;
int lmt_nbr=0; /* Option d. NB: lmt_nbr gets incremented */
int log_lvl=0; /* [enm] netCDF library debugging verbosity [0..5] */
int md_open; /* [enm] Mode flag for nc_open() call */
int nbr_dmn_fl;
int nbr_var_fix; /* nbr_var_fix gets incremented */
int nbr_var_fl;
int nbr_var_prc; /* nbr_var_prc gets incremented */
int nco_pck_map=nco_pck_map_flt_sht; /* [enm] Packing map */
int nco_pck_plc=nco_pck_plc_nil; /* [enm] Packing policy */
int opt;
int out_id;
int ppc_nbr=0; /* [nbr] Number of PPC arguments */
int rcd=NC_NOERR; /* [rcd] Return code */
int thr_idx; /* [idx] Index of current thread */
int thr_nbr=int_CEWI; /* [nbr] Thread number Option t */
int xtr_nbr=0; /* xtr_nbr won't otherwise be set for -c with no -v */
int var_lst_in_nbr=0;
int grp_lst_in_nbr=0; /* [nbr] Number of groups explicitly specified by user */
md5_sct *md5=NULL; /* [sct] MD5 configuration */
nco_bool *dmn_rvr_rdr=NULL; /* [flg] Reverse dimensions */
nco_bool EXCLUDE_INPUT_LIST=False; /* Option c */
nco_bool EXTRACT_ALL_COORDINATES=False; /* Option c */
nco_bool EXTRACT_ASSOCIATED_COORDINATES=True; /* Option C */
nco_bool EXTRACT_CLL_MSR=True; /* [flg] Extract cell_measures variables */
nco_bool EXTRACT_FRM_TRM=True; /* [flg] Extract formula_terms variables */
nco_bool FL_RTR_RMT_LCN;
nco_bool FL_LST_IN_FROM_STDIN=False; /* [flg] fl_lst_in comes from stdin */
nco_bool FORCE_APPEND=False; /* Option A */
nco_bool FORCE_OVERWRITE=False; /* Option O */
nco_bool FORTRAN_IDX_CNV=False; /* Option F */
nco_bool GRP_VAR_UNN=False; /* [flg] Select union of specified groups and variables */
nco_bool HISTORY_APPEND=True; /* Option h */
nco_bool HPSS_TRY=False; /* [flg] Search HPSS for unfound files */
nco_bool IS_REORDER=False; /* Re-order mode */
nco_bool MSA_USR_RDR=False; /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order*/
nco_bool RAM_CREATE=False; /* [flg] Create file in RAM */
nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */
nco_bool SHARE_CREATE=False; /* [flg] Create (netCDF3-only) file(s) with unbuffered I/O */
nco_bool SHARE_OPEN=False; /* [flg] Open (netCDF3-only) file(s) with unbuffered I/O */
nco_bool RM_RMT_FL_PST_PRC=True; /* Option R */
nco_bool WRT_TMP_FL=True; /* [flg] Write output to temporary file */
nco_bool flg_mmr_cln=True; /* [flg] Clean memory prior to exit */
nco_bool flg_dmn_prc_usr_spc=False; /* [flg] Processed dimensions specified on command line */
size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */
size_t cnk_csh_byt=NCO_CNK_CSH_BYT_DFL; /* [B] Chunk cache size */
size_t cnk_min_byt=NCO_CNK_SZ_MIN_BYT_DFL; /* [B] Minimize size of variable to chunk */
size_t cnk_sz_byt=0UL; /* [B] Chunk size in bytes */
size_t cnk_sz_scl=0UL; /* [nbr] Chunk size scalar */
size_t hdr_pad=0UL; /* [B] Pad at end of header section */
var_sct **var;
var_sct **var_fix;
var_sct **var_fix_out;
var_sct **var_out;
var_sct **var_prc;
var_sct **var_prc_out;
trv_tbl_sct *trv_tbl=NULL; /* [lst] Traversal table */
nco_dmn_dne_t *flg_dne=NULL; /* [lst] Flag to check if input dimension -d "does not exist" */
#ifdef ENABLE_MPI
/* Declare all MPI-specific variables here */
MPI_Comm mpi_cmm=MPI_COMM_WORLD; /* [prc] Communicator */
int prc_rnk; /* [idx] Process rank */
int prc_nbr=0; /* [nbr] Number of MPI processes */
#endif /* !ENABLE_MPI */
static struct option opt_lng[]={ /* Structure ordered by short option key if possible */
/* Long options with no argument, no short option counterpart */
{"cll_msr",no_argument,0,0}, /* [flg] Extract cell_measures variables */
{"cell_measures",no_argument,0,0}, /* [flg] Extract cell_measures variables */
{"no_cll_msr",no_argument,0,0}, /* [flg] Do not extract cell_measures variables */
{"no_cell_measures",no_argument,0,0}, /* [flg] Do not extract cell_measures variables */
{"frm_trm",no_argument,0,0}, /* [flg] Extract formula_terms variables */
{"formula_terms",no_argument,0,0}, /* [flg] Extract formula_terms variables */
{"no_frm_trm",no_argument,0,0}, /* [flg] Do not extract formula_terms variables */
{"no_formula_terms",no_argument,0,0}, /* [flg] Do not extract formula_terms variables */
{"clean",no_argument,0,0}, /* [flg] Clean memory prior to exit */
{"mmr_cln",no_argument,0,0}, /* [flg] Clean memory prior to exit */
{"drt",no_argument,0,0}, /* [flg] Allow dirty memory on exit */
{"dirty",no_argument,0,0}, /* [flg] Allow dirty memory on exit */
{"mmr_drt",no_argument,0,0}, /* [flg] Allow dirty memory on exit */
{"hdf4",no_argument,0,0}, /* [flg] Treat file as HDF4 */
{"hdf_upk",no_argument,0,0}, /* [flg] HDF unpack convention: unpacked=scale_factor*(packed-add_offset) */
{"hdf_unpack",no_argument,0,0}, /* [flg] HDF unpack convention: unpacked=scale_factor*(packed-add_offset) */
{"help",no_argument,0,0},
{"hlp",no_argument,0,0},
{"hpss_try",no_argument,0,0}, /* [flg] Search HPSS for unfound files */
{"mrd",no_argument,0,0}, /* [enm] Multiple Record Dimension convention */
{"multiple_record_dimension",no_argument,0,0}, /* [enm] Multiple Record Dimension convention */
{"msa_usr_rdr",no_argument,0,0}, /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */
{"msa_user_order",no_argument,0,0}, /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */
{"ram_all",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) in RAM */
{"create_ram",no_argument,0,0}, /* [flg] Create file in RAM */
{"open_ram",no_argument,0,0}, /* [flg] Open (netCDF3) file(s) in RAM */
{"diskless_all",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) in RAM */
{"share_all",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) with unbuffered I/O */
{"create_share",no_argument,0,0}, /* [flg] Create (netCDF3) file(s) with unbuffered I/O */
{"open_share",no_argument,0,0}, /* [flg] Open (netCDF3) file(s) with unbuffered I/O */
{"unbuffered_io",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) with unbuffered I/O */
{"uio",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) with unbuffered I/O */
{"wrt_tmp_fl",no_argument,0,0}, /* [flg] Write output to temporary file */
{"write_tmp_fl",no_argument,0,0}, /* [flg] Write output to temporary file */
{"no_tmp_fl",no_argument,0,0}, /* [flg] Do not write output to temporary file */
{"intersection",no_argument,0,0}, /* [flg] Select intersection of specified groups and variables */
{"nsx",no_argument,0,0}, /* [flg] Select intersection of specified groups and variables */
{"union",no_argument,0,0}, /* [flg] Select union of specified groups and variables */
{"unn",no_argument,0,0}, /* [flg] Select union of specified groups and variables */
{"version",no_argument,0,0},
{"vrs",no_argument,0,0},
/* Long options with argument, no short option counterpart */
{"bfr_sz_hnt",required_argument,0,0}, /* [B] Buffer size hint */
{"buffer_size_hint",required_argument,0,0}, /* [B] Buffer size hint */
{"cnk_byt",required_argument,0,0}, /* [B] Chunk size in bytes */
{"chunk_byte",required_argument,0,0}, /* [B] Chunk size in bytes */
{"cnk_csh",required_argument,0,0}, /* [B] Chunk cache size in bytes */
{"chunk_cache",required_argument,0,0}, /* [B] Chunk cache size in bytes */
{"cnk_dmn",required_argument,0,0}, /* [nbr] Chunk size */
{"chunk_dimension",required_argument,0,0}, /* [nbr] Chunk size */
{"cnk_map",required_argument,0,0}, /* [nbr] Chunking map */
{"chunk_map",required_argument,0,0}, /* [nbr] Chunking map */
{"cnk_min",required_argument,0,0}, /* [B] Minimize size of variable to chunk */
{"chunk_min",required_argument,0,0}, /* [B] Minimize size of variable to chunk */
{"cnk_plc",required_argument,0,0}, /* [nbr] Chunking policy */
{"chunk_policy",required_argument,0,0}, /* [nbr] Chunking policy */
{"cnk_scl",required_argument,0,0}, /* [nbr] Chunk size scalar */
{"chunk_scalar",required_argument,0,0}, /* [nbr] Chunk size scalar */
{"fl_fmt",required_argument,0,0},
{"file_format",required_argument,0,0},
{"gaa",required_argument,0,0}, /* [sng] Global attribute add */
{"glb_att_add",required_argument,0,0}, /* [sng] Global attribute add */
{"hdr_pad",required_argument,0,0},
{"header_pad",required_argument,0,0},
{"log_lvl",required_argument,0,0}, /* [enm] netCDF library debugging verbosity [0..5] */
{"log_level",required_argument,0,0}, /* [enm] netCDF library debugging verbosity [0..5] */
{"ppc",required_argument,0,0}, /* [nbr] Precision-preserving compression, i.e., number of total or decimal significant digits */
{"precision_preserving_compression",required_argument,0,0}, /* [nbr] Precision-preserving compression, i.e., number of total or decimal significant digits */
{"quantize",required_argument,0,0}, /* [nbr] Precision-preserving compression, i.e., number of total or decimal significant digits */
{"upk",required_argument,0,0}, /* [enm] Unpacking convention to utilize */
/* Long options with short counterparts */
{"3",no_argument,0,'3'},
{"4",no_argument,0,'4'},
{"netcdf4",no_argument,0,'4'},
{"5",no_argument,0,'5'},
{"64bit_data",no_argument,0,'5'},
{"cdf5",no_argument,0,'5'},
{"pnetcdf",no_argument,0,'5'},
{"64bit_offset",no_argument,0,'6'},
{"7",no_argument,0,'7'},
{"append",no_argument,0,'A'},
{"arrange",required_argument,0,'a'},
{"permute",required_argument,0,'a'},
{"reorder",required_argument,0,'a'},
{"rdr",required_argument,0,'a'},
{"xtr_ass_var",no_argument,0,'c'},
{"xcl_ass_var",no_argument,0,'C'},
{"no_coords",no_argument,0,'C'},
{"no_crd",no_argument,0,'C'},
{"coords",no_argument,0,'c'},
{"crd",no_argument,0,'c'},
{"dbg_lvl",required_argument,0,'D'},
{"debug",required_argument,0,'D'},
{"nco_dbg_lvl",required_argument,0,'D'},
{"dimension",required_argument,0,'d'},
{"dmn",required_argument,0,'d'},
{"fortran",no_argument,0,'F'},
{"ftn",no_argument,0,'F'},
{"gpe",required_argument,0,'G'}, /* [sng] Group Path Edit (GPE) */
{"grp",required_argument,0,'g'},
{"group",required_argument,0,'g'},
{"history",no_argument,0,'h'},
{"hst",no_argument,0,'h'},
{"dfl_lvl",required_argument,0,'L'}, /* [enm] Deflate level */
{"deflate",required_argument,0,'L'}, /* [enm] Deflate level */
{"local",required_argument,0,'l'},
{"lcl",required_argument,0,'l'},
{"pack_map",required_argument,0,'M'},
{"pck_map",required_argument,0,'M'},
{"map",required_argument,0,'M'},
{"overwrite",no_argument,0,'O'},
{"ovr",no_argument,0,'O'},
{"output",required_argument,0,'o'},
{"fl_out",required_argument,0,'o'},
{"pack_policy",required_argument,0,'P'},
{"pck_plc",required_argument,0,'P'},
{"path",required_argument,0,'p'},
{"retain",no_argument,0,'R'},
{"rtn",no_argument,0,'R'},
{"revision",no_argument,0,'r'},
{"thr_nbr",required_argument,0,'t'},
{"threads",required_argument,0,'t'},
{"omp_num_threads",required_argument,0,'t'},
{"unpack",no_argument,0,'U'},
{"variable",required_argument,0,'v'},
{"auxiliary",required_argument,0,'X'},
{"exclude",no_argument,0,'x'},
{"xcl",no_argument,0,'x'},
{0,0,0,0}
}; /* end opt_lng */
int opt_idx=0; /* Index of current long option into opt_lng array */
/* Initialize traversal table */
trv_tbl_init(&trv_tbl);
/* Start timer and save command line */
ddra_info.tmr_flg=nco_tmr_srt;
rcd+=nco_ddra((char *)NULL,(char *)NULL,&ddra_info);
ddra_info.tmr_flg=nco_tmr_mtd;
cmd_ln=nco_cmd_ln_sng(argc,argv);
/* Get program name and set program enum (e.g., nco_prg_id=ncra) */
nco_prg_nm=nco_prg_prs(argv[0],&nco_prg_id);
#ifdef ENABLE_MPI
/* MPI Initialization */
if(False) (void)fprintf(stdout,gettext("%s: WARNING Compiled with MPI\n"),nco_prg_nm);
MPI_Init(&argc,&argv);
MPI_Comm_size(mpi_cmm,&prc_nbr);
MPI_Comm_rank(mpi_cmm,&prc_rnk);
#endif /* !ENABLE_MPI */
/* Parse command line arguments */
while(1){
/* getopt_long_only() allows one dash to prefix long options */
opt=getopt_long(argc,argv,opt_sht_lst,opt_lng,&opt_idx);
/* NB: access to opt_crr is only valid when long_opt is detected */
if(opt == EOF) break; /* Parse positional arguments once getopt_long() returns EOF */
opt_crr=(char *)strdup(opt_lng[opt_idx].name);
/* Process long options without short option counterparts */
if(opt == 0){
if(!strcmp(opt_crr,"bfr_sz_hnt") || !strcmp(opt_crr,"buffer_size_hint")){
bfr_sz_hnt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif cnk */
if(!strcmp(opt_crr,"cnk_byt") || !strcmp(opt_crr,"chunk_byte")){
cnk_sz_byt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif cnk_byt */
if(!strcmp(opt_crr,"cnk_csh") || !strcmp(opt_crr,"chunk_cache")){
cnk_csh_byt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif cnk_csh_byt */
if(!strcmp(opt_crr,"cnk_min") || !strcmp(opt_crr,"chunk_min")){
cnk_min_byt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif cnk_min */
if(!strcmp(opt_crr,"cnk_dmn") || !strcmp(opt_crr,"chunk_dimension")){
/* Copy limit argument for later processing */
cnk_arg[cnk_nbr]=(char *)strdup(optarg);
cnk_nbr++;
} /* endif cnk */
if(!strcmp(opt_crr,"cnk_scl") || !strcmp(opt_crr,"chunk_scalar")){
cnk_sz_scl=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif cnk */
if(!strcmp(opt_crr,"cnk_map") || !strcmp(opt_crr,"chunk_map")){
/* Chunking map */
cnk_map_sng=(char *)strdup(optarg);
cnk_map=nco_cnk_map_get(cnk_map_sng);
} /* endif cnk */
if(!strcmp(opt_crr,"cnk_plc") || !strcmp(opt_crr,"chunk_policy")){
/* Chunking policy */
cnk_plc_sng=(char *)strdup(optarg);
cnk_plc=nco_cnk_plc_get(cnk_plc_sng);
} /* endif cnk */
if(!strcmp(opt_crr,"cll_msr") || !strcmp(opt_crr,"cell_measures")) EXTRACT_CLL_MSR=True; /* [flg] Extract cell_measures variables */
if(!strcmp(opt_crr,"no_cll_msr") || !strcmp(opt_crr,"no_cell_measures")) EXTRACT_CLL_MSR=False; /* [flg] Do not extract cell_measures variables */
if(!strcmp(opt_crr,"frm_trm") || !strcmp(opt_crr,"formula_terms")) EXTRACT_FRM_TRM=True; /* [flg] Extract formula_terms variables */
if(!strcmp(opt_crr,"no_frm_trm") || !strcmp(opt_crr,"no_formula_terms")) EXTRACT_FRM_TRM=False; /* [flg] Do not extract formula_terms variables */
if(!strcmp(opt_crr,"mmr_cln") || !strcmp(opt_crr,"clean")) flg_mmr_cln=True; /* [flg] Clean memory prior to exit */
if(!strcmp(opt_crr,"drt") || !strcmp(opt_crr,"mmr_drt") || !strcmp(opt_crr,"dirty")) flg_mmr_cln=False; /* [flg] Clean memory prior to exit */
if(!strcmp(opt_crr,"fl_fmt") || !strcmp(opt_crr,"file_format")) rcd=nco_create_mode_prs(optarg,&fl_out_fmt);
if(!strcmp(opt_crr,"gaa") || !strcmp(opt_crr,"glb_att_add")){
gaa_arg=(char **)nco_realloc(gaa_arg,(gaa_nbr+1)*sizeof(char *));
gaa_arg[gaa_nbr++]=(char *)strdup(optarg);
} /* endif gaa */
if(!strcmp(opt_crr,"hdf4")) nco_fmt_xtn=nco_fmt_xtn_hdf4; /* [enm] Treat file as HDF4 */
if(!strcmp(opt_crr,"hdf_upk") || !strcmp(opt_crr,"hdf_unpack")) nco_upk_cnv=nco_upk_HDF_MOD10; /* [flg] HDF unpack convention: unpacked=scale_factor*(packed-add_offset) */
if(!strcmp(opt_crr,"hdr_pad") || !strcmp(opt_crr,"header_pad")){
hdr_pad=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif "hdr_pad" */
if(!strcmp(opt_crr,"help") || !strcmp(opt_crr,"hlp")){
(void)nco_usg_prn();
nco_exit(EXIT_SUCCESS);
} /* endif "help" */
if(!strcmp(opt_crr,"hpss_try")) HPSS_TRY=True; /* [flg] Search HPSS for unfound files */
if(!strcmp(opt_crr,"log_lvl") || !strcmp(opt_crr,"log_level")){
log_lvl=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd);
nc_set_log_level(log_lvl);
} /* !log_lvl */
if(!strcmp(opt_crr,"mrd") || !strcmp(opt_crr,"multiple_record_dimension")) nco_mrd_cnv=nco_mrd_allow; /* [enm] Multiple Record Dimension convention */
if(!strcmp(opt_crr,"msa_usr_rdr") || !strcmp(opt_crr,"msa_user_order")) MSA_USR_RDR=True; /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */
if(!strcmp(opt_crr,"ppc") || !strcmp(opt_crr,"precision_preserving_compression") || !strcmp(opt_crr,"quantize")){
ppc_arg[ppc_nbr]=(char *)strdup(optarg);
ppc_nbr++;
} /* endif "ppc" */
if(!strcmp(opt_crr,"ram_all") || !strcmp(opt_crr,"create_ram") || !strcmp(opt_crr,"diskless_all")) RAM_CREATE=True; /* [flg] Create (netCDF3) file(s) in RAM */
if(!strcmp(opt_crr,"ram_all") || !strcmp(opt_crr,"open_ram") || !strcmp(opt_crr,"diskless_all")) RAM_OPEN=True; /* [flg] Open (netCDF3) file(s) in RAM */
if(!strcmp(opt_crr,"share_all") || !strcmp(opt_crr,"unbuffered_io") || !strcmp(opt_crr,"uio") || !strcmp(opt_crr,"create_share")) SHARE_CREATE=True; /* [flg] Create (netCDF3) file(s) with unbuffered I/O */
if(!strcmp(opt_crr,"share_all") || !strcmp(opt_crr,"unbuffered_io") || !strcmp(opt_crr,"uio") || !strcmp(opt_crr,"open_share")) SHARE_OPEN=True; /* [flg] Open (netCDF3) file(s) with unbuffered I/O */
if(!strcmp(opt_crr,"unn") || !strcmp(opt_crr,"union")) GRP_VAR_UNN=True;
if(!strcmp(opt_crr,"nsx") || !strcmp(opt_crr,"intersection")) GRP_VAR_UNN=False;
if(!strcmp(opt_crr,"upk")){ /* [enm] Unpacking convention to utilize */
nco_upk_cnv=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd);
} /* endif "hdr_pad" */
if(!strcmp(opt_crr,"log_lvl") || !strcmp(opt_crr,"log_level")){
log_lvl=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd);
nc_set_log_level(log_lvl);
} /* !log_lvl */
if(!strcmp(opt_crr,"vrs") || !strcmp(opt_crr,"version")){
(void)nco_vrs_prn(CVS_Id,CVS_Revision);
nco_exit(EXIT_SUCCESS);
} /* endif "vrs" */
if(!strcmp(opt_crr,"wrt_tmp_fl") || !strcmp(opt_crr,"write_tmp_fl")) WRT_TMP_FL=True;
if(!strcmp(opt_crr,"no_tmp_fl")) WRT_TMP_FL=False;
} /* opt != 0 */
/* Process short options */
switch(opt){
case 0: /* Long options have already been processed, return */
break;
case '3': /* Request netCDF3 output storage format */
fl_out_fmt=NC_FORMAT_CLASSIC;
break;
case '4': /* Request netCDF4 output storage format */
fl_out_fmt=NC_FORMAT_NETCDF4;
break;
case '5': /* Request netCDF3 64-bit offset+data storage (i.e., pnetCDF) format */
fl_out_fmt=NC_FORMAT_CDF5;
break;
case '6': /* Request netCDF3 64-bit offset output storage format */
fl_out_fmt=NC_FORMAT_64BIT_OFFSET;
break;
case '7': /* Request netCDF4-classic output storage format */
fl_out_fmt=NC_FORMAT_NETCDF4_CLASSIC;
break;
case 'A': /* Toggle FORCE_APPEND */
FORCE_APPEND=!FORCE_APPEND;
break;
case 'a': /* Re-order dimensions */
flg_dmn_prc_usr_spc=True;
dmn_rdr_lst_in=nco_lst_prs_2D(optarg,",",&dmn_rdr_nbr_in);
dmn_rdr_nbr=dmn_rdr_nbr_in;
break;
case 'C': /* Extract all coordinates associated with extracted variables? */
EXTRACT_ASSOCIATED_COORDINATES=False;
break;
case 'c':
EXTRACT_ALL_COORDINATES=True;
break;
case 'D': /* Debugging level. Default is 0. */
nco_dbg_lvl=(unsigned short int)strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
break;
case 'd': /* Copy limit argument for later processing */
lmt_arg[lmt_nbr]=(char *)strdup(optarg);
lmt_nbr++;
break;
case 'F': /* Toggle index convention. Default is 0-based arrays (C-style). */
FORTRAN_IDX_CNV=!FORTRAN_IDX_CNV;
break;
case 'G': /* Apply Group Path Editing (GPE) to output group */
/* NB: GNU getopt() optional argument syntax is ugly (requires "=" sign) so avoid it
http://stackoverflow.com/questions/1052746/getopt-does-not-parse-optional-arguments-to-parameters */
gpe=nco_gpe_prs_arg(optarg);
fl_out_fmt=NC_FORMAT_NETCDF4;
break;
case 'g': /* Copy group argument for later processing */
/* Replace commas with hashes when within braces (convert back later) */
optarg_lcl=(char *)strdup(optarg);
(void)nco_rx_comma2hash(optarg_lcl);
grp_lst_in=nco_lst_prs_2D(optarg_lcl,",",&grp_lst_in_nbr);
optarg_lcl=(char *)nco_free(optarg_lcl);
break;
case 'h': /* Toggle appending to history global attribute */
HISTORY_APPEND=!HISTORY_APPEND;
break;
case 'L': /* [enm] Deflate level. Default is 0. */
dfl_lvl=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd);
break;
case 'l': /* Local path prefix for files retrieved from remote file system */
fl_pth_lcl=(char *)strdup(optarg);
break;
case 'M': /* Packing map */
nco_pck_map_sng=(char *)strdup(optarg);
nco_pck_map=nco_pck_map_get(nco_pck_map_sng);
break;
case 'O': /* Toggle FORCE_OVERWRITE */
FORCE_OVERWRITE=!FORCE_OVERWRITE;
break;
case 'o': /* Name of output file */
fl_out=(char *)strdup(optarg);
break;
case 'P': /* Packing policy */
nco_pck_plc_sng=(char *)strdup(optarg);
break;
case 'p': /* Common file path */
fl_pth=(char *)strdup(optarg);
break;
case 'R': /* Toggle removal of remotely-retrieved-files. Default is True. */
RM_RMT_FL_PST_PRC=!RM_RMT_FL_PST_PRC;
break;
case 'r': /* Print CVS program information and copyright notice */
(void)nco_vrs_prn(CVS_Id,CVS_Revision);
(void)nco_lbr_vrs_prn();
(void)nco_cpy_prn();
(void)nco_cnf_prn();
nco_exit(EXIT_SUCCESS);
break;
case 't': /* Thread number */
thr_nbr=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd);
break;
case 'U': /* Unpacking switch */
nco_pck_plc_sng=(char *)strdup("upk");
break;
case 'v': /* Variables to extract/exclude */
/* Replace commas with hashes when within braces (convert back later) */
optarg_lcl=(char *)strdup(optarg);
(void)nco_rx_comma2hash(optarg_lcl);
var_lst_in=nco_lst_prs_2D(optarg_lcl,",",&var_lst_in_nbr);
optarg_lcl=(char *)nco_free(optarg_lcl);
xtr_nbr=var_lst_in_nbr;
break;
case 'X': /* Copy auxiliary coordinate argument for later processing */
aux_arg[aux_nbr]=(char *)strdup(optarg);
aux_nbr++;
MSA_USR_RDR=True; /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */
break;
case 'x': /* Exclude rather than extract variables specified with -v */
EXCLUDE_INPUT_LIST=True;
break;
case '?': /* Question mark means unrecognized option, print proper usage then EXIT_FAILURE */
(void)fprintf(stdout,"%s: ERROR in command-line syntax/options. Missing or unrecognized option. Please reformulate command accordingly.\n",nco_prg_nm_get());
(void)nco_usg_prn();
nco_exit(EXIT_FAILURE);
break;
case '-': /* Long options are not allowed */
(void)fprintf(stderr,"%s: ERROR Long options are not available in this build. Use single letter options instead.\n",nco_prg_nm_get());
nco_exit(EXIT_FAILURE);
break;
default: /* Print proper usage */
(void)fprintf(stdout,"%s ERROR in command-line syntax/options. Please reformulate command accordingly.\n",nco_prg_nm_get());
(void)nco_usg_prn();
nco_exit(EXIT_FAILURE);
break;
} /* end switch */
if(opt_crr) opt_crr=(char *)nco_free(opt_crr);
} /* end while loop */
/* Set/report global chunk cache */
rcd+=nco_cnk_csh_ini(cnk_csh_byt);
/* Set re-order flag */
if(dmn_rdr_nbr > 0) IS_REORDER=True;
/* No re-order dimensions specified implies packing request */
if(dmn_rdr_nbr == 0){
if(nco_pck_plc == nco_pck_plc_nil) nco_pck_plc=nco_pck_plc_get(nco_pck_plc_sng);
if(nco_dbg_lvl >= nco_dbg_scl) (void)fprintf(stderr,"%s: DEBUG Packing map is %s and packing policy is %s\n",nco_prg_nm_get(),nco_pck_map_sng_get(nco_pck_map),nco_pck_plc_sng_get(nco_pck_plc));
} /* dmn_rdr_nbr != 0 */
/* From this point forward, assume ncpdq operator packs or re-orders, not both */
if(dmn_rdr_nbr > 0 && nco_pck_plc != nco_pck_plc_nil){
(void)fprintf(fp_stdout,"%s: ERROR %s does not support simultaneous dimension re-ordering (-a switch) and packing (-P switch).\nHINT: Invoke %s twice, once to re-order (with -a), and once to pack (with -P).\n",nco_prg_nm,nco_prg_nm,nco_prg_nm);
nco_exit(EXIT_FAILURE);
} /* endif */
/* Process positional arguments and fill-in filenames */
fl_lst_in=nco_fl_lst_mk(argv,argc,optind,&fl_nbr,&fl_out,&FL_LST_IN_FROM_STDIN,FORCE_OVERWRITE);
/* Initialize thread information */
thr_nbr=nco_openmp_ini(thr_nbr);
in_id_arr=(int *)nco_malloc(thr_nbr*sizeof(int));
/* Parse filename */
fl_in=nco_fl_nm_prs(fl_in,0,&fl_nbr,fl_lst_in,abb_arg_nbr,fl_lst_abb,fl_pth);
/* Make sure file is on local system and is readable or die trying */
fl_in=nco_fl_mk_lcl(fl_in,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN);
/* Open file using appropriate buffer size hints and verbosity */
if(RAM_OPEN) md_open=NC_NOWRITE|NC_DISKLESS; else md_open=NC_NOWRITE;
if(SHARE_OPEN) md_open=md_open|NC_SHARE;
rcd+=nco_fl_open(fl_in,md_open,&bfr_sz_hnt,&in_id);
/* Get file format */
(void)nco_inq_format(in_id,&fl_in_fmt);
/* Construct GTT, Group Traversal Table (groups,variables,dimensions, limits) */
(void)nco_bld_trv_tbl(in_id,trv_pth,lmt_nbr,lmt_arg,aux_nbr,aux_arg,MSA_USR_RDR,FORTRAN_IDX_CNV,grp_lst_in,grp_lst_in_nbr,var_lst_in,xtr_nbr,EXTRACT_ALL_COORDINATES,GRP_VAR_UNN,False,EXCLUDE_INPUT_LIST,EXTRACT_ASSOCIATED_COORDINATES,EXTRACT_CLL_MSR,EXTRACT_FRM_TRM,nco_pck_plc_nil,&flg_dne,trv_tbl);
/* Were all user-specified dimensions found? */
(void)nco_chk_dmn(lmt_nbr,flg_dne);
/* Create reversed dimension list */
if(dmn_rdr_nbr_in > 0){
dmn_rvr_rdr=(nco_bool *)nco_malloc(dmn_rdr_nbr_in*sizeof(nco_bool));
/* Is dimension to be reversed? i.e., does string begin with minus-sign '-'? */
for(idx_rdr=0;idx_rdr<dmn_rdr_nbr_in;idx_rdr++){
if(dmn_rdr_lst_in[idx_rdr][0] == '-'){
dmn_rvr_rdr[idx_rdr]=True;
/* Strip-out '-': Copy string to new memory one past negative sign to avoid losing byte */
optarg_lcl=dmn_rdr_lst_in[idx_rdr];
dmn_rdr_lst_in[idx_rdr]=(char *)strdup(optarg_lcl+1L);
optarg_lcl=(char *)nco_free(optarg_lcl);
}else{
dmn_rvr_rdr[idx_rdr]=False;
} /* !'-' */
} /* !idx_rdr */
} /* !dmn_rdr_nbr_in */
/* Get number of variables, dimensions, and global attributes in file, file format */
(void)trv_tbl_inq((int *)NULL,(int *)NULL,(int *)NULL,&nbr_dmn_fl,(int *)NULL,(int *)NULL,(int *)NULL,(int *)NULL,&nbr_var_fl,trv_tbl);
/* Create list of dimensions to average(ncwa)/re-order(ncpdq) */
if(IS_REORDER) (void)nco_dmn_avg_mk(in_id,dmn_rdr_lst_in,dmn_rdr_nbr_in,flg_dmn_prc_usr_spc,False,trv_tbl,&dmn_rdr_trv,&dmn_rdr_nbr_trv);
/* Fill-in variable structure list for all extracted variables */
var=nco_fll_var_trv(in_id,&xtr_nbr,trv_tbl);
/* Duplicate to output array */
var_out=(var_sct **)nco_malloc(xtr_nbr*sizeof(var_sct *));
for(idx=0;idx<xtr_nbr;idx++){
var_out[idx]=nco_var_dpl(var[idx]);
(void)nco_xrf_var(var[idx],var_out[idx]);
(void)nco_xrf_dmn(var_out[idx]);
} /* end loop over variables */
/* Refresh var_out with dim_out data */
(void)nco_var_dmn_refresh(var_out,xtr_nbr);
/* Determine conventions (ARM/CCM/CCSM/CF/MPAS) for treating file */
cnv=nco_cnv_ini(in_id);
/* Divide variable lists into lists of fixed variables and variables to be processed */
(void)nco_var_lst_dvd(var,var_out,xtr_nbr,cnv,True,nco_pck_map,nco_pck_plc,dmn_rdr_trv,dmn_rdr_nbr_trv,&var_fix,&var_fix_out,&nbr_var_fix,&var_prc,&var_prc_out,&nbr_var_prc,trv_tbl);
/* Store processed and fixed variables info into GTT */
(void)nco_var_prc_fix_trv(nbr_var_prc,var_prc,nbr_var_fix,var_fix,trv_tbl);
/* We now have final list of variables to extract. Phew. */
/* Make output and input files consanguinous */
if(fl_out_fmt == NCO_FORMAT_UNDEFINED) fl_out_fmt=fl_in_fmt;
/* Initialize, decode, and set PPC information */
if(ppc_nbr > 0) nco_ppc_ini(in_id,&dfl_lvl,fl_out_fmt,ppc_arg,ppc_nbr,trv_tbl);
/* Verify output file format supports requested actions */
(void)nco_fl_fmt_vet(fl_out_fmt,cnk_nbr,dfl_lvl);
/* Open output file */
fl_out_tmp=nco_fl_out_open(fl_out,&FORCE_APPEND,FORCE_OVERWRITE,fl_out_fmt,&bfr_sz_hnt,RAM_CREATE,RAM_OPEN,SHARE_CREATE,SHARE_OPEN,WRT_TMP_FL,&out_id);
/* Initialize chunking from user-specified inputs */
if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC) rcd+=nco_cnk_ini(in_id,fl_out,cnk_arg,cnk_nbr,cnk_map,cnk_plc,cnk_csh_byt,cnk_min_byt,cnk_sz_byt,cnk_sz_scl,&cnk);
if(IS_REORDER){
dmn_sct **dmn_rdr=NULL; /* [sct] Dimension structures to be re-ordered */
/* "dmn_rdr" is only used for input to function nco_var_dmn_rdr_mtd(), that compares dimensions by short name;
this is because the input list of -a are dimension short names; group support is obtained combining with -g option;
on input it contains a list of dimension short names (in "dmn_rdr"), that together with input array "dmn_rvr_rdr"
of flags that determine if dimension at index dmn_rvr_rdr[index] is to be reversed; use cases:
in_grp_8.nc contains the dimensions /g1/lat, /g1/lon, /g2/lat, /g2/lon
ncpdq -O -v lat,lon -a -lat,-lon -g g1,g2 ~/nco/data/in_grp_8.nc out1.nc
"dmn_rdr" contains names ["lat"], ["lon"], striped of '-' (minus) sign and dmn_rvr_rdr contains [True],[True ]
output is reversed /g1/lat, /g1/lon, /g2/lat, /g2/lon
ncpdq -O -v lat,lon -a lat,-lon -g g1,g2 ~/nco/data/in_grp_8.nc out1.nc
"dmn_rdr" contains names ["lat"], ["lon"], and dmn_rvr_rdr contains [False],[True ]
output is reversed /g1/lon, /g2/lon */
/* Form list of re-ordering dimensions from extracted input dimensions */
dmn_rdr=(dmn_sct **)nco_malloc(dmn_rdr_nbr*sizeof(dmn_sct *));
/* Initialize re-ordering dimensions; initialize only short name */
for(idx_rdr=0;idx_rdr<dmn_rdr_nbr_in;idx_rdr++){
dmn_rdr[idx_rdr]=(dmn_sct *)nco_malloc(sizeof(dmn_sct));
dmn_rdr[idx_rdr]->nm=(char *)strdup(dmn_rdr_lst_in[idx_rdr]);
dmn_rdr[idx_rdr]->nm_fll=NULL;
dmn_rdr[idx_rdr]->id=-1;
}
/* Determine and set new dimensionality in metadata of each re-ordered variable */
(void)nco_var_dmn_rdr_mtd_trv(trv_tbl,nbr_var_prc,var_prc,var_prc_out,nbr_var_fix,var_fix,dmn_rdr,dmn_rdr_nbr,dmn_rvr_rdr);
for(idx_rdr=0; idx_rdr<dmn_rdr_nbr_in; idx_rdr++){
dmn_rdr[idx_rdr]->nm=(char *)nco_free(dmn_rdr[idx_rdr]->nm);
dmn_rdr[idx_rdr]=(dmn_sct *)nco_free(dmn_rdr[idx_rdr]);
}
dmn_rdr=(dmn_sct **)nco_free(dmn_rdr);
} /* IS_REORDER */
/* Alter metadata for variables that will be packed */
if(nco_pck_plc != nco_pck_plc_nil){
if(nco_pck_plc != nco_pck_plc_upk){
/* Allocate attribute list container for maximum number of entries */
aed_lst_add_fst=(aed_sct *)nco_malloc(nbr_var_prc*sizeof(aed_sct));
aed_lst_scl_fct=(aed_sct *)nco_malloc(nbr_var_prc*sizeof(aed_sct));
} /* endif packing */
for(idx=0;idx<nbr_var_prc;idx++){
nco_pck_mtd(var_prc[idx],var_prc_out[idx],nco_pck_map,nco_pck_plc);
if(nco_pck_plc != nco_pck_plc_upk){
/* Use same copy of attribute name for all edits */
aed_lst_add_fst[idx].att_nm=add_fst_sng;
aed_lst_scl_fct[idx].att_nm=scl_fct_sng;
} /* endif packing */
} /* end loop over var_prc */
/* Transfer variable type to table. NB: Use processed variables set with new type. MUST be done before variable definition. */
(void)nco_var_typ_trv(nbr_var_prc,var_prc_out,trv_tbl);
} /* nco_pck_plc == nco_pck_plc_nil */
/* Define dimensions, extracted groups, variables, and attributes in output file. NB: record name is NULL */
(void)nco_xtr_dfn(in_id,out_id,&cnk,dfl_lvl,gpe,md5,!FORCE_APPEND,True,False,nco_pck_plc,(char *)NULL,trv_tbl);
/* Catenate time-stamped command line to "history" global attribute */
if(HISTORY_APPEND) (void)nco_hst_att_cat(out_id,cmd_ln);
if(HISTORY_APPEND && FORCE_APPEND) (void)nco_prv_att_cat(fl_in,in_id,out_id);
if(gaa_nbr > 0) (void)nco_glb_att_add(out_id,gaa_arg,gaa_nbr);
if(HISTORY_APPEND) (void)nco_vrs_att_cat(out_id);
if(thr_nbr > 1 && HISTORY_APPEND) (void)nco_thr_att_cat(out_id,thr_nbr);
/* Turn-off default filling behavior to enhance efficiency */
nco_set_fill(out_id,NC_NOFILL,&fll_md_old);
/* Take output file out of define mode */
if(hdr_pad == 0UL){
(void)nco_enddef(out_id);
}else{
(void)nco__enddef(out_id,hdr_pad);
if(nco_dbg_lvl >= nco_dbg_scl) (void)fprintf(stderr,"%s: INFO Padding header with %lu extra bytes\n",nco_prg_nm_get(),(unsigned long)hdr_pad);
} /* hdr_pad */
/* Assign zero to start and unity to stride vectors in output variables */
(void)nco_var_srd_srt_set(var_out,xtr_nbr);
/* Copy variable data for non-processed variables */
(void)nco_cpy_fix_var_trv(in_id,out_id,gpe,trv_tbl);
/* Close first input netCDF file */
nco_close(in_id);
/* Loop over input files (not currently used, fl_nbr == 1) */
for(fl_idx=0;fl_idx<fl_nbr;fl_idx++){
/* Parse filename */
if(fl_idx != 0) fl_in=nco_fl_nm_prs(fl_in,fl_idx,&fl_nbr,fl_lst_in,abb_arg_nbr,fl_lst_abb,fl_pth);
if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(stderr,"%s: INFO Input file %d is %s",nco_prg_nm_get(),fl_idx,fl_in);
/* Make sure file is on local system and is readable or die trying */
if(fl_idx != 0) fl_in=nco_fl_mk_lcl(fl_in,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN);
if(nco_dbg_lvl >= nco_dbg_fl && FL_RTR_RMT_LCN) (void)fprintf(stderr,", local file is %s",fl_in);
if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(stderr,"\n");
/* Open file once per thread to improve caching */
for(thr_idx=0;thr_idx<thr_nbr;thr_idx++) rcd+=nco_fl_open(fl_in,md_open,&bfr_sz_hnt,in_id_arr+thr_idx);
/* Timestamp end of metadata setup and disk layout */
rcd+=nco_ddra((char *)NULL,(char *)NULL,&ddra_info);
ddra_info.tmr_flg=nco_tmr_rgl;
#ifdef _OPENMP
#pragma omp parallel for private(idx,in_id) shared(aed_lst_add_fst,aed_lst_scl_fct,nco_dbg_lvl,dmn_rdr_nbr,gpe,in_id_arr,nbr_var_prc,nco_pck_map,nco_pck_plc,out_id,nco_prg_nm,rcd,var_prc,var_prc_out,nbr_dmn_fl,trv_tbl,IS_REORDER,fl_out_fmt)
#endif /* !_OPENMP */
/* Process all variables in current file */
for(idx=0;idx<nbr_var_prc;idx++){
char *grp_out_fll=NULL; /* [sng] Group name */
int grp_out_id; /* [ID] Group ID (output) */
int var_out_id; /* [ID] Variable ID (output) */
trv_sct *var_trv; /* [sct] Variable GTT object */
in_id=in_id_arr[omp_get_thread_num()];
var_prc[idx]->nc_id=in_id;
if(nco_dbg_lvl >= nco_dbg_var) rcd+=nco_var_prc_crr_prn(idx,var_prc[idx]->nm);
if(nco_dbg_lvl >= nco_dbg_var) (void)fflush(fp_stderr);
/* Obtain variable GTT object using full variable name */
var_trv=trv_tbl_var_nm_fll(var_prc[idx]->nm_fll,trv_tbl);
/* Retrieve variable from disk into memory */
(void)nco_msa_var_get_trv(in_id,var_prc[idx],trv_tbl);
/* If re-ordering */
if(IS_REORDER){
if((var_prc_out[idx]->val.vp=(void *)nco_malloc_flg(var_prc_out[idx]->sz*nco_typ_lng(var_prc_out[idx]->type))) == NULL){
(void)fprintf(fp_stdout,"%s: ERROR Unable to malloc() %ld*%lu bytes for value buffer for variable %s in main()\n",nco_prg_nm_get(),var_prc_out[idx]->sz,(unsigned long)nco_typ_lng(var_prc_out[idx]->type),var_prc_out[idx]->nm);
nco_exit(EXIT_FAILURE);
} /* endif err */
/* Change dimensionionality of values */
(void)nco_var_dmn_rdr_val_trv(var_prc[idx],var_prc_out[idx],trv_tbl);
/* Re-ordering required two value buffers, time to free() input buffer */
var_prc[idx]->val.vp=nco_free(var_prc[idx]->val.vp);
} /* IS_REORDER */
/* Edit group name for output */
if(gpe) grp_out_fll=nco_gpe_evl(gpe,var_trv->grp_nm_fll); else grp_out_fll=(char *)strdup(var_trv->grp_nm_fll);
/* Obtain output group ID */
(void)nco_inq_grp_full_ncid(out_id,grp_out_fll,&grp_out_id);
/* Memory management after current extracted group */
if(grp_out_fll) grp_out_fll=(char *)nco_free(grp_out_fll);
/* Get variable ID */
(void)nco_inq_varid(grp_out_id,var_trv->nm,&var_out_id);
/* Store the output variable ID */
var_prc_out[idx]->id=var_out_id;
if(nco_pck_plc != nco_pck_plc_nil){
/* Copy input variable buffer to processed variable buffer */
/* fxm: this is dangerous and leads to double free()'ing variable buffer */
var_prc_out[idx]->val=var_prc[idx]->val;
/* (Un-)Pack variable according to packing specification */
nco_pck_val(var_prc[idx],var_prc_out[idx],nco_pck_map,nco_pck_plc,aed_lst_add_fst+idx,aed_lst_scl_fct+idx);
} /* endif nco_pck_plc != nco_pck_plc_nil */
if(var_trv->ppc != NC_MAX_INT){
if(var_trv->flg_nsd) (void)nco_ppc_bitmask(var_trv->ppc,var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->val); else (void)nco_ppc_around(var_trv->ppc,var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->val);
} /* endif ppc */
if(nco_is_xcp(var_trv->nm)) nco_xcp_prc(var_trv->nm,var_prc_out[idx]->type,var_prc_out[idx]->sz,(char *)var_prc_out[idx]->val.vp);
#ifdef _OPENMP
#pragma omp critical
#endif /* _OPENMP */
{ /* begin OpenMP critical */
/* Copy variable to output file then free value buffer */
if(var_prc_out[idx]->nbr_dim == 0){
(void)nco_put_var1(grp_out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->val.vp,var_prc_out[idx]->type);
}else{ /* end if variable is scalar */
(void)nco_put_vara(grp_out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->cnt,var_prc_out[idx]->val.vp,var_prc_out[idx]->type);
} /* end if variable is array */
} /* end OpenMP critical */
/* Free current output buffer */
var_prc_out[idx]->val.vp=nco_free(var_prc_out[idx]->val.vp);
} /* end (OpenMP parallel for) loop over idx */
if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(fp_stderr,"\n");
/* Write/overwrite packing attributes for newly packed and re-packed variables
Logic here should nearly mimic logic in nco_var_dfn() */
if(nco_pck_plc != nco_pck_plc_nil && nco_pck_plc != nco_pck_plc_upk){
/* ...put file in define mode to allow metadata writing... */
(void)nco_redef(out_id);
/* ...loop through all variables that may have been packed... */
for(idx=0;idx<nbr_var_prc;idx++){
char *grp_out_fll=NULL; /* [sng] Group name */
int grp_out_id; /* [ID] Group ID (output) */
int var_out_id; /* [ID] Variable ID (output) */
trv_sct *var_trv; /* [sct] Variable GTT object */
/* Obtain variable GTT object using full variable name */
var_trv=trv_tbl_var_nm_fll(var_prc[idx]->nm_fll,trv_tbl);
/* Edit group name for output */
if(gpe) grp_out_fll=nco_gpe_evl(gpe,var_trv->grp_nm_fll); else grp_out_fll=(char *)strdup(var_trv->grp_nm_fll);
/* Obtain output group ID */
(void)nco_inq_grp_full_ncid(out_id,grp_out_fll,&grp_out_id);
/* Memory management after current extracted group */
if(grp_out_fll) grp_out_fll=(char *)nco_free(grp_out_fll);
/* Get variable ID */
(void)nco_inq_varid(grp_out_id,var_trv->nm,&var_out_id);
/* nco_var_dfn() pre-defined dummy packing attributes in output file only for "packable" input variables */
if(nco_pck_plc_typ_get(nco_pck_map,var_prc[idx]->typ_upk,(nc_type *)NULL)){
/* Verify input variable was newly packed by this operator
Writing pre-existing (non-re-packed) attributes here would fail because
nco_pck_dsk_inq() never fills in var->scl_fct.vp and var->add_fst.vp
Logic is same as in nco_var_dfn() (except var_prc[] instead of var[])
If operator newly packed this particular variable... */
if(
/* ...either because operator newly packs all variables... */
(nco_pck_plc == nco_pck_plc_all_new_att && nco_pck_map != nco_pck_map_dbl_flt && nco_pck_map != nco_pck_map_flt_dbl) ||
/* ...or because operator newly packs un-packed variables like this one... */
(nco_pck_plc == nco_pck_plc_all_xst_att && !var_prc[idx]->pck_ram) ||
/* ...or because operator re-packs packed variables like this one... */
(nco_pck_plc == nco_pck_plc_xst_new_att && var_prc[idx]->pck_ram)
){
/* Replace dummy packing attributes with final values, or delete them */
if(nco_dbg_lvl >= nco_dbg_io) (void)fprintf(stderr,"%s: main() replacing dummy packing attribute values for variable %s\n",nco_prg_nm,var_prc[idx]->nm);
(void)nco_aed_prc(grp_out_id,aed_lst_add_fst[idx].id,aed_lst_add_fst[idx]);
(void)nco_aed_prc(grp_out_id,aed_lst_scl_fct[idx].id,aed_lst_scl_fct[idx]);
} /* endif variable is newly packed by this operator */
} /* !nco_pck_plc_alw */
} /* end loop over var_prc */
/* Take output file out of define mode */
if(hdr_pad == 0UL) (void)nco_enddef(out_id); else (void)nco__enddef(out_id,hdr_pad);
} /* nco_pck_plc == nco_pck_plc_nil || nco_pck_plc == nco_pck_plc_upk */
/* Close input netCDF file */
for(thr_idx=0;thr_idx<thr_nbr;thr_idx++) nco_close(in_id_arr[thr_idx]);
/* Remove local copy of file */
if(FL_RTR_RMT_LCN && RM_RMT_FL_PST_PRC) (void)nco_fl_rm(fl_in);
} /* end loop over fl_idx */
/* Close output file and move it from temporary to permanent location */
(void)nco_fl_out_cls(fl_out,fl_out_tmp,out_id);
/* Clean memory unless dirty memory allowed */
if(flg_mmr_cln){
/* ncpdq-specific memory cleanup */
if(dmn_rdr_nbr > 0){
if(dmn_rdr_nbr_in > 0) dmn_rdr_lst_in=nco_sng_lst_free(dmn_rdr_lst_in,dmn_rdr_nbr_in);
dmn_rvr_rdr=(nco_bool *)nco_free(dmn_rvr_rdr);
/* Free dimension list pointers */
for(idx_rdr=0; idx_rdr<dmn_rdr_nbr_trv; idx_rdr++){
dmn_rdr_trv[idx_rdr]->nm=(char *)nco_free(dmn_rdr_trv[idx_rdr]->nm);
dmn_rdr_trv[idx_rdr]->nm_fll=(char *)nco_free(dmn_rdr_trv[idx_rdr]->nm_fll);
dmn_rdr_trv[idx_rdr]=(dmn_sct *)nco_free(dmn_rdr_trv[idx_rdr]);
}
dmn_rdr_trv=(dmn_sct **)nco_free(dmn_rdr_trv);
/* Dimension structures in dmn_rdr are owned by dmn and dmn_out, free'd later */
} /* endif dmn_rdr_nbr > 0 */
if(nco_pck_plc != nco_pck_plc_nil){
if(nco_pck_plc_sng) nco_pck_plc_sng=(char *)nco_free(nco_pck_plc_sng);
if(nco_pck_map_sng) nco_pck_map_sng=(char *)nco_free(nco_pck_map_sng);
if(nco_pck_plc != nco_pck_plc_upk){
/* No need for loop over var_prc variables to free attribute values
Variable structures and attribute edit lists share same attribute values
Free them only once, and do it in nco_var_free() */
aed_lst_add_fst=(aed_sct *)nco_free(aed_lst_add_fst);
aed_lst_scl_fct=(aed_sct *)nco_free(aed_lst_scl_fct);
} /* nco_pck_plc == nco_pck_plc_upk */
} /* nco_pck_plc == nco_pck_plc_nil */
/* NCO-generic clean-up */
/* Free individual strings/arrays */
if(cmd_ln) cmd_ln=(char *)nco_free(cmd_ln);
if(cnk_map_sng) cnk_map_sng=(char *)nco_free(cnk_map_sng);
if(cnk_plc_sng) cnk_plc_sng=(char *)nco_free(cnk_plc_sng);
if(fl_in) fl_in=(char *)nco_free(fl_in);
if(fl_out) fl_out=(char *)nco_free(fl_out);
if(fl_out_tmp) fl_out_tmp=(char *)nco_free(fl_out_tmp);
if(fl_pth) fl_pth=(char *)nco_free(fl_pth);
if(fl_pth_lcl) fl_pth_lcl=(char *)nco_free(fl_pth_lcl);
if(in_id_arr) in_id_arr=(int *)nco_free(in_id_arr);
/* Free lists of strings */
if(fl_lst_in && fl_lst_abb == NULL) fl_lst_in=nco_sng_lst_free(fl_lst_in,fl_nbr);
if(fl_lst_in && fl_lst_abb) fl_lst_in=nco_sng_lst_free(fl_lst_in,1);
if(fl_lst_abb) fl_lst_abb=nco_sng_lst_free(fl_lst_abb,abb_arg_nbr);
if(gaa_nbr > 0) gaa_arg=nco_sng_lst_free(gaa_arg,gaa_nbr);
if(var_lst_in_nbr > 0) var_lst_in=nco_sng_lst_free(var_lst_in,var_lst_in_nbr);
/* Free limits */
for(idx=0;idx<aux_nbr;idx++) aux_arg[idx]=(char *)nco_free(aux_arg[idx]);
for(idx=0;idx<lmt_nbr;idx++) lmt_arg[idx]=(char *)nco_free(lmt_arg[idx]);
for(idx=0;idx<ppc_nbr;idx++) ppc_arg[idx]=(char *)nco_free(ppc_arg[idx]);
/* Free chunking information */
for(idx=0;idx<cnk_nbr;idx++) cnk_arg[idx]=(char *)nco_free(cnk_arg[idx]);
if(cnk_nbr > 0 && (fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC)) cnk.cnk_dmn=(cnk_dmn_sct **)nco_cnk_lst_free(cnk.cnk_dmn,cnk_nbr);
if(xtr_nbr > 0) var=nco_var_lst_free(var,xtr_nbr);
if(xtr_nbr > 0) var_out=nco_var_lst_free(var_out,xtr_nbr);
var_prc=(var_sct **)nco_free(var_prc);
var_prc_out=(var_sct **)nco_free(var_prc_out);
var_fix=(var_sct **)nco_free(var_fix);
var_fix_out=(var_sct **)nco_free(var_fix_out);
trv_tbl_free(trv_tbl);
for(idx=0;idx<lmt_nbr;idx++) flg_dne[idx].dim_nm=(char *)nco_free(flg_dne[idx].dim_nm);
if(flg_dne) flg_dne=(nco_dmn_dne_t *)nco_free(flg_dne);
if(gpe) gpe=(gpe_sct *)nco_gpe_free(gpe);
} /* !flg_mmr_cln */
#ifdef ENABLE_MPI
MPI_Finalize();
#endif /* !ENABLE_MPI */
/* End timer */
ddra_info.tmr_flg=nco_tmr_end; /* [enm] Timer flag */
rcd+=nco_ddra((char *)NULL,(char *)NULL,&ddra_info);
if(rcd != NC_NOERR) nco_err_exit(rcd,"main");
nco_exit_gracefully();
return EXIT_SUCCESS;
} /* end main() */
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for struct timeval values.
 *
 * Note: *y is modified in place while normalizing the microsecond
 * carry/borrow, matching the classic GNU libc example implementation.
 *
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y's microsecond field so that
     * x->tv_usec - y->tv_usec lands in a non-negative range. */
    if (x->tv_usec < y->tv_usec) {
        int borrow_sec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow_sec;
        y->tv_sec += borrow_sec;
    }
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry_sec = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry_sec;
        y->tv_sec -= carry_sec;
    }
    /* tv_usec of the result is certainly positive at this point. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    /* Negative difference iff x's (adjusted) seconds are smaller. */
    return x->tv_sec < y->tv_sec;
}
/*
 * Benchmark driver for the order-1 3D 7-point variable-coefficient stencil.
 *
 * Usage: ./3d7pt_var Nx Ny Nz [Nt]
 *   Nx, Ny, Nz : interior problem dimensions (a halo of 1 is added per side)
 *   Nt         : number of time steps
 *
 * Fixes vs. the original:
 *   - Nx/Ny/Nz/Nt get defaults instead of being read uninitialized when
 *     fewer arguments are given.
 *   - min() (undefined) replaced by the MIN macro defined above.
 *   - Both time planes are fully initialized: the stencil reads the halo
 *     of plane A[1] at t = 1, which was previously uninitialized.
 *   - tile_size and the top-level A / coef pointer arrays are freed.
 */
int main(int argc, char *argv[])
{
    int t, i, j, k, m, test;
    /* Defaults guard against reading uninitialized dimensions. */
    int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
    if (argc > 3) {
        Nx = atoi(argv[1]) + 2;
        Ny = atoi(argv[2]) + 2;
        Nz = atoi(argv[3]) + 2;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);

    /* Allocate the two time planes A[0]/A[1] and the 7 coefficient grids. */
    double ****A = (double ****) malloc(sizeof(double***) * 2);
    for (m = 0; m < 2; m++) {
        A[m] = (double ***) malloc(sizeof(double**) * Nz);
        for (i = 0; i < Nz; i++) {
            A[m][i] = (double**) malloc(sizeof(double*) * Ny);
            for (j = 0; j < Ny; j++) {
                A[m][i][j] = (double*) malloc(sizeof(double) * Nx);
            }
        }
    }
    double ****coef = (double ****) malloc(sizeof(double***) * 7);
    for (m = 0; m < 7; m++) {
        coef[m] = (double ***) malloc(sizeof(double**) * Nz);
        for (i = 0; i < Nz; i++) {
            coef[m][i] = (double**) malloc(sizeof(double*) * Ny);
            for (j = 0; j < Ny; j++) {
                coef[m][i][j] = (double*) malloc(sizeof(double) * Nx);
            }
        }
    }

    /* Tile size information, including extra element to decide the list
     * length; the list is modified here before source-to-source transforms. */
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    tile_size = (int*) realloc((void *)tile_size, sizeof(int) * 5);
    tile_size[0] = 4;
    tile_size[1] = 4;
    tile_size[2] = 24;
    tile_size[3] = 128;
    tile_size[4] = -1;

    /* Timekeeping. */
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff = 1.e100;

    /* Initialize with reproducible pseudo-random values.  Both planes are
     * filled over the full domain (including halos): A[1]'s halo is read
     * by the stencil at t = 1 but never written. */
    const int BASE = 1024;
    srand(42);
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            for (k = 0; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
                A[1][i][j][k] = A[0][i][j][k];
            }
        }
    }
    /* Coefficients are only read at interior points, so the original
     * 1..N-1 initialization range is sufficient. */
    for (m = 0; m < 7; m++) {
        for (i = 1; i < Nz; i++) {
            for (j = 1; j < Ny; j++) {
                for (k = 1; k < Nx; k++) {
                    coef[m][i][j][k] = 1.0 * (rand() % BASE);
                }
            }
        }
    }

#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif

    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif

    for (test = 0; test < TESTS; test++) {
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        for (t = 0; t < Nt-1; t++) {
            for (i = 1; i < Nz-1; i++) {
                for (j = 1; j < Ny-1; j++) {
                    for (k = 1; k < Nx-1; k++) {
                        A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i  ][j  ][k  ] +
                                              coef[1][i][j][k] * A[t%2][i-1][j  ][k  ] +
                                              coef[2][i][j][k] * A[t%2][i  ][j-1][k  ] +
                                              coef[3][i][j][k] * A[t%2][i  ][j  ][k-1] +
                                              coef[4][i][j][k] * A[t%2][i+1][j  ][k  ] +
                                              coef[5][i][j][k] * A[t%2][i  ][j+1][k  ] +
                                              coef[6][i][j][k] * A[t%2][i  ][j  ][k+1];
                    }
                }
            }
        }
#pragma endscop
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        /* BUG FIX: the original called the undefined lowercase min();
         * the macro defined at the top of this file is MIN. */
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }
    PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif

    /* Free all allocations (the original leaked tile_size and the
     * top-level A and coef pointer arrays). */
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            free(A[0][i][j]);
            free(A[1][i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
    }
    free(A[0]);
    free(A[1]);
    free(A);
    for (m = 0; m < 7; m++) {
        for (i = 0; i < Nz; i++) {
            for (j = 0; j < Ny; j++) {
                free(coef[m][i][j]);
            }
            free(coef[m][i]);
        }
        free(coef[m]);
    }
    free(coef);
    free(tile_size);
    return 0;
}
|
mxnet_op.h | /*!
* Copyright (c) 2017 by Contributors
* \file mxnet_op.h
* \brief
* \author Junyuan Xie
*/
#ifndef MXNET_OPERATOR_MXNET_OP_H_
#define MXNET_OPERATOR_MXNET_OP_H_
#include <mxnet/base.h>
#include <algorithm>
namespace mxnet {
namespace op {
namespace mxnet_op {
#ifdef __CUDA_ARCH__
__constant__ const float PI = 3.14159265358979323846;
#else
const float PI = 3.14159265358979323846;
using std::isnan;
#endif
/*!
 * \brief Dispatcher for elementwise kernels; specialized per device type.
 */
template<typename OP, typename xpu>
struct Kernel;

/*!
 * \brief CPU specialization: applies OP::Map to every index in [0, N).
 */
template<typename OP>
struct Kernel<OP, cpu> {
  /*!
   * \brief Run OP::Map(i, args...) for i = 0..N-1.
   * Parallelized with OpenMP only when MXNET_USE_CUDA == 0.
   */
  template<typename ...Args>
  inline static void Launch(mshadow::Stream<cpu> *s, int N, Args... args) {
#if (MXNET_USE_CUDA == 0)
    #pragma omp parallel for
#endif
    for (int idx = 0; idx < N; ++idx) {
      OP::Map(idx, args...);
    }
  }
};
#ifdef __CUDACC__
/*!
 * \brief Generic CUDA kernel: each thread covers indices in a grid-stride loop.
 */
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel(int N, Args... args) {
  const int stride = blockDim.x * gridDim.x;
  for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < N; idx += stride) {
    OP::Map(idx, args...);
  }
}
/*!
 * \brief GPU specialization: launches mxnet_generic_kernel on the
 * CUDA stream wrapped by `s`, with enough blocks to cover N indices
 * (capped at kMaxGridNum).
 */
template<typename OP>
struct Kernel<OP, gpu> {
  template<typename ...Args>
  inline static void Launch(mshadow::Stream<gpu> *s, int N, Args... args) {
    using namespace mshadow::cuda;
    const int nblocks =
        std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
    mxnet_generic_kernel<OP, Args...>
        <<<nblocks, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
            N, args...);
  }
};
#endif // __CUDACC__
/*!
 * \brief operator request type switch
 *
 * Expands the statement list in __VA_ARGS__ with `ReqType` bound to a
 * compile-time-constant int: kWriteTo for both kWriteTo and kWriteInplace,
 * kAddTo for kAddTo.  kNullOp and unrecognized requests expand to nothing.
 */
#define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...)  \
  switch (req) {                                    \
    case kNullOp:                                   \
      break;                                        \
    case kWriteInplace:                             \
    case kWriteTo:                                  \
      {                                             \
        const int ReqType = kWriteTo;               \
        {__VA_ARGS__}                               \
      }                                             \
      break;                                        \
    case kAddTo:                                    \
      {                                             \
        const int ReqType = kAddTo;                 \
        {__VA_ARGS__}                               \
      }                                             \
      break;                                        \
    default:                                        \
      break;                                        \
  }
/*!
 * \brief assign the val to out according
 * to request in Kernel::Launch
 * \param out the data to be assigned
 * \param req the assignment request
 * \param val the value to be assigned to out
 * \tparam OType output type
 * \tparam VType value type
 * \note kWriteInplace is handled identically to kWriteTo; kNullOp (or an
 * unrecognized request) leaves `out` untouched.
 */
#define KERNEL_ASSIGN(out, req, val)  \
  {                                   \
    switch (req) {                    \
      case kNullOp:                   \
        break;                        \
      case kWriteTo:                  \
      case kWriteInplace:             \
        (out) = (val);                \
        break;                        \
      case kAddTo:                    \
        (out) += (val);               \
        break;                        \
      default:                        \
        break;                        \
    }                                 \
  }
/*!
 * \brief Elementwise clip kernel: out[i] = datas[i] clamped to [a_min, a_max].
 */
struct clip {
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType* out, const DType* datas,
                                  DType a_min, DType a_max) {
    const DType v = datas[i];
    // Saturate at either bound, otherwise pass the value through.
    out[i] = (v > a_max) ? a_max : ((v < a_min) ? a_min : v);
  }
};
/*!
 * \brief Gradient of clip: passes grad[i] through where datas[i] lies
 * inside [a_min, a_max], and emits zero where the forward pass saturated.
 */
struct clip_grad {
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType* out, const DType* grad, const DType* datas,
                                  DType a_min, DType a_max) {
    const DType v = datas[i];
    const bool saturated = (v > a_max) || (v < a_min);
    out[i] = saturated ? DType(0) : grad[i];
  }
};
#define REVERSE_MAX_DIM 10
/*!
 * \brief Kernel that reverses a tensor along one or more dimensions:
 * dst[ReverseIndex(i)] = src[i] for every flat index i.
 */
struct reverse {
  /*!
   * \brief Map a flat source index to the flat destination index obtained
   * by mirroring each of the `nreversedim` reversed dimensions.
   * \param idx          flat index into the source
   * \param nreversedim  number of dimensions being reversed
   * \param stride_      extent factor per reversed dimension
   * \param trailing_    number of trailing elements per reversed dimension
   * \return flat index into the destination
   */
  MSHADOW_XINLINE static int ReverseIndex(index_t idx,
                                          index_t nreversedim,
                                          const index_t * stride_,
                                          const index_t * trailing_) {
    index_t outputIndex = idx;
    for (index_t i = 0; i < nreversedim; ++i) {
      // Decompose the running index around dimension i ...
      const index_t low = outputIndex % trailing_[i];
      index_t high = outputIndex / trailing_[i];
      const index_t x = high%stride_[i];
      high /= stride_[i];
      // ... and rebuild it with coordinate x mirrored to stride_[i]-1-x.
      outputIndex = (high*stride_[i] + stride_[i] - 1 - x)*trailing_[i] + low;
    }
    return outputIndex;
  }
#ifdef __CUDACC__
  template<typename DType>
  __device__ static void Map(int index, index_t nreversedim, const DType *src, DType *dst,
                             const index_t * stride_,
                             const index_t * trailing_) {
    // Stage the small stride/trailing tables in shared memory.
    // NOTE(review): assumes the block has at least REVERSE_MAX_DIM threads
    // so every table slot gets loaded — confirm against launch config.
    __shared__ index_t stride_share[REVERSE_MAX_DIM];
    __shared__ index_t trailing_share[REVERSE_MAX_DIM];
    if (threadIdx.x < REVERSE_MAX_DIM) {
      stride_share[threadIdx.x] = stride_[threadIdx.x];
      trailing_share[threadIdx.x] = trailing_[threadIdx.x];
    }
    __syncthreads();
    index_t new_idx = ReverseIndex(index, nreversedim, stride_share, trailing_share);
    dst[new_idx] = src[index];
  }
#else
  // Host/CPU variant: identical logic, reading the tables directly.
  template<typename DType>
  MSHADOW_XINLINE static void Map(int index, index_t nreversedim, const DType *src, DType *dst,
                                  const index_t * stride_,
                                  const index_t * trailing_) {
    index_t new_idx = ReverseIndex(index, nreversedim, stride_, trailing_);
    dst[new_idx] = src[index];
  }
#endif
};
} // namespace mxnet_op
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_MXNET_OP_H_
|
GB_binop__iseq_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__iseq_int32)
// A.*B function (eWiseMult): GB (_AemultB)
// A.*B function (eWiseMult): GB (_AemultB_02__iseq_int32)
// A.*B function (eWiseMult): GB (_AemultB_03__iseq_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__iseq_int32)
// A*D function (colscale): GB (_AxD__iseq_int32)
// D*A function (rowscale): GB (_DxB__iseq_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__iseq_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__iseq_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__iseq_int32)
// C=scalar+B GB (_bind1st__iseq_int32)
// C=scalar+B' GB (_bind1st_tran__iseq_int32)
// C=A+scalar GB (_bind2nd__iseq_int32)
// C=A'+scalar GB (_bind2nd_tran__iseq_int32)
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = (aij == bij)
// All three operand types of ISEQ_INT32 are int32_t:
#define GB_ATYPE \
    int32_t

#define GB_BTYPE \
    int32_t

#define GB_CTYPE \
    int32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int32_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

// access the value of C at position p
#define GB_CX(p) Cx [p]

// binary operator: z = (x == y), the "is equal" comparison as int32
#define GB_BINOP(z, x, y, i, j) \
    z = (x == y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISEQ || GxB_NO_INT32 || GxB_NO_ISEQ_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Compiled out (#if 0): no dense ewise3-accum variant is generated for this
// operator, hence the "(none)" placeholder name.
#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Returns GrB_NO_VALUE when this operator/type pairing is compiled out
// (GB_DISABLE), so the caller falls back to the generic kernel.
GrB_Info GB (_Cdense_ewise3_noaccum__iseq_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// B_ek_slicing / B_ntasks / B_nthreads: parallel task partition of B's
// entries (produced by GB_ek_slice).
GrB_Info GB (_Cdense_accumB__iseq_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

// FIX: the original emitted `return (GrB_SUCCESS)` both inside the scalar
// block and again after it — the second return was unreachable.  The single
// trailing return now matches the sibling _Cdense_accumB wrapper.  This file
// is auto-generated; the fix belongs in the generator template as well.
GrB_Info GB (_Cdense_accumb__iseq_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

// The *_is_pattern flags tell the template to ignore the corresponding
// matrix's values and use only its structure.
GrB_Info GB (_AxD__iseq_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

// Row-scale counterpart of _AxD above; C's values are written through Cx.
GrB_Info GB (_DxB__iseq_int32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// Slicing workspaces for M, A, and B are declared here and released by
// GB_FREE_WORK after the add template runs.
GrB_Info GB (_AaddB__iseq_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

// General-case elementwise multiply; the specific sparsity combination is
// selected inside GB_emult_01_meta.c via ewise_method.
GrB_Info GB (_AemultB_01__iseq_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

// For ISEQ, GB_BINOP_FLIP is defined as 0 above, so only the unflipped
// branch below is compiled in.
GrB_Info GB (_AemultB_02__iseq_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

// M_ek_slicing / M_ntasks / M_nthreads: parallel task partition of the mask.
GrB_Info GB (_AemultB_03__iseq_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

// Bitmap-result elementwise multiply; mask handling (none / M / !M) is
// selected by ewise_method and the Mask_* flags inside the template.
GrB_Info GB (_AemultB_bitmap__iseq_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Cx [p] = (x == Bx [p]) for each entry present per the bitmap Bb
// (presumably GBB treats a NULL Bb as "all present" — confirm in GB.h).
GrB_Info GB (_bind1st__iseq_int32)
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        int32_t bij = Bx [p] ;
        Cx [p] = (x == bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Cx [p] = (Ax [p] == y) for each entry present per the bitmap Ab.
GrB_Info GB (_bind2nd__iseq_int32)
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int32_t aij = Ax [p] ;
        Cx [p] = (aij == y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int32_t aij = Ax [pA] ; \
    Cx [pC] = (x == aij) ; \
}

GrB_Info GB (_bind1st_tran__iseq_int32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows this function
    #undef GB_ATYPE
    #define GB_ATYPE \
        int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int32_t aij = Ax [pA] ; \
    Cx [pC] = (aij == y) ; \
}

// Transposes A while applying z = (aij == y) with the scalar bound second.
GrB_Info GB (_bind2nd_tran__iseq_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
rbc_validator.c | //
// Created by cp723 on 2/7/2019.
//
#include <openssl/err.h>
#include <openssl/evp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#if defined(USE_MPI)
#include <mpi.h>
#else
#include <omp.h>
#endif
#include "crypto/cipher.h"
#include "crypto/ec.h"
#include "crypto/hash.h"
#include "perm.h"
#include "seed_iter.h"
#include "util.h"
#include "uuid.h"
#include "validator.h"
#if defined(USE_MPI)
#include "cmdline/cmdline_mpi.h"
#else
#include "cmdline/cmdline_omp.h"
#endif
/* Program exit codes: 0 = match found, 1 = no match, 2 = error. */
enum StatusCode { SC_Found = 0, SC_NotFound = 1, SC_Failure = 2 };

/* If using OpenMP, and using Clang 10+ or GCC 9+, support omp_pause_resource_all.
 * BUG FIX: the original tested !defined(__clang); the predefined macro is
 * __clang__, so the "not clang" guard on the GCC arm never actually fired
 * and the check degenerated to __GNUC__ >= 9 alone. */
#if !defined(USE_MPI) && \
    ((defined(__clang__) && __clang_major__ >= 10) || (!defined(__clang__) && __GNUC__ >= 9))
#define OMP_DESTROY() \
    if (omp_pause_resource_all(omp_pause_hard)) { \
        fprintf(stderr, "ERROR: omp_pause_resource_all failed."); \
    }
#else
#define OMP_DESTROY()
#endif
// By setting it to 0, we're assuming it'll be zeroified when arguments are first created
#define MODE_NONE 0
// Used with symmetric encryption
#define MODE_CIPHER 0b1
// Used with matching a public key
#define MODE_EC 0b10
// Used with matching a digest
#define MODE_HASH 0b100
// Used alongside MODE_HASH for a custom digest_size
#define MODE_XOF 0b1000
#define DEFAULT_XOF_SIZE 32
/* Descriptor for one supported algorithm: command-line abbreviation,
 * display name, OpenSSL NID, and the MODE_* capability bits that apply. */
typedef struct Algo {
    const char* abbr_name;
    const char* full_name;
    int nid;
    int mode;
} Algo;

/* Table of supported algorithms, terminated by a zeroed sentinel entry
 * (abbr_name == NULL), as required by findAlgo(). */
const Algo supportedAlgos[] = {
    {"none", "None", 0, MODE_NONE},
    // Cipher algorithms
    {"aes", "AES-256-ECB", NID_aes_256_ecb, MODE_CIPHER},
    {"chacha20", "ChaCha20", NID_chacha20, MODE_CIPHER},
    // EC algorithms
    {"ecc", "Secp256r1", NID_X9_62_prime256v1, MODE_EC},
    // Hashing algorithms
    {"md5", "MD5", NID_md5, MODE_HASH},
    {"sha1", "SHA1", NID_sha1, MODE_HASH},
    {"sha224", "SHA2-224", NID_sha224, MODE_HASH},
    {"sha256", "SHA2-256", NID_sha256, MODE_HASH},
    {"sha384", "SHA2-384", NID_sha384, MODE_HASH},
    {"sha512", "SHA2-512", NID_sha512, MODE_HASH},
    {"sha3-224", "SHA3-224", NID_sha3_224, MODE_HASH},
    {"sha3-256", "SHA3-256", NID_sha3_256, MODE_HASH},
    {"sha3-384", "SHA3-384", NID_sha3_384, MODE_HASH},
    {"sha3-512", "SHA3-512", NID_sha3_512, MODE_HASH},
    {"shake128", "SHAKE128", NID_shake128, MODE_HASH | MODE_XOF},
    {"shake256", "SHAKE256", NID_shake256, MODE_HASH | MODE_XOF},
    {"kang12", "KangarooTwelve", NID_kang12, MODE_HASH | MODE_XOF},
    {0},
};

/* Raw hex-string inputs captured from the positional arguments; fields
 * that do not apply to the selected mode stay NULL (struct is zeroed). */
struct Params {
    char *seed_hex, *client_crypto_hex, *uuid_hex, *iv_hex, *salt_hex;
};
/* Look up an algorithm by its command-line abbreviation.
 * `algos` must be terminated by an entry whose abbr_name is NULL.
 * Returns the matching table entry, or NULL when nothing matches. */
const Algo* findAlgo(const char* abbr_name, const Algo* algos) {
    for (const Algo* entry = algos; entry->abbr_name != NULL; entry++) {
        if (strcmp(abbr_name, entry->abbr_name) == 0) {
            return entry;
        }
    }
    return NULL;
}
/* Decide whether the usage text must be printed instead of running.
 * Returns 1 when usage was printed (caller should exit), 0 otherwise. */
int checkUsage(int argc, const struct gengetopt_args_info* args_info) {
    int show_usage = 0;
    if (args_info->usage_given || argc < 2) {
        show_usage = 1;
    } else if (args_info->inputs_num == 0) {
        /* No positional inputs: only valid for --random / --benchmark runs. */
        if (!args_info->random_flag && !args_info->benchmark_flag) {
            show_usage = 1;
        }
    } else if (args_info->mode_given) {
        /* Positional inputs are incompatible with mode "none" and with
         * the --random / --benchmark flags. */
        const Algo* algo = &(supportedAlgos[args_info->mode_arg]);
        if (algo->mode == MODE_NONE || args_info->random_flag || args_info->benchmark_flag) {
            show_usage = 1;
        }
    }
    if (show_usage) {
        fprintf(stderr, "%s\n", gengetopt_args_info_usage);
        return 1;
    }
    return 0;
}
/* Validate parsed command-line options.  Prints a diagnostic and returns 1
 * on the first violation found; returns 0 when every check passes. */
int validateArgs(const struct gengetopt_args_info* args_info) {
    /* --mode is required; enforced manually because gengetopt's built-in
     * "required" handling is not used together with --usage. */
    if (!args_info->mode_given) {
        fprintf(stderr, "%s: --mode option is required\n", CMDLINE_PARSER_PACKAGE);
        return 1;
    }
    /* Both --mismatches and --subkey are bounded by the 256-bit seed. */
    if (args_info->mismatches_arg > SEED_SIZE * 8) {
        fprintf(stderr, "--mismatches cannot exceed the seed size of 256-bits.\n");
        return 1;
    }
    if (args_info->subkey_arg > SEED_SIZE * 8) {
        fprintf(stderr, "--subkey cannot exceed the seed size of 256-bits.\n");
        return 1;
    }
    if (args_info->subkey_arg < 1) {
        fprintf(stderr, "--subkey must be at least 1.\n");
        return 1;
    }
#ifndef USE_MPI
    if (args_info->threads_arg > omp_get_thread_limit()) {
        fprintf(stderr, "--threads exceeds program thread limit.\n");
        return 1;
    }
#endif
    if (args_info->mismatches_arg < 0) {
        /* A negative --mismatches means "unset"; several modes require it. */
        if (args_info->random_flag) {
            fprintf(stderr, "--mismatches must be set and non-negative when using --random.\n");
            return 1;
        }
        if (args_info->benchmark_flag) {
            fprintf(stderr, "--mismatches must be set and non-negative when using --benchmark.\n");
            return 1;
        }
        if (args_info->fixed_flag) {
            fprintf(stderr, "--mismatches must be set and non-negative when using --fixed.\n");
            return 1;
        }
    } else if (args_info->mismatches_arg > args_info->subkey_arg) {
        fprintf(stderr, "--mismatches cannot be set larger than --subkey.\n");
        return 1;
    }
    return 0;
}
/* Parse and validate the positional arguments into `params`.
 *
 * Expected layout depends on the selected mode:
 *   MODE_CIPHER: HOST_SEED CLIENT_CIPHER UUID [IV]
 *   MODE_EC:     HOST_SEED CLIENT_PUB_KEY
 *   MODE_HASH:   HOST_SEED CLIENT_DIGEST [SALT]
 *   MODE_NONE:   HOST_SEED
 *
 * All inputs are hex strings (two characters per byte).
 * Returns 0 on success, 1 on any validation failure (diagnostic printed).
 *
 * Fixes vs. the original:
 *   - `strlen(...) % block_len * 2 != 0` parsed as
 *     `(strlen(...) % block_len) * 2` and only checked divisibility by
 *     block_len; the hex string must be a multiple of block_len * 2.
 *   - the EC_GROUP was leaked on the bad-public-key error path.
 *   - findAlgo()'s result is NULL-checked before dereference.
 */
int parse_params(struct Params* params, const struct gengetopt_args_info* args_info) {
    if (args_info->inputs_num < 1) {
        return 0;
    }
    if (strlen(args_info->inputs[0]) != SEED_SIZE * 2) {
        fprintf(stderr, "HOST_SEED must be %d byte(s) long.\n", SEED_SIZE);
        return 1;
    }
    params->seed_hex = args_info->inputs[0];
    const Algo* algo = findAlgo(args_info->mode_orig, supportedAlgos);
    /* Defensive: gengetopt should restrict --mode to known values, but
     * findAlgo can return NULL; don't dereference it blindly. */
    if (algo == NULL) {
        fprintf(stderr, "%s\n", gengetopt_args_info_usage);
        return 1;
    }
    if (algo->mode & MODE_CIPHER) {
        if (args_info->inputs_num < 3 || args_info->inputs_num > 4) {
            fprintf(stderr, "%s\n", gengetopt_args_info_usage);
            return 1;
        }
        const EVP_CIPHER* evp_cipher = EVP_get_cipherbynid(algo->nid);
        if (evp_cipher == NULL) {
            fprintf(stderr, "Not a valid EVP cipher nid.\n");
            return 1;
        }
        size_t block_len = EVP_CIPHER_block_size(evp_cipher);
        /* Hex encodes 2 characters per byte, so the ciphertext string
         * must be a multiple of block_len * 2 characters. */
        if (strlen(args_info->inputs[1]) % (block_len * 2) != 0) {
            fprintf(stderr, "CLIENT_CIPHER not a multiple of the block size %zu bytes for %s\n",
                    block_len, algo->full_name);
            return 1;
        }
        params->client_crypto_hex = args_info->inputs[1];
        if (strlen(args_info->inputs[2]) != UUID_STR_LEN) {
            fprintf(stderr, "UUID not %d characters long.\n", UUID_STR_LEN);
            return 1;
        }
        params->uuid_hex = args_info->inputs[2];
        if (args_info->inputs_num == 4) {
            if (EVP_CIPHER_iv_length(evp_cipher) == 0) {
                fprintf(stderr, "The chosen cipher doesn't require an IV.\n");
                return 1;
            }
            if (strlen(args_info->inputs[3]) != EVP_CIPHER_iv_length(evp_cipher) * 2) {
                fprintf(stderr,
                        "Length of IV doesn't match the chosen cipher's required IV"
                        " length match\n");
                return 1;
            }
            params->iv_hex = args_info->inputs[3];
        }
    } else if (algo->mode & MODE_EC) {
        if (args_info->inputs_num != 2) {
            fprintf(stderr, "%s\n", gengetopt_args_info_usage);
            return 1;
        }
        EC_GROUP* group = EC_GROUP_new_by_curve_name(algo->nid);
        if (group == NULL) {
            fprintf(stderr, "EC_GROUP_new_by_curve_name failed.\n");
            return 1;
        }
        /* The public key may use compressed (order_len + 1 bytes) or
         * uncompressed (2 * order_len + 1 bytes) point encoding. */
        size_t order_len = (EC_GROUP_order_bits(group) + 7) / 8;
        size_t comp_len = order_len + 1;
        size_t uncomp_len = (order_len * 2) + 1;
        if (strlen(args_info->inputs[1]) != comp_len * 2 &&
            strlen(args_info->inputs[1]) != uncomp_len * 2) {
            fprintf(stderr, "CLIENT_PUB_KEY not %zu nor %zu bytes for %s\n", comp_len, uncomp_len,
                    algo->full_name);
            /* BUG FIX: the original leaked `group` on this error path. */
            EC_GROUP_free(group);
            return 1;
        }
        EC_GROUP_free(group);
        params->client_crypto_hex = args_info->inputs[1];
    } else if (algo->mode & MODE_HASH) {
        if (args_info->inputs_num < 2 || args_info->inputs_num > 3) {
            fprintf(stderr, "%s\n", gengetopt_args_info_usage);
            return 1;
        }
        /* Fixed-output digests must match the algorithm's size exactly;
         * XOF digests (SHAKE/K12) accept any length. */
        if (!(algo->mode & MODE_XOF)) {
            const EVP_MD* md = EVP_get_digestbynid(algo->nid);
            if (md == NULL) {
                fprintf(stderr,
                        "ERROR: EVP_get_digestbynid failed.\nOpenSSL Error:"
                        "%s\n",
                        ERR_error_string(ERR_get_error(), NULL));
                return 1;
            }
            size_t digest_size = EVP_MD_size(md);
            if (strlen(args_info->inputs[1]) != digest_size * 2) {
                fprintf(stderr, "CLIENT_DIGEST not equivalent to %zu bytes for %s\n", digest_size,
                        algo->full_name);
                return 1;
            }
        }
        params->client_crypto_hex = args_info->inputs[1];
        if (args_info->inputs_num > 2) {
            params->salt_hex = args_info->inputs[2];
        }
    }
    // MODE_NONE
    else if (args_info->inputs_num != 1) {
        fprintf(stderr, "%s\n", gengetopt_args_info_usage);
        return 1;
    }
    return 0;
}
/* Decode the hex string `hex` into `buffer`, reporting parse failures
 * to stderr.  Returns 0 on success, nonzero on failure. */
int parse_hex_handler(unsigned char* buffer, const char* hex) {
    int status = parseHex(buffer, hex);
    switch (status) {
        case 1:
            fprintf(stderr, "ERROR: CIPHER had non-hexadecimal characters.\n");
            break;
        case 2:
            fprintf(stderr, "ERROR: CIPHER did not have even length.\n");
            break;
        default:
            /* 0 (success) and any other status print nothing. */
            break;
    }
    return status != 0;
}
/// OpenMP implementation
/// \return Returns a 0 on successfully finding a match, a 1 when unable to find a match,
/// and a 2 when a general error has occurred.
int main(int argc, char* argv[]) {
int my_rank;
#ifdef USE_MPI
int nprocs;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
#else
int core_count;
#endif
struct Params params;
struct gengetopt_args_info args_info;
unsigned char host_seed[SEED_SIZE];
unsigned char client_seed[SEED_SIZE];
const EVP_CIPHER* evp_cipher;
unsigned char client_cipher[EVP_MAX_BLOCK_LENGTH];
unsigned char uuid[UUID_SIZE];
unsigned char iv[EVP_MAX_IV_LENGTH];
EC_GROUP* ec_group;
EC_POINT* client_ec_point;
unsigned char* client_digest;
size_t digest_size;
const EVP_MD* md;
unsigned char* salt = NULL;
size_t salt_size = 0;
int mismatch, ending_mismatch;
int random_flag, benchmark_flag;
int all_flag, count_flag, verbose_flag;
int subseed_length;
const Algo* algo;
double start_time, duration, key_rate;
long long int validated_keys = 0;
int found, subfound;
memset(¶ms, 0, sizeof(params));
// Parse arguments
if (cmdline_parser(argc, argv, &args_info)) {
#ifdef USE_MPI
MPI_Finalize();
#else
OMP_DESTROY()
#endif
return SC_Failure;
}
if (checkUsage(argc, &args_info)) {
#ifdef USE_MPI
MPI_Finalize();
#else
OMP_DESTROY()
#endif
return EXIT_SUCCESS;
}
if (validateArgs(&args_info) || parse_params(¶ms, &args_info)) {
#ifdef USE_MPI
MPI_Finalize();
#else
OMP_DESTROY()
#endif
return SC_Failure;
}
algo = findAlgo(args_info.mode_orig, supportedAlgos);
random_flag = args_info.random_flag;
benchmark_flag = args_info.benchmark_flag;
all_flag = args_info.all_flag;
count_flag = args_info.count_flag;
verbose_flag = args_info.verbose_flag;
subseed_length = args_info.subkey_arg;
mismatch = 0;
ending_mismatch = args_info.subkey_arg;
// If --fixed option was set, set the validation range to only use the --mismatches value.
if (args_info.fixed_flag) {
mismatch = args_info.mismatches_arg;
ending_mismatch = args_info.mismatches_arg;
}
// If --mismatches is set and non-negative, set the ending_mismatch to its value.
else if (args_info.mismatches_arg >= 0) {
ending_mismatch = args_info.mismatches_arg;
}
#ifndef USE_MPI
if (args_info.threads_arg > 0) {
omp_set_num_threads(args_info.threads_arg);
}
// omp_get_num_threads() must be called in a parallel region, but
// ensure that only one thread calls it
#pragma omp parallel default(none) shared(core_count)
#pragma omp single
core_count = omp_get_num_threads();
#endif
// Memory alloc/init
if (algo->mode & MODE_CIPHER) {
evp_cipher = EVP_get_cipherbynid(algo->nid);
} else if (algo->mode & MODE_EC) {
if ((ec_group = EC_GROUP_new_by_curve_name(algo->nid)) == NULL) {
fprintf(stderr, "ERROR: EC_GROUP_new_by_curve_name failed.\nOpenSSL Error: %s\n",
ERR_error_string(ERR_get_error(), NULL));
OMP_DESTROY()
return SC_Failure;
}
if ((client_ec_point = EC_POINT_new(ec_group)) == NULL) {
fprintf(stderr, "ERROR: EC_POINT_new failed.\nOpenSSL Error: %s\n",
ERR_error_string(ERR_get_error(), NULL));
EC_GROUP_free(ec_group);
OMP_DESTROY()
return SC_Failure;
}
} else if (algo->mode & MODE_HASH) {
if (algo->nid != NID_kang12 && (md = EVP_get_digestbynid(algo->nid)) == NULL) {
fprintf(stderr, "ERROR: EVP_get_digestbynid failed.\nOpenSSL Error: %s\n",
ERR_error_string(ERR_get_error(), NULL));
// No need to deallocate an EVP_MD
OMP_DESTROY()
return SC_Failure;
}
if (algo->mode & MODE_XOF) {
if (random_flag || benchmark_flag) {
digest_size = DEFAULT_XOF_SIZE;
} else {
digest_size = (strlen(params.client_crypto_hex) + 1) / 2;
}
} else {
digest_size = EVP_MD_size(md);
}
if ((client_digest = malloc(digest_size)) == NULL) {
perror("ERROR");
OMP_DESTROY()
return SC_Failure;
}
if (params.salt_hex != NULL) {
salt_size = (strlen(params.salt_hex) + 1) / 2;
if ((salt = malloc(salt_size)) == NULL) {
perror("ERROR");
free(client_digest);
OMP_DESTROY()
return SC_Failure;
}
}
}
if (random_flag || benchmark_flag) {
#ifdef USE_MPI
if (my_rank == 0) {
#endif
gmp_randstate_t randstate;
// Set the gmp prng algorithm and set a seed based on the current time
gmp_randinit_default(randstate);
gmp_randseed_ui(randstate, (unsigned long)time(NULL));
getRandomSeed(host_seed, SEED_SIZE, randstate);
getRandomCorruptedSeed(client_seed, host_seed, args_info.mismatches_arg, SEED_SIZE,
subseed_length, randstate, benchmark_flag,
#ifdef USE_MPI
nprocs);
#else
core_count);
#endif
if (algo->mode & MODE_CIPHER) {
size_t iv_length = EVP_CIPHER_iv_length(evp_cipher);
if (iv_length > 0) {
getRandomSeed(iv, iv_length, randstate);
}
getRandomSeed(uuid, AES_BLOCK_SIZE, randstate);
if (evpEncrypt(client_cipher, NULL, evp_cipher, client_seed, uuid, UUID_SIZE, iv)) {
fprintf(stderr, "ERROR: Initial encryption failed.\nOpenSSL Error: %s\n",
ERR_error_string(ERR_get_error(), NULL));
OMP_DESTROY()
return SC_Failure;
}
} else if (algo->mode & MODE_EC) {
if (getEcPublicKey(client_ec_point, NULL, ec_group, client_seed, SEED_SIZE)) {
EC_POINT_free(client_ec_point);
EC_GROUP_free(ec_group);
OMP_DESTROY()
return SC_Failure;
}
} else if (algo->mode & MODE_HASH) {
int hash_status;
// No EVP variant exists for KangarooTwelve
if (algo->nid == NID_kang12) {
hash_status =
kang12Hash(client_digest, digest_size, client_seed, SEED_SIZE, NULL, 0);
}
// No need to use the faster variants since this isn't a time critical step
else {
hash_status =
evpHash(client_digest, algo->mode & MODE_XOF ? &digest_size : NULL,
NULL, md, client_seed, SEED_SIZE, NULL, 0);
}
if (hash_status) {
if (salt_size > 0) {
free(salt);
}
free(client_digest);
OMP_DESTROY()
return SC_Failure;
}
}
// Clear GMP PRNG
gmp_randclear(randstate);
#ifdef USE_MPI
}
// Broadcast all of the relevant variable to every rank
MPI_Bcast(host_seed, SEED_SIZE, MPI_UNSIGNED_CHAR, 0, MPI_COMM_WORLD);
MPI_Bcast(client_seed, SEED_SIZE, MPI_UNSIGNED_CHAR, 0, MPI_COMM_WORLD);
if (algo->mode & MODE_CIPHER) {
MPI_Bcast(client_cipher, AES_BLOCK_SIZE, MPI_UNSIGNED_CHAR, 0, MPI_COMM_WORLD);
MPI_Bcast(uuid, UUID_SIZE, MPI_UNSIGNED_CHAR, 0, MPI_COMM_WORLD);
} else if (algo->mode & MODE_EC) {
unsigned char client_public_key[100];
int len;
if (my_rank == 0) {
if ((len = EC_POINT_point2oct(ec_group, client_ec_point,
POINT_CONVERSION_COMPRESSED, client_public_key,
sizeof(client_public_key), NULL)) == 0) {
fprintf(stderr, "ERROR: EC_POINT_point2oct failed.\nOpenSSL Error: %s\n",
ERR_error_string(ERR_get_error(), NULL));
EC_POINT_free(client_ec_point);
EC_GROUP_free(ec_group);
return SC_Failure;
}
}
MPI_Bcast(&len, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(client_public_key, len, MPI_UNSIGNED_CHAR, 0, MPI_COMM_WORLD);
EC_POINT_oct2point(ec_group, client_ec_point, client_public_key, len, NULL);
} else if (algo->mode & MODE_HASH) {
MPI_Bcast(client_digest, digest_size, MPI_UNSIGNED_CHAR, 0, MPI_COMM_WORLD);
}
#endif
} else {
int parse_status = parse_hex_handler(host_seed, params.seed_hex);
if (!parse_status) {
if (algo->mode & MODE_CIPHER) {
parse_status = parse_hex_handler(client_cipher, params.client_crypto_hex);
if (!parse_status && uuid_parse(uuid, params.uuid_hex)) {
fprintf(stderr, "ERROR: UUID not in canonical form.\n");
parse_status = 1;
}
if (!parse_status && params.iv_hex != NULL) {
parse_status = parse_hex_handler(iv, params.iv_hex);
}
} else if (algo->mode & MODE_EC) {
if (EC_POINT_hex2point(ec_group, params.client_crypto_hex, client_ec_point, NULL) ==
NULL) {
fprintf(stderr, "ERROR: EC_POINT_hex2point failed.\nOpenSSL Error: %s\n",
ERR_error_string(ERR_get_error(), NULL));
parse_status = 1;
}
}
}
if (parse_status) {
if (algo->mode & MODE_EC) {
EC_POINT_free(client_ec_point);
EC_GROUP_free(ec_group);
}
OMP_DESTROY()
return SC_Failure;
} else if (algo->mode & MODE_HASH) {
switch (parseHex(client_digest, params.client_crypto_hex)) {
case 1:
fprintf(stderr, "ERROR: CLIENT_DIGEST had non-hexadecimal characters.\n");
free(salt);
free(client_digest);
OMP_DESTROY()
return SC_Failure;
case 2:
fprintf(stderr, "ERROR: CLIENT_DIGEST did not have even length.\n");
free(salt);
free(client_digest);
OMP_DESTROY()
return SC_Failure;
default:
break;
}
if (params.salt_hex != NULL) {
switch (parseHex(salt, params.salt_hex)) {
case 1:
fprintf(stderr, "ERROR: SALT had non-hexadecimal characters.\n");
free(salt);
free(client_digest);
OMP_DESTROY()
return SC_Failure;
case 2:
fprintf(stderr, "ERROR: SALT did not have even length.\n");
free(salt);
free(client_digest);
OMP_DESTROY()
return SC_Failure;
default:
break;
}
}
}
}
if (verbose_flag
#ifdef USE_MPI
&& my_rank == 0
#endif
) {
fprintf(stderr, "INFO: Using HOST_SEED: ");
fprintHex(stderr, host_seed, SEED_SIZE);
fprintf(stderr, "\n");
if (random_flag || benchmark_flag) {
fprintf(stderr, "INFO: Using CLIENT_SEED (%d mismatches): ", args_info.mismatches_arg);
fprintHex(stderr, client_seed, SEED_SIZE);
fprintf(stderr, "\n");
}
if (algo->mode & MODE_CIPHER) {
char uuid_str[UUID_STR_LEN + 1];
fprintf(stderr, "INFO: Using %s CLIENT_CIPHER: %*s", algo->full_name,
(int)strlen(algo->full_name) - 4, "");
fprintHex(stderr, client_cipher, AES_BLOCK_SIZE);
fprintf(stderr, "\n");
// Convert the uuid to a string for printing
fprintf(stderr, "INFO: Using UUID: ");
uuid_unparse(uuid_str, uuid);
fprintf(stderr, "%s\n", uuid_str);
if (EVP_CIPHER_iv_length(evp_cipher) > 0) {
fprintf(stderr, "INFO: Using IV: ");
fprintHex(stderr, iv, EVP_CIPHER_iv_length(evp_cipher));
fprintf(stderr, "\n");
}
} else if (algo->mode & MODE_EC) {
if (random_flag || benchmark_flag) {
fprintf(stderr, "INFO: Using %s HOST_PUB_KEY:%*s", algo->full_name,
(int)strlen(algo->full_name) - 4, "");
if (fprintfEcPoint(stderr, ec_group, client_ec_point, POINT_CONVERSION_COMPRESSED,
NULL)) {
fprintf(stderr, "ERROR: fprintfEcPoint failed.\n");
EC_POINT_free(client_ec_point);
EC_GROUP_free(ec_group);
OMP_DESTROY()
return SC_Failure;
}
fprintf(stderr, "\n");
}
fprintf(stderr, "INFO: Using %s CLIENT_PUB_KEY:%*s", algo->full_name,
(int)strlen(algo->full_name) - 6, "");
if (fprintfEcPoint(stderr, ec_group, client_ec_point, POINT_CONVERSION_COMPRESSED,
NULL)) {
fprintf(stderr, "ERROR: fprintfEcPoint failed.\n");
EC_POINT_free(client_ec_point);
EC_GROUP_free(ec_group);
OMP_DESTROY()
return SC_Failure;
}
fprintf(stderr, "\n");
} else if (algo->mode & MODE_HASH) {
fprintf(stderr, "INFO: Using %s ", algo->full_name);
if (algo->mode & MODE_XOF) {
fprintf(stderr, "(%zu bytes) ", digest_size);
}
fprintf(stderr, "CLIENT_DIGEST: ");
fprintHex(stderr, client_digest, digest_size);
fprintf(stderr, "\n");
if (salt_size > 0) {
fprintf(stderr, "INFO: Using %s SALT: %*s", algo->full_name,
(int)strlen(algo->full_name), "");
fprintHex(stderr, salt, salt_size);
fprintf(stderr, "\n");
}
}
fflush(stderr);
}
found = 0;
#ifdef USE_MPI
start_time = MPI_Wtime();
#else
start_time = omp_get_wtime();
#endif
// clang-format off
for (; mismatch <= ending_mismatch && !found; mismatch++) {
if (verbose_flag
#ifdef USE_MPI
&& my_rank == 0
#endif
) {
fprintf(stderr, "INFO: Checking a hamming distance of %d...\n", mismatch);
fflush(stderr);
}
#ifndef USE_MPI
#pragma omp parallel default(none) \
shared(found, host_seed, client_seed, evp_cipher, client_cipher, iv, uuid, ec_group, \
client_ec_point, md, client_digest, digest_size, salt, salt_size, mismatch, \
validated_keys, algo, subseed_length, all_flag, count_flag, \
verbose_flag) private(subfound, my_rank)
{
long long int sub_validated_keys = 0;
my_rank = omp_get_thread_num();
#endif
size_t max_count;
mpz_t key_count, first_perm, last_perm;
int (*crypto_func)(const unsigned char*, void*) = NULL;
int (*crypto_cmp)(void*) = NULL;
void* v_args = NULL;
subfound = 0;
if (algo->mode & MODE_CIPHER) {
#ifndef ALWAYS_EVP_AES
// Use a custom implementation for improved speed
if (algo->nid == NID_aes_256_ecb) {
crypto_func = CryptoFunc_aes256;
crypto_cmp = CryptoCmp_aes256;
} else {
#endif
crypto_func = CryptoFunc_cipher;
crypto_cmp = CryptoCmp_cipher;
#ifndef ALWAYS_EVP_AES
}
#endif
v_args = CipherValidator_create(evp_cipher, client_cipher, uuid, UUID_SIZE,
EVP_CIPHER_iv_length(evp_cipher) > 0 ? iv : NULL);
} else if (algo->mode & MODE_EC) {
crypto_func = CryptoFunc_ec;
crypto_cmp = CryptoCmp_ec;
v_args = EcValidator_create(ec_group, client_ec_point);
} else if (algo->mode & MODE_HASH) {
if (algo->nid == NID_kang12) {
crypto_func = CryptoFunc_kang12;
crypto_cmp = CryptoCmp_kang12;
v_args = Kang12Validator_create(client_digest, digest_size, salt, salt_size);
} else {
crypto_func = CryptoFunc_hash;
crypto_cmp = CryptoCmp_hash;
v_args = HashValidator_create(md, client_digest, digest_size, salt, salt_size);
}
}
mpz_inits(key_count, first_perm, last_perm, NULL);
mpz_bin_uiui(key_count, subseed_length, mismatch);
// Only have this rank run if it's within range of possible keys
if (mpz_cmp_ui(key_count, (unsigned long)my_rank) > 0 && subfound >= 0) {
// Set the count of pairs to the range of possible keys if there are more ranks
// than possible keys
#ifdef USE_MPI
max_count = nprocs;
if (mpz_cmp_ui(key_count, nprocs) < 0) {
#else
max_count = omp_get_num_threads();
if (mpz_cmp_ui(key_count, omp_get_num_threads()) < 0) {
#endif
max_count = mpz_get_ui(key_count);
}
getPermPair(first_perm, last_perm, (size_t)my_rank, max_count, mismatch,
subseed_length);
#ifdef USE_MPI
subfound = findMatchingSeed(client_seed, host_seed, first_perm, last_perm, all_flag,
count_flag ? &validated_keys : NULL, &found, verbose_flag,
my_rank, max_count, crypto_func, crypto_cmp, v_args);
#else
subfound = findMatchingSeed(client_seed, host_seed, first_perm, last_perm, all_flag,
count_flag ? &sub_validated_keys : NULL, &found,
crypto_func, crypto_cmp, v_args);
#endif
}
mpz_clears(key_count, first_perm, last_perm, NULL);
if (algo->mode & MODE_CIPHER) {
CipherValidator_destroy(v_args);
} else if (algo->mode & MODE_EC) {
EcValidator_destroy(v_args);
} else if (algo->mode & MODE_HASH) {
if (algo->nid == NID_kang12) {
Kang12Validator_destroy(v_args);
} else {
HashValidator_destroy(v_args);
}
}
#ifdef USE_MPI
if (subfound < 0) {
// Cleanup
if (algo->mode & MODE_EC) {
EC_POINT_free(client_ec_point);
EC_GROUP_free(ec_group);
} else if (algo->mode & MODE_HASH) {
if (salt_size > 0) {
free(salt);
}
free(client_digest);
}
}
#else
#pragma omp critical
{
// If the result is positive set the "global" found to 1. Will cause the other
// threads to prematurely stop.
if (subfound > 0) {
// If it isn't already found nor is there an error found,
if (!found) {
found = 1;
}
}
// If the result is negative, set a flag that an error has occurred, and stop the other
// threads. Will cause the other threads to prematurely stop.
else if (subfound < 0) {
found = -1;
}
validated_keys += sub_validated_keys;
}
}
#endif
}
if (algo->mode & MODE_EC) {
EC_POINT_free(client_ec_point);
EC_GROUP_free(ec_group);
} else if (algo->mode & MODE_HASH) {
if (salt_size > 0) {
free(salt);
}
free(client_digest);
}
#ifdef USE_MPI
if ((mismatch <= ending_mismatch) && !(all_flag) && subfound == 0 && !found) {
fprintf(stderr, "Rank %d Bleh\n", my_rank);
MPI_Recv(&found, 1, MPI_INT, MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}
duration = MPI_Wtime() - start_time;
fprintf(stderr, "INFO Rank %d: Clock time: %f s\n", my_rank, duration);
if (my_rank == 0) {
MPI_Reduce(MPI_IN_PLACE, &duration, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
} else {
MPI_Reduce(&duration, &duration, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
}
if (my_rank == 0 && verbose_flag) {
fprintf(stderr, "INFO: Max Clock time: %f s\n", duration);
}
if (count_flag) {
if (my_rank == 0) {
MPI_Reduce(MPI_IN_PLACE, &validated_keys, 1, MPI_LONG_LONG_INT, MPI_SUM, 0,
MPI_COMM_WORLD);
// Divide validated_keys by duration
key_rate = (double)validated_keys / duration;
fprintf(stderr, "INFO: Keys searched: %lld\n", validated_keys);
fprintf(stderr, "INFO: Keys per second: %.9g\n", key_rate);
} else {
MPI_Reduce(&validated_keys, &validated_keys, 1, MPI_LONG_LONG_INT, MPI_SUM, 0,
MPI_COMM_WORLD);
}
}
if (subfound) {
fprintHex(stdout, client_seed, SEED_SIZE);
printf("\n");
}
// Cleanup
MPI_Finalize();
return EXIT_SUCCESS;
#else
// Check if an error occurred in one of the threads.
if (found < 0) {
OMP_DESTROY()
return SC_Failure;
}
duration = omp_get_wtime() - start_time;
if (verbose_flag) {
fprintf(stderr, "INFO: Clock time: %f s\n", duration);
fprintf(stderr, "INFO: Found: %d\n", found);
}
if (count_flag) {
// Divide validated_keys by duration
key_rate = (double)validated_keys / duration;
fprintf(stderr, "INFO: Keys searched: %lld\n", validated_keys);
fprintf(stderr, "INFO: Keys per second: %.9g\n", key_rate);
}
if (found > 0) {
fprintHex(stdout, client_seed, SEED_SIZE);
printf("\n");
}
OMP_DESTROY()
return found || algo->mode == MODE_NONE ? SC_Found : SC_NotFound;
#endif
}
// clang-format on
|
model_compute_cpu.c | /***************************************************************************
*cr
*cr (C) Copyright 2007 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
#include <sys/time.h>
#include <string.h>
#include <math.h>
#include <stdio.h>
#include "model.h"
/*
 * Tally pairwise angular correlations between two point sets.
 *
 * For every pair (data1[i], data2[j]) the dot product of the two
 * vectors is computed and the pair is counted into the histogram bin
 * whose boundaries bracket it.  The binary search below moves `max`
 * down when dot >= binb[k], so binb is expected to be monotonically
 * decreasing over indices 0..nbins.
 *
 *   data1/n1   first point set
 *   data2/n2   second point set (ignored when doSelf is nonzero)
 *   doSelf     nonzero => autocorrelate data1 with itself; only
 *              unordered pairs i < j are counted
 *   data_bins  histogram counters (includes under/overflow slots)
 *   nbins      number of interior bin boundaries
 *   binb       bin boundaries binb[0..nbins], descending
 *
 * Returns 0.
 *
 * Changes vs. previous revision: removed the unused function-scope `k`
 * (it was shadowed inside the loop) and the unused `indx`; dropped the
 * deprecated `register` hints; replaced the broad `#pragma omp
 * critical` with a per-counter `#pragma omp atomic`, which produces
 * identical totals (the search reads only thread-private and read-only
 * data) without serializing the bin selection.
 */
int doCompute(struct cartesian *data1, int n1, struct cartesian *data2,
	      int n2, int doSelf, long long *data_bins,
	      int nbins, float *binb)
{
  int i, j;

  if (doSelf)
    {
      /* Self-correlation: compare data1 against itself. */
      n2 = n1;
      data2 = data1;
    }

  for (i = 0; i < ((doSelf) ? n1-1 : n1); i++)
    {
      const float xi = data1[i].x;
      const float yi = data1[i].y;
      const float zi = data1[i].z;

#pragma omp parallel for
      for (j = ((doSelf) ? i+1 : 0); j < n2; j++)
	{
	  float dot = xi * data2[j].x + yi * data2[j].y + zi * data2[j].z;

	  /* Binary search for the bin bracketing dot. */
	  int min = 0;
	  int max = nbins;
	  int indx;

	  while (max > min+1)
	    {
	      int k = (min + max) / 2;
	      if (dot >= binb[k])
		max = k;
	      else
		min = k;
	    }

	  /* Resolve the final bin, including the two overflow slots. */
	  if (dot >= binb[min])
	    indx = min;
	  else if (dot < binb[max])
	    indx = max+1;
	  else
	    indx = max;

	  /* Atomic increment: same counts as the former critical
	     section, far less contention. */
#pragma omp atomic
	  data_bins[indx] += 1;
	}
    }

  return 0;
}
|
threads.c | #include <stdio.h>
#include <omp.h>
int main()
{
  /*
   * Offload a 1024-iteration loop to the default device requesting
   * 1024 threads, record which OpenMP thread executed each iteration,
   * and report the largest thread id observed.  The statically sized
   * array relies on the target region's implicit tofrom mapping.
   */
  int thread_id[1024];

  for (int slot = 0; slot < 1024; slot++)
    thread_id[slot] = -1;

#pragma omp target parallel for num_threads(1024)
  for (int i = 0; i < 1024; i++) {
    if (i > 950)
      printf ("Thread: %d\n", omp_get_thread_num());
    thread_id[i] = omp_get_thread_num();
  }

  /* Reduce to the maximum thread id seen across all iterations. */
  int max_id = -1;
  for (int slot = 0; slot < 1024; slot++)
    if (thread_id[slot] > max_id)
      max_id = thread_id[slot];

  printf("Max thread id %d\n", max_id);

  if (max_id != 959) {
    printf("Failed\n");
    return 1;
  }
  printf("Passed\n");
  return 0;
}
|
GB_unop__identity_bool_int64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_bool_int64
// op(A') function: GB_unop_tran__identity_bool_int64
// C type: bool
// A type: int64_t
// cast: bool cij = (bool) aij
// unaryop: cij = aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
bool z = (bool) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
bool z = (bool) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity operator with a typecast: Cx [k] = (bool) Ax [k]
// for all anz entries, in parallel.  NOTE: this file is auto-generated;
// behavior is intentionally unchanged.
GrB_Info GB_unop_apply__identity_bool_int64
(
    bool *Cx,               // Cx and Ax may be aliased
    const int64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // cast each int64_t entry of A to bool and store it in C
        Cx [k] = (bool) Ax [k] ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose, typecast, and apply: C = identity ((bool) A').  The work
// is done by the generic template in GB_unop_transpose.c, which expands
// in terms of the GB_* macros defined earlier in this file.
GrB_Info GB_unop_tran__identity_bool_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // select the second (numeric) phase of the two-phase transpose
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
attribute.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% AAA TTTTT TTTTT RRRR IIIII BBBB U U TTTTT EEEEE %
% A A T T R R I B B U U T E %
% AAAAA T T RRRR I BBBB U U T EEE %
% A A T T R R I B B U U T E %
% A A T T R R IIIII BBBB UUU T EEEEE %
% %
% %
% MagickCore Get / Set Image Attributes %
% %
% Software Design %
% Cristy %
% October 2002 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/draw-private.h"
#include "MagickCore/effect.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/histogram.h"
#include "MagickCore/identify.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory_.h"
#include "MagickCore/magick.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/segment.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e B o u n d i n g B o x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageBoundingBox() returns the bounding box of an image canvas.
%
% The format of the GetImageBoundingBox method is:
%
% RectangleInfo GetImageBoundingBox(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o bounds: Method GetImageBoundingBox returns the bounding box of an
% image canvas.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Per-edge quantities for bounding-box trimming: GetEdgeBackgroundFactor()
  stores each edge's non-background fraction here, and GetImageBoundingBox()
  reuses a second instance (`vertex`) as running counts of the rows/columns
  trimmed from each side.
*/
typedef struct _EdgeInfo
{
  double
    left,
    right,
    top,
    bottom;
} EdgeInfo;
/*
  GetEdgeBackgroundFactor() measures how much of one edge region of the
  canvas is NOT background.  The background color is sampled from the
  corner pixel implied by the gravity, the width x height rectangle at
  (x_offset,y_offset) is gravity-adjusted and cropped out, and the
  fraction of its pixels that fuzzily differ from the background is
  returned.  Returns 0.0 if the crop fails.
*/
static double GetEdgeBackgroundFactor(const Image *image,
  const CacheView *image_view,const GravityType gravity,const size_t width,
  const size_t height,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
  CacheView
    *edge_view;

  double
    factor;

  Image
    *edge_image;

  PixelInfo
    background,
    pixel;

  RectangleInfo
    edge_geometry;

  register const Quantum
    *p;

  ssize_t
    y;

  /*
    Determine the percent of image background for this edge.
  */
  switch (gravity)
  {
    /*
      Sample the background color from the corner pixel nearest the
      requested gravity.
    */
    case NorthWestGravity:
    case NorthGravity:
    default:
    {
      p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception);
      break;
    }
    case NorthEastGravity:
    case EastGravity:
    {
      p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1,
        exception);
      break;
    }
    case SouthEastGravity:
    case SouthGravity:
    {
      p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,
        (ssize_t) image->rows-1,1,1,exception);
      break;
    }
    case SouthWestGravity:
    case WestGravity:
    {
      p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1,
        exception);
      break;
    }
  }
  /* NOTE(review): p is dereferenced without a NULL check here --
     presumably the virtual pixel cache cannot fail for a 1x1 in-bounds
     request; confirm. */
  GetPixelInfoPixel(image,p,&background);
  edge_geometry.width=width;
  edge_geometry.height=height;
  edge_geometry.x=x_offset;
  edge_geometry.y=y_offset;
  /*
    Convert the gravity-relative geometry into absolute canvas
    coordinates, then crop the edge strip out of the image.
  */
  GravityAdjustGeometry(image->columns,image->rows,gravity,&edge_geometry);
  edge_image=CropImage(image,&edge_geometry,exception);
  if (edge_image == (Image *) NULL)
    return(0.0);
  factor=0.0;
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    /*
      Count the pixels that fuzzily differ from the background color.
    */
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      GetPixelInfoPixel(edge_image,p,&pixel);
      if (IsFuzzyEquivalencePixelInfo(&pixel,&background) == MagickFalse)
        factor++;
      p+=GetPixelChannels(edge_image);
    }
  }
  /*
    Normalize the count to a fraction of the edge strip's area.
  */
  factor/=((double) edge_image->columns*edge_image->rows);
  edge_view=DestroyCacheView(edge_view);
  edge_image=DestroyImage(edge_image);
  return(factor);
}
/*
  GetMinEdgeBackgroundFactor() returns the smallest of the four per-edge
  background factors.
*/
static inline double GetMinEdgeBackgroundFactor(const EdgeInfo *edge)
{
  double
    factor;

  factor=MagickMin(edge->left,edge->right);
  factor=MagickMin(factor,edge->top);
  factor=MagickMin(factor,edge->bottom);
  return(factor);
}
/*
  GetImageBoundingBox() computes the bounding box of the non-background
  portion of the image by repeatedly shaving the edge with the smallest
  non-background fraction off a cloned canvas until every remaining edge
  exceeds the background threshold.
*/
MagickExport RectangleInfo GetImageBoundingBox(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *edge_view;

  const char
    *artifact;

  double
    background_factor,
    percent_background;

  EdgeInfo
    edge,      /* non-background fraction measured on each edge */
    vertex;    /* rows/columns trimmed so far from each side */

  Image
    *edge_image;

  RectangleInfo
    bounds;

  /*
    Get the image bounding box.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* NOTE(review): SetGeometry() presumably seeds bounds with the full
     canvas dimensions -- confirm against its definition. */
  SetGeometry(image,&bounds);
  edge_image=CloneImage(image,0,0,MagickTrue,exception);
  if (edge_image == (Image *) NULL)
    return(bounds);
  /* Drop any page offset so crops address the raw pixel data. */
  (void) ParseAbsoluteGeometry("0x0+0+0",&edge_image->page);
  memset(&vertex,0,sizeof(vertex));
  /*
    Measure the initial background factor of each full edge: 1-pixel
    wide strips for left/right, 1-pixel tall strips for top/bottom.
  */
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  edge.left=GetEdgeBackgroundFactor(edge_image,edge_view,WestGravity,
    1,0,0,0,exception);
  edge.right=GetEdgeBackgroundFactor(edge_image,edge_view,EastGravity,
    1,0,0,0,exception);
  edge.top=GetEdgeBackgroundFactor(edge_image,edge_view,NorthGravity,
    0,1,0,0,exception);
  edge.bottom=GetEdgeBackgroundFactor(edge_image,edge_view,SouthGravity,
    0,1,0,0,exception);
  /*
    The "trim:percent-background" artifact (0..100) controls how much
    background an edge may retain; with no artifact the threshold
    collapses to MagickEpsilon, so only essentially all-background
    edges are trimmed.
  */
  percent_background=1.0;
  artifact=GetImageArtifact(edge_image,"trim:percent-background");
  if (artifact != (const char *) NULL)
    percent_background=StringToDouble(artifact,(char **) NULL)/100.0;
  percent_background=MagickMin(MagickMax(1.0-percent_background,MagickEpsilon),
    1.0);
  /*
    Shave the minimum-factor edge, re-measuring the affected edges after
    each pass, until every edge meets the threshold or the box collapses.
  */
  background_factor=GetMinEdgeBackgroundFactor(&edge);
  for ( ; background_factor < percent_background;
          background_factor=GetMinEdgeBackgroundFactor(&edge))
  {
    if ((bounds.width == 0) || (bounds.height == 0))
      break;
    if (fabs(edge.left-background_factor) < MagickEpsilon)
      {
        /*
          Trim left edge.
        */
        vertex.left++;
        bounds.width--;
        edge.left=GetEdgeBackgroundFactor(edge_image,edge_view,
          NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t)
          vertex.top,exception);
        edge.top=GetEdgeBackgroundFactor(edge_image,edge_view,
          NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
          vertex.top,exception);
        edge.bottom=GetEdgeBackgroundFactor(edge_image,edge_view,
          SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
          vertex.bottom,exception);
        continue;
      }
    if (fabs(edge.right-background_factor) < MagickEpsilon)
      {
        /*
          Trim right edge.
        */
        vertex.right++;
        bounds.width--;
        edge.right=GetEdgeBackgroundFactor(edge_image,edge_view,
          NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t)
          vertex.top,exception);
        edge.top=GetEdgeBackgroundFactor(edge_image,edge_view,
          NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
          vertex.top,exception);
        edge.bottom=GetEdgeBackgroundFactor(edge_image,edge_view,
          SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
          vertex.bottom,exception);
        continue;
      }
    if (fabs(edge.top-background_factor) < MagickEpsilon)
      {
        /*
          Trim top edge.
        */
        vertex.top++;
        bounds.height--;
        edge.left=GetEdgeBackgroundFactor(edge_image,edge_view,
          NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t)
          vertex.top,exception);
        edge.right=GetEdgeBackgroundFactor(edge_image,edge_view,
          NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t)
          vertex.top,exception);
        edge.top=GetEdgeBackgroundFactor(edge_image,edge_view,
          NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
          vertex.top,exception);
        continue;
      }
    if (fabs(edge.bottom-background_factor) < MagickEpsilon)
      {
        /*
          Trim bottom edge.
        */
        vertex.bottom++;
        bounds.height--;
        edge.left=GetEdgeBackgroundFactor(edge_image,edge_view,
          NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t)
          vertex.top,exception);
        edge.right=GetEdgeBackgroundFactor(edge_image,edge_view,
          NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t)
          vertex.top,exception);
        edge.bottom=GetEdgeBackgroundFactor(edge_image,edge_view,
          SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
          vertex.bottom,exception);
        continue;
      }
  }
  edge_view=DestroyCacheView(edge_view);
  edge_image=DestroyImage(edge_image);
  /* The trimmed row/column counts become the bounding-box origin. */
  bounds.x=(ssize_t) vertex.left;
  bounds.y=(ssize_t) vertex.top;
  if ((bounds.width == 0) || (bounds.height == 0))
    (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
      "GeometryDoesNotContainImage","`%s'",image->filename);
  return(bounds);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDepth() returns the depth of a particular image channel.
%
% The format of the GetImageDepth method is:
%
% size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
register ssize_t
i;
size_t
*current_depth,
depth,
number_threads;
ssize_t
y;
/*
Compute image depth.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
current_depth=(size_t *) AcquireQuantumMemory(number_threads,
sizeof(*current_depth));
if (current_depth == (size_t *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
status=MagickTrue;
for (i=0; i < (ssize_t) number_threads; i++)
current_depth[i]=1;
if ((image->storage_class == PseudoClass) &&
(image->alpha_trait == UndefinedPixelTrait))
{
for (i=0; i < (ssize_t) image->colors; i++)
{
const int
id = GetOpenMPThreadId();
while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
{
MagickBooleanType
atDepth;
QuantumAny
range;
atDepth=MagickTrue;
range=GetQuantumRange(current_depth[id]);
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].red),range) == MagickFalse)
atDepth=MagickFalse;
if ((atDepth != MagickFalse) &&
(GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].green),range) == MagickFalse)
atDepth=MagickFalse;
if ((atDepth != MagickFalse) &&
(GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].blue),range) == MagickFalse)
atDepth=MagickFalse;
if ((atDepth != MagickFalse))
break;
current_depth[id]++;
}
}
depth=current_depth[0];
for (i=1; i < (ssize_t) number_threads; i++)
if (depth < current_depth[i])
depth=current_depth[i];
current_depth=(size_t *) RelinquishMagickMemory(current_depth);
return(depth);
}
image_view=AcquireVirtualCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
if ((1UL*QuantumRange) <= MaxMap)
{
size_t
*depth_map;
/*
Scale pixels to desired (optimized with depth map).
*/
depth_map=(size_t *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
if (depth_map == (size_t *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
for (i=0; i <= (ssize_t) MaxMap; i++)
{
unsigned int
depth;
for (depth=1; depth < MAGICKCORE_QUANTUM_DEPTH; depth++)
{
Quantum
pixel;
QuantumAny
range;
range=GetQuantumRange(depth);
pixel=(Quantum) i;
if (pixel == ScaleAnyToQuantum(ScaleQuantumToAny(pixel,range),range))
break;
}
depth_map[i]=depth;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register const Quantum
*magick_restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
continue;
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
if (depth_map[ScaleQuantumToMap(p[i])] > current_depth[id])
current_depth[id]=depth_map[ScaleQuantumToMap(p[i])];
}
p+=GetPixelChannels(image);
}
if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
depth=current_depth[0];
for (i=1; i < (ssize_t) number_threads; i++)
if (depth < current_depth[i])
depth=current_depth[i];
depth_map=(size_t *) RelinquishMagickMemory(depth_map);
current_depth=(size_t *) RelinquishMagickMemory(current_depth);
return(depth);
}
#endif
/*
Compute pixel depth.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register const Quantum
*magick_restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
continue;
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel
channel;
PixelTrait
traits;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
{
QuantumAny
range;
range=GetQuantumRange(current_depth[id]);
if (p[i] == ScaleAnyToQuantum(ScaleQuantumToAny(p[i],range),range))
break;
current_depth[id]++;
}
}
p+=GetPixelChannels(image);
}
if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
depth=current_depth[0];
for (i=1; i < (ssize_t) number_threads; i++)
if (depth < current_depth[i])
depth=current_depth[i];
current_depth=(size_t *) RelinquishMagickMemory(current_depth);
return(depth);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t u m D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantumDepth() returns the depth of the image rounded to a legal
% quantum depth: 8, 16, 32, or 64.
%
% The format of the GetImageQuantumDepth method is:
%
% size_t GetImageQuantumDepth(const Image *image,
% const MagickBooleanType constrain)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o constrain: A value other than MagickFalse, constrains the depth to
% a maximum of MAGICKCORE_QUANTUM_DEPTH.
%
*/
MagickExport size_t GetImageQuantumDepth(const Image *image,
  const MagickBooleanType constrain)
{
  size_t
    depth;

  /*
    Round the image depth up to the next legal quantum depth (8, 16, 32,
    or 64).  When constrain is set, clamp the result to the quantum depth
    this build supports.  Validate the image handle first, consistent
    with the other image inspection routines in this file.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  depth=image->depth;
  if (depth <= 8)
    depth=8;
  else
    if (depth <= 16)
      depth=16;
    else
      if (depth <= 32)
        depth=32;
      else
        if (depth <= 64)
          depth=64;
  if (constrain != MagickFalse)
    depth=(size_t) MagickMin((double) depth,(double) MAGICKCORE_QUANTUM_DEPTH);
  return(depth);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageType() returns the type of image:
%
% Bilevel Grayscale GrayscaleMatte
% Palette PaletteMatte TrueColor
% TrueColorMatte ColorSeparation ColorSeparationMatte
%
% The format of the GetImageType method is:
%
% ImageType GetImageType(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport ImageType GetImageType(const Image *image)
{
  MagickBooleanType
    has_alpha;

  /*
    Classify the image from its current colorspace, type flags, and
    colormap usage.  Checks are ordered from most to least specific:
    CMYK, monochrome, grayscale, palette, and finally true color.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  has_alpha=(image->alpha_trait != UndefinedPixelTrait) ? MagickTrue :
    MagickFalse;
  if (image->colorspace == CMYKColorspace)
    return(has_alpha == MagickFalse ? ColorSeparationType :
      ColorSeparationAlphaType);
  if (IsImageMonochrome(image) != MagickFalse)
    return(BilevelType);
  if (IsImageGray(image) != MagickFalse)
    return(has_alpha != MagickFalse ? GrayscaleAlphaType : GrayscaleType);
  if (IsPaletteImage(image) != MagickFalse)
    return(has_alpha != MagickFalse ? PaletteAlphaType : PaletteType);
  return(has_alpha != MagickFalse ? TrueColorAlphaType : TrueColorType);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I d e n t i f y I m a g e G r a y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IdentifyImageGray() returns grayscale if all the pixels in the image have
% the same red, green, and blue intensities, and bi-level if the intensity is
% either 0 or QuantumRange.  Otherwise undefined is returned.
%
% The format of the IdentifyImageGray method is:
%
% ImageType IdentifyImageGray(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType IdentifyImageGray(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *view;

  ImageType
    classification;

  ssize_t
    row;

  /*
    Scan every pixel: start by assuming bi-level, downgrade to grayscale
    on the first gray-but-not-monochrome pixel, and give up (undefined)
    on the first non-gray pixel.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
      (image->type == GrayscaleAlphaType))
    return(image->type);  /* already classified; trust the cached type */
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(UndefinedType);
  classification=BilevelType;
  view=AcquireVirtualCacheView(image,exception);
  for (row=0; row < (ssize_t) image->rows; row++)
  {
    const Quantum
      *q;

    ssize_t
      column;

    q=GetCacheViewVirtualPixels(view,0,row,image->columns,1,exception);
    if (q == (const Quantum *) NULL)
      break;
    for (column=0; column < (ssize_t) image->columns; column++)
    {
      if (IsPixelGray(image,q) == MagickFalse)
        {
          classification=UndefinedType;
          break;
        }
      if ((classification == BilevelType) &&
          (IsPixelMonochrome(image,q) == MagickFalse))
        classification=GrayscaleType;
      q+=GetPixelChannels(image);
    }
    if (classification == UndefinedType)
      break;
  }
  view=DestroyCacheView(view);
  if ((classification == GrayscaleType) &&
      (image->alpha_trait != UndefinedPixelTrait))
    classification=GrayscaleAlphaType;
  return(classification);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I d e n t i f y I m a g e M o n o c h r o m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IdentifyImageMonochrome() returns MagickTrue if all the pixels in the image
% have the same red, green, and blue intensities and the intensity is either
% 0 or QuantumRange.
%
% The format of the IdentifyImageMonochrome method is:
%
% MagickBooleanType IdentifyImageMonochrome(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IdentifyImageMonochrome(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *view;

  MagickBooleanType
    monochrome;

  ssize_t
    row;

  /*
    Return MagickTrue only if every pixel is gray with intensity 0 or
    QuantumRange.  The scan stops at the first counter-example.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->type == BilevelType)
    return(MagickTrue);  /* cached classification suffices */
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  monochrome=MagickTrue;
  view=AcquireVirtualCacheView(image,exception);
  for (row=0; (row < (ssize_t) image->rows) && (monochrome != MagickFalse);
       row++)
  {
    const Quantum
      *q;

    ssize_t
      column;

    q=GetCacheViewVirtualPixels(view,0,row,image->columns,1,exception);
    if (q == (const Quantum *) NULL)
      break;  /* pixel fetch failed; report what we know so far */
    for (column=0; column < (ssize_t) image->columns; column++)
    {
      if (IsPixelMonochrome(image,q) == MagickFalse)
        {
          monochrome=MagickFalse;
          break;
        }
      q+=GetPixelChannels(image);
    }
  }
  view=DestroyCacheView(view);
  return(monochrome);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I d e n t i f y I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IdentifyImageType() returns the potential type of image:
%
% Bilevel Grayscale GrayscaleMatte
% Palette PaletteMatte TrueColor
% TrueColorMatte ColorSeparation ColorSeparationMatte
%
% To ensure the image type matches its potential, use SetImageType():
%
% (void) SetImageType(image,IdentifyImageType(image,exception),exception);
%
% The format of the IdentifyImageType method is:
%
% ImageType IdentifyImageType(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType IdentifyImageType(const Image *image,
  ExceptionInfo *exception)
{
  MagickBooleanType
    has_alpha;

  /*
    Determine the potential type by actually inspecting the pixels
    (unlike GetImageType, which trusts cached flags).  Checks run from
    most to least restrictive: CMYK, monochrome, grayscale, palette,
    true color.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  has_alpha=(image->alpha_trait != UndefinedPixelTrait) ? MagickTrue :
    MagickFalse;
  if (image->colorspace == CMYKColorspace)
    return(has_alpha == MagickFalse ? ColorSeparationType :
      ColorSeparationAlphaType);
  if (IdentifyImageMonochrome(image,exception) != MagickFalse)
    return(BilevelType);
  if (IdentifyImageGray(image,exception) != UndefinedType)
    return(has_alpha != MagickFalse ? GrayscaleAlphaType : GrayscaleType);
  if (IdentifyPaletteImage(image,exception) != MagickFalse)
    return(has_alpha != MagickFalse ? PaletteAlphaType : PaletteType);
  return(has_alpha != MagickFalse ? TrueColorAlphaType : TrueColorType);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e G r a y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageGray() returns MagickTrue if the type of the image is grayscale or
% bi-level.
%
% The format of the IsImageGray method is:
%
% MagickBooleanType IsImageGray(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsImageGray(const Image *image)
{
  /*
    Cheap check against the cached type flag only; no pixels are read.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  switch (image->type)
  {
    case BilevelType:
    case GrayscaleType:
    case GrayscaleAlphaType:
      return(MagickTrue);
    default:
      break;
  }
  return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e M o n o c h r o m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageMonochrome() returns MagickTrue if type of the image is bi-level.
%
% The format of the IsImageMonochrome method is:
%
% MagickBooleanType IsImageMonochrome(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsImageMonochrome(const Image *image)
{
  /*
    Cheap check against the cached type flag only; no pixels are read.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  return(image->type == BilevelType ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e O p a q u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageOpaque() returns MagickTrue if none of the pixels in the image have
% an alpha value other than OpaqueAlpha (QuantumRange).
%
% Will return true immediately if the alpha channel is not available.
%
% The format of the IsImageOpaque method is:
%
% MagickBooleanType IsImageOpaque(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsImageOpaque(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *view;

  MagickBooleanType
    opaque;

  ssize_t
    row;

  /*
    Determine if image is opaque: scan every pixel's alpha value and
    stop at the first one that is not OpaqueAlpha.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->alpha_trait == UndefinedPixelTrait)
    return(MagickTrue);  /* no alpha channel: trivially opaque */
  opaque=MagickTrue;
  view=AcquireVirtualCacheView(image,exception);
  for (row=0; (row < (ssize_t) image->rows) && (opaque != MagickFalse); row++)
  {
    const Quantum
      *q;

    ssize_t
      column;

    q=GetCacheViewVirtualPixels(view,0,row,image->columns,1,exception);
    if (q == (const Quantum *) NULL)
      {
        opaque=MagickFalse;  /* pixel fetch failed: report not opaque */
        break;
      }
    for (column=0; column < (ssize_t) image->columns; column++)
    {
      if (GetPixelAlpha(image,q) != OpaqueAlpha)
        {
          opaque=MagickFalse;
          break;
        }
      q+=GetPixelChannels(image);
    }
  }
  view=DestroyCacheView(view);
  return(opaque);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageDepth() sets the depth of the image.
%
% The format of the SetImageDepth method is:
%
% MagickBooleanType SetImageDepth(Image *image,const size_t depth,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o depth: the image depth.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageDepth(Image *image,
const size_t depth,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
QuantumAny
range;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
/*
Requested depth meets or exceeds the compiled quantum depth: pixels
already carry at most that much precision, so just record the depth.
*/
if (depth >= MAGICKCORE_QUANTUM_DEPTH)
{
image->depth=depth;
return(MagickTrue);
}
range=GetQuantumRange(depth);
if (image->storage_class == PseudoClass)
{
register ssize_t
i;
/*
PseudoClass: quantize each updatable channel of every colormap entry
to the reduced range.  Entries are independent, so the loop is safe
to parallelize.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->colors,1)
#endif
for (i=0; i < (ssize_t) image->colors; i++)
{
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].red=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
ClampPixel(image->colormap[i].red),range),range);
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].green=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
ClampPixel(image->colormap[i].green),range),range);
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].blue=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
ClampPixel(image->colormap[i].blue),range),range);
if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].alpha=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
ClampPixel(image->colormap[i].alpha),range),range);
}
}
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
/*
Non-HDRI builds with a small enough quantum range can precompute the
quantized value for every possible quantum in a lookup table, turning
the per-pixel work into a single table read.
*/
#if !defined(MAGICKCORE_HDRI_SUPPORT)
if ((1UL*QuantumRange) <= MaxMap)
{
Quantum
*depth_map;
register ssize_t
i;
/*
Scale pixels to desired (optimized with depth map).
*/
depth_map=(Quantum *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
if (depth_map == (Quantum *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
for (i=0; i <= (ssize_t) MaxMap; i++)
depth_map[i]=ScaleAnyToQuantum(ScaleQuantumToAny((Quantum) i,range),
range);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel
channel;
PixelTrait
traits;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
/* one table lookup replaces the scale-down/scale-up pair */
q[i]=depth_map[ScaleQuantumToMap(q[i])];
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
{
status=MagickFalse;
continue;
}
}
image_view=DestroyCacheView(image_view);
depth_map=(Quantum *) RelinquishMagickMemory(depth_map);
/* only record the new depth if every row was updated successfully */
if (status != MagickFalse)
image->depth=depth;
return(status);
}
#endif
/*
Scale pixels to desired depth.
General path: quantize each updatable channel of each pixel by
scaling down to the reduced range and back up.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel
channel;
PixelTrait
traits;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel((MagickRealType)
q[i]),range),range);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
{
status=MagickFalse;
continue;
}
}
image_view=DestroyCacheView(image_view);
/* only record the new depth if every row was updated successfully */
if (status != MagickFalse)
image->depth=depth;
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageType() sets the type of image. Choose from these types:
%
% Bilevel Grayscale GrayscaleMatte
% Palette PaletteMatte TrueColor
% TrueColorMatte ColorSeparation ColorSeparationMatte
% OptimizeType
%
% The format of the SetImageType method is:
%
% MagickBooleanType SetImageType(Image *image,const ImageType type,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: Image type.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageType(Image *image,const ImageType type,
ExceptionInfo *exception)
{
const char
*artifact;
ImageInfo
*image_info;
MagickBooleanType
status;
QuantizeInfo
*quantize_info;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
status=MagickTrue;
/*
Carry the image's dither preference (and any "dither" artifact
override) into the ImageInfo consulted by quantization below.
*/
image_info=AcquireImageInfo();
image_info->dither=image->dither;
artifact=GetImageArtifact(image,"dither");
if (artifact != (const char *) NULL)
(void) SetImageOption(image_info,"dither",artifact);
switch (type)
{
case BilevelType:
{
/*
Gray colorspace, normalized contrast, then quantized to 2 colors.
NOTE(review): the NormalizeImage return value is discarded, and
status from TransformImageColorspace is overwritten by
QuantizeImage -- only the last operation's status is reported.
*/
status=TransformImageColorspace(image,GRAYColorspace,exception);
(void) NormalizeImage(image,exception);
quantize_info=AcquireQuantizeInfo(image_info);
quantize_info->number_colors=2;
quantize_info->colorspace=GRAYColorspace;
status=QuantizeImage(quantize_info,image,exception);
quantize_info=DestroyQuantizeInfo(quantize_info);
image->alpha_trait=UndefinedPixelTrait;
break;
}
case GrayscaleType:
{
/* gray colorspace, alpha discarded */
status=TransformImageColorspace(image,GRAYColorspace,exception);
image->alpha_trait=UndefinedPixelTrait;
break;
}
case GrayscaleAlphaType:
{
/* gray colorspace; ensure an (opaque) alpha channel exists */
status=TransformImageColorspace(image,GRAYColorspace,exception);
if (image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
break;
}
case PaletteType:
{
/* sRGB; quantize to at most 256 colors only if not already indexed */
status=TransformImageColorspace(image,sRGBColorspace,exception);
if ((image->storage_class == DirectClass) || (image->colors > 256))
{
quantize_info=AcquireQuantizeInfo(image_info);
quantize_info->number_colors=256;
status=QuantizeImage(quantize_info,image,exception);
quantize_info=DestroyQuantizeInfo(quantize_info);
}
image->alpha_trait=UndefinedPixelTrait;
break;
}
case PaletteBilevelAlphaType:
{
ChannelType
channel_mask;
/*
sRGB palette with a bi-level alpha channel: threshold alpha at
QuantumRange/2 (restricting the channel mask to alpha while doing
so), then quantize.
*/
status=TransformImageColorspace(image,sRGBColorspace,exception);
if (image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
channel_mask=SetImageChannelMask(image,AlphaChannel);
(void) BilevelImage(image,(double) QuantumRange/2.0,exception);
(void) SetImageChannelMask(image,channel_mask);
quantize_info=AcquireQuantizeInfo(image_info);
status=QuantizeImage(quantize_info,image,exception);
quantize_info=DestroyQuantizeInfo(quantize_info);
break;
}
case PaletteAlphaType:
{
/* sRGB palette; quantize in TransparentColorspace to respect alpha */
status=TransformImageColorspace(image,sRGBColorspace,exception);
if (image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
quantize_info=AcquireQuantizeInfo(image_info);
quantize_info->colorspace=TransparentColorspace;
status=QuantizeImage(quantize_info,image,exception);
quantize_info=DestroyQuantizeInfo(quantize_info);
break;
}
case TrueColorType:
{
/* sRGB DirectClass, alpha discarded */
status=TransformImageColorspace(image,sRGBColorspace,exception);
if (image->storage_class != DirectClass)
status=SetImageStorageClass(image,DirectClass,exception);
image->alpha_trait=UndefinedPixelTrait;
break;
}
case TrueColorAlphaType:
{
/* sRGB DirectClass with an (opaque) alpha channel */
status=TransformImageColorspace(image,sRGBColorspace,exception);
if (image->storage_class != DirectClass)
status=SetImageStorageClass(image,DirectClass,exception);
if (image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
break;
}
case ColorSeparationType:
{
/* CMYK DirectClass, alpha discarded */
status=TransformImageColorspace(image,CMYKColorspace,exception);
if (image->storage_class != DirectClass)
status=SetImageStorageClass(image,DirectClass,exception);
image->alpha_trait=UndefinedPixelTrait;
break;
}
case ColorSeparationAlphaType:
{
/* CMYK DirectClass with an (opaque) alpha channel */
status=TransformImageColorspace(image,CMYKColorspace,exception);
if (image->storage_class != DirectClass)
status=SetImageStorageClass(image,DirectClass,exception);
if (image->alpha_trait == UndefinedPixelTrait)
status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
break;
}
case OptimizeType:
case UndefinedType:
break;
}
image_info=DestroyImageInfo(image_info);
/* record the new type only if every conversion step succeeded */
if (status == MagickFalse)
return(status);
image->type=type;
return(MagickTrue);
}
|
DRB020-privatemissing-var-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
tmp must be private to each thread to avoid a race condition; the
second worksharing loop below declares it in a private() clause.
Data race pair (if privatization were omitted): tmp@65 vs. tmp@66
*/
#include <stdio.h>
#include <stdlib.h>
/*
 * Initializes an array in parallel, then updates each element through a
 * privatized temporary, and prints the result.  Returns 0 on success.
 * Fix: the file calls printf() but never declared it (stdio.h was
 * missing); also guard against a non-positive command-line length,
 * which would make the VLA declaration undefined behavior.
 */
int main(int argc, char* argv[])
{
  int i;
  int tmp;
  int len=100;

  if (argc>1)
    len = atoi(argv[1]);
  /* A VLA of size <= 0 is undefined; fall back to the default length. */
  if (len <= 0)
    len = 100;
  int a[len];

  /* Independent iterations: initialize a[i] = i in parallel. */
#pragma omp parallel for
  for (i=0;i<len;i++)
    a[i]=i;

  /* tmp is private, so each thread uses its own scratch copy and the
     read at tmp@65 never sees another iteration's write at tmp@66. */
#pragma omp parallel for private(tmp)
  for (i=0;i<len;i++)
  {
    tmp =a[i]+i;
    a[i] = tmp;
  }

  for (i=0;i<len;i++)
    printf("%d\n", a[i]);
  return 0;
}
|
SpatialFullConvolutionMap.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/SpatialFullConvolutionMap.c"
#else
/*
  Forward pass of a mapped full ("transposed") 2D convolution: for each
  output plane, fill with its bias, then accumulate fullConv2D of every
  input plane connected to it via connTable (rows of [inPlane, outPlane]
  pairs, one per kernel slice in weight).
  Output is resized to nOutputPlane x ((inH-1)*dH + kH) x ((inW-1)*dW + kW).
  NOTE(review): the arg check permits bias == NULL, but bias is
  unconditionally dereferenced below via THTensor_(data)(bias) and
  bias_data[p] -- confirm callers always pass a bias tensor.
*/
void THNN_(SpatialFullConvolutionMap_updateOutput)(
THNNState *state, THTensor *input, THTensor *output_, THTensor *weight, THTensor *bias,
THTensor *connTable, int nInputPlane, int nOutputPlane,
int dW, int dH)
{
THArgCheck(THTensor_(isContiguous)(weight), 4, "weight must be contiguous");
THArgCheck(!bias || THTensor_(isContiguous)(bias), 5, "bias must be contiguous");
THArgCheck(
weight != NULL && weight->nDimension == 3
&& connTable != NULL && connTable->size[0] == weight->size[0], 4,
"3D weight tensor expected (connTable:size(%d) x kH x kW)", TH_INDEX_BASE
);
const int kH = (int)weight->size[1];
const int kW = (int)weight->size[2];
THArgCheck(input != NULL && input->nDimension == 3, 2, "3D tensor expected");
THArgCheck(input->size[0] >= nInputPlane, 2, "invalid number of input planes");
/* full convolution enlarges the spatial extent */
THTensor_(resize3d)(
output_, nOutputPlane,
(input->size[1] - 1) * dH + kH,
(input->size[2] - 1) * dW + kW
);
/* contiguous */
input = THTensor_(newContiguous)(input);
THTensor* output = THTensor_(newContiguous)(output_);
/* get raw pointers */
real *input_data = THTensor_(data)(input);
real *output_data = THTensor_(data)(output);
real *weight_data = THTensor_(data)(weight);
real *bias_data = THTensor_(data)(bias);
real *connTable_data = THTensor_(data)(connTable);
/* and dims */
const long input_h = input->size[1];
const long input_w = input->size[2];
const long output_h = output->size[1];
const long output_w = output->size[2];
const long weight_h = weight->size[1];
const long weight_w = weight->size[2];
long p;
/* parallel over output planes: each thread writes a disjoint slice */
#pragma omp parallel for private(p)
for (p = 0; p < nOutputPlane; p++)
{
/* add bias */
real *ptr_output = output_data + p*output_w*output_h;
long j;
int nweight;
long k;
for (j = 0; j < output_h*output_w; j++)
ptr_output[j] = bias_data[p];
/* convolve all maps */
nweight = connTable->size[0];
for (k = 0; k < nweight; k++)
{
/* get offsets for input/output; connTable stores 1-based indices */
int o = (int)connTable_data[k*2+1] - TH_INDEX_BASE;
int i = (int)connTable_data[k*2+0] - TH_INDEX_BASE;
if (o == p)
{
/* accumulate into this thread's own output plane */
THTensor_(fullConv2Dptr)(
output_data + o*output_w*output_h,
1.0,
input_data + i*input_w*input_h, input_h, input_w,
weight_data + k*weight_w*weight_h, weight_h, weight_w,
dH, dW
);
}
}
}
/* clean up */
THTensor_(free)(input);
THTensor_(freeCopyTo)(output, output_);
}
/*
  Backward pass w.r.t. the input: zero gradInput, then for each input
  plane accumulate validXCorr2D of gradOutput through every kernel that
  connects to it (per connTable).  gradInput is resized to match input.
*/
void THNN_(SpatialFullConvolutionMap_updateGradInput)(
THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput_, THTensor *weight, THTensor *bias,
THTensor *connTable, int nInputPlane, int nOutputPlane,
int dW, int dH)
{
THArgCheck(
weight != NULL && weight->nDimension == 3
&& connTable != NULL && connTable->size[0] == weight->size[0], 5,
"3D weight tensor expected (connTable:size(%d) x kH x kW)", TH_INDEX_BASE
);
/* contiguous */
THTensor* gradInput = THTensor_(newContiguous)(gradInput_);
gradOutput = THTensor_(newContiguous)(gradOutput);
/* Resize/Zero */
THTensor_(resizeAs)(gradInput, input);
THTensor_(zero)(gradInput);
/* get raw pointers */
real *gradInput_data = THTensor_(data)(gradInput);
real *gradOutput_data = THTensor_(data)(gradOutput);
real *weight_data = THTensor_(data)(weight);
real *connTable_data = THTensor_(data)(connTable);
/* and dims */
const long input_h = input->size[1];
const long input_w = input->size[2];
const long output_h = gradOutput->size[1];
const long output_w = gradOutput->size[2];
const long kH = weight->size[1];
const long kW = weight->size[2];
long p;
/* parallel over input planes: each thread writes a disjoint slice */
#pragma omp parallel for private(p)
for (p = 0; p < nInputPlane; p++)
{
long k;
/* backward all */
int nkernel = connTable->size[0];
for (k = 0; k < nkernel; k++)
{
/* connTable stores 1-based [inPlane, outPlane] pairs */
int o = (int)connTable_data[k*2+1] - TH_INDEX_BASE;
int i = (int)connTable_data[k*2+0] - TH_INDEX_BASE;
if (i == p)
{
/* gradient to input */
THTensor_(validXCorr2Dptr)(
gradInput_data + i*input_w*input_h,
1.0,
gradOutput_data + o*output_w*output_h, output_h, output_w,
weight_data + k*kW*kH, kH, kW,
dH, dW
);
}
}
}
/* clean up */
THTensor_(freeCopyTo)(gradInput, gradInput_);
THTensor_(free)(gradOutput);
}
/*
  Backward pass w.r.t. the parameters: accumulates scale * gradients
  into gradBias (one sum per output plane) and gradWeight (one
  validXCorr2DRev per connTable entry).  Gradients are accumulated, not
  overwritten, so callers are expected to zero them beforehand.
*/
void THNN_(SpatialFullConvolutionMap_accGradParameters)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
THTensor *gradWeight,
THTensor *gradBias,
THTensor *connTable,
int nInputPlane,
int nOutputPlane,
int dW, int dH,
accreal scale_)
{
real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
THArgCheck(
gradWeight != NULL && gradWeight->nDimension == 3
&& connTable != NULL && connTable->size[0] == gradWeight->size[0], 5,
"3D gradWeight tensor expected (connTable:size(%d) x kH x kW)", TH_INDEX_BASE
);
/* contiguous */
input = THTensor_(newContiguous)(input);
gradOutput = THTensor_(newContiguous)(gradOutput);
/* get raw pointers */
real *input_data = THTensor_(data)(input);
real *gradOutput_data = THTensor_(data)(gradOutput);
real *gradWeight_data = THTensor_(data)(gradWeight);
real *gradBias_data = THTensor_(data)(gradBias);
/* and dims */
const long input_h = input->size[1];
const long input_w = input->size[2];
const long output_h = gradOutput->size[1];
const long output_w = gradOutput->size[2];
const long weight_h = gradWeight->size[1];
const long weight_w = gradWeight->size[2];
/* gradients wrt bias: parallel over output planes, each thread owns
   gradBias_data[k] so there is no write contention */
long k;
#pragma omp parallel for private(k)
for (k = 0; k < nOutputPlane; k++)
{
real *ptr_gradOutput = gradOutput_data + k*output_w*output_h;
long l;
for (l = 0; l < output_h*output_w; l++)
gradBias_data[k] += scale*ptr_gradOutput[l];
}
/* gradients wrt weight: parallel over kernels, each thread writes its
   own gradWeight slice k */
int nkernel = connTable->size[0];
#pragma omp parallel for private(k)
for (k = 0; k < nkernel; k++)
{
/* connTable stores 1-based [inPlane, outPlane] pairs */
int o = (int)THTensor_(get2d)(connTable,k,1) - TH_INDEX_BASE;
int i = (int)THTensor_(get2d)(connTable,k,0) - TH_INDEX_BASE;
/* gradient to kernel */
THTensor_(validXCorr2DRevptr)(
gradWeight_data + k*weight_w*weight_h,
scale,
gradOutput_data + o*output_w*output_h, output_h, output_w,
input_data + i*input_w*input_h, input_h, input_w,
dH, dW
);
}
/* clean up */
THTensor_(free)(input);
THTensor_(free)(gradOutput);
}
#endif
|
omp_reduction_bug_fixed.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <math.h>
/*
 * Computes the dot product of vectors a and b of length N using an
 * OpenMP reduction: each thread accumulates a private partial sum that
 * is combined at the end.
 * Fix: the original declared tid once outside the loop, making it
 * shared -- every thread wrote it concurrently (a data race).  tid is
 * now declared inside the loop body, so each iteration (and thread)
 * has its own copy.  The loop bound is also cast to int to avoid a
 * signed/unsigned comparison.
 */
float dotprod(float * a, float * b, size_t N)
{
  int i;
  float sum = 0;

#pragma omp parallel for \
  shared(a, b, N) \
  reduction(+: sum)
  for (i = 0; i < (int) N; ++i)
  {
    sum += a[i] * b[i];
    int tid = omp_get_thread_num();  /* loop-local: private per thread */
    printf("tid = %d i = %d\n", tid, i);
  }
  return sum;
}
/*
 * Driver: fill two N-element vectors, compute the dot product serially as
 * a reference, then check the OpenMP version against it.
 *
 * Fixes: removed the unused `tid` local (the per-thread id is only needed
 * inside dotprod()) and use a size_t loop index so it matches N.
 *
 * Note on the tolerance: every product is i*i <= 99*99 and the total
 * 328350 is below 2^24, so float arithmetic is exact here regardless of
 * the order the reduction combines partial sums.
 */
int main (int argc, char *argv[])
{
    const size_t N = 100;
    float a[N], b[N];
    float seq_sum = 0;
    /* reference result computed serially */
    for (size_t i = 0; i < N; ++i)
    {
        a[i] = b[i] = (float)i;
        seq_sum += a[i] * b[i];
    }
    float sum = dotprod(a, b, N);
    assert(fabs(sum - seq_sum) < 1.e-7);
    printf("Sum = %f, seq_sum = %f\n", sum, seq_sum);
    return 0;
}
|
forces.c |
/*
* Compute forces and accumulate the virial and the potential
*/
extern double epot, vir;
/*
 * Accumulate pairwise forces into f[] and add this call's contributions
 * to the global accumulators epot (potential energy) and vir (virial).
 *
 * NOTE(review): the `single` and `for` directives below are orphaned (no
 * `parallel` construct appears in this function), so forces() is evidently
 * meant to be executed by every thread of an enclosing parallel region --
 * confirm all call sites do so.
 *
 * npart: number of particles; x[] holds 3 coordinates per particle
 * f[]:   force accumulator, 3 components per particle (updated atomically)
 * side:  periodic box length, used for minimum-image wraparound below
 * rcoff: cutoff radius; pairs farther apart than rcoff contribute nothing
 */
void
forces(int npart, double x[], double f[], double side, double rcoff){
    int i;
    /* one thread resets the global accumulators; the implicit barrier at
       the end of `single` keeps other threads from racing ahead */
#pragma omp single
    {
        vir = 0.0;
        epot = 0.0;
    }
    /* iterations are distributed in static chunks of 32; epot/vir partial
       sums are combined via the reduction clause */
#pragma omp for reduction(+:epot,vir) schedule(static,32)
    for (i=0; i<npart*3; i+=3) {
        // zero force components on particle i
        double fxi = 0.0;
        double fyi = 0.0;
        double fzi = 0.0;
        int j;
        // loop over all particles with index > i
        for (j=i+3; j<npart*3; j+=3) {
            // compute distance between particles i and j allowing for wraparound
            double xx = x[i]-x[j];
            double yy = x[i+1]-x[j+1];
            double zz = x[i+2]-x[j+2];
            if (xx< (-0.5*side) ) xx += side;
            if (xx> (0.5*side) ) xx -= side;
            if (yy< (-0.5*side) ) yy += side;
            if (yy> (0.5*side) ) yy -= side;
            if (zz< (-0.5*side) ) zz += side;
            if (zz> (0.5*side) ) zz -= side;
            double rd = xx*xx+yy*yy+zz*zz;
            // if distance is inside cutoff radius compute forces
            // and contributions to pot. energy and virial
            if (rd<=rcoff*rcoff) {
                double rrd = 1.0/rd;
                double rrd3 = rrd*rrd*rrd;
                double rrd4 = rrd3*rrd;
                double r148 = rrd4*(rrd3 - 0.5);
                epot += rrd3*(rrd3-1.0);
                vir -= rd*r148;
                fxi += xx*r148;
                fyi += yy*r148;
                fzi += zz*r148;
                /* f[j] is written from many different i-iterations that may
                   run on different threads: atomics prevent lost updates */
#pragma omp atomic
                f[j] -= xx*r148;
#pragma omp atomic
                f[j+1] -= yy*r148;
#pragma omp atomic
                f[j+2] -= zz*r148;
            }
        }
        // update forces on particle i
#pragma omp atomic
        f[i] += fxi;
#pragma omp atomic
        f[i+1] += fyi;
#pragma omp atomic
        f[i+2] += fzi;
    }
}
|
wand-view.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% W W AAA N N DDDD %
% W W A A NN N D D %
% W W W AAAAA N N N D D %
% WW WW A A N NN D D %
% W W A A N N DDDD %
% %
% V V IIIII EEEEE W W %
% V V I E W W %
% V V I EEE W W W %
% V V I E WW WW %
% V IIIII EEEEE W W %
% %
% %
% MagickWand Wand View Methods %
% %
% Software Design %
% Cristy %
% March 2003 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickWand/studio.h"
#include "MagickWand/MagickWand.h"
#include "MagickWand/magick-wand-private.h"
#include "MagickWand/wand.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/thread-private.h"
/*
Define declarations.
*/
#define WandViewId "WandView"
/*
Typedef declarations.
*/
struct _WandView
{
  size_t
    id;                   /* unique view id obtained from AcquireWandId() */

  char
    name[MagickPathExtent],  /* "WandView-<id>" label used in event logging */
    *description;            /* description string (ConstantString allocation) */

  RectangleInfo
    extent;               /* region of the image the iterators traverse */

  MagickWand
    *wand;                /* wand whose image list this view operates on */

  Image
    *image;               /* image used by the pixel get/set helpers */

  CacheView
    *view;                /* pixel-cache view for scanline reads/writes */

  PixelWand
    ***pixel_wands;       /* one row of pixel wands per OpenMP thread,
                             indexed [thread id][x] */

  ExceptionInfo
    *exception;           /* errors recorded while using the view */

  MagickBooleanType
    debug;                /* when set, log wand events */

  size_t
    signature;            /* MagickWandSignature while the view is valid */
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e W a n d V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneWandView() makes a copy of the specified wand view.
%
% The format of the CloneWandView method is:
%
% WandView *CloneWandView(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
WandExport WandView *CloneWandView(const WandView *wand_view)
{
  WandView
    *clone_view;

  register ssize_t
    i;

  size_t
    number_threads;

  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  if (wand_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand_view->name);
  clone_view=(WandView *) AcquireMagickMemory(sizeof(*clone_view));
  if (clone_view == (WandView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      wand_view->name);
  (void) ResetMagickMemory(clone_view,0,sizeof(*clone_view));
  clone_view->id=AcquireWandId();
  (void) FormatLocaleString(clone_view->name,MagickPathExtent,"%s-%.20g",
    WandViewId,(double) clone_view->id);
  clone_view->description=ConstantString(wand_view->description);
  /*
    Fix: the clone must reference the same wand as the source view; it was
    previously left NULL by the ResetMagickMemory() above.
  */
  clone_view->wand=wand_view->wand;
  clone_view->image=CloneImage(wand_view->image,0,0,MagickTrue,
    wand_view->exception);
  clone_view->view=CloneCacheView(wand_view->view);
  clone_view->extent=wand_view->extent;
  clone_view->exception=AcquireExceptionInfo();
  InheritException(clone_view->exception,wand_view->exception);
  /*
    Fix: allocate the per-thread pixel-wand table before filling it; the
    loop below used to store through a NULL pixel_wands pointer.
  */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  clone_view->pixel_wands=(PixelWand ***) AcquireMagickMemory(
    number_threads*sizeof(*clone_view->pixel_wands));
  if (clone_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      wand_view->name);
  (void) ResetMagickMemory(clone_view->pixel_wands,0,number_threads*
    sizeof(*clone_view->pixel_wands));
  for (i=0; i < (ssize_t) number_threads; i++)
    clone_view->pixel_wands[i]=ClonePixelWands((const PixelWand **)
      wand_view->pixel_wands[i],wand_view->extent.width);
  clone_view->debug=wand_view->debug;
  if (clone_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_view->name);
  clone_view->signature=MagickWandSignature;
  return(clone_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y W a n d V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyWandView() deallocates memory associated with a wand view.
%
% The format of the DestroyWandView method is:
%
% WandView *DestroyWandView(WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
/*
  Release every per-thread pixel-wand row and then the table itself.
  Always returns NULL so the caller can clear its pointer in one statement.
*/
static PixelWand ***DestroyPixelsThreadSet(PixelWand ***pixel_wands,
  const size_t number_wands)
{
  register ssize_t
    n;

  assert(pixel_wands != (PixelWand ***) NULL);
  for (n=0; n < (ssize_t) GetMagickResourceLimit(ThreadResource); n++)
  {
    if (pixel_wands[n] == (PixelWand **) NULL)
      continue;
    pixel_wands[n]=DestroyPixelWands(pixel_wands[n],number_wands);
  }
  return((PixelWand ***) RelinquishMagickMemory(pixel_wands));
}
WandExport WandView *DestroyWandView(WandView *wand_view)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  wand_view->pixel_wands=DestroyPixelsThreadSet(wand_view->pixel_wands,
    wand_view->extent.width);
  /*
    Fix: the description string (a ConstantString() allocation made by
    NewWandView/CloneWandView) was never released -- a per-view leak.
  */
  if (wand_view->description != (char *) NULL)
    wand_view->description=(char *) RelinquishMagickMemory(
      wand_view->description);
  wand_view->image=DestroyImage(wand_view->image);
  wand_view->view=DestroyCacheView(wand_view->view);
  wand_view->exception=DestroyExceptionInfo(wand_view->exception);
  /* invalidate the signature so stale pointers fail the asserts above */
  wand_view->signature=(~MagickWandSignature);
  RelinquishWandId(wand_view->id);
  wand_view=(WandView *) RelinquishMagickMemory(wand_view);
  return(wand_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D u p l e x T r a n s f e r W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DuplexTransferWandViewIterator() iterates over three wand views in
% parallel and calls your transfer method for each scanline of the view. The
% source and duplex pixel extent is not confined to the image canvas-- that is
% you can include negative offsets or widths or heights that exceed the image
% dimension. However, the destination wand view is confined to the image
% canvas-- that is no negative offsets or widths or heights that exceed the
% image dimension are permitted.
%
% The callback signature is:
%
% MagickBooleanType DuplexTransferImageViewMethod(const WandView *source,
% const WandView *duplex,WandView *destination,const ssize_t y,
% const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the DuplexTransferWandViewIterator method is:
%
% MagickBooleanType DuplexTransferWandViewIterator(WandView *source,
% WandView *duplex,WandView *destination,
% DuplexTransferWandViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source wand view.
%
% o duplex: the duplex wand view.
%
% o destination: the destination wand view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
*/
WandExport MagickBooleanType DuplexTransferWandViewIterator(WandView *source,
  WandView *duplex,WandView *destination,DuplexTransferWandViewMethod transfer,
  void *context)
{
  Image
    *destination_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  /*
    NOTE(review): only `source` is asserted; `duplex` and `destination` are
    dereferenced below without a NULL check -- confirm callers guarantee
    they are valid views.
  */
  assert(source != (WandView *) NULL);
  assert(source->signature == MagickWandSignature);
  if (transfer == (DuplexTransferWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  destination_image=destination->wand->images;
  /* pixel writes require a DirectClass destination image */
  status=SetImageStorageClass(destination_image,DirectClass,
    destination->exception);
  if (status == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=source->extent.height-source->extent.y;
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(source_image,destination_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict duplex_pixels,
      *magick_restrict pixels;

    register ssize_t
      x;

    register Quantum
      *magick_restrict destination_pixels;

    /* once any scanline fails, the remaining iterations become no-ops */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* load the source scanline into this thread's row of pixel wands */
    for (x=0; x < (ssize_t) source->extent.width; x++)
    {
      PixelSetQuantumPixel(source->image,pixels,source->pixel_wands[id][x]);
      pixels+=GetPixelChannels(source->image);
    }
    duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->extent.x,y,
      duplex->extent.width,1,duplex->exception);
    if (duplex_pixels == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* load the duplex scanline the same way */
    for (x=0; x < (ssize_t) duplex->extent.width; x++)
    {
      PixelSetQuantumPixel(duplex->image,duplex_pixels,
        duplex->pixel_wands[id][x]);
      duplex_pixels+=GetPixelChannels(duplex->image);
    }
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->extent.x,y,destination->extent.width,1,
      destination->exception);
    if (destination_pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* seed the destination wands with the current destination pixels */
    for (x=0; x < (ssize_t) destination->extent.width; x++)
    {
      PixelSetQuantumPixel(destination->image,destination_pixels,
        destination->pixel_wands[id][x]);
      destination_pixels+=GetPixelChannels(destination->image);
    }
    /* user callback: reads source/duplex wands, updates destination wands */
    if (transfer(source,duplex,destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    /*
      NOTE(review): this second fetch of the destination scanline is not
      checked against NULL before the write-back loop below -- verify.
    */
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->extent.x,y,destination->extent.width,1,
      destination->exception);
    for (x=0; x < (ssize_t) destination->extent.width; x++)
    {
      PixelGetQuantumPixel(destination->image,destination->pixel_wands[id][x],
        destination_pixels);
      destination_pixels+=GetPixelChannels(destination->image);
    }
    sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickWand_DuplexTransferWandViewIterator)
#endif
        /* progress++ is guarded by the critical section above */
        proceed=SetImageProgress(source_image,source->description,progress++,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewException() returns the severity, reason, and description of any
% error that occurs when utilizing a wand view.
%
% The format of the GetWandViewException method is:
%
% char *GetWandViewException(const WandView *wand_view,
% ExceptionType *severity)
%
% A description of each parameter follows:
%
% o wand_view: the pixel wand_view.
%
% o severity: the severity of the error is returned here.
%
*/
/*
  Report the severity and a formatted message for any error recorded on
  the view.  The returned string is allocated here; the caller owns it.
*/
WandExport char *GetWandViewException(const WandView *wand_view,
  ExceptionType *severity)
{
  char
    *message;

  assert(wand_view != (const WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  if (wand_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand_view->name);
  assert(severity != (ExceptionType *) NULL);
  *severity=wand_view->exception->severity;
  message=(char *) AcquireQuantumMemory(2UL*MagickPathExtent,
    sizeof(*message));
  if (message == (char *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      wand_view->name);
  *message='\0';
  /* "<reason> (<description>)" -- either part may be absent */
  if (wand_view->exception->reason != (char *) NULL)
    (void) CopyMagickString(message,GetLocaleExceptionMessage(
      wand_view->exception->severity,wand_view->exception->reason),
      MagickPathExtent);
  if (wand_view->exception->description != (char *) NULL)
    {
      (void) ConcatenateMagickString(message," (",MagickPathExtent);
      (void) ConcatenateMagickString(message,GetLocaleExceptionMessage(
        wand_view->exception->severity,wand_view->exception->description),
        MagickPathExtent);
      (void) ConcatenateMagickString(message,")",MagickPathExtent);
    }
  return(message);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewExtent() returns the wand view extent.
%
% The format of the GetWandViewExtent method is:
%
% RectangleInfo GetWandViewExtent(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
WandExport RectangleInfo GetWandViewExtent(const WandView *wand_view)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  /* returned by value; the view itself is not modified */
  return(wand_view->extent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewIterator() iterates over the wand view in parallel and calls
% your get method for each scanline of the view. The pixel extent is
% not confined to the image canvas-- that is you can include negative offsets
% or widths or heights that exceed the image dimension. Any updates to
% the pixels in your callback are ignored.
%
% The callback signature is:
%
% MagickBooleanType GetImageViewMethod(const WandView *source,
% const ssize_t y,const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback get method that must be
% executed by a single thread at a time.
%
% The format of the GetWandViewIterator method is:
%
% MagickBooleanType GetWandViewIterator(WandView *source,
% GetWandViewMethod get,void *context)
%
% A description of each parameter follows:
%
% o source: the source wand view.
%
% o get: the get callback method.
%
% o context: the user defined context.
%
*/
WandExport MagickBooleanType GetWandViewIterator(WandView *source,
  GetWandViewMethod get,void *context)
{
  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (WandView *) NULL);
  assert(source->signature == MagickWandSignature);
  if (get == (GetWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=source->extent.height-source->extent.y;
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(source_image,source_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const Quantum
      *pixels;

    register ssize_t
      x;

    /* once any scanline fails, the remaining iterations become no-ops */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* load the scanline into this thread's row of pixel wands */
    for (x=0; x < (ssize_t) source->extent.width; x++)
    {
      PixelSetQuantumPixel(source->image,pixels,source->pixel_wands[id][x]);
      pixels+=GetPixelChannels(source->image);
    }
    /* hand the scanline to the callback; wand updates are never written
       back (read-only iteration) */
    if (get(source,y,id,context) == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickWand_GetWandViewIterator)
#endif
        /* progress++ is guarded by the critical section above */
        proceed=SetImageProgress(source_image,source->description,progress++,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewPixels() returns the wand view pixel_wands.
%
% The format of the GetWandViewPixels method is:
%
% PixelWand *GetWandViewPixels(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
WandExport PixelWand **GetWandViewPixels(const WandView *wand_view)
{
  /* each thread owns one row of pixel wands; return the calling
     thread's row */
  const int
    id = GetOpenMPThreadId();

  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  return(wand_view->pixel_wands[id]);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w W a n d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewWand() returns the magick wand associated with the wand view.
%
% The format of the GetWandViewWand method is:
%
% MagickWand *GetWandViewWand(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
WandExport MagickWand *GetWandViewWand(const WandView *wand_view)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  /* non-owning pointer to the wand the view was created from */
  return(wand_view->wand);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s W a n d V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsWandView() returns MagickTrue if the parameter is verified as a wand
% view object.
%
% The format of the IsWandView method is:
%
% MagickBooleanType IsWandView(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
/*
  Verify that the argument is a live WandView: non-NULL, carrying the wand
  signature, and named with the WandViewId prefix.
*/
WandExport MagickBooleanType IsWandView(const WandView *wand_view)
{
  if (wand_view == (const WandView *) NULL)
    return(MagickFalse);
  if (wand_view->signature != MagickWandSignature)
    return(MagickFalse);
  if (LocaleNCompare(wand_view->name,WandViewId,strlen(WandViewId)) != 0)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w W a n d V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewWandView() returns a wand view required for all other methods in the
% Wand View API.
%
% The format of the NewWandView method is:
%
% WandView *NewWandView(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the wand.
%
*/
/*
  Allocate one row of pixel wands per OpenMP thread.  On any failure the
  partially-built table is torn down and NULL is returned.
*/
static PixelWand ***AcquirePixelsThreadSet(const size_t number_wands)
{
  PixelWand
    ***pixel_wands;

  register ssize_t
    n;

  size_t
    number_threads;

  number_threads=GetOpenMPMaximumThreads();
  pixel_wands=(PixelWand ***) AcquireQuantumMemory(number_threads,
    sizeof(*pixel_wands));
  if (pixel_wands == (PixelWand ***) NULL)
    return((PixelWand ***) NULL);
  /* zero the table so a partial tear-down sees NULL rows */
  (void) ResetMagickMemory(pixel_wands,0,number_threads*sizeof(*pixel_wands));
  for (n=0; n < (ssize_t) number_threads; n++)
  {
    pixel_wands[n]=NewPixelWands(number_wands);
    if (pixel_wands[n] == (PixelWand **) NULL)
      return(DestroyPixelsThreadSet(pixel_wands,number_wands));
  }
  return(pixel_wands);
}
WandExport WandView *NewWandView(MagickWand *wand)
{
  ExceptionInfo
    *exception;

  WandView
    *wand_view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == MagickWandSignature);
  wand_view=(WandView *) AcquireMagickMemory(sizeof(*wand_view));
  if (wand_view == (WandView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  /* zero-fill so unset members (extent.x/y, etc.) start at 0 */
  (void) ResetMagickMemory(wand_view,0,sizeof(*wand_view));
  wand_view->id=AcquireWandId();
  (void) FormatLocaleString(wand_view->name,MagickPathExtent,"%s-%.20g",
    WandViewId,(double) wand_view->id);
  wand_view->description=ConstantString("WandView");
  wand_view->wand=wand;
  exception=AcquireExceptionInfo();
  wand_view->view=AcquireVirtualCacheView(wand_view->wand->images,exception);
  /* the default view spans the first image's full canvas */
  wand_view->extent.width=wand->images->columns;
  wand_view->extent.height=wand->images->rows;
  wand_view->pixel_wands=AcquirePixelsThreadSet(wand_view->extent.width);
  wand_view->exception=exception;
  if (wand_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  wand_view->debug=IsEventLogging();
  wand_view->signature=MagickWandSignature;
  return(wand_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w W a n d V i e w E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewWandViewExtent() returns a wand view required for all other methods
% in the Wand View API.
%
% The format of the NewWandViewExtent method is:
%
% WandView *NewWandViewExtent(MagickWand *wand,const ssize_t x,
% const ssize_t y,const size_t width,const size_t height)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o x,y,columns,rows: These values define the perimeter of an extent of
% pixel_wands view.
%
*/
WandExport WandView *NewWandViewExtent(MagickWand *wand,const ssize_t x,
  const ssize_t y,const size_t width,const size_t height)
{
  ExceptionInfo
    *exception;

  WandView
    *wand_view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == MagickWandSignature);
  wand_view=(WandView *) AcquireMagickMemory(sizeof(*wand_view));
  if (wand_view == (WandView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) ResetMagickMemory(wand_view,0,sizeof(*wand_view));
  wand_view->id=AcquireWandId();
  (void) FormatLocaleString(wand_view->name,MagickPathExtent,"%s-%.20g",
    WandViewId,(double) wand_view->id);
  wand_view->description=ConstantString("WandView");
  /*
    Fix: assign the wand before dereferencing it.  The cache view used to
    be acquired through wand_view->wand while that member was still NULL
    (it is zeroed by the ResetMagickMemory() above), a guaranteed NULL
    dereference.  NewWandView() already assigns in this order.
  */
  wand_view->wand=wand;
  exception=AcquireExceptionInfo();
  wand_view->view=AcquireVirtualCacheView(wand_view->wand->images,exception);
  wand_view->extent.width=width;
  wand_view->extent.height=height;
  wand_view->extent.x=x;
  wand_view->extent.y=y;
  wand_view->exception=exception;
  wand_view->pixel_wands=AcquirePixelsThreadSet(wand_view->extent.width);
  if (wand_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  wand_view->debug=IsEventLogging();
  wand_view->signature=MagickWandSignature;
  return(wand_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t W a n d V i e w D e s c r i p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetWandViewDescription() associates a description with an image view.
%
% The format of the SetWandViewDescription method is:
%
% void SetWandViewDescription(WandView *image_view,const char *description)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
% o description: the wand view description.
%
*/
MagickExport void SetWandViewDescription(WandView *wand_view,
  const char *description)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  /*
    Fix: release any previous description before overwriting the pointer,
    otherwise the earlier ConstantString() allocation leaks.
  */
  if (wand_view->description != (char *) NULL)
    wand_view->description=DestroyString(wand_view->description);
  wand_view->description=ConstantString(description);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetWandViewIterator() iterates over the wand view in parallel and calls
% your set method for each scanline of the view. The pixel extent is
% confined to the image canvas-- that is no negative offsets or widths or
% heights that exceed the image dimension. The pixels are initially
% undefined and any settings you make in the callback method are automagically
% synced back to your image.
%
% The callback signature is:
%
% MagickBooleanType SetImageViewMethod(ImageView *destination,
% const ssize_t y,const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback set method that must be
% executed by a single thread at a time.
%
% The format of the SetWandViewIterator method is:
%
% MagickBooleanType SetWandViewIterator(WandView *destination,
% SetWandViewMethod set,void *context)
%
% A description of each parameter follows:
%
% o destination: the wand view.
%
% o set: the set callback method.
%
% o context: the user defined context.
%
*/
WandExport MagickBooleanType SetWandViewIterator(WandView *destination,
  SetWandViewMethod set,void *context)
{
  Image
    *destination_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(destination != (WandView *) NULL);
  assert(destination->signature == MagickWandSignature);
  if (set == (SetWandViewMethod) NULL)
    return(MagickFalse);
  destination_image=destination->wand->images;
  /* pixel writes require a DirectClass image */
  status=SetImageStorageClass(destination_image,DirectClass,
    destination->exception);
  if (status == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=destination->extent.height-destination->extent.y;
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(destination_image,destination_image,height,1)
#endif
  for (y=destination->extent.y; y < (ssize_t) destination->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register ssize_t
      x;

    register Quantum
      *magick_restrict pixels;

    /* once any scanline fails, the remaining iterations become no-ops */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(destination->view,destination->extent.x,
      y,destination->extent.width,1,destination->exception);
    if (pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* the callback fills this thread's pixel wands for scanline y */
    if (set(destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    /* copy the wands back into the authentic pixel buffer */
    for (x=0; x < (ssize_t) destination->extent.width; x++)
    {
      PixelGetQuantumPixel(destination->image,destination->pixel_wands[id][x],
        pixels);
      pixels+=GetPixelChannels(destination->image);
    }
    sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickWand_SetWandViewIterator)
#endif
        /* progress++ is guarded by the critical section above */
        proceed=SetImageProgress(destination_image,destination->description,
          progress++,destination->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f e r W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransferWandViewIterator() iterates over two wand views in parallel and
% calls your transfer method for each scanline of the view. The source pixel
% extent is not confined to the image canvas-- that is you can include
% negative offsets or widths or heights that exceed the image dimension.
% However, the destination wand view is confined to the image canvas-- that
% is no negative offsets or widths or heights that exceed the image dimension
% are permitted.
%
% The callback signature is:
%
% MagickBooleanType TransferImageViewMethod(const WandView *source,
% WandView *destination,const ssize_t y,const int thread_id,
% void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the TransferWandViewIterator method is:
%
% MagickBooleanType TransferWandViewIterator(WandView *source,
% WandView *destination,TransferWandViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source wand view.
%
% o destination: the destination wand view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
*/
WandExport MagickBooleanType TransferWandViewIterator(WandView *source,
  WandView *destination,TransferWandViewMethod transfer,void *context)
{
  Image
    *destination_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  /*
    NOTE(review): only `source` is asserted; `destination` is dereferenced
    below without a NULL check -- confirm callers guarantee it is valid.
  */
  assert(source != (WandView *) NULL);
  assert(source->signature == MagickWandSignature);
  if (transfer == (TransferWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  destination_image=destination->wand->images;
  /* pixel writes require a DirectClass destination image */
  status=SetImageStorageClass(destination_image,DirectClass,
    destination->exception);
  if (status == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=source->extent.height-source->extent.y;
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(source_image,destination_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict pixels;

    register ssize_t
      x;

    register Quantum
      *magick_restrict destination_pixels;

    /* once any scanline fails, the remaining iterations become no-ops */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* load the source scanline into this thread's row of pixel wands */
    for (x=0; x < (ssize_t) source->extent.width; x++)
    {
      PixelSetQuantumPixel(source->image,pixels,source->pixel_wands[id][x]);
      pixels+=GetPixelChannels(source->image);
    }
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->extent.x,y,destination->extent.width,1,
      destination->exception);
    if (destination_pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* seed the destination wands with the current destination pixels */
    for (x=0; x < (ssize_t) destination->extent.width; x++)
    {
      PixelSetQuantumPixel(destination->image,destination_pixels,
        destination->pixel_wands[id][x]);
      destination_pixels+=GetPixelChannels(destination->image);
    }
    /* user callback: reads source wands, updates destination wands */
    if (transfer(source,destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    /*
      NOTE(review): this second fetch of the destination scanline is not
      checked against NULL before the write-back loop below -- verify.
    */
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->extent.x,y,destination->extent.width,1,
      destination->exception);
    for (x=0; x < (ssize_t) destination->extent.width; x++)
    {
      PixelGetQuantumPixel(destination->image,destination->pixel_wands[id][x],
        destination_pixels);
      destination_pixels+=GetPixelChannels(destination->image);
    }
    sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickWand_TransferWandViewIterator)
#endif
        /* progress++ is guarded by the critical section above */
        proceed=SetImageProgress(source_image,source->description,progress++,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U p d a t e W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UpdateWandViewIterator() iterates over the wand view in parallel and calls
% your update method for each scanline of the view. The pixel extent is
% confined to the image canvas-- that is no negative offsets or widths or
% heights that exceed the image dimension are permitted. Updates to pixels
% in your callback are automagically synced back to the image.
%
% The callback signature is:
%
% MagickBooleanType UpdateWandViewMethod(WandView *source,const ssize_t y,
% const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback update method that must be
% executed by a single thread at a time.
%
% The format of the UpdateWandViewIterator method is:
%
% MagickBooleanType UpdateWandViewIterator(WandView *source,
% UpdateWandViewMethod update,void *context)
%
% A description of each parameter follows:
%
% o source: the source wand view.
%
% o update: the update callback method.
%
% o context: the user defined context.
%
*/
/*
  UpdateWandViewIterator: run `update` once per scanline of the view, in
  parallel when OpenMP is available.  For each row the authentic (writable)
  pixels are fetched, unpacked into this thread's row of pixel wands, handed
  to the callback, packed back into the cache view, and synced to the image.
*/
WandExport MagickBooleanType UpdateWandViewIterator(WandView *source,
  UpdateWandViewMethod update,void *context)
{
  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (WandView *) NULL);
  assert(source->signature == MagickWandSignature);
  if (update == (UpdateWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  /* Pixel updates require DirectClass storage; bail out if the promotion
     fails. */
  status=SetImageStorageClass(source_image,DirectClass,source->exception);
  if (status == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=source->extent.height-source->extent.y;
#pragma omp parallel for schedule(static,4) shared(progress,status) \
  magick_number_threads(source_image,source_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    /* Thread id selects this thread's private row of pixel wands, so
       concurrent iterations never share wand storage. */
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register ssize_t
      x;

    register Quantum
      *magick_restrict pixels;

    /* A failure in any iteration drains the remaining ones (OpenMP loops
       cannot break). */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* Unpack the raw quantum row into per-pixel wands for the callback. */
    for (x=0; x < (ssize_t) source->extent.width; x++)
    {
      PixelSetQuantumPixel(source->image,pixels,source->pixel_wands[id][x]);
      pixels+=GetPixelChannels(source->image);
    }
    if (update(source,y,id,context) == MagickFalse)
      status=MagickFalse;
    /* Pack the (possibly modified) pixel wands back into the same row;
       `pixels` was advanced by the unpack loop, so rewind via the wands. */
    for (x=0; x < (ssize_t) source->extent.width; x++)
    {
      PixelGetQuantumPixel(source->image,source->pixel_wands[id][x],pixels);
      pixels+=GetPixelChannels(source->image);
    }
    sync=SyncCacheViewAuthenticPixels(source->view,source->exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Progress counter is shared across threads; serialize the bump. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickWand_UpdateWandViewIterator)
#endif
        proceed=SetImageProgress(source_image,source->description,progress++,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
|
convolution_3x3_packn_fp16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Pre-transform a 3x3 fp32 kernel into the 8x8 winograd-F(6,3) domain and
// repack it as packn-interleaved fp16 for conv3x3s1_winograd64_packn_fp16sa_rvv.
// Stage 1 computes G * g * G^T per (outch, inch) pair in fp32; stage 2
// interleaves and narrows to __fp16.
static void conv3x3s1_winograd64_transform_kernel_packn_fp16sa_rvv(const Mat& kernel, Mat& kernel_tm_packn, int inch, int outch)
{
    const int packn = csrr_vlenb() / 2;

    // winograd63 transform kernel
    Mat kernel_tm;
    kernel_tm.create(8 * 8, inch, outch);

    // transform matrix G for F(6,3)
    const float ktm[8][3] = {
        {1.0f, 0.0f, 0.0f},
        {-2.0f / 9, -2.0f / 9, -2.0f / 9},
        {-2.0f / 9, 2.0f / 9, -2.0f / 9},
        {1.0f / 90, 1.0f / 45, 2.0f / 45},
        {1.0f / 90, -1.0f / 45, 2.0f / 45},
        {1.0f / 45, 1.0f / 90, 1.0f / 180},
        {1.0f / 45, -1.0f / 90, 1.0f / 180},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for
    for (int oc = 0; oc < outch; oc++)
    {
        for (int ic = 0; ic < inch; ic++)
        {
            const float* k9 = (const float*)kernel + (oc * inch + ic) * 9;
            float* ktm_out = kernel_tm.channel(oc).row(ic);

            // the three rows of the 3x3 kernel
            const float* kr0 = k9;
            const float* kr1 = k9 + 3;
            const float* kr2 = k9 + 6;

            // horizontal pass (transposed layout): tmp[i][r] = G[i] . kernel_row[r]
            float tmp[8][3];
            for (int i = 0; i < 8; i++)
            {
                tmp[i][0] = kr0[0] * ktm[i][0] + kr0[1] * ktm[i][1] + kr0[2] * ktm[i][2];
                tmp[i][1] = kr1[0] * ktm[i][0] + kr1[1] * ktm[i][1] + kr1[2] * ktm[i][2];
                tmp[i][2] = kr2[0] * ktm[i][0] + kr2[1] * ktm[i][1] + kr2[2] * ktm[i][2];
            }

            // vertical pass: ktm_out[j][i] = tmp[j] . G[i]
            for (int j = 0; j < 8; j++)
            {
                const float* t = tmp[j];
                for (int i = 0; i < 8; i++)
                {
                    ktm_out[j * 8 + i] = t[0] * ktm[i][0] + t[1] * ktm[i][1] + t[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave and narrow to fp16
    // src = 64-inch-outch
    // dst = pb-pa-inch/pa-64-outch/pb
    kernel_tm_packn.create(inch / packn, 64, outch / packn, (size_t)2u * packn * packn, packn * packn);

    for (int q = 0; q + (packn - 1) < outch; q += packn)
    {
        Mat g0 = kernel_tm_packn.channel(q / packn);

        for (int k = 0; k < 64; k++)
        {
            __fp16* g00 = g0.row<__fp16>(k);

            for (int p = 0; p + (packn - 1) < inch; p += packn)
            {
                for (int i = 0; i < packn; i++)
                {
                    for (int j = 0; j < packn; j++)
                    {
                        const float* k00 = kernel_tm.channel(q + j).row(p + i);
                        *g00++ = (__fp16)k00[k];
                    }
                }
            }
        }
    }
}
// Winograd F(6x6, 3x3) convolution, stride 1, on packn-packed fp16 blobs (RISC-V V).
// bottom_blob : packed fp16 input feature map
// top_blob    : pre-allocated packed fp16 output (w/h/c already set by the caller)
// kernel_tm   : kernel pre-transformed by conv3x3s1_winograd64_transform_kernel_packn_fp16sa_rvv
// _bias       : optional per-output-channel bias (empty Mat -> zero bias)
// Pipeline: pad input to 6n+2 -> 8x8 input-tile transform -> per-frequency dot
// (64 small GEMMs) -> 6x6 output-tile transform with bias -> crop padding.
static void conv3x3s1_winograd64_packn_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
    const int packn = csrr_vlenb() / 2;
    const word_type vl = vsetvl_e16m1(packn);

    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 6n+2
    Mat bottom_blob_bordered = bottom_blob;
    outw = (outw + 5) / 6 * 6;
    outh = (outh + 5) / 6 * 6;
    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);

    const __fp16* bias = _bias;

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 6 * 8;
        int h_tm = outh / 6 * 8;

        const int tiles = w_tm / 8 * h_tm / 8;

        // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
        bottom_blob_tm.create(tiles, 64, inch, 2u * elempack, elempack, opt.workspace_allocator);

        // const float itm[8][8] = {
        //     {1.0f,  0.0f, -5.25f,  0.00f,  5.25f,  0.00f, -1.0f, 0.0f},
        //
        //     {0.0f,  1.0f,  1.00f, -4.25f, -4.25f,  1.00f,  1.0f, 0.0f},
        //     {0.0f, -1.0f,  1.00f,  4.25f, -4.25f, -1.00f,  1.0f, 0.0f},
        //
        //     {0.0f,  0.5f,  0.25f, -2.50f, -1.25f,  2.00f,  1.0f, 0.0f},
        //     {0.0f, -0.5f,  0.25f,  2.50f, -1.25f, -2.00f,  1.0f, 0.0f},
        //
        //     {0.0f,  2.0f,  4.00f, -2.50f, -5.00f,  0.50f,  1.0f, 0.0f},
        //     {0.0f, -2.0f,  4.00f,  2.50f, -5.00f, -0.50f,  1.0f, 0.0f},
        //
        //     {0.0f, -1.0f,  0.00f,  5.25f,  0.00f, -5.25f,  0.0f, 1.0f}
        // };

        // 0 = r00 - r06 + (r04 - r02) * 5.25
        // 7 = r07 - r01 + (r03 - r05) * 5.25

        // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
        // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)

        // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
        // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)

        // reuse r04 * 1.25
        // reuse r03 * 2.5
        // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
        // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < inch; q++)
        {
            const Mat img0 = bottom_blob_bordered.channel(q);
            Mat img0_tm = bottom_blob_tm.channel(q);

            // NOTE c99 variable length array
            __fp16 tmp[8][8][packn];

            // tile
            for (int i = 0; i < h_tm / 8; i++)
            {
                for (int j = 0; j < w_tm / 8; j++)
                {
                    // input tiles overlap by 2: stride 6, width 8
                    const __fp16* r0 = img0.row<const __fp16>(i * 6) + (j * 6) * packn;

                    // row pass of B^T * d
                    for (int m = 0; m < 8; m++)
                    {
                        vfloat16m1_t _r00 = vle16_v_f16m1(r0, vl);
                        vfloat16m1_t _r01 = vle16_v_f16m1(r0 + packn, vl);
                        vfloat16m1_t _r02 = vle16_v_f16m1(r0 + packn * 2, vl);
                        vfloat16m1_t _r03 = vle16_v_f16m1(r0 + packn * 3, vl);
                        vfloat16m1_t _r04 = vle16_v_f16m1(r0 + packn * 4, vl);
                        vfloat16m1_t _r05 = vle16_v_f16m1(r0 + packn * 5, vl);
                        vfloat16m1_t _r06 = vle16_v_f16m1(r0 + packn * 6, vl);
                        vfloat16m1_t _r07 = vle16_v_f16m1(r0 + packn * 7, vl);

                        vfloat16m1_t _tmp0m = vfmacc_vf_f16m1(vfsub_vv_f16m1(_r00, _r06, vl), 5.25f, vfsub_vv_f16m1(_r04, _r02, vl), vl);
                        vfloat16m1_t _tmp7m = vfmacc_vf_f16m1(vfsub_vv_f16m1(_r07, _r01, vl), 5.25f, vfsub_vv_f16m1(_r03, _r05, vl), vl);
                        vse16_v_f16m1(tmp[0][m], _tmp0m, vl);
                        vse16_v_f16m1(tmp[7][m], _tmp7m, vl);

                        vfloat16m1_t _tmp12a = vfmacc_vf_f16m1(vfadd_vv_f16m1(_r02, _r06, vl), -4.25f, _r04, vl);
                        vfloat16m1_t _tmp12b = vfmacc_vf_f16m1(vfadd_vv_f16m1(_r01, _r05, vl), -4.25f, _r03, vl);
                        vfloat16m1_t _tmp1m = vfadd_vv_f16m1(_tmp12a, _tmp12b, vl);
                        vfloat16m1_t _tmp2m = vfsub_vv_f16m1(_tmp12a, _tmp12b, vl);
                        vse16_v_f16m1(tmp[1][m], _tmp1m, vl);
                        vse16_v_f16m1(tmp[2][m], _tmp2m, vl);

                        vfloat16m1_t _tmp34a = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_r06, 0.25f, _r02, vl), -1.25f, _r04, vl);
                        vfloat16m1_t _tmp34b = vfmacc_vf_f16m1(vfmacc_vf_f16m1(vfmul_vf_f16m1(_r01, 0.5f, vl), -2.5f, _r03, vl), 2.f, _r05, vl);
                        vfloat16m1_t _tmp3m = vfadd_vv_f16m1(_tmp34a, _tmp34b, vl);
                        vfloat16m1_t _tmp4m = vfsub_vv_f16m1(_tmp34a, _tmp34b, vl);
                        vse16_v_f16m1(tmp[3][m], _tmp3m, vl);
                        vse16_v_f16m1(tmp[4][m], _tmp4m, vl);

                        vfloat16m1_t _tmp56a = vfmacc_vf_f16m1(_r06, 4.f, vfmacc_vf_f16m1(_r02, -1.25f, _r04, vl), vl);
                        vfloat16m1_t _tmp56b = vfmacc_vf_f16m1(vfmacc_vf_f16m1(vfmul_vf_f16m1(_r01, 2.f, vl), -2.5f, _r03, vl), 0.5f, _r05, vl);
                        vfloat16m1_t _tmp5m = vfadd_vv_f16m1(_tmp56a, _tmp56b, vl);
                        vfloat16m1_t _tmp6m = vfsub_vv_f16m1(_tmp56a, _tmp56b, vl);
                        vse16_v_f16m1(tmp[5][m], _tmp5m, vl);
                        vse16_v_f16m1(tmp[6][m], _tmp6m, vl);

                        r0 += w * packn;
                    }

                    // column pass; results scatter to 8 frequency planes, each
                    // tiles*packn apart inside img0_tm
                    __fp16* r0_tm_0 = (__fp16*)img0_tm + (i * w_tm / 8 + j) * packn;
                    __fp16* r0_tm_1 = r0_tm_0 + tiles * packn;
                    __fp16* r0_tm_2 = r0_tm_0 + tiles * packn * 2;
                    __fp16* r0_tm_3 = r0_tm_0 + tiles * packn * 3;
                    __fp16* r0_tm_4 = r0_tm_0 + tiles * packn * 4;
                    __fp16* r0_tm_5 = r0_tm_0 + tiles * packn * 5;
                    __fp16* r0_tm_6 = r0_tm_0 + tiles * packn * 6;
                    __fp16* r0_tm_7 = r0_tm_0 + tiles * packn * 7;

                    for (int m = 0; m < 8; m++)
                    {
                        vfloat16m1_t _tmp00 = vle16_v_f16m1(tmp[m][0], vl);
                        vfloat16m1_t _tmp01 = vle16_v_f16m1(tmp[m][1], vl);
                        vfloat16m1_t _tmp02 = vle16_v_f16m1(tmp[m][2], vl);
                        vfloat16m1_t _tmp03 = vle16_v_f16m1(tmp[m][3], vl);
                        vfloat16m1_t _tmp04 = vle16_v_f16m1(tmp[m][4], vl);
                        vfloat16m1_t _tmp05 = vle16_v_f16m1(tmp[m][5], vl);
                        vfloat16m1_t _tmp06 = vle16_v_f16m1(tmp[m][6], vl);
                        vfloat16m1_t _tmp07 = vle16_v_f16m1(tmp[m][7], vl);

                        vfloat16m1_t _r0tm0 = vfmacc_vf_f16m1(vfsub_vv_f16m1(_tmp00, _tmp06, vl), 5.25f, vfsub_vv_f16m1(_tmp04, _tmp02, vl), vl);
                        vfloat16m1_t _r0tm7 = vfmacc_vf_f16m1(vfsub_vv_f16m1(_tmp07, _tmp01, vl), 5.25f, vfsub_vv_f16m1(_tmp03, _tmp05, vl), vl);

                        vfloat16m1_t _tmp12a = vfmacc_vf_f16m1(vfadd_vv_f16m1(_tmp02, _tmp06, vl), -4.25f, _tmp04, vl);
                        vfloat16m1_t _tmp12b = vfmacc_vf_f16m1(vfadd_vv_f16m1(_tmp01, _tmp05, vl), -4.25f, _tmp03, vl);
                        vfloat16m1_t _r0tm1 = vfadd_vv_f16m1(_tmp12a, _tmp12b, vl);
                        vfloat16m1_t _r0tm2 = vfsub_vv_f16m1(_tmp12a, _tmp12b, vl);

                        vfloat16m1_t _tmp34a = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp06, 0.25f, _tmp02, vl), -1.25f, _tmp04, vl);
                        vfloat16m1_t _tmp34b = vfmacc_vf_f16m1(vfmacc_vf_f16m1(vfmul_vf_f16m1(_tmp01, 0.5f, vl), -2.5f, _tmp03, vl), 2.f, _tmp05, vl);
                        vfloat16m1_t _r0tm3 = vfadd_vv_f16m1(_tmp34a, _tmp34b, vl);
                        vfloat16m1_t _r0tm4 = vfsub_vv_f16m1(_tmp34a, _tmp34b, vl);

                        vfloat16m1_t _tmp56a = vfmacc_vf_f16m1(_tmp06, 4.f, vfmacc_vf_f16m1(_tmp02, -1.25f, _tmp04, vl), vl);
                        vfloat16m1_t _tmp56b = vfmacc_vf_f16m1(vfmacc_vf_f16m1(vfmul_vf_f16m1(_tmp01, 2.f, vl), -2.5f, _tmp03, vl), 0.5f, _tmp05, vl);
                        vfloat16m1_t _r0tm5 = vfadd_vv_f16m1(_tmp56a, _tmp56b, vl);
                        vfloat16m1_t _r0tm6 = vfsub_vv_f16m1(_tmp56a, _tmp56b, vl);

                        vse16_v_f16m1(r0_tm_0, _r0tm0, vl);
                        vse16_v_f16m1(r0_tm_1, _r0tm1, vl);
                        vse16_v_f16m1(r0_tm_2, _r0tm2, vl);
                        vse16_v_f16m1(r0_tm_3, _r0tm3, vl);
                        vse16_v_f16m1(r0_tm_4, _r0tm4, vl);
                        vse16_v_f16m1(r0_tm_5, _r0tm5, vl);
                        vse16_v_f16m1(r0_tm_6, _r0tm6, vl);
                        vse16_v_f16m1(r0_tm_7, _r0tm7, vl);

                        r0_tm_0 += tiles * packn * 8;
                        r0_tm_1 += tiles * packn * 8;
                        r0_tm_2 += tiles * packn * 8;
                        r0_tm_3 += tiles * packn * 8;
                        r0_tm_4 += tiles * packn * 8;
                        r0_tm_5 += tiles * packn * 8;
                        r0_tm_6 += tiles * packn * 8;
                        r0_tm_7 += tiles * packn * 8;
                    }
                }
            }
        }
    }
    bottom_blob_bordered = Mat();
    // END transform input

    // BEGIN dot
    Mat top_blob_tm;
    {
        int w_tm = outw / 6 * 8;
        int h_tm = outh / 6 * 8;

        const int tiles = h_tm / 8 * w_tm / 8;

        // permute
        // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
        // repack tiles into batches of 8/4/2/1 so the GEMM below reads
        // contiguous data per batch
        Mat bottom_blob_tm2;
        if (tiles >= 8)
            bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, 2u * elempack, elempack, opt.workspace_allocator);
        else if (tiles >= 4)
            bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, 2u * elempack, elempack, opt.workspace_allocator);
        else if (tiles >= 2)
            bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, 2u * elempack, elempack, opt.workspace_allocator);
        else // if (tiles >= 1)
            bottom_blob_tm2.create(1 * inch, tiles, 64, 2u * elempack, elempack, opt.workspace_allocator);

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int r = 0; r < 64; r++)
        {
            Mat tm2 = bottom_blob_tm2.channel(r);

            // tile
            int i = 0;
            for (; i + 7 < tiles; i += 8)
            {
                __fp16* tmpptr = tm2.row<__fp16>(i / 8);

                const __fp16* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * packn;

                for (int q = 0; q < inch; q++)
                {
#if RVV_SPEC_0_7
                    // scalar transpose fallback for the 0.7 vector spec
                    for (int l = 0; l < packn; l++)
                    {
                        tmpptr[0] = r0[l];
                        tmpptr[1] = r0[l + packn];
                        tmpptr[2] = r0[l + packn * 2];
                        tmpptr[3] = r0[l + packn * 3];
                        tmpptr[4] = r0[l + packn * 4];
                        tmpptr[5] = r0[l + packn * 5];
                        tmpptr[6] = r0[l + packn * 6];
                        tmpptr[7] = r0[l + packn * 7];
                        tmpptr += 8;
                    }

                    r0 += bottom_blob_tm.cstep * packn;
#else
                    // segmented store transposes 8 packn-vectors in one shot
                    vfloat16m1_t _val0 = vle16_v_f16m1(r0, vl);
                    vfloat16m1_t _val1 = vle16_v_f16m1(r0 + packn, vl);
                    vfloat16m1_t _val2 = vle16_v_f16m1(r0 + packn * 2, vl);
                    vfloat16m1_t _val3 = vle16_v_f16m1(r0 + packn * 3, vl);
                    vfloat16m1_t _val4 = vle16_v_f16m1(r0 + packn * 4, vl);
                    vfloat16m1_t _val5 = vle16_v_f16m1(r0 + packn * 5, vl);
                    vfloat16m1_t _val6 = vle16_v_f16m1(r0 + packn * 6, vl);
                    vfloat16m1_t _val7 = vle16_v_f16m1(r0 + packn * 7, vl);
                    vsseg8e16_v_f16m1x8(tmpptr, vcreate_f16m1x8(_val0, _val1, _val2, _val3, _val4, _val5, _val6, _val7), vl);

                    r0 += bottom_blob_tm.cstep * packn;
                    tmpptr += packn * 8;
#endif
                }
            }
            for (; i + 3 < tiles; i += 4)
            {
                __fp16* tmpptr = tm2.row<__fp16>(i / 8 + (i % 8) / 4);

                const __fp16* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * packn;

                for (int q = 0; q < inch; q++)
                {
#if RVV_SPEC_0_7
                    for (int l = 0; l < packn; l++)
                    {
                        tmpptr[0] = r0[l];
                        tmpptr[1] = r0[l + packn];
                        tmpptr[2] = r0[l + packn * 2];
                        tmpptr[3] = r0[l + packn * 3];
                        tmpptr += 4;
                    }

                    r0 += bottom_blob_tm.cstep * packn;
#else
                    vfloat16m1_t _val0 = vle16_v_f16m1(r0, vl);
                    vfloat16m1_t _val1 = vle16_v_f16m1(r0 + packn, vl);
                    vfloat16m1_t _val2 = vle16_v_f16m1(r0 + packn * 2, vl);
                    vfloat16m1_t _val3 = vle16_v_f16m1(r0 + packn * 3, vl);
                    vsseg4e16_v_f16m1x4(tmpptr, vcreate_f16m1x4(_val0, _val1, _val2, _val3), vl);

                    r0 += bottom_blob_tm.cstep * packn;
                    tmpptr += packn * 4;
#endif
                }
            }
            for (; i + 1 < tiles; i += 2)
            {
                __fp16* tmpptr = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2);

                const __fp16* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * packn;

                for (int q = 0; q < inch; q++)
                {
#if RVV_SPEC_0_7
                    for (int l = 0; l < packn; l++)
                    {
                        tmpptr[0] = r0[l];
                        tmpptr[1] = r0[l + packn];
                        tmpptr += 2;
                    }

                    r0 += bottom_blob_tm.cstep * packn;
#else
                    vfloat16m1_t _val0 = vle16_v_f16m1(r0, vl);
                    vfloat16m1_t _val1 = vle16_v_f16m1(r0 + packn, vl);
                    vsseg2e16_v_f16m1x2(tmpptr, vcreate_f16m1x2(_val0, _val1), vl);

                    r0 += bottom_blob_tm.cstep * packn;
                    tmpptr += packn * 2;
#endif
                }
            }
            for (; i < tiles; i++)
            {
                __fp16* tmpptr = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);

                const __fp16* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * packn;

                for (int q = 0; q < inch; q++)
                {
                    vfloat16m1_t _val = vle16_v_f16m1(r0, vl);
                    vse16_v_f16m1(tmpptr, _val, vl);

                    r0 += bottom_blob_tm.cstep * packn;
                    tmpptr += packn;
                }
            }
        }

        bottom_blob_tm = Mat();
        // permute end

        top_blob_tm.create(tiles, 64, outch, 2u * elempack, elempack, opt.workspace_allocator);

        // per-frequency GEMM: for each of the 64 planes, output = kernel . input
        // over inch*packn scalar lanes, 8/4/2/1 tiles at a time
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outch; p++)
        {
            __fp16* output0_tm = top_blob_tm.channel(p);

            const Mat kernel0_tm = kernel_tm.channel(p);

            for (int r = 0; r < 64; r++)
            {
                const Mat bb2 = bottom_blob_tm2.channel(r);

                int i = 0;
                for (; i + 7 < tiles; i += 8)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8);
                    const __fp16* k0 = kernel0_tm.row<const __fp16>(r);

                    int nn = inch * packn; // inch always > 0

                    vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum2 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum3 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum4 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum5 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum6 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum7 = vfmv_v_f_f16m1(0.f, vl);

                    for (int j = 0; j < nn; j++)
                    {
                        __fp16 val0 = *r0++;
                        __fp16 val1 = *r0++;
                        __fp16 val2 = *r0++;
                        __fp16 val3 = *r0++;
                        __fp16 val4 = *r0++;
                        __fp16 val5 = *r0++;
                        __fp16 val6 = *r0++;
                        __fp16 val7 = *r0++;
                        vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl);
                        _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl);
                        _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl);
                        _sum2 = vfmacc_vf_f16m1(_sum2, val2, _w0, vl);
                        _sum3 = vfmacc_vf_f16m1(_sum3, val3, _w0, vl);
                        _sum4 = vfmacc_vf_f16m1(_sum4, val4, _w0, vl);
                        _sum5 = vfmacc_vf_f16m1(_sum5, val5, _w0, vl);
                        _sum6 = vfmacc_vf_f16m1(_sum6, val6, _w0, vl);
                        _sum7 = vfmacc_vf_f16m1(_sum7, val7, _w0, vl);

                        k0 += packn;
                    }

                    vse16_v_f16m1(output0_tm, _sum0, vl);
                    vse16_v_f16m1(output0_tm + packn, _sum1, vl);
                    vse16_v_f16m1(output0_tm + packn * 2, _sum2, vl);
                    vse16_v_f16m1(output0_tm + packn * 3, _sum3, vl);
                    vse16_v_f16m1(output0_tm + packn * 4, _sum4, vl);
                    vse16_v_f16m1(output0_tm + packn * 5, _sum5, vl);
                    vse16_v_f16m1(output0_tm + packn * 6, _sum6, vl);
                    vse16_v_f16m1(output0_tm + packn * 7, _sum7, vl);

                    output0_tm += packn * 8;
                }
                for (; i + 3 < tiles; i += 4)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4);
                    const __fp16* k0 = kernel0_tm.row<const __fp16>(r);

                    int nn = inch * packn; // inch always > 0

                    vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum2 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum3 = vfmv_v_f_f16m1(0.f, vl);

                    for (int j = 0; j < nn; j++)
                    {
                        __fp16 val0 = *r0++;
                        __fp16 val1 = *r0++;
                        __fp16 val2 = *r0++;
                        __fp16 val3 = *r0++;
                        vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl);
                        _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl);
                        _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl);
                        _sum2 = vfmacc_vf_f16m1(_sum2, val2, _w0, vl);
                        _sum3 = vfmacc_vf_f16m1(_sum3, val3, _w0, vl);

                        k0 += packn;
                    }

                    vse16_v_f16m1(output0_tm, _sum0, vl);
                    vse16_v_f16m1(output0_tm + packn, _sum1, vl);
                    vse16_v_f16m1(output0_tm + packn * 2, _sum2, vl);
                    vse16_v_f16m1(output0_tm + packn * 3, _sum3, vl);

                    output0_tm += packn * 4;
                }
                for (; i + 1 < tiles; i += 2)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2);
                    const __fp16* k0 = kernel0_tm.row<const __fp16>(r);

                    int nn = inch * packn; // inch always > 0

                    vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl);

                    for (int j = 0; j < nn; j++)
                    {
                        __fp16 val0 = *r0++;
                        __fp16 val1 = *r0++;
                        vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl);
                        _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl);
                        _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl);

                        k0 += packn;
                    }

                    vse16_v_f16m1(output0_tm, _sum0, vl);
                    vse16_v_f16m1(output0_tm + packn, _sum1, vl);

                    output0_tm += packn * 2;
                }
                for (; i < tiles; i++)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
                    const __fp16* k0 = kernel0_tm.row<const __fp16>(r);

                    int nn = inch * packn; // inch always > 0

                    vfloat16m1_t _sum = vfmv_v_f_f16m1(0.f, vl);

                    for (int j = 0; j < nn; j++)
                    {
                        __fp16 val = *r0++;
                        vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl);
                        _sum = vfmacc_vf_f16m1(_sum, val, _w0, vl);

                        k0 += packn;
                    }

                    vse16_v_f16m1(output0_tm, _sum, vl);

                    output0_tm += packn;
                }
            }
        }
    }
    bottom_blob_tm = Mat();
    // END dot

    // BEGIN transform output
    Mat top_blob_bordered;
    if (outw == top_blob.w && outh == top_blob.h)
    {
        top_blob_bordered = top_blob;
    }
    else
    {
        top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
    }
    {
        // const float otm[6][8] = {
        //     {1.0f, 1.0f,  1.0f,  1.0f,  1.0f, 32.0f, 32.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f,  2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
        //     {0.0f, 1.0f,  1.0f,  4.0f,  4.0f,  8.0f,  8.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f,  8.0f, -8.0f,  4.0f, -4.0f, 0.0f},
        //     {0.0f, 1.0f,  1.0f, 16.0f, 16.0f,  2.0f,  2.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 32.0f,-32.0f,  1.0f, -1.0f, 1.0f}
        // };

        // 0 = r0 + (r1 + r2) + (r3 + r4)     + (r5 + r6) * 32
        // 1 =      (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
        // 2 =      (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
        // 3 =      (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
        // 4 =      (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
        // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)

        int w_tm = outw / 6 * 8;
        int h_tm = outh / 6 * 8;
        const int tiles = w_tm / 8 * h_tm / 8;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outch; p++)
        {
            const Mat out0_tm = top_blob_tm.channel(p);
            Mat out0 = top_blob_bordered.channel(p);

            // const float bias0 = bias ? bias[p] : 0.f;
            vfloat16m1_t _bias0 = bias ? vle16_v_f16m1((const __fp16*)bias + p * packn, vl) : vfmv_v_f_f16m1(0.f, vl);

            // NOTE c99 variable length array
            __fp16 tmp[6][8][packn];

            // tile
            for (int i = 0; i < outh / 6; i++)
            {
                for (int j = 0; j < outw / 6; j++)
                {
                    // top_blob_tm.create(tiles, 64, outch, elemsize, elempack);

                    const __fp16* output0_tm_0 = (const __fp16*)out0_tm + (i * w_tm / 8 + j) * packn;
                    const __fp16* output0_tm_1 = output0_tm_0 + tiles * packn;
                    const __fp16* output0_tm_2 = output0_tm_0 + tiles * packn * 2;
                    const __fp16* output0_tm_3 = output0_tm_0 + tiles * packn * 3;
                    const __fp16* output0_tm_4 = output0_tm_0 + tiles * packn * 4;
                    const __fp16* output0_tm_5 = output0_tm_0 + tiles * packn * 5;
                    const __fp16* output0_tm_6 = output0_tm_0 + tiles * packn * 6;
                    const __fp16* output0_tm_7 = output0_tm_0 + tiles * packn * 7;

                    __fp16* output0 = out0.row<__fp16>(i * 6) + (j * 6) * packn;

                    // TODO rvv optimize
                    for (int m = 0; m < 8; m++)
                    {
                        vfloat16m1_t _out0tm0 = vle16_v_f16m1(output0_tm_0, vl);
                        vfloat16m1_t _out0tm1 = vle16_v_f16m1(output0_tm_1, vl);
                        vfloat16m1_t _out0tm2 = vle16_v_f16m1(output0_tm_2, vl);
                        vfloat16m1_t _out0tm3 = vle16_v_f16m1(output0_tm_3, vl);
                        vfloat16m1_t _out0tm4 = vle16_v_f16m1(output0_tm_4, vl);
                        vfloat16m1_t _out0tm5 = vle16_v_f16m1(output0_tm_5, vl);
                        vfloat16m1_t _out0tm6 = vle16_v_f16m1(output0_tm_6, vl);
                        vfloat16m1_t _out0tm7 = vle16_v_f16m1(output0_tm_7, vl);

                        vfloat16m1_t _tmp024a = vfadd_vv_f16m1(_out0tm1, _out0tm2, vl);
                        vfloat16m1_t _tmp135a = vfsub_vv_f16m1(_out0tm1, _out0tm2, vl);

                        vfloat16m1_t _tmp024b = vfadd_vv_f16m1(_out0tm3, _out0tm4, vl);
                        vfloat16m1_t _tmp135b = vfsub_vv_f16m1(_out0tm3, _out0tm4, vl);

                        vfloat16m1_t _tmp024c = vfadd_vv_f16m1(_out0tm5, _out0tm6, vl);
                        vfloat16m1_t _tmp135c = vfsub_vv_f16m1(_out0tm5, _out0tm6, vl);

                        vfloat16m1_t _tmp0m = vfadd_vv_f16m1(vfadd_vv_f16m1(_out0tm0, _tmp024a, vl), vfmacc_vf_f16m1(_tmp024b, 32.f, _tmp024c, vl), vl);
                        vfloat16m1_t _tmp2m = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp024a, 4.f, _tmp024b, vl), 8.f, _tmp024c, vl);
                        vfloat16m1_t _tmp4m = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp024a, 16.f, _tmp024b, vl), 2.f, _tmp024c, vl);
                        vse16_v_f16m1(tmp[0][m], _tmp0m, vl);
                        vse16_v_f16m1(tmp[2][m], _tmp2m, vl);
                        vse16_v_f16m1(tmp[4][m], _tmp4m, vl);

                        vfloat16m1_t _tmp1m = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp135a, 2.f, _tmp135b, vl), 16.f, _tmp135c, vl);
                        vfloat16m1_t _tmp3m = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp135a, 8.f, _tmp135b, vl), 4.f, _tmp135c, vl);
                        vfloat16m1_t _tmp5m = vfadd_vv_f16m1(vfadd_vv_f16m1(_out0tm7, _tmp135a, vl), vfmacc_vf_f16m1(_tmp135c, 32.f, _tmp135b, vl), vl);
                        vse16_v_f16m1(tmp[1][m], _tmp1m, vl);
                        vse16_v_f16m1(tmp[3][m], _tmp3m, vl);
                        vse16_v_f16m1(tmp[5][m], _tmp5m, vl);

                        output0_tm_0 += tiles * packn * 8;
                        output0_tm_1 += tiles * packn * 8;
                        output0_tm_2 += tiles * packn * 8;
                        output0_tm_3 += tiles * packn * 8;
                        output0_tm_4 += tiles * packn * 8;
                        output0_tm_5 += tiles * packn * 8;
                        output0_tm_6 += tiles * packn * 8;
                        output0_tm_7 += tiles * packn * 8;
                    }

                    // column pass; bias is added once here, on the final values
                    for (int m = 0; m < 6; m++)
                    {
                        vfloat16m1_t _tmp00 = vle16_v_f16m1(tmp[m][0], vl);
                        vfloat16m1_t _tmp01 = vle16_v_f16m1(tmp[m][1], vl);
                        vfloat16m1_t _tmp02 = vle16_v_f16m1(tmp[m][2], vl);
                        vfloat16m1_t _tmp03 = vle16_v_f16m1(tmp[m][3], vl);
                        vfloat16m1_t _tmp04 = vle16_v_f16m1(tmp[m][4], vl);
                        vfloat16m1_t _tmp05 = vle16_v_f16m1(tmp[m][5], vl);
                        vfloat16m1_t _tmp06 = vle16_v_f16m1(tmp[m][6], vl);
                        vfloat16m1_t _tmp07 = vle16_v_f16m1(tmp[m][7], vl);

                        vfloat16m1_t _tmp024a = vfadd_vv_f16m1(_tmp01, _tmp02, vl);
                        vfloat16m1_t _tmp135a = vfsub_vv_f16m1(_tmp01, _tmp02, vl);

                        vfloat16m1_t _tmp024b = vfadd_vv_f16m1(_tmp03, _tmp04, vl);
                        vfloat16m1_t _tmp135b = vfsub_vv_f16m1(_tmp03, _tmp04, vl);

                        vfloat16m1_t _tmp024c = vfadd_vv_f16m1(_tmp05, _tmp06, vl);
                        vfloat16m1_t _tmp135c = vfsub_vv_f16m1(_tmp05, _tmp06, vl);

                        vfloat16m1_t _out00 = vfadd_vv_f16m1(_bias0, vfadd_vv_f16m1(vfadd_vv_f16m1(_tmp00, _tmp024a, vl), vfmacc_vf_f16m1(_tmp024b, 32.f, _tmp024c, vl), vl), vl);
                        vfloat16m1_t _out02 = vfadd_vv_f16m1(_bias0, vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp024a, 4.f, _tmp024b, vl), 8.f, _tmp024c, vl), vl);
                        vfloat16m1_t _out04 = vfadd_vv_f16m1(_bias0, vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp024a, 16.f, _tmp024b, vl), 2.f, _tmp024c, vl), vl);
                        vse16_v_f16m1(output0, _out00, vl);
                        vse16_v_f16m1(output0 + packn * 2, _out02, vl);
                        vse16_v_f16m1(output0 + packn * 4, _out04, vl);

                        vfloat16m1_t _out01 = vfadd_vv_f16m1(_bias0, vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp135a, 2.f, _tmp135b, vl), 16.f, _tmp135c, vl), vl);
                        vfloat16m1_t _out03 = vfadd_vv_f16m1(_bias0, vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp135a, 8.f, _tmp135b, vl), 4.f, _tmp135c, vl), vl);
                        vfloat16m1_t _out05 = vfadd_vv_f16m1(_bias0, vfadd_vv_f16m1(vfadd_vv_f16m1(_tmp07, _tmp135a, vl), vfmacc_vf_f16m1(_tmp135c, 32.f, _tmp135b, vl), vl), vl);
                        vse16_v_f16m1(output0 + packn, _out01, vl);
                        vse16_v_f16m1(output0 + packn * 3, _out03, vl);
                        vse16_v_f16m1(output0 + packn * 5, _out05, vl);

                        output0 += outw * packn;
                    }
                }
            }
        }
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
// Pre-transform a 3x3 fp32 kernel into the 6x6 winograd-F(4,3) domain and
// repack it as packn-interleaved fp16 for conv3x3s1_winograd42_packn_fp16sa_rvv.
// Stage 1 computes G * g * G^T per (outch, inch) pair in fp32; stage 2
// interleaves and narrows to __fp16.
static void conv3x3s1_winograd42_transform_kernel_packn_fp16sa_rvv(const Mat& kernel, Mat& kernel_tm_packn, int inch, int outch)
{
    const int packn = csrr_vlenb() / 2;

    // winograd42 transform kernel
    Mat kernel_tm;
    kernel_tm.create(6 * 6, inch, outch);

    // transform matrix G for F(4,3)
    const float ktm[6][3] = {
        {1.0f / 4, 0.0f, 0.0f},
        {-1.0f / 6, -1.0f / 6, -1.0f / 6},
        {-1.0f / 6, 1.0f / 6, -1.0f / 6},
        {1.0f / 24, 1.0f / 12, 1.0f / 6},
        {1.0f / 24, -1.0f / 12, 1.0f / 6},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for
    for (int oc = 0; oc < outch; oc++)
    {
        for (int ic = 0; ic < inch; ic++)
        {
            const float* k9 = (const float*)kernel + (oc * inch + ic) * 9;
            float* ktm_out = kernel_tm.channel(oc).row(ic);

            // the three rows of the 3x3 kernel
            const float* kr0 = k9;
            const float* kr1 = k9 + 3;
            const float* kr2 = k9 + 6;

            // horizontal pass: tmp[i][r] = G[i] . kernel_row[r]
            float tmp[6][3];
            for (int i = 0; i < 6; i++)
            {
                tmp[i][0] = kr0[0] * ktm[i][0] + kr0[1] * ktm[i][1] + kr0[2] * ktm[i][2];
                tmp[i][1] = kr1[0] * ktm[i][0] + kr1[1] * ktm[i][1] + kr1[2] * ktm[i][2];
                tmp[i][2] = kr2[0] * ktm[i][0] + kr2[1] * ktm[i][1] + kr2[2] * ktm[i][2];
            }

            // vertical pass: ktm_out[j][i] = tmp[j] . G[i]
            for (int j = 0; j < 6; j++)
            {
                const float* t = tmp[j];
                for (int i = 0; i < 6; i++)
                {
                    ktm_out[j * 6 + i] = t[0] * ktm[i][0] + t[1] * ktm[i][1] + t[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave and narrow to fp16
    // src = 36-inch-outch
    // dst = pb-pa-inch/pa-36-outch/pb
    kernel_tm_packn.create(inch / packn, 36, outch / packn, (size_t)2u * packn * packn, packn * packn);

    for (int q = 0; q + (packn - 1) < outch; q += packn)
    {
        Mat g0 = kernel_tm_packn.channel(q / packn);

        for (int k = 0; k < 36; k++)
        {
            __fp16* g00 = g0.row<__fp16>(k);

            for (int p = 0; p + (packn - 1) < inch; p += packn)
            {
                for (int i = 0; i < packn; i++)
                {
                    for (int j = 0; j < packn; j++)
                    {
                        const float* k00 = kernel_tm.channel(q + j).row(p + i);
                        *g00++ = (__fp16)k00[k];
                    }
                }
            }
        }
    }
}
// Winograd F(4x4, 3x3) convolution for 3x3 stride-1 kernels using RVV with
// fp16 storage and fp16 arithmetic ("fp16sa"), on packn-packed channel data.
// Pipeline: pad input -> 6x6 input transform (B^T d B) -> batched dot product
// in transform space -> 4x4 output transform (A^T m A) + bias -> crop padding.
//
// Parameters:
//   bottom_blob - input feature map, elempack == packn (fp16, 2 bytes/elem)
//   top_blob    - output feature map, pre-allocated by the caller
//   kernel_tm   - pre-transformed weights (layout produced by the packing
//                 routine above -- assumed row-major per (r, inch) pair;
//                 TODO confirm against conv3x3s1_winograd42_transform_kernel)
//   _bias       - per-output-channel bias, packn floats per channel, may be empty
//   opt         - thread count and workspace allocator options
static void conv3x3s1_winograd42_packn_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
    // packn = number of 16-bit lanes in one vector register (VLEN bytes / 2).
    const int packn = csrr_vlenb() / 2;
    const word_type vl = vsetvl_e16m1(packn);

    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 4n+2: each 6x6 input tile yields a 4x4 output tile, and
    // neighbouring tiles overlap by 2 pixels (the 3x3 kernel halo).
    Mat bottom_blob_bordered = bottom_blob;
    outw = (outw + 3) / 4 * 4;
    outh = (outh + 3) / 4 * 4;
    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);

    const __fp16* bias = _bias;

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        const int tiles = w_tm / 6 * h_tm / 6;

        // 36 rows: one per position in the 6x6 transform domain.
        bottom_blob_tm.create(tiles, 36, inch, 2u * elempack, elempack, opt.workspace_allocator);

        // const float itm[4][4] = {
        //     {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
        //     {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
        //     {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
        //     {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
        // };

        // 0 = 4 * r00 - 5 * r02 + r04
        // 1 = -4 * (r01 + r02) + r04 + r03
        // 2 = 4 * (r01 - r02) + r04 - r03
        // 3 = -2 * (r01 - r03) + r04 - r02
        // 4 = 2 * (r01 - r03) + r04 - r02
        // 5 = 4 * r01 - 5 * r03 + r05

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < inch; q++)
        {
            const Mat img0 = bottom_blob_bordered.channel(q);
            Mat img0_tm = bottom_blob_tm.channel(q);

            // NOTE c99 variable length array
            __fp16 tmp[6][6][packn];

            // tile
            for (int i = 0; i < h_tm / 6; i++)
            {
                for (int j = 0; j < w_tm / 6; j++)
                {
                    const __fp16* r0 = img0.row<const __fp16>(i * 4) + (j * 4) * packn;

                    // First pass: transform the 6 rows of the tile into tmp.
                    for (int m = 0; m < 6; m++)
                    {
                        vfloat16m1_t _r00 = vle16_v_f16m1(r0, vl);
                        vfloat16m1_t _r01 = vle16_v_f16m1(r0 + packn, vl);
                        vfloat16m1_t _r02 = vle16_v_f16m1(r0 + packn * 2, vl);
                        vfloat16m1_t _r03 = vle16_v_f16m1(r0 + packn * 3, vl);
                        vfloat16m1_t _r04 = vle16_v_f16m1(r0 + packn * 4, vl);
                        vfloat16m1_t _r05 = vle16_v_f16m1(r0 + packn * 5, vl);

                        vfloat16m1_t _tmp0m = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_r04, 4.f, _r00, vl), -5.f, _r02, vl);
                        vfloat16m1_t _tmp1m = vfmacc_vf_f16m1(vfadd_vv_f16m1(_r04, _r03, vl), -4.f, vfadd_vv_f16m1(_r01, _r02, vl), vl);
                        vfloat16m1_t _tmp2m = vfmacc_vf_f16m1(vfsub_vv_f16m1(_r04, _r03, vl), 4.f, vfsub_vv_f16m1(_r01, _r02, vl), vl);
                        vfloat16m1_t _tmp3m = vfmacc_vf_f16m1(vfsub_vv_f16m1(_r04, _r02, vl), -2.f, vfsub_vv_f16m1(_r01, _r03, vl), vl);
                        vfloat16m1_t _tmp4m = vfmacc_vf_f16m1(vfsub_vv_f16m1(_r04, _r02, vl), 2.f, vfsub_vv_f16m1(_r01, _r03, vl), vl);
                        vfloat16m1_t _tmp5m = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_r05, 4.f, _r01, vl), -5.f, _r03, vl);

                        // tmp is written transposed (tmp[out][m]) so the
                        // second pass reads columns as rows.
                        vse16_v_f16m1(tmp[0][m], _tmp0m, vl);
                        vse16_v_f16m1(tmp[1][m], _tmp1m, vl);
                        vse16_v_f16m1(tmp[2][m], _tmp2m, vl);
                        vse16_v_f16m1(tmp[3][m], _tmp3m, vl);
                        vse16_v_f16m1(tmp[4][m], _tmp4m, vl);
                        vse16_v_f16m1(tmp[5][m], _tmp5m, vl);

                        r0 += w * packn;
                    }

                    // Second pass: transform tmp column-wise and scatter into
                    // bottom_blob_tm, one destination row per transform index.
                    __fp16* r0_tm_0 = (__fp16*)img0_tm + (i * w_tm / 6 + j) * packn;
                    __fp16* r0_tm_1 = r0_tm_0 + tiles * packn;
                    __fp16* r0_tm_2 = r0_tm_0 + tiles * packn * 2;
                    __fp16* r0_tm_3 = r0_tm_0 + tiles * packn * 3;
                    __fp16* r0_tm_4 = r0_tm_0 + tiles * packn * 4;
                    __fp16* r0_tm_5 = r0_tm_0 + tiles * packn * 5;

                    for (int m = 0; m < 6; m++)
                    {
                        vfloat16m1_t _tmp00 = vle16_v_f16m1(tmp[m][0], vl);
                        vfloat16m1_t _tmp01 = vle16_v_f16m1(tmp[m][1], vl);
                        vfloat16m1_t _tmp02 = vle16_v_f16m1(tmp[m][2], vl);
                        vfloat16m1_t _tmp03 = vle16_v_f16m1(tmp[m][3], vl);
                        vfloat16m1_t _tmp04 = vle16_v_f16m1(tmp[m][4], vl);
                        vfloat16m1_t _tmp05 = vle16_v_f16m1(tmp[m][5], vl);

                        vfloat16m1_t _r0tm0 = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp04, 4.f, _tmp00, vl), -5.f, _tmp02, vl);
                        vfloat16m1_t _r0tm1 = vfmacc_vf_f16m1(vfadd_vv_f16m1(_tmp04, _tmp03, vl), -4.f, vfadd_vv_f16m1(_tmp01, _tmp02, vl), vl);
                        vfloat16m1_t _r0tm2 = vfmacc_vf_f16m1(vfsub_vv_f16m1(_tmp04, _tmp03, vl), 4.f, vfsub_vv_f16m1(_tmp01, _tmp02, vl), vl);
                        vfloat16m1_t _r0tm3 = vfmacc_vf_f16m1(vfsub_vv_f16m1(_tmp04, _tmp02, vl), -2.f, vfsub_vv_f16m1(_tmp01, _tmp03, vl), vl);
                        vfloat16m1_t _r0tm4 = vfmacc_vf_f16m1(vfsub_vv_f16m1(_tmp04, _tmp02, vl), 2.f, vfsub_vv_f16m1(_tmp01, _tmp03, vl), vl);
                        vfloat16m1_t _r0tm5 = vfmacc_vf_f16m1(vfmacc_vf_f16m1(_tmp05, 4.f, _tmp01, vl), -5.f, _tmp03, vl);

                        vse16_v_f16m1(r0_tm_0, _r0tm0, vl);
                        vse16_v_f16m1(r0_tm_1, _r0tm1, vl);
                        vse16_v_f16m1(r0_tm_2, _r0tm2, vl);
                        vse16_v_f16m1(r0_tm_3, _r0tm3, vl);
                        vse16_v_f16m1(r0_tm_4, _r0tm4, vl);
                        vse16_v_f16m1(r0_tm_5, _r0tm5, vl);

                        r0_tm_0 += tiles * packn * 6;
                        r0_tm_1 += tiles * packn * 6;
                        r0_tm_2 += tiles * packn * 6;
                        r0_tm_3 += tiles * packn * 6;
                        r0_tm_4 += tiles * packn * 6;
                        r0_tm_5 += tiles * packn * 6;
                    }
                }
            }
        }
    }
    bottom_blob_bordered = Mat();
    // END transform input

    // BEGIN dot
    Mat top_blob_tm;
    {
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        const int tiles = h_tm / 6 * w_tm / 6;

        // permute: regroup tiles into batches of 8/4/2/1 so the GEMM below
        // reads its operands contiguously.
        // bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
        Mat bottom_blob_tm2;
        if (tiles >= 8)
            bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
        else if (tiles >= 4)
            bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
        else if (tiles >= 2)
            bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
        else // if (tiles >= 1)
            bottom_blob_tm2.create(1 * inch, tiles, 36, 2u * elempack, elempack, opt.workspace_allocator);

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int r = 0; r < 36; r++)
        {
            Mat tm2 = bottom_blob_tm2.channel(r);

            // tile
            int i = 0;
            for (; i + 7 < tiles; i += 8)
            {
                __fp16* tmpptr = tm2.row<__fp16>(i / 8);

                const __fp16* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * packn;

                for (int q = 0; q < inch; q++)
                {
#if RVV_SPEC_0_7
                    // Scalar interleave fallback: segment stores are not
                    // available on the 0.7.1 vector spec toolchain.
                    for (int l = 0; l < packn; l++)
                    {
                        tmpptr[0] = r0[l];
                        tmpptr[1] = r0[l + packn];
                        tmpptr[2] = r0[l + packn * 2];
                        tmpptr[3] = r0[l + packn * 3];
                        tmpptr[4] = r0[l + packn * 4];
                        tmpptr[5] = r0[l + packn * 5];
                        tmpptr[6] = r0[l + packn * 6];
                        tmpptr[7] = r0[l + packn * 7];
                        tmpptr += 8;
                    }

                    r0 += bottom_blob_tm.cstep * packn;
#else
                    // Interleave 8 tiles with a single segment store.
                    vfloat16m1_t _val0 = vle16_v_f16m1(r0, vl);
                    vfloat16m1_t _val1 = vle16_v_f16m1(r0 + packn, vl);
                    vfloat16m1_t _val2 = vle16_v_f16m1(r0 + packn * 2, vl);
                    vfloat16m1_t _val3 = vle16_v_f16m1(r0 + packn * 3, vl);
                    vfloat16m1_t _val4 = vle16_v_f16m1(r0 + packn * 4, vl);
                    vfloat16m1_t _val5 = vle16_v_f16m1(r0 + packn * 5, vl);
                    vfloat16m1_t _val6 = vle16_v_f16m1(r0 + packn * 6, vl);
                    vfloat16m1_t _val7 = vle16_v_f16m1(r0 + packn * 7, vl);
                    vsseg8e16_v_f16m1x8(tmpptr, vcreate_f16m1x8(_val0, _val1, _val2, _val3, _val4, _val5, _val6, _val7), vl);

                    r0 += bottom_blob_tm.cstep * packn;
                    tmpptr += packn * 8;
#endif
                }
            }
            for (; i + 3 < tiles; i += 4)
            {
                __fp16* tmpptr = tm2.row<__fp16>(i / 8 + (i % 8) / 4);

                const __fp16* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * packn;

                for (int q = 0; q < inch; q++)
                {
#if RVV_SPEC_0_7
                    for (int l = 0; l < packn; l++)
                    {
                        tmpptr[0] = r0[l];
                        tmpptr[1] = r0[l + packn];
                        tmpptr[2] = r0[l + packn * 2];
                        tmpptr[3] = r0[l + packn * 3];
                        tmpptr += 4;
                    }

                    r0 += bottom_blob_tm.cstep * packn;
#else
                    vfloat16m1_t _val0 = vle16_v_f16m1(r0, vl);
                    vfloat16m1_t _val1 = vle16_v_f16m1(r0 + packn, vl);
                    vfloat16m1_t _val2 = vle16_v_f16m1(r0 + packn * 2, vl);
                    vfloat16m1_t _val3 = vle16_v_f16m1(r0 + packn * 3, vl);
                    vsseg4e16_v_f16m1x4(tmpptr, vcreate_f16m1x4(_val0, _val1, _val2, _val3), vl);

                    r0 += bottom_blob_tm.cstep * packn;
                    tmpptr += packn * 4;
#endif
                }
            }
            for (; i + 1 < tiles; i += 2)
            {
                __fp16* tmpptr = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2);

                const __fp16* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * packn;

                for (int q = 0; q < inch; q++)
                {
#if RVV_SPEC_0_7
                    for (int l = 0; l < packn; l++)
                    {
                        tmpptr[0] = r0[l];
                        tmpptr[1] = r0[l + packn];
                        tmpptr += 2;
                    }

                    r0 += bottom_blob_tm.cstep * packn;
#else
                    vfloat16m1_t _val0 = vle16_v_f16m1(r0, vl);
                    vfloat16m1_t _val1 = vle16_v_f16m1(r0 + packn, vl);
                    vsseg2e16_v_f16m1x2(tmpptr, vcreate_f16m1x2(_val0, _val1), vl);

                    r0 += bottom_blob_tm.cstep * packn;
                    tmpptr += packn * 2;
#endif
                }
            }
            for (; i < tiles; i++)
            {
                __fp16* tmpptr = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);

                const __fp16* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * packn;

                for (int q = 0; q < inch; q++)
                {
                    vfloat16m1_t _val = vle16_v_f16m1(r0, vl);
                    vse16_v_f16m1(tmpptr, _val, vl);

                    r0 += bottom_blob_tm.cstep * packn;
                    tmpptr += packn;
                }
            }
        }

        bottom_blob_tm = Mat();
        // permute end

        top_blob_tm.create(tiles, 36, outch, 2u * elempack, elempack, opt.workspace_allocator);

        // GEMM in transform space: for each output channel and each of the
        // 36 transform positions, accumulate over inch * packn scalars.
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outch; p++)
        {
            __fp16* output0_tm = top_blob_tm.channel(p);

            const Mat kernel0_tm = kernel_tm.channel(p);

            for (int r = 0; r < 36; r++)
            {
                const Mat bb2 = bottom_blob_tm2.channel(r);

                int i = 0;
                for (; i + 7 < tiles; i += 8)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8);
                    const __fp16* k0 = kernel0_tm.row<const __fp16>(r);

                    int nn = inch * packn; // inch always > 0

                    vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum2 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum3 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum4 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum5 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum6 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum7 = vfmv_v_f_f16m1(0.f, vl);

                    for (int j = 0; j < nn; j++)
                    {
                        __fp16 val0 = *r0++;
                        __fp16 val1 = *r0++;
                        __fp16 val2 = *r0++;
                        __fp16 val3 = *r0++;
                        __fp16 val4 = *r0++;
                        __fp16 val5 = *r0++;
                        __fp16 val6 = *r0++;
                        __fp16 val7 = *r0++;
                        vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl);
                        _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl);
                        _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl);
                        _sum2 = vfmacc_vf_f16m1(_sum2, val2, _w0, vl);
                        _sum3 = vfmacc_vf_f16m1(_sum3, val3, _w0, vl);
                        _sum4 = vfmacc_vf_f16m1(_sum4, val4, _w0, vl);
                        _sum5 = vfmacc_vf_f16m1(_sum5, val5, _w0, vl);
                        _sum6 = vfmacc_vf_f16m1(_sum6, val6, _w0, vl);
                        _sum7 = vfmacc_vf_f16m1(_sum7, val7, _w0, vl);

                        k0 += packn;
                    }

                    vse16_v_f16m1(output0_tm, _sum0, vl);
                    vse16_v_f16m1(output0_tm + packn, _sum1, vl);
                    vse16_v_f16m1(output0_tm + packn * 2, _sum2, vl);
                    vse16_v_f16m1(output0_tm + packn * 3, _sum3, vl);
                    vse16_v_f16m1(output0_tm + packn * 4, _sum4, vl);
                    vse16_v_f16m1(output0_tm + packn * 5, _sum5, vl);
                    vse16_v_f16m1(output0_tm + packn * 6, _sum6, vl);
                    vse16_v_f16m1(output0_tm + packn * 7, _sum7, vl);

                    output0_tm += packn * 8;
                }
                for (; i + 3 < tiles; i += 4)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4);
                    const __fp16* k0 = kernel0_tm.row<const __fp16>(r);

                    int nn = inch * packn; // inch always > 0

                    vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum2 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum3 = vfmv_v_f_f16m1(0.f, vl);

                    for (int j = 0; j < nn; j++)
                    {
                        __fp16 val0 = *r0++;
                        __fp16 val1 = *r0++;
                        __fp16 val2 = *r0++;
                        __fp16 val3 = *r0++;
                        vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl);
                        _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl);
                        _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl);
                        _sum2 = vfmacc_vf_f16m1(_sum2, val2, _w0, vl);
                        _sum3 = vfmacc_vf_f16m1(_sum3, val3, _w0, vl);

                        k0 += packn;
                    }

                    vse16_v_f16m1(output0_tm, _sum0, vl);
                    vse16_v_f16m1(output0_tm + packn, _sum1, vl);
                    vse16_v_f16m1(output0_tm + packn * 2, _sum2, vl);
                    vse16_v_f16m1(output0_tm + packn * 3, _sum3, vl);

                    output0_tm += packn * 4;
                }
                for (; i + 1 < tiles; i += 2)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2);
                    const __fp16* k0 = kernel0_tm.row<const __fp16>(r);

                    int nn = inch * packn; // inch always > 0

                    vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl);

                    for (int j = 0; j < nn; j++)
                    {
                        __fp16 val0 = *r0++;
                        __fp16 val1 = *r0++;
                        vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl);
                        _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl);
                        _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl);

                        k0 += packn;
                    }

                    vse16_v_f16m1(output0_tm, _sum0, vl);
                    vse16_v_f16m1(output0_tm + packn, _sum1, vl);

                    output0_tm += packn * 2;
                }
                for (; i < tiles; i++)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
                    const __fp16* k0 = kernel0_tm.row<const __fp16>(r);

                    int nn = inch * packn; // inch always > 0

                    vfloat16m1_t _sum = vfmv_v_f_f16m1(0.f, vl);

                    for (int j = 0; j < nn; j++)
                    {
                        __fp16 val = *r0++;
                        vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl);
                        _sum = vfmacc_vf_f16m1(_sum, val, _w0, vl);

                        k0 += packn;
                    }

                    vse16_v_f16m1(output0_tm, _sum, vl);

                    output0_tm += packn;
                }
            }
        }
    }
    // NOTE: bottom_blob_tm was already released inside the dot scope above;
    // this second release is a harmless no-op kept for symmetry.
    bottom_blob_tm = Mat();
    // END dot

    // BEGIN transform output
    Mat top_blob_bordered;
    if (outw == top_blob.w && outh == top_blob.h)
    {
        // no padding was added; write directly into the caller's blob
        top_blob_bordered = top_blob;
    }
    else
    {
        top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
    }
    {
        // const float otm[4][6] = {
        //     {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
        //     {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
        // };

        // 0 = r00 + (r01 + r02) + (r03 + r04)
        // 1 = (r01 - r02) + (r03 - r04) * 2
        // 2 = (r01 + r02) + (r03 + r04) * 4
        // 3 = r05 + (r01 - r02) + (r03 - r04) * 8

        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;
        const int tiles = w_tm / 6 * h_tm / 6;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outch; p++)
        {
            const Mat out0_tm = top_blob_tm.channel(p);
            Mat out0 = top_blob_bordered.channel(p);

            // const float bias0 = bias ? bias[p] : 0.f;
            vfloat16m1_t _bias0 = bias ? vle16_v_f16m1((const __fp16*)bias + p * packn, vl) : vfmv_v_f_f16m1(0.f, vl);

            // NOTE variable length array
            __fp16 tmp[4][6][packn];

            // tile
            for (int i = 0; i < outh / 4; i++)
            {
                for (int j = 0; j < outw / 4; j++)
                {
                    // top_blob_tm.create(tiles, 36, outch, elemsize, elempack);

                    const __fp16* output0_tm_0 = (const __fp16*)out0_tm + (i * w_tm / 6 + j) * packn;
                    const __fp16* output0_tm_1 = output0_tm_0 + tiles * packn;
                    const __fp16* output0_tm_2 = output0_tm_0 + tiles * packn * 2;
                    const __fp16* output0_tm_3 = output0_tm_0 + tiles * packn * 3;
                    const __fp16* output0_tm_4 = output0_tm_0 + tiles * packn * 4;
                    const __fp16* output0_tm_5 = output0_tm_0 + tiles * packn * 5;

                    __fp16* output0 = out0.row<__fp16>(i * 4) + (j * 4) * packn;

                    // TODO rvv optimize
                    for (int m = 0; m < 6; m++)
                    {
                        vfloat16m1_t _out0tm0 = vle16_v_f16m1(output0_tm_0, vl);
                        vfloat16m1_t _out0tm1 = vle16_v_f16m1(output0_tm_1, vl);
                        vfloat16m1_t _out0tm2 = vle16_v_f16m1(output0_tm_2, vl);
                        vfloat16m1_t _out0tm3 = vle16_v_f16m1(output0_tm_3, vl);
                        vfloat16m1_t _out0tm4 = vle16_v_f16m1(output0_tm_4, vl);
                        vfloat16m1_t _out0tm5 = vle16_v_f16m1(output0_tm_5, vl);

                        vfloat16m1_t _tmp02a = vfadd_vv_f16m1(_out0tm1, _out0tm2, vl);
                        vfloat16m1_t _tmp13a = vfsub_vv_f16m1(_out0tm1, _out0tm2, vl);

                        vfloat16m1_t _tmp02b = vfadd_vv_f16m1(_out0tm3, _out0tm4, vl);
                        vfloat16m1_t _tmp13b = vfsub_vv_f16m1(_out0tm3, _out0tm4, vl);

                        vfloat16m1_t _tmp0m = vfadd_vv_f16m1(vfadd_vv_f16m1(_out0tm0, _tmp02a, vl), _tmp02b, vl);
                        vfloat16m1_t _tmp1m = vfmacc_vf_f16m1(_tmp13a, 2.f, _tmp13b, vl);
                        vfloat16m1_t _tmp2m = vfmacc_vf_f16m1(_tmp02a, 4.f, _tmp02b, vl);
                        vfloat16m1_t _tmp3m = vfmacc_vf_f16m1(vfadd_vv_f16m1(_out0tm5, _tmp13a, vl), 8.f, _tmp13b, vl);

                        vse16_v_f16m1(tmp[0][m], _tmp0m, vl);
                        vse16_v_f16m1(tmp[1][m], _tmp1m, vl);
                        vse16_v_f16m1(tmp[2][m], _tmp2m, vl);
                        vse16_v_f16m1(tmp[3][m], _tmp3m, vl);

                        output0_tm_0 += tiles * packn * 6;
                        output0_tm_1 += tiles * packn * 6;
                        output0_tm_2 += tiles * packn * 6;
                        output0_tm_3 += tiles * packn * 6;
                        output0_tm_4 += tiles * packn * 6;
                        output0_tm_5 += tiles * packn * 6;
                    }

                    for (int m = 0; m < 4; m++)
                    {
                        vfloat16m1_t _tmp00 = vle16_v_f16m1(tmp[m][0], vl);
                        vfloat16m1_t _tmp01 = vle16_v_f16m1(tmp[m][1], vl);
                        vfloat16m1_t _tmp02 = vle16_v_f16m1(tmp[m][2], vl);
                        vfloat16m1_t _tmp03 = vle16_v_f16m1(tmp[m][3], vl);
                        vfloat16m1_t _tmp04 = vle16_v_f16m1(tmp[m][4], vl);
                        vfloat16m1_t _tmp05 = vle16_v_f16m1(tmp[m][5], vl);

                        vfloat16m1_t _tmp02a = vfadd_vv_f16m1(_tmp01, _tmp02, vl);
                        vfloat16m1_t _tmp13a = vfsub_vv_f16m1(_tmp01, _tmp02, vl);

                        vfloat16m1_t _tmp02b = vfadd_vv_f16m1(_tmp03, _tmp04, vl);
                        vfloat16m1_t _tmp13b = vfsub_vv_f16m1(_tmp03, _tmp04, vl);

                        // bias is added once here, after both transform passes
                        vfloat16m1_t _out00 = vfadd_vv_f16m1(_bias0, vfadd_vv_f16m1(vfadd_vv_f16m1(_tmp00, _tmp02a, vl), _tmp02b, vl), vl);
                        vfloat16m1_t _out01 = vfadd_vv_f16m1(_bias0, vfmacc_vf_f16m1(_tmp13a, 2.f, _tmp13b, vl), vl);
                        vfloat16m1_t _out02 = vfadd_vv_f16m1(_bias0, vfmacc_vf_f16m1(_tmp02a, 4.f, _tmp02b, vl), vl);
                        vfloat16m1_t _out03 = vfadd_vv_f16m1(_bias0, vfmacc_vf_f16m1(vfadd_vv_f16m1(_tmp05, _tmp13a, vl), 8.f, _tmp13b, vl), vl);

                        vse16_v_f16m1(output0, _out00, vl);
                        vse16_v_f16m1(output0 + packn, _out01, vl);
                        vse16_v_f16m1(output0 + packn * 2, _out02, vl);
                        vse16_v_f16m1(output0 + packn * 3, _out03, vl);

                        output0 += outw * packn;
                    }
                }
            }
        }
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
|
cache.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC AAA CCCC H H EEEEE %
% C A A C H H E %
% C AAAAA C HHHHH EEE %
% C A A C H H E %
% CCCC A A CCCC H H EEEEE %
% %
% %
% MagickCore Pixel Cache Methods %
% %
% Software Design %
% Cristy %
% July 1999 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite-private.h"
#include "magick/distribute-cache-private.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/geometry.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/memory-private.h"
#include "magick/nt-base-private.h"
#include "magick/option.h"
#include "magick/pixel.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/policy.h"
#include "magick/quantum.h"
#include "magick/random_.h"
#include "magick/registry.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/splay-tree.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/timer-private.h"
#include "magick/utility.h"
#include "magick/utility-private.h"
#if defined(MAGICKCORE_ZLIB_DELEGATE)
#include "zlib.h"
#endif
/*
  Define declarations.
*/
/* True on the extent boundary used for progress reporting. */
#define CacheTick(offset,extent) QuantumTick((MagickOffsetType) offset,extent)
/* True when open cache files exceed the configured file-descriptor limit. */
#define IsFileDescriptorLimitExceeded() (GetMagickResource(FileResource) > \
  GetMagickResourceLimit(FileResource) ? MagickTrue : MagickFalse)

/*
  Typedef declarations.
*/
/* Result of a Euclidean-style division, used for virtual-pixel tiling. */
typedef struct _MagickModulo
{
  ssize_t
    quotient,
    remainder;
} MagickModulo;

/*
  Forward declarations.
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

static Cache
  GetImagePixelCache(Image *,const MagickBooleanType,ExceptionInfo *)
    magick_hot_spot;

static const IndexPacket
  *GetVirtualIndexesFromCache(const Image *);

static const PixelPacket
  *GetVirtualPixelCache(const Image *,const VirtualPixelMethod,const ssize_t,
    const ssize_t,const size_t,const size_t,ExceptionInfo *),
  *GetVirtualPixelsCache(const Image *);

static MagickBooleanType
  GetOneAuthenticPixelFromCache(Image *,const ssize_t,const ssize_t,
    PixelPacket *,ExceptionInfo *),
  GetOneVirtualPixelFromCache(const Image *,const VirtualPixelMethod,
    const ssize_t,const ssize_t,PixelPacket *,ExceptionInfo *),
  OpenPixelCache(Image *,const MapMode,ExceptionInfo *),
  OpenPixelCacheOnDisk(CacheInfo *,const MapMode),
  ReadPixelCacheIndexes(CacheInfo *magick_restrict,NexusInfo *magick_restrict,
    ExceptionInfo *),
  ReadPixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict,
    ExceptionInfo *),
  SyncAuthenticPixelsCache(Image *,ExceptionInfo *),
  WritePixelCacheIndexes(CacheInfo *,NexusInfo *magick_restrict,
    ExceptionInfo *),
  WritePixelCachePixels(CacheInfo *,NexusInfo *magick_restrict,
    ExceptionInfo *);

static PixelPacket
  *GetAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
    const size_t,ExceptionInfo *),
  *QueueAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
    const size_t,ExceptionInfo *),
  *SetPixelCacheNexusPixels(const CacheInfo *magick_restrict,const MapMode,
    const ssize_t,const ssize_t,const size_t,const size_t,
    const MagickBooleanType,NexusInfo *magick_restrict,ExceptionInfo *)
    magick_hot_spot;

#if defined(MAGICKCORE_OPENCL_SUPPORT)
static void
  CopyOpenCLBuffer(CacheInfo *magick_restrict);
#endif

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif

/*
  Global declarations.
*/
/* Serializes access to the module-wide cache state below. */
static SemaphoreInfo
  *cache_semaphore = (SemaphoreInfo *) NULL;

/* -1 = undetermined; otherwise whether anonymous memory mapping is in use. */
static ssize_t
  cache_anonymous_memory = (-1);

/* Timestamp used to age persistent pixel-cache files. */
static time_t
  cache_epoch = 0;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
  Release every OpenCL resource owned by `info' (pending events, the event
  array, its semaphore, and the device buffer), then free the structure
  itself.  Always returns NULL so callers can reset their pointer in one
  statement.
*/
static inline OpenCLCacheInfo *RelinquishOpenCLCacheInfo(MagickCLEnv clEnv,
  OpenCLCacheInfo *info)
{
  ssize_t
    event_index;

  event_index=0;
  while (event_index < (ssize_t) info->event_count)
  {
    clEnv->library->clReleaseEvent(info->events[event_index]);
    event_index++;
  }
  info->events=(cl_event *) RelinquishMagickMemory(info->events);
  DestroySemaphoreInfo(&info->events_semaphore);
  if (info->buffer != (cl_mem) NULL)
    {
      clEnv->library->clReleaseMemObject(info->buffer);
      info->buffer=(cl_mem) NULL;
    }
  return((OpenCLCacheInfo *) RelinquishMagickMemory(info));
}
/*
  OpenCL event callback that frees pixel-cache memory only once every queued
  OpenCL command touching it has completed.  If any event is still running,
  it re-registers itself as that event's completion callback and returns;
  otherwise it releases the OpenCL cache info and the pixel buffer.
  The signature matches clSetEventCallback(); the event arguments are unused
  because the state is carried in user_data.
*/
static void CL_API_CALL RelinquishPixelCachePixelsDelayed(
  cl_event magick_unused(event),cl_int magick_unused(event_command_exec_status),
  void *user_data)
{
  MagickCLEnv
    clEnv;

  OpenCLCacheInfo
    *info;

  PixelPacket
    *pixels;

  ssize_t
    i;

  magick_unreferenced(event);
  magick_unreferenced(event_command_exec_status);
  info=(OpenCLCacheInfo *) user_data;
  clEnv=GetDefaultOpenCLEnv();
  /* scan newest-to-oldest; any still-executing event defers the release */
  for (i=(ssize_t)info->event_count-1; i >= 0; i--)
  {
    cl_int
      event_status;

    cl_uint
      status;

    status=clEnv->library->clGetEventInfo(info->events[i],
      CL_EVENT_COMMAND_EXECUTION_STATUS,sizeof(cl_int),&event_status,NULL);
    if ((status == CL_SUCCESS) && (event_status > CL_COMPLETE))
      {
        /* not complete yet: try again when this event finishes */
        clEnv->library->clSetEventCallback(info->events[i],CL_COMPLETE,
          &RelinquishPixelCachePixelsDelayed,info);
        return;
      }
  }
  /* all events complete: safe to free the pixels and the bookkeeping */
  pixels=info->pixels;
  RelinquishMagickResource(MemoryResource,info->length);
  (void) RelinquishOpenCLCacheInfo(clEnv,info);
  (void) RelinquishAlignedMemory(pixels);
}
/*
  Release the OpenCL buffer (and associated pixel memory) attached to the
  pixel cache, deferring the actual free until all pending OpenCL events on
  it have completed.  Returns MagickTrue if a buffer was scheduled for
  release, MagickFalse if the cache has no OpenCL state.

  Fix: removed the local `clEnv' variable, which was declared but never
  used (RelinquishPixelCachePixelsDelayed obtains the environment itself).
*/
static MagickBooleanType RelinquishOpenCLBuffer(
  CacheInfo *magick_restrict cache_info)
{
  assert(cache_info != (CacheInfo *) NULL);
  if (cache_info->opencl == (OpenCLCacheInfo *) NULL)
    return(MagickFalse);
  RelinquishPixelCachePixelsDelayed((cl_event) NULL,0,cache_info->opencl);
  return(MagickTrue);
}
/*
  Snapshot the list of pending OpenCL events under the events semaphore.
  On success returns a newly allocated copy of the event array and stores
  its length in *event_count; on allocation failure (or when there are no
  events) returns NULL with *event_count set to 0.  The caller owns the
  returned memory.
*/
static cl_event *CopyOpenCLEvents(OpenCLCacheInfo *opencl_info,
  cl_uint *event_count)
{
  cl_event
    *events;

  assert(opencl_info != (OpenCLCacheInfo *) NULL);
  events=(cl_event *) NULL;
  LockSemaphoreInfo(opencl_info->events_semaphore);
  *event_count=opencl_info->event_count;
  if (*event_count != 0)
    {
      events=(cl_event *) AcquireQuantumMemory(*event_count,sizeof(*events));
      if (events != (cl_event *) NULL)
        {
          size_t
            j;

          for (j=0; j < opencl_info->event_count; j++)
            events[j]=opencl_info->events[j];
        }
      else
        *event_count=0;
    }
  UnlockSemaphoreInfo(opencl_info->events_semaphore);
  return(events);
}
#endif
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A d d O p e n C L E v e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AddOpenCLEvent() adds an event to the list of operations the next operation
% should wait for.
%
% The format of the AddOpenCLEvent() method is:
%
% void AddOpenCLEvent(const Image *image,cl_event event)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o event: the event that should be added.
%
*/
/*
  Append `event' to the pixel cache's list of outstanding OpenCL events so
  later operations (and the deferred pixel release) can wait on it.  The
  event is retained first; if the retain fails we block on the event instead
  of tracking it.  The array append runs under the events semaphore; an
  allocation failure is fatal (the process throws and exits), which is why
  the count may be incremented before the NULL check.
*/
extern MagickPrivate void AddOpenCLEvent(const Image *image,cl_event event)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickCLEnv
    clEnv;

  assert(image != (const Image *) NULL);
  assert(event != (cl_event) NULL);
  cache_info=(CacheInfo *)image->cache;
  assert(cache_info->opencl != (OpenCLCacheInfo *) NULL);
  clEnv=GetDefaultOpenCLEnv();
  if (clEnv->library->clRetainEvent(event) != CL_SUCCESS)
    {
      /* cannot keep a reference; synchronize immediately instead */
      clEnv->library->clWaitForEvents(1,&event);
      return;
    }
  LockSemaphoreInfo(cache_info->opencl->events_semaphore);
  if (cache_info->opencl->events == (cl_event *) NULL)
    {
      cache_info->opencl->events=AcquireMagickMemory(sizeof(
        *cache_info->opencl->events));
      cache_info->opencl->event_count=1;
    }
  else
    cache_info->opencl->events=ResizeQuantumMemory(cache_info->opencl->events,
      ++cache_info->opencl->event_count,sizeof(*cache_info->opencl->events));
  if (cache_info->opencl->events == (cl_event *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  cache_info->opencl->events[cache_info->opencl->event_count-1]=event;
  UnlockSemaphoreInfo(cache_info->opencl->events_semaphore);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCache() acquires a pixel cache.
%
% The format of the AcquirePixelCache() method is:
%
% Cache AcquirePixelCache(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
/*
  Allocate and initialize a pixel cache.  The cache starts in the Undefined
  state with one nexus per worker thread (at least one, bounded below by the
  OpenMP maximum and the thread resource limit).  Synchronized-I/O behaviour
  may be forced via the MAGICK_SYNCHRONIZE environment variable and is then
  overridden by the "cache:synchronize" security policy, if set.
  Allocation failure is fatal.
*/
MagickExport Cache AcquirePixelCache(const size_t number_threads)
{
  CacheInfo
    *magick_restrict cache_info;

  char
    *option;

  cache_info=(CacheInfo *) AcquireAlignedMemory(1,sizeof(*cache_info));
  if (cache_info == (CacheInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(cache_info,0,sizeof(*cache_info));
  cache_info->type=UndefinedCache;
  cache_info->mode=IOMode;
  cache_info->disk_mode=IOMode;
  cache_info->colorspace=sRGBColorspace;
  cache_info->channels=4;
  cache_info->file=(-1);
  cache_info->id=GetMagickThreadId();
  /* size the nexus pool: never fewer than the runtime can ask for */
  cache_info->number_threads=number_threads;
  if (GetOpenMPMaximumThreads() > cache_info->number_threads)
    cache_info->number_threads=GetOpenMPMaximumThreads();
  if (GetMagickResourceLimit(ThreadResource) > cache_info->number_threads)
    cache_info->number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  if (cache_info->number_threads == 0)
    cache_info->number_threads=1;
  cache_info->nexus_info=AcquirePixelCacheNexus(cache_info->number_threads);
  /* environment setting first, then the policy takes precedence */
  option=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (option != (char *) NULL)
    {
      cache_info->synchronize=IsStringTrue(option);
      option=DestroyString(option);
    }
  option=GetPolicyValue("cache:synchronize");
  if (option != (char *) NULL)
    {
      cache_info->synchronize=IsStringTrue(option);
      option=DestroyString(option);
    }
  cache_info->width_limit=MagickMin(GetMagickResourceLimit(WidthResource),
    (MagickSizeType) MAGICK_SSIZE_MAX);
  cache_info->height_limit=MagickMin(GetMagickResourceLimit(HeightResource),
    (MagickSizeType) MAGICK_SSIZE_MAX);
  cache_info->semaphore=AllocateSemaphoreInfo();
  cache_info->file_semaphore=AllocateSemaphoreInfo();
  cache_info->reference_count=1;
  cache_info->debug=IsEventLogging();
  cache_info->signature=MagickCoreSignature;
  return((Cache ) cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCacheNexus() allocates the NexusInfo structure.
%
% The format of the AcquirePixelCacheNexus method is:
%
% NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
/*
  Allocate the per-thread nexus table.  The pointer table and the backing
  NexusInfo array are both sized 2*number_threads: the first number_threads
  entries are the primary per-thread nexuses, each paired with a virtual
  nexus taken from the second half.  Allocation failure is fatal.
*/
MagickExport NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
{
  NexusInfo
    **magick_restrict nexus_info;

  ssize_t
    id;

  nexus_info=(NexusInfo **) MagickAssumeAligned(AcquireAlignedMemory(2*
    number_threads,sizeof(*nexus_info)));
  if (nexus_info == (NexusInfo **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  *nexus_info=(NexusInfo *) AcquireQuantumMemory(number_threads,
    2*sizeof(**nexus_info));
  if (*nexus_info == (NexusInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(*nexus_info,0,2*number_threads*sizeof(**nexus_info));
  for (id=0; id < (ssize_t) (2*number_threads); id++)
  {
    nexus_info[id]=(*nexus_info+id);
    /* only primary nexuses carry a virtual nexus from the second half */
    if (id < (ssize_t) number_threads)
      nexus_info[id]->virtual_nexus=(*nexus_info+number_threads+id);
    nexus_info[id]->signature=MagickCoreSignature;
  }
  return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCachePixels() returns the pixels associated with the specified
% image.
%
% The format of the AcquirePixelCachePixels() method is:
%
% const void *AcquirePixelCachePixels(const Image *image,
% MagickSizeType *length,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Return a read-only pointer to the image's in-memory pixel data and store
  its byte length in *length.  Only memory- and map-backed caches expose
  their pixels directly; for any other cache type the result is NULL and
  *length is 0.
*/
MagickExport const void *AcquirePixelCachePixels(const Image *image,
  MagickSizeType *length,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  (void) exception;
  *length=0;
  if ((cache_info->type == MemoryCache) || (cache_info->type == MapCache))
    {
      *length=cache_info->length;
      return((const void *) cache_info->pixels);
    }
  return((const void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t G e n e s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentGenesis() instantiates the cache component.
%
% The format of the CacheComponentGenesis method is:
%
% MagickBooleanType CacheComponentGenesis(void)
%
*/
/*
  Instantiate the cache component: lazily allocate the module-wide cache
  semaphore.  Always succeeds.
*/
MagickExport MagickBooleanType CacheComponentGenesis(void)
{
  if (cache_semaphore != (SemaphoreInfo *) NULL)
    return(MagickTrue);
  cache_semaphore=AllocateSemaphoreInfo();
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t T e r m i n u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentTerminus() destroys the cache component.
%
% The format of the CacheComponentTerminus() method is:
%
% CacheComponentTerminus(void)
%
*/
/*
  Destroy the cache component.  The semaphore is activated first when it
  was never allocated so DestroySemaphoreInfo always receives a valid
  semaphore to tear down.
*/
MagickExport void CacheComponentTerminus(void)
{
  if (cache_semaphore == (SemaphoreInfo *) NULL)
    ActivateSemaphoreInfo(&cache_semaphore);
  DestroySemaphoreInfo(&cache_semaphore);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l i p P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClipPixelCacheNexus() clips the cache nexus as defined by the image clip
% mask. The method returns MagickTrue if the pixel region is clipped,
% otherwise MagickFalse.
%
% The format of the ClipPixelCacheNexus() method is:
%
% MagickBooleanType ClipPixelCacheNexus(Image *image,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to clip.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ClipPixelCacheNexus(Image *image,
  NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const PixelPacket
    *magick_restrict r;

  IndexPacket
    *magick_restrict nexus_indexes,
    *magick_restrict indexes;

  MagickOffsetType
    n;

  NexusInfo
    **magick_restrict clip_nexus;

  PixelPacket
    *magick_restrict p,
    *magick_restrict q;

  ssize_t
    y;

  /*
    Apply the image clip mask to the pixels staged in nexus_info: where the
    mask intensity is non-zero, composite the authentic pixels over the
    staged pixels.  Returns MagickTrue if the region was clipped (or no
    clipping applies), MagickFalse on failure.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((image->clip_mask == (Image *) NULL) ||
      (image->storage_class == PseudoClass))
    return(MagickTrue);
  if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0))
    return(MagickTrue);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  clip_nexus=AcquirePixelCacheNexus(1);
  /* p: authentic pixels; q: staged nexus pixels; r: clip mask pixels. */
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
    nexus_info->region.width,nexus_info->region.height,
    nexus_info->virtual_nexus,exception);
  indexes=nexus_info->virtual_nexus->indexes;
  q=nexus_info->pixels;
  nexus_indexes=nexus_info->indexes;
  r=GetVirtualPixelCacheNexus(image->clip_mask,MaskVirtualPixelMethod,
    nexus_info->region.x,nexus_info->region.y,nexus_info->region.width,
    nexus_info->region.height,clip_nexus[0],exception);
  if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL) ||
      (r == (const PixelPacket *) NULL))
    {
      /*
        Bug fix: the original returned here without releasing clip_nexus,
        leaking the nexus acquired above on every failed pixel fetch.
      */
      clip_nexus=DestroyPixelCacheNexus(clip_nexus,1);
      return(MagickFalse);
    }
  n=0;
  for (y=0; y < (ssize_t) nexus_info->region.height; y++)
  {
    ssize_t
      x;

    for (x=0; x < (ssize_t) nexus_info->region.width; x++)
    {
      double
        mask_alpha;

      mask_alpha=QuantumScale*GetPixelIntensity(image,r);
      /* Only composite where the mask is (numerically) non-zero. */
      if (fabs(mask_alpha) >= MagickEpsilon)
        {
          SetPixelRed(q,mask_alpha*MagickOver_((MagickRealType) p->red,
            (MagickRealType) GetPixelOpacity(p),(MagickRealType) q->red,
            (MagickRealType) GetPixelOpacity(q)));
          SetPixelGreen(q,mask_alpha*MagickOver_((MagickRealType) p->green,
            (MagickRealType) GetPixelOpacity(p),(MagickRealType) q->green,
            (MagickRealType) GetPixelOpacity(q)));
          SetPixelBlue(q,mask_alpha*MagickOver_((MagickRealType) p->blue,
            (MagickRealType) GetPixelOpacity(p),(MagickRealType) q->blue,
            (MagickRealType) GetPixelOpacity(q)));
          SetPixelOpacity(q,GetPixelOpacity(p));
          if (cache_info->active_index_channel != MagickFalse)
            SetPixelIndex(nexus_indexes+n,GetPixelIndex(indexes+n));
        }
      p++;
      q++;
      r++;
      n++;
    }
  }
  clip_nexus=DestroyPixelCacheNexus(clip_nexus,1);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCache() clones a pixel cache.
%
% The format of the ClonePixelCache() method is:
%
% Cache ClonePixelCache(const Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickExport Cache ClonePixelCache(const Cache cache)
{
  CacheInfo
    *magick_restrict clone;

  const CacheInfo
    *magick_restrict source;

  /*
    Create a fresh cache with the same thread count and virtual pixel
    method as the source.  No pixel storage is shared or copied here.
  */
  assert(cache != NULL);
  source=(const CacheInfo *) cache;
  assert(source->signature == MagickCoreSignature);
  if (source->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      source->filename);
  clone=(CacheInfo *) AcquirePixelCache(source->number_threads);
  clone->virtual_pixel_method=source->virtual_pixel_method;
  return((Cache ) clone);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCacheMethods() clones the pixel cache methods from one cache to
% another.
%
% The format of the ClonePixelCacheMethods() method is:
%
% void ClonePixelCacheMethods(Cache clone,const Cache cache)
%
% A description of each parameter follows:
%
% o clone: Specifies a pointer to a Cache structure.
%
% o cache: the pixel cache.
%
*/
MagickExport void ClonePixelCacheMethods(Cache clone,const Cache cache)
{
  CacheInfo
    *magick_restrict destination,
    *magick_restrict source;

  /*
    Copy the method vector of `cache' into `clone'.  Locals are named by
    role (destination/source) to avoid the confusion of the parameters'
    order.
  */
  assert(clone != (Cache) NULL);
  destination=(CacheInfo *) clone;
  assert(destination->signature == MagickCoreSignature);
  if (destination->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      destination->filename);
  assert(cache != (Cache) NULL);
  source=(CacheInfo *) cache;
  assert(source->signature == MagickCoreSignature);
  destination->methods=source->methods;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e R e p o s i t o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCacheRepository() clones the source pixel cache to the destination
% cache.
%
% The format of the ClonePixelCacheRepository() method is:
%
% MagickBooleanType ClonePixelCacheRepository(CacheInfo *cache_info,
% CacheInfo *source_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o source_info: the source pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ClonePixelCacheOnDisk(
  CacheInfo *magick_restrict cache_info,CacheInfo *magick_restrict clone_info)
{
  MagickSizeType
    extent;

  size_t
    quantum;

  ssize_t
    count;

  struct stat
    file_stats;

  unsigned char
    *buffer;

  /*
    Clone pixel cache on disk with identical morphology: copy the backing
    cache file of cache_info byte-for-byte into clone_info's file.
  */
  if ((OpenPixelCacheOnDisk(cache_info,ReadMode) == MagickFalse) ||
      (OpenPixelCacheOnDisk(clone_info,IOMode) == MagickFalse))
    return(MagickFalse);
  /* Rewind both files so the copy starts at offset zero. */
  if ((lseek(cache_info->file,0,SEEK_SET) < 0) ||
      (lseek(clone_info->file,0,SEEK_SET) < 0))
    return(MagickFalse);
  quantum=(size_t) MagickMaxBufferExtent;
  if ((fstat(cache_info->file,&file_stats) == 0) && (file_stats.st_size > 0))
    {
#if defined(MAGICKCORE_HAVE_LINUX_SENDFILE)
      /*
        Fast path: zero-copy in-kernel transfer.  Linux sendfile() will
        transfer at most 0x7ffff000 bytes per call, hence the length guard.
      */
      if (cache_info->length < 0x7ffff000)
        {
          count=sendfile(clone_info->file,cache_info->file,(off_t *) NULL,
            (size_t) cache_info->length);
          if (count == (ssize_t) cache_info->length)
            return(MagickTrue);
          /* Partial/failed transfer: rewind and fall through to read/write. */
          if ((lseek(cache_info->file,0,SEEK_SET) < 0) ||
              (lseek(clone_info->file,0,SEEK_SET) < 0))
            return(MagickFalse);
        }
#endif
      quantum=(size_t) MagickMin(file_stats.st_size,MagickMaxBufferExtent);
    }
  buffer=(unsigned char *) AcquireQuantumMemory(quantum,sizeof(*buffer));
  if (buffer == (unsigned char *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  extent=0;
  /* Portable fallback: block-copy until EOF or a short write. */
  while ((count=read(cache_info->file,buffer,quantum)) > 0)
  {
    ssize_t
      number_bytes;

    number_bytes=write(clone_info->file,buffer,(size_t) count);
    if (number_bytes != count)
      break;
    extent+=number_bytes;
  }
  buffer=(unsigned char *) RelinquishMagickMemory(buffer);
  /* Success only if every byte of the source cache was copied. */
  if (extent != cache_info->length)
    return(MagickFalse);
  return(MagickTrue);
}
static MagickBooleanType ClonePixelCacheRepository(
  CacheInfo *magick_restrict clone_info,CacheInfo *magick_restrict cache_info,
  ExceptionInfo *exception)
{
#define MaxCacheThreads ((size_t) GetMagickResourceLimit(ThreadResource))
#define cache_number_threads(source,destination,chunk,multithreaded) \
  num_threads((multithreaded) == 0 ? 1 : \
    (((source)->type != MemoryCache) && ((source)->type != MapCache)) || \
    (((destination)->type != MemoryCache) && ((destination)->type != MapCache)) ? \
    MagickMax(MagickMin(GetMagickResourceLimit(ThreadResource),2),1) : \
    MagickMax(MagickMin((ssize_t) GetMagickResourceLimit(ThreadResource),(ssize_t) (chunk)/256),1))

  MagickBooleanType
    status;

  NexusInfo
    **magick_restrict cache_nexus,
    **magick_restrict clone_nexus;

  size_t
    length;

  ssize_t
    y;

  /*
    Copy the pixels (and, when active, the indexes) of cache_info into
    clone_info.  A ping cache has no pixels; identical morphologies permit a
    bulk memcpy or disk-to-disk copy; anything else is cloned row by row
    through per-thread nexuses.
  */
  assert(cache_info != (CacheInfo *) NULL);
  assert(clone_info != (CacheInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  if (cache_info->type == PingCache)
    return(MagickTrue);
  if ((cache_info->storage_class == clone_info->storage_class) &&
      (cache_info->colorspace == clone_info->colorspace) &&
      (cache_info->channels == clone_info->channels) &&
      (cache_info->columns == clone_info->columns) &&
      (cache_info->rows == clone_info->rows) &&
      (cache_info->active_index_channel == clone_info->active_index_channel))
    {
      /*
        Identical pixel cache morphology.
      */
      if (((cache_info->type == MemoryCache) ||
           (cache_info->type == MapCache)) &&
          ((clone_info->type == MemoryCache) ||
           (clone_info->type == MapCache)))
        {
          /* Both caches are directly addressable: bulk memcpy. */
          (void) memcpy(clone_info->pixels,cache_info->pixels,
            cache_info->columns*cache_info->rows*sizeof(*cache_info->pixels));
          if ((cache_info->active_index_channel != MagickFalse) &&
              (clone_info->active_index_channel != MagickFalse))
            (void) memcpy(clone_info->indexes,cache_info->indexes,
              cache_info->columns*cache_info->rows*
              sizeof(*cache_info->indexes));
          return(MagickTrue);
        }
      if ((cache_info->type == DiskCache) && (clone_info->type == DiskCache))
        return(ClonePixelCacheOnDisk(cache_info,clone_info));
    }
  /*
    Mismatched pixel cache morphology: clone row by row, clipping the copy
    to the smaller of the two column counts.
  */
  cache_nexus=AcquirePixelCacheNexus(cache_info->number_threads);
  clone_nexus=AcquirePixelCacheNexus(clone_info->number_threads);
  length=(size_t) MagickMin(cache_info->columns,clone_info->columns)*
    sizeof(*cache_info->pixels);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
  for (y=0; y < (ssize_t) cache_info->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    PixelPacket
      *pixels;

    if (status == MagickFalse)
      continue;
    /* Rows beyond the clone's geometry have no destination. */
    if (y >= (ssize_t) clone_info->rows)
      continue;
    /* Read one row from the source cache... */
    pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y,
      cache_info->columns,1,MagickFalse,cache_nexus[id],exception);
    if (pixels == (PixelPacket *) NULL)
      continue;
    status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception);
    if (status == MagickFalse)
      continue;
    /* ...and write it into the clone. */
    pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y,
      clone_info->columns,1,MagickFalse,clone_nexus[id],exception);
    if (pixels == (PixelPacket *) NULL)
      continue;
    /* Zero first so columns beyond the source width are well-defined. */
    (void) memset(clone_nexus[id]->pixels,0,(size_t) clone_nexus[id]->length);
    (void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length);
    status=WritePixelCachePixels(clone_info,clone_nexus[id],exception);
  }
  if ((cache_info->active_index_channel != MagickFalse) &&
      (clone_info->active_index_channel != MagickFalse))
    {
      /*
        Clone indexes.
      */
      length=(size_t) MagickMin(cache_info->columns,clone_info->columns)*
        sizeof(*cache_info->indexes);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
      for (y=0; y < (ssize_t) cache_info->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        PixelPacket
          *pixels;

        if (status == MagickFalse)
          continue;
        if (y >= (ssize_t) clone_info->rows)
          continue;
        pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y,
          cache_info->columns,1,MagickFalse,cache_nexus[id],exception);
        if (pixels == (PixelPacket *) NULL)
          continue;
        status=ReadPixelCacheIndexes(cache_info,cache_nexus[id],exception);
        if (status == MagickFalse)
          continue;
        pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y,
          clone_info->columns,1,MagickFalse,clone_nexus[id],exception);
        if (pixels == (PixelPacket *) NULL)
          continue;
        (void) memcpy(clone_nexus[id]->indexes,cache_nexus[id]->indexes,length);
        status=WritePixelCacheIndexes(clone_info,clone_nexus[id],exception);
      }
    }
  clone_nexus=DestroyPixelCacheNexus(clone_nexus,clone_info->number_threads);
  cache_nexus=DestroyPixelCacheNexus(cache_nexus,cache_info->number_threads);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MaxTextExtent];

      /* Log the cache-type transition, e.g. "memory => disk". */
      (void) FormatLocaleString(message,MaxTextExtent,"%s => %s",
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type),
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type));
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixelCache() method is:
%
% void DestroyImagePixelCache(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void DestroyImagePixelCache(Image *image)
{
  /*
    Drop this image's reference to its pixel cache, if it has one.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->cache == (void *) NULL)
    return;
  image->cache=DestroyPixelCache(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixels() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixels() method is:
%
% void DestroyImagePixels(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DestroyImagePixels(Image *image)
{
  CacheInfo
    *magick_restrict info;

  /*
    Release the image's pixel cache, delegating to an installed destroy
    handler when one is registered on the cache's method vector.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  if (info->methods.destroy_pixel_handler == (DestroyPixelHandler) NULL)
    {
      image->cache=DestroyPixelCache(image->cache);
      return;
    }
  info->methods.destroy_pixel_handler(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyPixelCache() method is:
%
% Cache DestroyPixelCache(Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
static MagickBooleanType ClosePixelCacheOnDisk(CacheInfo *cache_info)
{
  int
    result;

  /*
    Close the cache's backing file and return its file-resource slot.
    Reports MagickFalse when there is no open file (historic behavior) or
    when close() fails.
  */
  if (cache_info->file == -1)
    return(MagickFalse);
  result=close(cache_info->file);
  cache_info->file=(-1);
  RelinquishMagickResource(FileResource,1);
  return(result == -1 ? MagickFalse : MagickTrue);
}
static inline void RelinquishPixelCachePixels(CacheInfo *cache_info)
{
  /*
    Release the pixel storage backing the cache and reset it to an
    undefined, unmapped state.  The release strategy depends on the cache
    type.
  */
  switch (cache_info->type)
  {
    case MemoryCache:
    {
#if defined(MAGICKCORE_OPENCL_SUPPORT)
      /* Pixels wrapped by an OpenCL buffer are released through OpenCL. */
      if (RelinquishOpenCLBuffer(cache_info) != MagickFalse)
        {
          cache_info->pixels=(PixelPacket *) NULL;
          break;
        }
#endif
      /* Heap-aligned and anonymously-mapped memory need different frees. */
      if (cache_info->mapped == MagickFalse)
        cache_info->pixels=(PixelPacket *) RelinquishAlignedMemory(
          cache_info->pixels);
      else
        (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      RelinquishMagickResource(MemoryResource,cache_info->length);
      break;
    }
    case MapCache:
    {
      (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      cache_info->pixels=(PixelPacket *) NULL;
      /* Keep the file for read-only/persistent caches; delete otherwise. */
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(MapResource,cache_info->length);
      /* NOTE: no break -- a MapCache is backed by a disk file, so control
         deliberately falls through to DiskCache to close the descriptor and
         release the disk resource (matches upstream ImageMagick). */
    }
    case DiskCache:
    {
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(DiskResource,cache_info->length);
      break;
    }
    case DistributedCache:
    {
      /* Remote cache: drop this client's claim on the cache server. */
      *cache_info->cache_filename='\0';
      (void) RelinquishDistributePixelCache((DistributeCacheInfo *)
        cache_info->server_info);
      break;
    }
    default:
      break;
  }
  cache_info->type=UndefinedCache;
  cache_info->mapped=MagickFalse;
  cache_info->indexes=(IndexPacket *) NULL;
}
MagickExport Cache DestroyPixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Drop one reference to the cache; only the final reference tears the
    cache down.  Always returns NULL so callers can null their handle.
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count--;
  if (cache_info->reference_count != 0)
    {
      /* Other owners remain: nothing more to do. */
      UnlockSemaphoreInfo(cache_info->semaphore);
      return((Cache) NULL);
    }
  /* Last reference: safe to unlock, no other owner can reach the cache. */
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MaxTextExtent];

      (void) FormatLocaleString(message,MaxTextExtent,"destroy %s",
        cache_info->filename);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /* Release pixel storage first, then the auxiliary structures. */
  RelinquishPixelCachePixels(cache_info);
  if (cache_info->server_info != (DistributeCacheInfo *) NULL)
    cache_info->server_info=DestroyDistributeCacheInfo((DistributeCacheInfo *)
      cache_info->server_info);
  if (cache_info->nexus_info != (NexusInfo **) NULL)
    cache_info->nexus_info=DestroyPixelCacheNexus(cache_info->nexus_info,
      cache_info->number_threads);
  if (cache_info->random_info != (RandomInfo *) NULL)
    cache_info->random_info=DestroyRandomInfo(cache_info->random_info);
  if (cache_info->file_semaphore != (SemaphoreInfo *) NULL)
    DestroySemaphoreInfo(&cache_info->file_semaphore);
  if (cache_info->semaphore != (SemaphoreInfo *) NULL)
    DestroySemaphoreInfo(&cache_info->semaphore);
  /* Poison the signature so stale pointers trip the asserts above. */
  cache_info->signature=(~MagickCoreSignature);
  cache_info=(CacheInfo *) RelinquishAlignedMemory(cache_info);
  cache=(Cache) NULL;
  return(cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCacheNexus() destroys a pixel cache nexus.
%
% The format of the DestroyPixelCacheNexus() method is:
%
% NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info,
% const size_t number_threads)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus to destroy.
%
% o number_threads: the number of nexus threads.
%
*/
static inline void RelinquishCacheNexusPixels(NexusInfo *nexus_info)
{
  /*
    Release a nexus' staging storage -- either a memory-mapped blob or
    aligned heap memory -- and reset all of its bookkeeping fields.
  */
  if (nexus_info->mapped != MagickFalse)
    (void) UnmapBlob(nexus_info->cache,(size_t) nexus_info->length);
  else
    (void) RelinquishAlignedMemory(nexus_info->cache);
  nexus_info->cache=(PixelPacket *) NULL;
  nexus_info->pixels=(PixelPacket *) NULL;
  nexus_info->indexes=(IndexPacket *) NULL;
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
}
MagickExport NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info,
  const size_t number_threads)
{
  ssize_t
    i;

  /*
    Tear down a nexus table: release any staging pixels, poison each entry's
    signature, then free the element array and the pointer table.  The table
    holds 2*number_threads entries (presumably to match the companion
    acquire routine -- confirm against AcquirePixelCacheNexus).
  */
  assert(nexus_info != (NexusInfo **) NULL);
  i=0;
  while (i < (ssize_t) (2*number_threads))
  {
    NexusInfo
      *node;

    node=nexus_info[i];
    if (node->cache != (PixelPacket *) NULL)
      RelinquishCacheNexusPixels(node);
    node->signature=(~MagickCoreSignature);
    i++;
  }
  *nexus_info=(NexusInfo *) RelinquishMagickMemory(*nexus_info);
  nexus_info=(NexusInfo **) RelinquishAlignedMemory(nexus_info);
  return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c I n d e x e s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticIndexesFromCache() returns the indexes associated with the last
% call to QueueAuthenticPixelsCache() or GetAuthenticPixelsCache().
%
% The format of the GetAuthenticIndexesFromCache() method is:
%
% IndexPacket *GetAuthenticIndexesFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static IndexPacket *GetAuthenticIndexesFromCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(cache_info->nexus_info[id]->indexes);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c I n d e x Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticIndexQueue() returns the authentic black channel or the colormap
% indexes associated with the last call to QueueAuthenticPixels() or
% GetVirtualPixels(). NULL is returned if the black channel or colormap
% indexes are not available.
%
% The format of the GetAuthenticIndexQueue() method is:
%
% IndexPacket *GetAuthenticIndexQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport IndexPacket *GetAuthenticIndexQueue(const Image *image)
{
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Hand back the indexes staged by the last queue/get call, delegating to
    an installed handler when one is registered on the method vector.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  if (info->methods.get_authentic_indexes_from_handler !=
      (GetAuthenticIndexesFromHandler) NULL)
    return(info->methods.get_authentic_indexes_from_handler(image));
  assert(thread_id < (int) info->number_threads);
  return(info->nexus_info[thread_id]->indexes);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c O p e n C L B u f f e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticOpenCLBuffer() returns an OpenCL buffer used to execute OpenCL
% operations.
%
% The format of the GetAuthenticOpenCLBuffer() method is:
%
% cl_mem GetAuthenticOpenCLBuffer(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickPrivate cl_mem GetAuthenticOpenCLBuffer(const Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  cl_context
    context;

  cl_int
    status;

  MagickCLEnv
    clEnv;

  /*
    Return (retaining) an OpenCL buffer that aliases the image's in-memory
    pixel cache, creating the wrapper on first use.  Only an unshared,
    heap-backed MemoryCache can be wrapped; otherwise NULL is returned.
  */
  assert(image != (const Image *) NULL);
  cache_info=(CacheInfo *)image->cache;
  if ((cache_info->type == UndefinedCache) || (cache_info->reference_count > 1))
    {
      /* Force a private, initialized cache before exposing it to OpenCL. */
      SyncImagePixelCache((Image *) image,exception);
      cache_info=(CacheInfo *)image->cache;
    }
  if ((cache_info->type != MemoryCache) || (cache_info->mapped != MagickFalse))
    return((cl_mem) NULL);
  LockSemaphoreInfo(cache_info->semaphore);
  clEnv=GetDefaultOpenCLEnv();
  if (cache_info->opencl == (OpenCLCacheInfo *) NULL)
    {
      /* First request: wrap the host pixels with CL_MEM_USE_HOST_PTR. */
      assert(cache_info->pixels != NULL);
      context=GetOpenCLContext(clEnv);
      cache_info->opencl=(OpenCLCacheInfo *) AcquireCriticalMemory(
        sizeof(*cache_info->opencl));
      (void) memset(cache_info->opencl,0,sizeof(*cache_info->opencl));
      cache_info->opencl->events_semaphore=AllocateSemaphoreInfo();
      cache_info->opencl->length=cache_info->length;
      cache_info->opencl->pixels=cache_info->pixels;
      cache_info->opencl->buffer=clEnv->library->clCreateBuffer(context,
        CL_MEM_USE_HOST_PTR,cache_info->length,cache_info->pixels,&status);
      if (status != CL_SUCCESS)
        cache_info->opencl=RelinquishOpenCLCacheInfo(clEnv,cache_info->opencl);
    }
  /* Retain for the caller; caller presumably releases it -- confirm with
     call sites. */
  if (cache_info->opencl != (OpenCLCacheInfo *) NULL)
    clEnv->library->clRetainMemObject(cache_info->opencl->buffer);
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->opencl == (OpenCLCacheInfo *) NULL)
    return((cl_mem) NULL);
  return(cache_info->opencl->buffer);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelCacheNexus() gets authentic pixels from the in-memory or
% disk pixel cache as defined by the geometry parameters. A pointer to the
% pixels is returned if the pixels are transferred, otherwise a NULL is
% returned.
%
% The format of the GetAuthenticPixelCacheNexus() method is:
%
% PixelPacket *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to return.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport PixelPacket *GetAuthenticPixelCacheNexus(Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict info;

  PixelPacket
    *magick_restrict region;

  /*
    Stage the requested region in the nexus, then hydrate it from the pixel
    cache unless the nexus already points directly at authentic storage.
    Returns NULL when the region cannot be staged or read.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  region=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickTrue,
    nexus_info,exception);
  if (region == (PixelPacket *) NULL)
    return((PixelPacket *) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(region);
  if (ReadPixelCachePixels(info,nexus_info,exception) == MagickFalse)
    return((PixelPacket *) NULL);
  if ((info->active_index_channel != MagickFalse) &&
      (ReadPixelCacheIndexes(info,nexus_info,exception) == MagickFalse))
    return((PixelPacket *) NULL);
  return(region);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsFromCache() returns the pixels associated with the last
% call to the QueueAuthenticPixelsCache() or GetAuthenticPixelsCache() methods.
%
% The format of the GetAuthenticPixelsFromCache() method is:
%
% PixelPacket *GetAuthenticPixelsFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static PixelPacket *GetAuthenticPixelsFromCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(cache_info->nexus_info[id]->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelQueue() returns the authentic pixels associated with the
% last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
% The format of the GetAuthenticPixelQueue() method is:
%
% PixelPacket *GetAuthenticPixelQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport PixelPacket *GetAuthenticPixelQueue(const Image *image)
{
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Hand back the pixels staged by the last queue/get call, delegating to an
    installed handler when one is registered on the method vector.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  if (info->methods.get_authentic_pixels_from_handler !=
      (GetAuthenticPixelsFromHandler) NULL)
    return(info->methods.get_authentic_pixels_from_handler(image));
  assert(thread_id < (int) info->number_threads);
  return(info->nexus_info[thread_id]->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixels() obtains a pixel region for read/write access. If the
% region is successfully accessed, a pointer to a PixelPacket array
% representing the region is returned, otherwise NULL is returned.
%
% The returned pointer may point to a temporary working copy of the pixels
% or it may point to the original pixels in memory. Performance is maximized
% if the selected region is part of one row, or one or more full rows, since
% then there is opportunity to access the pixels in-place (without a copy)
% if the image is in memory, or in a memory-mapped file. The returned pointer
% must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% PixelPacket. If the image type is CMYK or if the storage class is
% PseudoClass, call GetAuthenticIndexQueue() after invoking
% GetAuthenticPixels() to obtain the black color component or colormap indexes
% (of type IndexPacket) corresponding to the region. Once the PixelPacket
% (and/or IndexPacket) array has been updated, the changes must be saved back
% to the underlying image using SyncAuthenticPixels() or they may be lost.
%
% The format of the GetAuthenticPixels() method is:
%
% PixelPacket *GetAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport PixelPacket *GetAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Obtain a read/write pixel region.  Delegates to an installed handler
    when present; otherwise the region is read through this thread's
    default cache nexus.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  if (info->methods.get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    return(info->methods.get_authentic_pixels_handler(image,x,y,columns,rows,
      exception));
  assert(thread_id < (int) info->number_threads);
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    info->nexus_info[thread_id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsCache() gets pixels from the in-memory or disk pixel cache
% as defined by the geometry parameters. A pointer to the pixels is returned
% if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetAuthenticPixelsCache() method is:
%
% PixelPacket *GetAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static PixelPacket *GetAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Default get-authentic-pixels handler: read the region through this
    thread's default cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  if (info == (Cache) NULL)
    return((PixelPacket *) NULL);
  assert(info->signature == MagickCoreSignature);
  assert(thread_id < (int) info->number_threads);
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    info->nexus_info[thread_id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageExtent() returns the extent of the pixels associated with the
% last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
% The format of the GetImageExtent() method is:
%
% MagickSizeType GetImageExtent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickSizeType GetImageExtent(const Image *image)
{
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Report the extent of the region staged in this thread's default cache
    nexus by the last queue/get call.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  assert(thread_id < (int) info->number_threads);
  return(GetPixelCacheNexusExtent(info,info->nexus_info[thread_id]));
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O p e n C L E v e n t s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOpenCLEvents() returns the events that the next operation should wait
% for. The argument event_count is set to the number of events.
%
% The format of the GetOpenCLEvents() method is:
%
% const cl_event *GetOpenCLEvents(const Image *image,
%        cl_uint *event_count)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o event_count: will be set to the number of events.
%
*/
extern MagickPrivate cl_event *GetOpenCLEvents(const Image *image,
  cl_uint *event_count)
{
  CacheInfo
    *magick_restrict info;

  cl_event
    *pending;

  assert(image != (const Image *) NULL);
  assert(event_count != (cl_uint *) NULL);
  info=(CacheInfo *) image->cache;
  /*
    Default to "nothing to wait for"; only an OpenCL-backed cache has events.
  */
  *event_count=0;
  pending=(cl_event *) NULL;
  if (info->opencl != (OpenCLCacheInfo *) NULL)
    pending=CopyOpenCLEvents(info->opencl,event_count);
  return(pending);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCache() ensures that there is only a single reference to the
% pixel cache to be modified, updating the provided cache pointer to point to
% a clone of the original pixel cache if necessary.
%
% The format of the GetImagePixelCache method is:
%
% Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone: any value other than MagickFalse clones the cache pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType ValidatePixelCacheMorphology(
  const Image *magick_restrict image)
{
  CacheInfo
    *magick_restrict info;

  /*
    The cache must agree with the image on class, colorspace, channel count,
    and geometry, and must have its nexus array allocated.
  */
  info=(CacheInfo *) image->cache;
  if (image->storage_class != info->storage_class)
    return(MagickFalse);
  if (image->colorspace != info->colorspace)
    return(MagickFalse);
  if (image->channels != info->channels)
    return(MagickFalse);
  if ((image->columns != info->columns) || (image->rows != info->rows))
    return(MagickFalse);
  if (info->nexus_info == (NexusInfo **) NULL)
    return(MagickFalse);
  return(MagickTrue);
}
static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;
  MagickBooleanType
    destroy,
    status;
  /*
    Process-wide state: throttle/time-limit settings are read once and the
    call counter is shared by every caller.
  */
  static MagickSizeType
    cache_timelimit = MagickResourceInfinity,
    cpu_throttle = MagickResourceInfinity,
    cycles = 0;
  status=MagickTrue;
  /*
    Honor the CPU throttle resource: sleep on every 32nd call.
  */
  if (cpu_throttle == MagickResourceInfinity)
    cpu_throttle=GetMagickResourceLimit(ThrottleResource);
  if ((cpu_throttle != 0) && ((cycles++ % 32) == 0))
    MagickDelay(cpu_throttle);
  if (cache_epoch == 0)
    {
      /*
        Set the expire time in seconds (first call establishes the epoch).
      */
      cache_epoch=GetMagickTime();
      cache_timelimit=GetMagickResourceLimit(TimeResource);
    }
  if ((cache_timelimit != MagickResourceInfinity) &&
      ((MagickSizeType) (GetMagickTime()-cache_epoch) >= cache_timelimit))
    {
      /*
        Time limit exceeded: close any disk cache file, then abort via a
        fatal exception (this does not return).
      */
#if defined(ECANCELED)
      errno=ECANCELED;
#endif
      cache_info=(CacheInfo *) image->cache;
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded");
    }
  LockSemaphoreInfo(image->semaphore);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  destroy=MagickFalse;
  /*
    Copy-on-write: if the cache is shared (reference_count > 1) or read-only,
    clone it so this image owns a private, writable copy.  The test is
    repeated under the cache semaphore (double-checked locking).
  */
  if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
    {
      LockSemaphoreInfo(cache_info->semaphore);
      if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
        {
          CacheInfo
            *clone_info;
          Image
            clone_image;
          /*
            Clone pixel cache.  A shallow stack copy of the image carries the
            new cache through OpenPixelCache(); only its semaphore is fresh.
          */
          clone_image=(*image);
          clone_image.semaphore=AllocateSemaphoreInfo();
          clone_image.reference_count=1;
          clone_image.cache=ClonePixelCache(cache_info);
          clone_info=(CacheInfo *) clone_image.cache;
          status=OpenPixelCache(&clone_image,IOMode,exception);
          if (status == MagickFalse)
            clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
          else
            {
              /* Copy the pixels only when the caller asked for a clone. */
              if (clone != MagickFalse)
                status=ClonePixelCacheRepository(clone_info,cache_info,
                  exception);
              if (status == MagickFalse)
                clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
              else
                {
                  /* Success: swap in the private cache, release the old one
                     after the cache semaphore is dropped. */
                  destroy=MagickTrue;
                  image->cache=clone_info;
                }
            }
          DestroySemaphoreInfo(&clone_image.semaphore);
        }
      UnlockSemaphoreInfo(cache_info->semaphore);
    }
  if (destroy != MagickFalse)
    cache_info=(CacheInfo *) DestroyPixelCache(cache_info);
  if (status != MagickFalse)
    {
      /*
        Ensure the image matches the pixel cache morphology.
      */
      if (image->type != UndefinedType)
        image->type=UndefinedType;
      if (ValidatePixelCacheMorphology(image) == MagickFalse)
        {
          status=OpenPixelCache(image,IOMode,exception);
          cache_info=(CacheInfo *) image->cache;
          if (cache_info->file != -1)
            (void) ClosePixelCacheOnDisk(cache_info);
        }
    }
  UnlockSemaphoreInfo(image->semaphore);
  /* NULL signals failure; otherwise return the (possibly new) cache. */
  if (status == MagickFalse)
    return((Cache) NULL);
  return(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCacheType() returns the pixel cache type: UndefinedCache,
% DiskCache, MapCache, MemoryCache, or PingCache.
%
% The format of the GetImagePixelCacheType() method is:
%
% CacheType GetImagePixelCacheType(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport CacheType GetPixelCacheType(const Image *image)
{
  /*
    Thin compatibility wrapper around GetImagePixelCacheType().
  */
  CacheType
    type;

  type=GetImagePixelCacheType(image);
  return(type);
}
MagickExport CacheType GetImagePixelCacheType(const Image *image)
{
  CacheInfo
    *magick_restrict info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  /*
    One of: UndefinedCache, DiskCache, MapCache, MemoryCache, PingCache.
  */
  return(info->type);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e A u t h e n t i c P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixel() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixel() method is:
%
%      MagickBooleanType GetOneAuthenticPixel(Image *image,const ssize_t x,
% const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneAuthenticPixel(Image *image,
  const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict info;

  PixelPacket
    *magick_restrict q;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  /*
    Preload the background color; it stands if the fetch fails.
  */
  *pixel=image->background_color;
  if (info->methods.get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    return(info->methods.get_one_authentic_pixel_from_handler(image,x,y,pixel,
      exception));
  q=GetAuthenticPixelsCache(image,x,y,1UL,1UL,exception);
  if (q == (PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*q);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e A u t h e n t i c P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixelFromCache() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixelFromCache() method is:
%
%      MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
% const ssize_t x,const ssize_t y,PixelPacket *pixel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
  const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  PixelPacket
    *magick_restrict q;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  /*
    Preload the background color; it stands if the fetch fails.
  */
  *pixel=image->background_color;
  assert(thread_id < (int) info->number_threads);
  q=GetAuthenticPixelCacheNexus(image,x,y,1UL,1UL,
    info->nexus_info[thread_id],exception);
  if (q == (PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*q);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l M a g i c k P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualMagickPixel() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs. If
% you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualMagickPixel() method is:
%
%      MagickBooleanType GetOneVirtualMagickPixel(const Image *image,
%        const ssize_t x,const ssize_t y,MagickPixelPacket *pixel,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: these values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualMagickPixel(const Image *image,
  const ssize_t x,const ssize_t y,MagickPixelPacket *pixel,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  const IndexPacket
    *magick_restrict index_queue;

  const PixelPacket
    *magick_restrict p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  assert(thread_id < (int) info->number_threads);
  p=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y,1UL,
    1UL,info->nexus_info[thread_id],exception);
  /*
    Initialize the result packet even when the read fails.
  */
  GetMagickPixelPacket(image,pixel);
  if (p == (const PixelPacket *) NULL)
    return(MagickFalse);
  index_queue=GetVirtualIndexesFromNexus(info,info->nexus_info[thread_id]);
  SetMagickPixelPacket(image,p,index_queue,pixel);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l M e t h o d P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualMethodPixel() returns a single pixel at the specified (x,y)
% location as defined by specified pixel method. The image background color
% is returned if an error occurs. If you plan to modify the pixel, use
% GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualMethodPixel() method is:
%
%      MagickBooleanType GetOneVirtualMethodPixel(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualMethodPixel(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  PixelPacket *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  const PixelPacket
    *magick_restrict p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  /*
    Preload the background color, then prefer a registered handler.
  */
  *pixel=image->background_color;
  if (info->methods.get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    return(info->methods.get_one_virtual_pixel_from_handler(image,
      virtual_pixel_method,x,y,pixel,exception));
  assert(thread_id < (int) info->number_threads);
  p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    info->nexus_info[thread_id],exception);
  if (p == (const PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*p);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixel() returns a single virtual pixel at the specified
% (x,y) location. The image background color is returned if an error occurs.
% If you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualPixel() method is:
%
%      MagickBooleanType GetOneVirtualPixel(const Image *image,const ssize_t x,
%        const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixel(const Image *image,
  const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  const PixelPacket
    *magick_restrict p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  /*
    Preload the background color, then prefer a registered handler; the
    image's current virtual-pixel method governs out-of-bounds access.
  */
  *pixel=image->background_color;
  if (info->methods.get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    return(info->methods.get_one_virtual_pixel_from_handler(image,
      GetPixelCacheVirtualMethod(image),x,y,pixel,exception));
  assert(thread_id < (int) info->number_threads);
  p=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    1UL,1UL,info->nexus_info[thread_id],exception);
  if (p == (const PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*p);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e V i r t u a l P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixelFromCache() returns a single virtual pixel at the
% specified (x,y) location. The image background color is returned if an
% error occurs.
%
% The format of the GetOneVirtualPixelFromCache() method is:
%
%      MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  PixelPacket *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  const PixelPacket
    *magick_restrict p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  assert(thread_id < (int) info->number_threads);
  /*
    Preload the background color; it stands if the fetch fails.
  */
  *pixel=image->background_color;
  p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    info->nexus_info[thread_id],exception);
  if (p == (const PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*p);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheChannels() returns the number of pixel channels associated
% with this instance of the pixel cache.
%
% The format of the GetPixelCacheChannels() method is:
%
% size_t GetPixelCacheChannels(Cache cache)
%
% A description of each parameter follows:
%
% o type: GetPixelCacheChannels returns DirectClass or PseudoClass.
%
% o cache: the pixel cache.
%
*/
MagickExport size_t GetPixelCacheChannels(const Cache cache)
{
  CacheInfo
    *magick_restrict info;

  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  if (info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",info->filename);
  /*
    Number of channels per cached pixel.
  */
  return(info->channels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheColorspace() returns the colorspace of the pixel cache.
%
% The format of the GetPixelCacheColorspace() method is:
%
% Colorspace GetPixelCacheColorspace(const Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickExport ColorspaceType GetPixelCacheColorspace(const Cache cache)
{
  CacheInfo
    *magick_restrict info;

  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  if (info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",info->filename);
  /*
    Colorspace the cached pixels are stored in.
  */
  return(info->colorspace);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e F i l e n a m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheFilename() returns the filename associated with the pixel
% cache.
%
% The format of the GetPixelCacheFilename() method is:
%
% const char *GetPixelCacheFilename(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const char *GetPixelCacheFilename(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
return(cache_info->cache_filename);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheMethods() initializes the CacheMethods structure.
%
% The format of the GetPixelCacheMethods() method is:
%
% void GetPixelCacheMethods(CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickExport void GetPixelCacheMethods(CacheMethods *cache_methods)
{
  assert(cache_methods != (CacheMethods *) NULL);
  /*
    Start from a zeroed structure, then install the default cache handlers.
  */
  (void) memset(cache_methods,0,sizeof(*cache_methods));
  /*
    Virtual (read-only) pixel access.
  */
  cache_methods->get_virtual_pixel_handler=GetVirtualPixelCache;
  cache_methods->get_virtual_pixels_handler=GetVirtualPixelsCache;
  cache_methods->get_virtual_indexes_from_handler=GetVirtualIndexesFromCache;
  cache_methods->get_one_virtual_pixel_from_handler=GetOneVirtualPixelFromCache;
  /*
    Authentic (writable) pixel access.
  */
  cache_methods->get_authentic_pixels_handler=GetAuthenticPixelsCache;
  cache_methods->get_authentic_indexes_from_handler=
    GetAuthenticIndexesFromCache;
  cache_methods->get_authentic_pixels_from_handler=GetAuthenticPixelsFromCache;
  cache_methods->get_one_authentic_pixel_from_handler=
    GetOneAuthenticPixelFromCache;
  /*
    Queue, synchronize, and teardown.
  */
  cache_methods->queue_authentic_pixels_handler=QueueAuthenticPixelsCache;
  cache_methods->sync_authentic_pixels_handler=SyncAuthenticPixelsCache;
  cache_methods->destroy_pixel_handler=DestroyImagePixelCache;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e N e x u s E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheNexusExtent() returns the extent of the pixels associated with
% the last call to SetPixelCacheNexusPixels() or GetPixelCacheNexusPixels().
%
% The format of the GetPixelCacheNexusExtent() method is:
%
% MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus info.
%
*/
/*
  GetPixelCacheNexusExtent() returns the pixel count of the nexus' active
  region; when no region is selected (width*height == 0) it falls back to
  the full cache extent.
*/
MagickExport MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
  NexusInfo *nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;
  MagickSizeType
    extent;
  /* Use the file-wide (Cache) NULL convention for the sanity check. */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  extent=(MagickSizeType) nexus_info->region.width*nexus_info->region.height;
  if (extent == 0)
    return((MagickSizeType) cache_info->columns*cache_info->rows);
  return(extent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCachePixels() returns the pixels associated with the specified image.
%
% The format of the GetPixelCachePixels() method is:
%
% void *GetPixelCachePixels(Image *image,MagickSizeType *length,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *GetPixelCachePixels(Image *image,MagickSizeType *length,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  assert(length != (MagickSizeType *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  (void) exception;
  *length=info->length;
  /*
    Only memory- and map-backed caches expose a directly addressable pixel
    buffer; disk and ping caches do not.
  */
  if ((info->type == MemoryCache) || (info->type == MapCache))
    return((void *) info->pixels);
  return((void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e S t o r a g e C l a s s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheStorageClass() returns the class type of the pixel cache.
%
% The format of the GetPixelCacheStorageClass() method is:
%
% ClassType GetPixelCacheStorageClass(Cache cache)
%
% A description of each parameter follows:
%
% o type: GetPixelCacheStorageClass returns DirectClass or PseudoClass.
%
% o cache: the pixel cache.
%
*/
MagickExport ClassType GetPixelCacheStorageClass(const Cache cache)
{
  CacheInfo
    *magick_restrict info;

  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  if (info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",info->filename);
  /*
    DirectClass or PseudoClass.
  */
  return(info->storage_class);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e T i l e S i z e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheTileSize() returns the pixel cache tile size.
%
% The format of the GetPixelCacheTileSize() method is:
%
% void GetPixelCacheTileSize(const Image *image,size_t *width,
% size_t *height)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the optimize cache tile width in pixels.
%
% o height: the optimize cache tile height in pixels.
%
*/
MagickExport void GetPixelCacheTileSize(const Image *image,size_t *width,
  size_t *height)
{
  size_t
    tile_bytes;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Use a larger tile for disk caches (8KB vs 2KB of pixel data per row).
  */
  tile_bytes=2048UL;
  if (GetImagePixelCacheType(image) == DiskCache)
    tile_bytes=8192UL;
  *width=tile_bytes/sizeof(PixelPacket);
  *height=(*width);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheVirtualMethod() gets the "virtual pixels" method for the
% pixel cache. A virtual pixel is any pixel access that is outside the
% boundaries of the image cache.
%
% The format of the GetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
{
  CacheInfo
    *magick_restrict info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  /*
    Policy for pixel requests that fall outside the cache boundaries.
  */
  return(info->virtual_pixel_method);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l I n d e x e s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualIndexesFromCache() returns the indexes associated with the last
% call to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualIndexesFromCache() method is:
%
% IndexPacket *GetVirtualIndexesFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const IndexPacket *GetVirtualIndexesFromCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(GetVirtualIndexesFromNexus(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l I n d e x e s F r o m N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualIndexesFromNexus() returns the indexes associated with the
% specified cache nexus.
%
% The format of the GetVirtualIndexesFromNexus() method is:
%
% const IndexPacket *GetVirtualIndexesFromNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the colormap indexes.
%
*/
MagickExport const IndexPacket *GetVirtualIndexesFromNexus(const Cache cache,
  NexusInfo *nexus_info)
{
  CacheInfo
    *magick_restrict info;

  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  /*
    An uninitialized cache has no colormap indexes to hand out.
  */
  if (info->storage_class == UndefinedClass)
    return((IndexPacket *) NULL);
  return(nexus_info->indexes);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l I n d e x Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualIndexQueue() returns the virtual black channel or the
% colormap indexes associated with the last call to QueueAuthenticPixels() or
% GetVirtualPixels(). NULL is returned if the black channel or colormap
% indexes are not available.
%
% The format of the GetVirtualIndexQueue() method is:
%
% const IndexPacket *GetVirtualIndexQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const IndexPacket *GetVirtualIndexQueue(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->methods.get_virtual_indexes_from_handler !=
(GetVirtualIndexesFromHandler) NULL)
return(cache_info->methods.get_virtual_indexes_from_handler(image));
assert(id < (int) cache_info->number_threads);
return(GetVirtualIndexesFromNexus(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelCacheNexus() gets virtual pixels from the in-memory or disk
% pixel cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelCacheNexus() method is:
%
% PixelPacket *GetVirtualPixelCacheNexus(const Image *image,
% const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
% const size_t columns,const size_t rows,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to acquire.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  8x8 Bayer-style ordered-dither offsets (values 0..63); the -32 bias below
  centers the perturbation on zero.
*/
static ssize_t
  DitherMatrix[64] =
  {
     0, 48, 12, 60,  3, 51, 15, 63,
    32, 16, 44, 28, 35, 19, 47, 31,
     8, 56,  4, 52, 11, 59,  7, 55,
    40, 24, 36, 20, 43, 27, 39, 23,
     2, 50, 14, 62,  1, 49, 13, 61,
    34, 18, 46, 30, 33, 17, 45, 29,
    10, 58,  6, 54,  9, 57,  5, 53,
    42, 26, 38, 22, 41, 25, 37, 21
  };

/*
  DitherX()/DitherY() perturb a coordinate by the dither offset for its
  position (mod 8) and clamp the result into the image bounds.
*/
static inline ssize_t DitherX(const ssize_t x,const size_t columns)
{
  ssize_t
    offset;

  offset=x+DitherMatrix[x & 0x07]-32L;
  if (offset < 0L)
    offset=0L;
  else
    if (offset >= (ssize_t) columns)
      offset=(ssize_t) columns-1L;
  return(offset);
}

static inline ssize_t DitherY(const ssize_t y,const size_t rows)
{
  ssize_t
    offset;

  offset=y+DitherMatrix[y & 0x07]-32L;
  if (offset < 0L)
    offset=0L;
  else
    if (offset >= (ssize_t) rows)
      offset=(ssize_t) rows-1L;
  return(offset);
}
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  /*
    Clamp x into [0,columns-1]: out-of-bounds requests replicate the
    nearest edge column.
  */
  if (x < 0L)
    return(0L);
  return(x >= (ssize_t) columns ? (ssize_t) (columns-1) : x);
}
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  /*
    Clamp y into [0,rows-1]: out-of-bounds requests replicate the nearest
    edge row.
  */
  if (y < 0L)
    return(0L);
  return(y >= (ssize_t) rows ? (ssize_t) (rows-1) : y);
}
static inline ssize_t RandomX(RandomInfo *random_info,const size_t columns)
{
  double
    value;

  /*
    Pick a uniformly distributed column index in [0,columns-1].
  */
  value=GetPseudoRandomValue(random_info);
  return((ssize_t) (value*columns));
}
static inline ssize_t RandomY(RandomInfo *random_info,const size_t rows)
{
  double
    value;

  /*
    Pick a uniformly distributed row index in [0,rows-1].
  */
  value=GetPseudoRandomValue(random_info);
  return((ssize_t) (value*rows));
}
static inline MagickModulo VirtualPixelModulo(const ssize_t offset,
  const size_t extent)
{
  /*
    Floored (toward negative infinity) division of offset by extent, so the
    remainder is always in [0,extent-1] even for negative offsets; used to
    wrap coordinates for the tile/mirror virtual-pixel methods.

    Bug fix: the remainder was previously computed unconditionally, which is
    a division by zero (undefined behavior) when extent == 0; only the
    quotient was guarded.  Both are now computed under the extent != 0 check,
    and a zero extent yields remainder 0.
  */
  MagickModulo
    modulo;

  modulo.quotient=offset;
  modulo.remainder=0;
  if (extent != 0)
    {
      modulo.quotient=offset/((ssize_t) extent);
      modulo.remainder=offset % ((ssize_t) extent);
    }
  /*
    C's '/' truncates toward zero; when offset and extent have opposite
    signs and there is a nonzero remainder, shift to the floored result.
  */
  if ((modulo.remainder != 0) && ((offset ^ ((ssize_t) extent)) < 0))
    {
      modulo.quotient-=1;
      modulo.remainder+=((ssize_t) extent);
    }
  return(modulo);
}
MagickExport const PixelPacket *GetVirtualPixelCacheNexus(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const IndexPacket
    *magick_restrict virtual_indexes;

  const PixelPacket
    *magick_restrict p;

  IndexPacket
    virtual_index,
    *magick_restrict indexes;

  MagickOffsetType
    offset;

  MagickSizeType
    length,
    number_pixels;

  NexusInfo
    *magick_restrict virtual_nexus;

  PixelPacket
    *magick_restrict pixels,
    *magick_restrict q,
    virtual_pixel;

  ssize_t
    u,
    v;

  /*
    Acquire pixels.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((const PixelPacket *) NULL);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  /*
    Map the requested region onto the nexus; the presence of a clip or
    composite mask forces a buffered (non in-place) nexus.
  */
  pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,x,y,columns,rows,
    (image->clip_mask != (Image *) NULL) || (image->mask != (Image *) NULL) ?
    MagickTrue : MagickFalse,nexus_info,exception);
  if (pixels == (PixelPacket *) NULL)
    return((const PixelPacket *) NULL);
  /*
    offset/length locate the first and last pixel of the nexus region in
    linear cache coordinates; combined with the x/y bounds test below they
    decide whether the whole request lies inside the cache extents.
  */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+
    nexus_info->region.width-1L;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels))
    if ((x >= 0) && ((ssize_t) (x+columns) <= (ssize_t) cache_info->columns) &&
        (y >= 0) && ((ssize_t) (y+rows) <= (ssize_t) cache_info->rows))
      {
        MagickBooleanType
          status;

        /*
          Pixel request is inside cache extents.
        */
        if (nexus_info->authentic_pixel_cache != MagickFalse)
          return(pixels);
        status=ReadPixelCachePixels(cache_info,nexus_info,exception);
        if (status == MagickFalse)
          return((const PixelPacket *) NULL);
        if ((cache_info->storage_class == PseudoClass) ||
            (cache_info->colorspace == CMYKColorspace))
          {
            /* Colormapped/CMYK caches carry an index channel as well. */
            status=ReadPixelCacheIndexes(cache_info,nexus_info,exception);
            if (status == MagickFalse)
              return((const PixelPacket *) NULL);
          }
        return(pixels);
      }
  /*
    Pixel request is outside cache extents.
  */
  virtual_nexus=nexus_info->virtual_nexus;
  q=pixels;
  indexes=nexus_info->indexes;
  /*
    Choose the constant fill color used by the fixed-color virtual pixel
    methods; the default is the image background color.
  */
  switch (virtual_pixel_method)
  {
    case BlackVirtualPixelMethod:
    {
      SetPixelRed(&virtual_pixel,0);
      SetPixelGreen(&virtual_pixel,0);
      SetPixelBlue(&virtual_pixel,0);
      SetPixelOpacity(&virtual_pixel,OpaqueOpacity);
      break;
    }
    case GrayVirtualPixelMethod:
    {
      SetPixelRed(&virtual_pixel,QuantumRange/2);
      SetPixelGreen(&virtual_pixel,QuantumRange/2);
      SetPixelBlue(&virtual_pixel,QuantumRange/2);
      SetPixelOpacity(&virtual_pixel,OpaqueOpacity);
      break;
    }
    case TransparentVirtualPixelMethod:
    {
      SetPixelRed(&virtual_pixel,0);
      SetPixelGreen(&virtual_pixel,0);
      SetPixelBlue(&virtual_pixel,0);
      SetPixelOpacity(&virtual_pixel,TransparentOpacity);
      break;
    }
    case MaskVirtualPixelMethod:
    case WhiteVirtualPixelMethod:
    {
      SetPixelRed(&virtual_pixel,QuantumRange);
      SetPixelGreen(&virtual_pixel,QuantumRange);
      SetPixelBlue(&virtual_pixel,QuantumRange);
      SetPixelOpacity(&virtual_pixel,OpaqueOpacity);
      break;
    }
    default:
    {
      virtual_pixel=image->background_color;
      break;
    }
  }
  virtual_index=(IndexPacket) 0;
  for (v=0; v < (ssize_t) rows; v++)
  {
    ssize_t
      y_offset;

    y_offset=y+v;
    if ((virtual_pixel_method == EdgeVirtualPixelMethod) ||
        (virtual_pixel_method == UndefinedVirtualPixelMethod))
      y_offset=EdgeY(y_offset,cache_info->rows);
    for (u=0; u < (ssize_t) columns; u+=length)
    {
      ssize_t
        x_offset;

      x_offset=x+u;
      /*
        From here on, length is reused as the run length: the number of
        consecutive in-bounds pixels that can be copied with one memcpy.
      */
      length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,columns-u);
      if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) ||
          ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) ||
          (length == 0))
        {
          MagickModulo
            x_modulo,
            y_modulo;

          /*
            Transfer a single pixel.
          */
          length=(MagickSizeType) 1;
          switch (virtual_pixel_method)
          {
            case BackgroundVirtualPixelMethod:
            case ConstantVirtualPixelMethod:
            case BlackVirtualPixelMethod:
            case GrayVirtualPixelMethod:
            case TransparentVirtualPixelMethod:
            case MaskVirtualPixelMethod:
            case WhiteVirtualPixelMethod:
            {
              /* Constant-color methods use the precomputed fill pixel. */
              p=(&virtual_pixel);
              virtual_indexes=(&virtual_index);
              break;
            }
            case EdgeVirtualPixelMethod:
            default:
            {
              /*
                Recurse for a single pixel clamped to the nearest edge; the
                recursion uses the separate virtual_nexus so this nexus's
                pixels are not disturbed.
              */
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),
                EdgeY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus,
                exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                virtual_nexus);
              break;
            }
            case RandomVirtualPixelMethod:
            {
              /* Lazily create the cache's random generator on first use. */
              if (cache_info->random_info == (RandomInfo *) NULL)
                cache_info->random_info=AcquireRandomInfo();
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                RandomX(cache_info->random_info,cache_info->columns),
                RandomY(cache_info->random_info,cache_info->rows),1UL,1UL,
                virtual_nexus,exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                virtual_nexus);
              break;
            }
            case DitherVirtualPixelMethod:
            {
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                DitherX(x_offset,cache_info->columns),
                DitherY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus,
                exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                virtual_nexus);
              break;
            }
            case TileVirtualPixelMethod:
            {
              /* Wrap coordinates so the image tiles the whole plane. */
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                virtual_nexus);
              break;
            }
            case MirrorVirtualPixelMethod:
            {
              /* Like tiling, but odd replicas are reflected. */
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              if ((x_modulo.quotient & 0x01) == 1L)
                x_modulo.remainder=(ssize_t) cache_info->columns-
                  x_modulo.remainder-1L;
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if ((y_modulo.quotient & 0x01) == 1L)
                y_modulo.remainder=(ssize_t) cache_info->rows-
                  y_modulo.remainder-1L;
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                virtual_nexus);
              break;
            }
            case CheckerTileVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L)
                {
                  /* Alternate tiles get the constant fill color. */
                  p=(&virtual_pixel);
                  virtual_indexes=(&virtual_index);
                  break;
                }
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                virtual_nexus);
              break;
            }
            case HorizontalTileVirtualPixelMethod:
            {
              /* Tile horizontally; vertically out of range is constant. */
              if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows))
                {
                  p=(&virtual_pixel);
                  virtual_indexes=(&virtual_index);
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                virtual_nexus);
              break;
            }
            case VerticalTileVirtualPixelMethod:
            {
              /* Tile vertically; horizontally out of range is constant. */
              if ((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns))
                {
                  p=(&virtual_pixel);
                  virtual_indexes=(&virtual_index);
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                virtual_nexus);
              break;
            }
            case HorizontalTileEdgeVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL,
                virtual_nexus,exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                virtual_nexus);
              break;
            }
            case VerticalTileEdgeVirtualPixelMethod:
            {
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL,
                virtual_nexus,exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                virtual_nexus);
              break;
            }
          }
          if (p == (const PixelPacket *) NULL)
            break;
          *q++=(*p);
          if ((indexes != (IndexPacket *) NULL) &&
              (virtual_indexes != (const IndexPacket *) NULL))
            *indexes++=(*virtual_indexes);
          continue;
        }
      /*
        Transfer a run of pixels.
      */
      p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x_offset,y_offset,
        (size_t) length,1UL,virtual_nexus,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      virtual_indexes=GetVirtualIndexesFromNexus(cache_info,virtual_nexus);
      (void) memcpy(q,p,(size_t) length*sizeof(*p));
      q+=length;
      if ((indexes != (IndexPacket *) NULL) &&
          (virtual_indexes != (const IndexPacket *) NULL))
        {
          (void) memcpy(indexes,virtual_indexes,(size_t) length*
            sizeof(*virtual_indexes));
          indexes+=length;
        }
    }
    /* An early break above signals a failed transfer: propagate it. */
    if (u < (ssize_t) columns)
      break;
  }
  /*
    Free resources.
  */
  if (v < (ssize_t) rows)
    return((const PixelPacket *) NULL);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelCache() gets virtual pixels from the in-memory or disk pixel
% cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelCache() method is:
%
% const PixelPacket *GetVirtualPixelCache(const Image *image,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static const PixelPacket *GetVirtualPixelCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  NexusInfo
    *magick_restrict nexus_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Fetch the requested virtual region through this thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  nexus_info=cache_info->nexus_info[id];
  return(GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,columns,rows,
    nexus_info,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelQueue() returns the virtual pixels associated with the
% last call to QueueAuthenticPixels() or GetVirtualPixels().
%
% The format of the GetVirtualPixelQueue() method is:
%
%      const PixelPacket *GetVirtualPixelQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const PixelPacket *GetVirtualPixelQueue(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->methods.get_virtual_pixels_handler !=
(GetVirtualPixelsHandler) NULL)
return(cache_info->methods.get_virtual_pixels_handler(image));
assert(id < (int) cache_info->number_threads);
return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixels() returns an immutable pixel region. If the
% region is successfully accessed, a pointer to it is returned, otherwise
% NULL is returned. The returned pointer may point to a temporary working
% copy of the pixels or it may point to the original pixels in memory.
% Performance is maximized if the selected region is part of one row, or one
% or more full rows, since there is opportunity to access the pixels in-place
% (without a copy) if the image is in memory, or in a memory-mapped file. The
% returned pointer must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% PixelPacket. If the image type is CMYK or the storage class is PseudoClass,
%  call GetVirtualIndexQueue() after invoking GetVirtualPixels() to access
% the black color component or to obtain the colormap indexes (of type
% IndexPacket) corresponding to the region.
%
% If you plan to modify the pixels, use GetAuthenticPixels() instead.
%
% Note, the GetVirtualPixels() and GetAuthenticPixels() methods are not thread-
% safe. In a threaded environment, use GetCacheViewVirtualPixels() or
% GetCacheViewAuthenticPixels() instead.
%
% The format of the GetVirtualPixels() method is:
%
% const PixelPacket *GetVirtualPixels(const Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport const PixelPacket *GetVirtualPixels(const Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  GetVirtualPixelHandler
    handler;

  VirtualPixelMethod
    method;

  const int
    id = GetOpenMPThreadId();

  /*
    Return an immutable pixel region, via the cache's registered handler when
    one is installed, otherwise through this thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  method=GetPixelCacheVirtualMethod(image);
  handler=cache_info->methods.get_virtual_pixel_handler;
  if (handler != (GetVirtualPixelHandler) NULL)
    return(handler(image,method,x,y,columns,rows,exception));
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelCacheNexus(image,method,x,y,columns,rows,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsCache() returns the pixels associated with the last call
% to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualPixelsCache() method is:
%
% PixelPacket *GetVirtualPixelsCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const PixelPacket *GetVirtualPixelsCache(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Return the pixels from the most recent queue/get on this thread's
    cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsNexus() returns the pixels associated with the specified
% cache nexus.
%
% The format of the GetVirtualPixelsNexus() method is:
%
%      const PixelPacket *GetVirtualPixelsNexus(const Cache cache,
%        NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the colormap pixels.
%
*/
MagickExport const PixelPacket *GetVirtualPixelsNexus(const Cache cache,
  NexusInfo *nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Return the pixels held by the given cache nexus, or NULL when the cache
    has no storage class assigned yet.
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->storage_class == UndefinedClass)
    return((const PixelPacket *) NULL);
  return((const PixelPacket *) nexus_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M a s k P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MaskPixelCacheNexus() masks the cache nexus as defined by the image mask.
% The method returns MagickTrue if the pixel region is masked, otherwise
% MagickFalse.
%
% The format of the MaskPixelCacheNexus() method is:
%
% MagickBooleanType MaskPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to clip.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void ApplyPixelCompositeMask(const MagickPixelPacket *p,
  const MagickRealType alpha,const MagickPixelPacket *q,
  const MagickRealType beta,MagickPixelPacket *composite)
{
  double
    gamma;

  /*
    Blend pixel p (weight alpha) over pixel q (weight beta) into composite.
    NOTE(review): alpha/beta appear to be opacity values in the legacy sense
    (alpha == TransparentOpacity short-circuits to q unchanged); confirm
    against MagickOver_'s contract.
  */
  if (fabs((double) (alpha-TransparentOpacity)) < MagickEpsilon)
    {
      /* p contributes nothing: the result is q as-is. */
      *composite=(*q);
      return;
    }
  gamma=1.0-QuantumScale*QuantumScale*alpha*beta;
  gamma=PerceptibleReciprocal(gamma);  /* safe reciprocal; avoids div-by-0 */
  composite->red=gamma*MagickOver_(p->red,alpha,q->red,beta);
  composite->green=gamma*MagickOver_(p->green,alpha,q->green,beta);
  composite->blue=gamma*MagickOver_(p->blue,alpha,q->blue,beta);
  /* The index (black) channel only blends for CMYK-to-CMYK composites. */
  if ((p->colorspace == CMYKColorspace) && (q->colorspace == CMYKColorspace))
    composite->index=gamma*MagickOver_(p->index,alpha,q->index,beta);
}
static MagickBooleanType MaskPixelCacheNexus(Image *image,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const PixelPacket
    *magick_restrict r;

  IndexPacket
    *magick_restrict nexus_indexes,
    *magick_restrict indexes;

  MagickOffsetType
    n;

  MagickPixelPacket
    alpha,
    beta;

  NexusInfo
    **magick_restrict mask_nexus;

  PixelPacket
    *magick_restrict p,
    *magick_restrict q;

  ssize_t
    y;

  /*
    Apply the image's composite mask to the pixels of nexus_info: each nexus
    pixel (beta) is composited over the authentic cache pixel (alpha) using
    the mask pixel's intensity as the blend weight.  Returns MagickTrue on
    success, MagickFalse if the cache or any pixel region is unavailable.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((image->mask == (Image *) NULL) || (image->storage_class == PseudoClass))
    return(MagickTrue);
  if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0))
    return(MagickTrue);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  mask_nexus=AcquirePixelCacheNexus(1);
  /* p: authentic pixels underlying the region (read via virtual_nexus). */
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,
    nexus_info->region.y,nexus_info->region.width,nexus_info->region.height,
    nexus_info->virtual_nexus,exception);
  indexes=nexus_info->virtual_nexus->indexes;
  /* q: the nexus pixels being masked in place. */
  q=nexus_info->pixels;
  nexus_indexes=nexus_info->indexes;
  /* r: the corresponding mask pixels. */
  r=GetVirtualPixelCacheNexus(image->mask,MaskVirtualPixelMethod,
    nexus_info->region.x,nexus_info->region.y,nexus_info->region.width,
    nexus_info->region.height,mask_nexus[0],&image->exception);
  if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL) ||
      (r == (const PixelPacket *) NULL))
    {
      /*
        Bug fix: release the mask nexus before bailing out; the previous
        code leaked it on this error path.
      */
      mask_nexus=DestroyPixelCacheNexus(mask_nexus,1);
      return(MagickFalse);
    }
  n=0;
  GetMagickPixelPacket(image,&alpha);
  GetMagickPixelPacket(image,&beta);
  for (y=0; y < (ssize_t) nexus_info->region.height; y++)
  {
    ssize_t
      x;

    for (x=0; x < (ssize_t) nexus_info->region.width; x++)
    {
      SetMagickPixelPacket(image,p,indexes+n,&alpha);
      SetMagickPixelPacket(image,q,nexus_indexes+n,&beta);
      /* Mask intensity weights the new pixel over the authentic pixel. */
      ApplyPixelCompositeMask(&beta,GetPixelIntensity(image,r),&alpha,
        alpha.opacity,&beta);
      SetPixelRed(q,ClampToQuantum(beta.red));
      SetPixelGreen(q,ClampToQuantum(beta.green));
      SetPixelBlue(q,ClampToQuantum(beta.blue));
      SetPixelOpacity(q,ClampToQuantum(beta.opacity));
      if (cache_info->active_index_channel != MagickFalse)
        SetPixelIndex(nexus_indexes+n,GetPixelIndex(indexes+n));
      p++;
      q++;
      r++;
      n++;
    }
  }
  mask_nexus=DestroyPixelCacheNexus(mask_nexus,1);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ O p e n P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpenPixelCache() allocates the pixel cache. This includes defining the cache
% dimensions, allocating space for the image pixels and optionally the
% colormap indexes, and memory mapping the cache if it is disk based. The
% cache nexus array is initialized as well.
%
% The format of the OpenPixelCache() method is:
%
% MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType OpenPixelCacheOnDisk(CacheInfo *cache_info,
  const MapMode mode)
{
  int
    fd;

  /*
    Open (or reuse) the disk-backed pixel cache file in the requested mode.
    A unique temporary file is created when no cache filename is set yet;
    otherwise the file is opened read-only, write-only, or read-write,
    preferring exclusive creation and falling back to opening an existing
    file.
  */
  if ((cache_info->file != -1) && (cache_info->disk_mode == mode))
    return(MagickTrue);  /* cache already open and in the proper mode */
  if (*cache_info->cache_filename == '\0')
    fd=AcquireUniqueFileResource(cache_info->cache_filename);
  else
    if (mode == ReadMode)
      fd=open_utf8(cache_info->cache_filename,O_RDONLY | O_BINARY,0);
    else
      {
        int
          rw_flags;

        /* WriteMode opens write-only; IOMode (and anything else) read-write. */
        rw_flags=(mode == WriteMode) ? O_WRONLY : O_RDWR;
        fd=open_utf8(cache_info->cache_filename,rw_flags | O_CREAT | O_BINARY |
          O_EXCL,S_MODE);
        if (fd == -1)
          fd=open_utf8(cache_info->cache_filename,rw_flags | O_BINARY,S_MODE);
      }
  if (fd == -1)
    return(MagickFalse);
  (void) AcquireMagickResource(FileResource,1);
  if (cache_info->file != -1)
    (void) ClosePixelCacheOnDisk(cache_info);
  cache_info->file=fd;
  cache_info->disk_mode=mode;
  return(MagickTrue);
}
static inline MagickOffsetType WritePixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
  MagickOffsetType
    i;

  ssize_t
    count;

  /*
    Write length bytes of buffer to the pixel-cache file at the given byte
    offset.  Returns the number of bytes actually written; callers detect
    failure by comparing the result against length.  Interrupted writes
    (EINTR) are retried; any other error ends the loop early.
  */
#if !defined(MAGICKCORE_HAVE_PWRITE)
  /* No pwrite(): emulate it with an explicit seek before the write loop. */
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PWRITE)
    count=write(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      MAGICK_SSIZE_MAX));
#else
    count=pwrite(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      MAGICK_SSIZE_MAX),offset+i);
#endif
    if (count <= 0)
      {
        /* Zero the stride so i is unchanged; retry only on EINTR. */
        count=0;
        if (errno != EINTR)
          break;
      }
  }
  return(i);
}
static MagickBooleanType SetPixelCacheExtent(Image *image,MagickSizeType length)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    count,
    extent,
    offset;

  /*
    Ensure the disk-backed pixel cache file is at least length bytes long by
    writing a single byte at offset length-1, then rewind the file offset to
    the beginning.  Returns MagickFalse on any seek/write failure.
  */
  cache_info=(CacheInfo *) image->cache;
  if (image->debug != MagickFalse)
    {
      char
        format[MaxTextExtent],
        message[MaxTextExtent];

      (void) FormatMagickSize(length,MagickFalse,format);
      (void) FormatLocaleString(message,MaxTextExtent,
        "extend %s (%s[%d], disk, %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END);
  if (offset < 0)
    return(MagickFalse);
  if ((MagickSizeType) offset >= length)
    count=(MagickOffsetType) 1;  /* file already large enough */
  else
    {
      extent=(MagickOffsetType) length-1;
      count=WritePixelCacheRegion(cache_info,extent,1,(const unsigned char *)
        "");
      if (count != 1)
        return(MagickFalse);
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
      /* Optionally reserve the blocks now so later writes cannot ENOSPC. */
      if (cache_info->synchronize != MagickFalse)
        if (posix_fallocate(cache_info->file,offset+1,extent-offset) != 0)
          return(MagickFalse);
#endif
    }
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_SET);
  if (offset < 0)
    return(MagickFalse);
  return(MagickTrue);
}
/*
  OpenPixelCache() opens (or re-opens) the pixel cache for `image`, choosing
  the best available backing store in order of preference: in-memory (heap or
  anonymous map), a distributed cache on a remote host, a memory-mapped disk
  file, or a plain disk file.  Returns MagickTrue on success; on failure the
  cache type is left as UndefinedCache and MagickFalse is returned.
*/
static MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    source_info;

  char
    format[MaxTextExtent],
    message[MaxTextExtent];

  const char
    *hosts,
    *type;

  MagickSizeType
    length,
    number_pixels;

  MagickStatusType
    status;

  size_t
    columns,
    packet_size;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (cache_anonymous_memory < 0)
    {
      char
        *value;

      /*
        Does the security policy require anonymous mapping for pixel cache?
        (cache_anonymous_memory < 0 means "not yet determined"; resolve once.)
      */
      cache_anonymous_memory=0;
      value=GetPolicyValue("pixel-cache-memory");
      if (value == (char *) NULL)
        value=GetPolicyValue("cache:memory-map");
      if (LocaleCompare(value,"anonymous") == 0)
        {
#if defined(MAGICKCORE_HAVE_MMAP) && defined(MAP_ANONYMOUS)
          cache_anonymous_memory=1;
#else
          (void) ThrowMagickException(exception,GetMagickModule(),
            MissingDelegateError,"DelegateLibrarySupportNotBuiltIn",
            "'%s' (policy requires anonymous memory mapping)",image->filename);
#endif
        }
      value=DestroyString(value);
    }
  if ((image->columns == 0) || (image->rows == 0))
    ThrowBinaryException(CacheError,"NoPixelsDefinedInCache",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (((MagickSizeType) image->columns > cache_info->width_limit) ||
      ((MagickSizeType) image->rows > cache_info->height_limit))
    ThrowBinaryException(ImageError,"WidthOrHeightExceedsLimit",
      image->filename);
  if (GetMagickResourceLimit(ListLengthResource) != MagickResourceInfinity)
    {
      length=GetImageListLength(image);
      if (AcquireMagickResource(ListLengthResource,length) == MagickFalse)
        ThrowBinaryException(ResourceLimitError,"ListLengthExceedsLimit",
          image->filename);
    }
  /*
    Snapshot the current cache so existing pixels can be cloned into the new
    store (and later relinquished); file descriptor is not shared.
  */
  source_info=(*cache_info);
  source_info.file=(-1);
  (void) FormatLocaleString(cache_info->filename,MaxTextExtent,"%s[%.20g]",
    image->filename,(double) image->scene);
  cache_info->storage_class=image->storage_class;
  cache_info->colorspace=image->colorspace;
  cache_info->rows=image->rows;
  cache_info->columns=image->columns;
  cache_info->channels=image->channels;
  cache_info->active_index_channel=((image->storage_class == PseudoClass) ||
    (image->colorspace == CMYKColorspace)) ? MagickTrue : MagickFalse;
  cache_info->mode=mode;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  packet_size=sizeof(PixelPacket);
  if (cache_info->active_index_channel != MagickFalse)
    packet_size+=sizeof(IndexPacket);
  length=number_pixels*packet_size;
  /*
    Round-trip the length back to columns: a mismatch means the
    columns*rows*packet_size product overflowed.
  */
  columns=(size_t) (length/cache_info->rows/packet_size);
  if ((cache_info->columns != columns) || ((ssize_t) cache_info->columns < 0) ||
      ((ssize_t) cache_info->rows < 0))
    ThrowBinaryException(ResourceLimitError,"PixelCacheAllocationFailed",
      image->filename);
  cache_info->length=length;
  if (image->ping != MagickFalse)
    {
      /* ping mode: metadata only, no pixel store is allocated */
      cache_info->type=PingCache;
      return(MagickTrue);
    }
  status=AcquireMagickResource(AreaResource,(MagickSizeType)
    cache_info->columns*cache_info->rows);
  if (cache_info->mode == PersistMode)
    status=MagickFalse;  /* persistent caches must live on disk */
  length=number_pixels*(sizeof(PixelPacket)+sizeof(IndexPacket));
  if ((status != MagickFalse) &&
      (length == (MagickSizeType) ((size_t) length)) &&
      ((cache_info->type == UndefinedCache) ||
       (cache_info->type == MemoryCache)))
    {
      status=AcquireMagickResource(MemoryResource,cache_info->length);
      if (status != MagickFalse)
        {
          status=MagickTrue;
          if (cache_anonymous_memory <= 0)
            {
              cache_info->mapped=MagickFalse;
              cache_info->pixels=(PixelPacket *) MagickAssumeAligned(
                AcquireAlignedMemory(1,(size_t) cache_info->length));
            }
          else
            {
              cache_info->mapped=MagickTrue;
              cache_info->pixels=(PixelPacket *) MapBlob(-1,IOMode,0,(size_t)
                cache_info->length);
            }
          if (cache_info->pixels == (PixelPacket *) NULL)
            {
              /* allocation failed: fall back to the previous pixel store */
              cache_info->mapped=source_info.mapped;
              cache_info->pixels=source_info.pixels;
            }
          else
            {
              /*
                Create memory pixel cache.
              */
              cache_info->colorspace=image->colorspace;
              cache_info->type=MemoryCache;
              cache_info->indexes=(IndexPacket *) NULL;
              if (cache_info->active_index_channel != MagickFalse)
                cache_info->indexes=(IndexPacket *) (cache_info->pixels+
                  number_pixels);
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status&=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickTrue,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MaxTextExtent,
                    "open %s (%s %s, %.20gx%.20g %s)",cache_info->filename,
                    cache_info->mapped != MagickFalse ? "Anonymous" : "Heap",
                    type,(double) cache_info->columns,(double) cache_info->rows,
                    format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              cache_info->storage_class=image->storage_class;
              if (status == 0)
                {
                  cache_info->type=UndefinedCache;
                  return(MagickFalse);
                }
              return(MagickTrue);
            }
        }
    }
  status=AcquireMagickResource(DiskResource,cache_info->length);
  hosts=(const char *) GetImageRegistry(StringRegistryType,"cache:hosts",
    exception);
  if ((status == MagickFalse) && (hosts != (const char *) NULL))
    {
      DistributeCacheInfo
        *server_info;

      /*
        Distribute the pixel cache to a remote server.
      */
      server_info=AcquireDistributeCacheInfo(exception);
      if (server_info != (DistributeCacheInfo *) NULL)
        {
          status=OpenDistributePixelCache(server_info,image);
          if (status == MagickFalse)
            {
              ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
                GetDistributeCacheHostname(server_info));
              server_info=DestroyDistributeCacheInfo(server_info);
            }
          else
            {
              /*
                Create a distributed pixel cache.
              */
              status=MagickTrue;
              cache_info->type=DistributedCache;
              cache_info->storage_class=image->storage_class;
              cache_info->colorspace=image->colorspace;
              cache_info->server_info=server_info;
              (void) FormatLocaleString(cache_info->cache_filename,
                MaxTextExtent,"%s:%d",GetDistributeCacheHostname(
                (DistributeCacheInfo *) cache_info->server_info),
                GetDistributeCachePort((DistributeCacheInfo *)
                cache_info->server_info));
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickFalse,
                    format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MaxTextExtent,
                    "open %s (%s[%d], %s, %.20gx%.20g %s)",cache_info->filename,
                    cache_info->cache_filename,GetDistributeCacheFile(
                    (DistributeCacheInfo *) cache_info->server_info),type,
                    (double) cache_info->columns,(double) cache_info->rows,
                    format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              if (status == 0)
                {
                  cache_info->type=UndefinedCache;
                  return(MagickFalse);
                }
              return(MagickTrue);
            }
        }
      /* no disk resource and the remote cache failed: give up */
      cache_info->type=UndefinedCache;
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  /*
    Create pixel cache on disk.
  */
  if (status == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode) &&
      (cache_info->mode != PersistMode))
    {
      /* discard any previous disk file; a fresh one will be created */
      (void) ClosePixelCacheOnDisk(cache_info);
      *cache_info->cache_filename='\0';
    }
  if (OpenPixelCacheOnDisk(cache_info,mode) == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
        image->filename);
      return(MagickFalse);
    }
  status=SetPixelCacheExtent(image,(MagickSizeType) cache_info->offset+
    cache_info->length);
  if (status == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      ThrowFileException(exception,CacheError,"UnableToExtendCache",
        image->filename);
      return(MagickFalse);
    }
  cache_info->storage_class=image->storage_class;
  cache_info->colorspace=image->colorspace;
  cache_info->type=DiskCache;
  length=number_pixels*(sizeof(PixelPacket)+sizeof(IndexPacket));
  if (length == (MagickSizeType) ((size_t) length))
    {
      /* the cache fits in the address space: try to memory-map the file */
      status=AcquireMagickResource(MapResource,cache_info->length);
      if (status != MagickFalse)
        {
          cache_info->pixels=(PixelPacket *) MapBlob(cache_info->file,mode,
            cache_info->offset,(size_t) cache_info->length);
          if (cache_info->pixels == (PixelPacket *) NULL)
            {
              cache_info->mapped=source_info.mapped;
              cache_info->pixels=source_info.pixels;
              RelinquishMagickResource(MapResource,cache_info->length);
            }
          else
            {
              /*
                Create file-backed memory-mapped pixel cache.
              */
              (void) ClosePixelCacheOnDisk(cache_info);
              cache_info->type=MapCache;
              cache_info->mapped=MagickTrue;
              cache_info->indexes=(IndexPacket *) NULL;
              if (cache_info->active_index_channel != MagickFalse)
                cache_info->indexes=(IndexPacket *) (cache_info->pixels+
                  number_pixels);
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickTrue,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MaxTextExtent,
                    "open %s (%s[%d], %s, %.20gx%.20g %s)",
                    cache_info->filename,cache_info->cache_filename,
                    cache_info->file,type,(double) cache_info->columns,
                    (double) cache_info->rows,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              if (status == 0)
                {
                  cache_info->type=UndefinedCache;
                  return(MagickFalse);
                }
              return(MagickTrue);
            }
        }
    }
  /* fall through: plain (unmapped) disk cache */
  status=MagickTrue;
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode))
    {
      status=ClonePixelCacheRepository(cache_info,&source_info,exception);
      RelinquishPixelCachePixels(&source_info);
    }
  if (image->debug != MagickFalse)
    {
      (void) FormatMagickSize(cache_info->length,MagickFalse,format);
      type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
        cache_info->type);
      (void) FormatLocaleString(message,MaxTextExtent,
        "open %s (%s[%d], %s, %.20gx%.20g %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,type,(double)
        cache_info->columns,(double) cache_info->rows,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  if (status == 0)
    {
      cache_info->type=UndefinedCache;
      return(MagickFalse);
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r s i s t P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PersistPixelCache() attaches to or initializes a persistent pixel cache. A
% persistent pixel cache is one that resides on disk and is not destroyed
% when the program exits.
%
% The format of the PersistPixelCache() method is:
%
% MagickBooleanType PersistPixelCache(Image *image,const char *filename,
% const MagickBooleanType attach,MagickOffsetType *offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o filename: the persistent pixel cache filename.
%
%    o attach: A value other than zero attaches to an existing persistent
%      pixel cache; a value of zero initializes (writes) the persistent
%      pixel cache.
%
% o offset: the offset in the persistent cache to store pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType PersistPixelCache(Image *image,
  const char *filename,const MagickBooleanType attach,MagickOffsetType *offset,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    *magick_restrict clone_info;

  MagickBooleanType
    status;

  ssize_t
    page_size;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (void *) NULL);
  assert(filename != (const char *) NULL);
  assert(offset != (MagickOffsetType *) NULL);
  page_size=GetMagickPageSize();
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* flush any device-side pixels to host memory before touching the cache */
  CopyOpenCLBuffer(cache_info);
#endif
  if (attach != MagickFalse)
    {
      /*
        Attach existing persistent pixel cache: point this image's cache at
        the region of `filename` starting at *offset and memory-map it.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CacheEvent,GetMagickModule(),
          "attach persistent cache");
      (void) CopyMagickString(cache_info->cache_filename,filename,
        MaxTextExtent);
      cache_info->type=MapCache;
      cache_info->offset=(*offset);
      if (OpenPixelCache(image,ReadMode,exception) == MagickFalse)
        return(MagickFalse);
      /* advance the caller's offset past this cache, rounded to a page */
      *offset+=cache_info->length+page_size-(cache_info->length % page_size);
      return(MagickTrue);
    }
  /*
    Clone persistent pixel cache: write a disk-backed copy of the current
    cache into `filename` at *offset.
  */
  status=AcquireMagickResource(DiskResource,cache_info->length);
  if (status == MagickFalse)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  clone_info=(CacheInfo *) ClonePixelCache(cache_info);
  clone_info->type=DiskCache;
  (void) CopyMagickString(clone_info->cache_filename,filename,MaxTextExtent);
  clone_info->file=(-1);
  clone_info->storage_class=cache_info->storage_class;
  clone_info->colorspace=cache_info->colorspace;
  clone_info->columns=cache_info->columns;
  clone_info->rows=cache_info->rows;
  clone_info->active_index_channel=cache_info->active_index_channel;
  clone_info->mode=PersistMode;
  clone_info->length=cache_info->length;
  clone_info->channels=cache_info->channels;
  clone_info->offset=(*offset);
  status=ClonePixelCacheRepository(clone_info,cache_info,exception);
  /* advance offset as in the attach path so subsequent scenes stack */
  *offset+=cache_info->length+page_size-(cache_info->length % page_size);
  clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixelCacheNexus() allocates a region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelCacheNexus() method is:
%
% PixelPacket *QueueAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% const MagickBooleanType clone,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to set.
%
% o clone: clone the pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  QueueAuthenticPixel() is a thin convenience wrapper: it forwards its
  arguments, unchanged, to QueueAuthenticPixelCacheNexus() and returns
  whatever that method returns (NULL on failure).
*/
MagickExport PixelPacket *QueueAuthenticPixel(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  PixelPacket
    *magick_restrict pixels;

  pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,clone,
    nexus_info,exception);
  return(pixels);
}
MagickExport PixelPacket *QueueAuthenticPixelCacheNexus(Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    offset;

  MagickSizeType
    number_pixels;

  PixelPacket
    *magick_restrict pixels;

  /*
    Validate pixel cache geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception);
  if (cache_info == (Cache) NULL)
    return((PixelPacket *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  /* the region's origin must lie strictly inside the cache */
  if ((cache_info->columns == 0) || (cache_info->rows == 0) || (x < 0) ||
      (y < 0) || (x >= (ssize_t) cache_info->columns) ||
      (y >= (ssize_t) cache_info->rows))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "PixelsAreNotAuthentic","`%s'",image->filename);
      return((PixelPacket *) NULL);
    }
  offset=(MagickOffsetType) y*cache_info->columns+x;
  if (offset < 0)  /* y*columns+x overflowed the signed offset */
    return((PixelPacket *) NULL);
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  /* advance to the region's last pixel and reject regions past the cache */
  offset+=(MagickOffsetType) (rows-1)*cache_info->columns+columns-1;
  if ((MagickSizeType) offset >= number_pixels)
    return((PixelPacket *) NULL);
  /*
    Return pixel cache.
  */
  pixels=SetPixelCacheNexusPixels(cache_info,WriteMode,x,y,columns,rows,
    (image->clip_mask != (Image *) NULL) || (image->mask != (Image *) NULL) ?
    MagickTrue : MagickFalse,nexus_info,exception);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixelsCache() allocates a region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelsCache() method is:
%
% PixelPacket *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  QueueAuthenticPixelsCache() is the default queue-authentic-pixels handler:
  it queues the requested region through the calling thread's cache nexus.
*/
static PixelPacket *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  NexusInfo
    *magick_restrict nexus_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  /* each OpenMP thread owns a private nexus to avoid contention */
  nexus_info=cache_info->nexus_info[id];
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    nexus_info,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u e u e A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixels() queues a mutable pixel region. If the region is
% successfully initialized a pointer to a PixelPacket array representing the
% region is returned, otherwise NULL is returned. The returned pointer may
% point to a temporary working buffer for the pixels or it may point to the
% final location of the pixels in memory.
%
% Write-only access means that any existing pixel values corresponding to
% the region are ignored. This is useful if the initial image is being
% created from scratch, or if the existing pixel values are to be
% completely replaced without need to refer to their pre-existing values.
% The application is free to read and write the pixel buffer returned by
% QueueAuthenticPixels() any way it pleases. QueueAuthenticPixels() does not
% initialize the pixel array values. Initializing pixel array values is the
% application's responsibility.
%
% Performance is maximized if the selected region is part of one row, or
% one or more full rows, since then there is opportunity to access the
% pixels in-place (without a copy) if the image is in memory, or in a
% memory-mapped file. The returned pointer must *never* be deallocated
% by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% PixelPacket. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticIndexQueue() after invoking GetAuthenticPixels() to obtain
% the black color component or the colormap indexes (of type IndexPacket)
% corresponding to the region. Once the PixelPacket (and/or IndexPacket)
% array has been updated, the changes must be saved back to the underlying
% image using SyncAuthenticPixels() or they may be lost.
%
% The format of the QueueAuthenticPixels() method is:
%
% PixelPacket *QueueAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  QueueAuthenticPixels() queues a mutable pixel region: it defers to a
  user-installed queue handler when one is registered, otherwise it queues
  the region through the calling thread's cache nexus.
*/
MagickExport PixelPacket *QueueAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  QueueAuthenticPixelsHandler
    queue_handler;

  const int
    id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  queue_handler=cache_info->methods.queue_authentic_pixels_handler;
  if (queue_handler != (QueueAuthenticPixelsHandler) NULL)
    return(queue_handler(image,x,y,columns,rows,exception));
  assert(id < (int) cache_info->number_threads);
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e I n d e x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCacheIndexes() reads colormap indexes from the specified region of
% the pixel cache.
%
% The format of the ReadPixelCacheIndexes() method is:
%
% MagickBooleanType ReadPixelCacheIndexes(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the colormap indexes.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ReadPixelCacheRegion() reads up to `length` bytes at `offset` from the
  pixel-cache file into `buffer`, retrying interrupted reads.  Returns the
  number of bytes actually read (less than `length` on error or short read),
  or -1 if the initial seek fails.
*/
static inline MagickOffsetType ReadPixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,unsigned char *magick_restrict buffer)
{
  MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PREAD)
  /* without pread() the shared file position must be seeked explicitly */
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PREAD)
    count=read(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      MAGICK_SSIZE_MAX));
#else
    /* pread() keeps the file position untouched, safe for shared handles */
    count=pread(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      MAGICK_SSIZE_MAX),offset+i);
#endif
    if (count <= 0)
      {
        count=0;
        if (errno != EINTR)  /* EOF or hard error: stop, retry only EINTR */
          break;
      }
  }
  return(i);
}
/*
  ReadPixelCacheIndexes() reads colormap indexes from the region of the pixel
  cache described by `nexus_info` into the nexus staging buffer.  Returns
  MagickTrue on success; MagickFalse when the cache has no index channel, the
  region geometry overflows, or the underlying read fails.
*/
static MagickBooleanType ReadPixelCacheIndexes(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  IndexPacket
    *magick_restrict q;

  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  ssize_t
    y;

  size_t
    rows;

  if (cache_info->active_index_channel == MagickFalse)
    return(MagickFalse);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus aliases the cache directly; nothing to do */
  /*
    Validate the region geometry against arithmetic overflow, mirroring the
    checks in ReadPixelCachePixels().
  */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns;
  if ((ssize_t) (offset/cache_info->columns) != nexus_info->region.y)
    return(MagickFalse);
  offset+=nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*sizeof(IndexPacket);
  if ((length/sizeof(IndexPacket)) != nexus_info->region.width)
    return(MagickFalse);
  rows=nexus_info->region.height;
  extent=length*rows;
  if ((extent == 0) || ((extent/length) != rows))
    return(MagickFalse);
  q=nexus_info->indexes;
  y=0;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      IndexPacket
        *magick_restrict p;

      /*
        Read indexes from memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /* full-width region: collapse to one contiguous copy */
          length=extent;
          rows=1UL;
        }
      p=cache_info->indexes+offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->columns;
        q+=nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read indexes from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      /* indexes are stored after the full pixel plane in the cache file */
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+extent*
          sizeof(PixelPacket)+offset*sizeof(*q),length,(unsigned char *) q);
        if (count < (MagickOffsetType) length)
          break;
        offset+=cache_info->columns;
        q+=nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read indexes from distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* transfer row by row */
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCacheIndexes((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  if (y < (ssize_t) rows)  /* a row transfer above broke out early */
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCachePixels() reads pixels from the specified region of the pixel
% cache.
%
% The format of the ReadPixelCachePixels() method is:
%
% MagickBooleanType ReadPixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ReadPixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  PixelPacket
    *magick_restrict q;

  ssize_t
    y;

  size_t
    rows;

  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus aliases the cache directly; nothing to do */
  /*
    Validate the region geometry against arithmetic overflow before any
    pointer arithmetic or I/O uses it.
  */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns;
  if ((ssize_t) (offset/cache_info->columns) != nexus_info->region.y)
    return(MagickFalse);
  offset+=nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*sizeof(PixelPacket);
  if ((length/sizeof(PixelPacket)) != nexus_info->region.width)
    return(MagickFalse);
  rows=nexus_info->region.height;
  extent=length*rows;
  if ((extent == 0) || ((extent/length) != rows))
    return(MagickFalse);
  q=nexus_info->pixels;
  y=0;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      PixelPacket
        *magick_restrict p;

      /*
        Read pixels from memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /* full-width region: collapse to one contiguous copy */
          length=extent;
          rows=1UL;
        }
      p=cache_info->pixels+offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->columns;
        q+=nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read pixels from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+offset*
          sizeof(*q),length,(unsigned char *) q);
        if (count < (MagickOffsetType) length)
          break;
        offset+=cache_info->columns;
        q+=nexus_info->region.width;
      }
      /* recycle the descriptor when the process is near its fd limit */
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read pixels from distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* transfer row by row */
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  if (y < (ssize_t) rows)  /* a row transfer above broke out early */
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e f e r e n c e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReferencePixelCache() increments the reference count associated with the
% pixel cache returning a pointer to the cache.
%
% The format of the ReferencePixelCache method is:
%
%      Cache ReferencePixelCache(Cache cache)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
*/
/*
  ReferencePixelCache() increments the reference count of the pixel cache
  under its semaphore and returns the cache pointer.
*/
MagickExport Cache ReferencePixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  /* `cache` is a Cache (opaque pointer), so compare against (Cache) NULL --
     the (Cache *) cast used previously compared against the wrong type */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count++;
  UnlockSemaphoreInfo(cache_info->semaphore);
  return(cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+   R e s e t   P i x e l   C a c h e   E p o c h                             %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetPixelCacheEpoch() resets the pixel cache epoch.
%
% The format of the ResetPixelCacheEpoch method is:
%
% void ResetPixelCacheEpoch(void)
%
*/
MagickPrivate void ResetPixelCacheEpoch(void)
{
  /* Zero the module-level epoch counter; presumably invoked when the cache
     component is (re)initialized -- confirm against the component caller. */
  cache_epoch=0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheMethods() sets the image pixel methods to the specified ones.
%
% The format of the SetPixelCacheMethods() method is:
%
% SetPixelCacheMethods(Cache *,CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
/*
  SetPixelCacheMethods() overwrites each pixel-cache method with the
  corresponding entry from `cache_methods`, but only when that entry is
  non-NULL, so callers may override a subset of handlers.
*/
MagickExport void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods)
{
  CacheInfo
    *magick_restrict cache_info;

  GetOneAuthenticPixelFromHandler
    get_one_authentic_pixel_from_handler;

  GetOneVirtualPixelFromHandler
    get_one_virtual_pixel_from_handler;

  /*
    Set cache pixel methods.
  */
  assert(cache != (Cache) NULL);
  assert(cache_methods != (CacheMethods *) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  if (cache_methods->get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL)
    cache_info->methods.get_virtual_pixel_handler=
      cache_methods->get_virtual_pixel_handler;
  if (cache_methods->destroy_pixel_handler != (DestroyPixelHandler) NULL)
    cache_info->methods.destroy_pixel_handler=
      cache_methods->destroy_pixel_handler;
  if (cache_methods->get_virtual_indexes_from_handler !=
      (GetVirtualIndexesFromHandler) NULL)
    cache_info->methods.get_virtual_indexes_from_handler=
      cache_methods->get_virtual_indexes_from_handler;
  if (cache_methods->get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    cache_info->methods.get_authentic_pixels_handler=
      cache_methods->get_authentic_pixels_handler;
  if (cache_methods->queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    cache_info->methods.queue_authentic_pixels_handler=
      cache_methods->queue_authentic_pixels_handler;
  if (cache_methods->sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    cache_info->methods.sync_authentic_pixels_handler=
      cache_methods->sync_authentic_pixels_handler;
  if (cache_methods->get_authentic_pixels_from_handler !=
      (GetAuthenticPixelsFromHandler) NULL)
    cache_info->methods.get_authentic_pixels_from_handler=
      cache_methods->get_authentic_pixels_from_handler;
  if (cache_methods->get_authentic_indexes_from_handler !=
      (GetAuthenticIndexesFromHandler) NULL)
    cache_info->methods.get_authentic_indexes_from_handler=
      cache_methods->get_authentic_indexes_from_handler;
  /*
    BUG FIX: test the NEW handler from cache_methods, not the handler already
    installed in cache_info->methods -- the old code could replace a valid
    handler with NULL.  This now mirrors the authentic-pixel case below.
  */
  get_one_virtual_pixel_from_handler=
    cache_methods->get_one_virtual_pixel_from_handler;
  if (get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    cache_info->methods.get_one_virtual_pixel_from_handler=
      cache_methods->get_one_virtual_pixel_from_handler;
  get_one_authentic_pixel_from_handler=
    cache_methods->get_one_authentic_pixel_from_handler;
  if (get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    cache_info->methods.get_one_authentic_pixel_from_handler=
      cache_methods->get_one_authentic_pixel_from_handler;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e N e x u s P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheNexusPixels() defines the region of the cache for the
% specified cache nexus.
%
% The format of the SetPixelCacheNexusPixels() method is:
%
%      PixelPacket *SetPixelCacheNexusPixels(
%        const CacheInfo *magick_restrict cache_info,const MapMode mode,
%        const ssize_t x,const ssize_t y,const size_t width,
%        const size_t height,const MagickBooleanType buffered,
%        NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o x,y,width,height: define the region of this particular cache nexus.
%
% o buffered: pixels are buffered.
%
% o nexus_info: the cache nexus to set.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType AcquireCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MagickSizeType length,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  /*
    Allocate a staging buffer of 'length' bytes for the nexus, either from
    the heap or as an anonymous memory map; on success nexus_info->cache,
    ->length and ->mapped are updated.
  */
  if (((MagickSizeType) ((size_t) length)) != length)
    {
      /*
        The request cannot be represented as a size_t on this platform.
      */
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
  if (cache_anonymous_memory > 0)
    {
      /*
        Anonymous-map the staging buffer.
      */
      nexus_info->cache=(PixelPacket *) MapBlob(-1,IOMode,0,(size_t) length);
      if (nexus_info->cache != (PixelPacket *) NULL)
        nexus_info->mapped=MagickTrue;
    }
  else
    {
      /*
        Heap-allocate an aligned staging buffer and zero it.
      */
      nexus_info->cache=(PixelPacket *) MagickAssumeAligned(
        AcquireAlignedMemory(1,(size_t) length));
      if (nexus_info->cache != (PixelPacket *) NULL)
        (void) memset(nexus_info->cache,0,(size_t) length);
    }
  if (nexus_info->cache == (PixelPacket *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  nexus_info->length=length;
  return(MagickTrue);
}
static inline void PrefetchPixelCacheNexusPixels(const NexusInfo *nexus_info,
  const MapMode mode)
{
  unsigned char
    *pixels;

  /*
    Hint the CPU to prefetch the second cache line of the nexus pixels;
    skip nexuses smaller than one cache line.
  */
  if (nexus_info->length < CACHE_LINE_SIZE)
    return;
  pixels=(unsigned char *) nexus_info->pixels+CACHE_LINE_SIZE;
  if (mode == ReadMode)
    {
      MagickCachePrefetch(pixels,0,1);
      return;
    }
  MagickCachePrefetch(pixels,1,1);
}
static inline MagickBooleanType ValidatePixelOffset(const ssize_t x,
  const size_t a)
{
  MagickBooleanType
    status;

  /*
    Return MagickFalse when offsetting x by an extent of a could overflow
    (or underflow) ssize_t arithmetic.
  */
  status=MagickTrue;
  if ((x >= 0) && (x >= ((ssize_t) MAGICK_SSIZE_MAX-(ssize_t) a)))
    status=MagickFalse;
  else
    if (x <= ((ssize_t) MAGICK_SSIZE_MIN+(ssize_t) a))
      status=MagickFalse;
  return(status);
}
static PixelPacket *SetPixelCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MapMode mode,
  const ssize_t x,const ssize_t y,const size_t width,const size_t height,
  const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  /*
    Define the region (x,y,width,height) this nexus covers and return a
    pointer to its pixels: either aliased directly into the cache, or a
    private staging buffer that is later synced back.
  */
  assert(cache_info != (const CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((PixelPacket *) NULL);
  assert(nexus_info->signature == MagickCoreSignature);
  (void) memset(&nexus_info->region,0,sizeof(nexus_info->region));
  if ((width == 0) || (height == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "NoPixelsDefinedInCache","`%s'",cache_info->filename);
      return((PixelPacket *) NULL);
    }
  /*
    Reject dimensions beyond the configured limits and offsets that would
    overflow ssize_t arithmetic (see ValidatePixelOffset()).
  */
  if (((MagickSizeType) width > cache_info->width_limit) ||
      ((MagickSizeType) height > cache_info->height_limit) ||
      (ValidatePixelOffset(x,width) == MagickFalse) ||
      (ValidatePixelOffset(y,height) == MagickFalse))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "WidthOrHeightExceedsLimit","`%s'",cache_info->filename);
      return((PixelPacket *) NULL);
    }
  if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) &&
      (buffered == MagickFalse))
    {
      /*
        Region lies fully inside the cache and is row-contiguous (full-width
        rows, or a single partial row), so the nexus can alias the cache
        pixels directly instead of staging a copy.
      */
      if (((x >= 0) && (y >= 0) &&
          (((ssize_t) height+y-1) < (ssize_t) cache_info->rows)) &&
          (((x == 0) && (width == cache_info->columns)) || ((height == 1) &&
          (((ssize_t) width+x-1) < (ssize_t) cache_info->columns))))
        {
          MagickOffsetType
            offset;

          /*
            Pixels are accessed directly from memory.
          */
          offset=(MagickOffsetType) y*cache_info->columns+x;
          nexus_info->pixels=cache_info->pixels+offset;
          nexus_info->indexes=(IndexPacket *) NULL;
          if (cache_info->active_index_channel != MagickFalse)
            nexus_info->indexes=cache_info->indexes+offset;
          nexus_info->region.width=width;
          nexus_info->region.height=height;
          nexus_info->region.x=x;
          nexus_info->region.y=y;
          nexus_info->authentic_pixel_cache=MagickTrue;
          PrefetchPixelCacheNexusPixels(nexus_info,mode);
          return(nexus_info->pixels);
        }
    }
  /*
    Pixels are stored in a staging region until they are synced to the cache.
  */
  number_pixels=(MagickSizeType) width*height;
  length=MagickMax(number_pixels,MagickMax(cache_info->columns,
    cache_info->rows))*sizeof(PixelPacket);
  if (cache_info->active_index_channel != MagickFalse)
    length+=number_pixels*sizeof(IndexPacket);
  /*
    (Re)allocate the staging buffer only when the current one is too small.
  */
  status=MagickTrue;
  if (nexus_info->cache == (PixelPacket *) NULL)
    status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
  else
    if (nexus_info->length < length)
      {
        RelinquishCacheNexusPixels(nexus_info);
        status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
      }
  if (status == MagickFalse)
    {
      (void) memset(&nexus_info->region,0,sizeof(nexus_info->region));
      return((PixelPacket *) NULL);
    }
  /*
    Colormap indexes, when active, live immediately after the pixels in the
    staging buffer.
  */
  nexus_info->pixels=nexus_info->cache;
  nexus_info->indexes=(IndexPacket *) NULL;
  if (cache_info->active_index_channel != MagickFalse)
    nexus_info->indexes=(IndexPacket *) (nexus_info->pixels+number_pixels);
  nexus_info->region.width=width;
  nexus_info->region.height=height;
  nexus_info->region.x=x;
  nexus_info->region.y=y;
  nexus_info->authentic_pixel_cache=cache_info->type == PingCache ?
    MagickTrue : MagickFalse;
  PrefetchPixelCacheNexusPixels(nexus_info,mode);
  return(nexus_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the
% pixel cache and returns the previous setting. A virtual pixel is any pixel
% access that is outside the boundaries of the image cache.
%
% The format of the SetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod SetPixelCacheVirtualMethod(const Image *image,
% const VirtualPixelMethod virtual_pixel_method)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: choose the type of virtual pixel.
%
*/
static MagickBooleanType SetCacheAlphaChannel(Image *image,
  const Quantum opacity)
{
  CacheInfo
    *magick_restrict cache_info;

  CacheView
    *magick_restrict image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Set the opacity of every pixel to 'opacity' and flag the image as having
    a matte (alpha) channel.  Rows are processed in parallel when OpenMP is
    available.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  image->matte=MagickTrue;
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,&image->exception); /* must be virtual */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelPacket
      *magick_restrict q;

    ssize_t
      x;

    /* a prior row failed: skip remaining rows (cannot break in OpenMP) */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
      &image->exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      q->opacity=opacity;
      q++;
    }
    status=SyncCacheViewAuthenticPixels(image_view,&image->exception);
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
MagickExport VirtualPixelMethod SetPixelCacheVirtualMethod(const Image *image,
  const VirtualPixelMethod virtual_pixel_method)
{
  CacheInfo
    *magick_restrict cache_info;

  VirtualPixelMethod
    method;

  /*
    Record the new virtual-pixel method on the cache and return the previous
    setting.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  method=cache_info->virtual_pixel_method;
  cache_info->virtual_pixel_method=virtual_pixel_method;
  if ((image->columns != 0) && (image->rows != 0))
    switch (virtual_pixel_method)
    {
      case BackgroundVirtualPixelMethod:
      {
        /*
          A non-opaque background requires a matte channel; a non-gray
          background requires leaving a gray colorspace.  Note the const
          image is cast away to apply these adjustments.
        */
        if ((image->background_color.opacity != OpaqueOpacity) &&
            (image->matte == MagickFalse))
          (void) SetCacheAlphaChannel((Image *) image,OpaqueOpacity);
        if ((IsPixelGray(&image->background_color) == MagickFalse) &&
            (IsGrayColorspace(image->colorspace) != MagickFalse))
          (void) SetImageColorspace((Image *) image,sRGBColorspace);
        break;
      }
      case TransparentVirtualPixelMethod:
      {
        /*
          Transparent virtual pixels require a matte channel.
        */
        if (image->matte == MagickFalse)
          (void) SetCacheAlphaChannel((Image *) image,OpaqueOpacity);
        break;
      }
      default:
        break;
    }
  return(method);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c O p e n C L B u f f e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticOpenCLBuffer() ensures all the OpenCL operations have been
% completed and updates the host memory.
%
% The format of the SyncAuthenticOpenCLBuffer() method is:
%
% void SyncAuthenticOpenCLBuffer(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void CopyOpenCLBuffer(CacheInfo *magick_restrict cache_info)
{
  MagickCLEnv
    clEnv;

  /*
    Wait for all queued OpenCL events, map the device buffer back to host
    memory, then release the cache's OpenCL state.  No-op unless this is a
    memory cache with an active OpenCL buffer.
  */
  assert(cache_info != (CacheInfo *)NULL);
  if ((cache_info->type != MemoryCache) ||
      (cache_info->opencl == (OpenCLCacheInfo *)NULL))
    return;
  /*
    Ensure single threaded access to OpenCL environment.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  /* re-check under the lock: another thread may have released it already */
  if (cache_info->opencl != (OpenCLCacheInfo *)NULL)
    {
      cl_event
        *events;

      cl_uint
        event_count;

      clEnv=GetDefaultOpenCLEnv();
      events=CopyOpenCLEvents(cache_info->opencl,&event_count);
      if (events != (cl_event *) NULL)
        {
          cl_command_queue
            queue;

          cl_context
            context;

          cl_int
            status;

          PixelPacket
            *pixels;

          context=GetOpenCLContext(clEnv);
          queue=AcquireOpenCLCommandQueue(clEnv);
          /* blocking map (CL_TRUE) waits on 'events' before returning */
          pixels=(PixelPacket *) clEnv->library->clEnqueueMapBuffer(queue,
            cache_info->opencl->buffer,CL_TRUE, CL_MAP_READ | CL_MAP_WRITE,0,
            cache_info->length,event_count,events,NULL,&status);
          assert(pixels == cache_info->pixels);
          events=(cl_event *) RelinquishMagickMemory(events);
          RelinquishOpenCLCommandQueue(clEnv,queue);
        }
      cache_info->opencl=RelinquishOpenCLCacheInfo(clEnv,cache_info->opencl);
    }
  UnlockSemaphoreInfo(cache_info->semaphore);
}
MagickPrivate void SyncAuthenticOpenCLBuffer(const Image *image)
{
  /*
    Ensure outstanding OpenCL work on the image's pixel cache has completed
    and the host copy is current.
  */
  assert(image != (Image *) NULL);
  CopyOpenCLBuffer((CacheInfo *) image->cache);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelCacheNexus() saves the authentic image pixels to the
% in-memory or disk cache. The method returns MagickTrue if the pixel region
% is synced, otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelCacheNexus() method is:
%
% MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to sync.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    status;

  /*
    Transfer pixels to the cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->cache == (Cache) NULL)
    ThrowBinaryException(CacheError,"PixelCacheIsNotOpen",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return(MagickFalse);
  /*
    Apply any clip mask or image mask to the nexus pixels before writing.
  */
  if ((image->storage_class == DirectClass) &&
      (image->clip_mask != (Image *) NULL) &&
      (ClipPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  if ((image->storage_class == DirectClass) &&
      (image->mask != (Image *) NULL) &&
      (MaskPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    {
      /*
        The nexus aliases the cache directly: nothing to copy, just mark the
        image as modified.
      */
      if (image->taint == MagickFalse)
        image->taint=MagickTrue;
      return(MagickTrue);
    }
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Copy staged pixels (and colormap indexes, when active) back to the cache.
  */
  status=WritePixelCachePixels(cache_info,nexus_info,exception);
  if ((cache_info->active_index_channel != MagickFalse) &&
      (WritePixelCacheIndexes(cache_info,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  if ((status != MagickFalse) && (image->taint == MagickFalse))
    image->taint=MagickTrue;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelsCache() saves the authentic image pixels to the in-memory
% or disk cache. The method returns MagickTrue if the pixel region is synced,
% otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelsCache() method is:
%
% MagickBooleanType SyncAuthenticPixelsCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType SyncAuthenticPixelsCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Flush the calling thread's authentic nexus back to the pixel cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixels() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncAuthenticPixels() method is:
%
% MagickBooleanType SyncAuthenticPixels(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncAuthenticPixels(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Save the image pixels to the in-memory or disk cache, delegating to a
    registered sync handler when one is installed on the cache methods.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    return(cache_info->methods.sync_authentic_pixels_handler(image,exception));
  assert(id < (int) cache_info->number_threads);
  return(SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImagePixelCache() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncImagePixelCache() method is:
%
% MagickBooleanType SyncImagePixelCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncImagePixelCache(Image *image,
  ExceptionInfo *exception)
{
  /*
    Save the image pixels to the in-memory or disk cache; success is simply
    whether GetImagePixelCache() yields a cache.
  */
  assert(image != (Image *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  if (GetImagePixelCache(image,MagickTrue,exception) == (Cache) NULL)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ W r i t e P i x e l C a c h e I n d e x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCacheIndexes() writes the colormap indexes to the specified
% region of the pixel cache.
%
% The format of the WritePixelCacheIndexes() method is:
%
% MagickBooleanType WritePixelCacheIndexes(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the colormap indexes.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCacheIndexes(CacheInfo *cache_info,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  const IndexPacket
    *magick_restrict p;

  ssize_t
    y;

  size_t
    rows;

  /*
    Write the nexus colormap indexes to the matching region of the pixel
    cache (memory, disk, or distributed server).  Returns MagickTrue when
    every row was written; MagickFalse on failure or when the cache has no
    active index channel.
  */
  if (cache_info->active_index_channel == MagickFalse)
    return(MagickFalse);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus aliases the cache; nothing to copy */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*sizeof(IndexPacket);
  rows=nexus_info->region.height;
  extent=(MagickSizeType) length*rows;
  p=nexus_info->indexes;
  y=0;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      IndexPacket
        *magick_restrict q;

      /*
        Write indexes to memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /*
            Full-width region: rows are contiguous, copy in one pass.
          */
          length=extent;
          rows=1UL;
        }
      q=cache_info->indexes+offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=nexus_info->region.width;
        q+=cache_info->columns;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write indexes to disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      /*
        On disk the indexes follow all the pixels; repurpose 'extent' as the
        total pixel count used to skip past them.
      */
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+extent*
          sizeof(PixelPacket)+offset*sizeof(*p),length,(const unsigned char *)
          p);
        if (count < (MagickOffsetType) length)
          break;
        p+=nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write indexes to distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        /* fixed: '&region' was mojibake-corrupted to '(R)ion' upstream */
        count=WriteDistributePixelCacheIndexes((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  if (y < (ssize_t) rows)
    {
      /*
        A row failed to write: report and fail.
      */
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ W r i t e P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCachePixels() writes image pixels to the specified region of the
% pixel cache.
%
% The format of the WritePixelCachePixels() method is:
%
% MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  const PixelPacket
    *magick_restrict p;

  ssize_t
    y;

  size_t
    rows;

  /*
    Write the nexus pixels to the matching region of the pixel cache
    (memory, disk, or distributed server).  Returns MagickTrue when every
    row was written.
  */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus aliases the cache; nothing to copy */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*sizeof(PixelPacket);
  rows=nexus_info->region.height;
  extent=length*rows;
  p=nexus_info->pixels;
  y=0;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      PixelPacket
        *magick_restrict q;

      /*
        Write pixels to memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /*
            Full-width region: rows are contiguous, copy in one pass.
          */
          length=extent;
          rows=1UL;
        }
      q=cache_info->pixels+offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=nexus_info->region.width;
        q+=cache_info->columns;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write pixels to disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+offset*
          sizeof(*p),length,(const unsigned char *) p);
        if (count < (MagickOffsetType) length)
          break;
        p+=nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write pixels to distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        /* fixed: '&region' was mojibake-corrupted to '(R)ion' upstream */
        count=WriteDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  if (y < (ssize_t) rows)
    {
      /*
        A row failed to write: report and fail.
      */
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
|
linAlgWeightedInnerProd.c | /*
The MIT License (MIT)
Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
extern "C"
void FUNC(weightedInnerProd)(
  const dlong & Nblocks,
  const dlong & N,
  const dfloat * __restrict__ cpu_w,
  const dfloat * __restrict__ cpu_a,
  const dfloat * __restrict__ cpu_b,
  dfloat * __restrict__ cpu_wab){

  // Weighted dot product: cpu_wab[0] = sum_i a[i]*b[i]*w[i].
  dfloat acc = 0;
#ifdef __NEKRS__OMP__
  #pragma omp parallel for reduction(+:acc)
#endif
  for(int n=0;n<N;++n)
    acc += cpu_a[n]*cpu_b[n]*cpu_w[n];

  cpu_wab[0] = acc;
}
extern "C"
void FUNC(weightedInnerProdMany)(
  const dlong & Nblocks,
  const dlong & N,
  const dlong & Nfields,
  const dlong & offset,
  const dfloat * __restrict__ cpu_w,
  const dfloat * __restrict__ cpu_a,
  const dfloat * __restrict__ cpu_b,
  dfloat * __restrict__ cpu_wab){

  // Weighted inner product summed over Nfields vectors laid out with a
  // stride of 'offset'; the weight vector w is shared by all fields.
  dfloat acc = 0;
#ifdef __NEKRS__OMP__
  #pragma omp parallel for collapse(2) reduction(+:acc)
#endif
  for(int f=0;f<Nfields;f++) {
    for(int n=0;n<N;++n){
      const dlong idx = n + f*offset;
      acc += cpu_a[idx]*cpu_b[idx]*cpu_w[n];
    }
  }

  cpu_wab[0] = acc;
}
|
DRB064-outeronly2-orig-no.c | /*
Copyright (C) 1991-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>.
*/
/*
This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it.
*/
/*
glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default.
*/
/*
wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is
synchronized with ISO/IEC 10646:2017, fifth edition, plus
the following additions from Amendment 1 to the fifth edition:
- 56 emoji characters
- 285 hentaigana
- 3 additional Zanabazar Square characters
*/
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Only the outmost loop can be parallelized.
The inner loop has loop carried true data dependence.
However, the loop is not parallelized so no race condition.
*/
double b[100][100];

/*
  Initialize the global matrix so that b[i][j] == i*j.  Both loop levels are
  independent, so each carries its own OpenMP "parallel for" (the inner
  pragma creates a nested parallel region, as emitted by Cetus).
  Always returns 0.
  Changes: removed the unused local 'k' and the redundant _ret_val_0
  temporary.
*/
int init()
{
  int i, j;
  #pragma cetus private(i, j)
  #pragma loop name init#0
  #pragma cetus parallel
  #pragma omp parallel for private(i, j)
  for (i=0; i<100; i ++ )
  {
    #pragma cetus private(j)
    #pragma loop name init#0#0
    #pragma cetus parallel
    #pragma omp parallel for private(j)
    for (j=0; j<100; j ++ )
    {
      b[i][j]=(i*j);
    }
  }
  return 0;
}
void foo(int n, int m)
{
  /*
    Propagate each row's first element across columns 1..m-1 of the global
    matrix b.  Only the outer loop is parallelized; the inner loop carries a
    true dependence (b[row][col] reads b[row][col-1]), so it stays serial
    and no race occurs.
  */
  int row, col;
  /* Be careful about bounds of col */
  #pragma cetus private(row, col)
  #pragma loop name foo#0
  #pragma cetus parallel
  #pragma omp parallel for private(row, col)
  for (row=0; row<n; row++)
  {
    #pragma cetus private(col)
    #pragma loop name foo#0#0
    for (col=1; col<m; col++)
      b[row][col]=b[row][col-1];
  }
  return ;
}
/*
  Print every element of the global matrix b, one value per line, row by
  row.  Always returns 0.
  Changes: removed the unused local 'k' and the redundant _ret_val_0
  temporary.
  NOTE(review): this translation unit does not visibly include <stdio.h>;
  printf() appears to rely on an implicit declaration -- confirm the build
  provides the prototype.
*/
int print()
{
  int i, j;
  #pragma cetus private(i, j)
  #pragma loop name print#0
  for (i=0; i<100; i ++ )
  {
    #pragma cetus private(j)
    #pragma loop name print#0#0
    for (j=0; j<100; j ++ )
    {
      printf("%lf\n", b[i][j]);
    }
  }
  return 0;
}
int main()
{
  /* Populate b, run the propagation kernel, then dump the matrix. */
  init();
  foo(100, 100);
  print();
  return 0;
}
|
Pragma.h | //===--- Pragma.h - Pragma registration and handling ------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the PragmaHandler and PragmaTable interfaces.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LEX_PRAGMA_H
#define LLVM_CLANG_LEX_PRAGMA_H
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include <cassert>
namespace clang {
class Preprocessor;
class Token;
class IdentifierInfo;
class PragmaNamespace;
/**
 * \brief Describes how the pragma was introduced, e.g., with \#pragma,
 * _Pragma, or __pragma.
 *
 * With no explicit initializers, the enumerators take the values 0, 1, 2
 * in declaration order.
 */
enum PragmaIntroducerKind {
  /**
   * \brief The pragma was introduced via \#pragma.
   */
  PIK_HashPragma,

  /**
   * \brief The pragma was introduced via the C99 _Pragma(string-literal).
   */
  PIK__Pragma,

  /**
   * \brief The pragma was introduced via the Microsoft
   * __pragma(token-string).
   */
  PIK___pragma
};
/// PragmaHandler - Instances of this interface are defined to handle the
/// various pragmas that the language front-end uses.  Each handler optionally
/// has a name (e.g. "pack") and the HandlePragma method is invoked when a
/// pragma with that identifier is found.  If a handler does not match any of
/// the declared pragmas the handler with a null identifier is invoked, if it
/// exists.
///
/// Note that the PragmaNamespace class can be used to subdivide pragmas, e.g.
/// we treat "\#pragma STDC" and "\#pragma GCC" as namespaces that contain
/// other pragmas.
class PragmaHandler {
  std::string Name;

public:
  explicit PragmaHandler(StringRef name) : Name(name) {}
  /// Construct the catch-all handler with an empty name (modernized from a
  /// user-provided empty body to `= default`).
  PragmaHandler() = default;
  virtual ~PragmaHandler();

  /// Returns the name this handler was registered under (empty for the
  /// catch-all null handler).
  StringRef getName() const { return Name; }

  /// Callback invoked when the matching pragma is seen; \p FirstToken is the
  /// token immediately following the pragma introducer.
  virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
                            Token &FirstToken) = 0;

  /// getIfNamespace - If this is a namespace, return it.  This is equivalent
  /// to using a dynamic_cast, but doesn't require RTTI.
  virtual PragmaNamespace *getIfNamespace() { return nullptr; }
};
/// EmptyPragmaHandler - A pragma handler which takes no action, which can be
/// used to ignore particular pragmas.
class EmptyPragmaHandler : public PragmaHandler {
public:
  explicit EmptyPragmaHandler(StringRef Name = StringRef());

  /// Deliberately does nothing: the pragma's tokens are simply discarded.
  void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
                    Token &FirstToken) override;
};
/// PragmaNamespace - This PragmaHandler subdivides the namespace of pragmas,
/// allowing hierarchical pragmas to be defined.  Common examples of namespaces
/// are "\#pragma GCC", "\#pragma STDC", and "\#pragma omp", but any namespaces
/// may be (potentially recursively) defined.
class PragmaNamespace : public PragmaHandler {
  /// Handlers - This is a map of the handlers in this namespace with their
  /// name as key.  The namespace owns the handlers (see ~PragmaNamespace).
  llvm::StringMap<PragmaHandler*> Handlers;

public:
  explicit PragmaNamespace(StringRef Name) : PragmaHandler(Name) {}
  ~PragmaNamespace() override;

  /// FindHandler - Check to see if there is already a handler for the
  /// specified name.  If not, return the handler for the null name if it
  /// exists, otherwise return null.  If IgnoreNull is true (the default) then
  /// the null handler isn't returned on failure to match.
  PragmaHandler *FindHandler(StringRef Name,
                             bool IgnoreNull = true) const;

  /// AddPragma - Add a pragma to this namespace.
  void AddPragma(PragmaHandler *Handler);

  /// RemovePragmaHandler - Remove the given handler from the
  /// namespace.
  void RemovePragmaHandler(PragmaHandler *Handler);

  /// Returns true when no handler is registered in this namespace.
  /// (Made const: it does not modify the namespace.)
  bool IsEmpty() const {
    return Handlers.empty();
  }

  /// Dispatch to the sub-handler matching the token after the namespace name.
  void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
                    Token &FirstToken) override;

  PragmaNamespace *getIfNamespace() override { return this; }
};
} // end namespace clang
#endif
|
rawBLAKE2_512_fmt_plug.c | /*
* This file is part of John the Ripper password cracker,
* Copyright (c) 2012 by Solar Designer
* based on rawMD4_fmt.c code, with trivial changes by groszek.
*
* Re-used for BLAKE2 by Dhiru Kholia (dhiru at openwall.com)
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_rawBLAKE2;
#elif FMT_REGISTERS_H
john_register_one(&fmt_rawBLAKE2);
#else
#include "arch.h"
#include "blake2.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include <string.h>
#if !FAST_FORMATS_OMP
#undef _OPENMP
#endif
#ifdef _OPENMP
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#include <omp.h>
#endif
#include "memdbg.h"
#define FORMAT_LABEL "Raw-Blake2"
#define FORMAT_NAME ""
#if defined(__AVX__)
#define ALGORITHM_NAME "128/128 AVX"
#elif defined(__XOP__)
#define ALGORITHM_NAME "128/128 XOP"
#elif defined(__SSE4_1__)
#define ALGORITHM_NAME "128/128 SSE4.1"
#elif defined(__SSSE3__)
#define ALGORITHM_NAME "128/128 SSSE3"
#elif defined(__SSE2__)
#define ALGORITHM_NAME "128/128 SSE2"
#else
#define ALGORITHM_NAME "32/" ARCH_BITS_STR
#endif
#define FORMAT_TAG "$BLAKE2$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define CIPHERTEXT_LENGTH 128
#define BINARY_SIZE 64
#define SALT_SIZE 0
#define BINARY_ALIGN 4
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
/* Self-test vectors: BLAKE2b-512 digests (with or without the "$BLAKE2$" tag)
 * paired with their known plaintexts; last two are Wikipedia test vectors. */
static struct fmt_tests tests[] = {
	{"4245af08b46fbb290222ab8a68613621d92ce78577152d712467742417ebc1153668f1c9e1ec1e152a32a9c242dc686d175e087906377f0c483c5be2cb68953e", "blake2"},
	{"$BLAKE2$021ced8799296ceca557832ab941a50b4a11f83478cf141f51f933f653ab9fbcc05a037cddbed06e309bf334942c4e58cdf1a46e237911ccd7fcf9787cbc7fd0", "hello world"},
	/* hash generated by multiple versions (in C and Go) of b2sum program */
	{"$BLAKE2$1f7d9b7c9a90f7bfc66e52b69f3b6c3befbd6aee11aac860e99347a495526f30c9e51f6b0db01c24825092a09dd1a15740f0ade8def87e60c15da487571bcef7", "verystrongandlongpassword"},
	/* test vectors from Wikipedia */
	{"$BLAKE2$a8add4bdddfd93e4877d2746e62817b116364a1fa7bc148d95090bc7333b3673f82401cf7aa2e4cb1ecd90296e3f14cb5413f8ed77be73045b13914cdcd6a918", "The quick brown fox jumps over the lazy dog"},
	{"$BLAKE2$786a02f742015903c6c6fd852552d272912f4740e15847618a86e217f71f5419d25e1031afee585313896444934eb04b903a685b1448b755d56f701afe9be2ce", ""},
	{"$BLAKE2$da40d8f48e9e7560c56e2b92205aed6342a276994ca0287ea4f8c1423ef07d519ecb4bf8668c118379a36be8aa6c077bbc6213fa81fbb332fad9d8a19a7756e6", "UPPERCASE"},
	{"$BLAKE2$f5ab8bafa6f2f72b431188ac38ae2de7bb618fb3d38b6cbf639defcdd5e10a86b22fccff571da37e42b23b80b657ee4d936478f582280a87d6dbb1da73f5c47d", "123456789"},
	{NULL}
};
/* Per-candidate state, allocated in init(): clamped plaintext lengths, the
 * plaintexts themselves, and the computed BLAKE2b-512 digests. */
static int (*saved_len);
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)
	[(BINARY_SIZE + sizeof(uint32_t) - 1) / sizeof(uint32_t)];
/* One-time format setup: with OpenMP, scale the candidate batch so each
 * thread processes OMP_SCALE keys per crypt_all() call, then allocate the
 * per-candidate buffers sized for the (possibly scaled) maximum batch. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t;

	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_len = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_len));
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_out));
}
/* Release the buffers allocated in init(), in reverse allocation order. */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
	MEM_FREE(saved_len);
}
/* Accept a ciphertext iff it is exactly CIPHERTEXT_LENGTH hex digits,
 * optionally preceded by the "$BLAKE2$" tag.  atoi16[] maps hex digits
 * (either case); 0x7F marks a non-hex byte, terminating the scan. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p, *q;

	p = ciphertext;
	if (!strncmp(p, FORMAT_TAG, FORMAT_TAG_LEN))
		p += FORMAT_TAG_LEN;
	q = p;
	while (atoi16[ARCH_INDEX(*q)] != 0x7F)
		q++;
	/* valid only when the scan consumed the whole string at the right length */
	return !*q && q - p == CIPHERTEXT_LENGTH;
}
/* Canonicalize a hash: ensure the "$BLAKE2$" tag is present and lowercase the
 * hex digits.  Returns a pointer to a static buffer (not reentrant), which is
 * the usual contract for JtR split() methods. */
static char *split(char *ciphertext, int index, struct fmt_main *pFmt)
{
	static char out[FORMAT_TAG_LEN + CIPHERTEXT_LENGTH + 1];

	if (!strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
		ciphertext += FORMAT_TAG_LEN;
	memcpy(out, FORMAT_TAG, FORMAT_TAG_LEN);
	/* +1 copies the trailing NUL along with the 128 hex digits */
	memcpy(out + FORMAT_TAG_LEN, ciphertext, CIPHERTEXT_LENGTH + 1);
	strlwr(out + FORMAT_TAG_LEN);
	return out;
}
/* Decode the 128 hex digits after the tag into a 64-byte binary digest.
 * Uses a lazily-allocated static buffer, so the result is overwritten by the
 * next call (standard JtR get_binary() contract; not thread-safe). */
static void *get_binary(char *ciphertext)
{
	static unsigned char *out;
	char *p;
	int i;

	if (!out) out = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD);

	p = ciphertext + FORMAT_TAG_LEN;
	for (i = 0; i < BINARY_SIZE; i++) {
		/* two hex digits -> one byte */
		out[i] =
			(atoi16[ARCH_INDEX(*p)] << 4) |
			atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}

	return out;
}
/* Hash-table lookup helpers: each returns the low bits (PH_MASK_0..6 select
 * progressively wider masks) of the first 32-bit word of a computed digest. */
static int get_hash_0(int index)
{
	return crypt_out[index][0] & PH_MASK_0;
}

static int get_hash_1(int index)
{
	return crypt_out[index][0] & PH_MASK_1;
}

static int get_hash_2(int index)
{
	return crypt_out[index][0] & PH_MASK_2;
}

static int get_hash_3(int index)
{
	return crypt_out[index][0] & PH_MASK_3;
}

static int get_hash_4(int index)
{
	return crypt_out[index][0] & PH_MASK_4;
}

static int get_hash_5(int index)
{
	return crypt_out[index][0] & PH_MASK_5;
}

static int get_hash_6(int index)
{
	return crypt_out[index][0] & PH_MASK_6;
}
/* Store candidate plaintext `key` at slot `index`, clamping its length to
 * PLAINTEXT_LENGTH.  Only `len` bytes are copied; get_key() adds the NUL. */
static void set_key(char *key, int index)
{
	int n = strlen(key);

	if (n > PLAINTEXT_LENGTH)
		n = PLAINTEXT_LENGTH;
	saved_len[index] = n;
	memcpy(saved_key[index], key, n);
}
/* Return the stored candidate at `index`, NUL-terminating it in place
 * (set_key() copies only the raw bytes). */
static char *get_key(int index)
{
	saved_key[index][saved_len[index]] = 0;
	return saved_key[index];
}
/* Compute BLAKE2b-512 of every queued candidate; candidates are independent,
 * so the loop is trivially parallel under OpenMP.
 * NOTE(review): argument order presumably (out, in, key, outlen, inlen,
 * keylen) with no key — confirm against this tree's blake2.h prototype. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		(void)blake2b((uint8_t *)crypt_out[index], saved_key[index], NULL, 64, saved_len[index], 0);
	}
	return count;
}
/* Quick reject: compare only the first ARCH_SIZE bytes of each computed
 * digest; cmp_one()/cmp_exact() confirm a full match afterwards.  Without
 * OpenMP, MAX_KEYS_PER_CRYPT is 1 and init() does no scaling, so only
 * index 0 can exist — hence the loop is compiled in only for _OPENMP. */
static int cmp_all(void *binary, int count)
{
	int index = 0;

#ifdef _OPENMP
	for (; index < count; index++)
#endif
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}
/* Full 64-byte comparison of the candidate's digest against the target. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
/* cmp_one() already compared the full digest, so nothing remains to check. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Format descriptor registering Raw-Blake2 with the JtR core: the first
 * sub-struct holds static parameters, the second the method table. */
struct fmt_main fmt_rawBLAKE2 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		"BLAKE2b 512 " ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
		/* FMT_OMP_BAD: OpenMP scaling here is known to parallelize poorly */
		FMT_OMP | FMT_OMP_BAD |
#endif
		FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE,
		{ NULL },
		{ FORMAT_TAG },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		get_binary,
		fmt_default_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		fmt_default_set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
GB_binop__isle_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__isle_int8
// A.*B function (eWiseMult): GB_AemultB__isle_int8
// A*D function (colscale): GB_AxD__isle_int8
// D*A function (rowscale): GB_DxB__isle_int8
// C+=B function (dense accum): GB_Cdense_accumB__isle_int8
// C+=b function (dense accum): GB_Cdense_accumb__isle_int8
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isle_int8
// C=scalar+B GB_bind1st__isle_int8
// C=scalar+B' GB_bind1st_tran__isle_int8
// C=A+scalar GB_bind2nd__isle_int8
// C=A'+scalar GB_bind2nd_tran__isle_int8
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x <= y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLE || GxB_NO_INT8 || GxB_NO_ISLE_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Auto-generated specialization of ISLE for int8; the loop body comes from the
// shared template, specialized by the GB_* macros defined above.
GrB_Info GB_Cdense_ewise3_noaccum__isle_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // compiled out entirely when this operator/type is disabled in GB_control.h
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// The *_slice arrays partition B's entries among ntasks tasks (consumed by the
// template); the template applies cij = (cij <= bij) to each stored entry of B.
GrB_Info GB_Cdense_accumB__isle_int8
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

// Applies cij = (cij <= b) to every entry of the dense matrix C.
// (The generated original contained a second, unreachable
// `return (GrB_SUCCESS) ;` after the one inside the braces; the dead
// statement has been dropped, leaving a single exit per branch.)
GrB_Info GB_Cdense_accumb__isle_int8
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

// cij = (aij <= d_jj); the *_is_pattern flags tell the template when a
// matrix's values can be ignored (pattern-only input).
GrB_Info GB_AxD__isle_int8
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

// cij = (d_ii <= bij); row-scale analogue of GB_AxD above.
GrB_Info GB_DxB__isle_int8
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// Set-union ewise op: where both A and B have an entry, cij = (aij <= bij);
// elsewhere the single present entry is copied.  C_to_* map C's pattern back
// to the inputs; TaskList is the parallel schedule built by the add phase 1.
GrB_Info GB_AaddB__isle_int8
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

// Set-intersection ewise op: cij = (aij <= bij) only where A and B both have
// an entry.
GrB_Info GB_AemultB__isle_int8
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__isle_int8
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    // cij = (x <= bij); aliasing Cx == Bx is safe since entry p is read
    // before it is overwritten
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int8_t bij = Bx [p] ;
        Cx [p] = (x <= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__isle_int8
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    // cij = (aij <= y); aliasing Cx == Ax is safe since entry p is read
    // before it is overwritten
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int8_t aij = Ax [p] ;
        Cx [p] = (aij <= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int8_t aij = Ax [pA] ; \
    Cx [pC] = (x <= aij) ; \
}

GrB_Info GB_bind1st_tran__isle_int8
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    // (Redefined here, then restored after the template so later code in this
    // file sees the original definition.)
    #undef GB_ATYPE
    #define GB_ATYPE \
    int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef GB_ATYPE
    #define GB_ATYPE \
    int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int8_t aij = Ax [pA] ; \
    Cx [pC] = (aij <= y) ; \
}

GrB_Info GB_bind2nd_tran__isle_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
ten_tusscher_2004_epi_S2_6.c | //Original Ten Tusscher
#include <assert.h>
#include <stdlib.h>
#include "ten_tusscher_2004_epi_S2_6.h"
/* Report the model's constants to the solver: resting potential and number of
 * state variables, each filled in only when requested via the flags. */
GET_CELL_MODEL_DATA(init_cell_model_data) {
    assert(cell_model);
    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;
    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;
}
//TODO: this should be called only once for the whole mesh, like in the GPU code
/* Load the NEQ state variables of one cell with precomputed steady-state
 * values (variant "S2_6" of Elnaz's fits) instead of the model defaults,
 * which are kept below for reference. */
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {
    // Default initial conditions
    /*
    sv[0] = INITIAL_V; // V; millivolt
    sv[1] = 0.f; //M
    sv[2] = 0.75; //H
    sv[3] = 0.75f; //J
    sv[4] = 0.f; //Xr1
    sv[5] = 1.f; //Xr2
    sv[6] = 0.f; //Xs
    sv[7] = 1.f; //S
    sv[8] = 0.f; //R
    sv[9] = 0.f; //D
    sv[10] = 1.f; //F
    sv[11] = 1.f; //FCa
    sv[12] = 1.f; //G
    sv[13] = 0.0002; //Cai
    sv[14] = 0.2f; //CaSR
    sv[15] = 11.6f; //Nai
    sv[16] = 138.3f; //Ki
    */
    // Elnaz's steady-state initial conditions
    real sv_sst[]={-86.4894492745523,0.00131195917143657,0.777766728508175,0.777585290631298,0.000176758153308886,0.484269955752236,0.00295685208736128,0.999998321836575,1.95906508827956e-08,1.91104228355852e-05,0.999780202348919,1.00758709900846,0.999999232044404,3.34860480724804e-05,1.14889991276436,9.63921436951658,139.651596269085};
    for (uint32_t i = 0; i < NEQ; i++)
        sv[i] = sv_sst[i];
}
/* Advance every requested cell by num_steps ODE steps of size dt.  Cells are
 * independent, so the outer loop is parallel; sv_id is declared outside the
 * loop and therefore listed as private.  cells_to_solve, when non-NULL, maps
 * the i-th work item to its cell index; otherwise cells are taken in order. */
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {
    uint32_t sv_id;

    int i;

    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++) {
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = i;

        for (int j = 0; j < num_steps; ++j) {
            solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}
/* Take one time step for a single cell: snapshot the state, evaluate the
 * model update (RHS_cpu returns the already-advanced state, not derivatives),
 * and write it back into sv. */
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {
    real state[NEQ];
    real next_state[NEQ];
    int k;

    assert(sv);

    for(k = 0; k < NEQ; k++) {
        state[k] = sv[k];
    }

    RHS_cpu(state, next_state, stim_current, dt);

    for(k = 0; k < NEQ; k++) {
        sv[k] = next_state[k];
    }
}
/* Evaluate one time step of the ten Tusscher 2004 epicardial cell model.
 * Despite the name, rDY_ receives the UPDATED state, not time derivatives:
 * gates use an exponential (Rush-Larsen-style) update, the voltage a forward
 * Euler step, and the ionic concentrations are integrated in place below.
 * sv: current state (17 variables); stim_current in the model's units;
 * dt: step size. */
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) {
    // State variables
    real svolt = sv[0];
    real sm = sv[1];
    real sh = sv[2];
    real sj = sv[3];
    real sxr1 = sv[4];
    real sxr2 = sv[5];
    real sxs = sv[6];
    real ss = sv[7];
    real sr = sv[8];
    real sd = sv[9];
    real sf = sv[10];
    real sfca = sv[11];
    real sg = sv[12];
    real Cai = sv[13];
    real CaSR = sv[14];
    real Nai = sv[15];
    real Ki = sv[16];

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    ///#ifdef EPI
    real Gks=0.245;
    ///#endif
    ///#ifdef ENDO
    ///	real Gks=0.245;
    ///#endif
    ///#ifdef MCELL
    ///	real Gks=0.062;
    ///#endif
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    //#ifdef EPI
    real Gto=0.294;
    //#endif
    //	#ifdef ENDO
    //	real Gto=0.073;
    //#endif
    //#ifdef MCELL
    //	real Gto=0.294;
    ///#endif
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    // Fitted conductances/rates for this "S2_6" variant: the defaults above
    // are overwritten with Elnaz's parameter set.
    real parameters []={13.9638301021152,0.000517766924545127,0.000156086968039485,0.000506187557537279,0.270335599525829,0.157560173074360,0.171504329558663,4.25106989337320,0.0167287504279960,2.10808214267720,1099.67407156799,0.000527967816332930,0.290348999546199,0.0200000000000000,0.00458996698216110,9.00005264542678e-06};
    GNa=parameters[0];
    GbNa=parameters[1];
    GCaL=parameters[2];
    GbCa=parameters[3];
    Gto=parameters[4];
    Gkr=parameters[5];
    Gks=parameters[6];
    GK1=parameters[7];
    GpK=parameters[8];
    knak=parameters[9];
    knaca=parameters[10];
    Vmaxup=parameters[11];
    GpCa=parameters[12];
    real arel=parameters[13];
    real crel=parameters[14];
    real Vleak=parameters[15];

    // Membrane currents and intermediate variables
    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;

    real dNai;
    real dKi;
    real dCai;
    real dCaSR;

    real A;
    //	real BufferFactorc;
    //	real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;

    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    //	real BufcKbufc=Bufc*Kbufc;
    //	real Kbufcsquare=Kbufc*Kbufc;
    //	real Kbufc2=2*Kbufc;
    //	real BufsrKbufsr=Bufsr*Kbufsr;
    //	const real Kbufsrsquare=Kbufsr*Kbufsr;
    //	const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents: Nernst/reversal potentials
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr +
              IKs +
              IK1 +
              Ito +
              INa +
              IbNa +
              ICaL +
              IbCa +
              INaK +
              INaCa +
              IpCa +
              IpK +
              stim_current;

    //update concentrations (forward Euler, integrated here into locals)
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    ///A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
    Irel=A*sd*sg;
    ///Ileak=0.00008f*(CaSR-Cai);
    Ileak=Vleak*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    // analytic solution of the SR buffering quadratic
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

    // Cell-type-specific transient outward current gates
    #ifdef EPI
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
    #endif
    #ifdef ENDO
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+28)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
    #endif
    #ifdef MCELL
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
    #endif

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates: exponential (Rush-Larsen-style) integration toward X_INF
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
    // fCa and g gates may only relax (not reactivate) above -37 mV
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage (forward Euler)
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
|
OMPMixedList.h | /** \file OMPMixedList.h*/
#ifndef MIXEDLIST__H
#define MIXEDLIST__H
//! A structure that allocates memory as lists on an arena
/** The MixedList structure allocates pieces of memory by request and in
* an atomic manner by giving out memory indices. This way multiple lists can reside in the same buffer in a compact manner.
* Nodes are inserted at athe start of the list, and they used in a way similar to stacks.
*/
typedef struct MixedList {
int size;
int buffer[12800];
} MixedList;
//! Initialize a MixedList structure.
/** The structure is set to empty.
\param list The structure to initialize
*/
void init_ml (MixedList* list) {
list->size = 0;
}
//! Insert a new node in the MixedList strcture
/** This function allocates a node for inserting a value, links the node to a previous node and sets the value. Returns the identifier
* of the node.
\param list The structure to initialize
\param top The identifier of the previous node
\param val The value to be inserted
*/
int push_back_ml (MixedList* list, int top, int val) {
    int prevsize;
    /* Atomically reserve the next free node slot; 'capture' yields the
     * pre-increment value, so each thread obtains a unique index. */
    #pragma omp atomic capture
    prevsize = (list->size)++;
    if (prevsize < 3200) {
        /* Node layout: [2*i] = link to the previous node, [2*i+1] = value. */
        list->buffer[2*prevsize] = top;
        list->buffer[2*prevsize+1] = val;
        return prevsize;
    } else {
        /* Arena exhausted: roll the reservation back and report failure.
         * NOTE(review): the limit is 3200 nodes, but buffer[12800] has room
         * for 6400 two-int nodes -- confirm whether half the arena is
         * deliberately reserved or the bound should be 6400. */
        #pragma omp atomic
        (list->size)--;
        return -1;
    }
}
//! Get the successor of a node.
/** This function gets the identifier of a linked list node and retrieves the id of the successor
\param list The structure to initialize
\param top The identifier of the current node
*/
int next_ml (MixedList* list, int top) {
    /* buffer[2*top] holds the predecessor link written by push_back_ml */
    return list->buffer[2*top];
}
//! Get the value of a node.
/** This function gets the identifier of a linked list node and retrieves the value associated with the node
\param list The structure to initialize
\param top The identifier of the current node
*/
int fetch_ml (MixedList* list, int top) {
    /* buffer[2*top+1] holds the payload written by push_back_ml */
    return list->buffer[2*top+1];
}
#endif
|
TiledFrameBuffer.h | #pragma once
#include <cassert>
#include <cstdint>
#include <cstring>  // std::memset (used by ClearColor)
#include <vector>

#include "alloc16.h"
#include "LiteMath.h"
/**
\brief Frame Buffer
\param PackedColor -- uint32_t, uint16_t, uint8_t
\param FB_BIN_SIZE -- bin size; if 0, bins are not used and the framebuffer becomes a one-level layout!
\param FB_TILE_SIZE_X -- small tile size at x axis
\param FB_TILE_SIZE_Y -- small tile size at y axis
*/
template<typename PackedColor, int FB_BIN_SIZE, int FB_TILE_SIZE_X, int FB_TILE_SIZE_Y>
struct FrameBufferTwoLvl
{
  using ColorType = PackedColor;

  /// Allocate color/depth storage for an a_x * a_y image.
  void Resize(int a_x, int a_y);
  /// Copy the tiled color buffer into a plain pitch-linear RGBA buffer.
  void CopyToPitchLinear(uint32_t* a_data, int a_pitch, bool invertY = true);

  void ClearColor        (uint32_t a_color);
  void ClearColorAndDepth(uint32_t a_color, float a_depth);

  /// Pointer to the first color/depth element of the tile containing (x,y).
  inline PackedColor* TileColor(int x, int y) { return m_color.data() + TileOffset(x,y); }
  inline float*       TileDepth(int x, int y) { return m_depth.data() + TileOffset(x,y); }

  /// Pointer to the color/depth element of the individual pixel (x,y).
  inline PackedColor* PixelColor(int x, int y) { return m_color.data() + PixelOffset(x,y); }
  inline float*       PixelDepth(int x, int y) { return m_depth.data() + PixelOffset(x,y); }

private:

  constexpr static int TILES_IN_BIN_X = FB_BIN_SIZE/FB_TILE_SIZE_X;
  constexpr static int TILES_IN_BIN_Y = FB_BIN_SIZE/FB_TILE_SIZE_Y;
  constexpr static int PIXS_IN_TILE   = FB_TILE_SIZE_X*FB_TILE_SIZE_Y;
  constexpr static int TILES_IN_BIN   = TILES_IN_BIN_X*TILES_IN_BIN_Y;
  constexpr static int ALIGN_OF_TILE  = sizeof(PackedColor)*(FB_TILE_SIZE_X*FB_TILE_SIZE_Y);

  std::vector<float, aligned<float, 64> > m_depth;
  // FIX: the element type was 'uint32_t' while the allocator (and the
  // PackedColor* accessors above) use PackedColor. An allocator's value_type
  // must equal the container's value_type, and TileColor/PixelColor return
  // m_color.data() as PackedColor*, so any instantiation with
  // PackedColor != uint32_t (uint16_t/uint8_t are advertised) was ill-formed.
  std::vector<PackedColor, aligned<PackedColor, ALIGN_OF_TILE> > m_color;

  int m_width;        // image size in pixels
  int m_height;
  int m_binsX;        // bin grid size (0 when FB_BIN_SIZE == 0)
  int m_binsY;
  int m_tilesTotalX;  // tile grid size for the one-level layout
  int m_tilesTotalY;

  inline int TileOffset (int x, int y);
  inline int PixelOffset(int x, int y);
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Linear element offset (in pixels, not bytes) of the tile that contains
// pixel (x,y). In the two-level layout the buffer is ordered bin-by-bin,
// and tile-by-tile inside each bin; in the one-level layout it is ordered
// tile-by-tile over the whole image.
template<typename PackedColor, int FB_BIN_SIZE, int FB_TILE_SIZE_X, int FB_TILE_SIZE_Y>
inline int FrameBufferTwoLvl<PackedColor, FB_BIN_SIZE, FB_TILE_SIZE_X, FB_TILE_SIZE_Y>::TileOffset(int x, int y)
{
  if(FB_BIN_SIZE != 0) // #static_if: assume compiler will opt this
  {
    // NOTE(review): these asserts require tile-aligned coordinates, yet
    // PixelOffset forwards raw pixel coordinates here -- in a debug build
    // PixelColor/PixelDepth on an unaligned pixel would trip them when
    // FB_BIN_SIZE != 0. Confirm the intended contract.
    assert(x % FB_TILE_SIZE_X == 0);
    assert(y % FB_TILE_SIZE_Y == 0);

    const int by = y/FB_BIN_SIZE;   // bin coordinates
    const int bx = x/FB_BIN_SIZE;
    const int y0 = y%FB_BIN_SIZE;   // position inside the bin
    const int x0 = x%FB_BIN_SIZE;
    const int tx = x0/FB_TILE_SIZE_X; // tile coordinates inside the bin
    const int ty = y0/FB_TILE_SIZE_Y;

    const int offToBin  = (by*m_binsX + bx)*(FB_BIN_SIZE*FB_BIN_SIZE); // bins are stored row-major, FB_BIN_SIZE^2 pixels each
    const int offToTile = (ty*TILES_IN_BIN_X + tx)*PIXS_IN_TILE;       // tiles row-major inside the bin

    assert( (offToBin + offToTile) % 16 == 0);

    return offToBin + offToTile;
  }
  else
  {
    // one-level layout: tiles row-major over the whole image
    const int tx = x/FB_TILE_SIZE_X;
    const int ty = y/FB_TILE_SIZE_Y;
    return (ty*m_tilesTotalX + tx)*PIXS_IN_TILE;
  }
}
// Linear element offset of the individual pixel (x,y): the offset of its
// tile plus the pixel's row-major position inside that tile.
template<typename PackedColor, int FB_BIN_SIZE, int FB_TILE_SIZE_X, int FB_TILE_SIZE_Y>
inline int FrameBufferTwoLvl<PackedColor, FB_BIN_SIZE, FB_TILE_SIZE_X, FB_TILE_SIZE_Y>::PixelOffset(int x, int y)
{
  const int inTileX = x % FB_TILE_SIZE_X;                 // column inside the tile
  const int inTileY = y % FB_TILE_SIZE_Y;                 // row inside the tile
  const int local   = inTileY*FB_TILE_SIZE_X + inTileX;   // row-major index inside the tile
  return TileOffset(x,y) + local;
}
// Allocate storage for an a_x * a_y image and cache the derived grid sizes.
// In the binned (two-level) layout the image must be an integer number of
// bins in both dimensions.
template<typename PackedColor, int FB_BIN_SIZE, int FB_TILE_SIZE_X, int FB_TILE_SIZE_Y>
void FrameBufferTwoLvl<PackedColor,FB_BIN_SIZE, FB_TILE_SIZE_X, FB_TILE_SIZE_Y>::Resize(int a_x, int a_y)
{
  m_width  = a_x;
  m_height = a_y;

  if(FB_BIN_SIZE != 0)
  {
    assert(a_x % FB_BIN_SIZE == 0);
    assert(a_y % FB_BIN_SIZE == 0);
  }

  m_binsX = (FB_BIN_SIZE != 0) ? a_x/FB_BIN_SIZE : 0;
  m_binsY = (FB_BIN_SIZE != 0) ? a_y/FB_BIN_SIZE : 0;

  m_tilesTotalX = a_x/FB_TILE_SIZE_X;
  m_tilesTotalY = a_y/FB_TILE_SIZE_Y;

  m_depth.resize(a_x*a_y);
  m_color.resize(a_x*a_y);
}
/// \brief Check whether a pointer's address is a multiple of 'alignment'.
/// \param ptr       address to test
/// \param alignment alignment in bytes (must be non-zero)
/// \return true when ptr is aligned to 'alignment' bytes
inline static bool IsAligned(const void * ptr, std::uintptr_t alignment) noexcept
{
  const std::uintptr_t address = reinterpret_cast<std::uintptr_t>(ptr);
  return (address % alignment) == 0;
}
// Copy the tiled color buffer into a plain pitch-linear destination.
// When invertY is set, the image is flipped vertically during the copy.
//
// FIX: the default argument 'invertY = true' was repeated here; C++ forbids
// restating a default argument in an out-of-class member definition
// ([dcl.fct.default]) -- the default lives on the in-class declaration only.
// Also removed the unused local 'binsTotal'.
//
// NOTE(review): the unrolled body copies exactly 4 rows of 4 packed values
// per tile via cvex::vuint4, i.e. it assumes FB_TILE_SIZE_X ==
// FB_TILE_SIZE_Y == 4 and a 32-bit PackedColor -- confirm other
// instantiations never call this path.
template<typename PackedColor, int FB_BIN_SIZE, int FB_TILE_SIZE_X, int FB_TILE_SIZE_Y>
void FrameBufferTwoLvl<PackedColor,FB_BIN_SIZE, FB_TILE_SIZE_X, FB_TILE_SIZE_Y>::CopyToPitchLinear(uint32_t* a_data, int a_pitch, bool invertY)
{
  if(invertY)
  {
    for(int y=0; y<m_height; y+= FB_TILE_SIZE_Y)
    {
      for(int x=0; x<m_width; x += FB_TILE_SIZE_X)
      {
        const PackedColor* tilecolor = TileColor(x,y);

        const cvex::vuint4 tileRow0 = cvex::load(tilecolor + 0);
        const cvex::vuint4 tileRow1 = cvex::load(tilecolor + 4);
        const cvex::vuint4 tileRow2 = cvex::load(tilecolor + 8);
        const cvex::vuint4 tileRow3 = cvex::load(tilecolor + 12);

        // destination rows are mirrored around the image's horizontal axis
        cvex::store(a_data + (m_height - (y + 0) - 1)*a_pitch + x, tileRow0);
        cvex::store(a_data + (m_height - (y + 1) - 1)*a_pitch + x, tileRow1);
        cvex::store(a_data + (m_height - (y + 2) - 1)*a_pitch + x, tileRow2);
        cvex::store(a_data + (m_height - (y + 3) - 1)*a_pitch + x, tileRow3);
      }
    }
  }
  else
  {
    for(int y=0; y<m_height; y+= FB_TILE_SIZE_Y)
    {
      for(int x=0; x<m_width; x += FB_TILE_SIZE_X)
      {
        const PackedColor* tilecolor = TileColor(x,y);

        const cvex::vuint4 tileRow0 = cvex::load(tilecolor + 0);
        const cvex::vuint4 tileRow1 = cvex::load(tilecolor + 4);
        const cvex::vuint4 tileRow2 = cvex::load(tilecolor + 8);
        const cvex::vuint4 tileRow3 = cvex::load(tilecolor + 12);

        cvex::store(a_data + (y + 0)*a_pitch + x, tileRow0);
        cvex::store(a_data + (y + 1)*a_pitch + x, tileRow1);
        cvex::store(a_data + (y + 2)*a_pitch + x, tileRow2);
        cvex::store(a_data + (y + 3)*a_pitch + x, tileRow3);
      }
    }
  }
}
// Fill the whole color buffer with a_color.
//
// FIX: added the same empty-buffer guard the sibling ClearColorAndDepth has
// (memset on a null data() pointer is UB even with size 0), and qualified
// memset as std::memset (declared by <cstring>, added to the include block).
template<typename PackedColor, int FB_BIN_SIZE, int FB_TILE_SIZE_X, int FB_TILE_SIZE_Y>
void FrameBufferTwoLvl<PackedColor,FB_BIN_SIZE, FB_TILE_SIZE_X, FB_TILE_SIZE_Y>::ClearColor(uint32_t a_color)
{
  if(m_color.empty())
    return;

  if(a_color == 0)
  {
    // an all-zero pattern can be written bytewise -- fastest path
    std::memset(m_color.data(), 0, m_color.size()*sizeof(PackedColor));
    return;
  }

  const cvex::vuint4 vcolor = cvex::splat(a_color);
  const int size = (m_width*m_height);

  // NOTE(review): writes 8 elements per iteration, assuming size is a
  // multiple of 8 (guaranteed when dimensions are tile-aligned) -- confirm.
  for(int i=0; i<size; i+=8)
  {
    cvex::store(m_color.data() + i + 0, vcolor);
    cvex::store(m_color.data() + i + 4, vcolor);
  }
}
// Fill the color buffer with a_color and the depth buffer with a_depth in a
// single pass (8 elements of each per iteration).
template<typename PackedColor, int FB_BIN_SIZE, int FB_TILE_SIZE_X, int FB_TILE_SIZE_Y>
void FrameBufferTwoLvl<PackedColor,FB_BIN_SIZE, FB_TILE_SIZE_X, FB_TILE_SIZE_Y>::ClearColorAndDepth(uint32_t a_color, float a_depth)
{
  if(m_color.empty())
    return;

  const cvex::vuint4  packedColor = cvex::splat(a_color);
  const cvex::vfloat4 packedDepth = cvex::splat(a_depth);

  auto* colorPtr = m_color.data();
  auto* depthPtr = m_depth.data();
  const int pixNum = (m_width*m_height);

  for(int i=0; i<pixNum; i+=8)
  {
    cvex::store(colorPtr + i + 0, packedColor);
    cvex::store(colorPtr + i + 4, packedColor);
    cvex::store(depthPtr + i + 0, packedDepth);
    cvex::store(depthPtr + i + 4, packedDepth);
  }
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
namespace FB
{
  // Color pack/unpack helpers between float channels in [0,1] and packed
  // 8-bit-per-channel BGRA words (B = bits 0..7, G = 8..15, R = 16..23,
  // A = 24..31 -- see the shift amounts below). The primary templates are
  // inert placeholders; the explicit specializations that follow do the work
  // for scalar float, and for the 4/8/16-wide cvex SIMD wrapper types.
  // NOTE(review): packing truncates (no rounding) and does not clamp or mask
  // to 8 bits, so channel values outside [0,1] bleed into neighbouring
  // channels -- confirm inputs are pre-clamped by callers.

  //! Placeholder: pack 4 channels into DstType (specialized below).
  template<typename SrcType, typename DstType>
  static inline DstType ColorPack(const SrcType r, const SrcType g, const SrcType b, const SrcType a)
  {
    return DstType(0);
  }

  //! Placeholder: pack 3 channels (alpha byte stays zero) into DstType.
  template<typename SrcType, typename DstType>
  static inline DstType ColorPack(const SrcType r, const SrcType g, const SrcType b)
  {
    return DstType(0);
  }

  //! Placeholder: unpack DstType into 4 channels (specialized below).
  template<typename SrcType, typename DstType>
  static inline void ColorUNPack(const DstType colorOld, SrcType& r, SrcType& g, SrcType& b, SrcType& a)
  {
  }

  //////////////////////////////////////////////////////////////////////////////////////// scalar
  //////////////////////////////////////////////////////////////////////////////////////// float to uint32_t

  //! Scalar: pack float RGBA in [0,1] into one BGRA dword.
  template<>
  inline uint32_t ColorPack<float,uint32_t>(const float r, const float g, const float b, const float a)
  {
    constexpr float c_255 = 255.0f;
    return (uint32_t(r * c_255) << 16) | // BGRA
           (uint32_t(g * c_255) << 8)  |
           (uint32_t(b * c_255) << 0)  |
           (uint32_t(a * c_255) << 24);
  }

  //! Scalar: pack float RGB in [0,1]; the alpha byte is left at zero.
  template<>
  inline uint32_t ColorPack<float,uint32_t>(const float r, const float g, const float b)
  {
    constexpr float c_255 = 255.0f;
    return (uint32_t(r * c_255) << 16) | // BGRA
           (uint32_t(g * c_255) << 8)  |
           (uint32_t(b * c_255) << 0);
  }

  //! Scalar: unpack one BGRA dword into float channels in [0,1].
  template<>
  inline void ColorUNPack<float,uint32_t>(const uint32_t colorOld,
                                          float& r, float& g, float& b, float& a)
  {
    constexpr float c_255Inv = 1.0f/255.0f;
    r = float( (colorOld & 0x00FF0000) >> 16)*c_255Inv;
    g = float( (colorOld & 0x0000FF00) >> 8 )*c_255Inv;
    b = float( (colorOld & 0x000000FF) >> 0 )*c_255Inv;
    a = float( (colorOld & 0xFF000000) >> 24)*c_255Inv;
  }

  //////////////////////////////////////////////////////////////////////////////////////// vector4
  //////////////////////////////////////////////////////////////////////////////////////// float to uint32_t

  //! 4-wide SIMD variant of the scalar RGBA pack.
  template<>
  inline cvex::vuint4 ColorPack<cvex::vfloat4,cvex::vuint4>(const cvex::vfloat4 r, const cvex::vfloat4 g, const cvex::vfloat4 b, const cvex::vfloat4 a)
  {
    const cvex::vfloat4 c_255 = cvex::splat(255.0f);
    return (cvex::to_uint32(r * c_255) << 16) | // BGRA
           (cvex::to_uint32(g * c_255) << 8)  |
           (cvex::to_uint32(b * c_255) << 0)  |
           (cvex::to_uint32(a * c_255) << 24);
  }

  //! 4-wide SIMD variant of the scalar RGB pack (alpha left zero).
  template<>
  inline cvex::vuint4 ColorPack<cvex::vfloat4,cvex::vuint4>(const cvex::vfloat4 r, const cvex::vfloat4 g, const cvex::vfloat4 b)
  {
    const cvex::vfloat4 c_255 = cvex::splat(255.0f);
    return (cvex::to_uint32(r * c_255) << 16) | // BGRA
           (cvex::to_uint32(g * c_255) << 8)  |
           (cvex::to_uint32(b * c_255) << 0);
  }

  //! 4-wide SIMD variant of the scalar unpack.
  template<>
  inline void ColorUNPack<cvex::vfloat4,cvex::vuint4>(const cvex::vuint4 colorOld,
                                                      cvex::vfloat4& r, cvex::vfloat4& g, cvex::vfloat4& b, cvex::vfloat4& a)
  {
    const cvex::vfloat4 c_255Inv = cvex::splat(1.0f/255.0f);
    r = cvex::to_float32( (colorOld & 0x00FF0000) >> 16)*c_255Inv;
    g = cvex::to_float32( (colorOld & 0x0000FF00) >> 8 )*c_255Inv;
    b = cvex::to_float32( (colorOld & 0x000000FF) >> 0 )*c_255Inv;
    a = cvex::to_float32( (colorOld & 0xFF000000) >> 24)*c_255Inv;
  }

  //////////////////////////////////////////////////////////////////////////////////////// vector8
  //////////////////////////////////////////////////////////////////////////////////////// float to uint32_t

  //! 8-wide SIMD variant of the scalar RGBA pack.
  template<>
  inline cvex8::vuint8 ColorPack<cvex8::vfloat8,cvex8::vuint8>(const cvex8::vfloat8 r, const cvex8::vfloat8 g, const cvex8::vfloat8 b, const cvex8::vfloat8 a)
  {
    const cvex8::vfloat8 c_255 = cvex8::splat(255.0f);
    return (cvex8::to_uint32(r * c_255) << 16) | // BGRA
           (cvex8::to_uint32(g * c_255) << 8)  |
           (cvex8::to_uint32(b * c_255) << 0)  |
           (cvex8::to_uint32(a * c_255) << 24);
  }

  //! 8-wide SIMD variant of the scalar RGB pack (alpha left zero).
  template<>
  inline cvex8::vuint8 ColorPack<cvex8::vfloat8,cvex8::vuint8>(const cvex8::vfloat8 r, const cvex8::vfloat8 g, const cvex8::vfloat8 b)
  {
    const cvex8::vfloat8 c_255 = cvex8::splat(255.0f);
    return (cvex8::to_uint32(r * c_255) << 16) | // BGRA
           (cvex8::to_uint32(g * c_255) << 8)  |
           (cvex8::to_uint32(b * c_255) << 0);
  }

  //! 8-wide SIMD variant of the scalar unpack.
  template<>
  inline void ColorUNPack<cvex8::vfloat8,cvex8::vuint8>(const cvex8::vuint8 colorOld,
                                                        cvex8::vfloat8& r, cvex8::vfloat8& g, cvex8::vfloat8& b, cvex8::vfloat8& a)
  {
    const cvex8::vfloat8 c_255Inv = cvex8::splat(1.0f/255.0f);
    r = cvex8::to_float32( (colorOld & 0x00FF0000) >> 16)*c_255Inv;
    g = cvex8::to_float32( (colorOld & 0x0000FF00) >> 8 )*c_255Inv;
    b = cvex8::to_float32( (colorOld & 0x000000FF) >> 0 )*c_255Inv;
    a = cvex8::to_float32( (colorOld & 0xFF000000) >> 24)*c_255Inv;
  }

  //////////////////////////////////////////////////////////////////////////////////////// vector16
  //////////////////////////////////////////////////////////////////////////////////////// float to uint32_t

  // The 16-wide variants are compiled out on WIN32 builds (guard below).
  #ifndef WIN32

  //! 16-wide SIMD variant of the scalar RGBA pack.
  template<>
  inline cvex16::vuint16 ColorPack<cvex16::vfloat16,cvex16::vuint16>(const cvex16::vfloat16 r, const cvex16::vfloat16 g, const cvex16::vfloat16 b, const cvex16::vfloat16 a)
  {
    const cvex16::vfloat16 c_255 = cvex16::splat(255.0f);
    return (cvex16::to_uint32(r * c_255) << 16) | // BGRA
           (cvex16::to_uint32(g * c_255) << 8)  |
           (cvex16::to_uint32(b * c_255) << 0)  |
           (cvex16::to_uint32(a * c_255) << 24);
  }

  //! 16-wide SIMD variant of the scalar RGB pack (alpha left zero).
  template<>
  inline cvex16::vuint16 ColorPack<cvex16::vfloat16,cvex16::vuint16>(const cvex16::vfloat16 r, const cvex16::vfloat16 g, const cvex16::vfloat16 b)
  {
    const cvex16::vfloat16 c_255 = cvex16::splat(255.0f);
    return (cvex16::to_uint32(r * c_255) << 16) | // BGRA
           (cvex16::to_uint32(g * c_255) << 8)  |
           (cvex16::to_uint32(b * c_255) << 0);
  }

  //! 16-wide SIMD variant of the scalar unpack.
  template<>
  inline void ColorUNPack<cvex16::vfloat16,cvex16::vuint16>(const cvex16::vuint16 colorOld,
                                                            cvex16::vfloat16& r, cvex16::vfloat16& g, cvex16::vfloat16& b, cvex16::vfloat16& a)
  {
    const cvex16::vfloat16 c_255Inv = cvex16::splat(1.0f/255.0f);
    r = cvex16::to_float32( (colorOld & 0x00FF0000) >> 16)*c_255Inv;
    g = cvex16::to_float32( (colorOld & 0x0000FF00) >> 8 )*c_255Inv;
    b = cvex16::to_float32( (colorOld & 0x000000FF) >> 0 )*c_255Inv;
    a = cvex16::to_float32( (colorOld & 0xFF000000) >> 24)*c_255Inv;
  }

  #endif
}; |
conv_dw_kernel_arm.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: haoluo@openailab.com
*/
#include <stdint.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include "conv_dw_kernel_arm.h"
#include "conv_dw_k5_k7_kernel_arm.h"
#include "conv_dw_dilation_kernel_arm.h"
#ifdef __aarch64__
void dw_k3s2p0(float* data, int h, int w, float* kernel, float* output, float* bias, int out_w, int act);
void dw_k3s2p0p1(float* data, int h, int w, float* kernel, float* output, float* bias, int out_w, int act);
void dw_k3s1p1_a72(float* data, int h, int w, float* kernel, float* output, float* bias, int act);
void dw_k3s2p1_a72(float* data, int h, int w, float* kernel, float* output, float* bias, int act);
/* 3x3 depthwise convolution dispatcher (aarch64 path).
 * Routes each channel to one of the hand-written A72 assembly kernels based
 * on stride and top/bottom padding; channels are processed in parallel.
 * NOTE(review): the final 'else' branch (stride != 1, pad_h0 != 0) always
 * calls dw_k3s2p1_a72, whose name suggests stride 2 -- confirm callers never
 * pass a stride other than 1 or 2.  cpu_affinity is accepted but unused. */
static void DirectConv(float* input_buf, int input_h, int input_w, float* output_buf, int output_h, int output_w,
                       float* weight_buf, int channel_num, int stride, float* bias, int* pads, int activation,
                       int num_thread, int cpu_affinity)
{
    int channel_size = input_h * input_w;          /* elements per input channel  */
    int channel_size_out = output_h * output_w;    /* elements per output channel */
    int pad_h0 = pads[0];                          /* top padding    */
    int pad_h1 = pads[2];                          /* bottom padding */
    if (stride == 1)
    {
        /* stride-1 kernel handles pad 1 implicitly (k3s1p1) */
#pragma omp parallel for num_threads(num_thread)
        for (int i = 0; i < channel_num; i++)
        {
            float* cur_input = input_buf + i * channel_size;
            float* cur_output = output_buf + i * channel_size_out;
            float* bias_tmp = NULL;
            if (bias)
                bias_tmp = bias + i;   /* one bias value per channel */
            dw_k3s1p1_a72(cur_input, input_h, input_w, weight_buf + i * 9, cur_output, bias_tmp, activation);
        }
    }
    else if (pad_h0 == 0)
    {
        /* stride-2, no top padding: pick the kernel by bottom padding */
#pragma omp parallel for num_threads(num_thread)
        for (int i = 0; i < channel_num; i++)
        {
            float* cur_input = input_buf + i * channel_size;
            float* cur_output = output_buf + i * channel_size_out;
            float* bias_tmp = NULL;
            if (bias)
                bias_tmp = bias + i;
            if (pad_h1 == 0)
                dw_k3s2p0(cur_input, input_h, input_w, weight_buf + i * 9, cur_output, bias_tmp, output_w, activation);
            else
                dw_k3s2p0p1(cur_input, input_h, input_w, weight_buf + i * 9, cur_output, bias_tmp, output_w,
                            activation);
        }
    }
    else
    {
        /* stride-2 with symmetric padding 1 */
#pragma omp parallel for num_threads(num_thread)
        for (int i = 0; i < channel_num; i++)
        {
            float* cur_input = input_buf + i * channel_size;
            float* cur_output = output_buf + i * channel_size_out;
            float* bias_tmp = NULL;
            if (bias)
                bias_tmp = bias + i;
            dw_k3s2p1_a72(cur_input, input_h, input_w, weight_buf + i * 9, cur_output, bias_tmp, activation);
        }
    }
}
#else
void dw_k3s2(float* input, float* kernel, float* output, int channel, int width, int height, float* bias, int pad0);
void dw_k3s2_relu_fused(float* input, float* kernel, float* output, int channel, int width, int height, float* bias,
int pad0);
void dw_k3s2_relu6_fused(float* input, float* kernel, float* output, int channel, int width, int height, float* bias,
int pad0);
void dw_k3s1p1(float* input, float* kernel, float* output, int channel, int width, int height, float* bias);
void dw_k3s1p1_relu_fused(float* input, float* kernel, float* output, int channel, int width, int height, float* bias);
void dw_k3s1p1_relu6_fused(float* input, float* kernel, float* output, int channel, int width, int height, float* bias);
/* 3x3 depthwise convolution dispatcher (32-bit ARM path).
 * Selects the assembly kernel by stride and fuses the activation when
 * requested: activation == 0 -> ReLU-fused kernel, activation > 0 ->
 * ReLU6-fused kernel, activation < 0 -> plain kernel.
 * NOTE(review): strides other than 1 or 2 fall through both branches and
 * silently produce no output -- confirm upstream shape checks prevent this.
 * output_h/cpu_affinity are accepted but unused here. */
static void DirectConv(float* input_buf, int input_h, int input_w, float* output_buf, int output_h, int output_w,
                       float* weight_buf, int channel_num, int stride, float* bias, int* pads, int activation,
                       int num_thread, int cpu_affinity)
{
    int pad_h0 = pads[0];   /* top padding, forwarded to the stride-2 kernels */
    if (stride == 1)
    {
#pragma omp parallel for num_threads(num_thread)
        for (int c = 0; c < channel_num; c++)
        {
            float* cur_input = input_buf + c * input_h * input_w;
            float* cur_output = output_buf + c * output_h * output_w;
            float* cur_weight = weight_buf + c * 9;             /* 3x3 = 9 weights per channel */
            float* cur_bias = bias ? bias + c : bias;           /* NULL stays NULL */
            if (activation >= 0)
            {
                if (activation == 0)
                    dw_k3s1p1_relu_fused(cur_input, cur_weight, cur_output, 1, input_w, input_h, cur_bias);
                else
                    dw_k3s1p1_relu6_fused(cur_input, cur_weight, cur_output, 1, input_w, input_h, cur_bias);
            }
            else
            {
                dw_k3s1p1(cur_input, cur_weight, cur_output, 1, input_w, input_h, cur_bias);
            }
        }
    }
    else if (stride == 2)
    {
#pragma omp parallel for num_threads(num_thread)
        for (int c = 0; c < channel_num; c++)
        {
            float* cur_input = input_buf + c * input_h * input_w;
            float* cur_output = output_buf + c * output_h * output_w;
            float* cur_weight = weight_buf + c * 9;
            float* cur_bias = bias ? bias + c : bias;
            if (activation >= 0)
            {
                if (activation == 0)
                    dw_k3s2_relu_fused(cur_input, cur_weight, cur_output, 1, input_w, input_h, cur_bias, pad_h0);
                else
                    dw_k3s2_relu6_fused(cur_input, cur_weight, cur_output, 1, input_w, input_h, cur_bias, pad_h0);
            }
            else
            {
                dw_k3s2(cur_input, cur_weight, cur_output, 1, input_w, input_h, cur_bias, pad_h0);
            }
        }
    }
}
#endif
/* Entry point for float depthwise convolution.
 * Dispatches each batch to the dilation / 3x3 / 5x5 / 7x7 kernel family
 * according to the convolution parameters.
 * Returns 0 on success, -1 when stride_h != stride_w (only square strides
 * are supported by the kernels). */
int conv_dw_run(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* bias_tensor,
                struct ir_tensor* output_tensor, struct conv_param* param, int num_thread, int cpu_affinity)
{
    /* param */
    int pads[4];
    int group = param->group;
    int kernel_h = param->kernel_h;
    int kernel_w = param->kernel_w;
    int stride_h = param->stride_h;
    int stride_w = param->stride_w;
    int dilation_h = param->dilation_h;
    int dilation_w = param->dilation_w;
    pads[0] = param->pad_h0;   /* top    */
    pads[1] = param->pad_w0;   /* left   */
    pads[2] = param->pad_h1;   /* bottom */
    pads[3] = param->pad_w1;   /* right  */

    if (stride_h != stride_w)
        return -1;             /* kernels only implement square strides */

    int act_type = param->activation;

    /* tensor layout is NCHW; channel counts are per group */
    int batch = input_tensor->dims[0];
    int in_c = input_tensor->dims[1] / group;
    int in_h = input_tensor->dims[2];
    int in_w = input_tensor->dims[3];
    int input_size = in_c * in_h * in_w;

    int out_c = output_tensor->dims[1] / group;
    int out_h = output_tensor->dims[2];
    int out_w = output_tensor->dims[3];
    int output_size = out_c * out_h * out_w;

    /* buffer addr */
    float* input_buf = ( float* )input_tensor->data;
    float* kernel_buf = ( float* )filter_tensor->data;
    float* output_buf = ( float* )output_tensor->data;
    float* biases_buf = NULL;
    if (bias_tensor)
        biases_buf = ( float* )bias_tensor->data;

    for (int n = 0; n < batch; n++)    // batch size
    {
        float* cur_input = input_buf + n * input_size * group;
        float* cur_output = output_buf + n * output_size * group;
        /* NOTE(review): the dilation branch requires dilation_h == pads[0]
         * and both dilations != 1, but never checks dilation_h ==
         * dilation_w -- confirm anisotropic dilations cannot reach here. */
        if (dilation_h != 1 && dilation_w != 1 && dilation_h == pads[0])
        {
            conv_dw_dilation_run(cur_input, kernel_buf, biases_buf, cur_output, in_h, in_w, group, pads[0], act_type,
                                 num_thread);
        }
        else if (kernel_h == 3 && kernel_w == 3)
        {
            DirectConv(cur_input, in_h, in_w, cur_output, out_h, out_w, kernel_buf, group, stride_h, biases_buf, pads,
                       act_type, num_thread, cpu_affinity);
        }
        else if (kernel_h == 5 && kernel_w == 5)
        {
            if (stride_h == 1)
                depthwise_conv_k5s1(cur_input, kernel_buf, biases_buf, cur_output, in_h, in_w, group, out_h, out_w,
                                    pads[0], pads[1], act_type, num_thread);
            else if (stride_h == 2)
                depthwise_conv_k5s2(cur_input, kernel_buf, biases_buf, cur_output, in_h, in_w, group, out_h, out_w,
                                    act_type, num_thread);
        }
        else if (kernel_h == 7 && kernel_w == 7)
        {
            if (stride_h == 1)
                depthwise_conv_k7s1(cur_input, kernel_buf, biases_buf, cur_output, in_h, in_w, group, out_h, out_w,
                                    act_type, num_thread);
            else if (stride_h == 2)
                depthwise_conv_k7s2(cur_input, kernel_buf, biases_buf, cur_output, in_h, in_w, group, out_h, out_w,
                                    act_type, num_thread);
        }
        /* unsupported kernel sizes silently produce no output for this batch */
    }

    return 0;
}
|
parallel_sort.h | #include "utility.h"
#ifndef PARALLEL_SORT_STL_H
#define PARALLEL_SORT_STL_H
namespace internal
{
// Recursion-depth counter (advisory only).
// NOTE(review): a non-inline global defined in a header violates the ODR if
// this header is included from more than one translation unit -- consider
// 'inline'. It is also incremented from concurrent OpenMP tasks without
// synchronization, so the count is only approximate.
std::size_t g_depth = 0L;
// The lower cutoff boundary: intro_sort stops partitioning below this many
// elements and falls back to simpler handling.
const std::size_t cutoff_low = 100;
// The higher cutoff boundary: at or above this many elements recursive calls
// are launched as OpenMP tasks (see _qs3w / intro_sort).
const std::size_t cutoff_high = 1000;
//! Order the three referenced values in place with a fixed 3-element
//! sorting network (first/last, first/middle, middle/last).
template<class BidirIt, class _Pred>
void sort3v(BidirIt _First, BidirIt _Mid, BidirIt _Last, _Pred compare)
{
    // Swap the pair when the right element compares less than the left one.
    const auto orderPair = [&compare](BidirIt lo, BidirIt hi)
    {
        if (compare(*hi, *lo))
            std::iter_swap(lo, hi);
    };
    orderPair(_First, _Last); // smaller of {first,last} to the front
    orderPair(_First, _Mid);  // overall minimum now at _First
    orderPair(_Mid,   _Last); // order the remaining pair
}
//! Return the iterator whose value is the median of the three referenced
//! values (ties favour _First, then _Mid). Uses operator<=/>= directly,
//! not the caller's comparator.
template<class BidirIt>
BidirIt med3v(BidirIt _First, BidirIt _Mid, BidirIt _Last)
{
    // True when x lies between u and v (in either order), i.e. x is the median.
    const auto isMedian = [](const auto& x, const auto& u, const auto& v)
    {
        return (x <= u && x >= v) || (x <= v && x >= u);
    };
    if (isMedian(*_First, *_Mid, *_Last))
        return _First;
    if (isMedian(*_Mid, *_First, *_Last))
        return _Mid;
    if (isMedian(*_Last, *_First, *_Mid))
        return _Last;
    return _First; // unreachable for a total order; kept as a safe fallback
}
//! Median-of-nine pivot selection over the range [_First, _Last]:
//! probe 9 equally spaced positions, take the median-of-three of each
//! triple, then the median of those three medians. Falls back to a plain
//! median-of-three when each ninth of the range holds fewer than 3 elements.
template<class BidirIt>
BidirIt med9v(BidirIt _First, BidirIt _Mid, BidirIt _Last)
{
    std::size_t _Size = 0L;
    // Compute the size of each fragment of the array
    if ((_Size = (std::distance(_First, _Last) / 9)) >= 3)
    {
        // Fixed-size stack array of probe iterators. This replaces the
        // former std::vector + resize(9), which performed a heap
        // allocation on every pivot selection for no benefit.
        BidirIt median[9];
        for (int index = 0; index < 9; index++)
            median[index] = _First + _Size * index;
        // Median of each third of the probes
        BidirIt _Med1 = med3v(median[0], median[1], median[2]);
        BidirIt _Med2 = med3v(median[3], median[4], median[5]);
        BidirIt _Med3 = med3v(median[6], median[7], median[8]);
        // Final median-of-nine
        return med3v(_Med1, _Med2, _Med3);
    }
    else {
        return med3v(_First, _Mid, _Last);
    }
}
//! Single forward sweep of adjacent-pair exchanges (one bubble-sort pass);
//! it does NOT fully sort the range.
/** NOTE(review): the loop is parallelized, but neighbouring iterations read
 * and write the same element (_FwdIt + 1 of one iteration is _FwdIt of the
 * next), so concurrent chunks race on the shared boundary elements --
 * confirm this pass is only used as a best-effort pre-conditioning step.
 * The swap condition uses !compare(...) together with operator!=, so equal
 * elements are never exchanged. */
template<class BidirIt, class _Pred>
void adjacent_sort(BidirIt _First, BidirIt _Last, _Pred compare)
{
    // Iterate through the array of items in parallel
    #pragma omp parallel for
    for (auto _FwdIt = _First; _FwdIt != _Last - 1; _FwdIt++)
    {
        // For each item perform a check if the following item
        // is greater than the next adjacent item. If so, exchange these items
        if (!compare(*_FwdIt, *(_FwdIt + 1)) && *_FwdIt != *(_FwdIt + 1))
            std::iter_swap(_FwdIt, _FwdIt + 1);
    }
}
//! One cocktail-shaker pass over the INCLUSIVE range [_First, _Last]:
//! walk inward from both ends, swapping end pairs that compare out of
//! order, and stop once the right cursor reaches the midpoint.
template<class BidirIt, class _Pred>
void shaker_sort(BidirIt _First, BidirIt _Last, _Pred compare)
{
    BidirIt left   = _First;
    BidirIt right  = _Last;
    const BidirIt middle = left + (right - left) / 2;

    while (!compare(*left, *right) && right > middle)
    {
        std::iter_swap(left, right);
        ++left;
        --right;
    }
}
//! Sift the element located just after _RevIt down into the sorted prefix
//! [_First, _RevIt] until it is no smaller than its predecessor.
//! (_Last is unused; the parameter is kept for interface compatibility.)
template<class BidirIt, class _Pred>
void do_insertion(BidirIt _First, BidirIt _RevIt, BidirIt _Last, _Pred compare)
{
    // the value being inserted sits at _RevIt + 1
    typename std::iterator_traits<BidirIt>::value_type pivot = *(_RevIt + 1);

    for (BidirIt scan = _RevIt; compare(pivot, *scan); --scan)
    {
        *(scan + 1) = *scan;   // shift the larger element one slot up
        *scan = pivot;         // keep the pivot stored at its current slot
        if (scan <= _First)
            break;             // reached the front of the range
    }
}
//! Insertion sort over the INCLUSIVE range [_First, _Last].
/** FIX: the previous version unrolled the loop 4x and issued four
 * do_insertion calls per iteration without re-checking the bound; whenever
 * the number of remaining elements was not a multiple of 4, the trailing
 * calls read (and could write) *(_LeftIt) past _Last -- an out-of-bounds
 * access. Each insertion is now guarded by the loop condition.
 \param _First first element of the range
 \param _Last last element of the range (inclusive, per this file's convention)
 \param compare strict-weak-ordering predicate
 */
template<class RandomIt, class _Pred>
void insertion_sort(RandomIt _First, RandomIt _Last, _Pred compare)
{
    RandomIt _LeftIt = _First + 1, _RightIt = _Last;
    while (_LeftIt <= _RightIt)
    {
        // Insert *_LeftIt into the already-sorted prefix [_First, _LeftIt - 1]
        do_insertion(_First, _LeftIt - 1, _RightIt, compare);
        _LeftIt++;
    }
}
//! Parallel 3-way quicksort over the INCLUSIVE range [_First, _Last].
/** Partitions around a median-of-nine pivot so that elements equal to the
 * pivot stay in the middle, then recurses on the left and right partitions,
 * spawning OpenMP tasks once the partition size reaches cutoff_high.
 * NOTE(review): g_depth++ below is unsynchronized across tasks (approximate
 * counter only). The 'else' branch opens a new '#pragma omp parallel
 * num_threads(12)' region on every small recursion -- nested parallel
 * regions with a hard-coded thread count risk oversubscription; confirm
 * this is intended. */
template<class RanIt, class _Pred>
void _qs3w(RanIt _First, RanIt _Last, _Pred compare)
{
    // Check if the array size is not zero
    if (_First >= _Last) return;
    // Perform a check if the size of the array is equal to 1
    if (std::distance(_First, _Last) == 1)
    {
        // If so, check if the value of the first item is greater
        // than the value of the last item. If so, exchange these both items
        if (*_First > *_Last)
            std::iter_swap(_First, _Last);
        // Terminate the process of sorting
        return;
    }
    // Compute the size of the array to be sorted
    std::size_t _Size = 0L; g_depth++;
    if ((_Size = std::distance(_First, _Last)) > 0)
    {
        // Compute the middle of the array to be sorted
        RanIt _LeftIt = _First, _RightIt = _Last,
            _MidIt = _First + (_Last - _First) / 2;
        bool is_swapped_left = false, is_swapped_right = false;
        // Compute the value of median by using median-of-nine algorithm
        RanIt _Median = internal::med9v(_LeftIt, _MidIt, _RightIt);
        // Obtain the value of pivot equal to the value of median
        // (taken by value: the element may move during partitioning)
        typename std::iterator_traits<RanIt>::value_type _Pivot = *_Median;
        // Dutch-national-flag style pass: items < pivot go left,
        // items > pivot go right, equal items stay in the middle
        for (RanIt _FwdIt = _LeftIt; _FwdIt <= _RightIt; _FwdIt++)
        {
            // Check if the value of the item is less than the value of pivot
            if (compare(*_FwdIt, _Pivot))
            {
                // If so, exchange the current data item with the next item from left
                is_swapped_left = true;
                std::iter_swap(_FwdIt, _LeftIt);
                // Increment the value of pointer to the succeeding data item from left
                _LeftIt++;
            }
            // Check if the value of the item is greater than the value of pivot
            if (compare(_Pivot, *_FwdIt))
            {
                is_swapped_right = true;
                // If so, exchange the current data item with the next item from right
                std::iter_swap(_FwdIt, _RightIt);
                // Decrement the value of pointer to the succeeding data item from right
                // and the pointer to the current data item in the array
                // (the swapped-in element has not been examined yet)
                _RightIt--; _FwdIt--;
            }
        }
        // Perform a check if the size is greater than
        // the value of the higher cutting off boundary
        if (_Size >= internal::cutoff_high)
        {
            // If so, launch parallel tasks to sort
            // the either leftmost or rightmost part of the array
            #pragma omp task untied mergeable
            if (std::distance(_First, _LeftIt) > 0 && is_swapped_left)
                internal::_qs3w(_First, _LeftIt, compare);
            #pragma omp task untied mergeable
            if (std::distance(_RightIt, _Last) > 0 && is_swapped_right)
                internal::_qs3w(_RightIt, _Last, compare);
        }
        else
        {
            // Otherwise, merge the two concurrent parallel tasks into a single parallel task
            // in which the leftmost and rightmost parts are sorted sequentially
            #pragma omp parallel num_threads(12)
            #pragma omp single nowait
            {
                if (std::distance(_First, _LeftIt) > 0 && is_swapped_left)
                    internal::_qs3w(_First, _LeftIt, compare);
                if (std::distance(_RightIt, _Last) > 0 && is_swapped_right)
                    internal::_qs3w(_RightIt, _Last, compare);
            }
        }
    }
}
//! Hoare-style partition of the INCLUSIVE range [_First, _Last] around a
//! median-of-nine pivot; returns the pair of cursors delimiting the left
//! and right partitions.
/** NOTE(review): unlike the standard library convention, _Last is
 * dereferenced and treated as the last element, not one-past-the-end.
 * Tiny ranges (1 or 2 distances, i.e. 2 or 3 elements) are sorted directly
 * instead of being partitioned. */
template<class BidirIt, class _Pred >
inline std::pair<BidirIt, BidirIt> partition(BidirIt _First, BidirIt _Last, _Pred compare)
{
    std::size_t _Size = 0L;
    // Compute the actual size of the array. If the size is equal to 1
    // then perform a tiny sort by exchanging the adjacent data items
    if ((_Size = std::distance(_First, _Last)) == 1)
    {
        if (!compare(*_First, *_Last))
            std::iter_swap(_First, _Last);
        // Return the pair of pointers to the two subsequent
        // partitions in the array to be sorted
        return std::make_pair(_First, _Last);
    }
    if (_Size == 2)
    {
        // If the array size is equal to 2, perform a tiny sort by sorting three values.
        internal::sort3v(_First, _First + 1, _Last, compare);
        // Return the pair of pointers to the two subsequent
        // partitions in the array to be sorted
        return std::make_pair(_First, _Last);
    }
    // Compute the middle of the array to be sorted
    BidirIt _LeftIt = _First, _RightIt = _Last,
        _MidIt = _LeftIt + _Size / 2;
    // Perform a tiny sort of three values
    // at the beginning, middle and the end of the array
    internal::sort3v(_LeftIt, _MidIt, _RightIt, compare);
    // Perform a tiny sort of three values at the middle of the array
    // (safe: _Size >= 3 here, so _MidIt - 1 and _MidIt + 1 stay in range)
    internal::sort3v(_MidIt - 1, _MidIt, _MidIt + 1, compare);
    // Compute the median by using median-of-nine algorithm
    BidirIt _Median = internal::med9v(_LeftIt, _MidIt, _RightIt);
    // Obtain the value of pivot based on the value of median (by value:
    // the pivot element itself may be moved during partitioning)
    typename std::iterator_traits<BidirIt>::value_type _Pivot = *_Median;
    // Perform a check if the first data item is equal to the value of median
    if (*_First == _Pivot)
        // If so, swap it with the middle data item
        std::iter_swap(_First, _MidIt);
    // Perform a check if the last data item is equal to the value of median
    if (*_Last == _Pivot)
        // If so, swap it with the middle data item
        std::iter_swap(_Last, _MidIt);
    // Perform partitioning iteratively until we've re-arranged the array to be sorted
    // and obtained the pointers to the leftmost and rightmost partitions.
    while (_LeftIt <= _RightIt)
    {
        // Iterate through the array from left and for each data item
        // perform a check if it's greater than the value of pivot
        for (; _LeftIt <= _Last; _LeftIt++)
        {
            // Perform a check if the value of the current
            // data item is greater than the value of pivot or is equal to it
            if (compare(_Pivot, *_LeftIt) || _Pivot == *_LeftIt)
            {
                // If so, perform the iteration through the array from the right
                // until we've reached the data item which is already found from left
                // For each item perform a check if it's not less than the value of pivot
                for (; _RightIt >= _LeftIt; _RightIt--)
                {
                    // If the current item's value is less than the value of pivot or
                    // at least equal to it, exchange the specific data item being found
                    if (compare(*_RightIt, _Pivot) || _Pivot == *_RightIt)
                    {
                        // Perform a check if the value of the pointer obtained from left
                        // is less than or equal to the value of pointer from the right
                        if (_LeftIt <= _RightIt)
                        {
                            // If so, exchange the specific data items
                            std::iter_swap(_LeftIt, _RightIt);
                            // Increment the pointer to the left item and
                            // decrement the pointer to the right item
                            _LeftIt++; _RightIt--;
                        }
                        // Terminate the loop execution
                        break;
                    }
                }
                // Terminate the loop execution
                break;
            }
        }
    }
    // Return the pair of pointers to the two subsequent
    // partitions in the array to be sorted
    return std::make_pair(_LeftIt, _RightIt);
}
// Introspective parallel sorter for the INCLUSIVE range [_First, _Last].
// NOTE: _Last points at the LAST element, not one-past-the-end — callers
// pass `_Last - 1` (see parallel_sort/parallel_sort1) and the insertion
// sort below is invoked with `_Last + 1` to restore the exclusive bound.
template<class BidirIt, class _Pred >
void intro_sort(BidirIt _First, BidirIt _Last, _Pred compare)
{
// Empty or single-element range: nothing to sort
if (_First >= _Last) return;
// Bump the global recursion-depth counter.
// NOTE(review): g_depth is shared global state; incrementing it from
// concurrently executing tasks looks racy — confirm it is only used
// as a rough statistic.
std::size_t pos = 0L; g_depth++;
// Perform a check if the array has already been sorted.
// If so terminate the process of sorting
if (misc::sorted(_First, _Last, pos, compare)) return;
std::size_t _Size = 0L;
// Compute the actual size of the array and perform a check
// if the size exceeds the lower cut-off boundary (otherwise fall
// through to the insertion-sort branch at the bottom)
if ((_Size = std::distance(_First, _Last)) > internal::cutoff_low)
{
// NOTE(review): _LeftIt/_RightIt are declared but never used here
BidirIt _LeftIt = _First, _RightIt = _Last;
// If so, partition the array by using Hoare's quicksort partitioning;
// p.first/p.second delimit the two resulting sub-partitions
std::pair<BidirIt, BidirIt> p \
= internal::partition(_First, _Last, compare);
// Perform a check if the size of the array also
// exceeds the higher cut-off boundary
if (_Size > internal::cutoff_high)
{
// If so, launch the first parallel task that performs the
// improved 3-way quicksort on the right partition at the backend
// (the pragma binds to the following if-statement)
#pragma omp task untied mergeable
// Perform a check if the size of the partition is not zero and
// we're not performing an empty null-task
if (std::distance(p.first, _Last) > 0)
internal::_qs3w(p.first, _Last, compare);
// Launch the second parallel task that performs the improved
// 3-way quicksort on the left partition at the backend
#pragma omp task untied mergeable
// Perform a check if the size of the partition is not zero and
// we're not performing an empty null-task
if (std::distance(_First, p.second) > 0)
internal::_qs3w(_First, p.second, compare);
}
else
{
// Otherwise, sort both partitions sequentially inside a single
// parallel region executed by one thread.
// NOTE(review): num_threads(12) is hard-coded, and opening a
// parallel region from inside a task relies on nested
// parallelism — confirm this is intended.
#pragma omp parallel num_threads(12)
#pragma omp single nowait
{
if (std::distance(p.first, _Last) > 0)
internal::_qs3w(p.first, _Last, compare);
if (std::distance(_First, p.second) > 0)
internal::_qs3w(_First, p.second, compare);
}
}
}
else
{
// If the size of the array is below the lower cut-off,
// perform a regular insertion sort launched
// during the parallel task execution
// (_Last + 1 converts the inclusive bound back to exclusive)
#pragma omp parallel num_threads(12)
#pragma omp single nowait
{
if (std::distance(_First, _Last) > 0)
internal::insertion_sort(_First, _Last + 1, compare);
}
}
}
// Second-stage sorter over the exclusive range [_First, _Last): either
// sorts per-thread chunks in parallel and then runs a final whole-array
// pass, or sorts the whole array at once.
// NOTE(review): the value_type must be numeric — the maximum element is
// passed to std::log10 to estimate its most-significant-digit position.
template<class BidirIt, class _Pred >
void parallel_sort1(BidirIt _First, BidirIt _Last, _Pred compare)
{
// Compute the potential size of the array to be sorted
std::size_t _Size = std::distance(_First, _Last);
// Find the value of the maximum data item in the array
typename std::iterator_traits<BidirIt>::value_type \
_MaxValue = *std::max_element(_First, _Last);
// Compute the radix MSD position for the size value
std::size_t _MaxSizeRadix = std::log10((double)_Size);
// Compute the radix MSD position for the maximum value
std::size_t _MaxValueRadix = std::log10((double)_MaxValue);
// Perform a check if the MSD position of the size is greater
// (e.g. the array is an interleave sequence)
if (_MaxSizeRadix > _MaxValueRadix)
{
// Partition the entire array into chunks of a fixed size
// based on finding sub-intervals for each particular chunk
misc::partitioner p(std::make_pair(0, _Size), \
omp_get_max_threads());
// Execute a parallel region in which the introspective sorter
// is invoked for each particular chunk to be sorted.
// NOTE(review): the partitioner is sized by omp_get_max_threads()
// while the region is pinned to num_threads(12); if these differ,
// some chunks may never be sorted — confirm.
#pragma omp parallel num_threads(12)
{
// volatile keeps each thread's id from being optimized away
volatile int tid = omp_get_thread_num();
internal::intro_sort(_First + p[tid].first(),
_First + p[tid].second(), compare);
}
// Perform a final sequential pass (one thread of a parallel region)
// that arranges the entire array into an ordered sequence
// (intro_sort takes an inclusive right bound, hence _Last - 1)
#pragma omp parallel num_threads(12)
#pragma omp master
internal::intro_sort(_First, _Last - 1, compare);
}
else
{
// Otherwise, launch a parallel task to perform
// an introspective sort of the entire array
#pragma omp parallel num_threads(12)
#pragma omp master
internal::intro_sort(_First, _Last - 1, compare);
}
}
// Public entry point: sorts [_First, _Last) with compare.
// Strategy: one pass of cocktail-shaker sort first; if the array is then
// still unsorted, either hand interleave-like sequences straight to the
// 3-way quicksort or iterate adjacent-sort + introspective sort until done.
template<class BidirIt, class _Pred >
void parallel_sort(BidirIt _First, BidirIt _Last, _Pred compare)
{
// Reset the global depth statistic for this run
std::size_t pos = 0L; g_depth = 0L;
// Compute the potential size of the array to be sorted
std::size_t _Size = std::distance(_First, _Last);
// Perform the parallel cocktail shaker sort over the inclusive
// range [_First, _Last - 1].
// NOTE(review): this task pragma is not inside a visible parallel
// region here; unless the caller invokes parallel_sort from inside
// one, the task executes immediately on the calling thread — confirm.
#pragma omp task untied mergeable
internal::shaker_sort(_First, _Last - 1, compare);
// Synchronize threads until the parallel task has completed its execution
#pragma omp taskwait
// Let's give a first chance check if the array has already been sorted and
// obtain the position of the first unsorted data item
if (!misc::sorted(_First, _Last, pos, compare))
{
// _RightIt points at the first unsorted data item
BidirIt _LeftIt = _First, _RightIt = _First + pos;
// Compute the radix MSD position for the size value
std::size_t _MaxSizeRadix = std::log10((double)_Size);
// Compute the radix MSD position for the first-unsorted value
// (NOTE(review): requires a numeric value_type)
std::size_t _MaxValueRadix = std::log10((double)*_RightIt);
// Perform a check if the MSD position of the size is greater
if (_MaxSizeRadix > _MaxValueRadix)
{
// If so, perform a check if the value of the first data item is
// equal to the value of the first unsorted item that occurs in the array
// (e.g. the array is an interleave sequence)
if (*_LeftIt == *(_RightIt + 1))
{
// Perform the parallel task that executes
// the 3-way quicksort routine at the backend
// (inclusive right bound, hence _Last - 1)
#pragma omp parallel num_threads(12)
#pragma omp master
internal::_qs3w(_First, _Last - 1, compare);
// Terminate the process of sorting.
return;
}
}
// Otherwise, if the array is not an interleave sequence
// execute a loop to sort the array by performing the introspective sort
do
{
// Perform the pre-sorting of the array by using adjacent sort
internal::adjacent_sort(_First, _Last, compare);
// Perform the actual sorting by launching the introspective sort
internal::parallel_sort1(_First, _Last, compare);
// Perform the last chance check if the array has already been sorted.
// If not, proceed with the process of sorting over again until the entire array is sorted
} while (!misc::sorted(_First, _Last, pos, compare));
}
}
}
#endif // PARALLEL_SORT_STL_H
|
omp_task_red_taskloop.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include <omp.h>
int r;
/* Deterministic per-task contribution: the two task indices plus one. */
int work(int k, int l)
{
	return 1 + k + l;
}
// Spawns two explicit tasks that each add work(i, .) into the global
// reduction variable r.  The taskgroup's task_reduction(+:r) provides the
// reduction context; the child tasks participate via in_reduction(+:r).
void bar(int i) {
#pragma omp taskgroup task_reduction(+:r)
{ int th_gen = omp_get_thread_num();
#pragma omp task in_reduction(+:r) firstprivate(i, th_gen)
{
// contributes work(i, 0) == i + 1
r += work(i, 0);
printf("executing task (%d, 0), th %d (gen by th %d)\n", i, omp_get_thread_num(), th_gen);
}
#pragma omp task in_reduction(+:r) firstprivate(i, th_gen)
{
// contributes work(i, 1) == i + 2
r += work(i, 1);
printf("executing task (%d, 1), th %d (gen by th %d)\n", i, omp_get_thread_num(), th_gen);
}
}
}
// Driver executed by every thread of the parallel region in main():
// runs bar(0) under a taskgroup reduction, then bar(1..3) plus a direct
// `r += i` contribution inside a taskloop reduction on r.
int foo() {
int i;
int th_gen = omp_get_thread_num();
#pragma omp taskgroup task_reduction(+:r)
{
bar(0);
}
printf("th %d passed bar0\n", th_gen);
#pragma omp taskloop reduction(+:r) firstprivate(th_gen)
for (i = 1; i < 4; ++i) {
bar(i);
printf("th %d (gen by th %d) passed bar%d in taskloop\n", omp_get_thread_num(), th_gen, i);
// direct contribution of the loop iteration to the taskloop reduction
// #pragma omp task in_reduction(+:r)
r += i;
}
return 0;
}
// res = 2*((1+2)+(2+3)+(3+4)+(4+5)+1+2+3) = 60
#define res 60
int main()
{
	r = 0;
	/* Two threads each run foo(); every contribution flows into the
	 * global r through the reductions set up in foo()/bar(). */
#pragma omp parallel num_threads(2)
	foo();
	/* Expected total is `res` (see the derivation above). */
	if (r != res) {
		printf("error r = %d (!= %d)\n", r, res);
		return 1;
	}
	return 0;
}
|
shrinkwrap.c | /*
* $Id: shrinkwrap.c 40108 2011-09-11 02:50:01Z campbellbarton $
*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) Blender Foundation.
* All rights reserved.
*
* The Original Code is: all of this file.
*
 * Contributor(s): André Pinto
*
* ***** END GPL LICENSE BLOCK *****
*/
/** \file blender/blenkernel/intern/shrinkwrap.c
* \ingroup bke
*/
#include <string.h>
#include <float.h>
#include <math.h>
#include <memory.h>
#include <stdio.h>
#include <time.h>
#include <assert.h>
#include "DNA_object_types.h"
#include "DNA_modifier_types.h"
#include "DNA_meshdata_types.h"
#include "DNA_mesh_types.h"
#include "DNA_scene_types.h"
#include "BLI_editVert.h"
#include "BLI_math.h"
#include "BLI_utildefines.h"
#include "BKE_shrinkwrap.h"
#include "BKE_DerivedMesh.h"
#include "BKE_lattice.h"
#include "BKE_deform.h"
#include "BKE_mesh.h"
#include "BKE_subsurf.h"
/* Util macros */
#define OUT_OF_MEMORY() ((void)printf("Shrinkwrap: Out of memory\n"))
/* Benchmark macros */
#if !defined(_WIN32) && 0
#include <sys/time.h>
#define BENCH(a) \
do { \
double _t1, _t2; \
struct timeval _tstart, _tend; \
clock_t _clock_init = clock(); \
gettimeofday ( &_tstart, NULL); \
(a); \
gettimeofday ( &_tend, NULL); \
_t1 = ( double ) _tstart.tv_sec + ( double ) _tstart.tv_usec/ ( 1000*1000 ); \
_t2 = ( double ) _tend.tv_sec + ( double ) _tend.tv_usec/ ( 1000*1000 ); \
printf("%s: %fs (real) %fs (cpu)\n", #a, _t2-_t1, (float)(clock()-_clock_init)/CLOCKS_PER_SEC);\
} while(0)
#else
#define BENCH(a) (a)
#endif
typedef void ( *Shrinkwrap_ForeachVertexCallback) (DerivedMesh *target, float *co, float *normal);
/* get derived mesh */
//TODO: is there an existing function that does this? returning the derivedFinal without caring whether the object is in edit mode or not?
/* Return the evaluated (final) DerivedMesh of an object.
 * When the mesh has an active edit-mesh, its derivedFinal is returned
 * instead of the object's own. */
DerivedMesh *object_get_derived_final(Object *ob)
{
	Mesh *me= ob->data;
	EditMesh *em = BKE_mesh_get_editmesh(me);
	DerivedMesh *dm;

	if(em == NULL)
		return ob->derivedFinal;

	dm = em->derivedFinal;
	BKE_mesh_end_editmesh(me, em);
	return dm;
}
/* Space transform */
/* Build the local<->target space transform from two object matrices:
 * local2target = inverse(target) * local, and target2local its inverse. */
void space_transform_from_matrixs(SpaceTransform *data, float local[4][4], float target[4][4])
{
	float target_inv[4][4];

	invert_m4_m4(target_inv, target);
	mul_serie_m4(data->local2target, target_inv, local, NULL, NULL, NULL, NULL, NULL, NULL);
	invert_m4_m4(data->target2local, data->local2target);
}
void space_transform_apply(const SpaceTransform *data, float *co)
{
mul_v3_m4v3(co, ((SpaceTransform*)data)->local2target, co);
}
void space_transform_invert(const SpaceTransform *data, float *co)
{
mul_v3_m4v3(co, ((SpaceTransform*)data)->target2local, co);
}
static void space_transform_apply_normal(const SpaceTransform *data, float *no)
{
mul_mat3_m4_v3( ((SpaceTransform*)data)->local2target, no);
normalize_v3(no); // TODO: could we just determine de scale value from the matrix?
}
static void space_transform_invert_normal(const SpaceTransform *data, float *no)
{
mul_mat3_m4_v3(((SpaceTransform*)data)->target2local, no);
normalize_v3(no); // TODO: could we just determine de scale value from the matrix?
}
/*
 * Shrinkwrap to the nearest vertex
 *
 * Builds a BVH tree of the vertices we can attach to, then for each
 * vertex performs a nearest-vertex search on the tree.
 */
static void shrinkwrap_calc_nearest_vertex(ShrinkwrapCalcData *calc)
{
int i;
BVHTreeFromMesh treeData = NULL_BVHTreeFromMesh;
BVHTreeNearest nearest = NULL_BVHTreeNearest;
//Build a BVH tree over the target's vertices (timed when BENCH is enabled)
BENCH(bvhtree_from_mesh_verts(&treeData, calc->target, 0.0, 2, 6));
if(treeData.tree == NULL)
{
OUT_OF_MEMORY();
return;
}
//Setup nearest: no hit yet, unbounded search distance
nearest.index = -1;
nearest.dist = FLT_MAX;
//firstprivate(nearest) gives each thread its own copy, so the
//proximity heuristic below stays thread-local
#ifndef __APPLE__
#pragma omp parallel for default(none) private(i) firstprivate(nearest) shared(treeData,calc) schedule(static)
#endif
for(i = 0; i<calc->numVerts; ++i)
{
float *co = calc->vertexCos[i];
float tmp_co[3];
//Vertex-group weight scales the effect; zero weight leaves the vertex untouched
float weight = defvert_array_find_weight_safe(calc->dvert, i, calc->vgroup);
if(weight == 0.0f) continue;
//Convert the vertex to tree (target-space) coordinates
if(calc->vert)
{
VECCOPY(tmp_co, calc->vert[i].co);
}
else
{
VECCOPY(tmp_co, co);
}
space_transform_apply(&calc->local2target, tmp_co);
//Local proximity heuristic (to prune the nearest search):
//if a previous iteration already found a hit, assume this vertex's
//nearest point lies at a similar distance and seed "nearest.dist"
//with the distance to that last hit, which prunes the tree search.
if(nearest.index != -1)
nearest.dist = len_squared_v3v3(tmp_co, nearest.co);
else
nearest.dist = FLT_MAX;
BLI_bvhtree_find_nearest(treeData.tree, tmp_co, &nearest, treeData.nearest_callback, &treeData);
//Found the nearest vertex
if(nearest.index != -1)
{
//Adjust the weight so that after interpolation the vertex keeps
//a distance of keepDist from the nearest position
float dist = sasqrt(nearest.dist);
if(dist > FLT_EPSILON) weight *= (dist - calc->keepDist)/dist;
//Convert the coordinates back to mesh (local) coordinates
VECCOPY(tmp_co, nearest.co);
space_transform_invert(&calc->local2target, tmp_co);
interp_v3_v3v3(co, co, tmp_co, weight); //linear interpolation
}
}
free_bvhtree_from_mesh(&treeData);
}
/*
 * This function raycasts a single vertex and updates the hit if the hit is considered valid.
 * Returns TRUE if "hit" was updated.
 * The options argument controls whether a hit is valid or not.
 * Supported options are:
 *	MOD_SHRINKWRAP_CULL_TARGET_FRONTFACE (front-face hits are ignored)
 *	MOD_SHRINKWRAP_CULL_TARGET_BACKFACE (back-face hits are ignored)
 */
int normal_projection_project_vertex(char options, const float *vert, const float *dir, const SpaceTransform *transf, BVHTree *tree, BVHTreeRayHit *hit, BVHTree_RayCastCallback callback, void *userdata)
{
float tmp_co[3], tmp_no[3];
const float *co, *no;
BVHTreeRayHit hit_tmp;
//Work on a copy of hit: the caller's record is only overwritten on a valid hit
memcpy( &hit_tmp, hit, sizeof(hit_tmp) );
//Apply the space transform to ray origin, direction and max distance
//(TODO readjust dist)
if(transf)
{
VECCOPY( tmp_co, vert );
space_transform_apply( transf, tmp_co );
co = tmp_co;
VECCOPY( tmp_no, dir );
space_transform_apply_normal( transf, tmp_no );
no = tmp_no;
//Scale the allowed hit distance into target space
hit_tmp.dist *= mat4_to_scale( ((SpaceTransform*)transf)->local2target );
}
else
{
co = vert;
no = dir;
}
hit_tmp.index = -1;
BLI_bvhtree_ray_cast(tree, co, no, 0.0f, &hit_tmp, callback, userdata);
if(hit_tmp.index != -1) {
/* invert the normal first so face culling works on rotated objects */
if(transf) {
space_transform_invert_normal(transf, hit_tmp.no);
}
if (options & (MOD_SHRINKWRAP_CULL_TARGET_FRONTFACE|MOD_SHRINKWRAP_CULL_TARGET_BACKFACE)) {
/* apply front/back face culling based on the angle between the
 * projection direction and the hit normal */
const float dot= dot_v3v3(dir, hit_tmp.no);
if( ((options & MOD_SHRINKWRAP_CULL_TARGET_FRONTFACE) && dot <= 0.0f) ||
((options & MOD_SHRINKWRAP_CULL_TARGET_BACKFACE) && dot >= 0.0f)
) {
return FALSE; /* Ignore hit */
}
}
if(transf) {
/* Invert the space transform on the hit point and recompute the
 * distance in local space (TODO make coherent with the initial dist readjust) */
space_transform_invert(transf, hit_tmp.co);
hit_tmp.dist = len_v3v3((float *)vert, hit_tmp.co);
}
//Commit the validated hit back to the caller
memcpy(hit, &hit_tmp, sizeof(hit_tmp) );
return TRUE;
}
return FALSE;
}
//Shrinkwrap by raycasting each vertex along a projection axis (or its own
//normal) onto the target mesh, optionally limited by an auxiliary target.
static void shrinkwrap_calc_normal_projection(ShrinkwrapCalcData *calc)
{
int i;
//Option bits about the projection direction (named use_normal but holds
//the full shrinkOpts bitfield)
const char use_normal = calc->smd->shrinkOpts;
float proj_axis[3] = {0.0f, 0.0f, 0.0f};
//Raycast and tree stuff
BVHTreeRayHit hit;
BVHTreeFromMesh treeData= NULL_BVHTreeFromMesh;
//auxiliary target
DerivedMesh *auxMesh = NULL;
BVHTreeFromMesh auxData = NULL_BVHTreeFromMesh;
SpaceTransform local2aux;
//If projecting is not allowed in either direction of the projection axis
//then there is nothing to do.
if((use_normal & (MOD_SHRINKWRAP_PROJECT_ALLOW_POS_DIR | MOD_SHRINKWRAP_PROJECT_ALLOW_NEG_DIR)) == 0)
return;
//Prepare data to retrieve the direction in which each vertex is projected
if(calc->smd->projAxis == MOD_SHRINKWRAP_PROJECT_OVER_NORMAL)
{
//Projecting along vertex normals requires the vertex array
if(calc->vert == NULL) return;
}
else
{
//The code supports any axis that is a combination of X,Y,Z
//although currently the UI only allows setting the 3 different axes
if(calc->smd->projAxis & MOD_SHRINKWRAP_PROJECT_OVER_X_AXIS) proj_axis[0] = 1.0f;
if(calc->smd->projAxis & MOD_SHRINKWRAP_PROJECT_OVER_Y_AXIS) proj_axis[1] = 1.0f;
if(calc->smd->projAxis & MOD_SHRINKWRAP_PROJECT_OVER_Z_AXIS) proj_axis[2] = 1.0f;
normalize_v3(proj_axis);
//Invalid (zero-length) projection direction
if(dot_v3v3(proj_axis, proj_axis) < FLT_EPSILON)
return;
}
if(calc->smd->auxTarget)
{
auxMesh = object_get_derived_final(calc->smd->auxTarget);
if(!auxMesh)
return;
space_transform_setup( &local2aux, calc->ob, calc->smd->auxTarget);
}
//After successfully building the face trees, start projecting vertices
if( bvhtree_from_mesh_faces(&treeData, calc->target, 0.0, 4, 6)
&& (auxMesh == NULL || bvhtree_from_mesh_faces(&auxData, auxMesh, 0.0, 4, 6)))
{
//hit is private: each thread keeps its own raycast record
#ifndef __APPLE__
#pragma omp parallel for private(i,hit) schedule(static)
#endif
for(i = 0; i<calc->numVerts; ++i)
{
float *co = calc->vertexCos[i];
float tmp_co[3], tmp_no[3];
//Zero vertex-group weight leaves the vertex untouched
float weight = defvert_array_find_weight_safe(calc->dvert, i, calc->vgroup);
if(weight == 0.0f) continue;
if(calc->vert)
{
/* calc->vert contains verts from the derivedMesh. */
/* These coordinates are deformed by vertexCos only for normal projection
 * (to get correct normals); for the other cases calc->vert contains
 * undeformed coordinates and vertexCos should be used. */
if(calc->smd->projAxis == MOD_SHRINKWRAP_PROJECT_OVER_NORMAL) {
VECCOPY(tmp_co, calc->vert[i].co);
normal_short_to_float_v3(tmp_no, calc->vert[i].no);
} else {
VECCOPY(tmp_co, co);
VECCOPY(tmp_no, proj_axis);
}
}
else
{
VECCOPY(tmp_co, co);
VECCOPY(tmp_no, proj_axis);
}
hit.index = -1;
hit.dist = 10000.0f; //TODO: we should use FLT_MAX here, but the sweepsphere code isn't prepared for that
//Project along the positive direction of the axis
if(use_normal & MOD_SHRINKWRAP_PROJECT_ALLOW_POS_DIR)
{
//The optional auxiliary target is raycast into the same hit record first
if(auxData.tree)
normal_projection_project_vertex(0, tmp_co, tmp_no, &local2aux, auxData.tree, &hit, auxData.raycast_callback, &auxData);
normal_projection_project_vertex(calc->smd->shrinkOpts, tmp_co, tmp_no, &calc->local2target, treeData.tree, &hit, treeData.raycast_callback, &treeData);
}
//Project along the negative direction of the axis (only if nothing was hit yet)
if(use_normal & MOD_SHRINKWRAP_PROJECT_ALLOW_NEG_DIR && hit.index == -1)
{
float inv_no[3];
negate_v3_v3(inv_no, tmp_no);
if(auxData.tree)
normal_projection_project_vertex(0, tmp_co, inv_no, &local2aux, auxData.tree, &hit, auxData.raycast_callback, &auxData);
normal_projection_project_vertex(calc->smd->shrinkOpts, tmp_co, inv_no, &calc->local2target, treeData.tree, &hit, treeData.raycast_callback, &treeData);
}
if(hit.index != -1)
{
//Offset the hit point by keepDist along the projection direction,
//then blend towards it by the vertex-group weight
madd_v3_v3v3fl(hit.co, hit.co, tmp_no, calc->keepDist);
interp_v3_v3v3(co, co, hit.co, weight);
}
}
}
//free data structures
free_bvhtree_from_mesh(&treeData);
free_bvhtree_from_mesh(&auxData);
}
/*
 * Shrinkwrap: move vertices to the nearest surface point on the target.
 *
 * Builds a BVH tree from the target mesh's faces and then performs a
 * nearest-neighbour match for each vertex.
 */
static void shrinkwrap_calc_nearest_surface_point(ShrinkwrapCalcData *calc)
{
int i;
BVHTreeFromMesh treeData = NULL_BVHTreeFromMesh;
BVHTreeNearest nearest = NULL_BVHTreeNearest;
//Create a BVH tree over the faces of the given target (timed when BENCH is enabled)
BENCH(bvhtree_from_mesh_faces( &treeData, calc->target, 0.0, 2, 6));
if(treeData.tree == NULL)
{
OUT_OF_MEMORY();
return;
}
//Setup nearest: no hit yet, unbounded search distance
nearest.index = -1;
nearest.dist = FLT_MAX;
//Find the nearest surface point per vertex; firstprivate(nearest)
//keeps the proximity heuristic thread-local
#ifndef __APPLE__
#pragma omp parallel for default(none) private(i) firstprivate(nearest) shared(calc,treeData) schedule(static)
#endif
for(i = 0; i<calc->numVerts; ++i)
{
float *co = calc->vertexCos[i];
float tmp_co[3];
//Zero vertex-group weight leaves the vertex untouched
float weight = defvert_array_find_weight_safe(calc->dvert, i, calc->vgroup);
if(weight == 0.0f) continue;
//Convert the vertex to tree (target-space) coordinates
if(calc->vert)
{
VECCOPY(tmp_co, calc->vert[i].co);
}
else
{
VECCOPY(tmp_co, co);
}
space_transform_apply(&calc->local2target, tmp_co);
//Local proximity heuristic (to prune the nearest search):
//if a previous iteration already found a hit, assume this vertex's
//nearest point lies at a similar distance and seed "nearest.dist"
//with the distance to that last hit, which prunes the tree search.
if(nearest.index != -1)
nearest.dist = len_squared_v3v3(tmp_co, nearest.co);
else
nearest.dist = FLT_MAX;
BLI_bvhtree_find_nearest(treeData.tree, tmp_co, &nearest, treeData.nearest_callback, &treeData);
//Found the nearest surface point
if(nearest.index != -1)
{
if(calc->smd->shrinkOpts & MOD_SHRINKWRAP_KEEP_ABOVE_SURFACE)
{
//Keep the vertex on the front side of the face, offset by keepDist
//along the surface normal
VECADDFAC(tmp_co, nearest.co, nearest.no, calc->keepDist);
}
else
{
//Adjust the target point so that after interpolation the vertex
//keeps a distance of keepDist from the nearest position
float dist = sasqrt( nearest.dist );
if(dist > FLT_EPSILON)
interp_v3_v3v3(tmp_co, tmp_co, nearest.co, (dist - calc->keepDist)/dist); //linear interpolation
else
VECCOPY( tmp_co, nearest.co );
}
//Convert the coordinates back to mesh (local) coordinates
space_transform_invert(&calc->local2target, tmp_co);
interp_v3_v3v3(co, co, tmp_co, weight); //linear interpolation
}
}
free_bvhtree_from_mesh(&treeData);
}
/* Main shrinkwrap function: deforms the numVerts coordinates in vertexCos
 * towards smd->target according to the modifier's shrink type.
 * Fix: the original computed calc.vgroup twice (once with
 * calc.smd->vgroup_name and again with smd->vgroup_name — the same value)
 * and fetched calc.dvert from dm twice; the duplicates are removed. */
void shrinkwrapModifier_deform(ShrinkwrapModifierData *smd, Object *ob, DerivedMesh *dm, float (*vertexCos)[3], int numVerts)
{
	DerivedMesh *ss_mesh = NULL;
	ShrinkwrapCalcData calc = NULL_ShrinkwrapCalcData;

	//remove loop dependencies on derived meshes (TODO should this be done elsewhere?)
	if(smd->target == ob) smd->target = NULL;
	if(smd->auxTarget == ob) smd->auxTarget = NULL;

	//Configure Shrinkwrap calc data
	calc.smd = smd;
	calc.ob = ob;
	calc.numVerts = numVerts;
	calc.vertexCos = vertexCos;

	//DeformVertex: vertex-group index and per-vertex deform weights
	calc.vgroup = defgroup_name_index(calc.ob, calc.smd->vgroup_name);
	if(dm)
	{
		calc.dvert = dm->getVertDataArray(dm, CD_MDEFORMVERT);
	}
	else if(calc.ob->type == OB_LATTICE)
	{
		calc.dvert = lattice_get_deform_verts(calc.ob);
	}

	if(smd->target)
	{
		calc.target = object_get_derived_final(smd->target);

		//TODO there might be several "bugs" on non-uniform scale matrices
		//because it will no longer be nearest surface, not sphere projection
		//because space has been deformed
		space_transform_setup(&calc.local2target, ob, smd->target);

		//TODO: smd->keepDist is in global units.. must change to local
		calc.keepDist = smd->keepDist;
	}

	if(dm != NULL && smd->shrinkType == MOD_SHRINKWRAP_PROJECT)
	{
		//Setup the vertex array to get positions and normals
		//(calc.dvert was already fetched from dm above)
		calc.vert = dm->getVertDataArray(dm, CD_MVERT);

		//Use vertex positions/normals as if a subsurf modifier was applied
		if(smd->subsurfLevels)
		{
			SubsurfModifierData ssmd= {{NULL}};
			ssmd.subdivType = ME_CC_SUBSURF;  //catmull clark
			ssmd.levels = smd->subsurfLevels; //levels

			ss_mesh = subsurf_make_derived_from_derived(dm, &ssmd, FALSE, NULL, 0, 0, (ob->mode & OB_MODE_EDIT));

			if(ss_mesh)
			{
				calc.vert = ss_mesh->getVertDataArray(ss_mesh, CD_MVERT);
				if(calc.vert)
				{
					//TRICKY: this code assumes subsurf will have the transformed original vertices
					//in their original order at the end of the vert array.
					calc.vert = calc.vert + ss_mesh->getNumVerts(ss_mesh) - dm->getNumVerts(dm);
				}
			}

			//Just to make sure we are not leaving any memory behind
			assert(ssmd.emCache == NULL);
			assert(ssmd.mCache == NULL);
		}
	}

	//Projecting target defined - lets work!
	if(calc.target)
	{
		switch(smd->shrinkType)
		{
			case MOD_SHRINKWRAP_NEAREST_SURFACE:
				BENCH(shrinkwrap_calc_nearest_surface_point(&calc));
				break;

			case MOD_SHRINKWRAP_PROJECT:
				BENCH(shrinkwrap_calc_normal_projection(&calc));
				break;

			case MOD_SHRINKWRAP_NEAREST_VERTEX:
				BENCH(shrinkwrap_calc_nearest_vertex(&calc));
				break;
		}
	}

	//free memory
	if(ss_mesh)
		ss_mesh->release(ss_mesh);
}
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y, storing X - Y in RESULT.
 *
 * Note: Y's fields may be normalized in place as a side effect.
 * Returns 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
	/* Normalize y so that x->tv_usec - y->tv_usec cannot underflow. */
	if (x->tv_usec < y->tv_usec)
	{
		int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
		y->tv_usec -= 1000000 * carry;
		y->tv_sec += carry;
	}
	/* Fold an excessive microsecond difference back into whole seconds. */
	if (x->tv_usec - y->tv_usec > 1000000)
	{
		int carry = (x->tv_usec - y->tv_usec) / 1000000;
		y->tv_usec += 1000000 * carry;
		y->tv_sec -= carry;
	}

	/* tv_usec of the result is now certainly positive. */
	result->tv_sec = x->tv_sec - y->tv_sec;
	result->tv_usec = x->tv_usec - y->tv_usec;

	/* The difference is negative exactly when x has fewer seconds. */
	return x->tv_sec < y->tv_sec;
}
/* Benchmark driver for the 7-point variable-coefficient stencil.
 * Fixes over the original:
 *  - Nx/Ny/Nz/Nt were read uninitialized when too few command-line
 *    arguments were given; defaults are now set first.
 *  - The initialization loops started at index 1 and only seeded plane
 *    A[0], but the stencil reads indices down to 0 and (from t = 1 on)
 *    the A[1] plane's boundaries; everything is now fully initialized.
 *  - `min` (undeclared) replaced by the MIN macro defined above.
 *  - Top-level pointers A, coef and tile_size were leaked. */
int main(int argc, char *argv[])
{
	int t, i, j, k, m, test;
	int Nx, Ny, Nz, Nt;

	/* Grid size (user size + 2 ghost layers per dimension) and number of
	 * time steps; safe defaults when arguments are missing. */
	Nx = Ny = Nz = 34;
	Nt = 10;
	if (argc > 3) {
		Nx = atoi(argv[1])+2;
		Ny = atoi(argv[2])+2;
		Nz = atoi(argv[3])+2;
	}
	if (argc > 4)
		Nt = atoi(argv[4]);

	/* allocate the arrays: A holds two time planes, coef one array per stencil point */
	double ****A = (double ****) malloc(sizeof(double***)*2);
	for(m=0; m<2; m++){
		A[m] = (double ***) malloc(sizeof(double**)*Nz);
		for(i=0; i<Nz; i++){
			A[m][i] = (double**) malloc(sizeof(double*)*Ny);
			for(j=0; j<Ny; j++){
				A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
			}
		}
	}
	double ****coef = (double ****) malloc(sizeof(double***)*7);
	for(m=0; m<7; m++){
		coef[m] = (double ***) malloc(sizeof(double**)*Nz);
		for(i=0; i<Nz; i++){
			coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
			for(j=0; j<Ny; j++){
				coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
			}
		}
	}

	// tile size information, including extra element to decide the list length
	int *tile_size = (int*) malloc(sizeof(int));
	tile_size[0] = -1;
	// The list is modified here before source-to-source transformations
	tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
	tile_size[0] = 4;
	tile_size[1] = 4;
	tile_size[2] = 4;
	tile_size[3] = 64;
	tile_size[4] = -1;

	// for timekeeping
	int ts_return = -1;
	struct timeval start, end, result;
	double tdiff = 0.0, min_tdiff = 1.e100;

	const int BASE = 1024;

	/* Initialize both time planes of A and all coefficient arrays over the
	 * FULL index ranges (including index 0): the stencil reads i-1/j-1/k-1
	 * down to 0 and, from the second time step on, the boundaries of the
	 * other time plane. */
	srand(42);
	for (m = 0; m < 2; m++) {
		for (i = 0; i < Nz; i++) {
			for (j = 0; j < Ny; j++) {
				for (k = 0; k < Nx; k++) {
					A[m][i][j][k] = 1.0 * (rand() % BASE);
				}
			}
		}
	}
	for (m = 0; m < 7; m++) {
		for (i = 0; i < Nz; i++) {
			for (j = 0; j < Ny; j++) {
				for (k = 0; k < Nx; k++) {
					coef[m][i][j][k] = 1.0 * (rand() % BASE);
				}
			}
		}
	}

#ifdef LIKWID_PERFMON
	LIKWID_MARKER_INIT;
#pragma omp parallel
	{
		LIKWID_MARKER_THREADINIT;
#pragma omp barrier
		LIKWID_MARKER_START("calc");
	}
#endif

	int num_threads = 1;
#if defined(_OPENMP)
	num_threads = omp_get_max_threads();
#endif

	for(test=0; test<TESTS; test++){
		gettimeofday(&start, 0);
		// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
		for (t = 0; t < Nt-1; t++) {
			for (i = 1; i < Nz-1; i++) {
				for (j = 1; j < Ny-1; j++) {
					for (k = 1; k < Nx-1; k++) {
						A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] +
							coef[1][i][j][k] * A[t%2][i-1][j ][k ] +
							coef[2][i][j][k] * A[t%2][i ][j-1][k ] +
							coef[3][i][j][k] * A[t%2][i ][j ][k-1] +
							coef[4][i][j][k] * A[t%2][i+1][j ][k ] +
							coef[5][i][j][k] * A[t%2][i ][j+1][k ] +
							coef[6][i][j][k] * A[t%2][i ][j ][k+1];
					}
				}
			}
		}
#pragma endscop
		gettimeofday(&end, 0);
		ts_return = timeval_subtract(&result, &end, &start);
		tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
		/* Keep the fastest run (MIN is the macro defined at the top of
		 * this file; the original lowercase `min` is not declared). */
		min_tdiff = MIN(min_tdiff, tdiff);
		printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
	}

	PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
	{
		LIKWID_MARKER_STOP("calc");
	}
	LIKWID_MARKER_CLOSE;
#endif

	/* Free allocated arrays, including the top-level pointers and the
	 * tile-size list which the original version leaked. */
	for(m=0; m<2; m++){
		for(i=0; i<Nz; i++){
			for(j=0; j<Ny; j++){
				free(A[m][i][j]);
			}
			free(A[m][i]);
		}
		free(A[m]);
	}
	free(A);
	for(m=0; m<7; m++){
		for(i=0; i<Nz; i++){
			for(j=0; j<Ny; j++){
				free(coef[m][i][j]);
			}
			free(coef[m][i]);
		}
		free(coef[m]);
	}
	free(coef);
	free(tile_size);
	return 0;
}
|
imag_self_energy_with_g.c | /* Copyright (C) 2015 Atsushi Togo */
/* All rights reserved. */
/* This file is part of phonopy. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
#include "imag_self_energy_with_g.h"
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include "lagrid.h"
#include "phonoc_array.h"
#include "phonoc_utils.h"
#include "triplet.h"
static long ise_set_g_pos_frequency_point(long (*g_pos)[4],
const long num_band0,
const long num_band,
const char *g_zero);
static void detailed_imag_self_energy_at_triplet(
double *detailed_imag_self_energy, double *imag_self_energy,
const long num_band0, const long num_band, const double *fc3_normal_squared,
const double *frequencies, const long triplet[3], const double *g1,
const double *g2_3, const char *g_zero, const double *temperatures,
const long num_temps, const double cutoff_frequency);
static double collect_detailed_imag_self_energy(
double *imag_self_energy, const long num_band,
const double *fc3_normal_squared, const double *n1, const double *n2,
const double *g1, const double *g2_3, const char *g_zero);
static double collect_detailed_imag_self_energy_0K(
double *imag_self_energy, const long num_band,
const double *fc3_normal_squared, const double *n1, const double *n2,
const double *g, const char *g_zero);
static void set_occupations(double *n1, double *n2, const long num_band,
const double temperature, const long triplet[3],
const double *frequencies,
const double cutoff_frequency);
/* Compute, for each band in band0, the imaginary part of the phonon
 * self-energy summed over all scattering triplets.
 *
 * imag_self_energy    : output, length num_band0.
 * fc3_normal_squared  : dims (num_triplets, num_band0, num_band, num_band);
 *                       presumably the squared third-order coupling — confirm.
 * g, g_zero           : integration weights and their zero-entry mask.
 * frequency_point_index < 0 selects the "frequencies at bands" mode; otherwise
 * the self-energy is evaluated at the given sampled frequency point, and the
 * g arrays are laid out per frequency point instead of per band0. */
void ise_get_imag_self_energy_at_bands_with_g(
    double *imag_self_energy, const Darray *fc3_normal_squared,
    const double *frequencies, const long (*triplets)[3],
    const long *triplet_weights, const double *g, const char *g_zero,
    const double temperature, const double cutoff_frequency,
    const long num_frequency_points, const long frequency_point_index) {
    long i, j, num_triplets, num_band0, num_band, num_band_prod;
    long num_g_pos, g_index_dims, g_index_shift;
    long(*g_pos)[4];
    double *ise;
    long at_a_frequency_point;

    g_pos = NULL;
    ise = NULL;

    num_triplets = fc3_normal_squared->dims[0];
    num_band0 = fc3_normal_squared->dims[1];
    num_band = fc3_normal_squared->dims[2];
    num_band_prod = num_band0 * num_band * num_band;
    /* Per-triplet partial results, reduced serially after the parallel loop.
     * NOTE(review): malloc return value is not checked. */
    ise = (double *)malloc(sizeof(double) * num_triplets * num_band0);

    if (frequency_point_index < 0) {
        /* frequency_points == frequencies at bands */
        at_a_frequency_point = 0;
        g_index_dims = num_band_prod;
        g_index_shift = 0;
    } else {
        /* At an arbitrary frequency point. */
        at_a_frequency_point = 1;
        g_index_dims = num_frequency_points * num_band * num_band;
        g_index_shift = frequency_point_index * num_band * num_band;
    }

#ifdef _OPENMP
/* g_pos is private: every thread allocates and frees its own scratch list. */
#pragma omp parallel for private(num_g_pos, j, g_pos)
#endif
    for (i = 0; i < num_triplets; i++) {
        g_pos = (long(*)[4])malloc(sizeof(long[4]) * num_band_prod);
        /* ise_set_g_pos only works for the case of frequency points at */
        /* bands. For frequency sampling mode, g_zero is assumed all */
        /* with the array shape of (num_triplets, num_band0, num_band, */
        /* num_band). */
        if (at_a_frequency_point) {
            num_g_pos = ise_set_g_pos_frequency_point(
                g_pos, num_band0, num_band,
                g_zero + i * g_index_dims + g_index_shift);
        } else {
            num_g_pos = ise_set_g_pos(g_pos, num_band0, num_band,
                                      g_zero + i * num_band_prod);
        }

        /* The two g blocks passed here are the first half (g1) and the
         * remainder (g2_3) of the g array, offset by num_triplets planes. */
        ise_imag_self_energy_at_triplet(
            ise + i * num_band0, num_band0, num_band,
            fc3_normal_squared->data + i * num_band_prod, frequencies,
            triplets[i], triplet_weights[i],
            g + i * g_index_dims + g_index_shift,
            g + (i + num_triplets) * g_index_dims + g_index_shift, g_pos,
            num_g_pos, &temperature, 1, cutoff_frequency, 0,
            at_a_frequency_point);

        free(g_pos);
        g_pos = NULL;
    }

    /* Serial reduction of the per-triplet partial sums. */
    for (i = 0; i < num_band0; i++) {
        imag_self_energy[i] = 0;
    }
    for (i = 0; i < num_triplets; i++) {
        for (j = 0; j < num_band0; j++) {
            imag_self_energy[j] += ise[i * num_band0 + j];
        }
    }

    free(ise);
    ise = NULL;
}
/* Like ise_get_imag_self_energy_at_bands_with_g, but additionally stores the
 * per-channel (detailed) contributions and splits the triplet-weighted band
 * sums into two outputs according to tpl_is_N (N vs. U classification of each
 * triplet — presumably Normal vs. Umklapp; confirm against triplet.h). */
void ise_get_detailed_imag_self_energy_at_bands_with_g(
    double *detailed_imag_self_energy, double *imag_self_energy_N,
    double *imag_self_energy_U, const Darray *fc3_normal_squared,
    const double *frequencies, const long (*triplets)[3],
    const long *triplet_weights, const long (*bz_grid_addresses)[3],
    const double *g, const char *g_zero, const double temperature,
    const double cutoff_frequency) {
    double *ise;
    long i, j, num_triplets, num_band0, num_band, num_band_prod;
    long *is_N;
    double ise_tmp, N, U;

    ise = NULL;
    is_N = NULL;

    num_triplets = fc3_normal_squared->dims[0];
    num_band0 = fc3_normal_squared->dims[1];
    num_band = fc3_normal_squared->dims[2];
    num_band_prod = num_band0 * num_band * num_band;
    /* Per-triplet band sums; reduced (with weights) below. */
    ise = (double *)malloc(sizeof(double) * num_triplets * num_band0);

    /* detailed_imag_self_energy has the same shape as fc3_normal_squared. */
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (i = 0; i < num_triplets; i++) {
        detailed_imag_self_energy_at_triplet(
            detailed_imag_self_energy + i * num_band_prod, ise + i * num_band0,
            num_band0, num_band, fc3_normal_squared->data + i * num_band_prod,
            frequencies, triplets[i], g + i * num_band_prod,
            g + (i + num_triplets) * num_band_prod, g_zero + i * num_band_prod,
            &temperature, 1, cutoff_frequency);
    }

    /* Classify each triplet once, before the reduction. */
    is_N = (long *)malloc(sizeof(long) * num_triplets);
    for (i = 0; i < num_triplets; i++) {
        is_N[i] = tpl_is_N(triplets[i], bz_grid_addresses);
    }

    /* Weighted reduction over triplets, accumulated separately per class. */
    for (i = 0; i < num_band0; i++) {
        N = 0;
        U = 0;
        /* #ifdef _OPENMP */
        /* #pragma omp parallel for private(ise_tmp) reduction(+:N,U) */
        /* #endif */
        for (j = 0; j < num_triplets; j++) {
            ise_tmp = ise[j * num_band0 + i] * triplet_weights[j];
            if (is_N[j]) {
                N += ise_tmp;
            } else {
                U += ise_tmp;
            }
        }
        imag_self_energy_N[i] = N;
        imag_self_energy_U[i] = U;
    }

    free(is_N);
    is_N = NULL;
    free(ise);
    ise = NULL;
}
/* Accumulate the imaginary self-energy contribution of one triplet for every
 * requested temperature.
 *
 * g_pos lists (band0, band, band, flat-index) tuples with nonzero integration
 * weight (built by ise_set_g_pos / ise_set_g_pos_frequency_point).
 * n1/n2 hold the occupations of the 2nd/3rd phonon; a negative occupation
 * flags a band at/below the cutoff frequency, which is skipped.
 * NOTE(review): 'openmp_at_bands' is accepted but unused in this function. */
void ise_imag_self_energy_at_triplet(
    double *imag_self_energy, const long num_band0, const long num_band,
    const double *fc3_normal_squared, const double *frequencies,
    const long triplet[3], const long triplet_weight, const double *g1,
    const double *g2_3, const long (*g_pos)[4], const long num_g_pos,
    const double *temperatures, const long num_temps,
    const double cutoff_frequency, const long openmp_at_bands,
    const long at_a_frequency_point) {
    long i, j;
    double *n1, *n2;
    long g_pos_3;

    /* One occupation row per temperature. */
    n1 = (double *)malloc(sizeof(double) * num_temps * num_band);
    n2 = (double *)malloc(sizeof(double) * num_temps * num_band);
    for (i = 0; i < num_temps; i++) {
        set_occupations(n1 + i * num_band, n2 + i * num_band, num_band,
                        temperatures[i], triplet, frequencies,
                        cutoff_frequency);
    }

    /* Output layout: (num_temps, num_band0), zero-initialized. */
    for (i = 0; i < num_band0 * num_temps; i++) {
        imag_self_energy[i] = 0;
    }

    /* Do not use OpenMP here!! */
    /* g_pos[i][0] takes value 0 <= x < num_band0 only, */
    /* which causes race condition. */
    for (i = 0; i < num_g_pos; i++) {
        if (at_a_frequency_point) {
            /* At an arbitrary frequency point the g arrays span only
             * (num_band, num_band): fold the band0 part out of the index. */
            g_pos_3 = g_pos[i][3] % (num_band * num_band);
        } else {
            /* frequency_points == frequencies at bands */
            g_pos_3 = g_pos[i][3];
        }
        for (j = 0; j < num_temps; j++) {
            if (n1[j * num_band + g_pos[i][1]] < 0 ||
                n2[j * num_band + g_pos[i][2]] < 0) {
                ; /* Either phonon below the cutoff: contributes nothing. */
            } else {
                if (temperatures[j] > 0) {
                    imag_self_energy[j * num_band0 + g_pos[i][0]] +=
                        ((n1[j * num_band + g_pos[i][1]] +
                          n2[j * num_band + g_pos[i][2]] + 1) *
                             g1[g_pos_3] +
                         (n1[j * num_band + g_pos[i][1]] -
                          n2[j * num_band + g_pos[i][2]]) *
                             g2_3[g_pos_3]) *
                        fc3_normal_squared[g_pos[i][3]] * triplet_weight;
                } else {
                    /* T = 0: only the g1 term remains. */
                    imag_self_energy[j * num_band0 + g_pos[i][0]] +=
                        g1[g_pos_3] * fc3_normal_squared[g_pos[i][3]] *
                        triplet_weight;
                }
            }
        }
    }

    free(n1);
    n1 = NULL;
    free(n2);
    n2 = NULL;
}
/* Collect the (band0, band, band) index triples whose integration weight is
 * nonzero. g_zero is a flattened mask of shape (num_band0, num_band,
 * num_band); for every zero entry the three indices plus the flat index are
 * appended to g_pos. Returns the number of collected entries. */
long ise_set_g_pos(long (*g_pos)[4], const long num_band0, const long num_band,
                   const char *g_zero) {
    long idx, count;
    const long nbb = num_band * num_band;
    const long total = num_band0 * nbb;

    count = 0;
    for (idx = 0; idx < total; idx++) {
        if (g_zero[idx]) {
            continue;
        }
        /* Decompose the flat index back into (j, k, l). */
        g_pos[count][0] = idx / nbb;
        g_pos[count][1] = (idx / num_band) % num_band;
        g_pos[count][2] = idx % num_band;
        g_pos[count][3] = idx;
        count++;
    }
    return count;
}
/* Variant of ise_set_g_pos for frequency-sampling mode: g_zero holds a single
 * (num_band, num_band) mask that is reused for every band0 index, while the
 * stored flat index still runs over the full (num_band0, num_band, num_band)
 * shape. Returns the number of collected entries. */
static long ise_set_g_pos_frequency_point(long (*g_pos)[4],
                                          const long num_band0,
                                          const long num_band,
                                          const char *g_zero) {
    long j, kl, count;
    const long nbb = num_band * num_band;

    count = 0;
    for (j = 0; j < num_band0; j++) {
        for (kl = 0; kl < nbb; kl++) {
            if (g_zero[kl]) {
                continue;
            }
            g_pos[count][0] = j;
            g_pos[count][1] = kl / num_band;
            g_pos[count][2] = kl % num_band;
            g_pos[count][3] = j * nbb + kl;
            count++;
        }
    }
    return count;
}
/* Per-triplet worker for the detailed self-energy: for every temperature it
 * recomputes the phonon occupations and then, per band0 block, delegates to
 * the T>0 or T=0 collector, storing the per-channel contributions in
 * detailed_imag_self_energy and the block sums in imag_self_energy
 * (layout (num_temps, num_band0)). */
static void detailed_imag_self_energy_at_triplet(
    double *detailed_imag_self_energy, double *imag_self_energy,
    const long num_band0, const long num_band, const double *fc3_normal_squared,
    const double *frequencies, const long triplet[3], const double *g1,
    const double *g2_3, const char *g_zero, const double *temperatures,
    const long num_temps, const double cutoff_frequency) {
    long t, b0, shift;
    double *occ1, *occ2;

    occ1 = (double *)malloc(sizeof(double) * num_band);
    occ2 = (double *)malloc(sizeof(double) * num_band);

    for (t = 0; t < num_temps; t++) {
        set_occupations(occ1, occ2, num_band, temperatures[t], triplet,
                        frequencies, cutoff_frequency);
        for (b0 = 0; b0 < num_band0; b0++) {
            shift = b0 * num_band * num_band;
            if (temperatures[t] > 0) {
                imag_self_energy[t * num_band0 + b0] =
                    collect_detailed_imag_self_energy(
                        detailed_imag_self_energy + shift, num_band,
                        fc3_normal_squared + shift, occ1, occ2, g1 + shift,
                        g2_3 + shift, g_zero + shift);
            } else {
                imag_self_energy[t * num_band0 + b0] =
                    collect_detailed_imag_self_energy_0K(
                        detailed_imag_self_energy + shift, num_band,
                        fc3_normal_squared + shift, occ1, occ2, g1 + shift,
                        g_zero + shift);
            }
        }
    }

    free(occ1);
    free(occ2);
}
/* Fill one (num_band, num_band) block of detailed contributions for T > 0 and
 * return their sum. Entries whose weight is masked out (g_zero) or whose
 * occupation is flagged negative are written as 0 and skipped. */
static double collect_detailed_imag_self_energy(
    double *imag_self_energy, const long num_band,
    const double *fc3_normal_squared, const double *n1, const double *n2,
    const double *g1, const double *g2_3, const char *g_zero) {
    long i, j, ij;
    double total;

    total = 0;
    ij = 0;
    for (i = 0; i < num_band; i++) {
        for (j = 0; j < num_band; j++, ij++) {
            imag_self_energy[ij] = 0;
            if (g_zero[ij] || n1[i] < 0 || n2[j] < 0) {
                continue;
            }
            imag_self_energy[ij] =
                ((n1[i] + n2[j] + 1) * g1[ij] + (n1[i] - n2[j]) * g2_3[ij]) *
                fc3_normal_squared[ij];
            total += imag_self_energy[ij];
        }
    }
    return total;
}
/* T = 0 counterpart of collect_detailed_imag_self_energy: only the g1 term
 * contributes. Masked or cutoff-flagged entries are written as 0 and skipped;
 * returns the sum of the block. */
static double collect_detailed_imag_self_energy_0K(
    double *imag_self_energy, const long num_band,
    const double *fc3_normal_squared, const double *n1, const double *n2,
    const double *g1, const char *g_zero) {
    long i, j, ij;
    double total;

    total = 0;
    ij = 0;
    for (i = 0; i < num_band; i++) {
        for (j = 0; j < num_band; j++, ij++) {
            imag_self_energy[ij] = 0;
            if (g_zero[ij] || n1[i] < 0 || n2[j] < 0) {
                continue;
            }
            imag_self_energy[ij] = g1[ij] * fc3_normal_squared[ij];
            total += imag_self_energy[ij];
        }
    }
    return total;
}
/* Fill n1/n2 with the occupations of the triplet's 2nd and 3rd phonons for
 * every band, at the given temperature. Bands whose frequency does not exceed
 * cutoff_frequency are flagged with -1 so callers can skip them. */
static void set_occupations(double *n1, double *n2, const long num_band,
                            const double temperature, const long triplet[3],
                            const double *frequencies,
                            const double cutoff_frequency) {
    long b;
    double freq;

    for (b = 0; b < num_band; b++) {
        freq = frequencies[triplet[1] * num_band + b];
        n1[b] = (freq > cutoff_frequency)
                    ? phonoc_bose_einstein(freq, temperature)
                    : -1;
        freq = frequencies[triplet[2] * num_band + b];
        n2[b] = (freq > cutoff_frequency)
                    ? phonoc_bose_einstein(freq, temperature)
                    : -1;
    }
}
|
NeuralNetwork_OMP_CPU5.c | /* NEURAL NETWORK OMP CPU5.c
* by Lut99
*
* Created:
* 4/18/2020, 11:25:46 PM
* Last edited:
* 19/11/2020, 17:19:00
* Auto updated?
* Yes
*
* Description:
* The NeuralNetwork class implements a matrix-based Feedforward Neural
* Network which is hardcoded to use Mean Squared Error for cost function and
* sigmoid as activation function.
*
* This file implements the fifth of eight different OpenMP-optimised
* versions for the CPU. It optimises the forward pass only using threads for
* the outer loops and SIMD for the inner loops.
**/
#include <stdlib.h>
#include <time.h>
#include <stdio.h>
#include <math.h>
#include <string.h>
#include <sys/time.h>
#include "NeuralNetwork.h"
/* Initialization ranges for weights and biases (used elsewhere in the file). */
#define WEIGHTS_MIN -3.0
#define WEIGHTS_MAX 3.0
#define BIAS_MIN -3.0
#define BIAS_MAX 3.0
#define NUM_THREADS 16

/***** OPTIONAL PARAMETERS *****/
/* Number of OpenMP threads used for training; overridden by parse_opt_args(). */
static unsigned int n_threads = 16;

/***** OPENMP DECLARATIONS *****/
/* NOTE(review): hand-written prototypes instead of #include <omp.h>; the
 * return types do not match the official API (omp_set_num_threads returns
 * void) — confirm this links cleanly on all target toolchains. */
extern int omp_set_num_threads();
extern int omp_get_num_procs();
extern int omp_get_thread_num();

/***** HELPER FUNCTIONS *****/
/* Elapsed time between two struct timeval stamps.
 * NOTE(review): despite the "_MS" name this evaluates to SECONDS (the
 * microsecond total is divided by 1e6). */
#define TIMEVAL_TO_MS(T_START, T_END) (((T_END.tv_sec - T_START.tv_sec) * 1000000 + (T_END.tv_usec - T_START.tv_usec)) / 1000000.0)

/* Largest value in 'list' (of 'length' elements); defined elsewhere. */
extern size_t max(size_t length, const size_t* list);
/***** NEURAL NETWORK OPERATIONS *****/
/* Train the network on 'n_samples' input/expected pairs for exactly
 * 'n_iterations' full-batch iterations.
 *
 * nn            : the network to train; biases and weights updated in place.
 * inputs        : n_samples input vectors (linked, not copied).
 * expected      : n_samples target vectors for the output layer.
 * learning_rate : step size applied to the accumulated bias/weight deltas.
 *
 * This "CPU 5" variant parallelises only the forward pass and the weight
 * update with OpenMP; the backward pass stays serial because every sample
 * accumulates into the shared delta_biases / delta_weights buffers. */
void nn_train(neural_net* nn, size_t n_samples, double** inputs, double** expected, double learning_rate, size_t n_iterations) {
#ifdef BENCHMARK
    // Declare all timers
    struct timeval s_total, e_total, s_iters, e_iters, s_fwd, e_fwd, s_bck_out, e_bck_out, s_bck_hid, e_bck_hid, s_upd, e_upd;
    // Set some shortcuts for the timers (phase timers sample the run once,
    // at the halfway iteration / halfway sample)
    size_t half_iters = n_iterations / 2;
    size_t half_samples = n_samples / 2;
    // Start the total timer
    gettimeofday(&s_total, NULL);
#endif
    // Also obtain links to all biases / matrices
    double** biases = nn->biases;
    double** weights = nn->weights;
    // Make some shortcuts for the number-of-nodes information
    size_t n_layers = nn->n_layers;
    size_t* nodes_per_layer = nn->nodes_per_layer;
    // Initialize the temporary delta memory to the correct size
    // (both scratch buffers are sized for the widest layer via max())
    double* deltas = malloc(sizeof(double) * max(n_layers, nodes_per_layer));
    double* prev_deltas = malloc(sizeof(double) * max(n_layers, nodes_per_layer));
    // Create a list that is used to store intermediate outputs. The first input layer (=first column)
    // is linked and not copied to the input data
    // NOTE(review): stack VLA of n_samples * n_layers pointers; very large
    // sample counts could overflow the stack — consider heap allocation.
    double* layer_outputs[n_samples][n_layers];
    for (size_t s = 0; s < n_samples; s++) {
        // Link the input layer
        layer_outputs[s][0] = inputs[s];
        // Allocate arrays for the other layers
        for (size_t l = 1; l < n_layers; l++) {
            layer_outputs[s][l] = malloc(sizeof(double) * nodes_per_layer[l]);
        }
    }
    // Create the delta_biases and delta_weights arrays / matrices
    double* delta_biases[nn->n_weights];
    double* delta_weights[nn->n_weights];
    for(size_t l = 0; l < nn->n_weights; l++) {
        delta_biases[l] = malloc(sizeof(double) * nodes_per_layer[l + 1]);
        delta_weights[l] = malloc(sizeof(double) * nodes_per_layer[l] * nodes_per_layer[l + 1]);
        // Fill with zeros
        for (size_t n = 0; n < nodes_per_layer[l + 1]; n++) {
            delta_biases[l][n] = 0;
            for (size_t prev_n = 0; prev_n < nodes_per_layer[l]; prev_n++) {
                delta_weights[l][prev_n * nodes_per_layer[l + 1] + n] = 0;
            }
        }
    }
#ifdef BENCHMARK
    // Start the iterations timer
    gettimeofday(&s_iters, NULL);
#endif
    // Perform the training for n_iterations (always)
    for (size_t i = 0; i < n_iterations; i++) {
        // Samples are independent in the forward pass, so parallelise over them
        #pragma omp parallel for schedule(static)
        for (size_t s = 0; s < n_samples; s++) {
            /***** FORWARD PASS *****/
#ifdef BENCHMARK
            // Start the forward pass timer
            if (i == half_iters && s == half_samples) {
                gettimeofday(&s_fwd, NULL);
            }
#endif
            // sample_outputs is a 2D flattened array for this layer
            double** sample_outputs = layer_outputs[s];
            // Iterate over each layer to feedforward through the network
            for (size_t l = 1; l < n_layers; l++) {
                // Get some references to the bias list, weight matrix and outputs of the previous and this layer
                double* bias = biases[l - 1];
                double* weight = weights[l - 1];
                double* prev_output = sample_outputs[l - 1];
                double* output = sample_outputs[l];
                // Compute the activation for each node on this layer
                size_t this_nodes = nodes_per_layer[l];
                size_t prev_nodes = nodes_per_layer[l - 1];
                for (size_t n = 0; n < this_nodes; n++) {
                    // Sum the weighted inputs for this node
                    double z = bias[n];
                    #pragma omp simd
                    for (size_t prev_n = 0; prev_n < prev_nodes; prev_n++) {
                        z += prev_output[prev_n] * weight[prev_n * this_nodes + n];
                    }
                    // Run the activation function (sigmoid) over this input and store it in the output
                    output[n] = 1 / (1 + exp(-z));
                }
            }
#ifdef BENCHMARK
            // End the forward timer, start the backward pass output timer
            if (i == half_iters && s == half_samples) {
                gettimeofday(&e_fwd, NULL);
            }
#endif
        }
        /***** BACKWARD PASS *****/
        // Implementation: https://towardsdatascience.com/simple-neural-network-implementation-in-c-663f51447547
        // Loop through all samples to compute the backward cost
        // (serial: every sample accumulates into the shared delta buffers)
        size_t last_nodes = nodes_per_layer[n_layers - 1];
        size_t last_prev_nodes = nodes_per_layer[n_layers - 2];
        double* last_delta_bias = delta_biases[n_layers - 2];
        double* last_delta_weight = delta_weights[n_layers - 2];
        for (size_t s = 0; s < n_samples; s++) {
#ifdef BENCHMARK
            // End the forward timer, start the backward pass output timer
            if (i == half_iters && s == half_samples) {
                gettimeofday(&s_bck_out, NULL);
            }
#endif
            // Backpropagate the error from the last layer to the first.
            double** sample_outputs = layer_outputs[s];
            double* sample_expected = expected[s];
            // Do the output layer: compute the deltas
            // ((expected - out) * out * (1 - out): error times sigmoid derivative)
            double* output = sample_outputs[n_layers - 1];
            #pragma omp simd
            for (size_t n = 0; n < last_nodes; n++) {
                double output_val = output[n];
                prev_deltas[n] = (sample_expected[n] - output_val) * output_val * (1 - output_val);
            }
            // Do the output layer: compute the bias & weight updates
            // Add all deltas as delta_biases for this layer
            #pragma omp simd
            for (size_t n = 0; n < last_nodes; n++) {
                last_delta_bias[n] += prev_deltas[n];
            }
            // Same for all the weights, except we compute the delta_weights first
            double* last_prev_output = sample_outputs[n_layers - 2];
            for (size_t prev_n = 0; prev_n < last_prev_nodes; prev_n++) {
                #pragma omp simd
                for (size_t n = 0; n < last_nodes; n++) {
                    last_delta_weight[prev_n * last_nodes + n] += last_prev_output[prev_n] * prev_deltas[n];
                }
            }
#ifdef BENCHMARK
            // End the backward pass output timer, start the backward pass hidden timer
            if (i == half_iters && s == half_samples) {
                gettimeofday(&e_bck_out, NULL);
                gettimeofday(&s_bck_hid, NULL);
            }
#endif
            // Then, the rest of the hidden layers
            for (size_t l = n_layers - 2; l > 0; l--) {
                double* delta_bias = delta_biases[l - 1];
                double* delta_weight = delta_weights[l - 1];
                double* output = sample_outputs[l];
                double* prev_output = sample_outputs[l - 1];
                size_t next_nodes = nodes_per_layer[l + 1];
                size_t this_nodes = nodes_per_layer[l];
                size_t prev_nodes = nodes_per_layer[l - 1];
                // Loop through all nodes in this layer to compute their deltas by summing all deltas of the next layer in a weighted fashion
                double* weight_next = weights[l];
                for (size_t n = 0; n < this_nodes; n++) {
                    // Take the weighted sum of all connection of that node with this layer
                    double error = 0;
                    #pragma omp simd
                    for (size_t next_n = 0; next_n < next_nodes; next_n++) {
                        error += prev_deltas[next_n] * weight_next[n * next_nodes + next_n];
                    }
                    // Multiply the error with the derivative of the activation function to find the result
                    double output_val = output[n];
                    deltas[n] = error * output_val * (1 - output_val);
                }
                // Add all deltas as delta_biases for this layer
                #pragma omp simd
                for (size_t n = 0; n < this_nodes; n++) {
                    delta_bias[n] += deltas[n];
                }
                // Same for all the weights, except we compute the delta_weights first
                for (size_t prev_n = 0; prev_n < prev_nodes; prev_n++) {
                    #pragma omp simd
                    for (size_t n = 0; n < this_nodes; n++) {
                        delta_weight[prev_n * this_nodes + n] += prev_output[prev_n] * deltas[n];
                    }
                }
                // Swap the two deltas (this layer's deltas become the
                // "next layer" deltas for the layer below)
                double* temp = deltas;
                deltas = prev_deltas;
                prev_deltas = temp;
            }
#ifdef BENCHMARK
            // End the backward pass hidden timer
            if (i == half_iters && s == half_samples) {
                gettimeofday(&e_bck_hid, NULL);
            }
#endif
        }
#ifdef BENCHMARK
        // Start the updates timer
        if (i == half_iters) {
            gettimeofday(&s_upd, NULL);
        }
#endif
        // Actually update the weights, and reset the delta updates to 0 for next iteration
        // (layers are independent here, so parallelise over them)
        #pragma omp parallel for schedule(static)
        for (size_t l = 0; l < nn->n_weights; l++) {
            double* bias = biases[l];
            double* delta_bias = delta_biases[l];
            double* weight = weights[l];
            double* delta_weight = delta_weights[l];
            // Update the biases & reset delta_biases
            size_t this_nodes = nodes_per_layer[l + 1];
            #pragma omp simd
            for (size_t n = 0; n < this_nodes; n++) {
                bias[n] += delta_bias[n] * learning_rate;
                delta_bias[n] = 0;
            }
            // Update the weights & reset delta_weights
            // (the inner loop variable deliberately shadows the iteration counter 'i')
            size_t prev_nodes = nodes_per_layer[l];
            #pragma omp simd
            for (size_t i = 0; i < this_nodes * prev_nodes; i++) {
                weight[i] += delta_weight[i] * learning_rate;
                delta_weight[i] = 0;
            }
        }
#ifdef BENCHMARK
        // Stop the updates timer
        if (i == half_iters) {
            gettimeofday(&e_upd, NULL);
        }
#endif
    }
#ifdef BENCHMARK
    // End the iterations timer
    gettimeofday(&e_iters, NULL);
#endif
    // Cleanup
    // Free the delta biases / weights
    // (assumes nn->n_weights == n_layers - 1 — TODO confirm against NeuralNetwork.h)
    for(size_t l = 0; l < n_layers - 1; l++) {
        free(delta_biases[l]);
        free(delta_weights[l]);
    }
    // Free the layer_outputs (skip the first, as these merely link the input rather than copy 'em)
    for (size_t s = 0; s < n_samples; s++) {
        for (size_t l = 1; l < n_layers; l++) {
            free(layer_outputs[s][l]);
        }
    }
    // Cleanup the deltas
    free(deltas);
    free(prev_deltas);
#ifdef BENCHMARK
    // End the total timer
    gettimeofday(&e_total, NULL);
    // Print the results
    printf("%f\n", TIMEVAL_TO_MS(s_total, e_total));
    printf("%f\n", TIMEVAL_TO_MS(s_iters, e_iters));
    printf("%f\n", TIMEVAL_TO_MS(s_fwd, e_fwd));
    printf("%f\n", TIMEVAL_TO_MS(s_bck_out, e_bck_out));
    printf("%f\n", TIMEVAL_TO_MS(s_bck_hid, e_bck_hid));
    printf("%f\n", TIMEVAL_TO_MS(s_upd, e_upd));
#endif
}
/***** OTHER TOOLS *****/
/* Parse the optional command line arguments for this variation.
 *
 * argv[0], if present, is the number of OpenMP threads to use. The parsed
 * value is validated before use: atoi() returns 0 for non-numeric input and
 * may yield negative values, either of which would previously have been
 * passed straight to omp_set_num_threads(); in that case the default
 * n_threads value is kept instead. */
void parse_opt_args(int argc, char** argv) {
    // Parse and set number of threads as first argument
    if (argc >= 1) {
        int requested = atoi(argv[0]);
        // Only accept strictly positive thread counts; keep the default otherwise
        if (requested > 0) {
            n_threads = (unsigned int) requested;
        }
    }
    omp_set_num_threads(n_threads);
}
/* Report which variation this binary implements and the thread count in use. */
void print_opt_args() {
    fputs(" - Variation : OpenMP CPU 5 (Forward only, with SIMD)\n", stdout);
    printf(" - Number of threads : %u\n", n_threads);
}
|
convolution_packnto1.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Convolution for an input blob packed with 'packn' floats per element and a
// plain (pack1) float output, implemented with RISC-V Vector (RVV)
// intrinsics. Weights are expected pre-arranged into weight_data_packnto1
// (packn floats per kernel tap per input channel); one scalar output value is
// produced per (out-channel p, row i, column j) position.
static void convolution_packnto1_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_packnto1, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    const int packn = csrr_vlenb() / 4;  // floats per vector register: VLEN bytes / sizeof(float)
    const word_type vl = vsetvl_e32m1(packn);

    int w = bottom_blob.w;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int maxk = kernel_w * kernel_h;

    // kernel offsets
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        // Precompute, for every kernel tap, its element offset into the input
        // plane, accounting for dilation; 'gap' jumps to the next kernel row.
        int p1 = 0;
        int p2 = 0;
        int gap = w * dilation_h - kernel_w * dilation_w;
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2 += dilation_w;
            }
            p2 += gap;
        }
    }

    // Null when the layer has no bias term (Mat converts to a null pointer).
    const float* bias_data_ptr = bias_data;

    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        float* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                float sum = 0.f;

                if (bias_data_ptr)
                {
                    sum = bias_data_ptr[p];
                }

                // Vector accumulator over the packn packed input lanes.
                vfloat32m1_t _sum = vfmv_v_f_f32m1(0.f, vl);

                const float* kptr = (const float*)weight_data_packnto1.channel(p);

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    const float* sptr = m.row(i * stride_h) + j * stride_w * packn;

                    for (int k = 0; k < maxk; k++)
                    {
                        vfloat32m1_t _val = vle32_v_f32m1(sptr + space_ofs[k] * packn, vl);
                        vfloat32m1_t _w = vle32_v_f32m1(kptr, vl);
                        _sum = vfmacc_vv_f32m1(_sum, _val, _w, vl);

                        kptr += packn;
                    }
                }

                // Horizontal reduction of the vector accumulator, seeded with
                // the scalar bias via vfmv_s_f. NOTE(review): the value-
                // initialized vfloat32m1_t() destination operands rely on an
                // older rvv intrinsic API — confirm against the toolchain in use.
                sum = vfmv_f_s_f32m1_f32(vfredsum_vs_f32m1_f32m1(vfloat32m1_t(), _sum, vfmv_s_f_f32m1(vfloat32m1_t(), sum, vl), vl));

                sum = activation_ss(sum, activation_type, activation_params);

                outptr[j] = sum;
            }

            outptr += outw;
        }
    }
}
|
BRKGA.h | /**
* BRKGA.h
*
* This template class encapsulates a Biased Random-key Genetic Algorithm for minimization problems
* with K independent Populations stored in two vectors of Population, current and previous.
* It supports multi-threading via OpenMP, and implements the following key methods:
*
* - BRKGA() constructor: initializes the populations with parameters described below.
* - evolve() operator: evolve each Population following the BRKGA methodology. This method
* supports OpenMP to evolve up to K independent Populations in parallel.
* Please note that double Decoder::decode(...) MUST be thread-safe.
*
* Required parameters:
* - n: number of genes in each chromosome
* - p: number of elements in each population
* - pe: pct of elite items into each population
* - pm: pct of mutants introduced at each generation into the population
* - rhoe: probability that an offspring inherits the allele of its elite parent
*
* Optional parameters:
* - K: number of independent Populations (set to 1 if not supplied)
* - MAX_THREADS: number of threads to perform parallel decoding (set to 1 if not supplied)
* WARNING: Decoder::decode() MUST be thread-safe if MAX_THREADS > 1!
*
* The following objects are required upon declaration:
* RNG: random number generator that implements the methods below.
* - RNG(unsigned long seed) to initialize a new RNG with 'seed'
* - double rand() to return a double precision random deviate in range [0,1)
* - unsigned long randInt() to return a >=32-bit unsigned random deviate in range [0,2^32-1)
* - unsigned long randInt(N) to return a unsigned random deviate in range [0, N] with N < 2^32
*
* Decoder: problem-specific decoder that implements any of the decode methods outlined below. When
* compiling and linking BRKGA with -fopenmp (i.e., with multithreading support via
* OpenMP), the method must be thread-safe.
* - double decode(const vector< double >& chromosome) const, if you don't want to change
* chromosomes inside the framework, or
* - double decode(vector< double >& chromosome) const, if you'd like to update a chromosome.
* WARNING: even though both methods use const correctness to enforce that they are thread safe
* the use of mutable within the Decoder class could void such a feature! In other
* words, DO NOT use mutable within the decoder.
*
* Created on : Jun 22, 2010 by rtoso
* Last update: Sep 15, 2011 by rtoso
* Authors : Rodrigo Franco Toso <rtoso@cs.rutgers.edu>
* Mauricio G.C. Resende <mgcr@research.att.com>
*
* The MIT License (MIT)
*
* Copyright (c) 2018
* Rodrigo Franco Toso (rfrancotoso@gmail.com) and
* Mauricio G.C. Resende
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is furnished to do
* so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef BRKGA_H
#define BRKGA_H
#include <omp.h>
#include <algorithm>
#include <exception>
#include <stdexcept>
#include "Population.h"
/*
 * NOTE(review): the dynamic exception specifications used below
 * (throw(std::range_error)) were deprecated in C++11 and removed in C++17.
 * Removing them must be coordinated with the out-of-line definitions later in
 * this file, since declaration and definition must agree pre-C++17.
 */
template< class Decoder, class RNG >
class BRKGA {
public:
	/*
	 * Default constructor
	 * Required hyperparameters:
	 * - n: number of genes in each chromosome
	 * - p: number of elements in each population
	 * - pe: pct of elite items into each population
	 * - pm: pct of mutants introduced at each generation into the population
	 * - rhoe: probability that an offspring inherits the allele of its elite parent
	 *
	 * Optional parameters:
	 * - K: number of independent Populations
	 * - MAX_THREADS: number of threads to perform parallel decoding
	 *                WARNING: Decoder::decode() MUST be thread-safe; safe if implemented as
	 *                + double Decoder::decode(std::vector< double >& chromosome) const
	 */
	BRKGA(unsigned n, unsigned p, double pe, double pm, double rhoe, const Decoder& refDecoder,
			RNG& refRNG, unsigned K = 1, unsigned MAX_THREADS = 1) throw(std::range_error);

	/**
	 * Destructor
	 */
	~BRKGA();

	/**
	 * Resets all populations with brand new keys
	 */
	void reset();

	/**
	 * Evolve the current populations following the guidelines of BRKGAs
	 * @param generations number of generations (must be even and nonzero)
	 * @param J interval to exchange elite chromosomes (must be even; 0 ==> no synchronization)
	 * @param M number of elite chromosomes to select from each population in order to exchange
	 */
	void evolve(unsigned generations = 1);

	/**
	 * Exchange elite-solutions between the populations
	 * @param M number of elite chromosomes to select from each population
	 */
	void exchangeElite(unsigned M) throw(std::range_error);

	/**
	 * Returns the current population
	 */
	const Population& getPopulation(unsigned k = 0) const;

	/**
	 * Returns the chromosome with best fitness so far among all populations
	 */
	const std::vector< double >& getBestChromosome() const;

	/**
	 * Returns the best fitness found so far among all populations
	 */
	double getBestFitness() const;

	// Return copies to the internal parameters:
	unsigned getN() const;
	unsigned getP() const;
	unsigned getPe() const;
	unsigned getPm() const;
	unsigned getPo() const;
	double getRhoe() const;
	unsigned getK() const;
	unsigned getMAX_THREADS() const;

private:
	// I don't see any reason to pimpl the internal methods and data, so here they are:
	// Hyperparameters:
	const unsigned n;	// number of genes in the chromosome
	const unsigned p;	// number of elements in the population
	const unsigned pe;	// number of elite items in the population (absolute count, not pct)
	const unsigned pm;	// number of mutants introduced at each generation into the population (absolute count)
	const double rhoe;	// probability that an offspring inherits the allele of its elite parent

	// Templates:
	RNG& refRNG;				// reference to the random number generator
	const Decoder& refDecoder;	// reference to the problem-dependent Decoder

	// Parallel populations parameters:
	const unsigned K;			// number of independent parallel populations
	const unsigned MAX_THREADS;	// number of threads for parallel decoding

	// Data:
	std::vector< Population* > previous;	// previous populations
	std::vector< Population* > current;		// current populations

	// Local operations:
	void initialize(const unsigned i);	// initialize current population 'i' with random keys
	void evolution(Population& curr, Population& next);
	bool isRepeated(const std::vector< double >& chrA, const std::vector< double >& chrB) const;
};
/*
 * Constructor: validates the hyperparameters, then allocates, randomly
 * initializes and decodes each of the K populations, copying the initial
 * generation into 'previous'. Note that _pe and _pm arrive as fractions of p
 * and are stored as truncated absolute counts.
 * NOTE(review): throw(std::range_error) was removed in C++17; changing it
 * must be coordinated with the in-class declaration above.
 */
template< class Decoder, class RNG >
BRKGA< Decoder, RNG >::BRKGA(unsigned _n, unsigned _p, double _pe, double _pm, double _rhoe,
		const Decoder& decoder, RNG& rng, unsigned _K, unsigned MAX) throw(std::range_error) :
		n(_n), p(_p), pe(unsigned(_pe * p)), pm(unsigned(_pm * p)), rhoe(_rhoe), refRNG(rng),
		refDecoder(decoder), K(_K), MAX_THREADS(MAX), previous(K, 0), current(K, 0) {
	// Error check:
	using std::range_error;
	if(n == 0) { throw range_error("Chromosome size equals zero."); }
	if(p == 0) { throw range_error("Population size equals zero."); }
	if(pe == 0) { throw range_error("Elite-set size equals zero."); }
	if(pe > p) { throw range_error("Elite-set size greater than population size (pe > p)."); }
	if(pm > p) { throw range_error("Mutant-set size (pm) greater than population size (p)."); }
	if(pe + pm > p) { throw range_error("elite + mutant sets greater than population size (p)."); }
	if(K == 0) { throw range_error("Number of parallel populations cannot be zero."); }

	// Initialize and decode each chromosome of the current population, then copy to previous:
	for(unsigned i = 0; i < K; ++i) {
		// Allocate:
		current[i] = new Population(n, p);
		// Initialize:
		initialize(i);
		// Then just copy to previous:
		previous[i] = new Population(*current[i]);
	}
}
template< class Decoder, class RNG >
BRKGA< Decoder, RNG >::~BRKGA() {
	// Release both generations of every population.
	for(unsigned k = 0; k < K; ++k) {
		delete current[k];
		delete previous[k];
	}
}
template< class Decoder, class RNG >
const Population& BRKGA< Decoder, RNG >::getPopulation(unsigned k) const {
#ifdef RANGECHECK
	if(k >= K) { throw std::range_error("Invalid population identifier."); }
#endif
	// Hand back a read-only view of the k-th current population.
	return *current[k];
}
template< class Decoder, class RNG >
double BRKGA< Decoder, RNG >::getBestFitness() const {
	// Each population keeps its fitness list sorted, so entry 0 is its champion;
	// return the smallest champion fitness across all K populations.
	double incumbent = current[0]->fitness[0].first;
	for(unsigned k = 1; k < K; ++k) {
		const double candidate = current[k]->fitness[0].first;
		if(candidate < incumbent) { incumbent = candidate; }
	}
	return incumbent;
}
template< class Decoder, class RNG >
const std::vector< double >& BRKGA< Decoder, RNG >::getBestChromosome() const {
	// Find the population whose champion has the lowest fitness, then return
	// that champion (index 0, since populations are kept sorted).
	unsigned champion = 0;
	for(unsigned k = 1; k < K; ++k) {
		if(current[k]->getBestFitness() < current[champion]->getBestFitness()) { champion = k; }
	}
	return current[champion]->getChromosome(0);
}
template< class Decoder, class RNG >
void BRKGA< Decoder, RNG >::reset() {
	// Re-seed every population with brand-new random keys
	// (initialize also re-decodes and re-sorts).
	unsigned k = 0;
	while(k < K) {
		initialize(k);
		++k;
	}
}
template< class Decoder, class RNG >
void BRKGA< Decoder, RNG >::evolve(unsigned generations) {
#ifdef RANGECHECK
	if(generations == 0) { throw std::range_error("Cannot evolve for 0 generations."); }
#endif
	// One generation per pass: evolve the current population into the storage
	// of 'previous', then swap the pointers so that 'current' always refers to
	// the newest generation.
	for(unsigned g = 0; g < generations; ++g) {
		for(unsigned k = 0; k < K; ++k) {
			evolution(*current[k], *previous[k]);
			std::swap(current[k], previous[k]);
		}
	}
}
template< class Decoder, class RNG >
// Migrate the M best chromosomes of every other population into the worst
// (tail) slots of each population, then re-sort each fitness index.
// NOTE(review): the dynamic exception specification `throw(std::range_error)`
// was deprecated in C++11 and removed in C++17 -- this line will not compile
// under -std=c++17; it should be dropped here AND in the matching in-class
// declaration (not visible in this chunk) when the project upgrades.
void BRKGA< Decoder, RNG >::exchangeElite(unsigned M) throw(std::range_error) {
#ifdef RANGECHECK
if(M == 0 || M >= p) { throw std::range_error("M cannot be zero or >= p."); }
#endif
for(unsigned i = 0; i < K; ++i) {
// Population i will receive some elite members from each Population j below:
unsigned dest = p - 1; // Last chromosome of i (will be updated below)
for(unsigned j = 0; j < K; ++j) {
if(j == i) { continue; }
// Copy the M best of Population j into Population i:
for(unsigned m = 0; m < M; ++m) {
// Copy the m-th best of Population j into the 'dest'-th position of Population i:
const std::vector< double >& bestOfJ = current[j]->getChromosome(m);
// Overwrite the alleles in place; only the fitness VALUE is copied --
// the fitness index entry (.second) still refers to the same slot.
std::copy(bestOfJ.begin(), bestOfJ.end(), current[i]->getChromosome(dest).begin());
current[i]->fitness[dest].first = current[j]->fitness[m].first;
--dest;
}
}
}
// Re-establish the sorted fitness order after the overwrites above:
for(int j = 0; j < int(K); ++j) { current[j]->sortFitness(); }
}
template< class Decoder, class RNG >
inline void BRKGA< Decoder, RNG >::initialize(const unsigned i) {
	// Fill every allele of every chromosome of population 'i' with a fresh
	// random key drawn from the shared RNG:
	for(unsigned chrom = 0; chrom < p; ++chrom) {
		for(unsigned allele = 0; allele < n; ++allele) {
			(*current[i])(chrom, allele) = refRNG.rand();
		}
	}
	// Decode all chromosomes to obtain their fitness (in parallel when
	// OpenMP is available; the loop index is 'int' for OpenMP compatibility):
#ifdef _OPENMP
	#pragma omp parallel for num_threads(MAX_THREADS)
#endif
	for(int chrom = 0; chrom < int(p); ++chrom) {
		current[i]->setFitness(chrom, refDecoder.decode((*current[i])(chrom)));
	}
	// Keep the fitness index sorted so slot 0 is the best chromosome:
	current[i]->sortFitness();
}
template< class Decoder, class RNG >
// Produce the next generation in 'next' from the current generation 'curr':
// keep the pe elite unchanged, mate p-pe-pm offspring (biased crossover with
// probability rhoe of inheriting the elite parent's allele), and append pm
// fresh random mutants. Only non-elite chromosomes are re-decoded.
inline void BRKGA< Decoder, RNG >::evolution(Population& curr, Population& next) {
// We now will set every chromosome of 'current', iterating with 'i':
unsigned i = 0; // Iterate chromosome by chromosome
unsigned j = 0; // Iterate allele by allele
// 2. The 'pe' best chromosomes are maintained, so we just copy these into 'current':
while(i < pe) {
// curr.fitness[i].second is the population index of the i-th best chromosome.
for(j = 0 ; j < n; ++j) { next(i,j) = curr(curr.fitness[i].second, j); }
// Elite keep their fitness value; their new index in 'next' is simply i.
next.fitness[i].first = curr.fitness[i].first;
next.fitness[i].second = i;
++i;
}
// 3. We'll mate 'p - pe - pm' pairs; initially, i = pe, so we need to iterate until i < p - pm:
while(i < p - pm) {
// Select an elite parent:
const unsigned eliteParent = (refRNG.randInt(pe - 1));
// Select a non-elite parent:
const unsigned noneliteParent = pe + (refRNG.randInt(p - pe - 1));
// Mate:
for(j = 0; j < n; ++j) {
// Each allele is taken from the elite parent with probability rhoe.
const unsigned& sourceParent = ((refRNG.rand() < rhoe) ? eliteParent : noneliteParent);
next(i, j) = curr(curr.fitness[sourceParent].second, j);
}
++i;
}
// We'll introduce 'pm' mutants:
while(i < p) {
for(j = 0; j < n; ++j) { next(i, j) = refRNG.rand(); }
++i;
}
// Time to compute fitness, in parallel:
#ifdef _OPENMP
#pragma omp parallel for num_threads(MAX_THREADS)
#endif
// NOTE: this 'int i' deliberately shadows the outer 'unsigned i' -- OpenMP
// (pre-3.0) requires a signed loop index; elite slots [0, pe) are skipped
// since their fitness was already copied above.
for(int i = int(pe); i < int(p); ++i) {
next.setFitness( i, refDecoder.decode(next.population[i]) );
}
// Now we must sort 'current' by fitness, since things might have changed:
next.sortFitness();
}
// Trivial observers for the algorithm's configuration parameters.
template< class Decoder, class RNG >
unsigned BRKGA<Decoder, RNG>::getN() const { return n; } // chromosome length (number of alleles)
template< class Decoder, class RNG >
unsigned BRKGA<Decoder, RNG>::getP() const { return p; } // population size per island
template< class Decoder, class RNG >
unsigned BRKGA<Decoder, RNG>::getPe() const { return pe; } // elite-set size
template< class Decoder, class RNG >
unsigned BRKGA<Decoder, RNG>::getPm() const { return pm; } // mutant-set size
template< class Decoder, class RNG >
unsigned BRKGA<Decoder, RNG>::getPo() const { return p - pe - pm; } // number of offspring per generation
template< class Decoder, class RNG >
double BRKGA<Decoder, RNG>::getRhoe() const { return rhoe; } // probability of inheriting the elite parent's allele
template< class Decoder, class RNG >
unsigned BRKGA<Decoder, RNG>::getK() const { return K; } // number of independent populations
template< class Decoder, class RNG >
unsigned BRKGA<Decoder, RNG>::getMAX_THREADS() const { return MAX_THREADS; } // threads used for parallel decoding
#endif
|
vadd.c | #include <stdint.h>
#include <stdio.h>
#include <omp.h>
// Element-wise vector addition c[i] = a[i] + b[i], followed by a reduction of
// all of c into *result. Both phases are offloaded with OpenMP target
// directives (they run serially when OpenMP offload is unavailable).
//
// a, b:   input vectors of length vecLen
// c:      output vector of length vecLen (receives a[i] + b[i])
// result: receives the sum of all elements of c
void vadd (uint64_t *a, uint64_t *b, uint64_t *c, uint64_t vecLen, uint64_t *result) {
    #pragma omp target teams distribute parallel for num_teams(8)
    for (uint64_t i = 0; i < vecLen; i++) {
        uint64_t sum = a[i] + b[i];
        c[i] = sum;
    }
    #pragma omp target
    {
        // BUG FIX: sum0 must be zero-initialized. The reduction clause
        // zero-initializes only the per-thread private copies; the final
        // combine adds them into the ORIGINAL variable, so leaving it
        // uninitialized made *result garbage.
        uint64_t sum0 = 0;
        #pragma omp parallel for reduction(+:sum0)
        for (uint64_t i = 0; i < vecLen; i++) {
            sum0 += c[i];
        }
        *result = sum0;
    }
}
|
GB_unop__lgamma_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__lgamma_fp64_fp64)
// op(A') function: GB (_unop_tran__lgamma_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = lgamma (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = lgamma (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = lgamma (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LGAMMA || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Auto-generated (see the file header): do not hand-edit the logic here.
// Applies cij = lgamma (aij) to every entry of A, writing the result into Cx.
// Returns GrB_NO_VALUE when the operator/type is compile-time disabled.
GrB_Info GB (_unop_apply__lgamma_fp64_fp64)
(
double *Cx, // Cx and Ax may be aliased
const double *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
// Non-bitmap case: all anz entries are present, so no per-entry test.
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
double z = aij ;
Cx [p] = lgamma (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// Skip entries whose bitmap bit says they are not present.
if (!Ab [p]) continue ;
double aij = Ax [p] ;
double z = aij ;
Cx [p] = lgamma (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Auto-generated: the actual transpose/apply loop lives in the included
// template, which expands GB_CAST_OP (cij = lgamma (aij)) per entry.
GrB_Info GB (_unop_tran__lgamma_fp64_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
convolution_1x1_pack16.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv1x1s1_sgemm_pack16_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    // A 1x1 stride-1 convolution is a plain matrix multiply: collapse the
    // spatial dimensions into a single row and delegate to the sgemm kernel.
    const int spatial = bottom_blob.w * bottom_blob.h;
    Mat bottom_im2col = bottom_blob;
    bottom_im2col.w = spatial;
    bottom_im2col.h = 1;
    im2col_sgemm_pack16_avx512(bottom_im2col, top_blob, kernel, _bias, opt);
}
// 1x1 stride-2 convolution: first subsample the input (keep every other pixel
// in x and y) into a shrunken blob, then run the stride-1 sgemm path on it.
// Data is packed 16 floats per element (AVX-512 register width).
static void conv1x1s2_sgemm_pack16_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
// After consuming a row (outw steps of 2 pixels), jump over the remainder of
// this row plus the next (skipped) row; units are floats (16 per pixel).
const int tailstep = (w - 2 * outw + w) * 16;
Mat bottom_blob_shrinked;
bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < channels; p++)
{
const float* r0 = bottom_blob.channel(p);
float* outptr = bottom_blob_shrinked.channel(p);
for (int i = 0; i < outh; i++)
{
int j = 0;
for (; j < outw; j++)
{
// Copy one packed pixel (16 floats), then skip the next pixel
// (r0 advances 32 floats = 2 pixels -> horizontal stride 2).
__m512 _v = _mm512_loadu_ps(r0);
_mm512_storeu_ps(outptr, _v);
r0 += 32;
outptr += 16;
}
r0 += tailstep;
}
}
conv1x1s1_sgemm_pack16_avx512(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
|
GB_binop__islt_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__islt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_08__islt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__islt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_04__islt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__islt_uint8)
// A*D function (colscale): GB (_AxD__islt_uint8)
// D*A function (rowscale): GB (_DxB__islt_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__islt_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__islt_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__islt_uint8)
// C=scalar+B GB (_bind1st__islt_uint8)
// C=scalar+B' GB (_bind1st_tran__islt_uint8)
// C=A+scalar GB (_bind2nd__islt_uint8)
// C=A'+scalar GB (_bind2nd_tran__islt_uint8)
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x < y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLT || GxB_NO_UINT8 || GxB_NO_ISLT_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Disabled for this operator: ISLT is not in the accumulator-eligible set.
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Auto-generated: the loop body lives in the included template and expands
// GB_BINOP (z = x < y) per entry.
GrB_Info GB (_Cdense_ewise3_noaccum__islt_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__islt_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__islt_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the block above already returned. Harmless
// artifact of the code generator; do not hand-edit this generated file.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Auto-generated: the scaling loop lives in the included template, applying
// GB_BINOP (z = x < y) between each A entry and the matching diagonal of D.
GrB_Info GB (_AxD__islt_uint8)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Mirror of _AxD above, scaling rows of B by the diagonal of D.
GrB_Info GB (_DxB__islt_uint8)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// Auto-generated: element-wise union of A and B with operator z = (x < y),
// optionally under mask M (structural/complemented per the flags). The
// ek-slicing workspaces are declared here and freed by GB_FREE_WORK after the
// included template runs.
GrB_Info GB (_AaddB__islt_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// Auto-generated: element-wise intersection of A and B with z = (x < y);
// the full algorithm lives in the included meta file.
GrB_Info GB (_AemultB_08__islt_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// Since GB_BINOP_FLIP is 0 for ISLT, only the non-flipped branch below is
// compiled; the flipxy argument is then unused by the template.
GrB_Info GB (_AemultB_02__islt_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Auto-generated: masked element-wise multiply; the loop lives in the
// included template.
GrB_Info GB (_AemultB_04__islt_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Auto-generated: bitmap-result variant of the element-wise multiply.
GrB_Info GB (_AemultB_bitmap__islt_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Computes Cx [p] = (x < Bx [p]) for every present entry of B. GBB tests the
// bitmap bit (always true when Bb is NULL) and GBX reads the value (the
// 'false' argument disables iso-valued handling here).
GrB_Info GB (_bind1st__islt_uint8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint8_t bij = GBX (Bx, p, false) ;
Cx [p] = (x < bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Mirror of bind1st: Cx [p] = (Ax [p] < y) for every present entry of A.
GrB_Info GB (_bind2nd__islt_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint8_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij < y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x < aij) ; \
}
// Transpose A while applying z = (x < aij); the loop is in the included
// GB_unop_transpose.c template, driven by the GB_CAST_OP macro above.
GrB_Info GB (_bind1st_tran__islt_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// Restore GB_ATYPE for the rest of the translation unit (generator artifact:
// it re-defines the same value it just undefined above).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij < y) ; \
}
// Transpose A while applying z = (aij < y); the loop is in the included
// GB_unop_transpose.c template, driven by the GB_CAST_OP macro above.
GrB_Info GB (_bind2nd_tran__islt_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
opencl_dmg_fmt_plug.c | /* DMG cracker patch for JtR. Hacked together during August of 2012
* by Dhiru Kholia <dhiru.kholia at gmail.com>
*
* This software is Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net>
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted. */
/*
* Debug levels:
* 1 show what "test" hits
* 2 dump printables from the decrypted blocks
* 3 dump hex from the decrypted blocks
* 4 dump decrypted blocks to files (will overwrite with no mercy):
* dmg.debug.main main block
* dmg.debug alternate block (if present, this is the start block)
*/
//#define DMG_DEBUG 2
#ifdef HAVE_OPENCL
#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_dmg;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_dmg);
#else
#include <string.h>
#include <openssl/aes.h>
#include <openssl/evp.h>
#include <openssl/hmac.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "arch.h"
#include "formats.h"
#include "common.h"
#include "options.h"
#include "jumbo.h"
#include "common-opencl.h"
#define FORMAT_LABEL "dmg-opencl"
#define FORMAT_NAME "Apple DMG"
#define ALGORITHM_NAME "PBKDF2-SHA1 OpenCL 3DES/AES"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1001
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define BINARY_SIZE 0
#define BINARY_ALIGN 1
#define PLAINTEXT_LENGTH 64
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN sizeof(uint32_t)
#undef HTONL
#define HTONL(n) (((((unsigned long)(n) & 0xFF)) << 24) | \
((((unsigned long)(n) & 0xFF00)) << 8) | \
((((unsigned long)(n) & 0xFF0000)) >> 8) | \
((((unsigned long)(n) & 0xFF000000)) >> 24))
#define uint8_t unsigned char
#define uint16_t unsigned short
#define uint32_t unsigned int
#define OCL_CONFIG "dmg"
#ifdef DMG_DEBUG
extern volatile int bench_running;
#endif
/* Candidate password as passed to the OpenCL kernel (fixed-size buffer). */
typedef struct {
uint32_t length;
uint8_t v[PLAINTEXT_LENGTH];
} dmg_password;
/* 32-byte derived key returned per candidate (stored as 8 32-bit words). */
typedef struct {
uint32_t v[32/4];
} dmg_hash;
/* Salt and PBKDF2 parameters transferred to the kernel. */
typedef struct {
uint8_t length;
uint8_t salt[20];
int iterations;
int outlen; /* requested derived-key length -- presumably bytes; verify against kernel */
} dmg_salt;
/* Per-candidate crack flags and a summary flag for the current batch. */
static int *cracked;
static int any_cracked;
/* Parsed DMG header material for the salt currently being attacked.
 * NOTE(review): field semantics below follow the names; the parsing code that
 * fills them is outside this chunk -- confirm against the format's salt parser. */
static struct custom_salt {
unsigned int saltlen;
unsigned char salt[20];
unsigned int ivlen;
unsigned char iv[32];
int headerver; /* DMG header version (v1/v2 handling differs) */
unsigned char chunk[8192]; /* encrypted data chunk used for verification */
uint32_t encrypted_keyblob_size;
uint8_t encrypted_keyblob[128];
unsigned int len_wrapped_aes_key;
unsigned char wrapped_aes_key[296];
unsigned int len_hmac_sha1_key;
unsigned char wrapped_hmac_sha1_key[300];
char scp; /* start chunk present */
unsigned char zchunk[4096]; /* chunk #0 */
int cno;
int data_size;
unsigned int iterations; /* PBKDF2 iteration count */
} *cur_salt;
/* Host/device buffers for the OpenCL PBKDF2 kernel. */
static cl_int cl_error;
static dmg_password *inbuffer; /* host-side candidate passwords */
static dmg_hash *outbuffer; /* host-side derived keys from the device */
static dmg_salt currentsalt;
static cl_mem mem_in, mem_out, mem_setting;
size_t insize, outsize, settingsize, cracked_size;
static struct fmt_tests dmg_tests[] = {
// testimage.AES-256.64k.header_v2.dmg
{"$dmg$2*20*fd70ac1e078f01fce55a2e56145a2494446db32a*32*9110b1778f09b1a7000000000000000000000000000000000000000000000000*64*68a32866b0e67515f35dc67c4d6747a8561a9f4f6a6718a894b0a77a47c452471e04ecef9bf56f0d83d1201a509a374e00000000000000000000000000000000*14*8192*70ebe6f1d387e33e3d1093cca2e94c9a32e2c9ba47d461d737d49a7dc1b1f69407b7dbc16f7671689ea4a4641652b3f976b6f1c73c551a0a407d5a335caa169db4a6a25bbd27fbbc38fc71b29ee9b1eae349b0d8a21d57959ecca6bf74bc26ccaee69cfee4999b55374605491af6d0b9066c26995209cd1b71925bcb45a8ef5727a6c20338f08de4357d4cb42cb65ecdc2344a5d7387633c913258ba40699ea5f88804b5e562bf973096337b17b4fc1236d3c8a80b9b48aed63c5a0eae3ae924a883e948f374771bba46923658f225fd2795ce0e795269f589e0ffc81615585e1224cddde654d689a3260e69683c6198bdfcd87507c23cefe36d72f8878cb27bbe5dce868752a7cce067f5a3110f20ebd31ecd53840103e0b2d44385656398edc487bf6d1a5ec3a56af54f9d4254fd20988df41eb85e366f13da1270a3f42c6672ad5faf00fa21e9ba3691bde78ab2c267a142f275467d5b853a107dbf1d75839f0e87b3b4f1d2cec88cc02a26bc4a63aa6836b0c43c5dbb44a832050385a48d46968361ebb053c2416c02458b76c95e50970922556d40b100967340a32824e6b6e44c0c1e0da7ce989d9d5ad91560156"
"ed39666cbfbea71f28797a5a7a40e77665612e977ecb8b7fe71d500eafc29d9a0ec1d0ff1723fea7c405bc181ea93c0df42f5bf886eace3cfeee8b0dba52ba8cd2ae009e75d8845264d12dd632ca3236bc1b643437881b270183d2e2bd20808ae73d32bfe88347e33bef4921fcfac9646b74f116be1f04fc353d2222499d5247fa842d0d0f00fc9642ea7524adb65c18fff87b6efd060ec850d7de6f59869387b3d4cc8e38014d52d94ead07d16b8d94327fe5533941497c9be2dd6c04142ba57e29daaeef96d0f2d109522651d797715f4bc5f4cc3fb69fa92623b5ea3e08ff78dc59913993c877f4e2c8964dffd2c8cde6c6b6738da2883505486df5b633aaa8c66acbc2886107f3dd61b1df29f54a13ef27a7d2785c02153375240885e5c54297d88827403320799e05213761549eedc1c159c922087983410d2abadf9ef8ae460d018c278a9ea724f52b866e3d7ff2374496103b5137297100c970d195fca8c1286a8f9d3859ee12c84bdaa4b56ca91e307580b61dbe435ce4021007e4a2a8085976549cf1d195f439bb6e642567f91a0224e98796614d9ea6bfab8f6d13f91b7a80a54e538a1a785cd07b5d7ed2b7e45a0658b5722b5f8844f5139cff3b33ce244946757c020c54c8b5e43324023ed11001201213ffe4829e37135686a8bec1837b35fb234049570868dc5ba9c84cef6890d9ec400a794b1723eb209a60758ba9ae9abd23a7ea9f94fc6b73d29a560e24973c9160f195fbe82376c81dfeec1a7f912a8c22c067a26786a22f0b7db298"
"3631400f120010706c78acc36ddcc29c7055fe82105f770e2dadf131ab49af93539fb5186d32dbe4a4df6cb0fdf6840c0609c8769fe242cc60d87e04e6e3be1a7884a05d9fb96c3bc1bbc769d96bbcc0413492eefc5502e9c1ac7c3f237b9851dc453b5bfa899b7b68e5e3b92711e7c92945feb6f6e452d6216e154a952cc28a3740925554d9fd44acedc8a44b0c25bbb6aa637fe9560437c08b17992c74de38fe1fb8fd5f66c2933c2d573ddc914f68f42d6cb350f126a51f607a2dd23b63e6382ec1e6ae434f47cfcd1e7d96c8293ef2994f850a27ef2d8210a0df0c219eadd2376ce36a22db56827d92a90d5e2fa55a4154c39061bd5490ba29f8309cf3e2056f761762dff56803bbe0607faef510d023b249663368977fede0577944f2ff05ead4b432bbb07a7d90148ebd1e30bf1204cd9069725d9fdbb850d3d6fde5044da1b9ffa222d99061c8ae217bc5b249960db545e6fece3ea2faeefa7702f065764b326ae0e62f3b8745cb73f35bea1bb9f6ed4fcda591f4d84da0415a0552306f6691a64a1d0efc8ac93559a79e57e357b63df48506c12dde74f6ea8fc5eeb1846c394fb8fd0fd40df26a42e53692db51bb36403305c1aff797e20adb6f8f1721e316705dcf8fe6e6989a5c3da253fdc6cb5de426f1c018161d72e34e6791d73023c5df69c0f83d3ea1d097f3a7ff37720a66868f40d3b87755bdaf508086c7e478ac1efc0dc421987af6db9b2f096a7270de91f5b3b84ee6d1d268d581718d3c534eeffbe2889388e9930cb051b5752c1a"
"b1faf1e367866af7d4b37ba25c15a030d9a5f32bb8912ce853fe7988dc62aa61264e3c5a29d18c5121a605558b15004c817cb0ab1646138cbf6375f1a179852bc22d80b83891edfd38e25efcc0dbb78062f479a9dc792e5822e09ba3e0b8ef71c62ad7747dba8cc97707f31383baa93108d5c7253dce2395fa24d77c42cbf3559b5dc0235c0ce49ef9e3cc816598698c8f8c5b32abfaeb44f3c35a01a4f47421a166d5aa893aaba80e57eb576b838c95ed6f9d5b3d389a8f86b97fe629408ec7c7ba7fd95d7625e950c7324fdd35989570b24f2e1e24d52b65ed6116e728dc3a1004d3d8fbfeeaea1c7dc5d3dc7a029f97f8dc7f740e2386eb27e9793680d959821031fda08c7146f46e8ee47ec28c7d25574eb690de09849725e490c39e524b74aecfc68ff0d760d115b4d0a126609cef83b6c80731dd17f4a307331464953c6b41875b6e5fea328fd59f275e2fabd25717781cf9d5cc52286246ebc92527eeac7acc6e2652c6fcff405e7b4a78b8f9475f46bb82a68a6e44037d61de0df58a8b7a81f407aaa260f3a49c4a2641776404fc15bfb77573dc8728573a1872e7e093663842d9368e74cbe3ae547355fa101daeaa0f97dc0a63927e54ae59fe13aac4f488e938fa67a12876d103b4a56b6eb88ff0104330e5cdc7c6886b46545d523bfbfc88f40f9654fcd0f8c4f443a225b50b44af9674166d3de36b6ac63a150fbcda2e2511ae2a42fbe51c08f7238366aada5c6be8eeb41963c6a5374a94b332012e860d6cfbc1b8a4d5a9825b88a90c9a5f"
"5615ca503698ad00df2cd93467b66d9b15876bc49895a081959132bad2e63757aa4e5ff77c6f25dd2581a3e9bb8e213c9313ceca0fcf5f8416882849fbee576d8ffb9dc057eb96bf6b81db60a82b0e6f315a13dd31706c0e36f4f21b9ce977ff6700cd77db603120d59ad8088e121cc3c502e37774b098eee7c8244f9bbe0d4a9d0deba3ec22e5abfea69ab72cdb75a001bb53672fe12b4fdbdf7e82c0bb2608de5d8e1961fb4524dd1acc890361923fb691bc5ea436246428a70b5021f9eee2c637eeab574babde4c0d55f57925e511ff623af5c4224d3ccb9c8572179e2610b4b79817ca18ddcb5302151f9facffca96269ff5fbb11e48209e20145bdd70d72bae54f6fbb89a3396bdaaa3d45413e3c5bc672ab98dfbeb3274156096f641494c1c946baab7c388a16c71ce5009b32f45dbbe37998906570045027950bd758b7ab2f72c243eccf9551d539946a99779848b16cddf9f163fcefe1e1ebee3ba7d5240b92698ad56a036274ca798eae19b0dbcf39a1c0ea1a58b29dc0e3de89def08e6c5800c94db47b7eaef5514c002d687b4d99b00fbd44137f56557830d63156f43bf73db8b330bca0ebb4ea5d50941b758929722aaa5452cd4a4e00640165dfc35fd35daaf929997adeb4c4f7611d66befb80809dc7bc6c763879c3bcd8dd0fe6b621898717fd095fb7eb403b07591b931a8e16ab488b01acd636bf4f1e71d5460532b8a3b00d7353e84c071de5cfa25de685cb85b569e08d2f177727cda11f196b040d25c97ccb83e355db98c2bc14844"
"1ca95b5f612020bc53a81184ccd0c5f14bf6d9fd6318ec28bafe8d668cb3c98c56ad416007bef4a3ed9e12eafe8f9e7d87fbb02d1f557b497db1a2c0fe40ec3f23ea88332513c68f724cc8a8af6636c9f332a8e55c2d41fd81a23e92e9ffacd3ef14cda669e7dbe31ca08a5238c7fbfe7020933087bf2ce0a7489fd5a3becce5de09628234f60c833002aa8e9c9ec51f57c8e4ba095c1d054750d46d64041bb1f567a82d63bb5e88fb70bdddad0ed7572229e56b90e74dd88ca829f1ce8424bd24a0bbfe3dc3f77d244ee59f364b36a4b05fb511b5b0d7f876c65ab4233803543b0a68b9d2d6d45d292f91eb4700c2dbf431e40c77a4fcc3ac3fdf3a2bae3df35b6417b8f1eedfe84cc65a07c426780871d16ec5ed3201ea4eaa778b71f04cc1999587bb4645bbc43e365395e9188c85bd024f758304aee979f8e67d07636fea251423e920e2b7258580d1918fce772bf02ee66926fc5f9a3dd6a8c89e6ce7e4fc03d4784296df1a9152a1fc66050983a287e3520bf3e04d900d25316c8bd5ab489bf97a2f31f4061f895111caff9968ecb22d75cb9e5400ca1d0fb044acb4fb9cccaa4766cf6c63ae5a7a3f9af90d1b225067f671d85cdb4e2e21d2850f351d995d54520fdcbb8cb30bfa82190ab2071eb8bf350f984408b206597371736110114d12d79da4027f9a58c8fede63cf16fa552d2a956ae2a49c83b0afca3056f87f1e27bdeb9d14a7e5cf30550017a3233c4f386769021a853b971746aa28aa69ca980bb02979779c5bd29259c84911e2b252"
"61b92be669e8a731dd74edce66b6f3ab5944695efd57c0004ff637eabfbc02ae346528fedbf2ae80d420580adc4d571a37fa1397fc2b85ec458d5262c15620c88f2dca0eb1bae4ec39d67fef56ecbdf89703919e5a6767d0f77bf6f0f60ba21003d033c9dc3057df18d855a5801110fa9a29a42ce10a44a39ed883df249ccddef8aaf832387e70048d9ad6014cc17f9a2bf7146696ee4eed388d06a45f7bd7696e57500ecfada9e9eb17926b16bbd90146e406e281141f0a918c320cacc9d1f045ac1bba87ce8d1d45cb6303988d5228da6ad33df6d2a5bd7f265b8f610078e9db5fa3db0e08286e500063f0fd6860a11d9985226ad382a95bc3c3941d43378ea1bf28fc85749f616092d77e7c292e311337168b52eba08ffc0f76582710a1a7d33c55162b3c7fbf227a324e1f4579e035ae0fa17fafb1ea964aa977490b5a3fc16c75e1fc50a6d17e193345b71369df804c61a71bf60be4281c3d1f945c690368c23caab006f9dfc913dbe6119d6fe8349cdd424db7074726e8bdd0ae99e2bfb9b800ddb965c06e0587cd10108c9b431cad4fd10d3654a22ceac73553a6b2b2218ed6526c362df46cfa776e2caea0de61b9d5c0c74e03e299ceb2221ed0f30ffc5876354d5607c3eafc77f78e4fce5e0c7f6ba7d417ac5f0511e2635b41b28dfb4f2fbb73d351a69fff920b76f5687386114b3d5ab9cad056c88840a023b7e2df73f007852763570d38a966c8258365b014a12a3497f506dbe55c073244333547223785438372884ecd8b66aa0a794ab5fb"
"94b0a519bb3cbf01b43463c0c7fc6ebc67754ca25686002e13edad54c817b0aef64698637d18a4a8bba382add892f4918b720aa99b09ed2a6e02b7140f89e3e00680f37343d3e47412d04ef78005b8b9a23b92d145a8da9c5efafce374955727367a7f1a179b990868550cf960c6df6baf2cddda5fe3e689de8dfcf1474db419ecf88cbce9de7a58e9d8a15991fdf5361846273d195a2892fbc95ad079ca8153910984c4694edb4c790f430043c4019fbd96fe49d8afa5e7d1f6674e4a125bfbdc916b0d3819566898599443ebf2a87b1fdaf41378227d396d2d320dc5b860705bc87f45eba2b6473234fe054267698dba0913ab1234b46697c54e2b19526d1ad4b7e3eab40a413f86170fe9f2a71eae2fb959a021b0b43516f1c8a3e674f37ee235ade79ca296364b0cad5ebe8449e09b63a34e8711587f7f2fe6e181a787b1d3a8f30012ce9549abb834fb80c673c575a25d3c33bb6d846ac231f411dd6422c59215e0a267424c0c57e6c9bd5486e8b6327e9dd16b7065eb74ef91ec9204360b03d08654a4e418346ec2d4d21edd5608a76903494791546d430eac38178d158d61951de3c61fbe5d56c22cbda4a3d40297f7abd83913e8b483d9a80cf000810d90a921f453bcf9e35732d2579c1aaef4a6980c666e3b273a9f91d9918f850bd6e4475d8aa5cb616cec58d6ab6d70dbe2b0f7ad85618b6e60dd4ff5d0faf19dfdf27a9ee48cd7b2d6613e76f04ab6ef5f0af12966a90875816c27c4297a2bf622ddf66fbe7c211670d0c46c7295b93bd2f1"
"22568df3dc46e9294c7258a0b7e81b2d45979680edbb7ab323e4857d84306ccc16ca79c711144eab7b37e3437245d7b78ced1cfebfc45892791b9ac6cc1211f83e328ce3f57af3d89b5be89dd2efeac9d738330bd0d8d4a059bfac06d1ad73bf6d427541e559c3d16eb5adc4380c1b25c1b8a9097ce7eeeed1c5d6884dd1a32ee2bfaab8371593a0eef65f80e705b9b56adfc0db4c272024a71947755032a5ebc1bb346ee8a99b01b408cc0b1658a319ffa5ab2eb87e9aa8b3dd9d9d92ce3bc04e4ebcc011a280143927676360f249ccdaf7949bb23770a06ff5861661d36d761508f7e9ba149310d1347c3165e07997853d415abdacfae9579d1dc0b5990a05ae9e6dce8931ac2db9414546dc64f8161a64cf30b9ce8c50ef2a99775f03dfc2c611e780a5cbcc27cab920a87d940acd8b3fd42897ab6f51b29214275bd564c50eb7aab3ad19a2c903c84d2ed5a23c49c81d87cf3244505424332c917d7b671d4a90765b8953c26bb7ed5dfe3e93632610ab44296afee2b5c631fe643a0a78eb9af94d700250f5a82bc57d24825423f1ecfd8cc2bb0daa229670d0d9a4fb342ee8c9b7b16d86d29abc2a57633303b918ac78ea8d2672dfdd4a06ea0bbd756fbadfb0c09e2426a65e90ca829ea00ad66ca8c9e79b9aa5ddd02d435cb23014b1033da00381ddf2dcf408660d1eebd1f6c7bf5ae9fc3fe47e75ff7ca482716534a9f3365f5cdb48f3d59fb19d11bb8782ef96e394296594812e8a7da23a953f6117ce577e55f3d6cb1d3a4007dc7d252c7123a8"
"37be12884e54ad10757af405beffb5cff189133bb7df5fc009544b2d62ec44fdc0c1c8240d4413af5b36e031510b1f1537a690ba7049cce9df4bf4dd63f6987c513992fca78a1cb7e8d670fb43a52ea2ca2f49724e35397041e5c75a365b510f40fa9bd076377274d6a95af801981d71972da0a08b536b024f439c43d13902878798153ed825ddd7dee8937181823076f036caecec170edf1b5fbdd84e530bc50a7acc257bb9679d72de3f115602d18d2d12e6ecf4d3242ccbe9a71a1483e7fe40d2447ba028a76aa92c13516ebde90dc4d204095a554cbfad79d6efe4ec540c7b51593413465b929742b729ca688f67ee9d9fe76431fa81217fb135d0dd6ebc91904efcb0cb6dee22867e5ddd7453f530d04935f41575de9ca457da55b67791d2e8b83890b5be543366b92ba6579a6f19f8e82a0bd87e379967766e5b0a58305b984778c562ea03a8b8392e3160ea4532b6ce5de74bc8fa0e8ebe88fbd62a73d7106a309f5a5f5d7617664b015e166fcd87906caa80ab4eb3e62f73e527b5d951a0ed0340fe17bb7b2692e4a31d14798879788fed12413bac50e490ab93ed66311599a6c1362fc60da5319ad907c7ef7852985ce86246276a138379d2004772d4d9a989b83b3e780bdda9825ad06a4b3dcc9a9d4d8025cbdee7cb2e02ea1f77bc90bf4ae56903859025b7283ba6410aa91933466623b996e9ad07e3095e376b11a27ca451c246d5561501e69c6747013ecda44f8d1fa50a75572453c9ddecc07b1aaeebc04cc7e976915f5e68d1236ae2ff"
"dea4b9fc4f8e91b03982801e2ba604b46ad80f966838ae09d2734c6482dd16d7738cadc1276593a336e2ce8cf7ce48d1535c7865f7b90445ff3ab9e56f58e254115bc07710de50d7953238d7ca419013d104d90fe79794995c28f219c963d716bf8942e0cc5cb432aafce4afb42f74596b847fde5d87fba9adce5c17fe590fe58e60379393e521ee194fe063211d72c29d58f7dde89addb6b0e20515ca7aa270df2ef2d77f92219781502c49292c6c4a985242b9447521cdef5a52b53b5eefcc43e8036ebe90b51a3565cbb180ea1b3e3d20f63b8f420c2a7f01c475428d5f63c66f122654af4edcbafebe34970c152767cf623eb4f1ee33931a79622cafc70cdd2bc7ccd55ecc1e0aafde3f66f5414315048d3c5c51638c35fa920cfcf7a18ada48a589c12e4da2c801cb8bf3b182463707a17891cf296ae8aae6a8a88ee3d602cc1bb7647861f65ec1a278433ae08d8c8e63727633425fda0b86d78378ac80b1bc1a48abf270dc2b5ea71691eeeb979950cbe0ddfdc451dcf8e3dc657060f4c3f96512b21bcb228a966381efa94bbf5ff4bbf38a803b6aafc719a545e4d0582a62e81e6468aa04eaf131f8d2f545c060651e115032f5b3579fdfb95a2328f5c9a0308874630e840ae1dcec1b9543c36267a9651c94c91cea42a93a91ba3a054ded4a8343864b449e46abec49474e218c8c541b00eb0f8997e710025631ac28be3f08126446dee0cf61bc69b85e4fc021f203c796cbd2ca16ebc8fa15f55510a08ed334155233c6459d2d428df31a3f376c"
"d81a530700b3ef08631dc5b50f787d4efe2bf219bd17f0431803d9d946255716e8543bf77fc44a48abc70a97feae8398c2059938d39fb4ac5f7214d92bb89fb9c45b6d117fd51f6207935beb1a89963fb9d1aa020669bf809c21154c20e720aa1178ed2bc13fd548e0d7d01eb1d028aa48318a02dc7aa412e2ae01ff59a86dae40771ad3f48f0fa54b6e679854be00deb9938e37ab3a4c9a96f3b7849ac75b82619cbc806c42f4bc4feb1141f6a8391bf9335f643ce5cd2791590b28b19d03cca7b5cf702f10ffa0317327e828deb4791f71500f243be77a451e5759c6c711b38f8f62757c54d7fc6dc586a90df7777d8cf1c72f9c0947af005d770f4a74b6c9413738c3b5ab32306ff5b41a6446c2de3f59a27b79d877d3f05fe22d11afd69e49e59f35b3725a0ad126642f388602b7816abe397a9c9233cf7d1e12a00362306d2d9b81fddb279544f35e23a8c198930f75986f26e6f292ae8debe5da0a7a5b8add2be71efc78179eff7fa2a2dad35863b69e85e8172073f434f48fb03f7bd1bc78fc2badbda261a68f7bfa171c898897b3b0d4852920674b8d9ffdb37ce66c1b6aaf9b375253a0d74eba4d359737f7fddb42471969d81605e41f615399c5fd6cce1808e9b511ac54f75f774e84b00970474f5136447af04b4866ab6c54aabf7a247c6caf3ee891fecb14073f3cfdc7368ac00f6b1c9b23e301e49257840f949a57c28a95c5c490bca91bf979d40403f7b9458bd255df757e6eea0bf41d5175548aa46243d98f2f0f6c754d6e7e58fbea97"
"7d7e0af8b7d0a6bce07d0c483293868a914a50aaedfb9b239b4c3c472381535b287a4146fd52e7bf882c9c3eff7bb2fae15d5b96bb1222d81d26dba563ac550e716b6c08b062cad6702a33a9db4274fa2e81af815e8325101d5a9ce9b345e29619da9e45dcbcd7b0935d7dde07644edc6b049eee9371511bb2cac50ec1170c7aad835c54fa52c8e0a0e8446356488e09c2f07b17413a7ddb872d05016aba129cc36de609831863747310f0fa443480a47524dfc5e1f34eef3ba2fefa29e596e7fff86a924462781930fab55e71fc2f06271e62878e51e0db08ee5dea31f1d2afe9a4f548ad6a4f4763c9d0eecbcdc32323aba1c9c12554a5cfedb5310b4a03caf426a80d725fabd557493c46f2a174aac851d3d39529d5ad919fdb7fb0dc1e5b0ffdf706a9f5af36fcd2bdde28d68c5af4a1da4e67cd44f97b555b62b39cee1274b7c3dd3971ace3da6101c87f9b8f28c5e13d4066a3e63543825dd8bddc3e90b6dc75bac78931da98929a337817f68deec6065f6f7883d5bb10cab909c9945f71a672eb2cda9fadf4a8d9da906e2a5d1f589193b4e791772663f1bbe751498bda065f90244391169d80490208083de39bec984af73dc99b10d85958f372004a03962c45c531b347851dc5e26bf7bcdd68c9b129524d6734282bdd431f991170d6a5c67138a5405d8005b355ec7ce95496a8e98782f6d978c42c30a17db9c12671d82f2d3e257f66980f20bb6380303f1e89b10035ae7bdb3e55d31f2d1574784aed5c95aa09aaa9614989d957a65d893dbd"
"abbfaaf30cae0cad575e39f5311aa00a6979fa52ec12dfb2f731a3ce5f8b6097a612c2ce98f5898eb2d1780d0cf9ad30ce5395ae871ba7ca6a0884a13c09732cefc5aed9d7a28c09041cdd62e75d7396432545f0c16496b7f5f516fb2cc603c0ec10a51ee952b7cd0593ec00dddf67e27dfe3f0cdc5bf737170243a8ed3c1f59733fb47bde4b6578d7ef11f95790d4c678d95ab2cbdb1673d2d516c189af00f996371077276e672f1223926fdcd6627ff86816906edad3aa97e3a9e7346562add05ec1a94c2dbb7f3b28ef537715a1d69761bfb8c2092e608311af2f79a4f8188665a48539944374437bcff6e59bdff4e4b9e4dce11307d892915071157698460b9e9fd68ee0d1acd21434810fc8ae702fb8dc794ad5364c79fdd74c8a70f390556930fc2a23064f36411c626179d1d745d4875f5c2b37292cb8ba37bb78d419f05e9a5d2245a38da20b6b14eba2d5ca3d58d23bb5ade1322cf337eb75a97ce98c167b6305907c3fe18038bee1e2450c3095480f99c9f12d2b543b33866e5546a39d539c6e2d639356bdbcbdb3b4e0935ac76e0fdaf54cfdf241d2c5ce135324885f8cd69e6562f48979352bbab357c6861c66b4ff7d9dd5d32a8ab8b6e759a2f5ddcee847fa439a5f9e3989039aa60751019eca6c7dfcc2464ca4a1ae12f079d200961797cb0e52cb046d1f0cb1d97c4699e07f019b48edd6f4a71b99ba26c2e5e72745cd9bb9a7e89d8eaba646461bb76818fcc447de2820196e32cdcf4a57c527c52f64d316b513f6a611c929890be5b0"
"3b3d3352cef23bf86d0e058b1cd9c4a10a9a01060aa9c9cc4bf42c7c6cbb677724db3f0c3736461c1828e67c9916e953057024371bb4ad8995672f760c47574bde9df9e73af90773cd46c9df8cb655f8c37eed8cbda40da06304471e32bc828a7dd9457fbe4d63a15633009c1a9f003f3db7f5b2b5e3b22c60f747d5627bce3eb4398a543cf24b18cf0a56728adcc253d7f5343245c1426b5bcd9daff94394499cb6d7ac2b4e63ec424c66f5dbceaf877fc13f47e744aca7d8b5d89c8d5621f4e13488b141062ee04c2312528a0a987a5d32ebc6ffae45657f4b2d1420890970e363a124b75374594dea0560320b36133e31d6a978f90ef079b81484503c7fc3edbceadfc9fcea06f271a60ea6c5d434b694ace1b506eaf013aca2c6103acfe6c565a5a24cdf638f8ee282ac812e32cc2662a8e2d4a31239952836c4896870d973bb65b280f0370f4c3a54c7f4723b2bef522ca4c233d7646da3fdb9743e273afa1e3bfcb947eea9f323ca908bb4961b214aa906cca1d2d56eff25d60952cc5897ee6390f9af4efd5d48b2aee8734cf6b8042f2de75b107f8d135d9a63148e88e43df815fe7871a354741f8863af4e114ed0369515bca104f8d3b24a2d740b8617de3e96a23*0", "vilefault"},
{"$dmg$1*20*f615ec6c463799eccc6a2dfbedf12c6bdc422a2a*56*a595f4a81a490e7aa6378034661da57a424f922c971d3db3f856f8d54b0784bcc5d7182905c4237153c5d250b8aee1d26410b1dca7b1cb73*48*74a060efbaf2c79d5523219d8162c425befbb2094fb46e7ffaedc7cd4f192e6f0c47d8aa91e0a3201346725d3ddadfff", "vilefault"},
{"$dmg$1*20*9c82b419bdac1b3e6b71f8a6b99a7501f34b6950*40*5da479e292e0acf67a9fa3e24d0a767cae2f645ff63836665068637188f4b80295de79aabdbc2536*48*9b136165ee73418631ccf28d5e77073788ae921df596649a7a7789585db0f13f446d5927967e2ede20ce8a4f5389185d", "vilefault"},
{"$dmg$2*20*839730be2331c69df4f729ffe8a10c26653bea94*32*1f24e25712c2d70d000000000000000000000000000000000000000000000000*48*3231e20aa642889a7e087cb87c84ba1cd52864007cfea677796a6f52e16b2609696dde9230aeb5603aeb1f70f6701be6*14*8192*75884a049d2b7a40c14002ab6e511bf3c73ca79a2bb8285a3d2ac1d5b9b0cbf92d4a483fb762bae8485dc3fc9cd7a54141da2b74a86ea833d253d56f52eecb9dd4d40b9f846690378cb8a5db74fbc6d756ef9fcdbb5d21805ed43a7fb45d6caf6b3d2564f4a7760030aad69ed9e56789e8b2699bebfaac3cd73130fae1d8ef7f003e765e86eb84e990f3c24780022fdff3ba283ece4fa8d31716e5cb1ea22e408431eeb2cda1460217efda86461e940cb10ae602a84ddd22be53064e66c0973a04405ff17afa020b24f1bb4ce42750b28cf4e98c4f542576e712f3c2fe0a0539a411290f65ca763a94d865fc24b1beeefbb6b055db453da38e62bc383e74b188b86c54b62f589334de8ce3ab2e4643f76eb4db95bfc088bea8c4e88cfccd19b89b818fb698982f73df634c8a8148e4c8d3ec2dab02aabcf48ec0a78686fe0b4f5e589a067d6c54f0732e559cf9db5b4ae1f0468f5681226d3b03002cb6ec528b96470f1d1aee5d3b51b4c5f45a2702830ea35056e02279e76fdd30b3ac174cd91b65fd6a26a192f6e632b0fae660d0861059a62bc512f610f4974c22993bbafa364fd2e8eb53d07244d165f990c876320d99070fbfa6fe7e0ca42c0ef2f17205ca"
"7196376d4026a8a93fa83a99cd3b6cde354ed3122dfc07ffef91c24f2036b0d83467e120b85a92fa04120cc8f7af3196adb6420f519c610983d163964b0cbd048adfb89266d9ccf9845cd17ed04accff9d106b7bfffefb365e97357fdb9ab2d0956411c0c73bdf235a9ea4b50962c8f258583899ff2c0bad6602e8a3c14f3c870fa14686d15aa17f5cfd1ddeecc7b061cb5c00db7d198d083a690ecee97a1b4b0251349beab744c4bcb53a4c1702d1094f6591ee5ae15a29271ee3d3d22f0f833219c3676236c9e9620a206ab6ab08fe5fc663f4f2ccfdae6e34adc68e59fcba5363f44cbc5d8345f184ccb38d52bc2bbe6ad996c3d4316ce644698bba6044209d108c698c3d18f4b64161651224cb015052d2e9bee0079b779d77b6623e9669c4ff99988bc612c4099f6b8bc9719444cecbc5f87bf9ca6dc30f3b346c3cf20cc342cd4d156ed67c8be0f1801c3e672bfdf2fb9e6c6f1ef3570d059405a8a0c5bcfcd70f7bfc1d2417e3ca205be70a5ffc9b4d1d123ff64cf72b20df25e9861e1da57fd1311451e542c25100c19d1d70bba2c26752e4cf1c59a6373fceceebf2b4c392a45e2cc7151f4cc1c7292720b5f0716cf7ea752a8a44cfcb7f638c5387a410efbfae90598f2d99cc79baa298e30076d5ac8a2094dc14d81953c09fca8b41f88cbca2274158b93fe5a151b93bec1fdabe1a6c67807d5f9d46b2a19ba85f9540cfb54656fe473216ee1922046c5b6cd08b325e0c25a420765a61e5f7a266c9e0ea1148f0e62ec65736d4cacef77940a0eb"
"24e93b7b656e3b591f5827e78b577b628da26c1e5bd7544dd439d15ca21a3fbe96d3833ab1bddbb03beb8f0fe39517958b7bf43afdbc68b5061b41145e151d228bb5e5220b31a86878be40060839855db438368e40dd6b8d534c5c39009455c0a783455b41b572f2864eed60e5dad80979b97efd6dd08549c154b76f748101396847efd56a97b82cf62a25e26ecaebfa35d545cdf886ecc22460cc0e2983b9da14ac41dd1e1dead58a2c29a85f6bc900268d755d1158939470c4793359b50da19addd3d8f722c0a889ebd8dc69bd955b524bbe452cc98834613ea48d7a73a9b93820c0ba718cf664d82a1745451a204a2845d4e2a846f0f18923ad0315896b1c1ac1942fbdcba119ceed9e02b0e707b28feaba44bac94888ba1a31670cdce6348d58d2072eb13ee805d569815fb28749c392d11eb06d8b1746ba8eef3313072fdb4685f1401717933fd18edbc99e3d89d08a4c7798bc1d724d6bca02a31642ca0ac6223884580c0be8f6508a6650b783a9ef24de3713f65fadcb2da6d68c4bbbdc216ff91ea7bd24bd7365b91087c14edf70dbd4eceb2676797ead7fbedae77a0add9d22a515e2a79d075958d8fb87aa62700c62df007abaa3a5e002403205fe04edaa4aac3da6d08ad9ba909974e9091148208db90f330b2c2c702521d4b1b32acc4fe6b7ffd9f96fdca05b6c404afcc789fb9ad8c52063fc0f9b9cb4116ee11f07aa17dff57b889a4f4abaedc51a07481c1e954d78ead32c6e808d3eafe7cfa9d2d4ab4886abcd2f64ba2df2d8d507cabfa8"
"d01f785409d71896461adaeb4e34d18f9b2fa38779f0932c27ba2f3f75ece12f6eaf7a0d728dc02e97cd44ff175b592b8234c3e3b5491726c58dcf0a1b77698cd38d861fcd549aa793f8d2b58d6afd1d9b7bb96c8936c960eaa7072c00e69f68f948ee24494b8152bd8e5d6923c8eb26023dc660d202e41663888a8e8550092b5e1610452c79069b3cab41a2e7459dc0d361ded09c9f1589999623f6deacf276eb72996a355e4f7dc19a5217e9dcb2d6a3e4679bed9f980a5dc8f24a1c5f4eef00d706566e12ac8deeee964ab9501be5e57e326a6fcb794e4f4fe14922704206a343724913ca2e1d26e3d83cf994cb7aaaf9a916ea6eaa06987a9822c5a8e556b16ad72d5f5640b3490d6b0f290f9f2db7c3ead435e534406dee40366efb98f0b53930a83ff9bad177b84343d204a1083801f1d68b3aff78ec4246f670f924969e4608b419ea9f5aafec40d902492f62844d9a83d65f38af2531b875b964abc781b3537c708fe65f70a11552990447bf6db287412367ca918a39d9e2b2e228451807b01174afc33f5f67d45f9c765015da6abd318c980fc8bcba60ccd5193e7a8caa54193aa83bff7b77725be99780da88b3209a3cec620c17f979fb16e640473b0d98a2f492702ab99f2f0f83bbdcabc2a6dc4986476f420f112ffbc7bddac8cffe59e82ff558151b9160e2f99bf37a05654253321591ef31d01b32b8d69297b3bd57f127e9f574fd472b6d29b6e9a0e1fd43252bc1f1b2c8c959f3f4d80177b4fd6a77dde8fcbaf1eabcd5e7f6d38630f35d"
"efc161ba7432cc9af6bc73baabcb343c469ab18e4cf88eee21e49311b4f20077bd6e30705338f047a9c7bbdbe4dfa6d7be3a827c92823a3c8f36909f9e4df4dd91426b75ac6b5d953357929b0bcd91ebd24e651a855755edca82c4664d3c89fca6001ba88688e5ec8d5e5c3fb145b963b29424192530601d74e3b815be85ca44640ca89c57ec4ac7084639b82e23f065ac561779c040cbfe63310ec846db02873203feccc3f88a28fa78d8d567905abc9f8f561b4a29ec5c380849ada42100c15efd3d73fc203e63a315cc27b82f62c4ca0df9ea213dbf7eb39552fcc38edfba0ce7e25dd097bfad5224369f1d2a175ab88ee5a3371daece3342e99c60cde76a1ff5dc7e5ebaa7e0fb59d4d088cfbe7704126b2697d62d7b82289a35ea778ea4ca347410513513084f1fa971686724761f711a916ae1e92402ff3d52f948fdbd9c1d961c6ad6923c8ae9cf3a4eae7a9369daa5cbdadfc786e873b90ed1e8f5933ebd011081ae7ea236c11f0c53e00c1c0f9206f91e6954123b5caa08c7615a787c1661dc17f297c8ed2ff6c90dfdd9a262ab5e9a4489d6ed7ac032f72bcbbc2248e7f1675e2b2da0bf85caf89921fcd8e78403f11a28970f673ec7adbea798b3eff87fec642ef77c15b3f3d19dfeb74d1ef6a38ab938692207133aaeaf722aec4f6082a4cd742bd37fba0f1f83f01cd2fad6a169c4716940f7d74b8f29001f406de5897a5e5d813b995df132cc57a5d9bdecdad9024dff7dee8b89189d35085a70bba2e5e0a8c1c71cc593238f3acbd1337b2c"
"c5a8647ce6bbd669eb939279d3b964d661112752bd7fb877c4c6ccb5ef72ff5446410286fc69347841c5595a3408e0c73fed8984d0c0fdd2544a168ccfe41386702f6ab7b3675a78b57f9782f23e0471e6dceb176dc9eb871ddd92dc0b86b2a11293523189c75019200a45213f0cbd86823f65f28cbe6569a58512dd469431322b7ca5b9b8ca57e56a139dc4788ffbac10fb57441f2435584651fa572450a4719c8c9b4a322f3aaedd3693a55820c725b63096d3f211d830d39aa89be83d59b13145dea9231266ef6b1eb1fdef31203922308cff81b166426d662989a350ec712dba14ced58df7dda0d0fad05ad8d9c6b247307d481f79e6a3cffdb2ab9b21a8208d6d7faa72b6f22a505d2b950884474862f6f67effc81c6292f3550c4e8852c39c52d952648b256e961d478c0c6979300c5188c490ce5c1e34ff6dcfca63c0f0571ea616651ef6f9781f2d355dbca208e56948ab9e26c5d2d3f8509952bba3e93241837b11a89caef6c956c9354ac10425a6d8d4e82bd5d7411d18655393d7c542a7c914a5ea6aba717a226e0f51200cc949f38c703f4f6ce452cc1d7d6ee8acf26d34f74981f6850b11610c11d1c5e6689c1b6fcd6b6e997ea145851c6655560c33dcf5ed7315578263c39fe6a838c5de867f1b3cd482c0206f56ebea0617ae25b3ca8d7e13849bb2b58ea4e21409762d549636bb7cf5ec32d3216d827d94cba1f36e7632e3a43b3203fc596cdbf879d1aaee90804fa0cbf46d08ff4c40aff8fb2b46f7ba8ce21d17c2d3d025b67702054e"
"9d76716fe7b5c9d2f43036d86e6a17924d2f160f91110ed1f3364a1177aa6193baf59878ec84f450914faad409618bf25cae17ba5545abd33833ebf408990fa4236d322089aa42eebea965e59456250fa14bdb61a32be8d70372891a83e7bf298168c5431e0b326229c36c667217bedbf64e3a07019534a087e84cd1a9cf35a889d9e65a7be63e8d638373774148e127b328734963437e7f00253d2fcce7bc0d798c09326ccd4f379f8a29f2d308ab2fece6fcadd653b1a3ba53a078e51a1a87e8dc03c5c118444d82d9166c0c4c1bfbe8ee09be6f8cd497a20132d4b6e1edd13683b363dc6587de2f11cdd51674ebdaafc41654d639b6cdbcc040f5889efb1f64e1b873442493ebffd8f867f0e1ba2cc629bc5239ded578336a9e88ee8b2d1b71f6d9303cbfb8a35e4015d2f9ec25eb4618c2ac17166e8964b68a66e60cb7b464e36a2251243a218ee542dac96062ec7db751273435dca23bf3e8aaea895ef1d6f6bdc98fcb6a9e0658dbe734450682cd1a3fe16161a9fbd035270fc86684971e20f1f1869546e1b77a481774c9449ac6499f376bc3c0f0efa589abe3bf676fb385ea50618c681eff6e5359678f078292da285c4b5e66d5ddb43499abc3558490aca6481299c351c6b053739d0065c187f59767e7de24f1b7bcd2d80d0ab2e7c789a9f5172a8411a88d2c69d8f9d2744ca7e42ba8478648df29919c23c0f4cf14e2428c792f2d8abae1073b97d86c2d5cf2e5beebc7fdfc449ec3804a81199d6c4f24d9b040bd1feeaf141b7eea626c1fa812"
"e499b74e86dded2641ce3e11a04a35c8b8831a4de563c3614b4048eaa656d8dea460d2c46f6d748be434718e9f54934804756fad07d2a8ace694bccbd7bf2e33c09199a22a98726d2e1a690b2a9c33e39c8746d8125d93f675c571247b0a060114eff4c32231898a05e3ced4721edaaee9ebab9b46692c65f086d9fcd34b86a499685010ae0f4423625263d0a2a62672624662a6613bd4235b7402573af1b0571c364f7c14e277b84e4a102b1055a1456b912431f9ce9e875056f8b48345ab09bf06b3de6126fae32e2bd61d2fdea29a2f3cb46d963fa40694c02657352b9b9918bc50fd7e26584e51ab5e4bbcdcbc18b9bc17d3efc5935ae5077a269fb8e912dfc91a2c287686590c3e2671f6d29365c044fac2c077fb5ff280b0a4d69eee3b9538b4c8a029a3360902ee8291ca9f1088074f307392b70a7a43ceaa07c47d175b286c052e2412237da3f6acb1eb6b1ec386dbcdf5b49d2391615788f401ec234b58b112d296b389ede47243c01a1a6d18ca5dd3f2646d483b97e41370faa1c023118a1d2006694debebe35046f6e5852952bb520c9991cf9dfdcf89e51fe29d3cdad6f1091fc7c450782f06b09cb8aed1e1f95221af7ad369e49ed672fbbf2d255549d0fc0398dc6b4d37d038a8dc9e8d9b4d6faacf3c5fd10663107cec0e171ea6e1c26eb8a1534646e0813ab0fb449d15b4865eb2e9914d404d06c1e284f66e39d09e99eaf7c2f36997ac6ecb9197f8ea7fbdf7da38e427dd5179ef265f1471a096fd24d8ea2a2ec3b820c54356cd912f06"
"9accfd370ca945e60c72b5d479b15d52a5c3c4423c73f4ec06d9201ddbfdaac2e304b1408674d40c203ed48fbf4b126904900349228b28fe262539c9a12270632f28241198381c6e7174d275227c99178ef4942655ec95acbc19a3b96fd1e07b5e0e91488c979e7e25be5ea733bc3171b2874801157c83a6de754ecd05cd78d6d2846e7ce19f641bdb53075dca078ad0ddfa871c16e47da96d007b5e2b2854d151dccfad21875fcd12df56dee7f4aed6a54fa248ba2721ab2f58c1157c85a3df8486f99295f2c9b8e8cd7a65145b69ca93d0ac4fe328e31c07bc1d0af2db886266def575d74be200ec9a4ccb0213743eace8d7d39f810e3877876082238d72c375a5cbdc4d7de36c2ad90904a173df80195cff86f19a0904d18a1f8a92cc4779e5997dacba58770c5091dab5b832dfaab2d0fd102b99e3b8a799ac6e7357b294a31db5f9bc3d04036a4a6e18dd47dc88b0f07e1c4271e5106f329731ce4dea9f56f6d63beddad788d7eeb955589a13990cbe3454b07f63477642613bd77f3bc5d024dbc5c55a0c7426ac7cfe63dd2da9f0d5a7e816dfe5856b646b648c302c16b50296882c62334c9b8e56ba6dab63a9c787fa153d04e5e64503c6bbb9bfc8957d2fa607ecdd3714123dd52b6f9c1a3a73f649dfe67fd7195857955cb8c5470a9f363116cbb580b793033280dfb63ae47b384e6aed677251b63a7a27447f37e9817f10f27c4a0560ef34c0255617cfb90769aea2e5971077cc89022f8a44493d5157ab2962946c7fe600a24f002cfc6108d345"
"469a65f2f29b55e4da3f4c767324f173a11567ccc401628f2934989b29875ededce223de3134b7e99384f94436bed28329daff8da5690984b491d43f14d86d5a5e783545442f913dfa39f25f6360d2143fbe4c7e234a40f65b2c48ff5835c3fab67a92d0adbac9e63993db052a832b1c7b6045a495b82ed0d7f1068ec96fe1519493f7376a9f9f331f6ae89420fd1b523278df3e78c7b957f599767057113d5a1895801f1fff1b7021fde8360c4fc1ec8165132244b680645df7a1c0673728ca6323379739905856537091dba18f762b7be6f5f7e95212c402b005d73dce6a7775e90093f927edcf0d9ca24d04809f953ece372414d5f987ec2ae030dbb547db5ec17bef47dcb097fcd2fdd873eb93a99e2209425d4fbb589530fe41bdb5daf8ad8f83e48557a01d2ff6b658368e39bc8324cc2756160cdf56b8d7fe231aa03e82bf0b3f55eeaba71133a6bbf72342727a52ff7d158992895c61c0bab4cfe42ba5e4d5f239ef5efb6433dff84a02e2a5f12bfc35c1062e4103a3f8fdd1c5be28bc83725023c8a72d2cf5103a7c97a23b2d9903a1870726ad2bbaef7b7a6dac3e36c1b92769cb3f43eea1faf95c53db0cda2a8bea38efc1dd11695bb5de4baf583b175a32d49f98c37510e9e56f3d9e10bb4aff163abc91a36f24fb38d33d87fb4299d5ceb5144c69cb741b03d35436002d7740c38753e284a808a77cc1d4ff9e63b9ece720e778497c25b46ccf757449cb3b3fa8e5bb6d5a9f6eab58c97e9469cc6192b7b31362453faac839327067f41f25ff"
"34c2cd40e9fee3a0b8133f266407587ac40db20e7d7d397e90558e54250111f540a44a70d427497b5a06c8ef87f6bba0082e00d42adc7eb38e890dcf5cd426c1bc2b4c781b07670382aa0d13e227e05c1987d3cd0241b5ad78387e19dfe4804189dd8a10cab05c79409b9414a6a384cfaadbefcbe8e3521fcbcaf52d92dcf1611ba3a824b576051aa24f42cadd7b7e9841375646740f2a6271d81d2d5f4819ae6a5d3f1feb6f7923f4252872c3a2709a8b8556b3977af8c4423bdbcf66ade1b3c4303539e06957e8930aea8ff70d6a202407aa44c6c8dab0232a33ff3f3ee9f61ed664bfadde8d294022da21b10e0aee583379d8dcdc078639cf3a1ee18d6ee1740bf1b917ff56070bf807b90d5a19f37a5c31214c6a19532f364d463595262ca057f5865f0d55636ce080acfd4e303f03372af014a3c32d2efec8f7f6cd6c825e5edf309ed16008e50aafa2584804c1897f6433e350cd91e155ac786dd9c3deb22a39d69e85331086842f32ba7cb6b4d4f13e08d90acaff24315020f7efb2b74214b14e840d739378afadcb06d45e7bcc17f2a03ed54d0da71d865508900334386ab96e11b88d2811c84539e4e2a93aa27d66620500789bb4d595a8b2e5972b1805d88af2b722e1e9b8aef10ca3dcf5ddbf3d20a6f101bf8f8a8cad825946dbf0c64193689f461bc0c62d138f902575ed601e26184a10ed9df17ad4be7c9672147c0158f132452ea502948a749b474cd0a63ae5cf942609e4864985b4060239d0cee6c78ce4dfdf5750b51ffbd5ee920967f5"
"dcc52df6771e286eb83dac1c576f1a073687411cef3701ce6de66ed17bfe0fa5f03c63f96fb40ad70b478aae1e16efe22cb9e8c2aa57d5498803d35fde7f920b32ec686e6091a9ba6eb91fdd17b3302b760d084bda32244f704e14af619a5c9e72bd14c4e69f51177a26174c16d2e3eac934f184d460df5640fd84c3d3dbbc6785c249a501203374c0d58852d52c4c64a6d70ead2af1bca1d61f6f4cd00c3892565e085d3e603a0586d176f478062b092b205807fe7438a065ae7dbcb14f69c92cae4000dbd6804bf4eabf112813ff0599a29b1fd8bcf9d0ba7d9b14e40e38826b48204d8c0a50fd804167c88056cfe77e7a75ac36b5bd049571639b3f02a7e973abfaff1327080630a4bbaf6a096005ca2ccd54f076f2c3311e6e7b48bafbc9de38d01c8a01ee41d25ff0f775a2db4e34566e377683bad9a133482ab87907769bd783bd170b616d48974ad332e3defe94a2e7d6eccfb4cc43cad93b53c476e7795a087fe58cc074b591315daceee3c02af54d9beac8162b70dd9863bcd7702b7c8c72022856f78b2d249cacaea6c1dbf1317ca9e35664c518bf4155501ae77ecc3f47be6e7151c4d5fe56b893c69f1f939cdfd2b68830d9ea47a89fa7b3d4f620e0909d5a97f2637e2eaf223f25fb5ce7949e3ceb87d93db628872fc469f58a749e8b4841798ef505ef2712a3ba713386dc56b83e504c3d24d2ae8200698f9b3eca8d7971f7b82dbd5df6deb34865e2e6336fcd2fc3ff00bf9c8d04992f012dc9473e347ac05aff1040f010b1683c10dcd0bb"
"49b7b5883ceb6c0bee4bd2ea6d275f884a37fc7151245274f208a457f4bcf180d793de68f09c7b03e7e430dd34e553362f91c4e721926eafd54d6c8464082d2d4a4c5b4b44495ddb06290f01913e68c7cd95963242df31741eae89eec41d0af689518ae335aae42c60041154356ce475ba0bc7f6c5ec798cd7c493aeac5e08d7ef554dc23832161a615a6b902e1d4f7bd076f3bf045360cdb73c3b2d7c158b74d2b718b95189225a0824a38836d1d4dbc5a2861e62f8a8c2723cbf1fe8951860f0cf7b4c6bc4c307cca509435e077f3947b8fcbb8ba1252b89d61b69b0328a2b1c31255c2c9df670bc244af42599cb5982878fa363627b321302255f2a20e04b70e8f4f63638af83a98ba40c55ecc46230798224de084d2cc203841d91c4f049c9b0a98535f3f905bb80b24679de883470c8225af80361031354483d879f98b78cdc5aeb07b371fea8355d146f9bbe16c9178f3d83ed63e2812048a386ef85d6c35ad696936a008a524f358ec8a2e40081c3c50b73fcdc6199f59e14b6ee213a8161f675d5938ce72a848ba9e7ed930198d9ae6c43dd86d94d88c5312be17b9dc590072e382607390e247869674ff446e8c37d89b7276aa61b5ebeb0ab18f500389a326341ee13283965dd4cce69b666d2c114372cb0e5b5d9921cfdb5e12aea0d95ec0a73c8d07b3b3e0dd8d159d323feb4bdaf6ea184bc2fbed75e7cc13bde26aa597ea7eaf0e37aa4be069c2c629af7debd8692befbf74d6c9939165e3238d8b2b573001ce957942b199e5c57935ecf5ae0"
"c3b161b96f1f637605bc29bf5230fc65524041d9970e9b4bd6e7469e0c0bfb62e672b30a7094b014c27a06e3982d83a951ea4207a4d7b38eb155259b847ecba4675c3f82c48343a07e2d5fe16d3189c8dc0f4bb1fe2ca4abce4638a4462f0dd79d69c240eeac8ee4bea297bc1bd5683ca97a352712bb4461fd507f9125f895fc7ca8fc76c7f78207224d0fd142669137ccbac0f023fe1700eef77abc804e9b9da27ad5c3a767202a0d0a36f8fe86e2a8ac5f30303c39fad8b65a206239b881910f9d904f96edae31e4befce7822a7399ad06355bc3c7198eb1a4b2c7c8b4c92a604dfa4905109c35edb62dd3c817cbf5261f5069bccbcf98da9ee5ea192151237b31131953509157f833bb1b482cd011c361d768347b2d0da11b1dc43b392d609f0c4806d7325e92f9d76ecd278fcfb9d91e9993addffa55d66acf9211b7cdcf28c73bd4e7cf83a869532c90f9880bb963cec69cf40e117b3fdf9c0c5c9d6570a2458aa9d14716ecb8b6642a4cb1fe0fbcf8298ad0db3c676b9836910658f03bd47ded56ed210cb1e2f1088c87f4e225faabf29e2d450468ff6614f282e15b4a6fbcc9463a16f802d3ba071fa5b009403478f1088ca8a8d9eded648be7394aa6bb3590c0725ec87fdcc53c4d2afea49ba11f9f2b3231c912bdd9431ad941a7d89f70d8e1669e90553b047b5f4a033437fe3b84c05105227efb5390e6e99b597fa1c35a1940f513ee8aaef9485d1ffdf7ce94fd34dfccfa8f178dc113c32082e0345f6d39294ef283b6f9a566a87b1122e74411"
"8e643cd6a2ecf14e47d68254d26942666fcf957586497c72c9e5814ab3371fe4b0f9a7fa1e5d9629d0dfe9e93fb388865a599076e7ba983365fb3bf574d335787416c099c545feeea69e3069d841b62e4db9833e6865e24cda78e2bc46ee83ad5d79bee507c44007200e64b5d1329930bd658e6f051cdefdf758e5b023650c2abda7a6827ca394c086057c617dfa8c161ea1f953446d8e0d5f6d5c76bedde8d596d1641a973e2b53bddb8f7bfcfbd0fbe4883f4d6d4e6f930e51d47ccc40148e6ed1b409705e9a777f1bf86af2621cb1f04ba160a5faad78a0949032e9dd7e34bbe6b2fa1c478a990d3b7c474a2f81af7f7246bdcc669df005adf397cef71869237c53126d1301ceab14011a529d4897cb00f7d93f35031facdcfda8110b9fb5d55a057ac9087a9cc8f1034e03f79a806db8a8e726e8afbfcb2c7c39d3315ecad3a2e542d94753b88717b7791c66c47a45f499885f6c096cb1093d9dd6082ba8eb2132e4a80e22ee309b7f74af55530e190d73315023fe4b52fca855a06fd111fbe1125910f4ace6dcf228447c007cf82fc50993de0202d28aed32ae795d2d75ba8c975b78c657af*0", "vilefault"},
{"$dmg$2*20*186673f316ce762e8f2b2595b3e8ea204aef584e*32*df036556654b76eb000000000000000000000000000000000000000000000000*48*71793cfc457320157f12b1351051f60e59fc80a728f82f0156cc8b3f20f75bfb4289c65e6c8c21589f3dc6187540551a*2*5953*3c25089e22f54dfa868b7460f43185a32b6988681952eca4a493ff4699e2340f8cccd06ba2df28334dd01b83f8bafa3754b7afce8f859ffaf64d33950a817d5ffa9671894f71d6ef35aefd00d237f7f8f413b8b8424db42e6fe7bf503d1d4222d77d5c3c2a16f26a1e15d7797cedd59fbeb45f70ff7731cf8be628895f13cc2937f82c92e0d5c6b6ee0214c668ad1ee4f41501dca668af0f83ef252bd6b6444f9028f12ce15134fcd8610426b5a6a75ac25fa938f93280143b5c991a683fb008a08e133a962dd4e3aa9ddb57e72955e3a840c3599b84d874d61cff4236fb487e2a344ee3311d30a531a20ec800ec591607edb97599b297ac67e173a4f7d98ce2d73b66c37659bc75becb65b799f0a1642a4282ad623ee574091821c971363128e307288b4377e1e90e831b800936f2b5eb05fd5d0e505d71e7e34311950812131c5b742ea238bcdfacaf35e23a4b5b9ee2a7c0da6aca0ff02595fd4229baaf700eab8ce7ea772e133bffd5665ea3ccde2edf61d11e64dbd1919454f977a31292416c86e3e11b762a3c6f0c27cf1a07ba3c4197f21c8959e0f04fae6a086be6e77b47495d0cbfcfce05e34ef361d45b1f8c5068f0174cbb2ec9a9f37eb6ae1fb0887"
"17630b97bf46c801ca598878e6a8a96b232266479925e8f170bf76afa4acbcc6c7daa51c2b9a1821e5b5df170a8b57aa371019c240626b2f2a9d60587c34383ea7c12b300fb478e2b62ca9bf54b00f04f4970a68d6689c4087713e9b6be1e7c92ef16a7cd527d1ef33140d8d3994c07d8ae237e047bf478f164aee1c6300545bf986e570a403ef626c5fd14044611621bc5d5f37e417175a22288c2fb45b0e11e946f755fccdd774e5ace72bd2ba44be8f673235e9b49c0fd4d6a912493fa797bd97462de0402f77da7eee2ea6c0d02fa880ba57390eb1f73927d4616b95067d18103ad4b10af7a40b35e620211acf4c9f47fd12080b2df1d350d17afb649ea5e8a038157561b107e7d1d00284a59541c0b759bb424d2795ff1d3bfd7749461a9f67502df649d2d69e72036ab4f8869c7bb35fc999a9179612524e2f9bbb00e7dd5ef8fbdbfc486447ad5ea93b7220608aff49eebb98a1de88c68ce2b9846a63ac6b8878fd645bfc0c0fea6bb746b15301f58d2b9d2ace73828a623885fb495761be85780668b436fcaa6367776dee9e3af641ed5755f1cca7a931c97162f6879c7a3bf6eb47f98590d07654be8fd8582c5774f89bebf6fb113d75d28afe74443a64af360f41b9d243d8fb865039d924fff4586e3c76d9d0d43f8487200e802adb9e01460eb6ad5538d8549999c4b38c41dcd878b8dbd049b853aaa4426e74226fa19d3d501e6a93aa99dcea681f0044e15a05c2d08ae49f625ffe88181d2c1fe55e91b6f602409fdf961af1da851fff67f1e9"
"c9ac10dd3960f460bb8f937ec415870cb9e99e150f5b2a2308f2136960d199ccf5900f130a3f4610cda347991cf34fe46717071dd5ab2e8dc5bc20757fe6357fa56a18a606b25c51612975f51cad52e5a20a8eb2cefc79732fe19baee7b8c65167e2949a4ddc8d1e262b47c97286c2d0fb7078b3f553453445053d82a865320ead1ff4bf4fea84cfd7ce21e7aee696a15f92da1f3d73c394d47a254247492fec3b6582c94cad0df1b1b097048c9c91bae6aa269f5a074b796bf86770059cc767aa07fcf84010b1686437042d16d693775a03d9832857bdde9f7d98392bbcc579db3bddbc58d8cf08f04064e3eb92d87829e6617efab245cfbb6d564c5fa333ef560d6105c525e39177ff5530dc154b691b1dabf14d0da99229a04ca5c6e7956d474c0ee578b1b287b0a5971506687670ea848820c44875c74e69a79b36eaa3cc2a5a27fd5098f0fd3c190089736a271ecf3f14b3259cab95b941bbebfb5be132d875328a1b0ddeed958e8ea454ef80724f878a2a690bef56fe3ea62f47cfb6db303ae608957dbbd57735195d6b1b2ed73e69d1ac4b4b4fb01c20eddcb29e8b44bbd71fc25515885a56b8b7e55edd4c21d5e8cc43417e94e57cc49f279d0ed740b286d4e27c0b909729c4250ea2d1857f3f7d801a87afcee46f455f8a53e211fa0a311006cdde262ad4bc47941bc52db89c4b454b7075bf29d9cad6c98b7e84318a071789a78d1a83ece7a24cbf17691aec06c5fb7bb8a832c0aa33b27a5b3a68ef36364fd85cbd19e8f75e184c3d1cbccaf7eb"
"c71211506021ce0d38bf8c0885a205d7f4a60f7fbc972c7e2365b07d5a52fe8ae02608c7bfb1650ebdb4f2620f2698f5fc90c7b42a34a31732d2cdd12a4bcae3ce399623211946f74c67c5e82c0f53701bb4460504e17c1d6fa14288a63d97a86068be8ec36670adc16670b5cb3c09972b596cd441e4bb9b50471708bab77691417517e91883df9f0b353c2bea3d0acffe5410097edd2b3886592cc70ccaccbbf64d168637a8a3fff0d143e497e5311a9b13b4adcbe8d2625dd1fcb5ffe9c83ddd4a1cb3046616296faed945fe7b29ab6f912be6959f8768ce28958f2441a1e161147145a1621693b9f2d24fb9c7a89535456dab48dbe15c689709e2af6a6805edf923d8504f3d2cb8220ff9966f854c84e9ff04fbf45e42a5c73df4f719b9ed287695a4a03d5c0a3a964a7b6e95bcfc36a292b23774812e8567a02cb8a5baaf89afb900b3fb7be40c9e8432656307fbf2487c0d1f3baeda11e803f9f298e7e0c478f9fac11a43ca32e2cda46ca6491cc7b31aa1725d24805587722248dc326cf81fea4fc1ba9a58bdce9e34740e3732b96889b36e917cf029c7027c5cc985f8b3f0fa4e504325d56c7e653ce903e8410a6b06a2126b3aae2030404441273c1e486bc8285dc078c1874635e75cdb753a0fa821567e8116179b78039f8cc52675d538fe38a71f46792af445b125dcee671bf7789f2e874b25f05a431ce574a2d85762ceade5e5cfebfa5ff62b1ef5ee155fe418b16638c1562b29be425e05ef0237f03bb42181f55d4370272a13d5fbb353358d"
"a434519cbd0e4fca54f9cad4a7735238098d3984b0cb9360eccfc63b3b4339e0ad2b2719552085d7445681c919f21a6b482402c271e34d7f9fbe4fbad68eaf825c57d22ec0a2c5ddec8c1273131b867a3760626abe779e37ee632f41f212e9a9aaf26fd5cb28df689d9c4875c49db62213faa1e18c35b5d2df1fec21852e7c35d20d6df85ca2a6b10898b244da31dbb6de3a3a8553601c0dabf1e5f4755fc77c1561223cf0b1ee43441c3aa9d855df0831db6a7f6949ff0ae1cdd465aee616b789c268417de07e9c0f0ddae6b07ce5186b3b83ef96fa1ba9fabda1bd79986efa852a348364e33e89458550049522e64491a9b24514665af058b4be4ba690299d3c2379b25ec97575a9312b38d3106f805e829bd77033f4d5f1b35ffc7289c118749b31f17babb56f48aec597049d635c055d056db0434493a379d15010f3325690444e1021abd622d18ea7e0b5d5b97054708ea9087b4721bf857e3504aafec84516feab2a6f6309a506cd3e931ef3ef47807feba8ff0b6dd56eb83349d99be8633675eed19be804c06d4d81b0a256ec95cfbb2b6565d7906537c5adc404713baa8fc2e0f425c577660df47198e91d2eb3ee7a9a5025641aaa759e7e1f3dfd85c83a17a6a59df4af62bc669f28d12544254f4e0527a6b10958664af9378e41aa9f88ef3041ee6880f23a858254b5d0fa7899655e9d06f12fa863b63c2c950a0c3eae774149502f0fa3c3a44d24add7f9426ceaa21dcdc5408f0b96d63dcfd97dc4a3ce03ccd56c8d48ccb253e82d50123e8a51"
"76ae5d1b9cf6b6c11d2decea9f91e9ddfea605eec75391ffc4e01f4988c0ee78ccb3adb8a5e16644eb30e7e76ff251192fb3a8c48a68224a2cfee4aefa616ccbb68abea13d335a4b212b0b9841a42b418cf413fc868a842a26950e11061608a623a5dbd520aaebddfd1a559705e8cadf6abfa272925651f84130223b0056be28b618bfdfb164d2c5db86d82ac0eb2c457198a6cf8b0c2f2560eeac4441df45a9192cdef63a00adee0aafed7e0ab0bbb0c0b9a066f9f45f5e0c6a9376a069a45512081ee3edd2e9679d6c46d71e3740c5ada7457fc5d21610edccc2bef851d18f89e8307105855da15dfa749c44370b8149de48309f99fb5040d05d0739a64cf253855c185550339af73be6d5cc2de3186ff4b004ac816c1f4afcc83ec3ad66740c57b9cf660de7ab97b0771189fae5957751eec58a3aa6d3ec6121bf767d13533ff413c84c1ef47142f51ebf515c3d60a3c5cc3b9eaf9d43d2a84b94ce02db3f254862cf3c6330574fde5f8257c215c416ac3c9833839d5b33436fc12c21046025a4b0be90f18dbf002e001b8541b888835ad138def9910c4546fa0cf496bb4415463cb10004959dc6b0e379c18090bbd1aba6e9588fc21a89778ed1a1c0533049867569691aef6bc310fe4853e9e9bdd94a58943017a197526c70d2d278c66e94aa97abe5af8d9faceb0fd4e102bb69c824a1e4709be2125de420aebb11506bd62ae6b32eb1bb2cbcbc35dda3c992193086b11203775b33dcf4206a976b31222fcfd8b0e6beab7eed02f9f6d0dc2959929e1d"
"30c856a672379ea1a20bdea6e023fb7ada31f6f9e02f354f464b2261879372c0c92ea462ad11a83d54bacfce3febcafe14753d697e905a7c77031beb83076444aebdb99cd1aa470d5774ed91cded7eeccf7fb18860fc39577a054b17aacae86d02c2dabbd3ab068c982cb095d135c11daedd863bf9abafe991656d1f7773cbc05aa66c4c800b5763fe845d06c3b19f4f73dedbcd50ea363aa11e8274d541ab754209fe7fc159e7bbe317f8d9ba602bde8fe02171f8daf608bcd4663eb401c7a3f2cc814bd8fc195cc192d4d6fefbb15b9d9738f5e6ade7826d65b9d8477ef500afe2e40077b6ecd7d3ed78233fe980332a313fb2fe854d6becf9ab4c1008cb1b16a513d3fbed8036ddaaf372e8891c59c6e9bcdaf2d88e22d528b975d1a36af2fa792028a3e1161a74545eab1cd6284079c2353ef1c49e3e1242ea52d22d8c7d64f553e4c396e7d62c4a6619ec698b56cf25cecb6673d8a3a703f65e480f1b8b91e4427e9f1e9dfa1939134d03cb3115167567835d449f50cc9bae06adc68e3211d8e0cc1faa34f7bda6e1cfb088fe980397f4643e89052d2bfeb233ad81c3cd466bca1b1007e2e6459e3aa1e51f1a326a2f5d89407c05946b0dc7741f458464b5e4ceea5e367a2e4f0d007e9e31b24f5b7bf69aecdef4ef57de58719cf9fb5e8f5366452013a5bb69c3f1807d83e26bb63493dc141ab1ae8eeea11c495650b346919de060c4af1a80823fb10b4cbc333b9d6d05c6a4c293a7fd524c5259a841500617ee442222ef2cfc71a0e4bffa87903ff5"
"31898a44452ca2b132c4a633c91c7a24bbc885a01001988ab845e53a350c3b283dda71360c7a9b47ae40f72737ab6be068ed8ecbde1d0bcaecb729c5bea691ba0de6867e6e6879fdd99efec2b6de4c2691ec9031189491a01329fafb2f0d0cc28e26a22bf55be6ca866dd4a473153901f244c63967e829d9ae2ed83451a365558b697055a3b9a6bcb1bb40ae56f13d4b60defeb1a06cc6831e175ccbdb92a34462e786ea28e2ff25b813b63b30ea3b8d9a0921a5a5bf45576b39fbab6071fb1412670c936b5fc31d668026d297c5b84739021c4e763686e4011a2bb7e109db8e1d6bc853235a44ddd93f1012f7168ba3091a2a92a3e05bbc761fd97ebfa22265e6c1c2bccaa9d327d4ad61de87d3b5f0c5b29e604f79827064e05eede8b574c8982bcc0439db27b15bd7ea9a38923a1982fa7063f9f1572963c75168d53756803f6f60604ab33388ccc1294fb0ea143fa5e128a060da40f4dfa0382906b878a602c568f3c99809cf1d5912f224b2adfdcdda84df149217bf8edae18fb4bd825900ddc57ecca2eb7d209ac44e06e674c2b7c126756bdbad066dcf187344824050b16ff9414fe957c37a048c3a260a8dea72f7a12bf5b35e1c2205866bdf85367d94af939bf52a3027e2c560ca096a449b7297687bee98e4cc56e1449448461d028e435fef26f060097cd96bd605d5a1cf6b1cc95c49037401878b85d437ee43bcfbd7b2b8c145c05a33fe01226a637dd677bfd28c8acebc4a30494917c253957462cdd5a3d200e350f5d92c5c57bbbc7b2392e4"
"569610f35e3707aae8a481b8500dc8dcfac689a018671a0f3634d18fc7bf4f7c58933da452308e348a446ade0bdd6f02d29cd8d273544ba46f1767873717fea45f0e0980339fc187acb7045612e95db5dd9c89169daccfef2e3a01c4d19984f8b1cc960d054285119f23e746d743a0db459bdd5803fcdbfe92137e80d47c84c547848ae563695cbf113253b8a96e368bdacf59ff73c023d043348c1dfaf143ed13424662c2da644c25b9d22598813e1973f30ab103c0ada9ed247ca038a056d18f2e7c8443fd2c95366b387e9ab972170cd2b4438455dc73619ab3444da0d64b0b2d3a9d640ea917b1c09d17c37fd587eedab367235e1748dad753e4cbc74dd53017ba65571a5a65269666df0a24bc694a2d24e862830e7808ea8ffc1fd6cf4b29564c8d77d9692d7fd55e496c69f5f17fe145abc0dd1818f2cf6eb979c33eaf41050901dbbe5a49c8bf9983b1284fce92703b45c4131b3204fb9edd58b6cda3918cc490051bf9d6751b7702e577b700230f1820238b959e46f7dc3a3abad842814c69a76be5376c1e7b35e3ad7318b3439008e4c3801bd6754fe67cc7aed658d89550a30cbb1193eb5d2144eb7f84c5c6ee9e13947daa3534ad4902ceb9cedcae471547bf95e2337760322b55af97457d23d174b1c6f3e1d3585feb000953e298e35aeb467e90342bc61bd05af59c72921b2fd4795c19bba268bc6bf4f18349ca91b89cbd6814a62dffd4684ab78e998f7e3833b51ffc495ca3e789e685417a0d972bf4192b0c50016a64ba839da14c3c5bdd"
"58a74e96e56c66d73e2869323093892c5272aba5e6edff5a8976c5e04976c8bc1b8cefa630cd924b5bc7d28dbc67b8aac4d7571623c4d412acbfdf61603d2cdf1bed6fdcf8d88519a3ce3c4803317587c4a7dd33147f66aad06554d69138959fc3172298be9f5f83748b83c6618758bb45058fab1bbc1434b993890288a42910b91bd52ac1abe775acb09cf7173ff9fdf0e644ee94b000c8ac5cbce24d424800a9df431e03c650b3f4196115f100b49b7a41f68ce27e5dab5865b40a0977cc1be995d3504dd3bfcdc8db2a57765b1a80f6cdac0db795336bc9ffa4cc163df1d9d6e034d5b246cf59ffb2f81ec02ad4c48eb652be03c97a11427ab519d8fc8d704fea98d597e44cfeb168f3fc1385f1a1dc5926dfda78be4c3a3e1d024e4492e952cc8471ae1f26150cc065bef433c0431128c7df6c57bd79dbd409fb0684137465ec0687ec2ec45c6fb76eb88bb7bfb4df3fe69421dc7e0809e2474f987a59980fdd92b2a66ee31fb9560b4657a112ae523caec636642e44b507ed5a900fd65e29d35c89d252708b7f2c2daa29062b94577b0406ab9cda76c921694998192078e2ba7a90386e1544444c228db678f9c7da51a06b9c0a22ea26ebd3dbd8880a6e981decba2f659ddfcd15af8d06031e2d8ddc587417ab536fd4cef49372e0510c58060f2900e030fc894f1edb6aea502b0e2642a8cb1e0d22cc11a43cfe8eda906711e059d6e4a55959cc337dd54428eec2c123f5cfe185a78f442266f54213537af2f4b42176951bd9b0d1b70c61ef5e728acd"
"1a5b0c8f0360fc3d4106d1f1a6a100326500e25cf6ce2c7f230e5e54526c3affad6bba78eb0a275ef942e441919384b0420571655eff68e32cd97a322e22765fe736eaf329f41b2ea005ad56acb4c092b7bcdbf2bf3e54b058827259bac8bd94ea73e1d61cba79deb078857c63e255da3b8ed4bf5d4f603d8e3e19813fbe997afbd272102aef06950ab6daab60139fae51f0fa8b48f3e056a360f074692f982aac57ac3472539e7484862997ed283dda8be4b22b83235299d1b20df4ccbf0fa24faf392a8433535d3f3cc3ad7453b9b150dae24b8c78f149b53f5394af065082540b46f6ec3e70e2428b873fa564b548cc1e39fb406ff897662ac7e901384b3094c328bd484980c120518a8504511644b0616215df50ce1ab6106762d52ef24d40b9851168c69b3068682525f1050fa3ae139c9500f89d1b5a96c35f71e25f8ac229518a79fbdbfafcd67d7356bfc3e9699f0e5a8c9fceb068f810cf2c8e3042b5fef34778a3edcda569dde4fbc240996038e50e233652eb5f303fca7f8f29c633684566f6548bbc311bd24d7e0ba95da8f02917048d9777e5f142f83cce4187ec1af72b6b6c3825e38646f9f29697f6fe3b3cd76*0", "password#"},
/* test vectors from CMIYC 2012 */
{"$dmg$2*20*dc39029a22b86bb4f930499578d0dc9eee69398e*32*bb47bff69b10ae67000000000000000000000000000000000000000000000000*48*c4559cada09552ab075e73dbefa4aea1aa21209011946e423ca707753a91c87f6c4cbed3beae20a244d33568f852068a*6*4315*504c0c37c600618fd54da114fc0eb24d6f24585568543126ac56c034cd8d7b3dd991f1418d0c95791e091921c02bf695b7835f7b0da2c1b96524e72b4bd3f671c592aa176b6a58de77a35a26bd1d0c313b2ca23581027fc52c7c63f37439404218d720171d3b178125e6ce0646bd6fa1033f2ab7b6849b3a35a430cbd1401f73b5deb478d6d0f58364579c208c613cb2349fb19adaf98be2d4a74a6030215793fe4f1129189626bb87c23d26dc2af51a98e1fabf2f58e106271c7759d104b9e5171d8f952ceeb14317614b7a14a5313029aa4068b898f7e0f5b68683feff0d375f2ada37f20135df443bae913c7e96a29c6c3388b4b51432add89ee22826ad0b1b0a4ca9233e691f71a5ae2c76b5e5a135dc793e081dc53781faa4f844928db94084b53b39f1820c8342b563e3f46b002bc52ced63e4588388e69c9e85e2002438a1a703de411717d24ea88adef3051b27def61e4b9a31548d3714c3bee39fed866254033a123429043d0c08a052d2999a171b010ffd119f90bf9222462508ac914e0a68daf93f63caaa0c4302c9b1f6447ac3856b09eb45096b3a294731f110b90826b0d611e6e045397b07e5aa64afd271f1c92664e648af648642f786c0c8aae"
"6218f4282d8efa713dce232fb24df4073a0e04edc86d940e8ad22db8ca751143743f9f12585bd788551cc7b70821b5c42b133cb7781f60d1b9c345e9adb122ae444be456b8e49f9bab0e2033019b52f2ede4e7f56cc1d1dc3a48bf0666cc7a4dc6b4ffd5077673f2f6761688e4452a4c11b82598cc0ef57213f6c7c12ecc67164ae501b3e87e25a361d0615e48cde249f0193f2aa69a1eccf029340531becdee8eefbddca18905451b48c1085d4cb965786d3892d7144841300b8d2722e92af50fb828cdd8e825dbfb16328f7cf792f311f84078d45306fa570661e1ef2b34d5d36de2fc4b295f5e84fae8d55ca22bc15764932d0c5dd3cfd914b2b8f67477b2b5139c822ee2c511a03f7e9c717a5e8eca6c4b54f9c3b7d85765a78f03b29fb979811ff0c655522b341bb54ae3bc412eb760eb689c6b4c3bfb85a8ce794946214c574105e577acc01d3f8885e72db52075d05a75260a6e4a54872d087040ff38f8942cf150c3615088588cc53fed11040bed573c0e9ab14b987f9223ad089bb73284443f61ffdd61616b8a783e85618217e8bb491a31b7050421f4b0a0bfa5003775933db00e47e4452adc1433da2603f6dc5b9dfe58efe458da25699e512660ac6f1129dd9d7b176a24109c6e6e0c201d784addc9c7f8d4f309ef6fcfb02493abb7c836ba3a371e64fea941031a59adbcd4ef59f0dbf31f361f4282a0e60ced4d9d17675b0422faa1c2f932cb525ee07df7eb2643a67963aa99daf5b119884557ef1585d81eac5c8acf32438636a10d043bf"
"47093fb53a5b3ad544a38fbc3588bea3ed616167a79b2133efd8c509f53626b9cd7b71828fbd5d61b1df6ef3713b5347f65e7c0770715ac1fae561cc548864f9cfe281c6e5770f053f68ace64702c81c97976f471ad11c7551789ca21a4d5480c5d3528503f2f7fcb268c34498888d5fd3edf1c71d12581c393db2ff863e22c1f6c037106e5928aac9118702b45bd36782b2295782f93458dc120e79cb3d1632c2c5e527e56060b79a751cb7653b8c0ed2acc32168b56fe5b50ff9e49a71dc9b82f812b53e095660cd7d59c04f31ee47773a04eabccd7a4a6455ebc7d719c9eaedc4e6c935fc99642acd3e60e0f564efae90d7d1308d6ddfe7eb89520c234cafca6bc7e8ac96ed401bf96e3c9de704ad124b0f9381f22d9ce846fad0b14eeb5f93eb0e0fd0657c480fd2a1109d735f3825db598e2aa7e624f282673947c38aee8832ec8d4dc5d6a7306e3477ab4e37588788109a3ed76741f8f2a796d0f5bef8247eb298fb973c4e5d13666d87b0bf5a7a553f208050dd7140f64fcc27793ea82cf58fd86ddf805a700065888bbf6b5037815afe8c03eaea355c90bbbb448de13773e977fa4c6f06e7695e80882cdac40301b537fe254eb1ee437a6ccf3efa68899a7188e6829b58977917a9d6124cd2af7cfa567fb85aac9c6b971423681a0b6658575ea0dd32054800e08be5683faf46165c56647e1c346961608bdd8e6f999eb033caf73f000a71961cf2fa8c319f4084c0ab499caab87d13aca3f057d17748522f08b36c56c1746e49d731f9355100879"
"d7d114000293520c9ce71098d26b2114030615aeedabd5a6f7fb9a91f98b7ff00ec72c82136a00e5a19384084e0aebc78bb3cf05c3c1e3872f56e254c68694d930eeb46ca8e99329eb923ee0f1b5af0b7276e8600e25f18642247111eca41da427e5b9034a6a22627734ee024c2e2c4277edcb3a0309c3007c19416fa131086eccc6f73784e1a008dba5166e7c8aa4cf8efc3a4e14f59d665800982e46341b9b098508510c7dadde295a784f7a7085f5ddab5b6881b305f99d87ce3883e557280bf2a1f3adc69b7cc9d4f339623d21d569230e57a2bce611de7495d403adf451725d7ef11df4bde5a31a95bdda0d0c2a7869ddeedf2ca7e1986ef430ed44bff6ae6e44f740b2c65364477ade4dff6f4eacbffc67a2e0494c81e0424bc9220bf20aa795e2b20db6076667088b6863243ccd2bf897d4b6e1e58e2662cac593fb9a86220d65964e7f6e0f1987d07a4a8242c41c001ec38ed2442011d8a56919800b4d590338eb8db02833031ed0422bc08b11dd59b59f1d301e82154803076053464120217ca64bacc02465cdf629732cf709777452e177f4a4d1015fec4c36337ebdb8daf57f19bfeb247a27131ec5280038f3d1a766e071470ffb685cf4d9763b7e1b5776589874f3cbd4761d5fd35638918ad144a4a1bcedab9d652477951a716e4073cb36640fc257031f06e4d6f586a9a0b6172727933179e4cd433ba940571f3eb908535a12e9cc3ec1e8f8aa9975bc17241779d972a8fd8581dd3850905cec48061dd5fff1b295757e38ed8568c3a2967"
"ba271e00fb507b10bdd5ac5b90426e48e596ed430b5a3c554ca1cd0d18a90809d8db18853e2580cf2b2ca52ff686b7cf360799bf69c008f87191ee372b44f96696a12632af003eba51adf1e6101628168b92c718c6f7aecb765125880f180047ec3b89fa23bf57e4fabbce38ef0fcba829123f0a3ff527dad6d6b5b0c4b0c4c4cd13787e98c829bec08728acc5e90ddc6bcfe2254eb29ae8450ae87841a39958ab80a38c8a742de64a44e25df0360a9e8672148347d7812bdfcd9037723edbc5fb4a8bba689dfe3baf113778a498e2689e8cf1ad194df422838a618b0cb222aaf020705fcfe1475a8c205690379cbe2d0b5f9a0de41a4d2e6ff85f1f19a97712bdbf49bb90051ab934407bdda9bdbc1a57b0e874f3b2a09df45b7d01bda15330ccc57a752deb2751e495e394471f09f33d98d8face401d418affeeab86be36cd8cfb0f435d9939822041f256ad860733ccf137e582e1cfb5a8b96ffe646d1928657c05c67b8589a90fb32e078697fdf8a3ec58dc6d350a7f50c83d09e5884317829d8e850b7fe17bd2ba4d7fd94b86d060a3a97880fb350b95cde4542cb7d1a2f44f8ea065ae30fd4d4b5fb24f787b8462115b3a918155bae098f0fd7ae2d4646d3731d228909f690cf0116e1ac15899513957834e0a74d8c07f0c696cd3268d631ce1292f66b2633a3287a7e058781aef9d3d566e4e41395fa7e1793aa9f669aff116b99660a5a29fe127a0459eacc3fefa4be95a13499dc844d9faf72dca38d8032932084faca23e4022869f2034ace2de0"
"b286e71f2b569951214fd2eaa3d32da48a234265acec4967c74976b5b5d635eb12cff038a4a23d6c8e86a11a408aee5eedfa7209a8ce8d6bc10271e4b5627e16c5f8ce8000882c461de0113efd8ae9cec6ac4819ab2d6f8a9f189fa2929807fb20a895204edad9821d180c54e865548f9b3eafd8073a734e61d574923f0d1f69d266d970102434b0bab705465833ec9926b03798fa8a95ab98d35863b7490db07fa1abd600abcc3718d105f26f96d20e593ce0c82efc68ae65d03e4e2ed3faed27bc5799e359588fa884ac79c1ad4f5f8bcbc9a2a5605f97551710e2e416aacf149941265406490d32cc6bdde994943fac2102e57785dca3c20358cd431cee285768d9eed6ed32a9919e13f1a38304db6a57f637b6a5c8adf4e829baa82ce674ec7444fd9f7f1807b8f65d4b68ef7b6c3fe5bf653e81525f7900916f5d5809a52c070256e6b4cb332fced5e460c9a2f62bd73392bdf4522be7c211577559f59f62869e0a71f832ff493fab76bbe70f3c0b902fdf45cf49793afdb87558f1a6ec289018035d861990eca1dbfc412492cf86503af00c7db7a0a2c6374eed42b440293938a36f61e1c4c187cd50d974f2a0989b05b8ee207398560b516aea520044e37229fe0efa8b7038441fd584d79c010c0f31030d60eaa4dc1fbdb5a254c089198bb5eba6fe20655808c1d22b9604af1247e2b820823b3c622be2b01ca5f16f86af880908ace8765520c813afefef18e2c112a72fcd4760da91f7d1066cb5c8c902745b83be8defa193bc8b6b93a82efdf17"
"13a223660c6ff4dbbbaccb1a4e5482cc238388448e8b9c24c9aa3acac9467e1f6d96d6deb1cbc9fbbf77b7e756068e22bc3b9e6c275987c5eb99da6a5e2d90a1e0558c4f9fc392371c07a7844cb947b19dd1a6d9c1ebb6496f36bdce2967bea2971cc1c6330b1c31054c07f8d853858a46ae9370ff1d6ab755beb120a61b4774fba521baec6fe8a079862a0471cdc5080c0f073f7e3d33f0f25978d098f61bcb4905c776ce6c0562dfe08d8b9f17de4bc2048d962ad7f4baf132cd0152a904fea9530e7c1f52a85c0188d6ca38ff9b692b2a68204a6dfbfbec06f2d800b4444503bf2dde736be4108845c5a28909cdb42391b5a0207c157003b8dbd4e43996ab5017c5f21cf0d4d9b3145c0cb70fefa767b4689cb750fa7657c4a788b7759f86496998fd4b99b2ad1b2918bf330c1a81e8986eab031e9f86cd93b7d623c72e1a394f0862a193f21eeb858524477c3192fdf5b61ce9dd5b0bf3b3d7adbfa828f1a9ecd4dabf5e318fc40262f0dd204f28b934d1af7b0d7cbcc20be21f1c7e04fdf76104767892404b14965bf8d53003ca9ff0a8f15f5d9b2e152a662ddd8eaf7902854d8561ff088fe2e880a18a036d06c29997dddbfaba32ae4ed70b47413c2a037122d830d55bfde89ba645562cfa1d29f428da108d93562bd291748a728d1b3090b8a7f56293a3135f05d6876021e92aeede437dc7ab610e1e5af0a00c880887754d76b42b059f32f9159d25ffc56a993661d06a7973d190fd10c4ac998c8627b494444389c529e41982726f47135212b67"
"8b69ff36ad29e225856ad2081bd393249f469648e6ea4445e0011adfe320b4eb5cff1d9332c1779edae5d5d66931015e793f730be8482b5f488ca6372edfc71abc4b8aeaecf8051bbcc848d736eb0aa0d7ee4cdb9eaddfdcd4200c3e2f58a97a162565409abc44b8e982fb883b619fa80c7c4f2318954767ea1c63c70124f4342118f2c798adaa7ab5f6ebed1b0a15e12f40978ca8e5f0972a47cf397746f9f482902abdda10ee7f4c610935070f888b5ef8eeb07933e1d6ecaba243fb475b4c788cf8b453638ac43b9f6eb74654835678b47d9437a14300a12553fdb10daff3690e0802dab80fbffc401422a465e10e6414975358249d68e4ad5a1f1c93e295bc10b8c5c11ed98c7ca5773014a2739c0592dfa30d8756be1f66e4fcc01beb2dd58d87800e71d136c12b8f73298cd37b1bb5758376b2111921fa9f7040e69d3620415ace96ebf29fc1a87e392a9e701f4075208a1a8fda7a59b28997c017da70c18d2bbb5c91db86d701cae85a5742842fafec723be9d93b4225619c7188f5bd23c900ef3863068785363ab861b58aab8e91b562b26f72a812e7892ca0bb6ed91086a2935ba82938b367b34f70cbe40c02a8cea92a78588f90cddcabd2738c9a18450f6d3a87c7f827a1773c2c7629452f64e1528258a8ba75bc53245c705246963369f1179a765bed41d*0", "654321"},
{"$dmg$2*20*0e2a3f19e5f9a89ef8371580fc08738b0dd02ee9*32*57b5e138dcba821a000000000000000000000000000000000000000000000000*48*4a33cb05d5fc441fe39477724556bf2a3445d2826dab91031374075f9b5cda25084769a7af11b2e678d79514be8e5f63*2726*8192*585b8129cddff9f9f5875d62364faf4dccb0625867ebf2cf7ebe08913e340c8bc5b62e4c4152b2274a19c3fb7d0f6ee32e7b6c502073785bbc213c28890b9910c878702b2e16ea0c0b0ed1462b831b1eb02a0a5ef586de3e1bb7b5f70b64e713f2bfe7f401ccf0a4430981b89d23afd47d05d1d28d64917ad2895af8264350f306b7a0b67029f6da75fc60137b99131d3678cb8c596295bef4eee92110d09c52cb30486709fff75b80753378918af4db98e69905245ec52c2c6ce7e71ea62b6e530269af23836fb40cbe12a1498d3d4e66ac26b04c31d4a1cc169909f51c0468edd44d051d79c361f547d7f4891195b96950ebff98f70b36106772abb775308cd6d42fae3a60d748330dadf7ca90bd474d05cdc678a0cf41a5f4461285ce0ef0a6df3a400d0116d1d1f17cd10be2c8f164ffbc3797dc022ffe52b69f0303526d3a17c113a56e67e54b4de121787dc62977af8bcde3f4fb596762ce31460a6f97d3d07874ad42f97ace146ada9b63f579a411fca985d85d64bd3262d1d2ab5721119b0cf8348abacf7aae2f57d3b667a5997d0fa448d3da4c51a6f59c6686a92a35ff4d6d951dc74acab9d956e9a942d9356291f56046c612ff09d1e10d8a0c60"
"bb2a4d273b03962f5399ff455ef480018dff09125f6c343f28b13acdbe7f0309e64406d2c453d57d6e78f10caf01d8dd274e0ca6e4a82a208750de92640ef97f67dddf90b0c6de767f185b6bf17a119a735cc97075b93fceeda807d0ec20bb4ed923ed8855202d7d285b767727bb5db55241cd21cd5a7353cc872f0d4a00fa0a50608eeb4cfbda71109a4a2ae97f2c01a40c4968c32ff2c01f05ee768b2ab22f12697805396916d8fbc1b06eeb320d619b0e472b763e7a72acd949e17620f69839543c3852c83e5c3b1cbdcfcfe0e3507a4fecfaf3f27118b6738ae8e33801cb1a2b4168f8f614dea5e673878964d6e27a1d8d8aede3bcf366400cd0155cf502cbc04234a2a418638531ef13c48917328d2bc1736e85be9cd80cf0d99b98d0baf9dd9bb3f840fd15d74788043be9f791540248b5dea621487810371995e5fff578de770699ed8de1f5190cfcd5d47320594299af29efaf204e0a411670c6f4f60652422a7e25ded5fcf26c1d83f805938c1ae578bcab6ea5c679939e5fc6593248d6b8fd55c454d2c69e8c756982c01ff76b4911ab494d90df56d7743f4d8017423a045eb4215963317164bdbb473620e8a17507a9cf26749c6141ab7b94af974db92c875ecfc4ba4421a37da4454867ea3f7d8580185eed9ae3271050d039c25f7b72e18024f91edbf3e1bba71f697c8451302b1ba97c8463b3699754fabf472ac399bd3a783b51cc945051ba1b411ea8093278606efe2b34b3992033fb773fc42cef45fb0482992d5f867416faac3912b82"
"eaa852935b54c1c05d2b5be854fa75ee754235ff1e84a53564070de838fbea7704fc249a98c7fd8a4d4ffdc06d5fc0ca39071fc5be83b0e37591e14ee76379f4c5ac64b21f016517ac44a12161543c43d40a8f92237c99de44ec220fdb502d82e96f01f020eef2752279a5aa3d3928a4cb594c5e145d016375e3d7a89d2bf12d4daf3886393c31615fef9e4201cc0208821e932e8b26df396e7c29f2c0b74c9f59ab79fa44b4f9c1156741e3da93df51bb23b756657187f1902f3d5c79aed88190b4a5f814ee1010b2fe82a3edd867457dbbf0598566d80261f83db810d058e785261635cfd1260c6b3b43081deedbf0b2a30d801618090d07340a6ad528b73c7d652efdc48fed161b0a0529d5d1e80fb0a63411d53e75e9ea9873d25a3bcb243faa406293f53a21b37e80023a302682943a30c8f1a5804a3700fb92092677602c39235246f359503cb79d2e084cccd2b40840acc7ac7b18b4e1a665e3833f5b4aefb40f0b36b70dd6b125ac9999d113fed15e5cdcb6ea6043036df3dec7f5638379971758e50f1453af5e48ecddf1d46e575cd2cde1b2091c1797df41f152fa77621f69169d42398312155caa88850800f9a8792c364021463467248e385bf45cd40c7869efcd6e9a24152bcfc8370ae901c7757a19627573a8832e5ea62c344fcd60230a3915561b6fd957750af61ced54ca1ff1a8edfe5ebbad51a79777ebd4e66c63a248687220e66d923c746f56f009f9d3f1f186d987c057af87f7a70a213c9c6eb93867983c3191ee956c8991275c5"
"5b07b2ef0eccb8b0287414a154afaca67f218ca43924fffe6e6161690756e3d6a19a29ca972987f603727397e5f4fa19d0c3f1e74f026d35c028bb81450c7b5493a7d837e83504ae7369a49b2354c6c6219c79ad8cf9f5bda3765541d9691b84d19cf1fb9534f859b58257e80a7548c12ca2c0fa34b8b6248b30213be0eb60de5bd04621c163e4ab00d80adec931ee00288fb98e5eaa8f6ec83af863b8a3634f955b54aff779725479d80f2fa51d25e721b159a3dd814db70836a32b3a4e55c4def271a1918805f31fd3af464c01006560b36e1ce0a745d3bb121710083101d1ee469b971400d49483b6c4d858cee24614786f227f320fe6105d61fa8cf21136e9160770167e1b7451a3d9171f56bc436f097d73dd4c21c245efd72b63fe21d1600213ab4f2250e6c5a16cfd3823de93c9c56ced668faddb77d60f4d4d9a9a3b3cb9de0eb5694410fb760b7421cbf6e40ca4e8bfd4577fc3528e0162ea4c9aef069b3e4f199120a10209a6acb1eb6e39fbb23896860eb1366c6eef023c2bd63edcf73aac6094d25cf3c1cb0caf82b1010503fc8e09bc537e8e690f8bbc0ef492f848f77442cbf28bdb42aa8932109ccefbd2ad6563fd3d315cb79a0a5f04772105e8564e01c1e22f1c2ab98813979da0a08ee8812acc1c18097b8f1fd95424ec0d1b63a85e84257d382400c5f44f570382ae8128fc0935a5f7f518ae3808b79ae7aed4990edd9257ccc74dd19adcde363d4c7e5a4594e3d3ce88d308cbb48fe26edad968cd54cb715e460c7b421f6debe9c70"
"3bd684a52b6b9571a7cde4568d7656e9bbfc5559d2c60e11054cba9eb54120bdf13c4c5103fc777033014404d6b4a65ea0a716f76a1433ecb904e9ac28b0bb8ab5c5b0216f62c18aa29b685cbe1c9172d51bdef81e7ead1ebb5d6c7cb078fd32cd63c72b163d2848de4c6dd59b35e853d6ec578b681af969941c16692c9010576f6f3777a24e87084c4b78a8502d083c137237a60705080aa90b2441e2f01ef9eef5b0f2b25b2b745136cb143405fe5c7ca013f88392428868bd9f06bbe41872c4cb1f98b16d74d064e66b0c435b52913b8153d47f52fd95ee73ab1f25f1533febb72e9dbf65d11a7568a17d2e8ea2616019297846551c6a3248b0a23e91ac1f38b21878a28f828e8aeb19893478aa2ff2f16833d1b69fbffe68b569afdd1980cdf6d8d4ff52d9e2708568db1a1b50847c8310e4d85dc73b59ee31a63bc894712f2d2214973c2741f4db4f3ca9a337e1f6c4ed3858370626b62e975a85e94b498f8c3c2073e6d6fbedb40e8a356e6d6c77c2b5e13ee52fafab4c8d369ce17a5c40deb98c98b60f433889e092d7da5e7e991b73c15127364d70a879b16ae774d65834fd0029c3a1239143b6398bb19ecda0328f39f39ade7a090b2c5c4e75e4922c50f858195c7fad64e4305d04dea5b85d4dd5a52ac4e60681c2337d3a2eb0b47745563f69352e1c17b08a3625f7ba530dc5a393238b6a2b92bebe6b94966537763ef66179b5c622ac068acfaf796ed4f4214d7fbb36eba5c9216cd5ee1d42132c459042063c71a1323eaacca0a94dc119145"
"cef90f744d16226d7168dc9abf46551dbe25ce179e85bd44cf15374ee498f3f3f8fb5800c6cbfc427a834e3f7b3b6b6c7333c5ed46eb2a0c93e4eaaa6f95072221d7cc27d36ad53fd5fee1e65d91e37957a9d34901602d5f49799db3cb4e47e2c5bcfe36008ff0fbf166d9e541504aeed187251b80cc72804687f58b646ca3893e8c9e4340c9580a2008d268e07f7a0705bf062c6b1ebb3a62a4c961ad2f65ec9d44c67ad3a39117d2427d9c3d067df7c089bbc905b319b30d61d099265de1ff42a97540bd08a1ec79a4cef4f692bbe54ca6f95d6ecb82d3ad2316d6cfaf9a66a8b5e5f00847b55509cdd344ccc3fc640da87be6cd4ad8ab3e510b31831d3151b2aea6675c97767076360bcfe1b317c3786dca2e4b3e90818064abb319cca7bae051390063bc6a0a0a133187a60a6eb82162a5061fba5fe17f157e9e589ad83d2f1760f4055879445b0934c954622476c29c9c577c053c723786c8d25829db7a896c66eec594a6b798ed278a824550795b0904e154fc06ce8783a773a8919b624dab70f92000b832475b77db27d0b5bbc5578765adaeac6f61166094fe11603f37a41fa047156f2e57d80a47d110901d96e33b5247a587552e37b7a0712cec420a5680ee8e5550ce5d0996b235b8898d67126415184bc9a0ec172d9f78f595182400c010d905fa73b5a6fef2f722b7f9dc51b9d21d85ec554c9f32612fcdd89577c47b3cb5203132e76ed5a39af7e9cfa2c92369464e14f8333fc29fe7a662b9373011f0d4627c9ba7b0ab0c050d0e67c625c"
"dc83a0e244dcfc7f5b58ceb0d1ca2f16349ad8b16a48dbbd63da41eb5d0732a13ce5a7ee7c9088739eec6d63e0a410fb53f83cc75915c0b6353a75fd2d219986ee35bd3991161fd054f0d39c2c9da696ec2968e801cfe726cd512ddcb6cc28af65b1f8e542d1ad6a6d76dd1582dda6af4f6c9363ad7117e0ea0102cffc1ba0d94dd8abdb5ac37ef9b444387bfac2b811479086e550ce3452f77461febec72ce35d06ec70b94779b794dab1a3fba727f364bd0a65e7255da20d77ac6b85ffee926a1c3c635366a4d5c8233b798e565752103c66d5e7f18f315f7fe2641dec5944e51e373f19fbe1b34dd00f4604a4f741a5d4a8c720bf4e51511fb3316951ea63c3129c4f6242a9014a78a050e633ea5bf85960fe340c54043d9bffb969f8abe458a8c9dd02e9416e0f3504a5bdbf6cd0b4013b4b548bbe59a23149a24296e0c326d69affa61a878baff7525bea12a4bacaee6c216de31e22e218a3bffc996eb7a3b8570caa06193b56452ab7f3430c758c3b447db98c7a1faeafffa497d938d9b952e3ab3f6774333a02742375e7e1dc39cee15313d69e8cad1a251274ecf48f273cb79c58aac657adc8d77f7cd1755ad9a2fd43b69cad9d2f8bd77695dac3c43d2469e4ab34e26c7debaf33eb2ca6cb7fd0a963a37b7dfd5304b9d5f0bc1ae0940bb40375001e9920d4956f4011f4f1263c3b7cb38afa1d8f7c8c188bd226ac3e23867f3989d76a402a9476756e03c6c3bc4e3ce78095125ee11e7b47347bab7a638b0088a3b18f23abae9ab2f94650a30e2"
"9abdbba8ae9d9d03cf5b12ab23f5a6464547bb7078b91f533ea06541941483359a8562e709608e0c5d1da2c7206c5af49be0df87a3244903293bbcc121fd2e20ff909a90ed836f1822ee2b40530084f02bd9c42b350a4703851d197d9c465485112f1bbb21aff46daef510159a1f354e5fb7b11508a3ffe12577b40d3bc16631f8a79191745fe828303cbe5b6d9578cd80f736971e1f108f02039e0bbcc12b42e8860cea15cc18505c3e4242ef481930f3e2c4b64ccedb5b4d9837461efc7c48f8b1a6dae1041e696b99fd8c9108ac1fa9d975b4d5a740c4e5bab92004b7c91cb64e80a67aff2596c919b73d88943538e0996a775b88857187e9f97828f8661f89252cd0c5577b27151b5b0021f17937a9abbfd8ac3946fec79a4063af00802d54eb08461f951cdbcec92f593eeba457f381a7a98f313ba28d21d2574fc751449e1c3b497e09b90f8e1840e7a56159915d98b36647dcc15e1b335102074741f1dba46f0df9e7114ca29d02a7e4581fc45c48e6b31cb291760a05774fdfdc0448abe313ca496bd2d1f011f4706072d69eb0207b0289f5dbe4d1f73355b206ab3d5c777d1d9dd65281a0dcdf598569109e8fc3b56af94e4340929457d2c45d9a9bbc37741dc031136a11955a465e0baea8c11c06ae9321dedadc498570efc3191e67354f0cae6a763e84aaf74597dc1d329c81231546df2fd965d2ce0fa2026e0ca896d48bf8cff97e9e1fc5e035a13a1dce07810a9e87c21988d7e9bf19dd68379f346d232f83d776c36791ed1ede88f8bdc1b"
"62e3e7857fddb802ef7771be6a2428b7bb7e419cd95042d7de60359365efec7397b4d7fd32a4d7e8b924930606e7adc49333809812635939f79a20eae6066fc494ad27aa5be989663ed12f9f1c82d092b7a4af546f6dd33ab862fe21cc45c2c7c58842360070e206ac341c26ef2f92cc7629d873a219ea1177ac6354e7192f4c3f3aedb580c322e1644c92b9882a96addd01a35371c07b6cd3d7e4e38d089559ee41bdaeaf81650dc263a69fffa6d2713d3a8ffcadde7601cd2a87c23187463d3f3305a36ea01743d2cd846cc5ac96c89241c86b3c38ab97f1ab7b9685e68260fc116b7d02db8cff929b871dc02379d203aea4160c6302a7bad3379ce2b77effb3f9eb37d7826181ac8f606e67026fac0f43e39c72a04a6278f89d16a6c14c6d6e3dab80e9089a83c7a370726fffd0a2e6a9a6a950fad60982eb28b638ebf2315932911b91e465f076e97aacad4c6e19ec46a8ba9e7a19fca03b7796cd6d8efe6d2fbbb96b3fd3f85d4622fef029819efb34abc28143faf10ba4879fa69d493908649f03853ea84bf7d5bb21c6c541edf0c0aa96347b4102cde3c27a58ba0788ac02cdba243a3f52e0ce4d682d41d432e632635cdce5be1542b6b6a8708e144a6acf80ab3ff5842ca2db90e9d75401cfc99746a0919ed81983d2171b4093b1b07e5e5c45992f657c892e91c16cc6017a66af6466ade21f4b378a6fea6a8e4bf000ee986bbc0a170467548e7f6e797381ee89fc431f7aa562110555dfa5c275523c202744541d51701d70a8f3006ddbdfa5f72"
"9563bc0234d0b2759efb747633221706cfe73d47743ce6e6077943ef6d0801729e1301ff9bbf37f50667909f1cdc70f95040c841106ce566de5dded0fa485ea539978a88ca8618e566e9da4f2e215d544ee62accbe75dc17ea26962d78bcad516e6bff3152642e346444db494a909478bf6d80aec53f3ffb3311c6283711eb96fdbdd8e6d94c71cbfb9d7ddc7f092df5092199dfd822b98e21239bb8dd17f0c101909bd38d309bb5456232f5a1b731990a4cce847394fc40b859a8d89c7c02c388e7d6ad42bcf4818de33d696ed6d6ace4c23d51fc9d7d82d0602dbea094aa2db51d9aa8ef5c1f4803e40f6f5fae44da3c3c6ce9b1003d95300871353762062d1ad49a31cae73d569bf07d147a0c8d212e60b1be486df08bc353a2e3ca7337b83e3db43be03147114c229fd32fc2eea5f64d5d5d9848709ad7335dab3909c1232d93e76eac218e7e0497ad5b7b1ca8d9ad5447879b20dd370398eb8ce4bc6805064ccdaa6d8ed1e98e259b7654a75848705dbf2c3804b455a9e3dd2890f8d74f0e968dd050ee81af2f98fdfbe831c16dae6589b9b2a16965713b8fa52e5d2d4df504411ad9c14929e560a5f7e74e98d72f71223a5eee41a40d85c177183c510881950bebd3f0ac907fbc5a4efe70a60da6bdfb6870d7fcefe04fdfffd1492c5033ec79b8de002c41895ea6e84393db391b9692983c84148928ba0fae6b2ee3aed2289a9e053d47340b5faa4870fa632c1b81c516a58a049728f941f57bc34ad53c236d33dc2ab6a196e896968d0a2bf651889"
"825b8f358ef4874b0e75e39331e513c506b29a61495e78722bb25475ec2ddcda0816ff634062a54721c9fb425ff286336e7036928cfac29216dd0eacd3e5328b6979f831dccf403e87ccfc4346f5743d972d5047f6055bd86c98b8fb720a3cc3f459750ddb870a845c1ff4bc3499b1c92b6e591eca7e94f1f8d2fa3c57fc97b573a738f7f55e3b6cc975a813ffb7f897930b8de8382c5883ebffba463ce72b0c50c721db403cef01d5be035730ac3c6f6a3f78681218656f397966753c04507e08a09f7176c3e37de40b9c7faaef1b675fd083c9cced4261dbd4a289f6aa0ba04964e1a6d328ef05786933d67d6da009aaac7d4a8ca31df5a15e3874eb9b288edf7d794e1abdf9e411c5bb87f7fb27f76bd62968bba4d53844e76487818ddd38620854debdced8930ead6b46f3bce6009683d3ffedfff0be83cd8727bbcbf428c761b79a3c06a7c2de7b99394030b51eeb954cfa3fa307a37881a8dcbcedf9549e2600b72f3665946d14071d9d22894020346466bfd2062e092f21e38e920609df77e3b8ec024334c9708a415d3408e22645f06cd6d805e8da2f4005000aed542aa995816bbbf32597d9025daea32fd07733e080188d6c5c7af4ce8b7bb25d7c""50e9f3cec80e86a8f9f6d4e78a40ee20fc3c83bbbd07020f0092cdac8ffc2d52c24166d78da8ec32ebc49f815264c5ab29ab84f3b44ba75c06b80aba2966a617830efb08fd3fdda831fedeb67b7d593c661538d422e1a9fe378acf51b0f2a07f34d84624e0b90af172e5976a237a7dea10f"
"a7cbfd3203d1b4985a1af6c2d2300136226b2edf519fdd2b7b5e3fb5b0c70f2e3160305fe9dd0c09b98d522666e5100532f516bfe24d12d46b5decb4d4cbdd5fe9cd647006c1c7eba14a56262fa7a3b7b6d7b22032c1d444fe023d66b7f51004c6176f4c198a2998beab66ca70e1343187ae697e9fbfa6ca6443d617552e6b7bb73c59613ce0a7cab58545bb40636f54ccdf89c507098680f4486f821b2fb2c7baa182686b0b6f893fc9575df701196b14255b547b925387cacd5f4a762b1d4b7f713e7aebe4f75ed648b8666e60a4f8d92f752451d704e19aa102bb3dda418c80f3b4f395965ec36fd9474088ac213b38220df73c8159401ff87751bbe392e0aab031de59691a0a77ba2ab7cfbf4daf09fa4d7d61dc5b456dfdbf7a60eab671ed1f1a67fd58bceb34e981a2dc3c3bb8a7a14fc8443b47a123662d96b4df2c584856ba257f39749d51caa70b147d50c68d4aafe51ee195f1ccb99b7015de726b5f0e85bf37617138d2b24d1cbe985d8d1cbb40a52e4c57e20c799e2f5ffc0557be9d3e2bc5b99dde628c4dffd5c8704c78689e967bc870c0fec80c3c69a2453b052a46e142309fb21bcbdad7c6c5a67df409bfb9899ec58ff0973e1813f47ec6428e35a932c117b5dc70a8f5b1a9fa402d59fa45714b4bd79bc214d488939f997add26d13c147aa4d4239d8aa0e3c70994eb4a8debb7cf292b3ff59bc36f97a9acad107fcc556c24a309c4a15dab16a47a71f31324dcc8183fdaabe1fbd1cb3808c1c35c311ea51188759d4e1533d39a9547f"
"04054e2ef994c97e213669f08db02702dd8b54154e7376f256dedc67fcd3dc48f5e0be91f1f88766415d203bb4bb11c4a0f6d0888e0c98d3b8519aab741b20ced0e02a5638e40ad2ffc301318a77e57787995acea46eb8ff7edb535036c3b3781d63a02bce56499cd03ae75ba6610ef27124da36dce85ad406c82e72a0319dcd6e05dbc66523be5015036de859af45be32c664c18ad712bf09d361769be3e568d5f51c943ec2c9f74077cb9f5757de92c643a2963d69c2cc3f010908e661f3a6ce202d50d72a436319bb2337ab1babd4f2cf1bffc3de25a09dfc5cffb31c7080c5473b4ff673fdae11e64cd492a784a106beb65bfc01f9b7b97384d877d9f4440b7434240e98656703edd66279f1bd5b7cfacc8a6b511f1db9060e813f2e37a8be5de25087b0520e7729a873e125d7cba84b93cdd333e8756630d9dc9e1815832c8dba1a3c51776948b184a916ae44694664192af75a616387f47319bcd5da1d94fce857c8e76c3438ae5c7c810310058558e01b01cfb5676f1a5a5d027bcd1ec62428a82b78fdc9dfe69ae9c0301f6f2dbf1475e1cd1804d05cb04583ae62efe63a6f1d20d5c5675f4822ddb8f6f6af3d639f56839b1993dc40223341c04d829849dea53aba7d0d2a2db0a89881a2ecee4f66698aef5ebdbb3c6d65ff03cc1a00b714112f0b111e7a97ded2abde97767e0ea6e19a04f96d708d419f457022ac21715ca86305b8d5e4f45d6382c7ce8d87a8f0f2f1a18134deb9a33b334bc04697479c4f438f5e58a62a1b22b49580fd46eb4"
"946d07c505e9c778dc56524880e8fb565487da236bb1340d92dbe21516f40a05dc3cec3fa4a56bc93ce57e7be50ef2fb38c94790acb9702dbf2ed30d6b5cc1e0173ed4c19e2822e79e711a523ecdeb6742d90353c904876e66b30fba8975d35418f0ef3fc8e5621d8d243973addf756d1e4621618fcae42af188a22f47f0f8bd0e821c16c8ca2a15e35d855ccc5c9660ebd2fe8966e6b86326905267b80358328483d0045fc63af4edda4020ecba5853f005b9058dbb81092cc12ebb3205ade902cef207f783a3921225f3a8a108eccf02cc303b11a2a7db60c897f31480db900fb1a6e1ccd1ba0aa61214037e50d8eb1ac777fc4a467ff9b9ffcaf34fe721300067d33a25f9acd43888ba09cbd26e8b269fe84065b5c44fdf734545fe21689b838eec4a00860f654df33f87d0f115a6fc1ba4f0de641f06eb8a19d2e75aad7dddc6f00c8d598015541fc8bd22540b9bd3babbbf3e41212d35cfef1236edfa5746b733de738c60901b87bfc3a4c7d49eb16e7fbb7ab93083cab5c225f79ef03db6d490169b5ecd2791fef9045e017f9dac41dbaf841f050729c6adf789b8008a82e61c80cc4d06207dbfd6b2a9cdfb67ac26280fa9ecc298dac1878fac6188066b9d8637f772136edaa7f64fa491b0bb4775656f5f1a3135686205b8217a590c088cf448892e134a29ef4cc61bd76886663afb18ad504b204ea52ef61782ce9ba44fbf2e18e1d59302a1b69717375be70a295517b069d26e161c91ec3a1a782e38efa6ac867dbe488cfddcf8c200135b059a0"
"da4b4dbadda9b742b906266a879da79da144eba455fa7cc5062d326996acdddec0eba8666b0e1e6c7116a1e5f04f1e94e5d85b77b2d35deb45402a589d46734810ba3a74414eb53181f75c2f0bad61d9f4aaeb94f30a1051f5ba2b2b30f1445bfe889da81e550449d863cd5af77d49d344b63666df8206bc04686ebdaee954da5f14692bc2bf1b4b01cd6b2bfad93dcc7e5c08a5059d047f6ffe96a17c828244b234a2abf28674b15d14b735956c0a9bd438183666d6926912358edea95ac5b1b6a53784f47819a3cfd4ddb9af8e74f30e06c30e218edda9eb8207dc7cd931d6e926af59f8238225dd037b47c7a4c8af558d981a7c9a7dbae3fb66345874b27cb229f1c82b841cac0cad018e8f75d0731d5a8ea0c4d530f575de7d39d77fffde64c9d1fd87b9af3759d8a275d5a1d95f1d2d0bee007544f5c39ecf4013c80cd89821f79af3979f23dfff87d093b85b892b93bec546c5eccabf41d04c65bb571543f2312ed5e3596ec5d6bf8e57e9854164d34b48ca0ca4044a526e038332348eb801a6ff342bf25750abbcfc27e7cb5e7b026db3743b210b91d1fb688c8f16d4e40203d39272f22b5bd0f796f0fa09c90*1*b48bda800b2b3665adca330cfc990283a604b08074521335437c0ed7f2a997069c88d620b638ee988edb3f6f32be1ccd01ffb14b66b2c213d31aad92b25f66f226f2793b5e554475ce8c1a7f9541ce66c594379303ce730fd77a6591c97f5bdc400ba7e8cbd496c188c2112208778ff9699674b117631d8f385ebe45ed91dd60a"
"4a657ca39c11c135e426c03ce2219392f55c635c1736f31b1a7a892273b6d9e2867864606aa0244b82c8be1748123f0b8478baa9402521583f24ac86c11801fe340e64628e8840aee6a093b1bf25aa05c74d1c1dd8ec48321b34a53bf78347a59fa9ee394a60b845cfd4c2f5bc53541065f1c5a0d3953d9808b26ee51d17dc026ea97a2ffae213bb9818f3c4009480ac0d1774e6237546204339db20ab366a805ba8c34304070959a16639006ced72bc3ba6430ef7e5a10e9a969ee233efc23b2d99bd8d49c3615f0da372cb98e077829f07e112a5bf4357a3cdee0268bbee69d31fea1ac66564d4b1c7c303f9b41e2b23b3c7825d1ef93ae1ca1aed1607177bf92cdce38fc68325a652efd3791e922a196eba24e9816c52afeb1d84577b8a22125c1d90beb57cacff4b2a637061d69bf7f1f006d102ca2acb8471909689d36196ec300691ddb9369868f3fd577e463d8b74c7a8e95fe2fd2954136f9650f7301d4a91d9c41f647675d37c1663d4b5c50cfb175facf30598a9be1ecc2f33fd4ec7e1ecc7dffbb1180a5b224b4eb6d0e0af4ecad6cbcb2a26cb3365a723caa2eacf9404083a427d5e7e62e967875e53a8eaf4f5873627717ce802b6b66d627f3390b50c0c950dac739ab46fad66920de3fb8edb0ad0a3c93e7b3beeb90a26a1553aecf4d1f3b17b7f852cf5441bd626012ca14d8e4aa2c43ef6a272f9f6990672b2ead99d839617069117aa10f840c379fc62de5ebf5c82ed59a5a1f76b0fec724ea809411709d88fd2f986c35edf9a562e3fd"
"bb13577e2ac78bb854768ab38850daf931c1b8cc3e6f3c244fb339d288348f88f792954e90b68d664b7f941b634aec4b2d54995ba08b999d32d007e85e7e0df4dc6022b0d6d7a23ac5bcbfb2dd6cdc300fd0e4c9b4403a53a67a1c8979774833ba4b8f338b1932424b8654e02ff039967bb43c3f0661bf22f638a4caef57d50acce63e472f1316fdb93e75218d630d958c1aef855a9a7bc54122a26ff94d78e74d48aff82a485f584b8acbea147666712d35a7167dc5f92ef4059e42c28ba66fbdccaafe71efc630b8ce7fd840bd2802c2d69a4b09a11cf17c9321d9ccfb1623bfaa89786df732b405e2cf118611e9ff153dd2db2df1953fdd888f023e74e23f3a5595b81456b6ffb33e91d65f08fc8eab545412b18be47d14ab77827073286a735187bed1b12fbed879969f7d06c53041a6bd79bf6c5260342480cdb50cb617c2b4111da501ea98f368320094c5353a36df520824ec52dd15e818bec43d80b537c0d809845645429ea4f7635528cb7b8149924053a76d3c05b0c31e5970eaa014708c64c902be5272513111a73e682ed9f473c87b964a4957934424bf957d1e86c6c90a967a8643eec2b65f08d4c91252cb9663a4e5aa4ad9180166ac633c0e5f5170656373489126e6be09e9e8bd6f226f0833bd392884dfce749d68ad51b1f0e0ef5fc5a8876e54558e191abcfc4632409547a8a5c46c2b546db07ba324b4d327ebe86f87dac27b64d6e0c8250019c1114a4f8fa39523dc3f5d597aa33af245ecca15ea8cbef7604eca5ed804ac4f57c12"
"6e335763925b88128b7289566270a5d7d1602481647f74d71bc1eafd0913851bcf07047dfef51b41fc02215d136885e647001f9f47546e9ea6ba0beab1d8a276cf9b85d780c05d4031f55d35d54c56f7fceeae9d62c58e7e928e591c2d6b1d14391f829f3e30bda6132bc513227cfad357be2c6f045bad7be72d01ceccd059327a72ce044edd534a5ddf71831bf07ebe84806feb621a5b8d71f4a608878e5e5daf3f8b4b3eda75f74f03d1ae5aebd029f037f66253f542aa06cd6c29ac5ed27ecdc7641fb6d54c98e71491772944303d3b6be683ac44b7bda5d49209133ff564cee31912b8e024cf628e0719522b11eff2e32874818f9a0ebde427657558a72943d6eb25c4b9d523336f37453af157035a3bc5ffd13847a928450d4e01f2ce7ca51d456939363c3e5a69b0d25311682c7b266cf86d12b63dcd322be77594c7f929a77467566a8d86a7d2b583b95f76626244738251fa762e0b2825c7668d6dde8ac5579c1a06318e5c5a6b2b1bc93bce6cd4853c50b6662482549290b15500722e3d6772c7541e3c864291dcbed84496dcc9ff4dddc974aa8b17b7ccea56c856f24ee2277a391c3c0c2c5584111ed24fe64e478e3c4d22380b8183222570fa3c70d29230aa21fd21808baacfd41e2430fed7c3316235e6b4c2c3331ee36d9e5c94ddbd73b351897cab7ede8a7c417c753d8023cf46694acbc9aa6ca556da7de108005330704cf54b1ec7bf7df02e36cd736237316b3523bca0a53a2472e68d30d95b1eb49282b27530bc69cd154b7a4dce75d"
"a3efc65c12ce45de7a63632d340fc61a1789129df1554813a15c9a6ad101c07363ba8d967b70ae1767f8927440678bab989dbe994922779c3c277055a35bf12d6909caba8a4b6bec7f49dd32426d858e53164c8db77bd1b9321b31e6c1ad1e92596bec4ad39d5b6944c7585a5ad0c6f83f64727a7f6397f784d865ba3b9c85343f3a2828a0e71d75f19036ea0f17e265750d6a01513be2bee0bd0a837996971b87305dafda12679bc118a1df188888396e10074254e4aeecb6801e00e8f3ade2889b65aba9e29d2d146001740116c893df1899175dbbf88ec175216df3d93a88fb6957adf64a3849e26194edb91188c0373fdf9be85a520c173817ccac3e4e9c88ce0bd9448be3f6cf3eb92b9337ecf2e63db5887e1113ee31529c373e83ec02012ddaa8812fa5c6b8be8febe29d0c286fe03832aee79018fdbaedd8bec03345c05faa1231ad148bf4531679738a537ec490bdcf78a0d9dd13e6988e360273c388b91006a66176c93caf3594cb098d5f4287a37d79b636eb566eaeb73ef76a4a480fad73caad3378d17a9395bf71c6c43f643b04b4f1773939329470e51053467b67ed8ac0807b8806d26d16f6f4fc15b3f3cc197d24ea26418cf970a5e7009bd871aff96be823fd80efe1adcaa882c168692b53bdb47effc666a1768d04d0d8bf199d36604e82b72fcce53e86d063c347aeecc79a846f8e12cdec679b857f85a75fe59a1338a411950459443b3fec6511dcc78d5bb6dc60accd6013400c0ef71f19d7713b37777a75e96d0d341d416c9cd94"
"7e3c442f6ddb31daec66bd96ca31b01d2dfb99d312a651ba5ec1765354de39d7aa4bb096ce7edbd93829d8ee2b7e3ff364f5d87f653a541f033db6c3266a03046f8612ad8d56a1c78912c9774c86a8d7e2eaa7f3bb1033470789ac2c32bd3c2ba1269bb01b176b167688f8fbe1f6094c3e2736bdc1cb1733364011681be98047cdad7d998241e121e6508cfd665c42b30f22bc442f940b5c7d93659f59abcb17aab1f28a02d0b59239f148211c525dd209cb932c54f24fa8a9541f0eab28b4c8df80845058e71e5447959bfc7f7d28e15542523410bc162f566875ed6d9d4fba519000b8c5d90f894f2bc74dc8307e26d4e0a9b418487d7470fbd64e97e660a3038a10a26a80e7cca09a3280ce3c87d07befd6f65127096d6075a18f30906828cee1f8b968dd3247210041078cf6d28f05977e5c172a9ecd83167873881e0ffcc56615ad0d64b0189ed8d559e43cccb1e2f8805df7156cb11f5df9dfbc067fce9fb3ee3230e28edfcf98741b9883f9f0f42913cc2be1036a0590107c69a9fadd4c9fc39df872f0db664ea7172fd72e0ad756be95417487d0c2bb38061c52124dcb2545f15a5bfd39d950b5878a067945733d8b1dc37cb85dd9393c98b0751c83d8e848fd1bd3ad243f6a8af7a8cb8cda7e1dc05324fa3932423fea0428131646534e74398f1604146da26a615045ee49ae2df3c8fcd16da64672845a946de4c26c1417c534a2b62a408a8c30c2e4f73ee44571259b628249c9e3f65e7b8d22002a170e7e53dc7c4cdc0073491db2cd6de20cd"
"df07501ff08378ac1cfe3ef479491f3fc475f8aa1fb188706c264e276da3e0399e2bc17cffd6ad0ff94d2d3b9a3b46e8c1472c41fc1c002daa76634f94b3bdf8560cb3241352c6f1be21fee70cd54a1d96e31d71ef99589b93e7ca8d026abcb4a4fbfc8c0f57d59a6d9e760f02fd0a569702da7f59da495c2dd7f92d60fb3220cd7932a032d40ed29deaa5fe971128c6503eb9d1029a23ed6dc4fd5e8c5cf0347841424d60a5a07a9781d08c85222cf7241d199609762488332a6eafbc08cec42c876da9bd3fa287bca12f71b6e33c4453afb970b425a45b9baa9aa69ebb3907e06e6610f100b00c86752b2c106c2e0b71963f1933d315ceef89132c7744149db0c28f62b3d7b43d570d1f5c40bf4b7470b3b8de30b0d756b8326542743f2fa5cf3eff226b6a658ecbe44dc9a0e59f073f999d8c3340ba30ecff6f2fa4f3815f0d4c665b5109ce8984971e5cbec806888c2acdf73d2a330de9e5133787aa4950d08759f4cfcb55ec8efb43d421cf3a9f601a096677eb95f61e352a9adae7c0b971fb455f170c7ed95329b699d6e93f024786507e2e0acbeffb452c26d8c041cb88316d09a08af54ec48451f9bb685a23910e97ac82bb41f19f6b42fa10cfb75f9fa8edd61653c14a27b51544e3fb28009aab76d060135df2d097fd4c2f2e63dba1192c648215fdd1dace4824d71e038e23184ede7f61baefd747aed93b9807d0b3b7b4f7cb9eb171d1ba241b19cf1c74781eaaaca99a458253777522dedcf3d1db6bd4eec4459e59ad635904201b5d91c77bb"
"b6e91f00f5a6f29794b35afde3dcd850f08ac5da097549ded05159567e9f7a023e08e49253766c0e151852714987201e90df675368ee638a947b7e6dc20bedf60656971170afe2d453662685dc1ceef8436ca8071680d0346239b41a6825839e9d5af12f9574d51b4672c5fa7f84bac497c8ba5fad2c10fbffe5ee713090b903d7723cd28c1b189a47c6a9fe9a88d0881dd60d1970c6e8a6d812bbd089c10841e5ced1417bef41f400118fa990d157bca93267d407989de017bd48f0231d43b9487526072e2755461274b3f5bf27847dda36c652a2b1fdd3815fd4ab93863426b31ecd1e6a9094dd2ed0190f8138e650dd2174fcc6b6ab1b8b91cc8020f2dcbb14855e7dd0bc1b5a01f55f81c0476daf1684cc4e72a68327120730ae92c45ab4e447c4ee900d61f79681667eec61343e4eebdd65c5b38a1ba5e3478f4d2f59d184ec39aca445a0f6edaa6840f04bfc19acf23db4507609cbdb44514b36aa5ef4ffe46577b711d1028970916eae919f1b4913d5894a24117cd7cc1aa8965840865554ce663af470455c0f756c795fb29eec04b727b12f7f3796f572ca2ec1e8771a88f68999e16b2acb235a7d9146f85f2be5a034babc3bdde750eb7895396d4777c144aee517a07310dcc8c9ce0ead93abb7f1eb4e34ed5036361d682c97eac1ad7c8158035e40a713f0f2e6f6e677d4b11ecc97e101a5b48420435dd218846ae622b416faeba7e0003bbbece71c2aa046715173b408c8ab2888b0b5dc4c34683f83ba9a83795f86122e6d80597d3a952a44f"
"5a1edb6f294a0ceebefc3cb54db814cf91fe450ed4c71d0b4091a1fc7474", "goodjob"},
{NULL}
};
#define MIN(a, b) (((a) > (b)) ? (b) : (a))
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
#define OCL_CONFIG "dmg"
#define STEP 0
#define SEED 256
// This file contains auto-tuning routine(s). Has to be included after formats definitions.
#include "opencl-autotune.h"
#include "memdbg.h"
/* Labels printed by the shared auto-tune code for the three profiling
   events set up in crypt_all(): host->device transfer, kernel run,
   device->host transfer. */
static const char * warn[] = {
	"xfer: ", ", crypt: ", ", xfer: "
};
/* ------- Helper functions ------- */
/* Ask the shared auto-tune helper for the largest local work-group size
 * the crypt kernel can use on the current device. */
static size_t get_task_max_work_group_size()
{
	size_t max_lws;

	max_lws = autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel);
	return max_lws;
}
/* Upper bound on global work size for the auto-tuner. */
static size_t get_task_max_size()
{
	/* 0 means this format imposes no limit of its own. */
	return 0;
}
/* Default local work size when the user did not request one:
 * GPUs get 64; CPU devices get 8 on Intel's runtime, otherwise 1. */
static size_t get_default_workgroup()
{
	if (!cpu(device_info[gpu_id]))
		return 64;

	return (get_platform_vendor_id(platform_id) == DEV_INTEL) ? 8 : 1;
}
/* Allocate host arrays and device buffers sized for 'gws' candidates and
 * bind the kernel arguments.  Called (and re-called) by the auto-tuner
 * for every global work size it probes; release_clobj() is the inverse. */
static void create_clobj(size_t gws, struct fmt_main *self)
{
	insize = sizeof(dmg_password) * gws;
	outsize = sizeof(dmg_hash) * gws;
	settingsize = sizeof(dmg_salt);
	cracked_size = sizeof(*cracked) * gws;

	/* calloc so password length fields and cracked flags start zeroed */
	inbuffer = mem_calloc(insize);
	outbuffer = mem_alloc(outsize);
	cracked = mem_calloc(cracked_size);

	/// Allocate memory
	mem_in =
	    clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL,
	    &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem in");
	mem_setting =
	    clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize,
	    NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem setting");
	mem_out =
	    clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL,
	    &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem out");

	/* Kernel signature: (in, out, setting) */
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in),
		&mem_in), "Error while setting mem_in kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out),
		&mem_out), "Error while setting mem_out kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting),
		&mem_setting), "Error while setting mem_salt kernel argument");
}
/* Release the device buffers and free the matching host arrays created
 * by create_clobj(). */
static void release_clobj(void)
{
	HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
	HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting");
	HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");

	MEM_FREE(inbuffer);
	MEM_FREE(outbuffer);
	MEM_FREE(cracked);
}
/* Format teardown: buffers first, then kernel and program. */
static void done(void)
{
	release_clobj();

	HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
	HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");
}
/* One-time format setup: compile the PBKDF2-HMAC-SHA1 kernel with the
 * buffer geometry baked in as -D defines, then hand control to the
 * shared auto-tuner (create_clobj/release_clobj are its callbacks).
 * Note: sizeof(outbuffer->v) is a compile-time size only — outbuffer is
 * not allocated until create_clobj() runs. */
static void init(struct fmt_main *self)
{
	char build_opts[64];

	snprintf(build_opts, sizeof(build_opts),
	         "-DKEYLEN=%d -DSALTLEN=%d -DOUTLEN=%d",
	         PLAINTEXT_LENGTH,
	         (int)sizeof(currentsalt.salt),
	         (int)sizeof(outbuffer->v));
	opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_unsplit_kernel.cl",
	            gpu_id, build_opts);

	crypt_kernel = clCreateKernel(program[gpu_id], "derive_key", &cl_error);
	HANDLE_CLERROR(cl_error, "Error creating kernel");

	// Initialize openCL tuning (library) for this format.
	opencl_init_auto_setup(SEED, 0, NULL,
	                       warn, 1, self, create_clobj, release_clobj,
	                       sizeof(dmg_password), 0);

	// Auto tune execution from shared/included code.
	autotune_run(self, 1, 0, 1000);
}
/*
 * Structural validation of a "$dmg$" ciphertext line.
 * Layout: $dmg$headerver*... with v1 and v2 field lists differing after
 * the version.  Works on a strdup'd copy because strtok() is destructive.
 * Returns 1 if the field structure and lengths look sane, 0 otherwise.
 *
 * Fixes vs. the original: every atoi()'d length now also rejects
 * negative values; the ivlen test reuses 'res' instead of re-parsing
 * with a second atoi(p); and data_size is range-checked before it is
 * used in the hex-length comparison.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *keeptr;
	char *p;
	int headerver;
	int res;

	if (strncmp(ciphertext, "$dmg$", 5) != 0)
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += 5; /* skip over "$dmg$" marker */
	if ((p = strtok(ctcopy, "*")) == NULL)
		goto err;
	headerver = atoi(p);
	if (headerver == 2) {
		if ((p = strtok(NULL, "*")) == NULL) /* salt len */
			goto err;
		res = atoi(p);
		if (res < 0 || res > 20)
			goto err;
		if ((p = strtok(NULL, "*")) == NULL) /* salt */
			goto err;
		if (strlen(p) != res * 2)
			goto err;
		if ((p = strtok(NULL, "*")) == NULL) /* ivlen */
			goto err;
		res = atoi(p);
		if (res < 0 || res > 32)
			goto err;
		if ((p = strtok(NULL, "*")) == NULL) /* iv */
			goto err;
		if (strlen(p) != res * 2)
			goto err;
		if ((p = strtok(NULL, "*")) == NULL) /* encrypted_keyblob_size */
			goto err;
		res = atoi(p);
		if (res < 0 || res > 128)
			goto err;
		if ((p = strtok(NULL, "*")) == NULL) /* encrypted keyblob */
			goto err;
		if (strlen(p) != res * 2)
			goto err;
		if ((p = strtok(NULL, "*")) == NULL) /* chunk number */
			goto err;
		if ((p = strtok(NULL, "*")) == NULL) /* data_size */
			goto err;
		res = atoi(p);
		if (res < 0 || res > 8192) /* bound it before using as a length */
			goto err;
		if ((p = strtok(NULL, "*")) == NULL) /* chunk */
			goto err;
		if (strlen(p) != res * 2)
			goto err;
		if ((p = strtok(NULL, "*")) == NULL) /* scp */
			goto err;
		res = atoi(p);
		if (res == 1) {
			if ((p = strtok(NULL, "*")) == NULL) /* zchunk */
				goto err;
			if (strlen(p) != 4096 * 2)
				goto err;
		}
	}
	else if (headerver == 1) {
		if ((p = strtok(NULL, "*")) == NULL) /* salt len */
			goto err;
		res = atoi(p);
		if (res < 0 || res > 20)
			goto err;
		if ((p = strtok(NULL, "*")) == NULL) /* salt */
			goto err;
		if (strlen(p) != res * 2)
			goto err;
		if ((p = strtok(NULL, "*")) == NULL) /* len_wrapped_aes_key */
			goto err;
		res = atoi(p);
		if (res < 0 || res > 296)
			goto err;
		if ((p = strtok(NULL, "*")) == NULL) /* wrapped_aes_key */
			goto err;
		if (strlen(p) != res * 2)
			goto err;
		if ((p = strtok(NULL, "*")) == NULL) /* len_hmac_sha1_key */
			goto err;
		res = atoi(p);
		if (res < 0 || res > 300)
			goto err;
		if ((p = strtok(NULL, "*")) == NULL) /* hmac_sha1_key */
			goto err;
		if (strlen(p) != res * 2)
			goto err;
	}
	else
		goto err;
	MEM_FREE(keeptr);
	return 1;
err:
	MEM_FREE(keeptr);
	return 0;
}
/*
 * Parse a validated "$dmg$" line into the format's salt structure.
 * Returns a pointer to a static struct custom_salt (the caller copies
 * SALT_SIZE bytes out of it, per the format API convention).
 *
 * Fix vs. the original: the static 'cs' is now zeroed on every call.
 * Previously a shorter salt/iv/keyblob left stale bytes from the
 * previously parsed ciphertext in the returned blob, which can make
 * otherwise-equal salts compare unequal.
 */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	int i;
	char *p;
	static struct custom_salt cs;

	memset(&cs, 0, sizeof(cs));
	ctcopy += 5; /* skip "$dmg$" */
	p = strtok(ctcopy, "*");
	cs.headerver = atoi(p);
	if (cs.headerver == 2) {
		p = strtok(NULL, "*");
		cs.saltlen = atoi(p);
		p = strtok(NULL, "*");
		/* hex-decode: two ASCII chars per byte */
		for (i = 0; i < cs.saltlen; i++)
			cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
				+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
		p = strtok(NULL, "*");
		cs.ivlen = atoi(p);
		p = strtok(NULL, "*");
		for (i = 0; i < cs.ivlen; i++)
			cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
				+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
		p = strtok(NULL, "*");
		cs.encrypted_keyblob_size = atoi(p);
		p = strtok(NULL, "*");
		for (i = 0; i < cs.encrypted_keyblob_size; i++)
			cs.encrypted_keyblob[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
				+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
		p = strtok(NULL, "*");
		cs.cno = atoi(p);
		p = strtok(NULL, "*");
		cs.data_size = atoi(p);
		p = strtok(NULL, "*");
		for (i = 0; i < cs.data_size; i++)
			cs.chunk[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
				+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
		p = strtok(NULL, "*");
		cs.scp = atoi(p);
		if (cs.scp == 1) {
			/* optional second chunk (very first block of the DMG) */
			p = strtok(NULL, "*");
			for (i = 0; i < 4096; i++)
				cs.zchunk[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
					+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
		}
		/* trailing optional field: iteration count (default 1000) */
		if ((p = strtok(NULL, "*")))
			cs.iterations = atoi(p);
		else
			cs.iterations = 1000;
	}
	else {
		p = strtok(NULL, "*");
		cs.saltlen = atoi(p);
		p = strtok(NULL, "*");
		for (i = 0; i < cs.saltlen; i++)
			cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
				+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
		p = strtok(NULL, "*");
		cs.len_wrapped_aes_key = atoi(p);
		p = strtok(NULL, "*");
		for (i = 0; i < cs.len_wrapped_aes_key; i++)
			cs.wrapped_aes_key[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
				+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
		p = strtok(NULL, "*");
		cs.len_hmac_sha1_key = atoi(p);
		p = strtok(NULL, "*");
		for (i = 0; i < cs.len_hmac_sha1_key; i++)
			cs.wrapped_hmac_sha1_key[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
				+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
		if ((p = strtok(NULL, "*")))
			cs.iterations = atoi(p);
		else
			cs.iterations = 1000;
	}
	if (cs.iterations == 0)
		cs.iterations = 1000;
	MEM_FREE(keeptr);
	return (void *)&cs;
}
/* Install the current salt: remember it host-side for the CPU
 * post-processing in hash_plugin_check_hash(), and push the PBKDF2
 * parameters (salt, outlen, iterations) to the device-side settings
 * buffer.  Fix: "&currentsalt" had been corrupted to the mojibake
 * "¤tsalt" (HTML entity damage), which does not compile. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
	memcpy((char*)currentsalt.salt, cur_salt->salt, 20);
	currentsalt.length = 20;
	currentsalt.outlen = 32;
	currentsalt.iterations = cur_salt->iterations;

	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting,
		CL_FALSE, 0, settingsize, &currentsalt, 0, NULL, NULL),
	    "Copy setting to gpu");
}
#undef set_key
/* Store one candidate password, truncated to PLAINTEXT_LENGTH.
 * Fix: the length is computed in a size_t before clamping; the original
 * stored strlen() into a uint8_t first, so keys of 256+ bytes wrapped
 * around and bypassed the PLAINTEXT_LENGTH clamp. */
static void set_key(char *key, int index)
{
	size_t length = strlen(key);

	if (length > PLAINTEXT_LENGTH)
		length = PLAINTEXT_LENGTH;
	inbuffer[index].length = length;
	memcpy(inbuffer[index].v, key, length);
}
/* Return the plaintext stored at 'index' as a NUL-terminated string.
 * Returns a pointer to a static buffer, per the format API convention. */
static char *get_key(int index)
{
	static char ret[PLAINTEXT_LENGTH + 1];
	uint8_t length = inbuffer[index].length;

	memcpy(ret, inbuffer[index].v, length);
	ret[length] = '\0';
	return ret;
}
/*
 * Unwrap a 3DES-EDE-wrapped key blob (headerver 1 images).
 * Sequence: 3DES-CBC decrypt with a fixed IV, reverse the plaintext
 * byte order, then decrypt again using the first 8 reversed bytes as IV.
 * NOTE(review): the fixed IV 4a dd a2 2c 79 e8 21 05 and this
 * decrypt/reverse/decrypt structure match the RFC 3217 triple-DES key
 * wrap — confirm against the vfdecrypt/DMG references.
 * Returns 0 when both decryptions pass their padding checks, -1 on
 * failure; the caller only uses the success/failure result, the
 * unwrapped material in CEKICV is discarded.
 */
static int apple_des3_ede_unwrap_key1(unsigned char *wrapped_key, int wrapped_key_len, unsigned char *decryptKey)
{
	EVP_CIPHER_CTX ctx;
	unsigned char TEMP1[sizeof(cur_salt->wrapped_hmac_sha1_key)];
	unsigned char TEMP2[sizeof(cur_salt->wrapped_hmac_sha1_key)];
	unsigned char CEKICV[sizeof(cur_salt->wrapped_hmac_sha1_key)];
	/* fixed IV for the first (outer) pass */
	unsigned char IV[8] = { 0x4a, 0xdd, 0xa2, 0x2c, 0x79, 0xe8, 0x21, 0x05 };
	int outlen, tmplen, i;

	/* Pass 1: decrypt the wrapped blob with the fixed IV. */
	EVP_CIPHER_CTX_init(&ctx);
	EVP_DecryptInit_ex(&ctx, EVP_des_ede3_cbc(), NULL, decryptKey, IV);
	if (!EVP_DecryptUpdate(&ctx, TEMP1, &outlen, wrapped_key, wrapped_key_len)) {
		goto err;
	}
	if (!EVP_DecryptFinal_ex(&ctx, TEMP1 + outlen, &tmplen)) {
		goto err;
	}
	outlen += tmplen;
	EVP_CIPHER_CTX_cleanup(&ctx);

	/* Reverse the recovered plaintext. */
	for (i = 0; i < outlen; i++) {
		TEMP2[i] = TEMP1[outlen - i - 1];
	}

	/* Pass 2: bytes 0-7 of the reversed data act as IV for the rest.
	 * Note 'outlen' is reused: it is the pass-1 plaintext length going
	 * in, and is overwritten with the pass-2 output length. */
	EVP_CIPHER_CTX_init(&ctx);
	EVP_DecryptInit_ex(&ctx, EVP_des_ede3_cbc(), NULL, decryptKey, TEMP2);
	if (!EVP_DecryptUpdate(&ctx, CEKICV, &outlen, TEMP2 + 8, outlen - 8)) {
		goto err;
	}
	if (!EVP_DecryptFinal_ex(&ctx, CEKICV + outlen, &tmplen)) {
		goto err;
	}
	outlen += tmplen;
	EVP_CIPHER_CTX_cleanup(&ctx);
	return 0;

err:
	EVP_CIPHER_CTX_cleanup(&ctx);
	return -1;
}
/*
 * Decide whether one PBKDF2-derived key decrypts the DMG.
 * headerver 1: success = both wrapped keys unwrap cleanly.
 * headerver 2: decrypt the keyblob with 3DES-CBC to recover the AES and
 * HMAC-SHA1 keys, derive the per-chunk IV from HMAC(chunk number),
 * AES-CBC-decrypt the data chunk, and look for plausible-plaintext
 * signatures (primarily a run of 8 NUL bytes).
 * Returns 1 on a (probable) hit, 0 otherwise.
 */
static int hash_plugin_check_hash(unsigned char *derived_key)
{
	unsigned char hmacsha1_key_[20];
	unsigned char aes_key_[32];
	int ret = 0;

	if (cur_salt->headerver == 1) {
		/* v1: both key unwraps succeeding is the pass criterion. */
		if ((apple_des3_ede_unwrap_key1(cur_salt->wrapped_aes_key, cur_salt->len_wrapped_aes_key, derived_key) == 0) && (apple_des3_ede_unwrap_key1(cur_salt->wrapped_hmac_sha1_key, cur_salt->len_hmac_sha1_key, derived_key) == 0)) {
			return 1;
		}
	}
	else {
		EVP_CIPHER_CTX ctx;
		unsigned char TEMP1[sizeof(cur_salt->wrapped_hmac_sha1_key)];
		int outlen, tmplen;
		AES_KEY aes_decrypt_key;
		unsigned char outbuf[8192 + 1];
		unsigned char outbuf2[4096 + 1];
		unsigned char iv[20];
		HMAC_CTX hmacsha1_ctx;
		int mdlen;
#ifdef DMG_DEBUG
		unsigned char *r;
#endif
		const char nulls[8] = { 0 };

		/* Decrypt the keyblob with the derived 3DES key. */
		EVP_CIPHER_CTX_init(&ctx);
		EVP_DecryptInit_ex(&ctx, EVP_des_ede3_cbc(), NULL, derived_key, cur_salt->iv);
		if (!EVP_DecryptUpdate(&ctx, TEMP1, &outlen,
		    cur_salt->encrypted_keyblob, cur_salt->encrypted_keyblob_size)) {
			return 0;
		}
		/* Final result deliberately ignored: a wrong key usually shows
		 * up as bad padding, but we rely on the plaintext tests below. */
		EVP_DecryptFinal_ex(&ctx, TEMP1 + outlen, &tmplen);
		EVP_CIPHER_CTX_cleanup(&ctx);
		outlen += tmplen;
		/* Keyblob layout: AES key first 32 bytes; HMAC key first 20. */
		memcpy(aes_key_, TEMP1, 32);
		memcpy(hmacsha1_key_, TEMP1, 20);

		/* Per-chunk IV = first 16 bytes of HMAC-SHA1(chunk number). */
		HMAC_CTX_init(&hmacsha1_ctx);
		HMAC_Init_ex(&hmacsha1_ctx, hmacsha1_key_, 20, EVP_sha1(), NULL);
		HMAC_Update(&hmacsha1_ctx, (void *) &cur_salt->cno, 4);
		HMAC_Final(&hmacsha1_ctx, iv, (unsigned int *) &mdlen);
		HMAC_CTX_cleanup(&hmacsha1_ctx);
		/* 48-byte keyblob implies AES-128, otherwise AES-256. */
		if (cur_salt->encrypted_keyblob_size == 48)
			AES_set_decrypt_key(aes_key_, 128, &aes_decrypt_key);
		else
			AES_set_decrypt_key(aes_key_, 128 * 2, &aes_decrypt_key);
		AES_cbc_encrypt(cur_salt->chunk, outbuf, cur_salt->data_size, &aes_decrypt_key, iv, AES_DECRYPT);

		/* 8 consecutive nulls */
		if (memmem(outbuf, cur_salt->data_size, (void*)nulls, 8)) {
#ifdef DMG_DEBUG
			if (!bench_running)
				fprintf(stderr, "NULLS found!\n\n");
#endif
			ret = 1;
		}

/* These tests seem to be obsoleted by the 8xNULL test */
#ifdef DMG_DEBUG
		/* </plist> is a pretty generic signature for Apple */
		if (memmem(outbuf, cur_salt->data_size, (void*)"</plist>", 8)) {
			if (!bench_running)
				fprintf(stderr, "</plist> found!\n\n");
			ret = 1;
		}
		/* Journalled HFS+ */
		if (memmem(outbuf, cur_salt->data_size, (void*)"jrnlhfs+", 8)) {
			if (!bench_running)
				fprintf(stderr, "jrnlhfs+ found!\n\n");
			ret = 1;
		}
		/* Handle compressed DMG files, CMIYC 2012 and self-made
		   samples. Is this test obsoleted by the </plist> one? */
		if ((r = memmem(outbuf, cur_salt->data_size, (void*)"koly", 4))) {
			unsigned int *u32Version = (unsigned int *)(r + 4);

			if (HTONL(*u32Version) == 4) {
				if (!bench_running)
					fprintf(stderr, "koly found!\n\n");
				ret = 1;
			}
		}
		/* Handle VileFault sample images */
		if (memmem(outbuf, cur_salt->data_size, (void*)"EFI PART", 8)) {
			if (!bench_running)
				fprintf(stderr, "EFI PART found!\n\n");
			ret = 1;
		}
		/* Apple is a good indication but it's short enough to
		   produce false positives */
		if (memmem(outbuf, cur_salt->data_size, (void*)"Apple", 5)) {
			if (!bench_running)
				fprintf(stderr, "Apple found!\n\n");
			ret = 1;
		}
#endif /* DMG_DEBUG */

		/* Second buffer test. If present, *this* is the very first block of the DMG */
		if (cur_salt->scp == 1) {
			int cno = 0;

			/* Same IV derivation, but for chunk number 0. */
			HMAC_CTX_init(&hmacsha1_ctx);
			HMAC_Init_ex(&hmacsha1_ctx, hmacsha1_key_, 20, EVP_sha1(), NULL);
			HMAC_Update(&hmacsha1_ctx, (void *) &cno, 4);
			HMAC_Final(&hmacsha1_ctx, iv, (unsigned int *) &mdlen);
			HMAC_CTX_cleanup(&hmacsha1_ctx);
			if (cur_salt->encrypted_keyblob_size == 48)
				AES_set_decrypt_key(aes_key_, 128, &aes_decrypt_key);
			else
				AES_set_decrypt_key(aes_key_, 128 * 2, &aes_decrypt_key);
			AES_cbc_encrypt(cur_salt->zchunk, outbuf2, 4096, &aes_decrypt_key, iv, AES_DECRYPT);

			/* 8 consecutive nulls */
			if (memmem(outbuf2, 4096, (void*)nulls, 8)) {
#ifdef DMG_DEBUG
				if (!bench_running)
					fprintf(stderr, "NULLS found in alternate block!\n\n");
#endif
				ret = 1;
			}
#ifdef DMG_DEBUG
			/* This test seem to be obsoleted by the 8xNULL test */
			if (memmem(outbuf2, 4096, (void*)"Press any key to reboot", 23)) {
				if (!bench_running)
					fprintf(stderr, "MS-DOS UDRW signature found in alternate block!\n\n");
				ret = 1;
			}
#endif /* DMG_DEBUG */
		}

#ifdef DMG_DEBUG
		/* Write block as hex, strings or raw to a file. */
		if (ret && !bench_running) {
#if DMG_DEBUG == 4
			int fd;

			if ((fd = open("dmg.debug.main", O_RDWR | O_CREAT | O_TRUNC, 0660)) == -1)
				perror("open()");
			else {
				if (flock(fd, LOCK_EX))
					perror("flock()");
				if ((write(fd, outbuf, cur_salt->data_size) == -1))
					perror("write()");
				if (cur_salt->scp == 1)
					if ((write(fd, outbuf2, 4096) == -1))
						perror("write()");
				if (close(fd))
					perror("close");
			}
#endif
#if DMG_DEBUG == 3
			dump_stuff(outbuf, cur_salt->data_size);
			if (cur_salt->scp == 1) {
				fprintf(stderr, "2nd block:\n");
				dump_stuff(outbuf2, 4096);
			}
#endif
#if DMG_DEBUG == 2
			dump_text(outbuf, cur_salt->data_size);
			if (cur_salt->scp == 1) {
				fprintf(stderr, "2nd block:\n");
				dump_text(outbuf2, 4096);
			}
#endif
		}
#endif /* DMG_DEBUG */
	}

	return ret;
}
/* Run PBKDF2 for 'count' candidates on the GPU, then post-process each
 * derived key on the CPU (optionally OpenMP-parallel) to decide hits.
 * Results are recorded in cracked[] / any_cracked for cmp_*(). */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index;

	/* Round GWS up to a multiple of the local work size. */
	global_work_size = (((count + local_work_size - 1) / local_work_size) * local_work_size);

	/* Reset results from the previous batch. */
	if (any_cracked) {
		memset(cracked, 0, cracked_size);
		any_cracked = 0;
	}

	/// Copy data to gpu
	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0,
		insize, inbuffer, 0, NULL, multi_profilingEvent[0]),
	    "Copy data to gpu");

	/// Run kernel
	HANDLE_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1,
		NULL, &global_work_size, &local_work_size, 0, NULL,
		multi_profilingEvent[1]), "Run kernel");

	/// Read the result back (blocking read also syncs the queue)
	HANDLE_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0,
		outsize, outbuffer, 0, NULL, multi_profilingEvent[2]), "Copy result back");

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++)
		if (hash_plugin_check_hash((unsigned char*)outbuffer[index].v) == 1)
		{
			cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
			any_cracked |= 1;
		}
	return count;
}
/* Batch-level fast reject: crypt_all() already decided every candidate,
 * so one flag answers for the whole batch. */
static int cmp_all(void *binary, int count)
{
	return any_cracked;
}
/* Per-candidate verdict recorded by crypt_all(). */
static int cmp_one(void *binary, int index)
{
	return cracked[index];
}
/* Decrypt-and-inspect format with no stored binary: a cmp_one() hit is
 * already final, so always confirm. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
#if FMT_MAIN_VERSION > 11
/* Tunable-cost report for the format API: the PBKDF2 iteration count. */
static unsigned int iteration_count(void *salt)
{
	struct custom_salt *cs = (struct custom_salt *)salt;

	return (unsigned int)cs->iterations;
}
#endif
/* Format descriptor wiring the functions above into John's format API. */
struct fmt_main fmt_opencl_dmg = {
	{ /* fmt_params */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#ifdef DMG_DEBUG
		/* debug builds may report extra candidates, so not "exact" */
		FMT_NOT_EXACT |
#endif
		FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
		{
			"iteration count",
		},
#endif
		dmg_tests
	}, { /* fmt_methods */
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
#if FMT_MAIN_VERSION > 11
		{
			iteration_count,
		},
#endif
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
|
simd-9.c | /* { dg-do run } */
/* { dg-additional-options "-msse2" { target sse2_runtime } } */
/* { dg-additional-options "-mavx" { target avx_runtime } } */
/* Abort on any mismatch detected by the checks below.  */
extern void abort ();

/* 32x32 matrix, 32-byte aligned so the "aligned(a : 32)" simd clauses
   below are valid.  */
int a[32][32] __attribute__((aligned (32))) = { { 1 } };

struct S { int s; };

/* User-defined reductions: "+" overridden for struct S, and a named
   reduction "foo" declared for both struct S and int.  */
#pragma omp declare reduction (+:struct S:omp_out.s += omp_in.s)
#pragma omp declare reduction (foo:struct S:omp_out.s += omp_in.s)
#pragma omp declare reduction (foo:int:omp_out += omp_in)
/* simd collapse(2) loop combining: aligned clause, lastprivate on both
   collapsed iterators, the overridden "+" reduction on a struct, and the
   named "foo" reduction on a struct and an int at once.  Taking the
   addresses of i and j exercises addressable lastprivate copies.
   Returns the sum of all elements of a.  */
__attribute__((noinline, noclone)) int
foo (void)
{
  int i, j, u = 0;
  struct S s, t;
  s.s = 0; t.s = 0;
#pragma omp simd aligned(a : 32) lastprivate (i, j) reduction(+:s) reduction(foo:t, u) collapse(2)
  for (i = 0; i < 32; i++)
    for (j = 0; j < 32; j++)
      {
	int *q = &i;
	int *r = &j;
	int x = a[i][j];
	s.s += x;
	t.s += x;
	u += x;
      }
  /* All three reductions must agree, and lastprivate must leave the
     final iteration values in i and j.  */
  if (t.s != s.s || u != s.s || i != 32 || j != 32)
    abort ();
  return s.s;
}
/* Same computation as foo(), but the pragma uses the compact
   no-whitespace clause syntax — deliberately kept byte-for-byte to test
   the parser accepts it.  */
__attribute__((noinline, noclone)) int
bar (void)
{
  int i, j, u = 0;
  struct S s, t;
  s.s = 0; t.s = 0;
#pragma omp simd aligned(a:32)reduction(+:s)reduction(foo:t,u)collapse(2)
  for (i = 0; i < 32; i++)
    for (j = 0; j < 32; j++)
      {
	int *q = &i;
	int *r = &j;
	int x = a[i][j];
	s.s += x;
	t.s += x;
	u += x;
      }
  if (t.s != s.s || u != s.s || i != 32 || j != 32)
    abort ();
  return s.s;
}
/* Driver: fill the global array with a known pattern, then require both
 * loop variants to produce the precomputed reference sum (19456). */
int
main ()
{
  int row, col;
  for (row = 0; row < 32; row++)
    for (col = 0; col < 32; col++)
      a[row][col] = col + (row / 4);
  if (foo () != 19456 || bar () != 19456)
    abort ();
  return 0;
}
|
SoaDistanceTableAB.h | //////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
// Amrita Mathuriya, amrita.mathuriya@intel.com, Intel Corp.
//
// File created by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
//////////////////////////////////////////////////////////////////////////////////////
// -*- C++ -*-
#ifndef QMCPLUSPLUS_DTDIMPL_AB_H
#define QMCPLUSPLUS_DTDIMPL_AB_H
#include "Utilities/FairDivide.h"
namespace qmcplusplus
{
/**@ingroup nnlist
* @brief A derived classe from DistacneTableData, specialized for AB using a transposed form
*/
template<typename T, unsigned D, int SC>
struct SoaDistanceTableAB : public DTD_BConds<T, D, SC>, public DistanceTableData
{
  /// Build the AB table for (source, target); allocates all per-target rows.
  SoaDistanceTableAB(const ParticleSet& source, ParticleSet& target)
      : DTD_BConds<T, D, SC>(source.Lattice), DistanceTableData(source, target)
  {
    resize(source.getTotalNum(), target.getTotalNum());
  }

  /** (Re)allocate storage for ns sources and nt targets.
   *  Rows are indexed by target; each row is padded to the SIMD alignment
   *  of T so vectorized kernels can read past N_sources safely. */
  void resize(int ns, int nt)
  {
    N_sources = ns;
    N_targets = nt;
    if (N_sources * N_targets == 0)
      return;
    // initialize memory containers and views
    const int Nsources_padded = getAlignedSize<T>(N_sources);
    distances_.resize(N_targets);
    displacements_.resize(N_targets);
    for (int i = 0; i < N_targets; ++i)
    {
      distances_[i].resize(Nsources_padded);
      displacements_[i].resize(Nsources_padded);
    }
    // The padding of temp_r_ and temp_dr_ is necessary for the memory copy in the update function
    // temp_r_ is padded explicitly while temp_dr_ is padded internally
    temp_r_.resize(Nsources_padded);
    temp_dr_.resize(N_sources);
  }

  SoaDistanceTableAB() = delete;
  SoaDistanceTableAB(const SoaDistanceTableAB&) = delete;

  /** evaluate the full table */
  // Each thread handles an aligned slice [first,last) of the source range;
  // all threads sweep every target row, so rows are written without races.
  inline void evaluate(ParticleSet& P)
  {
#pragma omp parallel
    {
      int first, last;
      FairDivideAligned(N_sources, getAlignment<T>(), omp_get_num_threads(), omp_get_thread_num(), first, last);
      //be aware of the sign of Displacement
      for (int iat = 0; iat < N_targets; ++iat)
        DTD_BConds<T, D, SC>::computeDistances(P.R[iat], Origin->getCoordinates().getAllParticlePos(),
                                               distances_[iat].data(), displacements_[iat], first, last);
    }
  }

  ///evaluate the temporary pair relations
  // Fills temp_r_/temp_dr_ for the proposed position rnew of target iat.
  inline void move(const ParticleSet& P, const PosType& rnew, const IndexType iat, bool prepare_old)
  {
    DTD_BConds<T, D, SC>::computeDistances(rnew, Origin->getCoordinates().getAllParticlePos(), temp_r_.data(), temp_dr_,
                                           0, N_sources);
    // If the full table is not ready all the time, overwrite the current value.
    // If this step is missing, DT values can be undefined in case a move is rejected.
    if (!need_full_table_)
      DTD_BConds<T, D, SC>::computeDistances(P.R[iat], Origin->getCoordinates().getAllParticlePos(),
                                             distances_[iat].data(), displacements_[iat], 0, N_sources);
  }

  ///update the stripe for jat-th particle
  // Commits the temporaries produced by move() into row iat.
  // Copies only N_sources entries; the row padding stays untouched.
  inline void update(IndexType iat, bool partial_update)
  {
    std::copy_n(temp_r_.data(), N_sources, distances_[iat].data());
    for (int idim = 0; idim < D; ++idim)
      std::copy_n(temp_dr_.data(idim), N_sources, displacements_[iat].data(idim));
  }

  /** Compact-list the targets within rcut of source iat.
   *  NOTE(review): here jat runs over targets and iat indexes a source
   *  (distances_[jat][iat]), the transpose of the other accessors —
   *  presumably intentional for source-centered neighbor lists; verify
   *  against callers.  Displacements are negated (cminus) to flip the
   *  stored target-minus-source convention. */
  size_t get_neighbors(int iat,
                       RealType rcut,
                       int* restrict jid,
                       RealType* restrict dist,
                       PosType* restrict displ) const
  {
    constexpr T cminus(-1);
    size_t nn = 0;
    for (int jat = 0; jat < N_targets; ++jat)
    {
      const RealType rij = distances_[jat][iat];
      if (rij < rcut)
      { //make the compact list
        jid[nn]   = jat;
        dist[nn]  = rij;
        displ[nn] = cminus * displacements_[jat][iat];
        nn++;
      }
    }
    return nn;
  }

  /** Return the index of the closest source to target iat (-1 if none).
   *  With newpos=true the scan uses the temporaries from move(); otherwise
   *  it uses the committed row distances_[iat]. r/dr are set only when a
   *  neighbor is found. */
  int get_first_neighbor(IndexType iat, RealType& r, PosType& dr, bool newpos) const
  {
    RealType min_dist = std::numeric_limits<RealType>::max();
    int index         = -1;
    if (newpos)
    {
      for (int jat = 0; jat < N_sources; ++jat)
        if (temp_r_[jat] < min_dist)
        {
          min_dist = temp_r_[jat];
          index    = jat;
        }
      if (index >= 0)
      {
        r  = min_dist;
        dr = temp_dr_[index];
      }
    }
    else
    {
      for (int jat = 0; jat < N_sources; ++jat)
        if (distances_[iat][jat] < min_dist)
        {
          min_dist = distances_[iat][jat];
          index    = jat;
        }
      if (index >= 0)
      {
        r  = min_dist;
        dr = displacements_[iat][index];
      }
    }
    return index;
  }

  /** Distance-only variant of get_neighbors (same transposed indexing). */
  size_t get_neighbors(int iat, RealType rcut, RealType* restrict dist) const
  {
    size_t nn = 0;
    for (int jat = 0; jat < N_targets; ++jat)
    {
      const RealType rij = distances_[jat][iat];
      if (rij < rcut)
      { //make the compact list
        dist[nn] = rij;
        nn++;
      }
    }
    return nn;
  }
};
} // namespace qmcplusplus
#endif
|
reduction.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <omp.h>
#include "timer.h"
/* Code copied from PDF */
/* Sum the len entries of vec in parallel using an OpenMP '+' reduction.
 * Returns 0.0 for an empty vector. */
double sum(double* vec, int len)
{
    double total = 0.0;
    int idx;
#pragma omp parallel for reduction(+:total)
    for (idx = 0; idx < len; idx++)
        total += vec[idx];
    return total;
}
double reduce(double (fun)(double, double),
double* vec, int len, double neutral)
{
int i;
double accu = neutral;
#pragma omp parallel for
for (i = 0; i < len; i++) {
#pragma omp critical
accu = fun(accu, vec[i]);
}
return accu;
}
/* Tree reduction of vec with `fun`, starting from `neutral`.
 * The vector is processed in chunks whose lengths are decreasing powers of
 * two; each chunk is pairwise-merged in parallel until one value remains,
 * which is folded into the accumulator.  `fun` should be associative for a
 * meaningful result.
 *
 * BUG FIX: the original leaked every intermediate buffer — each inner
 * iteration malloc'd a new half-size vector and the comment claimed the old
 * one was "automatically removed", which C never does.  We now free each
 * temporary once it has been consumed (never the caller's input array).
 * Also guards len <= 0, which previously evaluated log2(0). */
double reduce2(double (fun)(double, double),
    double* vec, int len, double neutral)
{
    /* Keep the head of the caller's array so we never free caller memory. */
    double* start_vec = vec;
    double* calc_vec = NULL;
    int calc_vec_len_copy;
    int vec_len = len;
    int calc_vec_len;
    double accu = neutral;
    if (len <= 0)
        return accu;
    /* Largest power of two that fits in the remaining length. */
    calc_vec_len = (int)pow(2, floor(log2(vec_len)));
    /* Continue until the whole original vector has been reduced. */
    while (vec_len != 0) {
        /* Remove the current chunk from the remaining length. */
        vec_len -= calc_vec_len;
        /* Remember the chunk size so we can advance past it afterwards. */
        calc_vec_len_copy = calc_vec_len;
        /* Pairwise-merge the chunk until a single value remains. */
        while (calc_vec_len != 1) {
            calc_vec = (double*)malloc((calc_vec_len / 2) * sizeof(double));
            /* Each output slot is independent, so this is race-free. */
#pragma omp parallel for
            for (int i = 0; i < calc_vec_len; i += 2)
                calc_vec[i/2] = fun(vec[i], vec[i+1]);
            /* Free the previous temporary (first pass reads caller memory). */
            if (vec != start_vec)
                free(vec);
            vec = calc_vec;
            calc_vec_len = calc_vec_len / 2;
        }
        /* Chunk reduced to one term: fold it into the running result. */
        accu = fun(accu, vec[0]);
        if (vec != start_vec)
            free(vec);
        /* Advance the input past the chunk just consumed. */
        start_vec += calc_vec_len_copy;
        vec = start_vec;
        /* Size of the next chunk (0 terminates the loop). */
        calc_vec_len = (vec_len > 0) ? (int)pow(2, floor(log2(vec_len))) : 0;
    }
    return accu;
}
/* Example combiner used by main: doubles both operands before adding,
 * i.e. 2a + 2b, evaluated in the exact order ((a+a)+b)+b. */
double fun(double a, double b) {
    double acc = a + a;
    acc = acc + b;
    return acc + b;
}
/* Entry point: parse num_threads / vec_size / reduce method, fill a vector
 * with sin(i), time one reduction pass, and print "threads, size, seconds".
 *
 * BUG FIX: omp_set_num_threads() was previously called *after* the parallel
 * initialization loop, so that loop ran with the default thread count
 * instead of the requested one.  It is now set before the first parallel
 * region so every region honors the command-line argument. */
int main(int argc, char *argv[])
{
    int num_threads;
    int vec_size;
    int custom_reduce_method;
    double accu;
    double time;
    double *vec;
    if (argc < 4) {
        printf("Usage: %s num_threads vec_size [reduce method]\n", argv[0]);
        printf(" - num_threads: number of threads to use for simulation, "
               "should be >=1\n");
        printf(" - vec_size: size of the vector to do reduction on 10^n"
               "should be >=10\n");
        printf(" - [reduce_method]: custom | sequential.");
        return EXIT_FAILURE;
    }
    num_threads = atoi(argv[1]);
    /* The second argument is the exponent: the vector holds 10^n entries. */
    vec_size = pow(10, atoi(argv[2]));
    if (num_threads < 1) {
        printf("argument error: num_threads should be >=1.\n");
        return EXIT_FAILURE;
    }
    if (vec_size < 4) {
        printf("argument error: vec_size should be >=4.\n");
        return EXIT_FAILURE;
    }
    /* Anything other than "sequential" selects the custom tree reduction. */
    if (strcmp(argv[3], "sequential") == 0) {
        custom_reduce_method = 0;
    } else {
        custom_reduce_method = 1;
    }
    /* Set the thread count before the first parallel region (see fix above). */
    omp_set_num_threads(num_threads);
    vec = (double*)malloc(vec_size * sizeof(double));
    /* Fill a vector with a sinus values */
    #pragma omp parallel for
    for (int i = 0; i < vec_size; i++) {
        vec[i] = sin(i);
    }
    /* Start timing the effeciency of openMP */
    timer_start();
    /* Calculate a reduced vector with openMP */
    if (custom_reduce_method) {
        accu = reduce2(fun, vec, vec_size, 0);
        /* accu = sum(vec, vec_size); */
    } else {
        accu = reduce(fun, vec, vec_size, 0);
    }
    /* Stop timing */
    time = timer_end();
    /* Only the timing is reported; the reduction result is discarded. */
    (void)accu;
    printf("%d, %d, %g\n",num_threads, vec_size, time);
    free(vec);
    vec = NULL;
    return EXIT_SUCCESS;
}
|
weights.c |
/*
Author: Mohammed Ahmed Al Farhan
Email: mohammed.farhan@kaust.edu.sa
Compute the weights for each node for the weighted least square
*/
#include <stdio.h>
#include <stdint.h>
#include <omp.h>
#include <math.h>
#include "inc/geometry.h"
#include "inc/allocator.h"
#include "inc/msh/mesh.h"
/* Evaluate, for every edge, the weighted least-squares contribution of each
 * of its two endpoints, using the per-node weights w (7 doubles per node,
 * laid out as w[node*7 + 0..6]) produced by the w0/w1/w2 passes.
 * Results go to terms0 (edge seen from node0) and terms1 (from node1).
 * Threading model: ie[] partitions the edge list per thread, and a thread
 * only writes contributions for endpoints it owns (part[node] == t), so no
 * two threads write the same output slot. */
static inline void
compute_terms(const struct geometry *restrict g,
              const double *restrict w,
              struct xyz *restrict terms0,
              struct xyz *restrict terms1)
{
  const uint32_t *restrict ie = g->s->ie;      /* per-thread edge ranges */
  const uint32_t *restrict part = g->s->part;  /* owning thread of each node */
  const uint32_t *restrict n0 = g->e->eptr->n0; /* edge endpoint 0 */
  const uint32_t *restrict n1 = g->e->eptr->n1; /* edge endpoint 1 */
  const double *restrict x0 = g->n->xyz->x0;   /* node coordinates */
  const double *restrict x1 = g->n->xyz->x1;
  const double *restrict x2 = g->n->xyz->x2;
  #pragma omp parallel
  {
    const uint32_t t = omp_get_thread_num();
    const uint32_t ie0 = ie[t];
    const uint32_t ie1 = ie[t+1];
    uint32_t i;
    for(i = ie0; i < ie1; i++)
    {
      const uint32_t node0 = n0[i];
      const uint32_t node1 = n1[i];
      const double coordx0 = x0[node0];
      const double coordy0 = x1[node0];
      const double coordz0 = x2[node0];
      const double coordx1 = x0[node1];
      const double coordy1 = x1[node1];
      const double coordz1 = x2[node1];
      /* Edge vector, oriented node0 -> node1. */
      const double dx = coordx1 - coordx0;
      const double dy = coordy1 - coordy0;
      const double dz = coordz1 - coordz0;
      double c0;
      double c1;
      double termx;
      double termy;
      double termz;
      if(part[node0] == t)
      {
        /* Back-substitution through the factored normal equations of
         * node0 (w[..1], w[..2], w[..4] are the off-diagonal ratios). */
        c0 = - dx * w[node0 * 7 + 1] + dy;
        c1 = - dx * w[node0 * 7 + 2] + dz;
        c1 = - w[node0 * 7 + 4] * c0 + c1;
        termx = w[node0 * 7 + 3] * w[node0 * 7 + 1] * c0;
        termx = dx * w[node0 * 7 + 0] - termx;
        termx += w[node0 * 7 + 6] * c1;
        termy = w[node0 * 7 + 4] * w[node0 * 7 + 5] * c1;
        termy = w[node0 * 7 + 3] * c0 - termy;
        termz = w[node0 * 7 + 5] * c1;
        terms0->x0[i] = termx;
        terms0->x1[i] = termy;
        terms0->x2[i] = termz;
      }
      if(part[node1] == t)
      {
        /* Same computation for node1 with the edge vector negated. */
        c0 = dx * w[node1 * 7 + 1] - dy;
        c1 = dx * w[node1 * 7 + 2] - dz;
        c1 = - w[node1 * 7 + 4] * c0 + c1;
        termx = w[node1 * 7 + 3] * w[node1 * 7 + 1] * c0;
        termx = -dx * w[node1 * 7 + 0] - termx;
        termx += w[node1 * 7 + 6] * c1;
        termy = w[node1 * 7 + 4] * w[node1 * 7 + 5] * c1;
        termy = w[node1 * 7 + 3] * c0 - termy;
        termz = w[node1 * 7 + 5] * c1;
        terms1->x0[i] = termx;
        terms1->x1[i] = termy;
        terms1->x2[i] = termz;
      }
    }
  }
}
/* Do w22 */
/* Third accumulation pass: builds w22 (slot 5) as a sum of squared,
 * doubly-deflated z-residuals over a node's incident edges.  Requires
 * slots 0..4 to be finalized by wmalloc before this runs.  Same ownership
 * scheme as the other passes: a thread updates only nodes it owns. */
static inline void
w2alloc(const struct geometry *restrict g, double *restrict w)
{
  const uint32_t *restrict ie = g->s->ie;
  const uint32_t *restrict part = g->s->part;
  const uint32_t *restrict n0 = g->e->eptr->n0;
  const uint32_t *restrict n1 = g->e->eptr->n1;
  const double *restrict x0 = g->n->xyz->x0;
  const double *restrict x1 = g->n->xyz->x1;
  const double *restrict x2 = g->n->xyz->x2;
  #pragma omp parallel
  {
    const uint32_t t = omp_get_thread_num();
    const uint32_t ie0 = ie[t];
    const uint32_t ie1 = ie[t+1];
    uint32_t i;
    for(i = ie0; i < ie1; i++)
    {
      const uint32_t node0 = n0[i];
      const uint32_t node1 = n1[i];
      const double coordx0 = x0[node0];
      const double coordy0 = x1[node0];
      const double coordz0 = x2[node0];
      const double coordx1 = x0[node1];
      const double coordy1 = x1[node1];
      const double coordz1 = x2[node1];
      /* Edge vector node0 -> node1. */
      const double dx = coordx1 - coordx0;
      const double dy = coordy1 - coordy0;
      const double dz = coordz1 - coordz0;
      if(part[node0] == t)
      {
        /* Deflate dz by its projections onto the previous two directions,
         * then accumulate the squared residual into slot 5. */
        const double d0 = w[node0 * 7 + 1] / w[node0 * 7 + 0];
        const double d0_ = dy - dx * d0;
        const double d1 = w[node0 * 7 + 2] / w[node0 * 7 + 0];
        const double d1_ = dz - dx * d1;
        const double d2 = w[node0 * 7 + 4] / w[node0 * 7 + 3];
        const double d2_ = d1_ - d2 * d0_;
        w[node0 * 7 + 5] += d2_ * d2_;
      }
      if(part[node1] == t)
      {
        /* Mirror image for the opposite endpoint (edge vector negated). */
        const double d0 = w[node1 * 7 + 1] / w[node1 * 7 + 0];
        const double d0_ = -dy + dx * d0;
        const double d1 = w[node1 * 7 + 2] / w[node1 * 7 + 0];
        const double d1_ = -dz + dx * d1;
        const double d2 = w[node1 * 7 + 4] / w[node1 * 7 + 3];
        const double d2_ = d1_ - d2 * d0_;
        w[node1 * 7 + 5] += d2_ * d2_;
      }
    }
  }
}
/* Do w11 and w12 */
/* Second accumulation pass: builds w11 (slot 3, squared y-residuals) and
 * w12 (slot 4, y-residual times dz) per node.  Requires slots 0..2 to be
 * finalized by wmalloc before this runs.  Per-thread edge ranges and the
 * part[] ownership test make the accumulation race-free. */
static inline void
w1alloc(const struct geometry *restrict g, double *restrict w)
{
  const uint32_t *restrict ie = g->s->ie;
  const uint32_t *restrict part = g->s->part;
  const uint32_t *restrict n0 = g->e->eptr->n0;
  const uint32_t *restrict n1 = g->e->eptr->n1;
  const double *restrict x0 = g->n->xyz->x0;
  const double *restrict x1 = g->n->xyz->x1;
  const double *restrict x2 = g->n->xyz->x2;
  #pragma omp parallel
  {
    const uint32_t t = omp_get_thread_num();
    const uint32_t ie0 = ie[t];
    const uint32_t ie1 = ie[t+1];
    uint32_t i;
    for(i = ie0; i < ie1; i++)
    {
      const uint32_t node0 = n0[i];
      const uint32_t node1 = n1[i];
      const double coordx0 = x0[node0];
      const double coordy0 = x1[node0];
      const double coordz0 = x2[node0];
      const double coordx1 = x0[node1];
      const double coordy1 = x1[node1];
      const double coordz1 = x2[node1];
      /* Compute the difference of each coordinate component */
      const double dx = coordx1 - coordx0;
      const double dy = coordy1 - coordy0;
      const double dz = coordz1 - coordz0;
      if(part[node0] == t)
      {
        /* Remove the x-direction component from dy, then accumulate. */
        const double d = w[node0 * 7 + 1] / w[node0 * 7 + 0];
        const double d_ = dy - dx * d;
        w[node0 * 7 + 3] += d_ * d_;
        w[node0 * 7 + 4] += d_ * dz;
      }
      if(part[node1] == t)
      {
        /* Opposite orientation: residual sign flips, so the cross term
         * is subtracted while the square is orientation-independent. */
        const double d = w[node1 * 7 + 1] / w[node1 * 7 + 0];
        const double d_ = -dy + dx * d;
        w[node1 * 7 + 3] += d_ * d_;
        w[node1 * 7 + 4] -= d_ * dz;
      }
    }
  }
}
/*
Compute w00, w01, and w02 in parallel
*/
/* First accumulation pass: builds w00 (slot 0, sum of dx^2), w01 (slot 1,
 * sum of dx*dy) and w02 (slot 2, sum of dx*dz) per node, looping over the
 * thread-local edge range and updating only owned endpoints. */
static inline void
w0alloc(const struct geometry *restrict g, double *restrict w)
{
  const uint32_t *restrict ie = g->s->ie;
  const uint32_t *restrict part = g->s->part;
  const uint32_t *restrict n0 = g->e->eptr->n0;
  const uint32_t *restrict n1 = g->e->eptr->n1;
  const double *restrict x0 = g->n->xyz->x0;
  const double *restrict x1 = g->n->xyz->x1;
  const double *restrict x2 = g->n->xyz->x2;
  #pragma omp parallel
  {
    const uint32_t t = omp_get_thread_num();
    const uint32_t ie0 = ie[t];
    const uint32_t ie1 = ie[t+1];
    uint32_t i;
    for(i = ie0; i < ie1; i++)
    {
      const uint32_t node0 = n0[i];
      const uint32_t node1 = n1[i];
      const double coordx0 = x0[node0];
      const double coordy0 = x1[node0];
      const double coordz0 = x2[node0];
      const double coordx1 = x0[node1];
      const double coordy1 = x1[node1];
      const double coordz1 = x2[node1];
      /*
       * Write-back: Update endpoints
       * */
      if(part[node0] == t) // Do the left endpoint
      {
        const double res_x = coordx1 - coordx0;
        const double res_y = coordy1 - coordy0;
        const double res_z = coordz1 - coordz0;
        w[node0 * 7 + 0] += res_x * res_x;
        w[node0 * 7 + 1] += res_x * res_y;
        w[node0 * 7 + 2] += res_x * res_z;
      }
      if(part[node1] == t) // Do the right endpoint
      {
        /* Reversed edge vector; w00 is unchanged by the sign, the
         * cross terms pick up the product of two sign flips. */
        const double res_x = coordx0 - coordx1;
        const double res_y = coordy0 - coordy1;
        const double res_z = coordz0 - coordz1;
        w[node1 * 7 + 0] += res_x * res_x;
        w[node1 * 7 + 1] += res_x * res_y;
        w[node1 * 7 + 2] += res_x * res_z;
      }
    }
  }
}
/* Driver: compute the per-node weighted-least-squares weights in three
 * accumulate/normalize passes (a Gram-Schmidt-like factorization of each
 * node's 3x3 normal matrix stored as 7 doubles per node), then evaluate the
 * per-edge terms and attach them to the mesh as g->e->w.
 * NOTE(review): the normalizations divide by w[..0], w[..3], w[..5]; a node
 * with no incident edges (or degenerate geometry) would divide by zero —
 * presumably excluded by the mesh construction; verify upstream. */
void
wmalloc(struct geometry *restrict g)
{
  size_t nnodes = g->n->sz;
  double *restrict w;
  /* Scratch weights: 7 slots per node, zero-initialized. */
  kcalloc(7 * nnodes, sizeof(double), (void *) &w);
  uint32_t i;
  /* Do w00, w01, and w02 */
  w0alloc(g, w);
  /* Compute ||x|| (norm) and divide the other by
     the computed norm */
  #pragma omp parallel
  {
    #pragma omp for
    for(i = 0; i < nnodes; i++)
      w[i * 7 + 0] = sqrt(w[i * 7 + 0]);
    #pragma omp for
    for(i = 0; i < nnodes; i++)
    {
      w[i * 7 + 1] /= w[i * 7 + 0];
      w[i * 7 + 2] /= w[i * 7 + 0];
    }
  }
  /* Do w11 and w12 */
  w1alloc(g, w);
  #pragma omp parallel
  {
    #pragma omp for
    for(i = 0; i < nnodes; i++)
      w[i * 7 + 3] = sqrt(w[i * 7 + 3]);
    #pragma omp for
    for(i = 0; i < nnodes; i++)
      w[i * 7 + 4] /= w[i * 7 + 3];
  }
  /* Do w22 */
  w2alloc(g, w);
  #pragma omp parallel
  {
    #pragma omp for
    for(i = 0; i < nnodes; i++)
      w[i * 7 + 5] = sqrt(w[i * 7 + 5]);
    /* Update the magnitudes. Stuffs contributed by Dinesh 1998 */
    #pragma omp for
    for(i = 0; i < nnodes; i++)
    {
      /* Convert the factor entries into the final weight form:
       * inverse squared norms on the diagonal, ratio terms off it. */
      double sw00 = w[i * 7 + 0] * w[i * 7 + 0];
      double sw11 = w[i * 7 + 3] * w[i * 7 + 3];
      double sw22 = w[i * 7 + 5] * w[i * 7 + 5];
      double w00 = 1.f / sw00;
      double w11 = 1.f / sw11;
      double w22 = 1.f / sw22;
      double w01 = w[i * 7 + 1] / w[i * 7 + 0];
      double w02 = w[i * 7 + 2] / w[i * 7 + 0];
      double w12 = w[i * 7 + 4] / w[i * 7 + 3];
      double m0 = w[i * 7 + 1] * w[i * 7 + 4];
      m0 -= w[i * 7 + 2] * w[i * 7 + 3];
      double m1 = w[i * 7 + 0] * w[i * 7 + 3] * sw22;
      double w33 = m0 / m1;
      w[i * 7 + 0] = w00;
      w[i * 7 + 3] = w11;
      w[i * 7 + 5] = w22;
      w[i * 7 + 1] = w01;
      w[i * 7 + 2] = w02;
      w[i * 7 + 4] = w12;
      w[i * 7 + 6] = w33;
    }
  }
  /* Allocate the per-edge output (one xyz triple per edge and endpoint). */
  size_t nedges = g->e->sz;
  struct xyz *restrict terms0;
  kmalloc(1, sizeof(struct xyz), (void *) &terms0);
  double *restrict wtermsx0;
  kcalloc(nedges, sizeof(double), (void *) &wtermsx0);
  double *restrict wtermsy0;
  kcalloc(nedges, sizeof(double), (void *) &wtermsy0);
  double *restrict wtermsz0;
  kcalloc(nedges, sizeof(double), (void *) &wtermsz0);
  terms0->x0 = wtermsx0;
  terms0->x1 = wtermsy0;
  terms0->x2 = wtermsz0;
  struct xyz *restrict terms1;
  kmalloc(1, sizeof(struct xyz), (void *) &terms1);
  double *restrict wtermsx1;
  kcalloc(nedges, sizeof(double), (void *) &wtermsx1);
  double *restrict wtermsy1;
  kcalloc(nedges, sizeof(double), (void *) &wtermsy1);
  double *restrict wtermsz1;
  kcalloc(nedges, sizeof(double), (void *) &wtermsz1);
  terms1->x0 = wtermsx1;
  terms1->x1 = wtermsy1;
  terms1->x2 = wtermsz1;
  compute_terms(g, w, terms0, terms1);
  /* The node weights are only needed to build the edge terms. */
  kfree(w);
  struct weights *restrict weights;
  kmalloc(1, sizeof(struct weights), (void *) &weights);
  weights->w0 = terms0;
  weights->w1 = terms1;
  g->e->w = weights;
}
|
par_add_cycle.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* ParAMG cycling routine
*
*****************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "par_amg.h"
/*--------------------------------------------------------------------------
* hypre_BoomerAMGCycle
*--------------------------------------------------------------------------*/
/* One V-cycle of BoomerAMG in which levels [addlvl, add_end] are treated
 * additively (simultaneous smoothing via Lambda/D_inv on the combined
 * vectors Rtilde/Xtilde) and the remaining levels multiplicatively.
 * Returns Solve_err_flag (always 0 here). */
HYPRE_Int
hypre_BoomerAMGAdditiveCycle( void *amg_vdata)
{
   hypre_ParAMGData *amg_data = (hypre_ParAMGData*) amg_vdata;
   /* Data Structure variables */
   hypre_ParCSRMatrix **A_array;
   hypre_ParCSRMatrix **P_array;
   hypre_ParCSRMatrix **R_array;
   hypre_ParCSRMatrix *Lambda;
   hypre_ParCSRMatrix *Atilde;
   hypre_ParVector **F_array;
   hypre_ParVector **U_array;
   hypre_ParVector *Vtemp;
   hypre_ParVector *Ztemp;
   hypre_ParVector *Xtilde, *Rtilde;
   HYPRE_Int **CF_marker_array;
   HYPRE_Int num_levels;
   HYPRE_Int addlvl, add_end;
   HYPRE_Int additive;
   HYPRE_Int mult_additive;
   HYPRE_Int simple;
   HYPRE_Int add_last_lvl;
   HYPRE_Int i, j, num_rows;
   HYPRE_Int n_global;
   HYPRE_Int rlx_order;
   /* Local variables */
   HYPRE_Int Solve_err_flag = 0;
   HYPRE_Int level;
   HYPRE_Int coarse_grid;
   HYPRE_Int fine_grid;
   HYPRE_Int rlx_down;
   HYPRE_Int rlx_up;
   HYPRE_Int rlx_coarse;
   HYPRE_Int *grid_relax_type;
   HYPRE_Int *num_grid_sweeps;
   hypre_Vector **l1_norms;
   HYPRE_Real alpha, beta;
   HYPRE_Real *u_data;
   HYPRE_Real *v_data;
   hypre_Vector *l1_norms_lvl;
   HYPRE_Real *D_inv;
   HYPRE_Real *x_global;
   HYPRE_Real *r_global;
   HYPRE_Real *relax_weight;
   HYPRE_Real *omega;
#if 0
   HYPRE_Real *D_mat;
   HYPRE_Real *S_vec;
#endif
   /* Acquire data and allocate storage */
   A_array = hypre_ParAMGDataAArray(amg_data);
   F_array = hypre_ParAMGDataFArray(amg_data);
   U_array = hypre_ParAMGDataUArray(amg_data);
   P_array = hypre_ParAMGDataPArray(amg_data);
   R_array = hypre_ParAMGDataRArray(amg_data);
   CF_marker_array = hypre_ParAMGDataCFMarkerArray(amg_data);
   Vtemp = hypre_ParAMGDataVtemp(amg_data);
   Ztemp = hypre_ParAMGDataZtemp(amg_data);
   num_levels = hypre_ParAMGDataNumLevels(amg_data);
   additive = hypre_ParAMGDataAdditive(amg_data);
   mult_additive = hypre_ParAMGDataMultAdditive(amg_data);
   simple = hypre_ParAMGDataSimple(amg_data);
   add_last_lvl = hypre_ParAMGDataAddLastLvl(amg_data);
   grid_relax_type = hypre_ParAMGDataGridRelaxType(amg_data);
   Lambda = hypre_ParAMGDataLambda(amg_data);
   Atilde = hypre_ParAMGDataAtilde(amg_data);
   Xtilde = hypre_ParAMGDataXtilde(amg_data);
   Rtilde = hypre_ParAMGDataRtilde(amg_data);
   l1_norms = hypre_ParAMGDataL1Norms(amg_data);
   D_inv = hypre_ParAMGDataDinv(amg_data);
   relax_weight = hypre_ParAMGDataRelaxWeight(amg_data);
   omega = hypre_ParAMGDataOmega(amg_data);
   rlx_order = hypre_ParAMGDataRelaxOrder(amg_data);
   num_grid_sweeps = hypre_ParAMGDataNumGridSweeps(amg_data);
   /* Initialize */
   /* First additive level: only one of additive/mult_additive/simple is
      >= 0 at a time; -1 means "not enabled". */
   addlvl = hypre_max(additive, mult_additive);
   addlvl = hypre_max(addlvl, simple);
   if (add_last_lvl == -1 ) add_end = num_levels-1;
   else add_end = add_last_lvl;
   Solve_err_flag = 0;
   /*---------------------------------------------------------------------
    * Main loop of cycling --- multiplicative version --- V-cycle
    *--------------------------------------------------------------------*/
   /* down cycle */
   rlx_down = grid_relax_type[1];
   rlx_up = grid_relax_type[2];
   rlx_coarse = grid_relax_type[3];
   for (level = 0; level < num_levels-1; level++)
   {
      fine_grid = level;
      coarse_grid = level + 1;
      u_data = hypre_VectorData(hypre_ParVectorLocalVector(U_array[fine_grid]));
      v_data = hypre_VectorData(hypre_ParVectorLocalVector(Vtemp));
      l1_norms_lvl = l1_norms[level];
      hypre_ParVectorSetConstantValues(U_array[coarse_grid], 0.0);
      if (level < addlvl || level > add_end) /* multiplicative version */
      {
         /* smoothing step */
         if (rlx_down == 0)
         {
            /* Weighted Jacobi using the diagonal of A directly. */
            HYPRE_Real *A_data = hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(A_array[fine_grid]));
            HYPRE_Int *A_i = hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(A_array[fine_grid]));
            num_rows = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A_array[fine_grid]));
            for (j=0; j < num_grid_sweeps[1]; j++)
            {
               hypre_ParVectorCopy(F_array[fine_grid],Vtemp);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
               for (i = 0; i < num_rows; i++)
                  u_data[i] = relax_weight[level]*v_data[i] / A_data[A_i[i]];
            }
         }
         else if (rlx_down != 18)
         {
            /*hypre_BoomerAMGRelax(A_array[fine_grid],F_array[fine_grid],NULL,rlx_down,0,*/
            for (j=0; j < num_grid_sweeps[1]; j++)
            {
               hypre_BoomerAMGRelaxIF(A_array[fine_grid],F_array[fine_grid],
                                      CF_marker_array[fine_grid], rlx_down,rlx_order,1,
                                      relax_weight[fine_grid], omega[fine_grid],
                                      l1_norms[level] ? hypre_VectorData(l1_norms[level]) : NULL,
                                      U_array[fine_grid], Vtemp, Ztemp);
               hypre_ParVectorCopy(F_array[fine_grid],Vtemp);
            }
         }
         else
         {
            /* rlx_down == 18: l1-scaled Jacobi. */
            num_rows = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A_array[fine_grid]));
            for (j=0; j < num_grid_sweeps[1]; j++)
            {
               hypre_ParVectorCopy(F_array[fine_grid],Vtemp);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
               for (i = 0; i < num_rows; i++)
               {
                  u_data[i] += v_data[i] / hypre_VectorData(l1_norms_lvl)[i];
               }
            }
         }
         /* Residual Vtemp = f - A u, then restrict to the coarse rhs. */
         alpha = -1.0;
         beta = 1.0;
         hypre_ParCSRMatrixMatvec(alpha, A_array[fine_grid], U_array[fine_grid],
                                  beta, Vtemp);
         alpha = 1.0;
         beta = 0.0;
         hypre_ParCSRMatrixMatvecT(alpha,R_array[fine_grid],Vtemp,
                                   beta,F_array[coarse_grid]);
      }
      else /* additive version */
      {
         /* No smoothing here: just restrict the rhs, saving the level-0
            residual/solution into Rtilde/Xtilde for the combined solve. */
         hypre_ParVectorCopy(F_array[fine_grid],Vtemp);
         if (level == 0) /* compute residual */
         {
            hypre_ParVectorCopy(Vtemp, Rtilde);
            hypre_ParVectorCopy(U_array[fine_grid],Xtilde);
         }
         alpha = 1.0;
         beta = 0.0;
         hypre_ParCSRMatrixMatvecT(alpha,R_array[fine_grid],Vtemp,
                                   beta,F_array[coarse_grid]);
      }
   }
   /* additive smoothing and solve coarse grid */
   if (addlvl < num_levels)
   {
      if (simple > -1)
      {
         /* "Simple" variant: one diagonally-scaled correction across all
            additive levels at once. */
         x_global = hypre_VectorData(hypre_ParVectorLocalVector(Xtilde));
         r_global = hypre_VectorData(hypre_ParVectorLocalVector(Rtilde));
         n_global = hypre_VectorSize(hypre_ParVectorLocalVector(Xtilde));
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i=0; i < n_global; i++)
            x_global[i] += D_inv[i]*r_global[i];
      }
      else
      {
         if (num_grid_sweeps[1] > 1)
         {
            /* Two-sweep variant: r <- (2I - Atilde*Lambda) r before the
               final Lambda application (polynomial smoothing). */
            n_global = hypre_VectorSize(hypre_ParVectorLocalVector(Rtilde));
            hypre_ParVector *Tmptilde = hypre_CTAlloc(hypre_ParVector, 1, HYPRE_MEMORY_HOST);
            hypre_Vector *Tmptilde_local = hypre_SeqVectorCreate(n_global);
            hypre_SeqVectorInitialize(Tmptilde_local);
            hypre_ParVectorLocalVector(Tmptilde) = Tmptilde_local;
            hypre_ParVectorOwnsData(Tmptilde) = 1;
            hypre_ParCSRMatrixMatvec(1.0, Lambda, Rtilde, 0.0, Tmptilde);
            hypre_ParVectorScale(2.0,Rtilde);
            hypre_ParCSRMatrixMatvec(-1.0, Atilde, Tmptilde, 1.0, Rtilde);
            hypre_ParVectorDestroy(Tmptilde);
         }
         hypre_ParCSRMatrixMatvec(1.0, Lambda, Rtilde, 1.0, Xtilde);
      }
      if (addlvl == 0) hypre_ParVectorCopy(Xtilde, U_array[0]);
   }
   /* Coarsest-grid relaxation when the additive part stops before it. */
   if (add_end < num_levels -1)
   {
      fine_grid = num_levels -1;
      for (j=0; j < num_grid_sweeps[3]; j++)
         if (rlx_coarse == 18)
            hypre_ParCSRRelax(A_array[fine_grid], F_array[fine_grid],
                              1, 1,
                              l1_norms[fine_grid] ? hypre_VectorData(l1_norms[fine_grid]) : NULL,
                              1.0, 1.0 ,0,0,0,0,
                              U_array[fine_grid], Vtemp, Ztemp);
         else
            hypre_BoomerAMGRelaxIF(A_array[fine_grid],F_array[fine_grid],
                                   NULL, rlx_coarse,0,0,
                                   relax_weight[fine_grid], omega[fine_grid],
                                   l1_norms[fine_grid] ? hypre_VectorData(l1_norms[fine_grid]) : NULL,
                                   U_array[fine_grid], Vtemp, Ztemp);
   }
   /* up cycle */
   for (level = num_levels-1; level > 0; level--)
   {
      fine_grid = level - 1;
      coarse_grid = level;
      if (level <= addlvl || level > add_end+1) /* multiplicative version */
      {
         /* Prolongate the coarse correction, then post-smooth. */
         alpha = 1.0;
         beta = 1.0;
         hypre_ParCSRMatrixMatvec(alpha, P_array[fine_grid],
                                  U_array[coarse_grid],
                                  beta, U_array[fine_grid]);
         if (rlx_up != 18)
            /*hypre_BoomerAMGRelax(A_array[fine_grid],F_array[fine_grid],NULL,rlx_up,0,*/
            for (j=0; j < num_grid_sweeps[2]; j++)
               hypre_BoomerAMGRelaxIF(A_array[fine_grid],F_array[fine_grid],
                                      CF_marker_array[fine_grid],
                                      rlx_up,rlx_order,2,
                                      relax_weight[fine_grid], omega[fine_grid],
                                      l1_norms[fine_grid] ? hypre_VectorData(l1_norms[fine_grid]) : NULL,
                                      U_array[fine_grid], Vtemp, Ztemp);
         else if (rlx_order)
         {
            /* l1-Jacobi with C-points first, then F-points. */
            HYPRE_Int loc_relax_points[2];
            loc_relax_points[0] = -1;
            loc_relax_points[1] = 1;
            for (j=0; j < num_grid_sweeps[2]; j++)
               for (i=0; i < 2; i++)
                  hypre_ParCSRRelax_L1_Jacobi(A_array[fine_grid],F_array[fine_grid],
                                              CF_marker_array[fine_grid],
                                              loc_relax_points[i],
                                              1.0,
                                              l1_norms[fine_grid] ? hypre_VectorData(l1_norms[fine_grid]) : NULL,
                                              U_array[fine_grid], Vtemp);
         }
         else
            for (j=0; j < num_grid_sweeps[2]; j++)
               hypre_ParCSRRelax(A_array[fine_grid], F_array[fine_grid],
                                 1, 1,
                                 l1_norms[fine_grid] ? hypre_VectorData(l1_norms[fine_grid]) : NULL,
                                 1.0, 1.0 ,0,0,0,0,
                                 U_array[fine_grid], Vtemp, Ztemp);
      }
      else /* additive version */
      {
         /* Additive levels only interpolate; smoothing already happened
            in the combined solve above. */
         alpha = 1.0;
         beta = 1.0;
         hypre_ParCSRMatrixMatvec(alpha, P_array[fine_grid],
                                  U_array[coarse_grid],
                                  beta, U_array[fine_grid]);
      }
   }
   return(Solve_err_flag);
}
HYPRE_Int hypre_CreateLambda(void *amg_vdata)
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) amg_vdata;
/* Data Structure variables */
MPI_Comm comm;
hypre_ParCSRMatrix **A_array;
hypre_ParVector **F_array;
hypre_ParVector **U_array;
hypre_ParCSRMatrix *A_tmp;
hypre_ParCSRMatrix *Lambda;
hypre_CSRMatrix *L_diag;
hypre_CSRMatrix *L_offd;
hypre_ParCSRMatrix *Atilde;
hypre_CSRMatrix *Atilde_diag;
hypre_CSRMatrix *Atilde_offd;
HYPRE_Real *Atilde_diag_data;
HYPRE_Real *Atilde_offd_data;
hypre_CSRMatrix *A_tmp_diag;
hypre_CSRMatrix *A_tmp_offd;
hypre_ParVector *Xtilde;
hypre_ParVector *Rtilde;
hypre_Vector *Xtilde_local;
hypre_Vector *Rtilde_local;
hypre_ParCSRCommPkg *comm_pkg;
hypre_ParCSRCommPkg *L_comm_pkg = NULL;
hypre_ParCSRCommHandle *comm_handle;
HYPRE_Real *L_diag_data;
HYPRE_Real *L_offd_data;
HYPRE_Real *buf_data = NULL;
HYPRE_Real *tmp_data;
HYPRE_Real *x_data;
HYPRE_Real *r_data;
hypre_Vector *l1_norms;
HYPRE_Real *A_tmp_diag_data;
HYPRE_Real *A_tmp_offd_data;
HYPRE_Real *D_data = NULL;
HYPRE_Real *D_data_offd = NULL;
HYPRE_Int *L_diag_i;
HYPRE_Int *L_diag_j;
HYPRE_Int *L_offd_i;
HYPRE_Int *L_offd_j;
HYPRE_Int *Atilde_diag_i;
HYPRE_Int *Atilde_diag_j;
HYPRE_Int *Atilde_offd_i;
HYPRE_Int *Atilde_offd_j;
HYPRE_Int *A_tmp_diag_i;
HYPRE_Int *A_tmp_offd_i;
HYPRE_Int *A_tmp_diag_j;
HYPRE_Int *A_tmp_offd_j;
HYPRE_Int *L_recv_ptr = NULL;
HYPRE_Int *L_send_ptr = NULL;
HYPRE_Int *L_recv_procs = NULL;
HYPRE_Int *L_send_procs = NULL;
HYPRE_Int *L_send_map_elmts = NULL;
HYPRE_Int *recv_procs;
HYPRE_Int *send_procs;
HYPRE_Int *send_map_elmts;
HYPRE_Int *send_map_starts;
HYPRE_Int *recv_vec_starts;
HYPRE_Int *all_send_procs = NULL;
HYPRE_Int *all_recv_procs = NULL;
HYPRE_Int *remap = NULL;
HYPRE_Int *level_start;
HYPRE_Int addlvl;
HYPRE_Int additive;
HYPRE_Int mult_additive;
HYPRE_Int num_levels;
HYPRE_Int num_add_lvls;
HYPRE_Int num_procs;
HYPRE_Int num_sends, num_recvs;
HYPRE_Int num_sends_L = 0;
HYPRE_Int num_recvs_L = 0;
HYPRE_Int send_data_L = 0;
HYPRE_Int num_rows_L = 0;
HYPRE_Int num_rows_tmp = 0;
HYPRE_Int num_cols_offd_L = 0;
HYPRE_Int num_cols_offd = 0;
HYPRE_Int level, i, j, k;
HYPRE_Int this_proc, cnt, cnt_diag, cnt_offd;
HYPRE_Int A_cnt_diag, A_cnt_offd;
HYPRE_Int cnt_recv, cnt_send, cnt_row, row_start;
HYPRE_Int start_diag, start_offd, indx, cnt_map;
HYPRE_Int start, j_indx, index, cnt_level;
HYPRE_Int max_sends, max_recvs;
HYPRE_Int ns;
/* Local variables */
HYPRE_Int Solve_err_flag = 0;
HYPRE_Int num_nonzeros_diag;
HYPRE_Int num_nonzeros_offd;
hypre_Vector **l1_norms_ptr = NULL;
/*HYPRE_Real *relax_weight = NULL;
HYPRE_Int relax_type; */
HYPRE_Int add_rlx;
HYPRE_Int add_last_lvl, add_end;
HYPRE_Real add_rlx_wt;
/* Acquire data and allocate storage */
A_array = hypre_ParAMGDataAArray(amg_data);
F_array = hypre_ParAMGDataFArray(amg_data);
U_array = hypre_ParAMGDataUArray(amg_data);
additive = hypre_ParAMGDataAdditive(amg_data);
mult_additive = hypre_ParAMGDataMultAdditive(amg_data);
add_last_lvl = hypre_ParAMGDataAddLastLvl(amg_data);
num_levels = hypre_ParAMGDataNumLevels(amg_data);
/*relax_weight = hypre_ParAMGDataRelaxWeight(amg_data);
relax_type = hypre_ParAMGDataGridRelaxType(amg_data)[1];*/
comm = hypre_ParCSRMatrixComm(A_array[0]);
add_rlx = hypre_ParAMGDataAddRelaxType(amg_data);
add_rlx_wt = hypre_ParAMGDataAddRelaxWt(amg_data);
ns = hypre_ParAMGDataNumGridSweeps(amg_data)[1];
hypre_MPI_Comm_size(comm,&num_procs);
l1_norms_ptr = hypre_ParAMGDataL1Norms(amg_data);
addlvl = hypre_max(additive, mult_additive);
if (add_last_lvl != -1) add_end = add_last_lvl+1;
else add_end = num_levels;
num_add_lvls = add_end+1-addlvl;
level_start = hypre_CTAlloc(HYPRE_Int, num_add_lvls+1, HYPRE_MEMORY_HOST);
send_data_L = 0;
num_rows_L = 0;
num_cols_offd_L = 0;
num_nonzeros_diag = 0;
num_nonzeros_offd = 0;
level_start[0] = 0;
cnt = 1;
max_sends = 0;
max_recvs = 0;
for (i=addlvl; i < add_end; i++)
{
A_tmp = A_array[i];
A_tmp_diag = hypre_ParCSRMatrixDiag(A_tmp);
A_tmp_offd = hypre_ParCSRMatrixOffd(A_tmp);
A_tmp_diag_i = hypre_CSRMatrixI(A_tmp_diag);
A_tmp_offd_i = hypre_CSRMatrixI(A_tmp_offd);
num_rows_tmp = hypre_CSRMatrixNumRows(A_tmp_diag);
num_cols_offd = hypre_CSRMatrixNumCols(A_tmp_offd);
num_rows_L += num_rows_tmp;
level_start[cnt] = level_start[cnt-1] + num_rows_tmp;
cnt++;
num_cols_offd_L += num_cols_offd;
num_nonzeros_diag += A_tmp_diag_i[num_rows_tmp];
num_nonzeros_offd += A_tmp_offd_i[num_rows_tmp];
comm_pkg = hypre_ParCSRMatrixCommPkg(A_tmp);
if (comm_pkg)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
max_sends += num_sends;
if (num_sends)
send_data_L += hypre_ParCSRCommPkgSendMapStart(comm_pkg,num_sends);
max_recvs += hypre_ParCSRCommPkgNumRecvs(comm_pkg);
}
}
if (max_sends >= num_procs ||max_recvs >= num_procs)
{
max_sends = num_procs;
max_recvs = num_procs;
}
if (max_sends) all_send_procs = hypre_CTAlloc(HYPRE_Int, max_sends, HYPRE_MEMORY_HOST);
if (max_recvs) all_recv_procs = hypre_CTAlloc(HYPRE_Int, max_recvs, HYPRE_MEMORY_HOST);
cnt_send = 0;
cnt_recv = 0;
if (max_sends || max_recvs)
{
if (max_sends < num_procs && max_recvs < num_procs)
{
for (i=addlvl; i < add_end; i++)
{
A_tmp = A_array[i];
comm_pkg = hypre_ParCSRMatrixCommPkg(A_tmp);
if (comm_pkg)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg);
recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
for (j = 0; j < num_sends; j++)
all_send_procs[cnt_send++] = send_procs[j];
for (j = 0; j < num_recvs; j++)
all_recv_procs[cnt_recv++] = recv_procs[j];
}
}
if (max_sends)
{
hypre_qsort0(all_send_procs, 0, max_sends-1);
num_sends_L = 1;
this_proc = all_send_procs[0];
for (i=1; i < max_sends; i++)
{
if (all_send_procs[i] > this_proc)
{
this_proc = all_send_procs[i];
all_send_procs[num_sends_L++] = this_proc;
}
}
L_send_procs = hypre_CTAlloc(HYPRE_Int, num_sends_L, HYPRE_MEMORY_HOST);
for (j=0; j < num_sends_L; j++)
L_send_procs[j] = all_send_procs[j];
hypre_TFree(all_send_procs, HYPRE_MEMORY_HOST);
}
if (max_recvs)
{
hypre_qsort0(all_recv_procs, 0, max_recvs-1);
num_recvs_L = 1;
this_proc = all_recv_procs[0];
for (i=1; i < max_recvs; i++)
{
if (all_recv_procs[i] > this_proc)
{
this_proc = all_recv_procs[i];
all_recv_procs[num_recvs_L++] = this_proc;
}
}
L_recv_procs = hypre_CTAlloc(HYPRE_Int, num_recvs_L, HYPRE_MEMORY_HOST);
for (j=0; j < num_recvs_L; j++)
L_recv_procs[j] = all_recv_procs[j];
hypre_TFree(all_recv_procs, HYPRE_MEMORY_HOST);
}
L_recv_ptr = hypre_CTAlloc(HYPRE_Int, num_recvs_L+1, HYPRE_MEMORY_HOST);
L_send_ptr = hypre_CTAlloc(HYPRE_Int, num_sends_L+1, HYPRE_MEMORY_HOST);
for (i=addlvl; i < add_end; i++)
{
A_tmp = A_array[i];
comm_pkg = hypre_ParCSRMatrixCommPkg(A_tmp);
if (comm_pkg)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg);
recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg);
recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg);
}
else
{
num_sends = 0;
num_recvs = 0;
}
for (k = 0; k < num_sends; k++)
{
this_proc = hypre_BinarySearch(L_send_procs,send_procs[k],num_sends_L);
L_send_ptr[this_proc+1] += send_map_starts[k+1]-send_map_starts[k];
}
for (k = 0; k < num_recvs; k++)
{
this_proc = hypre_BinarySearch(L_recv_procs,recv_procs[k],num_recvs_L);
L_recv_ptr[this_proc+1] += recv_vec_starts[k+1]-recv_vec_starts[k];
}
}
L_recv_ptr[0] = 0;
for (i=1; i < num_recvs_L; i++)
L_recv_ptr[i+1] += L_recv_ptr[i];
L_send_ptr[0] = 0;
for (i=1; i < num_sends_L; i++)
L_send_ptr[i+1] += L_send_ptr[i];
}
else
{
num_recvs_L = 0;
num_sends_L = 0;
for (i=addlvl; i < add_end; i++)
{
A_tmp = A_array[i];
comm_pkg = hypre_ParCSRMatrixCommPkg(A_tmp);
if (comm_pkg)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg);
recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg);
recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg);
for (j = 0; j < num_sends; j++)
{
this_proc = send_procs[j];
if (all_send_procs[this_proc] == 0)
num_sends_L++;
all_send_procs[this_proc] += send_map_starts[j+1]-send_map_starts[j];
}
for (j = 0; j < num_recvs; j++)
{
this_proc = recv_procs[j];
if (all_recv_procs[this_proc] == 0)
num_recvs_L++;
all_recv_procs[this_proc] += recv_vec_starts[j+1]-recv_vec_starts[j];
}
}
}
if (max_sends)
{
L_send_procs = hypre_CTAlloc(HYPRE_Int, num_sends_L, HYPRE_MEMORY_HOST);
L_send_ptr = hypre_CTAlloc(HYPRE_Int, num_sends_L+1, HYPRE_MEMORY_HOST);
num_sends_L = 0;
for (j=0; j < num_procs; j++)
{
this_proc = all_send_procs[j];
if (this_proc)
{
L_send_procs[num_sends_L++] = j;
L_send_ptr[num_sends_L] = this_proc + L_send_ptr[num_sends_L-1];
}
}
}
if (max_recvs)
{
L_recv_procs = hypre_CTAlloc(HYPRE_Int, num_recvs_L, HYPRE_MEMORY_HOST);
L_recv_ptr = hypre_CTAlloc(HYPRE_Int, num_recvs_L+1, HYPRE_MEMORY_HOST);
num_recvs_L = 0;
for (j=0; j < num_procs; j++)
{
this_proc = all_recv_procs[j];
if (this_proc)
{
L_recv_procs[num_recvs_L++] = j;
L_recv_ptr[num_recvs_L] = this_proc + L_recv_ptr[num_recvs_L-1];
}
}
}
}
}
if (max_sends) hypre_TFree(all_send_procs, HYPRE_MEMORY_HOST);
if (max_recvs) hypre_TFree(all_recv_procs, HYPRE_MEMORY_HOST);
L_diag = hypre_CSRMatrixCreate(num_rows_L, num_rows_L, num_nonzeros_diag);
L_offd = hypre_CSRMatrixCreate(num_rows_L, num_cols_offd_L, num_nonzeros_offd);
hypre_CSRMatrixInitialize(L_diag);
hypre_CSRMatrixInitialize(L_offd);
if (num_nonzeros_diag)
{
L_diag_data = hypre_CSRMatrixData(L_diag);
L_diag_j = hypre_CSRMatrixJ(L_diag);
}
L_diag_i = hypre_CSRMatrixI(L_diag);
if (num_nonzeros_offd)
{
L_offd_data = hypre_CSRMatrixData(L_offd);
L_offd_j = hypre_CSRMatrixJ(L_offd);
}
L_offd_i = hypre_CSRMatrixI(L_offd);
if (ns > 1)
{
Atilde_diag = hypre_CSRMatrixCreate(num_rows_L, num_rows_L, num_nonzeros_diag);
Atilde_offd = hypre_CSRMatrixCreate(num_rows_L, num_cols_offd_L, num_nonzeros_offd);
hypre_CSRMatrixInitialize(Atilde_diag);
hypre_CSRMatrixInitialize(Atilde_offd);
if (num_nonzeros_diag)
{
Atilde_diag_data = hypre_CSRMatrixData(Atilde_diag);
Atilde_diag_j = hypre_CSRMatrixJ(Atilde_diag);
}
Atilde_diag_i = hypre_CSRMatrixI(Atilde_diag);
if (num_nonzeros_offd)
{
Atilde_offd_data = hypre_CSRMatrixData(Atilde_offd);
Atilde_offd_j = hypre_CSRMatrixJ(Atilde_offd);
}
Atilde_offd_i = hypre_CSRMatrixI(Atilde_offd);
}
if (num_rows_L) D_data = hypre_CTAlloc(HYPRE_Real, num_rows_L, HYPRE_MEMORY_HOST);
if (send_data_L)
{
L_send_map_elmts = hypre_CTAlloc(HYPRE_Int, send_data_L, HYPRE_MEMORY_HOST);
buf_data = hypre_CTAlloc(HYPRE_Real, send_data_L, HYPRE_MEMORY_HOST);
}
if (num_cols_offd_L)
{
D_data_offd = hypre_CTAlloc(HYPRE_Real, num_cols_offd_L, HYPRE_MEMORY_HOST);
/*L_col_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd_L);*/
remap = hypre_CTAlloc(HYPRE_Int, num_cols_offd_L, HYPRE_MEMORY_HOST);
}
Rtilde = hypre_CTAlloc(hypre_ParVector, 1, HYPRE_MEMORY_HOST);
Rtilde_local = hypre_SeqVectorCreate(num_rows_L);
hypre_SeqVectorInitialize(Rtilde_local);
hypre_ParVectorLocalVector(Rtilde) = Rtilde_local;
hypre_ParVectorOwnsData(Rtilde) = 1;
Xtilde = hypre_CTAlloc(hypre_ParVector, 1, HYPRE_MEMORY_HOST);
Xtilde_local = hypre_SeqVectorCreate(num_rows_L);
hypre_SeqVectorInitialize(Xtilde_local);
hypre_ParVectorLocalVector(Xtilde) = Xtilde_local;
hypre_ParVectorOwnsData(Xtilde) = 1;
x_data = hypre_VectorData(hypre_ParVectorLocalVector(Xtilde));
r_data = hypre_VectorData(hypre_ParVectorLocalVector(Rtilde));
cnt = 0;
cnt_level = 0;
cnt_diag = 0;
cnt_offd = 0;
cnt_row = 1;
L_diag_i[0] = 0;
L_offd_i[0] = 0;
if (ns > 1)
{
A_cnt_diag = 0;
A_cnt_offd = 0;
Atilde_diag_i[0] = 0;
Atilde_offd_i[0] = 0;
}
for (level=addlvl; level < add_end; level++)
{
row_start = level_start[cnt_level];
if (level != 0)
{
tmp_data = hypre_VectorData(hypre_ParVectorLocalVector(F_array[level]));
if (tmp_data)
{
hypre_TFree(tmp_data, hypre_VectorMemoryLocation(hypre_ParVectorLocalVector(F_array[level])));
}
hypre_VectorData(hypre_ParVectorLocalVector(F_array[level])) = &r_data[row_start];
hypre_VectorOwnsData(hypre_ParVectorLocalVector(F_array[level])) = 0;
tmp_data = hypre_VectorData(hypre_ParVectorLocalVector(U_array[level]));
if (tmp_data)
{
hypre_TFree(tmp_data, hypre_VectorMemoryLocation(hypre_ParVectorLocalVector(U_array[level])));
}
hypre_VectorData(hypre_ParVectorLocalVector(U_array[level])) = &x_data[row_start];
hypre_VectorOwnsData(hypre_ParVectorLocalVector(U_array[level])) = 0;
}
cnt_level++;
start_diag = L_diag_i[cnt_row-1];
start_offd = L_offd_i[cnt_row-1];
A_tmp = A_array[level];
A_tmp_diag = hypre_ParCSRMatrixDiag(A_tmp);
A_tmp_offd = hypre_ParCSRMatrixOffd(A_tmp);
comm_pkg = hypre_ParCSRMatrixCommPkg(A_tmp);
A_tmp_diag_i = hypre_CSRMatrixI(A_tmp_diag);
A_tmp_offd_i = hypre_CSRMatrixI(A_tmp_offd);
A_tmp_diag_j = hypre_CSRMatrixJ(A_tmp_diag);
A_tmp_offd_j = hypre_CSRMatrixJ(A_tmp_offd);
A_tmp_diag_data = hypre_CSRMatrixData(A_tmp_diag);
A_tmp_offd_data = hypre_CSRMatrixData(A_tmp_offd);
num_rows_tmp = hypre_CSRMatrixNumRows(A_tmp_diag);
if (comm_pkg)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg);
recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg);
send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg);
recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg);
}
else
{
num_sends = 0;
num_recvs = 0;
}
/* Compute new combined communication package */
for (i=0; i < num_sends; i++)
{
this_proc = hypre_BinarySearch(L_send_procs,send_procs[i],num_sends_L);
indx = L_send_ptr[this_proc];
for (j=send_map_starts[i]; j < send_map_starts[i+1]; j++)
{
L_send_map_elmts[indx++] = row_start + send_map_elmts[j];
}
L_send_ptr[this_proc] = indx;
}
cnt_map = 0;
for (i = 0; i < num_recvs; i++)
{
this_proc = hypre_BinarySearch(L_recv_procs,recv_procs[i],num_recvs_L);
indx = L_recv_ptr[this_proc];
for (j=recv_vec_starts[i]; j < recv_vec_starts[i+1]; j++)
{
remap[cnt_map++] = indx++;
}
L_recv_ptr[this_proc] = indx;
}
/* Compute Lambda */
if (add_rlx == 0)
{
/*HYPRE_Real rlx_wt = relax_weight[level];*/
#ifdef HYPRE_USING_OPENMP
#pragma omp for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < num_rows_tmp; i++)
{
D_data[i] = add_rlx_wt/A_tmp_diag_data[A_tmp_diag_i[i]];
L_diag_i[cnt_row+i] = start_diag + A_tmp_diag_i[i+1];
L_offd_i[cnt_row+i] = start_offd + A_tmp_offd_i[i+1];
}
if (ns > 1)
for (i=0; i < num_rows_tmp; i++)
{
Atilde_diag_i[cnt_row+i] = start_diag + A_tmp_diag_i[i+1];
Atilde_offd_i[cnt_row+i] = start_offd + A_tmp_offd_i[i+1];
}
}
else
{
l1_norms = l1_norms_ptr[level];
#ifdef HYPRE_USING_OPENMP
#pragma omp for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < num_rows_tmp; i++)
{
D_data[i] = 1.0 / hypre_VectorData(l1_norms)[i];
L_diag_i[cnt_row+i] = start_diag + A_tmp_diag_i[i+1];
L_offd_i[cnt_row+i] = start_offd + A_tmp_offd_i[i+1];
}
if (ns > 1)
{
for (i=0; i < num_rows_tmp; i++)
{
Atilde_diag_i[cnt_row+i] = start_diag + A_tmp_diag_i[i+1];
Atilde_offd_i[cnt_row+i] = start_offd + A_tmp_offd_i[i+1];
}
}
}
if (num_procs > 1)
{
index = 0;
for (i=0; i < num_sends; i++)
{
start = send_map_starts[i];
for (j=start; j < send_map_starts[i+1]; j++)
buf_data[index++] = D_data[send_map_elmts[j]];
}
comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg,
buf_data, D_data_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
for (i = 0; i < num_rows_tmp; i++)
{
j_indx = A_tmp_diag_i[i];
if (ns > 1)
{
Atilde_diag_data[A_cnt_diag] = A_tmp_diag_data[j_indx];
Atilde_diag_j[A_cnt_diag++] = i+row_start;
}
L_diag_data[cnt_diag] = (2.0 - A_tmp_diag_data[j_indx]*D_data[i])*D_data[i];
L_diag_j[cnt_diag++] = i+row_start;
for (j=A_tmp_diag_i[i]+1; j < A_tmp_diag_i[i+1]; j++)
{
j_indx = A_tmp_diag_j[j];
L_diag_data[cnt_diag] = (- A_tmp_diag_data[j]*D_data[j_indx])*D_data[i];
L_diag_j[cnt_diag++] = j_indx+row_start;
}
for (j=A_tmp_offd_i[i]; j < A_tmp_offd_i[i+1]; j++)
{
j_indx = A_tmp_offd_j[j];
L_offd_data[cnt_offd] = (- A_tmp_offd_data[j]*D_data_offd[j_indx])*D_data[i];
L_offd_j[cnt_offd++] = remap[j_indx];
}
if (ns > 1)
{
for (j=A_tmp_diag_i[i]+1; j < A_tmp_diag_i[i+1]; j++)
{
j_indx = A_tmp_diag_j[j];
Atilde_diag_data[A_cnt_diag] = A_tmp_diag_data[j];
Atilde_diag_j[A_cnt_diag++] = j_indx+row_start;
}
for (j=A_tmp_offd_i[i]; j < A_tmp_offd_i[i+1]; j++)
{
j_indx = A_tmp_offd_j[j];
Atilde_offd_data[A_cnt_offd] = A_tmp_offd_data[j];
Atilde_offd_j[A_cnt_offd++] = remap[j_indx];
}
}
}
cnt_row += num_rows_tmp;
}
if (L_send_ptr)
{
for (i=num_sends_L-1; i > 0; i--)
L_send_ptr[i] = L_send_ptr[i-1];
L_send_ptr[0] = 0;
}
else
L_send_ptr = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST);
if (L_recv_ptr)
{
for (i=num_recvs_L-1; i > 0; i--)
L_recv_ptr[i] = L_recv_ptr[i-1];
L_recv_ptr[0] = 0;
}
else
L_recv_ptr = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST);
L_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
hypre_ParCSRCommPkgNumRecvs(L_comm_pkg) = num_recvs_L;
hypre_ParCSRCommPkgNumSends(L_comm_pkg) = num_sends_L;
hypre_ParCSRCommPkgRecvProcs(L_comm_pkg) = L_recv_procs;
hypre_ParCSRCommPkgSendProcs(L_comm_pkg) = L_send_procs;
hypre_ParCSRCommPkgRecvVecStarts(L_comm_pkg) = L_recv_ptr;
hypre_ParCSRCommPkgSendMapStarts(L_comm_pkg) = L_send_ptr;
hypre_ParCSRCommPkgSendMapElmts(L_comm_pkg) = L_send_map_elmts;
hypre_ParCSRCommPkgComm(L_comm_pkg) = comm;
Lambda = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixDiag(Lambda) = L_diag;
hypre_ParCSRMatrixOffd(Lambda) = L_offd;
hypre_ParCSRMatrixCommPkg(Lambda) = L_comm_pkg;
hypre_ParCSRMatrixComm(Lambda) = comm;
hypre_ParCSRMatrixOwnsData(Lambda) = 1;
if (ns > 1)
{
/*hypre_ParCSRCommPkg *A_comm_pkg = NULL;
HYPRE_Int *A_recv_ptr = NULL;
HYPRE_Int *A_send_ptr = NULL;
HYPRE_Int *A_recv_procs = NULL;
HYPRE_Int *A_send_procs = NULL;
HYPRE_Int *A_send_map_elmts = NULL;
A_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
A_recv_ptr = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST);
A_send_ptr = hypre_CTAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST);
A_recv_procs = hypre_CTAlloc(HYPRE_Int, num_recvs_L, HYPRE_MEMORY_HOST);
A_send_procs = hypre_CTAlloc(HYPRE_Int, num_sends_L, HYPRE_MEMORY_HOST);
A_send_map_elmts = hypre_CTAlloc(HYPRE_Int, L_send_ptr[num_sends_L], HYPRE_MEMORY_HOST);
for (i=0; i<num_recvs_L+1; i++)
A_recv_ptr[i] = L_recv_ptr[i];
for (i=0; i<num_sends_L+1; i++)
A_send_ptr[i] = L_send_ptr[i];
for (i=0; i<num_recvs_L; i++)
A_recv_procs[i] = L_recv_procs[i];
for (i=0; i<num_sends_L; i++)
A_send_procs[i] = L_send_procs[i];
for (i=0; i < L_send_ptr[num_sends_L]; i++)
A_send_map_elmts[i] = L_send_map_elmts[i];
hypre_ParCSRCommPkgNumRecvs(A_comm_pkg) = num_recvs_L;
hypre_ParCSRCommPkgNumSends(A_comm_pkg) = num_sends_L;
hypre_ParCSRCommPkgRecvProcs(A_comm_pkg) = A_recv_procs;
hypre_ParCSRCommPkgSendProcs(A_comm_pkg) = A_send_procs;
hypre_ParCSRCommPkgRecvVecStarts(A_comm_pkg) = A_recv_ptr;
hypre_ParCSRCommPkgSendMapStarts(A_comm_pkg) = A_send_ptr;
hypre_ParCSRCommPkgSendMapElmts(A_comm_pkg) = A_send_map_elmts;
hypre_ParCSRCommPkgComm(A_comm_pkg) = comm; */
Atilde = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixDiag(Atilde) = Atilde_diag;
hypre_ParCSRMatrixOffd(Atilde) = Atilde_offd;
hypre_ParCSRMatrixCommPkg(Atilde) = L_comm_pkg;
hypre_ParCSRMatrixComm(Atilde) = comm;
hypre_ParCSRMatrixOwnsData(Atilde) = 1;
hypre_ParAMGDataAtilde(amg_data) = Atilde;
}
hypre_ParAMGDataLambda(amg_data) = Lambda;
hypre_ParAMGDataRtilde(amg_data) = Rtilde;
hypre_ParAMGDataXtilde(amg_data) = Xtilde;
hypre_TFree(D_data_offd, HYPRE_MEMORY_HOST);
hypre_TFree(D_data, HYPRE_MEMORY_HOST);
if (num_procs > 1) hypre_TFree(buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(remap, HYPRE_MEMORY_HOST);
hypre_TFree(buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(level_start, HYPRE_MEMORY_HOST);
return Solve_err_flag;
}
/*--------------------------------------------------------------------------
 * hypre_CreateDinv
 *
 * Sets up the data used by the "simple" additive AMG variant:
 *   - D_inv  : diagonal scaling factors concatenated over all additive
 *              levels.  Either add_rlx_wt / a_ii (weighted Jacobi, when
 *              add_rlx == 0) or 1 / l1_norm(row_i) otherwise.
 *   - Rtilde, Xtilde : composite residual/solution vectors spanning all
 *              additive levels; on every additive level except level 0 the
 *              local vectors in F_array/U_array are re-pointed (aliased)
 *              into slices of these composite vectors.
 *
 * Results are stored into amg_data; returns Solve_err_flag (always 0 here).
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_CreateDinv(void *amg_vdata)
{
   hypre_ParAMGData *amg_data = (hypre_ParAMGData*) amg_vdata;

   /* Data Structure variables */
   hypre_ParCSRMatrix **A_array;
   hypre_ParVector **F_array;
   hypre_ParVector **U_array;
   hypre_ParCSRMatrix *A_tmp;
   hypre_CSRMatrix *A_tmp_diag;
   hypre_ParVector *Xtilde;
   hypre_ParVector *Rtilde;
   hypre_Vector *Xtilde_local;
   hypre_Vector *Rtilde_local;
   HYPRE_Real *x_data;
   HYPRE_Real *r_data;
   HYPRE_Real *tmp_data;
   HYPRE_Real *D_inv = NULL;
   /*HYPRE_Real *relax_weight = NULL;
   HYPRE_Real relax_type;*/
   HYPRE_Int addlvl;
   HYPRE_Int num_levels;
   HYPRE_Int num_rows_L;
   HYPRE_Int num_rows_tmp;
   HYPRE_Int level, i;
   HYPRE_Int add_rlx;
   HYPRE_Real add_rlx_wt;
   HYPRE_Int add_last_lvl, add_end;

   /* Local variables */
   HYPRE_Int Solve_err_flag = 0;
   hypre_Vector **l1_norms_ptr = NULL;
   hypre_Vector *l1_norms;
   /* running row offset of the current level within the concatenated
      vectors and within D_inv */
   HYPRE_Int l1_start;

   /* Acquire data and allocate storage */
   A_array = hypre_ParAMGDataAArray(amg_data);
   F_array = hypre_ParAMGDataFArray(amg_data);
   U_array = hypre_ParAMGDataUArray(amg_data);
   /* first level that takes part in the additive ("simple") cycle */
   addlvl = hypre_ParAMGDataSimple(amg_data);
   num_levels = hypre_ParAMGDataNumLevels(amg_data);
   add_rlx_wt = hypre_ParAMGDataAddRelaxWt(amg_data);
   add_rlx = hypre_ParAMGDataAddRelaxType(amg_data);
   add_last_lvl = hypre_ParAMGDataAddLastLvl(amg_data);
   /*relax_weight = hypre_ParAMGDataRelaxWeight(amg_data);
   relax_type = hypre_ParAMGDataGridRelaxType(amg_data)[1];*/
   l1_norms_ptr = hypre_ParAMGDataL1Norms(amg_data);
   /* smooth_option = hypre_ParAMGDataSmoothOption(amg_data); */

   /* add_end is the exclusive upper bound on additive levels; -1 means
      "through the coarsest level".
      NOTE(review): the companion routine above computes add_end as
      add_last_lvl+1 when add_last_lvl != -1 -- confirm this off-by-one
      difference between the two routines is intentional. */
   if (add_last_lvl == -1 ) add_end = num_levels;
   else add_end = add_last_lvl;

   /* total number of local rows over all additive levels */
   num_rows_L  = 0;
   for (i=addlvl; i < add_end; i++)
   {
      A_tmp = A_array[i];
      A_tmp_diag = hypre_ParCSRMatrixDiag(A_tmp);
      num_rows_tmp = hypre_CSRMatrixNumRows(A_tmp_diag);
      num_rows_L += num_rows_tmp;
   }

   /* Composite residual vector.  The hypre_ParVector shell comes from
      CTAlloc (zero-initialized); only the local vector is filled in. */
   Rtilde = hypre_CTAlloc(hypre_ParVector, 1, HYPRE_MEMORY_HOST);
   Rtilde_local = hypre_SeqVectorCreate(num_rows_L);
   hypre_SeqVectorInitialize(Rtilde_local);
   hypre_ParVectorLocalVector(Rtilde) = Rtilde_local;
   hypre_ParVectorOwnsData(Rtilde) = 1;

   /* Composite solution vector, same layout as Rtilde. */
   Xtilde = hypre_CTAlloc(hypre_ParVector, 1, HYPRE_MEMORY_HOST);
   Xtilde_local = hypre_SeqVectorCreate(num_rows_L);
   hypre_SeqVectorInitialize(Xtilde_local);
   hypre_ParVectorLocalVector(Xtilde) = Xtilde_local;
   hypre_ParVectorOwnsData(Xtilde) = 1;

   x_data = hypre_VectorData(hypre_ParVectorLocalVector(Xtilde));
   r_data = hypre_VectorData(hypre_ParVectorLocalVector(Rtilde));
   D_inv = hypre_CTAlloc(HYPRE_Real, num_rows_L, HYPRE_MEMORY_HOST);
   l1_start = 0;
   for (level=addlvl; level < add_end; level++)
   {
      if (level != 0)
      {
         /* Alias this level's rhs/solution into the composite vectors:
            free the old data, point the local vector at the slice starting
            at l1_start, and clear OwnsData so the shared slice is not
            freed again when F_array/U_array are destroyed. */
         tmp_data = hypre_VectorData(hypre_ParVectorLocalVector(F_array[level]));
         if (tmp_data)
         {
            hypre_TFree(tmp_data, hypre_VectorMemoryLocation(hypre_ParVectorLocalVector(F_array[level])));
         }
         hypre_VectorData(hypre_ParVectorLocalVector(F_array[level])) = &r_data[l1_start];
         hypre_VectorOwnsData(hypre_ParVectorLocalVector(F_array[level])) = 0;
         tmp_data = hypre_VectorData(hypre_ParVectorLocalVector(U_array[level]));
         if (tmp_data)
         {
            hypre_TFree(tmp_data, hypre_VectorMemoryLocation(hypre_ParVectorLocalVector(U_array[level])));
         }
         hypre_VectorData(hypre_ParVectorLocalVector(U_array[level])) = &x_data[l1_start];
         hypre_VectorOwnsData(hypre_ParVectorLocalVector(U_array[level])) = 0;
      }
      A_tmp = A_array[level];
      A_tmp_diag = hypre_ParCSRMatrixDiag(A_tmp);
      num_rows_tmp = hypre_CSRMatrixNumRows(A_tmp_diag);
      if (add_rlx == 0)
      {
         /* Weighted Jacobi: D_inv = add_rlx_wt / a_ii.  The code reads the
            first stored entry of each row of the diag part as the diagonal
            coefficient (hypre's diag CSR stores the diagonal first). */
         /*HYPRE_Real rlx_wt = relax_weight[level];*/
         HYPRE_Int *A_tmp_diag_i = hypre_CSRMatrixI(A_tmp_diag);
         HYPRE_Real *A_tmp_diag_data = hypre_CSRMatrixData(A_tmp_diag);
/* NOTE(review): orphaned 'omp for' -- no enclosing parallel region is
   visible here, so this binds to a caller's region if any; confirm. */
#ifdef HYPRE_USING_OPENMP
#pragma omp for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i=0; i < num_rows_tmp; i++)
         {
            D_inv[l1_start+i] = add_rlx_wt/A_tmp_diag_data[A_tmp_diag_i[i]];
         }
      }
      else
      {
         /* l1-Jacobi style scaling: D_inv = 1 / l1_norm(row).  Assumes the
            l1 norms for this level were computed during setup -- verify
            l1_norms_ptr[level] is non-NULL for this relax type. */
         l1_norms = l1_norms_ptr[level];
#ifdef HYPRE_USING_OPENMP
#pragma omp for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i=0; i < num_rows_tmp; i++)
         {
            D_inv[l1_start+i] = 1.0 / hypre_VectorData(l1_norms)[i];
         }
      }
      l1_start += num_rows_tmp;
   }
   /* hand everything to the AMG data structure (it takes ownership) */
   hypre_ParAMGDataDinv(amg_data) = D_inv;
   hypre_ParAMGDataRtilde(amg_data) = Rtilde;
   hypre_ParAMGDataXtilde(amg_data) = Xtilde;
   return Solve_err_flag;
}
|
GB_binop__isle_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isle_uint32)
// A.*B function (eWiseMult): GB (_AemultB_08__isle_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__isle_uint32)
// A.*B function (eWiseMult): GB (_AemultB_04__isle_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isle_uint32)
// A*D function (colscale): GB (_AxD__isle_uint32)
// D*A function (rowscale): GB (_DxB__isle_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__isle_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__isle_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isle_uint32)
// C=scalar+B GB (_bind1st__isle_uint32)
// C=scalar+B' GB (_bind1st_tran__isle_uint32)
// C=A+scalar GB (_bind2nd__isle_uint32)
// C=A'+scalar GB (_bind2nd_tran__isle_uint32)
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x <= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLE || GxB_NO_UINT32 || GxB_NO_ISLE_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Here "+" is this file's operator: cij = (aij <= bij) on uint32_t.
// The numeric loop lives in the included template, specialized by the
// GB_* macros defined earlier in this file.
GrB_Info GB (_Cdense_ewise3_noaccum__isle_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    // operator/type compiled out; caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// C(i,j) = (C(i,j) <= B(i,j)) for entries present in B; C is dense.
// B_ek_slicing partitions B's entries across B_ntasks tasks.
GrB_Info GB (_Cdense_accumB__isle_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

// C(i,j) = (C(i,j) <= b) for all entries of the dense matrix C.
GrB_Info GB (_Cdense_accumb__isle_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,     // the scalar b, passed as untyped bytes
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the braced block above always returns
    // (auto-generated-code artifact, kept as-is)
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

// C(i,j) = (A(i,j) <= D(j,j)): each column of A is combined with the
// corresponding diagonal entry of D.  The *_is_pattern flags tell the
// template whether the values (vs only the structure) are needed.
GrB_Info GB (_AxD__isle_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

// C(i,j) = (D(i,i) <= B(i,j)): each row of B is combined with the
// corresponding diagonal entry of D.
GrB_Info GB (_DxB__isle_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

// Set union: C has an entry wherever A or B does; where both are present,
// cij = (aij <= bij).  M is an optional mask (Mask_comp complements it,
// Mask_struct uses only its structure).  The C_to_* arrays map C's vectors
// to the corresponding vectors of M/A/B; TaskList drives parallelism.
GrB_Info GB (_AaddB__isle_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // scratch slicings, freed by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

// Set intersection: C has an entry only where both A and B do, with
// cij = (aij <= bij).  C is built as sparse or hypersparse.
GrB_Info GB (_AemultB_08__isle_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

// Specialized eWiseMult for the case where A is sparse/hypersparse and B is
// bitmap/full.  GB_BINOP_FLIP is 0 for this operator (defined earlier in
// this file), so only the unflipped branch below is compiled in.
GrB_Info GB (_AemultB_02__isle_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

// eWiseMult driven by a sparse/hypersparse mask M while both A and B are
// bitmap/full; cij = (aij <= bij) wherever M permits.
GrB_Info GB (_AemultB_04__isle_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

// eWiseMult producing a bitmap-format C, with optional (possibly
// complemented) mask M.
GrB_Info GB (_AemultB_bitmap__isle_uint32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Cx [p] = (x <= Bx [p]) for every entry present in B.  Bb is B's bitmap
// (may indicate missing entries); bnz is the number of slots in Bx.
GrB_Info GB (_bind1st__isle_uint32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // apply the operator only where B has an entry
        if (GBB (Bb, p))
        {
            uint32_t bij = GBX (Bx, p, false) ;
            Cx [p] = (x <= bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Cx [p] = (Ax [p] <= y) for every entry present in A.  Ab is A's bitmap
// (may indicate missing entries); anz is the number of slots in Ax.
GrB_Info GB (_bind2nd__isle_uint32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t y = (*((uint32_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip slots where A has no entry
        if (!GBB (Ab, p)) continue ;
        Cx [p] = (GBX (Ax, p, false) <= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c, included below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x <= aij) ; \
}

// C = (x <= A'): bind the scalar to the first argument while transposing A.
GrB_Info GB (_bind1st_tran__isle_uint32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows in this file
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c, included below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij <= y) ; \
}

// C = (A' <= y): bind the scalar to the second argument while transposing A.
GrB_Info GB (_bind2nd_tran__isle_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
wand-view.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% W W AAA N N DDDD %
% W W A A NN N D D %
% W W W AAAAA N N N D D %
% WW WW A A N NN D D %
% W W A A N N DDDD %
% %
% V V IIIII EEEEE W W %
% V V I E W W %
% V V I EEE W W W %
% V V I E WW WW %
% V IIIII EEEEE W W %
% %
% %
% MagickWand Wand View Methods %
% %
% Software Design %
% Cristy %
% March 2003 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickWand/studio.h"
#include "MagickWand/MagickWand.h"
#include "MagickWand/magick-wand-private.h"
#include "MagickWand/wand.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/thread-private.h"
/*
Define declarations.
*/
#define WandViewId "WandView"
/*
Typedef declarations.
*/
struct _WandView
{
// unique wand id, obtained from AcquireWandId()
size_t
id;
// "WandView-<id>" identifier plus a user-settable description string
char
name[MagickPathExtent],
*description;
// region of the image this view iterates over
RectangleInfo
extent;
// the wand this view observes (not owned by the view)
MagickWand
*wand;
// private clone of the image pixels used by the pixel-wand accessors
Image
*image;
// cache view used for all pixel get/set traffic
CacheView
*view;
// per-thread arrays of pixel wands: pixel_wands[thread_id][x]
PixelWand
***pixel_wands;
// exception sink owned by the view
ExceptionInfo
*exception;
// when MagickTrue, log wand events
MagickBooleanType
debug;
// MagickWandSignature while alive; inverted on destroy
size_t
signature;
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e W a n d V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneWandView() makes a copy of the specified wand view.
%
% The format of the CloneWandView method is:
%
% WandView *CloneWandView(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
/*
  CloneWandView() makes a deep copy of the given wand view: the image,
  cache view, exception, and per-thread pixel-wand tables are all cloned.

  Bug fixes versus the previous revision:
    o  clone_view->pixel_wands was never allocated before the copy loop
       indexed it, dereferencing the NULL pointer left by memset().
    o  clone_view->wand was never copied, so every iterator run on the
       clone would dereference a NULL wand.  The wand is shared (the view
       does not own it), matching NewWandView() semantics.
*/
WandExport WandView *CloneWandView(const WandView *wand_view)
{
  size_t
    number_threads;

  WandView
    *clone_view;

  register ssize_t
    i;

  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  if (wand_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand_view->name);
  clone_view=(WandView *) AcquireMagickMemory(sizeof(*clone_view));
  if (clone_view == (WandView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      wand_view->name);
  (void) memset(clone_view,0,sizeof(*clone_view));
  clone_view->id=AcquireWandId();
  (void) FormatLocaleString(clone_view->name,MagickPathExtent,"%s-%.20g",
    WandViewId,(double) clone_view->id);
  clone_view->description=ConstantString(wand_view->description);
  clone_view->wand=wand_view->wand;  /* shared; the view does not own it */
  clone_view->image=CloneImage(wand_view->image,0,0,MagickTrue,
    wand_view->exception);
  clone_view->view=CloneCacheView(wand_view->view);
  clone_view->extent=wand_view->extent;
  clone_view->exception=AcquireExceptionInfo();
  InheritException(clone_view->exception,wand_view->exception);
  /*
    Allocate the per-thread pixel-wand table before cloning into it; the
    loop bound mirrors DestroyPixelsThreadSet().
  */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  clone_view->pixel_wands=(PixelWand ***) AcquireQuantumMemory(number_threads,
    sizeof(*clone_view->pixel_wands));
  if (clone_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      wand_view->name);
  (void) memset(clone_view->pixel_wands,0,number_threads*
    sizeof(*clone_view->pixel_wands));
  for (i=0; i < (ssize_t) number_threads; i++)
    clone_view->pixel_wands[i]=ClonePixelWands((const PixelWand **)
      wand_view->pixel_wands[i],wand_view->extent.width);
  clone_view->debug=wand_view->debug;
  if (clone_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_view->name);
  clone_view->signature=MagickWandSignature;
  return(clone_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y W a n d V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyWandView() deallocates memory associated with a wand view.
%
% The format of the DestroyWandView method is:
%
% WandView *DestroyWandView(WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
/*
  Release every per-thread pixel-wand array (each of length number_wands)
  and then the table itself; always returns NULL for caller assignment.
*/
static PixelWand ***DestroyPixelsThreadSet(PixelWand ***pixel_wands,
  const size_t number_wands)
{
  register ssize_t
    n;

  ssize_t
    number_threads;

  assert(pixel_wands != (PixelWand ***) NULL);
  number_threads=(ssize_t) GetMagickResourceLimit(ThreadResource);
  for (n=0; n < number_threads; n++)
  {
    /* slots may be NULL when AcquirePixelsThreadSet failed part-way */
    if (pixel_wands[n] == (PixelWand **) NULL)
      continue;
    pixel_wands[n]=DestroyPixelWands(pixel_wands[n],number_wands);
  }
  return((PixelWand ***) RelinquishMagickMemory(pixel_wands));
}
/*
  DestroyWandView() releases all resources owned by the view (pixel-wand
  table, cloned image, cache view, exception, wand id) and returns NULL.
  The associated MagickWand is NOT destroyed; the view never owned it.
*/
WandExport WandView *DestroyWandView(WandView *wand_view)
{
assert(wand_view != (WandView *) NULL);
assert(wand_view->signature == MagickWandSignature);
wand_view->pixel_wands=DestroyPixelsThreadSet(wand_view->pixel_wands,
wand_view->extent.width);
wand_view->image=DestroyImage(wand_view->image);
wand_view->view=DestroyCacheView(wand_view->view);
wand_view->exception=DestroyExceptionInfo(wand_view->exception);
// invalidate the signature before freeing so stale pointers fail the assert
wand_view->signature=(~MagickWandSignature);
RelinquishWandId(wand_view->id);
wand_view=(WandView *) RelinquishMagickMemory(wand_view);
return(wand_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D u p l e x T r a n s f e r W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DuplexTransferWandViewIterator() iterates over three wand views in
% parallel and calls your transfer method for each scanline of the view. The
% source and duplex pixel extent is not confined to the image canvas-- that is
% you can include negative offsets or widths or heights that exceed the image
% dimension. However, the destination wand view is confined to the image
% canvas-- that is no negative offsets or widths or heights that exceed the
% image dimension are permitted.
%
% The callback signature is:
%
% MagickBooleanType DuplexTransferImageViewMethod(const WandView *source,
% const WandView *duplex,WandView *destination,const ssize_t y,
% const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the DuplexTransferWandViewIterator method is:
%
% MagickBooleanType DuplexTransferWandViewIterator(WandView *source,
% WandView *duplex,WandView *destination,
% DuplexTransferWandViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source wand view.
%
% o duplex: the duplex wand view.
%
% o destination: the destination wand view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
*/
/*
  Iterate source+duplex (virtual, read-only) and destination (authentic,
  writable) views one scanline at a time, optionally in parallel, calling
  the user transfer callback per row and syncing the destination back.
  NOTE(review): duplex and destination are dereferenced but never
  NULL-asserted -- confirm callers guarantee non-NULL views.
*/
WandExport MagickBooleanType DuplexTransferWandViewIterator(WandView *source,
WandView *duplex,WandView *destination,DuplexTransferWandViewMethod transfer,
void *context)
{
Image
*destination_image,
*source_image;
MagickBooleanType
status;
MagickOffsetType
progress;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
size_t
height;
#endif
ssize_t
y;
assert(source != (WandView *) NULL);
assert(source->signature == MagickWandSignature);
if (transfer == (DuplexTransferWandViewMethod) NULL)
return(MagickFalse);
source_image=source->wand->images;
destination_image=destination->wand->images;
// the destination is written to, so it must not be pseudo-class
status=SetImageStorageClass(destination_image,DirectClass,
destination->exception);
if (status == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
// extent.height is treated as the exclusive end row (see loop bound below)
height=source->extent.height-source->extent.y;
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(source_image,destination_image,height,1)
#endif
for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
register const Quantum
*magick_restrict duplex_pixels,
*magick_restrict pixels;
register ssize_t
x;
register Quantum
*magick_restrict destination_pixels;
// a failure in any thread makes the remaining rows no-ops
if (status == MagickFalse)
continue;
pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
source->extent.width,1,source->exception);
if (pixels == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
// load the source row into this thread's pixel wands
for (x=0; x < (ssize_t) source->extent.width; x++)
{
PixelSetQuantumPixel(source->image,pixels,source->pixel_wands[id][x]);
pixels+=GetPixelChannels(source->image);
}
duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->extent.x,y,
duplex->extent.width,1,duplex->exception);
if (duplex_pixels == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
// load the duplex row into this thread's pixel wands
for (x=0; x < (ssize_t) duplex->extent.width; x++)
{
PixelSetQuantumPixel(duplex->image,duplex_pixels,
duplex->pixel_wands[id][x]);
duplex_pixels+=GetPixelChannels(duplex->image);
}
destination_pixels=GetCacheViewAuthenticPixels(destination->view,
destination->extent.x,y,destination->extent.width,1,
destination->exception);
if (destination_pixels == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
// pre-load destination wands so the callback sees current pixel values
for (x=0; x < (ssize_t) destination->extent.width; x++)
{
PixelSetQuantumPixel(destination->image,destination_pixels,
destination->pixel_wands[id][x]);
destination_pixels+=GetPixelChannels(destination->image);
}
if (transfer(source,duplex,destination,y,id,context) == MagickFalse)
status=MagickFalse;
// re-acquire the row and write the callback's changes back
// NOTE(review): this second acquire is not NULL-checked before use
destination_pixels=GetCacheViewAuthenticPixels(destination->view,
destination->extent.x,y,destination->extent.width,1,
destination->exception);
for (x=0; x < (ssize_t) destination->extent.width; x++)
{
PixelGetQuantumPixel(destination->image,destination->pixel_wands[id][x],
destination_pixels);
destination_pixels+=GetPixelChannels(destination->image);
}
sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception);
if (sync == MagickFalse)
status=MagickFalse;
if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
// serialize progress updates across threads
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickWand_DuplexTransferWandViewIterator)
#endif
proceed=SetImageProgress(source_image,source->description,progress++,
source->extent.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewException() returns the severity, reason, and description of any
% error that occurs when utilizing a wand view.
%
% The format of the GetWandViewException method is:
%
% char *GetWandViewException(const WandView *wand_view,
% ExceptionType *severity)
%
% A description of each parameter follows:
%
% o wand_view: the pixel wand_view.
%
% o severity: the severity of the error is returned here.
%
*/
/*
  Report the view's pending exception: store its severity in *severity and
  return a newly allocated string combining the localized reason and, in
  parentheses, the localized description.  The caller owns the string.
*/
WandExport char *GetWandViewException(const WandView *wand_view,
  ExceptionType *severity)
{
  char
    *description;

  ExceptionInfo
    *exception;

  assert(wand_view != (const WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  if (wand_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand_view->name);
  assert(severity != (ExceptionType *) NULL);
  exception=wand_view->exception;
  *severity=exception->severity;
  description=(char *) AcquireQuantumMemory(2UL*MagickPathExtent,
    sizeof(*description));
  if (description == (char *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      wand_view->name);
  *description='\0';
  if (exception->reason != (char *) NULL)
    (void) CopyMagickString(description,GetLocaleExceptionMessage(
      exception->severity,exception->reason),MagickPathExtent);
  if (exception->description != (char *) NULL)
    {
      (void) ConcatenateMagickString(description," (",MagickPathExtent);
      (void) ConcatenateMagickString(description,GetLocaleExceptionMessage(
        exception->severity,exception->description),MagickPathExtent);
      (void) ConcatenateMagickString(description,")",MagickPathExtent);
    }
  return(description);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewExtent() returns the wand view extent.
%
% The format of the GetWandViewExtent method is:
%
% RectangleInfo GetWandViewExtent(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
/*
  GetWandViewExtent() returns (by value) the rectangle this view iterates.
*/
WandExport RectangleInfo GetWandViewExtent(const WandView *wand_view)
{
assert(wand_view != (WandView *) NULL);
assert(wand_view->signature == MagickWandSignature);
return(wand_view->extent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewIterator() iterates over the wand view in parallel and calls
% your get method for each scanline of the view. The pixel extent is
% not confined to the image canvas-- that is you can include negative offsets
% or widths or heights that exceed the image dimension. Any updates to
% the pixels in your callback are ignored.
%
% The callback signature is:
%
% MagickBooleanType GetImageViewMethod(const WandView *source,
% const ssize_t y,const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback get method that must be
% executed by a single thread at a time.
%
% The format of the GetWandViewIterator method is:
%
% MagickBooleanType GetWandViewIterator(WandView *source,
% GetWandViewMethod get,void *context)
%
% A description of each parameter follows:
%
% o source: the source wand view.
%
% o get: the get callback method.
%
% o context: the user defined context.
%
*/
/*
  Iterate the source view read-only (virtual pixels), one scanline at a
  time and optionally in parallel, calling the user get callback per row.
  Callback updates to the pixel wands are intentionally NOT synced back.
*/
WandExport MagickBooleanType GetWandViewIterator(WandView *source,
GetWandViewMethod get,void *context)
{
Image
*source_image;
MagickBooleanType
status;
MagickOffsetType
progress;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
size_t
height;
#endif
ssize_t
y;
assert(source != (WandView *) NULL);
assert(source->signature == MagickWandSignature);
if (get == (GetWandViewMethod) NULL)
return(MagickFalse);
source_image=source->wand->images;
status=MagickTrue;
progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
// extent.height is treated as the exclusive end row (see loop bound below)
height=source->extent.height-source->extent.y;
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(source_image,source_image,height,1)
#endif
for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
{
const int
id = GetOpenMPThreadId();
register const Quantum
*pixels;
register ssize_t
x;
// a failure in any thread makes the remaining rows no-ops
if (status == MagickFalse)
continue;
pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
source->extent.width,1,source->exception);
if (pixels == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
// load the row into this thread's pixel wands for the callback
for (x=0; x < (ssize_t) source->extent.width; x++)
{
PixelSetQuantumPixel(source->image,pixels,source->pixel_wands[id][x]);
pixels+=GetPixelChannels(source->image);
}
if (get(source,y,id,context) == MagickFalse)
status=MagickFalse;
if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
// serialize progress updates across threads
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickWand_GetWandViewIterator)
#endif
proceed=SetImageProgress(source_image,source->description,progress++,
source->extent.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewPixels() returns the wand view pixel_wands.
%
% The format of the GetWandViewPixels method is:
%
% PixelWand *GetWandViewPixels(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
/*
  GetWandViewPixels() returns the calling thread's row of pixel wands
  (indexed by the current OpenMP thread id), as populated by the iterator
  methods.  The returned array remains owned by the view.
*/
WandExport PixelWand **GetWandViewPixels(const WandView *wand_view)
{
const int
id = GetOpenMPThreadId();
assert(wand_view != (WandView *) NULL);
assert(wand_view->signature == MagickWandSignature);
return(wand_view->pixel_wands[id]);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w W a n d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewWand() returns the magick wand associated with the wand view.
%
% The format of the GetWandViewWand method is:
%
% MagickWand *GetWandViewWand(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
/*
  GetWandViewWand() returns the (non-owned) wand this view observes.
*/
WandExport MagickWand *GetWandViewWand(const WandView *wand_view)
{
assert(wand_view != (WandView *) NULL);
assert(wand_view->signature == MagickWandSignature);
return(wand_view->wand);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s W a n d V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsWandView() returns MagickTrue if the parameter is verified as a wand
% view object.
%
% The format of the IsWandView method is:
%
% MagickBooleanType IsWandView(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
/*
  IsWandView() verifies a pointer really is a live WandView: non-NULL,
  carrying the wand signature, and named with the "WandView" prefix that
  NewWandView() stamps into the name field.
*/
WandExport MagickBooleanType IsWandView(const WandView *wand_view)
{
  if (wand_view == (const WandView *) NULL)
    return(MagickFalse);
  if (wand_view->signature != MagickWandSignature)
    return(MagickFalse);
  if (LocaleNCompare(wand_view->name,WandViewId,strlen(WandViewId)) != 0)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w W a n d V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewWandView() returns a wand view required for all other methods in the
% Wand View API.
%
% The format of the NewWandView method is:
%
% WandView *NewWandView(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the wand.
%
*/
/*
  Allocate one array of number_wands pixel wands per potential OpenMP
  thread.  On any allocation failure the partially built table is torn
  down via DestroyPixelsThreadSet() and NULL is returned.
*/
static PixelWand ***AcquirePixelsThreadSet(const size_t number_wands)
{
  PixelWand
    ***pixel_wands;

  size_t
    n,
    number_threads;

  number_threads=GetOpenMPMaximumThreads();
  pixel_wands=(PixelWand ***) AcquireQuantumMemory(number_threads,
    sizeof(*pixel_wands));
  if (pixel_wands == (PixelWand ***) NULL)
    return((PixelWand ***) NULL);
  /* zero the table so a partial tear-down can tell used from unused slots */
  (void) memset(pixel_wands,0,number_threads*sizeof(*pixel_wands));
  for (n=0; n < number_threads; n++)
  {
    pixel_wands[n]=NewPixelWands(number_wands);
    if (pixel_wands[n] == (PixelWand **) NULL)
      return(DestroyPixelsThreadSet(pixel_wands,number_wands));
  }
  return(pixel_wands);
}
/*
  NewWandView() builds a view spanning the entire first image of the wand:
  a virtual cache view, a full-image extent, a per-thread pixel-wand table,
  and a private exception sink.
*/
WandExport WandView *NewWandView(MagickWand *wand)
{
  ExceptionInfo
    *exception_info;

  WandView
    *wand_view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == MagickWandSignature);
  wand_view=(WandView *) AcquireMagickMemory(sizeof(*wand_view));
  if (wand_view == (WandView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) memset(wand_view,0,sizeof(*wand_view));
  /* identity: unique id and a "WandView-<id>" name */
  wand_view->id=AcquireWandId();
  (void) FormatLocaleString(wand_view->name,MagickPathExtent,"%s-%.20g",
    WandViewId,(double) wand_view->id);
  wand_view->description=ConstantString("WandView");
  /* the wand must be attached before the cache view dereferences it */
  wand_view->wand=wand;
  exception_info=AcquireExceptionInfo();
  wand_view->view=AcquireVirtualCacheView(wand_view->wand->images,
    exception_info);
  /* the extent covers the whole first image */
  wand_view->extent.width=wand->images->columns;
  wand_view->extent.height=wand->images->rows;
  wand_view->pixel_wands=AcquirePixelsThreadSet(wand_view->extent.width);
  wand_view->exception=exception_info;
  if (wand_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  wand_view->debug=IsEventLogging();
  wand_view->signature=MagickWandSignature;
  return(wand_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w W a n d V i e w E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewWandViewExtent() returns a wand view required for all other methods
% in the Wand View API.
%
% The format of the NewWandViewExtent method is:
%
% WandView *NewWandViewExtent(MagickWand *wand,const ssize_t x,
% const ssize_t y,const size_t width,const size_t height)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o x,y,columns,rows: These values define the perimeter of a extent of
% pixel_wands view.
%
*/
/*
  NewWandViewExtent() builds a view over the given region of the wand's
  first image.

  Bug fix versus the previous revision: AcquireVirtualCacheView() was
  called with wand_view->wand->images BEFORE wand_view->wand was assigned,
  dereferencing the NULL pointer left by memset().  The wand is now
  attached first, matching NewWandView().
*/
WandExport WandView *NewWandViewExtent(MagickWand *wand,const ssize_t x,
  const ssize_t y,const size_t width,const size_t height)
{
  ExceptionInfo
    *exception;

  WandView
    *wand_view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == MagickWandSignature);
  wand_view=(WandView *) AcquireMagickMemory(sizeof(*wand_view));
  if (wand_view == (WandView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) memset(wand_view,0,sizeof(*wand_view));
  wand_view->id=AcquireWandId();
  (void) FormatLocaleString(wand_view->name,MagickPathExtent,"%s-%.20g",
    WandViewId,(double) wand_view->id);
  wand_view->description=ConstantString("WandView");
  /* attach the wand before the cache view dereferences wand->images */
  wand_view->wand=wand;
  exception=AcquireExceptionInfo();
  wand_view->view=AcquireVirtualCacheView(wand_view->wand->images,exception);
  wand_view->extent.width=width;
  wand_view->extent.height=height;
  wand_view->extent.x=x;
  wand_view->extent.y=y;
  wand_view->exception=exception;
  wand_view->pixel_wands=AcquirePixelsThreadSet(wand_view->extent.width);
  if (wand_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  wand_view->debug=IsEventLogging();
  wand_view->signature=MagickWandSignature;
  return(wand_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t W a n d V i e w D e s c r i p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetWandViewDescription() associates a description with an image view.
%
% The format of the SetWandViewDescription method is:
%
% void SetWandViewDescription(WandView *image_view,const char *description)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
% o description: the wand view description.
%
*/
/*
  SetWandViewDescription() associates a description with the view.

  Bug fix versus the previous revision: the previous description string
  (NewWandView() always installs one) was overwritten without being
  released, leaking it on every call.
  NOTE(review): declared MagickExport while every sibling uses WandExport;
  left as-is since the public header may mirror this declaration.
*/
MagickExport void SetWandViewDescription(WandView *wand_view,
  const char *description)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  if (wand_view->description != (char *) NULL)
    wand_view->description=DestroyString(wand_view->description);
  wand_view->description=ConstantString(description);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetWandViewIterator() iterates over the wand view in parallel and calls
% your set method for each scanline of the view. The pixel extent is
% confined to the image canvas-- that is no negative offsets or widths or
% heights that exceed the image dimension. The pixels are initially
% undefined and any settings you make in the callback method are automagically
% synced back to your image.
%
% The callback signature is:
%
% MagickBooleanType SetImageViewMethod(ImageView *destination,
% const ssize_t y,const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback set method that must be
% executed by a single thread at a time.
%
% The format of the SetWandViewIterator method is:
%
% MagickBooleanType SetWandViewIterator(WandView *destination,
% SetWandViewMethod set,void *context)
%
% A description of each parameter follows:
%
% o destination: the wand view.
%
% o set: the set callback method.
%
% o context: the user defined context.
%
*/
/*
  Iterate the destination view writable (authentic pixels), one scanline
  at a time and optionally in parallel.  The callback fills this thread's
  pixel wands from scratch; they are then written to the row and synced.
*/
WandExport MagickBooleanType SetWandViewIterator(WandView *destination,
SetWandViewMethod set,void *context)
{
Image
*destination_image;
MagickBooleanType
status;
MagickOffsetType
progress;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
size_t
height;
#endif
ssize_t
y;
assert(destination != (WandView *) NULL);
assert(destination->signature == MagickWandSignature);
if (set == (SetWandViewMethod) NULL)
return(MagickFalse);
destination_image=destination->wand->images;
// the destination is written to, so it must not be pseudo-class
status=SetImageStorageClass(destination_image,DirectClass,
destination->exception);
if (status == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
// extent.height is treated as the exclusive end row (see loop bound below)
height=destination->extent.height-destination->extent.y;
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(destination_image,destination_image,height,1)
#endif
for (y=destination->extent.y; y < (ssize_t) destination->extent.height; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
register ssize_t
x;
register Quantum
*magick_restrict pixels;
// a failure in any thread makes the remaining rows no-ops
if (status == MagickFalse)
continue;
pixels=GetCacheViewAuthenticPixels(destination->view,destination->extent.x,
y,destination->extent.width,1,destination->exception);
if (pixels == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
// the callback populates this thread's pixel wands for the row
if (set(destination,y,id,context) == MagickFalse)
status=MagickFalse;
// copy the wands back into the authentic row
for (x=0; x < (ssize_t) destination->extent.width; x++)
{
PixelGetQuantumPixel(destination->image,destination->pixel_wands[id][x],
pixels);
pixels+=GetPixelChannels(destination->image);
}
sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception);
if (sync == MagickFalse)
status=MagickFalse;
if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
// serialize progress updates across threads
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickWand_SetWandViewIterator)
#endif
proceed=SetImageProgress(destination_image,destination->description,
progress++,destination->extent.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f e r W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransferWandViewIterator() iterates over two wand views in parallel and
% calls your transfer method for each scanline of the view. The source pixel
% extent is not confined to the image canvas-- that is you can include
% negative offsets or widths or heights that exceed the image dimension.
% However, the destination wand view is confined to the image canvas-- that
% is no negative offsets or widths or heights that exceed the image dimension
% are permitted.
%
% The callback signature is:
%
% MagickBooleanType TransferImageViewMethod(const WandView *source,
% WandView *destination,const ssize_t y,const int thread_id,
% void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the TransferWandViewIterator method is:
%
% MagickBooleanType TransferWandViewIterator(WandView *source,
% WandView *destination,TransferWandViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source wand view.
%
% o destination: the destination wand view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
*/
/*
  Iterate the source view read-only and the destination view writable,
  one scanline at a time and optionally in parallel, calling the user
  transfer callback per row and syncing the destination back.
  NOTE(review): destination is dereferenced but never NULL-asserted --
  confirm callers guarantee a non-NULL view.
*/
WandExport MagickBooleanType TransferWandViewIterator(WandView *source,
WandView *destination,TransferWandViewMethod transfer,void *context)
{
Image
*destination_image,
*source_image;
MagickBooleanType
status;
MagickOffsetType
progress;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
size_t
height;
#endif
ssize_t
y;
assert(source != (WandView *) NULL);
assert(source->signature == MagickWandSignature);
if (transfer == (TransferWandViewMethod) NULL)
return(MagickFalse);
source_image=source->wand->images;
destination_image=destination->wand->images;
// the destination is written to, so it must not be pseudo-class
status=SetImageStorageClass(destination_image,DirectClass,
destination->exception);
if (status == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
// extent.height is treated as the exclusive end row (see loop bound below)
height=source->extent.height-source->extent.y;
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(source_image,destination_image,height,1)
#endif
for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
register const Quantum
*magick_restrict pixels;
register ssize_t
x;
register Quantum
*magick_restrict destination_pixels;
// a failure in any thread makes the remaining rows no-ops
if (status == MagickFalse)
continue;
pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
source->extent.width,1,source->exception);
if (pixels == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
// load the source row into this thread's pixel wands
for (x=0; x < (ssize_t) source->extent.width; x++)
{
PixelSetQuantumPixel(source->image,pixels,source->pixel_wands[id][x]);
pixels+=GetPixelChannels(source->image);
}
destination_pixels=GetCacheViewAuthenticPixels(destination->view,
destination->extent.x,y,destination->extent.width,1,
destination->exception);
if (destination_pixels == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
// pre-load destination wands so the callback sees current pixel values
for (x=0; x < (ssize_t) destination->extent.width; x++)
{
PixelSetQuantumPixel(destination->image,destination_pixels,
destination->pixel_wands[id][x]);
destination_pixels+=GetPixelChannels(destination->image);
}
if (transfer(source,destination,y,id,context) == MagickFalse)
status=MagickFalse;
// re-acquire the row and write the callback's changes back
// NOTE(review): this second acquire is not NULL-checked before use
destination_pixels=GetCacheViewAuthenticPixels(destination->view,
destination->extent.x,y,destination->extent.width,1,
destination->exception);
for (x=0; x < (ssize_t) destination->extent.width; x++)
{
PixelGetQuantumPixel(destination->image,destination->pixel_wands[id][x],
destination_pixels);
destination_pixels+=GetPixelChannels(destination->image);
}
sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception);
if (sync == MagickFalse)
status=MagickFalse;
if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
// serialize progress updates across threads
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickWand_TransferWandViewIterator)
#endif
proceed=SetImageProgress(source_image,source->description,progress++,
source->extent.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U p d a t e W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UpdateWandViewIterator() iterates over the wand view in parallel and calls
% your update method for each scanline of the view. The pixel extent is
% confined to the image canvas-- that is no negative offsets or widths or
% heights that exceed the image dimension are permitted. Updates to pixels
% in your callback are automagically synced back to the image.
%
% The callback signature is:
%
% MagickBooleanType UpdateImageViewMethod(WandView *source,const ssize_t y,
% const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback update method that must be
% executed by a single thread at a time.
%
% The format of the UpdateWandViewIterator method is:
%
% MagickBooleanType UpdateWandViewIterator(WandView *source,
% UpdateWandViewMethod update,void *context)
%
% A description of each parameter follows:
%
% o source: the source wand view.
%
% o update: the update callback method.
%
% o context: the user defined context.
%
*/
/*
  Iterate the source view read-write (authentic pixels), one scanline at
  a time and optionally in parallel: the row is loaded into this thread's
  pixel wands, the update callback mutates them, and the wands are then
  written back to the row and synced to the image.
*/
WandExport MagickBooleanType UpdateWandViewIterator(WandView *source,
UpdateWandViewMethod update,void *context)
{
Image
*source_image;
MagickBooleanType
status;
MagickOffsetType
progress;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
size_t
height;
#endif
ssize_t
y;
assert(source != (WandView *) NULL);
assert(source->signature == MagickWandSignature);
if (update == (UpdateWandViewMethod) NULL)
return(MagickFalse);
source_image=source->wand->images;
// the image is written to, so it must not be pseudo-class
status=SetImageStorageClass(source_image,DirectClass,source->exception);
if (status == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
// extent.height is treated as the exclusive end row (see loop bound below)
height=source->extent.height-source->extent.y;
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(source_image,source_image,height,1)
#endif
for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
register ssize_t
x;
register Quantum
*magick_restrict pixels;
// a failure in any thread makes the remaining rows no-ops
if (status == MagickFalse)
continue;
pixels=GetCacheViewAuthenticPixels(source->view,source->extent.x,y,
source->extent.width,1,source->exception);
if (pixels == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
// load the row into this thread's pixel wands
for (x=0; x < (ssize_t) source->extent.width; x++)
{
PixelSetQuantumPixel(source->image,pixels,source->pixel_wands[id][x]);
pixels+=GetPixelChannels(source->image);
}
if (update(source,y,id,context) == MagickFalse)
status=MagickFalse;
// write the (possibly updated) wands back into the same row
// note: pixels was advanced by the first loop, so re-walk from the wands
for (x=0; x < (ssize_t) source->extent.width; x++)
{
PixelGetQuantumPixel(source->image,source->pixel_wands[id][x],pixels);
pixels+=GetPixelChannels(source->image);
}
sync=SyncCacheViewAuthenticPixels(source->view,source->exception);
if (sync == MagickFalse)
status=MagickFalse;
if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
// serialize progress updates across threads
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickWand_UpdateWandViewIterator)
#endif
proceed=SetImageProgress(source_image,source->description,progress++,
source->extent.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
|
geometric_construction.h | #ifndef __H2OPUS_GEOMETRIC_CONSTRUCTION_H__
#define __H2OPUS_GEOMETRIC_CONSTRUCTION_H__
#include <h2opus/core/hmatrix.h>
#include <h2opus/util/geometric_admissibility.h>
#include <h2opus/util/kdtree.h>
// Populate the leaf bases and transfer matrices of the U (row) basis tree
// using the supplied entry generator.  Leaves are filled first (in parallel),
// then the transfer matrices of each level, from the deepest level up.
template <class T, int hw, typename EntryGen>
void generateUBasisTreeEntries(TBasisTree<hw> &basis_tree, TH2OpusKDTree<T, hw> &kdtree, EntryGen &entry_gen,
                               std::vector<int> &level_slices)
{
    const int depth = basis_tree.depth;

    // Leaf level: one basis leaf per cluster at the deepest level.
    const int leaf_level_start = basis_tree.getLevelStart(depth - 1);
    const int slices_at_leaves = level_slices[depth - 1];
    const int leaf_stride = basis_tree.leaf_size;

#pragma omp parallel for
    for (size_t leaf = 0; leaf < (size_t)basis_tree.basis_leaves; leaf++)
    {
        int node = leaf_level_start + leaf;
        int cluster = basis_tree.global_cluster_index[node];
        H2Opus_Real *leaf_data = basis_tree.getBasisLeaf(leaf);
        entry_gen.u_basis_leaf(leaf_data, leaf_stride, kdtree, cluster, slices_at_leaves);
    }

    // Transfer matrices, deepest level first.
    for (int level = depth - 1; level >= 0; level--)
    {
        int first_node, last_node;
        basis_tree.getLevelRange(level, first_node, last_node);
        const int slices_here = level_slices[level];
        const int slices_above = (level == 0 ? slices_here : level_slices[level - 1]);
        const int rank = basis_tree.level_data.getLevelRank(level);

#pragma omp parallel for
        for (int node = first_node; node < last_node; node++)
        {
            int parent_node = basis_tree.parent[node];
            int cluster = basis_tree.global_cluster_index[node];
            int parent_cluster = kdtree.getParent(cluster);
            // Skip nodes with no parent in either tree (i.e. the root).
            if (parent_node == H2OPUS_EMPTY_NODE && parent_cluster == H2OPUS_EMPTY_NODE)
                continue;
            H2Opus_Real *transfer = basis_tree.getTransNode(level, node - first_node);
            entry_gen.u_transfer_matrix(transfer, rank, kdtree, cluster, parent_cluster, slices_here, slices_above);
        }
    }
}
// Populate the leaf bases and transfer matrices of the V (column) basis tree.
// Identical structure to generateUBasisTreeEntries, but routed through the
// entry generator's v_* callbacks.
template <class T, int hw, typename EntryGen>
void generateVBasisTreeEntries(TBasisTree<hw> &basis_tree, TH2OpusKDTree<T, hw> &kdtree, EntryGen &entry_gen,
                               std::vector<int> &level_slices)
{
    const int depth = basis_tree.depth;

    // Leaf level: one basis leaf per cluster at the deepest level.
    const int leaf_level_start = basis_tree.getLevelStart(depth - 1);
    const int slices_at_leaves = level_slices[depth - 1];
    const int leaf_stride = basis_tree.leaf_size;

#pragma omp parallel for
    for (size_t leaf = 0; leaf < (size_t)basis_tree.basis_leaves; leaf++)
    {
        int node = leaf_level_start + leaf;
        int cluster = basis_tree.global_cluster_index[node];
        H2Opus_Real *leaf_data = basis_tree.getBasisLeaf(leaf);
        entry_gen.v_basis_leaf(leaf_data, leaf_stride, kdtree, cluster, slices_at_leaves);
    }

    // Transfer matrices, deepest level first.
    for (int level = depth - 1; level >= 0; level--)
    {
        int first_node, last_node;
        basis_tree.getLevelRange(level, first_node, last_node);
        const int slices_here = level_slices[level];
        const int slices_above = (level == 0 ? slices_here : level_slices[level - 1]);
        const int rank = basis_tree.level_data.getLevelRank(level);

#pragma omp parallel for
        for (int node = first_node; node < last_node; node++)
        {
            int parent_node = basis_tree.parent[node];
            int cluster = basis_tree.global_cluster_index[node];
            int parent_cluster = kdtree.getParent(cluster);
            // Skip nodes with no parent in either tree (i.e. the root).
            if (parent_node == H2OPUS_EMPTY_NODE && parent_cluster == H2OPUS_EMPTY_NODE)
                continue;
            H2Opus_Real *transfer = basis_tree.getTransNode(level, node - first_node);
            entry_gen.v_transfer_matrix(transfer, rank, kdtree, cluster, parent_cluster, slices_here, slices_above);
        }
    }
}
// Populate the coupling matrices (low-rank admissible blocks, level by level)
// and the dense leaf blocks of the H-node tree via the entry generator.
template <class T, int hw, typename EntryGen>
void generateHNodeEntries(THNodeTree<hw> &hnodes, TH2OpusKDTree<T, hw> &u_kdtree, TBasisTree<hw> &u_basis_tree,
                          TH2OpusKDTree<T, hw> &v_kdtree, TBasisTree<hw> &v_basis_tree, EntryGen &entry_gen,
                          std::vector<int> &level_slices)
{
    const int depth = hnodes.depth;

    // Coupling matrices, deepest level first.
    for (int level = depth - 1; level >= 0; level--)
    {
        int first_node, last_node;
        hnodes.getCouplingLevelRange(level, first_node, last_node);
        const int slices_here = level_slices[level];
        const int rank = hnodes.level_data.getLevelRank(level);

#pragma omp parallel for
        for (int node = first_node; node < last_node; node++)
        {
            H2Opus_Real *coupling = hnodes.getCouplingMatrix(level, node - first_node);
            // Map the coupling leaf back to its (row, column) cluster pair.
            int tree_node = hnodes.rank_leaf_tree_index[node];
            int u_node = hnodes.node_u_index[tree_node];
            int v_node = hnodes.node_v_index[tree_node];
            int u_cluster = u_basis_tree.global_cluster_index[u_node];
            int v_cluster = v_basis_tree.global_cluster_index[v_node];
            entry_gen.coupling_matrix(coupling, rank, u_kdtree, u_cluster, v_kdtree, v_cluster, slices_here);
        }
    }

    // Dense (inadmissible) leaf blocks.
    const int dense_count = hnodes.num_dense_leaves;
    if (dense_count == 0)
        return;

    const int dense_ld = u_basis_tree.leaf_size;
    assert(v_basis_tree.leaf_size == dense_ld);

#pragma omp parallel for
    for (int leaf = 0; leaf < dense_count; leaf++)
    {
        H2Opus_Real *dense_block = hnodes.getDenseMatrix(leaf);
        int tree_node = hnodes.dense_leaf_tree_index[leaf];
        int u_node = hnodes.node_u_index[tree_node];
        int v_node = hnodes.node_v_index[tree_node];
        int u_cluster = u_basis_tree.global_cluster_index[u_node];
        int v_cluster = v_basis_tree.global_cluster_index[v_node];
        entry_gen.dense_matrix(dense_block, dense_ld, u_kdtree, u_cluster, v_kdtree, v_cluster);
    }
}
// Allocate the matrix data of an H-matrix whose structure was already built
// and populate all basis, coupling, and dense entries via the entry generator.
//
// The per-level rank is level_slices[0]^dim.  The original code computed this
// as (int)pow(...); pow returns a double, and the implicit narrowing truncates
// toward zero, which can under-compute the rank on platforms where the result
// is inexact (e.g. 99.999... -> 99).  An exact integer power avoids that.
template <class T, int hw, typename EntryGen>
void generateHMatrixEntries(THMatrix<hw> &hmatrix, TH2OpusKDTree<T, hw> &kdtree, EntryGen &entry_gen,
                            std::vector<int> &level_slices)
{
    // rank = level_slices[0] ^ kdtree.getDim(), computed exactly in integers.
    // (Assumes the result fits in an int, as the original code did.)
    int rank = 1;
    for (int d = 0; d < kdtree.getDim(); d++)
        rank *= level_slices[0];

    std::vector<int> level_ranks(level_slices.size(), rank);
    int leaf_size = kdtree.getLeafSize();

    hmatrix.u_basis_tree.allocateMatrixData(&level_ranks[0], level_ranks.size(), leaf_size);
    generateUBasisTreeEntries(hmatrix.u_basis_tree, kdtree, entry_gen, level_slices);
    if (!hmatrix.sym)
    {
        hmatrix.v_basis_tree.allocateMatrixData(&level_ranks[0], level_ranks.size(), leaf_size);
        generateVBasisTreeEntries(hmatrix.v_basis_tree, kdtree, entry_gen, level_slices);
    }
    hmatrix.hnodes.allocateMatrixData(hmatrix.u_basis_tree.level_data);
    // NOTE(review): u_basis_tree is passed for both the row and column trees
    // even in the non-symmetric case, matching the original code — verify
    // whether v_basis_tree was intended here when !hmatrix.sym.
    generateHNodeEntries(hmatrix.hnodes, kdtree, hmatrix.u_basis_tree, kdtree, hmatrix.u_basis_tree, entry_gen,
                         level_slices);
}
// Build the hierarchical block structure of an H-matrix from a prebuilt
// kd-tree: generate the row basis tree, mirror it for the columns when the
// matrix is non-symmetric, then determine the admissible/dense block
// partition and allocate the BSR/BSN bookkeeping.
template <class T, int hw>
void buildHMatrixStructure(THMatrix<hw> &hmatrix, TH2OpusKDTree<T, hw> &kdtree,
                           TH2OpusAdmissibility<T, hw> &admissibility)
{
    hmatrix.n = kdtree.getDataSet()->getDataSetSize();

    TBasisTree<hw> &row_tree = hmatrix.u_basis_tree;
    TBasisTree<hw> &col_tree = (hmatrix.sym ? hmatrix.u_basis_tree : hmatrix.v_basis_tree);

    row_tree.generateStructureFromKDTree(kdtree, 0, true, kdtree.getDepth());
    if (!hmatrix.sym)
        hmatrix.v_basis_tree.copyStructureData(hmatrix.u_basis_tree);

    // Recursively classify blocks starting from the root pair (0, 0).
    std::vector<int> initial_cols(1, 0);
    hmatrix.hnodes.determineStructure(kdtree, admissibility, row_tree, 0, col_tree, 0, row_tree.depth, initial_cols);
    hmatrix.hnodes.allocateBSRData(row_tree, col_tree, 0, 0);
    hmatrix.hnodes.allocateBSNData(row_tree, col_tree, 0, 0);
}
// Convenience overload: cluster the data set into a median-split kd-tree
// first, then delegate to the kd-tree-based structure builder.
template <class T, int hw>
void buildHMatrixStructure(THMatrix<hw> &hmatrix, H2OpusDataSet<T> *data_set, int leaf_size,
                           TH2OpusAdmissibility<T, hw> &admissibility)
{
    TH2OpusKDTree<T, hw> cluster_tree(data_set, leaf_size);
    cluster_tree.buildKDtreeMedianSplit();
    buildHMatrixStructure<T, hw>(hmatrix, cluster_tree, admissibility);
}
// Build a complete H-matrix: cluster the points, build the block structure,
// then generate all matrix entries with a uniform slice count per level.
template <class T, int hw, typename EntryGen>
void buildHMatrix(THMatrix<hw> &hmatrix, H2OpusDataSet<T> *data_set, TH2OpusAdmissibility<T, hw> &admissibility,
                  EntryGen &entry_gen, int leaf_size, int slices)
{
    TH2OpusKDTree<T, hw> cluster_tree(data_set, leaf_size);
    cluster_tree.buildKDtreeMedianSplit();

    std::vector<int> slices_per_level(cluster_tree.getDepth(), slices);
    buildHMatrixStructure(hmatrix, cluster_tree, admissibility);
    generateHMatrixEntries(hmatrix, cluster_tree, entry_gen, slices_per_level);
}
#endif
|
kernel_cpu.c | #ifdef __cplusplus
extern "C" {
#endif
//========================================================================================================================================================================================================200
// DEFINE/INCLUDE
//========================================================================================================================================================================================================200
//======================================================================================================================================================150
// LIBRARIES
//======================================================================================================================================================150
#include <omp.h> // (in path known to compiler) needed by openmp
#include <stdlib.h> // (in path known to compiler) needed by malloc
#include <stdio.h> // (in path known to compiler) needed by printf
#include <math.h> // (in path known to compiler) needed by exp
//======================================================================================================================================================150
// MAIN FUNCTION HEADER
//======================================================================================================================================================150
#include "./../lavaMD.h" // (in the main program folder) needed to recognized input variables
//======================================================================================================================================================150
// UTILITIES
//======================================================================================================================================================150
#include "./../util/timer/timer.h" // (in library path specified to compiler) needed by timer
//======================================================================================================================================================150
// KERNEL_CPU FUNCTION HEADER
//======================================================================================================================================================150
#include "kernel_cpu.h" // (in the current directory)
//========================================================================================================================================================================================================200
// PLASMAKERNEL_GPU
//========================================================================================================================================================================================================200
// kernel_cpu: for every box, accumulate the interactions of its particles
// with all particles in the box itself and in its neighbor boxes.
//
// Parameters:
//   par - simulation parameters (only par.alpha is read)
//   dim - problem dimensions (only dim.number_boxes is read)
//   box - per-box metadata: particle offset and neighbor list
//   rv  - particle position/extent vectors (read-only)
//   qv  - particle charges (read-only)
//   fv  - accumulated potential (v) and forces (x, y, z), updated in place
//
// FIX: the timing printf format strings contained a bare "% :", which is an
// invalid conversion specification (undefined behavior); a literal percent
// sign must be written as "%%".
void kernel_cpu(par_str par, dim_str dim, box_str *box, FOUR_VECTOR *rv, fp *qv,
                FOUR_VECTOR *fv) {
  // timers
  long long time0;
  time0 = get_time();
  long long time1;
  long long time2;
  long long time3;
  long long time4;

  // parameters
  fp alpha;
  fp a2;

  // counters (made private in the OpenMP pragma below)
  int i, j, k, l;

  // home box
  long first_i;
  FOUR_VECTOR *rA;
  FOUR_VECTOR *fA;

  // neighbor box
  int pointer;
  long first_j;
  FOUR_VECTOR *rB;
  fp *qB;

  // common per-pair scratch
  fp r2;
  fp u2;
  fp fs;
  fp vij;
  fp fxij, fyij, fzij;
  THREE_VECTOR d;

  time1 = get_time();

  // MCPU setup (nothing to do on the CPU path)
  time2 = get_time();

  // inputs
  alpha = par.alpha;
  a2 = 2.0 * alpha * alpha;

  time3 = get_time();

  // Process interactions.  Each outer iteration writes only to the fv slice
  // belonging to its home box, so boxes can be processed in parallel; all
  // per-box scratch variables are declared private.
#pragma omp parallel for private(i, j, k) private(first_i, rA, fA) private( \
    pointer, first_j, rB, qB) private(r2, u2, fs, vij, fxij, fyij, fzij, d)
  for (l = 0; l < dim.number_boxes; l = l + 1) {
    // home box: offset of this box's particles in the common arrays
    first_i = box[l].offset;
    rA = &rv[first_i];
    fA = &fv[first_i];

    // iterate over the home box (k == 0) and its box[l].nn neighbor boxes
    for (k = 0; k < (1 + box[l].nn); k++) {
      if (k == 0) {
        pointer = l; // home box first
      } else {
        pointer = box[l].nei[k - 1].number; // then the neighbor boxes
      }

      // current (home or neighbor) box particles
      first_j = box[pointer].offset;
      rB = &rv[first_j];
      qB = &qv[first_j];

      // all pairs (i in home box) x (j in current box)
      for (i = 0; i < NUMBER_PAR_PER_BOX; i = i + 1) {
        for (j = 0; j < NUMBER_PAR_PER_BOX; j = j + 1) {
          // squared-distance term and Gaussian interaction weight
          r2 = rA[i].v + rB[j].v - DOT(rA[i], rB[j]);
          u2 = a2 * r2;
          vij = exp(-u2);
          fs = 2. * vij;
          d.x = rA[i].x - rB[j].x;
          d.y = rA[i].y - rB[j].y;
          d.z = rA[i].z - rB[j].z;
          fxij = fs * d.x;
          fyij = fs * d.y;
          fzij = fs * d.z;
          // accumulate charge-weighted potential and force components
          fA[i].v += qB[j] * vij;
          fA[i].x += qB[j] * fxij;
          fA[i].y += qB[j] * fyij;
          fA[i].z += qB[j] * fzij;
        } // for j
      }   // for i
    }     // for k
  }       // for l

  time4 = get_time();

  // display timing (times are microseconds, printed as seconds)
  printf("Time spent in different stages of CPU/MCPU KERNEL:\n");
  printf("%15.12f s, %15.12f %% : CPU/MCPU: VARIABLES\n",
         (float)(time1 - time0) / 1000000,
         (float)(time1 - time0) / (float)(time4 - time0) * 100);
  printf("%15.12f s, %15.12f %% : MCPU: SET DEVICE\n",
         (float)(time2 - time1) / 1000000,
         (float)(time2 - time1) / (float)(time4 - time0) * 100);
  printf("%15.12f s, %15.12f %% : CPU/MCPU: INPUTS\n",
         (float)(time3 - time2) / 1000000,
         (float)(time3 - time2) / (float)(time4 - time0) * 100);
  printf("%15.12f s, %15.12f %% : CPU/MCPU: KERNEL\n",
         (float)(time4 - time3) / 1000000,
         (float)(time4 - time3) / (float)(time4 - time0) * 100);
  printf("Total time:\n");
  printf("%.12f s\n", (float)(time4 - time0) / 1000000);
} // kernel_cpu
#ifdef __cplusplus
}
#endif
|
convolution_3x3_pack4_fp16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Transform 3x3 convolution kernels into the Winograd F(6,3) domain (8x8 per
// input/output channel pair) and repack them as fp16 for the pack4 NEON path.
// The per-channel transform computes ktm * K * ktm^T using the constant 8x3
// matrix below; the result is then interleaved into the layout described by
// the "interleave" comment further down (outch packed in groups of 8, then 4).
static void conv3x3s1_winograd64_transform_kernel_pack4_fp16sa_neon(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch, const Option& opt)
{
// winograd63 transform kernel
Mat kernel_tm;
kernel_tm.create(8 * 8, inch, outch);
// Winograd F(6,3) kernel-transform matrix G (8 rows x 3 cols).
const float ktm[8][3] = {
{1.0f, 0.0f, 0.0f},
{-2.0f / 9, -2.0f / 9, -2.0f / 9},
{-2.0f / 9, 2.0f / 9, -2.0f / 9},
{1.0f / 90, 1.0f / 45, 2.0f / 45},
{1.0f / 90, -1.0f / 45, 2.0f / 45},
{1.0f / 45, 1.0f / 90, 1.0f / 180},
{1.0f / 45, -1.0f / 90, 1.0f / 180},
{0.0f, 0.0f, 1.0f}
};
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
// 3x3 kernel for output channel p, input channel q (row-major, 9 floats).
const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
float* kernel_tm0 = kernel_tm.channel(p).row(q);
// transform kernel, transposed
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
// h: tmp = G * K^T (one 8x3 intermediate per kernel)
float tmp[8][3];
for (int i = 0; i < 8; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// v: kernel_tm0 = tmp * G^T, stored as 64 floats (8x8)
for (int j = 0; j < 8; j++)
{
float* tmpp = &tmp[j][0];
for (int i = 0; i < 8; i++)
{
kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
// interleave
// src = 64-inch-outch
// dst = 4b-4a-inch/4a-64-outch/4b;
// Each stored element is 2 bytes (fp16); channels hold groups of 8 output
// channels first, with a trailing group of 4 when outch % 8 == 4.
kernel_tm_pack4.create(2 * inch / 4, 64, (outch / 4) / 2 + (outch / 4) % 2, (size_t)2u * 16, 16);
int q = 0;
// Pack output channels 8 at a time: for each of the 64 Winograd positions k,
// emit 4 input channels x 8 output channels as 32 consecutive fp16 values.
for (; q + 7 < outch; q += 8)
{
const Mat k0 = kernel_tm.channel(q);
const Mat k1 = kernel_tm.channel(q + 1);
const Mat k2 = kernel_tm.channel(q + 2);
const Mat k3 = kernel_tm.channel(q + 3);
const Mat k4 = kernel_tm.channel(q + 4);
const Mat k5 = kernel_tm.channel(q + 5);
const Mat k6 = kernel_tm.channel(q + 6);
const Mat k7 = kernel_tm.channel(q + 7);
Mat g0 = kernel_tm_pack4.channel(q / 8);
for (int k = 0; k < 64; k++)
{
__fp16* g00 = g0.row<__fp16>(k);
for (int p = 0; p + 3 < inch; p += 4)
{
const float* k00 = k0.row(p);
const float* k01 = k0.row(p + 1);
const float* k02 = k0.row(p + 2);
const float* k03 = k0.row(p + 3);
const float* k10 = k1.row(p);
const float* k11 = k1.row(p + 1);
const float* k12 = k1.row(p + 2);
const float* k13 = k1.row(p + 3);
const float* k20 = k2.row(p);
const float* k21 = k2.row(p + 1);
const float* k22 = k2.row(p + 2);
const float* k23 = k2.row(p + 3);
const float* k30 = k3.row(p);
const float* k31 = k3.row(p + 1);
const float* k32 = k3.row(p + 2);
const float* k33 = k3.row(p + 3);
const float* k40 = k4.row(p);
const float* k41 = k4.row(p + 1);
const float* k42 = k4.row(p + 2);
const float* k43 = k4.row(p + 3);
const float* k50 = k5.row(p);
const float* k51 = k5.row(p + 1);
const float* k52 = k5.row(p + 2);
const float* k53 = k5.row(p + 3);
const float* k60 = k6.row(p);
const float* k61 = k6.row(p + 1);
const float* k62 = k6.row(p + 2);
const float* k63 = k6.row(p + 3);
const float* k70 = k7.row(p);
const float* k71 = k7.row(p + 1);
const float* k72 = k7.row(p + 2);
const float* k73 = k7.row(p + 3);
// Layout: 8 output channels for input channel p, then p+1, p+2, p+3.
g00[0] = (__fp16)k00[k];
g00[1] = (__fp16)k10[k];
g00[2] = (__fp16)k20[k];
g00[3] = (__fp16)k30[k];
g00[4] = (__fp16)k40[k];
g00[5] = (__fp16)k50[k];
g00[6] = (__fp16)k60[k];
g00[7] = (__fp16)k70[k];
g00[8] = (__fp16)k01[k];
g00[9] = (__fp16)k11[k];
g00[10] = (__fp16)k21[k];
g00[11] = (__fp16)k31[k];
g00[12] = (__fp16)k41[k];
g00[13] = (__fp16)k51[k];
g00[14] = (__fp16)k61[k];
g00[15] = (__fp16)k71[k];
g00[16] = (__fp16)k02[k];
g00[17] = (__fp16)k12[k];
g00[18] = (__fp16)k22[k];
g00[19] = (__fp16)k32[k];
g00[20] = (__fp16)k42[k];
g00[21] = (__fp16)k52[k];
g00[22] = (__fp16)k62[k];
g00[23] = (__fp16)k72[k];
g00[24] = (__fp16)k03[k];
g00[25] = (__fp16)k13[k];
g00[26] = (__fp16)k23[k];
g00[27] = (__fp16)k33[k];
g00[28] = (__fp16)k43[k];
g00[29] = (__fp16)k53[k];
g00[30] = (__fp16)k63[k];
g00[31] = (__fp16)k73[k];
g00 += 32;
}
}
}
// Remaining group of 4 output channels (when outch % 8 == 4): same scheme
// with 4 input channels x 4 output channels per 16-value group.
for (; q + 3 < outch; q += 4)
{
const Mat k0 = kernel_tm.channel(q);
const Mat k1 = kernel_tm.channel(q + 1);
const Mat k2 = kernel_tm.channel(q + 2);
const Mat k3 = kernel_tm.channel(q + 3);
Mat g0 = kernel_tm_pack4.channel(q / 8 + (q % 8) / 4);
for (int k = 0; k < 64; k++)
{
__fp16* g00 = g0.row<__fp16>(k);
for (int p = 0; p + 3 < inch; p += 4)
{
const float* k00 = k0.row(p);
const float* k01 = k0.row(p + 1);
const float* k02 = k0.row(p + 2);
const float* k03 = k0.row(p + 3);
const float* k10 = k1.row(p);
const float* k11 = k1.row(p + 1);
const float* k12 = k1.row(p + 2);
const float* k13 = k1.row(p + 3);
const float* k20 = k2.row(p);
const float* k21 = k2.row(p + 1);
const float* k22 = k2.row(p + 2);
const float* k23 = k2.row(p + 3);
const float* k30 = k3.row(p);
const float* k31 = k3.row(p + 1);
const float* k32 = k3.row(p + 2);
const float* k33 = k3.row(p + 3);
g00[0] = (__fp16)k00[k];
g00[1] = (__fp16)k10[k];
g00[2] = (__fp16)k20[k];
g00[3] = (__fp16)k30[k];
g00[4] = (__fp16)k01[k];
g00[5] = (__fp16)k11[k];
g00[6] = (__fp16)k21[k];
g00[7] = (__fp16)k31[k];
g00[8] = (__fp16)k02[k];
g00[9] = (__fp16)k12[k];
g00[10] = (__fp16)k22[k];
g00[11] = (__fp16)k32[k];
g00[12] = (__fp16)k03[k];
g00[13] = (__fp16)k13[k];
g00[14] = (__fp16)k23[k];
g00[15] = (__fp16)k33[k];
g00 += 16;
}
}
}
}
static void conv3x3s1_winograd64_pack4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 6n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 5) / 6 * 6;
outh = (outh + 5) / 6 * 6;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tiles = outw / 6;
int h_tiles = outh / 6;
const int tiles = w_tiles * h_tiles;
bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
conv3x3s1_winograd64_transform_input_pack4_fp16sa_neon(bottom_blob_bordered, bottom_blob_tm, opt);
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = h_tm / 8 * w_tm / 8;
// permute
// bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
Mat bottom_blob_tm2;
if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + tiles % 4, 64, 2u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 64, 2u * elempack, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 64, 2u * elempack, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 64; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
for (; i + 7 < tiles; i += 8)
{
__fp16* tm2p = tm2.row<__fp16>(i / 8);
const __fp16* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
// transpose 4x8
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n"
"st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0", "v1", "v2", "v3");
r0 += bottom_blob_tm.cstep * 4;
}
}
for (; i + 3 < tiles; i += 4)
{
__fp16* tm2p = tm2.row<__fp16>(i / 8 + (i % 8) / 4);
const __fp16* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
// transpose 4x4
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld4 {v0.4h, v1.4h, v2.4h, v3.4h}, [%0] \n"
"st1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%1], #32 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0", "v1", "v2", "v3");
r0 += bottom_blob_tm.cstep * 4;
}
}
for (; i < tiles; i++)
{
__fp16* tm2p = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + i % 4);
const __fp16* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
asm volatile(
"prfm pldl1keep, [%0, #64] \n"
"ld1 {v0.4h}, [%0] \n"
"st1 {v0.4h}, [%1], #8 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0");
r0 += bottom_blob_tm.cstep * 4;
}
}
}
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(tiles, 64, outch, 2u * elempack, elempack, opt.workspace_allocator);
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 1;
remain_outch_start = nn_outch << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 2;
__fp16* output0_tm = top_blob_tm.channel(p);
__fp16* output1_tm = top_blob_tm.channel(p + 1);
const Mat kernel01_tm = kernel_tm.channel(pp);
for (int r = 0; r < 64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
for (; i + 7 < tiles; i += 8)
{
const __fp16* r0 = bb2.row<const __fp16>(i / 8);
const __fp16* kptr = kernel01_tm.row<const __fp16>(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v24.16b, v24.16b, v24.16b \n"
"eor v25.16b, v25.16b, v25.16b \n"
"eor v26.16b, v26.16b, v26.16b \n"
"eor v27.16b, v27.16b, v27.16b \n"
"eor v28.16b, v28.16b, v28.16b \n"
"eor v29.16b, v29.16b, v29.16b \n"
"eor v30.16b, v30.16b, v30.16b \n"
"eor v31.16b, v31.16b, v31.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" // r01 r23 r45 r67
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%4], #64 \n" // k0123
"fmla v24.8h, v4.8h, v0.h[0] \n"
"fmla v25.8h, v4.8h, v0.h[1] \n"
"fmla v26.8h, v4.8h, v0.h[2] \n"
"fmla v27.8h, v4.8h, v0.h[3] \n"
"fmla v28.8h, v4.8h, v0.h[4] \n"
"fmla v29.8h, v4.8h, v0.h[5] \n"
"fmla v30.8h, v4.8h, v0.h[6] \n"
"fmla v31.8h, v4.8h, v0.h[7] \n"
"fmla v24.8h, v5.8h, v1.h[0] \n"
"fmla v25.8h, v5.8h, v1.h[1] \n"
"fmla v26.8h, v5.8h, v1.h[2] \n"
"fmla v27.8h, v5.8h, v1.h[3] \n"
"fmla v28.8h, v5.8h, v1.h[4] \n"
"fmla v29.8h, v5.8h, v1.h[5] \n"
"fmla v30.8h, v5.8h, v1.h[6] \n"
"fmla v31.8h, v5.8h, v1.h[7] \n"
"fmla v24.8h, v6.8h, v2.h[0] \n"
"fmla v25.8h, v6.8h, v2.h[1] \n"
"fmla v26.8h, v6.8h, v2.h[2] \n"
"fmla v27.8h, v6.8h, v2.h[3] \n"
"fmla v28.8h, v6.8h, v2.h[4] \n"
"fmla v29.8h, v6.8h, v2.h[5] \n"
"fmla v30.8h, v6.8h, v2.h[6] \n"
"fmla v31.8h, v6.8h, v2.h[7] \n"
"subs %w0, %w0, #1 \n"
"fmla v24.8h, v7.8h, v3.h[0] \n"
"fmla v25.8h, v7.8h, v3.h[1] \n"
"fmla v26.8h, v7.8h, v3.h[2] \n"
"fmla v27.8h, v7.8h, v3.h[3] \n"
"fmla v28.8h, v7.8h, v3.h[4] \n"
"fmla v29.8h, v7.8h, v3.h[5] \n"
"fmla v30.8h, v7.8h, v3.h[6] \n"
"fmla v31.8h, v7.8h, v3.h[7] \n"
"bne 0b \n"
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n"
"st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n"
"ext v24.16b, v24.16b, v24.16b, #8 \n"
"ext v25.16b, v25.16b, v25.16b, #8 \n"
"ext v26.16b, v26.16b, v26.16b, #8 \n"
"ext v27.16b, v27.16b, v27.16b, #8 \n"
"ext v28.16b, v28.16b, v28.16b, #8 \n"
"ext v29.16b, v29.16b, v29.16b, #8 \n"
"ext v30.16b, v30.16b, v30.16b, #8 \n"
"ext v31.16b, v31.16b, v31.16b, #8 \n"
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n"
"st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%2], #32 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(r0), // %3
"=r"(kptr) // %4
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(r0),
"4"(kptr)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 3 < tiles; i += 4)
{
const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4);
const __fp16* kptr = kernel01_tm.row<const __fp16>(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v24.16b, v24.16b, v24.16b \n"
"eor v25.16b, v25.16b, v25.16b \n"
"eor v26.16b, v26.16b, v26.16b \n"
"eor v27.16b, v27.16b, v27.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n" // r01 r23 r45 r67
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%4], #64 \n" // k0123
"fmla v24.8h, v4.8h, v0.h[0] \n"
"fmla v25.8h, v4.8h, v0.h[1] \n"
"fmla v26.8h, v4.8h, v0.h[2] \n"
"fmla v27.8h, v4.8h, v0.h[3] \n"
"fmla v24.8h, v5.8h, v1.h[0] \n"
"fmla v25.8h, v5.8h, v1.h[1] \n"
"fmla v26.8h, v5.8h, v1.h[2] \n"
"fmla v27.8h, v5.8h, v1.h[3] \n"
"fmla v24.8h, v6.8h, v2.h[0] \n"
"fmla v25.8h, v6.8h, v2.h[1] \n"
"fmla v26.8h, v6.8h, v2.h[2] \n"
"fmla v27.8h, v6.8h, v2.h[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v24.8h, v7.8h, v3.h[0] \n"
"fmla v25.8h, v7.8h, v3.h[1] \n"
"fmla v26.8h, v7.8h, v3.h[2] \n"
"fmla v27.8h, v7.8h, v3.h[3] \n"
"bne 0b \n"
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n"
"ext v24.16b, v24.16b, v24.16b, #8 \n"
"ext v25.16b, v25.16b, v25.16b, #8 \n"
"ext v26.16b, v26.16b, v26.16b, #8 \n"
"ext v27.16b, v27.16b, v27.16b, #8 \n"
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(r0), // %3
"=r"(kptr) // %4
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(r0),
"4"(kptr)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v24", "v25", "v26", "v27");
}
for (; i < tiles; i++)
{
const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + i % 4);
const __fp16* kptr = kernel01_tm.row<const __fp16>(r);
float16x8_t _sum0 = vdupq_n_f16(0.f);
for (int q = 0; q < inch; q++)
{
float16x4_t _r0 = vld1_f16(r0);
float16x8_t _k0 = vld1q_f16(kptr);
float16x8_t _k1 = vld1q_f16(kptr + 8);
float16x8_t _k2 = vld1q_f16(kptr + 16);
float16x8_t _k3 = vld1q_f16(kptr + 24);
_sum0 = vfmaq_lane_f16(_sum0, _k0, _r0, 0);
_sum0 = vfmaq_lane_f16(_sum0, _k1, _r0, 1);
_sum0 = vfmaq_lane_f16(_sum0, _k2, _r0, 2);
_sum0 = vfmaq_lane_f16(_sum0, _k3, _r0, 3);
kptr += 32;
r0 += 4;
}
vst1_f16(output0_tm, vget_low_f16(_sum0));
vst1_f16(output1_tm, vget_high_f16(_sum0));
output0_tm += 4;
output1_tm += 4;
}
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
__fp16* output0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p / 2 + p % 2);
for (int r = 0; r < 64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
for (; i + 7 < tiles; i += 8)
{
const __fp16* r0 = bb2.row<const __fp16>(i / 8);
const __fp16* kptr = kernel0_tm.row<const __fp16>(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v24.16b, v24.16b, v24.16b \n"
"eor v25.16b, v25.16b, v25.16b \n"
"eor v26.16b, v26.16b, v26.16b \n"
"eor v27.16b, v27.16b, v27.16b \n"
"eor v28.16b, v28.16b, v28.16b \n"
"eor v29.16b, v29.16b, v29.16b \n"
"eor v30.16b, v30.16b, v30.16b \n"
"eor v31.16b, v31.16b, v31.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r01 r23 r45 r67
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%3], #32 \n" // k0123
"fmla v24.4h, v4.4h, v0.h[0] \n"
"fmla v25.4h, v4.4h, v0.h[1] \n"
"fmla v26.4h, v4.4h, v0.h[2] \n"
"fmla v27.4h, v4.4h, v0.h[3] \n"
"fmla v28.4h, v4.4h, v0.h[4] \n"
"fmla v29.4h, v4.4h, v0.h[5] \n"
"fmla v30.4h, v4.4h, v0.h[6] \n"
"fmla v31.4h, v4.4h, v0.h[7] \n"
"fmla v24.4h, v5.4h, v1.h[0] \n"
"fmla v25.4h, v5.4h, v1.h[1] \n"
"fmla v26.4h, v5.4h, v1.h[2] \n"
"fmla v27.4h, v5.4h, v1.h[3] \n"
"fmla v28.4h, v5.4h, v1.h[4] \n"
"fmla v29.4h, v5.4h, v1.h[5] \n"
"fmla v30.4h, v5.4h, v1.h[6] \n"
"fmla v31.4h, v5.4h, v1.h[7] \n"
"fmla v24.4h, v6.4h, v2.h[0] \n"
"fmla v25.4h, v6.4h, v2.h[1] \n"
"fmla v26.4h, v6.4h, v2.h[2] \n"
"fmla v27.4h, v6.4h, v2.h[3] \n"
"fmla v28.4h, v6.4h, v2.h[4] \n"
"fmla v29.4h, v6.4h, v2.h[5] \n"
"fmla v30.4h, v6.4h, v2.h[6] \n"
"fmla v31.4h, v6.4h, v2.h[7] \n"
"subs %w0, %w0, #1 \n"
"fmla v24.4h, v7.4h, v3.h[0] \n"
"fmla v25.4h, v7.4h, v3.h[1] \n"
"fmla v26.4h, v7.4h, v3.h[2] \n"
"fmla v27.4h, v7.4h, v3.h[3] \n"
"fmla v28.4h, v7.4h, v3.h[4] \n"
"fmla v29.4h, v7.4h, v3.h[5] \n"
"fmla v30.4h, v7.4h, v3.h[6] \n"
"fmla v31.4h, v7.4h, v3.h[7] \n"
"bne 0b \n"
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n"
"st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(kptr)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 3 < tiles; i += 4)
{
const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4);
const __fp16* kptr = kernel0_tm.row<const __fp16>(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v24.16b, v24.16b, v24.16b \n"
"eor v25.16b, v25.16b, v25.16b \n"
"eor v26.16b, v26.16b, v26.16b \n"
"eor v27.16b, v27.16b, v27.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n" // r01 r23 r45 r67
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%3], #32 \n" // k0123
"fmla v24.4h, v4.4h, v0.h[0] \n"
"fmla v25.4h, v4.4h, v0.h[1] \n"
"fmla v26.4h, v4.4h, v0.h[2] \n"
"fmla v27.4h, v4.4h, v0.h[3] \n"
"fmla v24.4h, v5.4h, v1.h[0] \n"
"fmla v25.4h, v5.4h, v1.h[1] \n"
"fmla v26.4h, v5.4h, v1.h[2] \n"
"fmla v27.4h, v5.4h, v1.h[3] \n"
"fmla v24.4h, v6.4h, v2.h[0] \n"
"fmla v25.4h, v6.4h, v2.h[1] \n"
"fmla v26.4h, v6.4h, v2.h[2] \n"
"fmla v27.4h, v6.4h, v2.h[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v24.4h, v7.4h, v3.h[0] \n"
"fmla v25.4h, v7.4h, v3.h[1] \n"
"fmla v26.4h, v7.4h, v3.h[2] \n"
"fmla v27.4h, v7.4h, v3.h[3] \n"
"bne 0b \n"
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(kptr)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v24", "v25", "v26", "v27");
}
for (; i < tiles; i++)
{
const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + i % 4);
const __fp16* kptr = kernel0_tm.row<const __fp16>(r);
float16x4_t _sum0 = vdup_n_f16(0.f);
for (int q = 0; q < inch; q++)
{
float16x4_t _r0 = vld1_f16(r0);
float16x4_t _k0 = vld1_f16(kptr);
float16x4_t _k1 = vld1_f16(kptr + 4);
float16x4_t _k2 = vld1_f16(kptr + 8);
float16x4_t _k3 = vld1_f16(kptr + 12);
_sum0 = vfma_lane_f16(_sum0, _k0, _r0, 0);
_sum0 = vfma_lane_f16(_sum0, _k1, _r0, 1);
_sum0 = vfma_lane_f16(_sum0, _k2, _r0, 2);
_sum0 = vfma_lane_f16(_sum0, _k3, _r0, 3);
kptr += 16;
r0 += 4;
}
vst1_f16(output0_tm, _sum0);
output0_tm += 4;
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, 2u * 4, 4, opt.workspace_allocator);
}
{
conv3x3s1_winograd64_transform_output_pack4_fp16sa_neon(top_blob_tm, top_blob_bordered, bias, opt);
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
// Direct 3x3 stride-1 convolution, fp16 storage + fp16 arithmetic, pack-4 layout.
// Each "element" of bottom_blob/top_blob is a group of 4 fp16 channel values, so the
// kernel holds 4x4 = 16 fp16 weights per spatial tap (9 taps -> 16*9 values per
// input/output channel-group pair, loaded below as 18 float16x8_t halves).
// NOTE(review): assumes bottom_blob has at least outh+2 rows and outw+2 packed
// columns (valid padding already applied by the caller) — confirm at call sites.
static void conv3x3s1_pack4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;
    const __fp16* bias = _bias;
    // One output channel-group per iteration; channels are independent, so the
    // loop parallelizes without synchronization.
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out0 = top_blob.channel(p);
        // Seed the whole output channel with the bias (or zero), then accumulate
        // each input channel's contribution on top of it.
        float16x4_t _bias0 = bias ? vld1_f16(bias + p * 4) : vdup_n_f16((__fp16)0.f);
        out0.fill(_bias0);
        int q = 0;
        for (; q < inch; q++)
        {
            __fp16* outptr0 = out0.row<__fp16>(0);
            const Mat img0 = bottom_blob.channel(q);
            // Three consecutive input rows feed one output row (3x3 kernel, stride 1).
            const __fp16* r0 = img0.row<const __fp16>(0);
            const __fp16* r1 = img0.row<const __fp16>(1);
            const __fp16* r2 = img0.row<const __fp16>(2);
            const __fp16* kptr = kernel.channel(p).row<const __fp16>(q);
            // 16 * 9
            // Preload all 9 taps of the 4x4 weight sub-matrix into registers.
            // _kRC_01 holds weight columns 0..1 of tap (R,C), _kRC_23 columns 2..3;
            // the asm below splits each with `ext` to address the two 4-lane halves.
            float16x8_t _k00_01 = vld1q_f16(kptr);
            float16x8_t _k00_23 = vld1q_f16(kptr + 8);
            float16x8_t _k01_01 = vld1q_f16(kptr + 16);
            float16x8_t _k01_23 = vld1q_f16(kptr + 24);
            float16x8_t _k02_01 = vld1q_f16(kptr + 32);
            float16x8_t _k02_23 = vld1q_f16(kptr + 40);
            float16x8_t _k10_01 = vld1q_f16(kptr + 48);
            float16x8_t _k10_23 = vld1q_f16(kptr + 56);
            float16x8_t _k11_01 = vld1q_f16(kptr + 64);
            float16x8_t _k11_23 = vld1q_f16(kptr + 72);
            float16x8_t _k12_01 = vld1q_f16(kptr + 80);
            float16x8_t _k12_23 = vld1q_f16(kptr + 88);
            float16x8_t _k20_01 = vld1q_f16(kptr + 96);
            float16x8_t _k20_23 = vld1q_f16(kptr + 104);
            float16x8_t _k21_01 = vld1q_f16(kptr + 112);
            float16x8_t _k21_23 = vld1q_f16(kptr + 120);
            float16x8_t _k22_01 = vld1q_f16(kptr + 128);
            float16x8_t _k22_23 = vld1q_f16(kptr + 136);
            int i = 0;
            for (; i < outh; i++)
            {
                int j = 0;
                // Main kernel: 4 output pixels (4 packed lanes each) per iteration.
                // Operand map: %0 outptr0, %1 r0, %2 r1, %3 r2, %8..%25 weights.
                // v10..v13 accumulate the 4 outputs; row pointers advance by
                // 32 bytes = 4 packed pixels at the end of the block.
                for (; j + 3 < outw; j += 4)
                {
                    asm volatile(
                        "prfm pldl1keep, [%0, #256] \n"
                        "ld1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%0] \n" // sum0 sum1 sum2 sum3
                        "prfm pldl1keep, [%1, #384] \n"
                        "ld1 {v0.8h, v1.8h, v2.8h}, [%1] \n" // r00 r01 r02 r03 r04 r05
                        "ext v6.16b, %8.16b, %8.16b, #8 \n"
                        "fmla v10.4h, %8.4h, v0.h[0] \n"
                        "fmla v11.4h, %8.4h, v0.h[4] \n"
                        "fmla v12.4h, %8.4h, v1.h[0] \n"
                        "fmla v13.4h, %8.4h, v1.h[4] \n"
                        "fmla v10.4h, v6.4h, v0.h[1] \n"
                        "fmla v11.4h, v6.4h, v0.h[5] \n"
                        "fmla v12.4h, v6.4h, v1.h[1] \n"
                        "fmla v13.4h, v6.4h, v1.h[5] \n"
                        "ext v7.16b, %9.16b, %9.16b, #8 \n"
                        "fmla v10.4h, %9.4h, v0.h[2] \n"
                        "fmla v11.4h, %9.4h, v0.h[6] \n"
                        "fmla v12.4h, %9.4h, v1.h[2] \n"
                        "fmla v13.4h, %9.4h, v1.h[6] \n"
                        "fmla v10.4h, v7.4h, v0.h[3] \n"
                        "fmla v11.4h, v7.4h, v0.h[7] \n"
                        "fmla v12.4h, v7.4h, v1.h[3] \n"
                        "fmla v13.4h, v7.4h, v1.h[7] \n"
                        "ext v8.16b, %10.16b, %10.16b, #8 \n"
                        "fmla v10.4h, %10.4h, v0.h[4] \n"
                        "fmla v11.4h, %10.4h, v1.h[0] \n"
                        "fmla v12.4h, %10.4h, v1.h[4] \n"
                        "fmla v13.4h, %10.4h, v2.h[0] \n"
                        "fmla v10.4h, v8.4h, v0.h[5] \n"
                        "fmla v11.4h, v8.4h, v1.h[1] \n"
                        "fmla v12.4h, v8.4h, v1.h[5] \n"
                        "fmla v13.4h, v8.4h, v2.h[1] \n"
                        "ext v9.16b, %11.16b, %11.16b, #8 \n"
                        "fmla v10.4h, %11.4h, v0.h[6] \n"
                        "fmla v11.4h, %11.4h, v1.h[2] \n"
                        "fmla v12.4h, %11.4h, v1.h[6] \n"
                        "fmla v13.4h, %11.4h, v2.h[2] \n"
                        "fmla v10.4h, v9.4h, v0.h[7] \n"
                        "fmla v11.4h, v9.4h, v1.h[3] \n"
                        "fmla v12.4h, v9.4h, v1.h[7] \n"
                        "fmla v13.4h, v9.4h, v2.h[3] \n"
                        "prfm pldl1keep, [%2, #384] \n"
                        "ld1 {v3.8h, v4.8h, v5.8h}, [%2] \n" // r10 r11 r12 r13 r14 r15
                        "ext v6.16b, %12.16b, %12.16b, #8 \n"
                        "fmla v10.4h, %12.4h, v1.h[0] \n"
                        "fmla v11.4h, %12.4h, v1.h[4] \n"
                        "fmla v12.4h, %12.4h, v2.h[0] \n"
                        "fmla v13.4h, %12.4h, v2.h[4] \n"
                        "fmla v10.4h, v6.4h, v1.h[1] \n"
                        "fmla v11.4h, v6.4h, v1.h[5] \n"
                        "fmla v12.4h, v6.4h, v2.h[1] \n"
                        "fmla v13.4h, v6.4h, v2.h[5] \n"
                        "ext v7.16b, %13.16b, %13.16b, #8 \n"
                        "fmla v10.4h, %13.4h, v1.h[2] \n"
                        "fmla v11.4h, %13.4h, v1.h[6] \n"
                        "fmla v12.4h, %13.4h, v2.h[2] \n"
                        "fmla v13.4h, %13.4h, v2.h[6] \n"
                        "fmla v10.4h, v7.4h, v1.h[3] \n"
                        "fmla v11.4h, v7.4h, v1.h[7] \n"
                        "fmla v12.4h, v7.4h, v2.h[3] \n"
                        "fmla v13.4h, v7.4h, v2.h[7] \n"
                        "ext v8.16b, %14.16b, %14.16b, #8 \n"
                        "fmla v10.4h, %14.4h, v3.h[0] \n"
                        "fmla v11.4h, %14.4h, v3.h[4] \n"
                        "fmla v12.4h, %14.4h, v4.h[0] \n"
                        "fmla v13.4h, %14.4h, v4.h[4] \n"
                        "fmla v10.4h, v8.4h, v3.h[1] \n"
                        "fmla v11.4h, v8.4h, v3.h[5] \n"
                        "fmla v12.4h, v8.4h, v4.h[1] \n"
                        "fmla v13.4h, v8.4h, v4.h[5] \n"
                        "ext v9.16b, %15.16b, %15.16b, #8 \n"
                        "fmla v10.4h, %15.4h, v3.h[2] \n"
                        "fmla v11.4h, %15.4h, v3.h[6] \n"
                        "fmla v12.4h, %15.4h, v4.h[2] \n"
                        "fmla v13.4h, %15.4h, v4.h[6] \n"
                        "fmla v10.4h, v9.4h, v3.h[3] \n"
                        "fmla v11.4h, v9.4h, v3.h[7] \n"
                        "fmla v12.4h, v9.4h, v4.h[3] \n"
                        "fmla v13.4h, v9.4h, v4.h[7] \n"
                        "ext v6.16b, %16.16b, %16.16b, #8 \n"
                        "fmla v10.4h, %16.4h, v3.h[4] \n"
                        "fmla v11.4h, %16.4h, v4.h[0] \n"
                        "fmla v12.4h, %16.4h, v4.h[4] \n"
                        "fmla v13.4h, %16.4h, v5.h[0] \n"
                        "fmla v10.4h, v6.4h, v3.h[5] \n"
                        "fmla v11.4h, v6.4h, v4.h[1] \n"
                        "fmla v12.4h, v6.4h, v4.h[5] \n"
                        "fmla v13.4h, v6.4h, v5.h[1] \n"
                        "ext v7.16b, %17.16b, %17.16b, #8 \n"
                        "fmla v10.4h, %17.4h, v3.h[6] \n"
                        "fmla v11.4h, %17.4h, v4.h[2] \n"
                        "fmla v12.4h, %17.4h, v4.h[6] \n"
                        "fmla v13.4h, %17.4h, v5.h[2] \n"
                        "fmla v10.4h, v7.4h, v3.h[7] \n"
                        "fmla v11.4h, v7.4h, v4.h[3] \n"
                        "fmla v12.4h, v7.4h, v4.h[7] \n"
                        "fmla v13.4h, v7.4h, v5.h[3] \n"
                        "prfm pldl1keep, [%3, #384] \n"
                        "ld1 {v0.8h, v1.8h, v2.8h}, [%3] \n" // r20 r21 r22 r23 r24 r25
                        "ext v8.16b, %18.16b, %18.16b, #8 \n"
                        "fmla v10.4h, %18.4h, v4.h[0] \n"
                        "fmla v11.4h, %18.4h, v4.h[4] \n"
                        "fmla v12.4h, %18.4h, v5.h[0] \n"
                        "fmla v13.4h, %18.4h, v5.h[4] \n"
                        "fmla v10.4h, v8.4h, v4.h[1] \n"
                        "fmla v11.4h, v8.4h, v4.h[5] \n"
                        "fmla v12.4h, v8.4h, v5.h[1] \n"
                        "fmla v13.4h, v8.4h, v5.h[5] \n"
                        "ext v9.16b, %19.16b, %19.16b, #8 \n"
                        "fmla v10.4h, %19.4h, v4.h[2] \n"
                        "fmla v11.4h, %19.4h, v4.h[6] \n"
                        "fmla v12.4h, %19.4h, v5.h[2] \n"
                        "fmla v13.4h, %19.4h, v5.h[6] \n"
                        "fmla v10.4h, v9.4h, v4.h[3] \n"
                        "fmla v11.4h, v9.4h, v4.h[7] \n"
                        "fmla v12.4h, v9.4h, v5.h[3] \n"
                        "fmla v13.4h, v9.4h, v5.h[7] \n"
                        "ext v6.16b, %20.16b, %20.16b, #8 \n"
                        "fmla v10.4h, %20.4h, v0.h[0] \n"
                        "fmla v11.4h, %20.4h, v0.h[4] \n"
                        "fmla v12.4h, %20.4h, v1.h[0] \n"
                        "fmla v13.4h, %20.4h, v1.h[4] \n"
                        "fmla v10.4h, v6.4h, v0.h[1] \n"
                        "fmla v11.4h, v6.4h, v0.h[5] \n"
                        "fmla v12.4h, v6.4h, v1.h[1] \n"
                        "fmla v13.4h, v6.4h, v1.h[5] \n"
                        "ext v7.16b, %21.16b, %21.16b, #8 \n"
                        "fmla v10.4h, %21.4h, v0.h[2] \n"
                        "fmla v11.4h, %21.4h, v0.h[6] \n"
                        "fmla v12.4h, %21.4h, v1.h[2] \n"
                        "fmla v13.4h, %21.4h, v1.h[6] \n"
                        "fmla v10.4h, v7.4h, v0.h[3] \n"
                        "fmla v11.4h, v7.4h, v0.h[7] \n"
                        "fmla v12.4h, v7.4h, v1.h[3] \n"
                        "fmla v13.4h, v7.4h, v1.h[7] \n"
                        "ext v8.16b, %22.16b, %22.16b, #8 \n"
                        "fmla v10.4h, %22.4h, v0.h[4] \n"
                        "fmla v11.4h, %22.4h, v1.h[0] \n"
                        "fmla v12.4h, %22.4h, v1.h[4] \n"
                        "fmla v13.4h, %22.4h, v2.h[0] \n"
                        "fmla v10.4h, v8.4h, v0.h[5] \n"
                        "fmla v11.4h, v8.4h, v1.h[1] \n"
                        "fmla v12.4h, v8.4h, v1.h[5] \n"
                        "fmla v13.4h, v8.4h, v2.h[1] \n"
                        "ext v9.16b, %23.16b, %23.16b, #8 \n"
                        "fmla v10.4h, %23.4h, v0.h[6] \n"
                        "fmla v11.4h, %23.4h, v1.h[2] \n"
                        "fmla v12.4h, %23.4h, v1.h[6] \n"
                        "fmla v13.4h, %23.4h, v2.h[2] \n"
                        "fmla v10.4h, v9.4h, v0.h[7] \n"
                        "fmla v11.4h, v9.4h, v1.h[3] \n"
                        "fmla v12.4h, v9.4h, v1.h[7] \n"
                        "fmla v13.4h, v9.4h, v2.h[3] \n"
                        "ext v6.16b, %24.16b, %24.16b, #8 \n"
                        "fmla v10.4h, %24.4h, v1.h[0] \n"
                        "fmla v11.4h, %24.4h, v1.h[4] \n"
                        "fmla v12.4h, %24.4h, v2.h[0] \n"
                        "fmla v13.4h, %24.4h, v2.h[4] \n"
                        "add %1, %1, #32 \n"
                        "fmla v10.4h, v6.4h, v1.h[1] \n"
                        "fmla v11.4h, v6.4h, v1.h[5] \n"
                        "fmla v12.4h, v6.4h, v2.h[1] \n"
                        "fmla v13.4h, v6.4h, v2.h[5] \n"
                        "ext v7.16b, %25.16b, %25.16b, #8 \n"
                        "fmla v10.4h, %25.4h, v1.h[2] \n"
                        "fmla v11.4h, %25.4h, v1.h[6] \n"
                        "fmla v12.4h, %25.4h, v2.h[2] \n"
                        "fmla v13.4h, %25.4h, v2.h[6] \n"
                        "add %2, %2, #32 \n"
                        "fmla v10.4h, v7.4h, v1.h[3] \n"
                        "fmla v11.4h, v7.4h, v1.h[7] \n"
                        "fmla v12.4h, v7.4h, v2.h[3] \n"
                        "fmla v13.4h, v7.4h, v2.h[7] \n"
                        "add %3, %3, #32 \n"
                        "st1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%0], #32 \n"
                        : "=r"(outptr0), // %0
                        "=r"(r0), // %1
                        "=r"(r1), // %2
                        "=r"(r2) // %3
                        : "0"(outptr0),
                        "1"(r0),
                        "2"(r1),
                        "3"(r2),
                        "w"(_k00_01), // %8
                        "w"(_k00_23), // %9
                        "w"(_k01_01), // %10
                        "w"(_k01_23), // %11
                        "w"(_k02_01), // %12
                        "w"(_k02_23), // %13
                        "w"(_k10_01), // %14
                        "w"(_k10_23), // %15
                        "w"(_k11_01), // %16
                        "w"(_k11_23), // %17
                        "w"(_k12_01), // %18
                        "w"(_k12_23), // %19
                        "w"(_k20_01), // %20
                        "w"(_k20_23), // %21
                        "w"(_k21_01), // %22
                        "w"(_k21_23), // %23
                        "w"(_k22_01), // %24
                        "w"(_k22_23) // %25
                        : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13");
                }
                // Tail kernel: 2 output pixels per iteration. Accumulation is split
                // across two register pairs (v10/v11 fresh via fmul, v12/v13 seeded
                // from memory) and merged with the final fadd pair.
                for (; j + 1 < outw; j += 2)
                {
                    asm volatile(
                        "prfm pldl1keep, [%1, #256] \n"
                        "ld1 {v0.8h, v1.8h}, [%1] \n" // r00 r01 r02 r03
                        "prfm pldl1keep, [%0, #128] \n"
                        "ld1 {v12.4h, v13.4h}, [%0] \n" // sum0 sum1
                        "ext v4.16b, %8.16b, %8.16b, #8 \n"
                        "fmul v10.4h, %8.4h, v0.h[0] \n"
                        "fmul v11.4h, %8.4h, v0.h[4] \n"
                        "fmla v12.4h, v4.4h, v0.h[1] \n"
                        "fmla v13.4h, v4.4h, v0.h[5] \n"
                        "ext v5.16b, %9.16b, %9.16b, #8 \n"
                        "fmla v10.4h, %9.4h, v0.h[2] \n"
                        "fmla v11.4h, %9.4h, v0.h[6] \n"
                        "fmla v12.4h, v5.4h, v0.h[3] \n"
                        "fmla v13.4h, v5.4h, v0.h[7] \n"
                        "ext v6.16b, %10.16b, %10.16b, #8 \n"
                        "fmla v10.4h, %10.4h, v0.h[4] \n"
                        "fmla v11.4h, %10.4h, v1.h[0] \n"
                        "fmla v12.4h, v6.4h, v0.h[5] \n"
                        "fmla v13.4h, v6.4h, v1.h[1] \n"
                        "ext v7.16b, %11.16b, %11.16b, #8 \n"
                        "fmla v10.4h, %11.4h, v0.h[6] \n"
                        "fmla v11.4h, %11.4h, v1.h[2] \n"
                        "fmla v12.4h, v7.4h, v0.h[7] \n"
                        "fmla v13.4h, v7.4h, v1.h[3] \n"
                        "prfm pldl1keep, [%2, #256] \n"
                        "ld1 {v2.8h, v3.8h}, [%2] \n" // r10 r11 r12 r13
                        "ext v8.16b, %12.16b, %12.16b, #8 \n"
                        "fmla v10.4h, %12.4h, v1.h[0] \n"
                        "fmla v11.4h, %12.4h, v1.h[4] \n"
                        "fmla v12.4h, v8.4h, v1.h[1] \n"
                        "fmla v13.4h, v8.4h, v1.h[5] \n"
                        "ext v9.16b, %13.16b, %13.16b, #8 \n"
                        "fmla v10.4h, %13.4h, v1.h[2] \n"
                        "fmla v11.4h, %13.4h, v1.h[6] \n"
                        "fmla v12.4h, v9.4h, v1.h[3] \n"
                        "fmla v13.4h, v9.4h, v1.h[7] \n"
                        "ext v4.16b, %14.16b, %14.16b, #8 \n"
                        "fmla v10.4h, %14.4h, v2.h[0] \n"
                        "fmla v11.4h, %14.4h, v2.h[4] \n"
                        "fmla v12.4h, v4.4h, v2.h[1] \n"
                        "fmla v13.4h, v4.4h, v2.h[5] \n"
                        "ext v5.16b, %15.16b, %15.16b, #8 \n"
                        "fmla v10.4h, %15.4h, v2.h[2] \n"
                        "fmla v11.4h, %15.4h, v2.h[6] \n"
                        "fmla v12.4h, v5.4h, v2.h[3] \n"
                        "fmla v13.4h, v5.4h, v2.h[7] \n"
                        "ext v6.16b, %16.16b, %16.16b, #8 \n"
                        "fmla v10.4h, %16.4h, v2.h[4] \n"
                        "fmla v11.4h, %16.4h, v3.h[0] \n"
                        "fmla v12.4h, v6.4h, v2.h[5] \n"
                        "fmla v13.4h, v6.4h, v3.h[1] \n"
                        "ext v7.16b, %17.16b, %17.16b, #8 \n"
                        "fmla v10.4h, %17.4h, v2.h[6] \n"
                        "fmla v11.4h, %17.4h, v3.h[2] \n"
                        "fmla v12.4h, v7.4h, v2.h[7] \n"
                        "fmla v13.4h, v7.4h, v3.h[3] \n"
                        "prfm pldl1keep, [%3, #256] \n"
                        "ld1 {v0.8h, v1.8h}, [%3] \n" // r20 r21 r22 r23
                        "ext v8.16b, %18.16b, %18.16b, #8 \n"
                        "fmla v10.4h, %18.4h, v3.h[0] \n"
                        "fmla v11.4h, %18.4h, v3.h[4] \n"
                        "fmla v12.4h, v8.4h, v3.h[1] \n"
                        "fmla v13.4h, v8.4h, v3.h[5] \n"
                        "ext v9.16b, %19.16b, %19.16b, #8 \n"
                        "fmla v10.4h, %19.4h, v3.h[2] \n"
                        "fmla v11.4h, %19.4h, v3.h[6] \n"
                        "fmla v12.4h, v9.4h, v3.h[3] \n"
                        "fmla v13.4h, v9.4h, v3.h[7] \n"
                        "ext v4.16b, %20.16b, %20.16b, #8 \n"
                        "fmla v10.4h, %20.4h, v0.h[0] \n"
                        "fmla v11.4h, %20.4h, v0.h[4] \n"
                        "fmla v12.4h, v4.4h, v0.h[1] \n"
                        "fmla v13.4h, v4.4h, v0.h[5] \n"
                        "ext v5.16b, %21.16b, %21.16b, #8 \n"
                        "fmla v10.4h, %21.4h, v0.h[2] \n"
                        "fmla v11.4h, %21.4h, v0.h[6] \n"
                        "fmla v12.4h, v5.4h, v0.h[3] \n"
                        "fmla v13.4h, v5.4h, v0.h[7] \n"
                        "ext v6.16b, %22.16b, %22.16b, #8 \n"
                        "fmla v10.4h, %22.4h, v0.h[4] \n"
                        "fmla v11.4h, %22.4h, v1.h[0] \n"
                        "fmla v12.4h, v6.4h, v0.h[5] \n"
                        "fmla v13.4h, v6.4h, v1.h[1] \n"
                        "ext v7.16b, %23.16b, %23.16b, #8 \n"
                        "fmla v10.4h, %23.4h, v0.h[6] \n"
                        "fmla v11.4h, %23.4h, v1.h[2] \n"
                        "fmla v12.4h, v7.4h, v0.h[7] \n"
                        "fmla v13.4h, v7.4h, v1.h[3] \n"
                        "ext v8.16b, %24.16b, %24.16b, #8 \n"
                        "fmla v10.4h, %24.4h, v1.h[0] \n"
                        "fmla v11.4h, %24.4h, v1.h[4] \n"
                        "fmla v12.4h, v8.4h, v1.h[1] \n"
                        "fmla v13.4h, v8.4h, v1.h[5] \n"
                        "ext v9.16b, %25.16b, %25.16b, #8 \n"
                        "fmla v10.4h, %25.4h, v1.h[2] \n"
                        "fmla v11.4h, %25.4h, v1.h[6] \n"
                        "fmla v12.4h, v9.4h, v1.h[3] \n"
                        "fmla v13.4h, v9.4h, v1.h[7] \n"
                        "add %1, %1, #16 \n"
                        "fadd v10.4h, v10.4h, v12.4h \n"
                        "add %2, %2, #16 \n"
                        "fadd v11.4h, v11.4h, v13.4h \n"
                        "add %3, %3, #16 \n"
                        "st1 {v10.4h, v11.4h}, [%0], #16 \n"
                        : "=r"(outptr0), // %0
                        "=r"(r0), // %1
                        "=r"(r1), // %2
                        "=r"(r2) // %3
                        : "0"(outptr0),
                        "1"(r0),
                        "2"(r1),
                        "3"(r2),
                        "w"(_k00_01), // %8
                        "w"(_k00_23), // %9
                        "w"(_k01_01), // %10
                        "w"(_k01_23), // %11
                        "w"(_k02_01), // %12
                        "w"(_k02_23), // %13
                        "w"(_k10_01), // %14
                        "w"(_k10_23), // %15
                        "w"(_k11_01), // %16
                        "w"(_k11_23), // %17
                        "w"(_k12_01), // %18
                        "w"(_k12_23), // %19
                        "w"(_k20_01), // %20
                        "w"(_k20_23), // %21
                        "w"(_k21_01), // %22
                        "w"(_k21_23), // %23
                        "w"(_k22_01), // %24
                        "w"(_k22_23) // %25
                        : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13");
                }
                // Tail kernel: 1 output pixel per iteration. Four partial sums
                // (v10..v13) are reduced with fadd before the single store.
                for (; j < outw; j++)
                {
                    asm volatile(
                        "prfm pldl1keep, [%1, #192] \n"
                        "ld1 {v0.4h, v1.4h, v2.4h}, [%1] \n" // r00 r01 r02
                        "prfm pldl1keep, [%0, #64] \n"
                        "ld1 {v13.4h}, [%0] \n" // sum0
                        "ext v6.16b, %8.16b, %8.16b, #8 \n"
                        "fmul v10.4h, %8.4h, v0.h[0] \n"
                        "fmul v11.4h, v6.4h, v0.h[1] \n"
                        "ext v7.16b, %9.16b, %9.16b, #8 \n"
                        "fmul v12.4h, %9.4h, v0.h[2] \n"
                        "fmla v13.4h, v7.4h, v0.h[3] \n"
                        "ext v8.16b, %10.16b, %10.16b, #8 \n"
                        "fmla v10.4h, %10.4h, v1.h[0] \n"
                        "fmla v11.4h, v8.4h, v1.h[1] \n"
                        "ext v9.16b, %11.16b, %11.16b, #8 \n"
                        "fmla v12.4h, %11.4h, v1.h[2] \n"
                        "fmla v13.4h, v9.4h, v1.h[3] \n"
                        "prfm pldl1keep, [%2, #192] \n"
                        "ld1 {v3.4h, v4.4h, v5.4h}, [%2] \n" // r10 r11 r12
                        "ext v6.16b, %12.16b, %12.16b, #8 \n"
                        "fmla v10.4h, %12.4h, v2.h[0] \n"
                        "fmla v11.4h, v6.4h, v2.h[1] \n"
                        "ext v7.16b, %13.16b, %13.16b, #8 \n"
                        "fmla v12.4h, %13.4h, v2.h[2] \n"
                        "fmla v13.4h, v7.4h, v2.h[3] \n"
                        "ext v8.16b, %14.16b, %14.16b, #8 \n"
                        "fmla v10.4h, %14.4h, v3.h[0] \n"
                        "fmla v11.4h, v8.4h, v3.h[1] \n"
                        "ext v9.16b, %15.16b, %15.16b, #8 \n"
                        "fmla v12.4h, %15.4h, v3.h[2] \n"
                        "fmla v13.4h, v9.4h, v3.h[3] \n"
                        "ext v6.16b, %16.16b, %16.16b, #8 \n"
                        "fmla v10.4h, %16.4h, v4.h[0] \n"
                        "fmla v11.4h, v6.4h, v4.h[1] \n"
                        "ext v7.16b, %17.16b, %17.16b, #8 \n"
                        "fmla v12.4h, %17.4h, v4.h[2] \n"
                        "fmla v13.4h, v7.4h, v4.h[3] \n"
                        "prfm pldl1keep, [%3, #192] \n"
                        "ld1 {v0.4h, v1.4h, v2.4h}, [%3] \n" // r20 r21 r22
                        "ext v8.16b, %18.16b, %18.16b, #8 \n"
                        "fmla v10.4h, %18.4h, v5.h[0] \n"
                        "fmla v11.4h, v8.4h, v5.h[1] \n"
                        "ext v9.16b, %19.16b, %19.16b, #8 \n"
                        "fmla v12.4h, %19.4h, v5.h[2] \n"
                        "fmla v13.4h, v9.4h, v5.h[3] \n"
                        "ext v6.16b, %20.16b, %20.16b, #8 \n"
                        "fmla v10.4h, %20.4h, v0.h[0] \n"
                        "fmla v11.4h, v6.4h, v0.h[1] \n"
                        "ext v7.16b, %21.16b, %21.16b, #8 \n"
                        "fmla v12.4h, %21.4h, v0.h[2] \n"
                        "fmla v13.4h, v7.4h, v0.h[3] \n"
                        "ext v8.16b, %22.16b, %22.16b, #8 \n"
                        "fmla v10.4h, %22.4h, v1.h[0] \n"
                        "fmla v11.4h, v8.4h, v1.h[1] \n"
                        "ext v9.16b, %23.16b, %23.16b, #8 \n"
                        "fmla v12.4h, %23.4h, v1.h[2] \n"
                        "fmla v13.4h, v9.4h, v1.h[3] \n"
                        "ext v6.16b, %24.16b, %24.16b, #8 \n"
                        "fmla v10.4h, %24.4h, v2.h[0] \n"
                        "fmla v11.4h, v6.4h, v2.h[1] \n"
                        "ext v7.16b, %25.16b, %25.16b, #8 \n"
                        "fmla v12.4h, %25.4h, v2.h[2] \n"
                        "fmla v13.4h, v7.4h, v2.h[3] \n"
                        "fadd v10.4h, v10.4h, v11.4h \n"
                        "add %1, %1, #8 \n"
                        "fadd v12.4h, v12.4h, v13.4h \n"
                        "add %2, %2, #8 \n"
                        "fadd v10.4h, v10.4h, v12.4h \n"
                        "add %3, %3, #8 \n"
                        "st1 {v10.4h}, [%0], #8 \n"
                        : "=r"(outptr0), // %0
                        "=r"(r0), // %1
                        "=r"(r1), // %2
                        "=r"(r2) // %3
                        : "0"(outptr0),
                        "1"(r0),
                        "2"(r1),
                        "3"(r2),
                        "w"(_k00_01), // %8
                        "w"(_k00_23), // %9
                        "w"(_k01_01), // %10
                        "w"(_k01_23), // %11
                        "w"(_k02_01), // %12
                        "w"(_k02_23), // %13
                        "w"(_k10_01), // %14
                        "w"(_k10_23), // %15
                        "w"(_k11_01), // %16
                        "w"(_k11_23), // %17
                        "w"(_k12_01), // %18
                        "w"(_k12_23), // %19
                        "w"(_k20_01), // %20
                        "w"(_k20_23), // %21
                        "w"(_k21_01), // %22
                        "w"(_k21_23), // %23
                        "w"(_k22_01), // %24
                        "w"(_k22_23) // %25
                        : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13");
                }
                // Skip the 2-pixel (8 fp16) horizontal overlap of the 3x3 window
                // so the next output row starts at the correct input column.
                r0 += 8;
                r1 += 8;
                r2 += 8;
            }
        }
    }
}
|
randomwalks_cpu.h | /*!
* Copyright (c) 2018 by Contributors
* \file graph/sampler/generic_randomwalk_cpu.h
* \brief DGL sampler - templated implementation definition of random walks on CPU
*/
#ifndef DGL_GRAPH_SAMPLING_RANDOMWALKS_RANDOMWALKS_CPU_H_
#define DGL_GRAPH_SAMPLING_RANDOMWALKS_RANDOMWALKS_CPU_H_
#include <dgl/base_heterograph.h>
#include <dgl/array.h>
#include "randomwalks_impl.h"
namespace dgl {
using namespace dgl::runtime;
using namespace dgl::aten;
namespace sampling {
namespace impl {
namespace {
/*!
* \brief Generic Random Walk.
* \param seeds A 1D array of seed nodes, with the type the source type of the first
* edge type in the metapath.
* \param max_num_steps The maximum number of steps of a random walk path.
* \param step The random walk step function with type \c StepFunc.
* \return A 2D array of shape (len(seeds), max_num_steps + 1) with node IDs.
* \note The graph itself should be bounded in the closure of \c step.
*/
/*!
 * \brief Generic random walk driver: one trace row per seed node.
 *
 * \param seeds 1D array of seed node IDs (dtype/ctx of seeds is reused for
 *        the output array).
 * \param max_num_steps Maximum number of steps per walk.
 * \param step Per-step transition functor; receives a pointer to the seed's
 *        trace row written so far, the current node, and the step index, and
 *        returns (next node, terminate flag).
 * \return 2D IdArray of shape (len(seeds), max_num_steps + 1); unreached
 *         trailing positions are filled with -1.
 * \note The graph itself should be bounded in the closure of \c step.
 */
template<DLDeviceType XPU, typename IdxType>
IdArray GenericRandomWalk(
    const IdArray seeds,
    int64_t max_num_steps,
    StepFunc<IdxType> step) {
  int64_t num_seeds = seeds->shape[0];
  int64_t trace_length = max_num_steps + 1;
  IdArray traces = IdArray::Empty({num_seeds, trace_length}, seeds->dtype, seeds->ctx);
  const IdxType *seed_data = static_cast<IdxType *>(seeds->data);
  IdxType *traces_data = static_cast<IdxType *>(traces->data);
  // Each seed owns an independent row of `traces`, so the walks parallelize
  // without synchronization.
#pragma omp parallel for
  for (int64_t seed_id = 0; seed_id < num_seeds; ++seed_id) {
    int64_t i;
    dgl_id_t curr = seed_data[seed_id];
    traces_data[seed_id * trace_length] = curr;
    for (i = 0; i < max_num_steps; ++i) {
      // BUGFIX: the row stride of `traces` is trace_length (= max_num_steps + 1),
      // not max_num_steps.  The original code passed
      // `traces_data + seed_id * max_num_steps`, which for every seed after the
      // first pointed into the wrong (previous) row, so `step` observed a
      // corrupted prefix of the walk.
      const auto &succ = step(traces_data + seed_id * trace_length, curr, i);
      traces_data[seed_id * trace_length + i + 1] = curr = succ.first;
      if (succ.second)
        break;
    }
    // Pad the unreached tail of an early-terminated walk with -1 sentinels.
    for (; i < max_num_steps; ++i)
      traces_data[seed_id * trace_length + i + 1] = -1;
  }
  return traces;
}
}; // namespace
}; // namespace impl
}; // namespace sampling
}; // namespace dgl
#endif // DGL_GRAPH_SAMPLING_RANDOMWALKS_RANDOMWALKS_CPU_H_
|
cvAdvDiff_bnd_omp.c | /* -----------------------------------------------------------------
* Programmer(s): Daniel Reynolds and Ting Yan @ SMU
* Based on cvAdvDiff_bnd.c and parallelized with OpenMP
* -----------------------------------------------------------------
* Example problem:
*
* The following is a simple example problem with a banded Jacobian,
* solved using CVODE.
* The problem is the semi-discrete form of the advection-diffusion
* equation in 2-D:
* du/dt = d^2 u / dx^2 + .5 du/dx + d^2 u / dy^2
* on the rectangle 0 <= x <= 2, 0 <= y <= 1, and the time
* interval 0 <= t <= 1. Homogeneous Dirichlet boundary conditions
* are posed, and the initial condition is
* u(x,y,t=0) = x(2-x)y(1-y)exp(5xy).
* The PDE is discretized on a uniform MX+2 by MY+2 grid with
* central differencing, and with boundary values eliminated,
* leaving an ODE system of size NEQ = MX*MY.
* This program solves the problem with the BDF method, Newton
* iteration with the SUNBAND linear solver, and a user-supplied
* Jacobian routine.
* It uses scalar relative and absolute tolerances.
* Output is printed at t = .1, .2, ..., 1.
* Run statistics (optional outputs) are printed at the end.
*
* Optionally, we can set the number of threads from environment
* variable or command line. To check the current value for number
* of threads from environment:
* % echo $OMP_NUM_THREADS
*
* Execution:
*
* To use the default value or the number of threads from the
* environment value, run without arguments:
* % ./cvAdvDiff_bnd_omp
* The environment variable can be over-ridden with a command line
* argument specifying the number of threads to use, e.g:
* % ./cvAdvDiff_bnd_omp 5
* ----------------------------------------------------------------- */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/* Header files with a description of contents */
#include <cvode/cvode.h> /* prototypes for CVODE fcts., consts. */
#include <nvector/nvector_openmp.h> /* OpenMP N_Vector types, fcts., macros */
#include <sunmatrix/sunmatrix_band.h> /* access to band SUNMatrix */
#include <sunlinsol/sunlinsol_band.h> /* access to band SUNLinearSolver */
#include <cvode/cvode_direct.h> /* access to CVDls interface */
#include <sundials/sundials_types.h> /* definition of type realtype */
#include <sundials/sundials_math.h> /* definition of ABS and EXP */
#ifdef _OPENMP
#include <omp.h>
#endif
/* Problem Constants */
#define XMAX RCONST(2.0) /* domain boundaries */
#define YMAX RCONST(1.0)
#define MX 10 /* mesh dimensions */
#define MY 5
#define NEQ MX*MY /* number of equations */
#define ATOL RCONST(1.0e-5) /* scalar absolute tolerance */
#define T0 RCONST(0.0) /* initial time */
#define T1 RCONST(0.1) /* first output time */
#define DTOUT RCONST(0.1) /* output time increment */
#define NOUT 10 /* number of output times */
#define ZERO RCONST(0.0)
#define HALF RCONST(0.5)
#define ONE RCONST(1.0)
#define TWO RCONST(2.0)
#define FIVE RCONST(5.0)
/* User-defined vector access macro IJth */
/* IJth is defined in order to isolate the translation from the
mathematical 2-dimensional structure of the dependent variable vector
to the underlying 1-dimensional storage.
IJth(vdata,i,j) references the element in the vdata array for
u at mesh point (i,j), where 1 <= i <= MX, 1 <= j <= MY.
The vdata array is obtained via the macro call vdata = NV_DATA_S(v),
where v is an N_Vector.
The variables are ordered by the y index j, then by the x index i. */
#define IJth(vdata,i,j) (vdata[(j-1) + (i-1)*MY])
/* Type : UserData (contains grid constants) */
typedef struct {
realtype dx, dy, hdcoef, hacoef, vdcoef;
int nthreads;
} *UserData;
/* Private Helper Functions */
static void SetIC(N_Vector u, UserData data);
static void PrintHeader(realtype reltol, realtype abstol, realtype umax);
static void PrintOutput(realtype t, realtype umax, long int nst);
static void PrintFinalStats(void *cvode_mem);
/* Private function to check function return values */
static int check_flag(void *flagvalue, const char *funcname, int opt);
/* Functions Called by the Solver */
static int f(realtype t, N_Vector u, N_Vector udot, void *user_data);
static int Jac(realtype t, N_Vector u, N_Vector fu, SUNMatrix J,
void *user_data, N_Vector tmp1, N_Vector tmp2, N_Vector tmp3);
/*
*-------------------------------
* Main Program
*-------------------------------
*/
/* Driver: integrate the 2-D advection-diffusion problem with CVODE (BDF +
 * Newton + banded direct solver), printing max-norm of u at each output time.
 * An optional command-line argument overrides the OpenMP thread count. */
int main(int argc, char *argv[])
{
  realtype dx, dy, reltol, abstol, t, tout, umax;
  N_Vector u;
  UserData data;
  SUNMatrix A;
  SUNLinearSolver LS;
  void *cvode_mem;
  int iout, flag;
  long int nst;
  int num_threads;
  u = NULL;
  data = NULL;
  A = NULL;
  LS = NULL;
  cvode_mem = NULL;
  /* Set the number of threads to use */
  num_threads = 1;     /* default value */
#ifdef _OPENMP
  num_threads = omp_get_max_threads();  /* Overwrite with OMP_NUM_THREADS environment variable */
#endif
  if (argc > 1)        /* overwrite with command line value, if supplied */
    num_threads = (int) strtol(argv[1], NULL, 0);
  /* ROBUSTNESS FIX: strtol returns 0 for unparsable input and may return a
     negative value; either would be passed on to N_VNew_OpenMP and the OpenMP
     num_threads clauses.  Fall back to a single thread instead. */
  if (num_threads <= 0) num_threads = 1;
  /* Create an OpenMP vector */
  u = N_VNew_OpenMP(NEQ, num_threads);  /* Allocate u vector */
  if(check_flag((void*)u, "N_VNew_OpenMP", 0)) return(1);
  reltol = ZERO;  /* Set the tolerances */
  abstol = ATOL;
  data = (UserData) malloc(sizeof *data);  /* Allocate data memory */
  if(check_flag((void *)data, "malloc", 2)) return(1);
  dx = data->dx = XMAX/(MX+1);  /* Set grid coefficients in data */
  dy = data->dy = YMAX/(MY+1);
  data->hdcoef = ONE/(dx*dx);   /* horizontal diffusion */
  data->hacoef = HALF/(TWO*dx); /* horizontal advection */
  data->vdcoef = ONE/(dy*dy);   /* vertical diffusion */
  data->nthreads = num_threads;
  SetIC(u, data);  /* Initialize u vector */
  /* Call CVodeCreate to create the solver memory and specify the
   * Backward Differentiation Formula and the use of a Newton iteration */
  cvode_mem = CVodeCreate(CV_BDF, CV_NEWTON);
  if(check_flag((void *)cvode_mem, "CVodeCreate", 0)) return(1);
  /* Call CVodeInit to initialize the integrator memory and specify the
   * user's right hand side function in u'=f(t,u), the inital time T0, and
   * the initial dependent variable vector u. */
  flag = CVodeInit(cvode_mem, f, T0, u);
  if(check_flag(&flag, "CVodeInit", 1)) return(1);
  /* Call CVodeSStolerances to specify the scalar relative tolerance
   * and scalar absolute tolerance */
  flag = CVodeSStolerances(cvode_mem, reltol, abstol);
  if (check_flag(&flag, "CVodeSStolerances", 1)) return(1);
  /* Set the pointer to user-defined data */
  flag = CVodeSetUserData(cvode_mem, data);
  if(check_flag(&flag, "CVodeSetUserData", 1)) return(1);
  /* Create banded SUNMatrix for use in linear solves -- since this will be factored,
     set the storage bandwidth to be the sum of upper and lower bandwidths */
  A = SUNBandMatrix(NEQ, MY, MY, 2*MY);
  if(check_flag((void *)A, "SUNBandMatrix", 0)) return(1);
  /* Create banded SUNLinearSolver object for use by CVode */
  LS = SUNBandLinearSolver(u, A);
  if(check_flag((void *)LS, "SUNBandLinearSolver", 0)) return(1);
  /* Call CVDlsSetLinearSolver to attach the matrix and linear solver to CVode */
  flag = CVDlsSetLinearSolver(cvode_mem, LS, A);
  if(check_flag(&flag, "CVDlsSetLinearSolver", 1)) return(1);
  /* Set the user-supplied Jacobian routine Jac */
  flag = CVDlsSetJacFn(cvode_mem, Jac);
  if(check_flag(&flag, "CVDlsSetJacFn", 1)) return(1);
  /* In loop over output points: call CVode, print results, test for errors */
  umax = N_VMaxNorm(u);
  PrintHeader(reltol, abstol, umax);
  for(iout=1, tout=T1; iout <= NOUT; iout++, tout += DTOUT) {
    flag = CVode(cvode_mem, tout, u, &t, CV_NORMAL);
    if(check_flag(&flag, "CVode", 1)) break;
    umax = N_VMaxNorm(u);
    flag = CVodeGetNumSteps(cvode_mem, &nst);
    check_flag(&flag, "CVodeGetNumSteps", 1);
    PrintOutput(t, umax, nst);
  }
  PrintFinalStats(cvode_mem);  /* Print some final statistics */
  printf("num_threads = %i\n\n", num_threads);
  N_VDestroy_OpenMP(u);   /* Free the u vector */
  CVodeFree(&cvode_mem);  /* Free the integrator memory */
  SUNLinSolFree(LS);      /* Free the linear solver memory */
  SUNMatDestroy(A);       /* Free the matrix memory */
  free(data);             /* Free the user data */
  return(0);
}
/*
*-------------------------------
* Functions called by the solver
*-------------------------------
*/
/* f routine. Compute f(t,u). */
/* f routine: evaluate the semi-discrete right-hand side udot = f(t,u) for the
   2-D advection-diffusion stencil.  Off-grid neighbors are the homogeneous
   Dirichlet value ZERO.  Always returns 0 (success). */
static int f(realtype t, N_Vector u,N_Vector udot, void *user_data)
{
  realtype uc, ubelow, uabove, uleft, uright;
  realtype *uarr, *duarr;
  realtype hordc, horac, verdc;
  int ix, jy;
  UserData data;
  uarr  = NV_DATA_OMP(u);
  duarr = NV_DATA_OMP(udot);
  /* Grid coefficients precomputed in main() */
  data  = (UserData) user_data;
  hordc = data->hdcoef;
  horac = data->hacoef;
  verdc = data->vdcoef;
  /* Sweep every interior grid point; iterations are independent. */
#pragma omp parallel for default(shared) private(jy, ix, uc, ubelow, uabove, uleft, uright) num_threads(data->nthreads)
  for (jy=1; jy <= MY; jy++) {
    for (ix=1; ix <= MX; ix++) {
      /* Center value and its four neighbors (ZERO on the boundary) */
      uc     = IJth(uarr, ix, jy);
      ubelow = (jy == 1)  ? ZERO : IJth(uarr, ix, jy-1);
      uabove = (jy == MY) ? ZERO : IJth(uarr, ix, jy+1);
      uleft  = (ix == 1)  ? ZERO : IJth(uarr, ix-1, jy);
      uright = (ix == MX) ? ZERO : IJth(uarr, ix+1, jy);
      /* horizontal diffusion + horizontal advection + vertical diffusion */
      IJth(duarr, ix, jy) = hordc*(uleft - TWO*uc + uright)
                            + horac*(uright - uleft)
                            + verdc*(uabove - TWO*uc + ubelow);
    }
  }
  return(0);
}
/* Jacobian routine. Compute J(t,u). */
/*
 * Fills the banded Jacobian of f. The Jacobian is constant in t and u,
 * so the arguments t, u, fu and the tmp vectors are unused here.
 * Grid point (i,j) maps to equation k = (j-1) + (i-1)*MY; each (i,j)
 * writes one column of the band matrix, so the collapse(2) parallel
 * loop touches disjoint columns and needs no synchronization.
 * Always returns 0 (success).
 */
static int Jac(realtype t, N_Vector u, N_Vector fu,
SUNMatrix J, void *user_data,
N_Vector tmp1, N_Vector tmp2, N_Vector tmp3)
{
sunindextype i, j, k;
realtype *kthCol, hordc, horac, verdc;
UserData data;
/*
The components of f = udot that depend on u(i,j) are
f(i,j), f(i-1,j), f(i+1,j), f(i,j-1), f(i,j+1), with
df(i,j)/du(i,j) = -2 (1/dx^2 + 1/dy^2)
df(i-1,j)/du(i,j) = 1/dx^2 + .25/dx (if i > 1)
df(i+1,j)/du(i,j) = 1/dx^2 - .25/dx (if i < MX)
df(i,j-1)/du(i,j) = 1/dy^2 (if j > 1)
df(i,j+1)/du(i,j) = 1/dy^2 (if j < MY)
*/
data = (UserData) user_data;
hordc = data->hdcoef;
horac = data->hacoef;
verdc = data->vdcoef;
/* Both loops are collapsed into one parallel iteration space. */
#pragma omp parallel for collapse(2) default(shared) private(i, j, k, kthCol) num_threads(data->nthreads)
for (j=1; j <= MY; j++) {
for (i=1; i <= MX; i++) {
k = j-1 + (i-1)*MY;
kthCol = SUNBandMatrix_Column(J,k);
/* set the kth column of J */
SM_COLUMN_ELEMENT_B(kthCol,k,k) = -TWO*(verdc+hordc);
if (i != 1) SM_COLUMN_ELEMENT_B(kthCol,k-MY,k) = hordc + horac;
if (i != MX) SM_COLUMN_ELEMENT_B(kthCol,k+MY,k) = hordc - horac;
if (j != 1) SM_COLUMN_ELEMENT_B(kthCol,k-1,k) = verdc;
if (j != MY) SM_COLUMN_ELEMENT_B(kthCol,k+1,k) = verdc;
}
}
return(0);
}
/*
*-------------------------------
* Private helper functions
*-------------------------------
*/
/* Set initial conditions in u vector */
/*
 * Loads the initial profile
 *   u(x,y) = x (XMAX - x) y (YMAX - y) exp(5 x y)
 * evaluated at the interior grid nodes x = i*dx, y = j*dy, into u.
 * Rows are distributed over the available OpenMP threads; every
 * (col,row) pair writes a distinct entry, so the loop is race-free.
 */
static void SetIC(N_Vector u, UserData data)
{
  int row, col;
  realtype xval, yval;
  realtype hx, hy;
  realtype *uarr;

  /* Grid spacings supplied by the problem description */
  hx = data->dx;
  hy = data->dy;

  /* Direct pointer into the OpenMP vector's storage */
  uarr = NV_DATA_OMP(u);

  #pragma omp parallel for default(shared) private(row, col, yval, xval)
  for (row = 1; row <= MY; row++) {
    yval = row*hy;
    for (col = 1; col <= MX; col++) {
      xval = col*hx;
      IJth(uarr, col, row) =
        xval*(XMAX - xval)*yval*(YMAX - yval)*SUNRexp(FIVE*xval*yval);
    }
  }
}
/* Print first lines of output (problem description) */
/*
 * Prints the problem banner: mesh size, total system size, the two
 * tolerances and the initial max-norm of u. The printf format differs
 * only in the float conversion specifier, selected at compile time by
 * the SUNDIALS precision macros (%Lg/%Le for extended, %g/%e otherwise).
 */
static void PrintHeader(realtype reltol, realtype abstol, realtype umax)
{
printf("\n2-D Advection-Diffusion Equation\n");
printf("Mesh dimensions = %d X %d\n", MX, MY);
printf("Total system size = %d\n", NEQ);
#if defined(SUNDIALS_EXTENDED_PRECISION)
printf("Tolerance parameters: reltol = %Lg abstol = %Lg\n\n",
reltol, abstol);
printf("At t = %Lg max.norm(u) =%14.6Le \n", T0, umax);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
printf("Tolerance parameters: reltol = %g abstol = %g\n\n",
reltol, abstol);
printf("At t = %g max.norm(u) =%14.6e \n", T0, umax);
#else
printf("Tolerance parameters: reltol = %g abstol = %g\n\n", reltol, abstol);
printf("At t = %g max.norm(u) =%14.6e \n", T0, umax);
#endif
return;
}
/* Print current value */
/*
 * One progress line per output time: current t, max-norm of the
 * solution and the cumulative CVode step count. The precision macros
 * only change the float conversion specifier.
 */
static void PrintOutput(realtype t, realtype umax, long int nst)
{
#if defined(SUNDIALS_EXTENDED_PRECISION)
printf("At t = %4.2Lf max.norm(u) =%14.6Le nst = %4ld\n", t, umax, nst);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
printf("At t = %4.2f max.norm(u) =%14.6e nst = %4ld\n", t, umax, nst);
#else
printf("At t = %4.2f max.norm(u) =%14.6e nst = %4ld\n", t, umax, nst);
#endif
return;
}
/* Get and print some final statistics */
/*
 * Queries the integrator counters (steps, RHS evaluations, linear
 * solver setups, error-test failures, nonlinear iterations/failures,
 * Jacobian evaluations and linear-solver RHS evaluations) and prints
 * them in two summary lines. Each query's return flag is run through
 * check_flag so a failed query is reported on stderr.
 * NOTE(review): the CVDls* accessors are the older SUNDIALS direct
 * linear solver naming -- confirm against the SUNDIALS version in use.
 */
static void PrintFinalStats(void *cvode_mem)
{
int flag;
long int nst, nfe, nsetups, netf, nni, ncfn, nje, nfeLS;
flag = CVodeGetNumSteps(cvode_mem, &nst);
check_flag(&flag, "CVodeGetNumSteps", 1);
flag = CVodeGetNumRhsEvals(cvode_mem, &nfe);
check_flag(&flag, "CVodeGetNumRhsEvals", 1);
flag = CVodeGetNumLinSolvSetups(cvode_mem, &nsetups);
check_flag(&flag, "CVodeGetNumLinSolvSetups", 1);
flag = CVodeGetNumErrTestFails(cvode_mem, &netf);
check_flag(&flag, "CVodeGetNumErrTestFails", 1);
flag = CVodeGetNumNonlinSolvIters(cvode_mem, &nni);
check_flag(&flag, "CVodeGetNumNonlinSolvIters", 1);
flag = CVodeGetNumNonlinSolvConvFails(cvode_mem, &ncfn);
check_flag(&flag, "CVodeGetNumNonlinSolvConvFails", 1);
flag = CVDlsGetNumJacEvals(cvode_mem, &nje);
check_flag(&flag, "CVDlsGetNumJacEvals", 1);
flag = CVDlsGetNumRhsEvals(cvode_mem, &nfeLS);
check_flag(&flag, "CVDlsGetNumRhsEvals", 1);
printf("\nFinal Statistics:\n");
printf("nst = %-6ld nfe = %-6ld nsetups = %-6ld nfeLS = %-6ld nje = %ld\n",
nst, nfe, nsetups, nfeLS, nje);
printf("nni = %-6ld ncfn = %-6ld netf = %ld\n",
nni, ncfn, netf);
return;
}
/* Check function return value...
     opt == 0 means SUNDIALS function allocates memory so check if
              returned NULL pointer
     opt == 1 means SUNDIALS function returns a flag so check if
              flag >= 0
     opt == 2 means function allocates memory so check if returned
              NULL pointer
   Returns 1 (and prints to stderr) on failure, 0 otherwise. */
static int check_flag(void *flagvalue, const char *funcname, int opt)
{
  switch (opt) {

  case 0: /* SUNDIALS allocator: NULL result means failure */
    if (flagvalue == NULL) {
      fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n",
              funcname);
      return(1);
    }
    break;

  case 1: { /* SUNDIALS call returning a flag: negative means failure */
    int flag = *(int *) flagvalue;
    if (flag < 0) {
      fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed with flag = %d\n\n",
              funcname, flag);
      return(1);
    }
    break;
  }

  case 2: /* plain allocation: NULL result means failure */
    if (flagvalue == NULL) {
      fprintf(stderr, "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n",
              funcname);
      return(1);
    }
    break;
  }

  return(0);
}
|
rawSHA256_ng_fmt_plug.c | /*
* Copyright 2013, epixoip.
* AVX2 support, Copyright (c) 2015 magnum
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that redistribution of source
* retains the above copyright.
*/
#include "arch.h"
#if SIMD_COEF_32
#if FMT_EXTERNS_H
extern struct fmt_main fmt_rawSHA256_ng;
#elif FMT_REGISTERS_H
john_register_one(&fmt_rawSHA256_ng);
#else
#if !FAST_FORMATS_OMP
#undef _OPENMP
#endif
#if _OPENMP
#include <omp.h>
#if __XOP__
#ifndef OMP_SCALE
#define OMP_SCALE 512 /* AMD */
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 512 /* Intel */
#endif
#endif
#endif
#include "misc.h"
#if !defined(DEBUG) && !defined(WITH_ASAN)
// These compilers claim to be __GNUC__ but warn on gcc pragmas.
#if __GNUC__ && !__INTEL_COMPILER && !__clang__ && !__llvm__ && !_MSC_VER
#pragma GCC optimize 3
#endif
#endif
#include <string.h>
#include <stdint.h>
#include "pseudo_intrinsics.h"
#include "common.h"
#include "formats.h"
#include "aligned.h"
#include "memdbg.h"
#if __MIC__
#define SIMD_TYPE "512/512 MIC 16x"
#elif __AVX512F__
#define SIMD_TYPE "512/512 AVX512 16x"
#elif __AVX2__
#define SIMD_TYPE "256/256 AVX2 8x"
#elif __ALTIVEC__
#define SIMD_TYPE "128/128 AltiVec 4x"
#elif __ARM_NEON
#define SIMD_TYPE "128/128 NEON 4x"
#elif __XOP__
#define SIMD_TYPE "128/128 XOP 4x"
#elif __SSE4_1__
#define SIMD_TYPE "128/128 SSE4.1 4x"
#elif __SSSE3__
#define SIMD_TYPE "128/128 SSSE3 4x"
#else
#define SIMD_TYPE "128/128 SSE2 4x"
#endif
#define BINARY_SIZE 4
#define FORMAT_LABEL "Raw-SHA256-ng"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "SHA256 " SIMD_TYPE
#define VWIDTH SIMD_COEF_32
#define MAXLEN 55
#define PLAINTEXT_LENGTH MAXLEN
#define CIPHERTEXT_LENGTH 64
#define DIGEST_SIZE 32
#define _RAWSHA256_H
#include "rawSHA256_common.h"
#undef _RAWSHA256_H
#define SALT_SIZE 0
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT VWIDTH
#define MAX_KEYS_PER_CRYPT VWIDTH
#if __SSE4_1__ && !__AVX2__
#undef GATHER
#define GATHER(x, y, z) \
{ \
x = _mm_cvtsi32_si128( y[index][z] ); \
x = _mm_insert_epi32(x, y[index + 1][z], 1); \
x = _mm_insert_epi32(x, y[index + 2][z], 2); \
x = _mm_insert_epi32(x, y[index + 3][z], 3); \
}
#endif
#define S0(x) \
( \
vxor( \
vroti_epi32(x, -22), \
vxor( \
vroti_epi32(x, -2), \
vroti_epi32(x, -13) \
) \
) \
)
#define S1(x) \
( \
vxor( \
vroti_epi32(x, -25), \
vxor( \
vroti_epi32(x, -6), \
vroti_epi32(x, -11) \
) \
) \
)
#define s0(x) \
( \
vxor( \
vsrli_epi32(x, 3), \
vxor( \
vroti_epi32(x, -7), \
vroti_epi32(x, -18) \
) \
) \
)
#define s1(x) \
( \
vxor( \
vsrli_epi32(x, 10), \
vxor( \
vroti_epi32(x, -17), \
vroti_epi32(x, -19) \
) \
) \
)
#if !VCMOV_EMULATED
#define Maj(x,y,z) vcmov(x, y, vxor(z, y))
#else
#define Maj(x,y,z) vor(vand(x, y), vand(vor(x, y), z))
#endif
#define Ch(x,y,z) vcmov(y, z, x)
#define R(t) \
{ \
w[t] = vadd_epi32(s1(w[t - 2]), w[t - 7]); \
w[t] = vadd_epi32(s0(w[t - 15]), w[t]); \
w[t] = vadd_epi32( w[t - 16], w[t]); \
}
#define SHA256_STEP(a,b,c,d,e,f,g,h,x,K) \
{ \
if (x > 15) R(x); \
tmp1 = vadd_epi32(h, S1(e)); \
tmp1 = vadd_epi32(tmp1, Ch(e,f,g)); \
tmp1 = vadd_epi32(tmp1, vset1_epi32(K)); \
tmp1 = vadd_epi32(tmp1, w[x]); \
tmp2 = vadd_epi32(S0(a),Maj(a,b,c)); \
d = vadd_epi32(tmp1, d); \
h = vadd_epi32(tmp1, tmp2); \
}
static uint32_t (*saved_key)[64];
static uint32_t *crypt_key[ 8];
/*
 * Format init hook: scales the advertised key batch sizes by the OpenMP
 * thread count (times OMP_SCALE for max) and allocates the aligned key
 * and result buffers. saved_key holds one 64-word SHA-256 message block
 * per candidate; crypt_key holds the 8 digest words in planar layout
 * (one array per word) so SIMD stores in crypt_all stay contiguous.
 * Alignment is VWIDTH*4 bytes, i.e. one full SIMD vector.
 */
static void init(struct fmt_main *self)
{
int i;
#ifdef _OPENMP
int omp_t;
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc_align(self->params.max_keys_per_crypt,
sizeof(*saved_key), VWIDTH * 4);
for (i = 0; i < 8; i++)
crypt_key[i] = mem_calloc_align(self->params.max_keys_per_crypt,
sizeof(uint32_t), VWIDTH * 4);
}
/* Format teardown: release the eight per-word digest planes allocated
   by init(), then the key buffer itself. */
static void done(void)
{
	int slot = 0;

	while (slot < 8) {
		MEM_FREE(crypt_key[slot]);
		slot++;
	}
	MEM_FREE(saved_key);
}
/* Partial-hash accessors used for hash-table bucketing: each returns
   the first 32-bit digest word of candidate `index`, masked to
   successively wider low-bit widths (PH_MASK_0 .. PH_MASK_6). */
static int get_hash_0(int index) { return crypt_key[0][index] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_key[0][index] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_key[0][index] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_key[0][index] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_key[0][index] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_key[0][index] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_key[0][index] & PH_MASK_6; }
/* Stores a candidate password into saved_key[index] as a ready-padded
   SHA-256 message block: the key bytes, the 0x80 terminator, zero
   padding, and the bit length in the last 32-bit word. The caller
   (via the format's valid()) is expected to keep keys within MAXLEN. */
static void set_key(char *key, int index)
{
	uint32_t *w32 = (uint32_t*) &saved_key[index];
	uint8_t *w8 = (uint8_t*) w32;
	int pos = 0;

	/* Copy the NUL-terminated candidate into the message block. */
	for (; *key; key++)
		w8[pos++] = *key;

	/* Bit length goes in word 15; it is kept in host byte order
	   (only the data words, not this one, get byte-swapped later). */
	w32[15] = pos << 3;

	/* Append the 0x80 terminator, then clear stale bytes left by a
	   previous, longer key; stop at the first already-zero byte. */
	w8[pos++] = 0x80;
	while (w8[pos] && pos <= MAXLEN)
		w8[pos++] = 0;
}
/* Reconstructs the plaintext for candidate `index` from the padded
   message block: the byte length is recovered from the bit count in
   word 15, and that many raw bytes are copied out. Returns a pointer
   to a static buffer, overwritten on every call. */
static char *get_key(int index)
{
	static char out[MAXLEN + 1];
	uint32_t *w32 = (uint32_t*) &saved_key[index];
	int n = w32[15] >> 3;

	memset(out, 0, sizeof(out));
	memcpy(out, w32, n);
	return out;
}
/*
 * Computes SHA-256 over all queued candidates, VWIDTH keys at a time
 * in SIMD lanes. With OpenMP, vector-width batches are distributed
 * across threads; without it, the braced body runs once at index 0
 * (count is then a single vector width). The fully unrolled 64 rounds
 * use SHA256_STEP, which rotates the role of a..h by argument order
 * instead of moving data. Results land in the planar crypt_key[0..7]
 * arrays. Returns the candidate count unchanged.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index += VWIDTH)
#endif
{
vtype a, b, c, d, e, f, g, h;
vtype w[64], tmp1, tmp2;
int i;
/* Transpose VWIDTH key blocks into lane-parallel message words.
   Words 0..14 are byte-swapped to big-endian; word 15 (the bit
   length) is already a native integer and is loaded unswapped. */
#if __SSE4_1__ && !__AVX2__
for (i=0; i < 16; i++) GATHER(w[i], saved_key, i);
for (i=0; i < 15; i++) vswap32(w[i]);
#else
JTR_ALIGN(VWIDTH * 4) uint32_t __w[16][VWIDTH];
int j;
for (i=0; i < VWIDTH; i++)
for (j=0; j < 16; j++)
__w[j][i] = saved_key[index + i][j];
for (i=0; i < 15; i++)
{
w[i] = vload((vtype*) __w[i]);
vswap32(w[i]);
}
w[15] = vload((vtype*) __w[15]);
#endif
/* SHA-256 initial state (FIPS 180-4 H0..H7), splatted to all lanes */
a = vset1_epi32(0x6a09e667);
b = vset1_epi32(0xbb67ae85);
c = vset1_epi32(0x3c6ef372);
d = vset1_epi32(0xa54ff53a);
e = vset1_epi32(0x510e527f);
f = vset1_epi32(0x9b05688c);
g = vset1_epi32(0x1f83d9ab);
h = vset1_epi32(0x5be0cd19);
/* 64 rounds; rounds 16+ expand the message schedule on the fly (R) */
SHA256_STEP(a, b, c, d, e, f, g, h, 0, 0x428a2f98);
SHA256_STEP(h, a, b, c, d, e, f, g, 1, 0x71374491);
SHA256_STEP(g, h, a, b, c, d, e, f, 2, 0xb5c0fbcf);
SHA256_STEP(f, g, h, a, b, c, d, e, 3, 0xe9b5dba5);
SHA256_STEP(e, f, g, h, a, b, c, d, 4, 0x3956c25b);
SHA256_STEP(d, e, f, g, h, a, b, c, 5, 0x59f111f1);
SHA256_STEP(c, d, e, f, g, h, a, b, 6, 0x923f82a4);
SHA256_STEP(b, c, d, e, f, g, h, a, 7, 0xab1c5ed5);
SHA256_STEP(a, b, c, d, e, f, g, h, 8, 0xd807aa98);
SHA256_STEP(h, a, b, c, d, e, f, g, 9, 0x12835b01);
SHA256_STEP(g, h, a, b, c, d, e, f, 10, 0x243185be);
SHA256_STEP(f, g, h, a, b, c, d, e, 11, 0x550c7dc3);
SHA256_STEP(e, f, g, h, a, b, c, d, 12, 0x72be5d74);
SHA256_STEP(d, e, f, g, h, a, b, c, 13, 0x80deb1fe);
SHA256_STEP(c, d, e, f, g, h, a, b, 14, 0x9bdc06a7);
SHA256_STEP(b, c, d, e, f, g, h, a, 15, 0xc19bf174);
SHA256_STEP(a, b, c, d, e, f, g, h, 16, 0xe49b69c1);
SHA256_STEP(h, a, b, c, d, e, f, g, 17, 0xefbe4786);
SHA256_STEP(g, h, a, b, c, d, e, f, 18, 0x0fc19dc6);
SHA256_STEP(f, g, h, a, b, c, d, e, 19, 0x240ca1cc);
SHA256_STEP(e, f, g, h, a, b, c, d, 20, 0x2de92c6f);
SHA256_STEP(d, e, f, g, h, a, b, c, 21, 0x4a7484aa);
SHA256_STEP(c, d, e, f, g, h, a, b, 22, 0x5cb0a9dc);
SHA256_STEP(b, c, d, e, f, g, h, a, 23, 0x76f988da);
SHA256_STEP(a, b, c, d, e, f, g, h, 24, 0x983e5152);
SHA256_STEP(h, a, b, c, d, e, f, g, 25, 0xa831c66d);
SHA256_STEP(g, h, a, b, c, d, e, f, 26, 0xb00327c8);
SHA256_STEP(f, g, h, a, b, c, d, e, 27, 0xbf597fc7);
SHA256_STEP(e, f, g, h, a, b, c, d, 28, 0xc6e00bf3);
SHA256_STEP(d, e, f, g, h, a, b, c, 29, 0xd5a79147);
SHA256_STEP(c, d, e, f, g, h, a, b, 30, 0x06ca6351);
SHA256_STEP(b, c, d, e, f, g, h, a, 31, 0x14292967);
SHA256_STEP(a, b, c, d, e, f, g, h, 32, 0x27b70a85);
SHA256_STEP(h, a, b, c, d, e, f, g, 33, 0x2e1b2138);
SHA256_STEP(g, h, a, b, c, d, e, f, 34, 0x4d2c6dfc);
SHA256_STEP(f, g, h, a, b, c, d, e, 35, 0x53380d13);
SHA256_STEP(e, f, g, h, a, b, c, d, 36, 0x650a7354);
SHA256_STEP(d, e, f, g, h, a, b, c, 37, 0x766a0abb);
SHA256_STEP(c, d, e, f, g, h, a, b, 38, 0x81c2c92e);
SHA256_STEP(b, c, d, e, f, g, h, a, 39, 0x92722c85);
SHA256_STEP(a, b, c, d, e, f, g, h, 40, 0xa2bfe8a1);
SHA256_STEP(h, a, b, c, d, e, f, g, 41, 0xa81a664b);
SHA256_STEP(g, h, a, b, c, d, e, f, 42, 0xc24b8b70);
SHA256_STEP(f, g, h, a, b, c, d, e, 43, 0xc76c51a3);
SHA256_STEP(e, f, g, h, a, b, c, d, 44, 0xd192e819);
SHA256_STEP(d, e, f, g, h, a, b, c, 45, 0xd6990624);
SHA256_STEP(c, d, e, f, g, h, a, b, 46, 0xf40e3585);
SHA256_STEP(b, c, d, e, f, g, h, a, 47, 0x106aa070);
SHA256_STEP(a, b, c, d, e, f, g, h, 48, 0x19a4c116);
SHA256_STEP(h, a, b, c, d, e, f, g, 49, 0x1e376c08);
SHA256_STEP(g, h, a, b, c, d, e, f, 50, 0x2748774c);
SHA256_STEP(f, g, h, a, b, c, d, e, 51, 0x34b0bcb5);
SHA256_STEP(e, f, g, h, a, b, c, d, 52, 0x391c0cb3);
SHA256_STEP(d, e, f, g, h, a, b, c, 53, 0x4ed8aa4a);
SHA256_STEP(c, d, e, f, g, h, a, b, 54, 0x5b9cca4f);
SHA256_STEP(b, c, d, e, f, g, h, a, 55, 0x682e6ff3);
SHA256_STEP(a, b, c, d, e, f, g, h, 56, 0x748f82ee);
SHA256_STEP(h, a, b, c, d, e, f, g, 57, 0x78a5636f);
SHA256_STEP(g, h, a, b, c, d, e, f, 58, 0x84c87814);
SHA256_STEP(f, g, h, a, b, c, d, e, 59, 0x8cc70208);
SHA256_STEP(e, f, g, h, a, b, c, d, 60, 0x90befffa);
SHA256_STEP(d, e, f, g, h, a, b, c, 61, 0xa4506ceb);
SHA256_STEP(c, d, e, f, g, h, a, b, 62, 0xbef9a3f7);
SHA256_STEP(b, c, d, e, f, g, h, a, 63, 0xc67178f2);
/* Feed-forward: add the initial state back in (Davies-Meyer) */
a = vadd_epi32(a, vset1_epi32(0x6a09e667));
b = vadd_epi32(b, vset1_epi32(0xbb67ae85));
c = vadd_epi32(c, vset1_epi32(0x3c6ef372));
d = vadd_epi32(d, vset1_epi32(0xa54ff53a));
e = vadd_epi32(e, vset1_epi32(0x510e527f));
f = vadd_epi32(f, vset1_epi32(0x9b05688c));
g = vadd_epi32(g, vset1_epi32(0x1f83d9ab));
h = vadd_epi32(h, vset1_epi32(0x5be0cd19));
/* Planar store: digest word n of all lanes goes to crypt_key[n] */
vstore((vtype*) &crypt_key[0][index], a);
vstore((vtype*) &crypt_key[1][index], b);
vstore((vtype*) &crypt_key[2][index], c);
vstore((vtype*) &crypt_key[3][index], d);
vstore((vtype*) &crypt_key[4][index], e);
vstore((vtype*) &crypt_key[5][index], f);
vstore((vtype*) &crypt_key[6][index], g);
vstore((vtype*) &crypt_key[7][index], h);
}
return count;
}
/*
 * Quick rejection test: compares only the first digest word of every
 * computed candidate against the target binary, one SIMD vector at a
 * time. A hit here may be a false positive on the remaining words;
 * cmp_one/cmp_exact weed those out. Without _OPENMP the loop header
 * is compiled out and the braced body runs once at i = 0 (count is a
 * single vector width in that build).
 */
static int cmp_all(void *binary, int count)
{
vtype bin;
vtype digest;
int i = 0;
#ifdef _OPENMP
for (i = 0; i < count; i += VWIDTH)
#endif
{
digest = vload((vtype*) &crypt_key[0][i]);
bin = vset1_epi32(((uint32_t*) binary)[0]);
if (vanyeq_epi32(bin, digest))
return 1;
}
return 0;
}
/* Per-candidate check of the first digest word only (BINARY_SIZE is 4);
   the full 8-word comparison happens in cmp_exact. */
static int cmp_one(void *binary, int index)
{
return ((uint32_t*) binary)[0] == crypt_key[0][index];
}
/* Full verification: re-derives the 8-word binary from the ciphertext
   and compares every word against the planar crypt_key arrays.
   Returns 1 only when all 32 bytes match. */
static int cmp_exact(char *source, int index)
{
	uint32_t *bin = sha256_common_binary(source);
	int slot = 0;

	while (slot < 8) {
		if (bin[slot] != crypt_key[slot][index])
			return 0;
		slot++;
	}
	return 1;
}
/* Format descriptor registered with the John the Ripper core: static
   parameters first, then the method table wiring up the functions
   defined above (defaults fill the slots this format does not need). */
struct fmt_main fmt_rawSHA256_ng = {
{
/* Static parameters: labels, lengths, batch sizes, flags */
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
MAXLEN,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
FMT_OMP | FMT_OMP_BAD |
#endif
FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE,
{ NULL },
{
HEX_TAG,
CISCO_TAG
},
sha256_common_tests
}, {
/* Method table */
init,
done,
fmt_default_reset,
sha256_common_prepare,
sha256_common_valid,
sha256_common_split,
sha256_common_binary,
fmt_default_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
fmt_default_set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
#endif /* SIMD_COEF_32 */
|
GB_assign_zombie5.c | //------------------------------------------------------------------------------
// GB_assign_zombie5: delete entries in C for C_replace_phase
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// For GrB_Matrix_assign, C(I,J)<M,repl>=..., if C_replace is true, and mask M
// is present, then any entry C(i,j) outside IxJ must be be deleted, if
// M(i,j)=0.
// See also GB_assign_zombie3 and GB_assign_zombie4.
#include "GB_assign.h"
#include "GB_ek_slice.h"
/*
 * Marks as zombies all live entries Z(i,j) that lie outside the IxJ
 * submatrix and whose effective mask value is false (after optional
 * complement). Zombies are encoded by flipping the row index in Zi;
 * Z->nzombies is updated at the end. The entries of Z are sliced into
 * ~64 tasks per thread via GB_ek_slice; each task owns a disjoint
 * range of entries, so the only shared update is the nzombies counter,
 * handled with an OpenMP reduction.
 */
void GB_assign_zombie5
(
GrB_Matrix Z, // the matrix C, or a copy
const GrB_Matrix M,
const bool Mask_comp,
const GrB_Index *I,
const int64_t nI,
const int Ikind,
const int64_t Icolon [3],
const GrB_Index *J,
const int64_t nJ,
const int Jkind,
const int64_t Jcolon [3],
GB_Context Context
)
{
//--------------------------------------------------------------------------
// get Z
//--------------------------------------------------------------------------
const int64_t *restrict Zh = Z->h ;
const int64_t *restrict Zp = Z->p ;
// const int64_t Znvec = Z->nvec ;
int64_t *restrict Zi = Z->i ;
int64_t nzombies = Z->nzombies ;
const int64_t znz = GB_NNZ (Z) ;
//--------------------------------------------------------------------------
// get M
//--------------------------------------------------------------------------
const int64_t *restrict Mh = M->h ;
const int64_t *restrict Mp = M->p ;
const int64_t *restrict Mi = M->i ;
const GB_void *restrict Mx = M->x ;
const size_t msize = M->type->size ;
// cast_M converts one mask entry of M's type to bool
const GB_cast_function cast_M =
GB_cast_factory (GB_BOOL_code, M->type->code) ;
const int64_t Mnvec = M->nvec ;
const bool M_is_hyper = M->is_hyper ;
//--------------------------------------------------------------------------
// determine the number of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (znz, chunk, nthreads_max) ;
int ntasks = (nthreads == 1) ? 1 : (64 * nthreads) ;
//--------------------------------------------------------------------------
// slice the entries for each task
//--------------------------------------------------------------------------
// Task tid does entries pstart_slice [tid] to pstart_slice [tid+1]-1 and
// vectors kfirst_slice [tid] to klast_slice [tid]. The first and last
// vectors may be shared with prior slices and subsequent slices.
int64_t pstart_slice [ntasks+1] ;
int64_t kfirst_slice [ntasks] ;
int64_t klast_slice [ntasks] ;
GB_ek_slice (pstart_slice, kfirst_slice, klast_slice, Z, ntasks) ;
//--------------------------------------------------------------------------
// each task creates its own zombies
//--------------------------------------------------------------------------
// dynamic schedule: task costs vary; nzombies is the only shared update
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:nzombies)
for (int tid = 0 ; tid < ntasks ; tid++)
{
//----------------------------------------------------------------------
// get the task description
//----------------------------------------------------------------------
int64_t kfirst = kfirst_slice [tid] ;
int64_t klast = klast_slice [tid] ;
//----------------------------------------------------------------------
// scan vectors kfirst to klast for entries to delete
//----------------------------------------------------------------------
for (int64_t k = kfirst ; k <= klast ; k++)
{
//------------------------------------------------------------------
// get Z(:,j) and determine if j is outside the list J
//------------------------------------------------------------------
// hypersparse Z stores vector indices in Zh; standard Z has j == k
int64_t j = (Zh == NULL) ? k : Zh [k] ;
// j_outside is true if column j is outside the Z(I,J) submatrix
bool j_outside = !GB_ij_is_in_list (J, nJ, j, Jkind, Jcolon) ;
int64_t pZ_start, pZ_end ;
// clamp this vector's entry range to the task's slice boundaries
GB_get_pA_and_pC (&pZ_start, &pZ_end, NULL,
tid, k, kfirst, klast, pstart_slice, NULL, NULL, Zp) ;
//------------------------------------------------------------------
// get M(:,j)
//------------------------------------------------------------------
int64_t pM_start, pM_end ;
int64_t pleft = 0 ;
int64_t pright = Mnvec - 1 ;
GB_lookup (M_is_hyper, Mh, Mp, &pleft, pright, j,
&pM_start, &pM_end) ;
//------------------------------------------------------------------
// iterate over all entries in Z(:,j)
//------------------------------------------------------------------
for (int64_t pZ = pZ_start ; pZ < pZ_end ; pZ++)
{
//--------------------------------------------------------------
// consider Z(i,j)
//--------------------------------------------------------------
// Z(i,j) is outside the Z(I,J) submatrix if either i is
// not in the list I, or j is not in J, or both.
int64_t i = Zi [pZ] ;
if (!GB_IS_ZOMBIE (i) &&
(j_outside || !GB_ij_is_in_list (I, nI, i, Ikind, Icolon)))
{
//----------------------------------------------------------
// Z(i,j) is a live entry not in the Z(I,J) submatrix
//----------------------------------------------------------
// Check the mask M to see if it should be deleted.
// binary search for row i within M(:,j)
int64_t pM = pM_start ;
int64_t pright = pM_end - 1 ;
bool found ;
GB_BINARY_SEARCH (i, Mi, pM, pright, found) ;
// mij defaults to false when M(i,j) is not present
bool mij = false ;
if (found)
{
// found it
cast_M (&mij, Mx +(pM*msize), 0) ;
}
if (Mask_comp)
{
// negate the mask if Mask_comp is true
mij = !mij ;
}
if (!mij)
{
// delete Z(i,j) by marking it as a zombie
nzombies++ ;
Zi [pZ] = GB_FLIP (i) ;
}
}
}
}
}
//--------------------------------------------------------------------------
// return result
//--------------------------------------------------------------------------
Z->nzombies = nzombies ;
}
|
3d25pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y, storing the result in
 * RESULT (the classic glibc elapsed-time recipe).
 *
 * Returns 1 if the difference is negative, otherwise 0.
 * NOTE: Y is normalized in place as a side effect, matching the
 * original contract; callers must not rely on *y afterwards.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow from y's seconds so that x->tv_usec >= y->tv_usec. */
  if (x->tv_usec < y->tv_usec)
  {
    int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * carry;
    y->tv_sec  += carry;
  }

  /* Carry excess microseconds (beyond one second) into y's seconds. */
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec  -= carry;
  }

  /* After normalization tv_usec is non-negative; subtract fieldwise. */
  result->tv_sec  = x->tv_sec  - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
double ***roc2 = (double ***) malloc(sizeof(double**));
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
roc2 = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
roc2[i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 8;
tile_size[1] = 8;
tile_size[2] = 24;
tile_size[3] = 128;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
roc2[i][j][k] = 2.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
const double coef0 = -0.28472;
const double coef1 = 0.16000;
const double coef2 = -0.02000;
const double coef3 = 0.00254;
const double coef4 = -0.00018;
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
for (t1=-1;t1<=Nt-1;t1++) {
lbp=ceild(t1+1,2);
ubp=min(floord(4*Nt+Nz-9,8),floord(4*t1+Nz-2,8));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(ceild(t1-4,6),ceild(8*t2-Nz-11,24));t3<=min(floord(4*Nt+Ny-9,24),floord(4*t1+Ny-1,24));t3++) {
for (t4=max(max(ceild(t1-30,32),ceild(8*t2-Nz-115,128)),ceild(24*t3-Ny-115,128));t4<=min(min(floord(4*Nt+Nx-9,128),floord(4*t1+Nx-1,128)),floord(24*t3+Nx+11,128));t4++) {
for (t5=max(max(max(max(0,ceild(8*t2-Nz+5,4)),ceild(24*t3-Ny+5,4)),ceild(128*t4-Nx+5,4)),t1);t5<=min(min(min(Nt-1,t1+1),6*t3+4),32*t4+30);t5++) {
for (t6=max(max(8*t2,4*t5+4),-8*t1+8*t2+8*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+8*t5),4*t5+Nz-5);t6++) {
for (t7=max(24*t3,4*t5+4);t7<=min(24*t3+23,4*t5+Ny-5);t7++) {
lbv=max(128*t4,4*t5+4);
ubv=min(128*t4+127,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
free(roc2[i][j]);
}
free(A[0][i]);
free(A[1][i]);
free(roc2[i]);
}
free(A[0]);
free(A[1]);
free(roc2);
return 0;
}
|
GB_cumsum.c | //------------------------------------------------------------------------------
// GB_cumsum: cumlative sum of an array
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// Compute the cumulative sum of an array count[0:n], of size n+1
// in pseudo-MATLAB notation:
// k = sum (count [0:n-1] != 0) ;
// count = cumsum ([0 count[0:n-1]]) ;
// That is, count [j] on input is overwritten with the value of
// sum (count [0..j-1]). count [n] is implicitly zero on input.
// On output, count [n] is the total sum.
#include "GB.h"
void GB_cumsum // compute the cumulative sum of an array
(
int64_t *restrict count, // size n+1, input/output
const int64_t n,
int64_t *restrict kresult, // return k, if needed by the caller
int nthreads
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (count != NULL) ;
ASSERT (n >= 0) ;
//--------------------------------------------------------------------------
// determine # of threads to use
//--------------------------------------------------------------------------
#if !defined ( _OPENMP )
nthreads = 1 ;
#endif
if (nthreads > 1)
{
// cap the thread count: one thread per ~1024 entries, at least one
nthreads = GB_IMIN (nthreads, n / 1024) ;
nthreads = GB_IMAX (nthreads, 1) ;
}
//--------------------------------------------------------------------------
// count = cumsum ([0 count[0:n-1]]) ;
//--------------------------------------------------------------------------
if (kresult == NULL)
{
if (nthreads <= 2)
{
//------------------------------------------------------------------
// cumsum with one thread
//------------------------------------------------------------------
// exclusive prefix sum: count [i] becomes sum of count [0..i-1]
int64_t s = 0 ;
for (int64_t i = 0 ; i < n ; i++)
{
int64_t c = count [i] ;
count [i] = s ;
s += c ;
}
count [n] = s ;
}
else
{
//------------------------------------------------------------------
// cumsum with multiple threads
//------------------------------------------------------------------
// Two-pass algorithm: (1) each thread sums its own partition into
// ws [tid]; (2) after the barrier, each thread seeds its local
// exclusive prefix sum with the total of all earlier partitions.
int64_t ws [GB_PGI_NTHREADS(nthreads)+1] ;
#pragma omp parallel num_threads(nthreads)
{
// each thread sums up its own part
int tid = GB_OPENMP_THREAD_ID ;
int64_t istart, iend ;
GB_PARTITION (istart, iend, n, tid, nthreads) ;
int64_t s = 0 ;
for (int64_t i = istart ; i < iend ; i++)
{
s += count [i] ;
}
ws [tid] = s ;
#pragma omp barrier
// each thread computes the cumsum of its own part
s = 0 ;
for (int i = 0 ; i < tid ; i++)
{
s += ws [i] ;
}
for (int64_t i = istart ; i < iend ; i++)
{
int64_t c = count [i] ;
count [i] = s ;
s += c ;
}
// only the thread owning the last partition writes the total
if (iend == n)
{
count [n] = s ;
}
}
}
}
else
{
if (nthreads <= 2)
{
//------------------------------------------------------------------
// cumsum with one thread, also compute k
//------------------------------------------------------------------
// same as above, but also count the nonzero entries (k)
int64_t k = 0 ;
int64_t s = 0 ;
for (int64_t i = 0 ; i < n ; i++)
{
int64_t c = count [i] ;
if (c != 0) k++ ;
count [i] = s ;
s += c ;
}
count [n] = s ;
(*kresult) = k ;
}
else
{
//------------------------------------------------------------------
// cumsum with multiple threads, also compute k
//------------------------------------------------------------------
// ws [tid] holds each thread's partial sum; wk [tid] its count of
// nonzero entries, reduced serially after the parallel region.
int64_t ws [GB_PGI_NTHREADS(nthreads)+1] ;
int64_t wk [GB_PGI_NTHREADS(nthreads)+1] ;
#pragma omp parallel num_threads(nthreads)
{
// each thread sums up its own part
int tid = GB_OPENMP_THREAD_ID ;
int64_t istart, iend ;
GB_PARTITION (istart, iend, n, tid, nthreads) ;
int64_t k = 0 ;
int64_t s = 0 ;
for (int64_t i = istart ; i < iend ; i++)
{
int64_t c = count [i] ;
if (c != 0) k++ ;
s += c ;
}
ws [tid] = s ;
wk [tid] = k ;
#pragma omp barrier
// each thread computes the cumsum of its own part
s = 0 ;
for (int i = 0 ; i < tid ; i++)
{
s += ws [i] ;
}
for (int64_t i = istart ; i < iend ; i++)
{
int64_t c = count [i] ;
count [i] = s ;
s += c ;
}
if (iend == n)
{
count [n] = s ;
}
}
// serial reduction of the per-thread nonzero counts
int64_t k = 0 ;
for (int tid = 0 ; tid < nthreads ; tid++)
{
k += wk [tid] ;
}
(*kresult) = k ;
}
}
}
|
GB_binop__plus_fp64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__plus_fp64)
// A.*B function (eWiseMult): GB (_AemultB_08__plus_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__plus_fp64)
// A.*B function (eWiseMult): GB (_AemultB_04__plus_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__plus_fp64)
// A*D function (colscale): GB (_AxD__plus_fp64)
// D*A function (rowscale): GB (_DxB__plus_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__plus_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__plus_fp64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__plus_fp64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__plus_fp64)
// C=scalar+B GB (_bind1st__plus_fp64)
// C=scalar+B' GB (_bind1st_tran__plus_fp64)
// C=A+scalar GB (_bind2nd__plus_fp64)
// C=A'+scalar GB (_bind2nd_tran__plus_fp64)
// C type: double
// A type: double
// A pattern? 0
// B type: double
// B pattern? 0
// BinaryOp: cij = (aij + bij)
// FIX: the stray line-continuation backslashes that followed the bodies of
// GB_A_IS_PATTERN and GB_B_IS_PATTERN were removed.  They spliced the next
// comment line into the macro replacement text; this was harmless only
// because comment removal happens after line splicing, but it was fragile
// and misleading.
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
double aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
double bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x + y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PLUS || GxB_NO_FP64 || GxB_NO_PLUS_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__plus_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// the body comes entirely from the shared template, which expands the
// fp64 PLUS macros defined above
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__plus_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// same template pattern as above, without the accumulate step
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__plus_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// the template reads the B_ek_slicing task partition and applies the
// fp64 PLUS macros defined above
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__plus_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the block above already returns; harmless
// artifact of the code generator.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__plus_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed alias of C's value array; the template writes results into Cx
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__plus_fp64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed alias of C's value array; the template writes results into Cx
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__plus_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
double alpha_scalar ;
double beta_scalar ;
// the alpha/beta scalars are only read for eWiseUnion; for plain eWiseAdd
// they are left uninitialized and unused
if (is_eWiseUnion)
{
alpha_scalar = (*((double *) alpha_scalar_in)) ;
beta_scalar = (*((double *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__plus_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// all work is delegated to the template, driven by the TaskList partition
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__plus_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// GB_BINOP_FLIP is 0 for PLUS (commutative), so only the #else branch
// below is compiled for this operator
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__plus_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// all work is delegated to the template, sliced by the mask M
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__plus_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// all work is delegated to the bitmap template
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__plus_fp64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped inputs
double *Cx = (double *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// compute x + B(i,j) for every entry present in the bitmap Bb
if (GBB (Bb, p))
{
double bij = GBX (Bx, p, false) ;
Cx [p] = (x + bij) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__plus_fp64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped inputs
double *Cx = (double *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// compute A(i,j) + y for every entry present in the bitmap Ab
if (GBB (Ab, p))
{
double aij = GBX (Ax, p, false) ;
Cx [p] = (aij + y) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x + aij) ; \
}
GrB_Info GB (_bind1st_tran__plus_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// NOTE(review): this trailing redefinition restores GB_ATYPE for code after
// this function; here it redefines the same value, so it is redundant but
// harmless (generated-code artifact).
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is redefined here so the shared transpose template computes
// aij + y for this function
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij + y) ; \
}
GrB_Info GB (_bind2nd_tran__plus_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Example_get_nthrs.1.c | /*
* @@name: get_nthrs.1c
* @@type: C
* @@compilable: yes
* @@linkable: no
* @@expect: rt-error
*/
#include <omp.h>
void work(int i);
// Deliberately broken OpenMP spec example (the file header marks it
// @@expect: rt-error): omp_get_num_threads() is called OUTSIDE a parallel
// region, where it returns 1, so the loop runs a single iteration instead of
// one per thread.  The call would have to be made inside the parallel region
// to be meaningful.  Do not "fix" this -- the error is the example.
void incorrect() {
int np, i;
np = omp_get_num_threads(); /* misplaced */
#pragma omp parallel for schedule(static)
for (i=0; i < np; i++)
work(i);
}
|
crcw.c | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <time.h>
#include <math.h>
#include <omp.h>
#define modul(x) ((x) < 0 ? -(x) : (x))
#define max(a, b) ((a) > (b) ? (a) : (b))
#define eps 1e-10
int N, P;
double *A;
double *B;
double *C;
double *D;
// Returns a pseudo-random double drawn uniformly from [min, max].
double get_random_number(int min, int max) {
    double unit = (double)rand() / (double)RAND_MAX; // in [0, 1]
    return min + (max - min) * unit;
}
// Relative difference of two doubles: |a - b| / max(|a|, |b|),
// defined as 0 when both inputs are 0.
double cmp(double a, double b) {
    double scale = modul(a) > modul(b) ? modul(a) : modul(b);
    if (scale == 0.0)
        return 0.0;
    return modul(a - b) / scale;
}
// Extracts the command-line parameters into the globals:
// N = matrix dimension, P = number of threads (the original author notes
// P is expected to equal N^3).  Exits with an error on bad usage.
void getArgs(int argc, char **argv) {
    if(argc == 3) {
        N = atoi(argv[1]);
        P = atoi(argv[2]);
        return;
    }
    printf("Not enough parameters: ./program N P\n");
    exit(1);
}
// Allocates the four global N*N matrices and fills the inputs with random
// values in [0, 100]:
//   A, B = input matrices
//   C    = result of the parallel multiplication
//   D    = result of the serial reference multiplication
void init(int N) {
    int i, j;
    A = malloc(N * N * sizeof(double));
    B = malloc(N * N * sizeof(double));
    C = malloc(N * N * sizeof(double));
    D = malloc(N * N * sizeof(double));
    if(A == NULL || B == NULL || C == NULL || D == NULL) {
        printf("Malloc failed!\n");
        exit(1);
    }
    srand(time(NULL));
    for(i = 0; i < N; i++) {
        for(j = 0; j < N; j++) {
            A[i * N + j] = get_random_number(0.0, 100.0);
            B[i * N + j] = get_random_number(0.0, 100.0);
        }
    }
}
// Frees the four global matrices allocated by init().
void free_memory() {
free(A);
free(B);
free(C);
free(D);
}
// Entry point: reads N and P from the command line, computes C = A*B in
// parallel (OpenMP) and D = A*B serially, verifies the two agree within eps,
// and prints "N, P, elapsed" for the parallel run.
int main(int argc, char** argv) {
    int i, j, k;
    struct timeval tv1, tv2;
    struct timezone tz;
    double elapsed;
    double sum = 0.0;
    int mismatch = 0; // set once the parallel and serial results diverge
    getArgs(argc, argv);
    init(N);
    // set the number of threads
    omp_set_num_threads(P);
    // start measuring time
    gettimeofday(&tv1, &tz);
    // Parallel multiplication C = A*B.
    // BUGFIX: sum is a per-cell accumulator, so it must be private; the
    // original declared it reduction(+:sum), which only produced correct
    // results by accident (each cell is still written by a single thread).
    #pragma omp parallel for private (i, j, k, sum) shared(A, B, C)
    for(i = 0; i < N; i++) {
        for(j = 0; j < N; j++) {
            sum = 0.0;
            for(k = 0; k < N; k++) {
                sum += A[i * N + k] * B[k * N + j];
            }
            C[i * N + j] = sum;
        }
    }
    // end measuring time
    gettimeofday(&tv2, &tz);
    elapsed = (double)(tv2.tv_sec - tv1.tv_sec) + (double)(tv2.tv_usec - tv1.tv_usec) * 1.e-6;
    // serial reference result D = A*B
    for(i = 0; i < N; i++) {
        for(j = 0; j < N; j++) {
            D[i * N + j] = 0.0;
            for(k = 0; k < N; k++) {
                D[i * N + j] += A[i * N + k] * B[k * N + j];
            }
        }
    }
    // Compare C and D element-wise within tolerance eps.
    // BUGFIX: the original called free_memory() here AND again before
    // returning (a double free -- undefined behavior), and its break only
    // left the inner loop, so the message could be printed once per row.
    for(i = 0; i < N && !mismatch; i++) {
        for(j = 0; j < N; j++) {
            if(cmp(C[i * N + j], D[i * N + j]) > eps) {
                printf("Matrix elements differ!\n");
                mismatch = 1;
                break;
            }
        }
    }
    printf("%d, %d, %.5f\n", N, P, elapsed);
    free_memory(); // single release point
    return 0;
}
|
test.c |
#include <stdio.h>
#include <omp.h>
#pragma omp requires unified_shared_memory
#include "../utilities/check.h"
#include "../utilities/utilities.h"
#define TRIALS (1)
#define N (992)
#define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i;})
#define ZERO(X) ZERO_ARRAY(N, X)
//
// TODO: Add the following runtime calls.
// omp_set_num_threads
//
// All Lock Routines.
//
// Device runtime-API smoke test: each TEST({...}, VERIFY(...)) block runs the
// body (typically on the offload target) and checks A[0..0] against the
// expected value.  The exact statement order inside each TEST body IS the
// test, so the code is left untouched; only review notes are added.
int main(void) {
// CHECK: Able to use offloading!
check_offloading();
// NOTE(review): `fail` is declared but never used in this function.
int fail;
double A[N], B[N], C[N], D[N], E[N];
INIT();
//
// Test: omp_get_num_threads()
//
ZERO(A);
TEST({
A[0] = omp_get_num_threads(); // 1
_Pragma("omp parallel num_threads(128)")
{
if (omp_get_thread_num() == 3) {
A[0] += omp_get_num_threads(); // 128
}
}
}, VERIFY(0, 1, A[i], 129));
//
// Test: omp_get_max_threads() (depends on device type)
//
ZERO(A);
TEST({
A[0] = omp_get_max_threads();
_Pragma("omp parallel")
{
if (omp_get_thread_num() == 0) {
A[0] += omp_get_max_threads(); // 1
A[1] = omp_get_num_threads();
}
}
}, if (!omp_is_initial_device()) VERIFY(0, 1, A[i], A[1] + 1));
//
// Test: omp_get_num_procs()
//
ZERO(A);
TEST({
A[0] = omp_get_num_procs();
_Pragma("omp parallel")
{
if (omp_get_thread_num() == 18) {
A[0] += omp_get_num_procs();
A[1] = 2*omp_get_num_threads();
}
}
}, VERIFY(0, 1, A[i], A[1]));
//
// Test: omp_in_parallel()
//
ZERO(A);
TEST({
A[0] = omp_in_parallel(); // 0
// Serialized parallel
_Pragma("omp parallel num_threads(32) if (A[0] == 0)")
{
A[0] += omp_in_parallel(); // 0
}
// Parallel execution
_Pragma("omp parallel num_threads(32) if (A[0] == 0)")
{
if (omp_get_thread_num() == 0) {
A[0] += omp_in_parallel(); // 1
}
}
}, VERIFY(0, 1, A[i], 1));
//
// Test: omp_set/get_dynamic()
//
// NOTE(review): the inline comments suggest the increments sum to 3, but the
// expected value is 0 -- presumably the device runtime reports dynamic == 0
// throughout; confirm against the target runtime's semantics.
ZERO(A);
TEST({
A[0] = omp_get_dynamic(); // 0
omp_set_dynamic(1);
A[0] += omp_get_dynamic(); // 1
_Pragma("omp parallel num_threads(19)")
{
if (omp_get_thread_num() == 0) {
A[0] += omp_get_dynamic(); // 1
omp_set_dynamic(0); // Only for this parallel region.
}
}
A[0] += omp_get_dynamic(); // 1
}, VERIFY(0, 1, A[i], 0));
//
// Test: omp_get_cancellation()
// FIXME: Rewrite test case once we have cancellation support.
//
ZERO(A);
TEST({
A[0] = omp_get_cancellation(); // 0
_Pragma("omp parallel num_threads(19)")
{
if (omp_get_thread_num() == 0) {
A[0] += omp_get_cancellation(); // 0
}
}
}, VERIFY(0, 1, A[i], 0));
//
// Test: omp_set/get_nested(). Not used on the device currently.
//
ZERO(A);
TEST({
A[0] = omp_get_nested(); // 0
omp_set_nested(0);
A[0] += omp_get_nested(); // 0
_Pragma("omp parallel num_threads(19)")
{
if (omp_get_thread_num() == 18) {
A[0] += omp_get_nested(); // 0
omp_set_nested(0);
}
}
A[0] += omp_get_nested(); // 0
}, VERIFY(0, 1, A[i], 0));
//
// Test: omp_set/get_schedule().
//
ZERO(A);
// expected total: each set/get round-trip contributes kind + chunk, done
// once outside and once inside the parallel region, plus a final read
int result = 2 * (omp_sched_static + omp_sched_dynamic + omp_sched_guided) + omp_sched_static;
result += 2 * (1110) + 10;
TEST({
omp_sched_t t; int chunk_size;
t = omp_sched_static; chunk_size = 10;
omp_set_schedule(t, chunk_size);
t = 0; chunk_size = 0;
omp_get_schedule(&t, &chunk_size);
A[0] = t + chunk_size;
t = omp_sched_dynamic; chunk_size = 100;
omp_set_schedule(t, chunk_size);
t = 0; chunk_size = 0;
omp_get_schedule(&t, &chunk_size);
A[0] += t + chunk_size;
t = omp_sched_guided; chunk_size = 1000;
omp_set_schedule(t, chunk_size);
t = 0; chunk_size = 0;
omp_get_schedule(&t, &chunk_size);
A[0] += t + chunk_size;
t = omp_sched_static; chunk_size = 10;
omp_set_schedule(t, chunk_size);
_Pragma("omp parallel num_threads(19)")
{
if (omp_get_thread_num() == 0) {
omp_sched_t t; int chunk_size;
t = omp_sched_static; chunk_size = 10;
omp_set_schedule(t, chunk_size);
t = 0; chunk_size = 0;
omp_get_schedule(&t, &chunk_size);
A[0] += t + chunk_size;
t = omp_sched_dynamic; chunk_size = 100;
omp_set_schedule(t, chunk_size);
t = 0; chunk_size = 0;
omp_get_schedule(&t, &chunk_size);
A[0] += t + chunk_size;
t = omp_sched_guided; chunk_size = 1000;
omp_set_schedule(t, chunk_size);
t = 0; chunk_size = 0;
omp_get_schedule(&t, &chunk_size);
A[0] += t + chunk_size;
}
}
t = 0; chunk_size = 0;
omp_get_schedule(&t, &chunk_size); // should read 1, 10;
A[0] += t + chunk_size;
}, VERIFY(0, 1, A[i], result));
//
// Test: omp_get_thread_limit()
//
ZERO(A);
TEST({
A[0] = omp_get_thread_limit();
_Pragma("omp parallel")
{
if (omp_get_thread_num() == 0) {
A[0] += omp_get_thread_limit();
A[1] = 2*omp_get_num_threads();
}
}
}, VERIFY(0, 1, A[i], A[1]));
//
// Test: omp_set/get_max_active_levels()
//
ZERO(A);
TEST({
// Our runtime ignores this.
omp_set_max_active_levels(1);
A[0] = omp_get_max_active_levels(); // 1
_Pragma("omp parallel num_threads(19)")
{
if (omp_get_thread_num() == 0) {
A[0] += omp_get_max_active_levels(); // 1
}
}
}, VERIFY(0, 1, A[i], 2));
//
// Test: omp_get_level()
//
ZERO(A);
TEST({
A[0] = omp_get_level(); // 0
_Pragma("omp parallel num_threads(19)")
{
if (omp_get_thread_num() == 0) {
A[0] += omp_get_level(); // 1
}
}
}, VERIFY(0, 1, A[i], 1));
//
// Test: omp_get_ancestor_thread_num()
//
ZERO(A);
TEST({
A[0] = omp_get_ancestor_thread_num(0); // 0
_Pragma("omp parallel num_threads(19)")
{
if (omp_get_thread_num() == 0) {
A[0] += omp_get_ancestor_thread_num(0) + omp_get_ancestor_thread_num(1); // 0 + 18
}
}
}, VERIFY(0, 1, A[i], 0));
//
// Test: omp_get_team_size()
//
ZERO(A);
TEST({
A[0] = omp_get_team_size(0) + omp_get_team_size(1); // 1 + 1
_Pragma("omp parallel num_threads(19)")
{
if (omp_get_thread_num() == 0) {
A[0] += omp_get_team_size(0) + omp_get_team_size(1); // 1 + 19
}
}
}, if (!omp_is_initial_device()) VERIFY(0, 1, A[i], 22)); // TODO: fix host execution
//
// Test: omp_get_active_level()
//
ZERO(A);
TEST({
A[0] = omp_get_active_level(); // 0
_Pragma("omp parallel num_threads(19)")
{
if (omp_get_thread_num() == 0) {
if (omp_get_num_threads() == 1)
A[0] += 1;
else
A[0] += omp_get_active_level(); // 1
}
}
}, VERIFY(0, 1, A[i], 1));
//
// Test: omp_in_final()
//
ZERO(A);
TEST({
A[0] = omp_in_final(); // 1 always returns true.
_Pragma("omp parallel num_threads(19)")
{
if (omp_get_thread_num() == 0) {
A[0] += omp_in_final(); // 1 always returns true.
}
}
}, VERIFY(0, 1, A[i], omp_is_initial_device() ? 0 : 2));
//
// Test: omp_get_proc_bind()
//
ZERO(A);
TEST({
A[0] = omp_get_proc_bind(); // 1 always returns omp_proc_bind_true.
_Pragma("omp parallel num_threads(19)")
{
if (omp_get_thread_num() == 0) {
A[0] += omp_get_proc_bind(); // 1 always returns omp_proc_bind_true.
}
}
}, VERIFY(0, 1, A[i], omp_is_initial_device() ? 0 : 2));
#if 0
//
// Test: Place routines (linking only).
//
ZERO(A);
TEST({
(void) omp_get_num_places();
(void) omp_get_place_num_procs(0);
int *ids;
omp_get_place_proc_ids(0, ids);
(void) omp_get_place_num();
(void) omp_get_partition_num_places();
int *place_nums;
omp_get_partition_place_nums(place_nums);
}, VERIFY(0, 1, A[i], 0));
#endif
//
// Test: omp_set/get_default_device()
//
ZERO(A);
TEST({
omp_set_default_device(0); // Not used on device.
A[0] = omp_get_default_device(); // 0 always returns 0.
_Pragma("omp parallel num_threads(19)")
{
if (omp_get_thread_num() == 0) {
A[0] += omp_get_default_device(); // 0 always returns 0.
}
}
}, VERIFY(0, 1, A[i], 0));
//
// Test: omp_get_num_devices(). Undefined on the target.
//
ZERO(A);
TEST({
A[0] = omp_get_num_devices();
_Pragma("omp parallel num_threads(19)")
{
if (omp_get_thread_num() == 0) {
A[1] = omp_get_num_devices();
}
}
}, VERIFY(0, 1, A[i], A[i] - A[1]));
//
// Test: omp_get_num_teams(), omp_get_team_num()
// FIXME: Start teams region when supported.
//
ZERO(A);
TEST({
A[0] = omp_get_num_teams(); // 1
A[0] += omp_get_team_num(); // 0
_Pragma("omp parallel num_threads(19)")
{
if (omp_get_thread_num() == 0) {
A[0] += omp_get_num_teams(); // 1
A[0] += omp_get_team_num(); // 0
}
}
}, VERIFY(0, 1, A[i], 2));
//
// Test: omp_is_initial_device()
//
ZERO(A);
A[1] = omp_is_initial_device();
TEST({
A[0] = omp_is_initial_device(); // 0
_Pragma("omp parallel num_threads(19)")
{
if (omp_get_thread_num() == 0) {
A[0] += omp_is_initial_device(); // 0
}
}
}, VERIFY(0, 1, A[i], omp_is_initial_device() ? A[1] - A[1] : 2.0));
// NOTE(review): everything after this return is unreachable dead code --
// the #if 0 blocks and the Timing Routines test below never execute.
// Presumably disabled on purpose; confirm before deleting or re-enabling.
return 0;
#if 0
//
// Test: omp_get_initial_device(). Unspecified behavior when
// called from device.
//
ZERO(A);
TEST({
A[0] = omp_get_initial_device();
_Pragma("omp parallel num_threads(19)")
{
if (omp_get_thread_num() == 18) {
A[0] -= omp_get_initial_device();
}
}
}, VERIFY(0, 1, A[i], 0));
#endif
#if 0
//
// Test: omp_get_max_task_priority().
// TODO: Not used on the gpu at the moment.
//
ZERO(A);
TEST({
A[0] = omp_get_max_task_priority();
_Pragma("omp parallel num_threads(19)")
{
if (omp_get_thread_num() == 18) {
A[0] -= omp_get_max_task_priority();
}
}
}, VERIFY(0, 1, A[i], 0));
#endif
//
// Test: Timing Routines (linking only).
//
ZERO(A);
TEST({
double precision;
precision = omp_get_wtick();
double start; double end;
start = omp_get_wtime();
end = omp_get_wtime();
}, VERIFY(0, 1, A[i], 0));
return 0;
}
|
schedules.c | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
void mat_mult(int n, double *A, double *B, double *C);
void init(int n, int size, double **A, double **B, double **C, int **ptr);
long usecs ();
/* Driver: builds packed matrices of sizes 100..1500 and times the parallel
 * batch of DGEMM calls.  Unused locals (i, ld, m, k) were removed. */
int main(){
    int size, n;
    int *ptr;
    double *A, *B, *C;
    long t_start, t_end;
    size = 1500;
    /* BUGFIX: the original passed the uninitialized variable `n` as the
     * (unused) first argument of init(); reading an indeterminate value is
     * undefined behavior.  Pass `size` instead. */
    init(size, size, &A, &B, &C, &ptr);
    t_start = usecs();
    /* Parallelize this loop */
    /* CC: the dynamic schedule is important for performance because the
       cost of a matrix multiplication increases with the size of n */
    #pragma omp parallel for schedule(dynamic,1)
    for (n=size; n>=100; n-=100){
        /* Multiply two matrices of size n: C = A*B */
        mat_mult(n, A+ptr[n], B+ptr[n], C+ptr[n]);
    }
    t_end = usecs();
    printf("time : %8.2f msec.\n",((double)t_end-t_start)/1000.0);
    return 0;
}
/* Matrix multiplication routine: C = alpha*A*B + beta*C via BLAS dgemm,
 * with n-by-n operands, no transposition, alpha=3.1 and beta=0.7.
 * NOTE(review): dgemm_ has no visible prototype here, so it is implicitly
 * declared -- presumably the Fortran BLAS symbol; a proper declaration (or
 * cblas header) should be added at file scope. */
void mat_mult(int n, double *A, double *B, double *C){
double alpha, beta;
char NoTran='N';
alpha = 3.1;
beta = 0.7;
dgemm_(&NoTran, &NoTran,
&n, &n, &n,
&alpha,
A, &n,
B, &n,
&beta,
C, &n);
return;
}
/* Initialization routine: lays the 100x100 .. size x size matrices out back
 * to back in three packed buffers (A, B, C) and records the offset of each
 * n-by-n block in (*ptr)[n].  All values are random in [0, 1].
 * The first parameter n is unused; it is kept for interface compatibility.
 * BUGFIX: malloc results are now checked (the original dereferenced them
 * unconditionally). */
void init(int n, int size, double **A, double **B, double **C, int **ptr){
    int i, totsize;
    totsize=0;
    *ptr = (int*)malloc((size+1)*sizeof(int));
    if (*ptr == NULL){
        fprintf(stderr, "init: allocation failed\n");
        exit(1);
    }
    for (i=100; i<=size; i+=100){
        (*ptr)[i] = totsize;   /* offset of the i-by-i block */
        totsize+=i*i;
    }
    *A = (double*)malloc(totsize*sizeof(double));
    *B = (double*)malloc(totsize*sizeof(double));
    *C = (double*)malloc(totsize*sizeof(double));
    if (*A == NULL || *B == NULL || *C == NULL){
        fprintf(stderr, "init: allocation failed\n");
        exit(1);
    }
    for (i=0; i<totsize; i++){
        (*A)[i] = (double)rand()/RAND_MAX;
        (*B)[i] = (double)rand()/RAND_MAX;
        (*C)[i] = (double)rand()/RAND_MAX;
    }
    return;
}
/* Timer: wall-clock time in microseconds since the Unix epoch. */
long usecs (){
    struct timeval now;
    gettimeofday(&now, NULL);
    return now.tv_sec * 1000000 + now.tv_usec;
}
|
GB_unaryop__ainv_int64_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_int64_uint32
// op(A') function: GB_tran__ainv_int64_uint32
// C type: int64_t
// A type: uint32_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = -aij
// Type and operator macros consumed by the functions below:
// each entry computes Cx [p] = -((int64_t) Ax [p]).
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
int64_t z = (int64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT64 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = -(int64_t) Ax [p] for p = 0..anz-1, using up to nthreads
// OpenMP threads.  The cast and the AINV (negate) operator are supplied
// by the GB_CAST_OP macro defined above.  Returns GrB_NO_VALUE when this
// operator/type combination was disabled at compile time (GB_DISABLE),
// otherwise GrB_SUCCESS.
GrB_Info GB_unop__ainv_int64_uint32
(
int64_t *restrict Cx,
const uint32_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// every entry is independent, so a static schedule divides the work
// evenly with no synchronization inside the loop
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
// Cx [p] = -(int64_t) Ax [p]
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast each entry from uint32_t to
// int64_t, and negate it (AINV).  The actual work is done by the shared
// template GB_unaryop_transpose.c, specialized via the GB_* macros above.
// Rowcounts, Iter, A_slice and naslice carry the parallel slicing of A;
// their exact semantics are defined by the template (not visible here).
GrB_Info GB_tran__ainv_int64_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
// this operator/type combination was disabled at compile time
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
calc_pi.c | #include <stdlib.h>
#include <stdio.h>
static const int numDivisions = 100000000;
static double divisionWidth;
/* Fill areas[i] with the midpoint-rule rectangle area of 4/(1+x^2) on the
 * i-th of numDivisions sub-intervals of [0,1]; summing them approximates pi.
 * areas must hold at least numDivisions doubles and divisionWidth must be
 * set (see main) before calling.
 *
 * Bug fix: curX and y were declared outside the parallel loop, making them
 * shared across OpenMP threads (the default data-sharing attribute), i.e. a
 * data race that corrupts results with more than one thread.  They are now
 * per-iteration locals, which are private by construction.  The loop index
 * of an omp-for is implicitly private, so i is unchanged. */
void calcAreas(double* areas)
{
    int i;
    #pragma omp parallel for
    for(i = 0; i < numDivisions; i++)
    {
        /* midpoint of the i-th sub-interval */
        const double curX = ((double)i + 0.5) * divisionWidth;
        const double y = 4.0/(1.0 + (curX * curX));
        areas[i] = divisionWidth * y;
    }
}
/* Return the sum of areas[0..numDivisions-1].
 * The accumulation runs in parallel; the OpenMP reduction clause gives
 * each thread a private partial sum and combines them at the end. */
double sumAreas(double* areas)
{
    double total = 0.0;
    int idx;
    #pragma omp parallel for reduction(+:total)
    for(idx = 0; idx < numDivisions; idx++)
        total += areas[idx];
    return total;
}
/* Approximate pi by midpoint-rule integration of 4/(1+x^2) over [0,1],
 * split into numDivisions rectangles computed and summed in parallel.
 * Fix: the ~800 MB malloc (100M doubles) was never checked; on failure
 * the old code dereferenced NULL inside calcAreas.  Now reports the
 * error and exits with a nonzero status. */
int main(int argc, char** argv)
{
    double* areas = (double*)malloc(sizeof(double) * numDivisions);
    double pi = 0.0;
    if (areas == NULL)
    {
        fprintf(stderr, "Failed to allocate %d areas\n", numDivisions);
        return 1;
    }
    divisionWidth = 1.0/(double)numDivisions;
    printf("Calculating %d areas...\n", numDivisions);
    calcAreas(areas);
    printf("Summing %d areas...\n", numDivisions);
    pi = sumAreas(areas);
    printf("Pi: %1.16f\n", pi);
    free(areas);
    return 0;
}
|
kncmpush3.c | /* KNC C Library for Skeleton 3D Electrostatic OpenMP/Vector PIC Code */
/* written by Viktor K. Decyk, UCLA and Ricardo Fonseca, ISCTE */
#include <stdlib.h>
#include <stdio.h>
#include <complex.h>
#include <math.h>
#include <string.h>
#include <immintrin.h>
#include "kncmpush3.h"
/*--------------------------------------------------------------------*/
void ckncgppush3lt(float ppart[], float fxyz[], int kpic[], float qbm,
float dt, float *ek, int idimp, int nppmx, int nx,
int ny, int nz, int mx, int my, int mz, int nxv,
int nyv, int nzv, int mx1, int my1, int mxyz1,
int ipbc) {
/* for 3d code, this subroutine updates particle co-ordinates and
velocities using leap-frog scheme in time and first-order linear
interpolation in space
OpenMP/vector version using guard cells
data read in tiles
particles stored segmented array
94 flops/particle, 30 loads, 6 stores
input: all, output: ppart, ek
equations used are:
vx(t+dt/2) = vx(t-dt/2) + (q/m)*fx(x(t),y(t),z(t))*dt,
vy(t+dt/2) = vy(t-dt/2) + (q/m)*fy(x(t),y(t),z(t))*dt,
vz(t+dt/2) = vz(t-dt/2) + (q/m)*fz(x(t),y(t),z(t))*dt,
where q/m is charge/mass, and
x(t+dt) = x(t) + vx(t+dt/2)*dt, y(t+dt) = y(t) + vy(t+dt/2)*dt,
z(t+dt) = z(t) + vz(t+dt/2)*dt
fx(x(t),y(t),z(t)), fy(x(t),y(t),z(t)), and fz(x(t),y(t),z(t))
are approximated by interpolation from the nearest grid points:
fx(x,y,z) = (1-dz)*((1-dy)*((1-dx)*fx(n,m,l)+dx*fx(n+1,m,l))
+ dy*((1-dx)*fx(n,m+1,l) + dx*fx(n+1,m+1,l)))
+ dz*((1-dy)*((1-dx)*fx(n,m,l+1)+dx*fx(n+1,m,l+1))
+ dy*((1-dx)*fx(n,m+1,l+1) + dx*fx(n+1,m+1,l+1)))
fy(x,y,z) = (1-dz)*((1-dy)*((1-dx)*fy(n,m,l)+dx*fy(n+1,m,l))
+ dy*((1-dx)*fy(n,m+1,l) + dx*fy(n+1,m+1,l)))
+ dz*((1-dy)*((1-dx)*fy(n,m,l+1)+dx*fy(n+1,m,l+1))
+ dy*((1-dx)*fy(n,m+1,l+1) + dx*fy(n+1,m+1,l+1)))
fz(x,y,z) = (1-dz)*((1-dy)*((1-dx)*fz(n,m,l)+dx*fz(n+1,m,l))
+ dy*((1-dx)*fz(n,m+1,l) + dx*fz(n+1,m+1,l)))
+ dz*((1-dy)*((1-dx)*fz(n,m,l+1)+dx*fz(n+1,m,l+1))
+ dy*((1-dx)*fz(n,m+1,l+1) + dx*fz(n+1,m+1,l+1)))
where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = position z of particle n in tile m
ppart[m][3][n] = velocity vx of particle n in tile m
ppart[m][4][n] = velocity vy of particle n in tile m
ppart[m][5][n] = velocity vz of particle n in tile m
fxyz[l][k][j][0] = x component of force/charge at grid (j,k,l)
fxyz[l][k][j][1] = y component of force/charge at grid (j,k,l)
fxyz[l][k][j][2] = z component of force/charge at grid (j,k,l)
that is, convolution of electric field over particle shape
kpic = number of particles per tile
qbm = particle charge/mass ratio
dt = time interval between successive calculations
kinetic energy/mass at time t is also calculated, using
ek = .125*sum((vx(t+dt/2)+vx(t-dt/2))**2+(vy(t+dt/2)+vy(t-dt/2))**2+
(vz(t+dt/2)+vz(t-dt/2))**2)
idimp = size of phase space = 6
nppmx = maximum number of particles in tile
nx/ny/nz = system length in x/y/z direction
mx/my/mz = number of grids in sorting cell in x/y/z
nxv = second dimension of field array, must be >= nx+1
nyv = third dimension of field array, must be >= ny+1
nzv = fourth dimension of field array, must be >= nz+1
mx1 = (system length in x direction - 1)/mx + 1
my1 = (system length in y direction - 1)/my + 1
mxyz1 = mx1*my1*mz1,
where mz1 = (system length in z direction - 1)/mz + 1
ipbc = particle boundary condition = (0,1,2,3) =
(none,3d periodic,3d reflecting,mixed 2d reflecting/1d periodic)
requires KNC, ppart needs to be 64 byte aligned
nppmx needs to be a multiple of 16
fxyz needs to have 4 components, although one is not used
local data */
#define MXV 17
#define MYV 17
#define MZV 17
int mxy1, noff, moff, loff, npoff, npp, nps;
int i, j, k, l, m, nn, mm, ll, mxv, myv, mxyv, nxyv;
float qtm, edgelx, edgely, edgelz, edgerx, edgery, edgerz;
float dxp, dyp, dzp, amx, amy, amz, dx1, x, y, z, dx, dy, dz;
float vx, vy, vz;
double sum1, sum2;
__m512i v_noff, v_moff, v_loff, v_mxv4, v_mxyv4;
__m512i v_nn, v_mm, v_ll, v_it, v_perm;
__m512 v_qtm, v_dt, v_one, v_zero;
__m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz;
__m512 v_dx1, v_at, v_dx, v_dy, v_dz, v_vx, v_vy, v_vz;
__m512 v_edgelx, v_edgely, v_edgelz, v_edgerx, v_edgery, v_edgerz;
__m512 a, b, c, d, e, f, g, p, q, r, s;
__m512d v_sum1, v_d;
__mmask16 msk;
__attribute__((aligned(64))) unsigned int kk[16];
__attribute__((aligned(64))) double dd[8];
__attribute__((aligned(64))) float sfxyz[4*MXV*MYV*MZV];
/* __attribute__((aligned(64))) float sfxyz[4*(mx+1)*(my+1)*(mz+1)]; */
mxy1 = mx1*my1;
/* mxv = MXV; */
/* myv = MYV; */
mxv = mx+1;
myv = my+1;
mxyv = mxv*myv;
nxyv = nxv*nyv;
qtm = qbm*dt;
sum2 = 0.0;
/* set boundary values */
edgelx = 0.0f;
edgely = 0.0f;
edgelz = 0.0f;
edgerx = (float) nx;
edgery = (float) ny;
edgerz = (float) nz;
if (ipbc==2) {
edgelx = 1.0f;
edgely = 1.0f;
edgelz = 1.0f;
edgerx = (float) (nx-1);
edgery = (float) (ny-1);
edgerz = (float) (nz-1);
}
else if (ipbc==3) {
edgelx = 1.0f;
edgely = 1.0f;
edgerx = (float) (nx-1);
edgery = (float) (ny-1);
}
v_mxv4 = _mm512_set1_epi32(4*mxv);
v_mxyv4 = _mm512_set1_epi32(4*mxyv);
v_perm = _mm512_set_epi32(15,11,7,3,14,10,6,2,13,9,5,1,12,8,4,0);
v_qtm = _mm512_set1_ps(qtm);
v_one = _mm512_set1_ps(1.0f);
v_zero = _mm512_setzero_ps();
v_dt = _mm512_set1_ps(dt);
v_edgelx = _mm512_set1_ps(edgelx);
v_edgely = _mm512_set1_ps(edgely);
v_edgelz = _mm512_set1_ps(edgelz);
v_edgerx = _mm512_set1_ps(edgerx);
v_edgery = _mm512_set1_ps(edgery);
v_edgerz = _mm512_set1_ps(edgerz);
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,l,m,noff,moff,loff,npp,npoff,nps,nn,mm,ll,x,y,z,dxp,dyp, \
dzp,amx,amy,amz,dx1,dx,dy,dz,vx,vy,vz,sum1,v_noff,v_moff,v_loff,v_nn, \
v_mm,v_ll,v_it,v_x,v_y,v_z,v_dxp,v_dyp,v_dzp,v_amx,v_amy,v_amz,v_dx1, \
v_dx,v_dy,v_dz,v_vx,v_vy,v_vz,v_at,v_d,v_sum1,a,b,c,d,e,f,g,p,q,r,s, \
msk,kk,dd,sfxyz) \
reduction(+:sum2)
for (l = 0; l < mxyz1; l++) {
loff = l/mxy1;
k = l - mxy1*loff;
loff = mz*loff;
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
v_noff = _mm512_set1_epi32(noff);
v_moff = _mm512_set1_epi32(moff);
v_loff = _mm512_set1_epi32(loff);
npp = kpic[l];
npoff = idimp*nppmx*l;
/* load local fields from global array */
nn = (mx < nx-noff ? mx : nx-noff) + 1;
mm = (my < ny-moff ? my : ny-moff) + 1;
ll = (mz < nz-loff ? mz : nz-loff) + 1;
nps = 4*(nn/4);
for (k = 0; k < ll; k++) {
for (j = 0; j < mm; j++) {
/* vector loop over elements in blocks of 4 */
/* for (i = 0; i < nn; i++) { */
/* sfxyz[4*(i+mxv*j+mxyv*k)] */
/* = fxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* sfxyz[1+4*(i+mxv*j+mxyv*k)] */
/* = fxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* sfxyz[2+4*(i+mxv*j+mxyv*k)] */
/* = fxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* } */
for (i = 0; i < nps; i+=4) {
m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff));
v_at = _mm512_loadunpacklo_ps(v_at,&fxyz[m]);
v_at = _mm512_loadunpackhi_ps(v_at,&fxyz[m+16]);
m = 4*(i + mxv*j + mxyv*k);
_mm512_packstorelo_ps(&sfxyz[m],v_at);
_mm512_packstorehi_ps(&sfxyz[m+16],v_at);
}
/* loop over remaining elements */
for (i = nps; i < nn; i++) {
sfxyz[4*(i+mxv*j+mxyv*k)]
= fxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sfxyz[1+4*(i+mxv*j+mxyv*k)]
= fxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sfxyz[2+4*(i+mxv*j+mxyv*k)]
= fxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sfxyz[3+4*(i+mxv*j+mxyv*k)]
= fxyz[3+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
}
}
}
nps = 16*(npp/16);
sum1 = 0.0;
v_sum1 = _mm512_set1_pd(0.0);
/* loop over particles in tile in blocks of 16 */
for (j = 0; j < nps; j+=16) {
/* find interpolation weights */
/* x = ppart[j+npoff]; */
/* y = ppart[j+nppmx+npoff]; */
/* z = ppart[j+2*nppmx+npoff]; */
v_x = _mm512_load_ps(&ppart[j+npoff]);
v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]);
v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]);
/* nn = x; */
/* mm = y; */
/* ll = z; */
v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
/* dxp = x - (float) nn; */
v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dxp = _mm512_sub_ps(v_x,v_dxp);
/* dyp = y - (float) mm; */
v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dyp = _mm512_sub_ps(v_y,v_dyp);
/* dzp = z - (float) ll; */
v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dzp = _mm512_sub_ps(v_z,v_dzp);
/* nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); */
v_nn = _mm512_sub_epi32(v_nn,v_noff);
v_mm = _mm512_sub_epi32(v_mm,v_moff);
v_ll = _mm512_sub_epi32(v_ll,v_loff);
v_it = _mm512_mullo_epi32(v_mxyv4,v_ll);
v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv4,v_mm));
v_nn = _mm512_add_epi32(_mm512_slli_epi32(v_nn,2),v_it);
/* amx = 1.0f - dxp; */
/* amy = 1.0f - dyp; */
/* amz = 1.0f - dzp; */
v_amx = _mm512_sub_ps(v_one,v_dxp);
v_amy = _mm512_sub_ps(v_one,v_dyp);
v_amz = _mm512_sub_ps(v_one,v_dzp);
/* dx1 = dxp*dyp; */
/* dyp = amx*dyp; */
/* amx = amx*amy; */
/* amy = dxp*amy; */
v_dx1 = _mm512_mul_ps(v_dxp,v_dyp);
v_dyp = _mm512_mul_ps(v_amx,v_dyp);
v_amx = _mm512_mul_ps(v_amx,v_amy);
v_amy = _mm512_mul_ps(v_dxp,v_amy);
_mm512_store_epi32(kk,v_nn);
/* load sfxyz[nn:nn+3] and sfxyz[nn+4:nn+7] field components */
/* first block of 4 particles */
mm = kk[0];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[1];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[2];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[3];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[5];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[6];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[7];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[9];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[10];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[11];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[13];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[14];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[15];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sfxyz[nn:nn+3] field components */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sfxyz[nn+4:nn+7] field components */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find first part of acceleration */
/* dx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; */
v_dx = _mm512_mul_ps(v_amx,a);
v_dx = _mm512_fmadd_ps(v_amy,p,v_dx);
/* dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; */
v_dy = _mm512_mul_ps(v_amx,b);
v_dy = _mm512_fmadd_ps(v_amy,q,v_dy);
/* dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; */
v_dz = _mm512_mul_ps(v_amx,c);
v_dz = _mm512_fmadd_ps(v_amy,r,v_dz);
/* mm = nn + 4*mxv; */
/* load sfxyz[mm:mm+3] and sfxyz[mm+4:mm+7] field components */
/* first block of 4 particles */
mm = kk[0] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[1] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[2] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[3] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[5] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[6] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[7] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[9] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[10] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[11] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[13] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[14] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[15] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sfxyz[mm:mm+3] field components */
/* where mm = nn + 4*mxv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sfxyz[mm+4:mm+7] field components */
/* where mm = nn + 4*mxv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find second part of acceleration */
/* dx = amz*(dx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); */
v_dx = _mm512_fmadd_ps(v_dyp,a,v_dx);
v_dx = _mm512_fmadd_ps(v_dx1,p,v_dx);
v_dx = _mm512_mul_ps(v_amz,v_dx);
/* dy = amz*(dy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); */
v_dy = _mm512_fmadd_ps(v_dyp,b,v_dy);
v_dy = _mm512_fmadd_ps(v_dx1,q,v_dy);
v_dy = _mm512_mul_ps(v_amz,v_dy);
/* dz = amz*(dz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); */
v_dz = _mm512_fmadd_ps(v_dyp,c,v_dz);
v_dz = _mm512_fmadd_ps(v_dx1,r,v_dz);
v_dz = _mm512_mul_ps(v_amz,v_dz);
/* nn += 4*mxyv; */
v_nn = _mm512_add_epi32(v_nn,v_mxyv4);
_mm512_store_epi32(kk,v_nn);
/* load sfxyz[nn:nn+3] and sfxyz[nn+4:nn+7] field components */
/* first block of 4 particles */
mm = kk[0];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[1];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[2];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[3];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[5];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[6];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[7];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[9];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[10];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[11];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[13];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[14];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[15];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sfxyz[nn:nn+3] field components */
/* where nn = nn + 4*mxyv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sfxyz[nn+4:nn+7] field components */
/* where nn = nn + 4*mxyv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find third part of acceleration */
/* vx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; */
v_vx = _mm512_mul_ps(v_amx,a);
v_vx = _mm512_fmadd_ps(v_amy,p,v_vx);
/* vy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; */
v_vy = _mm512_mul_ps(v_amx,b);
v_vy = _mm512_fmadd_ps(v_amy,q,v_vy);
/* vz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; */
v_vz = _mm512_mul_ps(v_amx,c);
v_vz = _mm512_fmadd_ps(v_amy,r,v_vz);
/* mm = nn + 4*mxv; */
/* load sfxyz[mm:mm+3] and sfxyz[mm+4:mm+7] field components */
/* first block of 4 particles */
mm = kk[0] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[1] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[2] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[3] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[5] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[6] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[7] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[9] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[10] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[11] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[13] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[14] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[15] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sfxyz[mm:mm+3] field components */
/* where mm = nn + 4*mxyv + 4*mxv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sfxyz[mm+4:mm+7] field components */
/* where mm = nn + 4*mxyv + 4*mxv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find fourth part of acceleration */
/* dx = dx + dzp*(vx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); */
v_vx = _mm512_fmadd_ps(v_dyp,a,v_vx);
v_vx = _mm512_fmadd_ps(v_dx1,p,v_vx);
v_dx = _mm512_fmadd_ps(v_dzp,v_vx,v_dx);
/* dy = dy + dzp*(vy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); */
v_vy = _mm512_fmadd_ps(v_dyp,b,v_vy);
v_vy = _mm512_fmadd_ps(v_dx1,q,v_vy);
v_dy = _mm512_fmadd_ps(v_dzp,v_vy,v_dy);
/* dz = dz + dzp*(vz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); */
v_vz = _mm512_fmadd_ps(v_dyp,c,v_vz);
v_vz = _mm512_fmadd_ps(v_dx1,r,v_vz);
v_dz = _mm512_fmadd_ps(v_dzp,v_vz,v_dz);
/* new velocity */
/* dxp = ppart[j+3*nppmx+npoff]; */
/* dyp = ppart[j+4*nppmx+npoff]; */
/* dzp = ppart[j+5*nppmx+npoff]; */
v_dxp = _mm512_load_ps(&ppart[j+3*nppmx+npoff]);
v_dyp = _mm512_load_ps(&ppart[j+4*nppmx+npoff]);
v_dzp = _mm512_load_ps(&ppart[j+5*nppmx+npoff]);
/* vx = dxp + qtm*dx; */
/* vy = dyp + qtm*dy; */
/* vz = dzp + qtm*dz; */
v_vx = _mm512_fmadd_ps(v_qtm,v_dx,v_dxp);
v_vy = _mm512_fmadd_ps(v_qtm,v_dy,v_dyp);
v_vz = _mm512_fmadd_ps(v_qtm,v_dz,v_dzp);
/* average kinetic energy */
/* dxp += vx; */
/* dyp += vy; */
/* dzp += vz; */
v_dxp = _mm512_add_ps(v_dxp,v_vx);
v_dyp = _mm512_add_ps(v_dyp,v_vy);
v_dzp = _mm512_add_ps(v_dzp,v_vz);
/* sum1 += dxp*dxp + dyp*dyp + dzp*dzp; */
v_at = _mm512_mul_ps(v_dxp,v_dxp);
v_at = _mm512_add_ps(v_at,_mm512_mul_ps(v_dyp,v_dyp));
v_at = _mm512_add_ps(v_at,_mm512_mul_ps(v_dzp,v_dzp));
/* convert to double precision before accumulating */
v_sum1 = _mm512_add_pd(v_sum1,_mm512_cvtpslo_pd(v_at));
v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_at,78));
v_sum1 = _mm512_add_pd(v_sum1,v_d);
/* new position */
/* dx = x + vx*dt; */
/* dy = y + vy*dt; */
/* dz = z + vz*dt; */
v_dx = _mm512_fmadd_ps(v_vx,v_dt,v_x);
v_dy = _mm512_fmadd_ps(v_vy,v_dt,v_y);
v_dz = _mm512_fmadd_ps(v_vz,v_dt,v_z);
/* reflecting boundary conditions */
if (ipbc==2) {
/* if ((dx < edgelx) || (dx >= edgerx)) { */
/* dx = x; */
/* vx = -vx; */
/* } */
msk = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dx,v_edgerx,
_MM_CMPINT_GE));
v_dx = _mm512_mask_blend_ps(msk,v_dx,v_x);
v_vx = _mm512_mask_sub_ps(v_vx,msk,v_zero,v_vx);
/* if ((dy < edgely) || (dy >= edgery)) { */
/* dy = y; */
/* vy = -vy; */
/* } */
msk = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dy,v_edgery,
_MM_CMPINT_GE));
v_dy = _mm512_mask_blend_ps(msk,v_dy,v_y);
v_vy = _mm512_mask_sub_ps(v_vy,msk,v_zero,v_vy);
/* if ((dz < edgelz) || (dz >= edgerz)) { */
/* dz = z; */
/* vz = -vz; */
/* } */
msk = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dz,v_edgerz,
_MM_CMPINT_GE));
v_dz = _mm512_mask_blend_ps(msk,v_dz,v_z);
v_vz = _mm512_mask_sub_ps(v_vz,msk,v_zero,v_vz);
}
/* mixed reflecting/periodic boundary conditions */
else if (ipbc==3) {
/* if ((dx < edgelx) || (dx >= edgerx)) { */
/* dx = x; */
/* vx = -vx; */
/* } */
msk = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dx,v_edgerx,
_MM_CMPINT_GE));
v_dx = _mm512_mask_blend_ps(msk,v_dx,v_x);
v_vx = _mm512_mask_sub_ps(v_vx,msk,v_zero,v_vx);
/* if ((dy < edgely) || (dy >= edgery)) { */
/* dy = y; */
/* vy = -vy; */
/* } */
msk = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT);
msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dy,v_edgery,
_MM_CMPINT_GE));
v_dy = _mm512_mask_blend_ps(msk,v_dy,v_y);
v_vy = _mm512_mask_sub_ps(v_vy,msk,v_zero,v_vy);
/* if (dz < edgelz) dz += edgerz; */
msk = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT);
v_dz = _mm512_mask_add_ps(v_dz,msk,v_dz,v_edgerz);
/* if (dz >= edgerz) dz -= edgerz; */
msk = _mm512_cmp_ps_mask(v_dz,v_edgerz,_MM_CMPINT_GE);
v_dz = _mm512_mask_sub_ps(v_dz,msk,v_dz,v_edgerz);
}
/* set new position */
/* ppart[j+npoff] = dx; */
/* ppart[j+nppmx+npoff] = dy; */
/* ppart[j+2*nppmx+npoff] = dz; */
_mm512_store_ps(&ppart[j+npoff],v_dx);
_mm512_store_ps(&ppart[j+nppmx+npoff],v_dy);
_mm512_store_ps(&ppart[j+2*nppmx+npoff],v_dz);
/* set new velocity */
/* ppart[j+3*nppmx+npoff] = vx; */
/* ppart[j+4*nppmx+npoff] = vy; */
/* ppart[j+5*nppmx+npoff] = vz; */
_mm512_store_ps(&ppart[j+3*nppmx+npoff],v_vx);
_mm512_store_ps(&ppart[j+4*nppmx+npoff],v_vy);
_mm512_store_ps(&ppart[j+5*nppmx+npoff],v_vz);
}
/* loop over remaining particles */
for (j = nps; j < npp; j++) {
/* find interpolation weights */
x = ppart[j+npoff];
y = ppart[j+nppmx+npoff];
z = ppart[j+2*nppmx+npoff];
nn = x;
mm = y;
ll = z;
dxp = x - (float) nn;
dyp = y - (float) mm;
dzp = z - (float) ll;
nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff));
amx = 1.0f - dxp;
amy = 1.0f - dyp;
dx1 = dxp*dyp;
dyp = amx*dyp;
amx = amx*amy;
amz = 1.0f - dzp;
amy = dxp*amy;
/* find acceleration */
dx = amx*sfxyz[nn] + amy*sfxyz[nn+4];
dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4];
dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4];
dx = amz*(dx + dyp*sfxyz[nn+4*mxv] + dx1*sfxyz[nn+4*mxv+4]);
dy = amz*(dy + dyp*sfxyz[nn+4*mxv+1] + dx1*sfxyz[nn+4*mxv+1+4]);
dz = amz*(dz + dyp*sfxyz[nn+4*mxv+2] + dx1*sfxyz[nn+4*mxv+2+4]);
mm = nn + 4*mxyv;
vx = amx*sfxyz[mm] + amy*sfxyz[mm+4];
vy = amx*sfxyz[mm+1] + amy*sfxyz[mm+1+4];
vz = amx*sfxyz[mm+2] + amy*sfxyz[mm+2+4];
dx = dx + dzp*(vx + dyp*sfxyz[mm+4*mxv] + dx1*sfxyz[mm+4*mxv+4]);
dy = dy + dzp*(vy + dyp*sfxyz[mm+4*mxv+1] + dx1*sfxyz[mm+4*mxv+1+4]);
dz = dz + dzp*(vz + dyp*sfxyz[mm+4*mxv+2] + dx1*sfxyz[mm+4*mxv+2+4]);
/* new velocity */
dxp = ppart[j+3*nppmx+npoff];
dyp = ppart[j+4*nppmx+npoff];
dzp = ppart[j+5*nppmx+npoff];
vx = dxp + qtm*dx;
vy = dyp + qtm*dy;
vz = dzp + qtm*dz;
/* average kinetic energy */
dxp += vx;
dyp += vy;
dzp += vz;
sum1 += dxp*dxp + dyp*dyp+ dzp*dzp;
/* new position */
dx = x + vx*dt;
dy = y + vy*dt;
dz = z + vz*dt;
/* reflecting boundary conditions */
if (ipbc==2) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = x;
vx = -vx;
}
if ((dy < edgely) || (dy >= edgery)) {
dy = y;
vy = -vy;
}
if ((dz < edgelz) || (dz >= edgerz)) {
dz = z;
vz = -vz;
}
}
/* mixed reflecting/periodic boundary conditions */
else if (ipbc==3) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = x;
vx = -vx;
}
if ((dy < edgely) || (dy >= edgery)) {
dy = y;
vy = -vy;
}
}
/* set new position */
ppart[j+npoff] = dx;
ppart[j+nppmx+npoff] = dy;
ppart[j+2*nppmx+npoff] = dz;
/* set new velocity */
ppart[j+3*nppmx+npoff] = vx;
ppart[j+4*nppmx+npoff] = vy;
ppart[j+5*nppmx+npoff] = vz;
}
/* sum2 += sum1; */
_mm512_store_pd(&dd[0],v_sum1);
for (j = 1; j < 8; j++) {
dd[0] += dd[j];
}
sum2 += (sum1 + dd[0]);
}
/* normalize kinetic energy */
*ek += 0.125f*sum2;
return;
#undef MXV
#undef MYV
#undef MZV
}
/*--------------------------------------------------------------------*/
void ckncgppushf3lt(float ppart[], float fxyz[], int kpic[], int ncl[],
int ihole[], float qbm, float dt, float *ek,
int idimp, int nppmx, int nx, int ny, int nz,
int mx, int my, int mz, int nxv, int nyv, int nzv,
int mx1, int my1, int mxyz1, int ntmax, int *irc) {
/* for 3d code, this subroutine updates particle co-ordinates and
velocities using leap-frog scheme in time and first-order linear
interpolation in space, with periodic boundary conditions.
also determines list of particles which are leaving this tile
OpenMP/vector version using guard cells
data read in tiles
particles stored segmented array
94 flops/particle, 30 loads, 6 stores
input: all except ncl, ihole, irc, output: ppart, ncl, ihole, ek, irc
equations used are:
vx(t+dt/2) = vx(t-dt/2) + (q/m)*fx(x(t),y(t),z(t))*dt,
vy(t+dt/2) = vy(t-dt/2) + (q/m)*fy(x(t),y(t),z(t))*dt,
vz(t+dt/2) = vz(t-dt/2) + (q/m)*fz(x(t),y(t),z(t))*dt,
where q/m is charge/mass, and
x(t+dt) = x(t) + vx(t+dt/2)*dt, y(t+dt) = y(t) + vy(t+dt/2)*dt,
z(t+dt) = z(t) + vz(t+dt/2)*dt
fx(x(t),y(t),z(t)), fy(x(t),y(t),z(t)), and fz(x(t),y(t),z(t))
are approximated by interpolation from the nearest grid points:
fx(x,y,z) = (1-dz)*((1-dy)*((1-dx)*fx(n,m,l)+dx*fx(n+1,m,l))
+ dy*((1-dx)*fx(n,m+1,l) + dx*fx(n+1,m+1,l)))
+ dz*((1-dy)*((1-dx)*fx(n,m,l+1)+dx*fx(n+1,m,l+1))
+ dy*((1-dx)*fx(n,m+1,l+1) + dx*fx(n+1,m+1,l+1)))
fy(x,y,z) = (1-dz)*((1-dy)*((1-dx)*fy(n,m,l)+dx*fy(n+1,m,l))
+ dy*((1-dx)*fy(n,m+1,l) + dx*fy(n+1,m+1,l)))
+ dz*((1-dy)*((1-dx)*fy(n,m,l+1)+dx*fy(n+1,m,l+1))
+ dy*((1-dx)*fy(n,m+1,l+1) + dx*fy(n+1,m+1,l+1)))
fz(x,y,z) = (1-dz)*((1-dy)*((1-dx)*fz(n,m,l)+dx*fz(n+1,m,l))
+ dy*((1-dx)*fz(n,m+1,l) + dx*fz(n+1,m+1,l)))
+ dz*((1-dy)*((1-dx)*fz(n,m,l+1)+dx*fz(n+1,m,l+1))
+ dy*((1-dx)*fz(n,m+1,l+1) + dx*fz(n+1,m+1,l+1)))
where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = position z of particle n in tile m
ppart[m][3][n] = velocity vx of particle n in tile m
ppart[m][4][n] = velocity vy of particle n in tile m
ppart[m][5][n] = velocity vz of particle n in tile m
fxyz[l][k][j][0] = x component of force/charge at grid (j,k,l)
fxyz[l][k][j][1] = y component of force/charge at grid (j,k,l)
fxyz[l][k][j][2] = z component of force/charge at grid (j,k,l)
that is, convolution of electric field over particle shape
kpic[l] = number of particles in tile l
ncl[l][i] = number of particles going to destination i, tile l
ihole[l][:][0] = location of hole in array left by departing particle
ihole[l][:][1] = direction destination of particle leaving hole
all for tile l
ihole[l][0][0] = ih, number of holes left (error, if negative)
qbm = particle charge/mass ratio
dt = time interval between successive calculations
kinetic energy/mass at time t is also calculated, using
ek = .125*sum((vx(t+dt/2)+vx(t-dt/2))**2+(vy(t+dt/2)+vy(t-dt/2))**2+
(vz(t+dt/2)+vz(t-dt/2))**2)
idimp = size of phase space = 6
nppmx = maximum number of particles in tile
nx/ny/nz = system length in x/y/z direction
mx/my/mz = number of grids in sorting cell in x/y/z
nxv = second dimension of field array, must be >= nx+1
nyv = third dimension of field array, must be >= ny+1
nzv = fourth dimension of field array, must be >= nz+1
mx1 = (system length in x direction - 1)/mx + 1
my1 = (system length in y direction - 1)/my + 1
mxyz1 = mx1*my1*mz1,
where mz1 = (system length in z direction - 1)/mz + 1
ntmax = size of hole array for particles leaving tiles
irc = maximum overflow, returned only if error occurs, when irc > 0
requires KNC, ppart needs to be 64 byte aligned
nppmx needs to be a multiple of 16
fxyz needs to have 4 components, although one is not used
optimized version
local data */
#define MXV 17
#define MYV 17
#define MZV 17
int mxy1, noff, moff, loff, npoff, npp, nps;
int i, j, k, l, m, ii, ih, nh, nn, mm, ll, mxv, myv, mxyv, nxyv;
float anx, any, anz, edgelx, edgely, edgelz, edgerx, edgery, edgerz;
float qtm, dxp, dyp, dzp, amx, amy, amz, dx1, x, y, z, dx, dy, dz;
float vx, vy, vz;
double sum1, sum2;
__m512i v_noff, v_moff, v_loff, v_mxv4, v_mxyv4;
__m512i v_nn, v_mm, v_ll, v_it, v_0, v_1, v_3, v_9, v_perm;
__m512 v_qtm, v_dt, v_one, v_zero, v_anx, v_any, v_anz;
__m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz;
__m512 v_dx1, v_at, v_dx, v_dy, v_dz, v_vx, v_vy, v_vz;
__m512 v_edgelx, v_edgely, v_edgelz, v_edgerx, v_edgery, v_edgerz;
__m512 a, b, c, d, e, f, g, p, q, r, s;
__m512d v_sum1, v_d;
__mmask16 msk1, msk2;
__attribute__((aligned(64))) unsigned int kk[16];
__attribute__((aligned(64))) double dd[8];
__attribute__((aligned(64))) float sfxyz[4*MXV*MYV*MZV];
/* __attribute__((aligned(64))) float sfxyz[4*(mx+1)*(my+1)*(mz+1)]; */
mxy1 = mx1*my1;
/* mxv = MXV; */
/* myv = MYV; */
mxv = mx+1;
myv = my+1;
mxyv = mxv*myv;
nxyv = nxv*nyv;
qtm = qbm*dt;
anx = (float) nx;
any = (float) ny;
anz = (float) nz;
sum2 = 0.0;
/* set boundary values */
v_mxv4 = _mm512_set1_epi32(4*mxv);
v_mxyv4 = _mm512_set1_epi32(4*mxyv);
v_0 = _mm512_set1_epi32(0);
v_1 = _mm512_set1_epi32(1);
v_3 = _mm512_set1_epi32(3);
v_9 = _mm512_set1_epi32(9);
v_perm = _mm512_set_epi32(15,11,7,3,14,10,6,2,13,9,5,1,12,8,4,0);
v_qtm = _mm512_set1_ps(qtm);
v_one = _mm512_set1_ps(1.0f);
v_zero = _mm512_setzero_ps();
v_dt = _mm512_set1_ps(dt);
v_anx = _mm512_set1_ps(anx);
v_any = _mm512_set1_ps(any);
v_anz = _mm512_set1_ps(anz);
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,l,m,ii,noff,moff,loff,npp,npoff,nps,nn,mm,ll,ih,nh,x,y,z, \
dxp,dyp,dzp,amx,amy,amz,dx1,dx,dy,dz,vx,vy,vz,edgelx,edgely,edgelz, \
edgerx,edgery,edgerz,sum1,v_noff,v_moff,v_loff,v_nn,v_mm,v_ll,v_it,v_x, \
v_y,v_z,v_dxp,v_dyp,v_dzp,v_amx,v_amy,v_amz,v_dx1,v_dx,v_dy,v_dz,v_vx, \
v_vy,v_vz,v_at,v_edgelx,v_edgely,v_edgelz,v_edgerx,v_edgery,v_edgerz, \
v_d,v_sum1,a,b,c,d,e,f,g,p,q,r,s,msk1,msk2,kk,dd,sfxyz) \
reduction(+:sum2)
for (l = 0; l < mxyz1; l++) {
loff = l/mxy1;
k = l - mxy1*loff;
loff = mz*loff;
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
v_noff = _mm512_set1_epi32(noff);
v_moff = _mm512_set1_epi32(moff);
v_loff = _mm512_set1_epi32(loff);
npp = kpic[l];
npoff = idimp*nppmx*l;
nn = nx - noff;
nn = mx < nn ? mx : nn;
mm = ny - moff;
mm = my < mm ? my : mm;
ll = nz - loff;
ll = mz < ll ? mz : ll;
edgelx = noff;
edgerx = noff + nn;
edgely = moff;
edgery = moff + mm;
edgelz = loff;
edgerz = loff + ll;
v_edgelx = _mm512_set1_ps(edgelx);
v_edgely = _mm512_set1_ps(edgely);
v_edgelz = _mm512_set1_ps(edgelz);
v_edgerx = _mm512_set1_ps(edgerx);
v_edgery = _mm512_set1_ps(edgery);
v_edgerz = _mm512_set1_ps(edgerz);
ih = 0;
nh = 0;
nn += 1;
mm += 1;
ll += 1;
/* load local fields from global array */
nps = 4*(nn/4);
for (k = 0; k < ll; k++) {
for (j = 0; j < mm; j++) {
/* vector loop over elements in blocks of 4 */
/* for (i = 0; i < nn; i++) { */
/* sfxyz[4*(i+mxv*j+mxyv*k)] */
/* = fxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* sfxyz[1+4*(i+mxv*j+mxyv*k)] */
/* = fxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* sfxyz[2+4*(i+mxv*j+mxyv*k)] */
/* = fxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */
/* } */
for (i = 0; i < nps; i+=4) {
m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff));
v_at = _mm512_loadunpacklo_ps(v_at,&fxyz[m]);
v_at = _mm512_loadunpackhi_ps(v_at,&fxyz[m+16]);
m = 4*(i + mxv*j + mxyv*k);
_mm512_packstorelo_ps(&sfxyz[m],v_at);
_mm512_packstorehi_ps(&sfxyz[m+16],v_at);
}
/* loop over remaining elements */
for (i = nps; i < nn; i++) {
sfxyz[4*(i+mxv*j+mxyv*k)]
= fxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sfxyz[1+4*(i+mxv*j+mxyv*k)]
= fxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sfxyz[2+4*(i+mxv*j+mxyv*k)]
= fxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
sfxyz[3+4*(i+mxv*j+mxyv*k)]
= fxyz[3+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))];
}
}
}
/* clear counters */
/* for (j = 0; j < 26; j++) { */
/* ncl[j+26*l] = 0; */
/* } */
memset((void*)&ncl[26*l],0,26*sizeof(int));
nps = 16*(npp/16);
sum1 = 0.0;
v_sum1 = _mm512_set1_pd(0.0);
/* loop over particles in tile in blocks of 16 */
for (j = 0; j < nps; j+=16) {
/* find interpolation weights */
/* x = ppart[j+npoff]; */
/* y = ppart[j+nppmx+npoff]; */
/* z = ppart[j+2*nppmx+npoff]; */
v_x = _mm512_load_ps(&ppart[j+npoff]);
v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]);
v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]);
/* nn = x; */
/* mm = y; */
/* ll = z; */
v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
/* dxp = x - (float) nn; */
v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dxp = _mm512_sub_ps(v_x,v_dxp);
/* dyp = y - (float) mm; */
v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dyp = _mm512_sub_ps(v_y,v_dyp);
/* dzp = z - (float) ll; */
v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dzp = _mm512_sub_ps(v_z,v_dzp);
/* nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); */
v_nn = _mm512_sub_epi32(v_nn,v_noff);
v_mm = _mm512_sub_epi32(v_mm,v_moff);
v_ll = _mm512_sub_epi32(v_ll,v_loff);
v_it = _mm512_mullo_epi32(v_mxyv4,v_ll);
v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv4,v_mm));
v_nn = _mm512_add_epi32(_mm512_slli_epi32(v_nn,2),v_it);
/* amx = 1.0f - dxp; */
/* amy = 1.0f - dyp; */
/* amz = 1.0f - dzp; */
v_amx = _mm512_sub_ps(v_one,v_dxp);
v_amy = _mm512_sub_ps(v_one,v_dyp);
v_amz = _mm512_sub_ps(v_one,v_dzp);
/* dx1 = dxp*dyp; */
/* dyp = amx*dyp; */
/* amx = amx*amy; */
/* amy = dxp*amy; */
v_dx1 = _mm512_mul_ps(v_dxp,v_dyp);
v_dyp = _mm512_mul_ps(v_amx,v_dyp);
v_amx = _mm512_mul_ps(v_amx,v_amy);
v_amy = _mm512_mul_ps(v_dxp,v_amy);
_mm512_store_epi32(kk,v_nn);
/* load sfxyz[nn:nn+3] and sfxyz[nn+4:nn+7] field components */
/* first block of 4 particles */
mm = kk[0];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[1];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[2];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[3];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[5];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[6];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[7];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[9];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[10];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[11];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[13];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[14];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[15];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sfxyz[nn:nn+3] field components */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sfxyz[nn+4:nn+7] field components */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find first part of acceleration */
/* dx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; */
v_dx = _mm512_mul_ps(v_amx,a);
v_dx = _mm512_fmadd_ps(v_amy,p,v_dx);
/* dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; */
v_dy = _mm512_mul_ps(v_amx,b);
v_dy = _mm512_fmadd_ps(v_amy,q,v_dy);
/* dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; */
v_dz = _mm512_mul_ps(v_amx,c);
v_dz = _mm512_fmadd_ps(v_amy,r,v_dz);
/* mm = nn + 4*mxv; */
/* load sfxyz[mm:mm+3] and sfxyz[mm+4:mm+7] field components */
/* first block of 4 particles */
mm = kk[0] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[1] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[2] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[3] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[5] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[6] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[7] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[9] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[10] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[11] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[13] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[14] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[15] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sfxyz[mm:mm+3] field components */
/* where mm = nn + 4*mxv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sfxyz[mm+4:mm+7] field components */
/* where mm = nn + 4*mxv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find second part of acceleration */
/* dx = amz*(dx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); */
v_dx = _mm512_fmadd_ps(v_dyp,a,v_dx);
v_dx = _mm512_fmadd_ps(v_dx1,p,v_dx);
v_dx = _mm512_mul_ps(v_amz,v_dx);
/* dy = amz*(dy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); */
v_dy = _mm512_fmadd_ps(v_dyp,b,v_dy);
v_dy = _mm512_fmadd_ps(v_dx1,q,v_dy);
v_dy = _mm512_mul_ps(v_amz,v_dy);
/* dz = amz*(dz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); */
v_dz = _mm512_fmadd_ps(v_dyp,c,v_dz);
v_dz = _mm512_fmadd_ps(v_dx1,r,v_dz);
v_dz = _mm512_mul_ps(v_amz,v_dz);
/* nn += 4*mxyv; */
v_nn = _mm512_add_epi32(v_nn,v_mxyv4);
_mm512_store_epi32(kk,v_nn);
/* load sfxyz[nn:nn+3] and sfxyz[nn+4:nn+7] field components */
/* first block of 4 particles */
mm = kk[0];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[1];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[2];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[3];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[5];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[6];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[7];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[9];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[10];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[11];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[13];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[14];
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[15];
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sfxyz[nn:nn+3] field components */
/* where nn = nn + 4*mxyv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sfxyz[nn+4:nn+7] field components */
/* where nn = nn + 4*mxyv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find third part of acceleration */
/* vx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; */
v_vx = _mm512_mul_ps(v_amx,a);
v_vx = _mm512_fmadd_ps(v_amy,p,v_vx);
/* vy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; */
v_vy = _mm512_mul_ps(v_amx,b);
v_vy = _mm512_fmadd_ps(v_amy,q,v_vy);
/* vz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; */
v_vz = _mm512_mul_ps(v_amx,c);
v_vz = _mm512_fmadd_ps(v_amy,r,v_vz);
/* mm = nn + 4*mxv; */
/* load sfxyz[mm:mm+3] and sfxyz[mm+4:mm+7] field components */
/* first block of 4 particles */
mm = kk[0] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[1] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[2] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[3] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* second block of 4 particles */
mm = kk[4] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[5] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[6] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[7] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* third block of 4 particles */
mm = kk[8] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[9] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[10] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[11] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* fourth block of 4 particles */
mm = kk[12] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[13] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255),
&sfxyz[mm+16]);
mm = kk[14] + 4*mxv;
e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280),
&sfxyz[mm]);
e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280),
&sfxyz[mm+16]);
mm = kk[15] + 4*mxv;
f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280),
&sfxyz[mm]);
f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280),
&sfxyz[mm+16]);
d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177);
s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177);
/* perform 16x3 transpose for sfxyz[mm:mm+3] field components */
/* where mm = nn + 4*mxyv + 4*mxv; */
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177);
f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177);
g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177);
b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78);
/* perform 16x3 transpose for sfxyz[mm+4:mm+7] field components */
/* where mm = nn + 4*mxyv + 4*mxv; */
p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p);
q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q);
r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r);
s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s);
e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177);
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177);
g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177);
q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177);
p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78);
r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78);
q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78);
/* find fourth part of acceleration */
/* dx = dx + dzp*(vx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); */
v_vx = _mm512_fmadd_ps(v_dyp,a,v_vx);
v_vx = _mm512_fmadd_ps(v_dx1,p,v_vx);
v_dx = _mm512_fmadd_ps(v_dzp,v_vx,v_dx);
/* dy = dy + dzp*(vy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); */
v_vy = _mm512_fmadd_ps(v_dyp,b,v_vy);
v_vy = _mm512_fmadd_ps(v_dx1,q,v_vy);
v_dy = _mm512_fmadd_ps(v_dzp,v_vy,v_dy);
/* dz = dz + dzp*(vz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); */
v_vz = _mm512_fmadd_ps(v_dyp,c,v_vz);
v_vz = _mm512_fmadd_ps(v_dx1,r,v_vz);
v_dz = _mm512_fmadd_ps(v_dzp,v_vz,v_dz);
/* new velocity */
/* dxp = ppart[j+3*nppmx+npoff]; */
/* dyp = ppart[j+4*nppmx+npoff]; */
/* dzp = ppart[j+5*nppmx+npoff]; */
v_dxp = _mm512_load_ps(&ppart[j+3*nppmx+npoff]);
v_dyp = _mm512_load_ps(&ppart[j+4*nppmx+npoff]);
v_dzp = _mm512_load_ps(&ppart[j+5*nppmx+npoff]);
/* vx = dxp + qtm*dx; */
/* vy = dyp + qtm*dy; */
/* vz = dzp + qtm*dz; */
v_vx = _mm512_fmadd_ps(v_qtm,v_dx,v_dxp);
v_vy = _mm512_fmadd_ps(v_qtm,v_dy,v_dyp);
v_vz = _mm512_fmadd_ps(v_qtm,v_dz,v_dzp);
/* average kinetic energy */
/* dxp += vx; */
/* dyp += vy; */
/* dzp += vz; */
v_dxp = _mm512_add_ps(v_dxp,v_vx);
v_dyp = _mm512_add_ps(v_dyp,v_vy);
v_dzp = _mm512_add_ps(v_dzp,v_vz);
/* sum1 += dxp*dxp + dyp*dyp + dzp*dzp; */
v_at = _mm512_mul_ps(v_dxp,v_dxp);
v_at = _mm512_add_ps(v_at,_mm512_mul_ps(v_dyp,v_dyp));
v_at = _mm512_add_ps(v_at,_mm512_mul_ps(v_dzp,v_dzp));
/* convert to double precision before accumulating */
v_sum1 = _mm512_add_pd(v_sum1,_mm512_cvtpslo_pd(v_at));
v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_at,78));
v_sum1 = _mm512_add_pd(v_sum1,v_d);
/* new position */
/* dx = x + vx*dt; */
/* dy = y + vy*dt; */
/* dz = z + vz*dt; */
v_dx = _mm512_fmadd_ps(v_vx,v_dt,v_x);
v_dy = _mm512_fmadd_ps(v_vy,v_dt,v_y);
v_dz = _mm512_fmadd_ps(v_vz,v_dt,v_z);
/* find particles going out of bounds */
/* mm = 0; */
v_mm = _mm512_setzero_epi32();
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going */
/* if (dx >= edgerx) { */
/* if (dx >= anx) */
/* ppart[j+npoff] = dx - anx; */
/* mm = 2; */
/* } */
msk1 = _mm512_cmp_ps_mask(v_dx,v_edgerx,_MM_CMPINT_GE);
msk2 = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT);
ii = _mm512_mask2int(_mm512_kor(msk1,msk2));
/* execute if either test result is true for any particle */
if (ii != 0) {
ii = _mm512_mask2int(msk1);
v_x = v_dx;
/* write output if test result is true for any particle */
if (ii != 0) {
v_it = _mm512_add_epi32(v_1,v_1);
v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_it);
msk1 = _mm512_cmp_ps_mask(v_dx,v_anx,_MM_CMPINT_GE);
v_x = _mm512_mask_sub_ps(v_x,msk1,v_dx,v_anx);
ii = _mm512_mask2int(msk1);
if (ii != 0)
v_dx = v_x;
}
/* if (dx < edgelx) { */
/* if (dx < 0.0) { */
/* dx += anx; */
/* if (dx < anx) */
/* mm = 1; */
/* else */
/* dx = 0.0; */
/* ppart[j+npoff] = dx; */
/* } */
/* else { */
/* mm = 1; */
/* } */
/* } */
/* write output if test result is true for any particle */
ii = _mm512_mask2int(msk2);
if (ii != 0) {
v_it = _mm512_mask_mov_epi32(v_0,msk2,v_1);
msk2 = _mm512_cmp_ps_mask(v_dx,v_zero,_MM_CMPINT_LT);
v_x = _mm512_mask_add_ps(v_x,msk2,v_dx,v_anx);
msk1 = _mm512_cmp_ps_mask(v_x,v_anx,_MM_CMPINT_GE);
msk1 = _mm512_kand(msk1,msk2);
v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero);
v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0);
v_mm = _mm512_add_epi32(v_mm,v_it);
ii = _mm512_mask2int(msk2);
if (ii != 0)
v_dx = v_x;
}
}
/* if (dy >= edgery) { */
/* if (dy >= any) */
/* ppart[j+nppmx+npoff] = dy - any; */
/* mm += 6; */
/* } */
msk1 = _mm512_cmp_ps_mask(v_dy,v_edgery,_MM_CMPINT_GE);
msk2 = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT);
ii = _mm512_mask2int(_mm512_kor(msk1,msk2));
/* execute if either test result is true for any particle */
if (ii != 0) {
ii = _mm512_mask2int(msk1);
v_x = v_dy;
/* write output if test result is true for any particle */
if (ii != 0) {
v_it = _mm512_add_epi32(v_3,v_3);
v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_it);
msk1 = _mm512_cmp_ps_mask(v_dy,v_any,_MM_CMPINT_GE);
v_x = _mm512_mask_sub_ps(v_x,msk1,v_dy,v_any);
ii = _mm512_mask2int(msk1);
if (ii != 0)
v_dy = v_x;
}
/* if (dy < edgely) { */
/* if (dy < 0.0) { */
/* dy += any; */
/* if (dy < any) */
/* mm += 3; */
/* else */
/* dy = 0.0; */
/* ppart[j+nppmx+npoff] = dy; */
/* } */
/* else { */
/* mm += 3; */
/* } */
/* } */
/* write output if test result is true for any particle */
ii = _mm512_mask2int(msk2);
if (ii != 0) {
v_it = _mm512_mask_mov_epi32(v_0,msk2,v_3);
msk2 = _mm512_cmp_ps_mask(v_dy,v_zero,_MM_CMPINT_LT);
v_x = _mm512_mask_add_ps(v_x,msk2,v_dy,v_any);
msk1 = _mm512_cmp_ps_mask(v_x,v_any,_MM_CMPINT_GE);
msk1 = _mm512_kand(msk1,msk2);
v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero);
v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0);
v_mm = _mm512_add_epi32(v_mm,v_it);
ii = _mm512_mask2int(msk2);
if (ii != 0)
v_dy = v_x;
}
}
/* if (dz >= edgerz) { */
/* if (dz >= anz) */
/* ppart[j+2*nppmx+npoff] = dz - anz; */
/* mm += 18; */
/* } */
msk1 = _mm512_cmp_ps_mask(v_dz,v_edgerz,_MM_CMPINT_GE);
msk2 = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT);
ii = _mm512_mask2int(_mm512_kor(msk1,msk2));
/* execute if either test result is true for any particle */
if (ii != 0) {
ii = _mm512_mask2int(msk1);
v_x = v_dz;
/* write output if test result is true for any particle */
if (ii != 0) {
v_it = _mm512_add_epi32(v_9,v_9);
v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_it);
msk1 = _mm512_cmp_ps_mask(v_dz,v_anz,_MM_CMPINT_GE);
v_x = _mm512_mask_sub_ps(v_x,msk1,v_dz,v_anz);
ii = _mm512_mask2int(msk1);
if (ii != 0)
v_dz = v_x;
}
/* if (dz < edgelz) { */
/* if (dz < 0.0) { */
/* dz += anz; */
/* if (dz < anz) */
/* mm += 9; */
/* else */
/* dz = 0.0; */
/* ppart[j+2*nppmx+npoff] = dz; */
/* } */
/* else { */
/* mm += 9; */
/* } */
/* } */
/* write output if test result is true for any particle */
ii = _mm512_mask2int(msk2);
if (ii != 0) {
v_it = _mm512_mask_mov_epi32(v_0,msk2,v_9);
msk2 = _mm512_cmp_ps_mask(v_dz,v_zero,_MM_CMPINT_LT);
v_x = _mm512_mask_add_ps(v_x,msk2,v_dz,v_anz);
msk1 = _mm512_cmp_ps_mask(v_x,v_anz,_MM_CMPINT_GE);
msk1 = _mm512_kand(msk1,msk2);
v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero);
v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0);
v_mm = _mm512_add_epi32(v_mm,v_it);
ii = _mm512_mask2int(msk2);
if (ii != 0)
v_dz = v_x;
}
}
/* set new position */
/* ppart[j+npoff] = dx; */
/* ppart[j+nppmx+npoff] = dy; */
/* ppart[j+2*nppmx+npoff] = dz; */
_mm512_store_ps(&ppart[j+npoff],v_dx);
_mm512_store_ps(&ppart[j+nppmx+npoff],v_dy);
_mm512_store_ps(&ppart[j+2*nppmx+npoff],v_dz);
/* set new velocity */
/* ppart[j+3*nppmx+npoff] = vx; */
/* ppart[j+4*nppmx+npoff] = vy; */
/* ppart[j+5*nppmx+npoff] = vz; */
_mm512_store_ps(&ppart[j+3*nppmx+npoff],v_vx);
_mm512_store_ps(&ppart[j+4*nppmx+npoff],v_vy);
_mm512_store_ps(&ppart[j+5*nppmx+npoff],v_vz);
/* increment counters */
/* if (mm > 0) { */
/* ncl[mm+26*l-1] += 1; */
/* ih += 1; */
/* if (ih <= ntmax) { */
/* ihole[2*(ih+(ntmax+1)*l)] = j + i + 1; */
/* ihole[1+2*(ih+(ntmax+1)*l)] = mm; */
/* } */
/* else { */
/* nh = 1; */
/* } */
/* } */
_mm512_store_epi32(kk,v_mm);
for (i = 0; i < 16; i++) {
mm = kk[i];
if (mm > 0) {
ncl[mm+26*l-1] += 1;
ih += 1;
if (ih <= ntmax) {
ihole[2*(ih+(ntmax+1)*l)] = j + i + 1;
ihole[1+2*(ih+(ntmax+1)*l)] = mm;
}
else {
nh = 1;
}
}
}
}
/* loop over remaining particles */
for (j = nps; j < npp; j++) {
/* find interpolation weights */
x = ppart[j+npoff];
y = ppart[j+nppmx+npoff];
z = ppart[j+2*nppmx+npoff];
nn = x;
mm = y;
ll = z;
dxp = x - (float) nn;
dyp = y - (float) mm;
dzp = z - (float) ll;
nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff));
amx = 1.0f - dxp;
amy = 1.0f - dyp;
dx1 = dxp*dyp;
dyp = amx*dyp;
amx = amx*amy;
amz = 1.0f - dzp;
amy = dxp*amy;
/* find acceleration */
dx = amx*sfxyz[nn] + amy*sfxyz[nn+4];
dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4];
dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4];
dx = amz*(dx + dyp*sfxyz[nn+4*mxv] + dx1*sfxyz[nn+4*mxv+4]);
dy = amz*(dy + dyp*sfxyz[nn+4*mxv+1] + dx1*sfxyz[nn+4*mxv+1+4]);
dz = amz*(dz + dyp*sfxyz[nn+4*mxv+2] + dx1*sfxyz[nn+4*mxv+2+4]);
mm = nn + 4*mxyv;
vx = amx*sfxyz[mm] + amy*sfxyz[mm+4];
vy = amx*sfxyz[mm+1] + amy*sfxyz[mm+1+4];
vz = amx*sfxyz[mm+2] + amy*sfxyz[mm+2+4];
dx = dx + dzp*(vx + dyp*sfxyz[mm+4*mxv] + dx1*sfxyz[mm+4*mxv+4]);
dy = dy + dzp*(vy + dyp*sfxyz[mm+4*mxv+1] + dx1*sfxyz[mm+4*mxv+1+4]);
dz = dz + dzp*(vz + dyp*sfxyz[mm+4*mxv+2] + dx1*sfxyz[mm+4*mxv+2+4]);
/* new velocity */
dxp = ppart[j+3*nppmx+npoff];
dyp = ppart[j+4*nppmx+npoff];
dzp = ppart[j+5*nppmx+npoff];
vx = dxp + qtm*dx;
vy = dyp + qtm*dy;
vz = dzp + qtm*dz;
/* average kinetic energy */
dxp += vx;
dyp += vy;
dzp += vz;
sum1 += dxp*dxp + dyp*dyp+ dzp*dzp;
/* new position */
dx = x + vx*dt;
dy = y + vy*dt;
dz = z + vz*dt;
/* find particles going out of bounds */
mm = 0;
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going */
if (dx >= edgerx) {
if (dx >= anx)
dx = dx - anx;
mm = 2;
}
else if (dx < edgelx) {
if (dx < 0.0f) {
dx += anx;
if (dx < anx)
mm = 1;
else
dx = 0.0f;
}
else {
mm = 1;
}
}
if (dy >= edgery) {
if (dy >= any)
dy = dy - any;
mm += 6;
}
else if (dy < edgely) {
if (dy < 0.0f) {
dy += any;
if (dy < any)
mm += 3;
else
dy = 0.0f;
}
else {
mm += 3;
}
}
if (dz >= edgerz) {
if (dz >= anz)
dz = dz - anz;
mm += 18;
}
else if (dz < edgelz) {
if (dz < 0.0f) {
dz += anz;
if (dz < anz)
mm += 9;
else
dz = 0.0f;
}
else {
mm += 9;
}
}
/* set new position */
ppart[j+npoff] = dx;
ppart[j+nppmx+npoff] = dy;
ppart[j+2*nppmx+npoff] = dz;
/* set new velocity */
ppart[j+3*nppmx+npoff] = vx;
ppart[j+4*nppmx+npoff] = vy;
ppart[j+5*nppmx+npoff] = vz;
/* increment counters */
if (mm > 0) {
ncl[mm+26*l-1] += 1;
ih += 1;
if (ih <= ntmax) {
ihole[2*(ih+(ntmax+1)*l)] = j + 1;
ihole[1+2*(ih+(ntmax+1)*l)] = mm;
}
else {
nh = 1;
}
}
}
/* sum2 += sum1; */
_mm512_store_pd(&dd[0],v_sum1);
for (j = 1; j < 8; j++) {
dd[0] += dd[j];
}
sum2 += (sum1 + dd[0]);
/* set error and end of file flag */
if (nh > 0) {
*irc = ih;
ih = -ih;
}
ihole[2*(ntmax+1)*l] = ih;
}
/* normalize kinetic energy */
*ek += 0.125f*sum2;
return;
#undef MXV
#undef MYV
#undef MZV
}
/*--------------------------------------------------------------------*/
void ckncgppost3lt(float ppart[], float q[], int kpic[], float qm,
int nppmx, int idimp, int mx, int my, int mz,
int nxv, int nyv, int nzv, int mx1, int my1,
int mxyz1) {
/* for 3d code, this subroutine calculates particle charge density
using first-order linear interpolation, periodic boundaries
OpenMP/vector version using guard cells
data deposited in tiles
particles stored segmented array
33 flops/particle, 11 loads, 8 stores
input: all, output: q
charge density is approximated by values at the nearest grid points
q(n,m,l)=qm*(1.-dx)*(1.-dy)*(1.-dz)
q(n+1,m,l)=qm*dx*(1.-dy)*(1.-dz)
q(n,m+1,l)=qm*(1.-dx)*dy*(1.-dz)
q(n+1,m+1,l)=qm*dx*dy*(1.-dz)
q(n,m,l+1)=qm*(1.-dx)*(1.-dy)*dz
q(n+1,m,l+1)=qm*dx*(1.-dy)*dz
q(n,m+1,l+1)=qm*(1.-dx)*dy*dz
q(n+1,m+1,l+1)=qm*dx*dy*dz
where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = position z of particle n in tile m
q[l][k][j] = charge density at grid point j,k,l
kpic = number of particles per tile
qm = charge on particle, in units of e
nppmx = maximum number of particles in tile
idimp = size of phase space = 6
mx/my/mz = number of grids in sorting cell in x/y/z
nxv = first dimension of charge array, must be >= nx+1
nyv = second dimension of charge array, must be >= ny+1
nzv = third dimension of charge array, must be >= nz+1
mx1 = (system length in x direction - 1)/mx + 1
my1 = (system length in y direction - 1)/my + 1
mxyz1 = mx1*my1*mz1,
where mz1 = (system length in z direction - 1)/mz + 1
requires KNC, ppart needs to be 64 byte aligned
nppmx needs to be a multiple of 16
local data */
#define MXV 17
#define MYV 17
#define MZV 17
int mxy1, noff, moff, loff, npoff, npp, nps;
int i, j, k, l, m, nn, mm, ll, nm, lm, mxv, myv, mxyv, nxyv;
float x, y, z, w, dx1, dxp, dyp, dzp, amx, amy, amz;
__m512i v_noff, v_moff, v_loff, v_mxv, v_mxyv;
__m512i v_nn, v_mm, v_ll, v_it;
__m512 v_qm, v_one;
__m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz;
__m512 v_dx1, v_as, v_at;
__m512 a, b, c, d, e, f, g, h, qp, qr;
__mmask16 msk, msks, v_m;
/* kk: staging buffer for the 16 per-particle cell offsets (from v_nn) */
__attribute__((aligned(64))) unsigned int kk[16];
__attribute__((aligned(64))) float sq[MXV*MYV*MZV];
/* __attribute__((aligned(64))) float sq[(mx+1)*(my+1)*(mz+1)]; */
mxy1 = mx1*my1;
/* mxv = MXV; */
/* myv = MYV; */
mxv = mx + 1;
myv = my + 1;
mxyv = mxv*myv;
nxyv = nxv*nyv;
v_mxv = _mm512_set1_epi32(mxv);
v_mxyv = _mm512_set1_epi32(mxyv);
v_qm = _mm512_set1_ps(qm);
v_one = _mm512_set1_ps(1.0f);
v_at = _mm512_set_ps(0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,
1.);
/* v_m = all lanes set except lane 0 (only lane 0 holds 1.0, which fails */
/* the < 1.0 test); used below to skip lane 0 when i = 0 in the interior */
/* update loop, since interior points start at i = 1 */
v_m = _mm512_cmp_ps_mask(v_at,v_one,_MM_CMPINT_LT);
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */
/* return; */
/* NOTE(review): the bounds check above is disabled, so callers must */
/* guarantee mx, my, mz < 17 or sq[MXV*MYV*MZV] overflows - confirm */
#pragma omp parallel for \
private(i,j,k,l,m,noff,moff,loff,npp,npoff,nps,nn,mm,ll,nm,lm,x,y,z,w, \
dxp,dyp,dzp,amx,amy,amz,dx1,v_noff,v_moff,v_loff,v_nn,v_mm,v_ll,v_it, \
v_x,v_y,v_z,v_dxp,v_dyp,v_dzp,v_amx,v_amy,v_amz,v_dx1,v_at,v_as,a,b,c, \
d,e,f,g,h,qp,qr,msk,msks,kk,sq)
for (l = 0; l < mxyz1; l++) {
/* decode tile index l into grid offsets noff, moff, loff */
loff = l/mxy1;
k = l - mxy1*loff;
loff = mz*loff;
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
v_noff = _mm512_set1_epi32(noff);
v_moff = _mm512_set1_epi32(moff);
v_loff = _mm512_set1_epi32(loff);
npp = kpic[l];
npoff = idimp*nppmx*l;
/* zero out local accumulator */
/* for (j = 0; j < mxyv*(mz+1); j++) { */
/* sq[j] = 0.0f; */
/* } */
memset((void*)sq,0,mxyv*(mz+1)*sizeof(float));
nps = 16*(npp/16);
/* loop over particles in tile in blocks of 16 */
for (j = 0; j < nps; j+=16) {
/* find interpolation weights */
/* x = ppart[j+npoff]; */
/* y = ppart[j+nppmx+npoff]; */
/* z = ppart[j+2*nppmx+npoff]; */
v_x = _mm512_load_ps(&ppart[j+npoff]);
v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]);
v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]);
/* nn = x; */
/* mm = y; */
/* ll = z; */
v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
/* dxp = qm*(x - (float) nn); */
v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dxp = _mm512_mul_ps(v_qm,_mm512_sub_ps(v_x,v_dxp));
/* dyp = y - (float) mm; */
v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dyp = _mm512_sub_ps(v_y,v_dyp);
/* dzp = z - (float) ll; */
v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dzp = _mm512_sub_ps(v_z,v_dzp);
/* nn = nn - noff + mxv*(mm - moff) + mxyv*(ll - loff); */
v_nn = _mm512_sub_epi32(v_nn,v_noff);
v_mm = _mm512_sub_epi32(v_mm,v_moff);
v_ll = _mm512_sub_epi32(v_ll,v_loff);
v_it = _mm512_mullo_epi32(v_mxyv,v_ll);
v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv,v_mm));
v_nn = _mm512_add_epi32(v_nn,v_it);
/* amx = qm - dxp; */
/* amy = 1.0f - dyp; */
/* amz = 1.0f - dzp; */
v_amx = _mm512_sub_ps(v_qm,v_dxp);
v_amy = _mm512_sub_ps(v_one,v_dyp);
v_amz = _mm512_sub_ps(v_one,v_dzp);
/* dx1 = dxp*dyp; */
/* dyp = amx*dyp; */
/* amx = amx*amy; */
/* amy = dxp*amy; */
v_dx1 = _mm512_mul_ps(v_dxp,v_dyp);
v_dyp = _mm512_mul_ps(v_amx,v_dyp);
v_amx = _mm512_mul_ps(v_amx,v_amy);
v_amy = _mm512_mul_ps(v_dxp,v_amy);
/* the 8 trilinear deposit weights per particle: */
/* a = amx*amz; */
/* b = amy*amz; */
/* c = dyp*amz; */
/* d = dx1*amz; */
a = _mm512_mul_ps(v_amx,v_amz);
b = _mm512_mul_ps(v_amy,v_amz);
c = _mm512_mul_ps(v_dyp,v_amz);
d = _mm512_mul_ps(v_dx1,v_amz);
/* e = amx*dzp; */
/* f = amy*dzp; */
/* g = dyp*dzp; */
/* h = dx1*dzp; */
e = _mm512_mul_ps(v_amx,v_dzp);
f = _mm512_mul_ps(v_amy,v_dzp);
g = _mm512_mul_ps(v_dyp,v_dzp);
h = _mm512_mul_ps(v_dx1,v_dzp);
_mm512_store_epi32(kk,v_nn);
/* deposit charge */
/* x = sq[nn] + amx*amz; */
/* y = sq[nn+1] + amy*amz; */
/* z = sq[nn+mxv] + dyp*amz; */
/* w = sq[nn+1+mxv] + dx1*amz; */
/* sq[nn] = x; */
/* sq[nn+1] = y; */
/* sq[nn+mxv] = z; */
/* sq[nn+1+mxv] = w; */
/* mm = nn + mxyv; */
/* x = sq[mm] + amx*dzp; */
/* y = sq[mm+1] + amy*dzp; */
/* z = sq[mm+mxv] + dyp*dzp; */
/* w = sq[mm+1+mxv] + dx1*dzp; */
/* sq[mm] = x; */
/* sq[mm+1] = y; */
/* sq[mm+mxv] = z; */
/* sq[mm+1+mxv] = w; */
/* deposit charge for two particles at a time */
for (i = 0; i < 8; i++) {
/* first particle */
mm = kk[2*i];
/* msk selects lanes 2*i and 2*i+1 of the pair; msks selects the single */
/* lane whose value is taken from the pair-swapped partner vector */
msk = _mm512_int2mask(3<<(2*i));
msks = _mm512_int2mask(2<<(2*i));
qp = _mm512_mask_loadunpacklo_ps(qp,msk,&sq[mm]);
qp = _mm512_mask_loadunpackhi_ps(qp,msk,&sq[mm+16]);
/* shuffle pattern 177 swaps adjacent elements, so v_at holds */
/* (a[2*i], b[2*i]) in lanes (2*i, 2*i+1) */
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)a,msks,
(__m512i)b,177);
qp = _mm512_mask_add_ps(qp,msk,qp,v_at);
_mm512_mask_packstorelo_ps(&sq[mm],msk,qp);
_mm512_mask_packstorehi_ps(&sq[mm+16],msk,qp);
ll = mm + mxv;
qr = _mm512_mask_loadunpacklo_ps(qr,msk,&sq[ll]);
qr = _mm512_mask_loadunpackhi_ps(qr,msk,&sq[ll+16]);
v_as = (__m512)_mm512_mask_shuffle_epi32((__m512i)c,msks,
(__m512i)d,177);
qr = _mm512_mask_add_ps(qr,msk,qr,v_as);
_mm512_mask_packstorelo_ps(&sq[ll],msk,qr);
_mm512_mask_packstorehi_ps(&sq[ll+16],msk,qr);
mm = mm + mxyv;
qp = _mm512_mask_loadunpacklo_ps(qp,msk,&sq[mm]);
qp = _mm512_mask_loadunpackhi_ps(qp,msk,&sq[mm+16]);
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)e,msks,
(__m512i)f,177);
qp = _mm512_mask_add_ps(qp,msk,qp,v_at);
_mm512_mask_packstorelo_ps(&sq[mm],msk,qp);
_mm512_mask_packstorehi_ps(&sq[mm+16],msk,qp);
ll = mm + mxv;
qr = _mm512_mask_loadunpacklo_ps(qr,msk,&sq[ll]);
qr = _mm512_mask_loadunpackhi_ps(qr,msk,&sq[ll+16]);
v_as = (__m512)_mm512_mask_shuffle_epi32((__m512i)g,msks,
(__m512i)h,177);
qr = _mm512_mask_add_ps(qr,msk,qr,v_as);
_mm512_mask_packstorelo_ps(&sq[ll],msk,qr);
_mm512_mask_packstorehi_ps(&sq[ll+16],msk,qr);
/* second particle */
mm = kk[2*i+1];
/* msk (lanes 2*i, 2*i+1) is reused from the first particle; only msks */
/* changes, so source/swapped operands are exchanged in the shuffles and */
/* the weights now come from the odd elements 2*i+1 */
msks = _mm512_int2mask(1<<(2*i));
qp = _mm512_mask_loadunpacklo_ps(qp,msk,&sq[mm]);
qp = _mm512_mask_loadunpackhi_ps(qp,msk,&sq[mm+16]);
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)b,msks,
(__m512i)a,177);
qp = _mm512_mask_add_ps(qp,msk,qp,v_at);
_mm512_mask_packstorelo_ps(&sq[mm],msk,qp);
_mm512_mask_packstorehi_ps(&sq[mm+16],msk,qp);
ll = mm + mxv;
qr = _mm512_mask_loadunpacklo_ps(qr,msk,&sq[ll]);
qr = _mm512_mask_loadunpackhi_ps(qr,msk,&sq[ll+16]);
v_as = (__m512)_mm512_mask_shuffle_epi32((__m512i)d,msks,
(__m512i)c,177);
qr = _mm512_mask_add_ps(qr,msk,qr,v_as);
_mm512_mask_packstorelo_ps(&sq[ll],msk,qr);
_mm512_mask_packstorehi_ps(&sq[ll+16],msk,qr);
mm = mm + mxyv;
qp = _mm512_mask_loadunpacklo_ps(qp,msk,&sq[mm]);
qp = _mm512_mask_loadunpackhi_ps(qp,msk,&sq[mm+16]);
v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)f,msks,
(__m512i)e,177);
qp = _mm512_mask_add_ps(qp,msk,qp,v_at);
_mm512_mask_packstorelo_ps(&sq[mm],msk,qp);
_mm512_mask_packstorehi_ps(&sq[mm+16],msk,qp);
ll = mm + mxv;
qr = _mm512_mask_loadunpacklo_ps(qr,msk,&sq[ll]);
qr = _mm512_mask_loadunpackhi_ps(qr,msk,&sq[ll+16]);
v_as = (__m512)_mm512_mask_shuffle_epi32((__m512i)h,msks,
(__m512i)g,177);
qr = _mm512_mask_add_ps(qr,msk,qr,v_as);
_mm512_mask_packstorelo_ps(&sq[ll],msk,qr);
_mm512_mask_packstorehi_ps(&sq[ll+16],msk,qr);
}
}
/* loop over remaining particles (scalar tail, same deposit as above) */
for (j = nps; j < npp; j++) {
/* find interpolation weights */
x = ppart[j+npoff];
y = ppart[j+nppmx+npoff];
z = ppart[j+2*nppmx+npoff];
nn = x;
mm = y;
ll = z;
dxp = qm*(x - (float) nn);
dyp = y - (float) mm;
dzp = z - (float) ll;
nn = nn - noff + mxv*(mm - moff) + mxyv*(ll - loff);
amx = qm - dxp;
amy = 1.0f - dyp;
amz = 1.0f - dzp;
dx1 = dxp*dyp;
dyp = amx*dyp;
amx = amx*amy;
amy = dxp*amy;
/* deposit charge */
x = sq[nn] + amx*amz;
y = sq[nn+1] + amy*amz;
z = sq[nn+mxv] + dyp*amz;
w = sq[nn+1+mxv] + dx1*amz;
sq[nn] = x;
sq[nn+1] = y;
sq[nn+mxv] = z;
sq[nn+1+mxv] = w;
mm = nn + mxyv;
x = sq[mm] + amx*dzp;
y = sq[mm+1] + amy*dzp;
z = sq[mm+mxv] + dyp*dzp;
w = sq[mm+1+mxv] + dx1*dzp;
sq[mm] = x;
sq[mm+1] = y;
sq[mm+mxv] = z;
sq[mm+1+mxv] = w;
}
/* deposit charge to interior points in global array */
/* nn/mm/ll = number of interior grid points in x/y/z, clipped to array */
nn = nxv - noff;
nn = mx < nn ? mx : nn;
mm = nyv - moff;
mm = my < mm ? my : mm;
ll = nzv - loff;
ll = mz < ll ? mz : ll;
nps = 16*(nn/16);
for (k = 1; k < ll; k++) {
for (j = 1; j < mm; j++) {
/* vector loop over elements in blocks of 16 */
/* for (i = 1; i < nn; i++) { */
/* q[i+noff+nxv*(j+moff)+nxyv*(k+loff)] */
/* += sq[i+mxv*j+mxyv*k]; */
/* } */
for (i = 0; i < nps; i+=16) {
m = i + mxv*j + mxyv*k;
v_as = _mm512_loadunpacklo_ps(v_as,&sq[m]);
v_as = _mm512_loadunpackhi_ps(v_as,&sq[m+16]);
m = i + noff + nxv*(j + moff) + nxyv*(k + loff);
v_at = _mm512_loadunpacklo_ps(v_at,&q[m]);
v_at = _mm512_loadunpackhi_ps(v_at,&q[m+16]);
/* skip add for first element for i = 0 */
/* (v_m masks off lane 0, since interior points start at i = 1) */
if (i==0)
v_at = _mm512_mask_add_ps(v_at,v_m,v_at,v_as);
else
v_at = _mm512_add_ps(v_at,v_as);
_mm512_packstorelo_ps(&q[m],v_at);
_mm512_packstorehi_ps(&q[m+16],v_at);
}
/* loop over remaining elements */
m = 1 > nps ? 1 : nps;
for (i = m ; i < nn; i++) {
q[i+noff+nxv*(j+moff)+nxyv*(k+loff)]
+= sq[i+mxv*j+mxyv*k];
}
}
}
/* deposit charge to edge points in global array */
/* edges may be shared with neighboring tiles, hence the atomics */
lm = nzv - loff;
lm = mz+1 < lm ? mz+1 : lm;
for (j = 1; j < mm; j++) {
for (i = 1; i < nn; i++) {
#pragma omp atomic
q[i+noff+nxv*(j+moff)+nxyv*loff] += sq[i+mxv*j];
if (lm > mz) {
#pragma omp atomic
q[i+noff+nxv*(j+moff)+nxyv*(lm+loff-1)]
+= sq[i+mxv*j+mxyv*(lm-1)];
}
}
}
nm = nxv - noff;
nm = mx+1 < nm ? mx+1 : nm;
mm = nyv - moff;
mm = my+1 < mm ? my+1 : mm;
for (k = 0; k < ll; k++) {
for (i = 1; i < nn; i++) {
#pragma omp atomic
q[i+noff+nxv*moff+nxyv*(k+loff)] += sq[i+mxyv*k];
if (mm > my) {
#pragma omp atomic
q[i+noff+nxv*(mm+moff-1)+nxyv*(k+loff)]
+= sq[i+mxv*(mm-1)+mxyv*k];
}
}
for (j = 0; j < mm; j++) {
#pragma omp atomic
q[noff+nxv*(j+moff)+nxyv*(k+loff)] += sq[mxv*j+mxyv*k];
if (nm > mx) {
#pragma omp atomic
q[nm+noff-1+nxv*(j+moff)+nxyv*(k+loff)]
+= sq[nm-1+mxv*j+mxyv*k];
}
}
}
if (lm > mz) {
for (i = 1; i < nn; i++) {
#pragma omp atomic
q[i+noff+nxv*moff+nxyv*(lm+loff-1)] += sq[i+mxyv*(lm-1)];
if (mm > my) {
#pragma omp atomic
q[i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1)]
+= sq[i+mxv*(mm-1)+mxyv*(lm-1)];
}
}
for (j = 0; j < mm; j++) {
#pragma omp atomic
q[noff+nxv*(j+moff)+nxyv*(lm+loff-1)]
+= sq[mxv*j+mxyv*(lm-1)];
if (nm > mx) {
#pragma omp atomic
q[nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1)]
+= sq[nm-1+mxv*j+mxyv*(lm-1)];
}
}
}
}
return;
#undef MXV
#undef MYV
#undef MZV
}
/*--------------------------------------------------------------------*/
void cknc2gppost3lt(float ppart[], float q[], int kpic[], float qm,
int nppmx, int idimp, int mx, int my, int mz,
int nxv, int nyv, int nzv, int mx1, int my1,
int mxyz1) {
/* for 3d code, this subroutine calculates particle charge density
using first-order linear interpolation, periodic boundaries
OpenMP/vector version using guard cells
data deposited in tiles
particles stored segmented array
33 flops/particle, 11 loads, 8 stores
input: all, output: q
charge density is approximated by values at the nearest grid points
q(n,m,l)=qm*(1.-dx)*(1.-dy)*(1.-dz)
q(n+1,m,l)=qm*dx*(1.-dy)*(1.-dz)
q(n,m+1,l)=qm*(1.-dx)*dy*(1.-dz)
q(n+1,m+1,l)=qm*dx*dy*(1.-dz)
q(n,m,l+1)=qm*(1.-dx)*(1.-dy)*dz
q(n+1,m,l+1)=qm*dx*(1.-dy)*dz
q(n,m+1,l+1)=qm*(1.-dx)*dy*dz
q(n+1,m+1,l+1)=qm*dx*dy*dz
where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = position z of particle n in tile m
q[l][k][j] = charge density at grid point j,k,l
kpic = number of particles per tile
qm = charge on particle, in units of e
nppmx = maximum number of particles in tile
idimp = size of phase space = 6
mx/my/mz = number of grids in sorting cell in x/y/z
nxv = first dimension of charge array, must be >= nx+1
nyv = second dimension of charge array, must be >= ny+1
nzv = third dimension of charge array, must be >= nz+1
mx1 = (system length in x direction - 1)/mx + 1
my1 = (system length in y direction - 1)/my + 1
mxyz1 = mx1*my1*mz1,
where mz1 = (system length in z direction - 1)/mz + 1
requires KNC, ppart needs to be 64 byte aligned
nppmx needs to be a multiple of 16
local data */
#define MXV 17
#define MYV 17
#define MZV 17
int mxy1, noff, moff, loff, npoff, npp, nps;
int i, j, k, l, m, nn, mm, ll, nm, lm, mxv, myv, mxyv, nxyv;
float x, y, z, w, dx1, dxp, dyp, dzp, amx, amy, amz;
__m512i v_noff, v_moff, v_loff, v_mxv, v_mxyv;
__m512i v_nn, v_mm, v_ll, v_it;
__m512 v_qm, v_one;
__m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz;
__m512 v_dx1, v_as, v_at;
__mmask16 v_m;
__attribute__((aligned(64))) unsigned int kk[16];
typedef union vfloat {float v[16]; __m512 v16;} vf;
__attribute__((aligned(64))) float sq[MXV*MYV*MZV];
/* __attribute__((aligned(64))) float sq[(mx+1)*(my+1)*(mz+1)]; */
vf vv[8];
mxy1 = mx1*my1;
/* mxv = MXV; */
/* myv = MYV; */
mxv = mx + 1;
myv = my + 1;
mxyv = mxv*myv;
nxyv = nxv*nyv;
v_mxv = _mm512_set1_epi32(mxv);
v_mxyv = _mm512_set1_epi32(mxyv);
v_qm = _mm512_set1_ps(qm);
v_one = _mm512_set1_ps(1.0f);
v_at = _mm512_set_ps(0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,
1.);
v_m = _mm512_cmp_ps_mask(v_at,v_one,_MM_CMPINT_LT);
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */
/* return; */
#pragma omp parallel for \
private(i,j,k,l,m,noff,moff,loff,npp,npoff,nps,nn,mm,ll,nm,lm,x,y,z,w, \
dxp,dyp,dzp,amx,amy,amz,dx1,v_noff,v_moff,v_loff,v_nn,v_mm,v_ll,v_it, \
v_x,v_y,v_z,v_dxp,v_dyp,v_dzp,v_amx,v_amy,v_amz,v_dx1,v_at,v_as,kk,sq,vv)
for (l = 0; l < mxyz1; l++) {
loff = l/mxy1;
k = l - mxy1*loff;
loff = mz*loff;
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
v_noff = _mm512_set1_epi32(noff);
v_moff = _mm512_set1_epi32(moff);
v_loff = _mm512_set1_epi32(loff);
npp = kpic[l];
npoff = idimp*nppmx*l;
/* zero out local accumulator */
/* for (j = 0; j < mxyv*(mz+1); j++) { */
/* sq[j] = 0.0f; */
/* } */
memset((void*)sq,0,mxyv*(mz+1)*sizeof(float));
nps = 16*(npp/16);
/* vector loop over particles in blocks of 16 */
for (j = 0; j < nps; j+=16) {
/* x = ppart[j+npoff]; */
/* y = ppart[j+nppmx+npoff]; */
/* z = ppart[j+2*nppmx+npoff]; */
v_x = _mm512_load_ps(&ppart[j+npoff]);
v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]);
v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]);
/* nn = x; */
/* mm = y; */
/* ll = z; */
v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z,
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
/* dxp = qm*(x - (float) nn); */
v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dxp = _mm512_mul_ps(v_qm,_mm512_sub_ps(v_x,v_dxp));
/* dyp = y - (float) mm; */
v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dyp = _mm512_sub_ps(v_y,v_dyp);
/* dzp = z - (float) ll; */
v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dzp = _mm512_sub_ps(v_z,v_dzp);
/* nn = nn - noff + mxv*(mm - moff) + mxyv*(ll - loff); */
v_nn = _mm512_sub_epi32(v_nn,v_noff);
v_mm = _mm512_sub_epi32(v_mm,v_moff);
v_ll = _mm512_sub_epi32(v_ll,v_loff);
v_it = _mm512_mullo_epi32(v_mxyv,v_ll);
v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv,v_mm));
v_nn = _mm512_add_epi32(v_nn,v_it);
/* amx = qm - dxp; */
/* amy = 1.0f - dyp; */
/* amz = 1.0f - dzp; */
v_amx = _mm512_sub_ps(v_qm,v_dxp);
v_amy = _mm512_sub_ps(v_one,v_dyp);
v_amz = _mm512_sub_ps(v_one,v_dzp);
/* dx1 = dxp*dyp; */
/* dyp = amx*dyp; */
/* amx = amx*amy; */
/* amy = dxp*amy; */
v_dx1 = _mm512_mul_ps(v_dxp,v_dyp);
v_dyp = _mm512_mul_ps(v_amx,v_dyp);
v_amx = _mm512_mul_ps(v_amx,v_amy);
v_amy = _mm512_mul_ps(v_dxp,v_amy);
/* x = amx*amz; */
/* y = amy*amz; */
/* z = dyp*amz; */
/* w = dx1*amz; */
vv[0].v16 = _mm512_mul_ps(v_amx,v_amz);
vv[1].v16 = _mm512_mul_ps(v_amy,v_amz);
vv[2].v16 = _mm512_mul_ps(v_dyp,v_amz);
vv[3].v16 = _mm512_mul_ps(v_dx1,v_amz);
vv[4].v16 = _mm512_mul_ps(v_amx,v_dzp);
vv[5].v16 = _mm512_mul_ps(v_amy,v_dzp);
vv[6].v16 = _mm512_mul_ps(v_dyp,v_dzp);
vv[7].v16 = _mm512_mul_ps(v_dx1,v_dzp);
_mm512_store_epi32(kk,v_nn);
/* deposit charge */
/* x = sq[nn] + amx*amz; */
/* y = sq[nn+1] + amy*amz; */
/* z = sq[nn+mxv] + dyp*amz; */
/* w = sq[nn+1+mxv] + dx1*amz; */
/* sq[nn] = x; */
/* sq[nn+1] = y; */
/* sq[nn+mxv] = z; */
/* sq[nn+1+mxv] = w; */
/* mm = nn + mxyv; */
/* x = sq[mm] + amx*dzp; */
/* y = sq[mm+1] + amy*dzp; */
/* z = sq[mm+mxv] + dyp*dzp; */
/* w = sq[mm+1+mxv] + dx1*dzp; */
/* sq[mm] = x; */
/* sq[mm+1] = y; */
/* sq[mm+mxv] = z; */
/* sq[mm+1+mxv] = w; */
for (i = 0; i < 16; i++) {
nn = kk[i];
x = sq[nn] + vv[0].v[i];
y = sq[nn+1] + vv[1].v[i];
z = sq[nn+mxv] + vv[2].v[i];
w = sq[nn+1+mxv] + vv[3].v[i];
sq[nn] = x;
sq[nn+1] = y;
sq[nn+mxv] = z;
sq[nn+1+mxv] = w;
mm = nn + mxyv;
x = sq[mm] + vv[4].v[i];
y = sq[mm+1] + vv[5].v[i];
z = sq[mm+mxv] + vv[6].v[i];
w = sq[mm+1+mxv] + vv[7].v[i];
sq[mm] = x;
sq[mm+1] = y;
sq[mm+mxv] = z;
sq[mm+1+mxv] = w;
}
}
/* loop over remaining particles */
for (j = nps; j < npp; j++) {
/* find interpolation weights */
x = ppart[j+npoff];
y = ppart[j+nppmx+npoff];
z = ppart[j+2*nppmx+npoff];
nn = x;
mm = y;
ll = z;
dxp = qm*(x - (float) nn);
dyp = y - (float) mm;
dzp = z - (float) ll;
nn = nn - noff + mxv*(mm - moff) + mxyv*(ll - loff);
amx = qm - dxp;
amy = 1.0f - dyp;
amz = 1.0f - dzp;
dx1 = dxp*dyp;
dyp = amx*dyp;
amx = amx*amy;
amy = dxp*amy;
/* deposit charge */
x = sq[nn] + amx*amz;
y = sq[nn+1] + amy*amz;
z = sq[nn+mxv] + dyp*amz;
w = sq[nn+1+mxv] + dx1*amz;
sq[nn] = x;
sq[nn+1] = y;
sq[nn+mxv] = z;
sq[nn+1+mxv] = w;
mm = nn + mxyv;
x = sq[mm] + amx*dzp;
y = sq[mm+1] + amy*dzp;
z = sq[mm+mxv] + dyp*dzp;
w = sq[mm+1+mxv] + dx1*dzp;
sq[mm] = x;
sq[mm+1] = y;
sq[mm+mxv] = z;
sq[mm+1+mxv] = w;
}
/* deposit charge to interior points in global array */
nn = nxv - noff;
nn = mx < nn ? mx : nn;
mm = nyv - moff;
mm = my < mm ? my : mm;
ll = nzv - loff;
ll = mz < ll ? mz : ll;
nps = 16*(nn/16);
for (k = 1; k < ll; k++) {
for (j = 1; j < mm; j++) {
/* vector loop over elements in blocks of 4 */
/* for (i = 1; i < nn; i++) { */
/* q[i+noff+nxv*(j+moff)+nxyv*(k+loff)] */
/* += sq[i+mxv*j+mxyv*k]; */
/* } */
for (i = 0; i < nps; i+=16) {
m = i + mxv*j + mxyv*k;
v_as = _mm512_loadunpacklo_ps(v_as,&sq[m]);
v_as = _mm512_loadunpackhi_ps(v_as,&sq[m+16]);
m = i + noff + nxv*(j + moff) + nxyv*(k + loff);
v_at = _mm512_loadunpacklo_ps(v_at,&q[m]);
v_at = _mm512_loadunpackhi_ps(v_at,&q[m+16]);
/* skip add for first element for i = 0 */
if (i==0)
v_at = _mm512_mask_add_ps(v_at,v_m,v_at,v_as);
else
v_at = _mm512_add_ps(v_at,v_as);
_mm512_packstorelo_ps(&q[m],v_at);
_mm512_packstorehi_ps(&q[m+16],v_at);
}
/* loop over remaining elements */
m = 1 > nps ? 1 : nps;
for (i = m ; i < nn; i++) {
q[i+noff+nxv*(j+moff)+nxyv*(k+loff)]
+= sq[i+mxv*j+mxyv*k];
}
}
}
/* deposit charge to edge points in global array */
lm = nzv - loff;
lm = mz+1 < lm ? mz+1 : lm;
for (j = 1; j < mm; j++) {
for (i = 1; i < nn; i++) {
#pragma omp atomic
q[i+noff+nxv*(j+moff)+nxyv*loff] += sq[i+mxv*j];
if (lm > mz) {
#pragma omp atomic
q[i+noff+nxv*(j+moff)+nxyv*(lm+loff-1)]
+= sq[i+mxv*j+mxyv*(lm-1)];
}
}
}
nm = nxv - noff;
nm = mx+1 < nm ? mx+1 : nm;
mm = nyv - moff;
mm = my+1 < mm ? my+1 : mm;
for (k = 0; k < ll; k++) {
for (i = 1; i < nn; i++) {
#pragma omp atomic
q[i+noff+nxv*moff+nxyv*(k+loff)] += sq[i+mxyv*k];
if (mm > my) {
#pragma omp atomic
q[i+noff+nxv*(mm+moff-1)+nxyv*(k+loff)]
+= sq[i+mxv*(mm-1)+mxyv*k];
}
}
for (j = 0; j < mm; j++) {
#pragma omp atomic
q[noff+nxv*(j+moff)+nxyv*(k+loff)] += sq[mxv*j+mxyv*k];
if (nm > mx) {
#pragma omp atomic
q[nm+noff-1+nxv*(j+moff)+nxyv*(k+loff)]
+= sq[nm-1+mxv*j+mxyv*k];
}
}
}
if (lm > mz) {
for (i = 1; i < nn; i++) {
#pragma omp atomic
q[i+noff+nxv*moff+nxyv*(lm+loff-1)] += sq[i+mxyv*(lm-1)];
if (mm > my) {
#pragma omp atomic
q[i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1)]
+= sq[i+mxv*(mm-1)+mxyv*(lm-1)];
}
}
for (j = 0; j < mm; j++) {
#pragma omp atomic
q[noff+nxv*(j+moff)+nxyv*(lm+loff-1)]
+= sq[mxv*j+mxyv*(lm-1)];
if (nm > mx) {
#pragma omp atomic
q[nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1)]
+= sq[nm-1+mxv*j+mxyv*(lm-1)];
}
}
}
}
return;
#undef MXV
#undef MYV
#undef MZV
}
/*--------------------------------------------------------------------*/
void ckncpporder3lt(float ppart[], float ppbuff[], int kpic[],
                    int ncl[], int ihole[], int idimp, int nppmx,
                    int nx, int ny, int nz, int mx, int my, int mz,
                    int mx1, int my1, int mz1, int npbmx, int ntmax,
                    int *irc) {
/* this subroutine sorts particles by x,y,z grid in tiles of mx, my, mz
   linear interpolation, with periodic boundary conditions
   tiles are assumed to be arranged in 3D linear memory
   algorithm has 3 steps. first, one finds particles leaving tile and
   stores their number in each direction, location, and destination in
   ncl and ihole. second, a prefix scan of ncl is performed and departing
   particles are buffered in ppbuff in direction order. finally, we copy
   the incoming particles from other tiles into ppart.
   input: all except ppbuff, ncl, ihole, irc
   output: ppart, ppbuff, kpic, ncl, ihole, irc
   ppart[m][0][n] = position x of particle n in tile m
   ppart[m][1][n] = position y of particle n in tile m
   ppart[m][2][n] = position z of particle n in tile m
   ppbuff[m][i][n] = i co-ordinate of particle n in tile m
   kpic[m] = number of particles in tile m
   ncl[m][i] = number of particles going to destination i, tile m
   ihole[m][:][0] = location of hole in array left by departing particle
   ihole[m][:][1] = direction destination of particle leaving hole
   all for tile m
   ihole[m][0][0] = ih, number of holes left (error, if negative)
   idimp = size of phase space = 6
   nppmx = maximum number of particles in tile
   nx/ny/nz = system length in x/y/z direction
   mx/my/mz = number of grids in sorting cell in x/y/z
   mx1 = (system length in x direction - 1)/mx + 1
   my1 = (system length in y direction - 1)/my + 1
   mz1 = (system length in z direction - 1)/mz + 1
   npbmx = size of buffer array ppbuff
   ntmax = size of hole array for particles leaving tiles
   irc = maximum overflow, returned only if error occurs, when irc > 0
   requires KNC, ppart, ppbuff need to be 64 byte aligned
   nppmx, npbmx need to be a multiple of 16
local data */
   int mxy1, mxyz1, noff, moff, loff, npoff, npp, nps, nboff, ncoff;
   int i, j, k, l, ii, kx, ky, kz, ih, nh, ist, nn, mm, ll;
   int ip, j1, j2, kxl, kxr, kk, kl, kr, lk, lr;
   float anx, any, anz, edgelx, edgely, edgelz, edgerx, edgery, edgerz;
   float dx, dy, dz;
   int ks[26];
   __m512i v_ist, v_it, v_0, v_1, v_3, v_9;
   __m512i v_m1, v_m2, v_m3, v_npp, v_mm, v_is, v_it0, v_ioff;
   __m512 v_anx, v_any, v_anz;
   __m512 v_dx, v_dy, v_dz, v_x;
   __m512 v_edgelx, v_edgely, v_edgelz, v_edgerx, v_edgery, v_edgerz;
   __m512 v_zero;
   __mmask16 msk1, msk2;
/* 64-byte aligned scratch: ls = per-tile counters/scan, lm = ihole copy */
   __attribute__((aligned(64))) unsigned int ls[32], lm[32];
   mxy1 = mx1*my1;
   mxyz1 = mxy1*mz1;
   anx = (float) nx;
   any = (float) ny;
   anz = (float) nz;
   v_0 = _mm512_set1_epi32(0);
   v_1 = _mm512_set1_epi32(1);
   v_3 = _mm512_set1_epi32(3);
   v_9 = _mm512_set1_epi32(9);
   v_anx = _mm512_set1_ps(anx);
   v_any = _mm512_set1_ps(any);
   v_anz = _mm512_set1_ps(anz);
   v_zero = _mm512_setzero_ps();
/* find and count particles leaving tiles and determine destination */
/* update ppart, ihole, ncl */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,l,ii,noff,moff,loff,npp,npoff,nps,nn,mm,ll,ih,nh,ist,dx, \
dy,dz,edgelx,edgely,edgelz,edgerx,edgery,edgerz,v_it,v_ist,v_edgelx, \
v_edgely,v_edgelz,v_edgerx,v_edgery,v_edgerz,v_dx,v_dy,v_dz,v_x,msk1, \
msk2,ls)
   for (l = 0; l < mxyz1; l++) {
/* decode linear tile index l into grid offsets (noff,moff,loff) */
      loff = l/mxy1;
      k = l - mxy1*loff;
      loff = mz*loff;
      noff = k/mx1;
      moff = my*noff;
      noff = mx*(k - mx1*noff);
      npp = kpic[l];
      npoff = idimp*nppmx*l;
/* clamp tile extents at the global grid boundary */
      nn = nx - noff;
      nn = mx < nn ? mx : nn;
      mm = ny - moff;
      mm = my < mm ? my : mm;
      ll = nz - loff;
      ll = mz < ll ? mz : ll;
      ih = 0;
      nh = 0;
/* tile boundaries in physical coordinates */
      edgelx = noff;
      edgerx = noff + nn;
      edgely = moff;
      edgery = moff + mm;
      edgelz = loff;
      edgerz = loff + ll;
/* noff is reused from here on as the base offset into ihole for tile l */
      noff = (ntmax+1)*l;
      v_edgelx = _mm512_set1_ps(edgelx);
      v_edgely = _mm512_set1_ps(edgely);
      v_edgelz = _mm512_set1_ps(edgelz);
      v_edgerx = _mm512_set1_ps(edgerx);
      v_edgery = _mm512_set1_ps(edgery);
      v_edgerz = _mm512_set1_ps(edgerz);
/* clear counters */
/* for (j = 0; j < 26; j++) { */
/*    ncl[j+26*l] = 0;       */
/* }                         */
      memset((void*)&ncl[26*l],0,26*sizeof(int));
      nps = 16*(npp/16);
/* loop over particles in tile in blocks of 16 */
      for (j = 0; j < nps; j+=16) {
/* dx = ppart[j+npoff];         */
/* dy = ppart[j+nppmx+npoff];   */
/* dz = ppart[j+2*nppmx+npoff]; */
         v_dx = _mm512_load_ps(&ppart[j+npoff]);
         v_dy = _mm512_load_ps(&ppart[j+nppmx+npoff]);
         v_dz = _mm512_load_ps(&ppart[j+2*nppmx+npoff]);
/* find particles going out of bounds */
/* ist = 0; */
         v_ist = _mm512_setzero_epi32();
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* ist = direction particle is going */
/* if (dx >= edgerx) {              */
/*    if (dx >= anx)                */
/*       ppart[j+npoff] = dx - anx; */
/*    ist = 2;                      */
/* }                                */
         msk1 = _mm512_cmp_ps_mask(v_dx,v_edgerx,_MM_CMPINT_GE);
         msk2 = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT);
         ii = _mm512_mask2int(_mm512_kor(msk1,msk2));
/* execute if either test result is true for any particle */
         if (ii != 0) {
            ii = _mm512_mask2int(msk1);
            v_x = v_dx;
/* write output if test result is true for any particle */
            if (ii != 0) {
               v_it = _mm512_add_epi32(v_1,v_1);
               v_ist = _mm512_mask_add_epi32(v_ist,msk1,v_ist,v_it);
               msk1 = _mm512_cmp_ps_mask(v_dx,v_anx,_MM_CMPINT_GE);
               v_x = _mm512_mask_sub_ps(v_x,msk1,v_dx,v_anx);
               ii = _mm512_mask2int(msk1);
               if (ii != 0)
                  _mm512_store_ps(&ppart[j+npoff],v_x);
            }
/* if (dx < edgelx) {         */
/*    if (dx < 0.0) {         */
/*       dx += anx;           */
/*       if (dx < anx)        */
/*          ist = 1;          */
/*       else                 */
/*          dx = 0.0;         */
/*       ppart[j+npoff] = dx; */
/*    }                       */
/*    else {                  */
/*       ist = 1;             */
/*    }                       */
/* }                          */
/* write output if test result is true for any particle */
            ii = _mm512_mask2int(msk2);
            if (ii != 0) {
               v_it = _mm512_mask_mov_epi32(v_0,msk2,v_1);
               msk2 = _mm512_cmp_ps_mask(v_dx,v_zero,_MM_CMPINT_LT);
               v_x = _mm512_mask_add_ps(v_x,msk2,v_dx,v_anx);
               msk1 = _mm512_cmp_ps_mask(v_x,v_anx,_MM_CMPINT_GE);
               msk1 = _mm512_kand(msk1,msk2);
               v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero);
               v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0);
               v_ist = _mm512_add_epi32(v_ist,v_it);
               ii = _mm512_mask2int(msk2);
               if (ii != 0)
                  _mm512_store_ps(&ppart[j+npoff],v_x);
            }
         }
/* if (dy >= edgery) {                    */
/*    if (dy >= any)                      */
/*       ppart[j+nppmx+npoff] = dy - any; */
/*    ist += 6;                           */
/* }                                      */
         msk1 = _mm512_cmp_ps_mask(v_dy,v_edgery,_MM_CMPINT_GE);
         msk2 = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT);
         ii = _mm512_mask2int(_mm512_kor(msk1,msk2));
/* execute if either test result is true for any particle */
         if (ii != 0) {
            ii = _mm512_mask2int(msk1);
            v_x = v_dy;
/* write output if test result is true for any particle */
            if (ii != 0) {
               v_it = _mm512_add_epi32(v_3,v_3);
               v_ist = _mm512_mask_add_epi32(v_ist,msk1,v_ist,v_it);
               msk1 = _mm512_cmp_ps_mask(v_dy,v_any,_MM_CMPINT_GE);
               v_x = _mm512_mask_sub_ps(v_x,msk1,v_dy,v_any);
               ii = _mm512_mask2int(msk1);
               if (ii != 0)
                  _mm512_store_ps(&ppart[j+nppmx+npoff],v_x);
            }
/* if (dy < edgely) {               */
/*    if (dy < 0.0) {               */
/*       dy += any;                 */
/*       if (dy < any)              */
/*          ist += 3;               */
/*       else                       */
/*          dy = 0.0;               */
/*       ppart[j+nppmx+npoff] = dy; */
/*    }                             */
/*    else {                        */
/*       ist += 3;                  */
/*    }                             */
/* }                                */
/* write output if test result is true for any particle */
            ii = _mm512_mask2int(msk2);
            if (ii != 0) {
               v_it = _mm512_mask_mov_epi32(v_0,msk2,v_3);
               msk2 = _mm512_cmp_ps_mask(v_dy,v_zero,_MM_CMPINT_LT);
               v_x = _mm512_mask_add_ps(v_x,msk2,v_dy,v_any);
               msk1 = _mm512_cmp_ps_mask(v_x,v_any,_MM_CMPINT_GE);
               msk1 = _mm512_kand(msk1,msk2);
               v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero);
               v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0);
               v_ist = _mm512_add_epi32(v_ist,v_it);
               ii = _mm512_mask2int(msk2);
               if (ii != 0)
                  _mm512_store_ps(&ppart[j+nppmx+npoff],v_x);
            }
         }
/* if (dz >= edgerz) {                      */
/*    if (dz >= anz)                        */
/*       ppart[j+2*nppmx+npoff] = dz - anz; */
/*    ist += 18;                            */
/* }                                        */
         msk1 = _mm512_cmp_ps_mask(v_dz,v_edgerz,_MM_CMPINT_GE);
         msk2 = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT);
         ii = _mm512_mask2int(_mm512_kor(msk1,msk2));
/* execute if either test result is true for any particle */
         if (ii != 0) {
            ii = _mm512_mask2int(msk1);
            v_x = v_dz;
/* write output if test result is true for any particle */
            if (ii != 0) {
               v_it = _mm512_add_epi32(v_9,v_9);
               v_ist = _mm512_mask_add_epi32(v_ist,msk1,v_ist,v_it);
               msk1 = _mm512_cmp_ps_mask(v_dz,v_anz,_MM_CMPINT_GE);
               v_x = _mm512_mask_sub_ps(v_x,msk1,v_dz,v_anz);
               ii = _mm512_mask2int(msk1);
               if (ii != 0)
                  _mm512_store_ps(&ppart[j+2*nppmx+npoff],v_x);
            }
/* if (dz < edgelz) {                 */
/*    if (dz < 0.0) {                 */
/*       dz += anz;                   */
/*       if (dz < anz)                */
/*          ist += 9;                 */
/*       else                         */
/*          dz = 0.0;                 */
/*       ppart[j+2*nppmx+npoff] = dz; */
/*    }                               */
/*    else {                          */
/*       ist += 9;                    */
/*    }                               */
/* }                                  */
/* write output if test result is true for any particle */
            ii = _mm512_mask2int(msk2);
            if (ii != 0) {
               v_it = _mm512_mask_mov_epi32(v_0,msk2,v_9);
               msk2 = _mm512_cmp_ps_mask(v_dz,v_zero,_MM_CMPINT_LT);
               v_x = _mm512_mask_add_ps(v_x,msk2,v_dz,v_anz);
               msk1 = _mm512_cmp_ps_mask(v_x,v_anz,_MM_CMPINT_GE);
               msk1 = _mm512_kand(msk1,msk2);
               v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero);
               v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0);
               v_ist = _mm512_add_epi32(v_ist,v_it);
               ii = _mm512_mask2int(msk2);
               if (ii != 0)
                  _mm512_store_ps(&ppart[j+2*nppmx+npoff],v_x);
            }
         }
/* increment counters */
/* if (ist > 0) {                           */
/*    ncl[ist+26*l-1] += 1;                 */
/*    ih += 1;                              */
/*    if (ih <= ntmax) {                    */
/*       ihole[2*(ih+(ntmax+1)*l)] = j + i + 1; */
/*       ihole[1+2*(ih+(ntmax+1)*l)] = ist; */
/*    }                                     */
/*    else {                                */
/*       nh = 1;                            */
/*    }                                     */
/* }                                        */
/* spill the 16 direction codes and update ncl/ihole serially */
         _mm512_store_epi32(ls,v_ist);
         for (i = 0; i < 16; i++) {
            ist = ls[i];
            if (ist > 0) {
               ncl[ist+26*l-1] += 1;
               ih += 1;
               if (ih <= ntmax) {
                  ihole[2*(ih+noff)] = j + i + 1;
                  ihole[1+2*(ih+noff)] = ist;
               }
               else {
                  nh = 1;
               }
            }
         }
      }
/* loop over remaining particles in tile */
      for (j = nps; j < npp; j++) {
         dx = ppart[j+npoff];
         dy = ppart[j+nppmx+npoff];
         dz = ppart[j+2*nppmx+npoff];
/* find particles going out of bounds */
         ist = 0;
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* ist = direction particle is going */
         if (dx >= edgerx) {
            if (dx >= anx)
               ppart[j+npoff] = dx - anx;
            ist = 2;
         }
         else if (dx < edgelx) {
            if (dx < 0.0) {
               dx += anx;
               if (dx < anx)
                  ist = 1;
               else
                  dx = 0.0;
               ppart[j+npoff] = dx;
            }
            else {
               ist = 1;
            }
         }
         if (dy >= edgery) {
            if (dy >= any)
               ppart[j+nppmx+npoff] = dy - any;
            ist += 6;
         }
         else if (dy < edgely) {
            if (dy < 0.0) {
               dy += any;
               if (dy < any)
                  ist += 3;
               else
                  dy = 0.0;
               ppart[j+nppmx+npoff] = dy;
            }
            else {
               ist += 3;
            }
         }
         if (dz >= edgerz) {
            if (dz >= anz)
               ppart[j+2*nppmx+npoff] = dz - anz;
            ist += 18;
         }
         else if (dz < edgelz) {
            if (dz < 0.0) {
               dz += anz;
               if (dz < anz)
                  ist += 9;
               else
                  dz = 0.0;
               ppart[j+2*nppmx+npoff] = dz;
            }
            else {
               ist += 9;
            }
         }
         if (ist > 0) {
            ncl[ist+26*l-1] += 1;
            ih += 1;
            if (ih <= ntmax) {
               ihole[2*(ih+noff)] = j + 1;
               ihole[1+2*(ih+noff)] = ist;
            }
            else {
               nh = 1;
            }
         }
      }
/* set error and end of file flag */
      if (nh > 0) {
         *irc = ih;
         ih = -ih;
      }
      ihole[2*noff] = ih;
   }
/* ihole overflow */
   if (*irc > 0)
      return;
/* buffer particles that are leaving tile: update ppbuff, ncl */
/* loop over tiles */
/* msk1 selects the low 10 lanes: 26 ncl entries = 16 + 10 */
   msk1 = _mm512_int2mask(1023);
/* lane-permutation patterns for passes 3 and 4 of the prefix scan */
   v_m1 = _mm512_set_epi32(11,11,11,11,11,10,9,8,3,3,3,3,3,2,1,0);
   v_m2 = _mm512_set_epi32(7,7,7,7,7,7,7,7,7,6,5,4,3,2,1,0);
#pragma omp parallel for \
private(i,j,l,npoff,nboff,noff,nps,mm,ii,ll,j1,ist,nh,ip,v_it,v_is, \
v_it0,v_ioff,ls,lm)
   for (l = 0; l < mxyz1; l++) {
      npoff = idimp*nppmx*l;
      nboff = idimp*npbmx*l;
      noff = (ntmax+1)*l;
/* find address offset for ordered ppbuff array */
/* isum = 0;                     */
/* for (j = 0; j < 26; j++) {    */
/*    ist = ncl[j+26*l];         */
/*    ncl[j+26*l] = isum;        */
/*    isum += ist;               */
/* }                             */
/* perform exclusive prefix scan */
/* load 26 data elements into 32 length vector with zero padding */
      mm = 26*l;
      v_it = _mm512_loadunpacklo_epi32(v_0,&ncl[mm]);
      v_it = _mm512_loadunpackhi_epi32(v_it,&ncl[mm+16]);
      _mm512_store_epi32(ls,v_it);
      v_is = _mm512_mask_loadunpacklo_epi32(v_0,msk1,&ncl[mm+16]);
      v_is = _mm512_mask_loadunpackhi_epi32(v_is,msk1,&ncl[mm+32]);
      _mm512_store_epi32(&ls[16],v_is);
      v_ioff = _mm512_setzero_epi32();
/* vector loop over elements in blocks of 16 */
/* four masked-add passes combine lanes 1, 2, 4 and 8 apart; the masks */
/* 43690/52428/61680/65280 = 0xaaaa/0xcccc/0xf0f0/0xff00 pick the */
/* lanes updated in each pass; subtracting the input afterwards turns */
/* the inclusive scan into the exclusive scan required here */
      for (j = 0; j < 32; j+=16) {
/* load data */
         v_it0 = _mm512_load_epi32(&ls[j]);
/* first pass */
         v_is = _mm512_shuffle_epi32(v_it0,177);
         v_it = _mm512_mask_add_epi32(v_it0,_mm512_int2mask(43690),
                v_it0,v_is);
/* second pass */
         v_is = _mm512_shuffle_epi32(v_it,80);
         v_it = _mm512_mask_add_epi32(v_it,_mm512_int2mask(52428),v_it,
                v_is);
/* third pass */
         v_is = _mm512_permutevar_epi32(v_m1,v_it);
         v_it = _mm512_mask_add_epi32(v_it,_mm512_int2mask(61680),v_it,
                v_is);
/* fourth pass */
         v_is = _mm512_permutevar_epi32(v_m2,v_it);
         v_it = _mm512_mask_add_epi32(v_it,_mm512_int2mask(65280),v_it,
                v_is);
/* add offset */
         v_it = _mm512_add_epi32(v_it,v_ioff);
/* next offset */
         if (j==0) {
            v_ioff = _mm512_shuffle_epi32(v_it,255);
            v_ioff = _mm512_permute4f128_epi32(v_ioff,255);
         }
/* subtract for exclusive scan */
         v_it = _mm512_sub_epi32(v_it,v_it0);
/* write data */
         _mm512_store_epi32(&ls[j],v_it);
      }
      nh = ihole[2*noff];
      nps = 16*(nh/16);
/* nps = (nh >> 4) << 4; */
      ip = 0;
/* loop over particles leaving tile in groups of 16 */
      for (j = 0; j < nps; j+=16) {
/* j1 = ihole[2*(j+1+(ntmax+1)*l)] - 1; */
/* ist = ihole[1+2*(j+1+(ntmax+1)*l)];  */
/* copy 32 ints (16 location/destination pairs) from ihole into lm */
         mm = 2*(j+1+noff);
         v_it = _mm512_loadunpacklo_epi32(v_0,&ihole[mm]);
         v_it = _mm512_loadunpackhi_epi32(v_it,&ihole[mm+16]);
         _mm512_store_epi32(lm,v_it);
         mm += 16;
         v_is = _mm512_loadunpacklo_epi32(v_0,&ihole[mm]);
         v_is = _mm512_loadunpackhi_epi32(v_is,&ihole[mm+16]);
         _mm512_store_epi32(&lm[16],v_is);
/* buffer particles that are leaving tile, in direction order */
         for (ll = 0; ll < 16; ll++) {
            j1 = lm[2*ll] - 1;
            ist = lm[1+2*ll];
            ii = ls[ist-1];
            if (ii < npbmx) {
               for (i = 0; i < idimp; i++) {
                  ppbuff[ii+npbmx*i+nboff]
                  = ppart[j1+nppmx*i+npoff];
               }
            }
            else {
               ip = 1;
            }
            ls[ist-1] = ii + 1;
         }
      }
/* loop over remaining particles leaving tile */
      for (j = nps; j < nh; j++) {
/* buffer particles that are leaving tile, in direction order */
         j1 = ihole[2*(j+1+noff)] - 1;
         ist = ihole[1+2*(j+1+noff)];
         ii = ls[ist-1];
         if (ii < npbmx) {
            for (i = 0; i < idimp; i++) {
               ppbuff[ii+npbmx*i+nboff]
               = ppart[j1+nppmx*i+npoff];
            }
         }
         else {
            ip = 1;
         }
         ls[ist-1] = ii + 1;
      }
/* store 26 data elements into ncl */
      mm = 26*l;
      v_it = _mm512_load_epi32(ls);
      v_is = _mm512_load_epi32(&ls[16]);
      _mm512_packstorelo_epi32(&ncl[mm],v_it);
      _mm512_packstorehi_epi32(&ncl[mm+16],v_it);
      _mm512_mask_packstorelo_epi32(&ncl[mm+16],msk1,v_is);
      _mm512_mask_packstorehi_epi32(&ncl[mm+32],msk1,v_is);
/* set error */
      if (ip > 0)
         *irc = ncl[25+26*l];
   }
/* ppbuff overflow */
   if (*irc > 0)
      return;
/* copy incoming particles from buffer into ppart: update ppart, kpic */
/* loop over tiles */
   v_ioff = _mm512_set_epi32(15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
   v_m1 = _mm512_set1_epi32(nppmx);
#pragma omp parallel for \
private(i,j,k,l,ii,kk,npp,nps,npoff,noff,nboff,kx,ky,kz,kl,kr,kxl,kxr, \
lk,ll,lr,ih,nh,nn,mm,ncoff,ist,j1,j2,ip,v_m2,v_m3,v_it,v_is,v_it0,v_mm, \
v_npp,v_x,msk1,ks,ls)
   for (l = 0; l < mxyz1; l++) {
      npp = kpic[l];
      npoff = idimp*nppmx*l;
      noff = (ntmax+1)*l;
      v_m2 = _mm512_set1_epi32(noff+1);
      v_m3 = _mm512_set1_epi32(npoff);
      kz = l/mxy1;
      k = l - mxy1*kz;
/* loop over tiles in z, assume periodic boundary conditions */
      lk = kz*mxy1;
/* find tile behind */
      ll = kz - 1;
      if (ll < 0)
         ll += mz1;
      ll = ll*mxy1;
/* find tile in front */
      lr = kz + 1;
      if (lr >= mz1)
         lr -= mz1;
      lr = lr*mxy1;
      ky = k/mx1;
/* loop over tiles in y, assume periodic boundary conditions */
      kk = ky*mx1;
/* find tile above */
      kl = ky - 1;
      if (kl < 0)
         kl += my1;
      kl = kl*mx1;
/* find tile below */
      kr = ky + 1;
      if (kr >= my1)
         kr -= my1;
      kr = kr*mx1;
/* loop over tiles in x, assume periodic boundary conditions */
      kx = k - ky*mx1;
      kxl = kx - 1 ;
      if (kxl < 0)
         kxl += mx1;
      kxr = kx + 1;
      if (kxr >= mx1)
         kxr -= mx1;
/* find tile number for different directions */
      ks[0] = kxr + kk + lk;
      ks[1] = kxl + kk + lk;
      ks[2] = kx + kr + lk;
      ks[3] = kxr + kr + lk;
      ks[4] = kxl + kr + lk;
      ks[5] = kx + kl + lk;
      ks[6] = kxr + kl + lk;
      ks[7] = kxl + kl + lk;
      ks[8] = kx + kk + lr;
      ks[9] = kxr + kk + lr;
      ks[10] = kxl + kk + lr;
      ks[11] = kx + kr + lr;
      ks[12] = kxr + kr + lr;
      ks[13] = kxl + kr + lr;
      ks[14] = kx + kl + lr;
      ks[15] = kxr + kl + lr;
      ks[16] = kxl + kl + lr;
      ks[17] = kx + kk + ll;
      ks[18] = kxr + kk + ll;
      ks[19] = kxl + kk + ll;
      ks[20] = kx + kr + ll;
      ks[21] = kxr + kr + ll;
      ks[22] = kxl + kr + ll;
      ks[23] = kx + kl + ll;
      ks[24] = kxr + kl + ll;
      ks[25] = kxl + kl + ll;
/* loop over directions */
      nh = ihole[2*noff];
      ncoff = 0;
      ih = 0;
      ist = 0;
      j1 = 0;
/* v_it0/v_is encode lane indices relative to nh so that the vector */
/* compare below separates lanes that fill existing holes (index < 0, */
/* gathered from ihole) from lanes that append at the end (npp added); */
/* see the gather in the blocked loop */
      v_it0 = _mm512_set1_epi32(nh);
      v_is = _mm512_add_epi32(v_m2,v_it0);
      v_it0 = _mm512_sub_epi32(v_ioff,v_it0);
      v_npp = _mm512_set1_epi32(npp);
      for (ii = 0; ii < 26; ii++) {
         nboff = idimp*npbmx*ks[ii];
         if (ii > 0)
            ncoff = ncl[ii-1+26*ks[ii]];
/* ip = number of particles coming from direction ii */
         ip = ncl[ii+26*ks[ii]] - ncoff;
/* nps = 16*(ip/16); */
         nps = (ip >> 4) << 4;
/* loop over particles in this direction in groups of 16 */
         for (j = 0; j < nps; j+=16) {
/* insert incoming particles into holes */
/* ih += 1;                            */
/* if (ih <= nh) {                     */
/*    j1 = ihole[2*(ih+noff)] - 1;     */
/* }                                   */
/* place overflow at end of array */
/* else {                          */
/*    j1 = npp;                    */
/*    npp += 1;                    */
/* }                               */
            v_mm = _mm512_add_epi32(_mm512_set1_epi32(ih),v_it0);
            msk1 = _mm512_cmp_epi32_mask(v_mm,v_0,_MM_CMPINT_LT);
            v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_is);
/* double the index for masked lanes: ihole holds (location, */
/* destination) int pairs, so the hole location sits at 2*entry */
            v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_mm);
            v_mm = _mm512_mask_add_epi32(v_mm,_mm512_knot(msk1),v_mm,
                   v_npp);
            v_it = _mm512_mask_i32gather_epi32(v_mm,msk1,v_mm,
                   (int *)ihole,4);
            v_it = _mm512_mask_sub_epi32(v_it,msk1,v_it,v_1);
            ih += 16;
            nn = ih - nh;
            if (nn > 0) {
               nn = nn < 16 ? nn : 16;
               npp += nn;
            }
            msk1 = _mm512_cmp_epi32_mask(v_it,v_m1,_MM_CMPINT_LT);
            ll = _mm512_mask2int(_mm512_knot(msk1));
            v_it = _mm512_add_epi32(v_it,v_m3);
            for (i = 0; i < idimp; i++) {
/* if (j1 < nppmx)                           */
/*    ppart[j1+nppmx*i+npoff]                */
/*    = ppbuff[j+ncoff+npbmx*i+nboff];       */
               mm = j + ncoff + npbmx*i + nboff;
               v_x = _mm512_loadunpacklo_ps(v_x,&ppbuff[mm]);
               v_x = _mm512_loadunpackhi_ps(v_x,&ppbuff[mm+16]);
               if (ll==0) {
                  _mm512_i32scatter_ps((float *)ppart,v_it,v_x,4);
               }
               else {
                  _mm512_mask_i32scatter_ps((float *)ppart,msk1,v_it,
                  v_x,4);
               }
               v_it = _mm512_add_epi32(v_it,v_m1);
            }
            if (ll != 0) {
               ist = 1;
            }
         }
/* loop over remaining particles in this direction */
         for (j = nps; j < ip; j++) {
            ih += 1;
/* insert incoming particles into holes */
            if (ih <= nh) {
               j1 = ihole[2*(ih+(ntmax+1)*l)] - 1;
            }
/* place overflow at end of array */
            else {
               j1 = npp;
               npp += 1;
            }
            if (j1 < nppmx) {
               for (i = 0; i < idimp; i++) {
                  ppart[j1+nppmx*i+npoff]
                  = ppbuff[j+ncoff+npbmx*i+nboff];
               }
            }
            else {
               ist = 1;
            }
         }
      }
/* set error */
      if (ist > 0)
         *irc = j1+1;
/* fill up remaining holes in particle array with particles from bottom */
/* holes with locations great than npp-ip do not need to be filled      */
      if (ih < nh) {
         ip = nh - ih;
         ii = nh;
         nn = ihole[2*(ii+noff)] - 1;
         v_it0 = _mm512_set1_epi32(nn);
         ih += 1;
         j2 = ihole[2*(ih+noff)] - 1;
         v_m2 = _mm512_sub_epi32(v_m2,v_1);
/* move particles from end into remaining holes */
/* holes are processed in increasing order      */
/* nps = 16*(ip/16); */
         nps = (ip >> 4) << 4;
/* loop over particles in groups of 16 */
         for (j = 0; j < nps; j+=16) {
/* j2 = ihole[2*(ih+noff)] - 1; */
            v_mm = _mm512_add_epi32(_mm512_set1_epi32(ih),v_ioff);
            v_mm = _mm512_add_epi32(v_mm,v_m2);
            v_mm = _mm512_add_epi32(v_mm,v_mm);
            v_is = _mm512_i32gather_epi32(v_mm,(int *)ihole,4);
            v_is = _mm512_sub_epi32(v_is,v_1);
/* j1 = npp - j - 1;                           */
/* if (j1==nn) {                               */
/*    ii -= 1;                                 */
/*    nn = ihole[2*(ii+(ntmax+1)*l)] - 1;      */
/* }                                           */
/* collect source indices, skipping entries that are themselves holes */
            kk = 0;
            for (ll = 0; ll < 16; ll++) {
               j1 = npp - j - ll - 1;
               if (j1==nn) {
                  ii -= 1;
                  nn = ihole[2*(ii+(ntmax+1)*l)] - 1;
               }
               else {
                  ls[kk] = j1;
                  kk += 1;
               }
            }
            v_it = _mm512_load_epi32(ls);
            v_it0 = _mm512_set1_epi32(kk);
            msk1 = _mm512_cmp_epi32_mask(v_ioff,v_it0,_MM_CMPINT_LT);
            v_is = _mm512_add_epi32(v_is,v_m3);
            v_it = _mm512_add_epi32(v_it,v_m3);
            for (i = 0; i < idimp; i++) {
/* ppart[j2+nppmx*i+npoff]    */
/* = ppart[j1+nppmx*i+npoff]; */
               if (kk==16) {
                  v_x = _mm512_i32gather_ps(v_it,(float *)ppart,4);
                  _mm512_i32scatter_ps((float *)ppart,v_is,v_x,4);
               }
               else {
                  v_x = _mm512_mask_i32gather_ps(v_zero,msk1,v_it,
                        (float *)ppart,4);
                  _mm512_mask_i32scatter_ps((float *)ppart,msk1,v_is,
                  v_x,4);
               }
               v_is = _mm512_add_epi32(v_is,v_m1);
               v_it = _mm512_add_epi32(v_it,v_m1);
            }
            ih += kk;
/* holes with locations great than npp-ip do not need to be filled */
         }
/* loop over remaining particles */
         if (nps < ip) {
            nn = ihole[2*(ii+noff)] - 1;
            j2 = ihole[2*(ih+noff)] - 1;
         }
         for (j = nps; j < ip; j++) {
            j1 = npp - j - 1;
            if (j1==nn) {
               ii -= 1;
               nn = ihole[2*(ii+noff)] - 1;
            }
            else {
               for (i = 0; i < idimp; i++) {
                  ppart[j2+nppmx*i+npoff]
                  = ppart[j1+nppmx*i+npoff];
               }
               ih += 1;
               j2 = ihole[2*(ih+(ntmax+1)*l)] - 1;
            }
         }
         npp -= ip;
      }
/* store the updated particle count for this tile */
      kpic[l] = npp;
   }
   return;
}
/*--------------------------------------------------------------------*/
void ckncpporderf3lt(float ppart[], float ppbuff[], int kpic[],
int ncl[], int ihole[], int idimp, int nppmx,
int mx1, int my1, int mz1, int npbmx, int ntmax,
int *irc) {
/* this subroutine sorts particles by x,y,z grid in tiles of mx, my, mz
linear interpolation, with periodic boundary conditions
tiles are assumed to be arranged in 3D linear memory
the algorithm has 2 steps. first, a prefix scan of ncl is performed
and departing particles are buffered in ppbuff in direction order.
then we copy the incoming particles from other tiles into ppart.
it assumes that the number, location, and destination of particles
leaving a tile have been previously stored in ncl and ihole by the
ckncgppushf3lt subroutine.
input: all except ppbuff, irc
output: ppart, ppbuff, kpic, ncl, irc
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = position z of particle n in tile m
ppbuff[m][i][n] = i co-ordinate of particle n in tile m
kpic[m] = number of particles in tile m
ncl[m][i] = number of particles going to destination i, tile m
ihole[m][:][0] = location of hole in array left by departing particle
ihole[m][:][1] = direction destination of particle leaving hole
all for tile m
ihole[m][0][0] = ih, number of holes left (error, if negative)
idimp = size of phase space = 6
nppmx = maximum number of particles in tile
mx1 = (system length in x direction - 1)/mx + 1
my1 = (system length in y direction - 1)/my + 1
mz1 = (system length in z direction - 1)/mz + 1
npbmx = size of buffer array ppbuff
ntmax = size of hole array for particles leaving tiles
irc = maximum overflow, returned only if error occurs, when irc > 0
requires KNC, ppart, ppbuff need to be 64 byte aligned
nppmx, npbmx need to be a multiple of 16
local data */
int mxy1, mxyz1, noff, npp, npoff, nps, nboff, ncoff;
int i, j, k, l, ii, kx, ky, kz, ih, nh, ist, nn, mm, ll;
int ip, j1, j2, kxl, kxr, kk, kl, kr, lk, lr;
int ks[26];
__m512i v_it, v_0, v_1;
__m512i v_m1, v_m2, v_m3, v_npp, v_mm, v_is, v_it0, v_ioff;
__m512 v_x, v_zero;
__mmask16 msk1;
__attribute__((aligned(64))) unsigned int ls[32], lm[32];
mxy1 = mx1*my1;
mxyz1 = mxy1*mz1;
v_0 = _mm512_set1_epi32(0);
v_1 = _mm512_set1_epi32(1);
v_zero = _mm512_setzero_ps();
/* buffer particles that are leaving tile: update ppbuff, ncl */
/* loop over tiles */
msk1 = _mm512_int2mask(1023);
v_m1 = _mm512_set_epi32(11,11,11,11,11,10,9,8,3,3,3,3,3,2,1,0);
v_m2 = _mm512_set_epi32(7,7,7,7,7,7,7,7,7,6,5,4,3,2,1,0);
#pragma omp parallel for \
private(i,j,l,npoff,nboff,noff,nps,mm,ii,ll,j1,ist,nh,ip,v_it,v_is, \
v_it0,v_ioff,ls,lm)
for (l = 0; l < mxyz1; l++) {
npoff = idimp*nppmx*l;
nboff = idimp*npbmx*l;
noff = (ntmax+1)*l;
/* find address offset for ordered ppbuff array */
/* isum = 0; */
/* for (j = 0; j < 26; j++) { */
/* ist = ncl[j+26*l]; */
/* ncl[j+26*l] = isum; */
/* isum += ist; */
/* } */
/* perform exclusive prefix scan */
/* load 26 data elements into 32 length vector with zero padding */
mm = 26*l;
v_it = _mm512_loadunpacklo_epi32(v_0,&ncl[mm]);
v_it = _mm512_loadunpackhi_epi32(v_it,&ncl[mm+16]);
_mm512_store_epi32(ls,v_it);
v_is = _mm512_mask_loadunpacklo_epi32(v_0,msk1,&ncl[mm+16]);
v_is = _mm512_mask_loadunpackhi_epi32(v_is,msk1,&ncl[mm+32]);
_mm512_store_epi32(&ls[16],v_is);
v_ioff = _mm512_setzero_epi32();
/* vector loop over elements in blocks of 16 */
for (j = 0; j < 32; j+=16) {
/* load data */
v_it0 = _mm512_load_epi32(&ls[j]);
/* first pass */
v_is = _mm512_shuffle_epi32(v_it0,177);
v_it = _mm512_mask_add_epi32(v_it0,_mm512_int2mask(43690),
v_it0,v_is);
/* second pass */
v_is = _mm512_shuffle_epi32(v_it,80);
v_it = _mm512_mask_add_epi32(v_it,_mm512_int2mask(52428),v_it,
v_is);
/* third pass */
v_is = _mm512_permutevar_epi32(v_m1,v_it);
v_it = _mm512_mask_add_epi32(v_it,_mm512_int2mask(61680),v_it,
v_is);
/* fourth pass */
v_is = _mm512_permutevar_epi32(v_m2,v_it);
v_it = _mm512_mask_add_epi32(v_it,_mm512_int2mask(65280),v_it,
v_is);
/* add offset */
v_it = _mm512_add_epi32(v_it,v_ioff);
/* next offset */
if (j==0) {
v_ioff = _mm512_shuffle_epi32(v_it,255);
v_ioff = _mm512_permute4f128_epi32(v_ioff,255);
}
/* subtract for exclusive scan */
v_it = _mm512_sub_epi32(v_it,v_it0);
/* write data */
_mm512_store_epi32(&ls[j],v_it);
}
nh = ihole[2*noff];
nps = 16*(nh/16);
/* nps = (nh >> 4) << 4; */
ip = 0;
/* loop over particles leaving tile in groups of 16 */
for (j = 0; j < nps; j+=16) {
/* j1 = ihole[2*(j+1+(ntmax+1)*l)] - 1; */
/* ist = ihole[1+2*(j+1+(ntmax+1)*l)]; */
mm = 2*(j+1+noff);
v_it = _mm512_loadunpacklo_epi32(v_0,&ihole[mm]);
v_it = _mm512_loadunpackhi_epi32(v_it,&ihole[mm+16]);
_mm512_store_epi32(lm,v_it);
mm += 16;
v_is = _mm512_loadunpacklo_epi32(v_0,&ihole[mm]);
v_is = _mm512_loadunpackhi_epi32(v_is,&ihole[mm+16]);
_mm512_store_epi32(&lm[16],v_is);
/* buffer particles that are leaving tile, in direction order */
for (ll = 0; ll < 16; ll++) {
j1 = lm[2*ll] - 1;
ist = lm[1+2*ll];
ii = ls[ist-1];
if (ii < npbmx) {
for (i = 0; i < idimp; i++) {
ppbuff[ii+npbmx*i+nboff]
= ppart[j1+nppmx*i+npoff];
}
}
else {
ip = 1;
}
ls[ist-1] = ii + 1;
}
}
/* loop over remaining particles leaving tile */
for (j = nps; j < nh; j++) {
/* buffer particles that are leaving tile, in direction order */
j1 = ihole[2*(j+1+noff)] - 1;
ist = ihole[1+2*(j+1+noff)];
ii = ls[ist-1];
if (ii < npbmx) {
for (i = 0; i < idimp; i++) {
ppbuff[ii+npbmx*i+nboff]
= ppart[j1+nppmx*i+npoff];
}
}
else {
ip = 1;
}
ls[ist-1] = ii + 1;
}
/* store 26 data elements into ncl */
mm = 26*l;
v_it = _mm512_load_epi32(ls);
v_is = _mm512_load_epi32(&ls[16]);
_mm512_packstorelo_epi32(&ncl[mm],v_it);
_mm512_packstorehi_epi32(&ncl[mm+16],v_it);
_mm512_mask_packstorelo_epi32(&ncl[mm+16],msk1,v_is);
_mm512_mask_packstorehi_epi32(&ncl[mm+32],msk1,v_is);
/* set error */
if (ip > 0)
*irc = ncl[25+26*l];
}
/* ppbuff overflow */
if (*irc > 0)
return;
/* copy incoming particles from buffer into ppart: update ppart, kpic */
/* loop over tiles */
v_ioff = _mm512_set_epi32(15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
v_m1 = _mm512_set1_epi32(nppmx);
#pragma omp parallel for \
private(i,j,k,l,ii,kk,npp,nps,npoff,noff,nboff,kx,ky,kz,kl,kr,kxl,kxr, \
lk,ll,lr,ih,nh,nn,mm,ncoff,ist,j1,j2,ip,v_m2,v_m3,v_it,v_is,v_it0,v_mm, \
v_npp,v_x,msk1,ks,ls)
for (l = 0; l < mxyz1; l++) {
npp = kpic[l];
npoff = idimp*nppmx*l;
noff = (ntmax+1)*l;
v_m2 = _mm512_set1_epi32(noff+1);
v_m3 = _mm512_set1_epi32(npoff);
kz = l/mxy1;
k = l - mxy1*kz;
/* loop over tiles in z, assume periodic boundary conditions */
lk = kz*mxy1;
/* find tile behind */
ll = kz - 1;
if (ll < 0)
ll += mz1;
ll = ll*mxy1;
/* find tile in front */
lr = kz + 1;
if (lr >= mz1)
lr -= mz1;
lr = lr*mxy1;
ky = k/mx1;
/* loop over tiles in y, assume periodic boundary conditions */
kk = ky*mx1;
/* find tile above */
kl = ky - 1;
if (kl < 0)
kl += my1;
kl = kl*mx1;
/* find tile below */
kr = ky + 1;
if (kr >= my1)
kr -= my1;
kr = kr*mx1;
/* loop over tiles in x, assume periodic boundary conditions */
kx = k - ky*mx1;
kxl = kx - 1 ;
if (kxl < 0)
kxl += mx1;
kxr = kx + 1;
if (kxr >= mx1)
kxr -= mx1;
/* find tile number for different directions */
ks[0] = kxr + kk + lk;
ks[1] = kxl + kk + lk;
ks[2] = kx + kr + lk;
ks[3] = kxr + kr + lk;
ks[4] = kxl + kr + lk;
ks[5] = kx + kl + lk;
ks[6] = kxr + kl + lk;
ks[7] = kxl + kl + lk;
ks[8] = kx + kk + lr;
ks[9] = kxr + kk + lr;
ks[10] = kxl + kk + lr;
ks[11] = kx + kr + lr;
ks[12] = kxr + kr + lr;
ks[13] = kxl + kr + lr;
ks[14] = kx + kl + lr;
ks[15] = kxr + kl + lr;
ks[16] = kxl + kl + lr;
ks[17] = kx + kk + ll;
ks[18] = kxr + kk + ll;
ks[19] = kxl + kk + ll;
ks[20] = kx + kr + ll;
ks[21] = kxr + kr + ll;
ks[22] = kxl + kr + ll;
ks[23] = kx + kl + ll;
ks[24] = kxr + kl + ll;
ks[25] = kxl + kl + ll;
/* loop over directions */
nh = ihole[2*noff];
ncoff = 0;
ih = 0;
ist = 0;
j1 = 0;
v_it0 = _mm512_set1_epi32(nh);
v_is = _mm512_add_epi32(v_m2,v_it0);
v_it0 = _mm512_sub_epi32(v_ioff,v_it0);
v_npp = _mm512_set1_epi32(npp);
for (ii = 0; ii < 26; ii++) {
nboff = idimp*npbmx*ks[ii];
if (ii > 0)
ncoff = ncl[ii-1+26*ks[ii]];
/* ip = number of particles coming from direction ii */
ip = ncl[ii+26*ks[ii]] - ncoff;
/* nps = 16*(ip/16); */
nps = (ip >> 4) << 4;
/* loop over particles in this direction in groups of 16 */
for (j = 0; j < nps; j+=16) {
/* insert incoming particles into holes */
/* ih += 1; */
/* if (ih <= nh) { */
/* j1 = ihole[2*(ih+noff)] - 1; */
/* } */
/* place overflow at end of array */
/* else { */
/* j1 = npp; */
/* npp += 1; */
/* } */
v_mm = _mm512_add_epi32(_mm512_set1_epi32(ih),v_it0);
msk1 = _mm512_cmp_epi32_mask(v_mm,v_0,_MM_CMPINT_LT);
v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_is);
v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_mm);
v_mm = _mm512_mask_add_epi32(v_mm,_mm512_knot(msk1),v_mm,
v_npp);
v_it = _mm512_mask_i32gather_epi32(v_mm,msk1,v_mm,
(int *)ihole,4);
v_it = _mm512_mask_sub_epi32(v_it,msk1,v_it,v_1);
ih += 16;
nn = ih - nh;
if (nn > 0) {
nn = nn < 16 ? nn : 16;
npp += nn;
}
msk1 = _mm512_cmp_epi32_mask(v_it,v_m1,_MM_CMPINT_LT);
ll = _mm512_mask2int(_mm512_knot(msk1));
v_it = _mm512_add_epi32(v_it,v_m3);
for (i = 0; i < idimp; i++) {
/* if (j1 < nppmx) */
/* ppart[j1+nppmx*i+npoff] */
/* = ppbuff[j+ncoff+npbmx*i+nboff]; */
mm = j + ncoff + npbmx*i + nboff;
v_x = _mm512_loadunpacklo_ps(v_x,&ppbuff[mm]);
v_x = _mm512_loadunpackhi_ps(v_x,&ppbuff[mm+16]);
if (ll==0) {
_mm512_i32scatter_ps((float *)ppart,v_it,v_x,4);
}
else {
_mm512_mask_i32scatter_ps((float *)ppart,msk1,v_it,
v_x,4);
}
v_it = _mm512_add_epi32(v_it,v_m1);
}
if (ll != 0) {
ist = 1;
}
}
/* loop over remaining particles in this direction */
for (j = nps; j < ip; j++) {
ih += 1;
/* insert incoming particles into holes */
if (ih <= nh) {
j1 = ihole[2*(ih+(ntmax+1)*l)] - 1;
}
/* place overflow at end of array */
else {
j1 = npp;
npp += 1;
}
if (j1 < nppmx) {
for (i = 0; i < idimp; i++) {
ppart[j1+nppmx*i+npoff]
= ppbuff[j+ncoff+npbmx*i+nboff];
}
}
else {
ist = 1;
}
}
}
/* set error */
if (ist > 0)
*irc = j1+1;
/* fill up remaining holes in particle array with particles from bottom */
/* holes with locations great than npp-ip do not need to be filled */
if (ih < nh) {
ip = nh - ih;
ii = nh;
nn = ihole[2*(ii+noff)] - 1;
v_it0 = _mm512_set1_epi32(nn);
ih += 1;
j2 = ihole[2*(ih+noff)] - 1;
v_m2 = _mm512_sub_epi32(v_m2,v_1);
/* move particles from end into remaining holes */
/* holes are processed in increasing order */
/* nps = 16*(ip/16); */
nps = (ip >> 4) << 4;
/* loop over particles in groups of 16 */
for (j = 0; j < nps; j+=16) {
/* j2 = ihole[2*(ih+noff)] - 1; */
v_mm = _mm512_add_epi32(_mm512_set1_epi32(ih),v_ioff);
v_mm = _mm512_add_epi32(v_mm,v_m2);
v_mm = _mm512_add_epi32(v_mm,v_mm);
v_is = _mm512_i32gather_epi32(v_mm,(int *)ihole,4);
v_is = _mm512_sub_epi32(v_is,v_1);
/* j1 = npp - j - 1; */
/* if (j1==nn) { */
/* ii -= 1; */
/* nn = ihole[2*(ii+(ntmax+1)*l)] - 1; */
/* } */
kk = 0;
for (ll = 0; ll < 16; ll++) {
j1 = npp - j - ll - 1;
if (j1==nn) {
ii -= 1;
nn = ihole[2*(ii+(ntmax+1)*l)] - 1;
}
else {
ls[kk] = j1;
kk += 1;
}
}
v_it = _mm512_load_epi32(ls);
v_it0 = _mm512_set1_epi32(kk);
msk1 = _mm512_cmp_epi32_mask(v_ioff,v_it0,_MM_CMPINT_LT);
v_is = _mm512_add_epi32(v_is,v_m3);
v_it = _mm512_add_epi32(v_it,v_m3);
for (i = 0; i < idimp; i++) {
/* ppart[j2+nppmx*i+npoff] */
/* = ppart[j1+nppmx*i+npoff]; */
if (kk==16) {
v_x = _mm512_i32gather_ps(v_it,(float *)ppart,4);
_mm512_i32scatter_ps((float *)ppart,v_is,v_x,4);
}
else {
v_x = _mm512_mask_i32gather_ps(v_zero,msk1,v_it,
(float *)ppart,4);
_mm512_mask_i32scatter_ps((float *)ppart,msk1,v_is,
v_x,4);
}
v_is = _mm512_add_epi32(v_is,v_m1);
v_it = _mm512_add_epi32(v_it,v_m1);
}
ih += kk;
/* holes with locations great than npp-ip do not need to be filled */
}
/* loop over remaining particles */
if (nps < ip) {
nn = ihole[2*(ii+noff)] - 1;
j2 = ihole[2*(ih+noff)] - 1;
}
for (j = nps; j < ip; j++) {
j1 = npp - j - 1;
if (j1==nn) {
ii -= 1;
nn = ihole[2*(ii+noff)] - 1;
}
else {
for (i = 0; i < idimp; i++) {
ppart[j2+nppmx*i+npoff]
= ppart[j1+nppmx*i+npoff];
}
ih += 1;
j2 = ihole[2*(ih+(ntmax+1)*l)] - 1;
}
}
npp -= ip;
}
kpic[l] = npp;
}
return;
}
/*--------------------------------------------------------------------*/
void cknccguard3l(float fxyz[], int nx, int ny, int nz, int nxe,
                  int nye, int nze) {
/* replicate extended periodic vector field fxyz
   linear interpolation
   the field is stored with N=4 floats per grid point; only the first
   3 components are replicated, the 4th is vector padding and is
   never written here
   nx/ny/nz = system length in x/y/z direction
   nxe = first dimension of field arrays, must be >= nx+1
   nye = second dimension of field arrays, must be >= ny+1
   nze = third dimension of field arrays, must be >= nz+1
   requires KNC, fxyz needs to be 64 byte aligned
   nxe needs to be a multiple of 4
local data */
#define N 4
   int j, k, l, nxs, nxyen, ll;
/* nxs = largest multiple of 4 <= nx, extent of the vector loops */
   nxs = 4*(nx/4);
/* nxyen = stride in floats between successive z planes */
   nxyen = N*nxe*nye;
/* copy edges of extended field */
#pragma omp parallel
   {
/* replicate x and y guard cells of every interior z plane */
#pragma omp for nowait \
private(j,k,l,ll)
      for (l = 0; l < nz; l++) {
         ll = nxyen*l;
/* x guard cell: column nx receives column 0, for each row k */
         for (k = 0; k < ny; k++) {
            fxyz[N*nx+N*nxe*k+ll] = fxyz[N*nxe*k+ll];
            fxyz[1+N*nx+N*nxe*k+ll] = fxyz[1+N*nxe*k+ll];
            fxyz[2+N*nx+N*nxe*k+ll] = fxyz[2+N*nxe*k+ll];
         }
/* y guard row: row ny receives row 0 */
/* vector loop over elements in blocks of 4 grid points (16 floats); */
/* mask 30583 = 0x7777 writes lanes 0-2 of each 4-lane grid point, */
/* leaving the padding component untouched */
         for (j = 0; j < nxs; j+=4) {
            _mm512_mask_store_ps(&fxyz[N*j+N*nxe*ny+ll],
            _mm512_int2mask(30583),_mm512_load_ps(&fxyz[N*j+ll]));
         }
/* loop over remaining elements */
         for (j = nxs; j < nx; j++) {
            fxyz[N*j+N*nxe*ny+ll] = fxyz[N*j+ll];
            fxyz[1+N*j+N*nxe*ny+ll] = fxyz[1+N*j+ll];
            fxyz[2+N*j+N*nxe*ny+ll] = fxyz[2+N*j+ll];
         }
/* corner (nx,ny) of this plane receives point (0,0) */
         fxyz[N*nx+N*nxe*ny+ll] = fxyz[ll];
         fxyz[1+N*nx+N*nxe*ny+ll] = fxyz[1+ll];
         fxyz[2+N*nx+N*nxe*ny+ll] = fxyz[2+ll];
      }
/* replicate the z guard plane: plane nz receives plane 0 */
#pragma omp for \
private(j,k)
      for (k = 0; k < ny; k++) {
/* vector loop over elements in blocks of 4 grid points */
         for (j = 0; j < nxs; j+=4) {
            _mm512_mask_store_ps(&fxyz[N*j+N*nxe*k+nxyen*nz],
            _mm512_int2mask(30583),_mm512_load_ps(&fxyz[N*j+N*nxe*k]));
         }
/* loop over remaining elements */
         for (j = nxs; j < nx; j++) {
            fxyz[N*j+N*nxe*k+nxyen*nz] = fxyz[N*j+N*nxe*k];
            fxyz[1+N*j+N*nxe*k+nxyen*nz] = fxyz[1+N*j+N*nxe*k];
            fxyz[2+N*j+N*nxe*k+nxyen*nz] = fxyz[2+N*j+N*nxe*k];
         }
/* x guard column of the z guard plane */
         fxyz[N*nx+N*nxe*k+nxyen*nz] = fxyz[N*nxe*k];
         fxyz[1+N*nx+N*nxe*k+nxyen*nz] = fxyz[1+N*nxe*k];
         fxyz[2+N*nx+N*nxe*k+nxyen*nz] = fxyz[2+N*nxe*k];
      }
   }
/* y guard row of the z guard plane (serial: one row of work) */
/* vector loop over elements in blocks of 4 */
   for (j = 0; j < nxs; j+=4) {
      _mm512_mask_store_ps(&fxyz[N*j+N*nxe*ny+nxyen*nz],
      _mm512_int2mask(30583),_mm512_load_ps(&fxyz[N*j]));
   }
/* loop over remaining elements */
   for (j = nxs; j < nx; j++) {
      fxyz[N*j+N*nxe*ny+nxyen*nz] = fxyz[N*j];
      fxyz[1+N*j+N*nxe*ny+nxyen*nz] = fxyz[1+N*j];
      fxyz[2+N*j+N*nxe*ny+nxyen*nz] = fxyz[2+N*j];
   }
/* global corner (nx,ny,nz) receives point (0,0,0) */
   fxyz[N*nx+N*nxe*ny+nxyen*nz] = fxyz[0];
   fxyz[1+N*nx+N*nxe*ny+nxyen*nz] = fxyz[1];
   fxyz[2+N*nx+N*nxe*ny+nxyen*nz] = fxyz[2];
   return;
#undef N
}
/*--------------------------------------------------------------------*/
void ckncaguard3l(float q[], int nx, int ny, int nz, int nxe, int nye,
                  int nze) {
/* accumulate extended periodic scalar field q
   charge deposited in the guard cells at x=nx, y=ny, z=nz is added
   back into the corresponding interior points at x=0, y=0, z=0, and
   each guard cell is zeroed after it is folded in
   linear interpolation
   nx/ny/nz = system length in x/y/z direction
   nxe = first dimension of field arrays, must be >= nx+1
   nye = second dimension of field arrays, must be >= ny+1
   nze = third dimension of field arrays, must be >= nz+1
   requires KNC, q needs to be 64 byte aligned
   nxe needs to be a multiple of 16
local data */
   int j, k, l, nxs, nxye, ll;
   __m512 v_q;
/* nxs = largest multiple of 16 <= nx, extent of the vector loops */
   nxs = 16*(nx/16);
/* nxye = stride in floats between successive z planes */
   nxye = nxe*nye;
/* accumulate edges of extended field */
#pragma omp parallel
   {
/* fold x and y guard cells of every interior z plane */
#pragma omp for \
private(j,k,l,ll,v_q)
      for (l = 0; l < nz; l++) {
         ll = nxye*l;
/* x guard cell: add column nx into column 0, then clear it */
         for (k = 0; k < ny; k++) {
            q[nxe*k+ll] += q[nx+nxe*k+ll];
            q[nx+nxe*k+ll] = 0.0;
         }
/* y guard row: add row ny into row 0 */
/* vector loop over elements in blocks of 16 */
         for (j = 0; j < nxs; j+=16) {
            v_q = _mm512_load_ps(&q[j+nxe*ny+ll]);
            v_q = _mm512_add_ps(_mm512_load_ps(&q[j+ll]),v_q);
            _mm512_store_ps(&q[j+ll],v_q);
            _mm512_store_ps(&q[j+nxe*ny+ll],_mm512_setzero_ps());
         }
/* loop over remaining elements */
         for (j = nxs; j < nx; j++) {
            q[j+ll] += q[j+nxe*ny+ll];
            q[j+nxe*ny+ll] = 0.0;
         }
/* corner (nx,ny) of this plane folds into point (0,0) */
         q[ll] += q[nx+nxe*ny+ll];
         q[nx+nxe*ny+ll] = 0.0;
      }
/* fold the z guard plane: plane nz is added into plane 0 */
#pragma omp for \
private(j,k,v_q)
      for (k = 0; k < ny; k++) {
/* vector loop over elements in blocks of 16 */
         for (j = 0; j < nxs; j+=16) {
            v_q = _mm512_load_ps(&q[j+nxe*k+nxye*nz]);
            v_q = _mm512_add_ps(_mm512_load_ps(&q[j+nxe*k]),v_q);
            _mm512_store_ps(&q[j+nxe*k],v_q);
            _mm512_store_ps(&q[j+nxe*k+nxye*nz],_mm512_setzero_ps());
         }
/* loop over remaining elements */
         for (j = nxs; j < nx; j++) {
            q[j+nxe*k] += q[j+nxe*k+nxye*nz];
            q[j+nxe*k+nxye*nz] = 0.0;
         }
/* x guard column of the z guard plane */
         q[nxe*k] += q[nx+nxe*k+nxye*nz];
         q[nx+nxe*k+nxye*nz] = 0.0;
      }
   }
/* y guard row of the z guard plane (serial: one row of work) */
/* vector loop over elements in blocks of 16 */
   for (j = 0; j < nxs; j+=16) {
      v_q = _mm512_load_ps(&q[j+nxe*ny+nxye*nz]);
      v_q = _mm512_add_ps(_mm512_load_ps(&q[j]),v_q);
      _mm512_store_ps(&q[j],v_q);
      _mm512_store_ps(&q[j+nxe*ny+nxye*nz],_mm512_setzero_ps());
   }
/* loop over remaining elements */
   for (j = nxs; j < nx; j++) {
      q[j] += q[j+nxe*ny+nxye*nz];
      q[j+nxe*ny+nxye*nz] = 0.0;
   }
/* global corner (nx,ny,nz) folds into point (0,0,0) */
   q[0] += q[nx+nxe*ny+nxye*nz];
   q[nx+nxe*ny+nxye*nz] = 0.0;
   return;
}
/*--------------------------------------------------------------------*/
void ckncmpois33(float complex q[], float complex fxyz[], int isign,
float complex ffc[], float ax, float ay, float az,
float affp, float *we, int nx, int ny, int nz,
int nxvh, int nyv, int nzv, int nxhd, int nyhd,
int nzhd) {
/* this subroutine solves 3d poisson's equation in fourier space for
force/charge (or convolution of electric field over particle shape)
with periodic boundary conditions.
for isign = 0, output: ffc
input: isign,ax,ay,az,affp,nx,ny,nz,nxvh,nyv,nzv,nxhd,nyhd,nzhd
for isign = -1, output: fxyz, we
input: q,ffc,isign,nx,ny,nz,nxvh,nyv,nzv,nxhd,nyhd,nzhd
approximate flop count is:
59*nxc*nyc*nzc + 26*(nxc*nyc + nxc*nzc + nyc*nzc)
where nxc = nx/2 - 1, nyc = ny/2 - 1, nzc = nz/2 - 1
if isign = 0, form factor array is prepared
if isign is not equal to 0, force/charge is calculated
equation used is:
fx[kz][ky][kx] = -sqrt(-1)*kx*g[kz][ky][kx]*s[kz][ky][kx],
fy[kz][ky][kx] = -sqrt(-1)*ky*g[kz][ky][kx]*s[kz][ky][kx],
fz[kz][ky][kx] = -sqrt(-1)*kz*g[kz][ky][kx]*s[kz][ky][kx],
where kx = 2pi*j/nx, ky = 2pi*k/ny, kz = 2pi*l/nz, and
j,k,l = fourier mode numbers,
g[kz][ky][kx] = (affp/(kx**2+ky**2+kz**2))*s[kz][ky][kx],
s[kz][ky][kx] = exp(-((kx*ax)**2+(ky*ay)**2+(kz*az)**2)/2), except for
fx(kx=pi) = fy(kx=pi) = fz(kx=pi) = 0,
fx(ky=pi) = fy(ky=pi) = fx(ky=pi) = 0,
fx(kz=pi) = fy(kz=pi) = fz(kz=pi) = 0,
fx(kx=0,ky=0,kz=0) = fy(kx=0,ky=0,kz=0) = fz(kx=0,ky=0,kz=0) = 0.
q[l][k][j] = complex charge density for fourier mode (j,k,l)
fxyz[l][k][j][0] = x component of complex force/charge
fxyz[l][k][j][1] = y component of complex force/charge
fxyz[l][k][j][2] = z component of complex force/charge
all for fourier mode (j,k,l)
cimag(ffc[l][k][j]) = finite-size particle shape factor s
for fourier mode (j,k,l)
creal(ffc[l][k][j]) = potential green's function g
for fourier mode (j,k,l)
ax/ay/az = half-width of particle in x/y/z direction
affp = normalization constant = nx*ny*nz/np,
where np=number of particles
electric field energy is also calculated, using
we = nx*ny*nz*sum((affp/(kx**2+ky**2+kz**2))*
|q[kz][ky][kx]*s[kz][ky][kx]|**2)
nx/ny/nz = system length in x/y/z direction
nxvh = first dimension of field arrays, must be >= nxh
nyv = second dimension of field arrays, must be >= ny
nzv = third dimension of field arrays, must be >= nz
nxhd = first dimension of form factor array, must be >= nxh
nyhd = second dimension of form factor array, must be >= nyh
nzhd = third dimension of form factor array, must be >= nzh
requires KNC, q, fxy, ffc need to be 64 byte aligned
nxhd, nxvh need to be a multiple of 8
fxyz needs to have 4 components
local data */
int nxh, nyh, nzh, nxhs, itn, j, k, l, k1, l1, kk, kj, ll, lj;
int nxyhd, nxvyh;
float dnx, dny, dnz, dkx, dky, dkz, at1, at2, at3, at4, at5, at6;
float complex zero, zt1, zt2;
double wp, sum1, sum2;
__m512i v_j, v_it, v_perm;
__m512 v_dnx, v_dny, v_dnz, v_dky, v_dkz, v_at1, v_at2, v_at3, v_at4;
__m512 v_zero, v_zt1, v_zt2, v_zt3, v_zt4;
__m512 a, b, c, d, e, f, g, h;
__m512d v_wp, v_d;
__attribute__((aligned(64))) double dd[8];
nxh = nx/2;
nyh = 1 > ny/2 ? 1 : ny/2;
nzh = 1 > nz/2 ? 1 : nz/2;
nxhs = 8*(nxh/8);
itn = 1 > nxhs ? 1 : nxhs;
nxyhd = nxhd*nyhd;
nxvyh = nxvh*nyv;
dnx = 6.28318530717959/(float) nx;
dny = 6.28318530717959/(float) ny;
dnz = 6.28318530717959/(float) nz;
zero = 0.0 + 0.0*_Complex_I;
v_j = _mm512_set_epi32(7,7,6,6,5,5,4,4,3,3,2,2,1,1,0,0);
v_dnx = _mm512_set1_ps(dnx);
v_dny = _mm512_set1_ps(dny);
v_dnz = _mm512_set1_ps(dnz);
v_zero = _mm512_setzero_ps();
v_perm = _mm512_set_epi32(15,14,11,10,7,6,3,2,13,12,9,8,5,4,1,0);
if (isign != 0)
goto L40;
/* prepare form factor array */
for (l = 0; l < nzh; l++) {
dkz = dnz*(float) l;
ll = nxyhd*l;
at1 = dkz*dkz;
at2 = pow((dkz*az),2);
for (k = 0; k < nyh; k++) {
dky = dny*(float) k;
kk = nxhd*k;
at3 = dky*dky + at1;
at4 = pow((dky*ay),2) + at2;
for (j = 0; j < nxh; j++) {
dkx = dnx*(float) j;
at5 = dkx*dkx + at3;
at6 = exp(-0.5*(pow((dkx*ax),2) + at4));
if (at5==0.0) {
ffc[j+kk+ll] = affp + 1.0*_Complex_I;
}
else {
ffc[j+kk+ll] = (affp*at6/at5) + at6*_Complex_I;
}
}
}
}
return;
/* calculate force/charge and sum field energy */
L40: sum1 = 0.0;
/* mode numbers 0 < kx < nx/2, 0 < ky < ny/2, and 0 < kz < nz/2 */
#pragma omp parallel
{
#pragma omp for nowait \
private(j,k,l,k1,l1,ll,lj,kk,kj,dky,dkz,at1,at2,at3,at4,zt1,zt2,wp, \
v_it,v_dky,v_dkz,v_at1,v_at2,v_at3,v_at4,v_zt1,v_zt2,v_zt3,v_zt4,a,b, \
c,d,e,f,g,h,v_d,v_wp,dd) \
reduction(+:sum1)
for (l = 1; l < nzh; l++) {
dkz = dnz*(float) l;
v_dkz = _mm512_cvtfxpnt_round_adjustepi32_ps(
_mm512_set1_epi32(l),_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dkz = _mm512_mul_ps(v_dnz,v_dkz);
ll = nxyhd*l;
lj = nxvyh*l;
l1 = nxvyh*nz - lj;
wp = 0.0;
v_wp = _mm512_setzero_pd();
for (k = 1; k < nyh; k++) {
dky = dny*(float) k;
v_it = _mm512_set1_epi32(k);
v_dky = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_dky = _mm512_mul_ps(v_dny,v_dky);
kk = nxhd*k;
kj = nxvh*k;
k1 = nxvh*ny - kj;
/* vector loop over elements in blocks of 8 */
for (j = 0; j < nxhs; j+=8) {
/* at1 = crealf(ffc[j+kk+ll])*cimagf(ffc[j+kk+ll]); */
v_at1 = _mm512_load_ps((float *)&ffc[j+kk+ll]);
v_at2 = (__m512)_mm512_shuffle_epi32((__m512i)v_at1,177);
v_at1 = _mm512_mul_ps(v_at1,v_at2);
/* at2 = at1*dnx*(float) j; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_at2 = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_at2 = _mm512_mul_ps(v_at1,_mm512_mul_ps(v_dnx,v_at2));
/* at3 = dky*at1; */
v_at3 = _mm512_mul_ps(v_dky,v_at1);
/* at4 = dkz*at1; */
v_at4 = _mm512_mul_ps(v_dkz,v_at1);
/* zt1 = cimagf(q[j+kj+lj]) - crealf(q[j+kj+lj])*_Complex_I; */
v_zt1 = _mm512_load_ps((float *)&q[j+kj+lj]);
v_zt1 = _mm512_mask_sub_ps(v_zt1,_mm512_int2mask(21845),
v_zero,v_zt1);
v_zt1 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,177);
/* zt2 = cimagf(q[j+k1+lj]) - crealf(q[j+k1+lj])*_Complex_I; */
v_zt2 = _mm512_load_ps((float *)&q[j+k1+lj]);
v_zt2 = _mm512_mask_sub_ps(v_zt2,_mm512_int2mask(21845),
v_zero,v_zt2);
v_zt2 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt2,177);
/* zero out kx = 0 mode */
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(3),
v_zero);
v_zt2 = _mm512_mask_mov_ps(v_zt2,_mm512_int2mask(3),
v_zero);
}
/* fxyz[4*(j+kj+lj)] = at2*zt1; */
/* fxyz[1+4*(j+kj+lj)] = at3*zt1; */
/* fxyz[2+4*(j+kj+lj)] = at4*zt1; */
a = _mm512_mul_ps(v_at2,v_zt1);
b = _mm512_mul_ps(v_at3,v_zt1);
c = _mm512_mul_ps(v_at4,v_zt1);
/* perform 4x16 transpose for fxyz field components */
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280),
c,78);
f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255),
a,78);
g = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(65280),
v_zero,78);
h = _mm512_mask_permute4f128_ps(v_zero,
_mm512_int2mask(255),b,78);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),
g,177);
b = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(3855),
e,177);
c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680),
h,177);
d = _mm512_mask_permute4f128_ps(h,_mm512_int2mask(3855),
f,177);
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
_mm512_store_ps((float *)&fxyz[4*(j+kj+lj)],a);
_mm512_store_ps((float *)&fxyz[8+4*(j+kj+lj)],b);
_mm512_store_ps((float *)&fxyz[16+4*(j+kj+lj)],c);
_mm512_store_ps((float *)&fxyz[24+4*(j+kj+lj)],d);
/* fxyz[4*(j+k1+lj)] = at2*zt2; */
/* fxyz[1+4*(j+k1+lj)] = -at3*zt2; */
/* fxyz[2+4*(j+k1+lj)] = at4*zt2; */
a = _mm512_mul_ps(v_at2,v_zt2);
b = _mm512_sub_ps(v_zero,_mm512_mul_ps(v_at3,v_zt2));
c = _mm512_mul_ps(v_at4,v_zt2);
/* perform 4x16 transpose for fxyz field components */
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280),
c,78);
f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255),
a,78);
g = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(65280),
v_zero,78);
h = _mm512_mask_permute4f128_ps(v_zero,
_mm512_int2mask(255),b,78);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),
g,177);
b = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(3855),
e,177);
c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680),
h,177);
d = _mm512_mask_permute4f128_ps(h,_mm512_int2mask(3855),
f,177);
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
_mm512_store_ps((float *)&fxyz[4*(j+k1+lj)],a);
_mm512_store_ps((float *)&fxyz[8+4*(j+k1+lj)],b);
_mm512_store_ps((float *)&fxyz[16+4*(j+k1+lj)],c);
_mm512_store_ps((float *)&fxyz[24+4*(j+k1+lj)],d);
/* wp += at1*(q[j+kj+lj]*conjf(q[j+kj+lj]) */
/* + q[j+k1+lj]*conjf(q[j+k1+lj])); */
v_zt3 = _mm512_mul_ps(v_zt1,v_zt1);
v_zt3 = _mm512_add_ps(v_zt3,_mm512_mul_ps(v_zt2,v_zt2));
v_zt3 = _mm512_mul_ps(v_at1,v_zt3);
/* zt1 = cimagf(q[j+kj+l1]) - crealf(q[j+kj+l1])*_Complex_I; */
v_zt1 = _mm512_load_ps((float *)&q[j+kj+l1]);
v_zt1 = _mm512_mask_sub_ps(v_zt1,_mm512_int2mask(21845),
v_zero,v_zt1);
v_zt1 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,177);
/* zt2 = cimagf(q[j+k1+l1]) - crealf(q[j+k1+l1])*_Complex_I; */
v_zt2 = _mm512_load_ps((float *)&q[j+k1+l1]);
v_zt2 = _mm512_mask_sub_ps(v_zt2,_mm512_int2mask(21845),
v_zero,v_zt2);
v_zt2 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt2,177);
/* zero out kx = 0 mode */
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(3),
v_zero);
v_zt2 = _mm512_mask_mov_ps(v_zt2,_mm512_int2mask(3),
v_zero);
}
/* fxyz[4*(j+kj+l1)] = at2*zt1; */
/* fxyz[1+4*(j+kj+l1)] = at3*zt1; */
/* fxyz[2+4*(j+kj+l1)] = -at4*zt1; */
a = _mm512_mul_ps(v_at2,v_zt1);
b = _mm512_mul_ps(v_at3,v_zt1);
c = _mm512_sub_ps(v_zero,_mm512_mul_ps(v_at4,v_zt1));
/* perform 4x16 transpose for fxyz field components */
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280),
c,78);
f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255),
a,78);
g = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(65280),
v_zero,78);
h = _mm512_mask_permute4f128_ps(v_zero,
_mm512_int2mask(255),b,78);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),
g,177);
b = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(3855),
e,177);
c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680),
h,177);
d = _mm512_mask_permute4f128_ps(h,_mm512_int2mask(3855),
f,177);
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
_mm512_store_ps((float *)&fxyz[4*(j+kj+l1)],a);
_mm512_store_ps((float *)&fxyz[8+4*(j+kj+l1)],b);
_mm512_store_ps((float *)&fxyz[16+4*(j+kj+l1)],c);
_mm512_store_ps((float *)&fxyz[24+4*(j+kj+l1)],d);
/* fxyz[4*(j+k1+l1)] = at2*zt2; */
/* fxyz[1+4*(j+k1+l1)] = -at3*zt2; */
/* fxyz[2+4*(j+k1+l1)] = -at4*zt2; */
a = _mm512_mul_ps(v_at2,v_zt2);
b = _mm512_sub_ps(v_zero,_mm512_mul_ps(v_at3,v_zt2));
c = _mm512_sub_ps(v_zero,_mm512_mul_ps(v_at4,v_zt2));
/* perform 4x16 transpose for fxyz field components */
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280),
c,78);
f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255),
a,78);
g = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(65280),
v_zero,78);
h = _mm512_mask_permute4f128_ps(v_zero,
_mm512_int2mask(255),b,78);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),
g,177);
b = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(3855),
e,177);
c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680),
h,177);
d = _mm512_mask_permute4f128_ps(h,_mm512_int2mask(3855),
f,177);
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
_mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],a);
_mm512_store_ps((float *)&fxyz[8+4*(j+k1+l1)],b);
_mm512_store_ps((float *)&fxyz[16+4*(j+k1+l1)],c);
_mm512_store_ps((float *)&fxyz[24+4*(j+k1+l1)],d);
/* wp += at1*(q[j+kj+l1]*conjf(q[j+kj+l1]) */
/* + q[j+k1+l1]*conjf(q[j+k1+l1])); */
v_zt4 = _mm512_mul_ps(v_zt1,v_zt1);
v_zt4 = _mm512_add_ps(v_zt4,_mm512_mul_ps(v_zt2,v_zt2));
v_zt3 = _mm512_add_ps(v_zt3,_mm512_mul_ps(v_at1,v_zt4));
/* convert to double precision before accumulating */
v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt3));
v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt3,78));
v_wp = _mm512_add_pd(v_wp,v_d);
}
/* loop over remaining elements */
for (j = itn; j < nxh; j++) {
at1 = crealf(ffc[j+kk+ll])*cimagf(ffc[j+kk+ll]);
at2 = at1*dnx*(float) j;
at3 = dky*at1;
at4 = dkz*at1;
zt1 = cimagf(q[j+kj+lj]) - crealf(q[j+kj+lj])*_Complex_I;
zt2 = cimagf(q[j+k1+lj]) - crealf(q[j+k1+lj])*_Complex_I;
fxyz[4*(j+kj+lj)] = at2*zt1;
fxyz[1+4*(j+kj+lj)] = at3*zt1;
fxyz[2+4*(j+kj+lj)] = at4*zt1;
fxyz[4*(j+k1+lj)] = at2*zt2;
fxyz[1+4*(j+k1+lj)] = -at3*zt2;
fxyz[2+4*(j+k1+lj)] = at4*zt2;
zt1 = cimagf(q[j+kj+l1]) - crealf(q[j+kj+l1])*_Complex_I;
zt2 = cimagf(q[j+k1+l1]) - crealf(q[j+k1+l1])*_Complex_I;
fxyz[4*(j+kj+l1)] = at2*zt1;
fxyz[1+4*(j+kj+l1)] = at3*zt1;
fxyz[2+4*(j+kj+l1)] = -at4*zt1;
fxyz[4*(j+k1+l1)] = at2*zt2;
fxyz[1+4*(j+k1+l1)] = -at3*zt2;
fxyz[2+4*(j+k1+l1)] = -at4*zt2;
at1 = at1*(q[j+kj+lj]*conjf(q[j+kj+lj])
+ q[j+k1+lj]*conjf(q[j+k1+lj])
+ q[j+kj+l1]*conjf(q[j+kj+l1])
+ q[j+k1+l1]*conjf(q[j+k1+l1]));
wp += (double) at1;
}
}
/* mode numbers kx = 0, nx/2 */
for (k = 1; k < nyh; k++) {
kk = nxhd*k;
kj = nxvh*k;
k1 = nxvh*ny - kj;
at1 = crealf(ffc[kk+ll])*cimagf(ffc[kk+ll]);
at3 = at1*dny*(float) k;
at4 = dkz*at1;
zt1 = cimagf(q[kj+lj]) - crealf(q[kj+lj])*_Complex_I;
zt2 = cimagf(q[kj+l1]) - crealf(q[kj+l1])*_Complex_I;
fxyz[4*(kj+lj)] = zero;
fxyz[1+4*(kj+lj)] = at3*zt1;
fxyz[2+4*(kj+lj)] = at4*zt1;
fxyz[4*(k1+lj)] = zero;
fxyz[1+4*(k1+lj)] = zero;
fxyz[2+4*(k1+lj)] = zero;
fxyz[4*(kj+l1)] = zero;
fxyz[1+4*(kj+l1)] = at3*zt2;
fxyz[2+4*(kj+l1)] = -at4*zt2;
fxyz[4*(k1+l1)] = zero;
fxyz[1+4*(k1+l1)] = zero;
fxyz[2+4*(k1+l1)] = zero;
at1 = at1*(q[kj+lj]*conjf(q[kj+lj])
+ q[kj+l1]*conjf(q[kj+l1]));
wp += (double) at1;
}
/* mode numbers ky = 0, ny/2 */
k1 = nxvh*nyh;
/* vector loop over elements in blocks of 8 */
for (j = 0; j < nxhs; j+=8) {
/* at1 = crealf(ffc[j+ll])*cimagf(ffc[j+ll]); */
v_at1 = _mm512_load_ps((float *)&ffc[j+ll]);
v_at2 = (__m512)_mm512_shuffle_epi32((__m512i)v_at1,177);
v_at1 = _mm512_mul_ps(v_at1,v_at2);
/* at2 = at1*dnx*(float) j; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_at2 = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_at2 = _mm512_mul_ps(v_at1,_mm512_mul_ps(v_dnx,v_at2));
/* at4 = dkz*at1; */
v_at4 = _mm512_mul_ps(v_dkz,v_at1);
/* zt1 = cimagf(q[j+lj]) - crealf(q[j+lj])*_Complex_I; */
v_zt1 = _mm512_load_ps((float *)&q[j+lj]);
v_zt1 = _mm512_mask_sub_ps(v_zt1,_mm512_int2mask(21845),
v_zero,v_zt1);
v_zt1 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,177);
/* zt2 = cimagf(q[j+l1]) - crealf(q[j+l1])*_Complex_I; */
v_zt2 = _mm512_load_ps((float *)&q[j+l1]);
v_zt2 = _mm512_mask_sub_ps(v_zt2,_mm512_int2mask(21845),
v_zero,v_zt2);
v_zt2 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt2,177);
/* zero out kx = 0 mode */
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(3),
v_zero);
v_zt2 = _mm512_mask_mov_ps(v_zt2,_mm512_int2mask(3),
v_zero);
}
/* fxyz[4*(j+lj)] = at2*zt1; */
/* fxyz[1+4*(j+lj)] = zero; */
/* fxyz[2+4*(j+lj)] = at4*zt1; */
a = _mm512_mul_ps(v_at2,v_zt1);
b = v_zero;
c = _mm512_mul_ps(v_at4,v_zt1);
/* perform 4x16 transpose for fxyz field components */
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280),c,
78);
f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255),a,
78);
g = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(65280),
v_zero,78);
h = _mm512_mask_permute4f128_ps(v_zero,_mm512_int2mask(255),
b,78);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),g,
177);
b = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(3855),e,
177);
c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680),h,
177);
d = _mm512_mask_permute4f128_ps(h,_mm512_int2mask(3855),f,
177);
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
_mm512_store_ps((float *)&fxyz[4*(j+lj)],a);
_mm512_store_ps((float *)&fxyz[8+4*(j+lj)],b);
_mm512_store_ps((float *)&fxyz[16+4*(j+lj)],c);
_mm512_store_ps((float *)&fxyz[24+4*(j+lj)],d);
/* fxyz[4*(j+k1+lj)] = zero; */
/* fxyz[1+4*(j+k1+lj)] = zero; */
/* fxyz[2+4*(j+k1+lj)] = zero; */
_mm512_store_ps((float *)&fxyz[4*(j+k1+lj)],v_zero);
_mm512_store_ps((float *)&fxyz[8+4*(j+k1+lj)],v_zero);
_mm512_store_ps((float *)&fxyz[16+4*(j+k1+lj)],v_zero);
_mm512_store_ps((float *)&fxyz[24+4*(j+k1+lj)],v_zero);
/* fxyz[4*(j+l1)] = at2*zt2; */
/* fxyz[1+4*(j+l1)] = zero; */
/* fxyz[2+4*(j+l1)] = -at4*zt2; */
a = _mm512_mul_ps(v_at2,v_zt2);
b = v_zero;
c = _mm512_sub_ps(v_zero,_mm512_mul_ps(v_at4,v_zt2));
/* perform 4x16 transpose for fxyz field components */
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280),c,
78);
f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255),a,
78);
g = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(65280),
v_zero,78);
h = _mm512_mask_permute4f128_ps(v_zero,_mm512_int2mask(255),
b,78);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),g,
177);
b = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(3855),e,
177);
c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680),h,
177);
d = _mm512_mask_permute4f128_ps(h,_mm512_int2mask(3855),f,
177);
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
_mm512_store_ps((float *)&fxyz[4*(j+l1)],a);
_mm512_store_ps((float *)&fxyz[8+4*(j+l1)],b);
_mm512_store_ps((float *)&fxyz[16+4*(j+l1)],c);
_mm512_store_ps((float *)&fxyz[24+4*(j+l1)],d);
/* fxyz[4*(j+k1+l1)] = zero; */
/* fxyz[1+4*(j+k1+l1)] = zero; */
/* fxyz[2+4*(j+k1+l1)] = zero; */
_mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zero);
_mm512_store_ps((float *)&fxyz[8+4*(j+k1+l1)],v_zero);
_mm512_store_ps((float *)&fxyz[16+4*(j+k1+l1)],v_zero);
_mm512_store_ps((float *)&fxyz[24+4*(j+k1+l1)],v_zero);
/* wp += at1*(q[j+lj]*conjf(q[j+lj]) */
/* + q[j+l1]*conjf(q[j+l1])); */
v_zt3 = _mm512_mul_ps(v_zt1,v_zt1);
v_zt3 = _mm512_add_ps(v_zt3,_mm512_mul_ps(v_zt2,v_zt2));
v_zt3 = _mm512_mul_ps(v_at1,v_zt3);
/* convert to double precision before accumulating */
v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt3));
v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt3,78));
v_wp = _mm512_add_pd(v_wp,v_d);
}
/* loop over remaining elements */
for (j = itn; j < nxh; j++) {
at1 = crealf(ffc[j+ll])*cimagf(ffc[j+ll]);
at2 = at1*dnx*(float) j;
at4 = dkz*at1;
zt1 = cimagf(q[j+lj]) - crealf(q[j+lj])*_Complex_I;
zt2 = cimagf(q[j+l1]) - crealf(q[j+l1])*_Complex_I;
fxyz[4*(j+lj)] = at2*zt1;
fxyz[1+4*(j+lj)] = zero;
fxyz[2+4*(j+lj)] = at4*zt1;
fxyz[4*(j+k1+lj)] = zero;
fxyz[1+4*(j+k1+lj)] = zero;
fxyz[2+4*(j+k1+lj)] = zero;
fxyz[4*(j+l1)] = at2*zt2;
fxyz[1+4*(j+l1)] = zero;
fxyz[2+4*(j+l1)] = -at4*zt2;
fxyz[4*(j+k1+l1)] = zero;
fxyz[1+4*(j+k1+l1)] = zero;
fxyz[2+4*(j+k1+l1)] = zero;
at1 = at1*(q[j+lj]*conjf(q[j+lj])
+ q[j+l1]*conjf(q[j+l1]));
wp += (double) at1;
}
/* mode numbers kx = 0, nx/2 */
at1 = crealf(ffc[ll])*cimagf(ffc[ll]);
at4 = dkz*at1;
zt1 = cimagf(q[lj]) - crealf(q[lj])*_Complex_I;
fxyz[4*lj] = zero;
fxyz[1+4*lj] = zero;
fxyz[2+4*lj] = at4*zt1;
fxyz[4*(k1+lj)] = zero;
fxyz[1+4*(k1+lj)] = zero;
fxyz[2+4*(k1+lj)] = zero;
fxyz[4*l1] = zero;
fxyz[1+4*l1] = zero;
fxyz[2+4*l1] = zero;
fxyz[4*(k1+l1)] = zero;
fxyz[1+4*(k1+l1)] = zero;
fxyz[2+4*(k1+l1)] = zero;
at1 = at1*(q[lj]*conjf(q[lj]));
wp += (double) at1;
/* sum1 += wp; */
_mm512_store_pd(&dd[0],v_wp);
for (j = 1; j < 8; j++) {
dd[0] += dd[j];
}
sum1 += (wp + dd[0]);
}
}
/* mode numbers kz = 0, nz/2 */
l1 = nxvyh*nzh;
sum2 = 0.0;
#pragma omp parallel for \
private(j,k,k1,kk,kj,dky,at1,at2,at3,zt1,zt2,wp) \
reduction(+:sum2)
for (k = 1; k < nyh; k++) {
dky = dny*(float) k;
kk = nxhd*k;
kj = nxvh*k;
k1 = nxvh*ny - kj;
wp = 0.0;
for (j = 1; j < nxh; j++) {
at1 = crealf(ffc[j+kk])*cimagf(ffc[j+kk]);
at2 = at1*dnx*(float) j;
at3 = dky*at1;
zt1 = cimagf(q[j+kj]) - crealf(q[j+kj])*_Complex_I;
zt2 = cimagf(q[j+k1]) - crealf(q[j+k1])*_Complex_I;
fxyz[4*(j+kj)] = at2*zt1;
fxyz[1+4*(j+kj)] = at3*zt1;
fxyz[2+4*(j+kj)] = zero;
fxyz[4*(j+k1)] = at2*zt2;
fxyz[1+4*(j+k1)] = -at3*zt2;
fxyz[2+4*(j+k1)] = zero;
fxyz[4*(j+kj+l1)] = zero;
fxyz[1+4*(j+kj+l1)] = zero;
fxyz[2+4*(j+kj+l1)] = zero;
fxyz[4*(j+k1+l1)] = zero;
fxyz[1+4*(j+k1+l1)] = zero;
fxyz[2+4*(j+k1+l1)] = zero;
at1 = at1*(q[j+kj]*conjf(q[j+kj]) + q[j+k1]*conjf(q[j+k1]));
wp += (double) at1;
}
sum2 += wp;
}
/* mode numbers kx = 0, nx/2 */
wp = 0.0;
v_wp = _mm512_setzero_pd();
for (k = 1; k < nyh; k++) {
kk = nxhd*k;
kj = nxvh*k;
k1 = nxvh*ny - kj;
at1 = crealf(ffc[kk])*cimagf(ffc[kk]);
at3 = at1*dny*(float) k;
zt1 = cimagf(q[kj]) - crealf(q[kj])*_Complex_I;
fxyz[4*kj] = zero;
fxyz[1+4*kj] = at3*zt1;
fxyz[2+4*kj] = zero;
fxyz[4*k1] = zero;
fxyz[1+4*k1] = zero;
fxyz[2+4*k1] = zero;
fxyz[4*(kj+l1)] = zero;
fxyz[1+4*(kj+l1)] = zero;
fxyz[2+4*(kj+l1)] = zero;
fxyz[4*(k1+l1)] = zero;
fxyz[1+4*(k1+l1)] = zero;
fxyz[2+4*(k1+l1)] = zero;
at1 = at1*(q[kj]*conjf(q[kj]));
wp += (double) at1;
}
/* mode numbers ky = 0, ny/2 */
k1 = nxvh*nyh;
/* vector loop over elements in blocks of 8 */
for (j = 0; j < nxhs; j+=8) {
/* at1 = crealf(ffc[j])*cimagf(ffc[j]); */
v_at1 = _mm512_load_ps((float *)&ffc[j]);
v_at2 = (__m512)_mm512_shuffle_epi32((__m512i)v_at1,177);
v_at1 = _mm512_mul_ps(v_at1,v_at2);
/* at2 = at1*dnx*(float) j; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_at2 = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
v_at2 = _mm512_mul_ps(v_at1,_mm512_mul_ps(v_dnx,v_at2));
/* zt1 = cimagf(q[j]) - crealf(q[j])*_Complex_I; */
v_zt1 = _mm512_load_ps((float *)&q[j]);
v_zt1 = _mm512_mask_sub_ps(v_zt1,_mm512_int2mask(21845),v_zero,
v_zt1);
v_zt1 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,177);
/* zero out kx = 0 mode */
if (j==0) {
v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(3),v_zero);
}
/* fxyz[4*j] = at2*zt1; */
/* fxyz[1+4*j] = zero; */
/* fxyz[2+4*j] = zero; */
a = _mm512_mul_ps(v_at2,v_zt1);
b = v_zero;
c = v_zero;
/* perform 4x16 transpose for fxyz field components */
e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280),c,78);
f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255),a,78);
a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),v_zero,
177);
b = _mm512_mask_permute4f128_ps(v_zero,_mm512_int2mask(3855),e,
177);
c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680),v_zero,
177);
d = _mm512_mask_permute4f128_ps(v_zero,_mm512_int2mask(3855),f,
177);
a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a);
b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b);
c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c);
d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d);
_mm512_store_ps((float *)&fxyz[4*j],a);
_mm512_store_ps((float *)&fxyz[8+4*j],b);
_mm512_store_ps((float *)&fxyz[16+4*j],c);
_mm512_store_ps((float *)&fxyz[24+4*j],d);
/* fxyz[4*(j+k1)] = zero; */
/* fxyz[1+4*(j+k1)] = zero; */
/* fxyz[2+4*(j+k1)] = zero; */
_mm512_store_ps((float *)&fxyz[4*(j+k1)],v_zero);
_mm512_store_ps((float *)&fxyz[8+4*(j+k1)],v_zero);
_mm512_store_ps((float *)&fxyz[16+4*(j+k1)],v_zero);
_mm512_store_ps((float *)&fxyz[24+4*(j+k1)],v_zero);
/* fxyz[4*(j+l1)] = zero; */
/* fxyz[1+4*(j+l1)] = zero; */
/* fxyz[2+4*(j+l1)] = zero; */
_mm512_store_ps((float *)&fxyz[4*(j+l1)],v_zero);
_mm512_store_ps((float *)&fxyz[8+4*(j+l1)],v_zero);
_mm512_store_ps((float *)&fxyz[16+4*(j+l1)],v_zero);
_mm512_store_ps((float *)&fxyz[24+4*(j+l1)],v_zero);
/* fxyz[4*(j+k1+l1)] = zero; */
/* fxyz[1+4*(j+k1+l1)] = zero; */
/* fxyz[2+4*(j+k1+l1)] = zero; */
_mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zero);
_mm512_store_ps((float *)&fxyz[8+4*(j+k1+l1)],v_zero);
_mm512_store_ps((float *)&fxyz[16+4*(j+k1+l1)],v_zero);
_mm512_store_ps((float *)&fxyz[24+4*(j+k1+l1)],v_zero);
/* wp += at1*(q[j]*conjf(q[j])); */
v_zt3 = _mm512_mul_ps(v_at1,_mm512_mul_ps(v_zt1,v_zt1));
/* convert to double precision before accumulating */
v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt3));
v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt3,78));
v_wp = _mm512_add_pd(v_wp,v_d);
}
/* loop over remaining elements */
for (j = itn; j < nxh; j++) {
at1 = crealf(ffc[j])*cimagf(ffc[j]);
at2 = at1*dnx*(float) j;
zt1 = cimagf(q[j]) - crealf(q[j])*_Complex_I;
fxyz[4*j] = at2*zt1;
fxyz[1+4*j] = zero;
fxyz[2+4*j] = zero;
fxyz[4*(j+k1)] = zero;
fxyz[1+4*(j+k1)] = zero;
fxyz[2+4*(j+k1)] = zero;
fxyz[4*(j+l1)] = zero;
fxyz[1+4*(j+l1)] = zero;
fxyz[2+4*(j+l1)] = zero;
fxyz[4*(j+k1+l1)] = zero;
fxyz[1+4*(j+k1+l1)] = zero;
fxyz[2+4*(j+k1+l1)] = zero;
at1 = at1*(q[j]*conjf(q[j]));
wp += (double) at1;
}
fxyz[0] = zero;
fxyz[1] = zero;
fxyz[2] = zero;
fxyz[4*k1] = zero;
fxyz[1+4*k1] = zero;
fxyz[2+4*k1] = zero;
fxyz[4*l1] = zero;
fxyz[1+4*l1] = zero;
fxyz[2+4*l1] = zero;
fxyz[4*(k1+l1)] = zero;
fxyz[1+4*(k1+l1)] = zero;
fxyz[2+4*(k1+l1)] = zero;
/* sum2 += wp; */
_mm512_store_pd(&dd[0],v_wp);
for (j = 1; j < 8; j++) {
dd[0] += dd[j];
}
sum2 += (wp + dd[0]);
/* *we = wp*((float) nx)*((float) ny)*((float) nz); */
*we = (sum1 + sum2)*((float) nx)*((float) ny)*((float) nz);
return;
}
/*--------------------------------------------------------------------*/
void ckncfft3rmxy(float complex f[], int isign, int mixup[],
float complex sct[], int indx, int indy, int indz,
int nzi, int nzp, int nxhd, int nyd, int nzd,
int nxhyzd, int nxyzhd) {
/* this subroutine performs the x-y part of a three dimensional real to
complex fast fourier transform and its inverse, for a subset of z,
using complex arithmetic, with OpenMP
for isign = (-1,1), input: all, output: f
for isign = -1, approximate flop count: N*(5*log2(N) + 19/2)
for isign = 1, approximate flop count: N*(5*log2(N) + 15/2)
where N = (nx/2)*ny*nz
indx/indy/indz = exponent which determines length in x/y/z direction,
where nx=2**indx, ny=2**indy, nz=2**indz
if isign = -1, an inverse fourier transform in x and y is performed
f[i][m][n] = (1/nx*ny*nz)*sum(f[i][k][j]*exp(-sqrt(-1)*2pi*n*j/nx)*
exp(-sqrt(-1)*2pi*m*k/ny))
if isign = 1, a forward fourier transform in x and y is performed
f[l][k][j] = sum(f[l][m][n]*exp(sqrt(-1)*2pi*n*j/nx)*
exp(sqrt(-1)*2pi*m*k/ny))
mixup = array of bit reversed addresses
sct = sine/cosine table
nzi = initial z index used
nzp = number of z indices used
nxhd = first dimension of f
nyd,nzd = second and third dimensions of f
nxhyzd = maximum of (nx/2,ny,nz)
nxyzhd = maximum of (nx,ny,nz)/2
fourier coefficients are stored as follows:
f[l][k][j] = real, imaginary part of mode j,k,l
where 0 <= j < nx/2, 0 <= k < ny, 0 <= l < nz, except for
f[l][k][0] = real, imaginary part of mode nx/2,k,l,
where ny/2+1 <= k < ny and 0 <= l < nz, and
f[l][0][0] = real, imaginary part of mode nx/2,0,l,
f[l][ny/2][0] = real, imaginary part mode nx/2,ny/2,l,
where nz/2+1 <= l < nz, and
imag(f[0][0][0]) = real part of mode nx/2,0,0
imag(f[0][ny/2][0]) = real part of mode nx/2,ny/2,0
imag(f[nz/2][0][0]) = real part of mode nx/2,0,nz/2
imag(f[nz/2][ny/2][0]) = real part of mode nx/2,ny/2,nz/2
using jpl storage convention, as described in:
E. Huang, P. C. Liewer, V. K. Decyk, and R. D. Ferraro, "Concurrent
Three-Dimensional Fast Fourier Transform Algorithms for Coarse-Grained
Distributed Memory Parallel Computers," Caltech CRPC Report 217-50,
December 1993.
requires KNC, f needs to be 64 byte aligned
nxhd need to be a multiple of 8
written by viktor k. decyk, ucla
local data */
int indx1, ndx1yz, nx, nxh, nxhh, ny, nyh;
int nz, nxyz, nxhyz, nzt, nrx, nry, nrxb, nryb, nxhyd;
int i, j, k, l, n, nn, j1, j2, k1, k2, ns, ns2, km, kmr, joff;
int nss, nxhs, nxhhs, itn;
float ani;
float complex t1, t2, t3;
__m512i v_j, v_kmr, v_m, v_n, v_it;
__m512 v_zero, v_t1, v_t2, v_t3, v_t4, v_t5, v_ani;
/* v_j = lane offsets 0,0,1,1,...,7,7: each complex value occupies */
/* two consecutive float lanes of a 512-bit register               */
v_j = _mm512_set_epi32(7,7,6,6,5,5,4,4,3,3,2,2,1,1,0,0);
if (isign==0)
return;
indx1 = indx - 1;
ndx1yz = indx1 > indy ? indx1 : indy;
ndx1yz = ndx1yz > indz ? ndx1yz : indz;
nx = 1L<<indx;
nxh = nx/2;
nxhh = nx/4;
ny = 1L<<indy;
nyh = ny/2;
nz = 1L<<indz;
nxyz = nx > ny ? nx : ny;
nxyz = nxyz > nz ? nxyz : nz;
nxhyz = 1L<<ndx1yz;
nzt = nzi + nzp - 1;
nxhyd = nxhd*nyd;
/* nxhs/nxhhs: iteration counts rounded down to multiples of 8 for */
/* the vector loops; itn = first index of the scalar remainder loop */
nxhs = 8*(nxh/8);
nxhhs = 8*(nxhh/8);
itn = 1 > nxhhs ? 1 : nxhhs;
/* v_m = real/imag element offsets added to gathered table indices; */
/* v_n = permutation pattern that reverses the order of the eight   */
/* complex values held in one register                              */
v_m = _mm512_set_epi32(1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0);
v_n = _mm512_set_epi32(1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14);
v_zero = _mm512_setzero_ps();
v_t1 = _mm512_setzero_ps();
v_t2 = _mm512_setzero_ps();
v_t3 = _mm512_setzero_ps();
v_t4 = _mm512_setzero_ps();
if (isign > 0)
goto L180;
/* inverse fourier transform */
nrxb = nxhyz/nxh;
nrx = nxyz/nxh;
nryb = nxhyz/ny;
nry = nxyz/ny;
#pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,nss,km,kmr,k1,k2,j1,j2,nn,joff,ani,t1,t2,t3, \
v_it,v_kmr,v_t1,v_ani,v_t2,v_t3,v_t4,v_t5)
for (n = nzi-1; n < nzt; n++) {
nn = nxhyd*n;
/* bit-reverse array elements in x */
for (j = 0; j < nxh; j++) {
j1 = (mixup[j] - 1)/nrxb;
if (j < j1) {
for (i = 0; i < ny; i++) {
joff = nxhd*i + nn;
t1 = f[j1+joff];
f[j1+joff] = f[j+joff];
f[j+joff] = t1;
}
}
}
/* first transform in x */
ns = 1;
for (l = 0; l < indx1; l++) {
ns2 = ns + ns;
km = nxhh/ns;
kmr = km*nrx;
nss = 8*(ns/8);
v_kmr = _mm512_set1_epi32(2*kmr);
for (k = 0; k < km; k++) {
k1 = ns2*k;
k2 = k1 + ns;
for (i = 0; i < ny; i++) {
joff = nxhd*i + nn;
/* vector loop over elements in blocks of 8 */
/* complex multiply pattern used throughout: shuffle code 160 (0xa0) */
/* duplicates the real parts, 177 (0xb1) swaps real/imag pairs, and  */
/* 245 (0xf5) duplicates the imaginary parts; mask 21845 (0x5555)    */
/* negates the real lanes to form the cross term of (a+bi)*(c+di)    */
for (j = 0; j < nss; j+=8) {
/* t1 = sct[kmr*j]; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m);
v_t1 = _mm512_i32gather_ps(v_it,(float *)sct,4);
/* t2 = t1*f[j+k2+joff]; */
v_t2 = _mm512_load_ps((float *)&f[j+k2+joff]);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
v_t3 = _mm512_mul_ps(v_t2,v_t3);
v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
v_t4 = _mm512_mul_ps(v_t2,v_t4);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
v_zero,v_t4);
v_t2 = _mm512_add_ps(v_t3,v_t4);
/* f[j+k2+joff] = f[j+k1+joff] - t2; */
v_t3 = _mm512_load_ps((float *)&f[j+k1+joff]);
v_t4 = _mm512_sub_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[j+k2+joff],v_t4);
/* f[j+k1+joff] += t2; */
v_t4 = _mm512_add_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[j+k1+joff],v_t4);
}
/* loop over remaining elements */
for (j = nss; j < ns; j++) {
t1 = sct[kmr*j];
t2 = t1*f[j+k2+joff];
f[j+k2+joff] = f[j+k1+joff] - t2;
f[j+k1+joff] += t2;
}
}
}
ns = ns2;
}
/* unscramble coefficients and normalize */
kmr = nxyz/nx;
ani = 0.5/(((float) nx)*((float) ny)*((float) nz));
v_ani = _mm512_set1_ps(ani);
v_kmr = _mm512_set1_epi32(2*kmr);
for (k = 0; k < ny; k++) {
joff = nxhd*k + nn;
/* vector loop over elements in blocks of 8 */
for (j = 0; j < nxhhs; j+=8) {
/* t3 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m);
v_t3 = _mm512_i32gather_ps(v_it,(float *)sct,4);
v_t3 = _mm512_mask_sub_ps(v_t3,_mm512_int2mask(21845),
v_zero,v_t3);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,177);
/* t2 = conjf(f[nxh-j+joff]); */
/* unaligned load of the mirrored (descending-index) elements */
v_t2 = _mm512_loadunpacklo_ps(v_t2,
(float *)&f[nxh-j+joff-7]);
v_t2 = _mm512_loadunpackhi_ps(v_t2,
(float *)&f[nxh-j+joff+1]);
/* reverse data */
v_t2 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t2);
/* mask 43690 (0xaaaa) negates the imaginary lanes = conjf */
v_t2 = _mm512_mask_sub_ps(v_t2,_mm512_int2mask(43690),
v_zero,v_t2);
/* t1 = f[j+joff] + t2; */
v_t4 = _mm512_load_ps((float *)&f[j+joff]);
v_t1 = _mm512_add_ps(v_t4,v_t2);
/* t2 = (f[j+joff] - t2)*t3; */
v_t2 = _mm512_sub_ps(v_t4,v_t2);
v_t5 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,160);
v_t5 = _mm512_mul_ps(v_t2,v_t5);
v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,245);
v_t4 = _mm512_mul_ps(v_t2,v_t4);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
v_zero,v_t4);
v_t2 = _mm512_add_ps(v_t5,v_t4);
/* f[j+joff] = ani*(t1 + t2); */
v_t3 = _mm512_mul_ps(v_ani,_mm512_add_ps(v_t1,v_t2));
/* f[nxh-j+joff] = ani*conjf(t1 - t2); */
v_t4 = _mm512_sub_ps(v_t1,v_t2);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(43690),
v_zero,v_t4);
v_t4 = _mm512_mul_ps(v_ani,v_t4);
/* reverse data */
v_t4 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t4);
/* j = 0: masked stores skip element j = 0 (mask 65532 = 0xfffc)  */
/* and element nxh (mask 16383 = 0x3fff); those modes are updated */
/* by the separate loop after this one                            */
if (j==0) {
_mm512_mask_store_ps((float *)&f[j+joff],
_mm512_int2mask(65532),v_t3);
_mm512_mask_packstorelo_ps((float *)&f[nxh-j+joff-7],
_mm512_int2mask(16383),v_t4);
_mm512_mask_packstorehi_ps((float *)&f[nxh-j+joff+1],
_mm512_int2mask(16383),v_t4);
}
else {
_mm512_store_ps((float *)&f[j+joff],v_t3);
_mm512_packstorelo_ps((float *)&f[nxh-j+joff-7],v_t4);
_mm512_packstorehi_ps((float *)&f[nxh-j+joff+1],v_t4);
}
}
/* loop over remaining elements */
for (j = itn; j < nxhh; j++) {
t3 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I;
t2 = conjf(f[nxh-j+joff]);
t1 = f[j+joff] + t2;
t2 = (f[j+joff] - t2)*t3;
f[j+joff] = ani*(t1 + t2);
f[nxh-j+joff] = ani*conjf(t1 - t2);
}
}
/* special handling of the j = 0 and j = nx/4 elements */
ani = 2.0*ani;
for (k = 0; k < ny; k++) {
joff = nxhd*k + nn;
f[nxhh+joff] = ani*conjf(f[nxhh+joff]);
f[joff] = ani*((crealf(f[joff]) + cimagf(f[joff]))
+ (crealf(f[joff]) - cimagf(f[joff]))*_Complex_I);
}
/* bit-reverse array elements in y */
for (k = 0; k < ny; k++) {
joff = nxhd*k + nn;
k1 = (mixup[k] - 1)/nryb;
if (k < k1) {
k1 = nxhd*k1 + nn;
/* vector loop over elements in blocks of 8 */
for (i = 0; i < nxhs; i+=8) {
/* t1 = f[i+k1]; */
v_t1 = _mm512_load_ps((float *)&f[i+k1]);
/* f[i+k1] = f[i+joff]; */
v_t2 = _mm512_load_ps((float *)&f[i+joff]);
_mm512_store_ps((float *)&f[i+k1],v_t2);
/* f[i+joff] = t1; */
_mm512_store_ps((float *)&f[i+joff],v_t1);
}
/* loop over remaining elements */
for (i = nxhs; i < nxh; i++) {
t1 = f[i+k1];
f[i+k1] = f[i+joff];
f[i+joff] = t1;
}
}
}
/* then transform in y */
ns = 1;
for (l = 0; l < indy; l++) {
ns2 = ns + ns;
km = nyh/ns;
kmr = km*nry;
for (k = 0; k < km; k++) {
k1 = ns2*k;
k2 = k1 + ns;
for (j = 0; j < ns; j++) {
j1 = nxhd*(j + k1) + nn;
j2 = nxhd*(j + k2) + nn;
/* one twiddle factor broadcast to all 8 complex lanes */
t1 = sct[kmr*j];
v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1),
crealf(t1));
/* vector loop over elements in blocks of 8 */
for (i = 0; i < nxhs; i+=8) {
/* t2 = t1*f[i+j2]; */
v_t2 = _mm512_load_ps((float *)&f[i+j2]);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
v_t3 = _mm512_mul_ps(v_t2,v_t3);
v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
v_t4 = _mm512_mul_ps(v_t2,v_t4);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
v_zero,v_t4);
v_t2 = _mm512_add_ps(v_t3,v_t4);
/* f[i+j2] = f[i+j1] - t2; */
v_t3 = _mm512_load_ps((float *)&f[i+j1]);
v_t4 = _mm512_sub_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[i+j2],v_t4);
/* f[i+j1] += t2; */
v_t4 = _mm512_add_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[i+j1],v_t4);
}
/* loop over remaining elements */
for (i = nxhs; i < nxh; i++) {
t2 = t1*f[i+j2];
f[i+j2] = f[i+j1] - t2;
f[i+j1] += t2;
}
}
}
ns = ns2;
}
/* unscramble modes kx = 0, nx/2 */
for (k = 1; k < nyh; k++) {
joff = nxhd*k;
k1 = nxhd*ny - joff + nn;
joff += nn;
t1 = f[k1];
f[k1] = 0.5*(cimagf(f[joff] + t1)
+ crealf(f[joff] - t1)*_Complex_I);
f[joff] = 0.5*(crealf(f[joff] + t1)
+ cimagf(f[joff] - t1)*_Complex_I);
}
}
return;
/* forward fourier transform */
L180: nryb = nxhyz/ny;
nry = nxyz/ny;
nrxb = nxhyz/nxh;
nrx = nxyz/nxh;
#pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,nss,km,kmr,k1,k2,j1,j2,nn,joff,t1,t2,t3,v_it, \
v_kmr,v_t1,v_t2,v_t3,v_t4,v_t5)
for (n = nzi-1; n < nzt; n++) {
nn = nxhyd*n;
/* scramble modes kx = 0, nx/2 */
for (k = 1; k < nyh; k++) {
joff = nxhd*k;
k1 = nxhd*ny - joff + nn;
joff += nn;
t1 = cimagf(f[k1]) + crealf(f[k1])*_Complex_I;
f[k1] = conjf(f[joff] - t1);
f[joff] += t1;
}
/* bit-reverse array elements in y */
for (k = 0; k < ny; k++) {
joff = nxhd*k + nn;
k1 = (mixup[k] - 1)/nryb;
if (k < k1) {
k1 = nxhd*k1 + nn;
/* vector loop over elements in blocks of 8 */
for (i = 0; i < nxhs; i+=8) {
/* t1 = f[i+k1]; */
v_t1 = _mm512_load_ps((float *)&f[i+k1]);
/* f[i+k1] = f[i+joff]; */
v_t2 = _mm512_load_ps((float *)&f[i+joff]);
_mm512_store_ps((float *)&f[i+k1],v_t2);
/* f[i+joff] = t1; */
_mm512_store_ps((float *)&f[i+joff],v_t1);
}
/* loop over remaining elements */
for (i = nxhs; i < nxh; i++) {
t1 = f[i+k1];
f[i+k1] = f[i+joff];
f[i+joff] = t1;
}
}
}
/* then transform in y */
ns = 1;
for (l = 0; l < indy; l++) {
ns2 = ns + ns;
km = nyh/ns;
kmr = km*nry;
for (k = 0; k < km; k++) {
k1 = ns2*k;
k2 = k1 + ns;
for (j = 0; j < ns; j++) {
j1 = nxhd*(j + k1) + nn;
j2 = nxhd*(j + k2) + nn;
/* forward transform uses the conjugated twiddle factor */
t1 = conjf(sct[kmr*j]);
v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1),
crealf(t1));
/* vector loop over elements in blocks of 8 */
for (i = 0; i < nxhs; i+=8) {
/* t2 = t1*f[i+j2]; */
v_t2 = _mm512_load_ps((float *)&f[i+j2]);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
v_t3 = _mm512_mul_ps(v_t2,v_t3);
v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
v_t4 = _mm512_mul_ps(v_t2,v_t4);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
v_zero,v_t4);
v_t2 = _mm512_add_ps(v_t3,v_t4);
/* f[i+j2] = f[i+j1] - t2; */
v_t3 = _mm512_load_ps((float *)&f[i+j1]);
v_t4 = _mm512_sub_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[i+j2],v_t4);
/* f[i+j1] += t2; */
v_t4 = _mm512_add_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[i+j1],v_t4);
}
/* loop over remaining elements */
for (i = nxhs; i < nxh; i++) {
t2 = t1*f[i+j2];
f[i+j2] = f[i+j1] - t2;
f[i+j1] += t2;
}
}
}
ns = ns2;
}
/* scramble coefficients */
kmr = nxyz/nx;
v_kmr = _mm512_set1_epi32(2*kmr);
for (k = 0; k < ny; k++) {
joff = nxhd*k + nn;
/* vector loop over elements in blocks of 8 */
for (j = 0; j < nxhhs; j+=8) {
/* t3 = cimagf(sct[kmr*j]) + crealf(sct[kmr*j])*_Complex_I; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m);
v_t3 = _mm512_i32gather_ps(v_it,(float *)sct,4);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,177);
/* t2 = conjf(f[nxh-j+joff]); */
v_t2 = _mm512_loadunpacklo_ps(v_t2,
(float *)&f[nxh-j+joff-7]);
v_t2 = _mm512_loadunpackhi_ps(v_t2,
(float *)&f[nxh-j+joff+1]);
/* reverse data */
v_t2 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t2);
v_t2 = _mm512_mask_sub_ps(v_t2,_mm512_int2mask(43690),
v_zero,v_t2);
/* t1 = f[j+joff] + t2; */
v_t4 = _mm512_load_ps((float *)&f[j+joff]);
v_t1 = _mm512_add_ps(v_t4,v_t2);
/* t2 = (f[j+joff] - t2)*t3; */
v_t2 = _mm512_sub_ps(v_t4,v_t2);
v_t5 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,160);
v_t5 = _mm512_mul_ps(v_t2,v_t5);
v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,245);
v_t4 = _mm512_mul_ps(v_t2,v_t4);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
v_zero,v_t4);
v_t2 = _mm512_add_ps(v_t5,v_t4);
/* f[j+joff] = t1 + t2; */
v_t3 = _mm512_add_ps(v_t1,v_t2);
/* f[nxh-j+joff] = conjf(t1 - t2); */
v_t4 = _mm512_sub_ps(v_t1,v_t2);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(43690),
v_zero,v_t4);
/* reverse data */
v_t4 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t4);
/* j = 0: masked stores skip the j = 0 and j = nxh elements, */
/* which are updated by the separate loop below              */
if (j==0) {
_mm512_mask_store_ps((float *)&f[j+joff],
_mm512_int2mask(65532),v_t3);
_mm512_mask_packstorelo_ps((float *)&f[nxh-j+joff-7],
_mm512_int2mask(16383),v_t4);
_mm512_mask_packstorehi_ps((float *)&f[nxh-j+joff+1],
_mm512_int2mask(16383),v_t4);
}
else {
_mm512_store_ps((float *)&f[j+joff],v_t3);
_mm512_packstorelo_ps((float *)&f[nxh-j+joff-7],v_t4);
_mm512_packstorehi_ps((float *)&f[nxh-j+joff+1],v_t4);
}
}
/* loop over remaining elements */
for (j = itn; j < nxhh; j++) {
t3 = cimagf(sct[kmr*j]) + crealf(sct[kmr*j])*_Complex_I;
t2 = conjf(f[nxh-j+joff]);
t1 = f[j+joff] + t2;
t2 = (f[j+joff] - t2)*t3;
f[j+joff] = t1 + t2;
f[nxh-j+joff] = conjf(t1 - t2);
}
}
/* special handling of the j = 0 and j = nx/4 elements */
for (k = 0; k < ny; k++) {
joff = nxhd*k + nn;
f[nxhh+joff] = 2.0*conjf(f[nxhh+joff]);
f[joff] = (crealf(f[joff]) + cimagf(f[joff]))
+ (crealf(f[joff]) - cimagf(f[joff]))*_Complex_I;
}
/* bit-reverse array elements in x */
for (j = 0; j < nxh; j++) {
j1 = (mixup[j] - 1)/nrxb;
if (j < j1) {
for (i = 0; i < ny; i++) {
joff = nxhd*i + nn;
t1 = f[j1+joff];
f[j1+joff] = f[j+joff];
f[j+joff] = t1;
}
}
}
/* finally transform in x */
ns = 1;
for (l = 0; l < indx1; l++) {
ns2 = ns + ns;
km = nxhh/ns;
kmr = km*nrx;
nss = 8*(ns/8);
v_kmr = _mm512_set1_epi32(2*kmr);
for (k = 0; k < km; k++) {
k1 = ns2*k;
k2 = k1 + ns;
for (i = 0; i < ny; i++) {
joff = nxhd*i + nn;
/* vector loop over elements in blocks of 8 */
for (j = 0; j < nss; j+=8) {
/* t1 = conjf(sct[kmr*j]); */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m);
v_t1 = _mm512_i32gather_ps(v_it,(float *)sct,4);
/* mask 43690 (0xaaaa) negates imaginary lanes = conjf */
v_t1 = _mm512_mask_sub_ps(v_t1,_mm512_int2mask(43690),
v_zero,v_t1);
/* t2 = t1*f[j+k2+joff]; */
v_t2 = _mm512_load_ps((float *)&f[j+k2+joff]);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
v_t3 = _mm512_mul_ps(v_t2,v_t3);
v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
v_t4 = _mm512_mul_ps(v_t2,v_t4);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
v_zero,v_t4);
v_t2 = _mm512_add_ps(v_t3,v_t4);
/* f[j+k2+joff] = f[j+k1+joff] - t2; */
v_t3 = _mm512_load_ps((float *)&f[j+k1+joff]);
v_t4 = _mm512_sub_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[j+k2+joff],v_t4);
/* f[j+k1+joff] += t2; */
v_t4 = _mm512_add_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[j+k1+joff],v_t4);
}
/* loop over remaining elements */
for (j = nss; j < ns; j++) {
t1 = conjf(sct[kmr*j]);
t2 = t1*f[j+k2+joff];
f[j+k2+joff] = f[j+k1+joff] - t2;
f[j+k1+joff] += t2;
}
}
}
ns = ns2;
}
}
return;
}
/*--------------------------------------------------------------------*/
void ckncfft3rmz(float complex f[], int isign, int mixup[],
float complex sct[], int indx, int indy, int indz,
int nyi, int nyp, int nxhd, int nyd, int nzd,
int nxhyzd, int nxyzhd) {
/* this subroutine performs the z part of a three dimensional real to
complex fast fourier transform and its inverse, for a subset of y,
using complex arithmetic, with OpenMP
for isign = (-1,1), input: all, output: f
for isign = -1, approximate flop count: N*(5*log2(N) + 19/2)
for isign = 1, approximate flop count: N*(5*log2(N) + 15/2)
where N = (nx/2)*ny*nz
indx/indy/indz = exponent which determines length in x/y/z direction,
where nx=2**indx, ny=2**indy, nz=2**indz
if isign = -1, an inverse fourier transform in z is performed
f[l][k][j] = sum(f[i][k][j]*exp(-sqrt(-1)*2pi*l*i/nz))
if isign = 1, a forward fourier transform in z is performed
f[i][m][n] = sum(f[l][m][n]*exp(sqrt(-1)*2pi*l*i/nz))
mixup = array of bit reversed addresses
sct = sine/cosine table
nyi = initial y index used
nyp = number of y indices used
nxhd = first dimension of f
nyd,nzd = second and third dimensions of f
nxhyzd = maximum of (nx/2,ny,nz)
nxyzhd = maximum of (nx,ny,nz)/2
fourier coefficients are stored as follows:
f[l][k][j] = real, imaginary part of mode j,k,l
where 0 <= j < nx/2, 0 <= k < ny, 0 <= l < nz, except for
f[l][k][0] = real, imaginary part of mode nx/2,k,l,
where ny/2+1 <= k < ny and 0 <= l < nz, and
f[l][0][0] = real, imaginary part of mode nx/2,0,l,
f[l][ny/2][0] = real, imaginary part mode nx/2,ny/2,l,
where nz/2+1 <= l < nz, and
imag(f[0][0][0]) = real part of mode nx/2,0,0
imag(f[0][ny/2][0]) = real part of mode nx/2,ny/2,0
imag(f[nz/2][0][0]) = real part of mode nx/2,0,nz/2
imag(f[nz/2][ny/2][0]) = real part of mode nx/2,ny/2,nz/2
using jpl storage convention, as described in:
E. Huang, P. C. Liewer, V. K. Decyk, and R. D. Ferraro, "Concurrent
Three-Dimensional Fast Fourier Transform Algorithms for Coarse-Grained
Distributed Memory Parallel Computers," Caltech CRPC Report 217-50,
December 1993.
requires KNC, f needs to be 64 byte aligned
nxhd need to be a multiple of 8
written by viktor k. decyk, ucla
local data */
int indx1, ndx1yz, nx, nxh, ny, nyh;
int nz, nzh, nxyz, nxhyz, nyt, nrz, nrzb, nxhyd, ioff;
int i, j, k, l, n, ll, j1, j2, k1, k2, l1, ns, ns2, km, kmr, i0, i1;
int nss, nxhs;
float complex t1, t2;
__m512 v_zero, v_t1, v_t2, v_t3, v_t4;
if (isign==0)
return;
indx1 = indx - 1;
ndx1yz = indx1 > indy ? indx1 : indy;
ndx1yz = ndx1yz > indz ? ndx1yz : indz;
nx = 1L<<indx;
nxh = nx/2;
ny = 1L<<indy;
nyh = ny/2;
nz = 1L<<indz;
nzh = nz/2;
nxyz = nx > ny ? nx : ny;
nxyz = nxyz > nz ? nxyz : nz;
nxhyz = 1L<<ndx1yz;
nyt = nyi + nyp - 1;
nxhyd = nxhd*nyd;
/* nxhs: iteration count truncated to a multiple of 8 for the */
/* vector loops; the scalar loops handle the remainder        */
nxhs = 8*(nxh/8);
v_zero = _mm512_setzero_ps();
v_t1 = _mm512_setzero_ps();
v_t2 = _mm512_setzero_ps();
v_t3 = _mm512_setzero_ps();
v_t4 = _mm512_setzero_ps();
if (isign > 0)
goto L90;
/* inverse fourier transform */
nrzb = nxhyz/nz;
nrz = nxyz/nz;
#pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,km,kmr,k1,k2,j1,j2,ll,l1,i0,i1,ioff,t1,t2, \
v_t1,v_t2,v_t3,v_t4)
for (n = nyi-1; n < nyt; n++) {
ioff = nxhd*n;
/* bit-reverse array elements in z */
for (l = 0; l < nz; l++) {
ll = nxhyd*l;
l1 = (mixup[l] - 1)/nrzb;
if (l < l1) {
l1 = nxhyd*l1;
i0 = ioff + ll;
i1 = ioff + l1;
/* vector loop over elements in blocks of 8 */
for (i = 0; i < nxhs; i+=8) {
/* t1 = f[i+i1]; */
v_t1 = _mm512_load_ps((float *)&f[i+i1]);
/* f[i+i1] = f[i+i0]; */
v_t2 = _mm512_load_ps((float *)&f[i+i0]);
_mm512_store_ps((float *)&f[i+i1],v_t2);
/* f[i+i0] = t1; */
_mm512_store_ps((float *)&f[i+i0],v_t1);
}
/* loop over remaining elements */
for (i = nxhs; i < nxh; i++) {
t1 = f[i+i1];
f[i+i1] = f[i+i0];
f[i+i0] = t1;
}
}
}
/* finally transform in z */
ns = 1;
for (l = 0; l < indz; l++) {
ns2 = ns + ns;
km = nzh/ns;
kmr = km*nrz;
for (k = 0; k < km; k++) {
k1 = ns2*k;
k2 = k1 + ns;
for (j = 0; j < ns; j++) {
j1 = nxhyd*(j + k1);
j2 = nxhyd*(j + k2);
/* one twiddle factor broadcast to all 8 complex lanes */
t1 = sct[kmr*j];
v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1),
crealf(t1));
i0 = ioff + j1;
i1 = ioff + j2;
/* vector loop over elements in blocks of 8 */
/* complex multiply pattern: shuffle code 160 (0xa0) duplicates */
/* real parts, 177 (0xb1) swaps real/imag pairs, 245 (0xf5)     */
/* duplicates imaginary parts; mask 21845 (0x5555) negates the  */
/* real lanes to form the cross term of (a+bi)*(c+di)           */
for (i = 0; i < nxhs; i+=8) {
/* t2 = t1*f[i+i1]; */
v_t2 = _mm512_load_ps((float *)&f[i+i1]);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
v_t3 = _mm512_mul_ps(v_t2,v_t3);
v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
v_t4 = _mm512_mul_ps(v_t2,v_t4);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
v_zero,v_t4);
v_t2 = _mm512_add_ps(v_t3,v_t4);
/* f[i+i1] = f[i+i0] - t2; */
v_t3 = _mm512_load_ps((float *)&f[i+i0]);
v_t4 = _mm512_sub_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[i+i1],v_t4);
/* f[i+i0] += t2; */
v_t4 = _mm512_add_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[i+i0],v_t4);
}
/* loop over remaining elements */
for (i = nxhs; i < nxh; i++) {
t2 = t1*f[i+i1];
f[i+i1] = f[i+i0] - t2;
f[i+i0] += t2;
}
}
}
ns = ns2;
}
}
/* unscramble modes kx = 0, nx/2 */
/* the nyi == 1 branch covers the row ky = 0; the (nyh+1) test */
/* below covers the row ky = ny/2 (stored at k = nyh)          */
if (nyi==1) {
for (n = 1; n < nzh; n++) {
ll = nxhyd*n;
l1 = nxhyd*nz - ll;
t1 = f[l1];
f[l1] = 0.5*(cimagf(f[ll] + t1)
+ crealf(f[ll] - t1)*_Complex_I);
f[ll] = 0.5*(crealf(f[ll] + t1)
+ cimagf(f[ll] - t1)*_Complex_I);
}
}
if ((nyi <= (nyh+1)) && (nyt >= (nyh+1))) {
for (n = 1; n < nzh; n++) {
ll = nxhyd*n;
l1 = nxhyd*nz - ll;
i1 = nxhd*nyh;
i0 = i1 + ll;
i1 += l1;
t1 = f[i1];
f[i1] = 0.5*(cimagf(f[i0] + t1)
+ crealf(f[i0] - t1)*_Complex_I);
f[i0] = 0.5*(crealf(f[i0] + t1)
+ cimagf(f[i0] - t1)*_Complex_I);
}
}
return;
/* forward fourier transform */
L90: nrzb = nxhyz/nz;
nrz = nxyz/nz;
/* scramble modes kx = 0, nx/2 */
/* nyi == 1 covers the row ky = 0; the (nyh+1) test below covers */
/* the row ky = ny/2 (stored at k = nyh)                         */
if (nyi==1) {
for (n = 1; n < nzh; n++) {
ll = nxhyd*n;
l1 = nxhyd*nz - ll;
t1 = cimagf(f[l1]) + crealf(f[l1])*_Complex_I;
f[l1] = conjf(f[ll] - t1);
f[ll] += t1;
}
}
if ((nyi <= (nyh+1)) && (nyt >= (nyh+1))) {
for (n = 1; n < nzh; n++) {
ll = nxhyd*n;
l1 = nxhyd*nz - ll;
i1 = nxhd*nyh;
i0 = i1 + ll;
i1 += l1;
t1 = cimagf(f[i1]) + crealf(f[i1])*_Complex_I;
f[i1] = conjf(f[i0] - t1);
f[i0] += t1;
}
}
/* bit-reverse array elements in z */
#pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,km,kmr,k1,k2,j1,j2,ll,l1,i0,i1,ioff,t1,t2, \
v_t1,v_t2,v_t3,v_t4)
for (n = nyi-1; n < nyt; n++) {
ioff = nxhd*n;
for (l = 0; l < nz; l++) {
ll = nxhyd*l;
l1 = (mixup[l] - 1)/nrzb;
if (l < l1) {
l1 = nxhyd*l1;
i0 = ioff + ll;
i1 = ioff + l1;
/* vector loop over elements in blocks of 8 */
for (i = 0; i < nxhs; i+=8) {
/* t1 = f[i+i1]; */
v_t1 = _mm512_load_ps((float *)&f[i+i1]);
/* f[i+i1] = f[i+i0]; */
v_t2 = _mm512_load_ps((float *)&f[i+i0]);
_mm512_store_ps((float *)&f[i+i1],v_t2);
/* f[i+i0] = t1; */
_mm512_store_ps((float *)&f[i+i0],v_t1);
}
/* loop over remaining elements */
for (i = nxhs; i < nxh; i++) {
t1 = f[i+i1];
f[i+i1] = f[i+i0];
f[i+i0] = t1;
}
}
}
/* first transform in z */
ns = 1;
for (l = 0; l < indz; l++) {
ns2 = ns + ns;
km = nzh/ns;
kmr = km*nrz;
for (k = 0; k < km; k++) {
k1 = ns2*k;
k2 = k1 + ns;
for (j = 0; j < ns; j++) {
j1 = nxhyd*(j + k1);
j2 = nxhyd*(j + k2);
/* forward transform uses the conjugated twiddle factor */
t1 = conjf(sct[kmr*j]);
v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1),
crealf(t1));
i0 = ioff + j1;
i1 = ioff + j2;
/* vector loop over elements in blocks of 8 */
for (i = 0; i < nxhs; i+=8) {
/* t2 = t1*f[i+i1]; */
v_t2 = _mm512_load_ps((float *)&f[i+i1]);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
v_t3 = _mm512_mul_ps(v_t2,v_t3);
v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
v_t4 = _mm512_mul_ps(v_t2,v_t4);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
v_zero,v_t4);
v_t2 = _mm512_add_ps(v_t3,v_t4);
/* f[i+i1] = f[i+i0] - t2; */
v_t3 = _mm512_load_ps((float *)&f[i+i0]);
v_t4 = _mm512_sub_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[i+i1],v_t4);
/* f[i+i0] += t2; */
v_t4 = _mm512_add_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[i+i0],v_t4);
}
/* loop over remaining elements */
for (i = nxhs; i < nxh; i++) {
t2 = t1*f[i+i1];
f[i+i1] = f[i+i0] - t2;
f[i+i0] += t2;
}
}
}
ns = ns2;
}
}
return;
}
/*--------------------------------------------------------------------*/
void ckncfft3rm3xy(float complex f[], int isign, int mixup[],
float complex sct[], int indx, int indy, int indz,
int nzi, int nzp, int nxhd, int nyd, int nzd,
int nxhyzd, int nxyzhd) {
/* this subroutine performs the x-y part of 3 three dimensional complex
to real fast fourier transforms and their inverses, for a subset of z,
using complex arithmetic, with OpenMP
for isign = (-1,1), input: all, output: f
for isign = -1, approximate flop count: N*(5*log2(N) + 19/2)
for isign = 1, approximate flop count: N*(5*log2(N) + 15/2)
where N = (nx/2)*ny*nz
indx/indy/indz = exponent which determines length in x/y/z direction,
where nx=2**indx, ny=2**indy, nz=2**indz
if isign = -1, three inverse fourier transforms in x and y are
performed
f[i][m][n][0:2] = (1/nx*ny*nz)*sum(f[i][k][j][0:2]*
exp(-sqrt(-1)*2pi*n*j/nx)*exp(-sqrt(-1)*2pi*m*k/ny))
if isign = 1, three forward fourier transforms in x and y are
performed
f[l][k][j][0:2] = sum(f[l][m][n][0:2]*exp(sqrt(-1)*2pi*n*j/nx)*
exp(sqrt(-1)*2pi*m*k/ny))
mixup = array of bit reversed addresses
sct = sine/cosine table
nzi = initial z index used
nzp = number of z indices used
nxhd = second dimension of f
nyd,nzd = third and fourth dimensions of f
nxhyzd = maximum of (nx/2,ny,nz)
nxyzhd = maximum of (nx,ny,nz)/2
fourier coefficients are stored as follows:
f[l][k][j][0:2] = real, imaginary part of mode j,k,l
where 0 <= j < nx/2, 0 <= k < ny, 0 <= l < nz, except for
f[l][k][0][0:2] = real, imaginary part of mode nx/2,k,l,
where ny/2+1 <= k < ny and 0 <= l < nz, and
f[l][0][0][0:2] = real, imaginary part of mode nx/2,0,l,
f[l][ny/2][0][0:2] = real, imaginary part mode nx/2,ny/2,l,
where nz/2+1 <= l < nz, and
imag(f[0][0][0][0:2]) = real part of mode nx/2,0,0
imag(f[0][ny/2][0][0:2]) = real part of mode nx/2,ny/2,0
imag(f[nz/2][0][0][0:2]) = real part of mode nx/2,0,nz/2
imag(f[nz/2][ny/2][0][0:2]) = real part of mode nx/2,ny/2,nz/2
using jpl storage convention, as described in:
E. Huang, P. C. Liewer, V. K. Decyk, and R. D. Ferraro, "Concurrent
Three-Dimensional Fast Fourier Transform Algorithms for Coarse-Grained
Distributed Memory Parallel Computers," Caltech CRPC Report 217-50,
December 1993.
requires KNC, f needs to be 64 byte aligned
nxhd need to be a multiple of 2
f needs to have 4 components
written by viktor k. decyk, ucla
local data */
int indx1, ndx1yz, nx, nxh, nxhh, ny, nyh;
int nz, nxyz, nxhyz, nzt, nrx, nry, nrxb, nryb, nxhd4, nxhyd;
int i, j, k, l, n, nn, jj, j1, j2, k1, k2, ns, ns2, km, kmr, joff;
int nss, nxhs, nxhhs, itn;
float at1, at2, ani;
float complex t1, t2, t3, t4;
__m512i v_j, v_kmr, v_m, v_n, v_l, v_it;
__m512 v_zero, v_t1, v_t2, v_t3, v_t4, v_t5, v_ani, v_half;
v_j = _mm512_set_epi32(1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0);
if (isign==0)
return;
indx1 = indx - 1;
ndx1yz = indx1 > indy ? indx1 : indy;
ndx1yz = ndx1yz > indz ? ndx1yz : indz;
nx = 1L<<indx;
nxh = nx/2;
nxhh = nx/4;
ny = 1L<<indy;
nyh = ny/2;
nz = 1L<<indz;
nxyz = nx > ny ? nx : ny;
nxyz = nxyz > nz ? nxyz : nz;
nxhyz = 1L<<ndx1yz;
nzt = nzi + nzp - 1;
nxhd4 = 4*nxhd;
nxhyd = nxhd4*nyd;
nxhs = 2*(nxh/2);
nxhhs = 2*(nxhh/2);
itn = 1 > nxhhs ? 1 : nxhhs;
v_m = _mm512_set_epi32(1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0);
v_n = _mm512_set_epi32(7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8);
v_zero = _mm512_setzero_ps();
v_t1 = _mm512_setzero_ps();
v_t2 = _mm512_setzero_ps();
v_t3 = _mm512_setzero_ps();
v_t4 = _mm512_setzero_ps();
v_half = _mm512_set1_ps(0.5f);
if (isign > 0)
goto L230;
/* inverse fourier transform */
nrxb = nxhyz/nxh;
nrx = nxyz/nxh;
nryb = nxhyz/ny;
nry = nxyz/ny;
v_l = _mm512_set_epi32(15,11,14,10,13,9,12,8,7,3,6,2,5,1,4,0);
#pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,nss,km,kmr,k1,k2,jj,j1,j2,nn,joff,at1,at2, \
ani,t1,t2,t3,t4,v_it,v_kmr,v_t1,v_ani,v_t2,v_t3,v_t4,v_t5)
for (n = nzi-1; n < nzt; n++) {
nn = nxhyd*n;
/* swap complex components */
for (i = 0; i < ny; i++) {
joff = nxhd4*i + nn;
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nxhs; j+=2) {
/* at1 = cimagf(f[2+4*j+joff]); */
/* at2 = crealf(f[2+4*j+joff]); */
/* f[2+4*j+joff] = crealf(f[1+4*j+joff]) */
/* + crealf(f[3+4*j+joff])*_Complex_I; */
/* f[1+4*j+joff] = cimagf(f[4*j+joff]) + at1*_Complex_I; */
/* f[4*j+joff] = crealf(f[4*j+joff]) + at2*_Complex_I; */
v_t1 = _mm512_load_ps((float *)&f[4*j+joff]);
v_t1 = (__m512)_mm512_permutevar_epi32(v_l,(__m512i)v_t1);
_mm512_store_ps((float *)&f[4*j+joff],v_t1);
}
/* loop over remaining elements */
for (j = nxhs; j < nxh; j++) {
at1 = cimagf(f[2+4*j+joff]);
at2 = crealf(f[2+4*j+joff]);
f[2+4*j+joff] = crealf(f[1+4*j+joff])
+ crealf(f[3+4*j+joff])*_Complex_I;
f[1+4*j+joff] = cimagf(f[4*j+joff]) + at1*_Complex_I;
f[4*j+joff] = crealf(f[4*j+joff]) + at2*_Complex_I;
}
}
/* bit-reverse array elements in x */
for (j = 0; j < nxh; j++) {
j1 = (mixup[j] - 1)/nrxb;
if (j < j1) {
for (i = 0; i < ny; i++) {
joff = nxhd4*i + nn;
/* t1 = f[4*j1+joff]; */
/* t2 = f[1+4*j1+joff]; */
/* t3 = f[2+4*j1+joff]; */
v_t1 = _mm512_mask_loadunpacklo_ps(v_t1,
_mm512_int2mask(255),(float *)&f[4*j1+joff]);
v_t1 = _mm512_mask_loadunpackhi_ps(v_t1,
_mm512_int2mask(255),(float *)&f[4*j1+joff+8]);
/* f[4*j1+joff] = f[4*j+joff]; */
/* f[1+4*j1+joff] = f[1+4*j+joff]; */
/* f[2+4*j1+joff] = f[2+4*j+joff]; */
v_t2 = _mm512_mask_loadunpacklo_ps(v_t2,
_mm512_int2mask(255),(float *)&f[4*j+joff]);
v_t2 = _mm512_mask_loadunpackhi_ps(v_t2,
_mm512_int2mask(255),(float *)&f[4*j+joff+8]);
_mm512_mask_packstorelo_ps((float *)&f[4*j1+joff],
_mm512_int2mask(255),v_t2);
_mm512_mask_packstorehi_ps((float *)&f[4*j1+joff+8],
_mm512_int2mask(255),v_t2);
/* f[4*j+joff] = t1; */
/* f[1+4*j+joff] = t2; */
/* f[2+4*j+joff] = t3; */
_mm512_mask_packstorelo_ps((float *)&f[4*j+joff],
_mm512_int2mask(255),v_t1);
_mm512_mask_packstorehi_ps((float *)&f[4*j+joff+8],
_mm512_int2mask(255),v_t1);
}
}
}
/* first transform in x */
ns = 1;
for (l = 0; l < indx1; l++) {
ns2 = ns + ns;
km = nxhh/ns;
kmr = km*nrx;
nss = 2*(ns/2);
v_kmr = _mm512_set1_epi32(2*kmr);
for (k = 0; k < km; k++) {
k1 = 4*ns2*k;
k2 = k1 + 4*ns;
for (i = 0; i < ny; i++) {
joff = nxhd4*i + nn;
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nss; j+=2) {
/* t1 = sct[kmr*j]; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m);
v_t1 = _mm512_i32gather_ps(v_it,(float *)sct,4);
/* t2 = t1*f[4*j+k2+joff]; */
/* t3 = t1*f[1+4*j+k2+joff]; */
/* t4 = t1*f[2+4*j+k2+joff]; */
v_t2 = _mm512_load_ps((float *)&f[4*j+k2+joff]);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
v_t3 = _mm512_mul_ps(v_t2,v_t3);
v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
v_t4 = _mm512_mul_ps(v_t2,v_t4);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
v_zero,v_t4);
v_t2 = _mm512_add_ps(v_t3,v_t4);
/* f[4*j+k2+joff] = f[4*j+k1+joff] - t2; */
/* f[1+4*j+k2+joff] = f[1+4*j+k1+joff] - t3; */
/* f[2+4*j+k2+joff] = f[2+4*j+k1+joff] - t4; */
v_t3 = _mm512_load_ps((float *)&f[4*j+k1+joff]);
v_t4 = _mm512_sub_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[4*j+k2+joff],v_t4);
/* f[4*j+k1+joff] += t2; */
/* f[1+4*j+k1+joff] += t3; */
/* f[2+4*j+k1+joff] += t4; */
v_t4 = _mm512_add_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[4*j+k1+joff],v_t4);
}
/* loop over remaining elements */
for (j = nss; j < ns; j++) {
t1 = sct[kmr*j];
t2 = t1*f[4*j+k2+joff];
t3 = t1*f[1+4*j+k2+joff];
t4 = t1*f[2+4*j+k2+joff];
f[4*j+k2+joff] = f[4*j+k1+joff] - t2;
f[1+4*j+k2+joff] = f[1+4*j+k1+joff] - t3;
f[2+4*j+k2+joff] = f[2+4*j+k1+joff] - t4;
f[4*j+k1+joff] += t2;
f[1+4*j+k1+joff] += t3;
f[2+4*j+k1+joff] += t4;
}
}
}
ns = ns2;
}
/* unscramble coefficients and normalize */
kmr = nxyz/nx;
ani = 0.5/(((float) nx)*((float) ny)*((float) nz));
v_ani = _mm512_set1_ps(ani);
v_kmr = _mm512_set1_epi32(2*kmr);
for (k = 0; k < ny; k++) {
joff = nxhd4*k + nn;
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nxhhs; j+=2) {
/* t3 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m);
v_t3 = _mm512_i32gather_ps(v_it,(float *)sct,4);
v_t3 = _mm512_mask_sub_ps(v_t3,_mm512_int2mask(21845),
v_zero,v_t3);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,177);
/* for (jj = 0; jj < 3; jj++) { */
/* t2 = conjf(f[jj+4*(nxh-j)+joff]); */
v_t2 = _mm512_loadunpacklo_ps(v_t2,
(float *)&f[4*(nxh-j-1)+joff]);
v_t2 = _mm512_loadunpackhi_ps(v_t2,
(float *)&f[4*(nxh-j-1)+joff+8]);
/* reverse data */
v_t2 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t2);
v_t2 = _mm512_mask_sub_ps(v_t2,_mm512_int2mask(43690),
v_zero,v_t2);
/* t1 = f[jj+4*j+joff] + t2; */
v_t4 = _mm512_load_ps((float *)&f[4*j+joff]);
v_t1 = _mm512_add_ps(v_t4,v_t2);
/* t2 = (f[jj+4*j+joff] - t2)*t3; */
v_t2 = _mm512_sub_ps(v_t4,v_t2);
v_t5 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,160);
v_t5 = _mm512_mul_ps(v_t2,v_t5);
v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,245);
v_t4 = _mm512_mul_ps(v_t2,v_t4);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
v_zero,v_t4);
v_t2 = _mm512_add_ps(v_t5,v_t4);
/* f[jj+4*j+joff] = ani*(t1 + t2); */
v_t3 = _mm512_mul_ps(v_ani,_mm512_add_ps(v_t1,v_t2));
/* f[jj+4*(nxh-j)+joff] = ani*conjf(t1 - t2); */
/* } */
v_t4 = _mm512_sub_ps(v_t1,v_t2);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(43690),
v_zero,v_t4);
v_t4 = _mm512_mul_ps(v_ani,v_t4);
/* reverse data */
v_t4 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t4);
if (j==0) {
_mm512_mask_store_ps((float *)&f[4*j+joff],
_mm512_int2mask(65280),v_t3);
_mm512_mask_packstorelo_ps((float *)&f[4*(nxh-j-1)+joff],
_mm512_int2mask(255),v_t4);
_mm512_mask_packstorehi_ps((float *)&f[4*(nxh-j-1)+joff+8],
_mm512_int2mask(255),v_t4);
}
else {
_mm512_store_ps((float *)&f[4*j+joff],v_t3);
_mm512_packstorelo_ps((float *)&f[4*(nxh-j-1)+joff],v_t4);
_mm512_packstorehi_ps((float *)&f[4*(nxh-j-1)+joff+8],v_t4);
}
}
/* loop over remaining elements */
for (j = itn; j < nxhh; j++) {
t3 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I;
for (jj = 0; jj < 3; jj++) {
t2 = conjf(f[jj+4*(nxh-j)+joff]);
t1 = f[jj+4*j+joff] + t2;
t2 = (f[jj+4*j+joff] - t2)*t3;
f[jj+4*j+joff] = ani*(t1 + t2);
f[jj+4*(nxh-j)+joff] = ani*conjf(t1 - t2);
}
}
}
/* ani = 2.0*ani; */
v_ani = _mm512_add_ps(v_ani,v_ani);
for (k = 0; k < ny; k++) {
joff = nxhd4*k + nn;
/* for (jj = 0; jj < 3; jj++) { */
/* f[jj+4*nxhh+joff] = ani*conjf(f[jj+4*nxhh+joff]); */
v_t1 = _mm512_mask_load_ps(v_t1,_mm512_int2mask(63),
(float *)&f[4*nxhh+joff]);
v_t1 = _mm512_mask_sub_ps(v_t1,_mm512_int2mask(42),v_zero,
v_t1);
v_t1 = _mm512_mul_ps(v_ani,v_t1);
_mm512_mask_store_ps((float *)&f[4*nxhh+joff],
_mm512_int2mask(63),v_t1);
/* f[jj+joff] = ani*((crealf(f[jj+joff]) */
/* + cimagf(f[jj+joff])) */
/* + (crealf(f[jj+joff]) */
/* - cimagf(f[jj+joff]))*_Complex_I); */
/* } */
v_t2 = _mm512_mask_load_ps(v_t2,_mm512_int2mask(63),
(float *)&f[joff]);
v_t1 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t3 = _mm512_mask_sub_ps(v_t2,_mm512_int2mask(42),v_t1,v_t2);
v_t3 = _mm512_mask_add_ps(v_t3,_mm512_int2mask(21),v_t1,v_t2);
v_t3 = _mm512_mul_ps(v_ani,v_t3);
_mm512_mask_store_ps((float *)&f[joff],_mm512_int2mask(63),
v_t3);
}
/* bit-reverse array elements in y */
for (k = 0; k < ny; k++) {
joff = nxhd4*k + nn;
k1 = (mixup[k] - 1)/nryb;
if (k < k1) {
k1 = nxhd4*k1 + nn;
/* vector loop over elements in blocks of 2 */
for (i = 0; i < nxhs; i+=2) {
/* t1 = f[4*i+k1]; */
/* t2 = f[1+4*i+k1]; */
/* t3 = f[2+4*i+k1]; */
v_t1 = _mm512_load_ps((float *)&f[4*i+k1]);
/* f[4*i+k1] = f[4*i+joff]; */
/* f[1+4*i+k1] = f[1+4*i+joff]; */
/* f[2+4*i+k1] = f[2+4*i+joff]; */
v_t2 = _mm512_load_ps((float *)&f[4*i+joff]);
_mm512_store_ps((float *)&f[4*i+k1],v_t2);
/* f[4*i+joff] = t1; */
/* f[1+4*i+joff] = t2; */
/* f[2+4*i+joff] = t3; */
_mm512_store_ps((float *)&f[4*i+joff],v_t1);
}
/* loop over remaining elements */
for (i = nxhs; i < nxh; i++) {
t1 = f[4*i+k1];
t2 = f[1+4*i+k1];
t3 = f[2+4*i+k1];
f[4*i+k1] = f[4*i+joff];
f[1+4*i+k1] = f[1+4*i+joff];
f[2+4*i+k1] = f[2+4*i+joff];
f[4*i+joff] = t1;
f[1+4*i+joff] = t2;
f[2+4*i+joff] = t3;
}
}
}
/* then transform in y */
ns = 1;
for (l = 0; l < indy; l++) {
ns2 = ns + ns;
km = nyh/ns;
kmr = km*nry;
for (k = 0; k < km; k++) {
k1 = ns2*k;
k2 = k1 + ns;
for (j = 0; j < ns; j++) {
j1 = nxhd4*(j + k1) + nn;
j2 = nxhd4*(j + k2) + nn;
t1 = sct[kmr*j];
v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1),
crealf(t1));
/* vector loop over elements in blocks of 2 */
for (i = 0; i < nxhs; i+=2) {
/* t2 = t1*f[4*i+j2]; */
/* t3 = t1*f[1+4*i+j2]; */
/* t4 = t1*f[2+4*i+j2]; */
v_t2 = _mm512_load_ps((float *)&f[4*i+j2]);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
v_t3 = _mm512_mul_ps(v_t2,v_t3);
v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
v_t4 = _mm512_mul_ps(v_t2,v_t4);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
v_zero,v_t4);
v_t2 = _mm512_add_ps(v_t3,v_t4);
/* f[4*i+j2] = f[4*i+j1] - t2; */
/* f[1+4*i+j2] = f[1+4*i+j1] - t3; */
/* f[2+4*i+j2] = f[2+4*i+j1] - t4; */
v_t3 = _mm512_load_ps((float *)&f[4*i+j1]);
v_t4 = _mm512_sub_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[4*i+j2],v_t4);
/* f[4*i+j1] += t2; */
/* f[1+4*i+j1] += t3; */
/* f[2+4*i+j1] += t4; */
v_t4 = _mm512_add_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[4*i+j1],v_t4);
}
/* loop over remaining elements */
for (i = nxhs; i < nxh; i++) {
t2 = t1*f[4*i+j2];
t3 = t1*f[1+4*i+j2];
t4 = t1*f[2+4*i+j2];
f[4*i+j2] = f[4*i+j1] - t2;
f[1+4*i+j2] = f[1+4*i+j1] - t3;
f[2+4*i+j2] = f[2+4*i+j1] - t4;
f[4*i+j1] += t2;
f[1+4*i+j1] += t3;
f[2+4*i+j1] += t4;
}
}
}
ns = ns2;
}
/* unscramble modes kx = 0, nx/2 */
for (k = 1; k < nyh; k++) {
joff = nxhd4*k;
k1 = nxhd4*ny - joff + nn;
joff += nn;
/* for (jj = 0; jj < 3; jj++) { */
/* t1 = f[jj+k1]; */
v_t1 = _mm512_mask_load_ps(v_t1,_mm512_int2mask(63),
(float *)&f[k1]);
/* f[jj+k1] = 0.5*(cimagf(f[jj+joff] + t1) */
/* + crealf(f[jj+joff] - t1)*_Complex_I); */
v_t2 = _mm512_mask_load_ps(v_t2,_mm512_int2mask(63),
(float *)&f[joff]);
v_t3 = _mm512_mask_add_ps(v_t3,_mm512_int2mask(42),v_t2,v_t1);
v_t3 = _mm512_mask_sub_ps(v_t3,_mm512_int2mask(21),v_t2,v_t1);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,177);
v_t3 = _mm512_mul_ps(v_half,v_t3);
_mm512_mask_store_ps((float *)&f[k1],_mm512_int2mask(63),v_t3);
/* f[jj+joff] = 0.5*(crealf(f[jj+joff] + t1) */
/* + cimagf(f[jj+joff] - t1)*_Complex_I); */
/* } */
v_t2 = _mm512_mask_sub_ps(v_t2,_mm512_int2mask(42),v_t2,v_t1);
v_t2 = _mm512_mask_add_ps(v_t2,_mm512_int2mask(21),v_t2,v_t1);
v_t2 = _mm512_mul_ps(v_half,v_t2);
_mm512_mask_store_ps((float *)&f[joff],_mm512_int2mask(63),v_t2);
}
}
return;
/* forward fourier transform */
L230: nryb = nxhyz/ny;
nry = nxyz/ny;
nrxb = nxhyz/nxh;
nrx = nxyz/nxh;
v_l = _mm512_set_epi32(15,13,11,9,14,12,10,8,7,5,3,1,6,4,2,0);
#pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,nss,km,kmr,k1,k2,jj,j1,j2,nn,joff,at1,at2, \
t1,t2,t3,t4,v_it,v_kmr,v_t1,v_t2,v_t3,v_t4,v_t5)
for (n = nzi-1; n < nzt; n++) {
nn = nxhyd*n;
/* scramble modes kx = 0, nx/2 */
for (k = 1; k < nyh; k++) {
joff = nxhd4*k;
k1 = nxhd4*ny - joff + nn;
joff += nn;
/* for (jj = 0; jj < 3; jj++) { */
/* t1 = cimagf(f[jj+k1]) + crealf(f[jj+k1])*_Complex_I; */
v_t1 = _mm512_mask_load_ps(v_t1,_mm512_int2mask(63),
(float *)&f[k1]);
v_t1 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,177);
/* f[jj+k1] = conjf(f[jj+joff] - t1); */
v_t2 = _mm512_mask_load_ps(v_t2,_mm512_int2mask(63),
(float *)&f[joff]);
v_t3 = _mm512_mask_sub_ps(v_t3,_mm512_int2mask(63),v_t2,v_t1);
v_t3 = _mm512_mask_sub_ps(v_t3,_mm512_int2mask(42),
v_zero,v_t3);
_mm512_mask_store_ps((float *)&f[k1],_mm512_int2mask(63),v_t3);
/* f[jj+joff] += t1; */
/* } */
v_t2 = _mm512_mask_add_ps(v_t2,_mm512_int2mask(63),v_t2,v_t1);
_mm512_mask_store_ps((float *)&f[joff],_mm512_int2mask(63),
v_t2);
}
/* bit-reverse array elements in y */
for (k = 0; k < ny; k++) {
joff = nxhd4*k + nn;
k1 = (mixup[k] - 1)/nryb;
if (k < k1) {
k1 = nxhd4*k1 + nn;
/* vector loop over elements in blocks of 2 */
for (i = 0; i < nxhs; i+=2) {
/* t1 = f[4*i+k1]; */
/* t2 = f[1+4*i+k1]; */
/* t3 = f[2+4*i+k1]; */
v_t1 = _mm512_load_ps((float *)&f[4*i+k1]);
/* f[4*i+k1] = f[4*i+joff]; */
/* f[1+4*i+k1] = f[1+4*i+joff]; */
/* f[2+4*i+k1] = f[2+4*i+joff]; */
v_t2 = _mm512_load_ps((float *)&f[4*i+joff]);
_mm512_store_ps((float *)&f[4*i+k1],v_t2);
/* f[4*i+joff] = t1; */
/* f[1+4*i+joff] = t2; */
/* f[2+4*i+joff] = t3; */
_mm512_store_ps((float *)&f[4*i+joff],v_t1);
}
/* loop over remaining elements */
for (i = nxhs; i < nxh; i++) {
t1 = f[4*i+k1];
t2 = f[1+4*i+k1];
t3 = f[2+4*i+k1];
f[4*i+k1] = f[4*i+joff];
f[1+4*i+k1] = f[1+4*i+joff];
f[2+4*i+k1] = f[2+4*i+joff];
f[4*i+joff] = t1;
f[1+4*i+joff] = t2;
f[2+4*i+joff] = t3;
}
}
}
/* then transform in y */
ns = 1;
for (l = 0; l < indy; l++) {
ns2 = ns + ns;
km = nyh/ns;
kmr = km*nry;
for (k = 0; k < km; k++) {
k1 = ns2*k;
k2 = k1 + ns;
for (j = 0; j < ns; j++) {
j1 = nxhd4*(j + k1) + nn;
j2 = nxhd4*(j + k2) + nn;
t1 = conjf(sct[kmr*j]);
v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1),
crealf(t1));
/* vector loop over elements in blocks of 2 */
for (i = 0; i < nxhs; i+=2) {
/* t2 = t1*f[4*i+j2]; */
/* t3 = t1*f[1+4*i+j2]; */
/* t4 = t1*f[2+4*i+j2]; */
v_t2 = _mm512_load_ps((float *)&f[4*i+j2]);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
v_t3 = _mm512_mul_ps(v_t2,v_t3);
v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
v_t4 = _mm512_mul_ps(v_t2,v_t4);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
v_zero,v_t4);
v_t2 = _mm512_add_ps(v_t3,v_t4);
/* f[4*i+j2] = f[4*i+j1] - t2; */
/* f[1+4*i+j2] = f[1+4*i+j1] - t3; */
/* f[2+4*i+j2] = f[2+4*i+j1] - t4; */
v_t3 = _mm512_load_ps((float *)&f[4*i+j1]);
v_t4 = _mm512_sub_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[4*i+j2],v_t4);
/* f[4*i+j1] += t2; */
/* f[1+4*i+j1] += t3; */
/* f[2+4*i+j1] += t4; */
v_t4 = _mm512_add_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[4*i+j1],v_t4);
}
/* loop over remaining elements */
for (i = nxhs; i < nxh; i++) {
t2 = t1*f[4*i+j2];
t3 = t1*f[1+4*i+j2];
t4 = t1*f[2+4*i+j2];
f[4*i+j2] = f[4*i+j1] - t2;
f[1+4*i+j2] = f[1+4*i+j1] - t3;
f[2+4*i+j2] = f[2+4*i+j1] - t4;
f[4*i+j1] += t2;
f[1+4*i+j1] += t3;
f[2+4*i+j1] += t4;
}
}
}
ns = ns2;
}
/* scramble coefficients */
kmr = nxyz/nx;
v_kmr = _mm512_set1_epi32(2*kmr);
for (k = 0; k < ny; k++) {
joff = nxhd4*k + nn;
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nxhhs; j+=2) {
/* t3 = cimagf(sct[kmr*j]) + crealf(sct[kmr*j])*_Complex_I; */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m);
v_t3 = _mm512_i32gather_ps(v_it,(float *)sct,4);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,177);
/* for (jj = 0; jj < 3; jj++) { */
/* t2 = conjf(f[jj+4*(nxh-j)+joff]); */
v_t2 = _mm512_loadunpacklo_ps(v_t2,
(float *)&f[4*(nxh-j-1)+joff]);
v_t2 = _mm512_loadunpackhi_ps(v_t2,
(float *)&f[4*(nxh-j-1)+joff+8]);
/* reverse data */
v_t2 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t2);
v_t2 = _mm512_mask_sub_ps(v_t2,_mm512_int2mask(43690),
v_zero,v_t2);
/* t1 = f[jj+4*j+joff] + t2; */
v_t4 = _mm512_load_ps((float *)&f[4*j+joff]);
v_t1 = _mm512_add_ps(v_t4,v_t2);
/* t2 = (f[jj+4*j+joff] - t2)*t3; */
v_t2 = _mm512_sub_ps(v_t4,v_t2);
v_t5 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,160);
v_t5 = _mm512_mul_ps(v_t2,v_t5);
v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,245);
v_t4 = _mm512_mul_ps(v_t2,v_t4);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
v_zero,v_t4);
v_t2 = _mm512_add_ps(v_t5,v_t4);
/* f[jj+4*j+joff] = t1 + t2; */
v_t3 = _mm512_add_ps(v_t1,v_t2);
/* f[jj+4*(nxh-j)+joff] = conjf(t1 - t2); */
/* } */
v_t4 = _mm512_sub_ps(v_t1,v_t2);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(43690),
v_zero,v_t4);
/* reverse data */
v_t4 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t4);
if (j==0) {
_mm512_mask_store_ps((float *)&f[4*j+joff],
_mm512_int2mask(65280),v_t3);
_mm512_mask_packstorelo_ps((float *)&f[4*(nxh-j-1)+joff],
_mm512_int2mask(255),v_t4);
_mm512_mask_packstorehi_ps((float *)&f[4*(nxh-j-1)+joff+8],
_mm512_int2mask(255),v_t4);
}
else {
_mm512_store_ps((float *)&f[4*j+joff],v_t3);
_mm512_packstorelo_ps((float *)&f[4*(nxh-j-1)+joff],v_t4);
_mm512_packstorehi_ps((float *)&f[4*(nxh-j-1)+joff+8],v_t4);
}
}
/* loop over remaining elements */
for (j = itn; j < nxhh; j++) {
t3 = cimagf(sct[kmr*j]) + crealf(sct[kmr*j])*_Complex_I;
for (jj = 0; jj < 3; jj++) {
t2 = conjf(f[jj+4*(nxh-j)+joff]);
t1 = f[jj+4*j+joff] + t2;
t2 = (f[jj+4*j+joff] - t2)*t3;
f[jj+4*j+joff] = t1 + t2;
f[jj+4*(nxh-j)+joff] = conjf(t1 - t2);
}
}
}
for (k = 0; k < ny; k++) {
joff = nxhd4*k + nn;
/* for (jj = 0; jj < 3; jj++) { */
/* f[jj+4*nxhh+joff] = 2.0*conjf(f[jj+4*nxhh+joff]); */
v_t1 = _mm512_mask_load_ps(v_t1,_mm512_int2mask(63),
(float *)&f[4*nxhh+joff]);
v_t1 = _mm512_mask_sub_ps(v_t1,_mm512_int2mask(42),v_zero,
v_t1);
v_t1 = _mm512_add_ps(v_t1,v_t1);
_mm512_mask_store_ps((float *)&f[4*nxhh+joff],
_mm512_int2mask(63),v_t1);
/* f[jj+joff] = (crealf(f[jj+joff]) + cimagf(f[jj+joff])) */
/* + (crealf(f[jj+joff]) */
/* - cimagf(f[jj+joff]))*_Complex_I; */
/* } */
v_t2 = _mm512_mask_load_ps(v_t2,_mm512_int2mask(63),
(float *)&f[joff]);
v_t1 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t3 = _mm512_mask_sub_ps(v_t2,_mm512_int2mask(42),v_t1,v_t2);
v_t3 = _mm512_mask_add_ps(v_t3,_mm512_int2mask(21),v_t1,v_t2);
_mm512_mask_store_ps((float *)&f[joff],_mm512_int2mask(63),
v_t3);
}
/* bit-reverse array elements in x */
for (j = 0; j < nxh; j++) {
j1 = (mixup[j] - 1)/nrxb;
if (j < j1) {
for (i = 0; i < ny; i++) {
joff = nxhd4*i + nn;
/* t1 = f[4*j1+joff]; */
/* t2 = f[1+4*j1+joff]; */
/* t3 = f[2+4*j1+joff]; */
v_t1 = _mm512_mask_loadunpacklo_ps(v_t1,
_mm512_int2mask(255),(float *)&f[4*j1+joff]);
v_t1 = _mm512_mask_loadunpackhi_ps(v_t1,
_mm512_int2mask(255),(float *)&f[4*j1+joff+8]);
/* f[4*j1+joff] = f[4*j+joff]; */
/* f[1+4*j1+joff] = f[1+4*j+joff]; */
/* f[2+4*j1+joff] = f[2+4*j+joff]; */
v_t2 = _mm512_mask_loadunpacklo_ps(v_t2,
_mm512_int2mask(255),(float *)&f[4*j+joff]);
v_t2 = _mm512_mask_loadunpackhi_ps(v_t2,
_mm512_int2mask(255),(float *)&f[4*j+joff+8]);
_mm512_mask_packstorelo_ps((float *)&f[4*j1+joff],
_mm512_int2mask(255),v_t2);
_mm512_mask_packstorehi_ps((float *)&f[4*j1+joff+8],
_mm512_int2mask(255),v_t2);
/* f[4*j+joff] = t1; */
/* f[1+4*j+joff] = t2; */
/* f[2+4*j+joff] = t3; */
_mm512_mask_packstorelo_ps((float *)&f[4*j+joff],
_mm512_int2mask(255),v_t1);
_mm512_mask_packstorehi_ps((float *)&f[4*j+joff+8],
_mm512_int2mask(255),v_t1);
}
}
}
/* finally transform in x */
ns = 1;
for (l = 0; l < indx1; l++) {
ns2 = ns + ns;
km = nxhh/ns;
kmr = km*nrx;
nss = 2*(ns/2);
v_kmr = _mm512_set1_epi32(2*kmr);
for (k = 0; k < km; k++) {
k1 = 4*ns2*k;
k2 = k1 + 4*ns;
for (i = 0; i < ny; i++) {
joff = nxhd4*i + nn;
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nss; j+=2) {
/* t1 = conjf(sct[kmr*j]); */
v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m);
v_t1 = _mm512_i32gather_ps(v_it,(float *)sct,4);
v_t1 = _mm512_mask_sub_ps(v_t1,_mm512_int2mask(43690),
v_zero,v_t1);
/* t2 = t1*f[4*j+k2+joff]; */
/* t3 = t1*f[1+4*j+k2+joff]; */
/* t4 = t1*f[2+4*j+k2+joff]; */
v_t2 = _mm512_load_ps((float *)&f[4*j+k2+joff]);
v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
v_t3 = _mm512_mul_ps(v_t2,v_t3);
v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
v_t4 = _mm512_mul_ps(v_t2,v_t4);
v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
v_zero,v_t4);
v_t2 = _mm512_add_ps(v_t3,v_t4);
/* f[4*j+k2+joff] = f[4*j+k1+joff] - t2; */
/* f[1+4*j+k2+joff] = f[1+4*j+k1+joff] - t3; */
/* f[2+4*j+k2+joff] = f[2+4*j+k1+joff] - t4; */
v_t3 = _mm512_load_ps((float *)&f[4*j+k1+joff]);
v_t4 = _mm512_sub_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[4*j+k2+joff],v_t4);
/* f[4*j+k1+joff] += t2; */
/* f[1+4*j+k1+joff] += t3; */
/* f[2+4*j+k1+joff] += t4; */
v_t4 = _mm512_add_ps(v_t3,v_t2);
_mm512_store_ps((float *)&f[4*j+k1+joff],v_t4);
}
/* loop over remaining elements */
for (j = nss; j < ns; j++) {
t1 = conjf(sct[kmr*j]);
t2 = t1*f[4*j+k2+joff];
t3 = t1*f[1+4*j+k2+joff];
t4 = t1*f[2+4*j+k2+joff];
f[4*j+k2+joff] = f[4*j+k1+joff] - t2;
f[1+4*j+k2+joff] = f[1+4*j+k1+joff] - t3;
f[2+4*j+k2+joff] = f[2+4*j+k1+joff] - t4;
f[4*j+k1+joff] += t2;
f[1+4*j+k1+joff] += t3;
f[2+4*j+k1+joff] += t4;
}
}
}
ns = ns2;
}
/* swap complex components */
for (i = 0; i < ny; i++) {
joff = nxhd4*i + nn;
/* vector loop over elements in blocks of 2 */
for (j = 0; j < nxhs; j+=2) {
/* f[3+4*j+joff] = cimagf(f[2+4*j+joff]) */
/* + cimagf(f[3+4*j+joff])*_Complex_I; */
/* at1 = crealf(f[2+4*j+joff]); */
/* f[2+4*j+joff] = cimagf(f[4*j+joff]) */
/* + cimagf(f[1+4*j+joff])*_Complex_I; */
/* at2 = crealf(f[1+4*j+joff]); */
/* f[1+4*j+joff] = at1 + 0.0*_Complex_I; */
/* f[4*j+joff] = crealf(f[4*j+joff]) + at2*_Complex_I; */
v_t1 = _mm512_load_ps((float *)&f[4*j+joff]);
v_t1 = (__m512)_mm512_permutevar_epi32(v_l,(__m512i)v_t1);
_mm512_store_ps((float *)&f[4*j+joff],v_t1);
}
/* loop over remaining elements */
for (j = nxhs; j < nxh; j++) {
f[3+4*j+joff] = cimagf(f[2+4*j+joff])
+ cimagf(f[3+4*j+joff])*_Complex_I;
at1 = crealf(f[2+4*j+joff]);
f[2+4*j+joff] = cimagf(f[4*j+joff])
+ cimagf(f[1+4*j+joff])*_Complex_I;
at2 = crealf(f[1+4*j+joff]);
f[1+4*j+joff] = at1 + 0.0*_Complex_I;
f[4*j+joff] = crealf(f[4*j+joff]) + at2*_Complex_I;
}
}
}
return;
}
/*--------------------------------------------------------------------*/
void ckncfft3rm3z(float complex f[], int isign, int mixup[],
                  float complex sct[], int indx, int indy, int indz,
                  int nyi, int nyp, int nxhd, int nyd, int nzd,
                  int nxhyzd, int nxyzhd) {
/* this subroutine performs the z part of 3 three dimensional complex to
   real fast fourier transforms and their inverses, for a subset of y,
   using complex arithmetic, with OpenMP
   for isign = (-1,1), input: all, output: f
   for isign = -1, approximate flop count: N*(5*log2(N) + 19/2)
   for isign = 1, approximate flop count: N*(5*log2(N) + 15/2)
   where N = (nx/2)*ny*nz
   indx/indy/indz = exponent which determines length in x/y/z direction,
   where nx=2**indx, ny=2**indy, nz=2**indz
   if isign = -1, three inverse fourier transforms in z are performed
   f[l][k][j][0:2] = sum(f[i][k][j][0:2]*exp(-sqrt(-1)*2pi*l*i/nz))
   if isign = 1, three forward fourier transforms in z are performed
   f[i][m][n][0:2] = sum(f[l][m][n][0:2]*exp(sqrt(-1)*2pi*l*i/nz))
   mixup = array of bit reversed addresses
   sct = sine/cosine table
   nyi = initial y index used
   nyp = number of y indices used
   nxhd = second dimension of f
   nyd,nzd = third and fourth dimensions of f
   nxhyzd = maximum of (nx/2,ny,nz)
   nxyzhd = maximum of (nx,ny,nz)/2 (not referenced in this routine;
   kept so that all fft routines share the same argument list)
   fourier coefficients are stored as follows:
   f[l][k][j][0:2] = real, imaginary part of mode j,k,l
   where 0 <= j < nx/2, 0 <= k < ny, 0 <= l < nz, except for
   f[l][k][0][0:2] = real, imaginary part of mode nx/2,k,l,
   where ny/2+1 <= k < ny and 0 <= l < nz, and
   f[l][0][0][0:2] = real, imaginary part of mode nx/2,0,l,
   f[l][ny/2][0][0:2] = real, imaginary part mode nx/2,ny/2,l,
   where nz/2+1 <= l < nz, and
   imag(f[0][0][0][0:2]) = real part of mode nx/2,0,0
   imag(f[0][ny/2][0][0:2]) = real part of mode nx/2,ny/2,0
   imag(f[nz/2][0][0][0:2]) = real part of mode nx/2,0,nz/2
   imag(f[nz/2][ny/2][0][0:2]) = real part of mode nx/2,ny/2,nz/2
   using jpl storage convention, as described in:
   E. Huang, P. C. Liewer, V. K. Decyk, and R. D. Ferraro, "Concurrent
   Three-Dimensional Fast Fourier Transform Algorithms for Coarse-Grained
   Distributed Memory Parallel Computers," Caltech CRPC Report 217-50,
   December 1993.
   requires KNC, f needs to be 64 byte aligned
   nxhd needs to be a multiple of 2
   f needs to have 4 components
   written by viktor k. decyk, ucla
local data */
   int indx1, ndx1yz, nx, nxh, ny, nyh;
   int nz, nzh, nxyz, nxhyz, nyt, nrz, nrzb, nxhd4, nxhyd, ioff;
   int i, j, k, l, n, ll, jj, j1, j2, k1, k2, l1, ns, ns2, km, kmr;
   int i0, i1;
   int nxhs;
   float complex t1, t2, t3, t4;
   __m512 v_zero, v_t1, v_t2, v_t3, v_t4;
/* isign = 0: nothing to do */
   if (isign==0)
      return;
/* derive grid sizes and strides from the exponents */
   indx1 = indx - 1;
   ndx1yz = indx1 > indy ? indx1 : indy;
   ndx1yz = ndx1yz > indz ? ndx1yz : indz;
   nx = 1L<<indx;
   nxh = nx/2;
   ny = 1L<<indy;
   nyh = ny/2;
   nz = 1L<<indz;
   nzh = nz/2;
   nxyz = nx > ny ? nx : ny;
   nxyz = nxyz > nz ? nxyz : nz;
   nxhyz = 1L<<ndx1yz;
   nyt = nyi + nyp - 1;
/* nxhd4 = float-complex stride of one y row (4 components per mode) */
   nxhd4 = 4*nxhd;
/* nxhyd = stride of one z plane */
   nxhyd = nxhd4*nyd;
/* nxhs = length of x that can be handled in vector blocks of 2 modes */
   nxhs = 2*(nxh/2);
   v_zero = _mm512_setzero_ps();
   v_t1 = _mm512_setzero_ps();
   v_t2 = _mm512_setzero_ps();
   v_t3 = _mm512_setzero_ps();
   v_t4 = _mm512_setzero_ps();
   if (isign > 0)
      goto L110;
/* inverse fourier transform */
   nrzb = nxhyz/nz;
   nrz = nxyz/nz;
/* parallelize over the y rows [nyi-1,nyt) of this call's subset */
#pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,km,kmr,k1,k2,j1,j2,ll,l1,i0,i1,ioff,t1,t2,t3, \
t4,v_t1,v_t2,v_t3,v_t4)
   for (n = nyi-1; n < nyt; n++) {
      ioff = nxhd4*n;
/* bit-reverse array elements in z */
      for (l = 0; l < nz; l++) {
         ll = nxhyd*l;
         l1 = (mixup[l] - 1)/nrzb;
         if (l < l1) {
            l1 = nxhyd*l1;
            i0 = ioff + ll;
            i1 = ioff + l1;
/* vector loop over elements in blocks of 2 */
            for (i = 0; i < nxhs; i+=2) {
               /* t1 = f[4*i+i1];   */
               /* t2 = f[1+4*i+i1]; */
               /* t3 = f[2+4*i+i1]; */
               v_t1 = _mm512_load_ps((float *)&f[4*i+i1]);
               /* f[4*i+i1] = f[4*i+i0];     */
               /* f[1+4*i+i1] = f[1+4*i+i0]; */
               /* f[2+4*i+i1] = f[2+4*i+i0]; */
               v_t2 = _mm512_load_ps((float *)&f[4*i+i0]);
               _mm512_store_ps((float *)&f[4*i+i1],v_t2);
               /* f[4*i+i0] = t1; */
               /* f[1+4*i+i0] = t2; */
               /* f[2+4*i+i0] = t3; */
               _mm512_store_ps((float *)&f[4*i+i0],v_t1);
            }
/* loop over remaining elements */
            for (i = nxhs; i < nxh; i++) {
               t1 = f[4*i+i1];
               t2 = f[1+4*i+i1];
               t3 = f[2+4*i+i1];
               f[4*i+i1] = f[4*i+i0];
               f[1+4*i+i1] = f[1+4*i+i0];
               f[2+4*i+i1] = f[2+4*i+i0];
               f[4*i+i0] = t1;
               f[1+4*i+i0] = t2;
               f[2+4*i+i0] = t3;
            }
         }
      }
/* finally transform in z */
      ns = 1;
      for (l = 0; l < indz; l++) {
         ns2 = ns + ns;
         km = nzh/ns;
         kmr = km*nrz;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               j1 = nxhyd*(j + k1);
               j2 = nxhyd*(j + k2);
               t1 = sct[kmr*j];
/* broadcast twiddle factor t1 as (re,im) pairs across the register */
               v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1),
                                     crealf(t1));
               i0 = ioff + j1;
               i1 = ioff + j2;
/* vector loop over elements in blocks of 2 */
               for (i = 0; i < nxhs; i+=2) {
                  /* t2 = t1*f[4*i+i1];   */
                  /* t3 = t1*f[1+4*i+i1]; */
                  /* t4 = t1*f[2+4*i+i1]; */
                  /* complex multiply via shuffles: imm 160 duplicates */
                  /* the real parts of t1, imm 245 the imaginary parts, */
                  /* imm 177 swaps re/im of f; mask 21845 (0x5555)      */
                  /* negates the even lanes so the add yields           */
                  /* re*re-im*im in even lanes, re*im+im*re in odd ones */
                  v_t2 = _mm512_load_ps((float *)&f[4*i+i1]);
                  v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
                  v_t3 = _mm512_mul_ps(v_t2,v_t3);
                  v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
                  v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
                  v_t4 = _mm512_mul_ps(v_t2,v_t4);
                  v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
                                            v_zero,v_t4);
                  v_t2 = _mm512_add_ps(v_t3,v_t4);
                  /* f[4*i+i1] = f[4*i+i0] - t2;     */
                  /* f[1+4*i+i1] = f[1+4*i+i0] - t3; */
                  /* f[2+4*i+i1] = f[2+4*i+i0] - t4; */
                  v_t3 = _mm512_load_ps((float *)&f[4*i+i0]);
                  v_t4 = _mm512_sub_ps(v_t3,v_t2);
                  _mm512_store_ps((float *)&f[4*i+i1],v_t4);
                  /* f[4*i+i0] += t2; */
                  /* f[1+4*i+i0] += t3; */
                  /* f[2+4*i+i0] += t4; */
                  v_t4 = _mm512_add_ps(v_t3,v_t2);
                  _mm512_store_ps((float *)&f[4*i+i0],v_t4);
               }
/* loop over remaining elements */
               for (i = nxhs; i < nxh; i++) {
                  t2 = t1*f[4*i+i1];
                  t3 = t1*f[1+4*i+i1];
                  t4 = t1*f[2+4*i+i1];
                  f[4*i+i1] = f[4*i+i0] - t2;
                  f[1+4*i+i1] = f[1+4*i+i0] - t3;
                  f[2+4*i+i1] = f[2+4*i+i0] - t4;
                  f[4*i+i0] += t2;
                  f[1+4*i+i0] += t3;
                  f[2+4*i+i0] += t4;
               }
            }
         }
         ns = ns2;
      }
   }
/* unscramble modes kx = 0, nx/2 */
/* the ky = 0 row is processed only when this call covers nyi = 1 */
   if (nyi==1) {
      for (n = 1; n < nzh; n++) {
         ll = nxhyd*n;
         l1 = nxhyd*nz - ll;
         for (jj = 0; jj < 3; jj++) {
            t1 = f[jj+l1];
            f[jj+l1] = 0.5*(cimagf(f[jj+ll] + t1)
                       + crealf(f[jj+ll] - t1)*_Complex_I);
            f[jj+ll] = 0.5*(crealf(f[jj+ll] + t1)
                       + cimagf(f[jj+ll] - t1)*_Complex_I);
         }
      }
   }
/* the ky = ny/2 row is processed only when it lies in [nyi,nyt] */
   if ((nyi <= (nyh+1)) && (nyt >= (nyh+1))) {
      for (n = 1; n < nzh; n++) {
         ll = nxhyd*n;
         l1 = nxhyd*nz - ll;
         i1 = nxhd4*nyh;
         i0 = i1 + ll;
         i1 += l1;
         for (jj = 0; jj < 3; jj++) {
            t1 = f[jj+i1];
            f[jj+i1] = 0.5*(cimagf(f[jj+i0] + t1)
                       + crealf(f[jj+i0] - t1)*_Complex_I);
            f[jj+i0] = 0.5*(crealf(f[jj+i0] + t1)
                       + cimagf(f[jj+i0] - t1)*_Complex_I);
         }
      }
   }
   return;
/* forward fourier transform */
L110: nrzb = nxhyz/nz;
   nrz = nxyz/nz;
/* scramble modes kx = 0, nx/2 (inverse of the unscramble step above) */
   if (nyi==1) {
      for (n = 1; n < nzh; n++) {
         ll = nxhyd*n;
         l1 = nxhyd*nz - ll;
         for (jj = 0; jj < 3; jj++) {
            t1 = cimagf(f[jj+l1]) + crealf(f[jj+l1])*_Complex_I;
            f[jj+l1] = conjf(f[jj+ll] - t1);
            f[jj+ll] += t1;
         }
      }
   }
   if ((nyi <= (nyh+1)) && (nyt >= (nyh+1))) {
      for (n = 1; n < nzh; n++) {
         ll = nxhyd*n;
         l1 = nxhyd*nz - ll;
         i1 = nxhd4*nyh;
         i0 = i1 + ll;
         i1 += l1;
         for (jj = 0; jj < 3; jj++) {
            t1 = cimagf(f[jj+i1]) + crealf(f[jj+i1])*_Complex_I;
            f[jj+i1] = conjf(f[jj+i0] - t1);
            f[jj+i0] += t1;
         }
      }
   }
/* parallelize over the y rows [nyi-1,nyt) of this call's subset */
#pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,km,kmr,k1,k2,j1,j2,ll,l1,i0,i1,ioff,t1,t2,t3, \
t4,v_t1,v_t2,v_t3,v_t4)
   for (n = nyi-1; n < nyt; n++) {
      ioff = nxhd4*n;
/* bit-reverse array elements in z */
      for (l = 0; l < nz; l++) {
         ll = nxhyd*l;
         l1 = (mixup[l] - 1)/nrzb;
         if (l < l1) {
            l1 = nxhyd*l1;
            i0 = ioff + ll;
            i1 = ioff + l1;
/* vector loop over elements in blocks of 2 */
            for (i = 0; i < nxhs; i+=2) {
               /* t1 = f[4*i+i1];   */
               /* t2 = f[1+4*i+i1]; */
               /* t3 = f[2+4*i+i1]; */
               v_t1 = _mm512_load_ps((float *)&f[4*i+i1]);
               /* f[4*i+i1] = f[4*i+i0];     */
               /* f[1+4*i+i1] = f[1+4*i+i0]; */
               /* f[2+4*i+i1] = f[2+4*i+i0]; */
               v_t2 = _mm512_load_ps((float *)&f[4*i+i0]);
               _mm512_store_ps((float *)&f[4*i+i1],v_t2);
               /* f[4*i+i0] = t1; */
               /* f[1+4*i+i0] = t2; */
               /* f[2+4*i+i0] = t3; */
               _mm512_store_ps((float *)&f[4*i+i0],v_t1);
            }
/* loop over remaining elements */
            for (i = nxhs; i < nxh; i++) {
               t1 = f[4*i+i1];
               t2 = f[1+4*i+i1];
               t3 = f[2+4*i+i1];
               f[4*i+i1] = f[4*i+i0];
               f[1+4*i+i1] = f[1+4*i+i0];
               f[2+4*i+i1] = f[2+4*i+i0];
               f[4*i+i0] = t1;
               f[1+4*i+i0] = t2;
               f[2+4*i+i0] = t3;
            }
         }
      }
/* first transform in z */
      ns = 1;
      for (l = 0; l < indz; l++) {
         ns2 = ns + ns;
         km = nzh/ns;
         kmr = km*nrz;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
/* forward pass uses the conjugate twiddle factor */
               t1 = conjf(sct[kmr*j]);
               j1 = nxhyd*(j + k1);
               j2 = nxhyd*(j + k2);
               v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1),
                                     crealf(t1));
               i0 = ioff + j1;
               i1 = ioff + j2;
/* vector loop over elements in blocks of 2 */
               for (i = 0; i < nxhs; i+=2) {
                  /* t2 = t1*f[4*i+i1];   */
                  /* t3 = t1*f[1+4*i+i1]; */
                  /* t4 = t1*f[2+4*i+i1]; */
                  /* same shuffle/mask complex multiply as the inverse */
                  /* transform above                                   */
                  v_t2 = _mm512_load_ps((float *)&f[4*i+i1]);
                  v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
                  v_t3 = _mm512_mul_ps(v_t2,v_t3);
                  v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
                  v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
                  v_t4 = _mm512_mul_ps(v_t2,v_t4);
                  v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
                                            v_zero,v_t4);
                  v_t2 = _mm512_add_ps(v_t3,v_t4);
                  /* f[4*i+i1] = f[4*i+i0] - t2;     */
                  /* f[1+4*i+i1] = f[1+4*i+i0] - t3; */
                  /* f[2+4*i+i1] = f[2+4*i+i0] - t4; */
                  v_t3 = _mm512_load_ps((float *)&f[4*i+i0]);
                  v_t4 = _mm512_sub_ps(v_t3,v_t2);
                  _mm512_store_ps((float *)&f[4*i+i1],v_t4);
                  /* f[4*i+i0] += t2; */
                  /* f[1+4*i+i0] += t3; */
                  /* f[2+4*i+i0] += t4; */
                  v_t4 = _mm512_add_ps(v_t3,v_t2);
                  _mm512_store_ps((float *)&f[4*i+i0],v_t4);
               }
/* loop over remaining elements */
               for (i = nxhs; i < nxh; i++) {
                  t2 = t1*f[4*i+i1];
                  t3 = t1*f[1+4*i+i1];
                  t4 = t1*f[2+4*i+i1];
                  f[4*i+i1] = f[4*i+i0] - t2;
                  f[1+4*i+i1] = f[1+4*i+i0] - t3;
                  f[2+4*i+i1] = f[2+4*i+i0] - t4;
                  f[4*i+i0] += t2;
                  f[1+4*i+i0] += t3;
                  f[2+4*i+i0] += t4;
               }
            }
         }
         ns = ns2;
      }
   }
   return;
}
/*--------------------------------------------------------------------*/
/* Top-level 3D real-to-complex FFT driver for scalar data (packed format).
   isign < 0 performs the inverse transform (xy planes first, then z);
   isign > 0 performs the forward transform (z first, then xy planes);
   isign == 0 is a no-op. */
void ckncwfft3rmx(float complex f[], int isign, int mixup[],
                  float complex sct[], int indx, int indy, int indz,
                  int nxhd, int nyd, int nzd, int nxhyzd, int nxyzhd) {
/* local data */
   static int nyi = 1, nzi = 1;
   int ny, nz;
/* calculate range of indices */
   ny = 1L<<indy;
   nz = 1L<<indz;
   if (isign == 0)
      return;
   if (isign < 0) {
/* inverse fourier transform: xy planes, then z */
      ckncfft3rmxy(f,isign,mixup,sct,indx,indy,indz,nzi,nz,nxhd,nyd,nzd,
                   nxhyzd,nxyzhd);
      ckncfft3rmz(f,isign,mixup,sct,indx,indy,indz,nyi,ny,nxhd,nyd,nzd,
                  nxhyzd,nxyzhd);
   }
   else {
/* forward fourier transform: z, then xy planes */
      ckncfft3rmz(f,isign,mixup,sct,indx,indy,indz,nyi,ny,nxhd,nyd,nzd,
                  nxhyzd,nxyzhd);
      ckncfft3rmxy(f,isign,mixup,sct,indx,indy,indz,nzi,nz,nxhd,nyd,nzd,
                   nxhyzd,nxyzhd);
   }
   return;
}
/*--------------------------------------------------------------------*/
/* Top-level driver for three simultaneous 3D real-to-complex FFTs
   (vector data, packed format).  isign < 0: inverse transform, xy
   planes then z; isign > 0: forward transform, z then xy planes;
   isign == 0: no work is performed. */
void ckncwfft3rm3(float complex f[], int isign, int mixup[],
                  float complex sct[], int indx, int indy, int indz,
                  int nxhd, int nyd, int nzd, int nxhyzd, int nxyzhd) {
/* local data */
   static int nyi = 1, nzi = 1;
   int ny, nz;
/* calculate range of indices */
   ny = 1L<<indy;
   nz = 1L<<indz;
   if (isign == 0)
      return;
   if (isign < 0) {
/* inverse fourier transform: xy planes, then z */
      ckncfft3rm3xy(f,isign,mixup,sct,indx,indy,indz,nzi,nz,nxhd,nyd,
                    nzd,nxhyzd,nxyzhd);
      ckncfft3rm3z(f,isign,mixup,sct,indx,indy,indz,nyi,ny,nxhd,nyd,nzd,
                   nxhyzd,nxyzhd);
   }
   else {
/* forward fourier transform: z, then xy planes */
      ckncfft3rm3z(f,isign,mixup,sct,indx,indy,indz,nyi,ny,nxhd,nyd,nzd,
                   nxhyzd,nxyzhd);
      ckncfft3rm3xy(f,isign,mixup,sct,indx,indy,indz,nzi,nz,nxhd,nyd,
                    nzd,nxhyzd,nxyzhd);
   }
   return;
}
/* Interfaces to Fortran */
/*--------------------------------------------------------------------*/
/* Fortran-callable shim: Fortran passes every argument by reference, so
   scalar arguments are dereferenced before forwarding to ckncgppush3lt. */
void ckncgppush3lt_(float *ppart, float *fxyz, int *kpic, float *qbm,
                    float *dt, float *ek, int *idimp, int *nppmx,
                    int *nx, int *ny, int *nz, int *mx, int *my,
                    int *mz, int *nxv, int *nyv, int *nzv, int *mx1,
                    int *my1, int *mxyz1, int *ipbc) {
   ckncgppush3lt(ppart,fxyz,kpic,*qbm,*dt,ek,*idimp,*nppmx,*nx,*ny,*nz,
                 *mx,*my,*mz,*nxv,*nyv,*nzv,*mx1,*my1,*mxyz1,*ipbc);
   return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable shim for ckncgppushf3lt; scalars are dereferenced,
   arrays (and the output flag irc) are passed through as pointers. */
void ckncgppushf3lt_(float *ppart, float *fxyz, int *kpic, int *ncl,
                     int *ihole, float *qbm, float *dt, float *ek,
                     int *idimp, int *nppmx, int *nx, int *ny, int *nz,
                     int *mx, int *my, int *mz, int *nxv, int *nyv,
                     int *nzv, int *mx1, int *my1, int *mxyz1,
                     int *ntmax, int *irc) {
   ckncgppushf3lt(ppart,fxyz,kpic,ncl,ihole,*qbm,*dt,ek,*idimp,*nppmx,
                  *nx,*ny,*nz,*mx,*my,*mz,*nxv,*nyv,*nzv,*mx1,*my1,
                  *mxyz1,*ntmax,irc);
   return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable shim: dereferences scalar arguments and forwards to
   ckncgppost3lt (charge deposit). */
void ckncgppost3lt_(float *ppart, float *q, int *kpic, float *qm,
                    int *nppmx, int *idimp, int *mx, int *my, int *mz,
                    int *nxv, int *nyv, int *nzv, int *mx1, int *my1,
                    int *mxyz1) {
   ckncgppost3lt(ppart,q,kpic,*qm,*nppmx,*idimp,*mx,*my,*mz,*nxv,*nyv,
                 *nzv,*mx1,*my1,*mxyz1);
   return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable shim: dereferences scalar arguments and forwards to
   cknc2gppost3lt (alternate charge-deposit kernel). */
void cknc2gppost3lt_(float *ppart, float *q, int *kpic, float *qm,
                     int *nppmx, int *idimp, int *mx, int *my, int *mz,
                     int *nxv, int *nyv, int *nzv, int *mx1, int *my1,
                     int *mxyz1) {
   cknc2gppost3lt(ppart,q,kpic,*qm,*nppmx,*idimp,*mx,*my,*mz,*nxv,*nyv,
                  *nzv,*mx1,*my1,*mxyz1);
   return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable shim: dereferences scalar arguments and forwards to
   ckncpporder3lt (particle reordering); irc is an output flag. */
void ckncpporder3lt_(float *ppart, float *ppbuff, int *kpic, int *ncl,
                     int *ihole, int *idimp, int *nppmx, int *nx,
                     int *ny, int *nz, int *mx, int *my, int *mz,
                     int *mx1, int *my1, int *mz1, int *npbmx,
                     int *ntmax, int *irc) {
   ckncpporder3lt(ppart,ppbuff,kpic,ncl,ihole,*idimp,*nppmx,*nx,*ny,*nz,
                  *mx,*my,*mz,*mx1,*my1,*mz1,*npbmx,*ntmax,irc);
   return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable shim: dereferences scalar arguments and forwards to
   ckncpporderf3lt; irc is an output flag. */
void ckncpporderf3lt_(float *ppart, float *ppbuff, int *kpic, int *ncl,
                      int *ihole, int *idimp, int *nppmx, int *mx1,
                      int *my1, int *mz1, int *npbmx, int *ntmax,
                      int *irc) {
   ckncpporderf3lt(ppart,ppbuff,kpic,ncl,ihole,*idimp,*nppmx,*mx1,*my1,
                   *mz1,*npbmx,*ntmax,irc);
   return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable shim for cknccguard3l (copy field guard cells). */
void cknccguard3l_(float *fxyz, int *nx, int *ny, int *nz, int *nxe,
                   int *nye, int *nze) {
   cknccguard3l(fxyz,*nx,*ny,*nz,*nxe,*nye,*nze);
   return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable shim for ckncaguard3l (add charge guard cells). */
void ckncaguard3l_(float *q, int *nx, int *ny, int *nz, int *nxe,
                   int *nye, int *nze) {
   ckncaguard3l(q,*nx,*ny,*nz,*nxe,*nye,*nze);
   return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable shim for ckncmpois33 (Poisson field solver in
   Fourier space); we is the output field energy. */
void ckncmpois33_(float complex *q, float complex *fxyz, int *isign,
                  float complex *ffc, float *ax, float *ay, float *az,
                  float *affp, float *we, int *nx, int *ny, int *nz,
                  int *nxvh, int *nyv, int *nzv, int *nxhd, int *nyhd,
                  int *nzhd) {
   ckncmpois33(q,fxyz,*isign,ffc,*ax,*ay,*az,*affp,we,*nx,*ny,*nz,*nxvh,
               *nyv,*nzv,*nxhd,*nyhd,*nzhd);
   return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable shim for the scalar 3D FFT wrapper ckncwfft3rmx. */
void ckncwfft3rmx_(float complex *f, int *isign, int *mixup,
                   float complex *sct, int *indx, int *indy, int *indz,
                   int *nxhd, int *nyd, int *nzd, int *nxhyzd,
                   int *nxyzhd) {
   ckncwfft3rmx(f,*isign,mixup,sct,*indx,*indy,*indz,*nxhd,*nyd,*nzd,
                *nxhyzd,*nxyzhd);
   return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable shim for the 3-component 3D FFT wrapper ckncwfft3rm3. */
void ckncwfft3rm3_(float complex *f, int *isign, int *mixup,
                   float complex *sct, int *indx, int *indy, int *indz,
                   int *nxhd, int *nyd, int *nzd, int *nxhyzd,
                   int *nxyzhd) {
   ckncwfft3rm3(f,*isign,mixup,sct,*indx,*indy,*indz,*nxhd,*nyd,*nzd,
                *nxhyzd,*nxyzhd);
   return;
}
|
fci_contract.c | /* Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
#include <assert.h>
//#include <omp.h>
#include "config.h"
#include "vhf/fblas.h"
#include "np_helper/np_helper.h"
#include "fci.h"
// for (16e,16o) ~ 11 MB buffer = 120 * 12870 * 8
#define STRB_BLKSIZE 112
/*
* CPU timing of single thread can be estimated:
* na*nb*nnorb*8(bytes)*5 / (mem_freq*64 (*2 if dual-channel mem))
* + na*nb*nnorb**2 (*2 for spin1, *1 for spin0)
* / (CPU_freq (*4 for SSE3 blas, or *6-8 for AVX blas))
* where the 5 times memory accesses are 3 in prog_a_t1, prog0_b_t1,
* spread_b_t1 and 2 in spread_a_t1
*
* multi threads
* na*nb*nnorb*8(bytes)*2 / (mem_freq*64 (*2 if dual-channel mem)) due to single thread
* + na*nb*nnorb*8(bytes)*3 / max_mem_bandwidth due to N-thread
* + na*nb*nnorb**2 (*2 for spin1, *1 for spin0)
* / (CPU_freq (*4 for SSE3 blas, or *6-8 for AVX blas)) / num_threads
*/
/*
***********************************************************
*
* Need the permutation symmetry
* h2e[i,j,k,l] = h2e[j,i,k,l] = h2e[i,j,l,k] = h2e[j,i,l,k]
*
***********************************************************
*/
/*
* optimize for OpenMP, to reduce memory/CPU data transfer
* add software prefetch, it's especially important for OpenMP
*/
/*
 * For given stra_id, spread alpha-strings (which can propagate to stra_id)
* into t1[:nstrb,nnorb]
* str1-of-alpha -> create/annihilate -> str0-of-alpha
* ci0[:nstra,:nstrb] is contiguous in beta-strings
* bcount control the number of beta strings to be calculated.
* for spin=0 system, only lower triangle of the intermediate ci vector
* needs to be calculated
*/
void FCIprog_a_t1(double *ci0, double *t1,
                  int bcount, int stra_id, int strb_id,
                  int norb, int nstrb, int nlinka, _LinkTrilT *clink_indexa)
{
        /* For each link (ia, str1, sign) of alpha string stra_id, accumulate
         * sign * ci0[str1, strb_id:strb_id+bcount] into t1[ia, 0:bcount].
         * norb is unused here; it is kept for a uniform kernel signature. */
        ci0 += strb_id;
        int j, k, ia, sign;
        size_t str1;
        const _LinkTrilT *tab = clink_indexa + stra_id * nlinka;
        double *pt1, *pci;
        for (j = 0; j < nlinka; j++) {
                ia = EXTRACT_IA (tab[j]);
                str1 = EXTRACT_ADDR(tab[j]);
                sign = EXTRACT_SIGN(tab[j]);
                pt1 = t1 + ia*bcount;
                pci = ci0 + str1*nstrb;
                /* sign == 0 is the sentinel terminating the compressed table */
                if (sign == 0) {
                        break;
                } else if (sign > 0) {
                        for (k = 0; k < bcount; k++) {
                                pt1[k] += pci[k];
                        }
                } else if (sign < 0) {
                        for (k = 0; k < bcount; k++) {
                                pt1[k] -= pci[k];
                        }
                }
        }
}
/*
* For given stra_id, spread all beta-strings into t1[:nstrb,nnorb]
* all str0-of-beta -> create/annihilate -> str1-of-beta
* ci0[:nstra,:nstrb] is contiguous in beta-strings
* bcount control the number of beta strings to be calculated.
* for spin=0 system, only lower triangle of the intermediate ci vector
* needs to be calculated
*/
void FCIprog_b_t1(double *ci0, double *t1,
                  int bcount, int stra_id, int strb_id,
                  int norb, int nstrb, int nlinkb, _LinkTrilT *clink_indexb)
{
        /* For beta strings strb_id..strb_id+bcount-1, accumulate the linked
         * CI amplitudes of row stra_id into t1[ia, str0].  norb is unused;
         * kept for a uniform kernel signature. */
        int j, ia, str0, str1, sign;
        const _LinkTrilT *tab = clink_indexb + strb_id * nlinkb;
        double *pci = ci0 + stra_id*(size_t)nstrb;
        for (str0 = 0; str0 < bcount; str0++) {
                for (j = 0; j < nlinkb; j++) {
                        ia = EXTRACT_IA (tab[j]);
                        str1 = EXTRACT_ADDR(tab[j]);
                        sign = EXTRACT_SIGN(tab[j]);
                        /* sign == 0 terminates this string's link list */
                        if (sign == 0) {
                                break;
                        } else {
                                t1[ia*bcount+str0] += sign * pci[str1];
                        }
                }
                tab += nlinkb;
        }
}
/*
* spread t1 into ci1
*/
void FCIspread_a_t1(double *ci1, double *t1,
                    int bcount, int stra_id, int strb_id,
                    int norb, int nstrb, int nlinka, _LinkTrilT *clink_indexa)
{
        /* Scatter: inverse of FCIprog_a_t1.  For each link (ia, str1, sign)
         * of alpha string stra_id, add sign * t1[ia, 0:bcount] into
         * ci1[str1, strb_id:strb_id+bcount].  norb is unused. */
        ci1 += strb_id;
        int j, k, ia, sign;
        size_t str1;
        const _LinkTrilT *tab = clink_indexa + stra_id * nlinka;
        double *cp0, *cp1;
        for (j = 0; j < nlinka; j++) {
                ia = EXTRACT_IA (tab[j]);
                str1 = EXTRACT_ADDR(tab[j]);
                sign = EXTRACT_SIGN(tab[j]);
                cp0 = t1 + ia*bcount;
                cp1 = ci1 + str1*nstrb;
                /* sign == 0 is the end-of-table sentinel */
                if (sign == 0) {
                        break;
                } else if (sign > 0) {
                        for (k = 0; k < bcount; k++) {
                                cp1[k] += cp0[k];
                        }
                } else {
                        for (k = 0; k < bcount; k++) {
                                cp1[k] -= cp0[k];
                        }
                }
        }
}
/* Scatter t1 back into row stra_id of ci1 through the beta link table:
 * inverse of FCIprog_b_t1.  norb is unused; kept for uniform signature. */
void FCIspread_b_t1(double *ci1, double *t1,
                    int bcount, int stra_id, int strb_id,
                    int norb, int nstrb, int nlinkb, _LinkTrilT *clink_indexb)
{
        int j, ia, str0, str1, sign;
        const _LinkTrilT *tab = clink_indexb + strb_id * nlinkb;
        double *pci = ci1 + stra_id * (size_t)nstrb;
        for (str0 = 0; str0 < bcount; str0++) {
                for (j = 0; j < nlinkb; j++) {
                        ia = EXTRACT_IA (tab[j]);
                        str1 = EXTRACT_ADDR(tab[j]);
                        sign = EXTRACT_SIGN(tab[j]);
                        /* sign == 0 terminates this string's link list */
                        if (sign == 0) {
                                break;
                        } else {
                                pci[str1] += sign * t1[ia*bcount+str0];
                        }
                }
                tab += nlinkb;
        }
}
/*
* f1e_tril is the 1e hamiltonian for spin alpha
*/
void FCIcontract_a_1e(double *f1e_tril, double *ci0, double *ci1,
                      int norb, int nstra, int nstrb, int nlinka, int nlinkb,
                      int *link_indexa, int *link_indexb)
{
        /* ci1 += h1e(alpha) . ci0: for every alpha string str0 and every
         * link (ia, str1, sign), add sign * f1e_tril[ia] * ci0[str0, :]
         * into ci1[str1, :].  ci1 is accumulated into, not zeroed here.
         * norb, nlinkb, link_indexb are unused (uniform signature). */
        int j, k, ia, sign;
        size_t str0, str1;
        double *pci0, *pci1;
        double tmp;
        _LinkTrilT *tab;
        /* pack the 4-int link table into bitfield entries for faster access */
        _LinkTrilT *clink = malloc(sizeof(_LinkTrilT) * nlinka * nstra);
        FCIcompress_link_tril(clink, link_indexa, nstra, nlinka);
        for (str0 = 0; str0 < nstra; str0++) {
                tab = clink + str0 * nlinka;
                for (j = 0; j < nlinka; j++) {
                        ia = EXTRACT_IA (tab[j]);
                        str1 = EXTRACT_ADDR(tab[j]);
                        sign = EXTRACT_SIGN(tab[j]);
                        pci0 = ci0 + str0 * nstrb;
                        pci1 = ci1 + str1 * nstrb;
                        /* sign may be 0 (sentinel); tmp is then 0 and the
                         * accumulation below is a harmless no-op */
                        tmp = sign * f1e_tril[ia];
                        for (k = 0; k < nstrb; k++) {
                                pci1[k] += tmp * pci0[k];
                        }
                }
        }
        free(clink);
}
/*
* f1e_tril is the 1e hamiltonian for spin beta
*/
void FCIcontract_b_1e(double *f1e_tril, double *ci0, double *ci1,
                      int norb, int nstra, int nstrb, int nlinka, int nlinkb,
                      int *link_indexa, int *link_indexb)
{
        /* ci1 += h1e(beta) . ci0: for every beta string k of every alpha row
         * str0, scatter sign * ci0[str0, k] * f1e_tril[ia] into
         * ci1[str0, str1] along the beta link table.  ci1 is accumulated
         * into, not zeroed here.  norb, nlinka, link_indexa are unused. */
        int j, k, ia, sign;
        size_t str0, str1;
        double *pci1;
        double tmp;
        _LinkTrilT *tab;
        _LinkTrilT *clink = malloc(sizeof(_LinkTrilT) * nlinkb * nstrb);
        FCIcompress_link_tril(clink, link_indexb, nstrb, nlinkb);
        for (str0 = 0; str0 < nstra; str0++) {
                pci1 = ci1 + str0 * nstrb;
                for (k = 0; k < nstrb; k++) {
                        tab = clink + k * nlinkb;
                        tmp = ci0[str0*nstrb+k];
                        for (j = 0; j < nlinkb; j++) {
                                ia = EXTRACT_IA (tab[j]);
                                str1 = EXTRACT_ADDR(tab[j]);
                                sign = EXTRACT_SIGN(tab[j]);
                                /* sign == 0 sentinel entries contribute 0 */
                                pci1[str1] += sign * tmp * f1e_tril[ia];
                        }
                }
        }
        free(clink);
}
/* Spin-restricted 1e contraction: zero ci1 and apply the alpha 1e
 * operator with identical alpha/beta dimensions and link tables.
 * NOTE(review): only the alpha half is contracted here; presumably the
 * caller symmetrizes (ci1 + ci1.T) as for the 2e spin0 case -- confirm. */
void FCIcontract_1e_spin0(double *f1e_tril, double *ci0, double *ci1,
                          int norb, int na, int nlink, int *link_index)
{
        memset(ci1, 0, sizeof(double)*na*na);
        FCIcontract_a_1e(f1e_tril, ci0, ci1, norb, na, na, nlink, nlink,
                         link_index, link_index);
}
/*
* spread t1 into ci1buf
*/
static void spread_bufa_t1(double *ci1, double *t1, int nrow_t1,
                           int bcount, int stra_id, int strb_id,
                           int norb, int nstrb, int nlinka, _LinkTrilT *clink_indexa)
{
        /* Variant of FCIspread_a_t1 whose t1 row stride (nrow_t1) may differ
         * from the number of columns actually scattered (bcount); used to
         * spread into the per-thread ci1buf.  strb_id and norb are unused. */
        int j, k, ia, sign;
        size_t str1;
        const _LinkTrilT *tab = clink_indexa + stra_id * nlinka;
        double *cp0, *cp1;
        for (j = 0; j < nlinka; j++) {
                ia = EXTRACT_IA (tab[j]);
                str1 = EXTRACT_ADDR(tab[j]);
                sign = EXTRACT_SIGN(tab[j]);
                cp0 = t1 + ia*nrow_t1;
                cp1 = ci1 + str1*nstrb;
                /* sign == 0 is the end-of-table sentinel */
                if (sign == 0) {
                        break;
                } else if (sign > 0) {
                        for (k = 0; k < bcount; k++) {
                                cp1[k] += cp0[k];
                        }
                } else {
                        for (k = 0; k < bcount; k++) {
                                cp1[k] -= cp0[k];
                        }
                }
        }
}
/*
* bcount_for_spread_a is different for spin1 and spin0
*/
static void ctr_rhf2e_kern(double *eri, double *ci0, double *ci1,
                           double *ci1buf, double *t1buf,
                           int bcount_for_spread_a, int ncol_ci1buf,
                           int bcount, int stra_id, int strb_id,
                           int norb, int na, int nb, int nlinka, int nlinkb,
                           _LinkTrilT *clink_indexa, _LinkTrilT *clink_indexb)
{
        /* One RHF-2e contraction step for alpha string stra_id and beta
         * block [strb_id, strb_id+bcount): gather t1, multiply by the
         * nnorb x nnorb ERI block (t1buf holds t1 then vt1), and scatter
         * the result along beta (into ci1) and alpha (into ci1buf). */
        const char TRANS_N = 'N';
        const double D0 = 0;
        const double D1 = 1;
        const int nnorb = norb * (norb+1)/2;   /* tril pair count */
        double *t1 = t1buf;
        double *vt1 = t1buf + nnorb*bcount;
        memset(t1, 0, sizeof(double)*nnorb*bcount);
        FCIprog_a_t1(ci0, t1, bcount, stra_id, strb_id,
                     norb, nb, nlinka, clink_indexa);
        FCIprog_b_t1(ci0, t1, bcount, stra_id, strb_id,
                     norb, nb, nlinkb, clink_indexb);
        /* vt1(bcount,nnorb) = t1(bcount,nnorb) . eri(nnorb,nnorb) */
        dgemm_(&TRANS_N, &TRANS_N, &bcount, &nnorb, &nnorb,
               &D1, t1, &bcount, eri, &nnorb, &D0, vt1, &bcount);
        FCIspread_b_t1(ci1, vt1, bcount, stra_id, strb_id,
                       norb, nb, nlinkb, clink_indexb);
        /* alpha scatter goes to the thread-private ci1buf; the truncated
         * column count bcount_for_spread_a implements the spin0 triangle */
        //FCIspread_a_t1(ci1buf, vt1, bcount_for_spread_a, stra_id, 0,
        //               norb, ncol_ci1buf, nlinka, clink_indexa);
        spread_bufa_t1(ci1buf, vt1, bcount, bcount_for_spread_a, stra_id, 0,
                       norb, ncol_ci1buf, nlinka, clink_indexa);
}
/*
 * out[i, 0:ni] += in[i, 0:ni] for i in [0, count), where out rows have
 * stride no and in rows have stride ni.
 * Loop counters are size_t to match the extents: the previous int
 * counters mixed signed/unsigned in the comparisons and would wrap for
 * counts beyond INT_MAX.
 */
void FCIaxpy2d(double *out, double *in, size_t count, size_t no, size_t ni)
{
        size_t i, j;
        for (i = 0; i < count; i++) {
                for (j = 0; j < ni; j++) {
                        out[i*no+j] += in[i*ni+j];
                }
        }
}
/* Cooperative reduction: every thread of the enclosing parallel region
 * must call this.  The `count` rows are partitioned evenly over threads;
 * each thread sums its slice of rows over ALL threads' buffers in[it]
 * into out.  Callers must place a barrier before (buffers complete) and
 * after (out complete) the call. */
static void _reduce(double *out, double **in, size_t count, size_t no, size_t ni)
{
        unsigned int nthreads = omp_get_num_threads();
        unsigned int thread_id = omp_get_thread_num();
        size_t blksize = (count + nthreads - 1) / nthreads;
        size_t start = thread_id * blksize;
        size_t end = MIN(start + blksize, count);
        double *src;
        size_t it, i, j;
        for (it = 0; it < nthreads; it++) {
                src = in[it];
                for (i = start; i < end; i++) {
                        for (j = 0; j < ni; j++) {
                                /* out rows have stride no; src rows stride ni */
                                out[i*no+j] += src[i*ni+j];
                        }
                }
        }
}
/*
* nlink = nocc*nvir, num. all possible strings that a string can link to
* link_index[str0] == linking map between str0 and other strings
* link_index[str0][ith-linking-string] ==
* [tril(creation_op,annihilation_op),0,linking-string-id,sign]
* FCIcontract_2e_spin0 only compute half of the contraction, due to the
* symmetry between alpha and beta spin. The right contracted ci vector
* is (ci1+ci1.T)
*/
void FCIcontract_2e_spin0(double *eri, double *ci0, double *ci1,
                          int norb, int na, int nlink, int *link_index)
{
        /* Spin-restricted 2e contraction.  Only the lower triangle of the
         * intermediate CI vector is computed (alpha/beta symmetry); the
         * caller forms ci1 + ci1.T.  Each thread accumulates alpha scatters
         * into a private ci1buf, merged by the cooperative _reduce. */
        _LinkTrilT *clink = malloc(sizeof(_LinkTrilT) * nlink * na);
        FCIcompress_link_tril(clink, link_index, na, nlink);
        memset(ci1, 0, sizeof(double)*na*na);
        double *ci1bufs[MAX_THREADS];
#pragma omp parallel
{
        int strk, ib;
        size_t blen;
        /* t1buf holds t1 and vt1: 2 * nnorb * STRB_BLKSIZE doubles */
        double *t1buf = malloc(sizeof(double) * (STRB_BLKSIZE*norb*(norb+1)+2));
        double *ci1buf = malloc(sizeof(double) * (na*STRB_BLKSIZE+2));
        ci1bufs[omp_get_thread_num()] = ci1buf;
        for (ib = 0; ib < na; ib += STRB_BLKSIZE) {
                blen = MIN(STRB_BLKSIZE, na-ib);
                memset(ci1buf, 0, sizeof(double) * na*blen);
                /* chunk size matches STRB_BLKSIZE so each thread's strk
                 * range aligns with the beta blocking */
#pragma omp for schedule(static, 112)
/* strk starts from MAX(strk0, ib), because [0:ib,0:ib] have been evaluated */
                for (strk = ib; strk < na; strk++) {
                        /* the two MIN() column counts restrict the update to
                         * the lower triangle of the (strk, beta) plane */
                        ctr_rhf2e_kern(eri, ci0, ci1, ci1buf, t1buf,
                                       MIN(STRB_BLKSIZE, strk-ib), blen,
                                       MIN(STRB_BLKSIZE, strk+1-ib),
                                       strk, ib, norb, na, na, nlink, nlink,
                                       clink, clink);
                }
// NPomp_dsum_reduce_inplace(ci1bufs, blen*na);
//#pragma omp master
//                FCIaxpy2d(ci1+ib, ci1buf, na, na, blen);
/* all ci1bufs must be complete before the cooperative reduction */
#pragma omp barrier
                _reduce(ci1+ib, ci1bufs, na, na, blen);
// An explicit barrier to ensure ci1 is updated. Without barrier, there may
// occur race condition between FCIaxpy2d and ctr_rhf2e_kern
#pragma omp barrier
        }
        free(ci1buf);
        free(t1buf);
}
        free(clink);
}
void FCIcontract_2e_spin1(double *eri, double *ci0, double *ci1,
                          int norb, int na, int nb, int nlinka, int nlinkb,
                          int *link_indexa, int *link_indexb)
{
        /* Spin-unrestricted-dimension (na x nb) RHF 2e contraction:
         * ci1 = contraction of eri with ci0 over both spins.  The full
         * rectangle is computed (no triangle trick, unlike spin0). */
        _LinkTrilT *clinka = malloc(sizeof(_LinkTrilT) * nlinka * na);
        _LinkTrilT *clinkb = malloc(sizeof(_LinkTrilT) * nlinkb * nb);
        FCIcompress_link_tril(clinka, link_indexa, na, nlinka);
        FCIcompress_link_tril(clinkb, link_indexb, nb, nlinkb);
        memset(ci1, 0, sizeof(double)*na*nb);
        double *ci1bufs[MAX_THREADS];
#pragma omp parallel
{
        int strk, ib;
        size_t blen;
        /* t1buf holds t1 and vt1: 2 * nnorb * STRB_BLKSIZE doubles */
        double *t1buf = malloc(sizeof(double) * (STRB_BLKSIZE*norb*(norb+1)+2));
        double *ci1buf = malloc(sizeof(double) * (na*STRB_BLKSIZE+2));
        ci1bufs[omp_get_thread_num()] = ci1buf;
        for (ib = 0; ib < nb; ib += STRB_BLKSIZE) {
                blen = MIN(STRB_BLKSIZE, nb-ib);
                memset(ci1buf, 0, sizeof(double) * na*blen);
#pragma omp for schedule(static)
                for (strk = 0; strk < na; strk++) {
                        ctr_rhf2e_kern(eri, ci0, ci1, ci1buf, t1buf,
                                       blen, blen, blen, strk, ib,
                                       norb, na, nb, nlinka, nlinkb,
                                       clinka, clinkb);
                }
// NPomp_dsum_reduce_inplace(ci1bufs, blen*na);
//#pragma omp master
//                FCIaxpy2d(ci1+ib, ci1buf, na, nb, blen);
/* all ci1bufs must be complete before the cooperative reduction */
#pragma omp barrier
                _reduce(ci1+ib, ci1bufs, na, nb, blen);
// An explicit barrier to ensure ci1 is updated. Without barrier, there may
// occur race condition between FCIaxpy2d and ctr_rhf2e_kern
#pragma omp barrier
        }
        free(ci1buf);
        free(t1buf);
}
        free(clinka);
        free(clinkb);
}
/*
* eri_ab is mixed integrals (alpha,alpha|beta,beta), |beta,beta) in small strides
*/
static void ctr_uhf2e_kern(double *eri_aa, double *eri_ab, double *eri_bb,
                           double *ci0, double *ci1, double *ci1buf, double *t1buf,
                           int bcount, int stra_id, int strb_id,
                           int norb, int na, int nb, int nlinka, int nlinkb,
                           _LinkTrilT *clink_indexa, _LinkTrilT *clink_indexb)
{
        /* UHF 2e kernel: gather alpha (t1a) and beta (t1b) intermediates,
         * combine with the three integral blocks, scatter the beta result
         * to ci1 and the alpha result to the thread-private ci1buf.
         * eri_ab is stored with the beta pair index in small strides, hence
         * the transposed first dgemm.  t1buf holds t1a, t1b and vt1. */
        const char TRANS_T = 'T';
        const char TRANS_N = 'N';
        const double D0 = 0;
        const double D1 = 1;
        const int nnorb = norb * (norb+1)/2;
        double *t1a = t1buf;
        double *t1b = t1a + nnorb*bcount;
        double *vt1 = t1b + nnorb*bcount;
        memset(t1a, 0, sizeof(double)*nnorb*bcount);
        memset(t1b, 0, sizeof(double)*nnorb*bcount);
        FCIprog_a_t1(ci0, t1a, bcount, stra_id, strb_id,
                     norb, nb, nlinka, clink_indexa);
        FCIprog_b_t1(ci0, t1b, bcount, stra_id, strb_id,
                     norb, nb, nlinkb, clink_indexb);
        /* beta output: vt1 = t1a . eri_ab^T + t1b . eri_bb */
        dgemm_(&TRANS_N, &TRANS_T, &bcount, &nnorb, &nnorb,
               &D1, t1a, &bcount, eri_ab, &nnorb, &D0, vt1, &bcount);
        dgemm_(&TRANS_N, &TRANS_N, &bcount, &nnorb, &nnorb,
               &D1, t1b, &bcount, eri_bb, &nnorb, &D1, vt1, &bcount);
        FCIspread_b_t1(ci1, vt1, bcount, stra_id, strb_id,
                       norb, nb, nlinkb, clink_indexb);
        /* alpha output: vt1 = t1a . eri_aa + t1b . eri_ab */
        dgemm_(&TRANS_N, &TRANS_N, &bcount, &nnorb, &nnorb,
               &D1, t1a, &bcount, eri_aa, &nnorb, &D0, vt1, &bcount);
        dgemm_(&TRANS_N, &TRANS_N, &bcount, &nnorb, &nnorb,
               &D1, t1b, &bcount, eri_ab, &nnorb, &D1, vt1, &bcount);
        FCIspread_a_t1(ci1buf, vt1, bcount, stra_id, 0,
                       norb, bcount, nlinka, clink_indexa);
}
void FCIcontract_uhf2e(double *eri_aa, double *eri_ab, double *eri_bb,
                       double *ci0, double *ci1,
                       int norb, int na, int nb, int nlinka, int nlinkb,
                       int *link_indexa, int *link_indexb)
{
        /* UHF 2e contraction driver: same blocking/reduction scheme as
         * FCIcontract_2e_spin1, with three integral blocks (aa, ab, bb)
         * handled by ctr_uhf2e_kern. */
        _LinkTrilT *clinka = malloc(sizeof(_LinkTrilT) * nlinka * na);
        _LinkTrilT *clinkb = malloc(sizeof(_LinkTrilT) * nlinkb * nb);
        FCIcompress_link_tril(clinka, link_indexa, na, nlinka);
        FCIcompress_link_tril(clinkb, link_indexb, nb, nlinkb);
        memset(ci1, 0, sizeof(double)*na*nb);
        double *ci1bufs[MAX_THREADS];
#pragma omp parallel
{
        int strk, ib;
        size_t blen;
        /* t1buf holds t1a, t1b and vt1: 3 * nnorb * STRB_BLKSIZE doubles fit
         * in 2 * STRB_BLKSIZE * norb*(norb+1) */
        double *t1buf = malloc(sizeof(double) * (STRB_BLKSIZE*norb*(norb+1)*2+2));
        double *ci1buf = malloc(sizeof(double) * (na*STRB_BLKSIZE+2));
        ci1bufs[omp_get_thread_num()] = ci1buf;
        for (ib = 0; ib < nb; ib += STRB_BLKSIZE) {
                blen = MIN(STRB_BLKSIZE, nb-ib);
                memset(ci1buf, 0, sizeof(double) * na*blen);
#pragma omp for schedule(static)
                for (strk = 0; strk < na; strk++) {
                        ctr_uhf2e_kern(eri_aa, eri_ab, eri_bb, ci0, ci1,
                                       ci1buf, t1buf, blen, strk, ib,
                                       norb, na, nb, nlinka, nlinkb,
                                       clinka, clinkb);
                }
// NPomp_dsum_reduce_inplace(ci1bufs, blen*na);
//#pragma omp master
//                FCIaxpy2d(ci1+ib, ci1buf, na, nb, blen);
/* all ci1bufs must be complete before the cooperative reduction */
#pragma omp barrier
                _reduce(ci1+ib, ci1bufs, na, nb, blen);
// An explicit barrier to ensure ci1 is updated. Without barrier, there may
// occur race condition between FCIaxpy2d and ctr_uhf2e_kern
#pragma omp barrier
        }
        free(t1buf);
        free(ci1buf);
}
        free(clinka);
        free(clinkb);
}
/*************************************************
* hdiag
*************************************************/
void FCImake_hdiag_uhf(double *hdiag, double *h1e_a, double *h1e_b,
                       double *jdiag_aa, double *jdiag_ab, double *jdiag_bb,
                       double *kdiag_aa, double *kdiag_bb,
                       int norb, int nstra, int nstrb, int nocca, int noccb,
                       int *occslista, int *occslistb)
{
        /* Diagonal of the UHF Hamiltonian: for every determinant (ia, ib)
         * sum the occupied-orbital 1e energies (e1) and pairwise Coulomb
         * minus exchange energies (e2).  e2 double counts each pair (and
         * the ab term carries a factor 2 for ab + ba), so it is halved in
         * the final assignment. */
#pragma omp parallel
{
        int j, j0, k0, jk, jk0;
        size_t ia, ib;
        double e1, e2;
        int *paocc, *pbocc;   /* occupied-orbital lists of the current strings */
#pragma omp for schedule(static)
        for (ia = 0; ia < nstra; ia++) {
                paocc = occslista + ia * nocca;
                for (ib = 0; ib < nstrb; ib++) {
                        e1 = 0;
                        e2 = 0;
                        pbocc = occslistb + ib * noccb;
                        for (j0 = 0; j0 < nocca; j0++) {
                                j = paocc[j0];
                                jk0 = j * norb;
                                e1 += h1e_a[j*norb+j];
                                for (k0 = 0; k0 < nocca; k0++) { // (alpha|alpha)
                                        jk = jk0 + paocc[k0];
                                        e2 += jdiag_aa[jk] - kdiag_aa[jk];
                                }
                                for (k0 = 0; k0 < noccb; k0++) { // (alpha|beta)
                                        jk = jk0 + pbocc[k0];
                                        e2 += jdiag_ab[jk] * 2;
                                }
                        }
                        for (j0 = 0; j0 < noccb; j0++) {
                                j = pbocc[j0];
                                jk0 = j * norb;
                                e1 += h1e_b[j*norb+j];
                                for (k0 = 0; k0 < noccb; k0++) { // (beta|beta)
                                        jk = jk0 + pbocc[k0];
                                        e2 += jdiag_bb[jk] - kdiag_bb[jk];
                                }
                        }
                        hdiag[ia*nstrb+ib] = e1 + e2 * .5;
                }
        }
}
}
/* Spin-restricted Hamiltonian diagonal: reuse the UHF routine with
 * identical alpha and beta quantities. */
void FCImake_hdiag(double *hdiag, double *h1e, double *jdiag, double *kdiag,
                   int norb, int na, int nocc, int *occslst)
{
        FCImake_hdiag_uhf(hdiag, h1e, h1e, jdiag, jdiag, jdiag, kdiag, kdiag,
                          norb, na, na, nocc, nocc, occslst, occslst);
}
/*
 * Index of a set bit in r (bit 0 = least significant); returns 0 when
 * r == 0 in the fallback path.
 * NOTE(review): the ffsll branch returns the LOWEST set bit while the
 * portable fallback returns the HIGHEST set bit -- for masks with more
 * than one bit the two builds pick different (but consistent within a
 * build) orbitals; confirm this is intended.
 */
static int first1(uint64_t r)
{
#ifdef HAVE_FFS
        return ffsll(r) - 1;
#else
        /* Binary search for the highest set bit, shifting r down as the
         * upper half is selected. */
        int pos = 0;
        if (r & 0xffffffff00000000ULL) { pos += 32; r >>= 32; }
        if (r & 0xffff0000ULL)         { pos += 16; r >>= 16; }
        if (r & 0xff00ULL)             { pos += 8;  r >>= 8; }
        if (r & 0xf0ULL)               { pos += 4;  r >>= 4; }
        if (r & 0xcULL)                { pos += 2;  r >>= 2; }
        if (r & 0x2ULL)                { pos += 1; }
        return pos;
#endif
}
/*************************************************
* pspace Hamiltonian, ref CPL, 169, 463
*************************************************/
/*
* sub-space Hamiltonian (tril part) of the determinants (stra,strb)
*/
void FCIpspace_h0tril_uhf(double *h0, double *h1e_a, double *h1e_b,
                          double *g2e_aa, double *g2e_ab, double *g2e_bb,
                          uint64_t *stra, uint64_t *strb,
                          int norb, int np)
{
        /* Fill the strict lower triangle of the np x np pspace Hamiltonian
         * for determinants (stra[i], strb[i]) via Slater-Condon rules: only
         * pairs differing by at most two spin orbitals contribute.  The
         * diagonal and cases with more than a double excitation are left
         * untouched.  d2/d3 are strides into the norb^4 integral arrays. */
        const int d2 = norb * norb;
        const int d3 = norb * norb * norb;
#pragma omp parallel
{
        int i, j, k, pi, pj, pk, pl;
        int n1da, n1db;       /* popcounts of the alpha/beta difference masks */
        uint64_t da, db, str1;
        double tmp;
#pragma omp for schedule(dynamic)
        for (i = 0; i < np; i++) {
        for (j = 0; j < i; j++) {
                da = stra[i] ^ stra[j];   /* orbitals where alpha strings differ */
                db = strb[i] ^ strb[j];
                n1da = FCIpopcount_1(da);
                n1db = FCIpopcount_1(db);
                switch (n1da) {
                case 0: switch (n1db) {
                        /* single beta excitation pj -> pi */
                        case 2:
                        pi = first1(db & strb[i]);
                        pj = first1(db & strb[j]);
                        tmp = h1e_b[pi*norb+pj];
                        for (k = 0; k < norb; k++) {
                                if (stra[i] & (1ULL<<k)) {
                                        /* Coulomb with occupied alpha k;
                                         * g2e_ab indexed as [k,k,pi,pj] */
                                        tmp += g2e_ab[pi*norb+pj+k*d3+k*d2];
                                }
                                if (strb[i] & (1ULL<<k)) {
                                        /* Coulomb - exchange with occupied beta k */
                                        tmp += g2e_bb[pi*d3+pj*d2+k*norb+k]
                                             - g2e_bb[pi*d3+k*d2+k*norb+pj];
                                }
                        }
                        if (FCIcre_des_sign(pi, pj, strb[j]) > 0) {
                                h0[i*np+j] = tmp;
                        } else {
                                h0[i*np+j] = -tmp;
                        } break;
                        /* double beta excitation (pj,pl) -> (pi,pk) */
                        case 4:
                        pi = first1(db & strb[i]);
                        pj = first1(db & strb[j]);
                        pk = first1((db & strb[i]) ^ (1ULL<<pi));
                        pl = first1((db & strb[j]) ^ (1ULL<<pj));
                        /* str1: intermediate string after the first cre/des */
                        str1 = strb[j] ^ (1ULL<<pi) ^ (1ULL<<pj);
                        if (FCIcre_des_sign(pi, pj, strb[j])
                           *FCIcre_des_sign(pk, pl, str1) > 0) {
                                h0[i*np+j] = g2e_bb[pi*d3+pj*d2+pk*norb+pl]
                                           - g2e_bb[pi*d3+pl*d2+pk*norb+pj];
                        } else {
                                h0[i*np+j] =-g2e_bb[pi*d3+pj*d2+pk*norb+pl]
                                           + g2e_bb[pi*d3+pl*d2+pk*norb+pj];
                        } } break;
                case 2: switch (n1db) {
                        /* single alpha excitation pj -> pi */
                        case 0:
                        pi = first1(da & stra[i]);
                        pj = first1(da & stra[j]);
                        tmp = h1e_a[pi*norb+pj];
                        for (k = 0; k < norb; k++) {
                                if (strb[i] & (1ULL<<k)) {
                                        tmp += g2e_ab[pi*d3+pj*d2+k*norb+k];
                                }
                                if (stra[i] & (1ULL<<k)) {
                                        tmp += g2e_aa[pi*d3+pj*d2+k*norb+k]
                                             - g2e_aa[pi*d3+k*d2+k*norb+pj];
                                }
                        }
                        if (FCIcre_des_sign(pi, pj, stra[j]) > 0) {
                                h0[i*np+j] = tmp;
                        } else {
                                h0[i*np+j] = -tmp;
                        } break;
                        /* simultaneous single alpha and single beta excitation */
                        case 2:
                        pi = first1(da & stra[i]);
                        pj = first1(da & stra[j]);
                        pk = first1(db & strb[i]);
                        pl = first1(db & strb[j]);
                        if (FCIcre_des_sign(pi, pj, stra[j])
                           *FCIcre_des_sign(pk, pl, strb[j]) > 0) {
                                h0[i*np+j] = g2e_ab[pi*d3+pj*d2+pk*norb+pl];
                        } else {
                                h0[i*np+j] =-g2e_ab[pi*d3+pj*d2+pk*norb+pl];
                        } } break;
                case 4: switch (n1db) {
                        /* double alpha excitation (pj,pl) -> (pi,pk) */
                        case 0:
                        pi = first1(da & stra[i]);
                        pj = first1(da & stra[j]);
                        pk = first1((da & stra[i]) ^ (1ULL<<pi));
                        pl = first1((da & stra[j]) ^ (1ULL<<pj));
                        str1 = stra[j] ^ (1ULL<<pi) ^ (1ULL<<pj);
                        if (FCIcre_des_sign(pi, pj, stra[j])
                           *FCIcre_des_sign(pk, pl, str1) > 0) {
                                h0[i*np+j] = g2e_aa[pi*d3+pj*d2+pk*norb+pl]
                                           - g2e_aa[pi*d3+pl*d2+pk*norb+pj];
                        } else {
                                h0[i*np+j] =-g2e_aa[pi*d3+pj*d2+pk*norb+pl]
                                           + g2e_aa[pi*d3+pl*d2+pk*norb+pj];
                        }
                        } break;
                }
        } }
}
}
/* Spin-restricted pspace Hamiltonian: reuse the UHF routine with the
 * same 1e and 2e integrals for all spin blocks. */
void FCIpspace_h0tril(double *h0, double *h1e, double *g2e,
                      uint64_t *stra, uint64_t *strb, int norb, int np)
{
        FCIpspace_h0tril_uhf(h0, h1e, h1e, g2e, g2e, g2e, stra, strb, norb, np);
}
/***********************************************************************
*
* With symmetry
*
* Note the ordering in eri and the index in link_index
* eri is a tril matrix, it should be reordered wrt the irrep of the
* direct product E_i^j. The 2D array eri(ij,kl) is a diagonal block
* matrix. Each block is associated with an irrep.
* link_index[str_id,pair_id,0] which is the index of pair_id, should be
* reordered wrt the irreps accordingly
*
 * dimirrep stores the number of occurrences of each irrep
*
***********************************************************************/
static void pick_link_by_irrep(_LinkTrilT *clink, int *link_index,
                               int nstr, int nlink, int eri_irrep)
{
        /* For each string, compress into clink only the link entries whose
         * irrep (link_index[.,1]) matches eri_irrep, preserving their order.
         * Each link_index entry is 4 ints: [ia, irrep, addr, sign].  When
         * fewer than nlink entries match, a sign == 0 sentinel terminates
         * the per-string list. */
        int i, j, k;
        for (i = 0; i < nstr; i++) {
                for (k = 0, j = 0; k < nlink; k++) {
                        if (link_index[k*4+1] == eri_irrep) {
                                clink[j].ia = link_index[k*4+0];
                                clink[j].addr = link_index[k*4+2];
                                clink[j].sign = link_index[k*4+3];
                                j++;
                        }
                }
                if (j < nlink) {
                        clink[j].sign = 0;   /* end-of-list sentinel */
                }
                clink += nlink;
                link_index += nlink * 4;
        }
}
static void ctr_rhf2esym_kern1(double *eri, double *ci0, double *ci1ab,
                               double *ci1buf, double *t1buf, int ncol_ci1buf,
                               int bcount, int stra_id, int strb_id,
                               int nnorb, int nb_intermediate,
                               int na, int nb, int nlinka, int nlinkb,
                               _LinkTrilT *clink_indexa, _LinkTrilT *clink_indexb)
{
        /* Symmetry-adapted variant of ctr_rhf2e_kern: only the alpha gather
         * feeds t1; the eri block is nnorb x nnorb within one irrep.  The
         * beta scatter targets the intermediate-irrep vector ci1ab, the
         * alpha scatter the thread-private ci1buf.  norb arguments of the
         * helpers are passed as 0 since they are unused. */
        const char TRANS_N = 'N';
        const double D0 = 0;
        const double D1 = 1;
        double *t1 = t1buf;
        double *vt1 = t1buf + nnorb*bcount;
        memset(t1, 0, sizeof(double)*nnorb*bcount);
        FCIprog_a_t1(ci0, t1, bcount, stra_id, strb_id,
                     0, nb, nlinka, clink_indexa);
        dgemm_(&TRANS_N, &TRANS_N, &bcount, &nnorb, &nnorb,
               &D1, t1, &bcount, eri, &nnorb, &D0, vt1, &bcount);
        FCIspread_b_t1(ci1ab, vt1, bcount, stra_id, strb_id,
                       0, nb_intermediate, nlinkb, clink_indexb);
        spread_bufa_t1(ci1buf, vt1, bcount, bcount, stra_id, 0,
                       0, ncol_ci1buf, nlinka, clink_indexa);
}
static void loop_c2e_symm1(double *eri, double *ci0, double *ci1aa, double *ci1ab,
                           int nnorb, int na_intermediate, int nb_intermediate,
                           int na, int nb, int nlinka, int nlinkb,
                           _LinkTrilT *clinka, _LinkTrilT *clinkb)
{
        /* Blocked OpenMP driver around ctr_rhf2esym_kern1: iterate beta
         * blocks of width STRB_BLKSIZE, parallelize over intermediate alpha
         * strings, then merge the thread-private alpha buffers into ci1aa
         * with the cooperative _reduce (beta updates go straight to ci1ab). */
        double *ci1bufs[MAX_THREADS];
#pragma omp parallel
{
        int strk, ib;
        size_t blen;
        /* t1buf holds t1 and vt1: 2 * nnorb * STRB_BLKSIZE doubles */
        double *t1buf = malloc(sizeof(double) * (STRB_BLKSIZE*nnorb*2+2));
        double *ci1buf = malloc(sizeof(double) * (na*STRB_BLKSIZE+2));
        ci1bufs[omp_get_thread_num()] = ci1buf;
        for (ib = 0; ib < nb; ib += STRB_BLKSIZE) {
                blen = MIN(STRB_BLKSIZE, nb-ib);
                memset(ci1buf, 0, sizeof(double) * na*blen);
#pragma omp for schedule(static)
                for (strk = 0; strk < na_intermediate; strk++) {
                        ctr_rhf2esym_kern1(eri, ci0, ci1ab, ci1buf, t1buf,
                                           blen, blen, strk, ib,
                                           nnorb, nb_intermediate, na, nb,
                                           nlinka, nlinkb, clinka, clinkb);
                }
// NPomp_dsum_reduce_inplace(ci1bufs, blen*na);
//#pragma omp master
//                FCIaxpy2d(ci1aa+ib, ci1buf, na, nb, blen);
/* all ci1bufs must be complete before the cooperative reduction */
#pragma omp barrier
                _reduce(ci1aa+ib, ci1bufs, na, nb, blen);
// An explicit barrier to ensure ci1 is updated. Without barrier, there may
// occur race condition between FCIaxpy2d and ctr_rhf2esym_kern1
#pragma omp barrier
        }
        free(ci1buf);
        free(t1buf);
}
}
#define TOTIRREPS 8
void FCIcontract_2e_symm1(double **eris, double **ci0, double **ci1,
                          int norb, int *nas, int *nbs, int nlinka, int nlinkb,
                          int **linka, int **linkb, int *dimirrep, int wfnsym)
{
        /* Symmetry-adapted 2e contraction: arrays are indexed per irrep
         * (TOTIRREPS blocks).  For each alpha irrep and each excitation
         * irrep ai_ir with nonzero dimension, filter the link tables down
         * to ai_ir and contract the matching CI blocks.  norb is unused. */
        int i;
        int na = 0;
        int nb = 0;
        /* size the scratch link tables by the largest irrep block */
        for (i = 0; i < TOTIRREPS; i++) {
                na = MAX(nas[i], na);
                nb = MAX(nbs[i], nb);
        }
        _LinkTrilT *clinka = malloc(sizeof(_LinkTrilT) * nlinka * na);
        _LinkTrilT *clinkb = malloc(sizeof(_LinkTrilT) * nlinkb * nb);
        int ai_ir, stra_ir, strb_ir, intera_ir, interb_ir, ma, mb;
        for (stra_ir = 0; stra_ir < TOTIRREPS; stra_ir++) {
        for (ai_ir = 0; ai_ir < TOTIRREPS; ai_ir++) {
                /* beta irrep fixed by the total wavefunction symmetry */
                strb_ir = wfnsym^stra_ir;
                ma = nas[stra_ir];
                mb = nbs[strb_ir];
                if (ma > 0 && mb > 0 && dimirrep[ai_ir] > 0) {
                        intera_ir = ai_ir^stra_ir;
                        interb_ir = ai_ir^strb_ir;
// clinka for inter_ir*ai_ir -> stra_ir
                        pick_link_by_irrep(clinka, linka[intera_ir],
                                           nas[intera_ir], nlinka, ai_ir);
// clinka for strb_ir*ai_ir -> inter_ir
                        pick_link_by_irrep(clinkb, linkb[strb_ir],
                                           nbs[strb_ir], nlinkb, ai_ir);
                        loop_c2e_symm1(eris[ai_ir], ci0[stra_ir],
                                       ci1[stra_ir], ci1[intera_ir],
                                       dimirrep[ai_ir], nas[intera_ir],
                                       nbs[interb_ir], ma, mb,
                                       nlinka, nlinkb, clinka, clinkb);
                }
        } }
        free(clinka);
        free(clinkb);
}
|
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Subtract the `struct timeval' values X and Y, storing X - Y in RESULT
 * with 0 <= tv_usec < 1000000.  Returns 1 if the difference is negative,
 * otherwise 0.  NOTE: *y is normalized in place as a side effect. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Normalize y so that x->tv_usec - y->tv_usec lands in [0, 1000000). */
  if (x->tv_usec < y->tv_usec)
    {
      int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
      y->tv_usec -= 1000000 * carry;
      y->tv_sec += carry;
    }
  if (x->tv_usec - y->tv_usec > 1000000)
    {
      int borrow = (x->tv_usec - y->tv_usec) / 1000000;
      y->tv_usec += 1000000 * borrow;
      y->tv_sec -= borrow;
    }
  /* With y normalized, the subtraction is componentwise and tv_usec is
   * certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  /* Negative difference iff x's seconds fall short of y's. */
  return x->tv_sec < y->tv_sec;
}
/*
 * Driver for the order-1, 3D 7-point variable-coefficient stencil.
 *
 * Usage: prog <Nx> <Ny> <Nz> <Nt>
 *   Nx/Ny/Nz - interior grid extents (a halo of 1 is added on each side)
 *   Nt       - number of time steps
 *
 * Runs the tiled (CLooG-generated) sweep TESTS times, reporting per-run and
 * minimum wall-clock time.  Returns 0 on success, 1 on bad usage.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;
  /* All four extents are required.  Previously, missing arguments left
   * Nx..Nt uninitialized (undefined behavior when used below). */
  if (argc < 5) {
    printf("Usage: %s <Nx> <Ny> <Nz> <Nt>\n", argv[0]);
    return 1;
  }
  Nx = atoi(argv[1])+2;
  Ny = atoi(argv[2])+2;
  Nz = atoi(argv[3])+2;
  Nt = atoi(argv[4]);
  // allocate the arrays: A is double-buffered in time; coef holds the 7 stencil weights
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
   A[m] = (double ***) malloc(sizeof(double**)*Nz);
   for(i=0; i<Nz; i++){
    A[m][i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
     A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
   }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
   coef[m] = (double ***) malloc(sizeof(double**)*Nz);
   for(i=0; i<Nz; i++){
    coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
     coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
   }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 4;
  tile_size[3] = 256;
  tile_size[4] = -1;
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  // initialize variables with reproducible pseudo-random data
  // NOTE(review): these loops start at index 1, but the stencil below reads
  // index 0 (the low halo plane), which is therefore never initialized —
  // TODO confirm intent against the original PLUTO/Pochoir bench.
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
  #pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
    #pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    /* (glibc feature-test preamble comments emitted by the
     * source-to-source tool have been removed here.) */
  int t1, t2, t3, t4, t5, t6, t7, t8;
 int lb, ub, lbp, ubp, lb2, ub2;
 register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
  for (t1=-1;t1<=floord(Nt-2,4);t1++) {
    lbp=max(ceild(t1,2),ceild(8*t1-Nt+3,8));
    ubp=min(floord(Nt+Nz-4,8),floord(4*t1+Nz+1,8));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
    for (t2=lbp;t2<=ubp;t2++) {
      for (t3=max(max(0,ceild(8*t2-Nz,4)),t1);t3<=min(min(min(floord(Nt+Ny-4,4),floord(4*t1+Ny+5,4)),floord(8*t2+Ny+4,4)),floord(8*t1-8*t2+Nz+Ny+3,4));t3++) {
        for (t4=max(max(max(0,ceild(t1-63,64)),ceild(8*t2-Nz-252,256)),ceild(4*t3-Ny-252,256));t4<=min(min(min(min(floord(4*t3+Nx,256),floord(Nt+Nx-4,256)),floord(4*t1+Nx+5,256)),floord(8*t2+Nx+4,256)),floord(8*t1-8*t2+Nz+Nx+3,256));t4++) {
          for (t5=max(max(max(max(max(0,4*t1),8*t1-8*t2+1),8*t2-Nz+2),4*t3-Ny+2),256*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,4*t1+7),8*t2+6),4*t3+2),256*t4+254),8*t1-8*t2+Nz+5);t5++) {
            for (t6=max(max(8*t2,t5+1),-8*t1+8*t2+2*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+2*t5),t5+Nz-2);t6++) {
              for (t7=max(4*t3,t5+1);t7<=min(4*t3+3,t5+Ny-2);t7++) {
                lbv=max(256*t4,t5+1);
                ubv=min(256*t4+255,t5+Nx-2);
#pragma ivdep
#pragma vector always
                for (t8=lbv;t8<=ubv;t8++) {
                  A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
                }
              }
            }
          }
        }
      }
    }
  }
}
/* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
  #pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  // Free allocated arrays (innermost rows first, then the spine pointers).
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  /* The top-level spines and the tile-size list leaked in the original. */
  free(A);
  free(coef);
  free(tile_size);
  return 0;
}
/* (Trailing non-source text removed: dataset-viewer/HTML residue — "Subsets
 * and Splits…" — that followed the closing brace of main and would not
 * compile as C.) */