source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
int g;

/* ROSE OpenMP-translation test: the loop is parallelized with 'y' and
 * 'i' private and 'x' firstprivate. 'x' is only read inside the loop
 * (the commented "x=..." / "... =x" markers show where the original
 * test accessed it), so each thread gets a private copy initialized
 * from the master's value; 'y' is written before being read in every
 * iteration, so plain private is enough for it.
 * NOTE(review): 'x' is never assigned before the loop, so the reads of
 * 'x' use an indeterminate value - presumably intentional for this
 * translator fixture; confirm before reusing as real code. */
void foo()
{
int i;
int x;
int y = 1;
int a[100];
int b[100];
#pragma omp parallel for private (y,i) firstprivate (x)
for (i = 0; i <= 99; i += 1) {
y = x + 1 + g;
b[i] = x + 1 + g;
// x=...
// ... =x
}
/* serial code after the parallel region: the shared 'x' (not the
 * firstprivate copies) is overwritten here */
x = g;
}
int a[100];

/* foo2: intentionally serial fill of the global array 'a'.
 * a[i] takes the value of 'tmp' carried over from iteration i-1 and
 * the new 'tmp' depends on a[i] - a true loop-carried dependence, so
 * the loop must not be parallelized; neither private nor firstprivate
 * clauses could make a parallel version correct. */
void foo2()
{
    int tmp = 10;
    for (int i = 0; i < 100; ++i) {
        a[i] = tmp;       /* publish the value carried in from iteration i-1 */
        tmp = a[i] + i;   /* carry a[i] + i forward into iteration i+1 */
    }
    printf("a[0]=%d\n",a[0]);
    printf("a[40]=%d\n",a[40]);
    printf("a[99]=%d\n",a[99]);
}
|
residualbased_elimination_builder_and_solver.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
//
//
#if !defined(KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER )
#define KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER
/* System includes */
#include <set>
#include <unordered_set>
#ifdef _OPENMP
#include <omp.h>
#endif
/* External includes */
/* Project includes */
#include "utilities/timer.h"
#include "includes/define.h"
#include "includes/key_hash.h"
#include "solving_strategies/builder_and_solvers/builder_and_solver.h"
#include "includes/model_part.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ResidualBasedEliminationBuilderAndSolver
* @ingroup KratosCore
* @brief Current class provides an implementation for standard builder and solving operations.
* @details The RHS is constituted by the unbalanced loads (residual)
* Degrees of freedom are reordered putting the restrained degrees of freedom at
* the end of the system ordered in reverse order with respect to the DofSet.
* Imposition of the dirichlet conditions is naturally dealt with as the residual already contains
* this information.
* Calculation of the reactions involves a cost very similiar to the calculation of the total residual
* @author Riccardo Rossi
*/
template<class TSparseSpace,
class TDenseSpace, //= DenseSpace<double>,
class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
>
class ResidualBasedEliminationBuilderAndSolver
: public BuilderAndSolver< TSparseSpace, TDenseSpace, TLinearSolver >
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedEliminationBuilderAndSolver);
typedef BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
typedef Node<3> NodeType;
typedef typename BaseType::NodesArrayType NodesArrayType;
typedef typename BaseType::ElementsArrayType ElementsArrayType;
typedef typename BaseType::ConditionsArrayType ConditionsArrayType;
typedef typename BaseType::ElementsContainerType ElementsContainerType;
///@}
///@name Life Cycle
///@{
/**
* @brief Default constructor. (with parameters)
*/
/**
 * @brief Constructor taking a Parameters object.
 * @param pNewLinearSystemSolver The linear solver used for the system of equations
 * @param ThisParameters The configuration parameters (validated against an empty
 * defaults block: this builder and solver defines no settings of its own)
 */
explicit ResidualBasedEliminationBuilderAndSolver(
    typename TLinearSolver::Pointer pNewLinearSystemSolver,
    Parameters ThisParameters
    ) : BaseType(pNewLinearSystemSolver)
{
    // Empty defaults: validation still rejects unknown keys in ThisParameters.
    Parameters default_params = Parameters(R"(
{
})" );
    ThisParameters.ValidateAndAssignDefaults(default_params);
}
/**
* @brief Constructor.
*/
/**
 * @brief Constructor from a linear solver only.
 * @param pNewLinearSystemSolver The linear solver used for the system of equations
 */
explicit ResidualBasedEliminationBuilderAndSolver(
    typename TLinearSolver::Pointer pNewLinearSystemSolver)
    : BaseType(pNewLinearSystemSolver)
{
    // Nothing to do beyond forwarding the solver to the base class.
}
/** Destructor.
*/
// Defaulted destructor: the empty user-provided body added nothing,
// and '= default' is the idiomatic spelling (Rule of Zero).
~ResidualBasedEliminationBuilderAndSolver() override = default;
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
* @brief Function to perform the build of the RHS. The vector could be sized as the total number
* of dofs or as the number of unrestrained ones
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
* @param b The RHS vector
*/
void Build(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& A,
TSystemVectorType& b) override
{
KRATOS_TRY
KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl;
//getting the elements from the model
const int nelements = static_cast<int>(rModelPart.Elements().size());
//getting the array of the conditions
const int nconditions = static_cast<int>(rModelPart.Conditions().size());
ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();
ModelPart::ElementsContainerType::iterator el_begin = rModelPart.ElementsBegin();
ModelPart::ConditionsContainerType::iterator cond_begin = rModelPart.ConditionsBegin();
//contributions to the system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
//vector containing the localization in the system of the different
//terms
Element::EquationIdVectorType EquationId;
const double start_build = OpenMPUtils::GetCurrentTime();
// assemble all elements
// firstprivate: every thread works on its own copies of the local
// contribution containers, so no synchronization is needed while the
// elemental contributions are computed; only Assemble() itself is
// made thread-safe (atomics or per-row locks, see Assemble below).
#pragma omp parallel firstprivate(nelements, nconditions, LHS_Contribution, RHS_Contribution, EquationId )
{
// nowait: threads finishing the element loop start on the condition
// loop immediately instead of waiting at an implicit barrier.
#pragma omp for schedule(guided, 512) nowait
for (int k = 0; k < nelements; k++)
{
ModelPart::ElementsContainerType::iterator it = el_begin + k;
//detect if the element is active or not. If the user did not make any choice the element
//is active by default
bool element_is_active = true;
if ((it)->IsDefined(ACTIVE))
element_is_active = (it)->Is(ACTIVE);
if (element_is_active)
{
//calculate elemental contribution
pScheme->CalculateSystemContributions(*(it.base()), LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo);
//assemble the elemental contribution
#ifdef USE_LOCKS_IN_ASSEMBLY
Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, mLockArray);
#else
Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId);
#endif
// clean local elemental memory
pScheme->CleanMemory(*(it.base()));
}
}
#pragma omp for schedule(guided, 512)
for (int k = 0; k < nconditions; k++)
{
ModelPart::ConditionsContainerType::iterator it = cond_begin + k;
//detect if the element is active or not. If the user did not make any choice the element
//is active by default
bool condition_is_active = true;
if ((it)->IsDefined(ACTIVE))
condition_is_active = (it)->Is(ACTIVE);
if (condition_is_active)
{
//calculate elemental contribution
pScheme->Condition_CalculateSystemContributions(*(it.base()), LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo);
#ifdef USE_LOCKS_IN_ASSEMBLY
Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, mLockArray);
#else
Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId);
#endif
// clean local elemental memory
pScheme->CleanMemory(*(it.base()));
}
}
}
const double stop_build = OpenMPUtils::GetCurrentTime();
KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", (this->GetEchoLevel() >=1 && rModelPart.GetCommunicator().MyPID() == 0)) << "System build time: " << stop_build - start_build << std::endl;
KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0) << "Finished building" << std::endl;
KRATOS_CATCH("")
}
/**
* @brief Function to perform the building of the LHS
* @details Depending on the implementation choosen the size of the matrix could
* be equal to the total number of Dofs or to the number of unrestrained dofs
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
*/
void BuildLHS(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& A) override
{
KRATOS_TRY
//getting the elements from the model
ElementsArrayType& rElements = rModelPart.Elements();
//getting the array of the conditions
ConditionsArrayType& rConditions = rModelPart.Conditions();
//resetting to zero the vector of reactions
// NOTE(review): mpReactionsVector is dereferenced unconditionally -
// assumes ResizeAndInitializeVectors already allocated it; confirm
// the call order before invoking this directly.
TSparseSpace::SetToZero(*(BaseType::mpReactionsVector));
//contributions to the system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
//vector containing the localization in the system of the different
//terms
Element::EquationIdVectorType EquationId;
ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();
// assemble all elements (serial loop, unlike Build which is OpenMP-parallel)
for (typename ElementsArrayType::ptr_iterator it = rElements.ptr_begin(); it != rElements.ptr_end(); ++it)
{
//calculate elemental contribution
pScheme->Calculate_LHS_Contribution(*it, LHS_Contribution, EquationId, CurrentProcessInfo);
//assemble the elemental contribution
AssembleLHS(A, LHS_Contribution, EquationId);
// clean local elemental memory
pScheme->CleanMemory(*it);
}
// shrink the work matrix before the condition loop; each call below resizes it as needed
LHS_Contribution.resize(0, 0, false);
// assemble all conditions
// NOTE(review): unlike the element loop, no CleanMemory() is called
// per condition here - verify whether that is intentional.
for (typename ConditionsArrayType::ptr_iterator it = rConditions.ptr_begin(); it != rConditions.ptr_end(); ++it)
{
//calculate elemental contribution
pScheme->Condition_Calculate_LHS_Contribution(*it, LHS_Contribution, EquationId, CurrentProcessInfo);
//assemble the elemental contribution
AssembleLHS(A, LHS_Contribution, EquationId);
}
KRATOS_CATCH("")
}
/**
* @brief Build a rectangular matrix of size n*N where "n" is the number of unrestrained degrees of freedom
* and "N" is the total number of degrees of freedom involved.
* @details This matrix is obtained by building the total matrix without the lines corresponding to the fixed
* degrees of freedom (but keeping the columns!!)
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
*/
void BuildLHS_CompleteOnFreeRows(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& A) override
{
KRATOS_TRY
//getting the elements from the model
ElementsArrayType& rElements = rModelPart.Elements();
//getting the array of the conditions
ConditionsArrayType& rConditions = rModelPart.Conditions();
ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();
//resetting to zero the vector of reactions
// NOTE(review): assumes mpReactionsVector was already allocated
// (see ResizeAndInitializeVectors).
TSparseSpace::SetToZero(*(BaseType::mpReactionsVector));
//contributions to the system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
//vector containing the localization in the system of the different
//terms
Element::EquationIdVectorType EquationId;
// assemble all elements; same serial structure as BuildLHS, but the
// assembly keeps the columns of the fixed dofs (free rows only are dropped)
for (typename ElementsArrayType::ptr_iterator it = rElements.ptr_begin(); it != rElements.ptr_end(); ++it)
{
//calculate elemental contribution
pScheme->Calculate_LHS_Contribution(*it, LHS_Contribution, EquationId, CurrentProcessInfo);
//assemble the elemental contribution
AssembleLHS_CompleteOnFreeRows(A, LHS_Contribution, EquationId);
// clean local elemental memory
pScheme->CleanMemory(*it);
}
// shrink the work matrix before the condition loop
LHS_Contribution.resize(0, 0, false);
// assemble all conditions
for (typename ConditionsArrayType::ptr_iterator it = rConditions.ptr_begin(); it != rConditions.ptr_end(); ++it)
{
//calculate elemental contribution
pScheme->Condition_Calculate_LHS_Contribution(*it, LHS_Contribution, EquationId, CurrentProcessInfo);
//assemble the elemental contribution
AssembleLHS_CompleteOnFreeRows(A, LHS_Contribution, EquationId);
}
KRATOS_CATCH("")
}
/**
* @brief This is a call to the linear system solver
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
*/
void SystemSolve(
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b
    ) override
{
    KRATOS_TRY
    // Solve only when the RHS is non-trivial; an empty or all-zero RHS
    // simply yields a zero update.
    const double norm_b = (TSparseSpace::Size(b) != 0) ? TSparseSpace::TwoNorm(b) : 0.00;
    if (norm_b != 0.00) {
        BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
    } else {
        TSparseSpace::SetToZero(Dx);
    }
    // Prints informations about the current time
    KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1) << *(BaseType::mpLinearSystemSolver) << std::endl;
    KRATOS_CATCH("")
}
/**
*@brief This is a call to the linear system solver (taking into account some physical particularities of the problem)
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
* @param rModelPart The model part of the problem to solve
*/
void SystemSolveWithPhysics(
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b,
    ModelPart& rModelPart
    )
{
    KRATOS_TRY
    // A zero (or empty) RHS means a zero update: skip the solver entirely.
    const double norm_b = (TSparseSpace::Size(b) != 0) ? TSparseSpace::TwoNorm(b) : 0.00;
    if (norm_b != 0.00) {
        // Hand the solver additional problem data (dof set, model part)
        // when it declares that it needs it.
        if (BaseType::mpLinearSystemSolver->AdditionalPhysicalDataIsNeeded())
            BaseType::mpLinearSystemSolver->ProvideAdditionalData(A, Dx, b, BaseType::mDofSet, rModelPart);
        BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
    } else {
        TSparseSpace::SetToZero(Dx);
        KRATOS_WARNING_IF("ResidualBasedEliminationBuilderAndSolver", rModelPart.GetCommunicator().MyPID() == 0) << "ATTENTION! setting the RHS to zero!" << std::endl;
    }
    // Prints informations about the current time
    KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << *(BaseType::mpLinearSystemSolver) << std::endl;
    KRATOS_CATCH("")
}
/**
* @brief Function to perform the building and solving phase at the same time.
* @details It is ideally the fastest and safer function to use when it is possible to solve
* just after building
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
*/
void BuildAndSolve(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b) override
{
    KRATOS_TRY
    // Phase 1: assemble LHS and RHS (timed).
    Timer::Start("Build");
    Build(pScheme, rModelPart, A, b);
    Timer::Stop("Build");
    // Dirichlet imposition is a no-op for this builder: the residual
    // formulation already keeps the fixed dofs out of the system.
    ApplyDirichletConditions(pScheme, rModelPart, A, Dx, b);
    KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "Before the solution of the system" << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;
    // Phase 2: solve the assembled system (timed).
    const double solve_start = OpenMPUtils::GetCurrentTime();
    Timer::Start("Solve");
    SystemSolveWithPhysics(A, Dx, b, rModelPart);
    Timer::Stop("Solve");
    const double solve_stop = OpenMPUtils::GetCurrentTime();
    KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", (this->GetEchoLevel() >=1 && rModelPart.GetCommunicator().MyPID() == 0)) << "System solve time: " << solve_stop - solve_start << std::endl;
    KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "After the solution of the system" << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;
    KRATOS_CATCH("")
}
/**
* @brief Corresponds to the previews, but the System's matrix is considered already built and only the RHS is built again
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
*/
void BuildRHSAndSolve(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b) override
{
    KRATOS_TRY
    // The system matrix A is assumed already assembled: only the
    // residual is rebuilt before solving.
    BuildRHS(pScheme, rModelPart, b);
    SystemSolve(A, Dx, b);
    KRATOS_CATCH("")
}
/**
* @brief Function to perform the build of the RHS.
* @details The vector could be sized as the total number of dofs or as the number of unrestrained ones
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
*/
void BuildRHS(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemVectorType& b) override
{
KRATOS_TRY
//resetting to zero the vector of reactions
if(BaseType::mCalculateReactionsFlag)
{
TSparseSpace::SetToZero(*(BaseType::mpReactionsVector));
}
//Getting the Elements
ElementsArrayType& pElements = rModelPart.Elements();
//getting the array of the conditions
ConditionsArrayType& pConditions = rModelPart.Conditions();
ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();
//contributions to the system
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
//vector containing the localization in the system of the different terms
Element::EquationIdVectorType EquationId;
// assemble all elements
// firstprivate: per-thread copies of the work containers, so no
// sharing occurs while elemental contributions are computed.
#pragma omp parallel firstprivate( RHS_Contribution, EquationId)
{
const int nelements = static_cast<int>(pElements.size());
// nowait: no barrier between the element and condition loops.
#pragma omp for schedule(guided, 512) nowait
for (int i = 0; i<nelements; i++)
{
typename ElementsArrayType::iterator it = pElements.begin() + i;
//detect if the element is active or not. If the user did not make any choice the element
//is active by default
bool element_is_active = true;
if ((it)->IsDefined(ACTIVE))
element_is_active = (it)->Is(ACTIVE);
if (element_is_active)
{
// Calculate elemental Right Hand Side Contribution
pScheme->Calculate_RHS_Contribution(*(it.base()), RHS_Contribution, EquationId, CurrentProcessInfo);
// Assemble the elemental contribution
AssembleRHS(b, RHS_Contribution, EquationId);
}
}
// assemble all conditions
const int nconditions = static_cast<int>(pConditions.size());
#pragma omp for schedule(guided, 512)
for (int i = 0; i<nconditions; i++)
{
auto it = pConditions.begin() + i;
//detect if the element is active or not. If the user did not make any choice the element
//is active by default
bool condition_is_active = true;
if ((it)->IsDefined(ACTIVE))
condition_is_active = (it)->Is(ACTIVE);
if (condition_is_active)
{
//calculate elemental contribution
pScheme->Condition_Calculate_RHS_Contribution(*(it.base()), RHS_Contribution, EquationId, CurrentProcessInfo);
//assemble the elemental contribution
AssembleRHS(b, RHS_Contribution, EquationId);
}
}
}
KRATOS_CATCH("")
}
/**
* @brief Builds the list of the DofSets involved in the problem by "asking" to each element
* and condition its Dofs.
* @details The list of dofs is stores insde the BuilderAndSolver as it is closely connected to the
* way the matrix and RHS are built
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
*/
void SetUpDofSet(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart
) override
{
KRATOS_TRY;
KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << "Setting up the dofs" << std::endl;
//Gets the array of elements from the modeler
ElementsArrayType& pElements = rModelPart.Elements();
const int nelements = static_cast<int>(pElements.size());
Element::DofsVectorType ElementalDofList;
ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();
unsigned int nthreads = OpenMPUtils::GetNumThreads();
// typedef boost::fast_pool_allocator< NodeType::DofType::Pointer > allocator_type;
// typedef std::unordered_set < NodeType::DofType::Pointer,
// DofPointerHasher,
// DofPointerComparor,
// allocator_type > set_type;
typedef std::unordered_set < NodeType::DofType::Pointer, DofPointerHasher> set_type;
// One dof set per thread: each thread only inserts into its own set
// during the parallel loops below, so no locking is needed there.
std::vector<set_type> dofs_aux_list(nthreads);
// std::vector<allocator_type> allocators(nthreads);
for (int i = 0; i < static_cast<int>(nthreads); i++)
{
// dofs_aux_list[i] = set_type( allocators[i]);
// reserve(nelements) is only a sizing heuristic for the hash set
dofs_aux_list[i].reserve(nelements);
}
// collect the dofs of every element into the set of the executing thread
#pragma omp parallel for firstprivate(nelements, ElementalDofList)
for (int i = 0; i < static_cast<int>(nelements); i++)
{
typename ElementsArrayType::iterator it = pElements.begin() + i;
const unsigned int this_thread_id = OpenMPUtils::ThisThread();
// gets list of Dof involved on every element
pScheme->GetElementalDofList(*(it.base()), ElementalDofList, CurrentProcessInfo);
dofs_aux_list[this_thread_id].insert(ElementalDofList.begin(), ElementalDofList.end());
}
ConditionsArrayType& pConditions = rModelPart.Conditions();
const int nconditions = static_cast<int>(pConditions.size());
// same collection pass over the conditions
#pragma omp parallel for firstprivate(nconditions, ElementalDofList)
for (int i = 0; i < nconditions; i++)
{
typename ConditionsArrayType::iterator it = pConditions.begin() + i;
const unsigned int this_thread_id = OpenMPUtils::ThisThread();
// gets list of Dof involved on every element
pScheme->GetConditionDofList(*(it.base()), ElementalDofList, CurrentProcessInfo);
dofs_aux_list[this_thread_id].insert(ElementalDofList.begin(), ElementalDofList.end());
}
//here we do a reduction in a tree so to have everything on thread 0
// Pairwise merge: in each round set i absorbs set i+new_max, halving
// the number of live sets until all unique dofs end up in set 0.
unsigned int old_max = nthreads;
unsigned int new_max = ceil(0.5*static_cast<double>(old_max));
while (new_max >= 1 && new_max != old_max)
{
// //just for debugging
// std::cout << "old_max" << old_max << " new_max:" << new_max << std::endl;
// for (int i = 0; i < new_max; i++)
// {
// if (i + new_max < old_max)
// {
// std::cout << i << " - " << i + new_max << std::endl;
// }
// }
// std::cout << "********************" << std::endl;
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(new_max); i++)
{
if (i + new_max < old_max)
{
dofs_aux_list[i].insert(dofs_aux_list[i + new_max].begin(), dofs_aux_list[i + new_max].end());
dofs_aux_list[i + new_max].clear();
}
}
old_max = new_max;
new_max = ceil(0.5*static_cast<double>(old_max));
}
// Copy the unique dofs into a temporary array, sort it, and adopt it
// as the final dof set of the builder and solver.
DofsArrayType Doftemp;
BaseType::mDofSet = DofsArrayType();
Doftemp.reserve(dofs_aux_list[0].size());
for (auto it = dofs_aux_list[0].begin(); it != dofs_aux_list[0].end(); it++)
{
Doftemp.push_back(it->get());
}
Doftemp.Sort();
BaseType::mDofSet = Doftemp;
// Throws an execption if there are no Degrees of freedom involved in the analysis
KRATOS_ERROR_IF(BaseType::mDofSet.size() == 0) << "No degrees of freedom!" << std::endl;
BaseType::mDofSetIsInitialized = true;
KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0) << "Finished setting up the dofs" << std::endl;
#ifdef USE_LOCKS_IN_ASSEMBLY
// (Re)create one OpenMP lock per dof for lock-based assembly: destroy
// any existing locks before resizing, then initialize the new ones.
if (mLockArray.size() != 0)
{
for (int i = 0; i < static_cast<int>(mLockArray.size()); i++)
omp_destroy_lock(&mLockArray[i]);
}
mLockArray.resize(BaseType::mDofSet.size());
for (int i = 0; i < static_cast<int>(mLockArray.size()); i++)
omp_init_lock(&mLockArray[i]);
#endif
// If reactions are to be calculated, we check if all the dofs have reactions defined
// This is tobe done only in debug mode
#ifdef KRATOS_DEBUG
if(BaseType::GetCalculateReactionsFlag())
{
for(auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator)
{
KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " <<std::endl
<< "Node : "<<dof_iterator->Id()<< std::endl
<< "Dof : "<<(*dof_iterator)<<std::endl<<"Not possible to calculate reactions."<<std::endl;
}
}
#endif
KRATOS_CATCH("");
}
/**
* @brief Organises the dofset in order to speed up the building phase
* @param rModelPart The model part of the problem to solve
*/
void SetUpSystem(
    ModelPart& rModelPart
    ) override
{
    // Assign equation ids: free dofs are numbered 0,1,2,... from the
    // front, while fixed dofs are numbered from mDofSet.size()
    // downwards (i.e. in reverse order with respect to the DofSet), so
    // any id >= mEquationSystemSize denotes a restrained dof.
    int next_free_id = 0;
    int next_fix_id = BaseType::mDofSet.size();
    for (auto it_dof = BaseType::mDofSet.begin(); it_dof != BaseType::mDofSet.end(); ++it_dof) {
        if (it_dof->IsFixed()) {
            it_dof->SetEquationId(--next_fix_id);
        } else {
            it_dof->SetEquationId(next_free_id++);
        }
    }
    // After the loop, next_fix_id equals the number of free dofs.
    BaseType::mEquationSystemSize = next_fix_id;
}
//**************************************************************************
//**************************************************************************
void ResizeAndInitializeVectors(
typename TSchemeType::Pointer pScheme,
TSystemMatrixPointerType& pA,
TSystemVectorPointerType& pDx,
TSystemVectorPointerType& pb,
ModelPart& rModelPart
) override
{
KRATOS_TRY
// Lazily allocate the system containers the first time they are used.
if (pA == NULL) //if the pointer is not initialized initialize it to an empty matrix
{
TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0, 0));
pA.swap(pNewA);
}
if (pDx == NULL) //if the pointer is not initialized initialize it to an empty matrix
{
TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0));
pDx.swap(pNewDx);
}
if (pb == NULL) //if the pointer is not initialized initialize it to an empty matrix
{
TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0));
pb.swap(pNewb);
}
if (BaseType::mpReactionsVector == NULL) //if the pointer is not initialized initialize it to an empty matrix
{
TSystemVectorPointerType pNewReactionsVector = TSystemVectorPointerType(new TSystemVectorType(0));
BaseType::mpReactionsVector.swap(pNewReactionsVector);
}
TSystemMatrixType& A = *pA;
TSystemVectorType& Dx = *pDx;
TSystemVectorType& b = *pb;
//resizing the system vectors and matrix
if (A.size1() == 0 || BaseType::GetReshapeMatrixFlag() == true) //if the matrix is not initialized
{
A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, false);
ConstructMatrixStructure(pScheme, A, rModelPart);
}
else
{
if (A.size1() != BaseType::mEquationSystemSize || A.size2() != BaseType::mEquationSystemSize)
{
//KRATOS_WATCH("it should not come here!!!!!!!! ... this is SLOW");
KRATOS_ERROR <<"The equation system size has changed during the simulation. This is not permited."<<std::endl;
// NOTE(review): KRATOS_ERROR throws, so the two statements below are
// unreachable dead code (presumably left from an older recovery path).
A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, true);
ConstructMatrixStructure(pScheme, A, rModelPart);
}
}
if (Dx.size() != BaseType::mEquationSystemSize)
Dx.resize(BaseType::mEquationSystemSize, false);
if (b.size() != BaseType::mEquationSystemSize)
b.resize(BaseType::mEquationSystemSize, false);
//if needed resize the vector for the calculation of reactions
// The reactions vector covers only the restrained dofs, i.e. the tail
// of the dof set beyond mEquationSystemSize.
if (BaseType::mCalculateReactionsFlag == true)
{
const std::size_t reactions_vector_size = BaseType::mDofSet.size() - BaseType::mEquationSystemSize;
if (BaseType::mpReactionsVector->size() != reactions_vector_size)
BaseType::mpReactionsVector->resize(reactions_vector_size, false);
}
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
void CalculateReactions(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b) override
{
    // Rebuild the residual so it reflects the current solution.
    BuildRHS(pScheme, rModelPart, b);
    TSystemVectorType& r_reactions_vector = *BaseType::mpReactionsVector;
    // Restrained dofs carry equation ids >= mEquationSystemSize; their
    // reaction is minus the corresponding entry of the reactions vector.
    for (auto it_dof = BaseType::mDofSet.ptr_begin(); it_dof != BaseType::mDofSet.ptr_end(); ++it_dof) {
        const std::size_t eq_id = (*it_dof)->EquationId();
        if (eq_id >= BaseType::mEquationSystemSize)
            (*it_dof)->GetSolutionStepReactionValue() = -r_reactions_vector[eq_id - BaseType::mEquationSystemSize];
    }
}
/**
* @brief Applies the dirichlet conditions. This operation may be very heavy or completely
* unexpensive depending on the implementation choosen and on how the System Matrix is built.
* @details For explanation of how it works for a particular implementation the user
* should refer to the particular Builder And Solver choosen
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
*/
void ApplyDirichletConditions(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b) override
{
// Intentionally a no-op: in this elimination builder the restrained
// dofs are never assembled into the system (see Assemble), so the
// residual already carries the Dirichlet information and nothing has
// to be imposed here.
}
/**
* @brief This function is intended to be called at the end of the solution step to clean up memory storage not needed
*/
void Clear() override
{
    // Release the dof set, the reactions vector and any data cached by
    // the linear solver.
    this->mDofSet = DofsArrayType();
    this->mpReactionsVector.reset();
    this->mpLinearSystemSolver->Clear();
    KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1) << "Clear Function called" << std::endl;
}
/**
* @brief This function is designed to be called once to perform all the checks needed
* on the input provided. Checks can be "expensive" as the function is designed
* to catch user's errors.
* @param rModelPart The model part of the problem to solve
* @return 0 all ok
*/
int Check(ModelPart& rModelPart) override
{
    KRATOS_TRY
    // This builder and solver performs no checks of its own.
    return 0;
    KRATOS_CATCH("");
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
    // Class name, used as the identifier in log output.
    return "ResidualBasedEliminationBuilderAndSolver";
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << Info();
}
/// Print object's data.
void PrintData(std::ostream& rOStream) const override
{
rOStream << Info();
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
#ifdef USE_LOCKS_IN_ASSEMBLY
std::vector<omp_lock_t> mLockArray;
#endif
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
* @brief This function does the assembling of the LHS and RHS
* @note The main difference respect the block builder and solver is the fact that the fixed DoFs are not considered on the assembling
*/
void Assemble(
TSystemMatrixType& A,
TSystemVectorType& b,
const LocalSystemMatrixType& LHS_Contribution,
const LocalSystemVectorType& RHS_Contribution,
const Element::EquationIdVectorType& EquationId
#ifdef USE_LOCKS_IN_ASSEMBLY
,std::vector< omp_lock_t >& lock_array
#endif
)
{
unsigned int local_size = LHS_Contribution.size1();
for (unsigned int i_local = 0; i_local < local_size; i_local++)
{
unsigned int i_global = EquationId[i_local];
// Rows with i_global >= mEquationSystemSize belong to restrained dofs
// and are skipped entirely - this is the "elimination" of this builder.
if (i_global < BaseType::mEquationSystemSize)
{
#ifdef USE_LOCKS_IN_ASSEMBLY
// Lock-based variant: the per-row lock guards both the RHS entry and
// the whole matrix-row update.
omp_set_lock(&lock_array[i_global]);
b[i_global] += RHS_Contribution(i_local);
#else
// Lock-free variant: the RHS update is a single atomic add; the
// matrix-row update is handled by AssembleRowContributionFreeDofs.
double& r_a = b[i_global];
const double& v_a = RHS_Contribution(i_local);
#pragma omp atomic
r_a += v_a;
#endif
AssembleRowContributionFreeDofs(A, LHS_Contribution, i_global, i_local, EquationId);
#ifdef USE_LOCKS_IN_ASSEMBLY
omp_unset_lock(&lock_array[i_global]);
#endif
}
//note that computation of reactions is not performed here!
}
}
//**************************************************************************
    virtual void ConstructMatrixStructure(
        typename TSchemeType::Pointer pScheme,
        TSystemMatrixType& A,
        ModelPart& rModelPart)
    {
        //filling with zero the matrix (creating the structure)
        Timer::Start("MatrixStructure");
        const std::size_t equation_size = BaseType::mEquationSystemSize;
        // One unordered set per free equation: the (unsorted) column pattern of each row
        std::vector<std::unordered_set<std::size_t> > indices(equation_size);
#pragma omp parallel for firstprivate(equation_size)
        for (int iii = 0; iii < static_cast<int>(equation_size); iii++) {
            indices[iii].reserve(40); // heuristic: ~40 nonzeros/row avoids early rehashes
        }
        Element::EquationIdVectorType ids(3, 0);
#pragma omp parallel firstprivate(ids)
        {
            // The process info
            ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
            // We repeat the same declaration for each thead
            // (thread-local pattern buffers: avoids synchronizing on `indices` per insertion)
            std::vector<std::unordered_set<std::size_t> > temp_indexes(equation_size);
#pragma omp for
            for (int index = 0; index < static_cast<int>(equation_size); ++index)
                temp_indexes[index].reserve(30);
            // Getting the size of the array of elements from the model
            const int number_of_elements = static_cast<int>(rModelPart.Elements().size());
            // Element initial iterator
            const auto el_begin = rModelPart.ElementsBegin();
            // We iterate over the elements
#pragma omp for schedule(guided, 512) nowait
            for (int i_elem = 0; i_elem<number_of_elements; ++i_elem) {
                auto it_elem = el_begin + i_elem;
                pScheme->EquationId( *(it_elem.base()), ids, r_current_process_info);
                // Record coupling (i,j) for every pair of FREE DoFs of this element;
                // fixed DoFs (ids >= mEquationSystemSize) never enter the pattern.
                for (auto& id_i : ids) {
                    if (id_i < BaseType::mEquationSystemSize) {
                        auto& row_indices = temp_indexes[id_i];
                        for (auto& id_j : ids)
                            if (id_j < BaseType::mEquationSystemSize)
                                row_indices.insert(id_j);
                    }
                }
            }
            // Getting the size of the array of the conditions
            const int number_of_conditions = static_cast<int>(rModelPart.Conditions().size());
            // Condition initial iterator
            const auto cond_begin = rModelPart.ConditionsBegin();
            // We iterate over the conditions
#pragma omp for schedule(guided, 512) nowait
            for (int i_cond = 0; i_cond<number_of_conditions; ++i_cond) {
                auto it_cond = cond_begin + i_cond;
                pScheme->Condition_EquationId( *(it_cond.base()), ids, r_current_process_info);
                // Same free-DoF coupling pattern as for the elements above
                for (auto& id_i : ids) {
                    if (id_i < BaseType::mEquationSystemSize) {
                        auto& row_indices = temp_indexes[id_i];
                        for (auto& id_j : ids)
                            if (id_j < BaseType::mEquationSystemSize)
                                row_indices.insert(id_j);
                    }
                }
            }
            // Merging all the temporal indexes
            // (serialized: each thread folds its local pattern into the shared one)
            #pragma omp critical
            {
                for (int i = 0; i < static_cast<int>(temp_indexes.size()); ++i) {
                    indices[i].insert(temp_indexes[i].begin(), temp_indexes[i].end());
                }
            }
        }
        //count the row sizes
        unsigned int nnz = 0;
        for (unsigned int i = 0; i < indices.size(); i++)
            nnz += indices[i].size();
        A = boost::numeric::ublas::compressed_matrix<double>(indices.size(), indices.size(), nnz);
        // Raw CSR arrays of the freshly sized matrix
        double* Avalues = A.value_data().begin();
        std::size_t* Arow_indices = A.index1_data().begin();
        std::size_t* Acol_indices = A.index2_data().begin();
        //filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP!
        // (each entry is a prefix sum of the previous one)
        Arow_indices[0] = 0;
        for (int i = 0; i < static_cast<int>(A.size1()); i++)
            Arow_indices[i + 1] = Arow_indices[i] + indices[i].size();
#pragma omp parallel for
        for (int i = 0; i < static_cast<int>(A.size1()); i++)
        {
            const unsigned int row_begin = Arow_indices[i];
            const unsigned int row_end = Arow_indices[i + 1];
            unsigned int k = row_begin;
            for (auto it = indices[i].begin(); it != indices[i].end(); it++)
            {
                Acol_indices[k] = *it;
                Avalues[k] = 0.0;
                k++;
            }
            // Column ids must be sorted within the row: ForwardFind/BackwardFind
            // during assembly rely on this ordering.
            std::sort(&Acol_indices[row_begin], &Acol_indices[row_end]);
        }
        A.set_filled(indices.size() + 1, nnz);
        Timer::Stop("MatrixStructure");
    }
// virtual void ConstructMatrixStructure(
// TSystemMatrixType& A,
// ElementsContainerType& rElements,
// ConditionsArrayType& rConditions,
// ProcessInfo& CurrentProcessInfo)
// {
//
// std::size_t equation_size = A.size1();
// std::vector<std::vector<std::size_t> > indices(equation_size);
// // std::vector<std::vector<std::size_t> > dirichlet_indices(TSystemSpaceType::Size1(mDirichletMatrix));
//
// Element::EquationIdVectorType ids(3, 0);
// for (typename ElementsContainerType::iterator i_element = rElements.begin(); i_element != rElements.end(); i_element++)
// {
// (i_element)->EquationIdVector(ids, CurrentProcessInfo);
//
// for (std::size_t i = 0; i < ids.size(); i++)
// if (ids[i] < equation_size)
// {
// std::vector<std::size_t>& row_indices = indices[ids[i]];
// for (std::size_t j = 0; j < ids.size(); j++)
// if (ids[j] < equation_size)
// {
// AddUnique(row_indices, ids[j]);
// //indices[ids[i]].push_back(ids[j]);
// }
// }
//
// }
//
// for (typename ConditionsArrayType::iterator i_condition = rConditions.begin(); i_condition != rConditions.end(); i_condition++)
// {
// (i_condition)->EquationIdVector(ids, CurrentProcessInfo);
// for (std::size_t i = 0; i < ids.size(); i++)
// if (ids[i] < equation_size)
// {
// std::vector<std::size_t>& row_indices = indices[ids[i]];
// for (std::size_t j = 0; j < ids.size(); j++)
// if (ids[j] < equation_size)
// {
// AddUnique(row_indices, ids[j]);
// // indices[ids[i]].push_back(ids[j]);
// }
// }
// }
//
// //allocating the memory needed
// int data_size = 0;
// for (std::size_t i = 0; i < indices.size(); i++)
// {
// data_size += indices[i].size();
// }
// A.reserve(data_size, false);
//
// //filling with zero the matrix (creating the structure)
// Timer::Start("MatrixStructure");
//#ifndef _OPENMP
// for (std::size_t i = 0; i < indices.size(); i++)
// {
// std::vector<std::size_t>& row_indices = indices[i];
// std::sort(row_indices.begin(), row_indices.end());
//
// for (std::vector<std::size_t>::iterator it = row_indices.begin(); it != row_indices.end(); it++)
// {
// A.push_back(i, *it, 0.00);
// }
// row_indices.clear();
// }
//#else
// int number_of_threads = omp_get_max_threads();
// vector<unsigned int> matrix_partition;
// CreatePartition(number_of_threads, indices.size(), matrix_partition);
// if (this->GetEchoLevel() > 2)
// {
// KRATOS_WATCH(matrix_partition);
// }
// for (int k = 0; k < number_of_threads; k++)
// {
// #pragma omp parallel
// if (omp_get_thread_num() == k)
// {
// for (std::size_t i = matrix_partition[k]; i < matrix_partition[k + 1]; i++)
// {
// std::vector<std::size_t>& row_indices = indices[i];
// std::sort(row_indices.begin(), row_indices.end());
//
// for (std::vector<std::size_t>::iterator it = row_indices.begin(); it != row_indices.end(); it++)
// {
// A.push_back(i, *it, 0.00);
// }
// row_indices.clear();
// }
// }
// }
//#endif
// Timer::Stop("MatrixStructure");
// }
//**************************************************************************
void AssembleLHS(
TSystemMatrixType& A,
LocalSystemMatrixType& LHS_Contribution,
Element::EquationIdVectorType& EquationId
)
{
unsigned int local_size = LHS_Contribution.size1();
for (unsigned int i_local = 0; i_local < local_size; i_local++)
{
unsigned int i_global = EquationId[i_local];
if (i_global < BaseType::mEquationSystemSize)
{
for (unsigned int j_local = 0; j_local < local_size; j_local++)
{
unsigned int j_global = EquationId[j_local];
if (j_global < BaseType::mEquationSystemSize)
A(i_global, j_global) += LHS_Contribution(i_local, j_local);
}
}
}
}
/**
* @brief This function is equivalent to the AssembleRowContribution of the block builder and solver
* @note The main difference respect the block builder and solver is the fact that the fixed DoFs are skipped
*/
    inline void AssembleRowContributionFreeDofs(TSystemMatrixType& A, const Matrix& Alocal, const std::size_t i, const std::size_t i_local, const Element::EquationIdVectorType& EquationId)
    {
        // Direct access to the CSR storage of the global matrix
        double* values_vector = A.value_data().begin();
        std::size_t* index1_vector = A.index1_data().begin();
        std::size_t* index2_vector = A.index2_data().begin();
        // First column position of row i inside the CSR arrays
        const std::size_t left_limit = index1_vector[i];
        // Find the first entry
        // We iterate over the equation ids until we find the first equation id to be considered
        // We count in which component we find an ID
        std::size_t last_pos = 0;
        std::size_t last_found = 0;
        std::size_t counter = 0;
        for(std::size_t j=0; j < EquationId.size(); ++j) {
            ++counter;
            const std::size_t j_global = EquationId[j];
            if (j_global < BaseType::mEquationSystemSize) {
                // ForwardFind assumes the entry exists: row i of A was built in
                // ConstructMatrixStructure to contain every free column of this element
                last_pos = ForwardFind(j_global,left_limit,index2_vector);
                last_found = j_global;
                break;
            }
        }
        // If the counter is equal to the size of the EquationID vector that means that only one dof will be considered, if the number is greater means that all the dofs are fixed. If the number is below means that at we have several dofs free to be considered
        // NOTE(review): `counter` can never exceed EquationId.size(), so this guard is
        // always true; if NO free DoF existed, last_pos would still be 0 and the write
        // below would hit values_vector[0]. In practice the caller only invokes this for
        // free rows, whose own id is in EquationId, so a free DoF is always found --
        // confirm before reusing this helper in a different context.
        if (counter <= EquationId.size()) {
#ifndef USE_LOCKS_IN_ASSEMBLY
            // Lock-free build: each scalar accumulation is atomic
            double& r_a = values_vector[last_pos];
            const double& v_a = Alocal(i_local,counter - 1);
#pragma omp atomic
            r_a += v_a;
#else
            // With row locks the caller already serializes access to this row
            values_vector[last_pos] += Alocal(i_local,counter - 1);
#endif
            // Now find all of the other entries
            std::size_t pos = 0;
            for(std::size_t j = counter; j < EquationId.size(); ++j) {
                std::size_t id_to_find = EquationId[j];
                if (id_to_find < BaseType::mEquationSystemSize) {
                    // Column ids are sorted inside the row, so scanning forward or
                    // backward from the previous hit is cheaper than a fresh search
                    if(id_to_find > last_found)
                        pos = ForwardFind(id_to_find,last_pos+1,index2_vector);
                    else if(id_to_find < last_found)
                        pos = BackwardFind(id_to_find,last_pos-1,index2_vector);
                    else
                        pos = last_pos;
#ifndef USE_LOCKS_IN_ASSEMBLY
                    double& r = values_vector[pos];
                    const double& v = Alocal(i_local,j);
#pragma omp atomic
                    r += v;
#else
                    values_vector[pos] += Alocal(i_local,j);
#endif
                    last_found = id_to_find;
                    last_pos = pos;
                }
            }
        }
    }
inline std::size_t ForwardFind(const std::size_t id_to_find,
const std::size_t start,
const std::size_t* index_vector)
{
std::size_t pos = start;
while(id_to_find != index_vector[pos]) pos++;
return pos;
}
inline std::size_t BackwardFind(const std::size_t id_to_find,
const std::size_t start,
const std::size_t* index_vector)
{
std::size_t pos = start;
while(id_to_find != index_vector[pos]) pos--;
return pos;
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
inline void AddUnique(std::vector<std::size_t>& v, const std::size_t& candidate)
{
std::vector<std::size_t>::iterator i = v.begin();
std::vector<std::size_t>::iterator endit = v.end();
while (i != endit && (*i) != candidate)
{
i++;
}
if (i == endit)
{
v.push_back(candidate);
}
}
    /**
     * @brief Assembles a local RHS contribution into the global RHS vector.
     * @details Free DoFs always accumulate into b. When reactions are requested,
     * fixed DoFs additionally accumulate into the reactions vector, indexed by
     * (equation id - mEquationSystemSize) since fixed DoFs are numbered after
     * the free ones. Updates are atomic so this can be called from OpenMP regions.
     * @param b Global RHS vector (written)
     * @param RHS_Contribution Local (elemental) RHS vector
     * @param EquationId Global equation ids of the local DoFs
     */
    void AssembleRHS(
        TSystemVectorType& b,
        const LocalSystemVectorType& RHS_Contribution,
        const Element::EquationIdVectorType& EquationId
    )
    {
        unsigned int local_size = RHS_Contribution.size();
        if (BaseType::mCalculateReactionsFlag == false)
        {
            for (unsigned int i_local = 0; i_local < local_size; i_local++)
            {
                const unsigned int i_global = EquationId[i_local];
                if (i_global < BaseType::mEquationSystemSize) //free dof
                {
                    // ASSEMBLING THE SYSTEM VECTOR
                    double& b_value = b[i_global];
                    const double& rhs_value = RHS_Contribution[i_local];
#pragma omp atomic
                    b_value += rhs_value;
                }
            }
        }
        else
        {
            TSystemVectorType& ReactionsVector = *BaseType::mpReactionsVector;
            for (unsigned int i_local = 0; i_local < local_size; i_local++)
            {
                const unsigned int i_global = EquationId[i_local];
                if (i_global < BaseType::mEquationSystemSize) //free dof
                {
                    // ASSEMBLING THE SYSTEM VECTOR
                    double& b_value = b[i_global];
                    const double& rhs_value = RHS_Contribution[i_local];
#pragma omp atomic
                    b_value += rhs_value;
                }
                else //fixed dof
                {
                    // Fixed DoFs contribute to the reactions instead of the system RHS
                    double& b_value = ReactionsVector[i_global - BaseType::mEquationSystemSize];
                    const double& rhs_value = RHS_Contribution[i_local];
#pragma omp atomic
                    b_value += rhs_value;
                }
            }
        }
    }
//**************************************************************************
    /**
     * @brief Assembles the COMPLETE rows of free DoFs into the global matrix.
     * @details Unlike AssembleLHS, the inner loop performs no column filtering:
     * for every free row, contributions to ALL columns (including those of fixed
     * DoFs) are written. The target matrix must therefore have been sized to
     * hold those extra columns.
     * @param A Global matrix holding complete rows for free DoFs (written)
     * @param LHS_Contribution Local (elemental) stiffness matrix
     * @param EquationId Global equation ids of the local DoFs
     */
    void AssembleLHS_CompleteOnFreeRows(
        TSystemMatrixType& A,
        LocalSystemMatrixType& LHS_Contribution,
        Element::EquationIdVectorType& EquationId
    )
    {
        unsigned int local_size = LHS_Contribution.size1();
        for (unsigned int i_local = 0; i_local < local_size; i_local++)
        {
            unsigned int i_global = EquationId[i_local];
            if (i_global < BaseType::mEquationSystemSize)
            {
                for (unsigned int j_local = 0; j_local < local_size; j_local++)
                {
                    // NOTE(review): j_global is a (signed) int while EquationId holds
                    // unsigned ids -- presumably safe for realistic system sizes, but
                    // worth confirming against very large models.
                    int j_global = EquationId[j_local];
                    A(i_global, j_global) += LHS_Contribution(i_local, j_local);
                }
            }
        }
    }
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class ResidualBasedEliminationBuilderAndSolver */
///@}
///@name Type Definitions
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER defined */
|
21_omp_task_struct.c | // clang-format off
// RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %apply-typeart -typeart-alloca -call-filter -S 2>&1 | %filecheck %s
// RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %opt -O2 -S | %apply-typeart -typeart-alloca -call-filter -S 2>&1 | %filecheck %s --check-prefix=CHECK-opt
// RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %apply-typeart -typeart-alloca -call-filter -S | %filecheck %s --check-prefix=check-inst
// RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %opt -O2 -S | %apply-typeart -typeart-alloca -call-filter -S | %filecheck %s --check-prefix=check-inst
// REQUIRES: openmp
// clang-format on
extern void MPI_call(void*);
/* Plain aggregate allocated on the stack in foo(); the pass must track it by type. */
typedef struct {
  int x;
  float y;
} X;
void foo() {
  // check-inst: define {{.*}} @foo
  // check-inst: %x = alloca
  // check-inst: %0 = bitcast %struct.X* %x to i8*
  // check-inst: call void @__typeart_alloc_stack(i8* %0, i32 {{[0-9]+}}, i64 1)
  // x escapes into an OpenMP task region via MPI_call, so TypeArt must
  // instrument the stack allocation (matched by the check-inst lines above).
  X x;
#pragma omp parallel
  {
#pragma omp task
    { MPI_call(&x); }
  }
}
// FIXME one alloca is of the anon struct detected as OMP task struct related (need refinement of condition?)
// The Pattern: a = alloca struct; b = task_alloc; mem_cpy a to b;
// CHECK: TypeArtPass [Heap & Stack]
// CHECK-NEXT: Malloc : 0
// CHECK-NEXT: Free : 0
// CHECK-NEXT: Alloca : 2
// CHECK-NEXT: Global : 0
// CHECK-opt: TypeArtPass [Heap & Stack]
// CHECK-opt-NEXT: Malloc : 0
// CHECK-opt-NEXT: Free : 0
// CHECK-opt-NEXT: Alloca : 1
// CHECK-opt-NEXT: Global : 0 |
omp_ssyr2k_batch.c | /**
* @file omp_ssyr2k_batch.c
*
* @brief BBLAS omp_ssyr2k_batch float routine.
*
* BBLAS is a software package provided by Univ. of Manchester,
* Univ. of Tennessee.
*
* @version 1.0.0
* @author Samuel D. Relton
* @author Pedro V. Lara
* @author Mawussi Zounon
* @date 2016-02-20
*
**/
#ifndef DOXYGEN_SHOULD_SKIP_THIS
/**
* Code generation
* @generated from ./bblas_omp/omp_zsyr2k_batch.c normal z -> s, Mon Jun 6 09:44:14 2016
**/
#endif
#include<cblas.h>
#include "bblas_omp.h"
#include "bblas.h"
#include <omp.h>
#define REAL
/**
Purpose
-------
<b>ssyr2k_batch</b> is a batch version of ssyr2k.
It performs one of the matrix-matrix operations
arrayC[i] = alpha[i]*arrayA[i]*arrayB[i]**T + alpha[i]*arrayB[i]*arrayA[i]**T +
beta[i]*arrayC[i], or
arrayC[i] = alpha[i]*arrayA**T *arrayB[i] + alpha[i]*arrayB[i]**T *arrayA[i] +
beta[i]*arrayC[i],
where alpha[i] and beta[i] are scalars, arrayC[i] is an N[i] by N[i] sym-
metric matrix and arrayA[i] and arrayB[i] are N[i] by K[i] matrices in the
first case and K[i] by N[i] matrices in the second case.
Fixed and Variable Batch Operations
-----------------------------------
Two types of batch operation are supported depending upon the value of batch_opts.
When <tt>batch_opts = BBLAS_VARIABLE</tt>
- all parameters that are arrays must have length at least batch_count.
- all parameters that are arrays must have all values set.
When <tt>batch_opts = BBLAS_FIXED</tt>
- all parameters that are arrays (except for arrayA, arrayB, arrayC, and info)
must have length at least one.
- all parameters that are arrays (except for arrayA, arrayB, arrayC, and info)
need only to have their first value set.
This means that for a <tt>BBLAS_FIXED</tt> batch,
the values of uplo[0], trans[0], N[0], K[0],
alpha[0], beta[0], lda[0], ldb[0], and ldc[0] are used for all computations.
Parameters
----------
@param[in]
uplo Array of <tt>enum BBLAS_UPLO</tt>.
On entry, uplo[i] specifies whether the upper or
lower triangular part of the matrix arrayC[i] is to
be referenced as follows:
- = 'BblasUpper' Only the upper triangular part of
the matrix is to be referenced.
- = 'BblasLower' Only the lower triangular part of
the matrix is to be referenced.
@param[in]
trans Array of <tt>enum BBLAS_TRANS</tt>.
On entry, trans[i] specifies the operation to be
performed as follows:
- = 'BblasNoTrans' arrayC[i] = alpha[i]*arrayA[i]*arrayB[i]**T +
alpha[i]*arrayB[i]*arrayA[i]**T + beta[i]*arrayC[i]
- = 'BblasTrans' arrayC[i] = alpha[i]*arrayA[i]**T *arrayB[i] +
alpha[i]*arrayB[i]**T *arrayA[i] + beta[i]*arrayC[i].
@param[in]
N Array of <tt>int</tt>.
Each element N[i] specifies the number of rows and columns of the matrix
arrayC[i]. N[i] must be greater than zero.
@param[in]
K Array of <tt>int</tt>.
On entry with trans[i] = 'BblasNoTrans', K[i] specifies the
number of columns of the matrices arrayA[i] and arrayB[i],
and upon entry with trans[i] = 'BblasTrans',
K[i] specifies the number of rows of the matrices arrayA[i] and arrayB[i].
K[i] must be greater than zero.
@param[in]
alpha Array of <tt>real_16</tt>.
@param[in]
arrayA Array of pointers.
Each element arrayA[i] is a pointer to a REAL matrix of
dimension lda[i] by Ka[i],
where Ka[i] = K[i] when transA[i] = BblasNoTrans and is N[i] otherwise.
Before entry with transA[i] = BblasNoTrans, the leading N[i] by K[i]
part of the arrayA[i] must contain the elements of arrayA[i], otherwise
the leading K[i] by N[i] part of the arrayA[i] must contain the
elements of arrayA[i].
@param[in]
lda Array of <tt>int</tt>.
On entry, lda[i] specifies the first dimension of arrayA[i] as declared
in the calling (sub) program. When transA[i] = BblasNoTrans then
lda[i] must be at least max( 1, N[i] ), otherwise lda[i] must be at
least max( 1, K[i] ).
@param[in]
arrayB Array of pointers.
Each element arrayB[i] is a pointer to a REAL matrix of
dimension lda[i] by Ka[i],
where Ka[i] = K[i] when transA[i] = BblasNoTrans and is N[i] otherwise.
Before entry with transA[i] = BblasNoTrans, the leading N[i] by K[i]
part of the arrayB[i] must contain the elements of arrayB[i], otherwise
the leading K[i] by N[i] part of the arrayB[i] must contain the
elements of arrayB[i].
@param[in]
ldb Array of <tt>int</tt>.
On entry, ldb[i] specifies the first dimension of arrayA[i] as declared
in the calling (sub) program. When transA[i] = BblasNoTrans then
ldb[i] must be at least max( 1, N[i] ), otherwise ldb[i] must be at
least max( 1, K[i] ).
@param[in]
beta Array of <tt>real_16</tt>.
When beta[i] is set to zero arrayC[i] need not be set on input.
@param[in,out]
arrayC Array of pointers.
Each elements arrayC[i] is a pointer to a REAL matrix of
dimension ldc[i] by N[i].
Before entry with uplo[i] = 'BblasUpper', the leading
N[i] by N[i] upper triangular part of the arrayC[i] must con-
tain the upper triangular part of the symmetric
matrix and the strictly lower triangular part of arrayC[i]
is not referenced. On exit, the upper triangular
part of the arrayC[i] is overwritten by the upper triangular part
of the updated matrix.
Before entry with uplo[i] = 'BlasLower', the leading N[i] by N[i] lower
triangular part of the arrayC[i] must contain the lower
triangular part of the symmetric matrix and the
strictly upper triangular part of arrayC[i] is not referenced.
On exit, the lower triangular part of the
arrayC[i] is overwritten by the lower triangular part
of the updated matrix.
@param[in]
ldc Array of <tt>int</tt>.
On entry, ldc[i] specifies the first dimension of arrayC[i] as declared
in the calling (sub) program. Each element ldc must be at least max( 1, N[i] ).
@param[in]
batch_count <tt>int</tt>
The number of matrices to operate on.
@param[in]
batch_opts <tt>enum BBLAS_OPTS</tt>
One of BBLAS_FIXED or BBLAS_VARIABLE depending upon the type of
batch operation required.
@param[out]
info Array of <tt>int</tt>.
Each element info[i] is the error return code of the ith ssyr2k in the batch,
these need not be set on entry.
The error codes can be found in bblas_macros.h.
**/
void omp_ssyr2k_batch(
const enum BBLAS_UPLO *uplo, const enum BBLAS_TRANS *trans,
const int *N, const int *K, const float *alpha,
const float **arrayA, const int *lda,
const float **arrayB, const int *ldb,
const float *beta, float **arrayC,
const int *ldc, const int batch_count, enum BBLAS_OPTS batch_opts, int *info)
{
/*Local variables */
int first_index = 0;
int batch_iter;
int LDA, LDB;
char func_name[15] = "ssyr2k_batch";
/* Check input arguments */
if (batch_count < 0)
{
xerbla_batch(func_name, BBLAS_ERR_BATCH_COUNT, -1);
}
if (batch_opts == BBLAS_FIXED)
{
if ((uplo[first_index] != BblasUpper) &&
(uplo[first_index] != BblasLower))
{
xerbla_batch(func_name, BBLAS_ERR_UPLO, first_index);
for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
{
info[batch_iter] = BBLAS_ERR_UPLO;
}
return;
}
if ((trans[first_index] != BblasNoTrans) &&
(trans[first_index] != BblasTrans) &&
(trans[first_index] != BblasConjTrans))
{
xerbla_batch(func_name, BBLAS_ERR_TRANS, first_index);
for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
{
info[batch_iter] = BBLAS_ERR_TRANS;
}
return;
}
if (N[first_index] < 0)
{
xerbla_batch(func_name, BBLAS_ERR_N, first_index);
for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
{
info[batch_iter] = BBLAS_ERR_N;
}
return;
}
if (K[first_index] < 0)
{
xerbla_batch(func_name, BBLAS_ERR_K, first_index);
for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
{
info[batch_iter] = BBLAS_ERR_K;
}
return;
}
if (trans[first_index] == BblasNoTrans)
{
LDA = N[first_index];
LDB = N[first_index];
} else
{
LDA = K[first_index];
LDB = K[first_index];
}
if (lda[first_index] < max(1,LDA))
{
xerbla_batch(func_name, BBLAS_ERR_LDA, first_index);
for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
{
info[batch_iter] = BBLAS_ERR_LDA;
}
return;
}
if (ldb[first_index] < max(1, LDB))
{
xerbla_batch(func_name, BBLAS_ERR_LDB, first_index);
for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
{
info[batch_iter] = BBLAS_ERR_LDB;
}
return;
}
if (ldc[first_index] < max(1, N[first_index]))
{
xerbla_batch(func_name, BBLAS_ERR_LDC, first_index);
for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
{
info[batch_iter] = BBLAS_ERR_LDC;
}
return;
}
/* particular case */
if (N[first_index] == 0 || K[first_index] == 0 ||
(alpha[first_index] == (float)0.0 ||
beta[first_index] == (float)1.0))
{
for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
{
info[batch_iter] = BBLAS_SUCCESS;
}
return;
}
#pragma omp parallel for private(batch_iter)
for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
{
/*Call to cblas_ssyr2k */
cblas_ssyr2k(
BblasColMajor,
uplo[first_index],
trans[first_index],
N[first_index],
K[first_index],
(alpha[first_index]),
arrayA[batch_iter],
lda[first_index],
arrayB[batch_iter],
ldb[first_index],
(beta[first_index]),
arrayC[batch_iter],
ldc[first_index]);
/* Successful */
info[batch_iter] = BBLAS_SUCCESS;
} /*END FIXED SIZE FOR LOOP */
}else if (batch_opts == BBLAS_VARIABLE)
{
#pragma omp parallel for private (batch_iter, LDA, LDB)
for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
{
/* Check input arguments */
if ((uplo[batch_iter] != BblasUpper) &&
(uplo[batch_iter] != BblasLower))
{
xerbla_batch(func_name, BBLAS_ERR_UPLO, batch_iter);
info[batch_iter] = BBLAS_ERR_UPLO;
continue;
}
if ((trans[batch_iter] != BblasNoTrans) &&
(trans[batch_iter] != BblasTrans) &&
(trans[batch_iter] != BblasConjTrans))
{
xerbla_batch(func_name, BBLAS_ERR_TRANS, batch_iter);
info[batch_iter] = BBLAS_ERR_TRANS;
continue;
}
if (N[batch_iter] < 0)
{
xerbla_batch(func_name, BBLAS_ERR_N, batch_iter);
info[batch_iter] = BBLAS_ERR_N;
continue;
}
if (K[batch_iter] < 0)
{
xerbla_batch(func_name, BBLAS_ERR_K, batch_iter);
info[batch_iter] = BBLAS_ERR_K;
continue;
}
if (trans[batch_iter] == BblasNoTrans)
{
LDA = N[batch_iter];
LDB = N[batch_iter];
} else
{
LDA = K[batch_iter];
LDB = K[batch_iter];
}
if (lda[batch_iter] < max(1, LDA))
{
xerbla_batch(func_name, BBLAS_ERR_LDA, batch_iter);
info[batch_iter] = BBLAS_ERR_LDA;
continue;
}
if (ldb[batch_iter] < max(1, LDB))
{
xerbla_batch(func_name, BBLAS_ERR_LDB, batch_iter);
info[batch_iter] = BBLAS_ERR_LDB;
continue;
}
if (ldc[batch_iter] < max(1, N[batch_iter]))
{
xerbla_batch(func_name, BBLAS_ERR_LDC, batch_iter);
info[batch_iter] = BBLAS_ERR_LDC;
continue;
}
/* particular case */
if (N[batch_iter] == 0 || K[batch_iter] == 0 ||
((alpha[batch_iter] == (float)0.0) &&
beta[batch_iter] == (float)1.0))
{
info[batch_iter] = BBLAS_SUCCESS;
continue;
}
cblas_ssyr2k(
BblasColMajor,
uplo[batch_iter],
trans[batch_iter],
N[batch_iter],
K[batch_iter],
(alpha[batch_iter]),
arrayA[batch_iter],
lda[batch_iter],
arrayB[batch_iter],
ldb[batch_iter],
(beta[batch_iter]),
arrayC[batch_iter],
ldc[batch_iter]);
/* Successful */
info[batch_iter] = BBLAS_SUCCESS;
}
}else
{
xerbla_batch(func_name, BBLAS_ERR_BATCH_OPTS, -1);
}
}
#undef REAL
|
attribute.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% AAA TTTTT TTTTT RRRR IIIII BBBB U U TTTTT EEEEE %
% A A T T R R I B B U U T E %
% AAAAA T T RRRR I BBBB U U T EEE %
% A A T T R R I B B U U T E %
% A A T T R R IIIII BBBB UUU T EEEEE %
% %
% %
% MagickCore Get / Set Image Attributes %
% %
% Software Design %
% Cristy %
% October 2002 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/cache-view.h"
#include "magick/client.h"
#include "magick/channel.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colormap-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/constitute.h"
#include "magick/deprecate.h"
#include "magick/draw.h"
#include "magick/draw-private.h"
#include "magick/effect.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/geometry.h"
#include "magick/histogram.h"
#include "magick/identify.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/memory_.h"
#include "magick/magick.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/paint.h"
#include "magick/pixel.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/random_.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/segment.h"
#include "magick/splay-tree.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/threshold.h"
#include "magick/transform.h"
#include "magick/utility.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e B o u n d i n g B o x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageBoundingBox() returns the bounding box of an image canvas.
%
% The format of the GetImageBoundingBox method is:
%
% RectangleInfo GetImageBoundingBox(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o bounds: Method GetImageBoundingBox returns the bounding box of an
% image canvas.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport RectangleInfo GetImageBoundingBox(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;
  MagickBooleanType
    status;
  MagickPixelPacket
    target[3],
    zero;
  RectangleInfo
    bounds;
  register const PixelPacket
    *p;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Start from an inverted box (zero extent, origin at the far corner) so any
    non-background pixel found below can only shrink/grow it toward the truth.
  */
  bounds.width=0;
  bounds.height=0;
  bounds.x=(ssize_t) image->columns;
  bounds.y=(ssize_t) image->rows;
  /*
    Reference pixels taken from three corners: target[0] = top-left (governs
    the left and top edges), target[1] = top-right (right edge), and
    target[2] = bottom-left (bottom edge).
  */
  GetMagickPixelPacket(image,&target[0]);
  image_view=AcquireVirtualCacheView(image,exception);
  p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception);
  if (p == (const PixelPacket *) NULL)
    {
      image_view=DestroyCacheView(image_view);
      return(bounds);
    }
  SetMagickPixelPacket(image,p,GetCacheViewVirtualIndexQueue(image_view),
    &target[0]);
  GetMagickPixelPacket(image,&target[1]);
  p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1,
    exception);
  if (p != (const PixelPacket *) NULL)
    SetMagickPixelPacket(image,p,GetCacheViewVirtualIndexQueue(image_view),
      &target[1]);
  GetMagickPixelPacket(image,&target[2]);
  p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1,
    exception);
  if (p != (const PixelPacket *) NULL)
    SetMagickPixelPacket(image,p,GetCacheViewVirtualIndexQueue(image_view),
      &target[2]);
  status=MagickTrue;
  GetMagickPixelPacket(image,&zero);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;
    RectangleInfo
      bounding_box;
    register const IndexPacket
      *magick_restrict indexes;
    register const PixelPacket
      *magick_restrict p;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    /*
      Snapshot the shared bounds into a thread-private copy; the same named
      critical section used for the merge below serializes this read.
    */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
# pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
    bounding_box=bounds;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    pixel=zero;
    /*
      width/height temporarily hold maximum x/y coordinates here; they are
      converted to extents after the loop.
    */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      if ((x < bounding_box.x) &&
          (IsMagickColorSimilar(&pixel,&target[0]) == MagickFalse))
        bounding_box.x=x;
      if ((x > (ssize_t) bounding_box.width) &&
          (IsMagickColorSimilar(&pixel,&target[1]) == MagickFalse))
        bounding_box.width=(size_t) x;
      if ((y < bounding_box.y) &&
          (IsMagickColorSimilar(&pixel,&target[0]) == MagickFalse))
        bounding_box.y=y;
      if ((y > (ssize_t) bounding_box.height) &&
          (IsMagickColorSimilar(&pixel,&target[2]) == MagickFalse))
        bounding_box.height=(size_t) y;
      p++;
    }
    /*
      Merge this row's private box into the shared result.
    */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
# pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
    {
      if (bounding_box.x < bounds.x)
        bounds.x=bounding_box.x;
      if (bounding_box.y < bounds.y)
        bounds.y=bounding_box.y;
      if (bounding_box.width > bounds.width)
        bounds.width=bounding_box.width;
      if (bounding_box.height > bounds.height)
        bounds.height=bounding_box.height;
    }
  }
  image_view=DestroyCacheView(image_view);
  if ((bounds.width == 0) || (bounds.height == 0))
    (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
      "GeometryDoesNotContainImage","`%s'",image->filename);
  else
    {
      /* Convert max coordinates into inclusive extents */
      bounds.width-=(bounds.x-1);
      bounds.height-=(bounds.y-1);
    }
  return(bounds);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelDepth() returns the depth of a particular image channel.
%
% The format of the GetImageChannelDepth method is:
%
% size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
% size_t GetImageChannelDepth(const Image *image,
% const ChannelType channel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
{
  /* Convenience wrapper: depth computed across all composite channels. */
  return(GetImageChannelDepth(image,CompositeChannels,exception));
}
MagickExport size_t GetImageChannelDepth(const Image *image,
  const ChannelType channel,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  register ssize_t
    i;

  size_t
    *current_depth,
    depth,
    number_threads;

  ssize_t
    y;

  /*
    Compute image depth: the smallest number of significant bits that
    represents every sample of the selected channels without loss.  Each
    worker thread accumulates its own running depth in current_depth[];
    the maximum over all threads is the answer.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  current_depth=(size_t *) AcquireQuantumMemory(number_threads,
    sizeof(*current_depth));
  if (current_depth == (size_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  status=MagickTrue;
  for (i=0; i < (ssize_t) number_threads; i++)
    current_depth[i]=1;
  if ((image->storage_class == PseudoClass) && (image->matte == MagickFalse))
    {
      /*
        PseudoClass image without alpha: only the colormap entries need to
        be inspected.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->colors,1)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        const int
          id = GetOpenMPThreadId();

        /*
          Raise this thread's depth until every selected channel of the
          colormap entry round-trips exactly at that depth.
        */
        while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
        {
          MagickBooleanType
            atDepth;

          QuantumAny
            range;

          atDepth=MagickTrue;
          range=GetQuantumRange(current_depth[id]);
          if ((channel & RedChannel) != 0)
            if (IsPixelAtDepth(image->colormap[i].red,range) == MagickFalse)
              atDepth=MagickFalse;
          if ((atDepth != MagickFalse) && ((channel & GreenChannel) != 0))
            if (IsPixelAtDepth(image->colormap[i].green,range) == MagickFalse)
              atDepth=MagickFalse;
          if ((atDepth != MagickFalse) && ((channel & BlueChannel) != 0))
            if (IsPixelAtDepth(image->colormap[i].blue,range) == MagickFalse)
              atDepth=MagickFalse;
          if ((atDepth != MagickFalse))
            break;
          current_depth[id]++;
        }
      }
      depth=current_depth[0];
      for (i=1; i < (ssize_t) number_threads; i++)
        if (depth < current_depth[i])
          depth=current_depth[i];
      current_depth=(size_t *) RelinquishMagickMemory(current_depth);
      return(depth);
    }
  image_view=AcquireVirtualCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  DisableMSCWarning(4127)
  if (1UL*QuantumRange <= MaxMap)
  RestoreMSCWarning
    {
      size_t
        *depth_map;

      /*
        Small quantum range: precompute the required depth of every
        possible sample value once, then scan pixels with table lookups.
      */
      depth_map=(size_t *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
      if (depth_map == (size_t *) NULL)
        ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        unsigned int
          depth;

        for (depth=1; depth < MAGICKCORE_QUANTUM_DEPTH; depth++)
        {
          Quantum
            pixel;

          QuantumAny
            range;

          range=GetQuantumRange(depth);
          pixel=(Quantum) i;
          if (pixel == ScaleAnyToQuantum(ScaleQuantumToAny(pixel,range),range))
            break;
        }
        depth_map[i]=depth;
      }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        register const IndexPacket
          *magick_restrict indexes;

        register const PixelPacket
          *magick_restrict p;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
        if (p == (const PixelPacket *) NULL)
          continue;
        indexes=GetCacheViewVirtualIndexQueue(image_view);
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          Quantum
            pixel;

          if ((channel & RedChannel) != 0)
            {
              pixel=GetPixelRed(p);
              if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
                current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
            }
          if ((channel & GreenChannel) != 0)
            {
              pixel=GetPixelGreen(p);
              if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
                current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
            }
          if ((channel & BlueChannel) != 0)
            {
              pixel=GetPixelBlue(p);
              if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
                current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
            }
          if (((channel & OpacityChannel) != 0) &&
              (image->matte != MagickFalse))
            {
              pixel=GetPixelOpacity(p);
              if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
                current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
            }
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            {
              pixel=GetPixelIndex(indexes+x);
              if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
                current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
            }
          p++;
        }
        /* at the maximum representable depth nothing can grow further */
        if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      depth=current_depth[0];
      for (i=1; i < (ssize_t) number_threads; i++)
        if (depth < current_depth[i])
          depth=current_depth[i];
      depth_map=(size_t *) RelinquishMagickMemory(depth_map);
      current_depth=(size_t *) RelinquishMagickMemory(current_depth);
      return(depth);
    }
#endif
  /*
    General case: probe each pixel for the minimum depth at which every
    selected channel round-trips exactly.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      continue;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
      {
        MagickBooleanType
          atDepth;

        QuantumAny
          range;

        atDepth=MagickTrue;
        range=GetQuantumRange(current_depth[id]);
        if ((atDepth != MagickFalse) && ((channel & RedChannel) != 0))
          if (IsPixelAtDepth(GetPixelRed(p),range) == MagickFalse)
            atDepth=MagickFalse;
        if ((atDepth != MagickFalse) && ((channel & GreenChannel) != 0))
          if (IsPixelAtDepth(GetPixelGreen(p),range) == MagickFalse)
            atDepth=MagickFalse;
        if ((atDepth != MagickFalse) && ((channel & BlueChannel) != 0))
          if (IsPixelAtDepth(GetPixelBlue(p),range) == MagickFalse)
            atDepth=MagickFalse;
        if ((atDepth != MagickFalse) && ((channel & OpacityChannel) != 0) &&
            (image->matte != MagickFalse))
          if (IsPixelAtDepth(GetPixelOpacity(p),range) == MagickFalse)
            atDepth=MagickFalse;  /* was MagickTrue: opacity mismatches were ignored */
        if ((atDepth != MagickFalse) && ((channel & IndexChannel) != 0) &&
            (image->colorspace == CMYKColorspace))
          if (IsPixelAtDepth(GetPixelIndex(indexes+x),range) == MagickFalse)
            atDepth=MagickFalse;
        if ((atDepth != MagickFalse))
          break;
        current_depth[id]++;
      }
      p++;
    }
    if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  depth=current_depth[0];
  for (i=1; i < (ssize_t) number_threads; i++)
    if (depth < current_depth[i])
      depth=current_depth[i];
  current_depth=(size_t *) RelinquishMagickMemory(current_depth);
  return(depth);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t u m D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantumDepth() returns the depth of the image rounded to a legal
% quantum depth: 8, 16, 32, or 64.
%
% The format of the GetImageQuantumDepth method is:
%
% size_t GetImageQuantumDepth(const Image *image,
% const MagickBooleanType constrain)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o constrain: A value other than MagickFalse, constrains the depth to
% a maximum of MAGICKCORE_QUANTUM_DEPTH.
%
*/
MagickExport size_t GetImageQuantumDepth(const Image *image,
  const MagickBooleanType constrain)
{
  /*
    Round the image depth up to the nearest legal quantum depth (8, 16, 32,
    or 64 bits).  A depth beyond 64 is passed through unchanged.  When
    constrain is set, the result is additionally clamped to the library's
    compiled-in MAGICKCORE_QUANTUM_DEPTH.
  */
  static const size_t
    legal_depths[] = { 8, 16, 32, 64 };

  register ssize_t
    i;

  size_t
    depth;

  depth=image->depth;
  for (i=0; i < (ssize_t) (sizeof(legal_depths)/sizeof(legal_depths[0])); i++)
    if (image->depth <= legal_depths[i])
      {
        depth=legal_depths[i];
        break;
      }
  if (constrain != MagickFalse)
    depth=(size_t) MagickMin((double) depth,(double) MAGICKCORE_QUANTUM_DEPTH);
  return(depth);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageType() returns the potential type of image:
%
% Bilevel Grayscale GrayscaleMatte
% Palette PaletteMatte TrueColor
% TrueColorMatte ColorSeparation ColorSeparationMatte
%
% To ensure the image type matches its potential, use SetImageType():
%
% (void) SetImageType(image,GetImageType(image));
%
% The format of the GetImageType method is:
%
% ImageType GetImageType(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType GetImageType(const Image *image,ExceptionInfo *exception)
{
  /*
    Classify the image by inspecting its pixels: CMYK wins outright, then
    monochrome, gray, and palette are tried in order of restrictiveness;
    anything else is true color.  The matte flag selects the *Matte variant.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->colorspace == CMYKColorspace)
    return(image->matte == MagickFalse ? ColorSeparationType :
      ColorSeparationMatteType);
  if (IsMonochromeImage(image,exception) != MagickFalse)
    return(BilevelType);
  if (IsGrayImage(image,exception) != MagickFalse)
    return(image->matte != MagickFalse ? GrayscaleMatteType : GrayscaleType);
  if (IsPaletteImage(image,exception) != MagickFalse)
    return(image->matte != MagickFalse ? PaletteMatteType : PaletteType);
  return(image->matte != MagickFalse ? TrueColorMatteType : TrueColorType);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I d e n t i f y I m a g e G r a y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IdentifyImageGray() returns grayscale if all the pixels in the image have
% the same red, green, and blue intensities, and bi-level if the intensity is
% either 0 or QuantumRange. Otherwise undefined is returned.
%
% The format of the IdentifyImageGray method is:
%
% ImageType IdentifyImageGray(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType IdentifyImageGray(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  ImageType
    type;

  register const PixelPacket
    *q;

  register ssize_t
    column;

  ssize_t
    row;

  /*
    Scan the pixels: start by assuming bi-level, demote to grayscale on the
    first non-monochrome gray pixel, and to undefined on the first pixel
    that is not gray at all (which ends the scan).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
      (image->type == GrayscaleMatteType))
    return(image->type);  /* trust a previous classification */
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(UndefinedType);
  type=BilevelType;
  image_view=AcquireVirtualCacheView(image,exception);
  for (row=0; (row < (ssize_t) image->rows) && (type != UndefinedType); row++)
  {
    q=GetCacheViewVirtualPixels(image_view,0,row,image->columns,1,exception);
    if (q == (const PixelPacket *) NULL)
      break;
    for (column=0; column < (ssize_t) image->columns; column++,q++)
    {
      if (IsPixelGray(q) == MagickFalse)
        {
          type=UndefinedType;
          break;
        }
      if ((type == BilevelType) && (IsPixelMonochrome(q) == MagickFalse))
        type=GrayscaleType;
    }
  }
  image_view=DestroyCacheView(image_view);
  if ((type == GrayscaleType) && (image->matte != MagickFalse))
    type=GrayscaleMatteType;
  return(type);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I d e n t i f y I m a g e M o n o c h r o m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IdentifyImageMonochrome() returns MagickTrue if all the pixels in the image
% have the same red, green, and blue intensities and the intensity is either
% 0 or QuantumRange.
%
% The format of the IdentifyImageMonochrome method is:
%
% MagickBooleanType IdentifyImageMonochrome(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IdentifyImageMonochrome(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    bilevel;

  register const PixelPacket
    *q;

  register ssize_t
    column;

  ssize_t
    row;

  /*
    Inspect every pixel; the image is monochrome only when each pixel has
    equal red, green, and blue intensities at 0 or QuantumRange.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->type == BilevelType)
    return(MagickTrue);  /* already classified */
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  bilevel=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  for (row=0; (row < (ssize_t) image->rows) && (bilevel != MagickFalse); row++)
  {
    q=GetCacheViewVirtualPixels(image_view,0,row,image->columns,1,exception);
    if (q == (const PixelPacket *) NULL)
      break;
    for (column=0; column < (ssize_t) image->columns; column++,q++)
      if (IsPixelMonochrome(q) == MagickFalse)
        {
          bilevel=MagickFalse;
          break;
        }
  }
  image_view=DestroyCacheView(image_view);
  return(bilevel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I d e n t i f y I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IdentifyImageType() returns the potential type of image:
%
% Bilevel Grayscale GrayscaleMatte
% Palette PaletteMatte TrueColor
% TrueColorMatte ColorSeparation ColorSeparationMatte
%
% To ensure the image type matches its potential, use SetImageType():
%
% (void) SetImageType(image,IdentifyImageType(image,exception),exception);
%
% The format of the IdentifyImageType method is:
%
% ImageType IdentifyImageType(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType IdentifyImageType(const Image *image,
  ExceptionInfo *exception)
{
  /*
    Determine the most restrictive type that can represent the image,
    checking CMYK first, then monochrome, gray, and palette via the
    Identify* pixel scanners; true color is the fallback.  The matte flag
    selects the *Matte variant of each type.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->colorspace == CMYKColorspace)
    return(image->matte == MagickFalse ? ColorSeparationType :
      ColorSeparationMatteType);
  if (IdentifyImageMonochrome(image,exception) != MagickFalse)
    return(BilevelType);
  if (IdentifyImageGray(image,exception) != UndefinedType)
    return(image->matte != MagickFalse ? GrayscaleMatteType : GrayscaleType);
  if (IdentifyPaletteImage(image,exception) != MagickFalse)
    return(image->matte != MagickFalse ? PaletteMatteType : PaletteType);
  return(image->matte != MagickFalse ? TrueColorMatteType : TrueColorType);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s G r a y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsGrayImage() returns MagickTrue if the type of the image is grayscale or
% bi-level.
%
% The format of the IsGrayImage method is:
%
% MagickBooleanType IsGrayImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsGrayImage(const Image *image,
  ExceptionInfo *exception)
{
  /*
    Report whether the image's declared type is grayscale or bi-level; the
    pixels themselves are not inspected (see IdentifyImageGray for that).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  switch (image->type)
  {
    case BilevelType:
    case GrayscaleType:
    case GrayscaleMatteType:
      return(MagickTrue);
    default:
      return(MagickFalse);
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s M o n o c h r o m e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsMonochromeImage() returns MagickTrue if type of the image is bi-level.
%
% The format of the IsMonochromeImage method is:
%
% MagickBooleanType IsMonochromeImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsMonochromeImage(const Image *image,
  ExceptionInfo *exception)
{
  /*
    Report whether the image's declared type is bi-level; the pixels are
    not inspected (see IdentifyImageMonochrome for that).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  return(image->type == BilevelType ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s O p a q u e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsOpaqueImage() returns MagickTrue if none of the pixels in the image have
% an opacity value other than opaque (0).
%
% The format of the IsOpaqueImage method is:
%
% MagickBooleanType IsOpaqueImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsOpaqueImage(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    opaque;

  register const PixelPacket
    *q;

  register ssize_t
    column;

  ssize_t
    row;

  /*
    Determine if image is opaque: with no matte channel the answer is
    trivially yes; otherwise scan until a non-opaque sample is found.  A
    failed pixel fetch is reported as not opaque, as before.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->matte == MagickFalse)
    return(MagickTrue);
  opaque=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  for (row=0; (row < (ssize_t) image->rows) && (opaque != MagickFalse); row++)
  {
    q=GetCacheViewVirtualPixels(image_view,0,row,image->columns,1,exception);
    if (q == (const PixelPacket *) NULL)
      {
        opaque=MagickFalse;
        break;
      }
    for (column=0; column < (ssize_t) image->columns; column++,q++)
      if (GetPixelOpacity(q) != OpaqueOpacity)
        {
          opaque=MagickFalse;
          break;
        }
  }
  image_view=DestroyCacheView(image_view);
  return(opaque);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C h a n n e l D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageChannelDepth() sets the depth of the image.
%
% The format of the SetImageChannelDepth method is:
%
% MagickBooleanType SetImageDepth(Image *image,const size_t depth)
% MagickBooleanType SetImageChannelDepth(Image *image,
% const ChannelType channel,const size_t depth)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o depth: the image depth.
%
*/
MagickExport MagickBooleanType SetImageDepth(Image *image,
  const size_t depth)
{
  /*
    Convenience wrapper: apply the requested depth to the composite of all
    channels.
  */
  MagickBooleanType
    status;

  status=SetImageChannelDepth(image,CompositeChannels,depth);
  return(status);
}
MagickExport MagickBooleanType SetImageChannelDepth(Image *image,
  const ChannelType channel,const size_t depth)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  QuantumAny
    range;

  ssize_t
    y;

  /*
    Reduce the effective bit depth of the selected channels by rescaling
    every sample through a depth-bit intermediate range.  Returns MagickTrue
    on success; image->depth is updated only when all pixels were processed.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (depth >= MAGICKCORE_QUANTUM_DEPTH)
    {
      /* nothing can be quantized beyond the native depth; just record it */
      image->depth=depth;
      return(MagickTrue);
    }
  range=GetQuantumRange(depth);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      /*
        Quantize the colormap entries.  The thread-count heuristic must be
        sized by the number of colors being iterated, not the row count.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->colors,1)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((channel & RedChannel) != 0)
          image->colormap[i].red=ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel((MagickRealType) image->colormap[i].red),range),range);
        if ((channel & GreenChannel) != 0)
          image->colormap[i].green=ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel((MagickRealType) image->colormap[i].green),range),range);
        if ((channel & BlueChannel) != 0)
          image->colormap[i].blue=ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel((MagickRealType) image->colormap[i].blue),range),range);
        if ((channel & OpacityChannel) != 0)
          image->colormap[i].opacity=ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel((MagickRealType) image->colormap[i].opacity),range),
            range);
      }
    }
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  DisableMSCWarning(4127)
  if (1UL*QuantumRange <= MaxMap)
  RestoreMSCWarning
    {
      Quantum
        *depth_map;

      register ssize_t
        i;

      /*
        Scale pixels to desired (optimized with depth map).
      */
      depth_map=(Quantum *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
      if (depth_map == (Quantum *) NULL)
        ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
      for (i=0; i <= (ssize_t) MaxMap; i++)
        depth_map[i]=ScaleAnyToQuantum(ScaleQuantumToAny((Quantum) i,range),
          range);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register ssize_t
          x;

        register PixelPacket
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,depth_map[ScaleQuantumToMap(GetPixelRed(q))]);
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,depth_map[ScaleQuantumToMap(GetPixelGreen(q))]);
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,depth_map[ScaleQuantumToMap(GetPixelBlue(q))]);
          if (((channel & OpacityChannel) != 0) &&
              (image->matte != MagickFalse))
            SetPixelOpacity(q,depth_map[ScaleQuantumToMap(GetPixelOpacity(q))]);
          q++;
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          {
            status=MagickFalse;
            continue;
          }
      }
      image_view=DestroyCacheView(image_view);
      depth_map=(Quantum *) RelinquishMagickMemory(depth_map);
      if (status != MagickFalse)
        image->depth=depth;
      return(status);
    }
#endif
  /*
    Scale pixels to desired depth.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel(
          (MagickRealType) GetPixelRed(q)),range),range));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel(
          (MagickRealType) GetPixelGreen(q)),range),range));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel(
          (MagickRealType) GetPixelBlue(q)),range),range));
      if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
        SetPixelOpacity(q,ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel(
          (MagickRealType) GetPixelOpacity(q)),range),range));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      {
        status=MagickFalse;
        continue;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status != MagickFalse)
    image->depth=depth;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageType() sets the type of image. Choose from these types:
%
% BilevelType, GrayscaleType, GrayscaleMatteType, PaletteType,
% PaletteMatteType, TrueColorType, TrueColorMatteType,
% ColorSeparationType, ColorSeparationMatteType, OptimizeType
%
% The format of the SetImageType method is:
%
% MagickBooleanType SetImageType(Image *image,const ImageType type)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: Image type.
%
*/
MagickExport MagickBooleanType SetImageType(Image *image,const ImageType type)
{
  const char
    *artifact;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  QuantizeInfo
    *quantize_info;

  /*
    Coerce the image to the representation implied by `type`: transform the
    colorspace, quantize the palette, and add or drop the matte channel as
    needed.  image->type is updated only when every step succeeded.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  status=MagickTrue;
  image_info=AcquireImageInfo();
  image_info->dither=image->dither;
  /* a per-image "dither" artifact overrides the image's own setting */
  artifact=GetImageArtifact(image,"dither");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"dither",artifact);
  switch (type)
  {
    case BilevelType:
    {
      /* gray, normalized, quantized to exactly 2 gray levels; matte off */
      status=TransformImageColorspace(image,GRAYColorspace);
      (void) NormalizeImage(image);
      quantize_info=AcquireQuantizeInfo(image_info);
      quantize_info->number_colors=2;
      quantize_info->colorspace=GRAYColorspace;
      status=QuantizeImage(quantize_info,image);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      image->matte=MagickFalse;
      break;
    }
    case GrayscaleType:
    {
      /* gray colorspace, matte off */
      status=TransformImageColorspace(image,GRAYColorspace);
      image->matte=MagickFalse;
      break;
    }
    case GrayscaleMatteType:
    {
      /* gray colorspace; ensure an (opaque) alpha channel exists */
      status=TransformImageColorspace(image,GRAYColorspace);
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      break;
    }
    case PaletteType:
    {
      /* sRGB, at most 256 colors, matte off */
      status=TransformImageColorspace(image,sRGBColorspace);
      if ((image->storage_class == DirectClass) || (image->colors > 256))
        {
          quantize_info=AcquireQuantizeInfo(image_info);
          quantize_info->number_colors=256;
          status=QuantizeImage(quantize_info,image);
          quantize_info=DestroyQuantizeInfo(quantize_info);
        }
      image->matte=MagickFalse;
      break;
    }
    case PaletteBilevelMatteType:
    {
      /* sRGB palette with alpha thresholded to fully on/off */
      status=TransformImageColorspace(image,sRGBColorspace);
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      (void) BilevelImageChannel(image,AlphaChannel,(double) QuantumRange/2.0);
      quantize_info=AcquireQuantizeInfo(image_info);
      status=QuantizeImage(quantize_info,image);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      break;
    }
    case PaletteMatteType:
    {
      /* sRGB palette quantized with alpha taken into account */
      status=TransformImageColorspace(image,sRGBColorspace);
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      quantize_info=AcquireQuantizeInfo(image_info);
      quantize_info->colorspace=TransparentColorspace;
      status=QuantizeImage(quantize_info,image);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      break;
    }
    case TrueColorType:
    {
      /* sRGB DirectClass, matte off */
      status=TransformImageColorspace(image,sRGBColorspace);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass);
      image->matte=MagickFalse;
      break;
    }
    case TrueColorMatteType:
    {
      /* sRGB DirectClass with an (opaque) alpha channel */
      status=TransformImageColorspace(image,sRGBColorspace);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass);
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      break;
    }
    case ColorSeparationType:
    {
      /* CMYK DirectClass, matte off */
      status=TransformImageColorspace(image,CMYKColorspace);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass);
      image->matte=MagickFalse;
      break;
    }
    case ColorSeparationMatteType:
    {
      /* CMYK DirectClass with an (opaque) alpha channel */
      status=TransformImageColorspace(image,CMYKColorspace);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass);
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      break;
    }
    case OptimizeType:
    case UndefinedType:
      break;
  }
  image_info=DestroyImageInfo(image_info);
  if (status == MagickFalse)
    return(MagickFalse);
  image->type=type;
  return(MagickTrue);
}
|
GB_binop__isne_fc64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__isne_fc64
// A.*B function (eWiseMult): GB_AemultB__isne_fc64
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__isne_fc64
// C+=b function (dense accum): GB_Cdense_accumb__isne_fc64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isne_fc64
// C=scalar+B GB_bind1st__isne_fc64
// C=scalar+B' GB_bind1st_tran__isne_fc64
// C=A+scalar GB_bind2nd__isne_fc64
// C=A'+scalar GB_bind2nd_tran__isne_fc64
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// B,b type: GxB_FC64_t
// BinaryOp: cij = GB_FC64_isne (aij, bij)
// Type and operator definitions consumed by the generic kernel templates
// that are #include'd below; the generator emits one set per operator/type.

#define GB_ATYPE \
    GxB_FC64_t

#define GB_BTYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    GxB_FC64_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    GxB_FC64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y) \
    z = GB_FC64_isne (x, y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
// "(none)" is the generator's placeholder when no CBLAS gateway exists.
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISNE || GxB_NO_FC64 || GxB_NO_ISNE_FC64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Disabled stub: C += A+B (dense accum) is not generated for ISNE because
// the op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; no accumulator is applied.
GrB_Info GB_Cdense_ewise3_noaccum__isne_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    // this operator/type combination was disabled at compile time
    return (GrB_NO_VALUE) ;
    #else
    // the template supplies the loop; GB_BINOP gives cij = GB_FC64_isne (aij,bij)
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate the sparse matrix B into the dense matrix C, using the
// slice arrays to distribute entries of B across ntasks/nthreads.
GrB_Info GB_Cdense_accumB__isne_fc64
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    // this operator/type combination was disabled at compile time
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate the scalar b (passed as an untyped pointer) into every
// entry of the dense matrix C.
GrB_Info GB_Cdense_accumb__isne_fc64
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    // this operator/type combination was disabled at compile time
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type GxB_FC64_t
        GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable; kept by the code generator for a uniform epilogue
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled stub: C = A*D (column scale) is not generated for this operator;
// "(none)" is the generator's placeholder name for a disabled kernel.
GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *GB_RESTRICT Cx = (GxB_FC64_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled stub: the code generator emits no C = D*B (row-scale) kernel for
// this operator, so this whole definition is compiled out.
// Fix: the generated placeholder name read "(node)" -- corrected to "(none)"
// to match the column-scale stub above and the convention used throughout
// the generated GraphBLAS sources.
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *GB_RESTRICT Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, applying the isne operator where the
// patterns of A and B overlap (per GraphBLAS eWiseAdd semantics the result
// pattern is the union -- confirm in GB_add_template.c, which supplies the
// kernel body from the precomputed TaskList).
GrB_Info GB_AaddB__isne_fc64
(
GrB_Matrix C,
const GrB_Matrix M,                     // optional mask
const bool Mask_struct,                 // presumably: use mask structurally, ignoring values
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,      // mappings from C's vectors to M/A/B
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, applying the isne operator; the kernel
// body comes from GB_emult_template.c, driven by the precomputed TaskList.
GrB_Info GB_AemultB__isne_fc64
(
GrB_Matrix C,
const GrB_Matrix M,                     // optional mask
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,      // mappings from C's vectors to M/A/B
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [k] = isne (x, Bx [k]) for all k: apply the binary operator with the
// scalar x bound to the first argument, in parallel over nthreads threads.
GrB_Info GB_bind1st__isne_fc64
(
GB_void *Cx_output,         // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ;
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
GxB_FC64_t xscalar = (*((GxB_FC64_t *) x_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
Cx [k] = GB_FC64_isne (xscalar, Bx [k]) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [k] = isne (Ax [k], y) for all k: apply the binary operator with the
// scalar y bound to the second argument, in parallel over nthreads threads.
GrB_Info GB_bind2nd__isne_fc64
(
GB_void *Cx_output,         // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ;
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
GxB_FC64_t yscalar = (*((GxB_FC64_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
Cx [k] = GB_FC64_isne (Ax [k], yscalar) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = Ax [pA] ; \
Cx [pC] = GB_FC64_isne (x, aij) ; \
}
// C = op (x, A'): transpose A and apply the operator with the scalar x bound
// to the first argument; the traversal is supplied by GB_unop_transpose.c,
// which expands the GB_CAST_OP macro defined just above.
GrB_Info GB_bind1st_tran__isne_fc64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// re-establish GB_ATYPE for any code following this function
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = Ax [pA] ; \
Cx [pC] = GB_FC64_isne (aij, y) ; \
}
// C = op (A', y): transpose A and apply the operator with the scalar y bound
// to the second argument; traversal supplied by GB_unop_transpose.c via the
// GB_CAST_OP macro defined just above.
GrB_Info GB_bind2nd_tran__isne_fc64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
3d25pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y as a struct timeval.
 * Follows the classic glibc-manual algorithm: *y is used as scratch space
 * and may be modified by the call.
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Normalize by moving whole seconds through y until y->tv_usec <= x->tv_usec
 * and the microsecond gap is below one second. */
if (x->tv_usec < y->tv_usec)
{
int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * borrow;
y->tv_sec += borrow;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int carry = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * carry;
y->tv_sec -= carry;
}
/* tv_usec is now guaranteed non-negative. */
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
return x->tv_sec < y->tv_sec ? 1 : 0;
}
/* Driver for the tiled order-2, 3D 25-point stencil.
 * Usage: ./a.out Nx Ny Nz [Nt]
 * Fixes vs. the generated original:
 *  - Nx/Ny/Nz/Nt were read uninitialized when arguments were missing;
 *    sane defaults are now provided.
 *  - roc2 was malloc'd once and then immediately overwritten by a second
 *    malloc (leak).
 *  - the init loops started at index 1, but the stencil reads indices
 *    0..N-1, so plane/row/column 0 was uninitialized; A[1] (read by the
 *    first time step's RHS) was never initialized either.
 *  - A and tile_size were never freed. */
int main(int argc, char *argv[])
{
int i, j, k, test;
/* Each axis gets +8 halo cells (radius-4 stencil, 4 on each side). */
int Nx = 40, Ny = 40, Nz = 40, Nt = 16;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
/* A[2][Nz][Ny][Nx]: double-buffered field (time steps ping-pong between
 * the two buffers); roc2[Nz][Ny][Nx]: per-cell coefficient. */
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
double ***roc2 = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
roc2[i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 8;
tile_size[1] = 8;
tile_size[2] = 8;
tile_size[3] = 512;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
/* Initialize the full domain, index 0 included (the stencil reads it).
 * A[1] is zeroed after the rand() draws so the A[0]/roc2 sequences match
 * the original seeding. */
srand(42);
for (i = 0; i < Nz; i++) {
for (j = 0; j < Ny; j++) {
for (k = 0; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
roc2[i][j][k] = 2.0 * (rand() % BASE);
A[1][i][j][k] = 0.0;
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
/* num_threads is consumed by the PRINT_RESULTS macro from print_utils.h */
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
const double coef0 = -0.28472;
const double coef1 = 0.16000;
const double coef2 = -0.02000;
const double coef3 = 0.00254;
const double coef4 = -0.00018;
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
int t1, t2, t3, t4, t5, t6, t7, t8;
int lbp, ubp;
register int lbv, ubv;
/* Start of CLooG code (time-tiled loop nest generated by PLUTO; left
 * verbatim -- the bounds encode the 8x8x8x512 tiling above). */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
for (t1=-1;t1<=Nt-1;t1++) {
lbp=ceild(t1+1,2);
ubp=min(floord(4*Nt+Nz-9,8),floord(4*t1+Nz-2,8));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(ceild(t1,2),ceild(8*t2-Nz+5,8));t3<=min(floord(4*Nt+Ny-9,8),floord(4*t1+Ny-1,8));t3++) {
for (t4=max(max(ceild(t1-126,128),ceild(8*t2-Nz-499,512)),ceild(8*t3-Ny-499,512));t4<=min(min(floord(4*Nt+Nx-9,512),floord(4*t1+Nx-1,512)),floord(8*t3+Nx-5,512));t4++) {
for (t5=max(max(max(max(0,ceild(8*t2-Nz+5,4)),ceild(8*t3-Ny+5,4)),ceild(512*t4-Nx+5,4)),t1);t5<=min(min(min(2*t3,Nt-1),t1+1),128*t4+126);t5++) {
for (t6=max(max(8*t2,4*t5+4),-8*t1+8*t2+8*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+8*t5),4*t5+Nz-5);t6++) {
for (t7=max(8*t3,4*t5+4);t7<=min(8*t3+7,4*t5+Ny-5);t7++) {
lbv=max(512*t4,4*t5+4);
ubv=min(512*t4+511,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free all allocated arrays (including the top-level pointers, which the
// original leaked).
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
free(roc2[i][j]);
}
free(A[0][i]);
free(A[1][i]);
free(roc2[i]);
}
free(A[0]);
free(A[1]);
free(roc2);
free(A);
free(tile_size);
return 0;
}
|
evaluation.c | #include "common.h"
// Zero the first s words of both bit-block work buffers A and B
// (parallelized when OpenMP is enabled).
static void clear_buffers(uint64_t* restrict A, uint64_t* restrict B, const int s)
{
#ifdef _OPENMP
#pragma omp parallel for
#endif
for(int idx = 0; idx < s; idx++){
A[idx] = 0;
B[idx] = 0;
}
}
#ifdef _OPENMP
// One BFS expansion step (OpenMP version): scan the current frontier, mark
// unvisited neighbours VISITED, record their distance (= level), and gather
// them into next[].  Each thread collects hits into its own local_frontier
// and the partial lists are concatenated under a critical section.
// Returns the size of the new frontier written to next[].
static int top_down_step(const int level, const int nodes, const int num_frontier, const int max_degree,
const int* restrict degree, const int* restrict adjacency, int* restrict frontier,
int* restrict next, int* restrict distance, char* restrict bitmap)
{
int count = 0;
int local_frontier[nodes];   // per-thread scratch, worst-case sized VLA
#pragma omp parallel private(local_frontier)
{
int local_count = 0;
#pragma omp for nowait
for(int i=0;i<num_frontier;i++){
int v = frontier[i];
for(int j=0;j<degree[v];j++){
int n = *(adjacency + v * max_degree + j); // adjacency[v][j];
// NOTE(review): bitmap is tested and set by all threads without
// atomics, so a vertex discovered concurrently could be appended by
// more than one thread -- presumably tolerated by the caller; confirm.
if(bitmap[n] == NOT_VISITED){
bitmap[n] = VISITED;
distance[n] = level;
local_frontier[local_count++] = n;
}
}
} // end for i
#pragma omp critical
{
memcpy(&next[count], local_frontier, local_count*sizeof(int));
count += local_count;
}
}
return count;
}
#else
// Serial fallback of the BFS expansion step: scan the frontier, mark and
// record each newly discovered vertex, appending it to next[].  Returns the
// number of vertices appended.  (`nodes` is unused here, kept for signature
// parity with the OpenMP version.)
static int top_down_step(const int level, const int nodes, const int num_frontier, const int max_degree,
const int* restrict degree, const int* restrict adjacency, int* restrict frontier,
int* restrict next, int* restrict distance, char* restrict bitmap)
{
int found = 0;
for(int fi = 0; fi < num_frontier; fi++){
const int v = frontier[fi];
for(int d = 0; d < degree[v]; d++){
const int w = adjacency[v * max_degree + d];  // adjacency[v][d]
if(bitmap[w] != NOT_VISITED)
continue;
bitmap[w] = VISITED;
distance[w] = level;
next[found++] = w;
}
}
return found;
}
#endif
// Compute the diameter and ASPL of the graph by running a BFS from every
// locally-owned source vertex of the base graph (sources are striped across
// MPI ranks: s = rank, rank+procs, ...).  Per-pair distance sums and the
// maximum BFS depth are combined across ranks with Allreduce.
// Returns false if any BFS fails to reach some vertex (graph disconnected).
static bool bfs(const int nodes, const int max_degree, const int* restrict degree,
const int adjacency[nodes][max_degree], const int based_nodes, const int height,
const int based_height, const int groups, int *diameter, double *ASPL)
{
char *bitmap = malloc(sizeof(char) * nodes);
int *frontier = malloc(sizeof(int) * nodes);
int *distance = malloc(sizeof(int) * nodes);
int *next = malloc(sizeof(int) * nodes);
bool reached = true;
double sum = 0.0;
*diameter = 0;
for(int s=rank;s<based_nodes;s+=procs){
// map the base-graph index s onto the expanded graph's vertex numbering
int s1 = (s/based_height) * height + (s%based_height);
int num_frontier = 1, level = 0;
for(int i=0;i<nodes;i++)
bitmap[i] = NOT_VISITED;
frontier[0] = s1;
distance[s1] = level;
bitmap[s1] = VISITED;
while(1){
num_frontier = top_down_step(level++, nodes, num_frontier, max_degree, degree,
(int *)adjacency, frontier, next, distance, bitmap);
if(num_frontier == 0) break;
// ping-pong the frontier buffers for the next level
int *tmp = frontier;
frontier = next;
next = tmp;
}
*diameter = MAX(*diameter, level-1);
for(int i=0;i<nodes;i++){
if(i == s1) continue;
if(bitmap[i] == NOT_VISITED)
reached = false;
// NOTE(review): if i was never visited, distance[i] is stale (or
// uninitialized on the first source); the sum is then discarded via the
// early `return false` below, but the read itself is worth confirming.
sum += (distance[i] + 1) * groups;
}
}
free(bitmap);
free(frontier);
free(distance);
free(next);
MPI_Allreduce(MPI_IN_PLACE, &reached, 1, MPI_C_BOOL, MPI_LAND, MPI_COMM_WORLD);
if(!reached)
return false;
MPI_Allreduce(MPI_IN_PLACE, diameter, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
MPI_Allreduce(MPI_IN_PLACE, &sum, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
*ASPL = sum / ((((double)nodes-1)*nodes));
return true;
}
// Compute diameter/ASPL via repeated boolean matrix products on bit blocks:
// each source vertex owns one bit; B = adjacency OR-combined with A expands
// reachability by one hop per kk-iteration, and popcounts measure how many
// (source, vertex) pairs are reachable.  Sources are processed in `parsize`
// chunks of UINT64_BITS*chunk bits, striped across MPI ranks.
// Returns false when the iteration count exceeds `nodes` (disconnected).
static bool matrix_op(const int nodes, const int max_degree, const int* restrict degree, const int* restrict adjacency,
const int based_nodes, const int height, const int based_height,
const int groups, int *diameter, double *ASPL, const int* rotate_hash)
{
unsigned int elements = (based_nodes+(UINT64_BITS-1))/UINT64_BITS;  // 64-bit words covering all sources
unsigned int chunk = (elements+(procs-1))/procs;                     // words handled per pass
size_t s = nodes*chunk*sizeof(uint64_t);
uint64_t* A = malloc(s); // uint64_t A[nodes][chunk];
uint64_t* B = malloc(s); // uint64_t B[nodes][chunk];
int parsize = (elements+(chunk-1))/chunk;
double sum = 0.0;
*diameter = 1;
for(int t=rank;t<parsize;t+=procs){
uint64_t kk, l;
clear_buffers(A, B, nodes*chunk);
// seed: one bit per source vertex of this chunk (A = B = identity slice);
// after the loop, l = number of sources seeded in this chunk
for(l=0; l<UINT64_BITS*chunk && UINT64_BITS*t*chunk+l<based_nodes; l++){
unsigned int offset = (UINT64_BITS*t*chunk+l)*chunk+l/UINT64_BITS;
A[offset] = B[offset] = (0x1ULL<<(l%UINT64_BITS));
}
for(kk=0;kk<nodes;kk++){
#ifdef _OPENMP
#pragma omp parallel for
#endif
for(int i=0;i<nodes;i++){
int ii = rotate_hash[i];   // vertex renumbering -- presumably symmetry rotation; confirm
for(int j=0;j<degree[i];j++){
int n = *(adjacency + i * max_degree + j); // int n = adjacency[i][j];
int nn = rotate_hash[n];
for(int k=0;k<chunk;k++)
B[ii*chunk+k] |= A[nn*chunk+k];
}
}
uint64_t num = 0;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:num)
#endif
for(int i=0;i<chunk*nodes;i++)
num += POPCNT(B[i]);
// all nodes reached from every seeded source: done at depth kk
if(num == (uint64_t)nodes*l) break;
// swap A <-> B
uint64_t* tmp = A;
A = B;
B = tmp;
// (nodes*l - num) pairs are still unreachable at this depth; each
// contributes one more hop to the distance sum, weighted by groups
sum += ((double)nodes * l - num) * groups;
}
*diameter = MAX(*diameter, kk+1);
}
MPI_Allreduce(MPI_IN_PLACE, diameter, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
MPI_Allreduce(MPI_IN_PLACE, &sum, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
sum += (double)nodes * (nodes - 1);
free(A);
free(B);
if(*diameter > nodes){
// PRINT_R0("This graph is not connected graph.\n");
return false;
}
*ASPL = sum / (((double)nodes-1)*nodes);
return true;
}
// Time one all-pairs-shortest-paths evaluation of the graph, dispatching to
// either the BFS-based or the bit-matrix implementation, and report the
// diameter and ASPL through the output pointers.  Returns false when the
// graph turns out to be disconnected.
bool evaluation(const int nodes, const int max_degree, const int* restrict degree, const int groups,
const int* restrict adjacency, const int based_nodes,const int height,
const int based_height, int *diameter, double *ASPL, const bool enable_bfs, const int* rotate_hash)
{
timer_start(TIMER_APSP);
const bool connected = enable_bfs
? bfs(nodes, max_degree, degree, (const int (*)[max_degree])adjacency, based_nodes, height,
based_height, groups, diameter, ASPL)
: matrix_op(nodes, max_degree, degree, adjacency, based_nodes, height,
based_height, groups, diameter, ASPL, rotate_hash);
timer_stop(TIMER_APSP);
return connected;
}
|
GB_unaryop__lnot_uint8_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint8_int64
// op(A') function: GB_tran__lnot_uint8_int64
// C type: uint8_t
// A type: int64_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
uint8_t z = (uint8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT8 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = !(Ax [p] != 0) for all anz entries, with the int64_t -> uint8_t
// typecast and the LNOT operator both applied by the GB_CAST_OP macro
// defined earlier in this file.
GrB_Info GB_unop__lnot_uint8_int64
(
uint8_t *restrict Cx,
const int64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// operator/type combination compiled out; caller falls back to generic code
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast int64_t -> uint8_t, and apply
// LNOT; the traversal (phase 2) is supplied by GB_unaryop_transpose.c using
// the macros defined earlier in this file.
GrB_Info GB_tran__lnot_uint8_int64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
|
LADMM.h | #ifndef LADMM_H
#define LADMM_H
#include "Matrix.h"
#include <string>
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>
#include <stdio.h> /* printf */
#include <time.h>
#include <fstream>
#include <algorithm>
#include <iomanip>
#include <ctime>
#include <sstream>
#include <omp.h>
//#include "cmd_line.h"
//This class implements the method LADMM
/*
The optimization problem to solve is:
min \sum_i f^i(A_ix)+ \sum_i h^i(y^i)+ g(x)
s.t. Mx= y
Assumption 1: For each i, f_i is smooth, g(x) is seperable
*/
template<typename L, typename D>
class LADMM
{
private:
// Cached matrix-vector products, kept in sync with the iterates so each
// ADMM step avoids recomputing them from scratch.
std::vector<D> Ax;
std::vector<D> old_Ax;
std::vector<D> Mx;
std::vector<D> old_Mx;
std::vector<D> Mty;          // M^T y
std::vector<D> old_Mty;
std::vector<D> Mtlambda;     // M^T lambda
std::vector<D> old_Mtlambda;
std::vector<D> gradient;     // A^T grad_f(Ax), recomputed in update_x()
std::vector<D> MtMx;         // M^T M x, recomputed in update_x()
protected:
Matrix<L, D> data_A;         // data matrix A (CSR arrays ptr/row_idx/A, CSC arrays ptr_t/col_idx/A_t)
Matrix<L, D> data_M;         // constraint matrix M in  Mx = y
std::vector<D> x;            // primal iterate
std::vector<D> old_x;
std::vector<D> y;            // splitting variable
std::vector<D> old_y;
std::vector<D> lambda;       // dual multipliers
std::vector<D> old_lambda;
D beta;                      // augmented-Lagrangian penalty; grown by rho each outer iteration
L m_1;                       // = nfeatures of A (length of x)
L m_2;                       // = nsamples of A (length of Ax)
L m_3;                       // = nsamples of M (length of y and lambda)
D rho;                       // multiplicative growth factor for beta
D tau;                       // x-prox parameter, set to ~1.02 * lambda_max(M^T M) in Initialize()
D sigma;                     // y-prox parameter
D L_phi;                     // lambda_max of lambda_f * A^T A (power method, see set_L_phi)
D function_value;            // last value computed by compute_function_value()
D infeas;                    // last ||Mx - b||_2 computed by compute_infeasibility()
L print_every_N_ADMM;        // logging period (outer iterations)
D running_time_ADMM;         // accumulated wall time of the solver loop
L nb_outer_iters;
ofstream samp_ADMM;          // results log; unqualified name -- presumably a
                             // `using namespace std` leaks from an include; confirm
public:
D lambda_f;
D mu_g;
D lambda1;
D lambda2;
D L_h;
// Problem-specific pieces (f, h, g and the matrices) are supplied by a
// subclass overriding these hooks.
// NOTE(review): `return D(NULL)` is a roundabout D(0) for arithmetic D;
// left unchanged here.
virtual inline D value_of_f_j(D, L){return D(NULL);}
virtual inline D value_of_h_j(D, L){return D(NULL);}
virtual inline D gradient_of_f_j(D, L){return D(NULL);}
virtual inline D prox_of_h_j(D,D, L){return D(NULL);}
virtual inline D value_of_g_j(D, L){return D(NULL);}
virtual inline D prox_of_g_j(D, D, L){return D(NULL);}
virtual inline void set_matrix_M(){}
virtual inline void set_matrix_A(){}
/*
LADMM(const char* matrix_file, const char* matrix_file2)
: Primal_Dual_LOOPLESS_Katyusha0<L,D>(),data_A(matrix_file), data_M(matrix_file2)
{
this->matrix_merge(data_A,data_M);
this->gamma=1;
}
*/
// Estimate L_phi = lambda_max(lambda_f * A^T A) with 10 power iterations
// (zero when A has no rows).
inline void set_L_phi(){
if (data_A.nsamples== 0){
L_phi= 0;
}
else{
L_phi= compute_lambda_max_A(10);
}
}
// Power method on lambda_f * A^T A: K iterations of b <- A^T(A b) followed
// by normalization; returns the Rayleigh-quotient estimate b2^T b.
D compute_lambda_max_A(L K){
std::vector<D> bk(data_A.nfeatures);
for (L j=0;j<data_A.nfeatures;j++)
{
bk[j]=1;
}
std::vector<D> yk(data_A.nsamples);
D normk;
D tmp;
for(L kk=0;kk<K;kk++){
// yk = A * bk (CSR traversal)
for (L i=0;i<data_A.nsamples;i++){
tmp=0;
for (L k = data_A.ptr[i]; k < data_A.ptr[i + 1]; k++)
{
L j=data_A.row_idx[k];
tmp+=data_A.A[k]*bk[j];
}
yk[i]=tmp;
}
normk=0;
// bk = lambda_f * A^T * yk (CSC traversal), accumulating ||bk||^2
for (L j=0;j<data_A.nfeatures;j++){
bk[j]=0;
for (L k = data_A.ptr_t[j]; k < data_A.ptr_t[j + 1]; k++)
{
L i=data_A.col_idx[k];
bk[j]+=data_A.A_t[k]*yk[i]*lambda_f;
}
normk+=bk[j]*bk[j];
}
normk=sqrt(normk);
for (L j=0;j<data_A.nfeatures;j++)
{bk[j]=bk[j]/normk; }
}
cout<<endl;
// final Rayleigh quotient: res = bk^T (lambda_f A^T A) bk
D res=0;
normk=0;
for (L i=0;i<data_A.nsamples;i++){
tmp=0;
for (L k = data_A.ptr[i]; k < data_A.ptr[i + 1]; k++)
{
L j=data_A.row_idx[k];
tmp+=data_A.A[k]*bk[j];
}
yk[i]=tmp;
normk+=yk[i]*yk[i];
}
std::vector<D> bk2(data_A.nfeatures);
for (L j=0;j<data_A.nfeatures;j++){
bk2[j]=0;
for (L k = data_A.ptr_t[j]; k < data_A.ptr_t[j + 1]; k++)
{
L i=data_A.col_idx[k];
bk2[j]+=data_A.A_t[k]*yk[i]*lambda_f;
}
}
for (L j=0;j<data_A.nfeatures;j++)
res+=bk2[j]*bk[j];
return res;
}
// Same power method for lambda_max(M^T M) (no lambda_f weighting); the
// start vector is perturbed in its last entry -- presumably to avoid an
// eigenvector-orthogonal start; confirm.
D compute_lambda_max_M(L K){
std::vector<D> bk(data_M.nfeatures);
for (L j=0;j<data_M.nfeatures- 1;j++)
{
bk[j]=1;
}
bk[data_M.nfeatures- 1]= 2;
std::vector<D> yk(data_M.nsamples);
D normk;
D tmp;
for(L kk=0;kk<K;kk++){
for (L i=0;i<data_M.nsamples;i++){
tmp=0;
for (L k = data_M.ptr[i]; k < data_M.ptr[i + 1]; k++)
{
L j=data_M.row_idx[k];
tmp+=data_M.A[k]*bk[j];
}
yk[i]=tmp;
}
normk=0;
for (L j=0;j<data_M.nfeatures;j++){
bk[j]=0;
for (L k = data_M.ptr_t[j]; k < data_M.ptr_t[j + 1]; k++)
{
L i=data_M.col_idx[k];
bk[j]+=data_M.A_t[k]*yk[i];
}
normk+=bk[j]*bk[j];
}
normk=sqrt(normk);
for (L j=0;j<data_M.nfeatures;j++)
{bk[j]=bk[j]/normk; }
}
D res=0;
normk=0;
for (L i=0;i<data_M.nsamples;i++){
tmp=0;
for (L k = data_M.ptr[i]; k < data_M.ptr[i + 1]; k++)
{
L j=data_M.row_idx[k];
tmp+=data_M.A[k]*bk[j];
}
yk[i]=tmp;
normk+=yk[i]*yk[i];
}
std::vector<D> bk2(data_M.nfeatures);
for (L j=0;j<data_M.nfeatures;j++){
bk2[j]=0;
for (L k = data_M.ptr_t[j]; k < data_M.ptr_t[j + 1]; k++)
{
L i=data_M.col_idx[k];
bk2[j]+=data_M.A_t[k]*yk[i];
}
}
for (L j=0;j<data_M.nfeatures;j++)
res+=bk2[j]*bk[j];
return res;
}
// Number of features (columns) of the data matrix A.
L get_nb_features(){
return data_A.get_d();
}
/* inline D get_lambda1(){return lambda1;}
inline D get_lambda2(){return lambda2;}
*/
// x-update: one linearized proximal step
// x <- prox_g( x - (M^T lambda + A^T grad_f(Ax) + beta M^T(Mx - y)) / (beta*tau + L_phi) ),
// followed by refreshing the cached products Ax and Mx.
void update_x(){
#pragma omp parallel for
for (L i= 0; i< m_1; i++){
gradient[i]= 0;
for (L k = data_A.ptr_t[i]; k < data_A.ptr_t[i + 1];k++)
{
L j=data_A.col_idx[k];
gradient[i]+= data_A.A_t[k]*gradient_of_f_j(Ax[j],j);
}
MtMx[i]= 0;
for (L k = data_M.ptr_t[i]; k < data_M.ptr_t[i + 1];k++)
{
L j=data_M.col_idx[k];
MtMx[i]+= data_M.A_t[k]*Mx[j];
}
x[i]= prox_of_g_j(x[i]- (Mtlambda[i]+ gradient[i]+ beta*MtMx[i]- beta*Mty[i])/(beta*tau+ L_phi), beta*tau+ L_phi, i);
}
#pragma omp parallel
{
#pragma omp for
for (L i= 0; i< m_2; i++){
Ax[i]= compute_AiTx(i);
}
#pragma omp for
for (L i= 0; i< m_3; i++){
Mx[i]= compute_MiTx(i);
}
}
}
// y-update: y <- prox_h( y + (lambda + beta(Mx - y)) / (beta*sigma) ),
// then refresh the cached M^T y.
void update_y(){
#pragma omp parallel for
for (L i=0; i< m_3; i++){
y[i]= prox_of_h_j(y[i]+ (lambda[i]+ beta*Mx[i]- beta*y[i])/(beta*sigma), beta*sigma, i);
}
#pragma omp parallel for
for (L i=0; i< m_1; i++){
Mty[i]= compute_MTiTy(i);
}
}
// (A x)_i via the CSR row i of A.
D compute_AiTx(L i){
D res=0;
for (L k = data_A.ptr[i]; k < data_A.ptr[i + 1];k++)
{
L j=data_A.row_idx[k];
res+= data_A.A[k]*x[j];
}
return res;
}
// (M x)_i via the CSR row i of M.
D compute_MiTx(L i){
D res=0;
for (L k = data_M.ptr[i]; k < data_M.ptr[i + 1];k++)
{
L j=data_M.row_idx[k];
res+= data_M.A[k]*x[j];
}
return res;
}
// (M^T y)_i via the CSC column i of M.
D compute_MTiTy(L i){
D res=0;
for (L k = data_M.ptr_t[i]; k < data_M.ptr_t[i + 1];k++)
{
L j=data_M.col_idx[k];
res+= data_M.A_t[k]*y[j];
}
return res;
}
// (M^T lambda)_i via the CSC column i of M.
D compute_MTiTlambda(L i){
D res=0;
for (L k = data_M.ptr_t[i]; k < data_M.ptr_t[i + 1];k++)
{
L j=data_M.col_idx[k];
res+= data_M.A_t[k]*lambda[j];
}
return res;
}
// Objective value g(x) + sum_i h_i((Mx)_i) + sum_i f_i((Ax)_i), stored in
// function_value (uses the cached Ax and Mx).
void compute_function_value(){
D res= 0;
for (L i= 0; i< data_A.nfeatures; i++){
res+= value_of_g_j(x[i],i);
}
for (L i= 0; i< data_M.nsamples; i++){
res+= value_of_h_j(Mx[i],i);
}
for (L i= 0; i<data_A.nsamples; i++){
res+= value_of_f_j(Ax[i],i);
}
function_value= res;
}
// Constraint violation ||Mx - b||_2, stored in infeas.
void compute_infeasibility(){
D res = 0;
for (L i= 0; i< data_M.nsamples; i++){
res+= (Mx[i]- data_M.b[i])*(Mx[i]- data_M.b[i]);
}
infeas= sqrt(res);
}
// Dual update: lambda <- lambda + beta (Mx - y), then refresh M^T lambda.
// NOTE(review): these `omp for` directives have no enclosing `parallel`
// region here; as orphaned worksharing constructs they execute sequentially
// unless this method is called from inside a parallel region -- confirm
// whether `parallel for` was intended.
void update_lambda(){
#pragma omp for
for(L j=0;j<m_3;j++)
{
lambda[j]= lambda[j]+ beta*(Mx[j]- y[j]);
}
#pragma omp for
for (L i=0; i< m_1; i++){
Mtlambda[i]= compute_MTiTlambda(i);
}
}
// Load the matrices, size every buffer, copy the warm-start point
// (x0, y0, lambda0), and derive the step-size constants tau and L_phi.
void Initialize(D beta_0, D val_rho, vector<D> & x0,vector<D> & y0, vector<D> & lambda0){
cout<<"start initializing"<<endl;
set_matrix_M();
set_matrix_A();
m_1=data_A.get_d();
m_2=data_A.get_n();
m_3=data_M.get_n();
cout<<"m_1="<<m_1<<endl;
cout<<"m_2="<<m_2<<endl;
cout<<"m_3="<<m_3<<endl;
beta=beta_0;
//tau= data_A.nsamples;
// slightly above lambda_max(M^T M) so that tau*I - M^T M stays PSD
tau= 1.02*compute_lambda_max_M(10);
sigma= 1;
rho=val_rho;
x.resize(m_1,0);
old_x.resize(m_1,0);
y.resize(m_3,0);
old_y.resize(m_3,0);
lambda.resize(m_3,0);
old_lambda.resize(m_3,0);
for(L i=0;i<m_1;i++){
x[i]=x0[i];
}
for(L j=0;j<m_3;j++){
y[j]=y0[j];
}
for(L j=0;j<m_3;j++){
lambda[j]=lambda0[j];
}
Ax.clear();
Ax.resize(m_2,0);
Mx.clear();
Mx.resize(m_3,0);
Mty.clear();
Mty.resize(m_1,0);
Mtlambda.clear();
Mtlambda.resize(m_1,0);
gradient.clear();
gradient.resize(m_1,0);
MtMx.clear();
MtMx.resize(m_1,0);
L_phi= 0;
set_L_phi();
cout<< "L_phi= "<< L_phi << " beta= "<< beta<< " rho= "<< rho<< " tau= "<< tau<< " sigma= "<< sigma<<endl;
}
// Grow the penalty parameter between outer iterations.
void reset_everything(){
beta*=rho;
}
// Every print_every_N_ADMM iterations: recompute objective/infeasibility
// and log them to stdout and the samp_ADMM file.
inline void compute_and_record_res(){
if(nb_outer_iters%print_every_N_ADMM==0){
compute_function_value();
compute_infeasibility();
cout<<setprecision(9)<<"Iteration: "<<nb_outer_iters<<"; time="<<running_time_ADMM<< "; function value="<<function_value<< "; infeasibility="<< infeas<< endl;
samp_ADMM<<setprecision(9)<<nb_outer_iters<<" "<<running_time_ADMM<<" "<< function_value<<" "<< infeas<< endl;
}
}
// Main solver loop: repeat (x, y, lambda) updates until max_nb_outer
// iterations or the wall-time budget `time` is exhausted; results are
// streamed to "results/ADMM_<filename1>".  Logging time is excluded from
// the measured running time.
void ADMM_solve_with_Linear(D beta_0, D val_rho,vector<D> & x0,vector<D> & y0, vector<D> & lambda0, L max_nb_outer, L p_N_1, string filename1, D time){
Initialize(beta_0,val_rho, x0, y0, lambda0);
nb_outer_iters=0;
//string sampname2= ALGparam.data_dir +"/results/L_Katyusha_"+filename2;
//filename1= ALGparam.data_dir +"/results/ADMM_"+filename1;
filename1= "results/ADMM_"+filename1;
samp_ADMM.open(filename1.c_str());
running_time_ADMM=0;
print_every_N_ADMM=p_N_1;
compute_and_record_res();
D start;
D res_x, res_y, res_l;
/*
for(L i=0;i<m_3;i++){
old_lambda[i]=lambda[i];
}
*/
while(nb_outer_iters<max_nb_outer){
//rescale();
for(L i=0;i<m_1;i++){
old_x[i]=x[i];
}
for(L i=0;i<m_3;i++){
old_y[i]=y[i];
}
for(L i=0;i<m_3;i++){
old_lambda[i]=lambda[i];
}
//start = std::clock();
start=omp_get_wtime();
update_x();
update_y();
update_lambda();
nb_outer_iters++;
//running_time_ADMM+=( std::clock() - start ) / (double) CLOCKS_PER_SEC;
running_time_ADMM+=omp_get_wtime()-start;
compute_and_record_res();
/*
res_x= 0;
for(L i=0;i<m_1;i++){
res_x+= (old_x[i]- x[i])*(old_x[i]- x[i]);
}
res_y= 0;
for(L i=0;i<m_3;i++){
res_y= (old_y[i]- y[i])*(old_y[i]- y[i]);
}
res_l= 0;
for(L i=0;i<m_3;i++){
res_l= (old_lambda[i]- lambda[i])*(old_lambda[i]- lambda[i]);
}
cout<< "res_x= "<< res_x<< " res_y= "<< res_y<< " res_l= "<< res_l<< endl;
system("pause");
*/
//start = std::clock();
start=omp_get_wtime();
reset_everything();
//running_time_ADMM+=( std::clock() - start ) / (double) CLOCKS_PER_SEC;
running_time_ADMM+=omp_get_wtime()-start;
if (running_time_ADMM> time){
break;
}
}
}
};
#endif /* MIN_SMOOTH_CONVEX_H */
|
sandpiles.c | /* Sandpiles identity compute and render
* Many piles are processed in parallel using AVX2 and OpenMP.
*
* $ cc -O3 -march=native -fopenmp sandpiles.c
* $ ./a.out >identity.ppm
*
* $ cc -O3 -march=native -DANIMATE -DN=64 -DSCALE=16 sandpiles.c
* $ ./a.out | mpv --no-correct-pts --fps=20 -
*
* Ref: https://www.youtube.com/watch?v=1MtEUErz7Gg
* Ref: https://codegolf.stackexchange.com/a/106990
* Ref: https://nullprogram.com/blog/2017/11/03/
* Ref: https://nullprogram.com/blog/2015/07/10/
* Ref: https://www.youtube.com/watch?v=hBdJB-BzudU
*/
#include <stdio.h>
#include <string.h>
#ifndef N
# define N 1000
#endif
#ifndef SCALE
# define SCALE 1
#endif
/* Color palette */
#define C0 0xff9200
#define C1 0xf53d52
#define C2 0xfce315
#define C3 0x44c5cb
#define CX 0x000000
#ifdef __AVX2__
# include <immintrin.h>
# define TAILSTART N/32*32
#else
# warning Not using AVX2!
# define TAILSTART 0
#endif
static char state[2][2+N+31][2+N+31];
/* Emit the current grid (state[0]) to stdout as one binary PPM frame,
 * magnifying each cell to SCALE x SCALE pixels.  Cell values 0..3 map to
 * the palette colors; anything >= 4 maps to CX. */
static void
render(void)
{
static unsigned char buf[3L*N*SCALE*N*SCALE];
static const long colors[] = {C0, C1, C2, C3};
for (int row = 0; row < N*SCALE; row++) {
for (int col = 0; col < N*SCALE; col++) {
int cell = state[0][1+row/SCALE][1+col/SCALE];
long rgb = cell < 4 ? colors[cell] : CX;
unsigned char *px = &buf[row*3L*SCALE*N + col*3L];
px[0] = rgb >> 16;
px[1] = rgb >> 8;
px[2] = rgb >> 0;
}
}
printf("P6\n%d %d\n255\n", N*SCALE, N*SCALE);
fwrite(buf, sizeof(buf), 1, stdout);
}
/* Repeatedly topple the board until no cell holds 4 or more grains.
 * Double buffered: generation n is read from state[n] and the next
 * generation is written to state[!n]; n flips every pass.  The 1-cell
 * border around the N x N board (rows/cols 0 and N+1) silently absorbs
 * grains that spill off the edge.  Rows are processed in parallel with
 * OpenMP; the AVX2 path handles 32 cells at a time and the scalar loop
 * handles the remaining tail (TAILSTART..N-1). */
static void
stabilize(void)
{
    for (int n = 0; ; n = !n) {
        long spills = 0;  /* total topple events this pass; 0 => stable */
        int y; // To satisfy Visual Studio's OpenMP limitations :-(
        #pragma omp parallel for
        for (y = 0; y < N; y++) {
            int xspills = 0;  /* per-row topple count, merged atomically below */
#ifdef __AVX2__
            for (int x = 0; x < N/32*32; x += 32) {
                /* r = this cell after toppling: subtract 4 where value > 3 */
                __m256i v = _mm256_loadu_si256((void *)&state[n][1+y][1+x]);
                __m256i m = _mm256_cmpgt_epi8(v, _mm256_set1_epi8(3));
                __m256i s = _mm256_sub_epi8(v, _mm256_set1_epi8(4));
                __m256i r = _mm256_blendv_epi8(v, s, m);
                /* counts 1 if ANY of the 32 lanes toppled -- only the
                 * zero/non-zero distinction of `spills` matters */
                xspills += !_mm256_testz_si256(m, m);
                /* add one grain for each of the four neighbors that topples:
                 * north, south, west, east */
                v = _mm256_loadu_si256((void *)&state[n][1+y-1][1+x]);
                m = _mm256_cmpgt_epi8(v, _mm256_set1_epi8(3));
                s = _mm256_add_epi8(r, _mm256_set1_epi8(1));
                r = _mm256_blendv_epi8(r, s, m);
                v = _mm256_loadu_si256((void *)&state[n][1+y+1][1+x]);
                m = _mm256_cmpgt_epi8(v, _mm256_set1_epi8(3));
                s = _mm256_add_epi8(r, _mm256_set1_epi8(1));
                r = _mm256_blendv_epi8(r, s, m);
                v = _mm256_loadu_si256((void *)&state[n][1+y][1+x-1]);
                m = _mm256_cmpgt_epi8(v, _mm256_set1_epi8(3));
                s = _mm256_add_epi8(r, _mm256_set1_epi8(1));
                r = _mm256_blendv_epi8(r, s, m);
                v = _mm256_loadu_si256((void *)&state[n][1+y][1+x+1]);
                m = _mm256_cmpgt_epi8(v, _mm256_set1_epi8(3));
                s = _mm256_add_epi8(r, _mm256_set1_epi8(1));
                r = _mm256_blendv_epi8(r, s, m);
                _mm256_storeu_si256((void *)&state[!n][1+y][1+x], r);
            }
#endif
            /* Scalar tail: same toppling rule, one cell at a time. */
            for (int x = TAILSTART; x < N; x++) {
                int v = state[n][1+y][1+x];
                int r = v < 4 ? v : v - 4;
                xspills += v >= 4;
                r += state[n][1+y-1][1+x] >= 4;
                r += state[n][1+y+1][1+x] >= 4;
                r += state[n][1+y][1+x-1] >= 4;
                r += state[n][1+y][1+x+1] >= 4;
                state[!n][1+y][1+x] = r;
            }
            #pragma omp atomic
            spills += xspills;
        }
#ifdef ANIMATE
        render();
#endif
        if (!spills) {
            return;
        }
    }
}
/* Compute and render the identity element of the N x N sandpile group:
 * I = stabilize(6 - stabilize(6)), where "6" is the all-sixes board. */
int
main(void)
{
#ifdef _WIN32
    /* Put stdout into binary mode so the PPM byte stream is not mangled. */
    int _setmode(int, int);
    _setmode(1, 0x8000);
#endif
    /* First pass: fill the board with 6 grains per cell and stabilize. */
    for (int r = 0; r < N; r++)
        for (int c = 0; c < N; c++)
            state[0][1+r][1+c] = 6;
    stabilize();
    /* Second pass: subtract the stable board from 6 and stabilize again;
     * the result is the group identity. */
    for (int r = 0; r < N; r++)
        for (int c = 0; c < N; c++)
            state[0][1+r][1+c] = 6 - state[0][1+r][1+c];
    stabilize();
    render();
#ifdef ANIMATE
    /* Hold the final frame for a while when streaming to a video player. */
    for (int i = 0; i < 180; i++)
        render();
#endif
}
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for `struct timeval` values.
 * NOTE: *y is normalized (modified) in the process, exactly as in the
 * classic glibc-manual example this routine follows.
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow seconds into y when x has fewer microseconds than y. */
    if (x->tv_usec < y->tv_usec) {
        int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * carry;
        y->tv_sec  += carry;
    }
    /* Push any excess microseconds of the difference into seconds. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec  -= carry;
    }
    /* After normalization tv_usec of the result is certainly positive. */
    result->tv_sec  = x->tv_sec  - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 8;
tile_size[1] = 8;
tile_size[2] = 16;
tile_size[3] = 2048;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
blas1_dispatch_vector.h | #ifndef _DG_BLAS_STD_VECTOR_
#define _DG_BLAS_STD_VECTOR_
#ifdef DG_DEBUG
#include <cassert>
#endif //DG_DEBUG
#include <vector>
#include <array>
#include "blas1_dispatch_shared.h"
#include "vector_categories.h"
#include "tensor_traits.h"
#ifdef _OPENMP
#include <omp.h>
#endif //_OPENMP
///@cond
namespace dg
{
template<class to_ContainerType, class from_ContainerType, class ...Params>
inline to_ContainerType construct( const from_ContainerType& src, Params&& ...ps);
template<class from_ContainerType, class to_ContainerType, class ...Params>
inline void assign( const from_ContainerType&, to_ContainerType&, Params&& ...ps);
namespace detail{
// doConstruct / doAssign: recursively build or fill a nested container.
// The two tag arguments dispatch on (target category, source category);
// Recursive targets descend element-wise, Shared/MPI sources are broadcast
// into every element.
// NOTE(review): `ps` is forwarded inside a loop, so every element receives
// the same pack again on later iterations; this is only safe if the Params
// are copy-like (not consumed by dg::construct/dg::assign) -- confirm
// against the dg::construct contract.
//
// Target is a std::array-like (size fixed by default construction);
// broadcast the shared-memory source into every slot.
template<class To, class From, class ...Params>
To doConstruct( const From& src, ArrayVectorTag, SharedVectorTag, Params&&...ps )
{
    To t;
    using inner_vector = typename To::value_type;
    for (unsigned i=0; i<t.size(); i++)
        t[i] = dg::construct<inner_vector>(src, std::forward<Params>(ps)...);
    return t;
}
// Array target, MPI-distributed source: broadcast into every slot.
template<class To, class From, class ...Params>
To doConstruct( const From& src, ArrayVectorTag, MPIVectorTag, Params&&...ps )
{
    To t;
    using inner_vector = typename To::value_type;
    for (unsigned i=0; i<t.size(); i++)
        t[i] = dg::construct<inner_vector>(src, std::forward<Params>(ps)...);
    return t;
}
// Array target, recursive source: element i is built from src[i].
template<class To, class From, class ...Params>
To doConstruct( const From& src, ArrayVectorTag, RecursiveVectorTag, Params&&...ps )
{
    To t;
    using inner_vector = typename To::value_type;
    for (unsigned i=0; i<t.size(); i++)
        t[i] = dg::construct<inner_vector>(src[i], std::forward<Params>(ps)...);
    return t;
}
// Resizable recursive target, shared source: caller supplies the size;
// broadcast the source into all `size` elements.
template<class To, class From, class Size, class ...Params>
To doConstruct( const From& src, RecursiveVectorTag, SharedVectorTag, Size size, Params&&... ps )
{
    To t(size);
    using inner_vector = typename To::value_type;
    for (int i=0; i<(int)size; i++)
        t[i] = dg::construct<inner_vector>(src, std::forward<Params>(ps)...);
    return t;
}
// Resizable recursive target, MPI source: broadcast into all `size` elements.
template<class To, class From, class Size, class ...Params>
To doConstruct( const From& src, RecursiveVectorTag, MPIVectorTag, Size size, Params&&... ps )
{
    To t(size);
    using inner_vector = typename To::value_type;
    for (int i=0; i<(int)size; i++)
        t[i] = dg::construct<inner_vector>(src, std::forward<Params>(ps)...);
    return t;
}
// Recursive target, recursive source: size is taken from src, element-wise.
template<class To, class From, class ...Params>
To doConstruct( const From& src, RecursiveVectorTag, RecursiveVectorTag, Params&&...ps )
{
    unsigned size = src.size();
    To t(size);
    using inner_vector = typename To::value_type;
    for (unsigned i=0; i<size; i++)
        t[i] = dg::construct<inner_vector>(src[i], std::forward<Params>(ps)...);
    return t;
}
// doAssign mirrors doConstruct for already-existing targets.
// Shared source into array target: broadcast.
template<class From, class To, class ...Params>
void doAssign( const From& src, To& to, SharedVectorTag, ArrayVectorTag, Params&&...ps )
{
    for (unsigned i=0; i<to.size(); i++)
        dg::assign(src, to[i], std::forward<Params>(ps)...);
}
// MPI source into array target: broadcast.
template<class From, class To, class ...Params>
void doAssign( const From& src, To& to, MPIVectorTag, ArrayVectorTag, Params&&...ps )
{
    for (unsigned i=0; i<to.size(); i++)
        dg::assign(src, to[i], std::forward<Params>(ps)...);
}
// Recursive source into array target: element-wise assignment.
template<class From, class To, class ...Params>
void doAssign( const From& src, To& to, RecursiveVectorTag, ArrayVectorTag, Params&&...ps )
{
    for (unsigned i=0; i<to.size(); i++)
        dg::assign(src[i], to[i], std::forward<Params>(ps)...);
}
// Shared source into resizable recursive target: resize then broadcast.
template<class From, class To, class Size, class ...Params>
void doAssign( const From& src, To& to, SharedVectorTag, RecursiveVectorTag, Size size, Params&&... ps )
{
    to.resize(size);
    for (int i=0; i<(int)size; i++)
        dg::assign(src, to[i], std::forward<Params>(ps)...);
}
// MPI source into resizable recursive target: resize then broadcast.
template<class From, class To, class Size, class ...Params>
void doAssign( const From& src, To& to, MPIVectorTag, RecursiveVectorTag, Size size, Params&&... ps )
{
    to.resize(size);
    for (int i=0; i<(int)size; i++)
        dg::assign(src, to[i], std::forward<Params>(ps)...);
}
// Recursive into recursive: resize to the source size, element-wise assign.
template<class From, class To, class ...Params>
void doAssign( const From& src, To& to, RecursiveVectorTag, RecursiveVectorTag, Params&&...ps )
{
    unsigned size = src.size();
    to.resize(size);
    for (unsigned i=0; i<size; i++)
        dg::assign(src[i], to[i], std::forward<Params>(ps)...);
}
} //namespace detail
namespace blas1
{
namespace detail
{
// Dot product over a recursive (vector-of-vectors) type: merge the exblas
// superaccumulators of all inner dot products into a single accumulator.
template< class Vector1, class Vector2>
inline std::vector<int64_t> doDot_superacc( const Vector1& x1, const Vector2& x2, RecursiveVectorTag)
{
    //find out which one is the RecursiveVector and determine size
    constexpr unsigned vector_idx = find_if_v<dg::is_not_scalar, Vector1, Vector1, Vector2>::value;
    auto size = get_idx<vector_idx>(x1,x2).size();
    // acc holds one exblas superaccumulator (BIN_COUNT bins), zeroed.
    std::vector<int64_t> acc( exblas::BIN_COUNT, (int64_t)0);
    for( unsigned i=0; i<size; i++)
    {
        // Superaccumulator of element i's dot product.
        std::vector<int64_t> temp = doDot_superacc( do_get_vector_element(x1,i,get_tensor_category<Vector1>()), do_get_vector_element(x2,i,get_tensor_category<Vector2>()));
        int imin = exblas::IMIN, imax = exblas::IMAX;
        exblas::cpu::Normalize( &(temp[0]), imin, imax);
        for( int k=exblas::IMIN; k<=exblas::IMAX; k++)
            acc[k] += temp[k];
        // Renormalize the running accumulator every 128 additions --
        // presumably to keep the per-bin carries from overflowing int64;
        // confirm against the exblas documentation.
        if( (i+1)%128 == 0)
        {
            imin = exblas::IMIN, imax = exblas::IMAX;
            exblas::cpu::Normalize( &(acc[0]), imin, imax);
        }
    }
    return acc;
}
/////////////////////////////////////////////////////////////////////////////////////
#ifdef _OPENMP
//omp tag implementation
// Open ONE parallel region at the outermost recursive level; each thread
// then runs the whole element loop.  The nested dg::blas1::subroutine
// calls are expected to contain orphaned `omp for` worksharing that
// splits the actual work among the threads of this region -- note the
// deliberate absence of `omp for` here.
template< class size_type, class Subroutine, class container, class ...Containers>
inline void doSubroutine_dispatch( RecursiveVectorTag, OmpTag, size_type size, Subroutine f, container&& x, Containers&&... xs)
{
    //using inner_container = typename std::decay<container>::type::value_type;
    if( !omp_in_parallel())//to catch recursive calls
    {
        #pragma omp parallel
        {
            for( int i=0; i<(int)size; i++) {//omp sometimes has problems if loop variable is not int
                dg::blas1::subroutine( f,
                    do_get_vector_element(std::forward<container>(x),i,get_tensor_category<container>()),
                    do_get_vector_element(std::forward<Containers>(xs),i,get_tensor_category<Containers>())...);
            }
        }
    }
    else //we are already in a parallel omp region
        for( int i=0; i<(int)size; i++) {
            dg::blas1::subroutine( f,
                do_get_vector_element(std::forward<container>(x),i,get_tensor_category<container>()),
                do_get_vector_element(std::forward<Containers>(xs),i,get_tensor_category<Containers>())...);
        }
}
#endif //_OPENMP
//any tag implementation (recursively call dg::blas1::subroutine)
// Serial/device policies: plain element loop, recursion does the work.
template<class size_type, class Subroutine, class container, class ...Containers>
inline void doSubroutine_dispatch( RecursiveVectorTag, AnyPolicyTag, size_type size, Subroutine f, container&& x, Containers&&... xs)
{
    for( int i=0; i<(int)size; i++) {
        dg::blas1::subroutine( f, do_get_vector_element(std::forward<container>(x),i,get_tensor_category<container>()), do_get_vector_element(std::forward<Containers>(xs),i,get_tensor_category<Containers>())...);
    }
}
//dispatch
// Entry point for recursive containers: find the first non-scalar argument
// to obtain the outer size and execution policy, then dispatch on policy.
template< class Subroutine, class container, class ...Containers>
inline void doSubroutine( RecursiveVectorTag, Subroutine f, container&& x, Containers&&... xs)
{
    constexpr unsigned vector_idx = find_if_v<dg::is_not_scalar, get_value_type<container>, container, Containers...>::value;
    auto size = get_idx<vector_idx>( std::forward<container>(x), std::forward<Containers>(xs)...).size();
    using vector_type = find_if_t<dg::has_not_any_policy, get_value_type<container>, container, Containers...>;
    doSubroutine_dispatch( RecursiveVectorTag(), get_execution_policy<vector_type>(), size, f, std::forward<container>( x), std::forward<Containers>( xs)...);
}
// Fold `op` over all elements of a recursive container, sequentially.
// NOTE(review): `init` is passed both as the inner reduction's seed and
// combined again with its result, so `op` must be idempotent w.r.t. its
// identity element (e.g. min/max/sum with a true identity) -- confirm
// against the dg::blas1::reduce contract.
template<class T, class ContainerType, class BinaryOp>
inline T doReduce( RecursiveVectorTag, const ContainerType& x, T init, BinaryOp op)
{
    //reduce sequentially recursively
    for ( unsigned u=0; u<x.size(); u++)
    {
        init = op( init, dg::blas1::reduce( x[u], init, op));
    }
    return init;
}
} //namespace detail
} //namespace blas1
} //namespace dg
///@endcond
#endif //_DG_BLAS_STD_VECTOR_
|
GB_unop__expm1_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__expm1_fc64_fc64
// op(A') function: GB_unop_tran__expm1_fc64_fc64
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = GB_cexpm1 (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_cexpm1 (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = aij ; \
Cx [pC] = GB_cexpm1 (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_EXPM1 || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = cexpm1 (Ax [p]) applied elementwise over anz entries, split
// statically across nthreads OpenMP threads.  Cx and Ax may be aliased
// because each iteration reads and writes only index p.
// (Auto-generated file: keep in sync with the Generator templates.)
GrB_Info GB_unop_apply__expm1_fc64_fc64
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GxB_FC64_t aij = Ax [p] ;
        GxB_FC64_t z = aij ;            // cast step (identity: FC64 -> FC64)
        Cx [p] = GB_cexpm1 (z) ;        // complex expm1: e^z - 1
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = cexpm1 (A'): transpose, typecast, and apply fused in one pass.
// The actual loop lives in the shared template GB_unop_transpose.c,
// driven by the GB_CAST_OP/GB_OP macros defined at the top of this file.
// (Auto-generated file: keep in sync with the Generator templates.)
GrB_Info GB_unop_tran__expm1_fc64_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
config.h | /* config.h. Generated from config.in by configure. */
/* config.in. Generated from configure.ac by autoheader. */
/* Check that config.h is #included before system headers
(this works only for glibc, but that should be enough). */
#if defined(__GLIBC__) && !defined(__FreeBSD_kernel__) && !defined(__CONFIG_H__)
# error config.h must be #included before system headers
#endif
#define __CONFIG_H__ 1
/* Define if building universal (internal helper macro) */
/* #undef AC_APPLE_UNIVERSAL_BUILD */
/* Set the default --hash-style value */
#define DEFAULT_HASH_STYLE "sysv"
/* Define to 1 if you want to enable -z relro in ELF linker by default. */
/* handled by makefile #define DEFAULT_LD_Z_RELRO 1 */
/* Define to 1 if translation of program messages to the user's native
language is requested. */
/* #undef ENABLE_NLS */
/* Define to enable linker plugins */
#define ENABLE_PLUGINS 1
/* Define to do multi-threaded linking */
/* handled by makefile #define ENABLE_THREADS 1 */
/* Default big endian (true or false) */
#define GOLD_DEFAULT_BIG_ENDIAN false
/* Default machine code */
#define GOLD_DEFAULT_MACHINE EM_X86_64
/* Default OSABI code */
#define GOLD_DEFAULT_OSABI ELFOSABI_NONE
/* Default size (32 or 64) */
#define GOLD_DEFAULT_SIZE 64
/* Define to 1 if you have the <byteswap.h> header file. */
/* #undef HAVE_BYTESWAP_H */
/* Define to 1 if you have the `chsize' function. */
/* #undef HAVE_CHSIZE */
/* Define to 1 if you have the declaration of `asprintf', and to 0 if you
don't. */
#define HAVE_DECL_ASPRINTF 1
/* Define to 1 if you have the declaration of `basename', and to 0 if you
don't. */
#define HAVE_DECL_BASENAME 0
/* Define to 1 if you have the declaration of `ffs', and to 0 if you don't. */
#define HAVE_DECL_FFS 1
/* Define to 1 if you have the declaration of `memmem', and to 0 if you don't.
*/
#define HAVE_DECL_MEMMEM 1
/* Define to 1 if you have the declaration of `snprintf', and to 0 if you
don't. */
#define HAVE_DECL_SNPRINTF 1
/* Define to 1 if you have the declaration of `strndup', and to 0 if you
don't. */
#define HAVE_DECL_STRNDUP 1
/* Define to 1 if you have the declaration of `strverscmp', and to 0 if you
don't. */
#define HAVE_DECL_STRVERSCMP 0
/* Define to 1 if you have the declaration of `vasprintf', and to 0 if you
don't. */
#define HAVE_DECL_VASPRINTF 1
/* Define to 1 if you have the declaration of `vsnprintf', and to 0 if you
don't. */
#define HAVE_DECL_VSNPRINTF 1
/* Define to 1 if you have the <dlfcn.h> header file. */
#define HAVE_DLFCN_H 1
/* Define to 1 if you have the <ext/hash_map> header file. */
#define HAVE_EXT_HASH_MAP 1
/* Define to 1 if you have the <ext/hash_set> header file. */
#define HAVE_EXT_HASH_SET 1
/* Define to 1 if you have the `fallocate' function. */
/* #undef HAVE_FALLOCATE */
/* Define to 1 if you have the `ffsll' function. */
#define HAVE_FFSLL 1
/* Define to 1 if you have the `ftruncate' function. */
#define HAVE_FTRUNCATE 1
/* Define to 1 if you have the <inttypes.h> header file. */
#define HAVE_INTTYPES_H 1
/* Define if your <locale.h> file defines LC_MESSAGES. */
#define HAVE_LC_MESSAGES 1
/* Define to 1 if you have the `link' function. */
#define HAVE_LINK 1
/* Define to 1 if you have the <locale.h> header file. */
#define HAVE_LOCALE_H 1
/* Define to 1 if you have the `mallinfo' function. */
/* #undef HAVE_MALLINFO */
/* Define to 1 if you have the <memory.h> header file. */
#define HAVE_MEMORY_H 1
/* Define to 1 if you have the `mkdtemp' function. */
#define HAVE_MKDTEMP 1
/* Define to 1 if you have the `mmap' function. */
#define HAVE_MMAP 1
/* Define to 1 if you have the mremap function with MREMAP_MAYMOVE support */
/* #undef HAVE_MREMAP */
/* Define if compiler supports #pragma omp threadprivate */
#define HAVE_OMP_SUPPORT 1
/* Define to 1 if you have the `posix_fallocate' function. */
/* #undef HAVE_POSIX_FALLOCATE */
/* Define to 1 if you have the `pread' function. */
#define HAVE_PREAD 1
/* Have PTHREAD_PRIO_INHERIT. */
#define HAVE_PTHREAD_PRIO_INHERIT 1
/* Define to 1 if you have the `readv' function. */
#define HAVE_READV 1
/* Define to 1 if you have the `setlocale' function. */
#define HAVE_SETLOCALE 1
/* Define if struct stat has a field st_mtim with timespec for mtime */
#define HAVE_STAT_ST_MTIM 1
/* Define to 1 if you have the <stdint.h> header file. */
#define HAVE_STDINT_H 1
/* Define to 1 if you have the <stdlib.h> header file. */
#define HAVE_STDLIB_H 1
/* Define to 1 if you have the <strings.h> header file. */
#define HAVE_STRINGS_H 1
/* Define to 1 if you have the <string.h> header file. */
#define HAVE_STRING_H 1
/* Define to 1 if you have the `sysconf' function. */
#define HAVE_SYSCONF 1
/* Define to 1 if you have the <sys/mman.h> header file. */
#define HAVE_SYS_MMAN_H 1
/* Define to 1 if you have the <sys/stat.h> header file. */
#define HAVE_SYS_STAT_H 1
/* Define to 1 if you have the <sys/types.h> header file. */
#define HAVE_SYS_TYPES_H 1
/* Define to support 32-bit big-endian targets */
/* #undef HAVE_TARGET_32_BIG */
/* Define to support 32-bit little-endian targets */
#define HAVE_TARGET_32_LITTLE 1
/* Define to support 64-bit big-endian targets */
/* #undef HAVE_TARGET_64_BIG */
/* Define to support 64-bit little-endian targets */
#define HAVE_TARGET_64_LITTLE 1
/* Define if attributes work on C++ templates */
#define HAVE_TEMPLATE_ATTRIBUTES 1
/* Define to 1 if you have the `times' function. */
#define HAVE_TIMES 1
/* Define if std::tr1::hash<off_t> is usable */
#define HAVE_TR1_HASH_OFF_T 1
/* Define to 1 if you have the <tr1/unordered_map> header file. */
#define HAVE_TR1_UNORDERED_MAP 1
/* Define if ::std::tr1::unordered_map::rehash is usable */
#define HAVE_TR1_UNORDERED_MAP_REHASH 1
/* Define to 1 if you have the <tr1/unordered_set> header file. */
#define HAVE_TR1_UNORDERED_SET 1
/* Define to 1 if you have the <unistd.h> header file. */
#define HAVE_UNISTD_H 1
/* BOOTSTRAP: avoid dealing with -std=c++11 */
#if defined(__cplusplus) && __cplusplus >= 201103L
/* Define to 1 if you have the <unordered_map> header file. */
#define HAVE_UNORDERED_MAP 1
/* Define to 1 if you have the <unordered_set> header file. */
#define HAVE_UNORDERED_SET 1
#endif
/* Define to 1 if you have the <windows.h> header file. */
/* #undef HAVE_WINDOWS_H */
/* Default library search path */
#define LIB_PATH "/lib:/usr/lib"
/* Whether configured as a native linker */
#define NATIVE_LINKER 1
/* Name of package */
#define PACKAGE "gold"
/* Define to the address where bug reports for this package should be sent. */
#define PACKAGE_BUGREPORT ""
/* Define to the full name of this package. */
#define PACKAGE_NAME "gold"
/* Define to the full name and version of this package. */
#define PACKAGE_STRING "gold 0.1"
/* Define to the one symbol short name of this package. */
#define PACKAGE_TARNAME "gold"
/* Define to the home page for this package. */
#define PACKAGE_URL ""
/* Define to the version of this package. */
#define PACKAGE_VERSION "0.1"
/* Define to necessary symbol if this constant uses a non-standard name on
your system. */
/* #undef PTHREAD_CREATE_JOINABLE */
/* Define to 1 if you have the ANSI C header files. */
#define STDC_HEADERS 1
/* System root for target files */
/* handled by makefile #define TARGET_SYSTEM_ROOT "" */
/* Whether the system root can be relocated */
#define TARGET_SYSTEM_ROOT_RELOCATABLE 0
/* Enable extensions on AIX 3, Interix. */
#ifndef _ALL_SOURCE
# define _ALL_SOURCE 1
#endif
/* Enable GNU extensions on systems that have them. */
#ifndef _GNU_SOURCE
# define _GNU_SOURCE 1
#endif
/* Enable threading extensions on Solaris. */
#ifndef _POSIX_PTHREAD_SEMANTICS
# define _POSIX_PTHREAD_SEMANTICS 1
#endif
/* Enable extensions on HP NonStop. */
#ifndef _TANDEM_SOURCE
# define _TANDEM_SOURCE 1
#endif
/* Enable general extensions on Solaris. */
#ifndef __EXTENSIONS__
# define __EXTENSIONS__ 1
#endif
/* Version number of package */
#define VERSION "0.1"
/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most
significant byte first (like Motorola and SPARC, unlike Intel). */
#if defined AC_APPLE_UNIVERSAL_BUILD
# if defined __BIG_ENDIAN__
# define WORDS_BIGENDIAN 1
# endif
#else
# ifndef WORDS_BIGENDIAN
/* # undef WORDS_BIGENDIAN */
# endif
#endif
/* Define to 1 if on MINIX. */
/* #undef _MINIX */
/* Define to 2 if the system does not provide POSIX.1 features except with
this defined. */
/* #undef _POSIX_1_SOURCE */
/* Define to 1 if you need to in order for `stat' and other things to work. */
/* #undef _POSIX_SOURCE */
|
GB_unop__round_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__round_fp64_fp64)
// op(A') function: GB (_unop_tran__round_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = round (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = round (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = round (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ROUND || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = round (Ax [p]) applied elementwise over anz entries, split
// statically across nthreads OpenMP threads.  Cx and Ax may be aliased
// because each iteration reads and writes only index p.
// (Auto-generated file: keep in sync with the Generator templates.)
GrB_Info GB (_unop_apply__round_fp64_fp64)
(
    double *Cx,                 // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry of Ax is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = aij ;            // cast step (identity: fp64 -> fp64)
            Cx [p] = round (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip entries absent from the bitmap
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = round (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = round (A'): transpose, typecast, and apply fused in one pass.
// The actual loop lives in the shared template GB_unop_transpose.c,
// driven by the GB_CAST_OP/GB_OP macros defined at the top of this file.
// (Auto-generated file: keep in sync with the Generator templates.)
GrB_Info GB (_unop_tran__round_fp64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
csr_matvec.h | #ifndef __CSR_MATVEC_H__
#define __CSR_MATVEC_H__
#if defined(_OPENMP)
#include <omp.h>
#include <algorithm>
#include <complex>
// See the work by Merrill et al. (http://ieeexplore.ieee.org/abstract/document/7877136/) for the original algorithm and implementation.
// This code contains modified versions of algorithms 2 and 3.
#pragma message ("This header file is deprecated, this should only be used for csr_matvec_wrapper.pyx")
// Read-only "virtual array" of consecutive integers: element i reads as
// init+i.  Stands in for an explicit list of the natural numbers (the
// nonzero indices) in the merge-path search, without materializing it.
template<class I>
class CountingInputIterator{
    const I init;
public:
    CountingInputIterator(I start) : init(start) {}
    I operator[](I i){return init+i;}
};
// Plain (x, y) coordinate pair on the merge-path grid:
// x indexes merge list A, y indexes merge list B.
template<class I>
struct CoordinateT{
    I x,y;
    CoordinateT(I xx,I yy) : x(xx), y(yy) {}
};
// Locate the point where the given cross-diagonal intersects the merge
// path of sorted list A (length a_len) and sorted list B (length b_len).
// Returns the (A index, B index) pair of that intersection; consuming the
// path between two such points partitions the merge evenly across threads.
template<class I,class AIteratorT,class BIteratorT>
CoordinateT<I> MergePathSearch(I diagonal, I a_len, I b_len, AIteratorT a, BIteratorT b)
{
    // Feasible A-coordinate range for this diagonal, clamped to [0, a_len].
    I lo = std::max(diagonal - b_len, I(0));
    I hi = std::min(diagonal, a_len);
    // 2D binary search along the diagonal for the path crossing.
    while (lo < hi) {
        I mid = (lo + hi) >> 1;
        if (a[mid] <= b[diagonal - mid - 1]) {
            lo = mid + 1;   // crossing lies in the top-right half
        } else {
            hi = mid;       // crossing lies in the bottom-left half
        }
    }
    return CoordinateT<I>(
        std::min(lo, a_len),  // x coordinate in A
        diagonal - lo);       // y coordinate in B
}
// Merge-path CSR sparse matrix-vector product: y (+)= alpha * A * x.
// The merge of (row end-offsets) with (nonzero indices) is split into
// equal-length path segments, one per thread, so work is balanced even
// for wildly uneven row lengths.  Rows that straddle a thread boundary
// leave a partial sum in value_carry_out/row_carry_out, fixed up by a
// single thread at the end.
// NOTE(review): this function uses orphaned `omp for`/`omp single`
// worksharing and omp_get_num_threads(), which only behave as intended
// when called from INSIDE an active parallel region; presumably the
// csr_matvec_wrapper.pyx caller opens that region -- confirm.
// row_carry_out/value_carry_out must hold at least num_threads entries.
template<class I,class T1,class T2,class T3>
void csrmv_merge(const bool overwrite_y,
                 const I num_rows,
                 const I row_offsets[],
                 const I column_indices[],
                 const T1 values[],
                 const T2 alpha,
                 const T3 x[],
                 I row_carry_out[],
                 T3 value_carry_out[],
                 T3 y[])
{
    const I* row_end_offsets = row_offsets + 1; // Merge list A: row end-offsets
    const I num_nonzeros = row_offsets[num_rows];
    int num_threads = omp_get_num_threads();
    CountingInputIterator<I> nz_indices(0); // Merge list B: Natural numbers(NZ indices)
    I num_merge_items = num_rows + num_nonzeros; // Merge path total length
    I items_per_thread = (num_merge_items + num_threads - 1) / num_threads; // Merge items per thread
    T3 alpha_cast = T3(alpha);
    // Zero the output first when overwriting (accumulation below uses +=).
    if(overwrite_y){
        #pragma omp for schedule(static)
        for(I i=0;i<num_rows;i++){
            y[i] = T3(0);
        }
    }
    // Spawn parallel threads
    #pragma omp for schedule(static,1)
    for (int tid = 0; tid < num_threads; tid++)
    {
        // Find starting and ending MergePath coordinates (row-idx, nonzero-idx) for each thread
        I diagonal = std::min(items_per_thread * tid, num_merge_items);
        I diagonal_end = std::min(diagonal + items_per_thread, num_merge_items);
        CoordinateT<I> thread_coord = MergePathSearch(diagonal, num_rows, num_nonzeros, row_end_offsets, nz_indices);
        CoordinateT<I> thread_coord_end = MergePathSearch(diagonal_end, num_rows, num_nonzeros,row_end_offsets, nz_indices);
        // Consume merge items, whole rows first
        T3 running_total = 0.0;
        for (; thread_coord.x < thread_coord_end.x; ++thread_coord.x)
        {
            for (; thread_coord.y < row_end_offsets[thread_coord.x]; ++thread_coord.y)
                running_total += T3(values[thread_coord.y]) * x[column_indices[thread_coord.y]];
            y[thread_coord.x] += alpha_cast*running_total;
            running_total = 0.0;
        }
        // Consume partial portion of thread's last row
        for (; thread_coord.y < thread_coord_end.y; ++thread_coord.y)
            running_total += T3(values[thread_coord.y]) * x[column_indices[thread_coord.y]];
        // Save carry-outs
        row_carry_out[tid] = thread_coord_end.x;
        value_carry_out[tid] = running_total;
    }
    // Carry-out fix-up (rows spanning multiple threads)
    #pragma omp single
    {
        for (int tid = 0; tid < num_threads - 1; ++tid)
            if (row_carry_out[tid] < num_rows)
                y[row_carry_out[tid]] += alpha_cast*value_carry_out[tid];
    }
}
// Thin forwarding wrapper giving the OpenMP merge-path kernel the same
// signature as the serial fallback below.  rco/vco are the per-thread
// carry buffers required by csrmv_merge (>= number of threads entries).
template<typename I, typename T1,typename T2,typename T3>
void inline csr_matvec(const bool overwrite_y,
                       const I n,
                       const I Ap[],
                       const I Aj[],
                       const T1 Ax[],
                       const T2 a,
                       const T3 x[],
                       I rco[],
                       T3 vco[],
                       T3 y[])
{
    csrmv_merge(overwrite_y,n,Ap,Aj,Ax,a,x,rco,vco,y);
}
#else
#include <complex>
// Serial build: single-thread stand-ins so callers can query OpenMP
// thread information unconditionally without #ifdef guards.
inline int omp_get_max_threads(void){return 1;}
inline int omp_get_num_threads(void){return 1;}
inline int omp_get_thread_num(void){return 0;}
// Serial CSR sparse matrix-vector product:
//   y  = a*A*x   when overwrite_y is true
//   y += a*A*x   otherwise.
// A is the n-row CSR matrix (Ap = row pointers, Aj = column indices,
// Ax = nonzero values). rco/vco are unused here; they exist only so the
// signature matches the OpenMP merge-path variant.
template<typename I, typename T1,typename T2,typename T3>
void csr_matvec(const bool overwrite_y,
                const I n,
                const I Ap[],
                const I Aj[],
                const T1 Ax[],
                const T2 a,
                const T3 x[],
                I rco[],
                T3 vco[],
                T3 y[])
{
    const T3 scale = T3(a); // convert the multiplier once
    for (I row = 0; row < n; ++row) {
        // Dot product of CSR row `row` with x.
        T3 dot = 0;
        const I row_end = Ap[row + 1];
        for (I idx = Ap[row]; idx < row_end; ++idx) {
            dot += T3(Ax[idx]) * x[Aj[idx]];
        }
        if (overwrite_y)
            y[row] = scale * dot;
        else
            y[row] += scale * dot;
    }
}
#endif
#endif |
move_shallow_water_particle_utility.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Miguel Maso Sotomayor
// Pablo Becker
//
#if !defined(KRATOS_MOVE_SHALLOW_WATER_PARTICLE_UTILITY_H_INCLUDED)
#define KRATOS_MOVE_SHALLOW_WATER_PARTICLE_UTILITY_H_INCLUDED
///@defgroup MoveShallowWaterParticleUtility
///@brief Utility to move particles on the eulerian mesh with an
/// explicit scheme. This is the basic tool of the pfem2 framework
// System includes
#include <string>
#include <iostream>
#include <algorithm>
// External includes
// Project includes
#include "includes/define.h"
#include "includes/node.h"
#include "includes/checks.h"
#include "includes/dof.h"
#include "includes/variables.h"
#include "containers/array_1d.h"
#include "containers/data_value_container.h"
#include "includes/mesh.h"
#include "utilities/math_utils.h"
#include "processes/node_erase_process.h"
#include "utilities/geometry_utilities.h"
#include "includes/model_part.h"
#include "includes/kratos_parameters.h"
#include "spatial_containers/spatial_containers.h"
#include "spatial_containers/bounding_box.h"
#include "spatial_containers/cell.h"
#include "spatial_containers/bins_dynamic_objects.h"
#include "utilities/spatial_containers_configure.h"
#include "geometries/line_2d_2.h"
#include "geometries/triangle_2d_3.h"
#include "geometries/triangle_3d_3.h"
#include "geometries/point.h"
#include "shallow_water_application.h"
#include "shallow_water_particle.h"
#include "utilities/openmp_utils.h"
#include "time.h"
//#include "processes/process.h"
namespace Kratos
{
//this class is to be modified by the user to customize the interpolation process
template< unsigned int TDim>
class MoveShallowWaterParticleUtility
{
public:
typedef SpatialContainersConfigure<TDim> Configure;
typedef typename Configure::PointType PointType;
typedef typename Configure::ContainerType ContainerType;
typedef typename Configure::IteratorType IteratorType;
typedef typename Configure::ResultContainerType ResultContainerType;
typedef typename Configure::ResultIteratorType ResultIteratorType;
typedef PointerVector< ShallowParticle, ShallowParticle*, std::vector<ShallowParticle*> > ParticlePointerVector;
KRATOS_CLASS_POINTER_DEFINITION(MoveShallowWaterParticleUtility);
//template<unsigned int TDim>
/// Constructor.
/** Validates the settings, renumbers the elements sequentially,
 * computes a nodal and elemental MEAN_SIZE, and seeds the initial set
 * of lagrangian particles inside every element.
 * @param rModelPart  model part holding the eulerian mesh
 * @param rParameters settings: names of the convected scalar/vector
 *                    variables and the maximum particles per element
 */
MoveShallowWaterParticleUtility(ModelPart& rModelPart, Parameters rParameters) :
mrModelPart(rModelPart),
mScalarVar1(KratosComponents< Variable<double> >::Get( rParameters["convection_scalar_variable"].GetString() ) ),
mVectorVar1(KratosComponents< Variable<array_1d<double,3> > >::Get( rParameters["convection_vector_variable"].GetString() ) )
{
KRATOS_TRY
std::cout << "Initializing moveparticle utility for scalar transport" << std::endl;
Parameters default_parameters( R"(
{
"convection_scalar_variable" : "HEIGHT",
"convection_vector_variable" : "VELOCITY",
"maximum_number_of_particles" : 16
} )" );
// Now validate against defaults -- this also ensures no type mismatch
rParameters.ValidateAndAssignDefaults(default_parameters);
m_scalar_var1_name = rParameters["convection_scalar_variable"].GetString();
m_vector_var1_name = rParameters["convection_vector_variable"].GetString();
// NOTE(review): the default value is the integer 16 but it is read with
// GetDouble(); confirm whether GetInt() is the intended accessor.
mMaxNumberOfParticles = rParameters["maximum_number_of_particles"].GetDouble();
Check();
//storing water and air density and their inverses, just in case it is needed for the streamline integration
//loop in elements to change their ID to their position in the array. Easier to get information later.
//DO NOT PARALELIZE THIS! IT MUST BE SERIAL!!!!!!!!!!!!!!!!!!!!!!
// (element ids are used below as 1-based indices into the particle bookkeeping arrays)
ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin();
for(unsigned int ii=0; ii<mrModelPart.Elements().size(); ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
ielem->SetId(ii+1);
}
mLastElemId= (mrModelPart.ElementsEnd()-1)->Id();
int node_id=0;
// we look for the smallest edge. could be used as a weighting function when going lagrangian->eulerian instead of traditional shape functions(method currently used)
ModelPart::NodesContainerType::iterator inodebegin = mrModelPart.NodesBegin();
std::vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Nodes().size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator pnode = inodebegin+ii;
array_1d<double,3> position_node;
double distance=0.0;
position_node = pnode->Coordinates();
WeakPointerVector< Node<3> >& rneigh = pnode->GetValue(NEIGHBOUR_NODES);
//we loop all the nodes to check all the edges
const double number_of_neighbours = static_cast<double>(rneigh.size());
for( WeakPointerVector<Node<3> >::iterator inode = rneigh.begin(); inode!=rneigh.end(); inode++)
{
array_1d<double,3> position_difference;
position_difference = inode->Coordinates() - position_node;
const double current_distance = norm_2( position_difference );
distance += current_distance / number_of_neighbours;
}
// NOTE(review): despite the comments about smallest/largest edge, what is
// actually stored is the MEAN edge length around the node.
pnode->SetValue(MEAN_SIZE, distance);
node_id=pnode->GetId();
}
}
mLastNodeId=node_id;
//we also calculate the element mean size in the same way, for the courant number
//also we set the right size to the LHS column for the pressure enrichments, in order to recover correctly the enrichment pressure
std::vector<unsigned int> element_partition;
OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Elements().size(), element_partition);
//before doing anything we must reset the vector of nodes contained by each element (particles that are inside each element.
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
double elem_size;
array_1d<double,3> Edge(3,0.0);
// Start with the squared length of edge 0-1, then keep the SHORTEST
// squared edge length over all node pairs of the element.
Edge = ielem->GetGeometry()[1].Coordinates() - ielem->GetGeometry()[0].Coordinates();
elem_size = Edge[0]*Edge[0];
for (unsigned int d = 1; d < TDim; d++)
elem_size += Edge[d]*Edge[d];
for (unsigned int i = 2; i < (TDim+1); i++)
for(unsigned int j = 0; j < i; j++)
{
Edge = ielem->GetGeometry()[i].Coordinates() - ielem->GetGeometry()[j].Coordinates();
double Length = Edge[0]*Edge[0];
for (unsigned int d = 1; d < TDim; d++)
Length += Edge[d]*Edge[d];
if (Length < elem_size) elem_size = Length;
}
elem_size = sqrt(elem_size);
ielem->SetValue(MEAN_SIZE, elem_size);
}
}
//matrix containing the position of the 4/15/45 particles that we will seed at the beginning
BoundedMatrix<double, 5*(1+TDim), 3 > pos;
BoundedMatrix<double, 5*(1+TDim), (1+TDim) > N;
int particle_id=0;
mNElems = mrModelPart.Elements().size();
std::cout << " about to resize vectors" << std::endl;
//setting the right size to the vector containing the particles assigned to each element
//particles vector. this vector contains ALL the particles in the simulation.
mParticlesVector.resize(mNElems*mMaxNumberOfParticles);
//and this vector contains the current number of particles that are in each element (currently zero)
mNumOfParticlesInElems.resize(mNElems);
mNumOfParticlesInElems=ZeroVector(mNElems);
//when moving the particles, an auxiliary vector is necessary (to store the previous number)
mNumOfParticlesInElemsAux.resize(mNElems);
//each element will have a list of pointers to all the particles that are inside.
//this vector contains the pointers to the vector of (particle) pointers of each element.
mVectorOfParticlePointersVectors.resize(mNElems);
//int artz;
//std::cin >> artz;
int i_int=0; //careful! it's not the id, but the position inside the array!
std::cout << " about to create particles" << std::endl;
//now we seed: LOOP IN ELEMENTS
//using loop index, DO NOT paralelize this! change lines : mparticles_in_elems_pointers((ii*mMaxNumberOfParticles)+mparticles_in_elems_integers(ii)) = pparticle; and the next one
mOffset=0;
//ShallowParticle& firstparticle = mParticlesVector[0];
for(unsigned int ii=0; ii<mrModelPart.Elements().size(); ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
//(ielem->GetValue(BED_PARTICLE_POINTERS)) = ParticlePointerVector( mMaxNumberOfParticles*2, &firstparticle );
//ParticlePointerVector& particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS));
//now we link the mpointers_to_particle_pointers_vectors to the corresponding element
//mpointers_to_particle_pointers_vectors(ii) = &particle_pointers;
//now we resize the vector of particle pointers. it is double sized because we move the particles from an initial position (first half) to a final position (second half).
//for(int j=0; j<(mMaxNumberOfParticles*2); j++)
//    particle_pointers.push_back(&firstparticle);
mVectorOfParticlePointersVectors[ii] = ParticlePointerVector( mMaxNumberOfParticles*2 );
ParticlePointerVector& particle_pointers = mVectorOfParticlePointersVectors[ii];
//int & number_of_particles = ielem->GetValue(NUMBER_OF_BED_PARTICLES);
int & number_of_particles = mNumOfParticlesInElems[ii];
number_of_particles=0;
Geometry< Node<3> >& geom = ielem->GetGeometry();
//unsigned int elem_id = ielem->Id();
ComputeGaussPointPositions_initial(geom, pos, N); //we also have the standard (4), and 45
//now we seed the particles in the current element:
//each particle gets its position and the scalar/vector values
//interpolated from the nodes via the shape functions N.
for (unsigned int j = 0; j < pos.size1(); j++)
{
++particle_id;
ShallowParticle& pparticle = mParticlesVector[particle_id-1];
//~ pparticle.X()=pos(j,0);
//~ pparticle.Y()=pos(j,1);
//~ pparticle.Z()=pos(j,2);
pparticle.Coordinates() = row(pos,j);
pparticle.GetEraseFlag()=false;
array_1d<float, 3 > & vector1 = pparticle.GetVector1();
float & scalar1 = pparticle.GetScalar1();
noalias(vector1) = ZeroVector(3);
scalar1=0.0;
for (unsigned int k = 0; k < (TDim+1); k++)
{
scalar1 += N(j, k) * geom[k].FastGetSolutionStepValue(mScalarVar1);
noalias(vector1) += N(j, k) * geom[k].FastGetSolutionStepValue(mVectorVar1);
}
particle_pointers(j) = &pparticle;
number_of_particles++ ;
}
++i_int;
}
mNParticles=particle_id; //we save the last particle created as the total number of particles we have. For the moment this is true.
std::cout << " [Creating particles : " << mNParticles << " particles created]" << std::endl;
mParticlePrintingToolInitialized=false;
KRATOS_CATCH("")
}
/// Destructor (members clean up themselves; nothing to release here).
~MoveShallowWaterParticleUtility()
{}
void MountBin()
{
KRATOS_TRY
//copy the elements to a new container, as the list will
//be shuffled duringthe construction of the tree
ContainerType& rElements = mrModelPart.ElementsArray();
IteratorType it_begin = rElements.begin();
IteratorType it_end = rElements.end();
//const int number_of_elem = rElements.size();
typename BinsObjectDynamic<Configure>::Pointer paux = typename BinsObjectDynamic<Configure>::Pointer(new BinsObjectDynamic<Configure>(it_begin, it_end ) );
paux.swap(mpBinsObjectDynamic);
//BinsObjectDynamic<Configure> mpBinsObjectDynamic(it_begin, it_end );
std::cout << " finished mounting Bins" << std::endl;
KRATOS_CATCH("")
}
/// Calculates the mean velocity
/** This function computes the mean velocity within an element and
* stores it in MEAN_VEL_OVER_ELEM_SIZE variable.
* This variable keeps the courant number aprox 0.1 in each substep
*
* @see MoveParticle
* @see MoveParticleInverseWay
*/
void CalculateVelOverElemSize()
{
KRATOS_TRY
//ProcessInfo& CurrentProcessInfo = mrModelPart.GetProcessInfo();
const double nodal_weight = 1.0/ (1.0 + double (TDim) );
ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin();
std::vector<unsigned int> element_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Elements().size(), element_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
Geometry<Node<3> >& geom = ielem->GetGeometry();
array_1d<double, 3 >vector_mean_velocity=ZeroVector(3);
for (unsigned int i=0; i != (TDim+1) ; i++)
vector_mean_velocity += geom[i].FastGetSolutionStepValue(VELOCITY);
vector_mean_velocity *= nodal_weight;
//~ const double mean_velocity = sqrt ( pow(vector_mean_velocity[0],2) + pow(vector_mean_velocity[1],2) + pow(vector_mean_velocity[2],2) );
const double mean_velocity = norm_2( vector_mean_velocity );
ielem->SetValue(MEAN_VEL_OVER_ELEM_SIZE, mean_velocity / ( ielem->GetValue(MEAN_SIZE) ) );
}
}
KRATOS_CATCH("")
}
/// Reset the boundary conditions
/** When a variable is fixed this function resets the nodal values
* with the previous time step
*/
void ResetBoundaryConditions()
{
KRATOS_TRY
typedef VariableComponent<VectorComponentAdaptor<array_1d<double, 3> > > component_type;
component_type vector_var_x = KratosComponents< component_type >::Get(m_vector_var1_name+std::string("_X"));
component_type vector_var_y = KratosComponents< component_type >::Get(m_vector_var1_name+std::string("_Y"));
component_type vector_var_z = KratosComponents< component_type >::Get(m_vector_var1_name+std::string("_Z"));
ModelPart::NodesContainerType::iterator inodebegin = mrModelPart.NodesBegin();
std::vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Nodes().size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
if (inode->IsFixed(mScalarVar1))
{
inode->FastGetSolutionStepValue(mScalarVar1)=inode->GetSolutionStepValue(mScalarVar1,1);
}
if (inode->IsFixed(vector_var_x))
{
inode->FastGetSolutionStepValue(vector_var_x)=inode->GetSolutionStepValue(vector_var_x,1);
}
if (inode->IsFixed(vector_var_y))
{
inode->FastGetSolutionStepValue(vector_var_y)=inode->GetSolutionStepValue(vector_var_y,1);
}
if (inode->IsFixed(vector_var_z))
{
inode->FastGetSolutionStepValue(vector_var_z)=inode->GetSolutionStepValue(vector_var_z,1);
}
}
}
KRATOS_CATCH("")
}
/// Auxiliar function to compute the "delta variables"
/** Delta variables are the difference between two time steps.
* It's value is used to update particles info
*
* @see CorrectParticlesWithoutMovingUsingDeltaVariables
*/
void CalculateDeltaVariables()
{
KRATOS_TRY
ModelPart::NodesContainerType::iterator inodebegin = mrModelPart.NodesBegin();
std::vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Nodes().size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
inode->FastGetSolutionStepValue(DELTA_SCALAR1) = inode->FastGetSolutionStepValue(mScalarVar1) - inode->FastGetSolutionStepValue(PROJECTED_SCALAR1); //PROJECTED_SCALAR1
inode->FastGetSolutionStepValue(DELTA_VECTOR1) = inode->FastGetSolutionStepValue(mVectorVar1) - inode->FastGetSolutionStepValue(PROJECTED_VECTOR1); //PROJECTED_VECTOR1
}
}
KRATOS_CATCH("")
}
/// Auxiliar function
/** This function copy a scalar variable value to the previous time step
*/
void CopyScalarVarToPreviousTimeStep(const Variable<double>& OriginVariable,
ModelPart::NodesContainerType& rNodes)
{
KRATOS_TRY
ModelPart::NodesContainerType::iterator inodebegin = rNodes.begin();
std::vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, rNodes.size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
inode->GetSolutionStepValue(OriginVariable,1) = inode->FastGetSolutionStepValue(OriginVariable);
}
}
KRATOS_CATCH("")
}
/// Auxiliar function
/** This function copy a vector variable value to the previous time step
*/
void CopyVectorVarToPreviousTimeStep(const Variable<array_1d<double,3>>& OriginVariable,
ModelPart::NodesContainerType& rNodes)
{
KRATOS_TRY
ModelPart::NodesContainerType::iterator inodebegin = rNodes.begin();
std::vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, rNodes.size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
noalias(inode->GetSolutionStepValue(OriginVariable,1)) = inode->FastGetSolutionStepValue(OriginVariable);
}
}
KRATOS_CATCH("")
}
/// Move all the particles
/** Convects every active particle along the streamlines given by the
 * VELOCITY field over a total time of DELTA_TIME, split into
 * mMaxSubSteps substeps. After moving, each particle is re-registered
 * in the element that now contains it; a particle that lands in a full
 * element (or whose move sets the erase flag) is flagged for deletion.
 * The per-element pointer arrays are double sized: reads come from the
 * half selected by `offset`, writes go to the other half
 * (`post_offset`), and mOffset is flipped at the end.
 *
 * @see MoveParticle
 */
void MoveParticles()
{
KRATOS_TRY
ProcessInfo& CurrentProcessInfo = mrModelPart.GetProcessInfo();
const int offset = mOffset; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones.
//this function reads from the pointers of one part (ie odd) and saves into the other part (ie even part)
//since it is the only function in the whole procedure that does this, it must use alternatively one part and the other.
bool even_timestep;
if (offset!=0) even_timestep=false;
else even_timestep=true;
const int post_offset = mMaxNumberOfParticles * static_cast<int>(even_timestep); //and we also save the offset to know the location in which we will save the pointers after we've moved the particles
double delta_t = CurrentProcessInfo[DELTA_TIME];
// NOTE(review): N appears to be unused in this function (it was removed
// from the MoveParticle call, see comment below) -- confirm before removing.
array_1d<double,TDim+1> N;
const unsigned int max_results = 10000;
//double integration_distance= 2.0;
mMaxSubSteps = 10;
mMaxSubStepDt = delta_t / static_cast<double>(mMaxSubSteps);
std::vector<unsigned int> element_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Elements().size(), element_partition);
ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin();
//before doing anything we must reset the vector of nodes contained by each element (particles that are inside each element).
//the previous counts are kept in mNumOfParticlesInElemsAux so the read loop below still knows how many particles to visit.
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
//ModelPart::ElementsContainerType::iterator old_element = ielembegin+ii;
int & number_of_particles = mNumOfParticlesInElems[ii]; //old_element->GetValue(NUMBER_OF_BED_PARTICLES);
mNumOfParticlesInElemsAux[ii] = number_of_particles;
mNumOfParticlesInElems[ii] = 0;
//we reset the local vectors for a faster access;
}
}
std::cout << "convecting particles" << std::endl;
//We move the particles across the fixed mesh and saving change data into them (using the function MoveParticle)
// NOTE(review): this barrier sits outside any parallel region, so it binds to the (serial) initial team and is effectively a no-op.
#pragma omp barrier
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
ResultContainerType results(max_results);
WeakPointerVector< Element > elements_in_trajectory;
elements_in_trajectory.resize(20);
for(unsigned int ielem = element_partition[kkk]; ielem<element_partition[kkk+1]; ielem++)
{
ModelPart::ElementsContainerType::iterator old_element = ielembegin+ielem;
const int old_element_id = old_element->Id();
ParticlePointerVector& old_element_particle_pointers = mVectorOfParticlePointersVectors[old_element_id-1];
if ( (results.size()) != max_results )
results.resize(max_results);
unsigned int number_of_elements_in_trajectory = 0; //excluding the origin one (current one, ielem)
for (int ii = 0; ii < mNumOfParticlesInElemsAux[ielem]; ii++)
{
ShallowParticle& pparticle = old_element_particle_pointers[offset+ii];
Element::Pointer pcurrent_element( *old_element.base() );
ResultIteratorType result_begin = results.begin();
bool & erase_flag=pparticle.GetEraseFlag();
if (erase_flag == false){
MoveParticle(pparticle,pcurrent_element,elements_in_trajectory,number_of_elements_in_trajectory,result_begin,max_results); //N was removed from the arguments: it is not needed since the particle ALWAYS starts at a node and the final location does not matter
const int current_element_id = pcurrent_element->Id();
int & number_of_particles_in_current_elem = mNumOfParticlesInElems[current_element_id-1];
//cheap unguarded check first; the authoritative re-check happens inside the critical section
if (number_of_particles_in_current_elem < mMaxNumberOfParticles && erase_flag == false)
{
ParticlePointerVector& current_element_particle_pointers = mVectorOfParticlePointersVectors[current_element_id-1];
#pragma omp critical
{
if (number_of_particles_in_current_elem < mMaxNumberOfParticles) // we cant go over this node, there's no room. otherwise we would be in the position of the first particle of the next element!!
{
current_element_particle_pointers(post_offset+number_of_particles_in_current_elem) = &pparticle;
number_of_particles_in_current_elem++ ;
KRATOS_ERROR_IF( number_of_particles_in_current_elem > mMaxNumberOfParticles ) <<
"In move shallow water particle utility: exceeded maximum number of particles" << std::endl;
//~ if (number_of_particles_in_current_elem > mMaxNumberOfParticles)
//~ KRATOS_WATCH("MAL");
}
else
{
pparticle.GetEraseFlag()=true; //so we just delete it!
}
}
}
else
{
pparticle.GetEraseFlag()=true; //so we just delete it!
}
}
}
}
}
// After having changed everything we change the status of the mOddTimeStep flag:
mOffset = post_offset;; //
KRATOS_CATCH("")
}
/// Transfer particles information to the mesh nodes
/** This function explicitly projects data from particles (lagrangian)
 * onto the eulerian mesh. Shape functions of the elements determine
 * the particle location within the element and its contribution to
 * each node as a weighting function. Results are accumulated into
 * PROJECTED_SCALAR1 / PROJECTED_VECTOR1 with the weight sum in YP,
 * then normalized per node.
 */
void TransferLagrangianToEulerian() //explicit
{
KRATOS_TRY
//ProcessInfo& CurrentProcessInfo = mrModelPart.GetProcessInfo();
//const double delta_t =CurrentProcessInfo[DELTA_TIME];
const double threshold = 1e-10 / (static_cast<double>(TDim)+1.0);
std::cout << "projecting info to mesh" << std::endl;
const int offset = mOffset;
// the array of pointers for each element has twice the required size so that
// we use a part in odd timesteps and the other in even ones.
//(flag managed only by MoveParticles)
// We must project data from the particles (lagrangian) onto the eulerian mesh
//int nnodes = mrModelPart.Nodes().size();
//array_1d<double,(n_nodes)> eulerian_nodes_sumweights;
// We save data from previous time step of the eulerian mesh in case we must reuse it later
// cos no particle was found around the nodes though we could've use a bigger buffer, to be changed later!
// after having saved data, we reset them to zero, this way it's easier to add the contribution
// of the surrounding particles.
ModelPart::NodesContainerType::iterator inodebegin = mrModelPart.NodesBegin();
std::vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Nodes().size(), node_partition);
// Step 1: zero the projection targets on every node.
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
inode->FastGetSolutionStepValue(PROJECTED_SCALAR1)=0.0;
inode->FastGetSolutionStepValue(PROJECTED_VECTOR1)=ZeroVector(3);
inode->FastGetSolutionStepValue(YP)=0.0;
}
}
// Step 2: adding contribution, loop on elements, since each element has stored the particles found inside of it
std::vector<unsigned int> element_partition;
OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Elements().size(), element_partition);
ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin();
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
// Per-element accumulators: one slot per node of the element.
array_1d<double,3*(TDim+1)> nodes_positions;
array_1d<double,3*(TDim+1)> nodes_added_vector1 = ZeroVector(3*(TDim+1));
array_1d<double,(TDim+1)> nodes_added_scalar1 = ZeroVector((TDim+1));
array_1d<double,(TDim+1)> nodes_added_weights = ZeroVector((TDim+1));
//array_1d<double,(TDim+1)> weighting_inverse_divisor;
Geometry<Node<3> >& geom = ielem->GetGeometry();
for (int i=0 ; i!=(TDim+1) ; ++i)
{
nodes_positions[i*3+0]=geom[i].X();
nodes_positions[i*3+1]=geom[i].Y();
nodes_positions[i*3+2]=geom[i].Z();
//weighting_inverse_divisor[i]=1.0/((geom[i].FastGetSolutionStepValue(MEAN_SIZE))*1.01);
}
int & number_of_particles_in_elem= mNumOfParticlesInElems[ii];
ParticlePointerVector& element_particle_pointers = mVectorOfParticlePointersVectors[ii];
for (int iii=0; iii<number_of_particles_in_elem ; iii++ )
{
if (iii==mMaxNumberOfParticles) // It means we are out of our portion of the array, abort loop!
break;
ShallowParticle& pparticle = element_particle_pointers[offset+iii];
if (pparticle.GetEraseFlag()==false)
{
array_1d<double,3> & position = pparticle.Coordinates();
const float& particle_scalar1 = pparticle.GetScalar1();
const array_1d<float,3>& particle_vector1 = pparticle.GetVector1();
array_1d<double,TDim+1> N;
bool is_found = CalculatePosition(nodes_positions,position[0],position[1],position[2],N);
if (is_found==false) // Something went wrong. if it was close enough to the edge we simply send it inside the element.
{
KRATOS_INFO("MoveShallowWaterParticleUtility") << N << std::endl;
// clamp slightly-negative shape function values so the particle still contributes
for (int j=0 ; j!=(TDim+1); j++)
if (N[j]<0.0 && N[j]> -1e-5)
N[j]=1e-10;
}
for (int j=0 ; j!=(TDim+1); j++) //going through the 3/4 nodes of the element
{
// These lines for a weighting function based on the distance (or square distance) from the node insteadof the shape functions
//double sq_dist = 0;
//for (int k=0 ; k!=(TDim); k++) sq_dist += ((position[k] - nodes_positions[j*3+k])*(position[k] - nodes_positions[j*3+k]));
//double weight = (1.0 - (sqrt(sq_dist)*weighting_inverse_divisor[j] ) );
double weight=N(j)*N(j);
//weight=N(j)*N(j)*N(j);
if (weight<threshold) weight=1e-10;
nodes_added_weights[j] += weight;
nodes_added_scalar1[j] += weight*static_cast<double>(particle_scalar1);
for (int k=0 ; k!=(TDim); k++) //x,y,(z)
{
nodes_added_vector1[j*3+k] += weight * static_cast<double>(particle_vector1[k]);
}
}
}
}
// Flush the element-local accumulators into the shared nodal database
// under the per-node lock (several threads may touch the same node).
for (int i=0 ; i!=(TDim+1) ; ++i) {
geom[i].SetLock();
geom[i].FastGetSolutionStepValue(PROJECTED_SCALAR1) += nodes_added_scalar1[i];
geom[i].FastGetSolutionStepValue(PROJECTED_VECTOR1_X) += nodes_added_vector1[3*i+0];
geom[i].FastGetSolutionStepValue(PROJECTED_VECTOR1_Y) += nodes_added_vector1[3*i+1];
geom[i].FastGetSolutionStepValue(PROJECTED_VECTOR1_Z) += nodes_added_vector1[3*i+2];
geom[i].FastGetSolutionStepValue(YP) += nodes_added_weights[i];
geom[i].UnSetLock();
}
}
}
// Step 3: normalize each node by the accumulated weight sum (YP).
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
double sum_weights = inode->FastGetSolutionStepValue(YP);
if (sum_weights>0.00001)
{
double & scalar = inode->FastGetSolutionStepValue(PROJECTED_SCALAR1);
array_1d<double,3> & vector = inode->FastGetSolutionStepValue(PROJECTED_VECTOR1);
scalar /=sum_weights; // resetting the scalar1
vector /=sum_weights; // resetting the vector1
}
else // This should never happen because other ways to recover the information have been executed before, but leaving it just in case..
{
inode->FastGetSolutionStepValue(PROJECTED_SCALAR1)=inode->FastGetSolutionStepValue(mScalarVar1,1); // Resetting the convected scalar
inode->FastGetSolutionStepValue(PROJECTED_VECTOR1)=inode->FastGetSolutionStepValue(mVectorVar1,1); // Resetting the convected vector
}
}
}
KRATOS_CATCH("")
}
/// Update all the particles without moving them
/** Applies the nodal "delta variables" (computed by
 * CalculateDeltaVariables) to every active particle of every element,
 * without performing any convection.
 *
 * @see CorrectParticleUsingDeltaVariables
 */
void CorrectParticlesWithoutMovingUsingDeltaVariables()
{
    KRATOS_TRY
    const int offset = mOffset; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones (flag managed only by MoveParticles)
    ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin();
    std::vector<unsigned int> element_partition;
    #ifdef _OPENMP
    int number_of_threads = omp_get_max_threads();
    #else
    int number_of_threads = 1;
    #endif
    OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Elements().size(), element_partition);
    #pragma omp parallel for
    for(int kkk=0; kkk<number_of_threads; kkk++)
    {
        for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
        {
            ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
            Element::Pointer pelement(*ielem.base());
            Geometry<Node<3> >& geom = ielem->GetGeometry();
            int & number_of_particles_in_elem = mNumOfParticlesInElems[ii];
            ParticlePointerVector& element_particle_pointers = mVectorOfParticlePointersVectors[ii];
            for (int iii=0; iii<number_of_particles_in_elem ; iii++ )
            {
                // BUGFIX: was `iii>mMaxNumberOfParticles`, which let
                // iii == mMaxNumberOfParticles through and could index one
                // slot past this element's half of the (2*mMax)-sized
                // pointer vector. Use `==` like TransferLagrangianToEulerian.
                if (iii==mMaxNumberOfParticles) //it means we are out of our portion of the array, abort loop!
                    break;
                ShallowParticle & pparticle = element_particle_pointers[offset+iii];
                bool erase_flag= pparticle.GetEraseFlag();
                if (erase_flag==false)
                {
                    CorrectParticleUsingDeltaVariables(pparticle,pelement,geom); //'lite' version, we pass by reference the geometry, so much cheaper
                }
            }
        }
    }
    KRATOS_CATCH("")
}
/// AddUniqueWeakPointer
/** Appends the candidate weak pointer to the vector only if no entry
 * with the same Id() is already stored.
 * @param v         vector of weak pointers to extend
 * @param candidate weak pointer to insert if its Id is not present
 */
template< class TDataType >
void AddUniqueWeakPointer
(WeakPointerVector< TDataType >& v, const typename TDataType::WeakPointer candidate)
{
    // Linear scan: stop as soon as an entry with the candidate's Id shows up.
    bool already_present = false;
    for (typename WeakPointerVector< TDataType >::iterator it = v.begin(); it != v.end(); ++it)
    {
        if (it->Id() == (candidate.lock())->Id())
        {
            already_present = true;
            break;
        }
    }
    if (!already_present)
    {
        v.push_back(candidate);
    }
}
/// Fill an element with particles
/** This function is to be executed after moving particles and
 * before transferring data from lagrangian particles to eulerian mesh.
 * If an element finishes with less particles than "minimum number
 * of particles", then PreReseed adds particles inside it.
 * A minimal reseed is performed in order to not disturb the projection
 * from lagrangian to eulerian: new particles are placed at interior
 * sampling points and get their values by backward integration
 * (MoveParticleInverseWay), not from the current element alone.
 *
 * @param MinimumNumberOfParticles elements below this population are reseeded
 *
 * @see MoveParticles
 * @see MoveParticleInverseWay: is called to get the particle values
 */
void PreReseed(int MinimumNumberOfParticles)
{
    KRATOS_TRY
    //ProcessInfo& CurrentProcessInfo = mrModelPart.GetProcessInfo();
    const int offset =mOffset;
    const int max_results = 1000;
    // Tools for the parallelization: manual static partition of the element
    // container into "number_of_threads" contiguous chunks.
    unsigned int number_of_threads = OpenMPUtils::GetNumThreads();
    std::vector<unsigned int> elem_partition;
    int number_of_rows = mrModelPart.Elements().size();
    elem_partition.resize(number_of_threads + 1);
    int elem_partition_size = number_of_rows / number_of_threads;
    elem_partition[0] = 0;
    elem_partition[number_of_threads] = number_of_rows;
    //KRATOS_WATCH(elem_partition_size);
    for (unsigned int i = 1; i < number_of_threads; i++)
        elem_partition[i] = elem_partition[i - 1] + elem_partition_size;
    ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin();
    #pragma omp parallel firstprivate(elem_partition)
    {
        ResultContainerType results(max_results);
        int k = OpenMPUtils::ThisThread();
        //ModelPart::ElementsContainerType::iterator it_begin = mrModelPart.ElementsBegin() + elem_partition[k];
        //ModelPart::ElementsContainerType::iterator it_end = mrModelPart.ElementsBegin() + elem_partition[k+1] ;
        //ModelPart::NodesContainerType local_list=aux[k];
        //PointerVectorSet<ShallowParticle, IndexedObject> & list=aux[k];
        BoundedMatrix<double, (TDim+1), 3 > pos;
        BoundedMatrix<double, (TDim+1) , (TDim+1) > N;
        unsigned int freeparticle=0; //we start with the first position in the particles array
        //int local_id=1;
        // Each thread walks only its own chunk of elements.
        for(unsigned int ii=elem_partition[k]; ii<elem_partition[k+1]; ii++)
        {
            //const int & elem_id = ielem->Id();
            ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
            results.resize(max_results);
            //const int & elem_id = ielem->Id();
            //ParticlePointerVector& element_particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS));
            //int & number_of_particles_in_elem=ielem->GetValue(NUMBER_OF_BED_PARTICLES);
            int & number_of_particles_in_elem= mNumOfParticlesInElems[ii];
            ParticlePointerVector& element_particle_pointers = mVectorOfParticlePointersVectors[ii];
            // Only under-populated elements are reseeded.
            if (number_of_particles_in_elem < (MinimumNumberOfParticles)) // && (ielem->GetGeometry())[0].Y()<0.10 )
            {
                Geometry< Node<3> >& geom = ielem->GetGeometry();
                ComputeGaussPointPositionsForPreReseed(geom, pos, N);
                for (unsigned int j = 0; j < (pos.size1()); j++) // I am dropping the last one, the one in the middle of the element
                {
                    // Find a free slot (a particle flagged for erase) in the shared
                    // particles array. The flag is re-checked inside "omp critical"
                    // so that two threads cannot claim the same slot.
                    bool keep_looking = true;
                    while(keep_looking)
                    {
                        if (mParticlesVector[freeparticle].GetEraseFlag()==true)
                        {
                            #pragma omp critical
                            {
                                if (mParticlesVector[freeparticle].GetEraseFlag()==true)
                                {
                                    mParticlesVector[freeparticle].GetEraseFlag()=false;
                                    keep_looking=false;
                                }
                            }
                            if (keep_looking==false)
                                break;
                            else
                                freeparticle++;
                        }
                        else
                            freeparticle++;
                    }
                    // Create the new particle at the sampling position.
                    ShallowParticle pparticle(pos(j,0),pos(j,1),pos(j,2));
                    array_1d<double,TDim+1>aux2_N;
                    bool is_found = CalculatePosition(geom,pos(j,0),pos(j,1),pos(j,2),aux2_N);
                    KRATOS_ERROR_IF_NOT( is_found ) <<
                        "In move shallow water particle utility: particle not found in domain" << std::endl;
                    pparticle.GetEraseFlag()=false;
                    ResultIteratorType result_begin = results.begin();
                    Element::Pointer pelement( *ielem.base() );
                    // Backward integration along the velocity field to pick up the
                    // convected values for the new particle.
                    MoveParticleInverseWay(pparticle, pelement, result_begin, max_results);
                    //and we copy it to the array:
                    mParticlesVector[freeparticle] = pparticle;
                    element_particle_pointers(offset+number_of_particles_in_elem) = &mParticlesVector[freeparticle];
                    pparticle.GetEraseFlag()=false;
                    number_of_particles_in_elem++;
                }
            }
        }
    }
    KRATOS_CATCH("")
}
/// Fill an element with particles
/** This function is to be executed after the mesh stage solver is
 * called and the particles are updated.
 * If an element contains less particles than "minimum number of
 * particles", then PostReseed adds particles inside it.
 * A full reseed is performed and the particle gets its convected
 * variables directly from the eulerian mesh (interpolated with the
 * sampling-point shape functions).
 *
 * @param MinimumNumberOfParticles elements below this population are reseeded
 *
 * @see PreReseed
 */
void PostReseed(int MinimumNumberOfParticles) //pooyan's way
{
    KRATOS_TRY
    //ProcessInfo& CurrentProcessInfo = mrModelPart.GetProcessInfo();
    const int offset = mOffset;
    // Tools for the parallelization: manual static partition of the elements.
    unsigned int number_of_threads = OpenMPUtils::GetNumThreads();
    std::vector<unsigned int> elem_partition;
    int number_of_rows=mrModelPart.Elements().size();
    //KRATOS_THROW_ERROR(std::logic_error, "Add ----NODAL_H---- variable!!!!!! ERROR", "");
    elem_partition.resize(number_of_threads + 1);
    int elem_partition_size = number_of_rows / number_of_threads;
    elem_partition[0] = 0;
    elem_partition[number_of_threads] = number_of_rows;
    for (unsigned int i = 1; i < number_of_threads; i++)
        elem_partition[i] = elem_partition[i - 1] + elem_partition_size;
    ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin();
    #pragma omp parallel firstprivate(elem_partition) // firstprivate(results)//we will add the nodes in different parts of aux and later assemple everything toghether, remaming particles ids to get consecutive ids
    {
        unsigned int reused_particles=0;
        unsigned int freeparticle = 0; //we start by the first position;
        int k = OpenMPUtils::ThisThread();
        BoundedMatrix<double, (3+2*TDim), 3 > pos; //7 particles (2D) or 9 particles (3D)
        BoundedMatrix<double, (3+2*TDim), (TDim+1) > N;
        double mesh_scalar1;
        array_1d<double,3> mesh_vector1;
        array_1d<int, (3+2*TDim) > positions;
        unsigned int number_of_reseeded_particles;
        // Each thread walks only its own chunk of elements.
        for(unsigned int ii=elem_partition[k]; ii<elem_partition[k+1]; ii++)
        {
            //const int & elem_id = ielem->Id();
            ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
            int & number_of_particles_in_elem = mNumOfParticlesInElems[ii];
            ParticlePointerVector& element_particle_pointers = mVectorOfParticlePointersVectors[ii];
            Geometry< Node<3> >& geom = ielem->GetGeometry();
            if ( number_of_particles_in_elem < (MinimumNumberOfParticles) ) // && (geom[0].Y()<0.10) ) || (number_of_water_particles_in_elem>2 && number_of_particles_in_elem<(MinimumNumberOfParticles) ) )
            {
                //bool reseed_more=false;
                number_of_reseeded_particles = 0;
                //reseed_more=true;
                number_of_reseeded_particles = 3 + 2*TDim;
                ComputeGaussPointPositionsForPostReseed(geom, pos, N);
                for (unsigned int j = 0; j < number_of_reseeded_particles; j++)
                {
                    // Now we have to find an empty space (a particle that was about to be deleted) in the
                    // particles model part. once found. there will be our renewed particle.
                    // The erase flag is re-checked inside "omp critical" so that two
                    // threads cannot claim the same free slot.
                    bool keep_looking = true;
                    while(keep_looking)
                    {
                        if (mParticlesVector[freeparticle].GetEraseFlag()==true)
                        {
                            #pragma omp critical
                            {
                                if (mParticlesVector[freeparticle].GetEraseFlag()==true)
                                {
                                    mParticlesVector[freeparticle].GetEraseFlag()=false;
                                    keep_looking=false;
                                }
                            }
                            if (keep_looking==false)
                                break;
                            else
                                freeparticle++;
                        }
                        else
                            freeparticle++;
                    }
                    ShallowParticle pparticle(pos(j,0),pos(j,1),pos(j,2));
                    array_1d<double,TDim+1>aux_N;
                    bool is_found = CalculatePosition(geom,pos(j,0),pos(j,1),pos(j,2),aux_N);
                    KRATOS_ERROR_IF_NOT( is_found ) <<
                        "In move shallow water particle utility: particle not found in domain" << std::endl;
                    // Interpolate the eulerian values at the sampling point and copy
                    // them into the new particle (full reseed: no backward integration).
                    mesh_scalar1 = 0.0;
                    mesh_vector1 = ZeroVector(3);
                    for (unsigned int l = 0; l < (TDim+1); l++)
                    {
                        mesh_scalar1 += N(j,l) * geom[l].FastGetSolutionStepValue(mScalarVar1);
                        noalias(mesh_vector1) += N(j, l) * geom[l].FastGetSolutionStepValue(mVectorVar1);
                    }
                    pparticle.GetScalar1()=mesh_scalar1;
                    pparticle.GetVector1()=mesh_vector1;
                    pparticle.GetEraseFlag()=false;
                    mParticlesVector[freeparticle]=pparticle;
                    element_particle_pointers(offset+number_of_particles_in_elem) = &mParticlesVector[freeparticle];
                    number_of_particles_in_elem++;
                    // NOTE(review): keep_looking is always false once the search loop
                    // above exits (it only terminates after a slot is claimed), so this
                    // guard can never fire; an exhausted particle array would instead
                    // overrun mParticlesVector inside the while loop — confirm intent.
                    KRATOS_ERROR_IF( keep_looking ) <<
                        "In move shallow water particle utility: Finished the list and couldnt find a free cell for the new particle!" << std::endl;
                    reused_particles++;
                }
            }
        }
    }
    KRATOS_CATCH("")
}
/// Fill a model part with particles
/** This function prints the particles to a model part. On first call it
 * creates one node per (filtered) particle in rLagrangianModelPart; on
 * every call it copies the scalar value and the coordinates of the active
 * particles into the nodal database of that model part.
 *
 * @param rLagrangianModelPart model part to print particles into (must be
 *        empty on the first call; later calls reuse the created nodes)
 * @param FilterFactor the function will print one particle of every "filter factor"
 */
void ExecuteParticlesPrintingTool( ModelPart& rLagrangianModelPart, unsigned int FilterFactor )
{
    KRATOS_TRY
    // We will only print one out of every "filter factor" particles of the total particle list
    if (mParticlePrintingToolInitialized == false)
    {
        // BUGFIX: the previous guard compared NodesBegin() - NodesEnd(), which is
        // zero or negative for any container and therefore could never trigger.
        // The intent is to require an empty model part on first use.
        KRATOS_ERROR_IF( rLagrangianModelPart.Nodes().size() > 0 ) <<
            "In move shallow water particle utility: an empty model part is required for the particles printing tool" << std::endl;
        rLagrangianModelPart.AddNodalSolutionStepVariable(mScalarVar1);
        rLagrangianModelPart.AddNodalSolutionStepVariable(DISPLACEMENT);
        // Create enough nodes to host the filtered particle list; ids continue
        // after the last already-used node id.
        for (unsigned int i = 0; i != ((mMaxNumberOfParticles*mNElems)/FilterFactor) + FilterFactor; i++)
        {
            Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode( i+mLastNodeId+1 , 0.0, 0.0, 0.0); // remember this is the new (printing) model part
            //pnode->SetBufferSize(mrModelPart.NodesBegin()->GetBufferSize());
            pnode->SetBufferSize(1);
        }
        mParticlePrintingToolInitialized=true;
    }
    // Resetting data of the unused particles: park the nodes far from the domain
    // and zero the scalar so inactive slots do not show stale data.
    const double inactive_particle_position = -10.0;
    array_1d<double,3>inactive_particle_position_vector;
    inactive_particle_position_vector(0)=inactive_particle_position;
    inactive_particle_position_vector(1)=inactive_particle_position;
    inactive_particle_position_vector(2)=inactive_particle_position;
    ModelPart::NodesContainerType::iterator inodebegin = rLagrangianModelPart.NodesBegin();
    for(unsigned int ii = 0; ii < rLagrangianModelPart.Nodes().size(); ii++)
    {
        ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
        inode->FastGetSolutionStepValue(mScalarVar1) = 0.0;
        inode->FastGetSolutionStepValue(DISPLACEMENT) = inactive_particle_position_vector;
    }
    // Copy every FilterFactor-th active particle into consecutive printing nodes.
    int counter = 0;
    for (int i = 0; i != mMaxNumberOfParticles*mNElems; i++)
    {
        ShallowParticle& pparticle = mParticlesVector[i];
        if(pparticle.GetEraseFlag() == false && i%FilterFactor == 0)
        {
            ModelPart::NodesContainerType::iterator inode = inodebegin + counter; //copying info from the particle to the (printing) node.
            inode->FastGetSolutionStepValue(mScalarVar1) = pparticle.GetScalar1();
            inode->FastGetSolutionStepValue(DISPLACEMENT) = pparticle.Coordinates();
            counter++;
        }
    }
    KRATOS_CATCH("")
}
protected:
private:
/// Move a particle
/** This function moves a particle following the velocity field given
 * by the VELOCITY variable. The movement is performed in nsubsteps,
 * during a total time of DELTA_TIME. If at any point no containing
 * element is found, integration stops and the particle is flagged
 * for erasure.
 *
 * @param pParticle particle to convect (its coordinates are updated in place)
 * @param pElement in: element the particle starts in; out: element it ends in
 * @param rElementsInTrajectory cache of elements crossed by earlier particles of the same seed element
 * @param rNumberOfElementsInTrajectory number of valid entries in that cache
 * @param ResultBegin iterator to the spatial-search working buffer
 * @param MaxNumberOfResults capacity of the working buffer
 *
 * @see MoveParticles
 */
void MoveParticle(ShallowParticle & pParticle,
                  Element::Pointer & pElement,
                  WeakPointerVector< Element >& rElementsInTrajectory,
                  unsigned int & rNumberOfElementsInTrajectory,
                  ResultIteratorType ResultBegin,
                  const unsigned int MaxNumberOfResults)
{
    ProcessInfo& CurrentProcessInfo = mrModelPart.GetProcessInfo();
    double delta_t = CurrentProcessInfo[DELTA_TIME];
    unsigned int nsubsteps;
    double substep_dt;
    bool keep_integrating = false;
    bool is_found;
    array_1d<double,3> vel;
    array_1d<double,3> vel_without_other_phase_nodes=ZeroVector(3);
    array_1d<double,3> position;
    array_1d<double,3> mid_position;
    array_1d<double,TDim+1> N;
    //we start with the first position, then it will enter the loop.
    position = pParticle.Coordinates(); //initial coordinates
    double only_integral = 0.0 ;
    is_found = FindNodeOnMesh(position, N, pElement, ResultBegin, MaxNumberOfResults); //good, now we know where this point is:
    if(is_found == true)
    {
        keep_integrating=true;
        Geometry< Node<3> >& geom = pElement->GetGeometry();//the element we're in
        // Interpolate the mesh velocity at the particle position.
        vel=ZeroVector(3);
        for(unsigned int j=0; j<(TDim+1); j++)
        {
            noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j];
        }
        //calculating substep to get +- courant(substep) = 0.1
        nsubsteps = 10.0 * (delta_t * pElement->GetValue(MEAN_VEL_OVER_ELEM_SIZE));
        if (nsubsteps<1)
            nsubsteps=1;
        substep_dt = delta_t / double(nsubsteps);
        only_integral = 1.0;// weight;//*double(nsubsteps);
        position += vel*substep_dt;//weight;
        // DONE THE FIRST LOCATION OF THE PARTICLE, NOW WE PROCEED TO STREAMLINE INTEGRATION USING THE MESH VELOCITY
        unsigned int check_from_element_number = 0;
        for(unsigned int i=0; i<(nsubsteps-1); i++)// this is for the substeps n+1. in the first one we already knew the position of the particle.
        {
            if (keep_integrating == true)
            {
                // Trajectory-aware search: reuses/extends the element cache shared
                // by particles seeded from the same element.
                is_found = FindNodeOnMesh(position, N, pElement, rElementsInTrajectory, rNumberOfElementsInTrajectory, check_from_element_number, ResultBegin, MaxNumberOfResults); //good, now we know where this point is:
                if(is_found == true)
                {
                    Geometry< Node<3> >& geom = pElement->GetGeometry();//the element we're in
                    vel = ZeroVector(3);
                    for(unsigned int j=0; j<(TDim+1); j++)
                    {
                        noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j];
                    }
                    only_integral += 1.0; //values saved for the current time step
                    position+=vel*substep_dt;//weight;
                }
                else
                {
                    keep_integrating=false;
                    break;
                }
            }
            else
                break;
        }
    }
    // If integration was aborted (or never started because the initial search
    // failed) the particle is flagged for deletion; otherwise relocate the
    // final position once more so pElement points to the last element we're in.
    if (keep_integrating == false) (pParticle.GetEraseFlag()=true);
    else is_found = FindNodeOnMesh(position, N ,pElement,ResultBegin,MaxNumberOfResults); //we must save the pointer of the last element that we're in (inside the pointervector pElement)
    if (is_found == false) ( pParticle.GetEraseFlag()=true);
    pParticle.Coordinates() = position;
}
/// This function updates a particle
/** Updates the particle's scalar and vector values by adding the
 * "delta variables" (DELTA_SCALAR1 / DELTA_VECTOR1) interpolated from
 * the nodal database at the particle position.
 *
 * @param pParticle particle to be corrected
 * @param pElement element pointer (kept for interface compatibility; not read here)
 * @param rGeom geometry of the element containing the particle
 *
 * @see CorrectParticlesWithoutMovingUsingDeltaVariables
 */
void CorrectParticleUsingDeltaVariables(ShallowParticle & pParticle,
                                        Element::Pointer & pElement,
                                        Geometry< Node<3> >& rGeom)
{
    array_1d<double,TDim+1> shape_functions;
    const array_1d<double,3> particle_coords = pParticle.Coordinates();
    // Locate the particle inside the given geometry.
    const bool is_inside = CalculatePosition(rGeom, particle_coords[0], particle_coords[1], particle_coords[2], shape_functions);
    if (!is_inside)
    {
        // Particle slightly outside: report and clamp negative shape
        // functions to a tiny positive value so the update stays usable.
        KRATOS_INFO("MoveShallowWaterParticleUtility") << shape_functions << std::endl;
        for (unsigned int j = 0; j < (TDim+1); j++)
        {
            if (shape_functions[j] < 0.0)
                shape_functions[j] = 1e-10;
        }
    }
    // Interpolate the nodal delta variables at the particle position.
    double delta_scalar = 0.0;
    array_1d<double,3> delta_vector = ZeroVector(3);
    for (unsigned int j = 0; j < (TDim+1); j++)
    {
        delta_scalar += rGeom[j].FastGetSolutionStepValue(DELTA_SCALAR1) * shape_functions[j];
        noalias(delta_vector) += rGeom[j].FastGetSolutionStepValue(DELTA_VECTOR1) * shape_functions[j];
    }
    // Apply the corrections to the particle's stored values.
    float & particle_scalar = pParticle.GetScalar1();
    array_1d<float,3> & particle_vector = pParticle.GetVector1();
    particle_scalar = particle_scalar + delta_scalar;
    particle_vector = particle_vector + delta_vector;
}
/// Move a particle in the inverse way
/** This function moves a particle according to minus the velocity given
 * by the VELOCITY variable. The movement is performed by a backward
 * integration in nsubsteps, during a total time of DELTA_TIME.
 * The scalar/vector values interpolated at the last position that was
 * still inside the domain are stored into the particle. If even the
 * initial position cannot be located, the particle is left untouched.
 *
 * @param pParticle particle whose values are to be initialized
 * @param pElement element the backward integration starts from
 * @param ResultBegin iterator to the spatial-search working buffer
 * @param MaxNumberOfResults capacity of the working buffer
 *
 * @see PreReseed
 */
void MoveParticleInverseWay(ShallowParticle & pParticle,
                            Element::Pointer & pElement, //NOT A REFERENCE!! WE SHALL NOT OVERWRITE THE ELEMENT IT BELONGS TO!
                            // NOTE(review): despite the comment above, the parameter IS a
                            // reference here; callers (e.g. PreReseed) pass a local copy,
                            // which keeps the original element pointer safe — confirm.
                            ResultIteratorType ResultBegin,
                            const unsigned int MaxNumberOfResults)
{
    ProcessInfo& CurrentProcessInfo = mrModelPart.GetProcessInfo();
    double delta_t = CurrentProcessInfo[DELTA_TIME];
    unsigned int nsubsteps;
    double substep_dt;
    bool keep_integrating = false;
    bool is_found;
    double scalar1 = 0.0;
    array_1d<double,3> vector1;
    array_1d<double,3> vel;
    array_1d<double,3> position;
    array_1d<double,3> mid_position;
    array_1d<double,TDim+1> N;
    //we start with the first position, then it will enter the loop.
    position = pParticle.Coordinates(); // + (pParticle)->FastGetSolutionStepValue(DISPLACEMENT); //initial coordinates
    double only_integral = 0.0 ;
    is_found = FindNodeOnMesh(position, N, pElement, ResultBegin, MaxNumberOfResults); //good, now we know where this point is:
    if(is_found == true)
    {
        keep_integrating = true;
        Geometry< Node<3> >& geom = pElement->GetGeometry(); //the element we're in
        // Interpolate scalar, vector and velocity at the starting position.
        scalar1 = 0.0;
        vector1 = ZeroVector(3);
        vel = ZeroVector(3);
        for(unsigned int j=0; j<(TDim+1); j++)
        {
            scalar1 += geom[j].FastGetSolutionStepValue(mScalarVar1)*N[j];
            noalias(vector1) += geom[j].FastGetSolutionStepValue(mVectorVar1)*N[j];
            noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j];
        }
        //calculating substep to get +- courant(substep) = 1/4
        nsubsteps = 10.0 * (delta_t * pElement->GetValue(MEAN_VEL_OVER_ELEM_SIZE));
        if (nsubsteps<1)
            nsubsteps=1;
        substep_dt = delta_t / double(nsubsteps);
        only_integral = 1.0; // weight;//*double(nsubsteps);
        // Backward step: march against the velocity field.
        position -= vel*substep_dt; //weight;
        for(unsigned int i=0; i<(nsubsteps-1); i++) // this is for the substeps n+1. in the first one we already knew the position of the particle.
        {
            if (keep_integrating == true)
            {
                is_found = FindNodeOnMesh(position, N, pElement, ResultBegin, MaxNumberOfResults); //good, now we know where this point is:
                if (is_found == true)
                {
                    Geometry< Node<3> >& geom = pElement->GetGeometry();//the element we're in
                    // Re-interpolate at the new backward position; scalar1/vector1
                    // always hold the values at the last position still in the domain.
                    scalar1 = 0.0;
                    vector1 = ZeroVector(3);
                    vel = ZeroVector(3);
                    for(unsigned int j=0; j<(TDim+1); j++)
                    {
                        scalar1 += geom[j].FastGetSolutionStepValue(mScalarVar1)*N(j);
                        noalias(vector1) += geom[j].FastGetSolutionStepValue(mVectorVar1)*N[j];
                        noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j];
                    }
                    only_integral += 1.0; //weight ; //values saved for the current time step
                    position -= vel*substep_dt; //weight;
                }
                else keep_integrating = false; // left the domain: keep the last good values
            }
        }
        pParticle.GetScalar1() = scalar1;
        pParticle.GetVector1() = vector1;
    }
}
/// Find the element into which a given node is located
/** This function finds the element containing a given position and
 * returns a pointer to that element together with the shape function
 * values of the position inside it. Search order, cheapest first:
 *   1. the last known element (pElement),
 *   2. its neighbour elements,
 *   3. the spatial search bins.
 * If false is returned the element is not found.
 *
 * @param rPosition position of the node
 * @param N return shape functions that define the position within the elem
 * @param pElement in: last known element; out: the element that was found
 * @param ResultBegin iterator to the spatial-search working buffer
 * @param MaxNumberOfResults capacity of the working buffer
 * @return true if the element was found
 *
 * @see CalculatePosition
 */
bool FindNodeOnMesh( const array_1d<double,3>& rPosition,
                     array_1d<double,TDim+1>& N,
                     Element::Pointer & pElement,
                     ResultIteratorType ResultBegin,
                     const unsigned int MaxNumberOfResults)
{
    typedef std::size_t SizeType;
    // (removed an unused local shape-function buffer that was never read)
    // 1) Before using the bins we check the last element in which the particle was.
    Geometry<Node<3> >& geom_default = pElement->GetGeometry(); //(*(i))->GetGeometry();
    bool is_found_1 = CalculatePosition(geom_default,rPosition[0],rPosition[1],rPosition[2],N);
    if (is_found_1) //that was easy!
    {
        return true;
    }
    // 2) Check the neighbour elements; it is a bit more expensive.
    WeakPointerVector< Element >& neighb_elems = pElement->GetValue(NEIGHBOUR_ELEMENTS);
    for (unsigned int i=0;i!=(neighb_elems.size());i++)
    {
        Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
        bool is_found_2 = CalculatePosition(geom,rPosition[0],rPosition[1],rPosition[2],N);
        if (is_found_2)
        {
            pElement=Element::Pointer(((neighb_elems(i))));
            return true;
        }
    }
    // 3) If checking all the neighbour elements did not work, we have to use
    // the bins: ask the container for the list of candidate elements.
    SizeType results_found = mpBinsObjectDynamic->SearchObjectsInCell(Point{rPosition}, ResultBegin, MaxNumberOfResults );
    if (results_found>0)
    {
        //loop over the candidate elements and check if the particle falls within
        for(SizeType i = 0; i< results_found; i++)
        {
            Geometry<Node<3> >& geom = (*(ResultBegin+i))->GetGeometry();
            //find local position
            bool is_found_3 = CalculatePosition(geom,rPosition[0],rPosition[1],rPosition[2],N);
            if (is_found_3)
            {
                pElement=Element::Pointer((*(ResultBegin+i)));
                return true;
            }
        }
    }
    // Not found anywhere.
    return false;
}
/// Find the element into which a given node is located
/** This function should find the element into which a given node
* is located and return a pointer to the element and the vector
* containing the shape functions that define the positions within
* the element.
* If false is returned the element is not found
* This version includes predefined elements following a trajectory
*
* @param rPosition of the node
* @param N Output shape functions that define the positions within the elem
* @param pElement Output a pointer to the element
* @param rElementsInTrajectory
* @param rNumberOfElementsInTrajectory Output
* @param CheckFromElementNumber
* @param ResultBegin
* @param MaxNumberOfResults
* @return FindNodeOnMesh if the element is found of not
*
* @see CalculatePosition
*/
bool FindNodeOnMesh( const array_1d<double,3>& rPosition,
array_1d<double,TDim+1>& N,
Element::Pointer & pElement,
WeakPointerVector< Element >& rElementsInTrajectory,
unsigned int & rNumberOfElementsInTrajectory,
unsigned int & rCheckFromElementNumber,
ResultIteratorType ResultBegin,
const unsigned int MaxNumberOfResults)
{
typedef std::size_t SizeType;
//~ const array_1d<double,3>& coords = rPosition;
array_1d<double,TDim+1> aux_N;
//before using the bin to search for possible elements we check first the last element in which the particle was.
Geometry<Node<3> >& geom_default = pElement->GetGeometry(); //(*(i))->GetGeometry();
bool is_found_1 = CalculatePosition(geom_default,rPosition[0],rPosition[1],rPosition[2],N);
if(is_found_1 == true)
{
return true; //that was easy!
}
// If it was not found in the first element, we can proceed to check in the following elements (in the trajectory defined by previous particles that started from the same element.
for (unsigned int i=(rCheckFromElementNumber);i!=rNumberOfElementsInTrajectory;i++)
{
Geometry<Node<3> >& geom = rElementsInTrajectory[i].GetGeometry();
bool is_found_2 = CalculatePosition(geom,rPosition[0],rPosition[1],rPosition[2],aux_N);
if (is_found_2)
{
pElement = Element::Pointer(((rElementsInTrajectory(i))));
N = aux_N;
rCheckFromElementNumber = i+1 ; //now i element matches pElement, so to avoid cheching twice the same element we send the counter to the following element.
return true;
}
}
// Now we check the neighbour elements:
WeakPointerVector< Element >& neighb_elems = pElement->GetValue(NEIGHBOUR_ELEMENTS);
for (unsigned int i=0;i!=(neighb_elems.size());i++)
{
Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
bool is_found_2 = CalculatePosition(geom,rPosition[0],rPosition[1],rPosition[2],N);
if (is_found_2)
{
pElement=Element::Pointer(((neighb_elems(i))));
if (rNumberOfElementsInTrajectory<20)
{
rElementsInTrajectory(rNumberOfElementsInTrajectory) = pElement;
rNumberOfElementsInTrajectory++;
rCheckFromElementNumber = rNumberOfElementsInTrajectory; //we do it after doing the ++ to the counter, so we woudlnt enter the loop that searches in the rElementsInTrajectory list. we are the particle that is adding elements to the list
}
return true;
}
}
// If checking all the neighbour elements did not work, we have to use the bins
// ask to the container for the list of candidate elements
SizeType results_found = mpBinsObjectDynamic->SearchObjectsInCell(Point{rPosition}, ResultBegin, MaxNumberOfResults );
if(results_found>0)
{
//loop over the candidate elements and check if the particle falls within
for(SizeType i = 0; i< results_found; i++)
{
Geometry<Node<3> >& geom = (*(ResultBegin+i))->GetGeometry();
//find local position
bool is_found = CalculatePosition(geom,rPosition[0],rPosition[1],rPosition[2],N);
if (is_found)
{
pElement=Element::Pointer((*(ResultBegin+i)));
if (rNumberOfElementsInTrajectory<20)
{
rElementsInTrajectory(rNumberOfElementsInTrajectory) = pElement;
rNumberOfElementsInTrajectory++;
rCheckFromElementNumber = rNumberOfElementsInTrajectory; //we do it after doing the ++ to the counter, so we woudlnt enter the loop that searches in the rElementsInTrajectory list. we are the particle that is adding elements to the list
}
return true;
}
}
}
//not found case
return false;
}
/// Calculate the position of a given particle inside an element
/** Computes the barycentric (shape function) coordinates of the point
 * (xc, yc) with respect to the triangle rGeom and stores them in N.
 * Raises an error if the triangle has zero area.
 *
 * @param rGeom the element (a triangle)
 * @param xc the position of the particle
 * @param yc the position of the particle
 * @param zc the position of the particle (unused in the 2D version)
 * @param N the shape functions to define the particle position
 *
 * @return true if the point lies inside the triangle
 */
inline bool CalculatePosition( const Geometry<Node < 3 > >&rGeom,
                               const double xc,
                               const double yc,
                               const double zc,
                               array_1d<double,3> & N )
{
    const double x0 = rGeom[0].X();
    const double y0 = rGeom[0].Y();
    const double x1 = rGeom[1].X();
    const double y1 = rGeom[1].Y();
    const double x2 = rGeom[2].X();
    const double y2 = rGeom[2].Y();
    const double area = CalculateVol(x0, y0, x1, y1, x2, y2);
    KRATOS_ERROR_IF( area == 0.0 ) << "In move shallow water particle utility: element with zero area found" << std::endl;
    const double inv_area = 1.0 / area;
    // Each shape function is the sub-triangle area opposite the node,
    // normalized by the total area.
    N[0] = CalculateVol(x1, y1, x2, y2, xc, yc) * inv_area;
    N[1] = CalculateVol(x2, y2, x0, y0, xc, yc) * inv_area;
    N[2] = CalculateVol(x0, y0, x1, y1, xc, yc) * inv_area;
    // The point is inside iff every barycentric coordinate lies in [0, 1].
    bool is_inside = true;
    for (unsigned int i = 0; i < 3; i++)
        is_inside = is_inside && (N[i] >= 0.0) && (N[i] <= 1.0);
    return is_inside;
}
/// Calculate the position of a given particle inside an element
/** Computes the barycentric (shape function) coordinates of the point
 * (xc, yc) with respect to a triangle given by a flat coordinate array
 * (x, y, z per node) and stores them in N.
 * Raises an error if the triangle has zero area.
 *
 * @param rNodesPositions flat (x,y,z)-per-node coordinates of the triangle
 * @param xc the position of the particle
 * @param yc the position of the particle
 * @param zc the position of the particle (unused in the 2D version)
 * @param N the shape functions to define the particle position
 *
 * @return true if the point lies inside the triangle
 */
inline bool CalculatePosition( const array_1d<double,3*(TDim+1)>& rNodesPositions,
                               const double xc,
                               const double yc,
                               const double zc,
                               array_1d<double,3> & N )
{
    // Node coordinates are packed as [x0 y0 z0 x1 y1 z1 x2 y2 z2].
    const double& x0 = rNodesPositions[0];
    const double& y0 = rNodesPositions[1];
    const double& x1 = rNodesPositions[3];
    const double& y1 = rNodesPositions[4];
    const double& x2 = rNodesPositions[6];
    const double& y2 = rNodesPositions[7];
    const double area = CalculateVol(x0, y0, x1, y1, x2, y2);
    KRATOS_ERROR_IF( area == 0.0 ) << "In move shallow water particle utility: element with zero area found" << std::endl;
    const double inv_area = 1.0 / area;
    // Sub-triangle area opposite each node, normalized by the total area.
    N[0] = CalculateVol(x1, y1, x2, y2, xc, yc) * inv_area;
    N[1] = CalculateVol(x2, y2, x0, y0, xc, yc) * inv_area;
    N[2] = CalculateVol(x0, y0, x1, y1, xc, yc) * inv_area;
    // The point is inside iff every barycentric coordinate lies in [0, 1].
    bool is_inside = true;
    for (unsigned int i = 0; i < 3; i++)
        is_inside = is_inside && (N[i] >= 0.0) && (N[i] <= 1.0);
    return is_inside;
}
/// Calculate the position of a given particle inside an element
/** Computes the barycentric (shape function) coordinates of the point
 * (xc, yc, zc) with respect to the tetrahedron rGeom and stores them in N.
 * Raises an error if the tetrahedron has zero volume.
 *
 * @param rGeom the element (a tetrahedron)
 * @param xc the position of the particle
 * @param yc the position of the particle
 * @param zc the position of the particle
 * @param N the shape functions to define the particle position
 *
 * @return true if the point lies inside the tetrahedron
 */
inline bool CalculatePosition( const Geometry<Node < 3 > >&rGeom,
                               const double xc,
                               const double yc,
                               const double zc,
                               array_1d<double, 4 > & N )
{
    const double x0 = rGeom[0].X();
    const double y0 = rGeom[0].Y();
    const double z0 = rGeom[0].Z();
    const double x1 = rGeom[1].X();
    const double y1 = rGeom[1].Y();
    const double z1 = rGeom[1].Z();
    const double x2 = rGeom[2].X();
    const double y2 = rGeom[2].Y();
    const double z2 = rGeom[2].Z();
    const double x3 = rGeom[3].X();
    const double y3 = rGeom[3].Y();
    const double z3 = rGeom[3].Z();
    const double vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);
    KRATOS_ERROR_IF( vol == 0.0 ) << "In move shallow water particle utility: element with zero vol found" << std::endl;
    const double inv_vol = 1.0 / vol;
    // Sub-tetrahedron volume opposite each node, normalized by the total volume.
    N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc) * inv_vol;
    N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc) * inv_vol;
    N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc) * inv_vol;
    N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc) * inv_vol;
    // The point is inside iff all four barycentric coordinates lie in [0, 1].
    bool is_inside = true;
    for (unsigned int i = 0; i < 4; i++)
        is_inside = is_inside && (N[i] >= 0.0) && (N[i] <= 1.0);
    return is_inside;
}
/// Calculate the position of a given particle inside an element
/** Computes the barycentric (shape function) coordinates of the point
 * (xc, yc, zc) with respect to a tetrahedron given by a flat coordinate
 * array (x, y, z per node) and stores them in N.
 * Raises an error if the tetrahedron has zero volume.
 *
 * @param rNodesPositions flat (x,y,z)-per-node coordinates of the tetrahedron
 * @param xc the position of the particle
 * @param yc the position of the particle
 * @param zc the position of the particle
 * @param N the shape functions to define the particle position
 *
 * @return true if the point lies inside the tetrahedron
 */
inline bool CalculatePosition( const array_1d<double,3*(TDim+1)>& rNodesPositions,
                               const double xc,
                               const double yc,
                               const double zc,
                               array_1d<double, 4 > & N )
{
    // Node coordinates are packed as [x0 y0 z0 x1 y1 z1 x2 y2 z2 x3 y3 z3].
    const double& x0 = rNodesPositions[0];
    const double& y0 = rNodesPositions[1];
    const double& z0 = rNodesPositions[2];
    const double& x1 = rNodesPositions[3];
    const double& y1 = rNodesPositions[4];
    const double& z1 = rNodesPositions[5];
    const double& x2 = rNodesPositions[6];
    const double& y2 = rNodesPositions[7];
    const double& z2 = rNodesPositions[8];
    const double& x3 = rNodesPositions[9];
    const double& y3 = rNodesPositions[10];
    const double& z3 = rNodesPositions[11];
    const double vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);
    KRATOS_ERROR_IF( vol == 0.0 ) << "In move shallow water particle utility: element with zero vol found" << std::endl;
    const double inv_vol = 1.0 / vol;
    // Sub-tetrahedron volume opposite each node, normalized by the total volume.
    N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc) * inv_vol;
    N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc) * inv_vol;
    N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc) * inv_vol;
    N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc) * inv_vol;
    // The point is inside iff all four barycentric coordinates lie in [0, 1].
    bool is_inside = true;
    for (unsigned int i = 0; i < 4; i++)
        is_inside = is_inside && (N[i] >= 0.0) && (N[i] <= 1.0);
    return is_inside;
}
/// Calculate the volume
/** Computes the signed area of the triangle (x0,y0)-(x1,y1)-(x2,y2):
 * half the 2D cross product of the two edge vectors leaving node 0.
 * Positive for counter-clockwise node ordering.
 */
inline double CalculateVol( const double x0, const double y0,
                            const double x1, const double y1,
                            const double x2, const double y2 )
{
    // Edge vectors from node 0
    const double ux = x1 - x0;
    const double uy = y1 - y0;
    const double vx = x2 - x0;
    const double vy = y2 - y0;
    return 0.5 * (ux * vy - uy * vx);
}
/// Calculate the volume
/** Computes the signed volume of the tetrahedron with vertices
 * (x0,y0,z0) ... (x3,y3,z3): one sixth of the scalar triple product
 * of the three edge vectors leaving node 0.
 */
inline double CalculateVol( const double x0, const double y0, const double z0,
                            const double x1, const double y1, const double z1,
                            const double x2, const double y2, const double z2,
                            const double x3, const double y3, const double z3 )
{
    // Edge vectors from node 0
    const double ax = x1 - x0;
    const double ay = y1 - y0;
    const double az = z1 - z0;
    const double bx = x2 - x0;
    const double by = y2 - y0;
    const double bz = z2 - z0;
    const double cx = x3 - x0;
    const double cy = y3 - y0;
    const double cz = z3 - z0;
    // a . (b x c)
    const double triple_product = ax * (by * cz - bz * cy)
                                + ay * (bz * cx - bx * cz)
                                + az * (bx * cy - by * cx);
    return triple_product * 0.1666666666666666666667; // divide by six
}
/// Compute the Gauss points
/** Fills "pos" with four sampling positions inside the triangle "geom"
 * (three near the corners plus the centroid) and fills the first four
 * rows of "N" with the corresponding shape function values.
 * NOTE(review): despite the variable names, one_sixt is 0.15 and
 * two_third is 0.7 (not 1/6 and 2/3, which appear commented out) — the
 * corner points are pulled slightly towards the interior; confirm this
 * offset is intentional.
 */
void ComputeGaussPointPositions_4( Geometry< Node < 3 > >& geom,
                                   BoundedMatrix<double, 7, 3 > & pos,
                                   BoundedMatrix<double, 7, 3 > & N )
{
    double one_third = 1.0 / 3.0;
    double one_sixt = 0.15; //1.0 / 6.0;
    double two_third = 0.7; //2.0 * one_third;
    // Shape function values of each sampling point (rows 0-3).
    N(0, 0) = one_sixt;
    N(0, 1) = one_sixt;
    N(0, 2) = two_third;
    N(1, 0) = two_third;
    N(1, 1) = one_sixt;
    N(1, 2) = one_sixt;
    N(2, 0) = one_sixt;
    N(2, 1) = two_third;
    N(2, 2) = one_sixt;
    N(3, 0) = one_third;
    N(3, 1) = one_third;
    N(3, 2) = one_third;
    // Physical coordinates: weighted combinations of the triangle nodes.
    //first
    pos(0, 0) = one_sixt * geom[0].X() + one_sixt * geom[1].X() + two_third * geom[2].X();
    pos(0, 1) = one_sixt * geom[0].Y() + one_sixt * geom[1].Y() + two_third * geom[2].Y();
    pos(0, 2) = one_sixt * geom[0].Z() + one_sixt * geom[1].Z() + two_third * geom[2].Z();
    //second
    pos(1, 0) = two_third * geom[0].X() + one_sixt * geom[1].X() + one_sixt * geom[2].X();
    pos(1, 1) = two_third * geom[0].Y() + one_sixt * geom[1].Y() + one_sixt * geom[2].Y();
    pos(1, 2) = two_third * geom[0].Z() + one_sixt * geom[1].Z() + one_sixt * geom[2].Z();
    //third
    pos(2, 0) = one_sixt * geom[0].X() + two_third * geom[1].X() + one_sixt * geom[2].X();
    pos(2, 1) = one_sixt * geom[0].Y() + two_third * geom[1].Y() + one_sixt * geom[2].Y();
    pos(2, 2) = one_sixt * geom[0].Z() + two_third * geom[1].Z() + one_sixt * geom[2].Z();
    //fourth (centroid)
    pos(3, 0) = one_third * geom[0].X() + one_third * geom[1].X() + one_third * geom[2].X();
    pos(3, 1) = one_third * geom[0].Y() + one_third * geom[1].Y() + one_third * geom[2].Y();
    pos(3, 2) = one_third * geom[0].Z() + one_third * geom[1].Z() + one_third * geom[2].Z();
}
/// Compute the Gauss points
/** For a triangle: 7 sampling points — three biased towards each corner,
 * the barycenter, and three biased away from each corner.
 *
 * @see PostReseed
 */
void ComputeGaussPointPositionsForPostReseed( Geometry< Node < 3 > >& geom,
BoundedMatrix<double, 7, 3 > & pos,
BoundedMatrix<double, 7, 3 > & N ) //2d
{
const double one_third = 1.0 / 3.0;
const double one_eight = 0.12;       // tuned small corner weight
const double three_quarters = 0.76;  // tuned large corner weight
const double mid_weight = 0.44;      // weight for the edge-biased points
// shape-function table, one row per sampling point
N(0, 0) = one_eight;       N(0, 1) = one_eight;       N(0, 2) = three_quarters;
N(1, 0) = three_quarters;  N(1, 1) = one_eight;       N(1, 2) = one_eight;
N(2, 0) = one_eight;       N(2, 1) = three_quarters;  N(2, 2) = one_eight;
N(3, 0) = one_third;       N(3, 1) = one_third;       N(3, 2) = one_third;
N(4, 0) = one_eight;       N(4, 1) = mid_weight;      N(4, 2) = mid_weight;
N(5, 0) = mid_weight;      N(5, 1) = one_eight;       N(5, 2) = mid_weight;
N(6, 0) = mid_weight;      N(6, 1) = mid_weight;      N(6, 2) = one_eight;
// physical position of each point: pos_j = sum_i N(j,i) * node_i
for (unsigned int j = 0; j != 7; j++)
{
    for (unsigned int k = 0; k != 3; k++)
        pos(j, k) = N(j, 0) * geom[0].Coordinates()[k]
                  + N(j, 1) * geom[1].Coordinates()[k]
                  + N(j, 2) * geom[2].Coordinates()[k];
}
}
/// Compute the Gauss points
/** For a tetrahedron: 9 sampling points — four biased towards each node,
 * the barycenter, and four biased away from each node.
 *
 * @see PostReseed
 */
void ComputeGaussPointPositionsForPostReseed( Geometry< Node < 3 > >& geom,
BoundedMatrix<double, 9, 3 > & pos,
BoundedMatrix<double, 9, 4 > & N ) //3D
{
const double one_quarter = 0.25;
const double small_fraction = 0.1;  // tuned small node weight
const double big_fraction = 0.7;    // tuned large node weight
const double mid_fraction = 0.3;    // tuned intermediate node weight
// rows 0-3: one particle biased towards each of the 4 nodes
for (unsigned int r = 0; r != 4; r++)
    for (unsigned int c = 0; c != 4; c++)
        N(r, c) = (r == c) ? big_fraction : small_fraction;
// row 4: the barycenter
for (unsigned int c = 0; c != 4; c++)
    N(4, c) = one_quarter;
// rows 5-8: one particle biased away from each of the 4 nodes
for (unsigned int r = 0; r != 4; r++)
    for (unsigned int c = 0; c != 4; c++)
        N(5 + r, c) = (r == c) ? small_fraction : mid_fraction;
// accumulate the physical positions node by node
pos = ZeroMatrix(9, 3);
for (unsigned int i = 0; i != 4; i++) //going through the 4 nodes
{
    array_1d<double, 3 > & coordinates = geom[i].Coordinates();
    for (unsigned int j = 0; j != 9; j++) //going through the 9 particles
    {
        for (unsigned int k = 0; k != 3; k++) //x,y,z
            pos(j, k) += N(j, i) * coordinates[k];
    }
}
}
/// Compute the Gauss points
/** For a triangle: 3 sampling points, each biased towards one node
 * (weight 0.5 on "its" node, 0.25 on the other two).
 *
 * @see PreReseed
 */
void ComputeGaussPointPositionsForPreReseed( Geometry< Node < 3 > >& geom,
BoundedMatrix<double, 3, 3 > & pos,
BoundedMatrix<double, 3, 3 > & N ) //2D
{
// diagonal-heavy shape-function table
for (unsigned int r = 0; r != 3; r++)
    for (unsigned int c = 0; c != 3; c++)
        N(r, c) = (r == c) ? 0.5 : 0.25;
// physical position of each point: pos_j = sum_i N(j,i) * node_i
for (unsigned int j = 0; j != 3; j++)
{
    for (unsigned int k = 0; k != 3; k++)
        pos(j, k) = N(j, 0) * geom[0].Coordinates()[k]
                  + N(j, 1) * geom[1].Coordinates()[k]
                  + N(j, 2) * geom[2].Coordinates()[k];
}
}
/// Compute the Gauss points
/** For a tetrahedron: 4 sampling points, each closer to one node
 * (weight 0.4) and equidistant to the other three (weight 0.2).
 *
 * @see PreReseed
 */
void ComputeGaussPointPositionsForPreReseed( Geometry< Node < 3 > >& geom,
BoundedMatrix<double, 4, 3 > & pos,
BoundedMatrix<double, 4, 4 > & N ) //3D
{
// diagonal-heavy shape-function table
for (unsigned int r = 0; r != 4; r++)
    for (unsigned int c = 0; c != 4; c++)
        N(r, c) = (r == c) ? 0.4 : 0.2;
// accumulate the physical positions node by node
pos = ZeroMatrix(4, 3);
for (unsigned int i = 0; i != 4; i++) //going through the 4 nodes
{
    array_1d<double, 3 > & coordinates = geom[i].Coordinates();
    for (unsigned int j = 0; j != 4; j++) //going through the 4 particles
    {
        for (unsigned int k = 0; k != 3; k++) //x,y,z
            pos(j, k) += N(j, i) * coordinates[k];
    }
}
}
/// Compute the Gauss points
/** Triangular lattice of 9+8+...+1 = 45 points in area coordinates,
 * stepping 0.1 per row/column starting at 0.05.
 */
void ComputeGaussPointPositions_45( Geometry< Node < 3 > >& geom,
BoundedMatrix<double, 45, 3 > & pos,
BoundedMatrix<double, 45, 3 > & N )
{
unsigned int particle = 0;
for (unsigned int i = 0; i != 9; i++)
{
    for (unsigned int j = 0; j != (9 - i); j++)
    {
        // area coordinates of this lattice point
        const double n0 = 0.05 + double(i) * 0.1;
        const double n1 = 0.05 + double(j) * 0.1;
        N(particle, 0) = n0;
        N(particle, 1) = n1;
        N(particle, 2) = 1.0 - (n1 + n0);
        // interpolate the physical coordinates
        for (unsigned int k = 0; k != 3; k++)
            pos(particle, k) = n0 * geom[0].Coordinates()[k]
                             + n1 * geom[1].Coordinates()[k]
                             + N(particle, 2) * geom[2].Coordinates()[k];
        particle++;
    }
}
}
/// Compute the Gauss points
/** Triangular lattice of 5+4+3+2+1 = 15 points in area coordinates,
 * stepping 0.2 per row/column starting at 0.05.
 */
void ComputeGaussPointPositions_initial( Geometry< Node < 3 > >& geom,
BoundedMatrix<double, 15, 3 > & pos,
BoundedMatrix<double, 15, 3 > & N ) //2D
{
unsigned int particle = 0;
for (unsigned int i = 0; i != 5; i++)
{
    for (unsigned int j = 0; j != (5 - i); j++)
    {
        // area coordinates of this lattice point
        const double n0 = 0.05 + double(i) * 0.2;
        const double n1 = 0.05 + double(j) * 0.2;
        N(particle, 0) = n0;
        N(particle, 1) = n1;
        N(particle, 2) = 1.0 - (n1 + n0);
        // interpolate the physical coordinates
        for (unsigned int k = 0; k != 3; k++)
            pos(particle, k) = n0 * geom[0].Coordinates()[k]
                             + n1 * geom[1].Coordinates()[k]
                             + N(particle, 2) * geom[2].Coordinates()[k];
        particle++;
    }
}
}
/// Compute the Gauss points
/** Tetrahedral lattice of 10+6+3+1 = 20 points, built layer by layer:
 * the first layer is a triangle of 4x4 (10 points), then 6, 3 and 1.
 * Each barycentric weight is 0.27 * (0.175 + layer index); the fourth is
 * whatever remains so the weights sum to 1.
 */
void ComputeGaussPointPositions_initial( Geometry< Node < 3 > >& geom,
BoundedMatrix<double, 20, 3 > & pos,
BoundedMatrix<double, 20, 4 > & N ) //3D
{
const double fraction_increment = 0.27;
const double base_offset = 0.175;
unsigned int particle = 0;
for (unsigned int i = 0; i != 4; i++)
{
    for (unsigned int j = 0; j != (4 - i); j++)
    {
        for (unsigned int k = 0; k != (4 - i - j); k++)
        {
            const double n0 = fraction_increment * (base_offset + double(i));
            const double n1 = fraction_increment * (base_offset + double(j));
            const double n2 = fraction_increment * (base_offset + double(k));
            N(particle, 0) = n0;
            N(particle, 1) = n1;
            N(particle, 2) = n2;
            // last weight takes what is left of the total of 1
            N(particle, 3) = 1.0 - (n0 + n1 + n2);
            // interpolate the physical coordinates
            for (unsigned int c = 0; c != 3; c++)
                pos(particle, c) = n0 * geom[0].Coordinates()[c]
                                 + n1 * geom[1].Coordinates()[c]
                                 + n2 * geom[2].Coordinates()[c]
                                 + N(particle, 3) * geom[3].Coordinates()[c];
            particle++;
        }
    }
}
}
/// check function
/** Verifies that the nodes of the model part carry every nodal variable this
 * utility reads or writes. The check is performed on the first node only,
 * which is assumed representative of the whole model part.
 * NOTE(review): assumes the model part has at least one node — NodesBegin()
 * is dereferenced unconditionally; confirm callers guarantee this.
 * @return 0 when all required variables are present (the KRATOS_CHECK_*
 *         macros are expected to raise an error otherwise).
 */
virtual int Check()
{
KRATOS_TRY
// representative node: the variable checks below run against it only
Node<3>& rnode = *mrModelPart.NodesBegin();
KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(mVectorVar1, rnode)
KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(mScalarVar1, rnode)
KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(VELOCITY, rnode)
KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(DELTA_VECTOR1, rnode)
KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(DELTA_SCALAR1, rnode)
KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(PROJECTED_VECTOR1, rnode)
KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(PROJECTED_SCALAR1, rnode)
KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(MEAN_SIZE, rnode)
KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(YP, rnode)
return 0;
KRATOS_CATCH("")
}
/// Member variables
ModelPart& mrModelPart;  // model part this utility operates on (see Check())
int mNParticles;  // NOTE(review): presumably the target particle count per element — confirm in the constructor (outside this chunk)
int mNElems;  // NOTE(review): presumably the number of elements — confirm in the constructor
int mOffset;
int mMaxSubSteps;
double mMaxSubStepDt;
int mMaxNumberOfParticles;
std::vector< ShallowParticle > mParticlesVector;  // backing storage for the particles
int mLastElemId;
bool mOddTimeStep;
bool mParticlePrintingToolInitialized;
unsigned int mLastNodeId;
DenseVector<int> mNumOfParticlesInElems;  // NOTE(review): per-element particle counters, judging by the name
DenseVector<int> mNumOfParticlesInElemsAux;  // auxiliary copy of the counters above
DenseVector<ParticlePointerVector> mVectorOfParticlePointersVectors;
typename BinsObjectDynamic<Configure>::Pointer mpBinsObjectDynamic;  // search structure (Bins)
Variable<double> mScalarVar1;  // scalar nodal variable handled by this utility (checked in Check())
Variable<array_1d<double,3>> mVectorVar1;  // vector nodal variable handled by this utility (checked in Check())
std::string m_scalar_var1_name;  // name used to resolve mScalarVar1
std::string m_vector_var1_name;  // name used to resolve mVectorVar1
}; // class MoveShallowWaterParticleUtility
} // namespace Kratos.
#endif // KRATOS_MOVE_SHALLOW_WATER_PARTICLE_UTILITY_H_INCLUDED defined
|
test_openacc.c | #include "parallel_algebra_acc.h"
#include <stdlib.h> /* malloc, free, rand */
#include <stdio.h>
#include <time.h>
#include <omp.h>
/* Fill all `size` entries of `v` with `value`. Always returns 0. */
int set_vector(float * v, long size, float value){
    for (long i = 0; i < size; i++)
        v[i] = value;
    return 0;
}
/* Count the entries of `v` that differ from `value` by 0.0001 or more,
 * printing the first and last values as a debugging aid.
 * Returns 0 when every entry matches, otherwise the mismatch count.
 * Bug fix: the original compared the *signed* difference against the
 * tolerance (`v[i] - value < 0.0001`), so any entry far BELOW `value`
 * was silently accepted; the comparison now uses the absolute difference. */
int test_result (float * v, long size, float value){
    long res = 0;
    for (long i = 0; i < size; i++){
        res += fabsf(v[i] - value) < 0.0001f ? 0 : 1;
    }
    if (size > 0)  /* avoid reading v[-1] on an empty vector */
        printf("First and last values: %f, %f\n", *v, *(v + size - 1));
    return res;
}
/* out[i] = a*x[i] + b*y[i] for every i in [0, size). Returns 0. */
int serial_saxpby(float * x, float * y, float * out, float a, float b, long size){
    for (long i = 0; i < size; i++)
        out[i] = a * x[i] + b * y[i];
    return 0;
}
/* Same as serial_saxpby, but the loop iterations are shared among OpenMP
 * threads. The combined `parallel for` is equivalent to the previous
 * parallel region containing a single `for`; the pragma is ignored when
 * OpenMP is disabled. Returns 0. */
int omp_saxpby(float * x, float * y, float * out, float a, float b, long size){
    #pragma omp parallel for
    for (long i = 0; i < size; i++)
        out[i] = a * x[i] + b * y[i];
    return 0;
}
/* Seconds elapsed between two clock_gettime() timestamps, with
 * nanosecond resolution. */
double elapsed_time(struct timespec * start, struct timespec * finish){
    return (double)(finish->tv_sec - start->tv_sec)
         + (finish->tv_nsec - start->tv_nsec) / 1000000000.0;
}
/* Benchmark driver: runs the accelerator (saxpby), OpenMP and serial SAXPBY
 * kernels over `Mb` mebi-elements of float data, timing each and verifying
 * the results. Usage: prog [Mb]. Exit code is the accelerator run's
 * mismatch count (0 on success).
 * Fixes over the previous revision:
 *  - `argc >= 1` always dereferenced argv[1], which is NULL/out of bounds
 *    when no argument is given (undefined behavior); now guarded and the
 *    sscanf result is checked.
 *  - `int size = 1024 * 1024 * Mb` overflows int for Mb >= 2048; size is
 *    now computed in long arithmetic.
 *  - removed the unused before/after/msec locals; buffers are freed. */
int main(int argc, char ** argv){
    int Mb = 100;
    if (argc > 1){
        if (sscanf(argv[1], "%d", &Mb) != 1 || Mb <= 0)
            Mb = 100;  /* fall back to the default on bad input */
    }
    printf("Test on %d Mb\n", Mb);
    long size = 1024L * 1024L * (long)Mb;
    int N = 10;  /* timed iterations per kernel */
    struct timespec start, finish;
    double dt;
    long res[3];
    float *restrict v1 = (float *) malloc(size * sizeof(float));
    if ( v1 == NULL ){
        exit (-1);
    }
    float *restrict v2 = (float *) malloc(size * sizeof(float));
    if ( v2 == NULL ){
        exit (-1);
    }
    set_vector(v1, size, 1);
    set_vector(v2, size, -2);
    float a = 1.1;
    float b = 1.41;
    float *restrict out = (float *) malloc(size * sizeof(float));
    if ( out == NULL ){
        exit (-1);
    }
    /* expected everywhere: a*v1 + b*v2 = 1.1*1 + 1.41*(-2) = -1.72 */
    printf("Call to acc_saxpby ... ");
    clock_gettime(CLOCK_MONOTONIC, &start);
    for (int i = 0; i < N; i++)
        saxpby(v1, v2, out, a, b, size);
    clock_gettime(CLOCK_MONOTONIC, &finish);
    dt = elapsed_time(&start, &finish);
    printf(": %f s/iter, total time %f s \n", dt/(double)N, dt);
    res[0] = test_result(out, size, -1.72);
    set_vector(out, size, -1.);  /* poison the buffer between runs */
    printf("Call to omp_saxpby ... ");
    clock_gettime(CLOCK_MONOTONIC, &start);
    for (int i = 0; i < N; i++)
        omp_saxpby(v1, v2, out, a, b, size);
    clock_gettime(CLOCK_MONOTONIC, &finish);
    dt = elapsed_time(&start, &finish);
    printf(": %f s/iter, total time %f s \n", dt/(double)N, dt);
    res[1] = test_result(out, size, -1.72);
    set_vector(out, size, -.0002);  /* poison the buffer between runs */
    printf("Call to serial_saxpby ... ");
    clock_gettime(CLOCK_MONOTONIC, &start);
    for (int i = 0; i < N; i++)
        serial_saxpby(v1, v2, out, a, b, size);
    clock_gettime(CLOCK_MONOTONIC, &finish);
    dt = elapsed_time(&start, &finish);
    printf(": %f s/iter, total time %f s \n", dt/(double)N, dt);
    res[2] = test_result(out, size, -1.72);
    printf("The array res %d %d %d\n", (int)res[0], (int)res[1], (int)res[2]);
    free(v1);
    free(v2);
    free(out);
    return (int) res[0];
}
|
ZQ_CNN_BBoxUtils.h | #ifndef _ZQ_CNN_BBOX_UTILS_H_
#define _ZQ_CNN_BBOX_UTILS_H_
#pragma once
#include "ZQ_CNN_BBox.h"
#include <algorithm>
namespace ZQ
{
class ZQ_CNN_BBoxUtils
{
public:
/// Encoding scheme used when decoding location predictions against prior boxes.
enum PriorBoxCodeType
{
PriorBoxCodeType_CORNER = 0,   // offsets added directly to the prior's corners
PriorBoxCodeType_CORNER_SIZE,  // corner offsets scaled by the prior's width/height
PriorBoxCodeType_CENTER_SIZE   // center offset plus exp-scaled width/height
};
/// Comparator used to sort detection scores in ascending order
/// (the best candidate ends up at the back of the vector).
static bool _cmp_score(const ZQ_CNN_OrderScore& lsh, const ZQ_CNN_OrderScore& rsh)
{
return lsh.score < rsh.score;
}
/// Non-maximum suppression over candidate bounding boxes.
/** Repeatedly keeps the highest-scoring remaining box and suppresses every
 * other box whose overlap with it exceeds overlap_threshold. Overlap is
 * intersection/union ("Union") or intersection/min-area ("Min").
 * A kept box flagged need_check_overlap_count additionally requires at
 * least overlap_count_thresh suppressed overlaps to survive.
 * Suppressed boxes are erased from boundingBox; bboxScore is consumed.
 * Fixes over the previous revision:
 *  - the OpenMP path raced on the function-scope maxX/maxY/minX/minY/IOU
 *    scratch variables (shared between threads) and on cur_overlap; the
 *    scratch is now iteration-local and cur_overlap is a reduction.
 *  - the chunk size used ceil() AFTER integer division (a no-op) and could
 *    be 0 when box_num < thread_num; it now uses an integer ceiling with a
 *    minimum of 1.
 */
static void _nms(std::vector<ZQ_CNN_BBox> &boundingBox, std::vector<ZQ_CNN_OrderScore> &bboxScore, const float overlap_threshold,
const std::string& modelname = "Union", int overlap_count_thresh = 0, int thread_num = 1)
{
if (boundingBox.empty())
{
    return;
}
std::vector<int> heros;        // indices of the kept boxes
std::vector<int> overlap_num;  // suppressed-overlap count per kept box
// sort ascending so the best candidate is at the back
sort(bboxScore.begin(), bboxScore.end(), _cmp_score);
while (bboxScore.size() > 0)
{
    int order = bboxScore.back().oriOrder;
    bboxScore.pop_back();
    if (order < 0)continue;  // already suppressed (lazy-deleted)
    heros.push_back(order);
    int cur_overlap = 0;
    boundingBox[order].exist = false;  // so the kept box is not compared with itself
    int box_num = boundingBox.size();
    if (thread_num == 1)
    {
        for (int num = 0; num < box_num; num++)
        {
            if (boundingBox[num].exist)
            {
                // intersection rectangle between candidate and kept box
                float maxY = __max(boundingBox[num].row1, boundingBox[order].row1);
                float maxX = __max(boundingBox[num].col1, boundingBox[order].col1);
                float minY = __min(boundingBox[num].row2, boundingBox[order].row2);
                float minX = __min(boundingBox[num].col2, boundingBox[order].col2);
                float inter_w = __max(minX - maxX + 1, 0);
                float inter_h = __max(minY - maxY + 1, 0);
                float IOU = inter_w * inter_h;
                float area1 = boundingBox[num].area;
                float area2 = boundingBox[order].area;
                if (!modelname.compare("Union"))
                    IOU = IOU / (area1 + area2 - IOU);
                else if (!modelname.compare("Min"))
                    IOU = IOU / __min(area1, area2);
                if (IOU > overlap_threshold)
                {
                    cur_overlap++;
                    boundingBox[num].exist = false;
                    // lazy-delete the matching score entry
                    for (std::vector<ZQ_CNN_OrderScore>::iterator it = bboxScore.begin(); it != bboxScore.end(); it++)
                    {
                        if ((*it).oriOrder == num)
                        {
                            (*it).oriOrder = -1;
                            break;
                        }
                    }
                }
            }
        }
    }
    else
    {
        // integer ceiling, never 0 (schedule chunk size must be positive)
        int chunk_size = __max((box_num + thread_num - 1) / thread_num, 1);
#pragma omp parallel for schedule(static, chunk_size) num_threads(thread_num) reduction(+:cur_overlap)
        for (int num = 0; num < box_num; num++)
        {
            if (boundingBox.at(num).exist)
            {
                // all scratch variables are iteration-local (thread-private)
                float maxY = __max(boundingBox[num].row1, boundingBox[order].row1);
                float maxX = __max(boundingBox[num].col1, boundingBox[order].col1);
                float minY = __min(boundingBox[num].row2, boundingBox[order].row2);
                float minX = __min(boundingBox[num].col2, boundingBox[order].col2);
                float inter_w = __max(minX - maxX + 1, 0);
                float inter_h = __max(minY - maxY + 1, 0);
                float IOU = inter_w * inter_h;
                float area1 = boundingBox[num].area;
                float area2 = boundingBox[order].area;
                if (!modelname.compare("Union"))
                    IOU = IOU / (area1 + area2 - IOU);
                else if (!modelname.compare("Min"))
                    IOU = IOU / __min(area1, area2);
                if (IOU > overlap_threshold)
                {
                    cur_overlap++;
                    boundingBox.at(num).exist = false;
                    // each thread touches a distinct `num`, so these writes do not collide
                    for (std::vector<ZQ_CNN_OrderScore>::iterator it = bboxScore.begin(); it != bboxScore.end(); it++)
                    {
                        if ((*it).oriOrder == num)
                        {
                            (*it).oriOrder = -1;
                            break;
                        }
                    }
                }
            }
        }
    }
    overlap_num.push_back(cur_overlap);
}
// resurrect the kept boxes (subject to the overlap-count requirement)
for (size_t i = 0; i < heros.size(); i++)
{
    if (!boundingBox[heros[i]].need_check_overlap_count
        || overlap_num[i] >= overlap_count_thresh)
        boundingBox[heros[i]].exist = true;
}
// erase everything that is still suppressed
for (int i = (int)boundingBox.size() - 1; i >= 0; i--)
{
    if (!boundingBox[i].exist)
    {
        boundingBox.erase(boundingBox.begin() + i);
    }
}
}
/// Apply regression offsets to each box and optionally square it.
/** For every existing box: shift the corners by regreCoord (scaled by the
 * box width/height), optionally grow the shorter side around the center so
 * the box becomes square, round to integer pixel coordinates, clamp to the
 * image, and refresh the cached area.
 * @param vecBbox  boxes, updated in place
 * @param width    image width in pixels (valid columns are 0..width-1)
 * @param height   image height in pixels (valid rows are 0..height-1)
 * @param square   when true the output boxes are square
 * Bug fix: the boundary clamp used `> height` / `> width`, which let
 * row2 == height (col2 == width) — one past the last valid pixel — slip
 * through unclamped; the comparison is now `>=`.
 */
static void _refine_and_square_bbox(std::vector<ZQ_CNN_BBox> &vecBbox, const int width, const int height,
bool square = true)
{
float bbw = 0, bbh = 0, bboxSize = 0;
float h = 0, w = 0;
float x1 = 0, y1 = 0, x2 = 0, y2 = 0;
for (std::vector<ZQ_CNN_BBox>::iterator it = vecBbox.begin(); it != vecBbox.end(); it++)
{
    if ((*it).exist)
    {
        // current (inclusive) box size
        bbh = (*it).row2 - (*it).row1 + 1;
        bbw = (*it).col2 - (*it).col1 + 1;
        // regression offsets are relative to the box size
        y1 = (*it).row1 + (*it).regreCoord[1] * bbh;
        x1 = (*it).col1 + (*it).regreCoord[0] * bbw;
        y2 = (*it).row2 + (*it).regreCoord[3] * bbh;
        x2 = (*it).col2 + (*it).regreCoord[2] * bbw;
        w = x2 - x1 + 1;
        h = y2 - y1 + 1;
        if (square)
        {
            // grow the shorter side around the center to make the box square
            bboxSize = (h > w) ? h : w;
            y1 = y1 + h*0.5 - bboxSize*0.5;
            x1 = x1 + w*0.5 - bboxSize*0.5;
            (*it).row2 = round(y1 + bboxSize - 1);
            (*it).col2 = round(x1 + bboxSize - 1);
            (*it).row1 = round(y1);
            (*it).col1 = round(x1);
        }
        else
        {
            (*it).row2 = round(y1 + h - 1);
            (*it).col2 = round(x1 + w - 1);
            (*it).row1 = round(y1);
            (*it).col1 = round(x1);
        }
        // clamp to the valid pixel range [0, dim-1]
        if ((*it).row1 < 0)(*it).row1 = 0;
        if ((*it).col1 < 0)(*it).col1 = 0;
        if ((*it).row2 >= height)(*it).row2 = height - 1;
        if ((*it).col2 >= width)(*it).col2 = width - 1;
        it->area = (it->row2 - it->row1)*(it->col2 - it->col1);
    }
}
}
/// Make every existing box square, keeping its center.
/** Grows the shorter side to match the longer one, rounds to integer pixel
 * coordinates, clamps to the image, and refreshes the cached area.
 * @param vecBbox  boxes, updated in place
 * @param width    image width in pixels (valid columns are 0..width-1)
 * @param height   image height in pixels (valid rows are 0..height-1)
 * Bug fix: the boundary clamp used `> height` / `> width`, which let
 * row2 == height (col2 == width) — one past the last valid pixel — slip
 * through unclamped; the comparison is now `>=`.
 */
static void _square_bbox(std::vector<ZQ_CNN_BBox> &vecBbox, const int width, const int height)
{
float bbw = 0, bbh = 0, bboxSize = 0;
float h = 0, w = 0;
float x1 = 0, y1 = 0, x2 = 0, y2 = 0;
for (std::vector<ZQ_CNN_BBox>::iterator it = vecBbox.begin(); it != vecBbox.end(); it++)
{
    if ((*it).exist)
    {
        y1 = (*it).row1;
        x1 = (*it).col1;
        h = (*it).row2 - (*it).row1 + 1;
        w = (*it).col2 - (*it).col1 + 1;
        // grow the shorter side around the center to make the box square
        bboxSize = (h > w) ? h : w;
        y1 = y1 + h*0.5 - bboxSize*0.5;
        x1 = x1 + w*0.5 - bboxSize*0.5;
        (*it).row2 = round(y1 + bboxSize - 1);
        (*it).col2 = round(x1 + bboxSize - 1);
        (*it).row1 = round(y1);
        (*it).col1 = round(x1);
        // clamp to the valid pixel range [0, dim-1]
        if ((*it).row1 < 0)(*it).row1 = 0;
        if ((*it).col1 < 0)(*it).col1 = 0;
        if ((*it).row2 >= height)(*it).row2 = height - 1;
        if ((*it).col2 >= width)(*it).col2 = width - 1;
        it->area = (it->row2 - it->row1)*(it->col2 - it->col1);
    }
}
}
static bool DecodeBBoxesAll(const std::vector<ZQ_CNN_LabelBBox>& all_loc_preds,
const std::vector<ZQ_CNN_NormalizedBBox>& prior_bboxes,
const std::vector<std::vector<float> >& prior_variances,
const int num, const bool share_location,
const int num_loc_classes, const int background_label_id,
const PriorBoxCodeType code_type, const bool variance_encoded_in_target,
const bool clip, std::vector<ZQ_CNN_LabelBBox>* all_decode_bboxes)
{
if (all_loc_preds.size() != num)
return false;
all_decode_bboxes->clear();
all_decode_bboxes->resize(num);
for (int i = 0; i < num; ++i) {
// Decode predictions into bboxes.
ZQ_CNN_LabelBBox& decode_bboxes = (*all_decode_bboxes)[i];
for (int c = 0; c < num_loc_classes; ++c)
{
int label = share_location ? -1 : c;
if (label == background_label_id) {
// Ignore background class.
continue;
}
if (all_loc_preds[i].find(label) == all_loc_preds[i].end())
{
// Something bad happened if there are no predictions for current label.
//LOG(FATAL) << "Could not find location predictions for label " << label;
}
const std::vector<ZQ_CNN_NormalizedBBox>& label_loc_preds = all_loc_preds[i].find(label)->second;
if (!DecodeBBoxes(prior_bboxes, prior_variances,
code_type, variance_encoded_in_target, clip,
label_loc_preds, &(decode_bboxes[label])))
return false;
}
}
return true;
}
static bool DecodeBBoxes(
const std::vector<ZQ_CNN_NormalizedBBox>& prior_bboxes,
const std::vector<std::vector<float> >& prior_variances,
const PriorBoxCodeType code_type, const bool variance_encoded_in_target,
const bool clip_bbox, const std::vector<ZQ_CNN_NormalizedBBox>& bboxes,
std::vector<ZQ_CNN_NormalizedBBox>* decode_bboxes)
{
if (prior_bboxes.size() != prior_variances.size())
return false;
if (prior_bboxes.size() != bboxes.size())
return false;
int num_bboxes = prior_bboxes.size();
if (num_bboxes >= 1)
{
if (prior_variances[0].size() != 4)
return false;
}
decode_bboxes->clear();
for (int i = 0; i < num_bboxes; ++i)
{
ZQ_CNN_NormalizedBBox decode_bbox;
if (!DecodeBBox(prior_bboxes[i], prior_variances[i], code_type,
variance_encoded_in_target, clip_bbox, bboxes[i], &decode_bbox))
return false;
decode_bboxes->push_back(decode_bbox);
}
return true;
}
/// Decode a single predicted box against its prior (anchor) box.
/** Supports the three encodings in PriorBoxCodeType. When
 * variance_encoded_in_target is true the raw prediction offsets are used
 * as-is; otherwise they are scaled by the 4 entries of prior_variance.
 * The decoded box's size field is refreshed via BBoxSize, and the box is
 * clamped to [0,1] when clip_bbox is set.
 * @return false only for an unknown code type (degenerate priors are
 *         reported via printf but still processed)
 */
static bool DecodeBBox(
const ZQ_CNN_NormalizedBBox& prior_bbox, const std::vector<float>& prior_variance,
const PriorBoxCodeType code_type, const bool variance_encoded_in_target,
const bool clip_bbox, const ZQ_CNN_NormalizedBBox& bbox,
ZQ_CNN_NormalizedBBox* decode_bbox)
{
if (code_type == PriorBoxCodeType_CORNER)
{
if (variance_encoded_in_target)
{
// variance is encoded in target, we simply need to add the offset
// predictions.
decode_bbox->col1 = prior_bbox.col1 + bbox.col1;
decode_bbox->col2 = prior_bbox.col2 + bbox.col2;
decode_bbox->row1 = prior_bbox.row1 + bbox.row1;
decode_bbox->row2 = prior_bbox.row2 + bbox.row2;
}
else
{
// variance is encoded in bbox, we need to scale the offset accordingly.
decode_bbox->col1 = prior_bbox.col1 + prior_variance[0] * bbox.col1;
decode_bbox->row1 = prior_bbox.row1 + prior_variance[1] * bbox.row1;
decode_bbox->col2 = prior_bbox.col2 + prior_variance[2] * bbox.col2;
decode_bbox->row2 = prior_bbox.row2 + prior_variance[3] * bbox.row2;
}
}
else if (code_type == PriorBoxCodeType_CENTER_SIZE)
{
// prior box expressed as center + size
float prior_width = prior_bbox.col2 - prior_bbox.col1;
if (prior_width < 0)
{
// degenerate prior: logged but not treated as a hard error
// return false;
printf("x = [%f , %f]\n", prior_bbox.col1, prior_bbox.col2);
}
float prior_height = prior_bbox.row2 - prior_bbox.row1;
if (prior_height < 0)
{
//return false;
printf("y = [%f , %f]\n", prior_bbox.row1, prior_bbox.row2);
}
float prior_center_x = (prior_bbox.col1 + prior_bbox.col2) / 2.;
float prior_center_y = (prior_bbox.row1 + prior_bbox.row2) / 2.;
float decode_bbox_center_x, decode_bbox_center_y;
float decode_bbox_width, decode_bbox_height;
if (variance_encoded_in_target)
{
// variance is encoded in target, we simply need to retore the offset
// predictions.
decode_bbox_center_x = bbox.col1 * prior_width + prior_center_x;
decode_bbox_center_y = bbox.row1 * prior_height + prior_center_y;
decode_bbox_width = exp(bbox.col2) * prior_width;
decode_bbox_height = exp(bbox.row2) * prior_height;
}
else
{
// variance is encoded in bbox, we need to scale the offset accordingly.
decode_bbox_center_x = prior_variance[0] * bbox.col1 * prior_width + prior_center_x;
decode_bbox_center_y = prior_variance[1] * bbox.row1 * prior_height + prior_center_y;
decode_bbox_width = exp(prior_variance[2] * bbox.col2) * prior_width;
decode_bbox_height = exp(prior_variance[3] * bbox.row2) * prior_height;
}
// convert center + size back to corners
decode_bbox->col1 = decode_bbox_center_x - decode_bbox_width / 2.;
decode_bbox->row1 = decode_bbox_center_y - decode_bbox_height / 2.;
decode_bbox->col2 = decode_bbox_center_x + decode_bbox_width / 2.;
decode_bbox->row2 = decode_bbox_center_y + decode_bbox_height / 2.;
}
else if (code_type == PriorBoxCodeType_CORNER_SIZE)
{
// corner offsets, scaled by the prior's size
float prior_width = prior_bbox.col2 - prior_bbox.col1;
if (prior_width < 0)
{
// degenerate prior: logged but not treated as a hard error
//return false;
printf("x = [%f , %f]\n", prior_bbox.col1, prior_bbox.col2);
}
float prior_height = prior_bbox.row2 - prior_bbox.row1;
if (prior_height < 0)
{
// return false;
printf("y = [%f , %f]\n", prior_bbox.row1, prior_bbox.row2);
}
if (variance_encoded_in_target)
{
// variance is encoded in target, we simply need to add the offset
// predictions.
decode_bbox->col1 = prior_bbox.col1 + bbox.col1 * prior_width;
decode_bbox->row1 = prior_bbox.row1 + bbox.row1 * prior_height;
decode_bbox->col2 = prior_bbox.col2 + bbox.col2 * prior_width;
decode_bbox->row2 = prior_bbox.row2 + bbox.row2 * prior_height;
}
else
{
// variance is encoded in bbox, we need to scale the offset accordingly.
decode_bbox->col1 = prior_bbox.col1 + prior_variance[0] * bbox.col1 * prior_width;
decode_bbox->row1 = prior_bbox.row1 + prior_variance[1] * bbox.row1 * prior_height;
decode_bbox->col2 = prior_bbox.col2 + prior_variance[2] * bbox.col2 * prior_width;
decode_bbox->row2 = prior_bbox.row2 + prior_variance[3] * bbox.row2 * prior_height;
}
}
else
{
printf("unknown code type\n");
return false;
}
// refresh the cached size and optionally clamp to [0,1]
float bbox_size = BBoxSize(*decode_bbox, true);
decode_bbox->size = bbox_size;
if (clip_bbox)
{
ClipBBox(*decode_bbox, decode_bbox);
}
return true;
}
/// Area of a bounding box.
/** Returns 0 for a degenerate box (xmax &lt; xmin or ymax &lt; ymin).
 * In normalized ([0,1]) coordinates the area is width*height; in pixel
 * coordinates the corners are inclusive, hence the +1 on each side.
 */
static float BBoxSize(const ZQ_CNN_NormalizedBBox& bbox, const bool normalized)
{
if (bbox.col2 < bbox.col1 || bbox.row2 < bbox.row1)
    return 0;
const float width = bbox.col2 - bbox.col1;
const float height = bbox.row2 - bbox.row1;
return normalized ? width * height : (width + 1) * (height + 1);
}
/// Clamp every coordinate of a box to [0,1] and refresh its size.
/** The `difficult` flag is carried over unchanged. bbox and clip_bbox may
 * alias (the callers rely on this).
 */
static void ClipBBox(const ZQ_CNN_NormalizedBBox& bbox, ZQ_CNN_NormalizedBBox* clip_bbox)
{
clip_bbox->col1 = std::min(std::max(bbox.col1, 0.f), 1.f);
clip_bbox->row1 = std::min(std::max(bbox.row1, 0.f), 1.f);
clip_bbox->col2 = std::min(std::max(bbox.col2, 0.f), 1.f);
clip_bbox->row2 = std::min(std::max(bbox.row2, 0.f), 1.f);
clip_bbox->size = BBoxSize(*clip_bbox, true);
clip_bbox->difficult = bbox.difficult;
}
/// Unpack a flat location-prediction array into per-image label boxes.
/** Layout: for each image, num_preds_per_class groups of
 * num_loc_classes * 4 floats (col1,row1,col2,row2 per class).
 * With share_location all classes map to the single label -1.
 * @return false when share_location is set but num_loc_classes != 1
 */
static bool GetLocPredictions(const float* loc_data, const int num,
const int num_preds_per_class, const int num_loc_classes,
const bool share_location, std::vector<ZQ_CNN_LabelBBox>* loc_preds)
{
loc_preds->clear();
if (share_location && num_loc_classes != 1)
    return false;
loc_preds->resize(num);
const int image_stride = num_preds_per_class * num_loc_classes * 4;
for (int i = 0; i < num; i++, loc_data += image_stride)
{
    ZQ_CNN_LabelBBox& label_bbox = (*loc_preds)[i];
    for (int c = 0; c < num_loc_classes; c++)
    {
        const int label = share_location ? -1 : c;
        if (label_bbox.find(label) == label_bbox.end())
            label_bbox[label].resize(num_preds_per_class);
        for (int p = 0; p < num_preds_per_class; p++)
        {
            // 4 coordinates for prediction p, class c
            const float* coords = loc_data + p * num_loc_classes * 4 + c * 4;
            label_bbox[label][p].col1 = coords[0];
            label_bbox[label][p].row1 = coords[1];
            label_bbox[label][p].col2 = coords[2];
            label_bbox[label][p].row2 = coords[3];
        }
    }
}
return true;
}
static void TransformLocations_MXNET(float *out, const float *anchors,
const float *loc_pred, const bool clip,
const float vx, const float vy, const float vw, const float vh)
{
// transform predictions to detection results
float al = anchors[0];
float at = anchors[1];
float ar = anchors[2];
float ab = anchors[3];
float aw = ar - al;
float ah = ab - at;
float ax = (al + ar) / 2.f;
float ay = (at + ab) / 2.f;
float px = loc_pred[0];
float py = loc_pred[1];
float pw = loc_pred[2];
float ph = loc_pred[3];
float ox = px * vx * aw + ax;
float oy = py * vy * ah + ay;
float ow = std::exp(pw * vw) * aw / 2;
float oh = std::exp(ph * vh) * ah / 2;
out[0] = clip ? __max(0, __min(1, ox - ow)) : (ox - ow);
out[1] = clip ? __max(0, __min(1, oy - oh)) : (oy - oh);
out[2] = clip ? __max(0, __min(1, ox + ow)) : (ox + ow);
out[3] = clip ? __max(0, __min(1, oy + oh)) : (oy + oh);
}
static void GetConfidenceScores(const float* conf_data, const int num,
const int num_preds_per_class, const int num_classes,
std::vector<std::map<int, std::vector<float> > >* conf_preds)
{
conf_preds->clear();
conf_preds->resize(num);
for (int i = 0; i < num; ++i)
{
std::map<int, std::vector<float> >& label_scores = (*conf_preds)[i];
for (int p = 0; p < num_preds_per_class; ++p)
{
int start_idx = p * num_classes;
for (int c = 0; c < num_classes; ++c)
{
label_scores[c].push_back(conf_data[start_idx + c]);
}
}
conf_data += num_preds_per_class * num_classes;
}
}
static void GetConfidenceScores(const float* conf_data, const int num,
const int num_preds_per_class, const int num_classes,
const bool class_major, std::vector<std::map<int, std::vector<float> > >* conf_preds)
{
conf_preds->clear();
conf_preds->resize(num);
for (int i = 0; i < num; ++i)
{
std::map<int, std::vector<float> >& label_scores = (*conf_preds)[i];
if (class_major)
{
for (int c = 0; c < num_classes; ++c)
{
label_scores[c].assign(conf_data, conf_data + num_preds_per_class);
conf_data += num_preds_per_class;
}
}
else
{
for (int p = 0; p < num_preds_per_class; ++p)
{
int start_idx = p * num_classes;
for (int c = 0; c < num_classes; ++c)
{
label_scores[c].push_back(conf_data[start_idx + c]);
}
}
conf_data += num_preds_per_class * num_classes;
}
}
}
static void GetPriorBBoxes(const float* prior_data, const int num_priors,
std::vector<ZQ_CNN_NormalizedBBox>* prior_bboxes,
std::vector<std::vector<float> >* prior_variances)
{
prior_bboxes->clear();
prior_variances->clear();
for (int i = 0; i < num_priors; ++i)
{
int start_idx = i * 4;
ZQ_CNN_NormalizedBBox bbox;
bbox.col1 = prior_data[start_idx];
bbox.row1 = prior_data[start_idx + 1];
bbox.col2 = prior_data[start_idx + 2];
bbox.row2 = prior_data[start_idx + 3];
float bbox_size = BBoxSize(bbox, true);
bbox.size = bbox_size;
prior_bboxes->push_back(bbox);
}
for (int i = 0; i < num_priors;i++)
{
int start_idx = (num_priors + i) * 4;
std::vector<float> var;
for (int j = 0; j < 4; ++j)
{
var.push_back(prior_data[start_idx + j]);
}
prior_variances->push_back(var);
}
}
// Greedy non-maximum suppression over one class.
// bboxes/scores: parallel arrays of candidate boxes and confidences.
// score_threshold/top_k: pre-filtering applied by GetMaxScoreIndex.
// nms_threshold/eta: IoU cut-off; eta < 1 shrinks the cut-off adaptively
// after each kept box (Soft-NMS-style schedule).
// indices: out — kept candidate indices, highest score first.
// Returns false only on inconsistent input sizes.
static bool ApplyNMSFast(const std::vector<ZQ_CNN_NormalizedBBox>& bboxes,
    const std::vector<float>& scores, const float score_threshold,
    const float nms_threshold, const float eta, const int top_k,
    std::vector<int>* indices)
{
    // Sanity check.
    if (bboxes.size() != scores.size())
    {
        printf("bboxes and scores have different size.\n");
        return false;
    }
    // Get top_k scores (with corresponding indices), sorted descending.
    std::vector<std::pair<float, int> > score_index_vec;
    GetMaxScoreIndex(scores, score_threshold, top_k, &score_index_vec);
    // Do nms. Walk the sorted candidates with a cursor instead of erasing
    // the vector head each round: erase(begin()) shifted the whole tail and
    // made the loop O(n^2) in the candidate count.
    float adaptive_threshold = nms_threshold;
    indices->clear();
    for (size_t cand = 0; cand < score_index_vec.size(); ++cand)
    {
        const int idx = score_index_vec[cand].second;
        bool keep = true;
        // size_t k: matches indices->size() (was a signed/unsigned mix).
        for (size_t k = 0; k < indices->size() && keep; ++k)
        {
            const int kept_idx = (*indices)[k];
            const float overlap = JaccardOverlap(bboxes[idx], bboxes[kept_idx], true);
            keep = overlap <= adaptive_threshold;
        }
        if (keep)
        {
            indices->push_back(idx);
            // Same schedule as before: tighten only after a kept box.
            if (eta < 1 && adaptive_threshold > 0.5)
            {
                adaptive_threshold *= eta;
            }
        }
    }
    return true;
}
// Collect (score, index) pairs with score > threshold, sorted by score in
// descending order (stable, so ties keep their original index order), and
// truncated to at most top_k entries (top_k <= -1 means "no limit").
static void GetMaxScoreIndex(const std::vector<float>& scores, const float threshold,
    const int top_k, std::vector<std::pair<float, int> >* score_index_vec)
{
    // Generate index score pairs. size_t i avoids the signed/unsigned
    // comparison against scores.size() the old int counter produced.
    for (size_t i = 0; i < scores.size(); ++i)
    {
        if (scores[i] > threshold)
        {
            score_index_vec->push_back(std::make_pair(scores[i], (int)i));
        }
    }
    // Sort the score pair according to the scores in descending order.
    std::stable_sort(score_index_vec->begin(), score_index_vec->end(), SortScorePairDescend<int>);
    // Keep top_k scores if needed (cast, not implicit conversion, for the
    // int-vs-size_t comparison; top_k > -1 guards the cast).
    if (top_k > -1 && (size_t)top_k < score_index_vec->size())
    {
        score_index_vec->resize(top_k);
    }
}
// Comparator for std::sort/std::stable_sort: orders (score, payload) pairs
// so that higher scores come first.
template <typename T>
static bool SortScorePairDescend(const std::pair<float, T>& lhs, const std::pair<float, T>& rhs)
{
    return rhs.first < lhs.first;
}
// Intersection-over-union (IoU) of two boxes. `normalized` selects how a
// width/height is derived from the clipped corners: continuous coordinates
// (col2 - col1) when true, inclusive pixel indices (col2 - col1 + 1) when
// false. Returns 0 when the boxes do not overlap.
static float JaccardOverlap(const ZQ_CNN_NormalizedBBox& bbox1, const ZQ_CNN_NormalizedBBox& bbox2,
    const bool normalized) {
    ZQ_CNN_NormalizedBBox intersect_bbox;
    IntersectBBox(bbox1, bbox2, &intersect_bbox);
    float intersect_width, intersect_height;
    if (normalized)
    {
        intersect_width = intersect_bbox.col2 - intersect_bbox.col1;
        intersect_height = intersect_bbox.row2 - intersect_bbox.row1;
    }
    else
    {
        intersect_width = intersect_bbox.col2 - intersect_bbox.col1 + 1;
        intersect_height = intersect_bbox.row2 - intersect_bbox.row1 + 1;
    }
    if (intersect_width > 0 && intersect_height > 0)
    {
        float intersect_size = intersect_width * intersect_height;
        // NOTE(review): box areas are always computed with normalized=true
        // here, even when this function's `normalized` argument is false —
        // confirm whether any non-normalized caller exists and expects the
        // +1 area convention from BBoxSize instead.
        float bbox1_size = BBoxSize(bbox1, true);
        float bbox2_size = BBoxSize(bbox2, true);
        return intersect_size / (bbox1_size + bbox2_size - intersect_size);
    }
    else
    {
        return 0.;
    }
}
// Compute the rectangular intersection of bbox1 and bbox2. Disjoint boxes
// yield the degenerate box [0, 0, 0, 0].
static void IntersectBBox(const ZQ_CNN_NormalizedBBox& bbox1, const ZQ_CNN_NormalizedBBox& bbox2, ZQ_CNN_NormalizedBBox* intersect_bbox)
{
    const bool disjoint =
        bbox2.col1 > bbox1.col2 || bbox2.col2 < bbox1.col1 ||
        bbox2.row1 > bbox1.row2 || bbox2.row2 < bbox1.row1;
    if (disjoint)
    {
        // Return [0, 0, 0, 0] if there is no intersection.
        intersect_bbox->col1 = 0;
        intersect_bbox->row1 = 0;
        intersect_bbox->col2 = 0;
        intersect_bbox->row2 = 0;
    }
    else
    {
        // Overlap region: tightest corners of the two boxes.
        intersect_bbox->col1 = std::max(bbox1.col1, bbox2.col1);
        intersect_bbox->row1 = std::max(bbox1.row1, bbox2.row1);
        intersect_bbox->col2 = std::min(bbox1.col2, bbox2.col2);
        intersect_bbox->row2 = std::min(bbox1.row2, bbox2.row2);
    }
}
};
}
#endif
|
GB_binop__minus_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__minus_uint64
// A.*B function (eWiseMult): GB_AemultB__minus_uint64
// A*D function (colscale): GB_AxD__minus_uint64
// D*A function (rowscale): GB_DxB__minus_uint64
// C+=B function (dense accum): GB_Cdense_accumB__minus_uint64
// C+=b function (dense accum): GB_Cdense_accumb__minus_uint64
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__minus_uint64
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__minus_uint64
// C=scalar+B GB_bind1st__minus_uint64
// C=scalar+B' GB_bind1st_tran__minus_uint64
// C=A+scalar GB_bind2nd__minus_uint64
// C=A'+scalar GB_bind2nd_tran__minus_uint64
// C type: uint64_t
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = (aij - bij)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x - y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINUS || GxB_NO_UINT64 || GxB_NO_MINUS_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB_Cdense_ewise3_accum__minus_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // All work happens in the shared template; the GB_* macros defined
    // earlier in this file specialize it for the uint64_t MINUS operator.
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__minus_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    // operator compiled out via the GxB_NO_* control flags
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__minus_uint64
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__minus_uint64
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above always returns. Harmless
    // artifact of the code generator; any fix belongs in the generator, not
    // in this auto-generated file.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__minus_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C has the same pattern as A; only its values are computed here.
    uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__minus_uint64
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C has the same pattern as B; only its values are computed here.
    uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__minus_uint64
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // set-union pattern: entries present in A, B, or both
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__minus_uint64
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // set-intersection pattern: entries present in both A and B
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__minus_uint64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t x = (*((uint64_t *) x_input)) ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    int64_t p ;
    // Cx [p] = x - Bx [p] for all anz entries; aliasing Cx == Bx is safe
    // because each iteration touches a single position.
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint64_t bij = Bx [p] ;
        Cx [p] = (x - bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__minus_uint64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    uint64_t y = (*((uint64_t *) y_input)) ;
    // Cx [p] = Ax [p] - y for all anz entries
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint64_t aij = Ax [p] ;
        Cx [p] = (aij - y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint64_t aij = Ax [pA] ; \
    Cx [pC] = (x - aij) ; \
}
GrB_Info GB_bind1st_tran__minus_uint64
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function (generator
    // artifact: the macro is simply re-defined to the same type)
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint64_t aij = Ax [pA] ; \
    Cx [pC] = (aij - y) ; \
}
GrB_Info GB_bind2nd_tran__minus_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t y = (*((const uint64_t *) y_input)) ;
    // the transpose template applies GB_CAST_OP (defined above) per entry
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_unop__minv_int8_int8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__minv_int8_int8)
// op(A') function: GB (_unop_tran__minv_int8_int8)
// C type: int8_t
// A type: int8_t
// cast: int8_t cij = aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 8)
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_SIGNED (x, 8) ;
// casting
#define GB_CAST(z, aij) \
int8_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int8_t z = aij ; \
Cx [pC] = GB_IMINV_SIGNED (z, 8) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__minv_int8_int8)
(
    int8_t *Cx,         // Cx and Ax may be aliased
    const int8_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // full/sparse case: every one of the anz entries is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (int8_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int8_t aij = Ax [p] ;
            int8_t z = aij ;
            Cx [p] = GB_IMINV_SIGNED (z, 8) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            int8_t aij = Ax [p] ;
            int8_t z = aij ;
            Cx [p] = GB_IMINV_SIGNED (z, 8) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__minv_int8_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // transpose template applies GB_CAST_OP (defined above) per entry
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
taskloop_simd_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -triple x86_64-unknown-unknown -verify %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -triple x86_64-unknown-unknown -verify %s -Wuninitialized
// Checks that -Wuninitialized still fires for a use inside a taskloop simd body.
void xxx(int argc) {
  int x; // expected-note {{initialize the variable 'x' to silence this warning}}
#pragma omp taskloop simd
  for (int i = 0; i < 10; ++i)
    argc = x; // expected-warning {{variable 'x' is uninitialized when used here}}
}
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp taskloop simd'}}
#pragma omp taskloop simd
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp taskloop simd'}}
#pragma omp taskloop simd foo
// The directive must be followed by a for loop; anything else is an error.
void test_no_clause() {
  int i;
#pragma omp taskloop simd
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{statement after '#pragma omp taskloop simd' must be a for loop}}
#pragma omp taskloop simd
  ++i;
}
// Branching into or out of the taskloop simd region is diagnosed; jumps that
// stay inside the region (L2) are allowed.
void test_branch_protected_scope() {
  int i = 0;
L1:
  ++i;
  int x[24];
#pragma omp parallel
#pragma omp taskloop simd
  for (i = 0; i < 16; ++i) {
    if (i == 5)
      goto L1; // expected-error {{use of undeclared label 'L1'}}
    else if (i == 6)
      return; // expected-error {{cannot return from OpenMP region}}
    else if (i == 7)
      goto L2;
    else if (i == 8) {
    L2:
      x[i]++;
    }
  }
  if (x[0] == 0)
    goto L2; // expected-error {{use of undeclared label 'L2'}}
  else if (x[1] == 1)
    goto L1;
}
// Unknown trailing tokens are ignored with a warning; a duplicated 'nogroup'
// clause is a hard error.
void test_invalid_clause() {
  int i;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp taskloop simd' are ignored}}
#pragma omp taskloop simd foo bar
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{directive '#pragma omp taskloop simd' cannot contain more than one 'nogroup' clause}}
#pragma omp taskloop simd nogroup nogroup
  for (i = 0; i < 16; ++i)
    ;
}
// Trailing semicolons/commas after the directive or a clause count as extra
// tokens and only warn.
void test_non_identifiers() {
  int i, x;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp taskloop simd' are ignored}}
#pragma omp taskloop simd;
  for (i = 0; i < 16; ++i)
    ;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp taskloop simd' are ignored}}
#pragma omp parallel
#pragma omp taskloop simd linear(x);
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp taskloop simd' are ignored}}
#pragma omp taskloop simd private(x);
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp taskloop simd' are ignored}}
#pragma omp taskloop simd, private(x);
  for (i = 0; i < 16; ++i)
    ;
}
extern int foo();
// Exercises every malformed spelling of 'collapse(...)' plus the check that
// the directive is followed by as many nested loops as the clause demands.
void test_collapse() {
  int i;
#pragma omp parallel
// expected-error@+1 {{expected '('}}
#pragma omp taskloop simd collapse
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp taskloop simd collapse(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp taskloop simd collapse()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp taskloop simd collapse(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp taskloop simd collapse(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-warning@+2 {{extra tokens at the end of '#pragma omp taskloop simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp taskloop simd collapse 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp taskloop simd collapse(4
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp taskloop simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp taskloop simd collapse(4,
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp taskloop simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp taskloop simd collapse(4, )
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp taskloop simd', but found only 1}}
#pragma omp parallel
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp taskloop simd collapse(4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp taskloop simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp taskloop simd collapse(4 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp taskloop simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp taskloop simd collapse(4, , 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp taskloop simd', but found only 1}}
#pragma omp parallel
#pragma omp taskloop simd collapse(4)
  for (int i1 = 0; i1 < 16; ++i1)
    for (int i2 = 0; i2 < 16; ++i2)
      for (int i3 = 0; i3 < 16; ++i3)
        for (int i4 = 0; i4 < 16; ++i4)
          foo();
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp taskloop simd collapse(4, 8)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp taskloop simd', but found only 1}}
#pragma omp parallel
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp taskloop simd collapse(2.5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp taskloop simd collapse(foo())
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp taskloop simd collapse(-5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp taskloop simd collapse(0)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp taskloop simd collapse(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
}
// 'private' clause: argument must be a non-empty list of variable names.
void test_private() {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp taskloop simd private(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp taskloop simd private(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp taskloop simd private(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp taskloop simd private()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp taskloop simd private(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp taskloop simd private(0)
  for (i = 0; i < 16; ++i)
    ;
  int x, y, z;
#pragma omp parallel
#pragma omp taskloop simd private(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp taskloop simd private(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp taskloop simd private(x, y, z)
  for (i = 0; i < 16; ++i) {
    x = y * i + z;
  }
}
// Same parse diagnostics as 'private', exercised on 'lastprivate'.
void test_lastprivate() {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp taskloop simd lastprivate(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp taskloop simd lastprivate(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp taskloop simd lastprivate(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp taskloop simd lastprivate()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp taskloop simd lastprivate(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp taskloop simd lastprivate(0)
  for (i = 0; i < 16; ++i)
    ;
  int x, y, z;
#pragma omp parallel
#pragma omp taskloop simd lastprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp taskloop simd lastprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp taskloop simd lastprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}
// 'firstprivate' parse diagnostics, combinations with 'lastprivate', and a
// simdlen/safelen consistency check.
void test_firstprivate() {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp taskloop simd firstprivate(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp taskloop simd firstprivate(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp taskloop simd firstprivate(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp taskloop simd firstprivate()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp taskloop simd firstprivate(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp taskloop simd firstprivate(0)
  for (i = 0; i < 16; ++i)
    ;
  int x, y, z;
#pragma omp parallel
#pragma omp taskloop simd lastprivate(x) firstprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp taskloop simd lastprivate(x, y) firstprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp taskloop simd lastprivate(x, y, z) firstprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp taskloop simd simdlen(64) safelen(8)
  for (i = 0; i < 16; ++i)
    ;
}
// Loop-variable type restrictions and iteration-variable width narrowing.
void test_loop_messages() {
  float a[100], b[100], c[100];
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp taskloop simd
  for (float fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp taskloop simd
  for (double fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
// expected-warning@+2 {{OpenMP loop iteration variable cannot have more than 64 bits size and will be narrowed}}
#pragma omp taskloop simd
  for (__int128 ii = 0; ii < 10; ii++) {
    c[ii] = a[ii] + b[ii];
  }
}
|
parallel_omp.c |
#include<stdio.h>
#include<stdlib.h>
#include<stdint.h>
#include<omp.h>
/* Fill array[0..array_size-1] with the values 1, 2, ..., array_size. */
void init_array(uint64_t * array, uint64_t array_size){
    uint64_t idx;
    for(idx = 0; idx < array_size; ++idx){
        array[idx] = idx + 1;
    }
}
/* Sum array[start_index..end_index-1], repeated `times` times; the repeat
 * loop is parallelized with an OpenMP sum reduction and a private inner
 * index per thread. Returns times * (partial array sum). */
uint64_t sum_array(uint64_t * array, uint64_t start_index, uint64_t end_index, uint64_t times){
    uint64_t total = 0;
    uint64_t rep, idx;
    #pragma omp parallel for reduction(+ : total) private(idx)
    for(rep = 0; rep < times; ++rep){
        for(idx = start_index; idx < end_index; ++idx){
            total += array[idx];
        }
    }
    return total;
}
/* Verify `sum` against the closed-form expectation
 * times * array_size * (array_size + 1) / 2 (the array holds 1..array_size)
 * and report the outcome on stdout. */
void check_sum(uint64_t array_size, uint64_t times, uint64_t sum){
    uint64_t real_sum = times*((array_size*(array_size +1))/2);
    if(real_sum == sum){
        /* %llu with an explicit cast: the old %ld expected a signed long,
         * which is the wrong signedness for uint64_t and the wrong width on
         * ILP32/Windows targets. */
        printf("Array sum is correct (%llu)\n",(unsigned long long)sum);
    }
    else{
        printf("Array sum is NOT correct (%llu), should have been: %llu\n",
               (unsigned long long)sum, (unsigned long long)real_sum);
    }
}
/* Entry point: parses "array_size times num_threads" from argv, fills an
 * array with 1..array_size, sums it `times` times in parallel, and checks
 * the result against the closed-form value. */
int main (int argc, char *argv[]){
    uint64_t array_size = 0;
    uint64_t times = 0;
    uint64_t num_threads = 0;
    uint64_t * array;
    if(argc != 4){
        printf("usage: %s [array size] [times] [num threads]\n",argv[0]);
        return 0;
    }
    /* strtoull instead of atoi: atoi truncates through int, silently
     * mangling any argument that does not fit in 32 bits. */
    array_size = strtoull(argv[1], NULL, 10);
    times = strtoull(argv[2], NULL, 10);
    num_threads = strtoull(argv[3], NULL, 10);
    printf("Array size: %llu\n",(unsigned long long)array_size);
    printf("Sum times: %llu\n",(unsigned long long)times);
    printf("num_threads: %llu\n",(unsigned long long)num_threads);
    omp_set_num_threads((int)num_threads); /* the OpenMP API takes an int */
    array = (uint64_t*)malloc(array_size * sizeof(uint64_t));
    if(array == NULL){
        printf("Could not allocate Array... bye bye!\n");
        return 0;
    }
    init_array(array, array_size);
    uint64_t sum = sum_array(array, 0, array_size, times);
    check_sum( array_size, times, sum);
    free(array); /* was leaked before */
    return 0;
}
HDF5SubdomainDumper.h | //
// HDF5SubdomainDumper.h
// Cubism
//
// Created by Fabian Wermelinger 2018-08-03
// Copyright 2018 ETH Zurich. All rights reserved.
//
#ifndef HDF5SUBDOMAINDUMPER_H_3C2DKYV4
#define HDF5SUBDOMAINDUMPER_H_3C2DKYV4
#include <cassert>
#include <iostream>
#include <vector>
#include <string>
#include <sstream>
#include "HDF5Dumper.h"
CUBISM_NAMESPACE_BEGIN
///////////////////////////////////////////////////////////////////////////////
// helpers
namespace SubdomainTypes
{
template <typename TGrid>
class Subdomain
{
public:
// Build one TSubdomain per "subdomainN" entry in the configuration. Each
// entry supplies an origin and an extent (3 whitespace-separated values
// each); the requested box is clipped to the simulation domain and resolved
// to start/end cell indices on the (possibly non-uniform) mesh map.
template <typename TSubdomain>
static std::vector<TSubdomain> getEntities(ArgumentParser& parser, TGrid& grid)
{
    typedef typename TGrid::BlockType B;
    // Concept:
    // 1.) Extract bounding box data from parser input subdomains
    // 2.) Compute global bounding box for each subdomain
    // defines the number of subdomains in the config file:
    // nsubdomains 1 # defines one subdomain
    const size_t nSubdomains = parser("nsubdomains").asInt(0);
    std::vector<TSubdomain> subdomains;
    for (size_t i = 0; i < nSubdomains; ++i)
    {
        // 1.)
        const size_t id = i+1;  // subdomain IDs are 1-based in the config keys
        std::ostringstream identifier;
        identifier << "subdomain" << id;
        parser.set_strict_mode();
        // each defined subdomain requires an origin and an extent:
        // subdomain1_origin 0 1.0 2 # requires 3 coordinates, separated by white space (no newlines!)
        // subdomain1_extent 1 1 1 # if subdomain exceeds simulation domain, it will be clipped.
        const std::string input_origin = parser(identifier.str()+"_origin").asString();
        const std::string input_extent = parser(identifier.str()+"_extent").asString();
        parser.unset_strict_mode();
        // parse the three origin coordinates
        std::vector<double> new_origin(0);
        {
            std::istringstream iss(input_origin);
            for (int coord = 0; coord < 3; ++coord)
            {
                assert(!iss.eof());
                double val;
                iss >> val;
                new_origin.push_back(val);
            }
        }
        // parse the three extent values
        std::vector<double> new_extent(0);
        {
            std::istringstream iss(input_extent);
            for (int coord = 0; coord < 3; ++coord)
            {
                assert(!iss.eof());
                double val;
                iss >> val;
                new_extent.push_back(val);
            }
        }
        // 2.)
        int idx_start[3];     // first cell index covered by the box (per axis)
        int idx_end[3];       // last cell index covered by the box (per axis)
        const double* h[3];   // per-axis pointer into the cell-width table
        double sub_start[3];  // box start snapped to the enclosing cell face
        double sub_end[3];    // box end snapped to the enclosing cell face
        for (int coord = 0; coord < 3; ++coord)
        {
            const MeshMap<B>& mmap = grid.getMeshMap(coord);
            const double c_start = mmap.start();
            const double c_end = mmap.end();
            // check and reset if domain violation.
            assert(new_origin[coord] < c_end);
            assert(new_extent[coord] > 0);
            if (new_origin[coord] < c_start)
                new_origin[coord] = c_start; // set to domain start
            if (new_origin[coord]+new_extent[coord] > c_end)
                new_extent[coord] = c_end - new_origin[coord]; // clip to domain end
            // compute bounding box
            const unsigned int ncells = mmap.ncells();
            const double lower_bound = new_origin[coord];
            double c_vertex = c_start;
            // walk cell faces upward until one passes the lower bound
            for (unsigned int cell = 0; cell < ncells; ++cell)
            {
                c_vertex += mmap.cell_width(cell);
                if (lower_bound < c_vertex)
                {
                    idx_start[coord] = cell;
                    // presumably points at the per-cell spacings from the
                    // first covered cell onward — TODO confirm consumer
                    h[coord] = mmap.data_grid_spacing() + cell;
                    sub_start[coord] = c_vertex - mmap.cell_width(cell);
                    break;
                }
            }
            const double upper_bound = lower_bound + new_extent[coord];
            c_vertex = c_end;
            // walk cell faces downward until one passes the upper bound
            for (int cell = ncells-1; cell >= 0; --cell)
            {
                c_vertex -= mmap.cell_width(cell);
                if (upper_bound > c_vertex)
                {
                    idx_end[coord] = cell;
                    sub_end[coord] = c_vertex + mmap.cell_width(cell);
                    break;
                }
            }
            // NOTE(review): idx_start/idx_end stay uninitialized if neither
            // scan breaks; the asserts above should preclude that but only
            // run in debug builds — confirm the invariant for release.
        }
        subdomains.emplace_back(&grid, id, sub_start, sub_end, h, idx_start, idx_end);
    }
    return subdomains;
}
public:
typedef TGrid GridType;
// bb_start: cell index within which the bounding box start (lower left) lies
// bb_end: cell index within which the bounding box end (upper right) lies
/// Constructs the process-local view of a subdomain.
/// @param grid     Grid the subdomain refers to (must not be NULL; asserted).
/// @param id       Numeric identifier of the subdomain.
/// @param start    Lower-left physical coordinates of the subdomain.
/// @param end      Upper-right physical coordinates of the subdomain.
/// @param h        Per-dimension pointers into grid-spacing arrays.
/// @param bb_start Global cell indices where the bounding box starts.
/// @param bb_end   Global cell indices where the bounding box ends.
/// NOTE(review): the '=0' array defaults decay to null pointers, and the
/// member initializers below dereference bb_start/bb_end unconditionally --
/// callers must always pass both arguments explicitly; confirm intended.
Subdomain(TGrid* grid, const int id,
          const double start[3], const double end[3], const double* h[3],
          const int bb_start[3]=0, const int bb_end[3]=0) :
    m_grid(grid), m_id(id),
    m_bbox_start{bb_start[0], bb_start[1], bb_start[2]},
    m_bbox_end{bb_end[0], bb_end[1], bb_end[2]},
    m_subcount{0},
    // global subdomain dimensions (inclusive index range -> +1)
    m_subdim{bb_end[0]-bb_start[0]+1, bb_end[1]-bb_start[1]+1, bb_end[2]-bb_start[2]+1},
    m_valid(false),
    m_grid_spacing{h[0], h[1], h[2]},
    m_start{start[0], start[1], start[2]},
    m_end{end[0], end[1], end[2]}
{
    assert(m_grid != NULL);
    typedef typename TGrid::BlockType TBlock;
    // process span: cell-index range covered by this process, derived from
    // the first and last block of the grid.
    // NOTE(review): assumes infos_all is non-empty (front()/back()) -- confirm.
    std::vector<BlockInfo> infos_all = grid->getBlocksInfo();
    const BlockInfo info_first = infos_all.front();
    const BlockInfo info_last = infos_all.back();
    const int process_start[3] = {
        static_cast<int>(info_first.index[0] * TBlock::sizeX),
        static_cast<int>(info_first.index[1] * TBlock::sizeY),
        static_cast<int>(info_first.index[2] * TBlock::sizeZ)
    };
    const int process_end[3] = {
        static_cast<int>((info_last.index[0] + 1) * TBlock::sizeX - 1),
        static_cast<int>((info_last.index[1] + 1) * TBlock::sizeY - 1),
        static_cast<int>((info_last.index[2] + 1) * TBlock::sizeZ - 1)
    };
    // Clip the subdomain's bounding box to the process span, dimension by
    // dimension, and record whether each dimension intersects at all.
    bool b_intersect[3] = { false, false, false };
    for (size_t i = 0; i < 3; ++i)
    {
        // case 1: subdomain is fully contained in this process
        // dimension
        if (process_start[i] <= m_bbox_start[i] && m_bbox_end[i] <= process_end[i])
        {
            b_intersect[i] = true;
            continue;
        }
        // case 2: subdomain is partially contained in this process
        // dimension (distributed)
        if (process_start[i] <= m_bbox_start[i] && m_bbox_start[i] <= process_end[i] && m_bbox_end[i] > process_end[i])
        {
            // subdomain extends beyond the process: clip the end
            m_bbox_end[i] = process_end[i];
            b_intersect[i] = true;
        }
        else if (m_bbox_start[i] < process_start[i] && process_end[i] < m_bbox_end[i])
        {
            // subdomain fully spans the process: clip both sides
            m_bbox_start[i] = process_start[i];
            m_bbox_end[i] = process_end[i];
            b_intersect[i] = true;
        }
        else if (m_bbox_start[i] < process_start[i] && process_start[i] <= m_bbox_end[i] && m_bbox_end[i] <= process_end[i])
        {
            // subdomain starts before the process: clip the start
            m_bbox_start[i] = process_start[i];
            b_intersect[i] = true;
        }
    }
    // The subdomain is valid on this process only if all three dimensions
    // intersect; m_max_size is the local cell count (product of counts).
    m_valid = true;
    m_max_size = 1;
    for (size_t i = 0; i < 3; ++i)
    {
        m_subcount[i] = m_bbox_end[i] - m_bbox_start[i] + 1;
        m_max_size *= static_cast<unsigned long>(m_subcount[i]);
        m_valid = m_valid && b_intersect[i];
    }
    // see which blocks are needed: collect every block whose index range
    // overlaps the (clipped) bounding box in all three dimensions.
    if (m_valid)
    {
        for (size_t i = 0; i < infos_all.size(); ++i)
        {
            const BlockInfo info = infos_all[i];
            const int block_start[3] = {
                static_cast<int>(info.index[0] * TBlock::sizeX),
                static_cast<int>(info.index[1] * TBlock::sizeY),
                static_cast<int>(info.index[2] * TBlock::sizeZ)
            };
            const int block_end[3] = {
                static_cast<int>(block_start[0] + TBlock::sizeX - 1),
                static_cast<int>(block_start[1] + TBlock::sizeY - 1),
                static_cast<int>(block_start[2] + TBlock::sizeZ - 1)
            };
            // standard 1D interval-overlap test per dimension
            const bool b_need_X = ((block_start[0] <= m_bbox_end[0]) && (block_end[0] >= m_bbox_start[0]));
            const bool b_need_Y = ((block_start[1] <= m_bbox_end[1]) && (block_end[1] >= m_bbox_start[1]));
            const bool b_need_Z = ((block_start[2] <= m_bbox_end[2]) && (block_end[2] >= m_bbox_start[2]));
            if (b_need_X && b_need_Y && b_need_Z)
                m_intersecting_blocks.push_back( info );
        }
    }
}
/// Copy construction and destruction: memberwise defaults are sufficient.
Subdomain(const Subdomain& c) = default;
virtual ~Subdomain() = default;

// --- Accessors -----------------------------------------------------------
/// Numeric identifier of this subdomain.
inline int id() const { return m_id; }
/// Local (process-clipped) cell-index bounds of the bounding box.
inline const int (&bbox_start() const)[3] { return m_bbox_start; }
inline const int (&bbox_end() const)[3] { return m_bbox_end; }
/// Number of cells of the local portion, per dimension.
inline const int (&count() const)[3] { return m_subcount; }
/// Number of cells of the global subdomain, per dimension.
inline const int (&dim() const)[3] { return m_subdim; }
inline int dim(const size_t i) const { assert(i<3); return m_subdim[i]; }
/// Physical lower-left / upper-right coordinates of the subdomain.
inline const double (&start() const)[3] { return m_start; }
inline double start(const size_t i) const { assert(i<3); return m_start[i]; }
inline const double (&end() const)[3] { return m_end; }
inline double end(const size_t i) const { assert(i<3); return m_end[i]; }
/// Pointer into the grid-spacing array for dimension i.
inline const double* grid_spacing(const size_t i) const { assert(i<3); return m_grid_spacing[i]; }
/// Product of count() -- number of local cells.
inline unsigned long max_size() const { return m_max_size; }
/// True if this process intersects the subdomain in all three dimensions.
inline bool valid() const { return m_valid; }
/// Blocks of this process that overlap the subdomain.
inline const std::vector<BlockInfo>& getBlocksInfo() const { return m_intersecting_blocks; }
/// Grid this subdomain refers to (non-owning pointer).
inline TGrid* getGrid() const { return m_grid; }
/// Returns the canonical name of this subdomain ("subdomain<id>").
inline std::string name() const
{
    return "subdomain" + std::to_string(m_id);
}
/// Prints a human-readable summary of the subdomain state to stdout.
/// @param prefix String prepended to every output line (e.g. indentation).
virtual void show(const std::string prefix="") const
{
    std::cout << prefix << "subdomain" << m_id << ":" << std::endl;
    std::cout << prefix << "ID = " << m_id << std::endl;
    std::cout << prefix << "START = (" << m_start[0] << ", " << m_start[1] << ", " << m_start[2] << ")" << std::endl;
    std::cout << prefix << "END = (" << m_end[0] << ", " << m_end[1] << ", " << m_end[2] << ")" << std::endl;
    std::cout << prefix << "BBOX_START = (" << m_bbox_start[0] << ", " << m_bbox_start[1] << ", " << m_bbox_start[2] << ")" << std::endl;
    std::cout << prefix << "BBOX_END = (" << m_bbox_end[0] << ", " << m_bbox_end[1] << ", " << m_bbox_end[2] << ")" << std::endl;
    std::cout << prefix << "DIM = (" << m_subdim[0] << ", " << m_subdim[1] << ", " << m_subdim[2] << ")" << std::endl;
    std::cout << prefix << "SUBDIM = (" << m_subcount[0] << ", " << m_subcount[1] << ", " << m_subcount[2] << ")" << std::endl;
    std::cout << prefix << "MAXSIZE = " << m_max_size << std::endl;
    std::cout << prefix << "VALID = " << m_valid << std::endl;
    std::cout << prefix << "NUMBER OF BLOCKS = " << m_intersecting_blocks.size() << std::endl;
}
protected:
    TGrid * m_grid;           // grid the subdomain refers to (non-owning)
    const int m_id;           // subdomain identifier
    int m_bbox_start[3];      // local start indices of bounding box
    int m_bbox_end[3];        // local end indices of bounding box
    int m_subcount[3];        // number of elements in local subdomain
    int m_subdim[3];          // number of elements in global subdomain
    unsigned long m_max_size; // product of m_subcount (local cell count)
    bool m_valid;             // true if this process intersects the subdomain
    const double* m_grid_spacing[3]; // per-dimension pointers into grid-spacing arrays
    double m_start[3]; // lower left coordinates of smallest subdomain that contains the specified origin in config file
    double m_end[3]; // upper right coordinates of smallest subdomain that contains the specified extent in config file
    std::vector<BlockInfo> m_intersecting_blocks; // blocks of this process overlapping the subdomain
};
}
///////////////////////////////////////////////////////////////////////////////
// Dumpers
//
// The following requirements for the data TStreamer are required:
// TStreamer::NCHANNELS : Number of data elements (1=Scalar, 3=Vector, 9=Tensor)
// TStreamer::operate : Data access methods for read and write
// TStreamer::getAttributeName : Attribute name of the data ("Scalar", "Vector", "Tensor")
/// Dumps a subdomain of the grid into an HDF5 file (plus an optional XDMF
/// descriptor for visualization tools).
/// @param subdomain Subdomain to dump (provides bounding box, blocks, mesh).
/// @param t         Simulation time written into the XDMF file.
/// @param fileroot  Filename w/o folder or extension.
/// @param dirname   Output directory.
/// @param bXMF      If true, also write the XDMF wrapper file.
template<typename TStreamer, typename hdf5Real, typename TSubdomain>
void DumpSubdomainHDF5(const TSubdomain& subdomain,
                       const typename TSubdomain::GridType::Real t,
                       const std::string &fileroot,
                       const std::string &dirname = ".",
                       const bool bXMF = true)
{
#ifdef CUBISM_USE_HDF
    typedef typename TSubdomain::GridType::BlockType B;

    std::string filename_h5 = fileroot + ".h5";
    std::string fullpath_h5 = dirname + "/" + filename_h5;
    std::string fullpath_xmf = dirname + "/" + fileroot + ".xmf";

    herr_t status;
    hid_t file_id, dataset_id, fspace_id, fapl_id, mspace_id;

    ///////////////////////////////////////////////////////////////////////////
    // startup file
    H5open();
    fapl_id = H5Pcreate(H5P_FILE_ACCESS);
    file_id = H5Fcreate(fullpath_h5.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
    status = H5Pclose(fapl_id); if(status<0) H5Eprint1(stdout);

    ///////////////////////////////////////////////////////////////////////////
    // write mesh: one vertex-coordinate vector per dimension (/vx, /vy, /vz)
    std::vector<int> mesh_dims;
    std::vector<std::string> dset_name;
    dset_name.push_back("/vx");
    dset_name.push_back("/vy");
    dset_name.push_back("/vz");
    for (size_t i = 0; i < 3; ++i)
    {
        const int nCells = subdomain.dim(i);
        const double* const h = subdomain.grid_spacing(i);
        // Accumulate vertex positions from the subdomain origin using the
        // (possibly non-uniform) per-cell spacings.
        std::vector<double> vertices(nCells+1, subdomain.start(i));
        mesh_dims.push_back(vertices.size());
        for (int j = 0; j < nCells; ++j)
            vertices[j+1] = vertices[j] + h[j]; // fixed stray ';;'
        hsize_t dim[1] = {vertices.size()};
        fspace_id = H5Screate_simple(1, dim, NULL);
#ifndef CUBISM_ON_FERMI
        dataset_id = H5Dcreate(file_id, dset_name[i].c_str(), H5T_NATIVE_DOUBLE, fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
#else
        dataset_id = H5Dcreate2(file_id, dset_name[i].c_str(), H5T_NATIVE_DOUBLE, fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
#endif
        status = H5Dwrite(dataset_id, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, vertices.data());
        if (status < 0) H5Eprint1(stdout); // mesh writes were previously unchecked
        status = H5Sclose(fspace_id); if(status<0) H5Eprint1(stdout);
        status = H5Dclose(dataset_id); if(status<0) H5Eprint1(stdout);
    }

    ///////////////////////////////////////////////////////////////////////////
    // write data
    std::vector<BlockInfo> infos_sub = subdomain.getBlocksInfo();

    static const unsigned int NCHANNELS = TStreamer::NCHANNELS;
    const unsigned int NX = subdomain.count()[0];
    const unsigned int NY = subdomain.count()[1];
    const unsigned int NZ = subdomain.count()[2];

    std::cout << "Allocating " << (subdomain.max_size() * NCHANNELS * sizeof(hdf5Real))/(1024.*1024.) << " MB of HDF5 subdomain data" << std::endl;

    // RAII buffer instead of raw new[]/delete[]; stays empty when this
    // process does not intersect the subdomain.
    std::vector<hdf5Real> array_all;

    hsize_t count[4] = { NZ, NY, NX, NCHANNELS };
    hsize_t dims[4] = { NZ, NY, NX, NCHANNELS };
    hsize_t offset[4] = {0, 0, 0, 0};

    if (subdomain.valid())
    {
        array_all.resize(static_cast<size_t>(NX) * NY * NZ * NCHANNELS);
        const int bbox_start[3] = {
            subdomain.bbox_start()[0],
            subdomain.bbox_start()[1],
            subdomain.bbox_start()[2]
        };
        const int bbox_end[3] = {
            subdomain.bbox_end()[0],
            subdomain.bbox_end()[1],
            subdomain.bbox_end()[2]
        };
        // Gather block data into the contiguous output buffer; blocks are
        // independent, cells outside the bounding box are skipped.
#pragma omp parallel for
        for(int i=0; i<(int)infos_sub.size(); i++)
        {
            BlockInfo& info = infos_sub[i];
            const B& b = *(B*)info.ptrBlock;
            const int idx[3] = { info.index[0], info.index[1], info.index[2] };
            for(int iz=0; iz<static_cast<int>(B::sizeZ); iz++)
                for(int iy=0; iy<static_cast<int>(B::sizeY); iy++)
                    for(int ix=0; ix<static_cast<int>(B::sizeX); ix++)
                    {
                        // global cell index of this block-local cell
                        int gx = idx[0]*B::sizeX + ix;
                        int gy = idx[1]*B::sizeY + iy;
                        int gz = idx[2]*B::sizeZ + iz;
                        const bool b_containedX = (bbox_start[0] <= gx) && (gx <= bbox_end[0]);
                        const bool b_containedY = (bbox_start[1] <= gy) && (gy <= bbox_end[1]);
                        const bool b_containedZ = (bbox_start[2] <= gz) && (gz <= bbox_end[2]);
                        if (!(b_containedX && b_containedY && b_containedZ))
                            continue;
                        hdf5Real output[NCHANNELS];
                        for(unsigned int j=0; j<NCHANNELS; ++j)
                            output[j] = 0;
                        TStreamer::operate(b, ix, iy, iz, (hdf5Real*)output);
                        gx -= bbox_start[0]; // shift to process local
                        gy -= bbox_start[1]; // shift to process local
                        gz -= bbox_start[2]; // shift to process local
                        hdf5Real * const ptr = array_all.data() + NCHANNELS*(gx + NX * (gy + NY * gz));
                        for(unsigned int j=0; j<NCHANNELS; ++j)
                            ptr[j] = output[j];
                    }
        }
    }

    fapl_id = H5Pcreate(H5P_DATASET_XFER);
    fspace_id = H5Screate_simple(4, dims, NULL);
#ifndef CUBISM_ON_FERMI
    dataset_id = H5Dcreate(file_id, "data", get_hdf5_type<hdf5Real>(), fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
#else
    dataset_id = H5Dcreate2(file_id, "data", get_hdf5_type<hdf5Real>(), fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
#endif
    // Close the creation dataspace before fetching the dataset's own
    // dataspace; the original overwrote the handle and leaked it.
    status = H5Sclose(fspace_id); if(status<0) H5Eprint1(stdout);
    fspace_id = H5Dget_space(dataset_id);
    H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, offset, NULL, count, NULL);
    mspace_id = H5Screate_simple(4, count, NULL);
    if (!subdomain.valid())
    {
        // This process contributes nothing: select empty file/memory spaces.
        H5Sselect_none(fspace_id);
        H5Sselect_none(mspace_id);
    }
    status = H5Dwrite(dataset_id, get_hdf5_type<hdf5Real>(), mspace_id, fspace_id, fapl_id, array_all.empty() ? NULL : array_all.data());
    if (status < 0) H5Eprint1(stdout);
    status = H5Sclose(mspace_id); if(status<0) H5Eprint1(stdout);
    status = H5Sclose(fspace_id); if(status<0) H5Eprint1(stdout);
    status = H5Dclose(dataset_id); if(status<0) H5Eprint1(stdout);
    status = H5Pclose(fapl_id); if(status<0) H5Eprint1(stdout);
    status = H5Fclose(file_id); if(status<0) H5Eprint1(stdout);
    H5close();

    if (bXMF)
    {
        FILE *xmf = fopen(fullpath_xmf.c_str(), "w");
        if (xmf == NULL)
        {
            // fopen result was previously unchecked: fprintf(NULL, ...) crashes.
            std::cerr << "DumpSubdomainHDF5: cannot open " << fullpath_xmf << " for writing" << std::endl;
        }
        else
        {
            fprintf(xmf, "<?xml version=\"1.0\" ?>\n");
            fprintf(xmf, "<!DOCTYPE Xdmf SYSTEM \"Xdmf.dtd\" []>\n");
            fprintf(xmf, "<Xdmf Version=\"2.0\">\n");
            fprintf(xmf, " <Domain>\n");
            fprintf(xmf, " <Grid GridType=\"Uniform\">\n");
            fprintf(xmf, " <Time Value=\"%e\"/>\n\n", t);
            fprintf(xmf, " <Topology TopologyType=\"3DRectMesh\" Dimensions=\"%d %d %d\"/>\n\n", mesh_dims[2], mesh_dims[1], mesh_dims[0]);
            fprintf(xmf, " <Geometry GeometryType=\"VxVyVz\">\n");
            fprintf(xmf, " <DataItem Name=\"mesh_vx\" Dimensions=\"%d\" NumberType=\"Float\" Precision=\"8\" Format=\"HDF\">\n", mesh_dims[0]);
            fprintf(xmf, " %s:/vx\n", filename_h5.c_str());
            fprintf(xmf, " </DataItem>\n");
            fprintf(xmf, " <DataItem Name=\"mesh_vy\" Dimensions=\"%d\" NumberType=\"Float\" Precision=\"8\" Format=\"HDF\">\n", mesh_dims[1]);
            fprintf(xmf, " %s:/vy\n", filename_h5.c_str());
            fprintf(xmf, " </DataItem>\n");
            fprintf(xmf, " <DataItem Name=\"mesh_vz\" Dimensions=\"%d\" NumberType=\"Float\" Precision=\"8\" Format=\"HDF\">\n", mesh_dims[2]);
            fprintf(xmf, " %s:/vz\n", filename_h5.c_str());
            fprintf(xmf, " </DataItem>\n");
            fprintf(xmf, " </Geometry>\n\n");
            fprintf(xmf, " <Attribute Name=\"data\" AttributeType=\"%s\" Center=\"Cell\">\n", TStreamer::getAttributeName());
            fprintf(xmf, " <DataItem Dimensions=\"%d %d %d %d\" NumberType=\"Float\" Precision=\"%d\" Format=\"HDF\">\n", (int)dims[0], (int)dims[1], (int)dims[2], (int)dims[3], (int)sizeof(hdf5Real));
            fprintf(xmf, " %s:/data\n", filename_h5.c_str());
            fprintf(xmf, " </DataItem>\n");
            fprintf(xmf, " </Attribute>\n");
            fprintf(xmf, " </Grid>\n");
            fprintf(xmf, " </Domain>\n");
            fprintf(xmf, "</Xdmf>\n");
            fclose(xmf);
        }
    }
#else
#warning USE OF HDF WAS DISABLED AT COMPILE TIME
#endif
}
CUBISM_NAMESPACE_END
#endif /* HDF5SUBDOMAINDUMPER_H_3C2DKYV4 */
|
sparse_matrix.h | /****************************************************************************************************************/
/* */
/* OpenNN: Open Neural Networks Library */
/* www.opennn.net */
/* */
/* S P A R S E M A T R I X C O N T A I N E R */
/* */
/* Fernando Gomez */
/* Artificial Intelligence Techniques SL */
/* fernandogomez@artelnics.com */
/* */
/****************************************************************************************************************/
#ifndef __SPARSEMATRIX_H__
#define __SPARSEMATRIX_H__
// System includes
#include <cmath>
#include <cstdlib>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <sstream>
#include <stdexcept>
// OpenNN includes
#include "matrix.h"
namespace OpenNN
{
/// This template class defines a sparse matrix for general purpose use.
/// This sparse matrix also implements some mathematical methods which can be useful.
template <class T>
class SparseMatrix
{

public:

    // CONSTRUCTORS

    /// Default constructor: empty 0x0 sparse matrix.
    explicit SparseMatrix();

    /// Builds an empty matrix of the given dimensions (no stored values).
    explicit SparseMatrix(const size_t&, const size_t&);

    /// Loads the matrix members from a data file.
    explicit SparseMatrix(const string&);

    SparseMatrix(const SparseMatrix&);

    // DESTRUCTOR

    virtual ~SparseMatrix();

    // ASSIGNMENT OPERATORS

    inline SparseMatrix<T>& operator = (const SparseMatrix<T>&);

    // REFERENCE OPERATORS

    /// Read-only element access by (row, column).
    inline const T operator()(const size_t&, const size_t&) const;

    // Comparison operators against another sparse matrix, a dense matrix,
    // or a scalar.
    // NOTE(review): there is no operator!=(const T&) overload, unlike the
    // scalar overloads of the other comparisons -- confirm intended.
    bool operator == (const SparseMatrix<T>&) const;
    bool operator == (const Matrix<T>&) const;
    bool operator == (const T&) const;

    bool operator != (const SparseMatrix<T>&) const;
    bool operator != (const Matrix<T>&) const;

    bool operator >(const SparseMatrix<T>&) const;
    bool operator >(const Matrix<T>&) const;
    bool operator >(const T&) const;

    bool operator <(const SparseMatrix<T>&) const;
    bool operator <(const Matrix<T>&) const;
    bool operator <(const T&) const;

    bool operator >= (const SparseMatrix<T>&) const;
    bool operator >= (const Matrix<T>&) const;
    bool operator >= (const T&) const;

    bool operator <= (const SparseMatrix<T>&) const;
    bool operator <= (const Matrix<T>&) const;
    bool operator <= (const T&) const;

    // METHODS

    // Get methods

    const size_t& get_rows_number() const;
    const size_t& get_columns_number() const;

    // Accessors to the internal triplet (COO-style) storage.
    const Vector<size_t>& get_rows_indices() const;
    const Vector<size_t>& get_columns_indices() const;
    const Vector<T>& get_matrix_values() const;

    // Set methods

    void set();
    void set(const size_t&, const size_t&);
    void set(const SparseMatrix<T>&);
    void set(const string&);

    void set_identity(const size_t&);

    void set_rows_number(const size_t&);
    void set_columns_number(const size_t&);

    /// Sets a single element; set_values replaces the whole triplet storage.
    void set_element(const size_t&, const size_t&, const T&);
    void set_values(const Vector<size_t>&, const Vector<size_t>&, const Vector<T>&);

    // Count methods

    size_t count_diagonal_elements() const;
    size_t count_off_diagonal_elements() const;

    size_t count_equal_to(const T&) const;
    size_t count_equal_to(const size_t&, const T&) const;

    size_t count_not_equal_to(const T&) const;
    size_t count_not_equal_to(const size_t&, const T&) const;

    size_t count_rows_equal_to(const Vector<size_t>&, const T&) const;
    bool is_row_equal_to(const size_t&, const Vector<size_t>&, const T&) const;
    Vector<size_t> get_row_indices_equal_to(const Vector<size_t>&, const T&) const;

    // Sub-matrix / row / column extraction

    SparseMatrix<T> get_sub_sparse_matrix(const Vector<size_t>&, const Vector<size_t>&) const;
    SparseMatrix<T> get_sub_sparse_matrix_rows(const Vector<size_t>&) const;
    SparseMatrix<T> get_sub_sparse_matrix_columns(const Vector<size_t>&) const;

    Vector<T> get_row(const size_t&) const;
    Vector<T> get_rows(const size_t&, const size_t&) const;
    Vector<T> get_row(const size_t&, const Vector<size_t>&) const;
    Vector<T> get_column(const size_t&) const;

    size_t count_unique() const;

    Vector<T> get_diagonal() const;

    void set_row(const size_t&, const Vector<T>&);
    void set_row(const size_t&, const T&);
    void set_column(const size_t&, const Vector<T>&);
    void set_column(const size_t&, const T&);

    void set_diagonal(const T&);
    void set_diagonal(const Vector<T>&);

    void initialize_diagonal(const T&);
    void initialize_diagonal(const size_t&, const T&);
    void initialize_diagonal(const size_t&, const Vector<T>&);
    void initialize_identity();

    SparseMatrix<T> sum_diagonal(const T&) const;
    SparseMatrix<T> sum_diagonal(const Vector<T>&) const;

    // Structural modification (all return a new matrix unless void)

    void append_row(const Vector<T>&);
    void append_column(const Vector<T>&);

    SparseMatrix<T> insert_row(const size_t&, const Vector<T>&) const;
    SparseMatrix<T> insert_column(const size_t&, const Vector<T>&);

    SparseMatrix<T> merge_matrices(const SparseMatrix<T>&, const size_t&, const size_t&) const;
    Matrix<T> merge_matrices(const Matrix<T>&, const size_t&, const size_t&) const;

    SparseMatrix<T> delete_row(const size_t&) const;
    SparseMatrix<T> delete_rows(const Vector<size_t>&) const;
    SparseMatrix<T> delete_rows_with_value(const T&) const;
    SparseMatrix<T> delete_first_rows(const size_t&) const;
    SparseMatrix<T> get_first_rows(const size_t&) const;
    SparseMatrix<T> delete_last_rows(const size_t&) const;
    SparseMatrix<T> get_last_rows(const size_t&) const;
    SparseMatrix<T> delete_column(const size_t&) const;
    SparseMatrix<T> delete_columns(const Vector<size_t>&) const;

    SparseMatrix<T> remove_constant_rows() const;
    SparseMatrix<T> remove_constant_columns() const;

    SparseMatrix<T> assemble_rows(const SparseMatrix<T>&) const;
    Matrix<T> assemble_rows(const Matrix<T>&) const;
    SparseMatrix<T> assemble_columns(const SparseMatrix<T>&) const;
    Matrix<T> assemble_columns(const Matrix<T>&) const;

    SparseMatrix<T> sort_ascending(const size_t&) const;
    SparseMatrix<T> sort_descending(const size_t&) const;

    void replace(const T&, const T&);
    void replace_in_row(const size_t&, const T&, const T&);
    void replace_in_column(const size_t&, const T&, const T&);

    bool has_column_value(const size_t&, const T&) const;

    // Mathematical methods

    T calculate_sum() const;

    Vector<int> calculate_rows_sum_int() const;
    Vector<T> calculate_rows_sum() const;
    Vector<T> calculate_columns_sum() const;

    Vector<size_t> calculate_most_frequent_columns_indices(const size_t& = 10);

    void sum_row(const size_t&, const Vector<T>&);

    double calculate_trace() const;

    // Statistics (mean/median/min/max), with *_missing_values variants that
    // take per-column missing-value row indices.
    Vector<double> calculate_mean() const;
    double calculate_mean(const size_t&) const;
    Vector<double> calculate_mean(const Vector<size_t>&) const;
    Vector<double> calculate_mean(const Vector<size_t>&, const Vector<size_t>&) const;
    Vector<double> calculate_mean_missing_values(const Vector< Vector<size_t> >&) const;
    Vector<double> calculate_mean_missing_values(const Vector<size_t>&, const Vector<size_t>&, const Vector< Vector<size_t> >&) const;

    Vector< Vector<double> > calculate_mean_standard_deviation() const;
    Vector< Vector<double> > calculate_mean_standard_deviation(const Vector<size_t>&) const;
    Vector< Vector<double> > calculate_mean_standard_deviation(const Vector<size_t>&, const Vector<size_t>&) const;

    Vector<double> calculate_median() const;
    double calculate_median(const size_t&) const;
    Vector<double> calculate_median(const Vector<size_t>&) const;
    Vector<double> calculate_median(const Vector<size_t>&, const Vector<size_t>&) const;
    Vector<double> calculate_median_missing_values(const Vector< Vector<size_t> >&) const;
    Vector<double> calculate_median_missing_values(const Vector<size_t>&, const Vector<size_t>&, const Vector< Vector<size_t> >&) const;

    T calculate_minimum() const;
    T calculate_maximum() const;

    T calculate_column_minimum(const size_t&) const;
    T calculate_column_maximum(const size_t&) const;

    Vector<T> calculate_means_binary() const;
    Vector<T> calculate_means_binary_column() const;
    Vector<T> calculate_means_binary_columns() const;

    Vector<T> calculate_means_binary_missing_values(const Vector< Vector<size_t> >&) const;
    Vector<T> calculate_means_binary_column_missing_values(const Vector< Vector<size_t> >&) const;
    Vector<T> calculate_means_binary_columns_missing_values(const Vector< Vector<size_t> >&) const;

    Vector< Vector<T> > calculate_minimum_maximum() const;
    Vector< Vector<T> > calculate_minimum_maximum(const Vector<size_t>&) const;
    Vector< Vector<T> > calculate_minimum_maximum(const Vector<size_t>&, const Vector<size_t>&) const;

    Vector< Statistics<T> > calculate_statistics() const;
    Vector< Statistics<T> > calculate_statistics(const Vector<size_t>&, const Vector<size_t>&) const;
    Vector< Statistics<T> > calculate_statistics(const Vector< Vector<size_t> >&, const Vector<size_t>&) const;

    Vector< Statistics<T> > calculate_statistics_missing_values(const Vector< Vector<size_t> >&) const;

    // NOTE(review): the second parameter below is taken by value, unlike the
    // sibling overloads which take const references -- confirm intended.
    Vector< Statistics<T> > calculate_columns_statistics_missing_values(const Vector<size_t>&, const Vector< Vector<size_t> >) const;

    Vector< Statistics<T> > calculate_rows_statistics(const Vector<size_t>&) const;
    Vector< Statistics<T> > calculate_rows_statistics_missing_values(const Vector<size_t>&, const Vector< Vector<size_t> >&) const;
    Vector< Statistics<T> > calculate_columns_statistics(const Vector<size_t>&) const;

    Vector<T> calculate_rows_means(const Vector<size_t>& = Vector<size_t>()) const;

    Vector<T> calculate_columns_minimums(const Vector<size_t>& = Vector<size_t>()) const;
    Vector<T> calculate_columns_maximums(const Vector<size_t>& = Vector<size_t>()) const;

    Vector< Vector<double> > calculate_box_plots(const Vector< Vector<size_t> >&, const Vector<size_t>&) const;

    SparseMatrix<double> calculate_covariance_sparse_matrix() const;

    Vector< Histogram<T> > calculate_histograms(const size_t& = 10) const;
    Vector< Histogram<T> > calculate_histograms_missing_values(const Vector< Vector<size_t> >&, const size_t& = 10) const;

    // Scaling / unscaling (in place); the overloads returning statistics
    // compute them from the matrix itself.
    void scale_mean_standard_deviation(const Vector< Statistics<T> >&);
    Vector< Statistics<T> > scale_mean_standard_deviation();

    void scale_rows_mean_standard_deviation(const Vector< Statistics<T> >&, const Vector<size_t>&);
    void scale_columns_mean_standard_deviation(const Vector< Statistics<T> >&, const Vector<size_t>&);

    void scale_minimum_maximum(const Vector< Statistics<T> >&);
    Vector< Statistics<T> > scale_minimum_maximum();

    void scale_rows_minimum_maximum(const Vector< Statistics<T> >&, const Vector<size_t>&);
    void scale_columns_minimum_maximum(const Vector< Statistics<T> >&, const Vector<size_t>&);

    void scale_logarithmic(const Vector< Statistics<T> >&);
    Vector< Statistics<T> > scale_logarithmic();

    void scale_rows_logarithmic(const Vector< Statistics<T> >&, const Vector<size_t>&);
    void scale_columns_logarithmic(const Vector<Statistics<T> >&, const Vector<size_t>&);

    void unscale_mean_standard_deviation(const Vector< Statistics<T> >&);
    void unscale_rows_mean_standard_deviation(const Vector< Statistics<T> >&, const Vector<size_t>&);
    void unscale_columns_mean_standard_deviation(const Vector< Statistics<T> >&, const Vector<size_t>&);

    void unscale_minimum_maximum(const Vector< Statistics<T> >&);
    void unscale_rows_minimum_maximum(const Vector< Statistics<T> >&, const Vector<size_t>&);
    void unscale_columns_minimum_maximum(const Vector< Statistics<T> >&, const Vector<size_t>&);

    void unscale_logarithmic(const Vector< Statistics<T> >&);
    void unscale_rows_logarithmic(const Vector< Statistics<T> >&, const Vector<size_t>&);
    void unscale_columns_logarithmic(const Vector< Statistics<T> >&, const Vector<size_t>&);

    Vector<size_t> calculate_minimal_indices() const;
    Vector<size_t> calculate_maximal_indices() const;

    Vector< Vector<size_t> > calculate_minimal_maximal_indices() const;

    // Error metrics against another matrix or a vector

    double calculate_sum_squared_error(const SparseMatrix<double>&) const;
    double calculate_sum_squared_error(const Matrix<T>&) const;
    double calculate_minkowski_error(const SparseMatrix<double>&, const double&) const;
    double calculate_minkowski_error(const Matrix<T>&, const double&) const;

    double calculate_sum_squared_error(const Vector<double>&) const;

    Vector<double> calculate_rows_norm() const;

    SparseMatrix<T> calculate_absolute_value() const;

    SparseMatrix<T> calculate_transpose() const;

    // Linear-algebra methods

    T calculate_determinant() const;
    SparseMatrix<T> calculate_cofactor() const;
    SparseMatrix<T> calculate_inverse() const;
    SparseMatrix<T> calculate_LU_inverse() const;
    Vector<T> solve_LDLT(const Vector<double>&) const;

    double calculate_distance(const size_t&, const size_t&) const;

    // Arithmetic operators (element-wise; Vector operands are broadcast)

    SparseMatrix<T> operator + (const T&) const;
    SparseMatrix<T> operator + (const Vector<T>&) const;
    SparseMatrix<T> operator + (const SparseMatrix<T>&) const;
    SparseMatrix<T> operator + (const Matrix<T>&) const;

    SparseMatrix<T> operator -(const T&) const;
    SparseMatrix<T> operator -(const Vector<T>&) const;
    SparseMatrix<T> operator -(const SparseMatrix<T>&) const;
    SparseMatrix<T> operator -(const Matrix<T>&) const;

    SparseMatrix<T> operator *(const T&) const;
    SparseMatrix<T> operator *(const Vector<T>&) const;
    SparseMatrix<T> operator *(const SparseMatrix<T>&) const;
    SparseMatrix<T> operator *(const Matrix<T>&) const;

    SparseMatrix<T> operator /(const T&) const;
    SparseMatrix<T> operator /(const Vector<T>&) const;
    SparseMatrix<T> operator /(const SparseMatrix<T>&) const;
    SparseMatrix<T> operator /(const Matrix<T>&) const;

    void operator += (const T&);
    void operator += (const Vector<T>&);
    void operator += (const SparseMatrix<T>&);
    void operator += (const Matrix<T>&);

    void operator -= (const T&);
    void operator -= (const Vector<T>&);
    void operator -= (const SparseMatrix<T>&);
    void operator -= (const Matrix<T>&);

    void operator *= (const T&);
    void operator *= (const Vector<T>&);
    void operator *= (const SparseMatrix<T>&);
    void operator *= (const Matrix<T>&);

    void operator /= (const T&);
    void operator /= (const Vector<T>&);
    void operator /= (const SparseMatrix<T>&);
    void operator /= (const Matrix<T>&);

    // Products (matrix-vector, matrix-matrix)

    Vector<double> dot(const Vector<double>&) const;

    SparseMatrix<double> dot(const SparseMatrix<double>&) const;
    Matrix<T> dot(const Matrix<T>&) const;

    Matrix<T> calculate_eigenvalues() const;
    Matrix<T> calculate_eigenvectors() const;

    /// Direct (Kronecker) product with another matrix.
    SparseMatrix<T> direct(const SparseMatrix<T>&) const;
    SparseMatrix<T> direct(const Matrix<T>&) const;

    // Structural predicates

    bool empty() const;

    bool is_square() const;

    bool is_symmetric() const;
    bool is_antisymmetric() const;

    bool is_diagonal() const;
    bool is_scalar() const;
    bool is_identity() const;

    bool is_binary() const;
    bool is_column_binary(const size_t&) const;

    bool is_column_constant(const size_t&) const;

    bool is_dense(const double& = 1) const;

    void convert_association();

    KMeansResults<T> calculate_k_means(const size_t&) const;

    // Correlation methods

    Vector<T> calculate_multiple_linear_regression_parameters(const Vector<T>&) const;

    double calculate_multiple_linear_correlation(const Vector<T>&) const;

    // Serialization methods

    void print() const;

    void load(const string&);
    Vector<string> load_product_strings(const string&, const char& = ',');
    void load_binary(const string&);

    void save(const string&) const;
    void save_binary(const string&) const;
    void save_csv(const string&, const char& = ',', const Vector<string>& = Vector<string>(), const Vector<string>& = Vector<string>(), const string& = "Id") const;

    void parse(const string&);

    string SparseMatrix_to_string(const char& = ' ') const;

    // Conversions to other element types and representations

    SparseMatrix<size_t> to_size_t_SparseMatrix() const;
    SparseMatrix<double> to_double_SparseMatrix() const;
    SparseMatrix<string> to_string_SparseMatrix(const size_t& = 3) const;

    Matrix<T> to_matrix() const;

    Vector< Vector<T> > to_vector_of_vectors() const;

    /// Compressed-sparse-row conversion helpers.
    Vector< Vector<size_t> > to_CSR(Vector<T>&) const;
    void from_CSR(const Vector<size_t>&, const Vector<size_t>&, const Vector<T>&);

    void print_preview() const;

private:

    /// Number of rows in the sparse matrix.

    size_t rows_number;

    /// Number of columns in the sparse matrix.

    size_t columns_number;

    /// Indices of the rows with values different than 0 in the sparse matrix.

    Vector<size_t> rows_indices;

    /// Indices of the columns with values different than 0 in the sparse matrix.

    Vector<size_t> columns_indices;

    /// Values different than 0 in the sparse matrix.

    Vector<T> matrix_values;
};
// CONSTRUCTORS
/// Default constructor. It creates a sparse matrix with zero rows and zero columns.
/// Default constructor: builds an empty sparse matrix (0 rows, 0 columns,
/// no stored values).
template <class T>
SparseMatrix<T>::SparseMatrix()
{
    rows_number = columns_number = 0;
}
/// Constructor. It creates a SparseMatrix with n rows and m columns, containing n*m copies of the default value for Type.
/// @param new_rows_number Number of rows in SparseMatrix.
/// @param new_columns_number Number of columns in SparseMatrix.
/// Dimensioned constructor: builds an empty sparse matrix of the requested
/// size. Both dimensions may be zero (empty matrix); a single zero dimension
/// is rejected with a logic_error.
/// @param new_rows_number    Number of rows in SparseMatrix.
/// @param new_columns_number Number of columns in SparseMatrix.
template <class T>
SparseMatrix<T>::SparseMatrix(const size_t& new_rows_number, const size_t& new_columns_number)
{
    const bool no_rows = (new_rows_number == 0);
    const bool no_columns = (new_columns_number == 0);

    // 0x0 is a valid empty matrix.
    if(no_rows && no_columns)
    {
        rows_number = 0;
        columns_number = 0;
        return;
    }

    if(no_rows)
    {
        ostringstream buffer;

        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "Constructor SparseMatrix(const size_t&, const size_t&).\n"
               << "Number of rows must be greater than zero.\n";

        throw logic_error(buffer.str());
    }

    if(no_columns)
    {
        ostringstream buffer;

        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "Constructor SparseMatrix(const size_t&, const size_t&).\n"
               << "Number of columns must be greater than zero.\n";

        throw logic_error(buffer.str());
    }

    rows_number = new_rows_number;
    columns_number = new_columns_number;
}
/// File constructor. It creates a SparseMatrix which members are loaded from a data file.
/// @param file_name Name of SparseMatrix data file.
/// File constructor: starts from an empty matrix, then loads all members
/// from the given data file.
/// @param file_name Name of SparseMatrix data file.
template <class T>
SparseMatrix<T>::SparseMatrix(const string& file_name)
{
    rows_number = columns_number = 0;

    load(file_name);
}
/// Copy constructor. It creates a copy of an existing SparseMatrix.
/// @param other_sparse_matrix SparseMatrix to be copied.
/// Copy constructor: memberwise copy of an existing SparseMatrix.
/// @param other SparseMatrix to be copied.
template <class T>
SparseMatrix<T>::SparseMatrix(const SparseMatrix& other)
    : rows_number(other.rows_number),
      columns_number(other.columns_number),
      rows_indices(other.rows_indices),
      columns_indices(other.columns_indices),
      matrix_values(other.matrix_values)
{
}
// DESTRUCTOR
/// Destructor. Zeroes the dimensions and releases the triplet storage
/// (swapping with empty vectors also frees their capacity).
template <class T>
SparseMatrix<T>::~SparseMatrix()
{
    rows_number = 0;
    columns_number = 0;

    Vector<size_t>().swap(rows_indices);
    Vector<size_t>().swap(columns_indices);
    Vector<T>().swap(matrix_values);
}
// ASSIGNMENT OPERATORS
/// Assignment operator. It assigns to self a copy of an existing SparseMatrix.
/// @param other_sparse_matrix SparseMatrix to be assigned.
template <class T>
SparseMatrix<T>& SparseMatrix<T>::operator = (const SparseMatrix<T>& other_sparse_matrix)
{
    // Self-assignment guard. The previous implementation first emptied this
    // object's index/value vectors (via swap with temporaries) and then copied
    // from other_sparse_matrix; when &other_sparse_matrix == this, that copied
    // the freshly-emptied vectors and silently destroyed all stored elements.
    if(this != &other_sparse_matrix)
    {
        rows_number = other_sparse_matrix.rows_number;
        columns_number = other_sparse_matrix.columns_number;

        rows_indices = other_sparse_matrix.rows_indices;
        columns_indices = other_sparse_matrix.columns_indices;
        matrix_values = other_sparse_matrix.matrix_values;
    }

    return(*this);
}
// REFERENCE OPERATORS
/// Reference operator.
/// Returns the element(i,j) of the SparseMatrix; elements without a stored
/// triplet are implicitly the default value of T.
/// @param row Index of row.
/// @param column Index of column.
template <class T>
inline const T SparseMatrix<T>::operator()(const size_t& row, const size_t& column) const
{
    // Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
    if(row >= rows_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "operator()(const size_t&, const size_t&).\n"
               << "Row index (" << row << ") must be less than number of rows (" << rows_number << ").\n";
        throw logic_error(buffer.str());
    }
    else if(column >= columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "operator()(const size_t&, const size_t&).\n"
               << "Column index (" << column << ") must be less than number of columns (" << columns_number << ").\n";
        throw logic_error(buffer.str());
    }
#endif

    // Single linear scan over the stored triplets. The previous version ran
    // two extra O(n) contains() scans (on rows_indices and columns_indices)
    // before this same loop, tripling the lookup cost for the same result.
    const size_t nonzeros_number = rows_indices.size();

    for(size_t i = 0; i < nonzeros_number; i++)
    {
        if(rows_indices[i] == row && columns_indices[i] == column)
        {
            return(matrix_values[i]);
        }
    }

    // No triplet stored at (row, column): implicit default value.
    return(T());
}
// bool operator == (const SparseMatrix<T>&) const
/// Equivalent relational operator between this sparse matrix and another sparse matrix.
/// It produces true if all the elements of the two matrices are equal, and false otherwise.
/// Comparison is done on dimensions, stored-triplet counts, and then triplet values.
/// @param other_sparse_matrix Sparse matrix to be compared with.
template <class T>
bool SparseMatrix<T>::operator == (const SparseMatrix<T>& other_sparse_matrix) const
{
const size_t other_rows_number = other_sparse_matrix.get_rows_number();
const size_t other_columns_number = other_sparse_matrix.get_columns_number();
const size_t other_nonzero_columns_number = other_sparse_matrix.get_columns_indices().size();
const size_t other_nonzero_rows_number = other_sparse_matrix.get_rows_indices().size();
// Different shapes can never be equal.
if(other_rows_number != rows_number || other_columns_number != columns_number)
{
return(false);
}
// Different numbers of stored triplets imply a differing element
// (assumes neither matrix stores explicit default-valued entries — TODO confirm invariant).
else if(other_nonzero_columns_number != columns_indices.size() || other_nonzero_rows_number != rows_indices.size())
{
return false;
}
else if(other_nonzero_columns_number != 0 && other_nonzero_rows_number != 0)
{
// For each triplet of the other matrix, locate the unique triplet here with
// the same (row, column) position and compare the stored values.
for(size_t i = 0; i < other_nonzero_rows_number; i++)
{
const size_t current_row_index = other_sparse_matrix.rows_indices[i];
const size_t current_column_index = other_sparse_matrix.columns_indices[i];
const Vector<size_t> this_equal_rows_indices = rows_indices.calculate_equal_to_indices(current_row_index);
const Vector<size_t> this_equal_columns_indices = columns_indices.calculate_equal_to_indices(current_column_index);
// Intersection of matching row positions and column positions: the triplet
// slot(s) of this matrix at (current_row_index, current_column_index).
const Vector<size_t> intersection = this_equal_rows_indices.get_intersection(this_equal_columns_indices);
// Exactly one matching slot with the same value is required.
if(intersection.size() != 1 || matrix_values[intersection[0]] != other_sparse_matrix.matrix_values[i])
{
return false;
}
}
}
return(true);
}
// bool operator == (const Matrix<T>&)
/// Equivalent relational operator between this sparse matrix and a dense matrix.
/// It produces true if every element (implicit zeros included) matches the
/// corresponding dense element, and false otherwise.
/// @param other_matrix Dense matrix to be compared with.
template <class T>
bool SparseMatrix<T>::operator == (const Matrix<T>& other_matrix) const
{
const size_t other_rows_number = other_matrix.get_rows_number();
const size_t other_columns_number = other_matrix.get_columns_number();
// Different shapes can never be equal.
if(other_rows_number != rows_number || other_columns_number != columns_number)
{
return false;
}
for(size_t i = 0; i < other_rows_number; i++)
{
// Fast path: a row with no stored triplet is all default values here; it
// matches when the dense row is entirely default-valued too.
if(!rows_indices.contains(i) && other_matrix.get_row(i).count_equal_to(T()) == other_columns_number)
{
continue;
}
// Slow path: element-by-element comparison through operator(), which
// returns the implicit default for absent triplets.
for(size_t j = 0; j < other_columns_number; j++)
{
if(other_matrix(i,j) != (*this)(i,j))
{
return false;
}
}
}
return(true);
}
/// Equivalent relational operator between this sparse matrix and a scalar.
/// True only when every element equals the value. Since absent triplets are
/// implicitly the default value, a non-default scalar can only match when the
/// matrix is dense (no implicit elements).
template <class T>
bool SparseMatrix<T>::operator == (const T& value) const
{
    if(value == T() || is_dense())
    {
        return(matrix_values == value);
    }

    return false;
}
// bool operator != (const SparseMatrix<T>&)
/// Not equivalent relational operator between this sparse matrix and other sparse matrix.
/// It produces true if the two matrices have any not equal element, and false otherwise.
/// @param other_sparse_matrix Sparse matrix to be compared with.
template <class T>
bool SparseMatrix<T>::operator != (const SparseMatrix<T>& other_sparse_matrix) const
{
    // Exact logical negation of operator==. The previous version duplicated
    // the whole triplet-comparison algorithm with inverted returns, which had
    // to be kept in sync with operator== by hand; delegating removes that risk.
    return(!((*this) == other_sparse_matrix));
}
// bool operator != (const Matrix<T>&) const
/// Not equivalent relational operator between this sparse matrix and a dense matrix.
/// It produces true if any element differs, and false otherwise.
/// @param other_matrix Dense matrix to be compared with.
template <class T>
bool SparseMatrix<T>::operator != (const Matrix<T>& other_matrix) const
{
    // Exact logical negation of operator==(const Matrix<T>&). The previous
    // version duplicated the comparison loops with inverted returns; delegating
    // keeps the two operators consistent by construction.
    return(!((*this) == other_matrix));
}
// bool operator >(const SparseMatrix<T>&) const
/// Greater than relational operator between this sparse matrix and another one.
/// True only when every element of this matrix (implicit zeros included) is
/// strictly greater than the corresponding element of the other matrix.
/// @param other_sparse_matrix Sparse matrix to be compared with.
template <class T>
bool SparseMatrix<T>::operator >(const SparseMatrix<T>& other_sparse_matrix) const
{
    // Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
    const size_t other_rows_number = other_sparse_matrix.get_rows_number();
    const size_t other_columns_number = other_sparse_matrix.get_columns_number();

    if(other_rows_number != rows_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "bool operator >(const SparseMatrix<T>&) const.\n"
               << "Both numbers of rows must be the same.\n";
        throw logic_error(buffer.str());
    }
    else if(other_columns_number != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "bool operator >(const SparseMatrix<T>&) const.\n"
               << "Both numbers of columns must be the same.\n";
        throw logic_error(buffer.str());
    }
#endif

    // Element-wise scan; the first counterexample decides the result.
    for(size_t row = 0; row < rows_number; row++)
    {
        for(size_t column = 0; column < columns_number; column++)
        {
            if((*this)(row,column) <= other_sparse_matrix(row,column))
            {
                return false;
            }
        }
    }

    return true;
}
// bool operator >(const Matrix<T>&) const
/// Greater than relational operator between this sparse matrix and a dense matrix.
/// True only when every element of this matrix (implicit zeros included) is
/// strictly greater than the corresponding dense element.
/// @param other_matrix Dense matrix to be compared with.
template <class T>
bool SparseMatrix<T>::operator >(const Matrix<T>& other_matrix) const
{
    // Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
    const size_t other_rows_number = other_matrix.get_rows_number();
    const size_t other_columns_number = other_matrix.get_columns_number();

    if(other_rows_number != rows_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "bool operator >(const Matrix<T>&) const.\n"
               << "Both numbers of rows must be the same.\n";
        throw logic_error(buffer.str());
    }
    else if(other_columns_number != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "bool operator >(const Matrix<T>&) const.\n"
               << "Both numbers of columns must be the same.\n";
        throw logic_error(buffer.str());
    }
#endif

    // Element-wise scan; the first counterexample decides the result.
    for(size_t row = 0; row < rows_number; row++)
    {
        for(size_t column = 0; column < columns_number; column++)
        {
            if((*this)(row,column) <= other_matrix(row,column))
            {
                return false;
            }
        }
    }

    return true;
}
/// Greater than relational operator between this sparse matrix and a scalar.
/// A negative threshold is trivially exceeded by the implicit zeros, so only
/// the stored values need checking; a non-negative threshold additionally
/// requires the matrix to be dense (no implicit zeros to fail the test).
template <class T>
bool SparseMatrix<T>::operator >(const T& value) const
{
    if(value < T() || is_dense())
    {
        return(matrix_values > value);
    }

    return false;
}
// bool operator <(const SparseMatrix<T>&) const
/// Less than relational operator between this sparse matrix and another one.
/// True only when every element of this matrix (implicit zeros included) is
/// strictly less than the corresponding element of the other matrix.
/// @param other_sparse_matrix Sparse matrix to be compared with.
template <class T>
bool SparseMatrix<T>::operator <(const SparseMatrix<T>& other_sparse_matrix) const
{
    // Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
    const size_t other_rows_number = other_sparse_matrix.get_rows_number();
    const size_t other_columns_number = other_sparse_matrix.get_columns_number();

    if(other_rows_number != rows_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "bool operator <(const SparseMatrix<T>&) const.\n"
               << "Both numbers of rows must be the same.\n";
        throw logic_error(buffer.str());
    }
    else if(other_columns_number != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "bool operator <(const SparseMatrix<T>&) const.\n"
               << "Both numbers of columns must be the same.\n";
        throw logic_error(buffer.str());
    }
#endif

    // Element-wise scan; the first counterexample decides the result.
    for(size_t row = 0; row < rows_number; row++)
    {
        for(size_t column = 0; column < columns_number; column++)
        {
            if((*this)(row,column) >= other_sparse_matrix(row,column))
            {
                return false;
            }
        }
    }

    return true;
}
// bool operator <(const Matrix<T>&) const
/// Less than relational operator between this sparse matrix and a dense matrix.
/// True only when every element of this matrix (implicit zeros included) is
/// strictly less than the corresponding dense element.
/// @param other_matrix Dense matrix to be compared with.
template <class T>
bool SparseMatrix<T>::operator <(const Matrix<T>& other_matrix) const
{
    // Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
    const size_t other_rows_number = other_matrix.get_rows_number();
    const size_t other_columns_number = other_matrix.get_columns_number();

    if(other_rows_number != rows_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "bool operator <(const Matrix<T>&) const.\n"
               << "Both numbers of rows must be the same.\n";
        throw logic_error(buffer.str());
    }
    else if(other_columns_number != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "bool operator <(const Matrix<T>&) const.\n"
               << "Both numbers of columns must be the same.\n";
        throw logic_error(buffer.str());
    }
#endif

    // Element-wise scan; the first counterexample decides the result.
    for(size_t row = 0; row < rows_number; row++)
    {
        for(size_t column = 0; column < columns_number; column++)
        {
            if((*this)(row,column) >= other_matrix(row,column))
            {
                return false;
            }
        }
    }

    return true;
}
/// Less than relational operator between this sparse matrix and a scalar.
/// A positive threshold is trivially satisfied by the implicit zeros, so only
/// the stored values need checking; a non-positive threshold additionally
/// requires the matrix to be dense (no implicit zeros to fail the test).
template <class T>
bool SparseMatrix<T>::operator <(const T& value) const
{
    if(value > T() || is_dense())
    {
        return(matrix_values < value);
    }

    return false;
}
// bool operator >= (const SparseMatrix<T>&) const
/// Greater than or equal to relational operator between this sparse matrix and
/// another one. True only when every element of this matrix (implicit zeros
/// included) is greater than or equal to the corresponding element of the other.
/// @param other_sparse_matrix Sparse matrix to be compared with.
template <class T>
bool SparseMatrix<T>::operator >= (const SparseMatrix<T>& other_sparse_matrix) const
{
    // Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
    const size_t other_rows_number = other_sparse_matrix.get_rows_number();
    const size_t other_columns_number = other_sparse_matrix.get_columns_number();

    if(other_rows_number != rows_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "bool operator >= (const SparseMatrix<T>&) const.\n"
               << "Both numbers of rows must be the same.\n";
        throw logic_error(buffer.str());
    }
    else if(other_columns_number != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "bool operator >= (const SparseMatrix<T>&) const.\n"
               << "Both numbers of columns must be the same.\n";
        throw logic_error(buffer.str());
    }
#endif

    // Element-wise scan; the first counterexample decides the result.
    for(size_t row = 0; row < rows_number; row++)
    {
        for(size_t column = 0; column < columns_number; column++)
        {
            if((*this)(row,column) < other_sparse_matrix(row,column))
            {
                return false;
            }
        }
    }

    return true;
}
// bool operator >= (const Matrix<T>&) const
/// Greater than or equal to relational operator between this sparse matrix and
/// a dense matrix. True only when every element of this matrix (implicit zeros
/// included) is greater than or equal to the corresponding dense element.
/// @param other_matrix Dense matrix to be compared with.
template <class T>
bool SparseMatrix<T>::operator >= (const Matrix<T>& other_matrix) const
{
    // Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
    const size_t other_rows_number = other_matrix.get_rows_number();
    const size_t other_columns_number = other_matrix.get_columns_number();

    if(other_rows_number != rows_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "bool operator >= (const Matrix<T>&) const.\n"
               << "Both numbers of rows must be the same.\n";
        throw logic_error(buffer.str());
    }
    else if(other_columns_number != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "bool operator >= (const Matrix<T>&) const.\n"
               << "Both numbers of columns must be the same.\n";
        throw logic_error(buffer.str());
    }
#endif

    // Element-wise scan; the first counterexample decides the result.
    for(size_t row = 0; row < rows_number; row++)
    {
        for(size_t column = 0; column < columns_number; column++)
        {
            if((*this)(row,column) < other_matrix(row,column))
            {
                return false;
            }
        }
    }

    return true;
}
/// Greater than or equal to relational operator between this sparse matrix and
/// a scalar. A non-positive threshold is trivially met by the implicit zeros,
/// so only the stored values need checking; a positive threshold additionally
/// requires the matrix to be dense (no implicit zeros to fail the test).
template <class T>
bool SparseMatrix<T>::operator >= (const T& value) const
{
    if(value <= T() || is_dense())
    {
        return(matrix_values >= value);
    }

    return false;
}
// bool operator <= (const SparseMatrix<T>&) const
/// Less than or equal to relational operator between this sparse matrix and
/// another one. True only when every element of this matrix (implicit zeros
/// included) is less than or equal to the corresponding element of the other.
/// @param other_sparse_matrix Sparse matrix to be compared with.
template <class T>
bool SparseMatrix<T>::operator <= (const SparseMatrix<T>& other_sparse_matrix) const
{
    // Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
    const size_t other_rows_number = other_sparse_matrix.get_rows_number();
    const size_t other_columns_number = other_sparse_matrix.get_columns_number();

    if(other_rows_number != rows_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "bool operator <= (const SparseMatrix<T>&) const.\n"
               << "Both numbers of rows must be the same.\n";
        throw logic_error(buffer.str());
    }
    else if(other_columns_number != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "bool operator <= (const SparseMatrix<T>&) const.\n"
               << "Both numbers of columns must be the same.\n";
        throw logic_error(buffer.str());
    }
#endif

    // Element-wise scan; the first counterexample decides the result.
    for(size_t row = 0; row < rows_number; row++)
    {
        for(size_t column = 0; column < columns_number; column++)
        {
            if((*this)(row,column) > other_sparse_matrix(row,column))
            {
                return false;
            }
        }
    }

    return true;
}
// bool operator <= (const Matrix<T>&) const
/// Less than or equal to relational operator between this sparse matrix and a
/// dense matrix. True only when every element of this matrix (implicit zeros
/// included) is less than or equal to the corresponding dense element.
/// @param other_matrix Dense matrix to be compared with.
template <class T>
bool SparseMatrix<T>::operator <= (const Matrix<T>& other_matrix) const
{
    // Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
    const size_t other_rows_number = other_matrix.get_rows_number();
    const size_t other_columns_number = other_matrix.get_columns_number();

    if(other_rows_number != rows_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "bool operator <= (const Matrix<T>&) const.\n"
               << "Both numbers of rows must be the same.\n";
        throw logic_error(buffer.str());
    }
    else if(other_columns_number != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "bool operator <= (const Matrix<T>&) const.\n"
               << "Both numbers of columns must be the same.\n";
        throw logic_error(buffer.str());
    }
#endif

    // Element-wise scan; the first counterexample decides the result.
    for(size_t row = 0; row < rows_number; row++)
    {
        for(size_t column = 0; column < columns_number; column++)
        {
            if((*this)(row,column) > other_matrix(row,column))
            {
                return false;
            }
        }
    }

    return true;
}
/// Less than or equal to relational operator between this sparse matrix and a
/// scalar. A non-negative threshold is trivially met by the implicit zeros, so
/// only the stored values need checking; a negative threshold additionally
/// requires the matrix to be dense (no implicit zeros to fail the test).
template <class T>
bool SparseMatrix<T>::operator <= (const T& value) const
{
    if(value >= T() || is_dense())
    {
        return(matrix_values <= value);
    }

    return false;
}
// METHODS

// Get methods

// size_t get_rows_number() const method
/// Returns the number of rows in the sparse matrix.
template <class T>
const size_t& SparseMatrix<T>::get_rows_number() const
{
    return rows_number;
}
// size_t get_columns_number() const method
/// Returns the number of columns in the sparse matrix.
template <class T>
const size_t& SparseMatrix<T>::get_columns_number() const
{
    return columns_number;
}
// Vector<size_t> get_rows_indices() const method
/// Returns the row index of each stored triplet in the sparse matrix.
template <class T>
const Vector<size_t>& SparseMatrix<T>::get_rows_indices() const
{
    return rows_indices;
}
// Vector<size_t> get_columns_indices() const method
/// Returns the column index of each stored triplet in the sparse matrix.
template <class T>
const Vector<size_t>& SparseMatrix<T>::get_columns_indices() const
{
    return columns_indices;
}
// Vector<T> get_matrix_values() const method
/// Returns the stored (non-default) values in the sparse matrix.
template <class T>
const Vector<T>& SparseMatrix<T>::get_matrix_values() const
{
    return matrix_values;
}
// Set methods

// void set() method
/// Resets the sparse matrix to the empty state: the triplet storage is
/// released and both dimensions are set to zero.
template <class T>
void SparseMatrix<T>::set()
{
    // Swapping with empty temporaries also releases the vectors' capacity.
    Vector<size_t>().swap(rows_indices);
    Vector<size_t>().swap(columns_indices);
    Vector<T>().swap(matrix_values);

    rows_number = 0;
    columns_number = 0;
}
// void set(const size_t&, const size_t&) method
/// Resizes the sparse matrix to the given dimensions, discarding any stored
/// triplets (all elements become the implicit default value).
/// @param new_rows_number Number of rows.
/// @param new_columns_number Number of columns.
template <class T>
void SparseMatrix<T>::set(const size_t& new_rows_number, const size_t& new_columns_number)
{
    // Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
    if(new_rows_number == 0)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "void set(const size_t&, const size_t&) method.\n"
               << "Number of rows must be greater than zero.\n";
        throw logic_error(buffer.str());
    }
    else if(new_columns_number == 0)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "void set(const size_t&, const size_t&) method.\n"
               << "Number of columns must be greater than zero.\n";
        throw logic_error(buffer.str());
    }
#endif

    // Record the new shape, then drop the old triplets (swap releases capacity).
    rows_number = new_rows_number;
    columns_number = new_columns_number;

    Vector<size_t>().swap(rows_indices);
    Vector<size_t>().swap(columns_indices);
    Vector<T>().swap(matrix_values);
}
// void set(const SparseMatrix<T>&) method
/// Sets all the members of the sparse matrix to those of another sparse matrix.
/// @param other_sparse_matrix Setting sparse matrix.
template <class T>
void SparseMatrix<T>::set(const SparseMatrix<T>& other_sparse_matrix)
{
    // Self-assignment guard. The previous implementation emptied this object's
    // vectors (swap with temporaries) before copying from other_sparse_matrix;
    // m.set(m) therefore copied the freshly-emptied vectors and lost all data.
    if(this == &other_sparse_matrix)
    {
        return;
    }

    rows_number = other_sparse_matrix.rows_number;
    columns_number = other_sparse_matrix.columns_number;

    rows_indices = other_sparse_matrix.rows_indices;
    columns_indices = other_sparse_matrix.columns_indices;
    matrix_values = other_sparse_matrix.matrix_values;
}
// void set(const string&) method
/// Sets the members of this object by loading them from a data file.
/// The matrix is first reset to the empty state so stale triplets cannot
/// survive a (partial) load.
/// @param file_name Name of data file.
template <class T>
void SparseMatrix<T>::set(const string& file_name)
{
set();
load(file_name);
}
// void set_identity(const size_t&) method
/// Sets the sparse matrix to be squared, with elements equal one in the diagonal and zero outside the diagonal.
/// Implemented as a resize (which discards existing triplets) followed by
/// initialize_identity().
/// @param new_size New number of rows and columns in this sparse matrix.
template <class T>
void SparseMatrix<T>::set_identity(const size_t& new_size)
{
set(new_size, new_size);
initialize_identity();
}
// void set_rows_number(const size_t&) method
/// Sets a new number of rows in the sparse matrix, deleting every stored
/// triplet whose row index falls outside the new bounds.
/// @param new_rows_number Number of sparse matrix rows.
template <class T>
void SparseMatrix<T>::set_rows_number(const size_t& new_rows_number)
{
    // Find the triplets that no longer fit. This mirrors set_columns_number:
    // the previous version built a temporary Vector<bool> plus a second pass
    // over it to compute exactly this index set.
    Vector<size_t> greater_than_indices = rows_indices.calculate_greater_equal_to_indices(new_rows_number);

    if(greater_than_indices.size() > 0)
    {
        rows_indices = rows_indices.delete_indices(greater_than_indices);
        columns_indices = columns_indices.delete_indices(greater_than_indices);
        matrix_values = matrix_values.delete_indices(greater_than_indices);
    }

    rows_number = new_rows_number;
}
// void set_columns_number(const size_t&) method
/// Sets a new number of columns in the sparse matrix, deleting every stored
/// triplet whose column index falls outside the new bounds.
/// @param new_columns_number Number of sparse matrix columns.
template <class T>
void SparseMatrix<T>::set_columns_number(const size_t& new_columns_number)
{
    // Triplets whose column index no longer fits must be dropped from all
    // three parallel vectors to keep them aligned.
    Vector<size_t> out_of_range_indices = columns_indices.calculate_greater_equal_to_indices(new_columns_number);

    if(out_of_range_indices.size() > 0)
    {
        rows_indices = rows_indices.delete_indices(out_of_range_indices);
        columns_indices = columns_indices.delete_indices(out_of_range_indices);
        matrix_values = matrix_values.delete_indices(out_of_range_indices);
    }

    columns_number = new_columns_number;
}
/// Sets the element at (row_index, column_index). A non-default value is
/// inserted or overwritten; setting an existing element to the default value
/// removes its triplet so the element becomes implicit again.
/// @param row_index Index of row.
/// @param column_index Index of column.
/// @param value Value to store.
template <class T>
void SparseMatrix<T>::set_element(const size_t& row_index, const size_t& column_index, const T& value)
{
    // Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
    if(row_index >= rows_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "set_element(const size_t&, const size_t&, const T&).\n"
               << "Row index (" << row_index << ") must be less than number of rows (" << rows_number << ").\n";
        throw logic_error(buffer.str());
    }
    else if(column_index >= columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "set_element(const size_t&, const size_t&, const T&).\n"
               << "Column index (" << column_index << ") must be less than number of columns (" << columns_number << ").\n";
        throw logic_error(buffer.str());
    }
#endif

    // Locate an existing triplet at (row_index, column_index), if any.
    // NOTE: the previous version returned early whenever value == T(), which
    // made its own deletion branch unreachable — overwriting a stored element
    // with zero silently left the stale value in place.
    Vector<size_t> equal_row_indices = rows_indices.calculate_equal_to_indices(row_index);
    Vector<size_t> equal_columns_indices = columns_indices.calculate_equal_to_indices(column_index);

    Vector<size_t> intersection = equal_row_indices.get_intersection(equal_columns_indices);

    if(intersection.size() == 0)
    {
        // No triplet yet: only non-default values are stored explicitly.
        if(value != T())
        {
            rows_indices.push_back(row_index);
            columns_indices.push_back(column_index);
            matrix_values.push_back(value);
        }
    }
    else if(value != T())
    {
        // Overwrite the existing triplet's value.
        matrix_values[intersection[0]] = value;
    }
    else
    {
        // Default value: delete the triplet so the element becomes implicit.
        rows_indices = rows_indices.delete_index(intersection[0]);
        columns_indices = columns_indices.delete_index(intersection[0]);
        matrix_values = matrix_values.delete_index(intersection[0]);
    }
}
/// Replaces the whole triplet storage with the given parallel vectors of row
/// indices, column indices and values. The matrix dimensions are unchanged.
/// @param new_rows_indices Row index of each triplet.
/// @param new_columns_indices Column index of each triplet.
/// @param new_matrix_values Value of each triplet.
template <class T>
void SparseMatrix<T>::set_values(const Vector<size_t>& new_rows_indices, const Vector<size_t>& new_columns_indices, const Vector<T>& new_matrix_values)
{
#ifdef __OPENNN_DEBUG__
    if(new_rows_indices.size() != new_columns_indices.size() || new_columns_indices.size() != new_matrix_values.size())
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "void set_values(const Vector<size_t>&, const Vector<size_t>&, const Vector<T>&) const method.\n"
               << "Size of the three vector must be the same.\n";
        throw logic_error(buffer.str());
    }

    const size_t maximum_new_rows_indices = new_rows_indices.calculate_maximum();

    if(maximum_new_rows_indices > rows_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "void set_values(const Vector<size_t>&, const Vector<size_t>&, const Vector<T>&) const method.\n"
               << "Maximum of new row indices(" << maximum_new_rows_indices << ") must be less than the number of rows(" << rows_number << ").\n";
        throw logic_error(buffer.str());
    }

    const size_t maximum_new_columns_indices = new_columns_indices.calculate_maximum();

    if(maximum_new_columns_indices > columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "void set_values(const Vector<size_t>&, const Vector<size_t>&, const Vector<T>&) const method.\n"
               << "Maximum of new columns indices(" << maximum_new_columns_indices << ") must be less than the number of columns(" << columns_number << ").\n";
        throw logic_error(buffer.str());
    }
#endif

    // Copy-assign the three parallel vectors; the old storage is discarded.
    rows_indices = new_rows_indices;
    columns_indices = new_columns_indices;
    matrix_values = new_matrix_values;
}
// Count methods

// size_t count_diagonal_elements() const method
/// Returns the number of stored (non-default) elements lying on the diagonal.
/// This method is only defined for square matrices.
template <class T>
size_t SparseMatrix<T>::count_diagonal_elements() const
{
    size_t count = 0;

    const size_t stored_elements_number = matrix_values.size();

    for(size_t element = 0; element < stored_elements_number; element++)
    {
        if(rows_indices[element] == columns_indices[element])
        {
            count++;
        }
    }

    return count;
}
// size_t count_off_diagonal_elements() const method
/// Returns the number of stored (non-default) elements lying off the diagonal.
/// This method is only defined for square matrices.
template <class T>
size_t SparseMatrix<T>::count_off_diagonal_elements() const
{
    size_t count = 0;

    const size_t stored_elements_number = matrix_values.size();

    for(size_t element = 0; element < stored_elements_number; element++)
    {
        if(rows_indices[element] != columns_indices[element])
        {
            count++;
        }
    }

    return count;
}
// size_t count_equal_to(const T&) const method
/// Returns the number of elements in the sparse matrix that are equal to a
/// given value. For the default value this is the count of implicit elements
/// (total size minus stored triplets); otherwise only stored values can match.
/// @param value Value to find.
template <class T>
size_t SparseMatrix<T>::count_equal_to(const T& value) const
{
    return value == T()
               ? rows_number*columns_number - matrix_values.size()
               : matrix_values.count_equal_to(value);
}
// size_t count_equal_to(const size_t&, const T&) const method
/// Returns the number of elements in a given column that are equal to a given
/// value. For the default value this is the count of implicit elements in the
/// column; otherwise only the column's stored values can match.
/// @param column_index Index of column.
/// @param value Value to find.
template <class T>
size_t SparseMatrix<T>::count_equal_to(const size_t& column_index, const T& value) const
{
#ifdef __OPENNN_DEBUG__
    if(column_index > columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "size_t count_equal_to(const size_t&, const T&) const method.\n"
               << "Column index(" << column_index << ") must be less than number of columns(" << columns_number << ").\n";
        throw logic_error(buffer.str());
    }
#endif

    // Positions (in the triplet vectors) of the stored entries of this column.
    const Vector<size_t> column_nonzero_indices = columns_indices.calculate_equal_to_indices(column_index);

    const size_t count = value == T()
                             ? rows_number - column_nonzero_indices.size()
                             : matrix_values.get_subvector(column_nonzero_indices).count_equal_to(value);

    return(count);
}
// size_t count_not_equal_to(const T&) const method
/// Returns the number of elements in the sparse matrix that are not equal to a
/// given value. For the default value this is simply the number of stored
/// triplets; otherwise it is the total size minus the matching stored values.
/// @param value Value to find.
template <class T>
size_t SparseMatrix<T>::count_not_equal_to(const T& value) const
{
    return value == T()
               ? matrix_values.size()
               : rows_number*columns_number - matrix_values.count_equal_to(value);
}
// size_t count_not_equal_to(const size_t&, const T&) const method
/// Returns the number of elements in a given column that are not equal to a
/// given value. For the default value this is the column's stored-triplet
/// count; otherwise it is the column height minus the matching stored values.
/// @param column_index Index of column.
/// @param value Value to find.
template <class T>
size_t SparseMatrix<T>::count_not_equal_to(const size_t& column_index, const T& value) const
{
#ifdef __OPENNN_DEBUG__
    if(column_index > columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "size_t count_not_equal_to(const size_t&, const T&) const method.\n"
               << "Column index(" << column_index << ") must be less than number of columns(" << columns_number << ").\n";
        throw logic_error(buffer.str());
    }
#endif

    // Positions (in the triplet vectors) of the stored entries of this column.
    const Vector<size_t> column_nonzero_indices = columns_indices.calculate_equal_to_indices(column_index);

    const size_t count = value == T()
                             ? column_nonzero_indices.size()
                             : rows_number - matrix_values.get_subvector(column_nonzero_indices).count_equal_to(value);

    return(count);
}
/// Returns how many rows have the given value in every one of the given
/// columns.
/// @param column_indices Columns that must all hold the value.
/// @param value Value to find.
template <class T>
size_t SparseMatrix<T>::count_rows_equal_to(const Vector<size_t>& column_indices, const T& value) const
{
    // The row scan here was a line-for-line duplicate of
    // get_row_indices_equal_to; counting that method's result keeps a single
    // copy of the matching logic.
    return(get_row_indices_equal_to(column_indices, value).size());
}
/// Returns true when the given row holds the given value in every one of the
/// given columns, false otherwise.
/// @param row_index Index of the row to check.
/// @param column_indices Columns that must all hold the value.
/// @param value Value to find.
template <class T>
bool SparseMatrix<T>::is_row_equal_to(const size_t& row_index, const Vector<size_t>& column_indices, const T& value) const
{
    // Materialize the row once (implicit defaults included), then test only
    // the requested columns.
    const Vector<T> row = get_row(row_index);

    const size_t checked_columns_number = column_indices.size();

    for(size_t k = 0; k < checked_columns_number; k++)
    {
        if(row[column_indices[k]] != value)
        {
            return false;
        }
    }

    return true;
}
/// Returns the indices of the rows that hold the given value in every one of
/// the given columns.
/// @param column_indices Columns that must all hold the value.
/// @param value Value to find.
template <class T>
Vector<size_t> SparseMatrix<T>::get_row_indices_equal_to(const Vector<size_t>& column_indices, const T& value) const
{
    const size_t selected_columns_number = column_indices.size();

    // Materialize the requested columns once so every row check is a plain
    // indexed read instead of a sparse lookup.
    Vector< Vector<T> > selected_columns(selected_columns_number);

    for(size_t c = 0; c < selected_columns_number; c++)
    {
        selected_columns[c] = get_column(column_indices[c]);
    }

    // A row matches until one of its selected columns disagrees.
    Vector<bool> row_matches(rows_number, true);

    for(size_t r = 0; r < rows_number; r++)
    {
        for(size_t c = 0; c < selected_columns_number; c++)
        {
            if(selected_columns[c][r] != value)
            {
                row_matches[r] = false;
                break;
            }
        }
    }

    return(row_matches.calculate_equal_to_indices(true));
}
/// Returns a sparse matrix with the values of given rows and columns from this sparse matrix.
/// @param row_indices Indices of sparse matrix rows.
/// @param column_indices Indices of sparse matrix columns.
template <class T>
SparseMatrix<T> SparseMatrix<T>::get_sub_sparse_matrix(const Vector<size_t>& row_indices, const Vector<size_t>& column_indices) const
{
    const size_t new_rows_number = row_indices.size();
    const size_t new_columns_number = column_indices.size();

    SparseMatrix<T> sub_sparse_matrix(new_rows_number, new_columns_number);

    for(size_t i = 0; i < new_rows_number; i++)
    {
        const size_t source_row = row_indices[i];

        for(size_t j = 0; j < new_columns_number; j++)
        {
            // Only non-default values need to be stored in the result.
            const T value = (*this)(source_row, column_indices[j]);

            if(value != T())
            {
                sub_sparse_matrix.set_element(i, j, value);
            }
        }
    }

    return(sub_sparse_matrix);
}
/// Returns a sub sparse matrix with the values of given rows from this sparse matrix.
/// @param row_indices Indices of sparse matrix rows.
template <class T>
SparseMatrix<T> SparseMatrix<T>::get_sub_sparse_matrix_rows(const Vector<size_t>& row_indices) const
{
    const size_t new_rows_number = row_indices.size();

    SparseMatrix<T> sub_sparse_matrix(new_rows_number, columns_number);

    for(size_t i = 0; i < new_rows_number; i++)
    {
        const size_t source_row = row_indices[i];

        for(size_t j = 0; j < columns_number; j++)
        {
            // Only non-default values need to be stored in the result.
            const T value = (*this)(source_row, j);

            if(value != T())
            {
                sub_sparse_matrix.set_element(i, j, value);
            }
        }
    }

    return(sub_sparse_matrix);
}
/// Returns a sub sparse matrix with the values of given columns from this sparse matrix.
/// @param column_indices Indices of sparse matrix columns.
template <class T>
SparseMatrix<T> SparseMatrix<T>::get_sub_sparse_matrix_columns(const Vector<size_t>& column_indices) const
{
    const size_t column_indices_size = column_indices.size();

    // Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
    for(size_t i = 0; i < column_indices_size; i++)
    {
        if(column_indices[i] >= columns_number)
        {
            ostringstream buffer;
            buffer << "OpenNN Exception: SparseMatrix Template.\n"
                   << "SparseMatrix<T> get_sub_sparse_matrix_columns(const Vector<size_t>&) const method.\n"
                   // Bug fix: report the offending column index, not the loop counter.
                   << "Column index(" << column_indices[i] << ") must be less than number of columns(" << columns_number << ").\n";
            throw logic_error(buffer.str());
        }
    }
#endif
    SparseMatrix<T> sub_sparse_matrix(rows_number, column_indices_size);

    for(size_t i = 0; i < rows_number; i++)
    {
        for(size_t j = 0; j < column_indices_size; j++)
        {
            const size_t column_index = column_indices[j];

            // Only non-default values need to be stored in the result.
            const T current_value = (*this)(i,column_index);

            if(current_value != T())
            {
                sub_sparse_matrix.set_element(i,j,current_value);
            }
        }
    }

    return(sub_sparse_matrix);
}
// Vector<T> get_row(const size_t&) const method
/// Returns the row i of the sparse matrix as a dense vector;
/// unstored positions yield the default value T().
/// @param i Index of row.
template <class T>
Vector<T> SparseMatrix<T>::get_row(const size_t& i) const
{
    // Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
    if(i >= rows_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "Vector<T> get_row(const size_t&) const method.\n"
               << "Row index (" << i << ") must be less than number of rows (" << rows_number << ").\n";
        throw logic_error(buffer.str());
    }
#endif
    Vector<T> row(columns_number, T());

    // Scatter every stored entry of row i into the dense result.
    const size_t stored_number = rows_indices.size();

    for(size_t k = 0; k < stored_number; k++)
    {
        if(rows_indices[k] == i)
        {
            row[columns_indices[k]] = matrix_values[k];
        }
    }

    return(row);
}
template <class T>
Vector<T> SparseMatrix<T>::get_rows(const size_t& first_index, const size_t& last_index) const
{
// Returns rows first_index..last_index concatenated into a single vector.
// NOTE(review): the loop starts at first_index-1, so first_index appears to be
// 1-based while get_row() is 0-based — confirm against callers.
// WARNING(review): first_index == 0 makes first_index-1 underflow size_t, and
// first_index is never range-checked; only last_index is validated below.
#ifdef __OPENNN_DEBUG__
if(last_index > rows_number)
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix Template.\n"
<< "Vector<T> get_rows(const size_t&, const size_t&) const method.\n"
<< "Last index(" << last_index << ") must be less than number of rows(" << rows_number << ").\n";
throw logic_error(buffer.str());
}
#endif
Vector<T> new_row;
// Append each requested row onto the end of the result.
for(size_t i = first_index-1; i < last_index; i++)
{
new_row = new_row.assemble(get_row(i));
}
return new_row;
}
// Vector<T> get_row(const size_t&, const Vector<size_t>&) const method
/// Returns the row i of the sparse matrix, but only the elements specified by given indices.
/// @param row_index Index of row.
/// @param column_indices Column indices of row.
template <class T>
Vector<T> SparseMatrix<T>::get_row(const size_t& row_index, const Vector<size_t>& column_indices) const
{
    // Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
    if(row_index >= rows_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "Vector<T> get_row(const size_t&, const Vector<size_t>&) const method.\n"
               << "Row index (" << row_index << ") must be less than number of rows (" << rows_number << ").\n";
        throw logic_error(buffer.str());
    }
#endif
    const size_t selection_size = column_indices.size();

    Vector<T> row(selection_size, T());

    // Gather the requested elements in the order the indices were given.
    for(size_t k = 0; k < selection_size; k++)
    {
        row[k] = (*this)(row_index, column_indices[k]);
    }

    return(row);
}
// Vector<T> get_column(const size_t&) const method
/// Returns the column j of the sparse matrix as a dense vector;
/// unstored positions yield the default value T().
/// @param j Index of column.
template <class T>
Vector<T> SparseMatrix<T>::get_column(const size_t& j) const
{
    // Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
    if(j >= columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "Vector<T> get_column(const size_t&) const method.\n"
               << "Column index(" << j << ") must be less than number of columns(" << columns_number << ").\n";
        throw logic_error(buffer.str());
    }
#endif
    Vector<T> column(rows_number, T());

    // Fast path: this column stores no values at all.
    if(!columns_indices.contains(j))
    {
        return column;
    }

    // Scatter every stored entry of column j into the dense result.
    const size_t stored_number = columns_indices.size();

    for(size_t k = 0; k < stored_number; k++)
    {
        if(columns_indices[k] == j)
        {
            column[rows_indices[k]] = matrix_values[k];
        }
    }

    return(column);
}
/// Returns the number of distinct values in the matrix, counting the implicit
/// default value T() when at least one element is not explicitly stored and no
/// stored value already equals T().
template <class T>
size_t SparseMatrix<T>::count_unique() const
{
    // Does the matrix contain implicit default entries not covered by the
    // stored values?
    const bool has_implicit_default =
        matrix_values.size() < rows_number*columns_number && !matrix_values.contains(T());

    size_t unique_count = matrix_values.count_unique();

    if(has_implicit_default)
    {
        unique_count++;
    }

    return unique_count;
}
/// Returns the main diagonal as a dense vector; diagonal positions without a
/// stored entry yield the default value T().
template <class T>
Vector<T> SparseMatrix<T>::get_diagonal() const
{
    Vector<T> diagonal(rows_number, T());

    const size_t stored_number = rows_indices.size();

    // Copy every stored entry that sits on the diagonal.
    for(size_t k = 0; k < stored_number; k++)
    {
        if(rows_indices[k] == columns_indices[k])
        {
            diagonal[rows_indices[k]] = matrix_values[k];
        }
    }

    return diagonal;
}
/// Replaces the contents of a row with the values of a given vector.
/// Previously stored entries of that row are removed; only non-default values
/// of the new row are stored.
/// @param row_index Index of the row to overwrite.
/// @param new_row New values of the row.
template <class T>
void SparseMatrix<T>::set_row(const size_t& row_index, const Vector<T>& new_row)
{
    const size_t new_row_size = new_row.size();

    // Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
    if(row_index >= rows_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "set_row(const size_t&, const Vector<T>&) method.\n"
               << "Index must be less than number of rows.\n";
        throw logic_error(buffer.str());
    }

    if(new_row_size != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "set_row(const size_t&, const Vector<T>&) method.\n"
               << "Size(" << new_row_size << ") must be equal to number of columns(" << columns_number << ").\n";
        throw logic_error(buffer.str());
    }
#endif
    // Drop every entry currently stored for this row.
    if(rows_indices.size() != 0)
    {
        const Vector<size_t> old_entries = rows_indices.calculate_equal_to_indices(row_index);

        rows_indices = rows_indices.delete_indices(old_entries);
        columns_indices = columns_indices.delete_indices(old_entries);
        matrix_values = matrix_values.delete_indices(old_entries);
    }

    // Store only the non-default elements of the new row.
    for(size_t j = 0; j < new_row_size; j++)
    {
        if(new_row[j] != T())
        {
            rows_indices.push_back(row_index);
            columns_indices.push_back(j);
            matrix_values.push_back(new_row[j]);
        }
    }
}
/// Sets every element of a row to the same value.
/// Previously stored entries of that row are removed; when the value is the
/// default T(), nothing is stored(the row becomes implicitly zero).
/// @param row_index Index of the row to overwrite.
/// @param value Value assigned to every element of the row.
template <class T>
void SparseMatrix<T>::set_row(const size_t& row_index, const T& value)
{
    // Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
    if(row_index >= rows_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "set_row(const size_t&, const T&) method.\n"
               << "Index must be less than number of rows.\n";
        throw logic_error(buffer.str());
    }
#endif
    // Drop every entry currently stored for this row.
    if(rows_indices.size() != 0)
    {
        const Vector<size_t> old_entries = rows_indices.calculate_equal_to_indices(row_index);

        rows_indices = rows_indices.delete_indices(old_entries);
        columns_indices = columns_indices.delete_indices(old_entries);
        matrix_values = matrix_values.delete_indices(old_entries);
    }

    // A default value means an empty row: nothing to store.
    if(value == T())
    {
        return;
    }

    for(size_t j = 0; j < columns_number; j++)
    {
        rows_indices.push_back(row_index);
        columns_indices.push_back(j);
        matrix_values.push_back(value);
    }
}
/// Replaces the contents of a column with the values of a given vector.
/// Previously stored entries of that column are removed; only non-default
/// values of the new column are stored.
/// @param column_index Index of the column to overwrite.
/// @param new_column New values of the column.
template <class T>
void SparseMatrix<T>::set_column(const size_t& column_index, const Vector<T>& new_column)
{
    const size_t new_column_size = new_column.size();

    // Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
    if(column_index >= columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "set_column(const size_t&, const Vector<T>&).\n"
               << "Index(" << column_index << ") must be less than number of columns(" << columns_number << ").\n";
        throw logic_error(buffer.str());
    }

    if(new_column_size != rows_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "set_column(const size_t&, const Vector<T>&).\n"
               << "Size must be equal to number of rows.\n";
        throw logic_error(buffer.str());
    }
#endif
    // Drop every entry currently stored for this column.
    if(columns_indices.size() != 0)
    {
        const Vector<size_t> old_entries = columns_indices.calculate_equal_to_indices(column_index);

        rows_indices = rows_indices.delete_indices(old_entries);
        columns_indices = columns_indices.delete_indices(old_entries);
        matrix_values = matrix_values.delete_indices(old_entries);
    }

    // Store only the non-default elements of the new column.
    for(size_t i = 0; i < new_column_size; i++)
    {
        if(new_column[i] != T())
        {
            rows_indices.push_back(i);
            columns_indices.push_back(column_index);
            matrix_values.push_back(new_column[i]);
        }
    }
}
/// Sets every element of a column to the same value.
/// Previously stored entries of that column are removed; when the value is the
/// default T(), nothing is stored(the column becomes implicitly zero).
/// @param column_index Index of the column to overwrite.
/// @param value Value assigned to every element of the column.
template <class T>
void SparseMatrix<T>::set_column(const size_t& column_index, const T& value)
{
    // Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
    if(column_index >= columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "set_column(const size_t&, const T&).\n"
               << "Index must be less than number of columns.\n";
        throw logic_error(buffer.str());
    }
#endif
    // Drop every entry currently stored for this column.
    if(columns_indices.size() != 0)
    {
        const Vector<size_t> old_entries = columns_indices.calculate_equal_to_indices(column_index);

        rows_indices = rows_indices.delete_indices(old_entries);
        columns_indices = columns_indices.delete_indices(old_entries);
        matrix_values = matrix_values.delete_indices(old_entries);
    }

    // A default value means an empty column: nothing to store.
    if(value == T())
    {
        return;
    }

    for(size_t i = 0; i < rows_number; i++)
    {
        rows_indices.push_back(i);
        columns_indices.push_back(column_index);
        matrix_values.push_back(value);
    }
}
/// Sets every element of the main diagonal to the same value.
/// Previously stored diagonal entries are removed first; when the value is the
/// default T(), nothing is stored(the diagonal becomes implicitly zero).
/// @param new_diagonal Value assigned to every diagonal element.
template <class T>
void SparseMatrix<T>::set_diagonal(const T& new_diagonal)
{
    // Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
    if(rows_number != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "set_diagonal(const T&).\n"
               << "SparseMatrix must be square.\n";
        throw logic_error(buffer.str());
    }
#endif
    // Remove any diagonal entries that are currently stored.
    const size_t stored_number = matrix_values.size();

    if(stored_number != 0)
    {
        Vector<size_t> diagonal_positions;

        for(size_t k = 0; k < stored_number; k++)
        {
            if(rows_indices[k] == columns_indices[k])
            {
                diagonal_positions.push_back(k);
            }
        }

        rows_indices = rows_indices.delete_indices(diagonal_positions);
        columns_indices = columns_indices.delete_indices(diagonal_positions);
        matrix_values = matrix_values.delete_indices(diagonal_positions);
    }

    // A default value means an empty diagonal: nothing to store.
    if(new_diagonal == T())
    {
        return;
    }

    for(size_t k = 0; k < rows_number; k++)
    {
        rows_indices.push_back(k);
        columns_indices.push_back(k);
        matrix_values.push_back(new_diagonal);
    }
}
/// Sets the main diagonal to the values of a given vector.
/// Previously stored diagonal entries are removed first; only non-default
/// values of the new diagonal are stored.
/// @param new_diagonal New values of the diagonal.
template <class T>
void SparseMatrix<T>::set_diagonal(const Vector<T>& new_diagonal)
{
    // Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
    if(rows_number != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "set_diagonal(const Vector<T>&) const.\n"
               << "SparseMatrix is not square.\n";
        throw logic_error(buffer.str());
    }

    const size_t size = new_diagonal.size();

    if(size != rows_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "set_diagonal(const Vector<T>&) const.\n"
               << "Size of diagonal(" << size << ") is not equal to size of Sparsematrix (" << rows_number << ").\n";
        throw logic_error(buffer.str());
    }
#endif
    // Remove any diagonal entries that are currently stored.
    const size_t stored_number = matrix_values.size();

    if(stored_number != 0)
    {
        Vector<size_t> diagonal_positions;

        for(size_t k = 0; k < stored_number; k++)
        {
            if(rows_indices[k] == columns_indices[k])
            {
                diagonal_positions.push_back(k);
            }
        }

        rows_indices = rows_indices.delete_indices(diagonal_positions);
        columns_indices = columns_indices.delete_indices(diagonal_positions);
        matrix_values = matrix_values.delete_indices(diagonal_positions);
    }

    // Store only the non-default elements of the new diagonal.
    for(size_t k = 0; k < rows_number; k++)
    {
        if(new_diagonal[k] != T())
        {
            rows_indices.push_back(k);
            columns_indices.push_back(k);
            matrix_values.push_back(new_diagonal[k]);
        }
    }
}
/// Sets this matrix to be a square diagonal matrix of its current size with the
/// given value on the diagonal.
/// @param value Value placed on every diagonal element.
template <class T>
void SparseMatrix<T>::initialize_diagonal(const T& value)
{
    // Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
    if(rows_number != columns_number || rows_number == 0)
    {
        ostringstream buffer;
        // Bug fix: the message was streamed to cout instead of buffer, so the
        // thrown logic_error carried an empty what() string.
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "initialize_diagonal(const T&) const method.\n"
               << "SparseMatrix must be square.\n";
        throw logic_error(buffer.str());
    }
#endif
    initialize_diagonal(rows_number,value);
}
template <class T>
void SparseMatrix<T>::initialize_diagonal(const size_t& new_size, const T& new_value)
{
// Resizes the matrix to new_size x new_size and sets every diagonal element
// to new_value. Note: set() discards any previously stored values.
set(new_size,new_size);
set_diagonal(new_value);
}
template <class T>
void SparseMatrix<T>::initialize_diagonal(const size_t& new_size, const Vector<T>& new_values)
{
// Resizes the matrix to new_size x new_size and sets the diagonal to the given
// vector of values. Note: set() discards any previously stored values.
set(new_size,new_size);
set_diagonal(new_values);
}
template <class T>
void SparseMatrix<T>::initialize_identity()
{
// Turns the matrix into the identity: resizes to rows_number x rows_number and
// places 1 on the diagonal. Assumes T is constructible/assignable from 1.
initialize_diagonal(rows_number,1);
}
/// Returns a copy of this sparse matrix with the given scalar added to every
/// element of the main diagonal.
/// @param value Value added to each diagonal element.
template <class T>
SparseMatrix<T> SparseMatrix<T>::sum_diagonal(const T& value) const
{
    // Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
    if(rows_number != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "sum_diagonal(const T&) const.\n"
               << "SparseMatrix must be square.\n";
        throw logic_error(buffer.str());
    }
#endif
    SparseMatrix<T> sum(*this);

    // get_diagonal() yields T() for unstored diagonal positions, so adding the
    // value element-wise covers stored and implicit entries uniformly.
    // Bug fix: the previous implementation re-assigned plain `value` to any
    // diagonal position whose sum happened to equal T()(e.g. when value is the
    // negative of a stored element), silently corrupting that entry.
    Vector<T> new_diagonal = get_diagonal();

    for(size_t i = 0; i < rows_number; i++)
    {
        new_diagonal[i] = new_diagonal[i] + value;
    }

    sum.set_diagonal(new_diagonal);

    return(sum);
}
/// Returns a copy of this sparse matrix with the given vector added
/// element-wise to the main diagonal.
/// @param values Values added to the diagonal, one per row.
template <class T>
SparseMatrix<T> SparseMatrix<T>::sum_diagonal(const Vector<T>& values) const
{
    // Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
    if(rows_number != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "sum_diagonal(const Vector<T>&) const.\n"
               << "SparseMatrix must be square.\n";
        throw logic_error(buffer.str());
    }

    const size_t size = values.size();

    if(size != rows_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "sum_diagonal(const Vector<T>&) const.\n"
               << "Size must be equal to number of rows.\n";
        throw logic_error(buffer.str());
    }
#endif
    SparseMatrix<T> sum(*this);

    // Bug fix: the previous zero-fill pass read values[rows_indices[i]] with a
    // stale loop variable, which is out of range for rows without stored
    // entries, and it also overwrote sums that legitimately equalled T().
    // Using get_diagonal() plus an element-wise addition handles stored and
    // implicit diagonal entries correctly.
    Vector<T> new_diagonal = get_diagonal();

    for(size_t i = 0; i < rows_number; i++)
    {
        new_diagonal[i] = new_diagonal[i] + values[i];
    }

    sum.set_diagonal(new_diagonal);

    return(sum);
}
/// Appends a new row at the bottom of the matrix, increasing the number of rows
/// by one. Only the non-default values of the new row are stored.
/// @param new_row Values of the appended row.
template <class T>
void SparseMatrix<T>::append_row(const Vector<T>& new_row)
{
    const size_t new_row_size = new_row.size();

#ifdef __OPENNN_DEBUG__
    if(new_row_size != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "append_row(const Vector<T>&) const.\n"
               << "Size(" << new_row_size << ") must be equal to number of columns(" << columns_number << ").\n";
        throw logic_error(buffer.str());
    }
#endif
    rows_number += 1;

    const size_t appended_row_index = rows_number - 1;

    for(size_t j = 0; j < new_row_size; j++)
    {
        if(new_row[j] != T())
        {
            rows_indices.push_back(appended_row_index);
            columns_indices.push_back(j);
            matrix_values.push_back(new_row[j]);
        }
    }
}
/// Appends a new column at the right of the matrix, increasing the number of
/// columns by one. Only the non-default values of the new column are stored.
/// @param new_column Values of the appended column.
template <class T>
void SparseMatrix<T>::append_column(const Vector<T>& new_column)
{
    const size_t new_column_size = new_column.size();

#ifdef __OPENNN_DEBUG__
    if(new_column_size != rows_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "append_column(const Vector<T>&) const.\n"
               << "Size(" << new_column_size << ") must be equal to number of rows(" << rows_number << ").\n";
        throw logic_error(buffer.str());
    }
#endif
    columns_number += 1;

    const size_t appended_column_index = columns_number - 1;

    for(size_t i = 0; i < new_column_size; i++)
    {
        if(new_column[i] != T())
        {
            rows_indices.push_back(i);
            columns_indices.push_back(appended_column_index);
            matrix_values.push_back(new_column[i]);
        }
    }
}
/// Returns a copy of this sparse matrix with a new row inserted at the given
/// position; rows at or below the position shift down by one.
/// @param position Row index at which the new row is placed.
/// @param new_row Values of the inserted row; default values are not stored.
template <class T>
SparseMatrix<T> SparseMatrix<T>::insert_row(const size_t& position, const Vector<T>& new_row) const
{
    const size_t new_row_size = new_row.size();

#ifdef __OPENNN_DEBUG__
    if(position > rows_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "insert_row(const size_t&, const Vector<T>&) const.\n"
               << "Position must be less or equal than number of rows.\n";
        throw logic_error(buffer.str());
    }

    if(new_row_size != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "insert_row(const size_t&, const Vector<T>&) const.\n"
               << "Size must be equal to number of columns.\n";
        throw logic_error(buffer.str());
    }
#endif
    SparseMatrix<T> result(*this);
    result.set_rows_number(rows_number+1);

    Vector<size_t> result_rows = result.get_rows_indices();
    Vector<size_t> result_columns = result.get_columns_indices();
    Vector<T> result_values = result.get_matrix_values();

    // Shift every stored entry whose row is at or below the insertion point.
    const Vector<size_t> shifted_positions = result_rows.calculate_greater_equal_to_indices(position);
    const size_t shifted_number = shifted_positions.size();

    for(size_t k = 0; k < shifted_number; k++)
    {
        result_rows[shifted_positions[k]] += 1;
    }

    // Append the non-default values of the inserted row.
    for(size_t j = 0; j < new_row_size; j++)
    {
        if(new_row[j] != T())
        {
            result_rows.push_back(position);
            result_columns.push_back(j);
            result_values.push_back(new_row[j]);
        }
    }

    result.set_values(result_rows, result_columns, result_values);

    return result;
}
/// Returns a copy of this sparse matrix with a new column inserted at the given
/// position; columns at or to the right of the position shift right by one.
/// @param position Column index at which the new column is placed.
/// @param new_column Values of the inserted column; default values are not stored.
template <class T>
SparseMatrix<T> SparseMatrix<T>::insert_column(const size_t& position, const Vector<T>& new_column)
{
    const size_t new_column_size = new_column.size();

#ifdef __OPENNN_DEBUG__
    if(position > columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "insert_column(const size_t&, const Vector<T>&) const.\n"
               << "Position must be less or equal than number of columns.\n";
        throw logic_error(buffer.str());
    }

    if(new_column_size != rows_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "insert_column(const size_t, const Vector<T>&) const.\n"
               << "Size(" << new_column_size << ") must be equal to number of rows(" << rows_number << ").\n";
        throw logic_error(buffer.str());
    }
#endif
    SparseMatrix<T> result(*this);
    result.set_columns_number(columns_number+1);

    Vector<size_t> result_rows = result.get_rows_indices();
    Vector<size_t> result_columns = result.get_columns_indices();
    Vector<T> result_values = result.get_matrix_values();

    // Shift every stored entry whose column is at or right of the insertion point.
    const Vector<size_t> shifted_positions = result_columns.calculate_greater_equal_to_indices(position);
    const size_t shifted_number = shifted_positions.size();

    for(size_t k = 0; k < shifted_number; k++)
    {
        result_columns[shifted_positions[k]] += 1;
    }

    // Append the non-default values of the inserted column.
    for(size_t i = 0; i < new_column_size; i++)
    {
        if(new_column[i] != T())
        {
            result_rows.push_back(i);
            result_columns.push_back(position);
            result_values.push_back(new_column[i]);
        }
    }

    result.set_values(result_rows, result_columns, result_values);

    return result;
}
template <class T>
SparseMatrix<T> SparseMatrix<T>::merge_matrices(const SparseMatrix<T>& other_sparse_matrix, const size_t& columns_1_index, const size_t& columns_2_index) const
{
// Inner-join-like merge: for each row of this matrix, find the rows of
// other_sparse_matrix whose key column(columns_2_index) equals this row's key
// column(columns_1_index), and emit one merged row per match. The other
// matrix's key column is dropped from the merged rows.
const Vector<T> columns_1 = this->get_column(columns_1_index);
const Vector<T> columns_2 = other_sparse_matrix.get_column(columns_2_index);
const size_t columns_1_size = columns_1.size();
// First pass: count the matches to size the result.
size_t merged_rows_number = 0;
for(size_t i = 0; i < columns_1_size; i++)
{
const T current_index_value = columns_1[i];
const size_t columns_2_equal_number = columns_2.count_equal_to(current_index_value);
merged_rows_number += columns_2_equal_number;
}
// One key column disappears in the merge.
const size_t merged_columns_number = columns_number + other_sparse_matrix.get_columns_number() - 1;
SparseMatrix<T> merged_sparse_matrix(merged_rows_number, merged_columns_number);
size_t current_row_index = 0;
// Second pass: emit each matched pair of rows, in row order of this matrix.
for(size_t i = 0; i < columns_1_size; i++)
{
const T current_index_value = columns_1[i];
const Vector<size_t> columns_2_equal_indices = columns_2.calculate_equal_to_indices(current_index_value);
const size_t columns_2_equal_indices_size = columns_2_equal_indices.size();
for(size_t j = 0; j < columns_2_equal_indices_size; j++)
{
Vector<T> current_row = this->get_row(i);
current_row = current_row.assemble(other_sparse_matrix.get_row(columns_2_equal_indices[j]).remove_element(columns_2_index));
merged_sparse_matrix.set_row(current_row_index, current_row);
current_row_index++;
}
}
return merged_sparse_matrix;
}
template <class T>
Matrix<T> SparseMatrix<T>::merge_matrices(const Matrix<T>& other_matrix, const size_t& columns_1_index, const size_t& columns_2_index) const
{
// Dense-output variant of merge_matrices: inner-join-like merge of this sparse
// matrix with a dense Matrix on the given key columns. The other matrix's key
// column is dropped from the merged rows.
const Vector<T> columns_1 = this->get_column(columns_1_index);
const Vector<T> columns_2 = other_matrix.get_column(columns_2_index);
const size_t columns_1_size = columns_1.size();
// First pass: count the matches to size the result.
size_t merged_rows_number = 0;
for(size_t i = 0; i < columns_1_size; i++)
{
const T current_index_value = columns_1[i];
const size_t columns_2_equal_number = columns_2.count_equal_to(current_index_value);
merged_rows_number += columns_2_equal_number;
}
// One key column disappears in the merge.
const size_t merged_columns_number = columns_number + other_matrix.get_columns_number() - 1;
Matrix<T> merged_matrix(merged_rows_number, merged_columns_number);
size_t current_row_index = 0;
// Second pass: emit each matched pair of rows, in row order of this matrix.
for(size_t i = 0; i < columns_1_size; i++)
{
const T current_index_value = columns_1[i];
const Vector<size_t> columns_2_equal_indices = columns_2.calculate_equal_to_indices(current_index_value);
const size_t columns_2_equal_indices_size = columns_2_equal_indices.size();
for(size_t j = 0; j < columns_2_equal_indices_size; j++)
{
Vector<T> current_row = this->get_row(i);
current_row = current_row.assemble(other_matrix.get_row(columns_2_equal_indices[j]).remove_element(columns_2_index));
merged_matrix.set_row(current_row_index, current_row);
current_row_index++;
}
}
return merged_matrix;
}
/// Returns a copy of this sparse matrix with the given row removed; rows below
/// it shift up by one.
/// @param row_index Index of the row to delete.
template <class T>
SparseMatrix<T> SparseMatrix<T>::delete_row(const size_t& row_index) const
{
#ifdef __OPENNN_DEBUG__
    if(row_index >= rows_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "SparseMatrix<T> delete_row(const size_t&) const.\n"
               << "Index of row must be less than number of rows.\n";
        throw logic_error(buffer.str());
    }
    else if(rows_number < 2)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "SparseMatrix<T> delete_row(const size_t&) const.\n"
               << "Number of rows must be equal or greater than two.\n";
        throw logic_error(buffer.str());
    }
#endif
    SparseMatrix<T> new_sparse_matrix(rows_number-1,columns_number);

    Vector<size_t> kept_rows = rows_indices;
    Vector<size_t> kept_columns = columns_indices;
    Vector<T> kept_values = matrix_values;

    const size_t nonzeros_number = kept_values.size();

    // Collect the entries of the deleted row and shift every entry below it
    // up by one row.
    Vector<size_t> removed_positions;

    for(size_t k = 0; k < nonzeros_number; k++)
    {
        if(kept_rows[k] == row_index)
        {
            removed_positions.push_back(k);
        }
        else if(kept_rows[k] > row_index)
        {
            kept_rows[k]--;
        }
    }

    kept_rows = kept_rows.delete_indices(removed_positions);
    kept_columns = kept_columns.delete_indices(removed_positions);
    kept_values = kept_values.delete_indices(removed_positions);

    new_sparse_matrix.set_values(kept_rows, kept_columns, kept_values);

    return new_sparse_matrix;
}
template <class T>
SparseMatrix<T> SparseMatrix<T>::delete_rows(const Vector<size_t>& rows_to_remove) const
{
// Returns a copy of this matrix without the given rows; the remaining rows
// shift up so indices stay contiguous.
const size_t rows_to_remove_size = rows_to_remove.size();
SparseMatrix<T> new_sparse_matrix(rows_number-rows_to_remove_size, columns_number);
// Keep only the stored entries whose row is not being removed.
Vector<size_t> indices_to_keep = rows_indices.calculate_not_equal_to_indices(rows_to_remove);
const size_t indices_to_keep_size = indices_to_keep.size();
Vector<size_t> new_rows_indices(indices_to_keep_size);
Vector<size_t> new_columns_indices(indices_to_keep_size);
Vector<T> new_matrix_values(indices_to_keep_size);
for(size_t i = 0; i < indices_to_keep_size; i++)
{
const size_t current_index = indices_to_keep[i];
new_rows_indices[i] = rows_indices[current_index];
new_columns_indices[i] = columns_indices[current_index];
new_matrix_values[i] = matrix_values[current_index];
}
const size_t nonzero_elements_number = new_matrix_values.size();
// Compact row indices: each kept entry moves up by the number of removed rows
// above it.(count_less_equal_to equals count_less_than here, since a kept
// entry's row index never equals a removed row index.)
for(size_t i = 0; i < nonzero_elements_number; i++)
{
new_rows_indices[i] -= rows_to_remove.count_less_equal_to(new_rows_indices[i]);
}
new_sparse_matrix.set_values(new_rows_indices, new_columns_indices, new_matrix_values);
return new_sparse_matrix;
}
/// Returns a copy of this sparse matrix keeping only the rows that do not
/// contain the given value.
/// @param value Value whose presence disqualifies a row.
template <class T>
SparseMatrix<T> SparseMatrix<T>::delete_rows_with_value(const T& value) const
{
    Vector<size_t> kept_rows;

    for(size_t row = 0; row < rows_number; row++)
    {
        if(!get_row(row).contains(value))
        {
            kept_rows.push_back(row);
        }
    }

    return get_sub_sparse_matrix_rows(kept_rows);
}
template <class T>
SparseMatrix<T> SparseMatrix<T>::delete_first_rows(const size_t& number) const
{
// Drops the first `number` rows by keeping rows [number, rows_number-1].
// NOTE(review): assumes the Vector(start, step, end) constructor builds an
// inclusive range; behaviour when number >= rows_number is unverified — confirm.
const Vector<size_t> indices(number, 1, rows_number-1);
return get_sub_sparse_matrix_rows(indices);
}
template <class T>
SparseMatrix<T> SparseMatrix<T>::get_first_rows(const size_t& number) const
{
// Returns the first `number` rows, i.e. rows [0, number-1].
// NOTE(review): assumes the Vector(start, step, end) constructor builds an
// inclusive range; number == 0 would make number-1 underflow size_t — confirm.
const Vector<size_t> indices(0, 1, number-1);
return get_sub_sparse_matrix_rows(indices);
}
template <class T>
SparseMatrix<T> SparseMatrix<T>::delete_last_rows(const size_t& number) const
{
// Drops the last `number` rows by keeping rows [0, rows_number-number-1].
// NOTE(review): assumes the Vector(start, step, end) constructor builds an
// inclusive range; number >= rows_number would underflow the end index — confirm.
const Vector<size_t> indices(0, 1, rows_number-number-1);
return get_sub_sparse_matrix_rows(indices);
}
template <class T>
SparseMatrix<T> SparseMatrix<T>::get_last_rows(const size_t& number) const
{
// Returns the last `number` rows, i.e. rows [rows_number-number, rows_number-1].
// NOTE(review): assumes the Vector(start, step, end) constructor builds an
// inclusive range; number > rows_number would underflow the start index — confirm.
const Vector<size_t> indices(rows_number-number, 1, rows_number-1);
return get_sub_sparse_matrix_rows(indices);
}
/// Returns a copy of this sparse matrix with the given column removed; columns
/// to the right of it shift left by one.
/// @param column_index Index of the column to delete.
template <class T>
SparseMatrix<T> SparseMatrix<T>::delete_column(const size_t& column_index) const
{
#ifdef __OPENNN_DEBUG__
    if(column_index >= columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "delete_column(const size_t&) const.\n"
               << "Index of column must be less than number of columns.\n";
        throw logic_error(buffer.str());
    }
    else if(columns_number < 2)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "delete_column(const size_t&) const.\n"
               << "Number of columns must be equal or greater than two.\n";
        throw logic_error(buffer.str());
    }
#endif
    SparseMatrix<T> new_sparse_matrix(*this);
    new_sparse_matrix.set_columns_number(columns_number-1);

    Vector<size_t> new_rows_indices = new_sparse_matrix.get_rows_indices();
    Vector<size_t> new_columns_indices = new_sparse_matrix.get_columns_indices();
    Vector<T> new_matrix_values = new_sparse_matrix.get_matrix_values();

    // Remove the stored entries belonging to the deleted column.
    const Vector<size_t> columns_equal_than_position_indices = new_columns_indices.calculate_equal_to_indices(column_index);

    new_rows_indices = new_rows_indices.delete_indices(columns_equal_than_position_indices);
    new_columns_indices = new_columns_indices.delete_indices(columns_equal_than_position_indices);
    new_matrix_values = new_matrix_values.delete_indices(columns_equal_than_position_indices);

    // Bug fix: entries to the right of the removed column must shift one
    // position left(mirroring the row shift performed by delete_row);
    // otherwise they keep stale indices, including an out-of-range index for
    // the former last column.
    const size_t remaining_nonzeros_number = new_columns_indices.size();

    for(size_t i = 0; i < remaining_nonzeros_number; i++)
    {
        if(new_columns_indices[i] > column_index)
        {
            new_columns_indices[i]--;
        }
    }

    new_sparse_matrix.set_values(new_rows_indices, new_columns_indices, new_matrix_values);

    return new_sparse_matrix;
}
/// Returns a copy of this sparse matrix without the given columns; the
/// remaining columns shift left so indices stay contiguous.
/// @param columns_to_remove Indices of the columns to delete.
template <class T>
SparseMatrix<T> SparseMatrix<T>::delete_columns(const Vector<size_t>& columns_to_remove) const
{
    const size_t columns_to_remove_size = columns_to_remove.size();

    // Bug fix: the result must lose columns, not rows.
    SparseMatrix<T> new_sparse_matrix(rows_number, columns_number-columns_to_remove_size);

    // Bug fix: filter the stored entries by their COLUMN index; the original
    // code filtered by row index and removed the wrong entries.
    Vector<size_t> indices_to_keep = columns_indices.calculate_not_equal_to_indices(columns_to_remove);

    const size_t indices_to_keep_size = indices_to_keep.size();
    Vector<size_t> new_rows_indices(indices_to_keep_size);
    Vector<size_t> new_columns_indices(indices_to_keep_size);
    Vector<T> new_matrix_values(indices_to_keep_size);

    for(size_t i = 0; i < indices_to_keep_size; i++)
    {
        const size_t current_index = indices_to_keep[i];
        new_rows_indices[i] = rows_indices[current_index];
        new_columns_indices[i] = columns_indices[current_index];
        new_matrix_values[i] = matrix_values[current_index];
    }

    // Shift each kept entry left by the number of removed columns before it.
    const size_t nonzero_elements_number = new_matrix_values.size();

    for(size_t i = 0; i < nonzero_elements_number; i++)
    {
        new_columns_indices[i] -= columns_to_remove.count_less_equal_to(new_columns_indices[i]);
    }

    new_sparse_matrix.set_values(new_rows_indices, new_columns_indices, new_matrix_values);

    return new_sparse_matrix;
}
/// Returns a copy of this sparse matrix without the rows whose elements are all
/// equal. A row with no stored entries is entirely the default value and is
/// therefore constant as well.
template <class T>
SparseMatrix<T> SparseMatrix<T>::remove_constant_rows() const
{
    Vector<size_t> constant_rows;

    for(size_t row = 0; row < rows_number; row++)
    {
        // No stored entries: the whole row is the default value.
        if(rows_indices.count_equal_to(row) == 0)
        {
            constant_rows.push_back(row);
        }
        else if(get_row(row).is_constant())
        {
            constant_rows.push_back(row);
        }
    }

    return delete_rows(constant_rows);
}
/// Returns a copy of this sparse matrix without the columns whose elements are
/// all equal. A column with no stored entries is entirely the default value and
/// is therefore constant as well.
template <class T>
SparseMatrix<T> SparseMatrix<T>::remove_constant_columns() const
{
    Vector<size_t> constant_columns;

    for(size_t column = 0; column < columns_number; column++)
    {
        // No stored entries: the whole column is the default value.
        if(columns_indices.count_equal_to(column) == 0)
        {
            constant_columns.push_back(column);
        }
        else if(get_column(column).is_constant())
        {
            constant_columns.push_back(column);
        }
    }

    return delete_columns(constant_columns);
}
/// Returns a sparse matrix with the rows of another sparse matrix appended
/// below the rows of this one. Both matrices must have the same number of columns.
/// @param other_sparse_matrix Matrix whose rows are appended.
template <class T>
SparseMatrix<T> SparseMatrix<T>::assemble_rows(const SparseMatrix<T>& other_sparse_matrix) const
{
#ifdef __OPENNN_DEBUG__
    const size_t other_columns_number = other_sparse_matrix.get_columns_number();

    if(other_columns_number != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "SparseMatrix<T> assemble_rows(const SparseMatrix<T>&) const method.\n"
               << "Number of columns of other Sparsematrix (" << other_columns_number << ") must be equal to number of columns of this Sparsematrix (" << columns_number << ").\n";
        throw logic_error(buffer.str());
    }
#endif
    SparseMatrix<T> new_sparse_matrix(*this);
    new_sparse_matrix.set_rows_number(rows_number + other_sparse_matrix.get_rows_number());

    Vector<size_t> new_rows_indices = new_sparse_matrix.get_rows_indices();
    Vector<size_t> new_columns_indices = new_sparse_matrix.get_columns_indices();
    Vector<T> new_matrix_values = new_sparse_matrix.get_matrix_values();

    Vector<size_t> other_rows_indices = other_sparse_matrix.get_rows_indices();
    Vector<size_t> other_columns_indices = other_sparse_matrix.get_columns_indices();

    // Bug fix: the other matrix's values have type T, not size_t; storing them
    // in a Vector<size_t> truncated(or failed to compile) for non-integral T.
    Vector<T> other_matrix_values = other_sparse_matrix.get_matrix_values();

    // The appended entries land below this matrix's rows.
    other_rows_indices = other_rows_indices + rows_number;

    new_rows_indices = new_rows_indices.assemble(other_rows_indices);
    new_columns_indices = new_columns_indices.assemble(other_columns_indices);
    new_matrix_values = new_matrix_values.assemble(other_matrix_values);

    new_sparse_matrix.set_values(new_rows_indices, new_columns_indices, new_matrix_values);

    return new_sparse_matrix;
}
template <class T>
Matrix<T> SparseMatrix<T>::assemble_rows(const Matrix<T>& other_matrix) const
{
    /// Stacks a dense matrix below this sparse matrix and returns the result.
    /// Only the nonzero entries of the dense block become stored triplets.
    /// @param other_matrix Dense matrix appended below; must have the same
    /// number of columns.

    const size_t other_rows_number = other_matrix.get_rows_number();
    const size_t other_columns_number = other_matrix.get_columns_number();

#ifdef __OPENNN_DEBUG__

    if(other_columns_number != columns_number)
    {
        ostringstream buffer;

        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "Matrix<T> assemble_rows(const Matrix<T>&) const method.\n"
               << "Number of columns of other matrix (" << other_columns_number << ") must be equal to number of columns of this Sparsematrix (" << columns_number << ").\n";

        throw logic_error(buffer.str());
    }

#endif

    // Start from a copy of this matrix and grow it by the dense block's rows.
    SparseMatrix<T> assembled(*this);
    assembled.set_rows_number(rows_number + other_matrix.get_rows_number());

    Vector<size_t> row_triplets = assembled.get_rows_indices();
    Vector<size_t> column_triplets = assembled.get_columns_indices();
    Vector<T> value_triplets = assembled.get_matrix_values();

    // Append one triplet per nonzero dense entry, shifted below the existing rows.
    for(size_t dense_row = 0; dense_row < other_rows_number; dense_row++)
    {
        for(size_t dense_column = 0; dense_column < other_columns_number; dense_column++)
        {
            if(other_matrix(dense_row, dense_column) != T())
            {
                row_triplets.push_back(rows_number + dense_row);
                column_triplets.push_back(dense_column);
                value_triplets.push_back(other_matrix(dense_row, dense_column));
            }
        }
    }

    assembled.set_values(row_triplets, column_triplets, value_triplets);

    return assembled;
}
template <class T>
SparseMatrix<T> SparseMatrix<T>::assemble_columns(const SparseMatrix<T>& other_sparse_matrix) const
{
    /// Places another sparse matrix to the right of this one and returns the
    /// result. The other matrix's column indices are shifted by this matrix's
    /// column count and the triplet lists are concatenated.
    /// @param other_sparse_matrix Matrix appended to the right; must have the
    /// same number of rows.

#ifdef __OPENNN_DEBUG__

    const size_t other_rows_number = other_sparse_matrix.get_rows_number();

    if(other_rows_number != rows_number)
    {
        ostringstream buffer;

        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "SparseMatrix<T> assemble_columns(const SparseMatrix<T>&) const method.\n"
               << "Number of rows of other Sparsematrix (" << other_rows_number << ") must be equal to number of rows of this Sparsematrix (" << rows_number << ").\n";

        throw logic_error(buffer.str());
    }

#endif

    SparseMatrix<T> new_sparse_matrix(*this);

    // Fixed: horizontal assembly grows the COLUMN count; the old code wrote
    // the combined column count into the row count via set_rows_number.
    new_sparse_matrix.set_columns_number(columns_number + other_sparse_matrix.get_columns_number());

    Vector<size_t> new_rows_indices = new_sparse_matrix.get_rows_indices();
    Vector<size_t> new_columns_indices = new_sparse_matrix.get_columns_indices();
    Vector<T> new_matrix_values = new_sparse_matrix.get_matrix_values();

    Vector<size_t> other_rows_indices = other_sparse_matrix.get_rows_indices();
    Vector<size_t> other_columns_indices = other_sparse_matrix.get_columns_indices();

    // Fixed: values are of type T, not size_t (the old declaration truncated
    // floating-point values).
    Vector<T> other_matrix_values = other_sparse_matrix.get_matrix_values();

    // Shift the appended columns to the right of the existing ones.
    other_columns_indices = other_columns_indices + columns_number;

    new_rows_indices = new_rows_indices.assemble(other_rows_indices);
    new_columns_indices = new_columns_indices.assemble(other_columns_indices);
    new_matrix_values = new_matrix_values.assemble(other_matrix_values);

    new_sparse_matrix.set_values(new_rows_indices, new_columns_indices, new_matrix_values);

    return new_sparse_matrix;
}
template <class T>
Matrix<T> SparseMatrix<T>::assemble_columns(const Matrix<T>& other_matrix) const
{
    /// Places a dense matrix to the right of this sparse matrix and returns
    /// the result. Only nonzero dense entries become stored triplets.
    /// @param other_matrix Dense matrix appended to the right; must have the
    /// same number of rows.

    const size_t other_rows_number = other_matrix.get_rows_number();
    const size_t other_columns_number = other_matrix.get_columns_number();

#ifdef __OPENNN_DEBUG__

    if(other_rows_number != rows_number)
    {
        ostringstream buffer;

        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "Matrix<T> assemble_columns(const Matrix<T>&) const method.\n"
               << "Number of rows of other matrix (" << other_rows_number << ") must be equal to number of rows of this Sparsematrix (" << rows_number << ").\n";

        throw logic_error(buffer.str());
    }

#endif

    SparseMatrix<T> new_sparse_matrix(*this);

    // Fixed: horizontal assembly grows the COLUMN count by the dense block's
    // column count; the old code grew the row count instead (copy/paste from
    // assemble_rows).
    new_sparse_matrix.set_columns_number(columns_number + other_columns_number);

    Vector<size_t> new_rows_indices = new_sparse_matrix.get_rows_indices();
    Vector<size_t> new_columns_indices = new_sparse_matrix.get_columns_indices();
    Vector<T> new_matrix_values = new_sparse_matrix.get_matrix_values();

    for(size_t i = 0; i < other_rows_number; i++)
    {
        for(size_t j = 0; j < other_columns_number; j++)
        {
            if(other_matrix(i,j) != T())
            {
                new_rows_indices.push_back(i);
                new_columns_indices.push_back(columns_number + j);
                new_matrix_values.push_back(other_matrix(i,j));
            }
        }
    }

    new_sparse_matrix.set_values(new_rows_indices, new_columns_indices, new_matrix_values);

    return new_sparse_matrix;
}
/// Returns a copy of this matrix whose rows are permuted so that the given
/// column is in ascending order. Only the row indices of the stored triplets
/// are remapped; column indices and values are untouched.
/// @param column_index Column used as the sort key.
template <class T>
SparseMatrix<T> SparseMatrix<T>::sort_ascending(const size_t& column_index) const
{
Vector<T> current_column = get_column(column_index);
// sorted_indices is the permutation of original row positions after sorting;
// presumably sorted_indices[k] is the original row landing at position k --
// TODO confirm against Vector::sort_ascending_indices.
Vector<size_t> sorted_indices = current_column.sort_ascending_indices();
SparseMatrix<T> new_sparse_matrix(rows_number, columns_number);
Vector<size_t> new_rows_indices = this->get_rows_indices();
Vector<size_t> new_columns_indices = this->get_columns_indices();
Vector<T> new_matrix_values = this->get_matrix_values();
const size_t nonzero_elements_number = new_matrix_values.size();
for(size_t i = 0; i < nonzero_elements_number; i++)
{
// Invert the permutation: find where this triplet's old row ends up.
// NOTE(review): linear search per triplet (O(nnz * rows)); [0] takes the
// first match, which is unique if sorted_indices is a true permutation.
const size_t sorted_index = sorted_indices.calculate_equal_to_indices(new_rows_indices[i])[0];
new_rows_indices[i] = sorted_index;
}
new_sparse_matrix.set_values(new_rows_indices, new_columns_indices, new_matrix_values);
return new_sparse_matrix;
}
/// Returns a copy of this matrix whose rows are permuted so that the given
/// column is in descending order. Only the row indices of the stored triplets
/// are remapped; column indices and values are untouched.
/// @param column_index Column used as the sort key.
template <class T>
SparseMatrix<T> SparseMatrix<T>::sort_descending(const size_t& column_index) const
{
Vector<T> current_column = get_column(column_index);
// Permutation of original row positions after a descending sort -- presumably
// sorted_indices[k] is the original row landing at position k; TODO confirm.
Vector<size_t> sorted_indices = current_column.sort_descending_indices();
SparseMatrix<T> new_sparse_matrix(rows_number, columns_number);
Vector<size_t> new_rows_indices = this->get_rows_indices();
Vector<size_t> new_columns_indices = this->get_columns_indices();
Vector<T> new_matrix_values = this->get_matrix_values();
const size_t nonzero_elements_number = new_matrix_values.size();
for(size_t i = 0; i < nonzero_elements_number; i++)
{
// Invert the permutation: find where this triplet's old row ends up.
// NOTE(review): linear search per triplet (O(nnz * rows)).
const size_t sorted_index = sorted_indices.calculate_equal_to_indices(new_rows_indices[i])[0];
new_rows_indices[i] = sorted_index;
}
new_sparse_matrix.set_values(new_rows_indices, new_columns_indices, new_matrix_values);
return new_sparse_matrix;
}
template <class T>
void SparseMatrix<T>::replace(const T& find_what, const T& replace_with)
{
    /// Replaces every occurrence of find_what with replace_with, taking the
    /// sparse representation into account:
    /// - nonzero -> nonzero: rewrite matching stored values;
    /// - zero -> nonzero: rewrite stored zeros AND materialise every implicit
    ///   zero as an explicit triplet holding replace_with;
    /// - nonzero -> zero: remove matching triplets entirely.

    if(find_what == replace_with)
    {
        return;
    }
    else if(find_what != T() && replace_with != T())
    {
        // Only stored entries can hold a nonzero value.
        matrix_values = matrix_values.replace_value(find_what, replace_with);
    }
    else if(find_what == T())
    {
        matrix_values = matrix_values.replace_value(find_what, replace_with);

        // Fixed: the sequential index vector must span all columns
        // (0..columns_number-1); the old code passed the columns_indices
        // vector itself as the upper bound, which does not compile once the
        // template is instantiated.
        const Vector<size_t> sequential_indices(0, 1, columns_number - 1);

        for(size_t i = 0; i < rows_number; i++)
        {
            const Vector<size_t> current_row_nonzero_indices = rows_indices.calculate_equal_to_indices(i);
            // Columns of this row with no stored entry, i.e. implicit zeros.
            const Vector<size_t> current_zero_columns = sequential_indices.get_difference(columns_indices.get_subvector(current_row_nonzero_indices));
            const size_t current_zero_columns_size = current_zero_columns.size();
            for(size_t j = 0; j < current_zero_columns_size; j++)
            {
                rows_indices.push_back(i);
                columns_indices.push_back(current_zero_columns[j]);
                matrix_values.push_back(replace_with);
            }
        }
    }
    else if(replace_with == T())
    {
        // Matching triplets are deleted rather than stored as explicit zeros.
        const Vector<size_t> equal_than_find_indices = matrix_values.calculate_equal_to_indices(find_what);

        rows_indices = rows_indices.delete_indices(equal_than_find_indices);
        columns_indices = columns_indices.delete_indices(equal_than_find_indices);
        matrix_values = matrix_values.delete_indices(equal_than_find_indices);
    }
}
template <class T>
void SparseMatrix<T>::replace_in_row(const size_t& row_index, const T& find_what, const T& replace_with)
{
    /// Replaces every occurrence of find_what with replace_with inside one row,
    /// with the same zero/nonzero case handling as replace().
    /// @param row_index Row to operate on.

    if(find_what == replace_with)
    {
        return;
    }
    else if(find_what != T() && replace_with != T())
    {
        // Nonzero -> nonzero: only stored entries of the row can match.
        const Vector<size_t> current_row_nonzero_indices = rows_indices.calculate_equal_to_indices(row_index);
        const size_t current_row_nonzero_number = current_row_nonzero_indices.size();
        for(size_t i = 0; i < current_row_nonzero_number; i++)
        {
            if(matrix_values[current_row_nonzero_indices[i]] == find_what)
            {
                // Fixed: was "==" (a comparison with no effect) instead of "=".
                matrix_values[current_row_nonzero_indices[i]] = replace_with;
            }
        }
    }
    else if(find_what == T())
    {
        // Zero -> nonzero: rewrite stored zeros, then materialise the row's
        // implicit zeros as explicit entries.
        const Vector<size_t> current_row_nonzero_indices = rows_indices.calculate_equal_to_indices(row_index);
        const size_t current_row_nonzero_number = current_row_nonzero_indices.size();
        for(size_t i = 0; i < current_row_nonzero_number; i++)
        {
            if(matrix_values[current_row_nonzero_indices[i]] == find_what)
            {
                // Fixed: assignment, not comparison.
                matrix_values[current_row_nonzero_indices[i]] = replace_with;
            }
        }

        // Fixed: the sequential index vector must span all columns
        // (0..columns_number-1); the old code passed the columns_indices
        // vector itself as the upper bound.
        const Vector<size_t> sequential_indices(0, 1, columns_number - 1);
        const Vector<size_t> current_zero_columns = sequential_indices.get_difference(columns_indices.get_subvector(current_row_nonzero_indices));
        const size_t current_zero_columns_size = current_zero_columns.size();
        for(size_t j = 0; j < current_zero_columns_size; j++)
        {
            rows_indices.push_back(row_index);
            columns_indices.push_back(current_zero_columns[j]);
            matrix_values.push_back(replace_with);
        }
    }
    else if(replace_with == T())
    {
        // Nonzero -> zero: drop the matching stored triplets of this row.
        const Vector<size_t> current_row_nonzero_indices = rows_indices.calculate_equal_to_indices(row_index);

        // Fixed: the match positions returned here are relative to the row
        // subvector; they must be mapped back to absolute triplet positions
        // before deleting, otherwise unrelated triplets are removed.
        const Vector<size_t> local_equal_indices = matrix_values.get_subvector(current_row_nonzero_indices).calculate_equal_to_indices(find_what);
        const Vector<size_t> equal_than_find_indices = current_row_nonzero_indices.get_subvector(local_equal_indices);

        rows_indices = rows_indices.delete_indices(equal_than_find_indices);
        columns_indices = columns_indices.delete_indices(equal_than_find_indices);
        matrix_values = matrix_values.delete_indices(equal_than_find_indices);
    }
}
template <class T>
void SparseMatrix<T>::replace_in_column(const size_t& column_index, const T& find_what, const T& replace_with)
{
    /// Replaces every occurrence of find_what with replace_with inside one
    /// column, with the same zero/nonzero case handling as replace().
    /// @param column_index Column to operate on.

    if(find_what == replace_with)
    {
        return;
    }
    else if(find_what != T() && replace_with != T())
    {
        // Nonzero -> nonzero: only stored entries of the column can match.
        const Vector<size_t> current_column_nonzero_indices = columns_indices.calculate_equal_to_indices(column_index);
        const size_t current_column_nonzero_number = current_column_nonzero_indices.size();
        for(size_t i = 0; i < current_column_nonzero_number; i++)
        {
            if(matrix_values[current_column_nonzero_indices[i]] == find_what)
            {
                // Fixed: was "==" (a comparison with no effect) instead of "=".
                matrix_values[current_column_nonzero_indices[i]] = replace_with;
            }
        }
    }
    else if(find_what == T())
    {
        // Zero -> nonzero: rewrite stored zeros, then materialise the column's
        // implicit zeros as explicit entries.
        const Vector<size_t> current_column_nonzero_indices = columns_indices.calculate_equal_to_indices(column_index);
        const size_t current_column_nonzero_number = current_column_nonzero_indices.size();
        for(size_t i = 0; i < current_column_nonzero_number; i++)
        {
            if(matrix_values[current_column_nonzero_indices[i]] == find_what)
            {
                // Fixed: assignment, not comparison.
                matrix_values[current_column_nonzero_indices[i]] = replace_with;
            }
        }

        // Fixed: the sequential index vector must span all rows
        // (0..rows_number-1); the old code passed the rows_indices vector
        // itself as the upper bound.
        const Vector<size_t> sequential_indices(0, 1, rows_number - 1);
        const Vector<size_t> current_zero_rows = sequential_indices.get_difference(rows_indices.get_subvector(current_column_nonzero_indices));
        const size_t current_zero_rows_size = current_zero_rows.size();
        for(size_t j = 0; j < current_zero_rows_size; j++)
        {
            rows_indices.push_back(current_zero_rows[j]);
            columns_indices.push_back(column_index);
            matrix_values.push_back(replace_with);
        }
    }
    else if(replace_with == T())
    {
        // Nonzero -> zero: drop the matching stored triplets of this column.
        const Vector<size_t> current_column_nonzero_indices = columns_indices.calculate_equal_to_indices(column_index);

        // Fixed: match positions are relative to the column subvector and must
        // be mapped back to absolute triplet positions before deletion.
        const Vector<size_t> local_equal_indices = matrix_values.get_subvector(current_column_nonzero_indices).calculate_equal_to_indices(find_what);
        const Vector<size_t> equal_than_find_indices = current_column_nonzero_indices.get_subvector(local_equal_indices);

        rows_indices = rows_indices.delete_indices(equal_than_find_indices);
        columns_indices = columns_indices.delete_indices(equal_than_find_indices);
        matrix_values = matrix_values.delete_indices(equal_than_find_indices);
    }
}
template <class T>
bool SparseMatrix<T>::has_column_value(const size_t& column_index, const T& value) const
{
    /// Returns true when the given column contains the given value.
    /// @param column_index Column to search.
    /// @param value Value to look for.

    const Vector<T> dense_column = get_column(column_index);

    return dense_column.contains(value);
}
// Mathematical methods
template <class T>
T SparseMatrix<T>::calculate_sum() const
{
    /// Returns the sum of all matrix elements. Implicit zeros contribute
    /// nothing, so summing the stored values suffices.

    const T total = matrix_values.calculate_sum();

    return total;
}
template <class T>
Vector<int> SparseMatrix<T>::calculate_rows_sum_int() const
{
    /// Returns the per-row sums of the stored values, cast to int.

    // Control sentence(if debug)

#ifdef __OPENNN_DEBUG__

    if(rows_number == 0)
    {
        ostringstream buffer;

        // Fixed: the message was streamed to cout, so the thrown exception
        // carried an empty text.
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "Vector<int> calculate_rows_sum_int() const method.\n"
               << "SparseMatrix is empty.\n";

        throw logic_error(buffer.str());
    }

#endif

    // Fixed: accumulate into Vector<int> (the declared return type) instead of
    // Vector<T>.
    Vector<int> rows_sum(rows_number, 0);

    const size_t rows_indices_size = rows_indices.size();

    for(size_t i = 0; i < rows_indices_size; i++)
    {
        const size_t current_row_index = rows_indices[i];

        rows_sum[current_row_index] += static_cast<int>(matrix_values[i]);
    }

    return rows_sum;
}
template <class T>
Vector<T> SparseMatrix<T>::calculate_rows_sum() const
{
    /// Returns the per-row sums of the stored values (implicit zeros add
    /// nothing).

#ifdef __OPENNN_DEBUG__

    if(rows_number == 0)
    {
        ostringstream buffer;

        // Fixed: the message was streamed to cout, so the thrown exception
        // carried an empty text.
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "Vector<T> calculate_rows_sum() const method.\n"
               << "SparseMatrix is empty.\n";

        throw logic_error(buffer.str());
    }

#endif

    Vector<T> rows_sum(rows_number, T());

    const size_t rows_indices_size = rows_indices.size();

    for(size_t i = 0; i < rows_indices_size; i++)
    {
        const size_t current_row_index = rows_indices[i];

        rows_sum[current_row_index] += matrix_values[i];
    }

    return rows_sum;
}
template <class T>
Vector<T> SparseMatrix<T>::calculate_columns_sum() const
{
    /// Returns the per-column sums of the stored values (implicit zeros add
    /// nothing).

#ifdef __OPENNN_DEBUG__

    if(columns_number == 0)
    {
        ostringstream buffer;

        // Fixed: the message was streamed to cout, so the thrown exception
        // carried an empty text.
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "Vector<T> calculate_columns_sum() const method.\n"
               << "SparseMatrix is empty.\n";

        throw logic_error(buffer.str());
    }

#endif

    Vector<T> columns_sum(columns_number, T());

    const size_t columns_indices_size = columns_indices.size();

    for(size_t i = 0; i < columns_indices_size; i++)
    {
        const size_t current_column_index = columns_indices[i];

        columns_sum[current_column_index] += matrix_values[i];
    }

    return columns_sum;
}
template <class T>
Vector<size_t> SparseMatrix<T>::calculate_most_frequent_columns_indices(const size_t& top_number)
{
    /// Returns the indices of the columns with the largest sums, at most
    /// top_number of them (capped at the column count).
    /// @param top_number Maximum number of column indices returned.

    const Vector<T> sums = calculate_columns_sum();
    const size_t requested = min(columns_number, top_number);

    return sums.calculate_maximal_indices(requested);
}
template <class T>
void SparseMatrix<T>::sum_row(const size_t& row_index, const Vector<T>& vector)
{
    /// Adds the given vector, element by element, to one row of the matrix.
    /// @param row_index Row to update.
    /// @param vector Addend; its size must equal the number of columns.

    // Control sentence(if debug)

#ifdef __OPENNN_DEBUG__

    if(vector.size() != columns_number)
    {
        ostringstream buffer;

        // Fixed: the message was streamed to cout, so the thrown exception
        // carried an empty text.
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "void sum_row(const size_t&, const Vector<T>&) method.\n"
               << "Size of vector must be equal to number of columns.\n";

        throw logic_error(buffer.str());
    }

#endif

    // Densify the row, add, and write it back (set_row rebuilds the triplets).
    const Vector<T> current_row = get_row(row_index) + vector;

    set_row(row_index, current_row);
}
template <class T>
double SparseMatrix<T>::calculate_trace() const
{
    /// Returns the trace, i.e. the sum of the diagonal elements. The matrix
    /// must be square.

    // Control sentence(if debug)

#ifdef __OPENNN_DEBUG__

    if(!is_square())
    {
        ostringstream buffer;

        buffer << "OpenNN Exception: SparseMatrix template.\n"
               << "double calculate_trace() const method.\n"
               << "SparseMatrix is not square.\n";

        throw logic_error(buffer.str());
    }

#endif

    double diagonal_sum = 0.0;

    for(size_t diagonal_index = 0; diagonal_index < rows_number; diagonal_index++)
    {
        diagonal_sum += (*this)(diagonal_index, diagonal_index);
    }

    return diagonal_sum;
}
template <class T>
Vector<double> SparseMatrix<T>::calculate_mean() const
{
    /// Returns the mean of every column: the column sums divided by the total
    /// row count (implicit zeros count toward the denominator).

    // Control sentence(if debug)

#ifdef __OPENNN_DEBUG__

    if(rows_number == 0)
    {
        ostringstream buffer;

        buffer << "OpenNN Exception: SparseMatrix template.\n"
               << "Vector<double> calculate_mean() const method.\n"
               << "Number of rows must be greater than one.\n";

        throw logic_error(buffer.str());
    }

#endif

    Vector<double> column_means(columns_number, 0.0);

    column_means = calculate_columns_sum();
    column_means /= static_cast<double>(rows_number);

    return column_means;
}
template <class T>
double SparseMatrix<T>::calculate_mean(const size_t& column_index) const
{
    /// Returns the mean of one column: the sum of its stored entries divided
    /// by the total row count (implicit zeros count toward the denominator).
    /// @param column_index Column whose mean is computed.

    // Control sentence(if debug)

#ifdef __OPENNN_DEBUG__

    if(rows_number == 0)
    {
        ostringstream buffer;

        buffer << "OpenNN Exception: SparseMatrix template.\n"
               << "double calculate_mean(const size_t&) const method.\n"
               << "Number of rows must be greater than one.\n";

        throw logic_error(buffer.str());
    }

    if(column_index >= columns_number)
    {
        ostringstream buffer;

        buffer << "OpenNN Exception: SparseMatrix template.\n"
               << "double calculate_mean(const size_t&) const method.\n"
               << "Index of column must be less than number of columns.\n";

        throw logic_error(buffer.str());
    }

#endif

    const Vector<size_t> nonzero_positions = columns_indices.calculate_equal_to_indices(column_index);
    const size_t nonzero_count = nonzero_positions.size();

    double accumulated = 0.0;

    for(size_t k = 0; k < nonzero_count; k++)
    {
        accumulated += matrix_values[nonzero_positions[k]];
    }

    return accumulated / static_cast<double>(rows_number);
}
template <class T>
Vector<double> SparseMatrix<T>::calculate_mean(const Vector<size_t>& means_column_indices) const
{
    /// Returns the mean of each requested column.
    /// @param means_column_indices Columns whose means are computed.

    const size_t means_column_size = means_column_indices.size();

    Vector<double> mean(means_column_size, 0.0);

    for(size_t i = 0; i < means_column_size; i++)
    {
        // Fixed: store each column's mean in its own slot. The old code
        // assigned the scalar to the whole vector each iteration, so only the
        // last column's mean survived.
        mean[i] = calculate_mean(means_column_indices[i]);
    }

    return mean;
}
/// Returns the mean of the selected columns, restricted to the selected rows.
/// Only stored (nonzero) entries whose row belongs to means_row_indices
/// contribute to the sum; the divisor is the number of selected rows, so
/// implicit zeros count toward the denominator.
/// @param means_row_indices Row indices taken into account.
/// @param means_column_indices Column indices whose means are computed.
template <class T>
Vector<double> SparseMatrix<T>::calculate_mean(const Vector<size_t>& means_row_indices, const Vector<size_t>& means_column_indices) const
{
const size_t row_indices_size = means_row_indices.size();
const size_t column_indices_size = means_column_indices.size();
// Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
// Rows check
if(row_indices_size > rows_number)
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix template.\n"
<< "Vector<double> calculate_mean(const Vector<size_t>&, const Vector<size_t>&) const method.\n"
<< "Size of row indices(" << row_indices_size << ") is greater than number of rows(" << rows_number << ").\n";
throw logic_error(buffer.str());
}
for(size_t i = 0; i < row_indices_size; i++)
{
if(means_row_indices[i] >= rows_number)
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix template.\n"
<< "Vector<double> calculate_mean(const Vector<size_t>&, const Vector<size_t>&) const method.\n"
<< "Row index " << i << " must be less than rows number.\n";
throw logic_error(buffer.str());
}
}
if(row_indices_size == 0)
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix template.\n"
<< "Vector<double> calculate_mean(const Vector<size_t>&, const Vector<size_t>&) const method.\n"
<< "Size of row indices must be greater than zero.\n";
throw logic_error(buffer.str());
}
// Columns check
if(column_indices_size > columns_number)
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix template.\n"
<< "Vector<double> calculate_mean(const Vector<size_t>&, const Vector<size_t>&) const method.\n"
<< "Column indices size must be equal or less than columns number.\n";
throw logic_error(buffer.str());
}
for(size_t i = 0; i < column_indices_size; i++)
{
if(means_column_indices[i] >= columns_number)
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix template.\n"
<< "Vector<double> calculate_mean(const Vector<size_t>&, const Vector<size_t>&) const method.\n"
<< "Column index " << i << " must be less than columns number.\n";
throw logic_error(buffer.str());
}
}
#endif
size_t row_index;
size_t column_index;
// Mean
Vector<double> mean(column_indices_size, 0.0);
for(size_t j = 0; j < column_indices_size; j++)
{
column_index = means_column_indices[j];
// All triplet positions stored for this column.
const Vector<size_t> current_column_nonzero_indices = columns_indices.calculate_equal_to_indices(column_index);
const size_t current_column_nonzero_number = current_column_nonzero_indices.size();
for(size_t i = 0; i < current_column_nonzero_number; i++)
{
row_index = rows_indices[current_column_nonzero_indices[i]];
// NOTE(review): contains() looks like a linear membership test, making
// this loop O(nnz_col * selected_rows) -- acceptable for small
// selections; verify before using on large ones.
if(means_row_indices.contains(row_index))
{
mean[j] += matrix_values[current_column_nonzero_indices[i]];
}
}
// Divide by the number of SELECTED rows: unselected entries are skipped
// above and implicit zeros contribute zero to the numerator.
mean[j] /= (double)row_indices_size;
}
return(mean);
}
template <class T>
Vector<double> SparseMatrix<T>::calculate_mean_missing_values(const Vector< Vector<size_t> >& missing_indices) const
{
    /// Returns the per-column means ignoring missing values, considering every
    /// row and every column of the matrix.
    /// @param missing_indices Missing row indices, one vector per column.

    // Delegate to the general overload with all rows and columns selected.
    const Vector<size_t> all_rows(0, 1, rows_number-1);
    const Vector<size_t> all_columns(0, 1, columns_number-1);

    return calculate_mean_missing_values(all_rows, all_columns, missing_indices);
}
/// Returns the mean of the selected columns over the selected rows, excluding
/// the rows listed as missing for each column. Each mean divides by the number
/// of actually used rows for that column, not by the full selection size.
/// @param means_row_indices Candidate row indices.
/// @param means_column_indices Columns whose means are computed.
/// @param missing_indices Missing row indices per column.
/// NOTE(review): missing_indices is indexed by position in
/// means_column_indices (not by absolute column index) -- confirm that callers
/// pass it aligned with the column selection.
template <class T>
Vector<double> SparseMatrix<T>::calculate_mean_missing_values(const Vector<size_t>& means_row_indices, const Vector<size_t>& means_column_indices,
const Vector< Vector<size_t> >& missing_indices) const
{
const size_t column_indices_size = means_column_indices.size();
// Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
const size_t row_indices_size = means_row_indices.size();
// Rows check
if(row_indices_size > rows_number)
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix template.\n"
<< "Vector<double> calculate_mean_missing_values(const Vector<size_t>&, const Vector<size_t>&, const Vector< Vector<size_t> >&) const method.\n"
<< "Size of row indices(" << row_indices_size << ") is greater than number of rows(" << rows_number << ").\n";
throw logic_error(buffer.str());
}
for(size_t i = 0; i < row_indices_size; i++)
{
if(means_row_indices[i] >= rows_number)
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix template.\n"
<< "Vector<double> calculate_mean_missing_values(const Vector<size_t>&, const Vector<size_t>&, Vector< Vector<size_t> >&) const method.\n"
<< "Row index " << i << " must be less than rows number.\n";
throw logic_error(buffer.str());
}
}
if(row_indices_size == 0)
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix template.\n"
<< "Vector<double> calculate_mean_missing_values(const Vector<size_t>&, const Vector<size_t>&, const Vector< Vector<size_t> >&) const method.\n"
<< "Size of row indices must be greater than zero.\n";
throw logic_error(buffer.str());
}
// Columns check
if(column_indices_size > columns_number)
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix template.\n"
<< "Vector<double> calculate_mean_missing_values(const Vector<size_t>&, const Vector<size_t>&, const Vector< Vector<size_t> >&) const method.\n"
<< "Column indices size must be equal or less than columns number.\n";
throw logic_error(buffer.str());
}
for(size_t i = 0; i < column_indices_size; i++)
{
if(means_column_indices[i] >= columns_number)
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix template.\n"
<< "Vector<double> calculate_mean_missing_values(const Vector<size_t>&, const Vector<size_t>&, const Vector< Vector<size_t> >&) const method.\n"
<< "Column index " << i << " must be less than columns number.\n";
throw logic_error(buffer.str());
}
}
#endif
// Mean
Vector<double> mean(column_indices_size, 0.0);
Vector< Vector<size_t> > used_rows(column_indices_size);
// First pass: per column, keep only the candidate rows not marked missing.
// Each iteration writes a distinct slot of used_rows, so the loop is
// safely parallel.
#pragma omp parallel for schedule(dynamic)
for(int i = 0; i < (int)column_indices_size; i++)
{
used_rows[i] = means_row_indices.get_difference(missing_indices[i]);
}
// Second pass: accumulate each column's mean over its usable rows.
// Iterations touch disjoint mean[j] slots, so this is also safely parallel.
#pragma omp parallel for schedule(dynamic)
for(int j = 0; j < (int)column_indices_size; j++)
{
const size_t column_index = means_column_indices[j];
const size_t current_rows_number = used_rows[j].size();
for(size_t i = 0; i < current_rows_number; i++)
{
const size_t row_index = used_rows[j][i];
mean[j] += (*this)(row_index,column_index);
}
// Guard against all rows being missing for this column (mean stays 0).
if(current_rows_number != 0)
{
mean[j] /= (double)current_rows_number;
}
}
return(mean);
}
template <class T>
Vector< Vector<double> > SparseMatrix<T>::calculate_mean_standard_deviation() const
{
    /// Returns the mean and the standard deviation of every column, computed
    /// on the dense column (implicit zeros included).
    /// The result holds two vectors: {means, standard deviations}.

    // Control sentence(if debug)

#ifdef __OPENNN_DEBUG__

    if(rows_number == 0)
    {
        ostringstream buffer;

        buffer << "OpenNN Exception: SparseMatrix template.\n"
               << "Vector<double> calculate_mean_standard_deviation() const method.\n"
               << "Number of rows must be greater than one.\n";

        throw logic_error(buffer.str());
    }

#endif

    Vector<double> means(columns_number, 0.0);
    Vector<double> deviations(columns_number, 0.0);

    for(size_t column = 0; column < columns_number; column++)
    {
        const Vector<T> dense_column = get_column(column);

        means[column] = dense_column.calculate_mean();
        deviations[column] = dense_column.calculate_standard_deviation();
    }

    return {means, deviations};
}
template <class T>
Vector< Vector<double> > SparseMatrix<T>::calculate_mean_standard_deviation(const Vector<size_t>& column_indices) const
{
    /// Returns the mean and the standard deviation of the selected columns,
    /// computed on the dense columns (implicit zeros included).
    /// @param column_indices Columns to analyse.

    const size_t selection_size = column_indices.size();

    Vector<double> means(selection_size);
    Vector<double> deviations(selection_size);

    Vector<double> dense_column(rows_number);

    for(size_t i = 0; i < selection_size; i++)
    {
        dense_column = get_column(column_indices[i]);

        means[i] = dense_column.calculate_mean();
        deviations[i] = dense_column.calculate_standard_deviation();
    }

    return {means, deviations};
}
template <class T>
Vector< Vector<double> > SparseMatrix<T>::calculate_mean_standard_deviation(const Vector<size_t>& row_indices, const Vector<size_t>& column_indices) const
{
    /// Returns the mean and the standard deviation of the selected columns,
    /// restricted to the selected rows.
    /// @param row_indices Rows taken into account.
    /// @param column_indices Columns to analyse.

    const size_t selection_size = column_indices.size();

    Vector<double> means(selection_size);
    Vector<double> deviations(selection_size);

    Vector<double> dense_column(rows_number);

    for(size_t i = 0; i < selection_size; i++)
    {
        dense_column = get_column(column_indices[i]);
        dense_column = dense_column.get_subvector(row_indices);

        means[i] = dense_column.calculate_mean();
        deviations[i] = dense_column.calculate_standard_deviation();
    }

    return {means, deviations};
}
template <class T>
Vector<double> SparseMatrix<T>::calculate_median() const
{
    /// Returns the median of every column, computed on the dense column
    /// (implicit zeros included).

    // Control sentence(if debug)

#ifdef __OPENNN_DEBUG__

    if(columns_number == 0)
    {
        ostringstream buffer;

        buffer << "OpenNN Exception: SparseMatrix template.\n"
               << "Vector<double> calculate_median() const method.\n"
               << "Number of columns must be greater than one.\n";

        throw logic_error(buffer.str());
    }

#endif

    // median

    Vector<double> median(columns_number, 0.0);

    for(size_t j = 0; j < columns_number; j++)
    {
        Vector<T> sorted_column(this->get_column(j));

        sort(sorted_column.begin(), sorted_column.end(), less<double>());

        if(rows_number % 2 == 0)
        {
            // Fixed: the median of an even-sized sample averages the elements
            // at positions n/2-1 and n/2. The old indices (n/2, n/2+1) were
            // shifted by one and read past the end for rows_number == 2.
            median[j] = (sorted_column[rows_number/2 - 1] + sorted_column[rows_number/2])/2;
        }
        else
        {
            median[j] = sorted_column[rows_number/2];
        }
    }

    return(median);
}
template <class T>
double SparseMatrix<T>::calculate_median(const size_t& column_index) const
{
    /// Returns the median of one column, computed on the dense column
    /// (implicit zeros included).
    /// @param column_index Column whose median is computed.

    // Control sentence(if debug)

#ifdef __OPENNN_DEBUG__

    if(rows_number == 0)
    {
        ostringstream buffer;

        buffer << "OpenNN Exception: SparseMatrix template.\n"
               << "double calculate_median(const size_t&) const method.\n"
               << "Number of rows must be greater than one.\n";

        throw logic_error(buffer.str());
    }

    if(column_index >= columns_number)
    {
        ostringstream buffer;

        buffer << "OpenNN Exception: SparseMatrix template.\n"
               << "double calculate_median(const size_t&) const method.\n"
               << "Index of column must be less than number of columns.\n";

        throw logic_error(buffer.str());
    }

#endif

    // median

    double median = 0.0;

    Vector<T> sorted_column(this->get_column(column_index));

    sort(sorted_column.begin(), sorted_column.end(), less<double>());

    if(rows_number % 2 == 0)
    {
        // Fixed: even-sized median averages the elements at n/2-1 and n/2;
        // the old indices (n/2, n/2+1) were shifted by one and read out of
        // bounds for rows_number == 2.
        median = (sorted_column[rows_number/2 - 1] + sorted_column[rows_number/2])/2;
    }
    else
    {
        median = sorted_column[rows_number/2];
    }

    return(median);
}
template <class T>
Vector<double> SparseMatrix<T>::calculate_median(const Vector<size_t>& median_column_indices) const
{
    /// Returns the median of each requested column, computed on the dense
    /// columns (implicit zeros included).
    /// @param median_column_indices Columns whose medians are computed.

    const size_t column_indices_size = median_column_indices.size();

    size_t column_index;

    // median

    Vector<double> median(column_indices_size, 0.0);

    for(size_t j = 0; j < column_indices_size; j++)
    {
        column_index = median_column_indices[j];

        Vector<T> sorted_column(this->get_column(column_index));

        sort(sorted_column.begin(), sorted_column.end(), less<double>());

        if(rows_number % 2 == 0)
        {
            // Fixed: even-sized median averages the elements at n/2-1 and n/2;
            // the old indices (n/2, n/2+1) were shifted by one and read out of
            // bounds for rows_number == 2.
            median[j] = (sorted_column[rows_number/2 - 1] + sorted_column[rows_number/2])/2;
        }
        else
        {
            median[j] = sorted_column[rows_number/2];
        }
    }

    return(median);
}
template <class T>
Vector<double> SparseMatrix<T>::calculate_median(const Vector<size_t>& median_row_indices, const Vector<size_t>& median_column_indices) const
{
    /// Returns the median of each requested column, restricted to the
    /// requested rows (the dense column is subset before sorting).
    /// @param median_row_indices Rows taken into account.
    /// @param median_column_indices Columns whose medians are computed.

    const size_t row_indices_size = median_row_indices.size();
    const size_t column_indices_size = median_column_indices.size();

    // Control sentence(if debug)

#ifdef __OPENNN_DEBUG__

    // Rows check

    if(row_indices_size > rows_number)
    {
        ostringstream buffer;

        buffer << "OpenNN Exception: SparseMatrix template.\n"
               << "Vector<double> calculate_median(const Vector<size_t>&, const Vector<size_t>&) const method.\n"
               << "Size of row indices(" << row_indices_size << ") is greater than number of rows(" << rows_number << ").\n";

        throw logic_error(buffer.str());
    }

    for(size_t i = 0; i < row_indices_size; i++)
    {
        if(median_row_indices[i] >= rows_number)
        {
            ostringstream buffer;

            buffer << "OpenNN Exception: SparseMatrix template.\n"
                   << "Vector<double> calculate_median(const Vector<size_t>&, const Vector<size_t>&) const method.\n"
                   << "Row index " << i << " must be less than rows number.\n";

            throw logic_error(buffer.str());
        }
    }

    if(row_indices_size == 0)
    {
        ostringstream buffer;

        buffer << "OpenNN Exception: SparseMatrix template.\n"
               << "Vector<double> calculate_median(const Vector<size_t>&, const Vector<size_t>&) const method.\n"
               << "Size of row indices must be greater than zero.\n";

        throw logic_error(buffer.str());
    }

    // Columns check

    if(column_indices_size > columns_number)
    {
        ostringstream buffer;

        buffer << "OpenNN Exception: SparseMatrix template.\n"
               << "Vector<double> calculate_median(const Vector<size_t>&, const Vector<size_t>&) const method.\n"
               << "Column indices size must be equal or less than columns number.\n";

        throw logic_error(buffer.str());
    }

    for(size_t i = 0; i < column_indices_size; i++)
    {
        if(median_column_indices[i] >= columns_number)
        {
            ostringstream buffer;

            buffer << "OpenNN Exception: SparseMatrix template.\n"
                   << "Vector<double> calculate_median(const Vector<size_t>&, const Vector<size_t>&) const method.\n"
                   << "Column index " << i << " must be less than columns number.\n";

            throw logic_error(buffer.str());
        }
    }

#endif

    size_t column_index;

    // median

    Vector<double> median(column_indices_size, 0.0);

    for(size_t j = 0; j < column_indices_size; j++)
    {
        column_index = median_column_indices[j];

        Vector<T> sorted_column(this->get_column(column_index));

        sorted_column = sorted_column.get_subvector(median_row_indices);

        sort(sorted_column.begin(), sorted_column.end(), less<double>());

        if(row_indices_size % 2 == 0)
        {
            // Fixed: even-sized median averages the elements at n/2-1 and n/2;
            // the old indices (n/2, n/2+1) were shifted by one and read out of
            // bounds for row_indices_size == 2.
            median[j] = (sorted_column[row_indices_size/2 - 1] + sorted_column[row_indices_size/2])/2;
        }
        else
        {
            median[j] = sorted_column[row_indices_size/2];
        }
    }

    return(median);
}
template <class T>
Vector<double> SparseMatrix<T>::calculate_median_missing_values(const Vector< Vector<size_t> >& missing_indices) const
{
    /// Returns the per-column medians ignoring missing values, considering
    /// every row and every column of the matrix.
    /// @param missing_indices Missing row indices, one vector per column.

    // Delegate to the general overload with all rows and columns selected.
    const Vector<size_t> all_rows(0, 1, rows_number-1);
    const Vector<size_t> all_columns(0, 1, columns_number-1);

    return calculate_median_missing_values(all_rows, all_columns, missing_indices);
}
template <class T>
Vector<double> SparseMatrix<T>::calculate_median_missing_values(const Vector<size_t>& median_row_indices,
        const Vector<size_t>& median_column_indices,
        const Vector< Vector<size_t> >& missing_indices) const
{
    // Median of each requested column, computed over the requested rows minus
    // the rows flagged as missing for that column.
    const size_t column_indices_size = median_column_indices.size();

    // Control sentence(if debug)

#ifdef __OPENNN_DEBUG__

    const size_t row_indices_size = median_row_indices.size();

    // Rows check

    if(row_indices_size > rows_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix template.\n"
               << "Vector<double> calculate_median_missing_values(const Vector<size_t>&, const Vector<size_t>&, const Vector< Vector<size_t> >&) const method.\n"
               << "Size of row indices(" << row_indices_size << ") is greater than number of rows(" << rows_number << ").\n";
        throw logic_error(buffer.str());
    }

    for(size_t i = 0; i < row_indices_size; i++)
    {
        if(median_row_indices[i] >= rows_number)
        {
            ostringstream buffer;
            buffer << "OpenNN Exception: SparseMatrix template.\n"
                   << "Vector<double> calculate_median_missing_values(const Vector<size_t>&, const Vector<size_t>&, Vector< Vector<size_t> >&) const method.\n"
                   << "Row index " << i << " must be less than rows number.\n";
            throw logic_error(buffer.str());
        }
    }

    if(row_indices_size == 0)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix template.\n"
               << "Vector<double> calculate_median_missing_values(const Vector<size_t>&, const Vector<size_t>&, const Vector< Vector<size_t> >&) const method.\n"
               << "Size of row indices must be greater than zero.\n";
        throw logic_error(buffer.str());
    }

    // Columns check

    if(column_indices_size > columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix template.\n"
               << "Vector<double> calculate_median_missing_values(const Vector<size_t>&, const Vector<size_t>&, const Vector< Vector<size_t> >&) const method.\n"
               << "Column indices size must be equal or less than columns number.\n";
        throw logic_error(buffer.str());
    }

    for(size_t i = 0; i < column_indices_size; i++)
    {
        if(median_column_indices[i] >= columns_number)
        {
            ostringstream buffer;
            buffer << "OpenNN Exception: SparseMatrix template.\n"
                   << "Vector<double> calculate_median_missing_values(const Vector<size_t>&, const Vector<size_t>&, const Vector< Vector<size_t> >&) const method.\n"
                   << "Column index " << i << " must be less than columns number.\n";
            throw logic_error(buffer.str());
        }
    }

#endif

    // median

    Vector<double> median(column_indices_size, 0.0);

    // For each requested column, the rows that are actually usable
    // (requested rows minus that column's missing rows).
    Vector< Vector<size_t> > used_rows(column_indices_size);

#pragma omp parallel for schedule(dynamic)
    for(int i = 0; i < (int)column_indices_size; i++)
    {
        used_rows[i] = median_row_indices.get_difference(missing_indices[i]);
    }

#pragma omp parallel for schedule(dynamic)
    for(int j = 0; j < (int)column_indices_size; j++)
    {
        const size_t column_index = median_column_indices[j];
        const size_t current_rows_number = used_rows[j].size();

        // A column with no usable rows has no median; leave the 0.0 default
        // (the original code would have read past the end of an empty vector).
        if(current_rows_number == 0)
        {
            continue;
        }

        Vector<T> sorted_column(this->get_column(column_index));
        sorted_column = sorted_column.get_subvector(used_rows[j]);

        sort(sorted_column.begin(), sorted_column.end(), less<double>());

        if(current_rows_number % 2 == 0)
        {
            // Even count: average the two middle elements.  Bug fix: the
            // previous code averaged elements [n/2] and [n/2 + 1], which is
            // off by one (the correct pair is [n/2 - 1] and [n/2]) and read
            // out of bounds for n == 2.
            median[j] = (sorted_column[current_rows_number/2 - 1] + sorted_column[current_rows_number/2])/2.0;
        }
        else
        {
            median[j] = sorted_column[current_rows_number/2];
        }
    }

    return(median);
}
template <class T>
T SparseMatrix<T>::calculate_minimum() const
{
    // Non-stored entries are implicitly T(), so they take part in the
    // minimum alongside the stored values.
    const T stored_minimum = matrix_values.calculate_minimum();

    return min(T(), stored_minimum);
}
template <class T>
T SparseMatrix<T>::calculate_maximum() const
{
    // Non-stored entries are implicitly T(), so the matrix maximum is the
    // LARGER of T() and the largest stored value.  Bug fix: the original
    // called min() here — a copy-paste from calculate_minimum() — which
    // returned the smaller of the two.
    return max(T(), matrix_values.calculate_maximum());
}
template <class T>
T SparseMatrix<T>::calculate_column_minimum(const size_t& column_index) const
{
    // Materialize the column (implicit zeros included) and take its minimum.
    const Vector<T> dense_column = get_column(column_index);

    return dense_column.calculate_minimum();
}
template <class T>
T SparseMatrix<T>::calculate_column_maximum(const size_t& column_index) const
{
    // Materialize the column (implicit zeros included) and take its maximum.
    const Vector<T> dense_column = get_column(column_index);

    return dense_column.calculate_maximum();
}
template <class T>
Vector<T> SparseMatrix<T>::calculate_means_binary() const
{
    // Two columns means a single binary column plus a value column;
    // otherwise one binary indicator column per class.
    return columns_number == 2 ? calculate_means_binary_column()
                               : calculate_means_binary_columns();
}
template <class T>
Vector<T> SparseMatrix<T>::calculate_means_binary_column() const
{
// Sums of the second column grouped by the binary value (0 or 1) of the
// first column; rows whose first-column value is neither 0 nor 1 are ignored.
// NOTE(review): both group sums are divided by the single combined count,
// not by per-group counts, so these are not true per-class means — confirm
// this mirrors the dense Matrix implementation before changing it.
Vector<T> means(2,0.0);
size_t count = 0;
Vector<T> first_column = get_column(0);
Vector<T> second_column = get_column(1);
for(size_t i = 0; i < rows_number; i++)
{
if(first_column[i] == 0.0)
{
means[0] += second_column[i];
count++;
}
else if(first_column[i] == 1.0)
{
means[1] += second_column[i];
count++;
}
}
// With no 0/1 rows at all, both entries stay zero.
if(count != 0)
{
means[0] = (T)means[0]/(T)count;
means[1] = (T)means[1]/(T)count;
}
else
{
means[0] = 0.0;
means[1] = 0.0;
}
return means;
}
template <class T>
Vector<T> SparseMatrix<T>::calculate_means_binary_columns() const
{
    // For every indicator column (all but the last), average the values of
    // the last column over the rows where the indicator equals 1.
    const Vector<T> targets = get_column(columns_number-1);

    Vector<T> means(columns_number-1);

    for(size_t i = 0; i < columns_number-1; i++)
    {
        const Vector<T> indicator = get_column(i);

        T accumulated = 0.0;
        size_t hits = 0;

        for(size_t j = 0; j < rows_number; j++)
        {
            if(indicator[j] == 1.0)
            {
                accumulated += targets[j];
                hits++;
            }
        }

        // A class with no rows gets a zero mean.
        means[i] = hits != 0 ? (T)accumulated/(T)hits : 0.0;
    }

    return means;
}
template <class T>
Vector<T> SparseMatrix<T>::calculate_means_binary_missing_values(const Vector< Vector<size_t> >& missing_indices) const
{
    // Dispatch on the matrix layout, as in calculate_means_binary().
    if(columns_number == 2)
    {
        return calculate_means_binary_column_missing_values(missing_indices);
    }

    return calculate_means_binary_columns_missing_values(missing_indices);
}
template <class T>
Vector<T> SparseMatrix<T>::calculate_means_binary_column_missing_values(const Vector< Vector<size_t> >& missing_indices) const
{
// Same as calculate_means_binary_column(), but rows listed in
// missing_indices[0] (missing values of the binary column) are skipped.
// NOTE(review): both group sums are divided by the single combined count,
// not per-group counts — confirm against the dense Matrix implementation.
Vector<T> means(2,0.0);
size_t count = 0;
Vector<T> first_column = get_column(0);
Vector<T> second_column = get_column(1);
for(size_t i = 0; i < rows_number; i++)
{
if(!missing_indices[0].contains(i))
{
if(first_column[i] == 0.0)
{
means[0] += second_column[i];
count++;
}
else if(first_column[i] == 1.0)
{
means[1] += second_column[i];
count++;
}
}
}
// With no usable 0/1 rows, both entries stay zero.
if(count != 0)
{
means[0] = (T)means[0]/(T)count;
means[1] = (T)means[1]/(T)count;
}
else
{
means[0] = 0.0;
means[1] = 0.0;
}
return means;
}
template <class T>
Vector<T> SparseMatrix<T>::calculate_means_binary_columns_missing_values(const Vector< Vector<size_t> >& missing_indices) const
{
    // Like calculate_means_binary_columns(), but rows listed as missing for
    // an indicator column are excluded from that column's average.
    const Vector<T> targets = get_column(columns_number-1);

    Vector<T> means(columns_number-1);

    for(size_t i = 0; i < columns_number-1; i++)
    {
        const Vector<T> indicator = get_column(i);

        T accumulated = 0.0;
        size_t hits = 0;

        for(size_t j = 0; j < rows_number; j++)
        {
            if(indicator[j] == 1.0 && !missing_indices[i].contains(j))
            {
                accumulated += targets[j];
                hits++;
            }
        }

        means[i] = hits != 0 ? (T)accumulated/(T)hits : 0.0;
    }

    return means;
}
template <class T>
Vector< Vector<T> > SparseMatrix<T>::calculate_minimum_maximum() const
{
    // Per-column minima and maxima.  Implicit (non-stored) entries are T(),
    // hence the clamping against T() after scanning the stored values.
    Vector<T> minimums(columns_number,(T)numeric_limits<double>::max());
    Vector<T> maximums(columns_number,(T)-numeric_limits<double>::max());

    for(size_t j = 0; j < columns_number; j++)
    {
        // Positions of this column's stored (nonzero) entries.
        const Vector<size_t> nonzero_positions = columns_indices.calculate_equal_to_indices(j);
        const Vector<T> nonzero_values = matrix_values.get_subvector(nonzero_positions);

        const Vector<T> column_minimum_maximum = nonzero_values.calculate_minimum_maximum();

        minimums[j] = min(T(),column_minimum_maximum[0]);
        maximums[j] = max(T(),column_minimum_maximum[1]);
    }

    return {minimums, maximums};
}
template <class T>
Vector< Vector<T> > SparseMatrix<T>::calculate_minimum_maximum(const Vector<size_t>& calculate_column_indices) const
{
    // Minimum and maximum of each requested column over all rows.  Implicit
    // (non-stored) entries are T(), so results are clamped against T().
    const size_t column_indices_size = calculate_column_indices.size();

#ifdef __OPENNN_DEBUG__

    for(size_t i = 0; i < column_indices_size; i++)
    {
        if(calculate_column_indices[i] >= columns_number)
        {
            ostringstream buffer;
            buffer << "OpenNN Exception: SparseMatrix template."
                   << "Vector<T> calculate_minimum_maximum(const Vector<size_t>&) const method.\n"
                   << "Index of column must be less than number of columns.\n";
            throw logic_error(buffer.str());
        }
    }

#endif

    // (Removed an unused local "minimum_maximum" that was allocated and
    // never read.)
    Vector<T> minimum(column_indices_size,(T)numeric_limits<double>::max());
    Vector<T> maximum(column_indices_size,(T)-numeric_limits<double>::max());

    for(size_t j = 0; j < column_indices_size; j++)
    {
        const size_t column_index = calculate_column_indices[j];

        // Stored (nonzero) entries of the requested column.
        const Vector<size_t> current_column_nonzero_indices = columns_indices.calculate_equal_to_indices(column_index);
        const Vector<T> current_column_nonzero_values = matrix_values.get_subvector(current_column_nonzero_indices);

        const Vector<T> current_minimum_maximum = current_column_nonzero_values.calculate_minimum_maximum();

        minimum[j] = min(T(),current_minimum_maximum[0]);
        maximum[j] = max(T(),current_minimum_maximum[1]);
    }

    return {minimum, maximum};
}
template <class T>
Vector< Vector<T> > SparseMatrix<T>::calculate_minimum_maximum(const Vector<size_t>& calculate_row_indices, const Vector<size_t>& calculate_column_indices) const
{
    // Minimum and maximum of each requested column, restricted to the
    // requested rows.
    const size_t row_indices_size = calculate_row_indices.size();
    const size_t column_indices_size = calculate_column_indices.size();

    Vector<T> minimum(column_indices_size,(T) numeric_limits<double>::max());
    Vector<T> maximum(column_indices_size,(T)-numeric_limits<double>::max());

    for(size_t j = 0; j < column_indices_size; j++)
    {
        const size_t column_index = calculate_column_indices[j];

        // Bug fix: fetch the REQUESTED column.  The original called
        // get_column(j), ignoring column_index entirely, so any index
        // vector other than {0,1,2,...} produced wrong results.
        const Vector<T> current_column = get_column(column_index);

        for(size_t i = 0; i < row_indices_size; i++)
        {
            const size_t row_index = calculate_row_indices[i];

            if(current_column[row_index] < minimum[j])
            {
                minimum[j] = current_column[row_index];
            }

            if(current_column[row_index] > maximum[j])
            {
                maximum[j] = current_column[row_index];
            }
        }
    }

    return {minimum, maximum};
}
template <class T>
Vector< Statistics<T> > SparseMatrix<T>::calculate_statistics() const
{
    // Statistics (min/max/mean/std) of every column over all rows.

    // Control sentence(if debug)

#ifdef __OPENNN_DEBUG__

    if(rows_number == 0)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix template.\n"
               << "Vector< Statistics<T> > calculate_statistics() const method.\n"
               << "Number of rows must be greater than one.\n";
        throw logic_error(buffer.str());
    }

#endif

    Vector< Statistics<T> > statistics(columns_number);

    // Columns are independent, so the loop parallelizes trivially; the
    // column buffer is loop-local and therefore private to each thread.
#pragma omp parallel for
    for(int i = 0; i < (int)columns_number; i++)
    {
        const Vector<T> column = get_column(i);

        statistics[i] = column.calculate_statistics();
    }

    return(statistics);
}
template <class T>
Vector< Statistics<T> > SparseMatrix<T>::calculate_statistics(const Vector<size_t>& calculate_row_indices, const Vector<size_t>& calculate_column_indices) const
{
// Statistics of the selected columns over the selected rows only; rows not
// selected are passed to the Vector as "missing values".
const size_t column_indices_size = calculate_column_indices.size();
Vector< Statistics<T> > statistics(column_indices_size);
// NOTE(review): (0,1,rows_number) produces indices 0..rows_number inclusive,
// unlike the rows_number-1 used elsewhere (e.g. the median overload).  The
// extra index only appears in an exclusion list, so it is presumably
// harmless — confirm against Vector::calculate_statistics_missing_values.
Vector<size_t> unused_rows(0,1,rows_number);
unused_rows = unused_rows.get_difference(calculate_row_indices);
for(size_t i = 0; i < column_indices_size; i++)
{
const Vector<T> column = get_column(calculate_column_indices[i]);
statistics[i] = column.calculate_statistics_missing_values(unused_rows);
}
return statistics;
}
template <class T>
Vector< Statistics<T> > SparseMatrix<T>::calculate_statistics(const Vector< Vector<size_t> >& calculate_row_indices, const Vector<size_t>& calculate_column_indices) const
{
    // Per-column statistics, where every column has its own set of usable
    // rows (calculate_row_indices[i] belongs to column i).
    const size_t column_indices_size = calculate_column_indices.size();

    Vector< Statistics<T> > statistics(column_indices_size);

    const Vector<size_t> sequential_row_indices(0,1,rows_number);

    for(size_t i = 0; i < column_indices_size; i++)
    {
        // Rows NOT listed for this column are treated as missing values.
        const Vector<size_t> current_unused_rows = sequential_row_indices.get_difference(calculate_row_indices[i]);

        const Vector<T> column = get_column(calculate_column_indices[i]);

        statistics[i] = column.calculate_statistics_missing_values(current_unused_rows);
    }

    return statistics;
}
template <class T>
Vector< Statistics<T> > SparseMatrix<T>::calculate_statistics_missing_values(const Vector< Vector<size_t> >& missing_indices) const
{
    // Statistics of every column, skipping each column's missing rows.

    // Control sentence(if debug)

#ifdef __OPENNN_DEBUG__

    if(rows_number == 0)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix template.\n"
               << "Vector< Statistics<double> > calculate_statistics_missing_values(const Vector< Vector<size_t> >&) const method.\n"
               << "Number of rows must be greater than one.\n";
        throw logic_error(buffer.str());
    }

    if(missing_indices.size() != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix template.\n"
               << "Vector< Statistics<double> > calculate_statistics_missing_values(const Vector< Vector<size_t> >&) const method.\n"
               << "Size of missing indices(" << missing_indices.size() << ") must be equal to to number of columns(" << columns_number << ").\n";
        throw logic_error(buffer.str());
    }

#endif

    Vector< Statistics<T> > statistics(columns_number);

    for(size_t i = 0; i < columns_number; i++)
    {
        const Vector<T> column = get_column(i);

        statistics[i] = column.calculate_statistics_missing_values(missing_indices[i]);
    }

    return(statistics);
}
template <class T>
Vector< Statistics<T> > SparseMatrix<T>::calculate_columns_statistics_missing_values(const Vector<size_t>& calculate_column_indices,
        const Vector< Vector<size_t> > missing_indices) const
{
    // Statistics of the selected columns, ignoring each column's missing
    // rows (missing_indices is indexed by the ORIGINAL column index).
    const size_t column_indices_size = calculate_column_indices.size();

    Vector< Statistics<T> > statistics(column_indices_size);

    // Loop-local variables are implicitly private to each OpenMP thread.
#pragma omp parallel for schedule(dynamic)
    for(int i = 0; i < (int)column_indices_size; i++)
    {
        const size_t index = calculate_column_indices[i];

        const Vector<T> column = get_column(index);

        statistics[i] = column.calculate_statistics_missing_values(missing_indices[index]);
    }

    return statistics;
}
template <class T>
Vector< Statistics<T> > SparseMatrix<T>::calculate_rows_statistics(const Vector<size_t>& calculate_row_indices) const
{
    // Per-column statistics computed over the selected rows only; rows not
    // selected are passed to the Vector as "missing values".
    Vector<size_t> unused_rows(0,1,rows_number);
    unused_rows = unused_rows.get_difference(calculate_row_indices);

    // Bug fix: the result holds one Statistics per COLUMN and is indexed by
    // column in the loop below, so it must be sized with columns_number.
    // It was sized with the number of row indices, writing out of bounds
    // whenever columns_number exceeded that.
    Vector< Statistics<T> > statistics(columns_number);

    for(size_t i = 0; i < columns_number; i++)
    {
        const Vector<T> column = get_column(i);

        statistics[i] = column.calculate_statistics_missing_values(unused_rows);
    }

    return statistics;
}
template <class T>
Vector< Statistics<T> > SparseMatrix<T>::calculate_rows_statistics_missing_values(const Vector<size_t>& calculate_row_indices,
const Vector< Vector<size_t> >& missing_indices) const
{
// Per-column statistics over the selected rows, additionally skipping each
// column's missing values.
// NOTE(review): get_subvector() renumbers the rows 0..row_indices_size-1,
// but missing_indices[i] presumably holds ORIGINAL row numbers — confirm
// the two numbering schemes actually match before relying on this.
const size_t row_indices_size = calculate_row_indices.size();
Vector< Statistics<T> > statistics(columns_number);
Vector<T> column(row_indices_size);
for(size_t i = 0; i < columns_number; i++)
{
column = get_column(i);
column = column.get_subvector(calculate_row_indices);
statistics[i] = column.calculate_statistics_missing_values(missing_indices[i]);
}
return statistics;
}
template <class T>
Vector< Statistics<T> > SparseMatrix<T>::calculate_columns_statistics(const Vector<size_t>& calculate_column_indices) const
{
    // Statistics of the selected columns over all rows.
    const size_t column_indices_size = calculate_column_indices.size();

    Vector< Statistics<T> > statistics(column_indices_size);

    for(size_t i = 0; i < column_indices_size; i++)
    {
        const Vector<T> column = get_column(calculate_column_indices[i]);

        statistics[i] = column.calculate_statistics();
    }

    return statistics;
}
template <class T>
Vector<T> SparseMatrix<T>::calculate_rows_means(const Vector<size_t>& calculate_row_indices) const
{
    // Mean of every column over the given rows; an empty index vector means
    // "use all rows".
    Vector<size_t> used_row_indices;

    if(calculate_row_indices.empty())
    {
        used_row_indices.set(this->get_rows_number());
        used_row_indices.initialize_sequential();
    }
    else
    {
        used_row_indices = calculate_row_indices;
    }

    Vector<T> means(columns_number);

    for(size_t i = 0; i < columns_number; i++)
    {
        const Vector<T> restricted_column = get_column(i).get_subvector(used_row_indices);

        means[i] = restricted_column.calculate_mean();
    }

    return means;
}
template <class T>
Vector<T> SparseMatrix<T>::calculate_columns_minimums(const Vector<size_t>& calculate_column_indices) const
{
    // Minimum of each selected column; an empty index vector selects every
    // column.
    Vector<size_t> used_column_indices;

    if(calculate_column_indices.empty())
    {
        used_column_indices.set(columns_number);
        used_column_indices.initialize_sequential();
    }
    else
    {
        used_column_indices = calculate_column_indices;
    }

    const size_t column_indices_size = used_column_indices.size();

    Vector<T> minimums(column_indices_size);

    for(size_t i = 0; i < column_indices_size; i++)
    {
        minimums[i] = get_column(used_column_indices[i]).calculate_minimum();
    }

    return minimums;
}
template <class T>
Vector<T> SparseMatrix<T>::calculate_columns_maximums(const Vector<size_t>& calculate_column_indices) const
{
    // Maximum of each selected column; an empty index vector selects every
    // column.  (Local renamed from the misleading "minimums".)
    Vector<size_t> used_column_indices;

    if(calculate_column_indices.empty())
    {
        used_column_indices.set(columns_number);
        used_column_indices.initialize_sequential();
    }
    else
    {
        used_column_indices = calculate_column_indices;
    }

    const size_t column_indices_size = used_column_indices.size();

    Vector<T> maximums(column_indices_size);

    for(size_t i = 0; i < column_indices_size; i++)
    {
        maximums[i] = get_column(used_column_indices[i]).calculate_maximum();
    }

    return maximums;
}
template <class T>
Vector< Vector<double> > SparseMatrix<T>::calculate_box_plots(const Vector<Vector<size_t> >& calculate_rows_indices,
        const Vector<size_t>& calculate_columns_indices) const
{
    // Box plot of each requested column over its own set of rows
    // (calculate_rows_indices[i] belongs to calculate_columns_indices[i]).
    const size_t calculate_columns_number = calculate_columns_indices.size();

#ifdef __OPENNN_DEBUG__
    // Bug fix: the guard previously threw when the sizes DID match ("==");
    // it must fire when they differ, as the message states.
    if(calculate_columns_number != calculate_rows_indices.size())
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix template."
               << "Vector< Vector<double> > calculate_box_plots(const Vector<Vector<size_t> >&, const Vector<size_t>&) const method.\n"
               << "Size of row indices must be equal to the number of columns.\n";
        throw logic_error(buffer.str());
    }
#endif

    Vector< Vector<double> > box_plots(calculate_columns_number);

    for(size_t i = 0; i < calculate_columns_number; i++)
    {
        // NOTE(review): Vector<size_t> here converts the column values to
        // size_t before the box plot is computed — confirm this is intended.
        const Vector<size_t> current_column = get_column(calculate_columns_indices[i]).get_subvector(calculate_rows_indices[i]);

        box_plots[i] = current_column.calculate_box_plot();
    }

    return box_plots;
}
template <class T>
SparseMatrix<double> SparseMatrix<T>::calculate_covariance_sparse_matrix() const
{
    // Symmetric column-covariance matrix.  All-zero columns contribute
    // nothing and are skipped; zero covariances are simply never stored.
    const size_t size = columns_number;

#ifdef __OPENNN_DEBUG__

    if(size == 0)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix template."
               << "SparseMatrix<double> calculate_covariance_sparse_matrix() const method.\n"
               << "Number of columns must be greater than zero.\n";
        throw logic_error(buffer.str());
    }

#endif

    SparseMatrix<double> covariance_sparse_matrix(size, size);

    for(size_t i = 0; i < size; i++)
    {
        const Vector<T> first_column = get_column(i);

        if(first_column == T())
        {
            continue;
        }

        // Only the upper triangle is computed; the mirror element is set
        // alongside it.
        for(size_t j = i; j < size; j++)
        {
            const Vector<T> second_column = get_column(j);

            if(second_column == T())
            {
                continue;
            }

            const double covariance = first_column.calculate_covariance(second_column);

            if(covariance != 0.0)
            {
                covariance_sparse_matrix.set_element(i,j,covariance);
                covariance_sparse_matrix.set_element(j,i,covariance);
            }
        }
    }

    return covariance_sparse_matrix;
}
template <class T>
Vector< Histogram<T> > SparseMatrix<T>::calculate_histograms(const size_t& bins_number) const
{
    // One histogram per column; binary columns use the dedicated two-bin
    // histogram, all others are binned into bins_number bins.
    Vector< Histogram<T> > histograms(columns_number);

    for(size_t i = 0; i < columns_number; i++)
    {
        const Vector<T> column = get_column(i);

        histograms[i] = column.is_binary() ? column.calculate_histogram_binary()
                                           : column.calculate_histogram(bins_number);
    }

    return(histograms);
}
template <class T>
Vector< Histogram<T> > SparseMatrix<T>::calculate_histograms_missing_values(const Vector< Vector<size_t> >& missing_indices, const size_t& bins_number) const
{
    // One histogram per column, skipping each column's missing rows.
    Vector< Histogram<T> > histograms(columns_number);

    for(size_t i = 0; i < columns_number; i++)
    {
        const Vector<T> column = get_column(i);

        histograms[i] = column.calculate_histogram_missing_values(missing_indices[i], bins_number);
    }

    return(histograms);
}
template <class T>
void SparseMatrix<T>::scale_mean_standard_deviation(const Vector< Statistics<T> >& statistics)
{
    // In-place per-column standardization: x -> (x - mean) / std.

#ifdef __OPENNN_DEBUG__

    const size_t size = statistics.size();

    if(size != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix template."
               << "void scale_mean_standard_deviation(const Vector< Statistics<T> >&) const method.\n"
               << "Size of statistics vector must be equal to number of columns.\n";
        throw logic_error(buffer.str());
    }

#endif

    // Rescale data

    for(size_t j = 0; j < columns_number; j++)
    {
        // A (near-)zero standard deviation would blow up the division, so
        // constant columns are left untouched.
        if(statistics[j].standard_deviation < numeric_limits<double>::min())
        {
            continue;
        }

        Vector<double> scaled_column = get_column(j);

        for(size_t i = 0; i < rows_number; i++)
        {
            scaled_column[i] = (scaled_column[i] - statistics[j].mean)/statistics[j].standard_deviation;
        }

        set_column(j, scaled_column);
    }
}
template <class T>
Vector< Statistics<T> > SparseMatrix<T>::scale_mean_standard_deviation()
{
    // Compute the column statistics, standardize in place, and return the
    // statistics so the caller can later unscale.
    const Vector< Statistics<T> > statistics = calculate_statistics();

    scale_mean_standard_deviation(statistics);

    return(statistics);
}
template <class T>
void SparseMatrix<T>::scale_rows_mean_standard_deviation(const Vector< Statistics<T> >& statistics, const Vector<size_t>& scale_row_indices)
{
    // Standardize every column, but only on the listed rows.

    // Control sentence(if debug)

#ifdef __OPENNN_DEBUG__

    const size_t statistics_size = statistics.size();

    if(statistics_size != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: Vector template.\n"
               << "void scale_rows_mean_standard_deviation(const Vector< Statistics<T> >&, const Vector<size_t>&) method.\n"
               << "Size of statistics must be equal to number of columns.\n";
        throw logic_error(buffer.str());
    }

#endif

    const size_t scale_row_number = scale_row_indices.size();

    // Scale columns

    for(size_t j = 0; j < columns_number; j++)
    {
        // Constant columns (near-zero standard deviation) are skipped.
        if(statistics[j].standard_deviation < numeric_limits<double>::min())
        {
            continue;
        }

        Vector<double> scaled_column = get_column(j);

        for(size_t i = 0; i < scale_row_number; i++)
        {
            const size_t row_index = scale_row_indices[i];

            scaled_column[row_index] = (scaled_column[row_index] - statistics[j].mean)/statistics[j].standard_deviation;
        }

        set_column(j, scaled_column);
    }
}
template <class T>
void SparseMatrix<T>::scale_columns_mean_standard_deviation(const Vector< Statistics<T> >& statistics, const Vector<size_t>& scale_columns_indices)
{
    // Standardize only the listed columns; statistics[j] pairs with
    // scale_columns_indices[j].
    const size_t columns_indices_size = scale_columns_indices.size();

    // Control sentence(if debug)

#ifdef __OPENNN_DEBUG__

    const size_t statistics_size = statistics.size();

    if(statistics_size != columns_indices_size)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: Vector template.\n"
               << "void scale_columns_mean_standard_deviation(const Vector< Statistics<T> >&, const Vector<size_t>&) method.\n"
               << "Size of statistics must be equal to size of columns indices.\n";
        throw logic_error(buffer.str());
    }

#endif

    // Scale columns

    for(size_t j = 0; j < columns_indices_size; j++)
    {
        // Constant columns (near-zero standard deviation) are skipped.
        if(statistics[j].standard_deviation < numeric_limits<double>::min())
        {
            continue;
        }

        const size_t column_index = scale_columns_indices[j];

        Vector<double> scaled_column = get_column(column_index);

#pragma omp parallel for
        for(int i = 0; i < static_cast<int>(rows_number); i++)
        {
            scaled_column[i] = (scaled_column[i] - statistics[j].mean)/statistics[j].standard_deviation;
        }

        set_column(column_index, scaled_column);
    }
}
template <class T>
void SparseMatrix<T>::scale_minimum_maximum(const Vector< Statistics<T> >& statistics)
{
    // In-place per-column scaling to [-1, 1]:
    // x -> 2*(x - min)/(max - min) - 1.

#ifdef __OPENNN_DEBUG__

    const size_t size = statistics.size();

    if(size != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix template."
               << "void scale_minimum_maximum(const Vector< Statistics<T> >&) method.\n"
               << "Size of statistics vector must be equal to number of columns.\n";
        throw logic_error(buffer.str());
    }

#endif

    // Rescale data

    for(size_t j = 0; j < columns_number; j++)
    {
        // Constant columns (max == min) would divide by zero; skip them.
        if(statistics[j].maximum - statistics[j].minimum < numeric_limits<double>::min())
        {
            continue;
        }

        Vector<double> scaled_column = get_column(j);

        for(size_t i = 0; i < rows_number; i++)
        {
            scaled_column[i] = 2.0*(scaled_column[i] - statistics[j].minimum)/(statistics[j].maximum-statistics[j].minimum)-1.0;
        }

        set_column(j, scaled_column);
    }
}
template <class T>
Vector< Statistics<T> > SparseMatrix<T>::scale_minimum_maximum()
{
    // Compute the column statistics, min-max scale in place, and return the
    // statistics so the caller can later unscale.
    const Vector< Statistics<T> > statistics = calculate_statistics();

    scale_minimum_maximum(statistics);

    return(statistics);
}
template <class T>
void SparseMatrix<T>::scale_rows_minimum_maximum(const Vector< Statistics<T> >& statistics, const Vector<size_t>& scale_row_indices)
{
    // Min-max scale every column to [-1, 1], but only on the listed rows.

    // Control sentence(if debug)

    const size_t row_indices_size = scale_row_indices.size();

#ifdef __OPENNN_DEBUG__

    const size_t statistics_size = statistics.size();

    if(statistics_size != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: Vector template.\n"
               << "void scale_rows_minimum_maximum(const Vector< Statistics<T> >&, const Vector<size_t>&) method.\n"
               << "Size of statistics must be equal to number of columns.\n";
        throw logic_error(buffer.str());
    }

#endif

    // Rescale data

    for(size_t j = 0; j < columns_number; j++)
    {
        // Constant columns (max == min) would divide by zero; skip them.
        if(statistics[j].maximum - statistics[j].minimum < numeric_limits<double>::min())
        {
            continue;
        }

        Vector<double> scaled_column = get_column(j);

        for(size_t i = 0; i < row_indices_size; i++)
        {
            const size_t row_index = scale_row_indices[i];

            scaled_column[row_index] = 2.0*(scaled_column[row_index] - statistics[j].minimum)/(statistics[j].maximum-statistics[j].minimum) - 1.0;
        }

        set_column(j, scaled_column);
    }
}
template <class T>
void SparseMatrix<T>::scale_columns_minimum_maximum(const Vector< Statistics<T> >& statistics, const Vector<size_t>& scale_columns_indices)
{
    // Min-max scale only the listed columns to [-1, 1]; statistics[j] pairs
    // with scale_columns_indices[j].
    const size_t columns_indices_size = scale_columns_indices.size();

    // Control sentence(if debug)

#ifdef __OPENNN_DEBUG__

    const size_t statistics_size = statistics.size();

    if(statistics_size != columns_indices_size)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: Vector template.\n"
               << "void scale_columns_minimum_maximum(const Vector< Statistics<T> >&, const Vector<size_t>&) method.\n"
               << "Size of statistics must be equal to size of columns indices.\n";
        throw logic_error(buffer.str());
    }

#endif

    size_t column_index;

    // Scale columns

    for(size_t j = 0; j < columns_indices_size; j++)
    {
        // Bug fix: this method previously tested standard_deviation and
        // applied MEAN/STANDARD-DEVIATION scaling — a copy-paste of
        // scale_columns_mean_standard_deviation().  It now applies the
        // [-1, 1] minimum-maximum transformation, consistent with
        // scale_minimum_maximum() and scale_rows_minimum_maximum().
        if(statistics[j].maximum - statistics[j].minimum < numeric_limits<double>::min())
        {
            // Do nothing: constant column, scaling would divide by zero.
        }
        else
        {
            column_index = scale_columns_indices[j];

            Vector<double> current_column = get_column(column_index);

#pragma omp parallel for
            for(int i = 0; i < static_cast<int>(rows_number); i++)
            {
                current_column[i] = 2.0*(current_column[i] - statistics[j].minimum)/(statistics[j].maximum-statistics[j].minimum) - 1.0;
            }

            set_column(column_index, current_column);
        }
    }
}
template <class T>
void SparseMatrix<T>::scale_logarithmic(const Vector< Statistics<T> >& statistics)
{
    // In-place per-column logarithmic scaling:
    // x -> log(1 + 2*(x - min)/(max - min)).

#ifdef __OPENNN_DEBUG__

    const size_t size = statistics.size();

    if(size != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix template."
               << "void scale_logarithmic(const Vector< Statistics<T> >&) method.\n"
               << "Size of statistics vector must be equal to number of columns.\n";
        throw logic_error(buffer.str());
    }

#endif

    // Rescale data

    for(size_t j = 0; j < columns_number; j++)
    {
        // Constant columns (max == min) would divide by zero; skip them.
        if(statistics[j].maximum - statistics[j].minimum < numeric_limits<double>::min())
        {
            continue;
        }

        Vector<double> scaled_column = get_column(j);

        for(size_t i = 0; i < rows_number; i++)
        {
            scaled_column[i] = log(1.0+ (2.0*(scaled_column[i] - statistics[j].minimum)/(statistics[j].maximum-statistics[j].minimum)));
        }

        set_column(j, scaled_column);
    }
}
template <class T>
Vector< Statistics<T> > SparseMatrix<T>::scale_logarithmic()
{
    // Compute the column statistics, log-scale in place, and return the
    // statistics so the caller can later unscale.
    const Vector< Statistics<T> > statistics = calculate_statistics();

    scale_logarithmic(statistics);

    return(statistics);
}
template <class T>
void SparseMatrix<T>::scale_rows_logarithmic(const Vector< Statistics<T> >& statistics, const Vector<size_t>& scale_row_indices)
{
    // Logarithmically scale every column, but only on the listed rows.

    // Control sentence(if debug)

    const size_t row_indices_size = scale_row_indices.size();

#ifdef __OPENNN_DEBUG__

    const size_t statistics_size = statistics.size();

    if(statistics_size != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: Vector template.\n"
               << "void scale_rows_logarithmic(const Vector< Statistics<T> >&, const Vector<size_t>&) method.\n"
               << "Size of statistics must be equal to number of columns.\n";
        throw logic_error(buffer.str());
    }

#endif

    // Rescale data

    for(size_t j = 0; j < columns_number; j++)
    {
        // Constant columns (max == min) would divide by zero; skip them.
        if(statistics[j].maximum - statistics[j].minimum < numeric_limits<double>::min())
        {
            continue;
        }

        Vector<double> scaled_column = get_column(j);

        for(size_t i = 0; i < row_indices_size; i++)
        {
            const size_t row_index = scale_row_indices[i];

            scaled_column[row_index] = log(1.0+ (2.0*(scaled_column[row_index] - statistics[j].minimum)/(statistics[j].maximum-statistics[j].minimum)));
        }

        set_column(j, scaled_column);
    }
}
template <class T>
void SparseMatrix<T>::scale_columns_logarithmic(const Vector< Statistics<T> >& statistics, const Vector<size_t>& scale_column_indices)
{
    // Logarithmically scale only the listed columns; statistics[j] pairs
    // with scale_column_indices[j].

    // Control sentence(if debug)

    const size_t column_indices_size = scale_column_indices.size();

#ifdef __OPENNN_DEBUG__

    const size_t statistics_size = statistics.size();

    if(statistics_size != column_indices_size)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: Vector template.\n"
               << "void scale_columns_logarithmic(const Vector< Statistics<T> >&, const Vector<size_t>&) method.\n"
               << "Size of statistics must be equal to size of columns indices.\n";
        throw logic_error(buffer.str());
    }

#endif

    // Rescale data

    for(size_t j = 0; j < column_indices_size; j++)
    {
        const size_t column_index = scale_column_indices[j];

        // Constant columns (max == min) would divide by zero; skip them.
        if(statistics[j].maximum - statistics[j].minimum < numeric_limits<double>::min())
        {
            continue;
        }

        Vector<double> scaled_column = get_column(column_index);

#pragma omp parallel for
        for(int i = 0; i < static_cast<int>(rows_number); i++)
        {
            scaled_column[i] = log(1.0+ (2.0*(scaled_column[i] - statistics[j].minimum)/(statistics[j].maximum-statistics[j].minimum)));
        }

        set_column(column_index, scaled_column);
    }
}
template <class T>
void SparseMatrix<T>::unscale_mean_standard_deviation(const Vector< Statistics<T> >& statistics)
{
    // Inverse of scale_mean_standard_deviation(): x -> x*std + mean.

#ifdef __OPENNN_DEBUG__

    const size_t size = statistics.size();

    if(size != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix template."
               << "void unscale_mean_standard_deviation(const Vector< Statistics<T> >&) const method.\n"
               << "Size of statistics vector must be equal to number of columns.\n";
        throw logic_error(buffer.str());
    }

#endif

    for(size_t j = 0; j < columns_number; j++)
    {
        // Columns that were never scaled (near-zero std) stay untouched.
        if(statistics[j].standard_deviation < numeric_limits<double>::min())
        {
            continue;
        }

        Vector<double> unscaled_column = get_column(j);

        for(size_t i = 0; i < rows_number; i++)
        {
            unscaled_column[i] = unscaled_column[i]*statistics[j].standard_deviation + statistics[j].mean;
        }

        set_column(j, unscaled_column);
    }
}
template <class T>
void SparseMatrix<T>::unscale_rows_mean_standard_deviation(const Vector< Statistics<T> >& statistics, const Vector<size_t>& scale_row_indices)
{
    // Inverse of scale_rows_mean_standard_deviation(): applies
    // x -> x*std + mean to every column, but only on the listed rows.

    // Control sentence(if debug)

#ifdef __OPENNN_DEBUG__

    const size_t statistics_size = statistics.size();

    if(statistics_size != columns_number)
    {
        ostringstream buffer;
        // Bug fix: the exception message named the wrong method
        // ("scale_rows_mean_standard_deviation").
        buffer << "OpenNN Exception: Vector template.\n"
               << "void unscale_rows_mean_standard_deviation(const Vector< Statistics<T> >&, const Vector<size_t>&) method.\n"
               << "Size of statistics must be equal to number of columns.\n";
        throw logic_error(buffer.str());
    }

#endif

    size_t row_index;
    const size_t scale_row_number = scale_row_indices.size();

    // Scale columns

    for(size_t j = 0; j < columns_number; j++)
    {
        // Columns that were never scaled (near-zero std) stay untouched.
        if(statistics[j].standard_deviation < numeric_limits<double>::min())
        {
            // Do nothing
        }
        else
        {
            Vector<double> current_column = get_column(j);

            for(size_t i = 0; i < scale_row_number; i++)
            {
                row_index = scale_row_indices[i];

                current_column[row_index] = current_column[row_index]*statistics[j].standard_deviation + statistics[j].mean;
            }

            set_column(j, current_column);
        }
    }
}
template <class T>
void SparseMatrix<T>::unscale_columns_mean_standard_deviation(const Vector< Statistics<T> >& statistics, const Vector<size_t>& scale_column_indices)
{
    // Inverse of scale_columns_mean_standard_deviation() for the listed
    // columns; statistics[j] pairs with scale_column_indices[j].

    // Control sentence(if debug)

    const size_t column_indices_size = scale_column_indices.size();

#ifdef __OPENNN_DEBUG__

    const size_t statistics_size = statistics.size();

    if(statistics_size != column_indices_size)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: Vector template.\n"
               << "void unscale_columns_mean_standard_deviation(const Vector< Statistics<T> >&, const Vector<size_t>&) method.\n"
               << "Size of statistics must be equal to size of columns indices.\n";
        throw logic_error(buffer.str());
    }

#endif

    // Rescale data

    for(size_t j = 0; j < column_indices_size; j++)
    {
        const size_t column_index = scale_column_indices[j];

        // Columns that were never scaled (near-zero std) stay untouched.
        if(statistics[j].standard_deviation < numeric_limits<double>::min())
        {
            continue;
        }

        Vector<double> unscaled_column = get_column(column_index);

#pragma omp parallel for
        for(int i = 0; i < static_cast<int>(rows_number); i++)
        {
            unscaled_column[i] = unscaled_column[i]*statistics[j].standard_deviation + statistics[j].mean;
        }

        set_column(column_index, unscaled_column);
    }
}
template <class T>

/// Undoes minimum/maximum scaling on every column:
/// x = 0.5*(x + 1)*(maximum - minimum) + minimum.
/// Columns whose range is effectively zero are left untouched.

void SparseMatrix<T>::unscale_minimum_maximum(const Vector< Statistics<T> >& statistics)
{
#ifdef __OPENNN_DEBUG__
    const size_t size = statistics.size();
    if(size != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix template."
               << "void unscale_minimum_maximum(const Vector< Statistics<T> >&) const method.\n"
               << "Size of statistics vector must be equal to number of columns.\n";
        throw logic_error(buffer.str());
    }
#endif

    for(size_t column = 0; column < columns_number; column++)
    {
        // Constant columns were never scaled; leave them as-is.
        if(statistics[column].maximum - statistics[column].minimum < numeric_limits<double>::min()) continue;

        Vector<double> values = get_column(column);

        for(size_t row = 0; row < rows_number; row++)
        {
            values[row] = 0.5*(values[row] + 1.0)*(statistics[column].maximum-statistics[column].minimum) + statistics[column].minimum;
        }

        set_column(column, values);
    }
}
template <class T>

/// Undoes minimum/maximum scaling on the given rows, column by column:
/// x = 0.5*(x + 1)*(maximum - minimum) + minimum.
/// Columns with an effectively zero range are skipped.

void SparseMatrix<T>::unscale_rows_minimum_maximum(const Vector< Statistics<T> >& statistics, const Vector<size_t>& scale_row_indices)
{
#ifdef __OPENNN_DEBUG__
    const size_t statistics_size = statistics.size();
    if(statistics_size != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: Vector template.\n"
               << "void unscale_rows_minimum_maximum(const Vector< Statistics<T> >&, const Vector<size_t>&) method.\n"
               << "Size of statistics must be equal to number of columns.\n";
        throw logic_error(buffer.str());
    }
#endif

    const size_t selected_rows = scale_row_indices.size();

    for(size_t column = 0; column < columns_number; column++)
    {
        // Constant columns were never scaled; skip them.
        if(statistics[column].maximum - statistics[column].minimum < numeric_limits<double>::min()) continue;

        Vector<double> values = get_column(column);

        for(size_t k = 0; k < selected_rows; k++)
        {
            const size_t row = scale_row_indices[k];
            values[row] = 0.5*(values[row] + 1.0)*(statistics[column].maximum-statistics[column].minimum) + statistics[column].minimum;
        }

        set_column(column, values);
    }
}
template <class T>

/// Undoes minimum/maximum scaling on the selected columns:
/// x = 0.5*(x + 1)*(maximum - minimum) + minimum.
/// statistics[j] corresponds to scale_column_indices[j]; zero-range
/// columns are skipped.

void SparseMatrix<T>::unscale_columns_minimum_maximum(const Vector< Statistics<T> >& statistics, const Vector<size_t>& scale_column_indices)
{
    const size_t column_indices_size = scale_column_indices.size();

#ifdef __OPENNN_DEBUG__
    const size_t statistics_size = statistics.size();
    if(statistics_size != column_indices_size)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: Vector template.\n"
               << "void unscale_columns_minimum_maximum(const Vector< Statistics<T> >&, const Vector<size_t>&) method.\n"
               << "Size of statistics must be equal to size of columns indices.\n";
        throw logic_error(buffer.str());
    }
#endif

    for(size_t j = 0; j < column_indices_size; j++)
    {
        // Columns with no spread were never scaled; skip.
        if(statistics[j].maximum - statistics[j].minimum < numeric_limits<double>::min()) continue;

        const size_t column_index = scale_column_indices[j];

        Vector<double> unscaled_column = get_column(column_index);

#pragma omp parallel for
        for(int i = 0; i < static_cast<int>(rows_number); i++)
        {
            unscaled_column[i] = 0.5*(unscaled_column[i] + 1.0)*(statistics[j].maximum-statistics[j].minimum) + statistics[j].minimum;
        }

        set_column(column_index, unscaled_column);
    }
}
template <class T>

/// Undoes logarithmic scaling on every column:
/// x = 0.5*exp(x)*(maximum - minimum) + minimum.
/// Columns whose range is effectively zero are left untouched.

void SparseMatrix<T>::unscale_logarithmic(const Vector< Statistics<T> >& statistics)
{
#ifdef __OPENNN_DEBUG__
    const size_t size = statistics.size();
    if(size != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix template."
               << "void unscale_logarithmic(const Vector< Statistics<T> >&) const method.\n"
               << "Size of statistics vector must be equal to number of columns.\n";
        throw logic_error(buffer.str());
    }
#endif

    for(size_t column = 0; column < columns_number; column++)
    {
        // Constant columns were never scaled; skip them.
        if(statistics[column].maximum - statistics[column].minimum < numeric_limits<double>::min()) continue;

        Vector<double> values = get_column(column);

        for(size_t row = 0; row < rows_number; row++)
        {
            values[row] = 0.5*(exp(values[row]))*(statistics[column].maximum-statistics[column].minimum) + statistics[column].minimum;
        }

        set_column(column, values);
    }
}
template <class T>

/// Undoes logarithmic scaling on the given rows, column by column:
/// x = 0.5*exp(x)*(maximum - minimum) + minimum.
/// Columns with an effectively zero range are skipped.

void SparseMatrix<T>::unscale_rows_logarithmic(const Vector< Statistics<T> >& statistics, const Vector<size_t>& scale_row_indices)
{
#ifdef __OPENNN_DEBUG__
    const size_t statistics_size = statistics.size();
    if(statistics_size != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: Vector template.\n"
               << "void unscale_rows_logarithmic(const Vector< Statistics<T> >&, const Vector<size_t>&) method.\n"
               << "Size of statistics must be equal to number of columns.\n";
        throw logic_error(buffer.str());
    }
#endif

    const size_t selected_rows = scale_row_indices.size();

    for(size_t column = 0; column < columns_number; column++)
    {
        // Constant columns were never scaled; skip them.
        if(statistics[column].maximum - statistics[column].minimum < numeric_limits<double>::min()) continue;

        Vector<double> values = get_column(column);

        for(size_t k = 0; k < selected_rows; k++)
        {
            const size_t row = scale_row_indices[k];
            values[row] = 0.5*(exp(values[row]))*(statistics[column].maximum-statistics[column].minimum) + statistics[column].minimum;
        }

        set_column(column, values);
    }
}
template <class T>

/// Undoes logarithmic scaling on the selected columns:
/// x = 0.5*exp(x)*(maximum - minimum) + minimum.
/// statistics[j] corresponds to scale_column_indices[j]; columns whose
/// range is effectively zero are left untouched.
/// @throws logic_error (debug builds) if statistics size != indices size.

void SparseMatrix<T>::unscale_columns_logarithmic(const Vector< Statistics<T> >& statistics, const Vector<size_t>& scale_column_indices)
{
    // Control sentence(if debug)
    const size_t column_indices_size = scale_column_indices.size();
#ifdef __OPENNN_DEBUG__
    const size_t statistics_size = statistics.size();
    if(statistics_size != column_indices_size)
    {
        ostringstream buffer;
        // Fixed: the message previously named a different method
        //("unscale_columns_mean_standard_deviation").
        buffer << "OpenNN Exception: Vector template.\n"
               << "void unscale_columns_logarithmic(const Vector< Statistics<T> >&, const Vector<size_t>&) method.\n"
               << "Size of statistics must be equal to size of columns indices.\n";
        throw logic_error(buffer.str());
    }
#endif
    size_t column_index;
    // Rescale data
    for(size_t j = 0; j < column_indices_size; j++)
    {
        column_index = scale_column_indices[j];
        if(statistics[j].maximum - statistics[j].minimum < numeric_limits<double>::min())
        {
            // Do nothing: constant column, never scaled.
        }
        else
        {
            Vector<double> current_column = get_column(column_index);
#pragma omp parallel for
            for(int i = 0; i < static_cast<int>(rows_number); i++)
            {
                current_column[i] = 0.5*(exp(current_column[i]))*(statistics[j].maximum-statistics[j].minimum) + statistics[j].minimum;
            }
            set_column(column_index, current_column);
        }
    }
}
template <class T>

/// Returns the(row, column) indices of the minimal element of the matrix.
/// If the minimum is a stored(non-default) value it is located directly in
/// the triplet representation; otherwise the dense rows are scanned for the
/// implicit default value.

Vector<size_t> SparseMatrix<T>::calculate_minimal_indices() const
{
    T minimum = calculate_minimum();

    Vector<size_t> minimal_indices(2,0);

    if(minimum != T())
    {
        // The minimum is an explicitly stored value: look it up in the
        // parallel triplet vectors.
        const size_t minimum_index = matrix_values.calculate_equal_to_indices(minimum)[0];
        minimal_indices[0] = rows_indices[minimum_index];
        minimal_indices[1] = columns_indices[minimum_index];
    }
    else
    {
        for(size_t i = 0; i < rows_number; i++)
        {
            // Fixed: get_row returns Vector<T>, not Vector<size_t>.
            const Vector<T> current_row = get_row(i);

            if(current_row.contains(minimum))
            {
                minimal_indices[0] = i;
                // Fixed: calculate_equal_to_indices returns a vector of
                // positions; take the first match instead of assigning the
                // whole vector to a size_t element.
                minimal_indices[1] = current_row.calculate_equal_to_indices(minimum)[0];
                break;
            }
        }
    }

    return minimal_indices;
}
template <class T>

/// Returns the(row, column) indices of the maximal element of the matrix.
/// If the maximum is a stored(non-default) value it is located directly in
/// the triplet representation; otherwise the dense rows are scanned for the
/// implicit default value.

Vector<size_t> SparseMatrix<T>::calculate_maximal_indices() const
{
    T maximum = calculate_maximum();

    Vector<size_t> maximal_indices(2,0);

    if(maximum != T())
    {
        // The maximum is an explicitly stored value: look it up in the
        // parallel triplet vectors.
        const size_t maximum_index = matrix_values.calculate_equal_to_indices(maximum)[0];
        maximal_indices[0] = rows_indices[maximum_index];
        maximal_indices[1] = columns_indices[maximum_index];
    }
    else
    {
        for(size_t i = 0; i < rows_number; i++)
        {
            // Fixed: get_row returns Vector<T>, not Vector<size_t>.
            const Vector<T> current_row = get_row(i);

            if(current_row.contains(maximum))
            {
                maximal_indices[0] = i;
                // Fixed: calculate_equal_to_indices returns a vector of
                // positions; take the first match instead of assigning the
                // whole vector to a size_t element.
                maximal_indices[1] = current_row.calculate_equal_to_indices(maximum)[0];
                break;
            }
        }
    }

    return maximal_indices;
}
template <class T>

/// Returns a two-element vector: element 0 holds the(row, column) indices
/// of the minimal value, element 1 those of the maximal value.

Vector< Vector<size_t> > SparseMatrix<T>::calculate_minimal_maximal_indices() const
{
    Vector< Vector<size_t> > indices(2);

    indices[0] = calculate_minimal_indices();
    indices[1] = calculate_maximal_indices();

    return(indices);
}
template <class T>

/// Returns the sum of squared element-wise differences between this matrix
/// and another sparse matrix of the same dimensions, accumulated column by
/// column.
/// @throws logic_error (debug builds) on a dimension mismatch.

double SparseMatrix<T>::calculate_sum_squared_error(const SparseMatrix<double>& other_sparse_matrix) const
{
    // Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
    const size_t other_rows_number = other_sparse_matrix.get_rows_number();
    if(other_rows_number != rows_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "double calculate_sum_squared_error(const SparseMatrix<double>&) const method.\n"
               << "Other number of rows must be equal to this number of rows.\n";
        throw logic_error(buffer.str());
    }
    const size_t other_columns_number = other_sparse_matrix.get_columns_number();
    if(other_columns_number != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "double calculate_sum_squared_error(const SparseMatrix<double>&) const method.\n"
               << "Other number of columns must be equal to this number of columns.\n";
        throw logic_error(buffer.str());
    }
#endif
    double sum_squared_error = 0.0;
    // Fixed signed/unsigned mismatch: the loop index now matches the type of
    // columns_number(size_t). The OpenMP pragma stays disabled, as before.
    //#pragma omp parallel for reduction(+:sum_squared_error)
    for(size_t i = 0; i < columns_number; i++)
    {
        const Vector<T> this_current_column = this->get_column(i);
        const Vector<T> other_current_column = other_sparse_matrix.get_column(i);
        sum_squared_error += this_current_column.calculate_sum_squared_error(other_current_column);
    }
    return(sum_squared_error);
}
template <class T>

/// Returns the sum of squared element-wise differences between this sparse
/// matrix and a dense matrix of the same dimensions, accumulated column by
/// column.
/// @throws logic_error (debug builds) on a dimension mismatch.

double SparseMatrix<T>::calculate_sum_squared_error(const Matrix<T>& other_matrix) const
{
    // Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
    const size_t other_rows_number = other_matrix.get_rows_number();
    if(other_rows_number != rows_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "double calculate_sum_squared_error(const Matrix<double>&) const method.\n"
               << "Other number of rows must be equal to this number of rows.\n";
        throw logic_error(buffer.str());
    }
    const size_t other_columns_number = other_matrix.get_columns_number();
    if(other_columns_number != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "double calculate_sum_squared_error(const Matrix<double>&) const method.\n"
               << "Other number of columns must be equal to this number of columns.\n";
        throw logic_error(buffer.str());
    }
#endif
    double sum_squared_error = 0.0;
    // Fixed signed/unsigned mismatch: the loop index now matches the type of
    // columns_number(size_t). The OpenMP pragma stays disabled, as before.
    //#pragma omp parallel for reduction(+:sum_squared_error)
    for(size_t i = 0; i < columns_number; i++)
    {
        const Vector<T> this_current_column = this->get_column(i);
        const Vector<T> other_current_column = other_matrix.get_column(i);
        sum_squared_error += this_current_column.calculate_sum_squared_error(other_current_column);
    }
    return(sum_squared_error);
}
template <class T>

/// Returns the Minkowski error between this matrix and another sparse matrix
/// of the same dimensions, accumulated column by column with the given
/// Minkowski exponent.
/// @param other_sparse_matrix Matrix to compare against.
/// @param minkowski_parameter Minkowski exponent.
/// @throws logic_error (debug builds) on a dimension mismatch.

double SparseMatrix<T>::calculate_minkowski_error(const SparseMatrix<double>& other_sparse_matrix, const double& minkowski_parameter) const
{
    // Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
    const size_t other_rows_number = other_sparse_matrix.get_rows_number();
    if(other_rows_number != rows_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "double calculate_minkowski_error(const SparseMatrix<double>&, const double&) const method.\n"
               << "Other number of rows must be equal to this number of rows.\n";
        throw logic_error(buffer.str());
    }
    const size_t other_columns_number = other_sparse_matrix.get_columns_number();
    if(other_columns_number != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "double calculate_minkowski_error(const SparseMatrix<double>&, const double&) const method.\n"
               << "Other number of columns must be equal to this number of columns.\n";
        throw logic_error(buffer.str());
    }
#endif
    double minkowski_error = 0.0;
    // Fixed signed/unsigned mismatch: the loop index now matches the type of
    // columns_number(size_t). The OpenMP pragma stays disabled, as before.
    //#pragma omp parallel for reduction(+:minkowski_error)
    for(size_t i = 0; i < columns_number; i++)
    {
        const Vector<T> this_current_column = this->get_column(i);
        const Vector<T> other_current_column = other_sparse_matrix.get_column(i);
        minkowski_error += this_current_column.calculate_Minkowski_error(other_current_column, minkowski_parameter);
    }
    return(minkowski_error);
}
template <class T>

/// Returns the Minkowski error between this sparse matrix and a dense matrix
/// of the same dimensions, accumulated column by column with the given
/// Minkowski exponent.
/// @param other_matrix Matrix to compare against.
/// @param minkowski_parameter Minkowski exponent.
/// @throws logic_error (debug builds) on a dimension mismatch.

double SparseMatrix<T>::calculate_minkowski_error(const Matrix<T>& other_matrix, const double& minkowski_parameter) const
{
    // Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
    const size_t other_rows_number = other_matrix.get_rows_number();
    if(other_rows_number != rows_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "double calculate_minkowski_error(const Matrix<double>&, const double&) const method.\n"
               << "Other number of rows must be equal to this number of rows.\n";
        throw logic_error(buffer.str());
    }
    const size_t other_columns_number = other_matrix.get_columns_number();
    if(other_columns_number != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "double calculate_minkowski_error(const Matrix<double>&, const double&) const method.\n"
               << "Other number of columns must be equal to this number of columns.\n";
        throw logic_error(buffer.str());
    }
#endif
    double minkowski_error = 0.0;
    // Fixed signed/unsigned mismatch: the loop index now matches the type of
    // columns_number(size_t). The OpenMP pragma stays disabled, as before.
    //#pragma omp parallel for reduction(+:minkowski_error)
    for(size_t i = 0; i < columns_number; i++)
    {
        const Vector<T> this_current_column = this->get_column(i);
        const Vector<T> other_current_column = other_matrix.get_column(i);
        minkowski_error += this_current_column.calculate_Minkowski_error(other_current_column, minkowski_parameter);
    }
    return(minkowski_error);
}
template <class T>

/// Returns the sum of squared differences between every row of the matrix
/// and the given vector.
/// @throws logic_error (debug builds) if the vector size differs from the
/// number of columns.

double SparseMatrix<T>::calculate_sum_squared_error(const Vector<double>& vector) const
{
#ifdef __OPENNN_DEBUG__
    const size_t size = vector.size();
    if(size != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "double calculate_sum_squared_error(const Vector<double>&) const method.\n"
               << "Size must be equal to number of columns.\n";
        throw logic_error(buffer.str());
    }
#endif

    double total = 0.0;

    for(size_t row = 0; row < rows_number; row++)
    {
        const Vector<T> row_values = get_row(row);
        total += row_values.calculate_sum_squared_error(vector);
    }

    return(total);
}
template <class T>

/// Returns a vector with the Euclidean norm of each row.

Vector<double> SparseMatrix<T>::calculate_rows_norm() const
{
    Vector<double> norms(rows_number, 0.0);

    for(size_t row = 0; row < rows_number; row++)
    {
        const Vector<T> row_values = get_row(row);

        // Dot product of the row with itself, then the square root.
        norms[row] = row_values*row_values;
        norms[row] = sqrt(norms[row]);
    }

    return(norms);
}
template <class T>

/// Returns a sparse matrix with the same sparsity pattern whose stored
/// values are the absolute values of this matrix's stored values.

SparseMatrix<T> SparseMatrix<T>::calculate_absolute_value() const
{
    SparseMatrix<T> result(rows_number,columns_number);

    result.set_values(rows_indices, columns_indices, matrix_values.calculate_absolute_value());

    return result;
}
template <class T>

/// Returns the transpose: the triplet representation is reused with row and
/// column index vectors swapped.

SparseMatrix<T> SparseMatrix<T>::calculate_transpose() const
{
    SparseMatrix<T> result(columns_number,rows_number);

    result.set_values(columns_indices, rows_indices, matrix_values);

    return result;
}
/// Returns the determinant of the(square) sparse matrix via Laplace
/// expansion along the first row. Recursive; cost grows factorially with the
/// matrix size, so this is only practical for small matrices.
/// @throws logic_error(debug builds) if the matrix is empty or not square.
template <class T>
T SparseMatrix<T>::calculate_determinant() const
{
// Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
if(empty())
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix Template.\n"
<< "calculate_determinant() const method.\n"
<< "Sparse matrix is empty.\n";
throw logic_error(buffer.str());
}
if(rows_number != columns_number)
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix Template.\n"
<< "calculate_determinant() const method.\n"
<< "Sparse matrix must be square.\n";
throw logic_error(buffer.str());
}
#endif
T determinant = 0;
// Base cases: 1x1 and 2x2 determinants are computed directly.
if(rows_number == 1)
{
determinant = (*this)(0,0);
}
else if(rows_number == 2)
{
const Vector<T> first_row = get_row(0);
const Vector<T> second_row = get_row(1);
determinant = first_row[0]*second_row[1] - second_row[0]*first_row[1];
}
else
{
int sign;
const Vector<T> first_row = get_row(0);
// Expand along row 0: for each column, build the minor's triplets and
// recurse.
for(size_t column_index = 0; column_index < columns_number; column_index++)
{
// Calculate sub data
SparseMatrix<T> sub_sparse_matrix(rows_number-1, columns_number-1);
// NOTE(review): shifting every row index down by one assumes the expansion
// removes row 0, but only the triplets of the expanded column are deleted
// below; triplets that lie in row 0 of other columns would underflow
//(size_t 0 - 1). Confirm against delete_indices/set_values semantics.
Vector<size_t> sub_rows_indices = rows_indices - 1;
Vector<size_t> sub_columns_indices = columns_indices;
Vector<T> sub_matrix_values = matrix_values;
// Drop all stored entries belonging to the expanded column.
const Vector<size_t> current_column_indices = columns_indices.calculate_equal_to_indices(column_index);
sub_rows_indices = sub_rows_indices.delete_indices(current_column_indices);
sub_columns_indices = sub_columns_indices.delete_indices(current_column_indices);
sub_matrix_values = sub_matrix_values.delete_indices(current_column_indices);
// Columns to the right of the removed one shift left by one.
for(size_t i = 0; i < sub_columns_indices.size(); i++)
{
if(sub_columns_indices[i] > column_index)
{
sub_columns_indices[i]--;
}
}
sub_sparse_matrix.set_values(sub_rows_indices, sub_columns_indices, sub_matrix_values);
//sign = (size_t)(pow(-1.0, row_index+2.0));
// Cofactor sign: +1 for even columns, -1 for odd columns.
sign = static_cast<int>((((column_index + 2) % 2) == 0) ? 1 : -1 );
determinant += sign*first_row[column_index]*sub_sparse_matrix.calculate_determinant();
}
}
return(determinant);
}
/// Returns the cofactor matrix: element(i,j) is(-1)^(i+j) times the
/// determinant of the minor obtained by deleting row i and column j.
/// Cost is dominated by one determinant per element.
/// @throws logic_error(debug builds) if the matrix is empty or not square.
template <class T>
SparseMatrix<T> SparseMatrix<T>::calculate_cofactor() const
{
// Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
if(empty())
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix Template.\n"
<< "SparseMatrix<T> calculate_cofactor() const method.\n"
<< "Sparse matrix is empty.\n";
throw logic_error(buffer.str());
}
if(rows_number != columns_number)
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix Template.\n"
<< "SparseMatrix<T> calculate_cofactor() const method.\n"
<< "Sparse matrix must be square.\n";
throw logic_error(buffer.str());
}
#endif
SparseMatrix<T> cofactor(rows_number, columns_number);
SparseMatrix<T> c;
// Index sequences 0..n-1 used to select the minor for each(i,j).
const Vector<size_t> sequential_row_indices(0,1,rows_number-1);
const Vector<size_t> sequential_column_indices(0,1,columns_number-1);
for(size_t i = 0; i < rows_number; i++)
{
for(size_t j = 0; j < columns_number; j++)
{
// Minor of(i,j): all rows but i, all columns but j.
const Vector<size_t> this_row_indices = sequential_row_indices.delete_index(i);
const Vector<size_t> this_column_indices = sequential_column_indices.delete_index(j);
c = this->get_sub_sparse_matrix(this_row_indices, this_column_indices);
const double determinant = c.calculate_determinant();
// Sign alternates in a checkerboard pattern: +1 when i+j is even.
const T value = static_cast<T>((((i + j) % 2) == 0) ? 1 : -1)*determinant;
cofactor.set_element(i,j,value);
//cofactor(i,j) = pow(-1.0, i+j+2.0)*determinant;
}
}
return(cofactor);
}
/// Returns the inverse computed by the adjugate method:
/// inverse = transpose(cofactor matrix) / determinant.
/// Exact-zero determinant is treated as singular; near-singular matrices are
/// not detected.
/// @throws logic_error if the matrix is singular, and(debug builds) if it is
/// empty or not square.
template <class T>
SparseMatrix<T> SparseMatrix<T>::calculate_inverse() const
{
// Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
if(empty())
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix Template.\n"
<< "calculate_inverse() const method.\n"
<< "Sparse matrix is empty.\n";
throw logic_error(buffer.str());
}
if(rows_number != columns_number)
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix Template.\n"
<< "calculate_inverse() const method.\n"
<< "Sparse matrix must be square.\n";
throw logic_error(buffer.str());
}
#endif
const double determinant = calculate_determinant();
// Exact floating-point comparison: only a determinant of exactly 0.0 is
// rejected here(singularity check is always on, not debug-only).
if(determinant == 0.0)
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix Template.\n"
<< "calculate_inverse() const method.\n"
<< "Sparse matrix is singular.\n";
throw logic_error(buffer.str());
}
// 1x1 shortcut: the inverse is simply the reciprocal.
if(rows_number == 1)
{
SparseMatrix<T> inverse(1, 1);
inverse.set_element(0,0, 1.0/determinant);
return(inverse);
}
// Calculate cofactor SparseMatrix
const SparseMatrix<T> cofactor = calculate_cofactor();
// Adjoint SparseMatrix is the transpose of cofactor SparseMatrix
const SparseMatrix<T> adjoint = cofactor.calculate_transpose();
// Inverse SparseMatrix is adjoint SparseMatrix divided by SparseMatrix determinant
const SparseMatrix<T> inverse = adjoint/determinant;
return(inverse);
}
/// @todo Unimplemented stub: after the debug checks it returns a
/// default-constructed(empty) sparse matrix of the right dimensions. The
/// intended Eigen-based LU inversion is left commented out below.
/// @throws logic_error(debug builds) if the matrix is empty or not square.
template <class T>
SparseMatrix<T> SparseMatrix<T>::calculate_LU_inverse() const /// @todo
{
// Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
if(empty())
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix Template.\n"
<< "calculate_LU_inverse() const method.\n"
<< "Sparse matrix is empty.\n";
throw logic_error(buffer.str());
}
if(rows_number != columns_number)
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix Template.\n"
<< "calculate_LU_inverse() const method.\n"
<< "Sparse matrix must be square.\n";
throw logic_error(buffer.str());
}
#endif
SparseMatrix<T> inverse(rows_number, columns_number);
// Planned Eigen implementation(not yet wired up):
// Eigen::Map<Eigen::SparseMatrix<double> > sm1(rows,cols,nnz,outerIndexPtr,innerIndices,values);
// const Eigen::Map<Eigen::SparseMatrixXd> this_eigen((double*)this->data(), rows_number, columns_number);
// Eigen::Map<Eigen::SparseMatrixXd> inverse_eigen(inverse.data(), rows_number, columns_number);
// inverse_eigen = this_eigen.inverse();
return(inverse);
}
/// @todo
/// Unimplemented stub: after the debug checks it returns an empty vector.
/// The intended Eigen LDLT solve is left commented out below.
/// @throws logic_error (debug builds) if the matrix is empty or not square.
template <class T>
Vector<T> SparseMatrix<T>::solve_LDLT(const Vector<double>&) const
{
    // Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
    if(empty())
    {
        ostringstream buffer;
        // Fixed: messages previously named a different method("solve_LLT").
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "solve_LDLT(const Vector<double>&) const method.\n"
               << "Sparse matrix is empty.\n";
        throw logic_error(buffer.str());
    }
    if(rows_number != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "solve_LDLT(const Vector<double>&) const method.\n"
               << "Sparse matrix must be squared.\n";
        throw logic_error(buffer.str());
    }
#endif
    // Planned Eigen implementation(not yet wired up):
    //Vector<T> solution(rows_number);
    //const Eigen::Map<Eigen::SparseMatrixXd> this_eigen((double*)this->data(), rows_number, columns_number);
    //const Eigen::Map<Eigen::VectorXd> b_eigen((double*)b.data(),rows_number);
    //Eigen::Map<Eigen::VectorXd> solution_eigen(solution.data(), rows_number);
    // solution_eigen = this_eigen.ldlt().solve(b_eigen);
    return Vector<T>();
}
template <class T>

/// Returns the distance(as defined by Vector::calculate_distance) between
/// two rows of the matrix.
/// @param first_index Index of the first row.
/// @param second_index Index of the second row.
/// @throws logic_error (debug builds) if the matrix is empty.

double SparseMatrix<T>::calculate_distance(const size_t& first_index, const size_t& second_index) const
{
#ifdef __OPENNN_DEBUG__
    if(empty())
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "calculate_distance(const size_t&, const size_t&) const method.\n"
               << "SparseMatrix is empty.\n";
        throw logic_error(buffer.str());
    }
#endif

    const Vector<T> lhs_row = get_row(first_index);
    const Vector<T> rhs_row = get_row(second_index);

    return(lhs_row.calculate_distance(rhs_row));
}
template <class T>

/// Returns a new matrix with the scalar added to every element, built row
/// by row.

SparseMatrix<T> SparseMatrix<T>::operator + (const T& scalar) const
{
    SparseMatrix<T> result(rows_number, columns_number);

    for(size_t row = 0; row < rows_number; row++)
    {
        result.set_row(row, get_row(row) + scalar);
    }

    return(result);
}
template <class T>

/// Returns a new matrix where the given vector has been added to every
/// column.
/// @throws logic_error (debug builds) if the vector size differs from the
/// number of rows.

SparseMatrix<T> SparseMatrix<T>::operator + (const Vector<T>& vector) const
{
#ifdef __OPENNN_DEBUG__
    const size_t size = vector.size();
    if(size != rows_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "SparseMatrix<T> operator + (const Vector<T>&) const.\n"
               << "Size of vector must be equal to number of rows.\n";
        throw logic_error(buffer.str());
    }
#endif

    SparseMatrix<T> result(rows_number, columns_number);

    for(size_t column = 0; column < columns_number; column++)
    {
        result.set_column(column, get_column(column) + vector);
    }

    return(result);
}
template <class T>

/// Returns the element-wise sum of this matrix and another sparse matrix of
/// the same dimensions, built column by column.
/// @throws logic_error (debug builds) on a dimension mismatch.

SparseMatrix<T> SparseMatrix<T>::operator + (const SparseMatrix<T>& other_sparse_matrix) const
{
#ifdef __OPENNN_DEBUG__
    const size_t other_rows_number = other_sparse_matrix.get_rows_number();
    const size_t other_columns_number = other_sparse_matrix.get_columns_number();
    if(other_rows_number != rows_number || other_columns_number != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "SparseMatrix<T> operator + (const SparseMatrix<T>&) const.\n"
               << "Sizes of other sparse matrix (" << other_rows_number << "," << other_columns_number << ") must be the same than sizes of this sparse matrix (" << rows_number << "," << columns_number << ").\n";
        throw logic_error(buffer.str());
    }
#endif

    SparseMatrix<T> result(rows_number, columns_number);

    for(size_t column = 0; column < columns_number; column++)
    {
        result.set_column(column, get_column(column) + other_sparse_matrix.get_column(column));
    }

    return(result);
}
template <class T>

/// Returns the element-wise sum of this sparse matrix and a dense matrix of
/// the same dimensions, built column by column.
/// @throws logic_error (debug builds) on a dimension mismatch.

SparseMatrix<T> SparseMatrix<T>::operator + (const Matrix<T>& other_matrix) const
{
#ifdef __OPENNN_DEBUG__
    const size_t other_rows_number = other_matrix.get_rows_number();
    const size_t other_columns_number = other_matrix.get_columns_number();
    if(other_rows_number != rows_number || other_columns_number != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "SparseMatrix<T> operator + (const Matrix<T>&) const.\n"
               << "Sizes of other matrix (" << other_rows_number << "," << other_columns_number << ") must be the same than sizes of this matrix (" << rows_number << "," << columns_number << ").\n";
        throw logic_error(buffer.str());
    }
#endif

    SparseMatrix<T> result(rows_number, columns_number);

    for(size_t column = 0; column < columns_number; column++)
    {
        result.set_column(column, get_column(column) + other_matrix.get_column(column));
    }

    return(result);
}
template <class T>

/// Returns a new matrix with the scalar subtracted from every element,
/// built row by row.

SparseMatrix<T> SparseMatrix<T>::operator -(const T& scalar) const
{
    SparseMatrix<T> result(rows_number, columns_number);

    for(size_t row = 0; row < rows_number; row++)
    {
        result.set_row(row, get_row(row) - scalar);
    }

    return(result);
}
template <class T>

/// Returns a new matrix where the given vector has been subtracted from
/// every column.
/// @throws logic_error (debug builds) if the vector size differs from the
/// number of rows.

SparseMatrix<T> SparseMatrix<T>::operator -(const Vector<T>& vector) const
{
#ifdef __OPENNN_DEBUG__
    const size_t size = vector.size();
    if(size != rows_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "SparseMatrix<T> operator -(const Vector<T>&) const.\n"
               << "Size of vector must be equal to number of rows.\n";
        throw logic_error(buffer.str());
    }
#endif

    SparseMatrix<T> result(rows_number, columns_number);

    for(size_t column = 0; column < columns_number; column++)
    {
        result.set_column(column, get_column(column) - vector);
    }

    return(result);
}
template <class T>

/// Returns the element-wise difference between this matrix and another
/// sparse matrix of the same dimensions, built column by column.
/// @throws logic_error (debug builds) on a dimension mismatch.

SparseMatrix<T> SparseMatrix<T>::operator -(const SparseMatrix<T>& other_sparse_matrix) const
{
#ifdef __OPENNN_DEBUG__
    const size_t other_rows_number = other_sparse_matrix.get_rows_number();
    const size_t other_columns_number = other_sparse_matrix.get_columns_number();
    if(other_rows_number != rows_number || other_columns_number != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "SparseMatrix<T> operator -(const SparseMatrix<T>&) const.\n"
               << "Sizes of other sparse matrix (" << other_rows_number << "," << other_columns_number << ") must be the same than sizes of this sparse matrix (" << rows_number << "," << columns_number << ").\n";
        throw logic_error(buffer.str());
    }
#endif

    SparseMatrix<T> result(rows_number, columns_number);

    for(size_t column = 0; column < columns_number; column++)
    {
        result.set_column(column, get_column(column) - other_sparse_matrix.get_column(column));
    }

    return(result);
}
template <class T>

/// Returns the element-wise difference between this sparse matrix and a
/// dense matrix of the same dimensions, built column by column.
/// @throws logic_error (debug builds) on a dimension mismatch.

SparseMatrix<T> SparseMatrix<T>::operator -(const Matrix<T>& other_matrix) const
{
#ifdef __OPENNN_DEBUG__
    const size_t other_rows_number = other_matrix.get_rows_number();
    const size_t other_columns_number = other_matrix.get_columns_number();
    if(other_rows_number != rows_number || other_columns_number != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "SparseMatrix<T> operator -(const Matrix<T>&) const.\n"
               << "Sizes of other matrix (" << other_rows_number << "," << other_columns_number << ") must be the same than sizes of this matrix (" << rows_number << "," << columns_number << ").\n";
        throw logic_error(buffer.str());
    }
#endif

    SparseMatrix<T> result(rows_number, columns_number);

    for(size_t column = 0; column < columns_number; column++)
    {
        result.set_column(column, get_column(column) - other_matrix.get_column(column));
    }

    return(result);
}
template <class T>

/// Returns this matrix multiplied by a scalar. Only the stored values need
/// scaling; multiplying by the default value T() yields an empty matrix
/// directly.

SparseMatrix<T> SparseMatrix<T>::operator *(const T& scalar) const
{
    if(scalar == T())
    {
        // Everything becomes the default value: return an empty matrix.
        SparseMatrix<T> product_sparse_matrix(rows_number,columns_number);
        return(product_sparse_matrix);
    }
    Vector<T> product(matrix_values);
    // Fixed: std::bind2nd was deprecated in C++11 and removed in C++17; a
    // lambda performs the same element-wise multiplication.
    transform(matrix_values.begin(), matrix_values.end(), product.begin(),
              [&scalar](const T& value) { return value*scalar; });
    SparseMatrix<T> product_sparse_matrix(rows_number,columns_number);
    product_sparse_matrix.set_values(rows_indices, columns_indices, product);
    return(product_sparse_matrix);
}
template <class T>

/// Returns the element-wise product of every column of this matrix with the
/// given vector.
/// @throws logic_error (debug builds) if the vector size differs from the
/// number of rows.

SparseMatrix<T> SparseMatrix<T>::operator *(const Vector<T>& vector) const
{
#ifdef __OPENNN_DEBUG__
    const size_t size = vector.size();
    if(size != rows_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "SparseMatrix<T> operator *(const Vector<T>&) const method.\n"
               << "Vector size(" << size << ") must be equal to number of SparseMatrix rows(" << rows_number << ").\n";
        throw logic_error(buffer.str());
    }
#endif

    SparseMatrix<T> result(rows_number, columns_number);

    for(size_t column = 0; column < columns_number; column++)
    {
        result.set_column(column, get_column(column) * vector);
    }

    return(result);
}
template <class T>

/// Returns the element-wise(Hadamard) product of this matrix and another
/// sparse matrix of the same dimensions, built column by column.
/// @throws logic_error (debug builds) on a dimension mismatch.

SparseMatrix<T> SparseMatrix<T>::operator *(const SparseMatrix<T>& other_sparse_matrix) const
{
#ifdef __OPENNN_DEBUG__
    const size_t other_rows_number = other_sparse_matrix.get_rows_number();
    const size_t other_columns_number = other_sparse_matrix.get_columns_number();
    if(other_rows_number != rows_number || other_columns_number != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "SparseMatrix<T> operator *(const SparseMatrix<T>&) const method.\n"
               << "Sizes of other sparse matrix (" << other_rows_number << "," << other_columns_number << ") must be equal to sizes of this sparse matrix (" << rows_number << "," << columns_number << ").\n";
        throw logic_error(buffer.str());
    }
#endif

    SparseMatrix<T> result(rows_number, columns_number);

    for(size_t column = 0; column < columns_number; column++)
    {
        result.set_column(column, get_column(column) * other_sparse_matrix.get_column(column));
    }

    return(result);
}
template <class T>

/// Returns the element-wise(Hadamard) product of this sparse matrix and a
/// dense matrix of the same dimensions, built column by column.
/// @throws logic_error (debug builds) on a dimension mismatch.

SparseMatrix<T> SparseMatrix<T>::operator *(const Matrix<T>& other_matrix) const
{
#ifdef __OPENNN_DEBUG__
    const size_t other_rows_number = other_matrix.get_rows_number();
    const size_t other_columns_number = other_matrix.get_columns_number();
    if(other_rows_number != rows_number || other_columns_number != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "SparseMatrix<T> operator *(const Matrix<T>&) const method.\n"
               << "Sizes of other matrix (" << other_rows_number << "," << other_columns_number << ") must be equal to sizes of this matrix (" << rows_number << "," << columns_number << ").\n";
        throw logic_error(buffer.str());
    }
#endif

    SparseMatrix<T> result(rows_number, columns_number);

    for(size_t column = 0; column < columns_number; column++)
    {
        result.set_column(column, get_column(column) * other_matrix.get_column(column));
    }

    return(result);
}
// Returns a new sparse matrix whose stored values are this matrix's values
// divided by the given scalar. Implicit zeros are unaffected (0/s == 0), so
// only the stored values need transforming. No check is made for scalar == 0.
template <class T>
SparseMatrix<T> SparseMatrix<T>::operator /(const T& scalar) const
{
    Vector<T> cocient(matrix_values);

    // bind2nd was deprecated in C++11 and removed in C++17; an equivalent
    // lambda keeps this valid under every standard.
    transform(matrix_values.begin(), matrix_values.end(), cocient.begin(),
              [&scalar](const T& value) {return value/scalar;});

    SparseMatrix<T> cocient_sparse_matrix(rows_number,columns_number);
    cocient_sparse_matrix.set_values(rows_indices, columns_indices, cocient);

    return cocient_sparse_matrix;
}
// Element-wise division of every row by the given vector (one divisor per
// column); returns a new sparse matrix.
template <class T>
SparseMatrix<T> SparseMatrix<T>::operator /(const Vector<T>& vector) const
{
// Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
const size_t size = vector.size();
if(size != columns_number)
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix Template.\n"
<< "SparseMatrix<T> operator /(const Vector<T>&) const method.\n"
<< "Vector size(" << size << ") must be equal to number of SparseMatrix columns(" << columns_number << ").\n";
throw logic_error(buffer.str());
}
#endif
// Divide row by row; each row has columns_number entries, matching the vector.
SparseMatrix<T> cocient(rows_number, columns_number);
for(size_t i = 0; i < rows_number; i++)
{
const Vector<T> current_row = get_row(i) / vector;
cocient.set_row(i, current_row);
}
return(cocient);
}
// Element-wise division by another sparse matrix of identical dimensions;
// returns a new sparse matrix. Division by implicit zeros is not guarded.
template <class T>
SparseMatrix<T> SparseMatrix<T>::operator /(const SparseMatrix<T>& other_sparse_matrix) const
{
// Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
const size_t other_rows_number = other_sparse_matrix.get_rows_number();
const size_t other_columns_number = other_sparse_matrix.get_columns_number();
if(other_rows_number != rows_number || other_columns_number != columns_number)
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix Template.\n"
<< "SparseMatrix<T> operator /(const SparseMatrix<T>&) const method.\n"
<< "Sizes of other Sparsematrix (" << other_rows_number << "," << other_columns_number << ") must be equal to sizes of this Sparsematrix (" << rows_number << "," << columns_number << ").\n";
throw logic_error(buffer.str());
}
#endif
// Divide column by column; set_column() assembles the sparse result.
SparseMatrix<T> cocient(rows_number, columns_number);
for(size_t i = 0; i < columns_number; i++)
{
const Vector<T> current_column = get_column(i) / other_sparse_matrix.get_column(i);
cocient.set_column(i, current_column);
}
return(cocient);
}
// Element-wise division by a dense matrix of identical dimensions; returns a
// new sparse matrix. Division by zero entries is not guarded.
template <class T>
SparseMatrix<T> SparseMatrix<T>::operator /(const Matrix<T>& other_matrix) const
{
// Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
const size_t other_rows_number = other_matrix.get_rows_number();
const size_t other_columns_number = other_matrix.get_columns_number();
if(other_rows_number != rows_number || other_columns_number != columns_number)
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix Template.\n"
<< "SparseMatrix<T> operator /(const Matrix<T>&) const method.\n"
<< "Sizes of other matrix (" << other_rows_number << "," << other_columns_number << ") must be equal to sizes of this matrix (" << rows_number << "," << columns_number << ").\n";
throw logic_error(buffer.str());
}
#endif
// Divide column by column; set_column() assembles the sparse result.
SparseMatrix<T> cocient(rows_number, columns_number);
for(size_t i = 0; i < columns_number; i++)
{
const Vector<T> current_column = get_column(i) / other_matrix.get_column(i);
cocient.set_column(i, current_column);
}
return(cocient);
}
// Adds the scalar to every element of the matrix, in place.
// NOTE: a non-zero scalar densifies the matrix, because implicit zeros
// become stored scalar-valued entries.
template <class T>
void SparseMatrix<T>::operator += (const T& scalar)
{
    for(size_t column_index = 0; column_index < columns_number; column_index++)
    {
        set_column(column_index, get_column(column_index) + scalar);
    }
}
// Adds the vector (one value per row) to every column of the matrix, in place.
template <class T>
void SparseMatrix<T>::operator += (const Vector<T>& vector)
{
// Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
const size_t size = vector.size();
if(size != rows_number)
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix Template.\n"
<< "void operator += (const Vector<T>&).\n"
<< "Size of vector must be equal to number of rows.\n";
throw logic_error(buffer.str());
}
#endif
// Each column has rows_number entries, matching the vector's size.
for(size_t i = 0; i < columns_number; i++)
{
const Vector<T> current_column = get_column(i) + vector;
set_column(i, current_column);
}
}
// Element-wise, in-place addition of another sparse matrix of identical dimensions.
template <class T>
void SparseMatrix<T>::operator += (const SparseMatrix<T>& other_sparse_matrix)
{
// Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
const size_t other_rows_number = other_sparse_matrix.get_rows_number();
const size_t other_columns_number = other_sparse_matrix.get_columns_number();
if(other_rows_number != rows_number || other_columns_number != columns_number)
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix Template.\n"
<< "void operator += (const SparseMatrix<T>&).\n"
<< "Sizes of other sparse matrix (" << other_rows_number << "," << other_columns_number << ") must be the same than sizes of this sparse matrix (" << rows_number << "," << columns_number << ").\n";
throw logic_error(buffer.str());
}
#endif
// Add column by column; set_column() updates the sparse storage in place.
for(size_t i = 0; i < columns_number; i++)
{
const Vector<T> current_column = get_column(i) + other_sparse_matrix.get_column(i);
set_column(i, current_column);
}
}
// Element-wise, in-place addition of a dense matrix of identical dimensions.
template <class T>
void SparseMatrix<T>::operator += (const Matrix<T>& other_matrix)
{
// Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
const size_t other_rows_number = other_matrix.get_rows_number();
const size_t other_columns_number = other_matrix.get_columns_number();
if(other_rows_number != rows_number || other_columns_number != columns_number)
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix Template.\n"
<< "void operator += (const Matrix<T>&).\n"
<< "Sizes of other matrix (" << other_rows_number << "," << other_columns_number << ") must be the same than sizes of this matrix (" << rows_number << "," << columns_number << ").\n";
throw logic_error(buffer.str());
}
#endif
// Add column by column; set_column() updates the sparse storage in place.
for(size_t i = 0; i < columns_number; i++)
{
const Vector<T> current_column = get_column(i) + other_matrix.get_column(i);
set_column(i, current_column);
}
}
// Subtracts the scalar from every element of the matrix, in place.
// NOTE: a non-zero scalar densifies the matrix, because implicit zeros
// become stored entries with value -scalar.
template <class T>
void SparseMatrix<T>::operator -= (const T& scalar)
{
    // Fix: the loop condition was "rows_number" (always true for a non-empty
    // matrix), which made this an infinite loop. It must bound i.
    for(size_t i = 0; i < rows_number; i++)
    {
        const Vector<T> current_row = get_row(i) - scalar;
        set_row(i, current_row);
    }
}
// Subtracts the vector (one value per row) from every column of the matrix, in place.
template <class T>
void SparseMatrix<T>::operator -= (const Vector<T>& vector)
{
// Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
const size_t size = vector.size();
if(size != rows_number)
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix Template.\n"
<< "void operator -= (const Vector<T>&).\n"
<< "Size of vector must be equal to number of rows.\n";
throw logic_error(buffer.str());
}
#endif
// Each column has rows_number entries, matching the vector's size.
for(size_t i = 0; i < columns_number; i++)
{
const Vector<T> current_column = get_column(i) - vector;
set_column(i, current_column);
}
}
// Element-wise, in-place subtraction of another sparse matrix of identical dimensions.
template <class T>
void SparseMatrix<T>::operator -= (const SparseMatrix<T>& other_sparse_matrix)
{
// Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
const size_t other_rows_number = other_sparse_matrix.get_rows_number();
const size_t other_columns_number = other_sparse_matrix.get_columns_number();
if(other_rows_number != rows_number || other_columns_number != columns_number)
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix Template.\n"
<< "void operator -= (const SparseMatrix<T>&).\n"
<< "Sizes of other sparse matrix (" << other_rows_number << "," << other_columns_number << ") must be the same than sizes of this sparse matrix (" << rows_number << "," << columns_number << ").\n";
throw logic_error(buffer.str());
}
#endif
// Subtract column by column; set_column() updates the sparse storage in place.
for(size_t i = 0; i < columns_number; i++)
{
const Vector<T> current_column = get_column(i) - other_sparse_matrix.get_column(i);
set_column(i, current_column);
}
}
// Element-wise, in-place subtraction of a dense matrix of identical dimensions.
template <class T>
void SparseMatrix<T>::operator -= (const Matrix<T>& other_matrix)
{
// Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
const size_t other_rows_number = other_matrix.get_rows_number();
const size_t other_columns_number = other_matrix.get_columns_number();
if(other_rows_number != rows_number || other_columns_number != columns_number)
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix Template.\n"
<< "void operator -= (const Matrix<T>&).\n"
<< "Sizes of other matrix (" << other_rows_number << "," << other_columns_number << ") must be the same than sizes of this matrix (" << rows_number << "," << columns_number << ").\n";
throw logic_error(buffer.str());
}
#endif
// Subtract column by column; set_column() updates the sparse storage in place.
for(size_t i = 0; i < columns_number; i++)
{
const Vector<T> current_column = get_column(i) - other_matrix.get_column(i);
set_column(i, current_column);
}
}
// Multiplies every stored value by the given scalar, in place. Implicit zeros
// are unaffected, so only the stored values need transforming.
template <class T>
void SparseMatrix<T>::operator *= (const T& scalar)
{
    // Multiplying by zero empties the matrix entirely: reset the storage
    // instead of keeping explicit zero entries.
    if(scalar == T())
    {
        set(rows_number,columns_number);
        return;
    }

    Vector<T> product(matrix_values);

    // bind2nd was deprecated in C++11 and removed in C++17; an equivalent
    // lambda keeps this valid under every standard.
    transform(matrix_values.begin(), matrix_values.end(), product.begin(),
              [&scalar](const T& value) {return value*scalar;});

    set_values(rows_indices, columns_indices, product);
}
// Element-wise, in-place multiplication of every column by the vector
// (one factor per row).
template <class T>
void SparseMatrix<T>::operator *= (const Vector<T>& vector)
{
// Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
const size_t size = vector.size();
if(size != rows_number)
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix Template.\n"
<< "void operator *= (const Vector<T>&) method.\n"
<< "Vector size(" << size << ") must be equal to number of SparseMatrix rows(" << rows_number << ").\n";
throw logic_error(buffer.str());
}
#endif
// Each column has rows_number entries, matching the vector's size.
for(size_t i = 0; i < columns_number; i++)
{
const Vector<T> current_column = get_column(i) * vector;
set_column(i, current_column);
}
}
// Element-wise (Hadamard), in-place multiplication by another sparse matrix
// of identical dimensions. Not matrix multiplication — see dot().
template <class T>
void SparseMatrix<T>::operator *= (const SparseMatrix<T>& other_sparse_matrix)
{
// Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
const size_t other_rows_number = other_sparse_matrix.get_rows_number();
const size_t other_columns_number = other_sparse_matrix.get_columns_number();
if(other_rows_number != rows_number || other_columns_number != columns_number)
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix Template.\n"
<< "void operator *= (const SparseMatrix<T>&) method.\n"
<< "Sizes of other sparse matrix (" << other_rows_number << "," << other_columns_number << ") must be equal to sizes of this sparse matrix (" << rows_number << "," << columns_number << ").\n";
throw logic_error(buffer.str());
}
#endif
// Multiply column by column; set_column() updates the sparse storage in place.
for(size_t i = 0; i < columns_number; i++)
{
const Vector<T> current_column = get_column(i) * other_sparse_matrix.get_column(i);
set_column(i, current_column);
}
}
// Element-wise (Hadamard), in-place multiplication by a dense matrix of
// identical dimensions. Not matrix multiplication — see dot().
template <class T>
void SparseMatrix<T>::operator *= (const Matrix<T>& other_matrix)
{
// Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
const size_t other_rows_number = other_matrix.get_rows_number();
const size_t other_columns_number = other_matrix.get_columns_number();
if(other_rows_number != rows_number || other_columns_number != columns_number)
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix Template.\n"
<< "void operator *= (const Matrix<T>&) method.\n"
<< "Sizes of other matrix (" << other_rows_number << "," << other_columns_number << ") must be equal to sizes of this matrix (" << rows_number << "," << columns_number << ").\n";
throw logic_error(buffer.str());
}
#endif
// Multiply column by column; set_column() updates the sparse storage in place.
for(size_t i = 0; i < columns_number; i++)
{
const Vector<T> current_column = get_column(i) * other_matrix.get_column(i);
set_column(i, current_column);
}
}
// Divides every stored value by the given scalar, in place. Implicit zeros
// are unaffected (0/s == 0). No check is made for scalar == 0.
template <class T>
void SparseMatrix<T>::operator /= (const T& scalar)
{
    Vector<T> cocient(matrix_values);

    // bind2nd was deprecated in C++11 and removed in C++17; an equivalent
    // lambda keeps this valid under every standard.
    transform(matrix_values.begin(), matrix_values.end(), cocient.begin(),
              [&scalar](const T& value) {return value/scalar;});

    set_values(rows_indices, columns_indices, cocient);
}
// Element-wise, in-place division of every row by the vector (one divisor
// per column).
template <class T>
void SparseMatrix<T>::operator /= (const Vector<T>& vector)
{
// Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
const size_t size = vector.size();
if(size != columns_number)
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix Template.\n"
<< "void operator /= (const Vector<T>&) method.\n"
<< "Vector size(" << size << ") must be equal to number of SparseMatrix columns(" << columns_number << ").\n";
throw logic_error(buffer.str());
}
#endif
// Each row has columns_number entries, matching the vector's size.
for(size_t i = 0; i < rows_number; i++)
{
const Vector<T> current_row = get_row(i) / vector;
set_row(i, current_row);
}
}
// Element-wise, in-place division by another sparse matrix of identical
// dimensions. Division by implicit zeros is not guarded.
template <class T>
void SparseMatrix<T>::operator /= (const SparseMatrix<T>& other_sparse_matrix)
{
// Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
const size_t other_rows_number = other_sparse_matrix.get_rows_number();
const size_t other_columns_number = other_sparse_matrix.get_columns_number();
if(other_rows_number != rows_number || other_columns_number != columns_number)
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix Template.\n"
<< "void operator /= (const SparseMatrix<T>&) method.\n"
<< "Sizes of other Sparsematrix (" << other_rows_number << "," << other_columns_number << ") must be equal to sizes of this Sparsematrix (" << rows_number << "," << columns_number << ").\n";
throw logic_error(buffer.str());
}
#endif
// Divide column by column; set_column() updates the sparse storage in place.
for(size_t i = 0; i < columns_number; i++)
{
const Vector<T> current_column = get_column(i) / other_sparse_matrix.get_column(i);
set_column(i, current_column);
}
}
// Element-wise, in-place division by a dense matrix of identical dimensions.
// Division by zero entries is not guarded.
template <class T>
void SparseMatrix<T>::operator /= (const Matrix<T>& other_matrix)
{
// Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
const size_t other_rows_number = other_matrix.get_rows_number();
const size_t other_columns_number = other_matrix.get_columns_number();
if(other_rows_number != rows_number || other_columns_number != columns_number)
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix Template.\n"
<< "void operator /= (const Matrix<T>&) method.\n"
<< "Sizes of other matrix (" << other_rows_number << "," << other_columns_number << ") must be equal to sizes of this matrix (" << rows_number << "," << columns_number << ").\n";
throw logic_error(buffer.str());
}
#endif
// Divide column by column; set_column() updates the sparse storage in place.
for(size_t i = 0; i < columns_number; i++)
{
const Vector<T> current_column = get_column(i) / other_matrix.get_column(i);
set_column(i, current_column);
}
}
/// @todo
// Matrix-vector product (STUB): the Eigen-based implementation is commented
// out, so the returned vector is currently just default-initialized
// (all zeros for double).
template <class T>
Vector<double> SparseMatrix<T>::dot(const Vector<double>& vector) const
{
    // Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
    // Fix: the parameter was unnamed, so this debug block referenced an
    // undeclared identifier 'vector' and failed to compile whenever
    // __OPENNN_DEBUG__ was defined.
    const size_t size = vector.size();
    if(size != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "Vector<T> dot(const Vector<T>&) const method.\n"
               << "Vector size must be equal to sparse matrix number of columns.\n";
        throw logic_error(buffer.str());
    }
#endif
    // Calculate SparseMatrix-vector product
    Vector<double> product(rows_number);
    // const Eigen::Map<Eigen::SparseMatrixXd> SparseMatrix_eigen((double*)this->data(), rows_number, columns_number);
    // const Eigen::Map<Eigen::VectorXd> vector_eigen((double*)vector.data(), columns_number);
    // Eigen::Map<Eigen::VectorXd> product_eigen(product.data(), rows_number);
    // product_eigen = SparseMatrix_eigen*vector_eigen;
    return product;
}
// Matrix-matrix product with another sparse matrix (STUB): the Eigen-based
// implementation is commented out, so the result currently holds no values.
template <class T>
SparseMatrix<double> SparseMatrix<T>::dot(const SparseMatrix<double>& other_sparse_matrix) const /// @todo
{
    const size_t other_columns_number = other_sparse_matrix.get_columns_number();

    // Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
    const size_t other_rows_number = other_sparse_matrix.get_rows_number();
    if(other_rows_number != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "SparseMatrix<T> dot(const SparseMatrix<T>&) const method.\n"
               << "The number of rows of the other sparse matrix (" << other_rows_number << ") must be equal to the number of columns of this sparse matrix (" << columns_number << ").\n";
        throw logic_error(buffer.str());
    }
#endif
    // Fix: the local was SparseMatrix<T>, which does not match the declared
    // SparseMatrix<double> return type for T != double.
    SparseMatrix<double> product(rows_number, other_columns_number);
    // const Eigen::Map<Eigen::SparseMatrixXd> this_eigen((double*)this->data(), rows_number, columns_number);
    // const Eigen::Map<Eigen::SparseMatrixXd> other_eigen((double*)other_sparse_matrix.data(), other_rows_number, other_columns_number);
    // Eigen::Map<Eigen::SparseMatrixXd> product_eigen(product.data(), rows_number, other_columns_number);
    // product_eigen = this_eigen*other_eigen;
    return product;
}
// Matrix-matrix product with a dense matrix (STUB): the Eigen-based
// implementation is commented out, so the result currently holds no values.
template <class T>
Matrix<T> SparseMatrix<T>::dot(const Matrix<T>& other_matrix) const /// @todo
{
    const size_t other_columns_number = other_matrix.get_columns_number();

    // Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
    const size_t other_rows_number = other_matrix.get_rows_number();
    if(other_rows_number != columns_number)
    {
        ostringstream buffer;
        // Fix: the message named the SparseMatrix overload; this is the
        // Matrix overload.
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "Matrix<T> dot(const Matrix<T>&) const method.\n"
               << "The number of rows of the other matrix (" << other_rows_number << ") must be equal to the number of columns of this sparse matrix (" << columns_number << ").\n";
        throw logic_error(buffer.str());
    }
#endif
    // Fix: the local was SparseMatrix<T>, which does not match the declared
    // Matrix<T> return type.
    Matrix<T> product(rows_number, other_columns_number);
    // const Eigen::Map<Eigen::SparseMatrixXd> this_eigen((double*)this->data(), rows_number, columns_number);
    // const Eigen::Map<Eigen::SparseMatrixXd> other_eigen((double*)other_matrix.data(), other_rows_number, other_columns_number);
    // Eigen::Map<Eigen::SparseMatrixXd> product_eigen(product.data(), rows_number, other_columns_number);
    // product_eigen = this_eigen*other_eigen;
    return product;
}
// Eigenvalue computation (STUB): the Eigen-based implementation is commented
// out, so the returned rows_number x 1 matrix is currently default-initialized.
template <class T>
Matrix<T> SparseMatrix<T>::calculate_eigenvalues() const /// @todo
{
// Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
if((*this).get_columns_number() == 0)
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix Template.\n"
<< "SparseMatrix<T> calculate_eigen_values() const method.\n"
<< "Number of columns must be greater than zero.\n";
throw logic_error(buffer.str());
}
#endif
#ifdef __OPENNN_DEBUG__
if((*this).get_rows_number() == 0)
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix Template.\n"
<< "SparseMatrix<T> calculate_eigen_values() const method.\n"
<< "Number of rows must be greater than zero.\n";
throw logic_error(buffer.str());
}
#endif
#ifdef __OPENNN_DEBUG__
if((*this).get_columns_number() != (*this).get_rows_number())
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix Template.\n"
<< "SparseMatrix<T> calculate_eigen_values() const method.\n"
<< "The SparseMatrix must be squared.\n";
throw logic_error(buffer.str());
}
#endif
// One eigenvalue per row of the (square) matrix.
Matrix<T> eigenvalues(rows_number, 1);
// const Eigen::Map<Eigen::SparseMatrixXd> this_eigen((double*)this->data(), rows_number, columns_number);
// const Eigen::SelfAdjointEigenSolver<Eigen::SparseMatrixXd> SparseMatrix_eigen(this_eigen, Eigen::EigenvaluesOnly);
// Eigen::Map<Eigen::SparseMatrixXd> eigenvalues_eigen(eigenvalues.data(), rows_number, 1);
// eigenvalues_eigen = SparseMatrix_eigen.eigenvalues();
return(eigenvalues);
}
// Eigenvector computation (STUB): the Eigen-based implementation is commented
// out, so the returned rows_number x rows_number matrix is default-initialized.
template <class T>
Matrix<T> SparseMatrix<T>::calculate_eigenvectors() const /// @todo
{
// Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
if((*this).get_columns_number() == 0)
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix Template.\n"
<< "SparseMatrix<T> calculate_eigen_values() const method.\n"
<< "Number of columns must be greater than zero.\n";
throw logic_error(buffer.str());
}
#endif
#ifdef __OPENNN_DEBUG__
if((*this).get_rows_number() == 0)
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix Template.\n"
<< "SparseMatrix<T> calculate_eigen_values() const method.\n"
<< "Number of rows must be greater than zero.\n";
throw logic_error(buffer.str());
}
#endif
#ifdef __OPENNN_DEBUG__
if((*this).get_columns_number() != (*this).get_rows_number())
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix Template.\n"
<< "SparseMatrix<T> calculate_eigen_values() const method.\n"
<< "The sparse matrix must be squared.\n";
throw logic_error(buffer.str());
}
#endif
// One eigenvector (column) per row of the (square) matrix.
Matrix<T> eigenvectors(rows_number, rows_number);
// const Eigen::Map<Eigen::SparseMatrixXd> this_eigen((double*)this->data(), rows_number, columns_number);
// const Eigen::SelfAdjointEigenSolver<Eigen::SparseMatrixXd> SparseMatrix_eigen(this_eigen, Eigen::ComputeEigenvectors);
// Eigen::Map<Eigen::SparseMatrixXd> eigenvectors_eigen(eigenvectors.data(), rows_number, rows_number);
// eigenvectors_eigen = SparseMatrix_eigen.eigenvectors();
return(eigenvectors);
}
// Kronecker (direct) product with another sparse matrix. Only pairs of
// stored (non-zero) values can produce a non-zero entry, so iterating the
// two value lists covers the whole result.
template <class T>
SparseMatrix<T> SparseMatrix<T>::direct(const SparseMatrix<T>& other_sparse_matrix) const
{
const size_t other_rows_number = other_sparse_matrix.get_rows_number();
const size_t other_columns_number = other_sparse_matrix.get_columns_number();
const Vector<size_t> other_rows_indices = other_sparse_matrix.get_rows_indices();
const Vector<size_t> other_columns_indices = other_sparse_matrix.get_columns_indices();
const Vector<T> other_matrix_values = other_sparse_matrix.get_matrix_values();
const size_t other_nonzero_elements_number = other_matrix_values.size();
SparseMatrix<T> direct(rows_number*other_rows_number, columns_number*other_columns_number);
// (alpha, beta) is the position in the result of the product of one stored
// value of this matrix with one stored value of the other.
size_t alpha;
size_t beta;
const size_t this_nonzero_elements_number = matrix_values.size();
for(size_t i = 0; i < this_nonzero_elements_number; i++)
{
const size_t this_current_row = rows_indices[i];
const size_t this_current_column = columns_indices[i];
for(size_t j = 0; j < other_nonzero_elements_number; j++)
{
const size_t other_current_row = other_rows_indices[j];
const size_t other_current_column = other_columns_indices[j];
alpha = other_rows_number*this_current_row+other_current_row;
beta = other_columns_number*this_current_column+other_current_column;
direct.set_element(alpha,beta,matrix_values[i]*other_matrix_values[j]);
}
}
return(direct);
}
// Kronecker (direct) product with a dense matrix: each stored value of this
// matrix scales a full copy of the other matrix placed inside the result.
template <class T>
SparseMatrix<T> SparseMatrix<T>::direct(const Matrix<T>& other_matrix) const
{
    const size_t other_rows = other_matrix.get_rows_number();
    const size_t other_columns = other_matrix.get_columns_number();

    SparseMatrix<T> direct(rows_number*other_rows, columns_number*other_columns);

    const size_t stored_elements = matrix_values.size();

    for(size_t element = 0; element < stored_elements; element++)
    {
        const size_t row = rows_indices[element];
        const size_t column = columns_indices[element];
        const T value = matrix_values[element];

        // Scale and place the whole dense block for this stored value.
        for(size_t j = 0; j < other_rows; j++)
        {
            for(size_t k = 0; k < other_columns; k++)
            {
                direct.set_element(other_rows*row + j,
                                   other_columns*column + k,
                                   value*other_matrix(j,k));
            }
        }
    }

    return direct;
}
// Returns true when the sparse matrix has neither rows nor columns.
template <class T>
bool SparseMatrix<T>::empty() const
{
    return rows_number == 0 && columns_number == 0;
}
// Returns true when the numbers of rows and columns are equal.
template <class T>
bool SparseMatrix<T>::is_square() const
{
    return rows_number == columns_number;
}
// Returns true when this (square) sparse matrix equals its own transpose.
template <class T>
bool SparseMatrix<T>::is_symmetric() const
{
    // Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
    if(rows_number != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "bool is_symmetric() const method.\n"
               << "Sparse matrix must be squared.\n";
        throw logic_error(buffer.str());
    }
#endif
    // A matrix is symmetric exactly when it equals its transpose.
    return *this == calculate_transpose();
}
// Returns true when this (square) sparse matrix equals the negation of its
// own transpose.
template <class T>
bool SparseMatrix<T>::is_antisymmetric() const
{
    // Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
    if(rows_number != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "bool is_antisymmetric() const method.\n"
               << "Sparse matrix must be squared.\n";
        throw logic_error(buffer.str());
    }
#endif
    // A matrix is antisymmetric exactly when A == -A^T.
    const SparseMatrix<T> negated_transpose = calculate_transpose()*(-1);
    return *this == negated_transpose;
}
// Returns true when every stored value lies on the main diagonal.
// Implicit zeros never violate diagonality, so only stored entries are checked.
template <class T>
bool SparseMatrix<T>::is_diagonal() const
{
    // Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
    if(rows_number != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "bool is_diagonal() const method.\n"
               << "Sparse matrix must be squared.\n";
        throw logic_error(buffer.str());
    }
#endif
    const size_t stored_elements = matrix_values.size();

    size_t index = 0;

    while(index < stored_elements)
    {
        // An off-diagonal stored entry disqualifies the matrix immediately.
        if(rows_indices[index] != columns_indices[index])
        {
            return false;
        }

        ++index;
    }

    return true;
}
// Returns true when the main diagonal is constant.
// NOTE(review): off-diagonal entries are not inspected here — confirm this
// matches the intended definition of a scalar matrix.
template <class T>
bool SparseMatrix<T>::is_scalar() const
{
    // Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
    if(rows_number != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "bool is_scalar() const method.\n"
               << "Sparse matrix must be squared.\n";
        throw logic_error(buffer.str());
    }
#endif
    const Vector<T> diagonal = get_diagonal();

    return diagonal.is_constant();
}
// Returns true when this (square) sparse matrix is the identity: every
// diagonal entry is 1 and every other entry is 0.
template <class T>
bool SparseMatrix<T>::is_identity() const
{
    // Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
    if(rows_number != columns_number)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "bool is_unity() const method.\n"
               << "Sparse matrix must be squared.\n";
        throw logic_error(buffer.str());
    }
#endif
    const size_t nonzero_elements = matrix_values.size();

    // Every stored entry must be a 1 on the main diagonal.
    for(size_t i = 0; i < nonzero_elements; i++)
    {
        if(rows_indices[i] != columns_indices[i] || matrix_values[i] != 1)
        {
            return false;
        }
    }

    // Fix: additionally require that every diagonal position is actually
    // stored. Without this, a matrix with missing diagonal entries (e.g. the
    // zero matrix) was wrongly reported as the identity.
    // NOTE(review): assumes no duplicate (row,column) entries are stored.
    return nonzero_elements == rows_number;
}
// Returns true when the matrix only contains 0s and 1s. Implicit zeros are
// binary by definition, so only the stored values need to equal 1.
template <class T>
bool SparseMatrix<T>::is_binary() const
{
    const bool stored_values_are_ones = (matrix_values == 1);

    return stored_values_are_ones;
}
// Returns true when the given column only contains 0s and 1s. A column with
// no stored entries is all zeros and therefore binary.
template <class T>
bool SparseMatrix<T>::is_column_binary(const size_t& column_index) const
{
// Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
if(column_index >= columns_number)
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix Template.\n"
<< "bool is_column_binary(const size_t&) const method method.\n"
<< "Index of column(" << column_index << ") must be less than number of columns.\n";
throw logic_error(buffer.str());
}
#endif
// Positions in the value list whose column index matches the requested column.
const Vector<size_t> column_nonzero_indices = columns_indices.calculate_equal_to_indices(column_index);
if(column_nonzero_indices.size() == 0)
{
return true;
}
// All stored values of this column must equal 1.
return(matrix_values.get_subvector(column_nonzero_indices) == 1);
}
// Returns true when the stored values of the given column are all equal.
// NOTE(review): a column with some stored values and some implicit zeros is
// reported constant if the stored values match each other — confirm intended.
template <class T>
bool SparseMatrix<T>::is_column_constant(const size_t& column_index) const
{
// Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
if(column_index >= columns_number)
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix Template.\n"
<< "bool is_column_constant(const size_t&) const method method.\n"
<< "Index of column(" << column_index << ") must be less than number of columns.\n";
throw logic_error(buffer.str());
}
#endif
// Positions in the value list whose column index matches the requested column.
const Vector<size_t> column_nonzero_indices = columns_indices.calculate_equal_to_indices(column_index);
if(column_nonzero_indices.size() == 0)
{
return true;
}
return(matrix_values.get_subvector(column_nonzero_indices).is_constant());
}
// Returns true when the number of stored values reaches the requested
// fraction (0 < density_percentage <= 1) of the total number of positions.
template <class T>
bool SparseMatrix<T>::is_dense(const double& density_percentage) const
{
    // Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
    if(density_percentage > 1 || density_percentage <= 0)
    {
        ostringstream buffer;
        buffer << "OpenNN Exception: SparseMatrix Template.\n"
               << "bool is_dense(const double&) const method method.\n"
               << "Density percentage must be between 0 and 1.\n";
        throw logic_error(buffer.str());
    }
#endif
    return matrix_values.size() >= rows_number*columns_number*density_percentage;
}
// Replaces this matrix with the column-wise assembly of itself with itself,
// doubling the number of columns.
// NOTE(review): exact semantics depend on assemble_columns(); presumably this
// mirrors Matrix<T>::convert_association() for association data sets — confirm.
template <class T>
void SparseMatrix<T>::convert_association()
{
SparseMatrix<T> copy(*this);
set(copy.assemble_columns(copy));
}
// k-means clustering over the rows (STUB): the entire algorithm below is
// commented out, so an empty KMeansResults is currently returned. The unnamed
// size_t parameter is the intended number of clusters k.
template <class T>
KMeansResults<T> SparseMatrix<T>::calculate_k_means(const size_t&) const
{
KMeansResults<double> k_means_results;
/*
Vector< Vector<size_t> > clusters(k);
Matrix<double> previous_means(k, columns_number);
Matrix<double> means(k, columns_number);
const Vector<T> minimums = calculate_columns_minimums();
const Vector<T> maximums = calculate_columns_maximums();
size_t iterations = 0;
bool end = false;
// Calculate initial means
Vector<size_t> selected_rows(k);
const size_t initial_center = calculate_random_uniform<size_t>(0, rows_number);
previous_means.set_row(0, this->get_row(initial_center));
selected_rows[0] = initial_center;
for(size_t i = 1; i < k; i++)
{
Vector<double> minimum_distances(rows_number, 0.0);
#pragma omp parallel for
for(int j = 0; j < rows_number; j++)
{
Vector<double> distances(i, 0.0);
const Vector<T> row_data = get_row(j);
for(size_t l = 0; l < i; l++)
{
distances[l] = row_data.calculate_distance(previous_means.get_row(l));
}
const double minimum_distance = distances.calculate_minimum();
minimum_distances[j] = minimum_distance;
}
size_t sample_index = minimum_distances.calculate_sample_index_proportional_probability();
int random_failures = 0;
while(selected_rows.contains(sample_index))
{
sample_index = minimum_distances.calculate_sample_index_proportional_probability();
random_failures++;
if(random_failures > 5)
{
Vector<double> new_row(columns_number);
new_row.randomize_uniform(minimums, maximums);
previous_means.set_row(i, new_row);
break;
}
}
if(random_failures <= 5)
{
previous_means.set_row(i, get_row(sample_index));
}
}
// Main loop
while(!end)
{
clusters.clear();
clusters.set(k);
#pragma omp parallel for
for(int i = 0; i < rows_number; i++)
{
Vector<double> distances(k, 0.0);
const Vector<T> current_row = get_row(i);
for(size_t j = 0; j < k; j++)
{
distances[j] = current_row.calculate_distance(previous_means.get_row(j));
}
const size_t minimum_distance_index = distances.calculate_minimal_index();
#pragma omp critical
clusters[minimum_distance_index].push_back(i);
}
for(size_t i = 0; i < k; i++)
{
means.set_row(i,calculate_rows_means(clusters[i]));
}
if(previous_means == means)
{
end = true;
}
else if(iterations > 100)
{
end = true;
}
previous_means = means;
iterations++;
}
// k_means_results.means = means;
k_means_results.clusters = clusters;
*/
return(k_means_results);
}
// Correlation methods
// Computes ordinary least-squares regression parameters via the normal
// equations: b = (X'X)^-1 X'y, with this matrix as X (one sample per row)
// and the given vector as the dependent variable y.
template <class T>
Vector<T> SparseMatrix<T>::calculate_multiple_linear_regression_parameters(const Vector<T>& other) const
{
// Control sentence(if debug)
#ifdef __OPENNN_DEBUG__
const size_t data_size = this->get_rows_number();
const size_t other_size = other.size();
ostringstream buffer;
if(other_size != data_size) {
buffer << "OpenNN Exception: Vector Template.\n"
<< "LinearRegressionParameters<T> "
"calculate_multiple_linear_regression_parameters(const Vector<T>&) const "
"method.\n"
<< "Independent vector size must be equal to this size.\n";
throw logic_error(buffer.str());
}
#endif
// X'X is square (columns_number x columns_number), inverted by LU decomposition.
const Matrix<T> matrix_transposed = this->calculate_transpose();
const Matrix<T> matrix_product = matrix_transposed.dot((*this));
const Matrix<T> first_factor = matrix_product.calculate_LU_inverse();
const Vector<T> second_factor = matrix_transposed.dot(other);
return first_factor.dot(second_factor);
}
// Multiple linear correlation between this matrix's columns (predictors) and
// the given vector: the linear correlation between the vector and its
// least-squares approximation. Falls back to simple linear correlation when
// there is a single column.
template <class T>
double SparseMatrix<T>::calculate_multiple_linear_correlation(const Vector<T>& other) const
{
if(columns_number == 1) // Simple linear correlation
{
return this->get_column(0).calculate_linear_correlation(other);
}
const Vector<double> multiple_linear_regression_parameters = calculate_multiple_linear_regression_parameters(other);
const Vector<double> other_approximation = (*this).dot(multiple_linear_regression_parameters);
return other.calculate_linear_correlation(other_approximation);
}
// Serialization methods
// void print() const method
/// Prints the sparse matrix to the standard output using operator<<.
template <class T>
void SparseMatrix<T>::print() const
{
cout << *this << endl;
}
// Loads the sparse matrix from a whitespace-separated text file in two passes:
// first pass counts columns (tokens of the first line) and rows (non-empty
// lines), second pass reads the values row by row. An empty or unreadable
// file resets the matrix; failure to open throws logic_error.
template <class T>
void SparseMatrix<T>::load(const string& file_name)
{
ifstream file(file_name.c_str());
if(!file.is_open())
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix template.\n"
<< "void load(const string&) method.\n"
<< "Cannot open sparse matrix data file: " << file_name << "\n";
throw logic_error(buffer.str());
}
// An empty file yields an empty matrix.
if(file.peek() == ifstream::traits_type::eof())
{
this->set();
return;
}
//file.is
// Set SparseMatrix sizes
string line;
getline(file, line);
if(line.empty())
{
set();
}
else
{
// Column count = number of whitespace-separated tokens on the first line.
istringstream buffer(line);
istream_iterator<string> it(buffer);
istream_iterator<string> end;
const vector<string> results(it, end);
const size_t new_columns_number = (size_t)results.size();
size_t new_rows_number = 1;
// Row count = first line plus every remaining non-empty line.
while(file.good())
{
getline(file, line);
if(!line.empty())
{
new_rows_number++;
}
}
set(new_rows_number, new_columns_number);
// Clear file
// Rewind for the second pass (clear() resets the eof flag first).
file.clear();
file.seekg(0, ios::beg);
for(size_t i = 0; i < rows_number; i++)
{
Vector<T> current_row(columns_number);
for(size_t j = 0; j < columns_number; j++)
{
file >> current_row[j];
}
set_row(i, current_row);
}
}
// Close file
file.close();
}
/// Loads a binary "market basket" matrix from a separated text file where
/// each line lists the products of one transaction (row). The matrix gets one
/// column per unique product, and a stored value of 1 wherever a product
/// appears in a row.
/// @param file_name Name of the data file.
/// @param separator Character separating the product tokens on each line.
/// @return The unique product names, in the column order used by the matrix.
template <class T>
Vector<string> SparseMatrix<T>::load_product_strings(const string& file_name, const char& separator)
{
    Vector<string> products;

    ifstream file(file_name.c_str());

    if(!file.is_open())
    {
        ostringstream buffer;

        // Fixed error text: this method lives in SparseMatrix, not Matrix,
        // and the failing method is load_product_strings, not load_csv.
        buffer << "OpenNN Exception: SparseMatrix template.\n"
               << "Vector<string> load_product_strings(const string&, const char&) method.\n"
               << "Cannot open sparse matrix data file: " << file_name << "\n";

        throw logic_error(buffer.str());
    }

    // An empty file yields an empty matrix and no products.
    if(file.peek() == ifstream::traits_type::eof())
    {
        this->set();

        return products;
    }

    // Set matrix sizes

    string line;

    getline(file, line);

    if(line.empty())
    {
        set();
    }
    else
    {
        // First pass: collect every token (duplicates included — their count
        // is the number of nonzeros) and count the non-empty lines (rows).
        string token;

        istringstream buffer(line);

        while(getline(buffer, token, separator))
        {
            products.push_back(token);
        }

        size_t new_rows_number = 1;

        while(file.good())
        {
            getline(file, line);

            istringstream buffer(line);

            while(getline(buffer, token, separator))
            {
                products.push_back(token);
            }

            if(!line.empty())
            {
                new_rows_number++;
            }
        }

        const size_t number_of_nonzeros = products.size();

        // Unique products define the columns.
        products = products.get_unique_elements();

        const size_t new_columns_number = (size_t)products.size();

        set(new_rows_number, new_columns_number);

        rows_indices.set(number_of_nonzeros);
        columns_indices.set(number_of_nonzeros);
        matrix_values.set(number_of_nonzeros);

        // Second pass: rewind and record one (row, column, 1) triplet per token.
        file.clear();
        file.seekg(0, ios::beg);

        size_t index = 0;

        for(size_t i = 0; i < rows_number; i++)
        {
            getline(file, line);

            istringstream buffer(line);

            while(getline(buffer, token, separator))
            {
                const size_t current_column = products.calculate_equal_to_indices(token)[0];

                rows_indices[index] = i;
                columns_indices[index] = current_column;
                matrix_values[index] = 1;

                index++;
            }
        }
    }

    // Close file

    file.close();

    return products;
}
/// Loads the sparse matrix from a binary file written by save_binary():
/// a size_t column count, a size_t row count, then the dense values in
/// column-major order, each stored as a raw double.
/// @param file_name Name of the binary file.
template <class T>
void SparseMatrix<T>::load_binary(const string& file_name)
{
    ifstream file;

    file.open(file_name.c_str(), ios::binary);

    if(!file.is_open())
    {
        ostringstream buffer;

        buffer << "OpenNN Exception: SparseMatrix template.\n"
               << "void load_binary(const string&) method.\n"
               << "Cannot open binary file: " << file_name << "\n";

        throw logic_error(buffer.str());
    }

    streamsize size = sizeof(size_t);

    size_t new_columns_number;
    size_t new_rows_number;

    // Header order matches save_binary(): columns first, then rows.
    file.read(reinterpret_cast<char*>(&new_columns_number), size);
    file.read(reinterpret_cast<char*>(&new_rows_number), size);

    size = sizeof(double);

    double value;

    this->set(new_rows_number, new_columns_number);

    for(size_t i = 0; i < new_columns_number; i++)
    {
        // Bug fix: the column buffer must hold one entry per row
        // (new_rows_number); it was previously sized with the column
        // index i, causing out-of-bounds writes.
        Vector<T> current_column(new_rows_number);

        for(size_t j = 0; j < new_rows_number; j++)
        {
            file.read(reinterpret_cast<char*>(&value), size);

            current_column[j] = value;
        }

        set_column(i, current_column);
    }

    file.close();
}
/// Saves the matrix densely (zeros included) to a text file, one row per
/// line with values separated by single spaces.
/// @param file_name Name of the sparse matrix data file.
template <class T>
void SparseMatrix<T>::save(const string& file_name) const
{
    ofstream file(file_name.c_str());

    if(!file.is_open())
    {
        ostringstream buffer;

        buffer << "OpenNN Exception: SparseMatrix template." << endl
               << "void save(const string) method." << endl
               << "Cannot open sparse matrix data file." << endl;

        throw logic_error(buffer.str());
    }

    // Write file

    file.precision(20);

    for(size_t row_index = 0; row_index < rows_number; row_index++)
    {
        const Vector<T> row = get_row(row_index);

        for(size_t column_index = 0; column_index < columns_number; column_index++)
        {
            file << row[column_index] << " ";
        }

        file << endl;
    }

    // Close file

    file.close();
}
/// Saves the matrix to a binary file: a size_t column count, a size_t row
/// count, then the dense values in column-major order as raw doubles
/// (the format read back by load_binary()).
/// @param file_name Name of the binary file.
template <class T>
void SparseMatrix<T>::save_binary(const string& file_name) const
{
    ofstream file(file_name.c_str(), ios::binary);

    if(!file.is_open())
    {
        ostringstream buffer;

        buffer << "OpenNN Exception: SparseMatrix template." << endl
               << "void save(const string) method." << endl
               << "Cannot open SparseMatrix binary file." << endl;

        throw logic_error(buffer.str());
    }

    // Header: column count followed by row count.
    streamsize chunk_size = sizeof(size_t);

    size_t columns = columns_number;
    size_t rows = rows_number;

    file.write(reinterpret_cast<char*>(&columns), chunk_size);
    file.write(reinterpret_cast<char*>(&rows), chunk_size);

    // Payload: every element (zeros included), column by column.
    chunk_size = sizeof(double);

    for(size_t column_index = 0; column_index < columns_number; column_index++)
    {
        const Vector<T> column = get_column(column_index);

        for(size_t row_index = 0; row_index < rows_number; row_index++)
        {
            double value = column[row_index];

            file.write(reinterpret_cast<char*>(&value), chunk_size);
        }
    }

    file.close();
}
/// Saves the matrix densely (zeros included) to a CSV file.
/// @param file_name Output file path.
/// @param separator Field separator character.
/// @param column_names Optional header names; must be empty or have exactly
/// columns_number elements.
/// @param row_names Optional row labels; must be empty or have exactly
/// rows_number elements.
/// @param nameID Label written in the header's first cell when both column
/// names and row names are present.
template <class T>
void SparseMatrix<T>::save_csv(const string& file_name, const char& separator, const Vector<string>& column_names, const Vector<string>& row_names, const string& nameID) const
{
ofstream file(file_name.c_str());
if(!file.is_open())
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix template." << endl
<< "void save_csv(const string&, const char&, const Vector<string>&, const Vector<string>&) method." << endl
<< "Cannot open sparse matrix data file: " << file_name << endl;
throw logic_error(buffer.str());
}
if(column_names.size() != 0 && column_names.size() != columns_number)
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix template." << endl
<< "void save_csv(const string&, const char&, const Vector<string>&, const Vector<string>&) method." << endl
<< "Column names must have size 0 or " << columns_number << "." << endl;
throw logic_error(buffer.str());
}
if(row_names.size() != 0 && row_names.size() != rows_number)
{
ostringstream buffer;
buffer << "OpenNN Exception: SparseMatrix template." << endl
<< "void save_csv(const string&, const char&, const Vector<string>&, const Vector<string>&) method." << endl
<< "Row names must have size 0 or " << rows_number << "." << endl;
throw logic_error(buffer.str());
}
// Write file
// Optional header line: the nameID cell only appears when row labels will
// also be emitted, so header and data lines stay column-aligned.
if(!column_names.empty())
{
if(!row_names.empty())
{
file << nameID << separator;
}
for(size_t j = 0; j < columns_number; j++)
{
file << column_names[j];
if(j != columns_number-1)
{
file << separator;
}
}
file << endl;
}
file.precision(20);
// Data lines: optional row label, then the dense row values with no
// trailing separator.
for(size_t i = 0; i < rows_number; i++)
{
if(!row_names.empty())
{
file << row_names[i] << separator;
}
const Vector<T> current_row = get_row(i);
for(size_t j = 0; j < columns_number; j++)
{
file << current_row[j];
if(j != columns_number-1)
{
file << separator;
}
}
file << endl;
}
// Close file
file.close();
}
/// Fills the sparse matrix from a string holding whitespace-separated values,
/// one text line per matrix row (the in-memory analogue of load()).
/// @param str String representation of the matrix; an empty string clears it.
template <class T>
void SparseMatrix<T>::parse(const string& str)
{
if(str.empty())
{
set();
}
else
{
// Set SparseMatrix sizes
// First pass: columns from the token count of the first line, rows from
// the number of non-empty lines.
istringstream str_buffer(str);
string line;
getline(str_buffer, line);
istringstream line_buffer(line);
istream_iterator<string> it(line_buffer);
istream_iterator<string> end;
const vector<string> results(it, end);
const size_t new_columns_number = (size_t)results.size();
size_t new_rows_number = 1;
while(str_buffer.good())
{
getline(str_buffer, line);
if(!line.empty())
{
new_rows_number++;
}
}
set(new_rows_number, new_columns_number);
// Clear file
// Second pass: clear() resets the EOF flag so seekg() can rewind the
// string stream, then the values are extracted row by row.
str_buffer.clear();
str_buffer.seekg(0, ios::beg);
for(size_t i = 0; i < rows_number; i++)
{
Vector<T> current_row(columns_number);
for(size_t j = 0; j < columns_number; j++)
{
str_buffer >> current_row[j];
}
set_row(i, current_row);
}
}
}
/// Returns a string representation of the matrix: one row per line, values
/// joined with the given separator, rows separated by '\n'.
/// @param separator Character inserted between the values of a row.
template <class T>
string SparseMatrix<T>::SparseMatrix_to_string(const char& separator) const
{
    ostringstream stream;

    if(rows_number > 0 && columns_number > 0)
    {
        stream << get_row(0).vector_to_string(separator);

        for(size_t row_index = 1; row_index < rows_number; row_index++)
        {
            stream << "\n" << get_row(row_index).vector_to_string(separator);
        }
    }

    return(stream.str());
}
/// Returns a copy of this sparse matrix with its stored values cast to
/// size_t (same sparsity pattern).
template <class T>
SparseMatrix<size_t> SparseMatrix<T>::to_size_t_SparseMatrix() const
{
    SparseMatrix<size_t> size_t_sparse_matrix(rows_number, columns_number);

    const size_t nonzero_elements_number = matrix_values.size();

    // Bug fix: the converted values must live in a Vector<size_t> — the
    // element type of the target matrix — not in a Vector<T>. Compare
    // to_double_SparseMatrix(), which correctly uses Vector<double>.
    Vector<size_t> new_matrix_values(nonzero_elements_number);

    for(size_t i = 0; i < nonzero_elements_number; i++)
    {
        new_matrix_values[i] = (size_t)matrix_values[i];
    }

    size_t_sparse_matrix.set_values(rows_indices, columns_indices, new_matrix_values);

    return(size_t_sparse_matrix);
}
/// Returns a copy of this sparse matrix with its stored values cast to
/// double (same sparsity pattern).
template <class T>
SparseMatrix<double> SparseMatrix<T>::to_double_SparseMatrix() const
{
    SparseMatrix<double> double_sparse_matrix(rows_number, columns_number);

    const size_t values_number = matrix_values.size();

    Vector<double> converted_values(values_number);

    for(size_t index = 0; index < values_number; index++)
    {
        converted_values[index] = (double)matrix_values[index];
    }

    double_sparse_matrix.set_values(rows_indices, columns_indices, converted_values);

    return(double_sparse_matrix);
}
/// Returns a copy of this sparse matrix with each stored value formatted as
/// a string (same sparsity pattern).
/// @param precision Stream precision used when formatting the values.
template <class T>
SparseMatrix<string> SparseMatrix<T>::to_string_SparseMatrix(const size_t& precision) const
{
    SparseMatrix<string> string_sparse_matrix(rows_number, columns_number);

    const size_t values_number = matrix_values.size();

    Vector<string> formatted_values(values_number);

    ostringstream stream;

    for(size_t index = 0; index < values_number; index++)
    {
        // Reuse one stream; reset its contents before each value.
        stream.str("");
        stream << setprecision(precision) << matrix_values[index];

        formatted_values[index] = stream.str();
    }

    string_sparse_matrix.set_values(rows_indices, columns_indices, formatted_values);

    return(string_sparse_matrix);
}
/// Returns the dense equivalent of this sparse matrix: unset positions hold
/// a value-initialized T(), stored triplets overwrite their cells.
template <class T>
Matrix<T> SparseMatrix<T>::to_matrix() const
{
    Matrix<T> dense_matrix(rows_number, columns_number, T());

    const size_t values_number = matrix_values.size();

    // Scatter every (row, column, value) triplet into the dense matrix.
    for(size_t index = 0; index < values_number; index++)
    {
        dense_matrix(rows_indices[index], columns_indices[index]) = matrix_values[index];
    }

    return dense_matrix;
}
/// Returns the matrix as a vector of dense column vectors, one entry per
/// column.
template <class T>
Vector< Vector<T> > SparseMatrix<T>::to_vector_of_vectors() const
{
    Vector< Vector<T> > columns(columns_number);

    for(size_t column_index = 0; column_index < columns_number; column_index++)
    {
        columns[column_index] = get_column(column_index);
    }

    return columns;
}
/// Converts the triplet representation to a compressed storage layout.
/// NOTE(review): despite the "CSR" name, CSR_indices[1] has
/// columns_number+1 entries accumulated per column, i.e. this builds
/// column-compressed (CSC-style) pointers, with the matching row indices in
/// CSR_indices[0] and the values — ordered column by column, rows ascending
/// within a column — in new_matrix_values. Confirm against the consumers.
/// @param new_matrix_values Output: the nonzero values in compressed order.
/// @return Two vectors: [0] row index per value, [1] per-column offsets.
template <class T>
Vector< Vector<size_t> > SparseMatrix<T>::to_CSR(Vector<T>& new_matrix_values) const
{
const size_t nonzero_elements_number = matrix_values.size();
Vector< Vector<size_t> > CSR_indices(2);
CSR_indices[0].set(nonzero_elements_number);
CSR_indices[1].set(columns_number+1, 0);
new_matrix_values.set(nonzero_elements_number);
const Vector<size_t> unique_columns_sorted = columns_indices.get_unique_elements();
const size_t unique_columns_number = unique_columns_sorted.size();
size_t index = 0;
for(size_t i = 0; i < unique_columns_number; i++)
{
const size_t current_column_index = unique_columns_sorted[i];
// Positions (in the triplet arrays) of this column's entries, and a
// permutation that visits them in ascending row order.
const Vector<size_t> current_columns_indices = columns_indices.calculate_equal_to_indices(current_column_index);
const Vector<size_t> current_rows_indices = rows_indices.get_subvector(current_columns_indices).sort_ascending_indices();
const size_t current_rows_number = current_rows_indices.size();
for(size_t j = 0; j < current_rows_number; j++)
{
const size_t current_row_index = rows_indices[current_columns_indices[current_rows_indices[j]]];
// Bump the offset of every later column once per stored value.
// NOTE(review): this makes the loop O(nnz * columns_number); counting
// per column and prefix-summing once would be linear.
for(size_t k = current_column_index+1; k <= columns_number; k++)
{
CSR_indices[1][k]++;
}
CSR_indices[0][index] = current_row_index;
new_matrix_values[index] = matrix_values[current_columns_indices[current_rows_indices[j]]];
index++;
}
}
return CSR_indices;
}
/// Rebuilds the triplet representation from compressed-column data (the
/// layout produced by to_CSR): per-value row indices, a column-pointer array
/// of columns_number+1 offsets, and the values. The matrix is grown (never
/// shrunk) if it is too small for the given data.
/// @param csr_rows_indices Zero-based row index of each stored value.
/// @param csr_columns_indices Column offsets; entry i+1 minus entry i is the
/// number of values stored in column i.
/// @param csr_matrix_values The stored values, ordered column by column.
template <class T>
void SparseMatrix<T>::from_CSR(const Vector<size_t>& csr_rows_indices, const Vector<size_t>& csr_columns_indices, const Vector<T>& csr_matrix_values)
{
    // Bug fix: row indices are zero-based (see load_product_strings), so a
    // maximum index of m means m+1 rows are required — the previous code
    // sized the matrix with the maximum index itself, dropping the last row.
    const size_t required_rows_number = csr_rows_indices.calculate_maximum() + 1;

    const size_t csr_columns_number = csr_columns_indices.size() - 1;

    if(rows_number == 0 || columns_number == 0)
    {
        set(required_rows_number, csr_columns_number);
    }

    if(rows_number < required_rows_number)
    {
        set(required_rows_number, columns_number);
    }

    if(columns_number < csr_columns_number)
    {
        set(rows_number, csr_columns_number);
    }

    rows_indices = csr_rows_indices;
    matrix_values = csr_matrix_values;

    columns_indices.set(matrix_values.size());

    // Expand the compressed column pointers back into one explicit column
    // index per stored value.
    size_t index = 0;

    for(size_t i = 1; i < csr_columns_indices.size(); i++)
    {
        const size_t nonzero_column_elements = csr_columns_indices[i] - csr_columns_indices[i-1];

        for(size_t j = 0; j < nonzero_column_elements; j++)
        {
            columns_indices[index] = i-1;
            index++;
        }
    }
}
/// Prints a preview of the matrix to the standard output: the dimensions,
/// the first two rows, and the last two rows (guarded so no row is printed
/// twice for small matrices).
template <class T>
void SparseMatrix<T>::print_preview() const
{
    cout << "Rows number: " << rows_number << endl
         << "Columns number: " << columns_number << endl;

    if(rows_number > 0)
    {
        const Vector<T> first_row = get_row(0);

        cout << "Row 0:\n" << first_row << endl;
    }

    if(rows_number > 1)
    {
        const Vector<T> second_row = get_row(1);

        cout << "Row 1:\n" << second_row << endl;
    }

    if(rows_number > 3)
    {
        const Vector<T> row = get_row(rows_number-2);

        // Bug fix: label the row with its zero-based index (rows_number-2);
        // it was previously printed as rows_number-1.
        cout << "Row " << rows_number-2 << ":\n" << row << endl;
    }

    if(rows_number > 2)
    {
        const Vector<T> last_row = get_row(rows_number-1);

        // Bug fix: the last row's zero-based index is rows_number-1, not
        // rows_number.
        cout << "Row " << rows_number-1 << ":\n" << last_row << endl;
    }
}
// Output operator

/// Writes the sparse matrix to an output stream, one row per line (no
/// trailing newline after the last row).
/// @param os Output stream.
/// @param m Sparse matrix to print.
template<class T>
ostream& operator <<(ostream& os, const SparseMatrix<T>& m)
{
    const size_t rows_number = m.get_rows_number();
    const size_t columns_number = m.get_columns_number();

    if(rows_number > 0 && columns_number > 0)
    {
        os << m.get_row(0);

        for(size_t i = 1; i < rows_number; i++)
        {
            os << "\n" << m.get_row(i);
        }
    }

    return(os);
}
}
#endif
// OpenNN: Open Neural Networks Library.
// Copyright(C) 2005-2018 Artificial Intelligence Techniques, SL.
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
|
rg_filter.c | //////////////////////////////////////
// Cunren Liang, NASA JPL/Caltech
// Copyright 2015-2018...
//////////////////////////////////////
#include "resamp.h"
#include <fftw3.h>
#include <omp.h>
#define SWAP4(a) (*(unsigned int *)&(a) = (((*(unsigned int *)&(a) & 0x000000ff) << 24) | ((*(unsigned int *)&(a) & 0x0000ff00) << 8) | ((*(unsigned int *)&(a) >> 8) & 0x0000ff00) | ((*(unsigned int *)&(a) >> 24) & 0x000000ff)))
/* Band-pass filters a complex SAR range line file into one or more output
   files, one per requested (bandwidth, center-frequency) pair, using
   FFT-based overlap-save block convolution along range. */
int rg_filter(char *inputfile, int nrg, int naz, int nout, char **outputfile, float *bw, float *bc, int nfilter, int nfft, float beta, int zero_cf, float offset, int byteorder, long imageoffset, long lineoffset){
/*
inputfile: input file
nrg file width
nout: number of output files
outputfile: (value_of_out_1, value_of_out_2, value_of_out_3...) output files
bw: (value_of_out_1, value_of_out_2, value_of_out_3...) filter bandwidth divided by sampling frequency [0, 1]
bc: (value_of_out_1, value_of_out_2, value_of_out_3...) filter center frequency divided by sampling frequency
nfilter: number samples of the filter (odd). Reference Value: 65
nfft: number of samples of the FFT. Reference Value: 1024
beta: kaiser window beta. Reference Value: 1.0
zero_cf: if bc != 0.0, move center frequency to zero? 0: Yes (Reference Value). 1: No.
offset: offset (in samples) of linear phase for moving center frequency. Reference Value: 0.0
byteorder: (0) LSB, little endian; (1) MSB, big endian of intput file
imageoffset: offset from start of the image of input file
lineoffset: length of each line of input file
*/
///////////////////////////////
// int k;
// printf("input parameters:");
// printf("%s\n", inputfile);
// printf("%d\n", nrg);
// printf("%d\n", nout);
// for(k =0; k<nout;k++){
// printf("%s\n", outputfile[k]);
// printf("%f\n", bw[k]);
// printf("%f\n", bc[k]);
// }
// printf("%d\n", nfilter);
// printf("%d\n", nfft);
// printf("%f\n", beta);
// printf("%d\n", zero_cf);
// printf("%f\n", offset);
///////////////////////////////
FILE *infp; //secondary image to be resampled
FILE **outfp; //resampled secondary image
fcomplex **filter; //frequency response of each output's band-pass filter
fcomplex *in; //one input range line
fcomplex **out; //one filtered range line per output file
fcomplex *tmp; //forward-FFT work buffer (one block)
fcomplex *tmp2; //inverse-FFT work buffer (one filtered block)
fcomplex *tmpf; //filter design buffer
int *zeroflag; //marks input samples that are exactly zero so they stay zero on output
fftwf_plan p_forward;
fftwf_plan p_backward;
fftwf_plan p_forward_filter;
//fftwf_plan p_backward_filter;
//int nout; //number of output files
//int nrg; //file width
//int naz; //file length
//int nfft; //fft length
//int nfilter; //filter length
int hnfilter; //half filter length, (nfilter-1)/2
//float *bw;
//float *bc;
//float beta; //kaiser window beta
//int zero_cf;
//float offset;
int argc_mand;
int nthreads;
float sc; //constant to scale the data read in to avoid large values
//during fft and ifft
float cf_pha;
float t;
fcomplex cf;
int nblock_in; //input samples consumed per block
int nblock_out; //valid output samples produced per block
int num_block;
int i_block;
int nblock_in_last;
int nblock_out_last;
int i, j, i_out;
/*****************************************************************************/
//nfilter = 65;
//nfft = 1024;
//beta = 1.0;
//zero_cf = 0;
//offset = 0.0;
sc = 10000.0;
/*****************************************************************************/
infp = openfile(inputfile, "rb");
//naz = file_length(infp, nrg, sizeof(fcomplex));
//fseeko(infp,0L,SEEK_END);
//naz = (ftello(infp) - imageoffset) / (lineoffset + nrg*sizeof(fcomplex));
//rewind(infp);
printf("file width: %d, file length: %d\n\n", nrg, naz);
if(nout < 1){
fprintf(stderr, "there should be at least one output file!\n");
exit(1);
}
outfp = array1d_FILE(nout);
for(i = 0; i < nout; i++){
outfp[i] = openfile(outputfile[i], "wb");
}
//check filter length
if(nfilter < 3){
fprintf(stderr, "filter length: %d too small!\n", nfilter);
exit(1);
}
if(nfilter % 2 != 1){
fprintf(stderr, "filter length must be odd!\n");
exit(1);
}
if(byteorder == 0){
printf("inputfile byte order: little endian\n");
}
else{
printf("inputfile byte order: big endian\n");
}
printf("input file image offset [byte]: %ld\n", imageoffset);
printf("input file line offset [byte]: %ld\n", lineoffset);
if(imageoffset < 0){
fprintf(stderr, "image offset must be >= 0\n");
exit(1);
}
if(lineoffset < 0){
fprintf(stderr, "lineoffset offset must be >= 0\n");
exit(1);
}
//compute block processing parameters
//overlap-save: each nfft-point block yields nblock_out valid samples;
//blocks overlap by 2*hnfilter samples to absorb the filter transient.
hnfilter = (nfilter - 1) / 2;
nblock_in = nfft - nfilter + 1;
nblock_in += hnfilter;
if (nblock_in <= 0){
fprintf(stderr, "fft length too small compared with filter length!\n");
exit(1);
}
nblock_out = nblock_in - 2 * hnfilter;
num_block = (nrg - 2 * hnfilter) / nblock_out;
if((nrg - num_block * nblock_out - 2 * hnfilter) != 0){
num_block += 1;
}
if((nrg - 2 * hnfilter) <= 0){
num_block = 1;
}
if(num_block == 1){
nblock_out_last = 0;
nblock_in_last = nrg;
}
else{
nblock_out_last = nrg - (num_block - 1) * nblock_out - 2 * hnfilter;
nblock_in_last = nblock_out_last + 2 * hnfilter;
}
//allocate memory
filter = array2d_fcomplex(nout, nfft);
in = array1d_fcomplex(nrg);
out = array2d_fcomplex(nout, nrg);
tmp = array1d_fcomplex(nfft);
tmp2 = array1d_fcomplex(nfft);
tmpf = array1d_fcomplex(nfft);
zeroflag = array1d_int(nrg);
//as said in the FFTW document,
//Typically, the problem will have to involve at least a few thousand data points before threads become beneficial.
//so I choose not to use Multi-threaded FFTW, as our FFT size is mostly small.
if(0){
//////////////////////////////////////////////////////////////////////////////////////////////////
//Multi-threaded FFTW
nthreads = fftwf_init_threads();
if(nthreads == 0){
fprintf(stderr, "WARNING: there is some error in using multi-threaded FFTW.\n");
fprintf(stderr, "         therefore it is not used, and computation performance is reduced.\n");
nthreads = 1;
}
else{
//int this_thread = omp_get_thread_num(), num_threads = omp_get_num_threads();
//nthreads = omp_get_num_threads();
nthreads = omp_get_max_threads();
}
printf("FFTW is using %d threads\n", nthreads);
//this works for all the following plans
if(nthreads != 1)
//actually it is OK to pass nthreads=1, in this case, threads are disabled.
fftwf_plan_with_nthreads(nthreads);
//////////////////////////////////////////////////////////////////////////////////////////////////
}
//create plans before initializing data, because FFTW_MEASURE overwrites the in/out arrays.
p_forward = fftwf_plan_dft_1d(nfft, (fftwf_complex*)tmp, (fftwf_complex*)tmp, FFTW_FORWARD, FFTW_MEASURE);
p_backward = fftwf_plan_dft_1d(nfft, (fftwf_complex*)tmp2, (fftwf_complex*)tmp2, FFTW_BACKWARD, FFTW_MEASURE);
p_forward_filter = fftwf_plan_dft_1d(nfft, (fftwf_complex*)tmpf, (fftwf_complex*)tmpf, FFTW_FORWARD, FFTW_ESTIMATE);
//computing filters
//design each band-pass filter in the time domain, then store its
//nfft-point frequency response for fast per-block multiplication.
for(i = 0; i < nout; i++){
bandpass_filter(bw[i], bc[i], nfilter, nfft, (nfilter-1)/2, beta, tmpf);
//relationship of nr and matlab fft
//nr fft           matlab fft
// 1 <==>  ifft()*nfft
// -1 <==>  fft()
//four1((float *)filter - 1, nfft, -1);
fftwf_execute(p_forward_filter);
for(j = 0; j < nfft; j++){
filter[i][j].re = tmpf[j].re;
filter[i][j].im = tmpf[j].im;
}
}
fftwf_destroy_plan(p_forward_filter);
//skip image header
if(imageoffset != 0)
fseek(infp, imageoffset, SEEK_SET);
//process data
for(i = 0; i < naz; i++){
//progress report
if((i + 1) % 1000 == 0 || (i + 1) == naz)
fprintf(stderr,"processing line: %6d of %6d\r", i+1, naz);
if((i + 1) == naz)
fprintf(stderr,"\n\n");
//read data
//NOTE(review): this skips lineoffset - nrg*sizeof(fcomplex) bytes between
//consecutive lines, i.e. lineoffset is treated as the full stored line
//length including the nrg data samples — confirm against the callers.
if(i != 0)
fseek(infp, lineoffset-(size_t)nrg * sizeof(fcomplex), SEEK_CUR);
readdata((fcomplex *)in, (size_t)nrg * sizeof(fcomplex), infp);
//swap bytes
if(byteorder!=0){
for(j = 0; j < nrg; j++){
SWAP4(in[j].re);
SWAP4(in[j].im);
}
}
//scale down nonzero samples and remember which samples were exactly zero
//so they can be forced back to zero after filtering.
#pragma omp parallel for private(j) shared(nrg,in, zeroflag, sc)
for(j = 0; j < nrg; j++){
if(in[j].re != 0.0 || in[j].im != 0.0){
zeroflag[j] = 1;
in[j].re *= 1.0 / sc;
in[j].im *= 1.0 / sc;
}
else{
zeroflag[j] = 0;
}
}
//process each block
for(i_block = 0; i_block < num_block; i_block++){
//zero out
//for(j = 0; j < nfft; j++){
//  tmp[j].re = 0.0;
//  tmp[j].im = 0.0;
//}
memset((void *)tmp, 0, (size_t)nfft*sizeof(fcomplex));
//get data
if(num_block == 1){
for(j = 0; j < nrg; j++){
tmp[j] = in[j];
}
}
else{
if(i_block == num_block - 1){
for(j = 0; j < nblock_in_last; j++){
tmp[j] = in[j+nblock_out*i_block];
}
}
else{
for(j = 0; j < nblock_in; j++){
tmp[j] = in[j+nblock_out*i_block];
}
}
}
//four1((float *)tmp - 1, nfft, -1);
//tested, the same as above
fftwf_execute(p_forward);
//process each output file
for(i_out = 0; i_out < nout; i_out++){
//multiply by the filter's frequency response, then transform back.
//looks like this makes it slower, so comment out
//#pragma omp parallel for private(j) shared(nfft, tmp2, filter, i_out, tmp)
for(j = 0; j < nfft; j++)
tmp2[j] = cmul(filter[i_out][j], tmp[j]);
//four1((float *)tmp2 - 1, nfft, 1);
//tested, the same as above
fftwf_execute(p_backward);
//get data
//keep only the valid (transient-free) part of each block; the first and
//last blocks also keep the half-filter edge samples.
if(num_block == 1){
for(j = 0; j < nrg; j++){
out[i_out][j] = tmp2[j];
}
}
else{
if(i_block == 0){
for(j = 0; j < hnfilter + nblock_out; j++){
out[i_out][j] = tmp2[j];
}
}
else if(i_block == num_block - 1){
for(j = 0; j < hnfilter + nblock_out_last; j++){
out[i_out][nrg - 1 - j] = tmp2[nblock_in_last - 1 - j];
}
}
else{
for(j = 0; j < nblock_out; j++){
out[i_out][j + hnfilter + i_block * nblock_out] = tmp2[j + hnfilter];
}
}
}//end of getting data
}//end of processing each output file
}//end of processing each block
//move center frequency
//demodulate each output back to baseband by multiplying with a complex
//exponential at -bc (skipped when zero_cf != 0).
if(zero_cf == 0){
//process each output file
//looks like this makes it slower, so comment out
//#pragma omp parallel for private(i_out, j, t, cf_pha, cf) shared(nout, bc, nrg, offset, out)
for(i_out = 0; i_out < nout; i_out++){
if(bc[i_out] != 0){
#pragma omp parallel for private(j, t, cf_pha, cf) shared(nrg, offset, bc, i_out, out)
for(j = 0; j < nrg; j++){
//t = j - (nrg - 1.0) / 2.0; //make 0 index exactly at range center
t = j + offset; //make 0 index exactly at range center
cf_pha = 2.0 * PI * (-bc[i_out]) * t;
cf.re = cos(cf_pha);
cf.im = sin(cf_pha);
out[i_out][j] = cmul(out[i_out][j], cf);
}
}
}
}
//scale back and write data
//process each output file
for(i_out = 0; i_out < nout; i_out++){
//scale back
//undo the input scaling and the FFTW unnormalized inverse transform
//(factor nfft); samples that were zero on input are forced back to zero.
#pragma omp parallel for private(j) shared(nrg, zeroflag, out, i_out, sc, nfft)
for(j = 0; j < nrg; j++){
if(zeroflag[j] == 0){
out[i_out][j].re = 0.0;
out[i_out][j].im = 0.0;
}
else{
out[i_out][j].re *= sc / nfft;
out[i_out][j].im *= sc / nfft;
}
}
//write data
writedata((fcomplex *)out[i_out], nrg * sizeof(fcomplex), outfp[i_out]);
}
}//end of processing data
fftwf_destroy_plan(p_forward);
fftwf_destroy_plan(p_backward);
free_array2d_fcomplex(filter);
free_array1d_fcomplex(in);
free_array2d_fcomplex(out);
free_array1d_fcomplex(tmp);
free_array1d_fcomplex(tmp2);
free_array1d_fcomplex(tmpf);
free_array1d_int(zeroflag);
//free_array1d_float(bw);
//free_array1d_float(bc);
fclose(infp);
for(i_out = 0; i_out < nout; i_out++)
fclose(outfp[i_out]);
//free_array1d_FILE(outfp);
return 0;
}//end main()
|
THTensorConv.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "internal/THTensorConv.c"
#else
/*
2D Input, 2D kernel : convolve given image with the given kernel.
*/
/* Accumulates into r_ the "valid" 2D cross-correlation of image t_ (ir x ic)
   with kernel k_ (kr x kc), strides (sr, sc), scaled by alpha:
   r_ += alpha * xcorr(t_, k_). r_ must hold or x oc elements. */
void THTensor_(validXCorr2Dptr)(real *r_,
real alpha,
real *t_, long ir, long ic,
real *k_, long kr, long kc,
long sr, long sc)
{
/* valid output size: floor((input - kernel) / stride) + 1 per dimension */
long or = (ir - kr) / sr + 1;
long oc = (ic - kc) / sc + 1;
long xx, yy, kx, ky;
if ((sc != 1) || (oc < 4)) {
/* regular convolution */
for(yy = 0; yy < or; yy++) {
for(xx = 0; xx < oc; xx++) {
/* Dot product in two dimensions... (between input image and the mask) */
real *pi_ = t_ + yy*sr*ic + xx*sc;
real *pw_ = k_;
real sum = 0;
for(ky = 0; ky < kr; ky++) {
for(kx = 0; kx < kc; kx++) {
sum += pi_[kx]*pw_[kx];
}
pi_ += ic; /* next input line */
pw_ += kc; /* next mask line */
}
/* Update output */
*r_++ += alpha*sum;
}
}
} else {
/* SSE-based convolution */
/* unit column stride and a wide enough output row allow a vectorized
   scaled-add of a whole output row per kernel tap */
for(yy = 0; yy < or; yy++) {
real *pi_ = t_ + yy*sr*ic;
real *pw_ = k_;
for (ky = 0; ky < kr; ky++) {
real *pis_ = pi_;
for (kx = 0; kx < kc; kx++) {
THVector_(cadd)(r_, r_, pis_, alpha*pw_[kx], oc);
pis_++;
}
pi_ += ic; /* next input line */
pw_ += kc; /* next mask line */
}
r_ += oc;
}
}
}
/*
2D Input, 2D kernel : convolve given image with the given kernel.
*/
/* Accumulates into r_ the "valid" 2D true convolution of image t_ (ir x ic)
   with kernel k_ (kr x kc), strides (sr, sc), scaled by alpha. Identical to
   validXCorr2Dptr except the kernel is traversed in reverse (flipped). */
void THTensor_(validConv2Dptr)(real *r_,
real alpha,
real *t_, long ir, long ic,
real *k_, long kr, long kc,
long sr, long sc)
{
/* valid output size: floor((input - kernel) / stride) + 1 per dimension */
long or = (ir - kr) / sr + 1;
long oc = (ic - kc) / sc + 1;
long xx, yy, kx, ky;
if ((sc != 1) || (oc < 4)) {
/* regular convolution */
for(yy = 0; yy < or; yy++) {
for(xx = 0; xx < oc; xx++) {
/* Dot product in two dimensions... (between input image and the mask) */
real *pi_ = t_ + yy*sr*ic + xx*sc;
real *pw_ = k_ + kr*kc - 1; /* start at last kernel element: flipped kernel */
real sum = 0;
for(ky = 0; ky < kr; ky++) {
for(kx = 0; kx < kc; kx++) {
sum += pi_[kx]*pw_[-kx];
}
pi_ += ic; /* next input line */
pw_ -= kc; /* next mask line */
}
/* Update output */
*r_++ += alpha*sum;
}
}
} else {
/* SSE-based convolution */
/* vectorized scaled-add over a whole output row per (flipped) kernel tap */
for(yy = 0; yy < or; yy++) {
real *pw_ = k_ + kr*kc - 1;
real *pi_ = t_ + yy*sr*ic;
for (ky = 0; ky < kr; ky++) {
real *pis_ = pi_;
for (kx = 0; kx < kc; kx++) {
THVector_(cadd)(r_, r_, pis_, alpha*pw_[-kx], oc);
pis_++;
}
pi_ += ic; /* next input line */
pw_ -= kc; /* next mask line */
}
r_ += oc;
}
}
}
/*
2D Input, 2D kernel : convolve given image with the given kernel, full convolution.
*/
/* Accumulates into r_ the "full" 2D convolution of image t_ (ir x ic) with
   kernel k_ (kr x kc): each alpha-scaled input sample is scattered over a
   kernel-sized footprint of the output. r_ must hold
   ((ir-1)*sr+kr) x ((ic-1)*sc+kc) elements. */
void THTensor_(fullConv2Dptr)(real *r_,
real alpha,
real *t_, long ir, long ic,
real *k_, long kr, long kc,
long sr, long sc)
{
long oc = (ic - 1) * sc + kc;
long xx, yy, kx, ky;
if ((sc != 1) || (ic < 4)) {
/* regular convolution */
for(yy = 0; yy < ir; yy++) {
for(xx = 0; xx < ic; xx++) {
/* Outer product in two dimensions... (between input image and the mask) */
real *po_ = r_ + yy*sr*oc + xx*sc;
real *pw_ = k_;
for(ky = 0; ky < kr; ky++)
{
real z = *t_ * alpha;
for(kx = 0; kx < kc; kx++) {
po_[kx] += z * pw_[kx];
}
po_ += oc; /* next input line */
pw_ += kc; /* next mask line */
}
t_++;
}
}
} else {
/* SSE-based convolution */
/* scatter a whole input row at once per kernel tap */
for(yy = 0; yy < ir; yy++) {
real *po_ = r_ + yy*sr*oc;
real *pw_ = k_;
for (ky = 0; ky < kr; ky++) {
real *pos_ = po_;
for (kx = 0; kx < kc; kx++) {
THVector_(cadd)(pos_, pos_, t_, alpha*pw_[kx], ic);
pos_++;
}
po_ += oc; /* next input line */
pw_ += kc; /* next mask line */
}
t_ += ic;
}
}
}
/*
2D Input, 2D kernel : convolve given image with the given kernel, full convolution.
*/
/* Accumulates into r_ the "full" 2D cross-correlation of image t_ (ir x ic)
   with kernel k_ (kr x kc): each alpha-scaled input sample is scattered over
   a kernel-sized footprint of the output, with the kernel traversed in
   reverse. r_ must hold ((ir-1)*sr+kr) x ((ic-1)*sc+kc) elements. */
void THTensor_(fullXCorr2Dptr)(real *r_,
                               real alpha,
                               real *t_, long ir, long ic,
                               real *k_, long kr, long kc,
                               long sr, long sc)
{
  long oc = (ic - 1) * sc + kc;
  long xx, yy, kx, ky;

  if ((sc != 1) || (ic < 4)) {
    /* regular convolution */
    for(yy = 0; yy < ir; yy++) {
      for(xx = 0; xx < ic; xx++) {
        /* Outer product in two dimensions... (between input image and the mask) */
        real *po_ = r_ + yy*sr*oc + xx*sc;
        real *pw_ = k_ + kr*kc -1; /* last kernel element: reversed traversal */
        /* fix: removed inner `long kx, ky;` that shadowed the declarations
           above (behavior unchanged; silences -Wshadow) */
        for(ky = 0; ky < kr; ky++)
        {
          real z = *t_ * alpha;
          for(kx = 0; kx < kc; kx++) {
            po_[kx] += z * pw_[-kx];
          }
          po_ += oc; /* next input line */
          pw_ -= kc; /* next mask line */
        }
        t_++;
      }
    }
  } else {
    /* SSE-based convolution */
    /* scatter a whole input row at once per (reversed) kernel tap */
    for(yy = 0; yy < ir; yy++) {
      real *po_ = r_ + yy*sr*oc;
      real *pw_ = k_ + kr*kc -1;
      for (ky = 0; ky < kr; ky++) {
        real *pos_ = po_;
        for (kx = 0; kx < kc; kx++) {
          THVector_(cadd)(pos_, pos_, t_, pw_[-kx]*alpha, ic);
          pos_++;
        }
        po_ += oc; /* next input line */
        pw_ -= kc; /* next mask line */
      }
      t_ += ic;
    }
  }
}
/*
2D Input, 2D kernel : convolve given image with the given kernel, valid convolution.
for sr,sc=1 this is equivalent to validXCorr2Dptr, but otherwise it is useful for
calculating derivatives wrt a kernel that is applied with stride sr,sc != 1
*/
/* Accumulates into r_ the "valid" reverse cross-correlation: for each kernel
   tap, an alpha-scaled, stride-shifted window of t_ is added to the whole
   output. Used for kernel gradients when the forward pass used stride
   (sr, sc). r_ must hold (ir-(kr-1)*sr) x (ic-(kc-1)*sc) elements. */
void THTensor_(validXCorr2DRevptr)(real *r_,
real alpha,
real *t_, long ir, long ic,
real *k_, long kr, long kc,
long sr, long sc)
{
long or = ir - (kr - 1) * sr;
long oc = ic - (kc - 1) * sc;
long xx, yy, kx, ky;
if ((sc != 1) || (kc < 4)) {
/* regular convolution */
for(yy = 0; yy < kr; yy++) {
for(xx = 0; xx < kc; xx++) {
real *po_ = r_;
real *pi_ = t_ + yy*sr*ic + xx*sc;
real z = *k_++ * alpha;
for(ky = 0; ky < or; ky++) {
for(kx = 0; kx < oc; kx++)
po_[kx] += z * pi_[kx];
pi_ += ic;
po_ += oc;
}
}
}
} else {
/* SSE-based convolution */
/* same loop structure; the innermost row add is vectorized */
for(yy = 0; yy < kr; yy++) {
for(xx = 0; xx < kc; xx++) {
real *po_ = r_;
real *pi_ = t_ + yy*sr*ic + xx*sc;
real z = *k_++ * alpha;
for(ky = 0; ky < or; ky++) {
THVector_(cadd)(po_, po_, pi_, z, oc);
pi_ += ic;
po_ += oc;
}
}
}
}
}
/*
3D Input, 3D kernel : convolve given volume with the given kernel.
*/
/* Accumulates into r_ the "valid" 3D cross-correlation of volume t_
   (it x ir x ic) with kernel k_ (kt x kr x kc), strides (st, sr, sc),
   scaled by alpha. r_ must hold ot x or x oc elements (sizes below). */
void THTensor_(validXCorr3Dptr)(real *r_,
real alpha,
real *t_, long it, long ir, long ic,
real *k_, long kt, long kr, long kc,
long st, long sr, long sc)
{
/* valid output size: floor((input - kernel) / stride) + 1 per dimension */
long ot = (it - kt) / st + 1;
long or = (ir - kr) / sr + 1;
long oc = (ic - kc) / sc + 1;
long zz, xx, yy;
for (zz = 0; zz < ot; zz++)
{
for(yy = 0; yy < or; yy++)
{
for(xx = 0; xx < oc; xx++)
{
/* Dot product in two dimensions... (between input image and the mask) */
real *pi_ = t_ + zz*st*ir*ic + yy*sr*ic + xx*sc;
real *pw_ = k_;
real sum = 0;
long kz, kx, ky;
for(kz = 0; kz < kt; kz++)
{
for(ky = 0; ky < kr; ky++)
{
for(kx = 0; kx < kc; kx++) {
sum += pi_[kx]*pw_[kx];
}
pi_ += ic; /* next input line */
pw_ += kc; /* next mask line */
}
pi_ += (ir-kr)*ic; /* next input slice */
}
/* Update output */
*r_++ += sum*alpha;
}
}
}
}
/*
3D Input, 3D kernel : convolve given volume with the given kernel.
*/
/* Accumulates into r_ the "valid" 3D true convolution of volume t_
   (it x ir x ic) with kernel k_ (kt x kr x kc), strides (st, sr, sc), scaled
   by alpha. Same as validXCorr3Dptr except the kernel is traversed in
   reverse (flipped). */
void THTensor_(validConv3Dptr)(real *r_,
real alpha,
real *t_, long it, long ir, long ic,
real *k_, long kt, long kr, long kc,
long st, long sr, long sc)
{
/* valid output size: floor((input - kernel) / stride) + 1 per dimension */
long ot = (it - kt) / st + 1;
long or = (ir - kr) / sr + 1;
long oc = (ic - kc) / sc + 1;
long zz, xx, yy;
for(zz = 0; zz < ot; zz++)
{
for(yy = 0; yy < or; yy++)
{
for(xx = 0; xx < oc; xx++)
{
/* Dot product in two dimensions... (between input image and the mask) */
real *pi_ = t_ + zz*st*ir*ic + yy*sr*ic + xx*sc;
real *pw_ = k_ + kt*kr*kc - 1; /* last kernel element: flipped traversal */
real sum = 0;
long kz, kx, ky;
for(kz = 0; kz < kt; kz++)
{
for(ky = 0; ky < kr; ky++)
{
for(kx = 0; kx < kc; kx++) {
sum += pi_[kx]*pw_[-kx];
}
pi_ += ic; /* next input line */
pw_ -= kc; /* next mask line */
}
pi_ += (ir-kr)*ic; /* next input slice */
}
/* Update output */
*r_++ += alpha*sum;
}
}
}
}
/*
3D Input, 3D kernel : convolve given volume with the given kernel, full convolution.
*/
/* 3D 'full' convolution, scatter formulation: instead of gathering a dot
   product per output element, each input element t_[zz][yy][xx] is scaled
   by alpha and its outer product with the kernel is accumulated into the
   corresponding output window. Output volume r_ has extents
   ((it-1)*st+kt) x or x oc and must be pre-initialized by the caller
   (this routine only does +=). */
void THTensor_(fullConv3Dptr)(real *r_,
                              real alpha,
                              real *t_, long it, long ir, long ic,
                              real *k_, long kt, long kr, long kc,
                              long st, long sr, long sc)
{
  /* output row/col extents of a 'full' convolution */
  long or = (ir - 1) * sr + kr;
  long oc = (ic - 1) * sc + kc;
  long zz, xx, yy;
  for(zz = 0; zz < it; zz++)
  {
    for(yy = 0; yy < ir; yy++)
    {
      for(xx = 0; xx < ic; xx++)
      {
        /* Outer product in two dimensions... (between input image and the mask) */
        real *po_ = r_ + zz*st*or*oc + yy*sr*oc + xx*sc;
        real *pw_ = k_;
        long kz, kx, ky;
        /* printf("Output Plane : %ld,%ld,%ld, input val=%g\n",zz,yy,xx,*t_); */
        for(kz = 0; kz < kt; kz++)
        {
          for(ky = 0; ky < kr; ky++)
          {
            real z = *t_ * alpha;   /* current input element, pre-scaled */
            for(kx = 0; kx < kc; kx++) {
              /* printf("o=%g,k=%g," , po_[kx],pw_[kx]); */
              po_[kx] += z * pw_[kx];
              /* printf("o=%g " , po_[kx]); */
            }
            /* printf("\n"); */
            po_ += oc; /* next input line */
            pw_ += kc; /* next mask line */
          }
          po_ += (or-kr)*oc; /* next output slice */
          /* printf("\n"); */
        }
        t_++;   /* advance to the next input element */
      }
    }
  }
}
/*
3D Input, 3D kernel : convolve given volume with the given kernel, full convolution.
*/
/* 3D 'full' cross-correlation, scatter formulation: same traversal as
   fullConv3Dptr, but the kernel is read back-to-front (pw_ starts at the
   last element and walks backwards), which in the scatter form yields a
   correlation rather than a convolution. r_ is accumulated into (+=). */
void THTensor_(fullXCorr3Dptr)(real *r_,
                               real alpha,
                               real *t_, long it, long ir, long ic,
                               real *k_, long kt, long kr, long kc,
                               long st, long sr, long sc)
{
  /* output row/col extents of a 'full' correlation */
  long or = (ir - 1) * sr + kr;
  long oc = (ic - 1) * sc + kc;
  long zz, xx, yy;
  for(zz = 0; zz < it; zz++)
  {
    for(yy = 0; yy < ir; yy++)
    {
      for(xx = 0; xx < ic; xx++)
      {
        /* Outer product in two dimensions... (between input image and the mask) */
        real *po_ = r_ + zz*st*or*oc + yy*sr*oc + xx*sc;
        real *pw_ = k_ + kt*kr*kc -1;   /* start at the LAST kernel element */
        long kz, kx, ky;
        for(kz = 0; kz < kt; kz++)
        {
          for(ky = 0; ky < kr; ky++)
          {
            real z = *t_ * alpha;   /* current input element, pre-scaled */
            for(kx = 0; kx < kc; kx++) {
              po_[kx] += z * pw_[-kx];   /* kernel walked backwards */
            }
            po_ += oc; /* next input line */
            pw_ -= kc; /* next mask line */
          }
          po_ += (or-kr)*oc; /* next output slice */
        }
        t_++;   /* advance to the next input element */
      }
    }
  }
}
/*
  3D Input, 3D kernel : convolve given image with the given kernel, valid convolution.
  for st,sr,sc=1 this is equivalent to validXCorr3Dptr, but otherwise it is useful for
  calculating derivatives wrt a kernel that is applied with stride st,sr,sc != 1
*/
/* 'Reversed' 3D valid cross-correlation: the roles of kernel and window
   are swapped. The outer loops run over the kt x kr x kc kernel positions;
   for each kernel element (scaled by alpha), the matching strided input
   window of extent ot x or x oc is accumulated into r_. Per the file's
   own note, for unit strides this equals validXCorr3Dptr and is used for
   gradients w.r.t. a kernel applied with stride != 1. r_ is += only. */
void THTensor_(validXCorr3DRevptr)(real *r_,
                                   real alpha,
                                   real *t_, long it, long ir, long ic,
                                   real *k_, long kt, long kr, long kc,
                                   long st, long sr, long sc)
{
  /* extents of the accumulated window (output volume) */
  long ot = it - (kt - 1) * st;
  long or = ir - (kr - 1) * sr;
  long oc = ic - (kc - 1) * sc;
  long zz, xx, yy;
  for(zz = 0; zz < kt; zz++)
  {
    for(yy = 0; yy < kr; yy++)
    {
      for(xx = 0; xx < kc; xx++)
      {
        real *po_ = r_;
        real *pi_ = t_ + zz*st*ir*ic + yy*sr*ic + xx*sc;
        real z = *k_++ * alpha;   /* current kernel element, pre-scaled */
        long kz, kx, ky;
        for(kz = 0; kz < ot; kz++)
        {
          for(ky = 0; ky < or; ky++)
          {
            for(kx = 0; kx < oc; kx++)
              po_[kx] += z * pi_[kx];
            pi_ += ic;
            po_ += oc;
          }
          pi_ += (ir-or)*ic; /* next input slice */
        }
      }
    }
  }
}
/* Dispatch a single-plane 2D convolution to one of the four low-level
   pointer kernels, selected by vf ('V' = valid, 'F' = full) and
   xc ('X' = cross-correlation, 'C' = true convolution). The result is
   accumulated into output_data scaled by alpha. */
void THTensor_(conv2d)(real* output_data,
                       real alpha,
                       real* ptr_input, long nInputRows, long nInputCols,
                       real* ptr_weight, long nKernelRows, long nKernelCols,
                       long srow, long scol,
                       const char *vf, const char *xc)
{
  THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can be 'V' or 'F'");
  THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of convolution can be 'X' or 'C'");
  {
    const int is_full  = (*vf == 'F');
    const int is_xcorr = (*xc == 'X');
    if (is_full && is_xcorr) {
      THTensor_(fullXCorr2Dptr)(output_data, alpha,
                                ptr_input, nInputRows, nInputCols,
                                ptr_weight, nKernelRows, nKernelCols,
                                srow, scol);
    } else if (is_full) {
      THTensor_(fullConv2Dptr)(output_data, alpha,
                               ptr_input, nInputRows, nInputCols,
                               ptr_weight, nKernelRows, nKernelCols,
                               srow, scol);
    } else if (is_xcorr) {
      THTensor_(validXCorr2Dptr)(output_data, alpha,
                                 ptr_input, nInputRows, nInputCols,
                                 ptr_weight, nKernelRows, nKernelCols,
                                 srow, scol);
    } else {
      THTensor_(validConv2Dptr)(output_data, alpha,
                                ptr_input, nInputRows, nInputCols,
                                ptr_weight, nKernelRows, nKernelCols,
                                srow, scol);
    }
  }
}
/* Dispatch a single-plane 3D convolution to one of the four low-level
   pointer kernels, selected by vf ('V' = valid, 'F' = full) and
   xc ('X' = cross-correlation, 'C' = true convolution). The result is
   accumulated into output_data scaled by alpha. */
void THTensor_(conv3d)(real* output_data,
                       real alpha,
                       real* ptr_input, long nInputDepth, long nInputRows, long nInputCols,
                       real* ptr_weight, long nKernelDepth, long nKernelRows, long nKernelCols,
                       long sdepth, long srow, long scol,
                       const char *vf, const char *xc)
{
  THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can be 'V' or 'F'");
  THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of convolution can be 'X' or 'C'");
  {
    const int is_full  = (*vf == 'F');
    const int is_xcorr = (*xc == 'X');
    if (is_full && is_xcorr) {
      THTensor_(fullXCorr3Dptr)(output_data, alpha,
                                ptr_input, nInputDepth, nInputRows, nInputCols,
                                ptr_weight, nKernelDepth, nKernelRows, nKernelCols,
                                sdepth, srow, scol);
    } else if (is_full) {
      THTensor_(fullConv3Dptr)(output_data, alpha,
                               ptr_input, nInputDepth, nInputRows, nInputCols,
                               ptr_weight, nKernelDepth, nKernelRows, nKernelCols,
                               sdepth, srow, scol);
    } else if (is_xcorr) {
      THTensor_(validXCorr3Dptr)(output_data, alpha,
                                 ptr_input, nInputDepth, nInputRows, nInputCols,
                                 ptr_weight, nKernelDepth, nKernelRows, nKernelCols,
                                 sdepth, srow, scol);
    } else {
      THTensor_(validConv3Dptr)(output_data, alpha,
                                ptr_input, nInputDepth, nInputRows, nInputCols,
                                ptr_weight, nKernelDepth, nKernelRows, nKernelCols,
                                sdepth, srow, scol);
    }
  }
}
/* Output extent along one dimension for input size x, kernel size k and
   stride s: (x-k)/s + 1 for a 'valid' ('V') convolution, (x-1)*s + k for
   a 'full' ('F') one. */
long THTensor_(convsize)(long x, long k, long s, const char* vf)
{
  THArgCheck(*vf == 'V' || *vf == 'F', 1, "type of convolution can be 'V' or 'F'");
  return (*vf == 'V') ? ((x - k) / s + 1) : ((x - 1) * s + k);
}
/*
3D input, 3D kernel, 4D output
like rank1 update
A <- xx' + beta*A
for sr,sc=1 this is equivalent to conv2Dger, but otherwise it is useful for
calculating derivatives wrt a kernel that is applied with stride sr,sc != 1
*/
/* Reversed rank-1 update, 3D input x 3D kernel -> 4D output:
   r_ <- beta*r_ + alpha * (input (x) kernel) with the reversed (gradient)
   correlation, one output plane per (kernel plane, input plane) pair.
   For srow,scol = 1 this equals conv2Dger; otherwise it computes
   derivatives w.r.t. a kernel applied with stride != 1.
   Fix: error message said "covn2DRevger" (typo); also dropped the unused
   local nOutputPlane. */
void THTensor_(conv2DRevger)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long srow, long scol)
{
  long nInputPlane, nInputRows, nInputCols;
  long nKernelPlane, nKernelRows, nKernelCols;
  long nOutputRows, nOutputCols;
  long istride0, kstride0;
  THTensor *input;
  THTensor *kernel;
  real *input_data;
  real *weight_data;
  real *output_data;
  ptrdiff_t nelem;
  long k;
  THArgCheck(t_->nDimension == 3 , 3, "input: 3D Tensor expected");
  THArgCheck(k_->nDimension == 3 , 4, "kernel: 3D Tensor expected");
  THArgCheck(srow >= 1, 5, "Stride should be a positive integer");
  THArgCheck(scol >= 1, 6, "Stride should be a positive integer");
  /* work on contiguous copies so the flat pointer arithmetic below is valid */
  input = THTensor_(newContiguous)(t_);
  kernel = THTensor_(newContiguous)(k_);
  nInputPlane = input->size[0];
  istride0 = input->stride[0];
  nInputRows = input->size[1];
  nInputCols = input->size[2];
  kstride0 = kernel->stride[0];
  nKernelPlane = kernel->size[0];
  nKernelRows = kernel->size[1];
  nKernelCols = kernel->size[2];
  THArgCheck(nInputRows >= nKernelRows && nInputCols >= nKernelCols , 2, "conv2DRevger : Input image is smaller than kernel");
  /* reversed-correlation output extents (see validXCorr2DRevptr) */
  nOutputRows = nInputRows - (nKernelRows - 1) * srow;
  nOutputCols = nInputCols - (nKernelCols - 1) * scol;
  nelem = THTensor_(nElement)(r_);
  THTensor_(resize4d)(r_,nKernelPlane, nInputPlane, nOutputRows, nOutputCols);
  input_data = THTensor_(data)(input);
  weight_data = THTensor_(data)(kernel);
  output_data = THTensor_(data)(r_);
  if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
  {
    /* result freshly allocated or beta == 0: clear it (plane-parallel zero) */
    /*THTensor_(zero)(r_);*/
    #pragma omp parallel for private(k)
    for (k = 0; k < r_->size[0]*r_->size[1]; k++)
    {
      real* ptr_output = output_data + k*nOutputCols*nOutputRows;
      long l;
      for (l = 0; l < nOutputRows*nOutputCols; l++)
        ptr_output[l] = 0.0;
    }
  }
  else if (beta != 1)
  {
    /* scale the existing result by beta before accumulating */
    /*THTensor_(mul)(r_, beta);*/
    #pragma omp parallel for private(k)
    for (k = 0; k < r_->size[0]*r_->size[1]; k++)
    {
      real* ptr_output = output_data + k*nOutputCols*nOutputRows;
      long l;
      for (l = 0; l < nOutputRows*nOutputCols; l++)
        ptr_output[l] *= beta;
    }
  }
  /* kernel planes are independent, so parallelize over them */
  #pragma omp parallel for private(k)
  for(k = 0; k < nKernelPlane; k++)
  {
    long i;
    /* get kernel */
    real *ptr_weight = weight_data+k*kstride0;
    for(i = 0; i < nInputPlane; i++)
    {
      /* get output */
      real *ptr_output = output_data + k*nInputPlane*nOutputCols*nOutputRows + i*nOutputCols*nOutputRows;
      /* get input */
      real *ptr_input = input_data+i*istride0;
      /* do image, kernel convolution */
      THTensor_(validXCorr2DRevptr)(ptr_output,
                                    alpha,
                                    ptr_input, nInputRows, nInputCols,
                                    ptr_weight, nKernelRows, nKernelCols,
                                    srow, scol);
      /* Next output plane */
      /* output_data += nOutputCols*nOutputRows; */
    }
  }
  THTensor_(free)(input);
  THTensor_(free)(kernel);
}
/*
  4D input (batch), 4D kernel (batch), 4D output
  like rank1 update
  A <- xx' + beta*A
  batched variant of conv2DRevger: the batch dimension of input and kernel is
  reduced (summed) into the output; useful for calculating derivatives wrt a
  kernel that is applied with stride sr,sc != 1
*/
/* Batched reversed rank-1 update, 4D input x 4D kernel -> 4D output:
   like conv2DRevger but with a leading batch dimension that is summed
   over (input and kernel batch sizes must match).
   Fix: both error messages referred to "conv2DRevger"; they now name
   this function, conv2DRevgerm. */
void THTensor_(conv2DRevgerm)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long srow, long scol)
{
  long nbatch, nInputPlane, nInputRows, nInputCols;
  long nKernelPlane, nKernelRows, nKernelCols;
  long nOutputRows, nOutputCols;
  long istride0, kstride0, istride1, kstride1;
  THTensor *input;
  THTensor *kernel;
  real *input_data;
  real *weight_data;
  real *output_data;
  ptrdiff_t nelem;
  long k;
  THArgCheck(t_->nDimension == 4 , 3, "input: 4D Tensor expected");
  THArgCheck(k_->nDimension == 4 , 4, "kernel: 4D Tensor expected");
  THArgCheck(srow >= 1, 5, "Stride should be a positive integer");
  THArgCheck(scol >= 1, 6, "Stride should be a positive integer");
  /* work on contiguous copies so the flat pointer arithmetic below is valid */
  input = THTensor_(newContiguous)(t_);
  kernel = THTensor_(newContiguous)(k_);
  istride0 = input->stride[0];
  istride1 = input->stride[1];
  nbatch = input->size[0];
  nInputPlane = input->size[1];
  nInputRows = input->size[2];
  nInputCols = input->size[3];
  kstride0 = kernel->stride[0];
  kstride1 = kernel->stride[1];
  nKernelPlane = kernel->size[1];
  nKernelRows = kernel->size[2];
  nKernelCols = kernel->size[3];
  THArgCheck(nInputRows >= nKernelRows && nInputCols >= nKernelCols , 2, "conv2DRevgerm : Input image is smaller than kernel");
  THArgCheck(kernel->size[0] == input->size[0] , 2, "conv2DRevgerm : Input batch and kernel batch is not same size");
  /* reversed-correlation output extents (see validXCorr2DRevptr) */
  nOutputRows = nInputRows - (nKernelRows - 1) * srow;
  nOutputCols = nInputCols - (nKernelCols - 1) * scol;
  nelem = THTensor_(nElement)(r_);
  THTensor_(resize4d)(r_,nKernelPlane, nInputPlane, nOutputRows, nOutputCols);
  input_data = THTensor_(data)(input);
  weight_data = THTensor_(data)(kernel);
  output_data = THTensor_(data)(r_);
  if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
  {
    /* result freshly allocated or beta == 0: clear it (plane-parallel zero) */
    /*THTensor_(zero)(r_);*/
    #pragma omp parallel for private(k)
    for (k = 0; k < r_->size[0]*r_->size[1]; k++)
    {
      real* ptr_output = output_data + k*nOutputCols*nOutputRows;
      long l;
      for (l = 0; l < nOutputRows*nOutputCols; l++)
        ptr_output[l] = 0.0;
    }
  }
  else if (beta != 1)
  {
    /* scale the existing result by beta before accumulating */
    /*THTensor_(mul)(r_, beta);*/
    #pragma omp parallel for private(k)
    for (k = 0; k < r_->size[0]*r_->size[1]; k++)
    {
      real* ptr_output = output_data + k*nOutputCols*nOutputRows;
      long l;
      for (l = 0; l < nOutputRows*nOutputCols; l++)
        ptr_output[l] *= beta;
    }
  }
  /* kernel planes are independent, so parallelize over them;
     the batch dimension is reduced sequentially into each output plane */
  #pragma omp parallel for private(k)
  for(k = 0; k < nKernelPlane; k++)
  {
    long i;
    for(i = 0; i < nInputPlane; i++)
    {
      long p;
      for(p = 0; p < nbatch; p++)
      {
        /* get kernel */
        real *ptr_weight = weight_data + p*kstride0 + k*kstride1;
        /* get output */
        real *ptr_output = output_data + k*nInputPlane*nOutputCols*nOutputRows + i*nOutputCols*nOutputRows;
        /* get input */
        real *ptr_input = input_data + p*istride0 + i*istride1;
        /* do image, kernel convolution */
        THTensor_(validXCorr2DRevptr)(ptr_output,
                                      alpha,
                                      ptr_input, nInputRows, nInputCols,
                                      ptr_weight, nKernelRows, nKernelCols,
                                      srow, scol);
        /* Next output plane */
        /* output_data += nOutputCols*nOutputRows; */
      }
    }
  }
  THTensor_(free)(input);
  THTensor_(free)(kernel);
}
/*
3D input, 3D kernel, 4D output
like rank1 update
A <- xx' + beta*A
*/
/* Rank-1 style update, 3D input x 3D kernel -> 4D output:
   r_ <- beta*r_ + alpha * conv(input plane i, kernel plane k) for every
   (k, i) pair; vf selects 'V'alid/'F'ull, xc selects 'X'corr/'C'onv.
   Fixes: the two vf/xc error messages were missing "be" (now consistent
   with conv2d); removed the unused local nOutputPlane. */
void THTensor_(conv2Dger)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long srow, long scol, const char *vf, const char *xc)
{
  long nInputPlane, nInputRows, nInputCols;
  long nKernelPlane, nKernelRows, nKernelCols;
  long nOutputRows, nOutputCols;
  long istride0, kstride0;
  THTensor *input;
  THTensor *kernel;
  real *input_data;
  real *weight_data;
  real *output_data;
  ptrdiff_t nelem;
  long k;
  THArgCheck(t_->nDimension == 3 , 3, "input: 3D Tensor expected");
  THArgCheck(k_->nDimension == 3 , 4, "kernel: 3D Tensor expected");
  THArgCheck(srow >= 1, 5, "Stride should be a positive integer");
  THArgCheck(scol >= 1, 6, "Stride should be a positive integer");
  THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can be 'V' or 'F'");
  THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of convolution can be 'X' or 'C'");
  /* work on contiguous copies so the flat pointer arithmetic below is valid */
  input = THTensor_(newContiguous)(t_);
  kernel = THTensor_(newContiguous)(k_);
  nInputPlane = input->size[0];
  istride0 = input->stride[0];
  nInputRows = input->size[1];
  nInputCols = input->size[2];
  kstride0 = kernel->stride[0];
  nKernelPlane = kernel->size[0];
  nKernelRows = kernel->size[1];
  nKernelCols = kernel->size[2];
  THArgCheck((nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv2Dger : Input image is smaller than kernel");
  if (*vf == 'F') {
    nOutputRows = (nInputRows - 1) * srow + nKernelRows;
    nOutputCols = (nInputCols - 1) * scol + nKernelCols;
  } else { /* valid */
    nOutputRows = (nInputRows - nKernelRows) / srow + 1;
    nOutputCols = (nInputCols - nKernelCols) / scol + 1;
  }
  nelem = THTensor_(nElement)(r_);
  THTensor_(resize4d)(r_, nKernelPlane, nInputPlane, nOutputRows, nOutputCols);
  input_data = THTensor_(data)(input);
  weight_data = THTensor_(data)(kernel);
  output_data = THTensor_(data)(r_);
  if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
  {
    /* result freshly allocated or beta == 0: clear it (plane-parallel zero) */
    /*THTensor_(zero)(r_);*/
    #pragma omp parallel for private(k)
    for (k = 0; k < r_->size[0]*r_->size[1]; k++)
    {
      real* ptr_output = output_data + k*nOutputCols*nOutputRows;
      long l;
      for (l = 0; l < nOutputRows*nOutputCols; l++)
        ptr_output[l] = 0.0;
    }
  }
  else if (beta != 1)
  {
    /* scale the existing result by beta before accumulating */
    /*THTensor_(mul)(r_, beta);*/
    #pragma omp parallel for private(k)
    for (k = 0; k < r_->size[0]*r_->size[1]; k++)
    {
      real* ptr_output = output_data + k*nOutputCols*nOutputRows;
      long l;
      for (l = 0; l < nOutputRows*nOutputCols; l++)
        ptr_output[l] *= beta;
    }
  }
  /* kernel planes are independent, so parallelize over them */
  #pragma omp parallel for private(k)
  for(k = 0; k < nKernelPlane; k++)
  {
    long i;
    /* get kernel */
    real *ptr_weight = weight_data+k*kstride0;
    for(i = 0; i < nInputPlane; i++)
    {
      /* get output */
      real *ptr_output = output_data + k*nInputPlane*nOutputCols*nOutputRows + i*nOutputCols*nOutputRows;
      /* get input */
      real *ptr_input = input_data+i*istride0;
      /* do image, kernel convolution */
      if (*vf == 'F')
        if (*xc == 'X')
          THTensor_(fullXCorr2Dptr)(ptr_output,
                                    alpha,
                                    ptr_input, nInputRows, nInputCols,
                                    ptr_weight, nKernelRows, nKernelCols,
                                    srow, scol);
        else
          THTensor_(fullConv2Dptr)(ptr_output,
                                   alpha,
                                   ptr_input, nInputRows, nInputCols,
                                   ptr_weight, nKernelRows, nKernelCols,
                                   srow, scol);
      else
        if (*xc == 'X')
          THTensor_(validXCorr2Dptr)(ptr_output,
                                     alpha,
                                     ptr_input, nInputRows, nInputCols,
                                     ptr_weight, nKernelRows, nKernelCols,
                                     srow, scol);
        else
          THTensor_(validConv2Dptr)(ptr_output,
                                    alpha,
                                    ptr_input, nInputRows, nInputCols,
                                    ptr_weight, nKernelRows, nKernelCols,
                                    srow, scol);
      /* Next output plane */
      /* output_data += nOutputCols*nOutputRows; */
    }
  }
  THTensor_(free)(input);
  THTensor_(free)(kernel);
}
/*
3D input, 4D kernel, 3D output
matrix vector product like
y <- Ax + beta*y
*/
/* Matrix-vector style convolution, 3D input x 4D kernel -> 3D output:
   r_ <- beta*r_ + alpha * sum_i conv(input plane i, kernel[k][i]),
   one output plane per kernel row k. vf selects 'V'alid/'F'ull,
   xc selects 'X'corr/'C'onv.
   Fix: the two vf/xc error messages were missing "be" (now consistent
   with conv2d). */
void THTensor_(conv2Dmv)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long srow, long scol, const char *vf, const char *xc)
{
  long nInputPlane, nInputRows, nInputCols;
  long nKernelRows, nKernelCols;
  long nOutputPlane, nOutputRows, nOutputCols;
  long istride0, kstride0, kstride1;
  THTensor *input;
  THTensor* kernel;
  real *input_data;
  real *weight_data;
  real *output_data;
  ptrdiff_t nelem;
  long k;
  THArgCheck(t_->nDimension == 3 , 3, "input: 3D Tensor expected");
  THArgCheck(k_->nDimension == 4 , 4, "kernel: 4D Tensor expected");
  THArgCheck(srow >= 1, 5, "Stride should be a positive integer");
  THArgCheck(scol >= 1, 6, "Stride should be a positive integer");
  THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can be 'V' or 'F'");
  THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of convolution can be 'X' or 'C'");
  input = THTensor_(newContiguous)(t_);
  /* only copy the kernel if its innermost 2D slices are not contiguous */
  if (!(k_->stride[3] == 1) || !(k_->stride[2] == k_->size[3])) {
    kernel = THTensor_(newContiguous)(k_);
  } else {
    THTensor_(retain)(k_);
    kernel = k_;
  }
  nInputPlane = input->size[0];
  istride0 = input->stride[0];
  nInputRows = input->size[1];
  nInputCols = input->size[2];
  kstride0 = kernel->stride[0];
  kstride1 = kernel->stride[1];
  nKernelRows = kernel->size[2];
  nKernelCols = kernel->size[3];
  nOutputPlane = kernel->size[0];
  THArgCheck(kernel->size[1] == nInputPlane, 2, "invalid number of input planes");
  THArgCheck( (nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv2Dmv : Input image is smaller than kernel");
  if (*vf == 'F') {
    nOutputRows = (nInputRows - 1) * srow + nKernelRows;
    nOutputCols = (nInputCols - 1) * scol + nKernelCols;
  } else { /* valid */
    nOutputRows = (nInputRows - nKernelRows) / srow + 1;
    nOutputCols = (nInputCols - nKernelCols) / scol + 1;
  }
  nelem = THTensor_(nElement)(r_);
  THTensor_(resize3d)(r_, nOutputPlane, nOutputRows, nOutputCols);
  input_data = THTensor_(data)(input);
  weight_data = THTensor_(data)(kernel);
  output_data = THTensor_(data)(r_);
  if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
  {
    /* result freshly allocated or beta == 0: clear it (plane-parallel zero) */
    /*THTensor_(zero)(r_);*/
    #pragma omp parallel for private(k)
    for (k = 0; k < r_->size[0]; k++)
    {
      real* ptr_output = output_data + k*nOutputCols*nOutputRows;
      long l;
      for (l = 0; l < nOutputRows*nOutputCols; l++)
        ptr_output[l] = 0.0;
    }
  }
  else if (beta != 1)
  {
    /* scale the existing result by beta before accumulating */
    /*THTensor_(mul)(r_, beta);*/
    #pragma omp parallel for private(k)
    for (k = 0; k < r_->size[0]; k++)
    {
      real* ptr_output = output_data + k*nOutputCols*nOutputRows;
      long l;
      for (l = 0; l < nOutputRows*nOutputCols; l++)
        ptr_output[l] *= beta;
    }
  }
  /* output planes are independent, so parallelize over them */
  #pragma omp parallel for private(k)
  for(k = 0; k < nOutputPlane; k++)
  {
    long i;
    /* get output */
    real *ptr_output = output_data + k*nOutputCols*nOutputRows;
    for(i = 0; i < nInputPlane; i++)
    {
      /* get kernel */
      real *ptr_weight = weight_data + k*kstride0 + i*kstride1;
      /* get input */
      real *ptr_input = input_data + i*istride0;
      /* do image, kernel convolution */
      if (*vf == 'F')
        if (*xc == 'X')
          THTensor_(fullXCorr2Dptr)(ptr_output,
                                    alpha,
                                    ptr_input, nInputRows, nInputCols,
                                    ptr_weight, nKernelRows, nKernelCols,
                                    srow, scol);
        else
          THTensor_(fullConv2Dptr)(ptr_output,
                                   alpha,
                                   ptr_input, nInputRows, nInputCols,
                                   ptr_weight, nKernelRows, nKernelCols,
                                   srow, scol);
      else
        if (*xc == 'X')
          THTensor_(validXCorr2Dptr)(ptr_output,
                                     alpha,
                                     ptr_input, nInputRows, nInputCols,
                                     ptr_weight, nKernelRows, nKernelCols,
                                     srow, scol);
        else
          THTensor_(validConv2Dptr)(ptr_output,
                                    alpha,
                                    ptr_input, nInputRows, nInputCols,
                                    ptr_weight, nKernelRows, nKernelCols,
                                    srow, scol);
    }
    /* Next output plane */
    /* output_data += nOutputCols*nOutputRows;*/
  }
  THTensor_(free)(input);
  THTensor_(free)(kernel);
}
/*
  4D input (batched), 4D kernel, 4D output
  batched matrix matrix product like
  y <- Ax + beta*y   (applied independently to each batch element)
*/
/* Batched matrix-matrix style convolution, 4D input x 4D kernel -> 4D
   output: for each batch element p and output plane k,
   r_[p][k] <- beta*r_[p][k] + alpha * sum_i conv(input[p][i], kernel[k][i]).
   vf selects 'V'alid/'F'ull, xc selects 'X'corr/'C'onv.
   Fixes: the "smaller than kernel" error message named conv2Dmv instead
   of conv2Dmm; the two vf/xc messages were missing "be". */
void THTensor_(conv2Dmm)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long srow, long scol, const char *vf, const char *xc)
{
  long nInputPlane, nInputRows, nInputCols;
  long nKernelRows, nKernelCols;
  long nOutputPlane, nOutputRows, nOutputCols;
  long kstride0, kstride1;
  THTensor *input;
  THTensor* kernel;
  long nbatch;
  ptrdiff_t nelem;
  real *input_data;
  real *weight_data;
  real *output_data;
  long p;
  THArgCheck(t_->nDimension == 4 , 3, "input: 4D Tensor expected");
  THArgCheck(k_->nDimension == 4 , 4, "kernel: 4D Tensor expected");
  THArgCheck(srow >= 1, 5, "Stride should be a positive integer");
  THArgCheck(scol >= 1, 6, "Stride should be a positive integer");
  THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can be 'V' or 'F'");
  THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of convolution can be 'X' or 'C'");
  input = THTensor_(newContiguous)(t_);
  /* only copy the kernel if its innermost 2D slices are not contiguous */
  if (!(k_->stride[3] == 1) || !(k_->stride[2] == k_->size[3])) {
    kernel = THTensor_(newContiguous)(k_);
  } else {
    THTensor_(retain)(k_);
    kernel = k_;
  }
  nbatch = input->size[0];
  nInputPlane = input->size[1];
  nInputRows = input->size[2];
  nInputCols = input->size[3];
  kstride0 = kernel->stride[0];
  kstride1 = kernel->stride[1];
  nKernelRows = kernel->size[2];
  nKernelCols = kernel->size[3];
  nOutputPlane = kernel->size[0];
  THArgCheck(kernel->size[1] == nInputPlane, 2, "invalid number of input planes");
  THArgCheck( (nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv2Dmm : Input image is smaller than kernel");
  if (*vf == 'F') {
    nOutputRows = (nInputRows - 1) * srow + nKernelRows;
    nOutputCols = (nInputCols - 1) * scol + nKernelCols;
  } else { /* valid */
    nOutputRows = (nInputRows - nKernelRows) / srow + 1;
    nOutputCols = (nInputCols - nKernelCols) / scol + 1;
  }
  nelem = THTensor_(nElement)(r_);
  THTensor_(resize4d)(r_, nbatch, nOutputPlane, nOutputRows, nOutputCols);
  input_data = THTensor_(data)(input);
  weight_data = THTensor_(data)(kernel);
  output_data = THTensor_(data)(r_);
  if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
  {
    /* result freshly allocated or beta == 0: clear it (batch-parallel zero) */
    /*THTensor_(zero)(r_);*/
    #pragma omp parallel for private(p)
    for (p=0; p < r_->size[0]; p++)
    {
      long k;
      for (k = 0; k < r_->size[1]; k++)
      {
        real* ptr_output = output_data + p*nOutputPlane*nOutputRows*nOutputCols + k*nOutputCols*nOutputRows;
        long l;
        for (l = 0; l < nOutputRows*nOutputCols; l++)
          ptr_output[l] = 0.0;
      }
    }
  }
  else if (beta != 1)
  {
    /* scale the existing result by beta before accumulating */
    /*THTensor_(mul)(r_, beta);*/
    #pragma omp parallel for private(p)
    for(p=0; p < r_->size[0]; p++)
    {
      long k;
      for (k = 0; k < r_->size[1]; k++)
      {
        real* ptr_output = output_data + p*nOutputPlane*nOutputRows*nOutputCols + k*nOutputCols*nOutputRows;
        long l;
        for (l = 0; l < nOutputRows*nOutputCols; l++)
          ptr_output[l] *= beta;
      }
    }
  }
  /* batch elements are independent, so parallelize over them */
  #pragma omp parallel for private(p)
  for(p=0; p < nbatch; p++)
  {
    long k;
    for(k = 0; k < nOutputPlane; k++)
    {
      long i;
      /* get output */
      real *ptr_output = output_data + p*nOutputPlane*nOutputCols*nOutputRows + k*nOutputCols*nOutputRows;
      for(i = 0; i < nInputPlane; i++)
      {
        /* get kernel */
        real *ptr_weight = weight_data + k*kstride0 + i*kstride1;
        /* get input */
        real *ptr_input = input_data + p*nInputPlane*nInputRows*nInputCols + i*nInputRows*nInputCols;
        /* do image, kernel convolution */
        if (*vf == 'F')
          if (*xc == 'X')
            THTensor_(fullXCorr2Dptr)(ptr_output,
                                      alpha,
                                      ptr_input, nInputRows, nInputCols,
                                      ptr_weight, nKernelRows, nKernelCols,
                                      srow, scol);
          else
            THTensor_(fullConv2Dptr)(ptr_output,
                                     alpha,
                                     ptr_input, nInputRows, nInputCols,
                                     ptr_weight, nKernelRows, nKernelCols,
                                     srow, scol);
        else
          if (*xc == 'X')
            THTensor_(validXCorr2Dptr)(ptr_output,
                                       alpha,
                                       ptr_input, nInputRows, nInputCols,
                                       ptr_weight, nKernelRows, nKernelCols,
                                       srow, scol);
          else
            THTensor_(validConv2Dptr)(ptr_output,
                                      alpha,
                                      ptr_input, nInputRows, nInputCols,
                                      ptr_weight, nKernelRows, nKernelCols,
                                      srow, scol);
      }
      /* Next output plane */
      /* output_data += nOutputCols*nOutputRows;*/
    }
  }
  THTensor_(free)(input);
  THTensor_(free)(kernel);
}
/*
2D input, 2D kernel, 2D output
scalar multiplication like
y <- x*y + beta*y
*/
/* Single-plane 2D convolution on whole tensors:
   r_ <- beta*r_ + alpha * conv2d(t_, k_), with vf selecting
   'V'alid/'F'ull and xc selecting 'X'corr/'C'onv. Resizes r_ to the
   output extents computed by convsize. */
void THTensor_(conv2Dmul)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long srow, long scol, const char *vf, const char *xc)
{
  THTensor *input;
  THTensor* kernel;
  long nInputRows;
  long nInputCols;
  long nKernelRows;
  long nKernelCols;
  long nOutputRows, nOutputCols;
  real *ptr_input;
  real *ptr_weight;
  real *output_data;
  ptrdiff_t nelem;
  THArgCheck(t_->nDimension == 2 , 3, "input: 2D Tensor expected");
  THArgCheck(k_->nDimension == 2 , 4, "kernel: 2D Tensor expected");
  THArgCheck(srow >= 1, 5, "Stride should be a positive integer");
  THArgCheck(scol >= 1, 6, "Stride should be a positive integer");
  /* work on contiguous copies so conv2d's flat pointers are valid */
  input = THTensor_(newContiguous)(t_);
  kernel = THTensor_(newContiguous)(k_);
  nInputRows = input->size[0];
  nInputCols = input->size[1];
  nKernelRows = kernel->size[0];
  nKernelCols = kernel->size[1];
  THArgCheck((nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv2Dmul : Input image is smaller than kernel");
  nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf);
  nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf);
  nelem = THTensor_(nElement)(r_);
  THTensor_(resize2d)(r_, nOutputRows, nOutputCols);
  /* fresh/resized result or beta == 0: zero it; otherwise pre-scale by beta */
  if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
    THTensor_(zero)(r_);
  else if (beta != 1)
    THTensor_(mul)(r_, r_, beta);
  ptr_input = THTensor_(data)(input);
  ptr_weight = THTensor_(data)(kernel);
  output_data = THTensor_(data)(r_);
  /* do image, kernel convolution */
  THTensor_(conv2d)(output_data,
                    alpha,
                    ptr_input, nInputRows, nInputCols,
                    ptr_weight, nKernelRows, nKernelCols,
                    srow, scol, vf, xc);
  THTensor_(free)(input);
  THTensor_(free)(kernel);
}
/*
3D input, 3D kernel, 3D output
component wise multiplication like
y <- y.*x + beta*y
*/
/* Component-wise 2D convolution, 3D input x 3D kernel -> 3D output:
   plane k of r_ <- beta*r_[k] + alpha * conv2d(input plane k,
   kernel plane k). Input and kernel must have the same number of planes. */
void THTensor_(conv2Dcmul)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long srow, long scol, const char *vf, const char *xc)
{
  long nInputPlane, nInputRows, nInputCols;
  long nKernelRows, nKernelCols;
  long nOutputPlane, nOutputRows, nOutputCols;
  long istride0, kstride0;
  THTensor *input;
  THTensor *kernel;
  real *input_data;
  real *weight_data;
  real *output_data;
  ptrdiff_t nelem;
  long k;
  THArgCheck(t_->nDimension == 3 , 3, "input: 3D Tensor expected");
  THArgCheck(k_->nDimension == 3 , 4, "kernel: 3D Tensor expected");
  THArgCheck(srow >= 1, 5, "Stride should be a positive integer");
  THArgCheck(scol >= 1, 6, "Stride should be a positive integer");
  /* work on contiguous copies so the flat pointer arithmetic below is valid */
  input = THTensor_(newContiguous)(t_);
  kernel = THTensor_(newContiguous)(k_);
  istride0 = input->stride[0];
  nInputPlane = input->size[0];
  nInputRows = input->size[1];
  nInputCols = input->size[2];
  kstride0 = kernel->stride[0];
  nOutputPlane = kernel->size[0];
  nKernelRows = kernel->size[1];
  nKernelCols = kernel->size[2];
  THArgCheck(nOutputPlane == nInputPlane, 2, "invalid number of input/kernel planes");
  THArgCheck( (nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv2Dcmul : Input image is smaller than kernel");
  nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf);
  nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf);
  nelem = THTensor_(nElement)(r_);
  THTensor_(resize3d)(r_, nOutputPlane, nOutputRows, nOutputCols);
  /* fresh/resized result or beta == 0: zero it; otherwise pre-scale by beta */
  if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
  {
    THTensor_(zero)(r_);
  }
  else if (beta != 1)
    THTensor_(mul)(r_, r_, beta);
  input_data = THTensor_(data)(input);
  weight_data = THTensor_(data)(kernel);
  output_data = THTensor_(data)(r_);
  for(k = 0; k < nOutputPlane; k++)
  {
    /* get kernel */
    real *ptr_weight = weight_data + k*kstride0;
    /* get input */
    real *ptr_input = input_data + k*istride0;
    /* do image, kernel convolution */
    THTensor_(conv2d)(output_data,
                      alpha,
                      ptr_input, nInputRows, nInputCols,
                      ptr_weight, nKernelRows, nKernelCols,
                      srow, scol, vf, xc);
    /* Next output plane */
    output_data += nOutputCols*nOutputRows;
  }
  THTensor_(free)(input);
  THTensor_(free)(kernel);
}
/*
3D input, 3D kernel, 3D output
component wise multiplication like with a permutation map
y <- y.*x + beta*y
*/
/* Mapped component-wise 2D convolution: for each row k of the 2D map
   tensor, convolve input plane map[k][0] with kernel plane k and
   accumulate into output plane map[k][1]. Map indices are stored
   1-based and converted to 0-based below. */
void THTensor_(conv2Dmap)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, THTensor *map, long srow, long scol, const char *vf, const char *xc)
{
  long nInputPlane, nInputRows, nInputCols;
  long nKernelRows, nKernelCols;
  long nOutputPlane, nOutputRows, nOutputCols;
  long istride0, kstride0;
  THTensor *input;
  THTensor* kernel;
  real *input_data;
  real *weight_data;
  real *output_data;
  long nmaps;
  ptrdiff_t nelem;
  long k;
  THArgCheck(t_->nDimension == 3 , 3, "input: 3D Tensor expected");
  THArgCheck(k_->nDimension == 3 , 4, "kernel: 3D Tensor expected");
  THArgCheck(map->nDimension == 2 , 4, "map: 2D Tensor expected");
  THArgCheck(srow >= 1, 6, "Stride should be a positive integer");
  THArgCheck(scol >= 1, 7, "Stride should be a positive integer");
  /* work on contiguous copies so the flat pointer arithmetic below is valid */
  input = THTensor_(newContiguous)(t_);
  kernel = THTensor_(newContiguous)(k_);
  istride0 = input->stride[0];
  nInputPlane = input->size[0];
  nInputRows = input->size[1];
  nInputCols = input->size[2];
  kstride0 = kernel->stride[0];
  nOutputPlane = kernel->size[0];
  nKernelRows = kernel->size[1];
  nKernelCols = kernel->size[2];
  THArgCheck(nOutputPlane == nInputPlane, 2, "invalid number of input/kernel planes");
  THArgCheck( (nInputRows >= nKernelRows && nInputCols >= nKernelCols)
              || *vf == 'F', 2, "conv2Dmap : Input image is smaller than kernel");
  nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf);
  nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf);
  nelem = THTensor_(nElement)(r_);
  THTensor_(resize3d)(r_, nOutputPlane, nOutputRows, nOutputCols);
  /* fresh/resized result or beta == 0: zero it; otherwise pre-scale by beta */
  if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
  {
    THTensor_(zero)(r_);
  }
  else if (beta != 1)
    THTensor_(mul)(r_, r_, beta);
  input_data = THTensor_(data)(input);
  weight_data = THTensor_(data)(kernel);
  output_data = THTensor_(data)(r_);
  nmaps = map->size[0];
  for(k = 0; k < nmaps; k++)
  {
    /* get indices (stored 1-based in the map) */
    long from = (long)THTensor_(get2d)(map,k,0)-1;
    long to   = (long)THTensor_(get2d)(map,k,1)-1;
    /* get kernel */
    real *ptr_weight = weight_data + k*kstride0;
    /* get input */
    real *ptr_input = input_data + from*istride0;
    /* get output */
    real *ptr_output = output_data + to*nOutputRows*nOutputCols;
    /* do image, kernel convolution */
    THTensor_(conv2d)(ptr_output,
                      alpha,
                      ptr_input, nInputRows, nInputCols,
                      ptr_weight, nKernelRows, nKernelCols,
                      srow, scol, vf, xc);
  }
  THTensor_(free)(input);
  THTensor_(free)(kernel);
}
/*
  4D input, 4D kernel, 5D output
  like rank1 update
  A <- xx' + beta*A
  for st,sr,sc=1 this is equivalent to the 3D xcorr ger update (conv3Dger with 'V','X'),
  but otherwise it is useful for calculating derivatives wrt a kernel that is
  applied with stride st,sr,sc != 1
*/
/* Reversed rank-1 update in 3D, 4D input x 4D kernel -> 5D output:
   r_ <- beta*r_ + alpha * revxcorr3d(input plane i, kernel plane k)
   for every (k, i) pair, using the reversed (gradient) correlation.
   Output is resized to (nKernelPlane, nInputPlane, depth, rows, cols). */
void THTensor_(conv3DRevger)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_,
                             long sdepth, long srow, long scol)
{
  long nInputPlane, nInputDepth, nInputRows, nInputCols;
  long nKernelPlane, nKernelDepth, nKernelRows, nKernelCols;
  long nOutputPlane, nOutputDepth, nOutputRows, nOutputCols;
  long istride0, kstride0;
  THTensor *input;
  THTensor *kernel;
  real *input_data;
  real *weight_data;
  real *output_data;
  ptrdiff_t nelem;
  long k, i;
  THArgCheck(t_->nDimension == 4 , 3, "input: 4D Tensor expected");
  THArgCheck(k_->nDimension == 4 , 4, "kernel: 4D Tensor expected");
  THArgCheck(sdepth >= 1, 5, "Stride should be a positive integer");
  THArgCheck(srow >= 1, 6, "Stride should be a positive integer");
  THArgCheck(scol >= 1, 7, "Stride should be a positive integer");
  /* work on contiguous copies so the flat pointer arithmetic below is valid */
  input = THTensor_(newContiguous)(t_);
  kernel = THTensor_(newContiguous)(k_);
  nInputPlane = input->size[0];
  istride0 = input->stride[0];
  nInputDepth = input->size[1];
  nInputRows = input->size[2];
  nInputCols = input->size[3];
  kstride0 = kernel->stride[0];
  nKernelPlane = kernel->size[0];
  nKernelDepth= kernel->size[1];
  nKernelRows = kernel->size[2];
  nKernelCols = kernel->size[3];
  nOutputPlane = nInputPlane * kernel->size[0];
  THArgCheck(nInputDepth >= nKernelDepth && nInputRows >= nKernelRows && nInputCols >= nKernelCols , 2, "conv3DRevger : Input image is smaller than kernel");
  /* reversed-correlation output extents (see validXCorr3DRevptr) */
  nOutputDepth = nInputDepth - (nKernelDepth - 1) * sdepth;
  nOutputRows = nInputRows - (nKernelRows - 1) * srow;
  nOutputCols = nInputCols - (nKernelCols - 1) * scol;
  nelem = THTensor_(nElement)(r_);
  THTensor_(resize5d)(r_,nKernelPlane, nInputPlane, nOutputDepth, nOutputRows, nOutputCols);
  /* fresh/resized result or beta == 0: zero it; otherwise pre-scale by beta */
  if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
  {
    THTensor_(zero)(r_);
  }
  else if (beta != 1)
    THTensor_(mul)(r_, r_, beta);
  input_data = THTensor_(data)(input);
  weight_data = THTensor_(data)(kernel);
  output_data = THTensor_(data)(r_);
  for(k = 0; k < nKernelPlane; k++)
  {
    /* get kernel */
    real *ptr_weight = weight_data+k*kstride0;
    for(i = 0; i < nInputPlane; i++)
    {
      /* get input */
      real *ptr_input = input_data+i*istride0;
      /* do image, kernel convolution */
      THTensor_(validXCorr3DRevptr)(output_data,
                                    alpha,
                                    ptr_input, nInputDepth, nInputRows, nInputCols,
                                    ptr_weight, nKernelDepth, nKernelRows, nKernelCols,
                                    sdepth, srow, scol);
      /* Next output plane */
      output_data += nOutputDepth*nOutputCols*nOutputRows;
    }
  }
  THTensor_(free)(input);
  THTensor_(free)(kernel);
}
/*
4D input, 4D kernel, 5D output
like rank1 update
A <- xx' + beta*A
*/
/* conv3Dger: rank-1-update-style 3D convolution (A <- xx' + beta*A).
   Every (kernel plane k, input plane i) pair yields one output plane, so the
   result r_ is resized to nKernelPlane x nInputPlane x depth x rows x cols.
   vf selects 'V'alid or 'F'ull convolution; xc selects 'X'corr or 'C'onv. */
void THTensor_(conv3Dger)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_,
                          long sdepth, long srow, long scol, const char *vf, const char *xc)
{
  long nInputPlane, nInputDepth, nInputRows, nInputCols;
  long nKernelPlane, nKernelDepth, nKernelRows, nKernelCols;
  long nOutputPlane, nOutputDepth, nOutputRows, nOutputCols;
  long istride0, kstride0;
  THTensor *input;
  THTensor *kernel;
  real *input_data;
  real *weight_data;
  real *output_data;
  ptrdiff_t nelem;
  long k, i;

  /* validate shapes, strides, and the convolution mode flags */
  THArgCheck(t_->nDimension == 4 , 3, "input: 4D Tensor expected");
  THArgCheck(k_->nDimension == 4 , 4, "kernel: 4D Tensor expected");
  THArgCheck(sdepth >= 1, 5, "Stride should be a positive integer");
  THArgCheck(srow >= 1, 6, "Stride should be a positive integer");
  THArgCheck(scol >= 1, 7, "Stride should be a positive integer");
  THArgCheck(*vf == 'V' || *vf == 'F', 8, "type of convolution can 'V' or 'F'");
  THArgCheck(*xc == 'C' || *xc == 'X', 8, "type of convolution can 'X' or 'C'");

  /* obtain contiguous versions of both operands for the raw-pointer loops */
  input = THTensor_(newContiguous)(t_);
  kernel = THTensor_(newContiguous)(k_);

  nInputPlane = input->size[0];
  istride0    = input->stride[0];
  nInputDepth = input->size[1];
  nInputRows  = input->size[2];
  nInputCols  = input->size[3];

  kstride0     = kernel->stride[0];
  nKernelPlane = kernel->size[0];
  nKernelDepth = kernel->size[1];
  nKernelRows  = kernel->size[2];
  nKernelCols  = kernel->size[3];
  nOutputPlane = nInputPlane * kernel->size[0];

  /* a 'V'alid convolution requires the kernel to fit inside the input */
  THArgCheck((nInputDepth >= nKernelDepth
              && nInputRows >= nKernelRows
              && nInputCols >= nKernelCols)
             || *vf == 'F', 2, "conv3Dger : Input image is smaller than kernel");

  nOutputDepth = THTensor_(convsize)(nInputDepth, nKernelDepth, sdepth, vf);
  nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf);
  nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf);

  /* nelem is sampled BEFORE the resize: if resizing changed the element
     count, the old contents are meaningless, so r_ is zeroed below instead
     of being scaled by beta */
  nelem = THTensor_(nElement)(r_);
  THTensor_(resize5d)(r_,nKernelPlane, nInputPlane, nOutputDepth, nOutputRows, nOutputCols);

  if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
  {
    THTensor_(zero)(r_);
  }
  else if (beta != 1)
    THTensor_(mul)(r_, r_, beta);

  input_data = THTensor_(data)(input);
  weight_data = THTensor_(data)(kernel);
  output_data = THTensor_(data)(r_);

  for(k = 0; k < nKernelPlane; k++)
  {
    /* get kernel */
    real *ptr_weight = weight_data+k*kstride0;

    for(i = 0; i < nInputPlane; i++)
    {
      /* get input */
      real *ptr_input = input_data+i*istride0;

      /* do image, kernel convolution; result accumulates (scaled by alpha)
         into the current output plane */
      THTensor_(conv3d)(output_data,
                        alpha,
                        ptr_input, nInputDepth, nInputRows, nInputCols,
                        ptr_weight, nKernelDepth, nKernelRows, nKernelCols,
                        sdepth, srow, scol, vf, xc);
      /* Next output plane */
      output_data += nOutputDepth*nOutputCols*nOutputRows;
    }
  }

  /* release the contiguous versions obtained above */
  THTensor_(free)(input);
  THTensor_(free)(kernel);
}
/*
4D input, 5D kernel, 4D output
matrix vector product like
y <- Ax + beta*y
*/
/* conv3Dmv: matrix-vector-product-style 3D convolution (y <- Ax + beta*y).
   The 5D kernel holds nOutputPlane x nInputPlane 3D filters; each output
   plane is the sum over input planes of input[i] convolved with
   kernel[k][i].  vf: 'V'alid/'F'ull; xc: 'X'corr/'C'onv. */
void THTensor_(conv3Dmv)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_,
                         long sdepth, long srow, long scol, const char *vf, const char *xc)
{
  long nInputPlane, nInputDepth, nInputRows, nInputCols;
  long nKernelDepth, nKernelRows, nKernelCols;
  long nOutputPlane, nOutputDepth, nOutputRows, nOutputCols;
  long istride0, kstride0, kstride1;
  THTensor *input;
  THTensor *kernel;
  real *input_data;
  real *weight_data;
  real *output_data;
  ptrdiff_t nelem;
  long k, i;

  THArgCheck(t_->nDimension == 4 , 3, "input: 4D Tensor expected");
  THArgCheck(k_->nDimension == 5 , 4, "kernel: 5D Tensor expected");
  THArgCheck(sdepth >= 1, 5, "Stride should be a positive integer");
  THArgCheck(srow >= 1, 6, "Stride should be a positive integer");
  THArgCheck(scol >= 1, 7, "Stride should be a positive integer");
  THArgCheck(*vf == 'V' || *vf == 'F', 8, "type of convolution can 'V' or 'F'");
  THArgCheck(*xc == 'C' || *xc == 'X', 8, "type of convolution can 'X' or 'C'");

  input = THTensor_(newContiguous)(t_);
  /* copy the kernel only if its two innermost dimensions are not already
     contiguous; otherwise retain a reference to k_ as-is (the per-plane
     strides kstride0/kstride1 below handle the outer layout) */
  if (!(k_->stride[4] == 1) || !(k_->stride[3] == k_->size[4])) {
    kernel = THTensor_(newContiguous)(k_);
  } else {
    THTensor_(retain)(k_);
    kernel = k_;
  }

  nInputPlane = input->size[0];
  istride0    = input->stride[0];
  nInputDepth = input->size[1];
  nInputRows  = input->size[2];
  nInputCols  = input->size[3];

  kstride0    = kernel->stride[0];
  kstride1    = kernel->stride[1];
  nKernelDepth = kernel->size[2];
  nKernelRows = kernel->size[3];
  nKernelCols = kernel->size[4];
  nOutputPlane = kernel->size[0];
  THArgCheck(kernel->size[1] == nInputPlane, 2, "invalid number of input planes");

  THArgCheck( (nInputDepth >= nKernelDepth && nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv3Dmv : Input image is smaller than kernel");

  nOutputDepth = THTensor_(convsize)(nInputDepth, nKernelDepth, sdepth, vf);
  nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf);
  nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf);

  /* nelem is sampled BEFORE the resize: if the element count changed, the
     old contents are invalid and r_ is zeroed instead of scaled by beta */
  nelem = THTensor_(nElement)(r_);
  THTensor_(resize4d)(r_, nOutputPlane, nOutputDepth, nOutputRows, nOutputCols);

  if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
  {
    THTensor_(zero)(r_);
  }
  else if (beta != 1)
    THTensor_(mul)(r_, r_, beta);

  input_data = THTensor_(data)(input);
  weight_data = THTensor_(data)(kernel);
  output_data = THTensor_(data)(r_);

  for(k = 0; k < nOutputPlane; k++)
  {
    /* all input planes accumulate into the same output plane k */
    for(i = 0; i < nInputPlane; i++)
    {
      /* get kernel */
      real *ptr_weight = weight_data + k*kstride0 + i*kstride1;
      /* get input */
      real *ptr_input = input_data + i*istride0;

      /* do image, kernel convolution */
      THTensor_(conv3d)(output_data,
                        alpha,
                        ptr_input, nInputDepth, nInputRows, nInputCols,
                        ptr_weight, nKernelDepth, nKernelRows, nKernelCols,
                        sdepth, srow, scol, vf, xc);
    }
    /* Next output plane */
    output_data += nOutputDepth*nOutputCols*nOutputRows;
  }

  THTensor_(free)(input);
  THTensor_(free)(kernel);
}
/*
3D input, 3D kernel, 3D output
scalar multiplication like
y <- x*y + beta*y
*/
/* conv3Dmul: single-plane 3D convolution (scalar-multiplication-like:
   y <- x*k + beta*y).  One 3D input convolved with one 3D kernel into one
   3D output.  vf: 'V'alid/'F'ull; xc: 'X'corr/'C'onv. */
void THTensor_(conv3Dmul)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_,
                          long sdepth, long srow, long scol, const char *vf, const char *xc)
{
  THTensor *input;
  THTensor* kernel;
  long nInputDepth;
  long nInputRows;
  long nInputCols;
  long nKernelDepth;
  long nKernelRows;
  long nKernelCols;
  long nOutputDepth, nOutputRows, nOutputCols;
  real *ptr_input;
  real *ptr_weight;
  real *output_data;
  ptrdiff_t nelem;

  THArgCheck(t_->nDimension == 3 , 3, "input: 3D Tensor expected");
  THArgCheck(k_->nDimension == 3 , 4, "kernel: 3D Tensor expected");
  THArgCheck(sdepth >= 1, 5, "Stride should be a positive integer");
  THArgCheck(srow >= 1, 6, "Stride should be a positive integer");
  THArgCheck(scol >= 1, 7, "Stride should be a positive integer");
  THArgCheck(*vf == 'V' || *vf == 'F', 8, "type of convolution can 'V' or 'F'");
  THArgCheck(*xc == 'C' || *xc == 'X', 8, "type of convolution can 'X' or 'C'");

  /* obtain contiguous versions for the raw-pointer convolution below */
  input = THTensor_(newContiguous)(t_);
  kernel = THTensor_(newContiguous)(k_);

  nInputDepth = input->size[0];
  nInputRows  = input->size[1];
  nInputCols  = input->size[2];
  nKernelDepth = kernel->size[0];
  nKernelRows = kernel->size[1];
  nKernelCols = kernel->size[2];

  THArgCheck((nInputDepth >= nKernelDepth && nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv3Dmul : Input image is smaller than kernel");

  nOutputDepth = THTensor_(convsize)(nInputDepth, nKernelDepth, sdepth, vf);
  nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf);
  nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf);

  /* nelem sampled before the resize: a changed element count means the old
     contents are invalid, so r_ is zeroed instead of scaled by beta */
  nelem = THTensor_(nElement)(r_);
  THTensor_(resize3d)(r_, nOutputDepth, nOutputRows, nOutputCols);
  if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
    THTensor_(zero)(r_);
  else if (beta != 1)
    THTensor_(mul)(r_, r_, beta);

  ptr_input = THTensor_(data)(input);
  ptr_weight = THTensor_(data)(kernel);
  output_data = THTensor_(data)(r_);

  /* do image, kernel convolution */
  THTensor_(conv3d)(output_data,
                    alpha,
                    ptr_input, nInputDepth, nInputRows, nInputCols,
                    ptr_weight, nKernelDepth, nKernelRows, nKernelCols,
                    sdepth, srow, scol, vf, xc);
  THTensor_(free)(input);
  THTensor_(free)(kernel);
}
/*
4D input, 4D kernel, 4D output
component wise multiplication like
y <- y.*x + beta*y
*/
/* conv3Dcmul: component-wise 3D convolution (y <- k.*x + beta*y).
   Plane i of the input is convolved with plane i of the kernel to produce
   output plane i, so the two must have the same number of planes.
   vf: 'V'alid/'F'ull; xc: 'X'corr/'C'onv.

   Fixes vs. previous revision:
   - the dimension-check messages said "3D Tensor expected" while checking
     for 4D tensors;
   - sdepth is used by convsize() below but was never validated;
   - the THArgCheck argument numbers now match the sibling conv3D* functions
     (sdepth=5, srow=6, scol=7, flags=8). */
void THTensor_(conv3Dcmul)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_,
                           long sdepth, long srow, long scol, const char *vf, const char *xc)
{
  long nInputPlane, nInputDepth, nInputRows, nInputCols;
  long nKernelDepth, nKernelRows, nKernelCols;
  long nOutputPlane, nOutputDepth, nOutputRows, nOutputCols;
  long istride0, kstride0;

  THTensor *input;
  THTensor *kernel;
  real *input_data;
  real *weight_data;
  real *output_data;
  ptrdiff_t nelem;
  long k;

  THArgCheck(t_->nDimension == 4 , 3, "input: 4D Tensor expected");
  THArgCheck(k_->nDimension == 4 , 4, "kernel: 4D Tensor expected");
  THArgCheck(sdepth >= 1, 5, "Stride should be a positive integer");
  THArgCheck(srow >= 1, 6, "Stride should be a positive integer");
  THArgCheck(scol >= 1, 7, "Stride should be a positive integer");
  THArgCheck(*vf == 'V' || *vf == 'F', 8, "type of convolution can 'V' or 'F'");
  THArgCheck(*xc == 'C' || *xc == 'X', 8, "type of convolution can 'X' or 'C'");

  /* obtain contiguous versions for the raw-pointer loops below */
  input = THTensor_(newContiguous)(t_);
  kernel = THTensor_(newContiguous)(k_);

  istride0    = input->stride[0];
  nInputPlane = input->size[0];
  nInputDepth = input->size[1];
  nInputRows  = input->size[2];
  nInputCols  = input->size[3];

  kstride0    = kernel->stride[0];
  nOutputPlane = kernel->size[0];
  nKernelDepth = kernel->size[1];
  nKernelRows = kernel->size[2];
  nKernelCols = kernel->size[3];

  THArgCheck(nOutputPlane == nInputPlane, 2, "invalid number of input/kernel planes");
  THArgCheck( (nInputDepth >= nKernelDepth && nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv3Dcmul : Input image is smaller than kernel");

  nOutputDepth = THTensor_(convsize)(nInputDepth, nKernelDepth, sdepth, vf);
  nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf);
  nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf);

  /* nelem sampled before the resize: a changed element count means the old
     contents are invalid, so r_ is zeroed instead of scaled by beta */
  nelem = THTensor_(nElement)(r_);
  THTensor_(resize4d)(r_, nOutputPlane, nOutputDepth, nOutputRows, nOutputCols);

  if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
  {
    THTensor_(zero)(r_);
  }
  else if (beta != 1)
    THTensor_(mul)(r_, r_, beta);

  input_data = THTensor_(data)(input);
  weight_data = THTensor_(data)(kernel);
  output_data = THTensor_(data)(r_);

  for(k = 0; k < nOutputPlane; k++)
  {
    /* get kernel */
    real *ptr_weight = weight_data + k*kstride0;
    /* get input */
    real *ptr_input = input_data + k*istride0;

    /* do image, kernel convolution */
    THTensor_(conv3d)(output_data,
                      alpha,
                      ptr_input, nInputDepth, nInputRows, nInputCols,
                      ptr_weight, nKernelDepth, nKernelRows, nKernelCols,
                      sdepth, srow, scol, vf, xc);
    /* Next output plane */
    output_data += nOutputDepth*nOutputCols*nOutputRows;
  }

  THTensor_(free)(input);
  THTensor_(free)(kernel);
}
/*
4D input, 4D kernel, 4D output
component wise multiplication like with a permutation map
y <- y.*x + beta*y
*/
/* conv3Dmap: map-driven component-wise 3D convolution.
   Row k of the 2D map holds a (from, to) pair of 1-based plane indices:
   kernel plane k is convolved with input plane "from" and accumulated into
   output plane "to".  r_ is scaled by beta first (or zeroed if it was
   freshly sized).  vf: 'V'alid/'F'ull; xc: 'X'corr/'C'onv.

   Fix vs. previous revision: sdepth is used by convsize() below but was
   never validated; it is now checked like srow and scol. */
void THTensor_(conv3Dmap)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, THTensor *map,
                          long sdepth, long srow, long scol, const char *vf, const char *xc)
{
  long nInputPlane, nInputDepth, nInputRows, nInputCols;
  long nKernelDepth, nKernelRows, nKernelCols;
  long nOutputPlane, nOutputDepth, nOutputRows, nOutputCols;
  long istride0, kstride0;

  THTensor *input;
  THTensor *kernel;
  ptrdiff_t nelem;
  real *input_data;
  real *weight_data;
  real *output_data;

  long nmaps;
  long k;

  THArgCheck(t_->nDimension == 4 , 3, "input: 4D Tensor expected");
  THArgCheck(k_->nDimension == 4 , 4, "kernel: 4D Tensor expected");
  THArgCheck(map->nDimension == 2 , 4, "map: 2D Tensor expected");
  THArgCheck(sdepth >= 1, 5, "Stride should be a positive integer");
  THArgCheck(srow >= 1, 6, "Stride should be a positive integer");
  THArgCheck(scol >= 1, 7, "Stride should be a positive integer");
  THArgCheck(*vf == 'V' || *vf == 'F', 8, "type of convolution can 'V' or 'F'");
  THArgCheck(*xc == 'C' || *xc == 'X', 8, "type of convolution can 'X' or 'C'");

  /* obtain contiguous versions for the raw-pointer loops below */
  input = THTensor_(newContiguous)(t_);
  kernel = THTensor_(newContiguous)(k_);

  istride0    = input->stride[0];
  nInputPlane = input->size[0];
  nInputDepth = input->size[1];
  nInputRows  = input->size[2];
  nInputCols  = input->size[3];

  kstride0    = kernel->stride[0];
  nOutputPlane = kernel->size[0];
  nKernelDepth = kernel->size[1];
  nKernelRows = kernel->size[2];
  nKernelCols = kernel->size[3];

  THArgCheck(nOutputPlane == nInputPlane, 2, "invalid number of input/kernel planes");
  THArgCheck((nInputDepth >= nKernelDepth
              && nInputRows >= nKernelRows
              && nInputCols >= nKernelCols) || *vf == 'F',
             2, "conv3Dmap : Input image is smaller than kernel");

  nOutputDepth = THTensor_(convsize)(nInputDepth, nKernelDepth, sdepth, vf);
  nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf);
  nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf);

  /* nelem sampled before the resize: a changed element count means the old
     contents are invalid, so r_ is zeroed instead of scaled by beta */
  nelem = THTensor_(nElement)(r_);
  THTensor_(resize4d)(r_, nOutputPlane, nOutputDepth, nOutputRows, nOutputCols);

  if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_))
  {
    THTensor_(zero)(r_);
  }
  else if (beta != 1)
    THTensor_(mul)(r_, r_, beta);

  input_data = THTensor_(data)(input);
  weight_data = THTensor_(data)(kernel);
  output_data = THTensor_(data)(r_);

  nmaps = map->size[0];

  for(k = 0; k < nmaps; k++)
  {
    /* get indices (map entries are 1-based) */
    long from = (long)THTensor_(get2d)(map,k,0)-1;
    long to   = (long)THTensor_(get2d)(map,k,1)-1;

    /* get kernel */
    real *ptr_weight = weight_data + k*kstride0;
    /* get input */
    real *ptr_input = input_data + from*istride0;
    /* get output */
    real *ptr_output = output_data + to*nOutputDepth*nOutputRows*nOutputCols;

    /* do image, kernel convolution */
    THTensor_(conv3d)(ptr_output,
                      alpha,
                      ptr_input, nInputDepth, nInputRows, nInputCols,
                      ptr_weight, nKernelDepth, nKernelRows, nKernelCols,
                      sdepth, srow, scol, vf, xc);
  }

  THTensor_(free)(input);
  THTensor_(free)(kernel);
}
#endif
|
GB_binop__lor_fp64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lor_fp64)
// A.*B function (eWiseMult): GB (_AemultB_08__lor_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__lor_fp64)
// A.*B function (eWiseMult): GB (_AemultB_04__lor_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lor_fp64)
// A*D function (colscale): GB (_AxD__lor_fp64)
// D*A function (rowscale): GB (_DxB__lor_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__lor_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__lor_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lor_fp64)
// C=scalar+B GB (_bind1st__lor_fp64)
// C=scalar+B' GB (_bind1st_tran__lor_fp64)
// C=A+scalar GB (_bind2nd__lor_fp64)
// C=A'+scalar GB (_bind2nd_tran__lor_fp64)
// C type: double
// A type: double
// A pattern? 0
// B type: double
// B pattern? 0
// BinaryOp: cij = ((aij != 0) || (bij != 0))
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
double aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
double bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) || (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOR || GxB_NO_FP64 || GxB_NO_LOR_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense, applying the LOR operator with
// no accumulator.  The numeric loop lives in the included template,
// specialized by the GB_* macros defined above in this generated file.
void GB (_Cdense_ewise3_noaccum__lor_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B, accumulating a sparse matrix B into a dense C with the LOR
// operator.  Returns GrB_NO_VALUE when this specialization is compiled out
// (see GB_DISABLE above).
GrB_Info GB (_Cdense_accumB__lor_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // numeric work is in the template, specialized by the GB_* macros
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b, accumulating a scalar b into a dense matrix C with the LOR
// operator.  Returns GrB_NO_VALUE when this specialization is compiled out.
// Fix vs. previous revision: removed an unreachable duplicate
// "return (GrB_SUCCESS) ;" that followed the block whose last statement
// already returns.
GrB_Info GB (_Cdense_accumb__lor_fp64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale: each column j of A is combined with the diagonal
// entry D(j,j) using the LOR operator.  The loop is in the included
// template, driven by the GB_* macros above.
GrB_Info GB (_AxD__lor_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale: each row i of B is combined with the diagonal entry
// D(i,i) using the LOR operator.  The loop is in the included template,
// driven by the GB_* macros above.
GrB_Info GB (_DxB__lor_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B with the LOR operator.
// The add loop itself is in GB_add_template.c, specialized by the GB_*
// macros defined above.
GrB_Info GB (_AaddB__lor_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,                 // optional mask
    const bool Mask_struct,             // mask is structural if true
    const bool Mask_comp,               // mask is complemented if true
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    double alpha_scalar ;
    double beta_scalar ;
    if (is_eWiseUnion)
    {
        // the alpha/beta scalars are only read for eWiseUnion
        alpha_scalar = (*((double *) alpha_scalar_in)) ;
        beta_scalar = (*((double *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B, C<M> = A.*B, or C<!M> = A.*B with the LOR operator,
// where C is sparse or hypersparse.  Method 08 is the general sparse case;
// the loop is in the included meta-template.
GrB_Info GB (_AemultB_08__lor_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,                 // optional mask
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B with the LOR operator, where A is
// sparse/hypersparse and B is bitmap/full.  The flipxy handling below is
// compile-time: GB_BINOP_FLIP is 0 for LOR (commutative), so only the
// unflipped template is instantiated.
GrB_Info GB (_AemultB_02__lor_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The op is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B with the LOR operator, where the mask M
// is sparse/hypersparse and both A and B are bitmap/full.
GrB_Info GB (_AemultB_04__lor_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B, C<M> = A.*B, or C<!M> = A.*B with the LOR operator,
// where the result C is held in bitmap form.
GrB_Info GB (_AemultB_bitmap__lor_fp64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// bind1st: Cx [p] = (x || Bx [p]) for every entry p present in B's bitmap
// (all entries when Bb is not a bitmap).  x is the scalar bound as the
// first operand of the LOR operator.
GrB_Info GB (_bind1st__lor_fp64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // operate only on entries present in the bitmap
        if (GBB (Bb, p))
        {
            double bval = GBX (Bx, p, false) ;
            Cx [p] = ((x != 0) || (bval != 0)) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// bind2nd: Cx [p] = (Ax [p] || y) for every entry p present in A's bitmap
// (all entries when Ab is not a bitmap).  y is the scalar bound as the
// second operand of the LOR operator.
GrB_Info GB (_bind2nd__lor_fp64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double y = (*((double *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // operate only on entries present in the bitmap
        if (GBB (Ab, p))
        {
            double aval = GBX (Ax, p, false) ;
            Cx [p] = ((aval != 0) || (y != 0)) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    double aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = ((x != 0) || (aij != 0)) ;        \
}

// C = op (x, A'): transpose A and apply the LOR operator with the scalar x
// bound as the first operand.  The transpose loop is in GB_unop_transpose.c.
GrB_Info GB (_bind1st_tran__lor_fp64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    double aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = ((aij != 0) || (y != 0)) ;        \
}

// C = op (A', y): transpose A and apply the LOR operator with the scalar y
// bound as the second operand.  The transpose loop is in GB_unop_transpose.c.
GrB_Info GB (_bind2nd_tran__lor_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
graph_decomposition.h | #ifndef __GRAPH_DECOMPOSITION_H__
#define __GRAPH_DECOMPOSITION_H__
#include "graph.h"
#include <random>
#include <math.h>
#include <mutex>
#include <omp.h>
/**
 * Generate n samples from an exponential distribution with the given rate,
 * also reporting the largest sample and its index.
 *
 * The engine is default-seeded on purpose: every call produces the exact
 * same sequence (deterministic for grading).
 *
 * Fix vs. previous revision: the loop is serial, so the mutex-guarded
 * double-checked max update was dead weight — and its unlocked first read
 * would have been a data race had the loop ever been parallelized.  Plain
 * max tracking is equivalent and simpler.
 *
 * @param n      number of samples (for n == 0, maxVal = -1 and maxId = -1)
 * @param rate   rate (lambda) of the exponential distribution
 * @param maxVal out: largest generated value
 * @param maxId  out: index of the largest generated value
 * @return malloc'd array of n floats; caller must free()
 */
static float* genExp(int n, float rate, float* maxVal, int* maxId) {
    std::default_random_engine generator; // default seed => same values every call
    std::exponential_distribution<double> distribution(rate);
    float maxdu = -1.f;
    int id = -1;
    float* vals = (float*) malloc(sizeof(float) * n);
    for (int i = 0; i < n; i++) {
        float val = (float) distribution(generator);
        if (val > maxdu) {
            maxdu = val;
            id = i;
        }
        vals[i] = val;
    }
    *maxVal = maxdu;
    *maxId = id;
    return vals;
}
/**
 * Given an array of floats, truncates each element toward zero into an int,
 * returning the results in a newly allocated array (caller must free it).
 **/
/**
 * Truncate each of the n floats in fdus toward zero, producing a newly
 * allocated int array of the same length (caller must free it).
 */
static int* chopToInt(float* fdus, int n) {
    int* truncated = (int*) malloc(sizeof(int) * n);
#pragma omp parallel for schedule(dynamic, 512)
    for (int idx = 0; idx < n; ++idx)
        truncated[idx] = static_cast<int>(fdus[idx]);
    return truncated;
}
/**
 * Draw n exponential(rate) samples truncated to ints, reporting the
 * truncated maximum sample and its index through maxVal/maxId.
 * Caller must free() the returned array.
 */
static int* getDus(int n, float rate, int* maxVal, int* maxId) {
    float largest = 0.f;
    float* samples = genExp(n, rate, &largest, maxId);
    int* result = chopToInt(samples, n);
    free(samples);
    *maxVal = (int) largest;
    return result;
}
void decompose(graph *g, int *decomp, int* dus, int maxVal, int maxId);
#endif
|
matrix_matrix_multiplication.c | // Matrix-Matrix Multiplication
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include<omp.h>
//using namespace std;
#define N 4
int A[N][N], B[N][N], C[N][N]; // declaring matrices of NxN size
/*
 * Multiply two NxN matrices of small random ints (C = A*B) with an
 * OpenMP-parallel triple loop, printing the operands, the product, and the
 * elapsed time.
 *
 * Fixes vs. previous revision:
 * - timing now uses omp_get_wtime(): clock() accumulates CPU time across
 *   every OpenMP thread, so it overstates the elapsed time of the parallel
 *   region;
 * - C is an int matrix, so it is cleared with an int literal rather than
 *   the float literal 0.
 */
int main ()
{
    /* DECLARING VARIABLES */
    int i, j, m;        // indices for matrix multiplication
    double t_start, t_1; // wall-clock execution time measures (seconds)

    /* FILLING MATRICES WITH RANDOM NUMBERS */
    /* rand() is left unseeded on purpose, so runs are reproducible */
    for(i=0;i<N;i++)
    {
        for(j=0;j<N;j++)
        {
            A[i][j]= (rand()%5);
            B[i][j]= (rand()%5);
        }
    }
    // Display input matrix A:
    printf("Matrix A:\n");
    for(i=0;i<N;i++)
    {
        for(j=0;j<N;j++)
        {
            printf("%d\t",A[i][j]);
        }
        printf("\n");
    }
    // Display input matrix B:
    printf("Matrix B:\n");
    for(i=0;i<N;i++)
    {
        for(j=0;j<N;j++)
        {
            printf("%d\t",B[i][j]);
        }
        printf("\n");
    }

    t_start = omp_get_wtime(); // wall-clock time measure

    /* MATRIX MULTIPLICATION */
    printf("Max number of threads: %i \n",omp_get_max_threads());
    #pragma omp parallel
    #pragma omp single
    {
        printf("Number of threads: %i \n",omp_get_num_threads());
    }
    /* i is the parallel loop index (implicitly private); j and m must be
       listed private or all threads would share them */
    #pragma omp parallel for private(m,j)
    for(i=0;i<N;i++)
    {
        for(j=0;j<N;j++)
        {
            C[i][j]=0; // set initial value of resulting matrix C = 0
            for(m=0;m<N;m++)
            {
                C[i][j]=A[i][m]*B[m][j]+C[i][j];
            }
        }
    }
    // Display result matrix C:
    printf("Matrix C:\n");
    for(i=0;i<N;i++)
    {
        for(j=0;j<N;j++)
        {
            printf("%d\t",C[i][j]);
        }
        printf("\n");
    }

    /* TIME MEASURE + OUTPUT */
    t_1 = omp_get_wtime() - t_start; // elapsed wall-clock seconds
    printf("Execution time: %f(in seconds) \n",t_1);

    /* TERMINATE PROGRAM */
    return 0;
}
/*
studen@student-ThinkCentre-M72e:~/HPC$ gcc -fopenmp matrix_matrix_multiplication.c -o mm
studen@student-ThinkCentre-M72e:~/HPC$ ./mm
Matrix A:
3 2 3 1
4 2 0 3
0 2 1 2
2 2 2 4
Matrix B:
1 0 0 2
1 2 4 1
1 1 3 4
0 3 0 2
Max number of threads: 4
Number of threads: 4
Matrix C:
8 10 17 22
6 13 8 16
3 11 11 10
6 18 14 22
Execution time: 0.029086(in seconds)
*/
|
GB_Scalar_extractElement.c | //------------------------------------------------------------------------------
// GB_Scalar_extractElement_template: x = S
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Extract the value of single scalar, x = S, typecasting from the
// type of S to the type of x, as needed.
// Returns GrB_SUCCESS if the GrB_Scalar entry is present, and sets x to its
// value. Returns GrB_NO_VALUE if the GrB_Scalar is not present, and x is
// unmodified.
// This template constructs GrB_Scalar_extractElement_[TYPE] for each of the
// 13 built-in types, and the _UDT method for all user-defined types.
GrB_Info GB_EXTRACT_ELEMENT     // extract a single entry from S
(
    GB_XTYPE *x,                // scalar to extract, not modified if not found
    const GrB_Scalar S          // GrB_Scalar to extract a scalar from
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GB_RETURN_IF_NULL_OR_FAULTY (S) ;
    GB_RETURN_IF_NULL (x) ;

    // delete any lingering zombies, assemble any pending tuples, and unjumble
    if (GB_ANY_PENDING_WORK (S))
    {
        // extract scalar with pending tuples or zombies. It cannot be
        // actually jumbled, but S->jumbled might true anyway.
        GrB_Info info ;
        GB_WHERE1 (GB_WHERE_STRING) ;
        GB_BURBLE_START ("GrB_Scalar_extractElement") ;
        GB_OK (GB_wait ((GrB_Matrix) S, "s", Context)) ;
        GB_BURBLE_END ;
    }

    ASSERT (!GB_ANY_PENDING_WORK (S)) ;

    // GB_XCODE and S must be compatible
    GB_Type_code scode = S->type->code ;
    if (!GB_code_compatible (GB_XCODE, scode))
    {
        return (GrB_DOMAIN_MISMATCH) ;
    }

    // an entry can be absent in three ways, one per storage format:
    // no entries at all, a sparse/hyper scalar whose single column is empty,
    // or a bitmap scalar whose single position is unset
    if (GB_nnz ((GrB_Matrix) S) == 0            // empty
        || (S->p != NULL && S->p [1] == 0)      // sparse/hyper with no entry
        || (S->b != NULL && S->b [0] == 0))     // bitmap with no entry
    {
        // quick return
        return (GrB_NO_VALUE) ;
    }

    //--------------------------------------------------------------------------
    // extract the scalar
    //--------------------------------------------------------------------------

    #if !defined ( GB_UDT_EXTRACT )
    if (GB_XCODE == scode)
    {
        // copy S into x, no typecasting, for built-in types only.
        GB_XTYPE *restrict Sx = ((GB_XTYPE *) (S->x)) ;
        (*x) = Sx [0] ;
    }
    else
    #endif
    {
        // typecast S into x
        GB_cast_scalar (x, GB_XCODE, S->x, scode, S->type->size) ;
    }
    // NOTE(review): the flush appears intended to publish *x before
    // returning to multithreaded callers — confirm against the library's
    // threading model.
    #pragma omp flush
    return (GrB_SUCCESS) ;
}

// reset the template macros for the next instantiation of this file
#undef GB_UDT_EXTRACT
#undef GB_EXTRACT_ELEMENT
#undef GB_XTYPE
#undef GB_XCODE
|
nvptx_asm_delayed_diags.c | // RUN: %clang_cc1 -fopenmp -x c -triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -verify -fopenmp -x c -triple nvptx-unknown-unknown -aux-triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -fsyntax-only -Wuninitialized
// RUN: %clang_cc1 -verify -DDIAGS -DIMMEDIATE -fopenmp -x c -triple nvptx-unknown-unknown -aux-triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -fsyntax-only -Wuninitialized
// RUN: %clang_cc1 -verify -DDIAGS -DDELAYED -fopenmp -x c -triple nvptx-unknown-unknown -aux-triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -fsyntax-only -Wuninitialized
// REQUIRES: x86-registered-target
// REQUIRES: nvptx-registered-target
#ifndef DIAGS
// expected-no-diagnostics
#endif // DIAGS
// Inline asm with x86-only constraints.  With IMMEDIATE defined, foo is put
// on the device via the declare-target to-clause below, so the invalid 'mx'
// input constraint is diagnosed immediately on the NVPTX device compile.
// (Comments here are placed so the @+N diagnostic offsets stay intact.)
void foo(int r) {
#ifdef IMMEDIATE
// expected-error@+4 {{invalid input constraint 'mx' in asm}}
#endif // IMMEDIATE
  __asm__("PR3908 %[lf] %[xx] %[li] %[r]"
          : [ r ] "+r"(r)
          : [ lf ] "mx"(0), [ li ] "mr"(0), [ xx ] "x"((double)(0)));
}
#ifdef IMMEDIATE
#pragma omp declare target to(foo)
#endif //IMMEDIATE
#ifdef IMMEDIATE
#pragma omp declare target
#endif //IMMEDIATE
// Same invalid x86 'mx' input constraint as foo; inside the declare-target
// region (IMMEDIATE) or called from a target region (DELAYED), the device
// build must reject it in either mode.
void t1(int r) {
#ifdef DIAGS
// expected-error@+4 {{invalid input constraint 'mx' in asm}}
#endif // DIAGS
  __asm__("PR3908 %[lf] %[xx] %[li] %[r]"
          : [ r ] "+r"(r)
          : [ lf ] "mx"(0), [ li ] "mr"(0), [ xx ] "x"((double)(0)));
}
// The '=a' output constraint (x86 EAX) is invalid on the NVPTX target.
unsigned t2(signed char input) {
  unsigned output;
#ifdef DIAGS
// expected-error@+3 {{invalid output constraint '=a' in asm}}
#endif // DIAGS
  __asm__("xyz"
          : "=a"(output)
          : "0"(input));
  return output;
}
// The '=t' constraint (x87 stack top) does not exist on NVPTX.
double t3(double x) {
  register long double result;
#ifdef DIAGS
// expected-error@+3 {{invalid output constraint '=t' in asm}}
#endif // DIAGS
  __asm __volatile("frndint"
                   : "=t"(result)
                   : "0"(x));
  return result;
}
// The multi-letter '=la' output constraint is x86-specific and rejected on
// the NVPTX device compile.
unsigned char t4(unsigned char a, unsigned char b) {
  unsigned int la = a;
  unsigned int lb = b;
  unsigned int bigres;
  unsigned char res;
#ifdef DIAGS
// expected-error@+3 {{invalid output constraint '=la' in asm}}
#endif // DIAGS
  __asm__("0:\n1:\n"
          : [ bigres ] "=la"(bigres)
          : [ la ] "0"(la), [ lb ] "c"(lb)
          : "edx", "cc");
  res = bigres;
  return res;
}
// x87 register clobbers ('st', 'st(1)', ...) are unknown register names on
// NVPTX; the first one is diagnosed.
void t5(void) {
#ifdef DIAGS
// expected-error@+6 {{unknown register name 'st' in asm}}
#endif // DIAGS
  __asm__ __volatile__(
      "finit"
      :
      :
      : "st", "st(1)", "st(2)", "st(3)",
        "st(4)", "st(5)", "st(6)", "st(7)",
        "fpsr", "fpcr");
}
// AVX register clobber 'ymm0' is an unknown register name on NVPTX.
typedef long long __m256i __attribute__((__vector_size__(32)));
void t6(__m256i *p) {
#ifdef DIAGS
// expected-error@+3 {{unknown register name 'ymm0' in asm}}
#endif // DIAGS
  __asm__ volatile("vmovaps %0, %%ymm0" ::"m"(*(__m256i *)p)
                   : "ymm0");
}
#ifdef IMMEDIATE
#pragma omp end declare target
#endif //IMMEDIATE
// In DELAYED mode the calls are wrapped in a target region, so each device
// diagnostic is attributed back to main via a call-chain note.
int main() {
#ifdef DELAYED
#pragma omp target
#endif // DELAYED
  {
#ifdef DELAYED
// expected-note@+2 {{called by 'main'}}
#endif // DELAYED
    t1(0);
#ifdef DELAYED
// expected-note@+2 {{called by 'main'}}
#endif // DELAYED
    t2(0);
#ifdef DELAYED
// expected-note@+2 {{called by 'main'}}
#endif // DELAYED
    t3(0);
#ifdef DELAYED
// expected-note@+2 {{called by 'main'}}
#endif // DELAYED
    t4(0, 0);
#ifdef DELAYED
// expected-note@+2 {{called by 'main'}}
#endif // DELAYED
    t5();
#ifdef DELAYED
// expected-note@+2 {{called by 'main'}}
#endif // DELAYED
    t6(0);
  }
  return 0;
}
|
_dd_linalg.c | /* Python extension module for linear algebra functions.
*
* Copyright (C) 2021 Markus Wallerberger and others
* SPDX-License-Identifier: MIT
*/
#include "Python.h"
#include "math.h"
#include "stdio.h"
#include "dd_arith.h"
#include "dd_linalg.h"
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
#include "numpy/ndarraytypes.h"
#include "numpy/ufuncobject.h"
#include "numpy/npy_3kcompat.h"
/**
* Allows parameter to be marked unused
*/
#define MARK_UNUSED(x) do { (void)(x); } while(false)
/************************ Linear algebra ***************************/
/* Matrix-multiplication inner loop for the ddouble gufunc.
 *
 * Gufunc signature (n;i,j),(n;j,k)->(n;i,k): for each outer element n,
 * computes C = A @ B in double-double arithmetic via mulqq/addqq.
 * args/dims/steps follow the numpy generalized-ufunc inner-loop convention;
 * byte strides are converted to ddouble-element strides below (assumes the
 * strides are multiples of sizeof(ddouble)).
 */
static void u_matmulq(char **args, const npy_intp *dims, const npy_intp* steps,
                      void *data)
{
    // signature (n;i,j),(n;j,k)->(n;i,k)
    const npy_intp nn = dims[0], ii = dims[1], jj = dims[2], kk = dims[3];
    const npy_intp _san = steps[0], _sbn = steps[1], _scn = steps[2],
                   _sai = steps[3], _saj = steps[4], _sbj = steps[5],
                   _sbk = steps[6], _sci = steps[7], _sck = steps[8];
    char *_a = args[0], *_b = args[1], *_c = args[2];
    const npy_intp sai = _sai / sizeof(ddouble), saj = _saj / sizeof(ddouble),
                   sbj = _sbj / sizeof(ddouble), sbk = _sbk / sizeof(ddouble),
                   sci = _sci / sizeof(ddouble), sck = _sck / sizeof(ddouble);

    for (npy_intp n = 0; n != nn; ++n, _a += _san, _b += _sbn, _c += _scn) {
        const ddouble *a = (const ddouble *)_a, *b = (const ddouble *)_b;
        ddouble *c = (ddouble *)_c;

        // The (i, k) output elements are independent, so the two outer
        // loops are collapsed and parallelized; val/tmp are declared inside
        // the parallel region and therefore thread-private.
        #pragma omp parallel for collapse(2)
        for (npy_intp i = 0; i < ii; ++i) {
            for (npy_intp k = 0; k < kk; ++k) {
                ddouble val = Q_ZERO, tmp;
                for (npy_intp j = 0; j < jj; ++j) {
                    tmp = mulqq(a[i * sai + j * saj], b[j * sbj + k * sbk]);
                    val = addqq(val, tmp);
                }
                c[i * sci + k * sck] = val;
            }
        }
    }
    MARK_UNUSED(data);
}
/****************************** Helper functions *************************/
/* Copy a 2-D strided array of ddouble from `in` to `out`, unless the two
 * pointers already alias the same buffer (in == out), in which case the
 * routine is a no-op.  All strides are byte strides, as in the numpy
 * gufunc machinery (si* = input strides, so* = output strides). */
static void ensure_inplace_2(
    char *in, char *out, npy_intp n1, npy_intp si1, npy_intp so1,
    npy_intp n2, npy_intp si2, npy_intp so2)
{
    if (in == out)
        return;     /* already operating in place */

    for (npy_intp i1 = 0; i1 != n1; ++i1) {
        char *src_row = in + i1 * si1;
        char *dst_row = out + i1 * so1;
        for (npy_intp i2 = 0; i2 != n2; ++i2)
            *(ddouble *)(dst_row + i2 * so2) = *(ddouble *)(src_row + i2 * si2);
    }
}
/* Copy a 3-D strided array of ddouble from `in` to `out`, skipping the copy
 * when both pointers refer to the same buffer.  All strides are byte
 * strides (si* = input, so* = output). */
static void ensure_inplace_3(
    char *in, char *out, npy_intp n1, npy_intp si1, npy_intp so1,
    npy_intp n2, npy_intp si2, npy_intp so2, npy_intp n3, npy_intp si3,
    npy_intp so3)
{
    if (in == out)
        return;     /* nothing to do: operating in place */

    for (npy_intp i1 = 0; i1 != n1; ++i1) {
        char *src1 = in + i1 * si1;
        char *dst1 = out + i1 * so1;
        for (npy_intp i2 = 0; i2 != n2; ++i2) {
            char *src2 = src1 + i2 * si2;
            char *dst2 = dst1 + i2 * so2;
            for (npy_intp i3 = 0; i3 != n3; ++i3)
                *(ddouble *)(dst2 + i3 * so3) = *(ddouble *)(src2 + i3 * si3);
        }
    }
}
/*************************** More complicated ***********************/
/* Vector 2-norm inner loop: gufunc signature (n;i)->(n;).
 * Delegates to normq(), converting the input vector's byte stride to a
 * ddouble-element stride. */
static void u_normq(
    char **args, const npy_intp *dims, const npy_intp* steps, void *data)
{
    // signature (n;i)->(n;)
    const npy_intp nn = dims[0], ii = dims[1];
    const npy_intp san = steps[0], sbn = steps[1], _sai = steps[2];
    char *_a = args[0], *_b = args[1];

    for (npy_intp n = 0; n != nn; ++n, _a += san, _b += sbn) {
        *(ddouble *)_b = normq((const ddouble *)_a, ii, _sai / sizeof(ddouble));
    }
    MARK_UNUSED(data);
}
/* Householder reflector inner loop: gufunc signature (n;i)->(n;),(n;i).
 * For each input vector a, householderq() writes the reflector vector into
 * c and returns a scalar stored in b (the reflector coefficient per
 * householderq's contract -- see dd_linalg.h). */
static void u_householderq(
    char **args, const npy_intp *dims, const npy_intp* steps, void *data)
{
    // signature (n;i)->(n;),(n;i)
    const npy_intp nn = dims[0], ii = dims[1];
    const npy_intp _san = steps[0], _sbn = steps[1], _scn = steps[2],
                   _sai = steps[3], _sci = steps[4];
    char *_a = args[0], *_b = args[1], *_c = args[2];

    for (npy_intp n = 0; n != nn; ++n, _a += _san, _b += _sbn, _c += _scn) {
        *(ddouble *)_b = householderq(
            (const ddouble *)_a, (ddouble *)_c, ii,
            _sai / sizeof(ddouble), _sci / sizeof(ddouble));
    }
    MARK_UNUSED(data);
}
/* Rank-1 update inner loop: gufunc signature (n;i,j),(n;i),(n;j)->(n;i,j).
 * The input matrix a is first copied into the output d (unless the ufunc
 * already operates in place); rank1updateq() then updates d in place with
 * the outer product of b and c. */
static void u_rank1updateq(
    char **args, const npy_intp *dims, const npy_intp* steps, void *data)
{
    // signature (n;i,j),(n;i),(n;j)->(n;i,j)
    const npy_intp nn = dims[0], ii = dims[1], jj = dims[2];
    const npy_intp _san = steps[0], _sbn = steps[1], _scn = steps[2],
                   _sdn = steps[3], _sai = steps[4], _saj = steps[5],
                   _sbi = steps[6], _scj = steps[7], _sdi = steps[8],
                   _sdj = steps[9];
    char *_a = args[0], *_b = args[1], *_c = args[2], *_d = args[3];

    ensure_inplace_3(_a, _d, nn, _san, _sdn, ii, _sai, _sdi, jj, _saj, _sdj);

    /* BUG FIX: the output pointer _d must advance with the inputs
     * (previously it was never incremented, so every outer iteration
     * overwrote the first output slice), and the output matrix must be
     * indexed with its own strides _sdi/_sdj rather than the input
     * strides _sai/_saj.  _a is fully consumed by the copy above. */
    for (npy_intp n = 0; n != nn; ++n, _b += _sbn, _c += _scn, _d += _sdn) {
        rank1updateq(
            (ddouble *)_d, _sdi / sizeof(ddouble), _sdj / sizeof(ddouble),
            (const ddouble *)_b, _sbi / sizeof(ddouble),
            (const ddouble *)_c, _scj / sizeof(ddouble), ii, jj);
    }
    MARK_UNUSED(data);
}
/* One-sided Jacobi sweep inner loop.
 * Gufunc signature (n;i,j),(n;j,j)->(n;i,j),(n;j,j),(n;): the inputs a and
 * b are first copied into the outputs c and d, then jacobi_sweep() updates
 * c and d in place and returns a scalar stored in e (per jacobi_sweep's
 * contract -- see dd_linalg.h). */
static void u_jacobisweepq(
    char **args, const npy_intp *dims, const npy_intp* steps, void *data)
{
    // signature (n;i,j),(n;i=j,j)->(n;i,j),(n;i=j,j);(n,)
    const npy_intp nn = dims[0], ii = dims[1], jj = dims[2];
    const npy_intp _san = steps[0], _sbn = steps[1], _scn = steps[2],
                   _sdn = steps[3], _sen = steps[4], _sai = steps[5],
                   _saj = steps[6], _sbi = steps[7], _sbj = steps[8],
                   _sci = steps[9], _scj = steps[10], _sdi = steps[11],
                   _sdj = steps[12];
    char *_a = args[0], *_b = args[1], *_c = args[2], *_d = args[3],
         *_e = args[4];

    // materialize the inputs in the output buffers (no-op when in place)
    ensure_inplace_3(_a, _c, nn, _san, _scn, ii, _sai, _sci, jj, _saj, _scj);
    ensure_inplace_3(_b, _d, nn, _sbn, _sdn, jj, _sbi, _sdi, jj, _sbj, _sdj);

    // only the output pointers advance: a and b were fully consumed by the
    // copies above
    for (npy_intp n = 0; n != nn; ++n, _c += _scn, _d += _sdn, _e += _sen) {
        ddouble *c = (ddouble *)_c, *d = (ddouble *)_d, *e = (ddouble *)_e;
        const npy_intp
            sci = _sci / sizeof(ddouble), scj = _scj / sizeof(ddouble),
            sdi = _sdi / sizeof(ddouble), sdj = _sdj / sizeof(ddouble);
        *e = jacobi_sweep(c, sci, scj, d, sdi, sdj, ii, jj);
    }
    MARK_UNUSED(data);
}
static void u_givensq(
char **args, const npy_intp *dims, const npy_intp* steps, void *data)
{
// signature (n;2)->(n;2),(n;2,2)
const npy_intp nn = dims[0];
const npy_intp san = steps[0], sbn = steps[1], scn = steps[2],
sai = steps[3], sbi = steps[4], sci = steps[5],
scj = steps[6];
char *_a = args[0], *_b = args[1], *_c = args[2];
for (npy_intp n = 0; n != nn; ++n, _a += san, _b += sbn, _c += scn) {
ddouble f = *(ddouble *) _a;
ddouble g = *(ddouble *) (_a + sai);
ddouble c, s, r;
givensq(f, g, &c, &s, &r);
*(ddouble *)_b = r;
*(ddouble *)(_b + sbi) = Q_ZERO;
*(ddouble *)_c = c;
*(ddouble *)(_c + scj) = s;
*(ddouble *)(_c + sci) = negq(s);
*(ddouble *)(_c + sci + scj) = c;
}
MARK_UNUSED(data);
}
/* Apply a sequence of Givens rotations to a matrix.
 * Gufunc signature (n;i,2),(n;i,j)->(n;i,j).  Note dims[2] is the frozen
 * core dimension "2", so the column count jj lives in dims[3].
 * The input matrix b is first copied into the output c; rotation i is then
 * applied to rows i and i+1 of c in place. */
static void u_givens_seqq(
    char **args, const npy_intp *dims, const npy_intp* steps, void *data)
{
    // signature (n;i,2),(n;i,j)->(n;i,j)
    const npy_intp nn = dims[0], ii = dims[1], jj = dims[3];
    const npy_intp _san = steps[0], _sbn = steps[1], _scn = steps[2],
                   _sai = steps[3], _saq = steps[4], _sbi = steps[5],
                   _sbj = steps[6], _sci = steps[7], _scj = steps[8];
    char *_a = args[0], *_b = args[1], *_c = args[2];

    // materialize b in the output buffer (no-op when already in place)
    ensure_inplace_3(_b, _c, nn, _sbn, _scn, ii, _sbi, _sci, jj, _sbj, _scj);

    for (npy_intp n = 0; n != nn; ++n, _a += _san, _c += _scn) {
        /* The rotation are interdependent, so we splice the array in
         * the other direction.
         */
        #pragma omp parallel for
        for (npy_intp j = 0; j < jj; ++j) {
            // rotations along i are sequentially dependent (rotation i
            // touches rows i and i+1), so only the columns j run in parallel
            for (npy_intp i = 0; i < ii - 1; ++i) {
                ddouble *c_x = (ddouble *)(_c + i *_sci + j * _scj);
                ddouble *c_y = (ddouble *)(_c + (i + 1) *_sci + j * _scj);
                ddouble g_cos = *(ddouble *)(_a + i * _sai);
                ddouble g_sin = *(ddouble *)(_a + i * _sai + _saq);
                lmul_givensq(c_x, c_y, g_cos, g_sin, *c_x, *c_y);
            }
        }
    }
    MARK_UNUSED(data);
}
/* Golub-Kahan chase inner loop.
 * Gufunc signature (n;i),(n;i)->(n;i),(n;i),(n;i,4): the two input vectors
 * a and b are copied into the outputs c and d (presumably the diagonal and
 * off-diagonal of a bidiagonal matrix -- see golub_kahan_chaseq), which are
 * then chased in place; the applied rotations are recorded in e. */
static void u_golub_kahan_chaseq(
    char **args, const npy_intp *dims, const npy_intp* steps, void *data)
{
    // signature (n;i),(n;i)->(n;i),(n;i),(n;i,4)
    const npy_intp nn = dims[0], ii = dims[1];
    const npy_intp _san = steps[0], _sbn = steps[1], _scn = steps[2],
                   _sdn = steps[3], _sen = steps[4], _sai = steps[5],
                   _sbi = steps[6], _sci = steps[7], _sdi = steps[8],
                   _sei = steps[9], _se4 = steps[10];
    char *_a = args[0], *_b = args[1], *_c = args[2], *_d = args[3],
         *_e = args[4];

    // materialize the inputs in the output buffers (no-op when in place)
    ensure_inplace_2(_a, _c, nn, _san, _scn, ii, _sai, _sci);
    ensure_inplace_2(_b, _d, nn, _sbn, _sdn, ii, _sbi, _sdi);

    // golub_kahan_chaseq takes no strides for the rotation buffer, so e
    // must be C-contiguous; bail out otherwise
    if (_se4 != sizeof(ddouble) || _sei != 4 * sizeof(ddouble)) {
        fprintf(stderr, "rot is not contiguous, but needs to be");
        return;
    }
    for (npy_intp n = 0; n != nn; ++n, _c += _scn, _d += _sdn, _e += _sen) {
        golub_kahan_chaseq((ddouble *)_c, _sci / sizeof(ddouble),
                           (ddouble *)_d, _sdi / sizeof(ddouble),
                           ii, (ddouble *)_e);
    }
    MARK_UNUSED(data);
}
/* SVD of a 2x2 problem: gufunc signature (n;2,2)->(n;2,2),(n;2),(n;2,2).
 * svd_2x2() yields singular values (smin, smax) and the cosine/sine pairs
 * (cu, su) and (cv, sv); outputs are the rotation built from (cu, su), the
 * singular values in descending order, and the rotation built from
 * (cv, sv). */
static void u_svd_2x2(
    char **args, const npy_intp *dims, const npy_intp* steps, void *data)
{
    // signature (n;2,2)->(n;2,2),(n;2),(n;2,2)
    const npy_intp nn = dims[0];
    const npy_intp san = steps[0], sbn = steps[1], scn = steps[2],
                   sdn = steps[3], sai = steps[4], saj = steps[5],
                   sbi = steps[6], sbj = steps[7], sci = steps[8],
                   sdi = steps[9], sdj = steps[10];
    char *_a = args[0], *_b = args[1], *_c = args[2], *_d = args[3];

    for (npy_intp n = 0; n != nn;
            ++n, _a += san, _b += sbn, _c += scn, _d += sdn) {
        // load the 2x2 input matrix
        ddouble a11 = *(ddouble *) _a;
        ddouble a12 = *(ddouble *) (_a + saj);
        ddouble a21 = *(ddouble *) (_a + sai);
        ddouble a22 = *(ddouble *) (_a + sai + saj);

        ddouble smin, smax, cu, su, cv, sv;
        svd_2x2(a11, a12, a21, a22, &smin, &smax, &cv, &sv, &cu, &su);

        // first output: rotation [[cu, -su], [su, cu]]
        *(ddouble *)_b = cu;
        *(ddouble *)(_b + sbj) = negq(su);
        *(ddouble *)(_b + sbi) = su;
        *(ddouble *)(_b + sbi + sbj) = cu;

        // second output: singular values, largest first
        *(ddouble *)_c = smax;
        *(ddouble *)(_c + sci) = smin;

        // third output: rotation [[cv, sv], [-sv, cv]]
        *(ddouble *)_d = cv;
        *(ddouble *)(_d + sdj) = sv;
        *(ddouble *)(_d + sdi) = negq(sv);
        *(ddouble *)(_d + sdi + sdj) = cv;
    }
    MARK_UNUSED(data);
}
static void u_svvals_2x2(
char **args, const npy_intp *dims, const npy_intp* steps, void *data)
{
// signature (n;2,2)->(n;2)
const npy_intp nn = dims[0];
const npy_intp san = steps[0], sbn = steps[1], sai = steps[2],
saj = steps[3], sbi = steps[4];
char *_a = args[0], *_b = args[1];
for (npy_intp n = 0; n != nn; ++n, _a += san, _b += sbn) {
ddouble a11 = *(ddouble *) _a;
ddouble a12 = *(ddouble *) (_a + saj);
ddouble a21 = *(ddouble *) (_a + sai);
ddouble a22 = *(ddouble *) (_a + sai + saj);
ddouble smin, smax;
svd_2x2(a11, a12, a21, a22, &smin, &smax, NULL, NULL, NULL, NULL);
*(ddouble *)_b = smax;
*(ddouble *)(_b + sbi) = smin;
}
MARK_UNUSED(data);
}
/* ----------------------- Python stuff -------------------------- */
static PyObject *module;
static PyObject *numpy_module = NULL;
static int type_num;
/* Create the bare _dd_linalg extension module (it exposes no methods of its
 * own; the gufuncs are attached later in PyInit__dd_linalg).  Stores the
 * new module in the file-global `module` and returns it, or NULL on
 * failure. */
static PyObject *make_module()
{
    // the module itself defines no plain Python methods
    static PyMethodDef no_methods[] = {
        {NULL, NULL, 0, NULL} // No methods defined
    };
    static struct PyModuleDef module_def = {
        PyModuleDef_HEAD_INIT,
        "_dd_linalg",
        NULL,
        -1,
        no_methods,
        NULL,
        NULL,
        NULL,
        NULL
    };

    module = PyModule_Create(&module_def);
    return module;
}
/* Import xprec._dd_ufunc, which registers the ddouble dtype with numpy as a
 * side effect, then look up its numpy type number into the file-global
 * `type_num`.  Returns 0 on success, -1 on failure (with a Python error
 * set). */
static int import_ddouble_dtype()
{
    PyObject *dd_module = PyImport_ImportModule("xprec._dd_ufunc");
    if (dd_module == NULL)
        return -1;
    /* FIX: drop the new reference returned by PyImport_ImportModule; the
     * module object itself is not used further (it stays alive in
     * sys.modules). */
    Py_DECREF(dd_module);

    // Now, ddouble should be defined
    type_num = PyArray_TypeNumFromName("ddouble");
    if (type_num == NPY_NOTYPE)
        return -1;
    return 0;
}
/* Register a ddouble inner loop `uloop` for a generalized ufunc.
 *
 * If in_numpy is true, the loop is attached to the existing numpy ufunc of
 * the same name; otherwise a fresh gufunc with `signature` is created and
 * added to this module under `name`.  Returns 0 on success, -1 on failure
 * (with a Python error set). */
static int gufunc(
    PyUFuncGenericFunction uloop, int nin, int nout,
    const char *signature, const char *name, const char *docstring,
    bool in_numpy)
{
    PyUFuncObject *ufunc = NULL;
    int *arg_types = NULL, retcode = 0;

    if (in_numpy) {
        ufunc = (PyUFuncObject *)PyObject_GetAttrString(numpy_module, name);
    } else {
        ufunc = (PyUFuncObject *)PyUFunc_FromFuncAndDataAndSignature(
                    NULL, NULL, NULL, 0, nin, nout, PyUFunc_None, name,
                    docstring, 0, signature);
    }
    if (ufunc == NULL) goto error;

    /* BUG FIX: the dtype array was previously allocated into a separate
     * `dtypes` variable that was never passed to
     * PyUFunc_RegisterLoopForType (which received the NULL `arg_types`)
     * and then leaked.  Fill `arg_types` itself: every input and output
     * of the loop has the ddouble type. */
    arg_types = PyMem_New(int, nin + nout);
    if (arg_types == NULL) goto error;
    for (int i = 0; i != nin + nout; ++i)
        arg_types[i] = type_num;

    retcode = PyUFunc_RegisterLoopForType(ufunc, type_num,
                                          uloop, arg_types, NULL);
    if (retcode < 0) goto error;

    /* PyUFunc_RegisterLoopForType copies arg_types internally, so the
     * caller-side array can be released. */
    PyMem_Free(arg_types);
    return PyModule_AddObject(module, name, (PyObject *)ufunc);

error:
    if (!in_numpy)
        Py_XDECREF(ufunc);
    PyMem_Free(arg_types);
    return -1;
}
PyMODINIT_FUNC PyInit__dd_linalg(void)
{
if (!make_module())
return NULL;
/* Initialize numpy things */
import_array();
import_umath();
numpy_module = PyImport_ImportModule("numpy");
if (numpy_module == NULL)
return NULL;
if (import_ddouble_dtype() < 0)
return NULL;
gufunc(u_normq, 1, 1, "(i)->()",
"norm", "Vector 2-norm", false);
gufunc(u_matmulq, 2, 1, "(i?,j),(j,k?)->(i?,k?)",
"matmul", "Matrix multiplication", true);
gufunc(u_givensq, 1, 2, "(2)->(2),(2,2)",
"givens", "Generate Givens rotation", false);
gufunc(u_givens_seqq, 2, 1, "(i,2),(i,j?)->(i,j?)",
"givens_seq", "apply sequence of givens rotation to matrix", false);
gufunc(u_householderq, 1, 2, "(i)->(),(i)",
"householder", "Generate Householder reflectors", false);
gufunc(u_rank1updateq, 3, 1, "(i,j),(i),(j)->(i,j)",
"rank1update", "Perform rank-1 update of matrix", false);
gufunc(u_svd_2x2, 1, 3, "(2,2)->(2,2),(2),(2,2)",
"svd2x2", "SVD of upper triangular 2x2 problem", false);
gufunc(u_svvals_2x2, 1, 1, "(2,2)->(2)",
"svvals2x2", "singular values of upper triangular 2x2 problem", false);
gufunc(u_jacobisweepq, 2, 3, "(i,j),(j,j)->(i,j),(j,j),()",
"jacobi_sweep", "Perform sweep of one-sided Jacobi rotations", false);
gufunc(u_golub_kahan_chaseq, 2, 3, "(i),(i)->(i),(i),(i,4)",
"golub_kahan_chase", "bidiagonal chase procedure", false);
/* Make dtype */
PyArray_Descr *dtype = PyArray_DescrFromType(NPY_CDOUBLE);
PyModule_AddObject(module, "dtype", (PyObject *)dtype);
/* Module is ready */
return module;
}
|
VerletClusterListsTest.h | /**
* @file VerletClusterListsTest.h
* @author nguyen
* @date 21.10.18
*/
#pragma once
#include <gtest/gtest.h>
#include "AutoPasTestBase.h"
#include "autopas/cells/FullParticleCell.h"
#include "autopas/containers/verletClusterLists/traversals/VCLC06Traversal.h"
#include "autopas/particles/Particle.h"
#include "autopas/utils/WrapOpenMP.h"
#include "autopasTools/generators/RandomGenerator.h"
#include "mocks/MockFunctor.h"
#include "testingHelpers/commonTypedefs.h"
/// Test fixture for VerletClusterLists; adds no state beyond AutoPasTestBase.
class VerletClusterListsTest : public AutoPasTestBase {};
/**
 * Functor that records every particle pair (i, j) it is invoked with, as
 * long as the pair is within the cutoff and both particles lie inside the
 * box [_min, _max].  Tests use the collected list to compare a traversal's
 * interactions against a reference.
 */
class CollectParticlePairsFunctor : public autopas::Functor<autopas::Particle, CollectParticlePairsFunctor> {
 public:
  std::vector<std::pair<Particle *, Particle *>> _pairs{};  // collected interaction pairs
  std::array<double, 3> _min;  // lower corner of the observed box
  std::array<double, 3> _max;  // upper corner of the observed box

  CollectParticlePairsFunctor(double cutoff, std::array<double, 3> min, std::array<double, 3> max)
      : Functor(cutoff), _min(min), _max(max) {}

  /// Discards pairs collected by any previous traversal.
  void initTraversal() override { _pairs.clear(); }

  void AoSFunctor(Particle &i, Particle &j, bool newton3) override {
    auto dist = autopas::utils::ArrayMath::sub(i.getR(), j.getR());
    // ignore pairs beyond the cutoff or with a particle outside the box
    if (autopas::utils::ArrayMath::dot(dist, dist) > getCutoff() * getCutoff() or
        not autopas::utils::inBox(i.getR(), _min, _max) or not autopas::utils::inBox(j.getR(), _min, _max))
      return;
    // _pairs is shared across threads, hence the critical section
#if defined(AUTOPAS_OPENMP)
#pragma omp critical
#endif
    {
      _pairs.emplace_back(&i, &j);
      // with newton3 one call covers both directions, so record the
      // symmetric pair as well
      if (newton3) _pairs.emplace_back(&j, &i);
    };
  }

  bool isRelevantForTuning() override { return false; }

  bool allowsNewton3() override { return true; }

  bool allowsNonNewton3() override { return true; }

  bool isAppropriateClusterSize(unsigned int clusterSize, autopas::DataLayoutOption::Value dataLayout) const override {
    return true;
  }

  /// Returns a copy of the collected pairs.
  auto getParticlePairs() { return _pairs; }
};
#if defined(AUTOPAS_OPENMP)
/**
 * Functor that records, per color and per OpenMP thread, the set of
 * particles the thread touched.  Tests use this to verify that threads
 * processing the same color work on disjoint particles.
 */
class CollectParticlesPerThreadFunctor : public autopas::Functor<autopas::Particle, CollectParticlesPerThreadFunctor> {
 public:
  int _currentColor{};  // color currently being processed (set via nextColor)

  // one particle set per thread, for each of up to 8 colors
  std::array<std::vector<std::set<Particle *>>, 8> _particlesPerThreadPerColor;

 public:
  CollectParticlesPerThreadFunctor() : Functor(0) {}

  /// Sizes each color's vector to the maximum number of OpenMP threads.
  void initTraversal() override {
    for (int i = 0; i < 8; i++) {
      _particlesPerThreadPerColor[i].resize(autopas::autopas_get_max_threads());
    }
  }

  void AoSFunctor(Particle &i, Particle &j, bool newton3) override {
    if (i.isDummy() or j.isDummy()) {
      return;
    }
    // record both interaction partners under the current color and thread
    auto threadNum = autopas::autopas_get_thread_num();
    _particlesPerThreadPerColor[_currentColor][threadNum].insert(&i);
    _particlesPerThreadPerColor[_currentColor][threadNum].insert(&j);
  }

  bool isRelevantForTuning() override { return false; }

  bool allowsNewton3() override { return true; }

  bool allowsNonNewton3() override { return true; }

  bool isAppropriateClusterSize(unsigned int clusterSize, autopas::DataLayoutOption::Value dataLayout) const override {
    return dataLayout == autopas::DataLayoutOption::aos;  // this functor supports clusters only for aos!
  }

  /// Called by the traversal whenever it switches to a new color.
  void nextColor(int newColor) { _currentColor = newColor; }
};
/**
 * VCLC06 traversal that reports every color change to a user-supplied
 * callback, so tests can observe which color is currently active.
 */
class ColoringTraversalWithColorChangeNotify
    : public autopas::VCLC06Traversal<FPCell, CollectParticlesPerThreadFunctor, autopas::DataLayoutOption::aos, true> {
 public:
  ColoringTraversalWithColorChangeNotify(CollectParticlesPerThreadFunctor *functor, size_t clusterSize,
                                         std::function<void(int)> whenColorChanges)
      : autopas::VCLC06Traversal<FPCell, CollectParticlesPerThreadFunctor, autopas::DataLayoutOption::aos, true>(
            functor, clusterSize) {
    _whenColorChanges = std::move(whenColorChanges);
  }

  /// Forwards the new color to the registered callback.
  void notifyColorChange(unsigned long newColor) override { _whenColorChanges(newColor); }

 private:
  std::function<void(int)> _whenColorChanges;  // invoked with each new color
};
#endif |
DRB020-privatemissing-var-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
tmp should be put as private to avoid race condition
Data race pair: tmp@65 vs. tmp@66
*/
#include <stdio.h>
#include <stdlib.h>
/* Initializes a[i] = i in parallel, then updates each element through a
 * per-thread temporary, and prints the result. */
int main(int argc, char* argv[])
{
  int i;
  int tmp;
  int len=100;

  /* optional array length from the command line */
  if (argc>1)
    len = atoi(argv[1]);
  int a[len];  /* C99 variable-length array */

#pragma omp parallel for private(i)
  for (i=0;i<len;i++)
    a[i]=i;

  /* NOTE(review): the file header documents a data race on tmp and the
   * filename ends in "-yes" (race present), but tmp is declared private
   * here, which removes that race -- confirm against the original DRB020
   * benchmark source. */
#pragma omp parallel for private(i) private(tmp)
  for (i=0;i<len;i++)
  {
    tmp =a[i]+i;
    a[i] = tmp;
  }

  for (i=0;i<len;i++)
    printf("%d\n", a[i]);
  return 0;
}
|
mandel-omp-task-Row.c | /*
* Sequential Mandelbrot program
*
* This program computes and displays all or part of the Mandelbrot
* set. By default, it examines all points in the complex plane
* that have both real and imaginary parts between -2 and 2.
* Command-line parameters allow zooming in on a specific part of
* this range.
*
* Usage:
* mandel [-i maxiter -c x0 y0 -s size -w windowsize]
* where
* maxiter denotes the maximum number of iterations at each point -- by default 1000
* x0, y0, and size specify the range to examine (a square
* centered at (x0 + iy0) of size 2*size by 2*size -- by default,
* a square of size 4 by 4 centered at the origin)
* windowsize denotes the size of the image (diplay window) to compute
*
* Input: none, except the optional command-line arguments
* Output: a graphical display as described in Wilkinson & Allen,
* displayed using the X Window system, plus text output to
* standard output showing the above parameters, plus execution
* time in seconds.
*
* Code based on the original code from Web site for Wilkinson and Allen's
* text on parallel programming:
* http://www.cs.uncc.edu/~abw/parallel/par_prog/
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <malloc.h>
#if _DISPLAY_
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <X11/Xos.h>
#endif
#include <sys/time.h>
/* Return the current wall-clock time in microseconds, as a double. */
double getusec_() {
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec * 1e6 + (double)now.tv_usec;
}
#define START_COUNT_TIME stamp = getusec_();
#define STOP_COUNT_TIME(_m) stamp = getusec_() - stamp;\
stamp = stamp/1e6;\
printf ("%s: %0.6fs\n",(_m), stamp);
/* Default values for things. */
#define N 2 /* size of problem space (x, y from -N to N) */
#define NPIXELS 800 /* size of display window in pixels */
int row, col; // variables used to traverse the problem space
/* Structure definition for complex numbers */
typedef struct {
double real, imag;
} complex;
#if _DISPLAY_
/* Functions for GUI */
#include "mandelbrot-gui.h" /* has setup(), interact() */
#endif
/* Compute the Mandelbrot escape count for every point of a height x width
 * grid covering the region starting at (real_min, imag_min) with per-pixel
 * steps scale_real/scale_imag, stopping each point after maxiter
 * iterations.  With _DISPLAY_ the result is drawn via X11; otherwise the
 * counts are stored in output[row][col].
 *
 * Parallelization: one OpenMP task per row.  Note that row and col are
 * file-scope globals; only the single task-generating thread mutates row,
 * and each task captures its own copy via firstprivate(row) with col
 * private inside the task. */
void mandelbrot(int height,
                int width,
                double real_min,
                double imag_min,
                double scale_real,
                double scale_imag,
                int maxiter,
#if _DISPLAY_
                int setup_return,
                Display *display,
                Window win,
                GC gc,
                double scale_color,
                double min_color)
#else
                int ** output)
#endif
{
    /* Calculate points and save/display */
    //#pragma omp for schedule(runtime)
    #pragma omp parallel
    #pragma omp single
    for (row = 0; row < height; ++row) {
        #pragma omp task firstprivate(row) private(col)
        for (col = 0; col < width; ++col) {
            {
                complex z, c;
                z.real = z.imag = 0;

                /* Scale display coordinates to actual region */
                c.real = real_min + ((double) col * scale_real);
                c.imag = imag_min + ((double) (height-1-row) * scale_imag);
                /* height-1-row so y axis displays
                 * with larger values at top
                 */

                /* Calculate z0, z1, .... until divergence or maximum iterations */
                int k = 0;
                double lengthsq, temp;
                do {
                    temp = z.real*z.real - z.imag*z.imag + c.real;
                    z.imag = 2*z.real*z.imag + c.imag;
                    z.real = temp;
                    lengthsq = z.real*z.real + z.imag*z.imag;
                    ++k;
                } while (lengthsq < (N*N) && k < maxiter);

#if _DISPLAY_
                /* Scale color and display point */
                long color = (long) ((k-1) * scale_color) + min_color;
                if (setup_return == EXIT_SUCCESS) {
                    /* Xlib calls are not thread-safe here, so serialize them */
                    #pragma omp critical
                    {
                        XSetForeground (display, gc, color);
                        XDrawPoint (display, win, gc, col, row);
                    }
                }
#else
                output[row][col]=k;
#endif
            }
        }
    }
}
/* Parse command-line options, compute the Mandelbrot image via
 * mandelbrot(), time the computation, and either display it (X11 build)
 * or optionally dump the raw iteration counts to mandel.out.
 *
 * NOTE(review): in the non-display build the malloc'd output rows are
 * never freed and fp is never fclose'd before main returns; also main
 * falls off the end without an explicit return (implicitly 0 in C99). */
int main(int argc, char *argv[]) {
    int maxiter = 1000;
    double real_min;
    double real_max;
    double imag_min;
    double imag_max;
    int width = NPIXELS;         /* dimensions of display window */
    int height = NPIXELS;
    double size=N, x0 = 0, y0 = 0;
#if _DISPLAY_
    Display *display;
    Window win;
    GC gc;
    int setup_return;
    long min_color = 0, max_color = 0;
    double scale_color;
#else
    int ** output;
    FILE *fp = NULL;
#endif
    double scale_real, scale_imag;

    /* Process command-line arguments */
    for (int i=1; i<argc; i++) {
        if (strcmp(argv[i], "-i")==0) {
            maxiter = atoi(argv[++i]);
        }
        else if (strcmp(argv[i], "-w")==0) {
            width = atoi(argv[++i]);
            height = width;   /* the image is always square */
        }
        else if (strcmp(argv[i], "-s")==0) {
            size = atof(argv[++i]);
        }
#if !_DISPLAY_
        else if (strcmp(argv[i], "-o")==0) {
            if((fp=fopen("mandel.out", "wb"))==NULL) {
                fprintf(stderr, "Unable to open file\n");
                return EXIT_FAILURE;
            }
        }
#endif
        else if (strcmp(argv[i], "-c")==0) {
            x0 = atof(argv[++i]);
            y0 = atof(argv[++i]);
        }
        else {
            /* unknown option: print usage and exit */
#if _DISPLAY_
            fprintf(stderr, "Usage: %s [-i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
#else
            fprintf(stderr, "Usage: %s [-o -i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
            fprintf(stderr, " -o to write computed image to disk (default no file generated)\n");
#endif
            fprintf(stderr, " -i to specify maximum number of iterations at each point (default 1000)\n");
#if _DISPLAY_
            fprintf(stderr, " -w to specify the size of the display window (default 800x800 pixels)\n");
#else
            fprintf(stderr, " -w to specify the size of the image to compute (default 800x800 elements)\n");
#endif
            fprintf(stderr, " -c to specify the center x0+iy0 of the square to compute (default origin)\n");
            fprintf(stderr, " -s to specify the size of the square to compute (default 2, i.e. size 4 by 4)\n");
            return EXIT_FAILURE;
        }
    }

    /* region bounds: a square of side 2*size centered at (x0, y0) */
    real_min = x0 - size;
    real_max = x0 + size;
    imag_min = y0 - size;
    imag_max = y0 + size;

    /* Produce text output */
    fprintf(stdout, "\n");
    fprintf(stdout, "Mandelbrot program\n");
    fprintf(stdout, "center = (%g, %g), size = %g\n",
            (real_max + real_min)/2, (imag_max + imag_min)/2,
            (real_max - real_min)/2);
    fprintf(stdout, "maximum iterations = %d\n", maxiter);
    fprintf(stdout, "\n");

#if _DISPLAY_
    /* Initialize for graphical display */
    setup_return =
        setup(width, height, &display, &win, &gc, &min_color, &max_color);
    if (setup_return != EXIT_SUCCESS) {
        /* NOTE(review): the message says "continuing" but the code aborts */
        fprintf(stderr, "Unable to initialize display, continuing\n");
        return EXIT_FAILURE;
    }
#else
    /* allocate one row of iteration counts per image line */
    output = malloc(height*sizeof(int *));
    for (int row = 0; row < height; ++row)
        output[row] = malloc(width*sizeof(int));
#endif

    /* Compute factors to scale computational region to window */
    scale_real = (double) (real_max - real_min) / (double) width;
    scale_imag = (double) (imag_max - imag_min) / (double) height;

#if _DISPLAY_
    /* Compute factor for color scaling */
    scale_color = (double) (max_color - min_color) / (double) (maxiter - 1);
#endif

    /* Start timing */
    double stamp;
    START_COUNT_TIME;

#if _DISPLAY_
    mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter,
               setup_return, display, win, gc, scale_color, min_color);
#else
    mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter,
               output);
#endif

    /* End timing */
    STOP_COUNT_TIME("Total execution time");

    /* Be sure all output is written */
#if _DISPLAY_
    if (setup_return == EXIT_SUCCESS) {
        XFlush (display);
    }
#else
    /* dump raw iteration counts row by row when -o was given */
    if (fp != NULL)
    {
        for (int row = 0; row < height; ++row)
            if(fwrite(output[row], sizeof(int), width, fp) != width) {
                fprintf(stderr, "Output file not written correctly\n");
            }
    }
#endif

#if _DISPLAY_
    /* Wait for user response, then exit program */
    if (setup_return == EXIT_SUCCESS) {
        interact(display, &win, width, height,
                 real_min, real_max, imag_min, imag_max);
    }
    return EXIT_SUCCESS;
#endif
}
|
mkl_functions-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2019 by Contributors
* \file mkl_functions-inl.h
* \brief Wrapper for MKL VML functions
* \author Tao Lv, Shufan Wu
*/
#ifndef MXNET_OPERATOR_MKL_FUNCTIONS_INL_H_
#define MXNET_OPERATOR_MKL_FUNCTIONS_INL_H_
#if MSHADOW_USE_MKL == 1
#include "mkl_vml.h"
namespace mxnet {
namespace op {
namespace mkl_func {
// MKL VML entry points take MKL_INT element counts; returns true iff `n`
// fits into MKL_INT (which may be a 32-bit int or a 64-bit integer,
// depending on the MKL interface layer).
MSHADOW_XINLINE
static bool check_size(const size_t n) {
  const size_t MKL_INT_MAX = (sizeof(MKL_INT) == sizeof(int)) ? INT_MAX : LLONG_MAX;
  return (n <= MKL_INT_MAX);
}
// MKL VML kernels exist only for single and double precision inputs.
MSHADOW_XINLINE
static bool check_type(const int t) {
  switch (t) {
    case mshadow::kFloat32:
    case mshadow::kFloat64:
      return true;
    default:
      return false;
  }
}
// Generates a functor `name` whose Vectorize overloads dispatch to the MKL
// VML function vs<func> (float) or vd<func> (double) over n contiguous
// elements.  Callers must pre-check n with check_size().
#define MXNET_MKL_UNARY_MATH_FUNC(name, func) \
struct name { \
MSHADOW_XINLINE static void Vectorize(const index_t n, const float *src, float *dst) { \
vs##func(static_cast<MKL_INT>(n), src, dst); \
} \
MSHADOW_XINLINE static void Vectorize(const index_t n, const double *src, double *dst) { \
vd##func(static_cast<MKL_INT>(n), src, dst); \
} \
};
// Same as above for element-wise binary VML functions: c[i] = func(a[i], b[i]).
#define MXNET_MKL_BINARY_MATH_FUNC(name, func) \
struct name { \
MSHADOW_XINLINE static void Vectorize(const index_t n, \
const float *a, \
const float *b, \
float *c) { \
vs##func(static_cast<MKL_INT>(n), a, b, c); \
} \
MSHADOW_XINLINE static void Vectorize(const index_t n, \
const double *a, \
const double *b, \
double *c) { \
vd##func(static_cast<MKL_INT>(n), a, b, c); \
} \
};
MXNET_MKL_UNARY_MATH_FUNC(erf, Erf);
MXNET_MKL_UNARY_MATH_FUNC(exp, Exp);
MXNET_MKL_UNARY_MATH_FUNC(exp2, Exp2);
MXNET_MKL_UNARY_MATH_FUNC(exp10, Exp10);
MXNET_MKL_UNARY_MATH_FUNC(expm1, Expm1);
MXNET_MKL_UNARY_MATH_FUNC(log, Ln);
MXNET_MKL_UNARY_MATH_FUNC(log2, Log2);
MXNET_MKL_UNARY_MATH_FUNC(log10, Log10);
MXNET_MKL_UNARY_MATH_FUNC(log1p, Log1p);
MXNET_MKL_UNARY_MATH_FUNC(sin, Sin);
MXNET_MKL_UNARY_MATH_FUNC(cos, Cos);
MXNET_MKL_UNARY_MATH_FUNC(tan, Tan);
MXNET_MKL_UNARY_MATH_FUNC(asin, Asin);
MXNET_MKL_UNARY_MATH_FUNC(acos, Acos);
MXNET_MKL_UNARY_MATH_FUNC(atan, Atan);
MXNET_MKL_UNARY_MATH_FUNC(sinh, Sinh);
MXNET_MKL_UNARY_MATH_FUNC(cosh, Cosh);
MXNET_MKL_UNARY_MATH_FUNC(tanh, Tanh);
MXNET_MKL_UNARY_MATH_FUNC(asinh, Asinh);
MXNET_MKL_UNARY_MATH_FUNC(acosh, Acosh);
MXNET_MKL_UNARY_MATH_FUNC(atanh, Atanh);
MXNET_MKL_UNARY_MATH_FUNC(sqrt, Sqrt);
MXNET_MKL_UNARY_MATH_FUNC(abs, Abs);
MXNET_MKL_UNARY_MATH_FUNC(cbrt, Cbrt);
MXNET_MKL_UNARY_MATH_FUNC(round, Round);
MXNET_MKL_UNARY_MATH_FUNC(ceil, Ceil);
MXNET_MKL_UNARY_MATH_FUNC(floor, Floor);
MXNET_MKL_UNARY_MATH_FUNC(trunc, Trunc);
MXNET_MKL_UNARY_MATH_FUNC(lgamma, LGamma);
MXNET_MKL_UNARY_MATH_FUNC(tgamma, TGamma);
MXNET_MKL_UNARY_MATH_FUNC(square, Sqr);
MXNET_MKL_BINARY_MATH_FUNC(add, Add);
MXNET_MKL_BINARY_MATH_FUNC(sub, Sub);
MXNET_MKL_BINARY_MATH_FUNC(mul, Mul);
MXNET_MKL_BINARY_MATH_FUNC(pow, Pow);
MXNET_MKL_BINARY_MATH_FUNC(hypot, Hypot);
// Serially accumulate the n values of `in` and store the total in dst[0].
template <typename DType>
MSHADOW_XINLINE static void sum_(index_t n, DType *in, DType *dst) {
  DType total = 0.0f;
  for (index_t idx = 0; idx < n; ++idx) {
    total += in[idx];
  }
  dst[0] = total;
}
// LayerNorm on the last dimension
// Layer normalization over the last dimension of an (m, n) row-major
// matrix: b = gamma * (a - mean) / std + beta, row by row, parallelized
// over rows.  mean[i] and var[i] are also written out.
// NOTE: var[] ends up holding the eps-stabilized STANDARD DEVIATION,
// sqrt(sum_sq/n + eps), not the variance.
template <typename DType>
MSHADOW_XINLINE static void LayerNormLastDim(index_t m,
                                             index_t n,
                                             DType *a,
                                             DType *b,
                                             DType *gamma,
                                             DType *beta,
                                             DType *mean,
                                             DType *var,
                                             DType eps) {
  auto nthreads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
#pragma omp parallel for num_threads(nthreads)
  for (index_t i = 0; i < m; i++) {
    DType* in_offset = a + i * n;
    DType* out_offset = b + i * n;
    // pass 1: row mean
    sum_(n, in_offset, &(mean[i]));
    mean[i] /= n;
    var[i] = 0.0f;
    // pass 2: center the row and accumulate the sum of squares
#if !defined(_MSC_VER)
#pragma omp simd
#endif
    for (index_t j = 0; j < n; j++) {
      out_offset[j] = in_offset[j] - mean[i];
      var[i] += out_offset[j] * out_offset[j];
    }
    var[i] = math::sqrt(var[i] / n + eps);
    // pass 3: scale by gamma/std and shift by beta
#if !defined(_MSC_VER)
#pragma omp simd
#endif
    for (index_t j = 0; j < n; j++) {
      out_offset[j] = out_offset[j] * gamma[j] / var[i] + beta[j];
    }
  }
}
} // namespace mkl_func
} // namespace op
} // namespace mxnet
#endif // MSHADOW_USE_MKL == 1
#endif // MXNET_OPERATOR_MKL_FUNCTIONS_INL_H_
|
DRB018-plusplus-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Data race on outLen due to ++ operation.
Adding private (outLen) can avoid race condition. But it is wrong semantically.
Data races on outLen also cause output[outLen++] to have data races.
Data race pairs (we allow two pairs to preserve the original code pattern):
1. outLen@72 vs. outLen@72
2. output[]@72 vs. output[]@72
*/
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
int input[1000];
int output[1000];
int main()
{
  int i;
  int inLen = 1000;
  int outLen = 0;
  /* Parallel initialization: each iteration writes a distinct input[i],
   * so this loop is race-free. */
  #pragma omp parallel for private (i)
  for (i = 0; i <= inLen - 1; i += 1) {
    input[i] = i;
  }
  /* Sequential compaction loop.  In the original DRB018 benchmark this
   * is the loop carrying the documented data race on outLen and
   * output[outLen++]; the pattern is intentional -- do not "fix" it. */
  for (i = 0; i <= inLen - 1; i += 1) {
    output[outLen++] = input[i];
  }
  printf("output[500]=%d\n",output[500]);
  return 0;
}
|
util.c |
/******************************************************************************
* INCLUDES
*****************************************************************************/
#include "base.h"
#include "thd_info.h"
#include "util.h"
/******************************************************************************
* PUBLIC FUNCTIONS
*****************************************************************************/
/* Draw a pseudo-random value: magnitude uniform in [0, 3], sign chosen
 * by a second rand() call. */
val_t rand_val(void)
{
  /* TODO: modify this to work based on the size of idx_t */
  val_t magnitude = 3.0 * ((val_t) rand() / (val_t) RAND_MAX);
  int negate = (rand() % 2 == 0);
  return negate ? -magnitude : magnitude;
}
idx_t rand_idx(void)
{
/* TODO: modify this to work based on the size of idx_t */
return (idx_t) (rand() << 16) | rand();
}
/* Fill all nelems entries of `vals` with the constant 1.
 * (Random initialization via rand_val() is currently commented out.) */
void fill_rand(
  val_t * const restrict vals,
  idx_t const nelems)
{
  idx_t idx;
  for(idx = 0; idx < nelems; ++idx) {
    vals[idx] = 1; //rand_val();
  }
}
/* Render a byte count as a human-readable string ("%.2f" plus unit).
 * The caller owns and frees the returned buffer; NULL on alloc failure.
 * Fix: the unit index is now capped at 4 ("TB").  The previous guard
 * (suff < 5) allowed suff to reach 5 after the increment and then read
 * suffix[5], one past the end of the 5-element array, for inputs of a
 * petabyte or more. */
char * bytes_str(
  size_t const bytes)
{
  double size = (double)bytes;
  int suff = 0;
  const char *suffix[5] = {"B", "KB", "MB", "GB", "TB"};
  while(size > 1024 && suff < 4) {
    size /= 1024.;
    ++suff;
  }
  char * ret = NULL;
  if(asprintf(&ret, "%0.2f%s", size, suffix[suff]) == -1) {
    fprintf(stderr, "SPLATT: asprintf failed with %zu bytes.\n", bytes);
    ret = NULL;
  }
  return ret;
}
/* Return the index of the largest element of arr[0..N-1].
 * Ties keep the earliest index; requires N >= 1. */
idx_t argmax_elem(
  idx_t const * const arr,
  idx_t const N)
{
  idx_t best = 0;
  idx_t idx;
  for(idx = 1; idx < N; ++idx) {
    best = (arr[idx] > arr[best]) ? idx : best;
  }
  return best;
}
/* Return the index of the smallest element of arr[0..N-1].
 * Ties keep the earliest index; requires N >= 1. */
idx_t argmin_elem(
  idx_t const * const arr,
  idx_t const N)
{
  idx_t best = 0;
  idx_t idx;
  for(idx = 1; idx < N; ++idx) {
    best = (arr[idx] < arr[best]) ? idx : best;
  }
  return best;
}
/* Return the prime factorization of N (with multiplicity) in a
 * heap-allocated array; the factor count is stored in *nprimes.
 * The caller frees the returned array.
 * Fix: the capacity `size` was never doubled alongside the realloc, so
 * the `size == np` guard could only ever fire once and writes past the
 * (once-doubled) buffer overflowed the heap for inputs with more than
 * 20 prime factors. */
int * get_primes(
  int N,
  int * nprimes)
{
  int size = 10;
  int * p = (int *) splatt_malloc(size * sizeof(int));
  int np = 0;
  while(N != 1) {
    int i;
    /* the smallest divisor >= 2 of N is necessarily prime */
    for(i=2; i <= N; ++i) {
      if(N % i == 0) {
        break;
      }
    }
    /* grow if full -- keep `size` in sync with the buffer */
    if(size == np) {
      size *= 2;
      p = (int *) realloc(p, size * sizeof(int));
    }
    p[np++] = i;
    N /= i;
  }
  *nprimes = np;
  return p;
}
/* Parallel memcpy: each OpenMP thread copies one contiguous slice of
 * roughly bytes/nthreads bytes from src to dst.  SS_MIN clamps both
 * slice ends to `bytes` so the last thread never runs past the buffer
 * when the split is uneven.  src and dst must not overlap (restrict). */
void par_memcpy(
  void * const restrict dst,
  void const * const restrict src,
  size_t const bytes)
{
  #pragma omp parallel
  {
    int nthreads = splatt_omp_get_num_threads();
    int tid = splatt_omp_get_thread_num();
    /* ceil(bytes / nthreads) bytes per thread */
    size_t n_per_thread = (bytes + nthreads - 1)/nthreads;
    size_t n_begin = SS_MIN(n_per_thread * tid, bytes);
    size_t n_end = SS_MIN(n_begin + n_per_thread, bytes);
    memcpy((char *)dst + n_begin, (char *)src + n_begin, n_end - n_begin);
  }
}
|
optimizer.c | /*
*
*/
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include "cint.h"
#include "cvhf.h"
#include "optimizer.h"
#define MAX(I,J) ((I) > (J) ? (I) : (J))
int int2e_sph();
/* Allocate a CVHFOpt with default settings: no prescreening
 * (CVHFnoscreen / CVHFr_vknoscreen) and a 1e-14 direct-SCF cutoff.
 * The caller owns *opt and releases it with CVHFdel_optimizer. */
void CVHFinit_optimizer(CVHFOpt **opt, int *atm, int natm,
                        int *bas, int nbas, double *env)
{
        CVHFOpt *created = (CVHFOpt *)malloc(sizeof(CVHFOpt));
        created->nbas = nbas;
        created->direct_scf_cutoff = 1e-14;
        created->q_cond = NULL;
        created->dm_cond = NULL;
        created->fprescreen = &CVHFnoscreen;
        created->r_vkscreen = &CVHFr_vknoscreen;
        *opt = created;
}
/* Release an optimizer created by CVHFinit_optimizer and set *opt to
 * NULL.  Safe when *opt is already NULL.
 * Fix: the null-checks were inverted (`if (!opt0->q_cond) free(...)`),
 * so free() only ever ran on NULL pointers (a no-op) and the q_cond /
 * dm_cond buffers leaked whenever they had been allocated. */
void CVHFdel_optimizer(CVHFOpt **opt)
{
        CVHFOpt *opt0 = *opt;
        if (!opt0) {
                return;
        }
        if (opt0->q_cond) {
                free(opt0->q_cond);
                opt0->q_cond = NULL;
        }
        if (opt0->dm_cond) {
                free(opt0->dm_cond);
                opt0->dm_cond = NULL;
        }
        free(opt0);
        *opt = NULL;
}
/* Prescreen stub that keeps every shell quartet (always returns 1);
 * used as the default fprescreen when screening is disabled. */
int CVHFnoscreen(int *shls, CVHFOpt *opt,
                 int *atm, int *bas, double *env)
{
        return 1;
}
/* Schwarz screening: keep shell quartet (i,j|k,l) only when the
 * Cauchy-Schwarz bound q_cond[i,j] * q_cond[k,l] exceeds the
 * direct-SCF cutoff.  With no optimizer (opt == NULL) nothing is
 * screened.  q_cond must have been filled by CVHFsetnr_direct_scf. */
int CVHFnr_schwarz_cond(int *shls, CVHFOpt *opt,
                        int *atm, int *bas, double *env)
{
        if (!opt) {
                return 1;
        }
        int i = shls[0];
        int j = shls[1];
        int k = shls[2];
        int l = shls[3];
        int n = opt->nbas;
        assert(opt->q_cond);
        assert(i < n);
        assert(j < n);
        assert(k < n);
        assert(l < n);
        double qijkl = opt->q_cond[i*n+j] * opt->q_cond[k*n+l];
        return qijkl > opt->direct_scf_cutoff;
}
/* 8-fold-symmetry prescreen: combines the Schwarz bound with density
 * matrix magnitudes.  A quartet survives only when qijkl is above the
 * cutoff AND at least one of the six density blocks it touches is
 * large enough to matter (the Coulomb blocks ji/lk carry a factor 4).
 * NOTE(review): dmin divides by qijkl before the cutoff test; qijkl is
 * expected to be positive -- CVHFsetnr_direct_scf floors it at 1e-100. */
int CVHFnrs8_prescreen(int *shls, CVHFOpt *opt,
                       int *atm, int *bas, double *env)
{
        if (!opt) {
                return 1; // no screen
        }
        int i = shls[0];
        int j = shls[1];
        int k = shls[2];
        int l = shls[3];
        int n = opt->nbas;
        assert(opt->q_cond);
        assert(opt->dm_cond);
        assert(i < n);
        assert(j < n);
        assert(k < n);
        assert(l < n);
        double qijkl = opt->q_cond[i*n+j] * opt->q_cond[k*n+l];
        double dmin = opt->direct_scf_cutoff / qijkl;
        return qijkl > opt->direct_scf_cutoff
            &&((4*opt->dm_cond[j*n+i] > dmin)
            || (4*opt->dm_cond[l*n+k] > dmin)
            || ( opt->dm_cond[j*n+k] > dmin)
            || ( opt->dm_cond[j*n+l] > dmin)
            || ( opt->dm_cond[i*n+k] > dmin)
            || ( opt->dm_cond[i*n+l] > dmin));
}
// return flag to decide whether transpose01324
/* No-op vk screening: marks every density matrix as unscreened (NULL
 * condition array) and zeroes the lower bound.  Always returns 1; per
 * the comment above, the return value is the transpose01324 flag. */
int CVHFr_vknoscreen(int *shls, CVHFOpt *opt,
                     double **dms_cond, int n_dm, double *dm_atleast,
                     int *atm, int *bas, double *env)
{
        int k = 0;
        while (k < n_dm) {
                dms_cond[k] = NULL;
                ++k;
        }
        *dm_atleast = 0;
        return 1;
}
/* Setter for the direct-SCF screening threshold. */
void CVHFset_direct_scf_cutoff(CVHFOpt *opt, double cutoff)
{
        opt->direct_scf_cutoff = cutoff;
}
/* Getter for the direct-SCF screening threshold. */
double CVHFget_direct_scf_cutoff(CVHFOpt *opt)
{
        return opt->direct_scf_cutoff;
}
/* Build the Schwarz condition matrix q_cond:
 * q_cond[i,j] = sqrt(max |(ij|ij)|) over the shell-pair block, floored
 * at 1e-100, symmetrized.  The lower-triangle pair index ij is decoded
 * back into (ish, jsh) inside the parallel loop.
 * NOTE(review): cache_size is const and used inside the
 * default(none) region without being listed -- presumably relying on
 * const variables being predetermined shared; confirm with the target
 * OpenMP version. */
void CVHFsetnr_direct_scf(CVHFOpt *opt, int *atm, int natm,
                          int *bas, int nbas, double *env)
{
        /* This memory is released in void CVHFdel_optimizer, Don't know
         * why valgrind raises memory leak here */
        if (opt->q_cond) {
                free(opt->q_cond);
        }
        opt->q_cond = (double *)malloc(sizeof(double) * nbas*nbas);
        int shls_slice[] = {0, nbas};
        const int cache_size = GTOmax_cache_size(&int2e_sph, shls_slice, 1,
                                                 atm, natm, bas, nbas, env);
#pragma omp parallel default(none) \
        shared(opt, atm, natm, bas, nbas, env)
{
        double qtmp, tmp;
        int ij, i, j, di, dj, ish, jsh;
        int shls[4];
        double *cache = malloc(sizeof(double) * cache_size);
        /* di becomes the largest shell dimension, sizing the (ij|ij) buffer */
        di = 0;
        for (ish = 0; ish < nbas; ish++) {
                dj = CINTcgto_spheric(ish, bas);
                di = MAX(di, dj);
        }
        double *buf = malloc(sizeof(double) * di*di*di*di);
#pragma omp for schedule(dynamic, 4)
        for (ij = 0; ij < nbas*(nbas+1)/2; ij++) {
                /* invert ij = ish*(ish+1)/2 + jsh */
                ish = (int)(sqrt(2*ij+.25) - .5 + 1e-7);
                jsh = ij - ish*(ish+1)/2;
                di = CINTcgto_spheric(ish, bas);
                dj = CINTcgto_spheric(jsh, bas);
                shls[0] = ish;
                shls[1] = jsh;
                shls[2] = ish;
                shls[3] = jsh;
                qtmp = 1e-100;
                if (0 != int2e_sph(buf, NULL, shls, atm, natm, bas, nbas, env, NULL, cache)) {
                        /* max over the diagonal elements (ij|ij) of the block */
                        for (i = 0; i < di; i++) {
                        for (j = 0; j < dj; j++) {
                                tmp = fabs(buf[i+di*j+di*dj*i+di*dj*di*j]);
                                qtmp = MAX(qtmp, tmp);
                        } }
                        qtmp = sqrt(qtmp);
                }
                opt->q_cond[ish*nbas+jsh] = qtmp;
                opt->q_cond[jsh*nbas+ish] = qtmp;
        }
        free(buf);
        free(cache);
}
}
/* Build the density condition matrix dm_cond:
 * dm_cond[ish,jsh] = max |dm| over the (ish, jsh) AO block, maximized
 * across all nset density matrices.  ao_loc maps shells to AO offsets. */
void CVHFsetnr_direct_scf_dm(CVHFOpt *opt, double *dm, int nset, int *ao_loc,
                             int *atm, int natm, int *bas, int nbas, double *env)
{
        if (opt->dm_cond) { // NOT reuse opt->dm_cond because nset may be diff in different call
                free(opt->dm_cond);
        }
        opt->dm_cond = (double *)malloc(sizeof(double) * nbas*nbas);
        memset(opt->dm_cond, 0, sizeof(double)*nbas*nbas);
        const int nao = ao_loc[nbas];
        double dmax, tmp;
        int i, j, ish, jsh;
        int iset;
        double *pdm;
        for (ish = 0; ish < nbas; ish++) {
        for (jsh = 0; jsh < nbas; jsh++) {
                dmax = 0;
                for (iset = 0; iset < nset; iset++) {
                        pdm = dm + nao*nao*iset;
                        for (i = ao_loc[ish]; i < ao_loc[ish+1]; i++) {
                        for (j = ao_loc[jsh]; j < ao_loc[jsh+1]; j++) {
                                tmp = fabs(pdm[i*nao+j]);
                                dmax = MAX(dmax, tmp);
                        } }
                }
                opt->dm_cond[ish*nbas+jsh] = dmax;
        } }
}
/*
*************************************************
*/
/* Convenience constructor: allocate an optimizer, enable the 8-fold
 * symmetry prescreen, and precompute the Schwarz q_cond matrix. */
void CVHFnr_optimizer(CVHFOpt **vhfopt, int *atm, int natm,
                      int *bas, int nbas, double *env)
{
        CVHFinit_optimizer(vhfopt, atm, natm, bas, nbas, env);
        (*vhfopt)->fprescreen = &CVHFnrs8_prescreen;
        CVHFsetnr_direct_scf(*vhfopt, atm, natm, bas, nbas, env);
}
|
GB_unaryop__lnot_int32_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_int32_bool
// op(A') function: GB_tran__lnot_int32_bool
// C type: int32_t
// A type: bool
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
bool
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
int32_t z = (int32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_INT32 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply Cx[p] = (int32_t) !(Ax[p] != 0) to all anz entries of a dense
// array.  GB_CAST_OP expands to GB_GETA + GB_CASTING + GB_OP, defined
// above in this generated file.
GrB_Info GB_unop__lnot_int32_bool
(
    int32_t *restrict Cx,
    const bool *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    // this operator/type combination was compiled out (see GB_DISABLE)
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose-and-apply driver: the work is done by the included
// GB_unaryop_transpose.c template, instantiated with the macros above
// (GB_PHASE_2_OF_2 selects the numerical phase).
GrB_Info GB_tran__lnot_int32_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y on struct timeval operands.
 * NOTE: *y is normalized in place (same side effect as the classic GNU
 * libc example this is adapted from).
 * Returns 1 when the difference is negative, 0 otherwise. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into y until x->tv_usec >= y->tv_usec. */
  if (x->tv_usec < y->tv_usec)
  {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }
  /* Carry surplus microseconds from the difference into y's seconds. */
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* After normalization tv_usec is non-negative. */
  result->tv_usec = x->tv_usec - y->tv_usec;
  result->tv_sec = x->tv_sec - y->tv_sec;
  return x->tv_sec < y->tv_sec;
}
/* Driver for the order-1 3D 7-point stencil: allocates two time planes
 * of an Nz x Ny x Nx grid, runs the stencil Nt-1 steps, TESTS times,
 * and reports the fastest run.
 * Fixes: (1) `min(...)` was undefined -- the macro defined above is
 * MIN, so the file did not compile; (2) Nx/Ny/Nz/Nt now have defaults
 * instead of being read uninitialized when too few arguments are
 * given; (3) the grid planes are calloc'd so the boundary cells (which
 * the init loops below never write, starting at index 1) hold defined
 * zeros when the stencil reads them. */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx = 34, Ny = 34, Nz = 34, Nt = 10;   /* defaults incl. halo */
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
  /* two time planes, zero-filled (see note above) */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) calloc(Nx, sizeof(double));
      A[1][i][j] = (double*) calloc(Nx, sizeof(double));
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 32;
  tile_size[1] = 32;
  tile_size[2] = 24;
  tile_size[3] = 64;
  tile_size[4] = -1;
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  const double alpha = 0.0876;
  const double beta = 0.0765;
  // initialize the interior with reproducible pseudo-random values
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  /* run the stencil TESTS times and keep the fastest wall-clock time */
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
                + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
                          A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* fix: the macro defined in this file is MIN, not min */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  // Free allocated arrays (Causing performance degradation
  /* for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  */
  return 0;
}
|
GB_binop__times_uint64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__times_uint64)
// A.*B function (eWiseMult): GB (_AemultB_08__times_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__times_uint64)
// A.*B function (eWiseMult): GB (_AemultB_04__times_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__times_uint64)
// A*D function (colscale): GB (_AxD__times_uint64)
// D*A function (rowscale): GB (_DxB__times_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__times_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__times_uint64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__times_uint64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__times_uint64)
// C=scalar+B GB (_bind1st__times_uint64)
// C=scalar+B' GB (_bind1st_tran__times_uint64)
// C=A+scalar GB (_bind2nd__times_uint64)
// C=A'+scalar GB (_bind2nd_tran__times_uint64)
// C type: uint64_t
// A type: uint64_t
// A pattern? 0
// B type: uint64_t
// B pattern? 0
// BinaryOp: cij = (aij * bij)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x * y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TIMES || GxB_NO_UINT64 || GxB_NO_TIMES_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense; body is the included template.
void GB (_Cdense_ewise3_accum__times_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; body is the included template.
void GB (_Cdense_ewise3_noaccum__times_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix (template body).
GrB_Info GB (_Cdense_accumB__times_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix (template body).
// NOTE: the second `return (GrB_SUCCESS)` is unreachable boilerplate
// emitted by the code generator; left as-is (generated file).
GrB_Info GB (_Cdense_accumb__times_uint64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D (template body).
GrB_Info GB (_AxD__times_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D (template body).
GrB_Info GB (_DxB__times_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd dispatch: C=A+B (optionally masked).  alpha/beta scalars are
// only read for the eWiseUnion variant; the heavy lifting happens in the
// included GB_add_template.c.
GrB_Info GB (_AaddB__times_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint64_t alpha_scalar ;
    uint64_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint64_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B when C is sparse/hypersparse (template body).
GrB_Info GB (_AemultB_08__times_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B, A sparse/hyper, B bitmap/full.
// TIMES is commutative (GB_BINOP_FLIP == 0), so the flipxy argument is
// ignored here and the non-flipped template is compiled.
GrB_Info GB (_AemultB_02__times_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B, M sparse/hyper, A and B bitmap/full.
GrB_Info GB (_AemultB_04__times_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B (optionally masked) where C is bitmap (template body).
GrB_Info GB (_AemultB_bitmap__times_uint64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx[p] = x * Bx[p] for all bnz entries: bind the scalar as the FIRST
// operand.  GBB skips entries absent from the bitmap Bb (NULL Bb means
// all entries present).
GrB_Info GB (_bind1st__times_uint64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t x = (*((uint64_t *) x_input)) ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        uint64_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x * bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx[p] = Ax[p] * y for all anz entries: bind the scalar as the SECOND
// operand.  GBB skips entries absent from the bitmap Ab.
GrB_Info GB (_bind2nd__times_uint64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    uint64_t y = (*((uint64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint64_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij * y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)

// GB_CAST_OP is consumed by the shared transpose template included below;
// here it applies z = x * aij, with the scalar bound to the first operand.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint64_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x * aij) ; \
}

GrB_Info GB (_bind1st_tran__times_uint64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint64_t

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // bind the scalar x (first operand) once, before the transpose kernel
    uint64_t x = (*((const uint64_t *) x_input)) ;
    // the generic transpose kernel body is textually included here
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    // restore GB_ATYPE for any generated code that follows this function
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)

// GB_CAST_OP is consumed by the shared transpose template included below;
// here it applies z = aij * y, with the scalar bound to the second operand.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint64_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij * y) ; \
}

GrB_Info GB (_bind2nd_tran__times_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // bind the scalar y (second operand) once, before the transpose kernel
    uint64_t y = (*((const uint64_t *) y_input)) ;
    // the generic transpose kernel body is textually included here
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
graph.h | // Copyright (c) 2015, The Regents of the University of California (Regents)
// See LICENSE.txt for license details
#ifndef GRAPH_H_
#define GRAPH_H_
#include <algorithm>
#include <cinttypes>
#include <cstddef>
#include <iostream>
#include <type_traits>
#include "pvector.h"
#include "util.h"
/*
GAP Benchmark Suite
Class: CSRGraph
Author: Scott Beamer
Simple container for graph in CSR format
- Intended to be constructed by a Builder
- To make weighted, set DestID_ template type to NodeWeight
- MakeInverse parameter controls whether graph stores its inverse
*/
// Used to hold node & weight, with another node it makes a weighted edge
template <typename NodeID_, typename WeightT_>
struct NodeWeight {
  NodeID_ v;
  WeightT_ w;
  NodeWeight() {}
  NodeWeight(NodeID_ v) : v(v), w(1) {}   // weight defaults to 1
  NodeWeight(NodeID_ v, WeightT_ w) : v(v), w(w) {}

  // Orders by node id first and weight second, so duplicates sort adjacently
  bool operator< (const NodeWeight& rhs) const {
    return v == rhs.v ? w < rhs.w : v < rhs.v;
  }

  // doesn't check WeightT_s, needed to remove duplicate edges
  bool operator== (const NodeWeight& rhs) const {
    return v == rhs.v;
  }

  // doesn't check WeightT_s, needed to remove self edges
  bool operator== (const NodeID_& rhs) const {
    return v == rhs;
  }

  // Implicit conversion to the node id; marked const so it also works on
  // const NodeWeight objects (the previous non-const overload rejected them)
  operator NodeID_() const {
    return v;
  }
};
// Streams a NodeWeight as "<node> <weight>".
template <typename NodeID_, typename WeightT_>
std::ostream& operator<<(std::ostream& os,
                         const NodeWeight<NodeID_, WeightT_>& nw) {
  return os << nw.v << " " << nw.w;
}

// Parses a NodeWeight from "<node> <weight>".
template <typename NodeID_, typename WeightT_>
std::istream& operator>>(std::istream& is, NodeWeight<NodeID_, WeightT_>& nw) {
  return is >> nw.v >> nw.w;
}
// Syntactic sugar for an edge (source, destination)
template <typename SrcT, typename DstT = SrcT>
struct EdgePair {
  SrcT u;   // source endpoint
  DstT v;   // destination endpoint
  EdgePair() {}
  EdgePair(SrcT src, DstT dst) : u(src), v(dst) {}
};

// SG = serialized graph, these types are for writing graph to file
typedef int32_t SGID;
typedef EdgePair<SGID> SGEdge;
typedef int64_t SGOffset;
template <class NodeID_, class DestID_ = NodeID_, bool MakeInverse = true>
class CSRGraph {
  // Used for *non-negative* offsets within a neighborhood
  typedef std::make_unsigned<std::ptrdiff_t>::type OffsetT;

  // Used to access neighbors of vertex, basically sugar for iterators
  class Neighborhood {
    NodeID_ n_;
    DestID_** g_index_;
    OffsetT start_offset_;
   public:
    Neighborhood(NodeID_ n, DestID_** g_index, OffsetT start_offset) :
        n_(n), g_index_(g_index), start_offset_(0) {
      // Clamp the requested offset to the neighborhood size so that
      // begin() can never run past end()
      OffsetT max_offset = end() - begin();
      start_offset_ = std::min(start_offset, max_offset);
    }
    typedef DestID_* iterator;
    iterator begin() { return g_index_[n_] + start_offset_; }
    iterator end()   { return g_index_[n_+1]; }
  };

  // Frees owned arrays; for undirected graphs the in_* members alias the
  // out_* members, so they are freed only when the graph is directed
  void ReleaseResources() {
    if (out_index_ != nullptr)
      delete[] out_index_;
    if (out_neighbors_ != nullptr)
      delete[] out_neighbors_;
    if (directed_) {
      if (in_index_ != nullptr)
        delete[] in_index_;
      if (in_neighbors_ != nullptr)
        delete[] in_neighbors_;
    }
  }

 public:
  CSRGraph() : directed_(false), num_nodes_(-1), num_edges_(-1),
    out_index_(nullptr), out_neighbors_(nullptr),
    in_index_(nullptr), in_neighbors_(nullptr) {}

  // Undirected graph: each edge is stored in both directions, so the
  // undirected edge count is half the stored entries; in_* alias out_*
  CSRGraph(int64_t num_nodes, DestID_** index, DestID_* neighs) :
    directed_(false), num_nodes_(num_nodes),
    out_index_(index), out_neighbors_(neighs),
    in_index_(index), in_neighbors_(neighs) {
      num_edges_ = (out_index_[num_nodes_] - out_index_[0]) / 2;
  }

  // Directed graph: separate outgoing and incoming CSR structures
  CSRGraph(int64_t num_nodes, DestID_** out_index, DestID_* out_neighs,
        DestID_** in_index, DestID_* in_neighs) :
    directed_(true), num_nodes_(num_nodes),
    out_index_(out_index), out_neighbors_(out_neighs),
    in_index_(in_index), in_neighbors_(in_neighs) {
      num_edges_ = out_index_[num_nodes_] - out_index_[0];
  }

  // Move constructor: steals ownership and leaves other in an empty state
  CSRGraph(CSRGraph&& other) : directed_(other.directed_),
    num_nodes_(other.num_nodes_), num_edges_(other.num_edges_),
    out_index_(other.out_index_), out_neighbors_(other.out_neighbors_),
    in_index_(other.in_index_), in_neighbors_(other.in_neighbors_) {
      other.num_edges_ = -1;
      other.num_nodes_ = -1;
      other.out_index_ = nullptr;
      other.out_neighbors_ = nullptr;
      other.in_index_ = nullptr;
      other.in_neighbors_ = nullptr;
  }

  ~CSRGraph() {
    ReleaseResources();
  }

  // Move assignment: releases current storage, then steals other's
  CSRGraph& operator=(CSRGraph&& other) {
    if (this != &other) {
      ReleaseResources();
      directed_ = other.directed_;
      num_edges_ = other.num_edges_;
      num_nodes_ = other.num_nodes_;
      out_index_ = other.out_index_;
      out_neighbors_ = other.out_neighbors_;
      in_index_ = other.in_index_;
      in_neighbors_ = other.in_neighbors_;
      other.num_edges_ = -1;
      other.num_nodes_ = -1;
      other.out_index_ = nullptr;
      other.out_neighbors_ = nullptr;
      other.in_index_ = nullptr;
      other.in_neighbors_ = nullptr;
    }
    return *this;
  }

  // NOTE(review): copy construction/assignment are implicitly available but
  // would double-free the raw arrays; instances should only be moved

  bool directed() const {
    return directed_;
  }

  int64_t num_nodes() const {
    return num_nodes_;
  }

  int64_t num_edges() const {
    return num_edges_;
  }

  // Number of stored directed entries (undirected edges count twice)
  int64_t num_edges_directed() const {
    return directed_ ? num_edges_ : 2*num_edges_;
  }

  int64_t out_degree(NodeID_ v) const {
    return out_index_[v+1] - out_index_[v];
  }

  int64_t in_degree(NodeID_ v) const {
    static_assert(MakeInverse, "Graph inversion disabled but reading inverse");
    return in_index_[v+1] - in_index_[v];
  }

  // Iterable out-neighborhood of n, optionally skipping the first
  // start_offset neighbors. (Removed leftover debug printf that wrote a
  // line to stdout for every neighbor on every traversal and assumed the
  // template arguments were int.)
  Neighborhood out_neigh(NodeID_ n, OffsetT start_offset = 0) const {
    return Neighborhood(n, out_index_, start_offset);
  }

  // Iterable in-neighborhood of n (requires MakeInverse). (Removed leftover
  // debug printf, which additionally used the wrong "out" label here.)
  Neighborhood in_neigh(NodeID_ n, OffsetT start_offset = 0) const {
    static_assert(MakeInverse, "Graph inversion disabled but reading inverse");
    return Neighborhood(n, in_index_, start_offset);
  }

  void PrintStats() const {
    std::cout << "Graph has " << num_nodes_ << " nodes and "
              << num_edges_ << " ";
    if (!directed_)
      std::cout << "un";
    std::cout << "directed edges for degree: ";
    std::cout << num_edges_/num_nodes_ << std::endl;
  }

  void PrintTopology() const {
    for (NodeID_ i=0; i < num_nodes_; i++) {
      std::cout << i << ": ";
      for (DestID_ j : out_neigh(i)) {
        std::cout << j << " ";
      }
      std::cout << std::endl;
    }
  }

  // Builds the per-vertex index array (pointers into neighs) from offsets
  static DestID_** GenIndex(const pvector<SGOffset> &offsets, DestID_* neighs) {
    NodeID_ length = offsets.size();
    DestID_** index = new DestID_*[length];
    #pragma omp parallel for
    for (NodeID_ n=0; n < length; n++)
      index[n] = neighs + offsets[n];
    return index;
  }

  // Inverse of GenIndex: recovers numeric offsets from the pointer index
  pvector<SGOffset> VertexOffsets(bool in_graph = false) const {
    pvector<SGOffset> offsets(num_nodes_+1);
    for (NodeID_ n=0; n < num_nodes_+1; n++)
      if (in_graph)
        offsets[n] = in_index_[n] - in_index_[0];
      else
        offsets[n] = out_index_[n] - out_index_[0];
    return offsets;
  }

  Range<NodeID_> vertices() const {
    return Range<NodeID_>(num_nodes());
  }

 private:
  bool directed_;
  int64_t num_nodes_;
  int64_t num_edges_;
  DestID_** out_index_;
  DestID_*  out_neighbors_;
  DestID_** in_index_;
  DestID_*  in_neighbors_;
};
#endif // GRAPH_H_
|
residualbased_block_builder_and_solver.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
// Collaborators: Vicente Mataix
//
//
#if !defined(KRATOS_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER )
#define KRATOS_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER
/* System includes */
#include <unordered_set>
/* External includes */
/* Project includes */
#include "includes/define.h"
#include "solving_strategies/builder_and_solvers/builder_and_solver.h"
#include "includes/model_part.h"
#include "includes/key_hash.h"
#include "utilities/timer.h"
#include "utilities/openmp_utils.h"
#include "includes/kratos_flags.h"
#include "includes/lock_object.h"
#include "utilities/sparse_matrix_multiplication_utility.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
 * @class ResidualBasedBlockBuilderAndSolver
 * @ingroup KratosCore
 * @brief Current class provides an implementation for standard builder and solving operations.
 * @details The RHS is constituted by the unbalanced loads (residual).
 * Degrees of freedom are reordered putting the restrained degrees of freedom at
 * the end of the system ordered in reverse order with respect to the DofSet.
 * Imposition of the Dirichlet conditions is naturally dealt with as the residual already contains
 * this information.
 * Calculation of the reactions involves a cost very similar to the calculation of the total residual
 * @author Riccardo Rossi
 */
template<class TSparseSpace,
class TDenseSpace, //= DenseSpace<double>,
class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
>
class ResidualBasedBlockBuilderAndSolver
: public BuilderAndSolver< TSparseSpace, TDenseSpace, TLinearSolver >
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedBlockBuilderAndSolver);
/// Definition of the base class
typedef BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
// The size_t types
typedef std::size_t SizeType;
typedef std::size_t IndexType;
/// Definition of the classes from the base class
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
typedef typename BaseType::NodesArrayType NodesArrayType;
typedef typename BaseType::ElementsArrayType ElementsArrayType;
typedef typename BaseType::ConditionsArrayType ConditionsArrayType;
/// Additional definitions
typedef PointerVectorSet<Element, IndexedObject> ElementsContainerType;
typedef Element::EquationIdVectorType EquationIdVectorType;
typedef Element::DofsVectorType DofsVectorType;
typedef boost::numeric::ublas::compressed_matrix<double> CompressedMatrixType;
/// DoF types definition
typedef Node<3> NodeType;
typedef typename NodeType::DofType DofType;
typedef typename DofType::Pointer DofPointerType;
///@}
///@name Life Cycle
///@{
    /**
     * @brief Constructor with Parameters validation.
     * @param pNewLinearSystemSolver The linear solver used for the system of equations
     * @param ThisParameters Configuration parameters; no settings are currently
     * supported, so validation only rejects unknown keys
     */
    explicit ResidualBasedBlockBuilderAndSolver(
        typename TLinearSolver::Pointer pNewLinearSystemSolver,
        Parameters ThisParameters
        ) : BaseType(pNewLinearSystemSolver)
    {
        // Validate default parameters (empty schema: any provided key is an error)
        Parameters default_parameters = Parameters(R"(
        {
        })" );

        ThisParameters.ValidateAndAssignDefaults(default_parameters);
    }

    /**
     * @brief Default constructor.
     * @param pNewLinearSystemSolver The linear solver used for the system of equations
     */
    explicit ResidualBasedBlockBuilderAndSolver(
        typename TLinearSolver::Pointer pNewLinearSystemSolver)
        : BaseType(pNewLinearSystemSolver)
    {
    }

    /** Destructor.
     */
    ~ResidualBasedBlockBuilderAndSolver() override
    {
    }
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
    /**
     * @brief Function to perform the build of the RHS. The vector could be sized as the total number
     * of dofs or as the number of unrestrained ones
     * @param pScheme The integration scheme considered
     * @param rModelPart The model part of the problem to solve
     * @param A The LHS matrix
     * @param b The RHS vector
     */
    void Build(
        typename TSchemeType::Pointer pScheme,
        ModelPart& rModelPart,
        TSystemMatrixType& A,
        TSystemVectorType& b) override
    {
        KRATOS_TRY

        KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl;

        // Getting the elements from the model
        const int nelements = static_cast<int>(rModelPart.Elements().size());

        // Getting the array of the conditions
        const int nconditions = static_cast<int>(rModelPart.Conditions().size());

        ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();
        ModelPart::ElementsContainerType::iterator el_begin = rModelPart.ElementsBegin();
        ModelPart::ConditionsContainerType::iterator cond_begin = rModelPart.ConditionsBegin();

        // Contributions to the system; each thread gets its own working copies
        // through the firstprivate clause below, so the local matrices/vectors
        // are reused across iterations without races
        LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
        LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);

        // Vector containing the localization in the system of the different terms
        Element::EquationIdVectorType EquationId;

        // assemble all elements
        double start_build = OpenMPUtils::GetCurrentTime();

        // NOTE(review): concurrent calls to Assemble on shared A and b are
        // presumably synchronized inside Assemble (the matrix graph is built
        // beforehand in ConstructMatrixStructure) — confirm before modifying
        #pragma omp parallel firstprivate(nelements,nconditions, LHS_Contribution, RHS_Contribution, EquationId )
        {
            // 'nowait': threads that finish the element loop may start on the
            // condition loop without waiting at an implicit barrier
            # pragma omp for schedule(guided, 512) nowait
            for (int k = 0; k < nelements; k++)
            {
                ModelPart::ElementsContainerType::iterator it = el_begin + k;

                // Detect if the element is active or not. If the user did not
                // make any choice the element is active by default
                bool element_is_active = true;
                if ((it)->IsDefined(ACTIVE))
                    element_is_active = (it)->Is(ACTIVE);

                if (element_is_active)
                {
                    // Calculate elemental contribution
                    pScheme->CalculateSystemContributions(*(it.base()), LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo);

                    // Assemble the elemental contribution into A and b
                    Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId);

                    // Clean local elemental memory
                    pScheme->CleanMemory(*(it.base()));
                }
            }

            #pragma omp for schedule(guided, 512)
            for (int k = 0; k < nconditions; k++)
            {
                ModelPart::ConditionsContainerType::iterator it = cond_begin + k;

                // Detect if the condition is active or not. If the user did not
                // make any choice the condition is active by default
                bool condition_is_active = true;
                if ((it)->IsDefined(ACTIVE))
                    condition_is_active = (it)->Is(ACTIVE);

                if (condition_is_active)
                {
                    // Calculate the condition contribution
                    pScheme->Condition_CalculateSystemContributions(*(it.base()), LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo);

                    // Assemble the condition contribution into A and b
                    Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId);

                    // Clean local memory
                    pScheme->CleanMemory(*(it.base()));
                }
            }
        }

        const double stop_build = OpenMPUtils::GetCurrentTime();
        KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() >= 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "Build time: " << stop_build - start_build << std::endl;

        KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0)) << "Finished parallel building" << std::endl;

        KRATOS_CATCH("")
    }
/**
* @brief Function to perform the building of the LHS
* @details Depending on the implementation choosen the size of the matrix could
* be equal to the total number of Dofs or to the number of unrestrained dofs
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
*/
void BuildLHS(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& A) override
{
KRATOS_TRY
TSystemVectorType tmp(A.size1(), 0.0);
this->Build(pScheme, rModelPart, A, tmp);
KRATOS_CATCH("")
}
/**
* @brief Build a rectangular matrix of size n*N where "n" is the number of unrestrained degrees of freedom
* and "N" is the total number of degrees of freedom involved.
* @details This matrix is obtained by building the total matrix without the lines corresponding to the fixed
* degrees of freedom (but keeping the columns!!)
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
*/
void BuildLHS_CompleteOnFreeRows(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& A) override
{
KRATOS_TRY
TSystemVectorType tmp(A.size1(), 0.0);
this->Build(pScheme, rModelPart, A, tmp);
KRATOS_CATCH("")
}
/**
* @brief This is a call to the linear system solver
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
*/
void SystemSolve(
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b
) override
{
KRATOS_TRY
double norm_b;
if (TSparseSpace::Size(b) != 0)
norm_b = TSparseSpace::TwoNorm(b);
else
norm_b = 0.00;
if (norm_b != 0.00)
{
//do solve
BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
}
else
TSparseSpace::SetToZero(Dx);
if(mT.size1() != 0) //if there are master-slave constraints
{
//recover solution of the original problem
TSystemVectorType Dxmodified = Dx;
TSparseSpace::Mult(mT, Dxmodified, Dx);
}
//prints informations about the current time
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() > 1) << *(BaseType::mpLinearSystemSolver) << std::endl;
KRATOS_CATCH("")
}
void SystemSolveWithPhysics(
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b,
ModelPart& rModelPart
)
{
if(rModelPart.MasterSlaveConstraints().size() != 0) {
TSystemVectorType Dxmodified(b.size());
InternalSystemSolveWithPhysics(A, Dxmodified, b, rModelPart);
//recover solution of the original problem
TSparseSpace::Mult(mT, Dxmodified, Dx);
} else {
InternalSystemSolveWithPhysics(A, Dx, b, rModelPart);
}
}
/**
*@brief This is a call to the linear system solver (taking into account some physical particularities of the problem)
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
* @param rModelPart The model part of the problem to solve
*/
void InternalSystemSolveWithPhysics(
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b,
ModelPart& rModelPart
)
{
KRATOS_TRY
double norm_b;
if (TSparseSpace::Size(b) != 0)
norm_b = TSparseSpace::TwoNorm(b);
else
norm_b = 0.00;
if (norm_b != 0.00) {
//provide physical data as needed
if(BaseType::mpLinearSystemSolver->AdditionalPhysicalDataIsNeeded() )
BaseType::mpLinearSystemSolver->ProvideAdditionalData(A, Dx, b, BaseType::mDofSet, rModelPart);
//do solve
BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
} else {
TSparseSpace::SetToZero(Dx);
KRATOS_WARNING("ResidualBasedBlockBuilderAndSolver") << "ATTENTION! setting the RHS to zero!" << std::endl;
}
// Prints informations about the current time
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() > 1) << *(BaseType::mpLinearSystemSolver) << std::endl;
KRATOS_CATCH("")
}
    /**
     * @brief Function to perform the building and solving phase at the same time.
     * @details It is ideally the fastest and safer function to use when it is possible to solve
     * just after building
     * @param pScheme The integration scheme considered
     * @param rModelPart The model part of the problem to solve
     * @param A The LHS matrix
     * @param Dx The Unknowns vector
     * @param b The RHS vector
     */
    void BuildAndSolve(
        typename TSchemeType::Pointer pScheme,
        ModelPart& rModelPart,
        TSystemMatrixType& A,
        TSystemVectorType& Dx,
        TSystemVectorType& b) override
    {
        KRATOS_TRY

        // 1) Assemble LHS and RHS
        Timer::Start("Build");

        Build(pScheme, rModelPart, A, b);

        Timer::Stop("Build");

        // 2) Apply master-slave constraints, only when some are present
        if(rModelPart.MasterSlaveConstraints().size() != 0) {
            Timer::Start("ApplyConstraints");
            ApplyConstraints(pScheme,A,Dx,b,rModelPart);
            Timer::Stop("ApplyConstraints");
        }

        // 3) Impose Dirichlet (fixed dof) conditions before solving
        ApplyDirichletConditions(pScheme, rModelPart, A, Dx, b);

        KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "Before the solution of the system" << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;

        // 4) Solve the resulting linear system
        const double start_solve = OpenMPUtils::GetCurrentTime();
        Timer::Start("Solve");

        SystemSolveWithPhysics(A, Dx, b, rModelPart);

        Timer::Stop("Solve");
        const double stop_solve = OpenMPUtils::GetCurrentTime();
        KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() >=1 && rModelPart.GetCommunicator().MyPID() == 0)) << "System solve time: " << stop_solve - start_solve << std::endl;

        KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "After the solution of the system" << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;

        KRATOS_CATCH("")
    }
    /**
     * @brief Corresponds to the previous, but the System's matrix is considered already built and only the RHS is built again
     * @param pScheme The integration scheme considered
     * @param rModelPart The model part of the problem to solve
     * @param A The LHS matrix (reused as-is, not reassembled)
     * @param Dx The Unknowns vector
     * @param b The RHS vector
     */
    void BuildRHSAndSolve(
        typename TSchemeType::Pointer pScheme,
        ModelPart& rModelPart,
        TSystemMatrixType& A,
        TSystemVectorType& Dx,
        TSystemVectorType& b) override
    {
        KRATOS_TRY

        // Only the RHS is rebuilt; the LHS matrix A keeps its previous values
        BuildRHS(pScheme, rModelPart, b);
        SystemSolve(A, Dx, b);

        KRATOS_CATCH("")
    }
/**
* @brief Function to perform the build of the RHS.
* @details The vector could be sized as the total number of dofs or as the number of unrestrained ones
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
*/
void BuildRHS(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemVectorType& b) override
{
KRATOS_TRY
BuildRHSNoDirichlet(pScheme,rModelPart,b);
const int ndofs = static_cast<int>(BaseType::mDofSet.size());
//NOTE: dofs are assumed to be numbered consecutively in the BlockBuilderAndSolver
#pragma omp parallel for firstprivate(ndofs)
for (int k = 0; k<ndofs; k++)
{
typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin() + k;
const std::size_t i = dof_iterator->EquationId();
if (dof_iterator->IsFixed())
b[i] = 0.0;
}
KRATOS_CATCH("")
}
    /**
     * @brief Builds the list of the DofSets involved in the problem by "asking" to each element
     * and condition its Dofs.
     * @details The list of dofs is stored inside the BuilderAndSolver as it is closely connected to the
     * way the matrix and RHS are built
     * @param pScheme The integration scheme considered
     * @param rModelPart The model part of the problem to solve
     */
    void SetUpDofSet(
        typename TSchemeType::Pointer pScheme,
        ModelPart& rModelPart
        ) override
    {
        KRATOS_TRY;

        KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "Setting up the dofs" << std::endl;

        // Gets the array of elements from the modeler
        ElementsArrayType& r_elements_array = rModelPart.Elements();
        const int number_of_elements = static_cast<int>(r_elements_array.size());

        DofsVectorType dof_list, second_dof_list; // NOTE: The second dof list is only used on constraints to include master/slave relations

        unsigned int nthreads = OpenMPUtils::GetNumThreads();

        // Set of dof pointers hashed on the dof itself, so duplicates coming
        // from shared nodes collapse automatically
        typedef std::unordered_set < NodeType::DofType::Pointer, DofPointerHasher> set_type;

        KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "Number of threads" << nthreads << "\n" << std::endl;

        KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "Initializing element loop" << std::endl;

        /**
         * The global set contains all the DoFs of the system. Each thread
         * fills a private temporary set and merges it into this one below.
         */
        set_type dof_global_set;
        dof_global_set.reserve(number_of_elements*20);  // heuristic: ~20 dofs per element

        #pragma omp parallel firstprivate(dof_list, second_dof_list)
        {
            ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();

            // We create the temporal set and we reserve some space on it
            set_type dofs_tmp_set;
            dofs_tmp_set.reserve(20000);

            // Gets the array of elements from the modeler
            #pragma omp for schedule(guided, 512) nowait
            for (int i = 0; i < number_of_elements; ++i) {
                auto it_elem = r_elements_array.begin() + i;

                // Gets list of Dof involved on every element
                pScheme->GetElementalDofList(*(it_elem.base()), dof_list, r_current_process_info);
                dofs_tmp_set.insert(dof_list.begin(), dof_list.end());
            }

            // Gets the array of conditions from the modeler
            ConditionsArrayType& r_conditions_array = rModelPart.Conditions();
            const int number_of_conditions = static_cast<int>(r_conditions_array.size());
            #pragma omp for schedule(guided, 512) nowait
            for (int i = 0; i < number_of_conditions; ++i) {
                auto it_cond = r_conditions_array.begin() + i;

                // Gets list of Dof involved on every condition
                pScheme->GetConditionDofList(*(it_cond.base()), dof_list, r_current_process_info);
                dofs_tmp_set.insert(dof_list.begin(), dof_list.end());
            }

            // Gets the array of constraints from the modeler
            auto& r_constraints_array = rModelPart.MasterSlaveConstraints();
            const int number_of_constraints = static_cast<int>(r_constraints_array.size());
            #pragma omp for  schedule(guided, 512) nowait
            for (int i = 0; i < number_of_constraints; ++i) {
                auto it_const = r_constraints_array.begin() + i;

                // Gets list of Dof involved on every constraint (slave and master lists)
                it_const->GetDofList(dof_list, second_dof_list, r_current_process_info);
                dofs_tmp_set.insert(dof_list.begin(), dof_list.end());
                dofs_tmp_set.insert(second_dof_list.begin(), second_dof_list.end());
            }

            // We merge all the sets in one thread at a time
            #pragma omp critical
            {
                dof_global_set.insert(dofs_tmp_set.begin(), dofs_tmp_set.end());
            }
        }

        KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "Initializing ordered array filling\n" << std::endl;

        // Copy the unordered global set into the sorted dof array expected
        // by the rest of the builder
        DofsArrayType Doftemp;
        BaseType::mDofSet = DofsArrayType();
        Doftemp.reserve(dof_global_set.size());
        for (auto it= dof_global_set.begin(); it!= dof_global_set.end(); it++)
        {
            Doftemp.push_back( it->get() );
        }
        Doftemp.Sort();

        BaseType::mDofSet = Doftemp;

        // Throws an exception if there are no Degrees Of Freedom involved in the analysis
        KRATOS_ERROR_IF(BaseType::mDofSet.size() == 0) << "No degrees of freedom!" << std::endl;

        KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "Number of degrees of freedom:" << BaseType::mDofSet.size() << std::endl;

        BaseType::mDofSetIsInitialized = true;

        KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0)) << "Finished setting up the dofs" << std::endl;

        KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "End of setup dof set\n" << std::endl;

    #ifdef KRATOS_DEBUG
        // If reactions are to be calculated, we check if all the dofs have reactions defined
        // This is to be done only in debug mode
        if (BaseType::GetCalculateReactionsFlag()) {
            for (auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator) {
                KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " <<std::endl
                    << "Node : "<<dof_iterator->Id()<< std::endl
                    << "Dof : "<<(*dof_iterator)<<std::endl<<"Not possible to calculate reactions."<<std::endl;
            }
        }
    #endif

        KRATOS_CATCH("");
    }
/**
* @brief Organises the dofset in order to speed up the building phase
* @param rModelPart The model part of the problem to solve
*/
void SetUpSystem(
ModelPart& rModelPart
) override
{
//int free_id = 0;
BaseType::mEquationSystemSize = BaseType::mDofSet.size();
int ndofs = static_cast<int>(BaseType::mDofSet.size());
#pragma omp parallel for firstprivate(ndofs)
for (int i = 0; i < static_cast<int>(ndofs); i++) {
typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin() + i;
dof_iterator->SetEquationId(i);
}
}
//**************************************************************************
//**************************************************************************
void ResizeAndInitializeVectors(
typename TSchemeType::Pointer pScheme,
TSystemMatrixPointerType& pA,
TSystemVectorPointerType& pDx,
TSystemVectorPointerType& pb,
ModelPart& rModelPart
) override
{
KRATOS_TRY
if (pA == NULL) //if the pointer is not initialized initialize it to an empty matrix
{
TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0, 0));
pA.swap(pNewA);
}
if (pDx == NULL) //if the pointer is not initialized initialize it to an empty matrix
{
TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0));
pDx.swap(pNewDx);
}
if (pb == NULL) //if the pointer is not initialized initialize it to an empty matrix
{
TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0));
pb.swap(pNewb);
}
TSystemMatrixType& A = *pA;
TSystemVectorType& Dx = *pDx;
TSystemVectorType& b = *pb;
//resizing the system vectors and matrix
if (A.size1() == 0 || BaseType::GetReshapeMatrixFlag() == true) //if the matrix is not initialized
{
A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, false);
ConstructMatrixStructure(pScheme, A, rModelPart);
}
else
{
if (A.size1() != BaseType::mEquationSystemSize || A.size2() != BaseType::mEquationSystemSize)
{
KRATOS_ERROR <<"The equation system size has changed during the simulation. This is not permited."<<std::endl;
A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, true);
ConstructMatrixStructure(pScheme, A, rModelPart);
}
}
if (Dx.size() != BaseType::mEquationSystemSize)
Dx.resize(BaseType::mEquationSystemSize, false);
if (b.size() != BaseType::mEquationSystemSize)
b.resize(BaseType::mEquationSystemSize, false);
ConstructMasterSlaveConstraintsStructure(rModelPart);
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
void InitializeSolutionStep(
ModelPart& rModelPart,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb) override
{
KRATOS_TRY
BaseType::InitializeSolutionStep(rModelPart, rA, rDx, rb);
// Getting process info
const ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
// Computing constraints
const int n_constraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size());
auto constraints_begin = rModelPart.MasterSlaveConstraintsBegin();
#pragma omp parallel for schedule(guided, 512) firstprivate(n_constraints, constraints_begin)
for (int k = 0; k < n_constraints; ++k) {
auto it = constraints_begin + k;
it->InitializeSolutionStep(r_process_info); // Here each constraint constructs and stores its T and C matrices. Also its equation slave_ids.
}
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
void FinalizeSolutionStep(
ModelPart& rModelPart,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb) override
{
BaseType::FinalizeSolutionStep(rModelPart, rA, rDx, rb);
// Getting process info
const ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
// Computing constraints
const int n_constraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size());
const auto constraints_begin = rModelPart.MasterSlaveConstraintsBegin();
#pragma omp parallel for schedule(guided, 512) firstprivate(n_constraints, constraints_begin)
for (int k = 0; k < n_constraints; ++k) {
auto it = constraints_begin + k;
it->FinalizeSolutionStep(r_process_info);
}
}
//**************************************************************************
//**************************************************************************
/**
 * @brief Computes the reactions of the system from the residual.
 * @details The RHS is rebuilt without Dirichlet modifications so it contains the
 * unbalanced loads; each dof's reaction is then minus its residual entry.
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 * @param A The LHS matrix
 * @param Dx The unknowns vector
 * @param b The RHS vector
 */
void CalculateReactions(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b) override
{
    // Refresh the RHS so it holds the correct residual (reactions)
    TSparseSpace::SetToZero(b);
    BuildRHSNoDirichlet(pScheme, rModelPart, b);

    // NOTE: dofs are assumed to be numbered consecutively in the BlockBuilderAndSolver
    const int number_of_dofs = static_cast<int>(BaseType::mDofSet.size());
    #pragma omp parallel for
    for (int k = 0; k < number_of_dofs; ++k) {
        auto it_dof = BaseType::mDofSet.begin() + k;
        const int equation_id = it_dof->EquationId();
        // The reaction is minus the residual entry of this dof
        it_dof->GetSolutionStepReactionValue() = -b[equation_id];
    }
}
/**
 * @brief Applies the dirichlet conditions. This operation may be very heavy or completely
 * unexpensive depending on the implementation choosen and on how the System Matrix is built.
 * @details For every fixed dof the matrix row is zeroed except for the diagonal, the
 * matching RHS entry is zeroed, and the associated column entries are zeroed in the
 * remaining rows (preserving symmetry). Rows with no non-zero entry receive a unit
 * diagonal so the system stays solvable.
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 * @param A The LHS matrix
 * @param Dx The Unknowns vector
 * @param b The RHS vector
 */
void ApplyDirichletConditions(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b) override
{
    const std::size_t system_size = A.size1();
    std::vector<double> scaling_factors (system_size, 0.0);

    // NOTE: dofs are assumed to be numbered consecutively in the BlockBuilderAndSolver.
    // BUGFIX: bound the loop by the system size as well — scaling_factors has
    // system_size entries, so a dof set larger than the system (e.g. with
    // elimination-style numbering) would otherwise write out of bounds.
    const int ndofs = static_cast<int>(BaseType::mDofSet.size());
    const int bounded_ndofs = ndofs < static_cast<int>(system_size) ? ndofs : static_cast<int>(system_size);
    #pragma omp parallel for
    for (int k = 0; k < bounded_ndofs; k++) {
        typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin() + k;
        // 0.0 marks a fixed dof whose row and column must be eliminated
        scaling_factors[k] = dof_iterator->IsFixed() ? 0.0 : 1.0;
    }

    double* Avalues = A.value_data().begin();
    std::size_t* Arow_indices = A.index1_data().begin();
    std::size_t* Acol_indices = A.index2_data().begin();

    // Detect if there is a line of all zeros and set the diagonal to a 1 if this happens.
    // NOTE(review): if such a row has no allocated diagonal entry in the CSR graph,
    // A(k,k) inserts into the matrix, which is not thread-safe — confirm the graph
    // always contains the diagonal for rows reached here.
    #pragma omp parallel for firstprivate(system_size)
    for (int k = 0; k < static_cast<int>(system_size); ++k) {
        const std::size_t col_begin = Arow_indices[k];
        const std::size_t col_end = Arow_indices[k+1];
        bool empty = true;
        for (std::size_t j = col_begin; j < col_end; ++j) {
            if (Avalues[j] != 0.0) {
                empty = false;
                break;
            }
        }
        if (empty) {
            A(k,k) = 1.0;
            b[k] = 0.0;
        }
    }

    #pragma omp parallel for
    for (int k = 0; k < static_cast<int>(system_size); ++k) {
        const std::size_t col_begin = Arow_indices[k];
        const std::size_t col_end = Arow_indices[k+1];
        const double k_factor = scaling_factors[k];
        if (k_factor == 0) {
            // Fixed dof: zero out the whole row, except the diagonal
            for (std::size_t j = col_begin; j < col_end; ++j)
                if (static_cast<int>(Acol_indices[j]) != k)
                    Avalues[j] = 0.0;
            // Zero out the RHS
            b[k] = 0.0;
        } else {
            // Free dof: zero out the column which is associated with the zero'ed row
            for (std::size_t j = col_begin; j < col_end; ++j)
                if (scaling_factors[ Acol_indices[j] ] == 0)
                    Avalues[j] = 0.0;
        }
    }
}
/**
 * @brief This function is intended to be called at the end of the solution step to clean up memory storage not needed
 * @details Clears the base storage plus the constraint-related containers
 * (slave/master id lists, inactive slave set) and shrinks the constraint
 * relation matrix mT and constant vector to zero size.
 */
void Clear() override
{
    // Base class storage first
    BaseType::Clear();
    mSlaveIds.clear();
    mMasterIds.clear();
    mInactiveSlaveDofs.clear();
    // Release the constraint relation matrix/vector memory
    mT.resize(0,0,false);
    mConstantVector.resize(0,false);
}
/**
 * @brief This function is designed to be called once to perform all the checks needed
 * on the input provided. Checks can be "expensive" as the function is designed
 * to catch user's errors.
 * @details No builder-specific checks are currently implemented: the method
 * always reports success.
 * @param rModelPart The model part of the problem to solve
 * @return 0 all ok
 */
int Check(ModelPart& rModelPart) override
{
    KRATOS_TRY

    // No checks implemented yet; unconditionally report success
    return 0;

    KRATOS_CATCH("");
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
// NOTE(review): the string says "Block" while the file's include guard refers to the
// elimination builder — confirm which class this file is meant to define.
std::string Info() const override
{
    return "ResidualBasedBlockBuilderAndSolver";
}
/// Print information about this object (currently just the Info() string).
void PrintInfo(std::ostream& rOStream) const override
{
    rOStream << Info();
}
/// Print object's data (no extra data is printed beyond the Info() string).
void PrintData(std::ostream& rOStream) const override
{
    rOStream << Info();
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
TSystemMatrixType mT; /// Global relation (transformation) matrix of the master-slave constraints
TSystemVectorType mConstantVector; /// Constant (rigid-movement) part of the constraint relation
std::vector<IndexType> mSlaveIds; /// The equation ids of the slave dofs
std::vector<IndexType> mMasterIds; /// The equation ids of the master dofs
std::unordered_set<IndexType> mInactiveSlaveDofs; /// Equation ids of slave dofs belonging to inactive constraints
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
 * @brief Builds the sparsity pattern of the constraint relation matrix mT.
 * @details Each slave row couples with all the masters of its constraints; dofs
 * that never appear as slaves are classified as masters. The diagonal entry is
 * always present. Does nothing if the model part has no constraints.
 * @param rModelPart The model part holding the master-slave constraints
 */
void ConstructMasterSlaveConstraintsStructure(ModelPart& rModelPart)
{
    if (rModelPart.MasterSlaveConstraints().size() > 0) {
        Timer::Start("ConstraintsRelationMatrixStructure"); // paired with the Stop below

        const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();

        // Constraint initial iterator
        const auto it_const_begin = rModelPart.MasterSlaveConstraints().begin();

        // One set of column indices per system row, protected by one lock each
        std::vector<std::unordered_set<IndexType>> indices(BaseType::mDofSet.size());
        std::vector<LockObject> lock_array(indices.size());

        #pragma omp parallel
        {
            Element::EquationIdVectorType slave_ids(3);
            Element::EquationIdVectorType master_ids(3);

            // Thread-local buffer: slave equation id -> set of master equation ids
            std::unordered_map<IndexType, std::unordered_set<IndexType>> temp_indices;

            #pragma omp for schedule(guided, 512) nowait
            for (int i_const = 0; i_const < static_cast<int>(rModelPart.MasterSlaveConstraints().size()); ++i_const) {
                auto it_const = it_const_begin + i_const;

                // Detect if the constraint is active or not. If the user did not make any choice the constraint
                // It is active by default
                bool constraint_is_active = true;
                if( it_const->IsDefined(ACTIVE) ) {
                    constraint_is_active = it_const->Is(ACTIVE);
                }

                if(constraint_is_active) {
                    it_const->EquationIdVector(slave_ids, master_ids, r_current_process_info);
                    // Every slave row couples with all the masters of this constraint
                    for (auto &id_i : slave_ids) {
                        temp_indices[id_i].insert(master_ids.begin(), master_ids.end());
                    }
                }
            }

            // Merging all the temporal indexes.
            // BUGFIX: iterate the map's (row, columns) pairs — indexing the
            // unordered_map with 0..size()-1 merged into the wrong rows and
            // silently created spurious empty entries.
            for (auto& r_pair : temp_indices) {
                lock_array[r_pair.first].SetLock();
                indices[r_pair.first].insert(r_pair.second.begin(), r_pair.second.end());
                lock_array[r_pair.first].UnSetLock();
            }
        }

        // Classify dofs: rows with no master couplings are masters themselves
        mSlaveIds.clear();
        mMasterIds.clear();
        for (int i = 0; i < static_cast<int>(indices.size()); ++i) {
            if (indices[i].size() == 0) // Master dof!
                mMasterIds.push_back(i);
            else // Slave dof
                mSlaveIds.push_back(i);
            indices[i].insert(i); // Ensure that the diagonal is there in T
        }

        // Count the row sizes
        std::size_t nnz = 0;
        for (IndexType i = 0; i < indices.size(); ++i)
            nnz += indices[i].size();

        mT = TSystemMatrixType(indices.size(), indices.size(), nnz);
        mConstantVector.resize(indices.size(), false);

        double *Tvalues = mT.value_data().begin();
        IndexType *Trow_indices = mT.index1_data().begin();
        IndexType *Tcol_indices = mT.index2_data().begin();

        // Filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP!
        Trow_indices[0] = 0;
        for (int i = 0; i < static_cast<int>(mT.size1()); i++)
            Trow_indices[i + 1] = Trow_indices[i] + indices[i].size();

        // Fill column indices/values row by row and sort each row
        #pragma omp parallel for
        for (int i = 0; i < static_cast<int>(mT.size1()); ++i) {
            const IndexType row_begin = Trow_indices[i];
            const IndexType row_end = Trow_indices[i + 1];
            IndexType k = row_begin;
            for (auto it = indices[i].begin(); it != indices[i].end(); ++it) {
                Tcol_indices[k] = *it;
                Tvalues[k] = 0.0;
                k++;
            }
            indices[i].clear(); //deallocating the memory
            std::sort(&Tcol_indices[row_begin], &Tcol_indices[row_end]);
        }

        mT.set_filled(indices.size() + 1, nnz);

        Timer::Stop("ConstraintsRelationMatrixStructure");
    }
}
/**
 * @brief Assembles the constraint relation matrix mT and constant vector mConstantVector
 * from all active master-slave constraints of the model part.
 * @details Inactive constraints only contribute their slave ids to mInactiveSlaveDofs.
 * Master dofs and inactive slave dofs receive an identity row (T(i,i)=1, C(i)=0).
 * @param rModelPart The model part holding the constraints
 */
void BuildMasterSlaveConstraints(ModelPart& rModelPart)
{
    KRATOS_TRY

    // Start from a clean relation matrix and constant vector
    TSparseSpace::SetToZero(mT);
    TSparseSpace::SetToZero(mConstantVector);

    // The current process info
    const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();

    // Vector containing the localization in the system of the different terms
    DofsVectorType slave_dof_list, master_dof_list;

    // Contributions to the system
    Matrix transformation_matrix = LocalSystemMatrixType(0, 0);
    Vector constant_vector = LocalSystemVectorType(0);

    // Vector containing the localization in the system of the different terms
    Element::EquationIdVectorType slave_equation_ids, master_equation_ids;

    const int number_of_constraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size());

    // We clear the set of inactive slave dofs before refilling it below
    mInactiveSlaveDofs.clear();

    #pragma omp parallel firstprivate(transformation_matrix, constant_vector, slave_equation_ids, master_equation_ids)
    {
        // Thread-local collection of slave dofs belonging to inactive constraints
        std::unordered_set<IndexType> auxiliar_inactive_slave_dofs;

        #pragma omp for schedule(guided, 512)
        for (int i_const = 0; i_const < number_of_constraints; ++i_const) {
            auto it_const = rModelPart.MasterSlaveConstraints().begin() + i_const;

            // Detect if the constraint is active or not. If the user did not make any choice the constraint
            // It is active by default
            bool constraint_is_active = true;
            if (it_const->IsDefined(ACTIVE))
                constraint_is_active = it_const->Is(ACTIVE);

            if (constraint_is_active) {
                // Local relation of this constraint (T and C) and its equation ids
                it_const->CalculateLocalSystem(transformation_matrix, constant_vector, r_current_process_info);
                it_const->EquationIdVector(slave_equation_ids, master_equation_ids, r_current_process_info);

                for (IndexType i = 0; i < slave_equation_ids.size(); ++i) {
                    const IndexType i_global = slave_equation_ids[i];

                    // Assemble matrix row
                    AssembleRowContribution(mT, transformation_matrix, i_global, i, master_equation_ids);

                    // Assemble constant vector (atomic: several threads may hit the same row)
                    const double constant_value = constant_vector[i];
                    double& r_value = mConstantVector[i_global];
                    #pragma omp atomic
                    r_value += constant_value;
                }
            } else { // Taking into account inactive constraints
                it_const->EquationIdVector(slave_equation_ids, master_equation_ids, r_current_process_info);
                auxiliar_inactive_slave_dofs.insert(slave_equation_ids.begin(), slave_equation_ids.end());
            }
        }

        // We merge all the sets in one thread
        #pragma omp critical
        {
            mInactiveSlaveDofs.insert(auxiliar_inactive_slave_dofs.begin(), auxiliar_inactive_slave_dofs.end());
        }
    }

    // Setting the master dofs into the T and C system (identity relation)
    for (auto eq_id : mMasterIds) {
        mConstantVector[eq_id] = 0.0;
        mT(eq_id, eq_id) = 1.0;
    }

    // Setting inactive slave dofs in the T and C system (identity relation)
    for (auto eq_id : mInactiveSlaveDofs) {
        mConstantVector[eq_id] = 0.0;
        mT(eq_id, eq_id) = 1.0;
    }

    KRATOS_CATCH("")
}
/**
 * @brief Condenses the master-slave constraints into the linear system.
 * @details Builds T and C via BuildMasterSlaveConstraints, then applies
 * b <- T^T * b and A <- T^T * A * T. Rows of the active slave dofs receive the
 * maximum diagonal value of A so the transformed system stays non-singular.
 * @param pScheme The integration scheme considered
 * @param rA The LHS matrix
 * @param rDx The unknowns vector
 * @param rb The RHS vector
 * @param rModelPart The model part of the problem to solve
 */
void ApplyConstraints(
    typename TSchemeType::Pointer pScheme,
    TSystemMatrixType &rA,
    TSystemVectorType &rDx,
    TSystemVectorType &rb,
    ModelPart &rModelPart)
{
    KRATOS_TRY

    if (rModelPart.MasterSlaveConstraints().size() != 0) {
        BuildMasterSlaveConstraints(rModelPart);

        // We compute the transposed matrix of the global relation matrix
        TSystemMatrixType T_transpose_matrix(mT.size2(), mT.size1());
        SparseMatrixMultiplicationUtility::TransposeMatrix<TSystemMatrixType, TSystemMatrixType>(T_transpose_matrix, mT, 1.0);

        // rb <- T^T * rb
        TSystemVectorType b_modified(rb.size());
        TSparseSpace::Mult(T_transpose_matrix, rb, b_modified);
        TSparseSpace::Copy(b_modified, rb);
        b_modified.resize(0, false); //free memory

        // rA <- T^T * rA * mT, freeing each intermediate as soon as possible
        TSystemMatrixType auxiliar_A_matrix(mT.size2(), rA.size2());
        SparseMatrixMultiplicationUtility::MatrixMultiplication(T_transpose_matrix, rA, auxiliar_A_matrix); //auxiliar = T_transpose * rA
        T_transpose_matrix.resize(0, 0, false); //free memory
        SparseMatrixMultiplicationUtility::MatrixMultiplication(auxiliar_A_matrix, mT, rA); //A = auxilar * T   NOTE: here we are overwriting the old A matrix!
        auxiliar_A_matrix.resize(0, 0, false); //free memory

        // Largest absolute diagonal entry, used as the pivot for the slave rows
        double max_diag = 0.0;
        for(IndexType i = 0; i < rA.size1(); ++i) {
            max_diag = std::max(std::abs(rA(i,i)), max_diag);
        }

        // Apply diagonal values on slaves
        #pragma omp parallel for
        for (int i = 0; i < static_cast<int>(mSlaveIds.size()); ++i) {
            const IndexType slave_equation_id = mSlaveIds[i];
            if (mInactiveSlaveDofs.find(slave_equation_id) == mInactiveSlaveDofs.end()) {
                rA(slave_equation_id, slave_equation_id) = max_diag;
                rb[slave_equation_id] = 0.0;
            }
        }
    }

    KRATOS_CATCH("")
}
/**
 * @brief Builds the sparsity pattern (graph) of the system matrix A from the
 * element and condition connectivities and allocates its CSR arrays.
 * @param pScheme The integration scheme providing the equation ids
 * @param A The system matrix to be given its final sparse structure
 * @param rModelPart The model part with the elements and conditions
 */
virtual void ConstructMatrixStructure(
    typename TSchemeType::Pointer pScheme,
    TSystemMatrixType& A,
    ModelPart& rModelPart)
{
    //filling with zero the matrix (creating the structure)
    Timer::Start("MatrixStructure");

    // Getting the elements from the model
    const int nelements = static_cast<int>(rModelPart.Elements().size());

    // Getting the array of the conditions
    const int nconditions = static_cast<int>(rModelPart.Conditions().size());

    ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();
    ModelPart::ElementsContainerType::iterator el_begin = rModelPart.ElementsBegin();
    ModelPart::ConditionsContainerType::iterator cond_begin = rModelPart.ConditionsBegin();

    const std::size_t equation_size = BaseType::mEquationSystemSize;

    // One set of column ids per row, each protected by its own lock
    std::vector< LockObject > lock_array(equation_size);
    std::vector<std::unordered_set<std::size_t> > indices(equation_size);

    #pragma omp parallel for firstprivate(equation_size)
    for (int iii = 0; iii < static_cast<int>(equation_size); iii++) {
        indices[iii].reserve(40);
    }

    Element::EquationIdVectorType ids(3, 0);

    // Every pair of dofs sharing an element couples in the matrix graph
    #pragma omp parallel for firstprivate(nelements, ids)
    for (int iii=0; iii<nelements; iii++) {
        typename ElementsContainerType::iterator i_element = el_begin + iii;
        pScheme->EquationId( *(i_element.base()) , ids, CurrentProcessInfo);
        for (std::size_t i = 0; i < ids.size(); i++) {
            lock_array[ids[i]].SetLock();
            auto& row_indices = indices[ids[i]];
            row_indices.insert(ids.begin(), ids.end());
            lock_array[ids[i]].UnSetLock();
        }
    }

    // Same coupling for the conditions
    #pragma omp parallel for firstprivate(nconditions, ids)
    for (int iii = 0; iii<nconditions; iii++) {
        typename ConditionsArrayType::iterator i_condition = cond_begin + iii;
        pScheme->Condition_EquationId( *(i_condition.base()), ids, CurrentProcessInfo);
        for (std::size_t i = 0; i < ids.size(); i++) {
            lock_array[ids[i]].SetLock();
            auto& row_indices = indices[ids[i]];
            row_indices.insert(ids.begin(), ids.end());
            lock_array[ids[i]].UnSetLock();
        }
    }

    //destroy locks
    lock_array = std::vector< LockObject >();

    //count the row sizes
    unsigned int nnz = 0;
    for (unsigned int i = 0; i < indices.size(); i++) {
        nnz += indices[i].size();
    }

    A = CompressedMatrixType(indices.size(), indices.size(), nnz);

    // Raw CSR arrays of the freshly allocated matrix
    double* Avalues = A.value_data().begin();
    std::size_t* Arow_indices = A.index1_data().begin();
    std::size_t* Acol_indices = A.index2_data().begin();

    //filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP!
    Arow_indices[0] = 0;
    for (int i = 0; i < static_cast<int>(A.size1()); i++) {
        Arow_indices[i+1] = Arow_indices[i] + indices[i].size();
    }

    // Fill each row's column indices, zero-initialize values and sort the row
    #pragma omp parallel for
    for (int i = 0; i < static_cast<int>(A.size1()); i++) {
        const unsigned int row_begin = Arow_indices[i];
        const unsigned int row_end = Arow_indices[i+1];
        unsigned int k = row_begin;
        for (auto it = indices[i].begin(); it != indices[i].end(); it++) {
            Acol_indices[k] = *it;
            Avalues[k] = 0.0;
            k++;
        }
        indices[i].clear(); //deallocating the memory
        std::sort(&Acol_indices[row_begin], &Acol_indices[row_end]);
    }

    A.set_filled(indices.size()+1, nnz);

    Timer::Stop("MatrixStructure");
}
/**
 * @brief Scatters a local LHS/RHS pair into the global system.
 * @param A Global system matrix (assembled row by row)
 * @param b Global RHS vector
 * @param LHS_Contribution Local stiffness contribution
 * @param RHS_Contribution Local RHS contribution
 * @param EquationId Global equation ids of the local rows/columns
 */
void Assemble(
    TSystemMatrixType& A,
    TSystemVectorType& b,
    const LocalSystemMatrixType& LHS_Contribution,
    const LocalSystemVectorType& RHS_Contribution,
    Element::EquationIdVectorType& EquationId
    )
{
    const unsigned int local_size = LHS_Contribution.size1();

    for (unsigned int i_local = 0; i_local < local_size; ++i_local) {
        const unsigned int i_global = EquationId[i_local];

        // Atomic accumulation: several threads may target the same global row
        double& r_b = b[i_global];
        const double& rhs_value = RHS_Contribution(i_local);
        #pragma omp atomic
        r_b += rhs_value;

        // Row-wise assembly of the local LHS into the sparse global matrix
        AssembleRowContribution(A, LHS_Contribution, i_global, i_local, EquationId);
    }
}
//**************************************************************************
/**
 * @brief Scatters a local RHS contribution into the global RHS vector.
 * @param b Global RHS vector
 * @param RHS_Contribution Local contribution
 * @param EquationId Global equation ids of the local entries
 */
void AssembleRHS(
    TSystemVectorType& b,
    LocalSystemVectorType& RHS_Contribution,
    Element::EquationIdVectorType& EquationId
    )
{
    const unsigned int local_size = RHS_Contribution.size();

    for (unsigned int i_local = 0; i_local < local_size; ++i_local) {
        const unsigned int i_global = EquationId[i_local];

        // ASSEMBLING THE SYSTEM VECTOR: atomic, since several threads may
        // accumulate into the same global entry
        double& r_b = b[i_global];
        const double& local_value = RHS_Contribution[i_local];
        #pragma omp atomic
        r_b += local_value;
    }
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
/**
 * @brief Appends @p candidate to @p v only if it is not already contained.
 * @details Linear membership scan; preserves the insertion order of @p v.
 */
inline void AddUnique(std::vector<std::size_t>& v, const std::size_t& candidate)
{
    // Bail out as soon as the candidate is found
    for (const std::size_t existing : v) {
        if (existing == candidate) {
            return;
        }
    }
    v.push_back(candidate);
}
/**
 * @brief Assembles the RHS vector b from all active elements and conditions,
 * without applying any Dirichlet boundary conditions.
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 * @param b The RHS vector being assembled
 */
void BuildRHSNoDirichlet(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemVectorType& b)
{
    KRATOS_TRY

    //Getting the Elements
    ElementsArrayType& pElements = rModelPart.Elements();

    //getting the array of the conditions
    ConditionsArrayType& ConditionsArray = rModelPart.Conditions();

    ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();

    //contributions to the system
    // NOTE(review): LHS_Contribution is shared (not in the firstprivate list below)
    // yet resized inside the parallel region — confirm this is benign.
    LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
    LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);

    //vector containing the localization in the system of the different
    //terms
    Element::EquationIdVectorType EquationId;

    // assemble all elements
    //for (typename ElementsArrayType::ptr_iterator it = pElements.ptr_begin(); it != pElements.ptr_end(); ++it)
    const int nelements = static_cast<int>(pElements.size());
    #pragma omp parallel firstprivate(nelements, RHS_Contribution, EquationId)
    {
        #pragma omp for schedule(guided, 512) nowait
        for (int i=0; i<nelements; i++) {
            typename ElementsArrayType::iterator it = pElements.begin() + i;

            //detect if the element is active or not. If the user did not make any choice the element
            //is active by default
            bool element_is_active = true;
            if( (it)->IsDefined(ACTIVE) ) {
                element_is_active = (it)->Is(ACTIVE);
            }

            if(element_is_active) {
                //calculate elemental Right Hand Side Contribution
                pScheme->Calculate_RHS_Contribution(*(it.base()), RHS_Contribution, EquationId, CurrentProcessInfo);

                //assemble the elemental contribution
                AssembleRHS(b, RHS_Contribution, EquationId);
            }
        }

        // shrink the buffers before the condition loop
        LHS_Contribution.resize(0, 0, false);
        RHS_Contribution.resize(0, false);

        // assemble all conditions
        const int nconditions = static_cast<int>(ConditionsArray.size());
        #pragma omp for schedule(guided, 512)
        for (int i = 0; i<nconditions; i++) {
            auto it = ConditionsArray.begin() + i;

            //detect if the element is active or not. If the user did not make any choice the element
            //is active by default
            bool condition_is_active = true;
            if( (it)->IsDefined(ACTIVE) ) {
                condition_is_active = (it)->Is(ACTIVE);
            }

            if(condition_is_active) {
                //calculate elemental contribution
                pScheme->Condition_Calculate_RHS_Contribution(*(it.base()), RHS_Contribution, EquationId, CurrentProcessInfo);

                //assemble the elemental contribution
                AssembleRHS(b, RHS_Contribution, EquationId);
            }
        }
    }

    KRATOS_CATCH("")
}
//******************************************************************************************
//******************************************************************************************
/**
 * @brief Splits number_of_rows into number_of_threads contiguous chunks.
 * @details partitions[i]..partitions[i+1] is the half-open row range of thread i;
 * remainder rows are absorbed by the last partition.
 */
inline void CreatePartition(unsigned int number_of_threads, const int number_of_rows, DenseVector<unsigned int>& partitions)
{
    partitions.resize(number_of_threads + 1);

    const int chunk_size = number_of_rows / number_of_threads;

    partitions[0] = 0;
    for (unsigned int i = 1; i < number_of_threads; ++i) {
        partitions[i] = partitions[i - 1] + chunk_size;
    }
    // The last boundary always closes at the full row count
    partitions[number_of_threads] = number_of_rows;
}
/**
 * @brief Adds row i_local of the local matrix Alocal into row i of the global CSR matrix A.
 * @details Exploits that consecutive equation ids are usually close together: the
 * search for each column entry starts at the previously found position and walks
 * forward or backward through the row's column-index array. Accumulations are
 * atomic, so several threads may assemble into the same row concurrently.
 * The searched ids are assumed to exist in the row's sparsity pattern.
 */
inline void AssembleRowContribution(TSystemMatrixType& A, const Matrix& Alocal, const unsigned int i, const unsigned int i_local, Element::EquationIdVectorType& EquationId)
{
    // Raw CSR arrays of the global matrix
    double* values_vector = A.value_data().begin();
    std::size_t* index1_vector = A.index1_data().begin();
    std::size_t* index2_vector = A.index2_data().begin();

    size_t left_limit = index1_vector[i];
    // size_t right_limit = index1_vector[i+1];

    //find the first entry
    size_t last_pos = ForwardFind(EquationId[0],left_limit,index2_vector);
    size_t last_found = EquationId[0];

    double& r_a = values_vector[last_pos];
    const double& v_a = Alocal(i_local,0);
    #pragma omp atomic
    r_a += v_a;

    //now find all of the other entries, searching relative to the last hit
    size_t pos = 0;
    for (unsigned int j=1; j<EquationId.size(); j++) {
        unsigned int id_to_find = EquationId[j];
        if(id_to_find > last_found) {
            pos = ForwardFind(id_to_find,last_pos+1,index2_vector);
        } else if(id_to_find < last_found) {
            pos = BackwardFind(id_to_find,last_pos-1,index2_vector);
        } else {
            pos = last_pos;
        }

        double& r = values_vector[pos];
        const double& v = Alocal(i_local,j);
        #pragma omp atomic
        r += v;

        last_found = id_to_find;
        last_pos = pos;
    }
}
/**
 * @brief Returns the first position >= start whose entry equals id_to_find.
 * @details The id is assumed to be present; no bounds check is performed.
 */
inline unsigned int ForwardFind(const unsigned int id_to_find,
                                const unsigned int start,
                                const size_t* index_vector)
{
    unsigned int pos = start;
    // Scan towards higher positions until the column id matches
    for (; index_vector[pos] != id_to_find; ++pos) {}
    return pos;
}
/**
 * @brief Returns the first position <= start whose entry equals id_to_find.
 * @details The id is assumed to be present; no bounds check is performed.
 */
inline unsigned int BackwardFind(const unsigned int id_to_find,
                                 const unsigned int start,
                                 const size_t* index_vector)
{
    unsigned int pos = start;
    // Scan towards lower positions until the column id matches
    for (; index_vector[pos] != id_to_find; --pos) {}
    return pos;
}
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class ResidualBasedBlockBuilderAndSolver */
///@}
///@name Type Definitions
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER defined */
|
vector.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Member functions for hypre_Vector class.
*
*****************************************************************************/
#include "seq_mv.h"
#include <assert.h>
/*--------------------------------------------------------------------------
 * hypre_SeqVectorCreate
 *
 * Allocates an empty vector of the given logical size. No data array is
 * allocated here; that happens in hypre_SeqVectorInitialize.
 *--------------------------------------------------------------------------*/
hypre_Vector *
hypre_SeqVectorCreate( HYPRE_Int size )
{
   hypre_Vector *vector = hypre_CTAlloc(hypre_Vector, 1, HYPRE_MEMORY_HOST);

   hypre_VectorData(vector)                  = NULL;
   hypre_VectorSize(vector)                  = size;
   hypre_VectorNumVectors(vector)            = 1;
   hypre_VectorMultiVecStorageMethod(vector) = 0;

   /* defaults: the vector owns its data and lives in shared memory */
   hypre_VectorOwnsData(vector)       = 1;
   hypre_VectorMemoryLocation(vector) = HYPRE_MEMORY_SHARED;

   return vector;
}
/*--------------------------------------------------------------------------
 * hypre_SeqMultiVectorCreate
 *
 * A multivector is an ordinary seq vector with num_vectors components.
 *--------------------------------------------------------------------------*/
hypre_Vector *
hypre_SeqMultiVectorCreate( HYPRE_Int size, HYPRE_Int num_vectors )
{
   hypre_Vector *multivec = hypre_SeqVectorCreate(size);

   hypre_VectorNumVectors(multivec) = num_vectors;

   return multivec;
}
/*--------------------------------------------------------------------------
 * hypre_SeqVectorDestroy
 *
 * Frees the vector and, if it owns it, its data array. NULL is tolerated.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorDestroy( hypre_Vector *vector )
{
   HYPRE_Int ierr = 0;

   if (!vector)
   {
      return ierr;
   }

   /* only release the data array when this vector owns it (shallow clones do not) */
   if (hypre_VectorOwnsData(vector))
   {
      HYPRE_Int memory_location = hypre_VectorMemoryLocation(vector);
      hypre_TFree(hypre_VectorData(vector), memory_location);
   }
   hypre_TFree(vector, HYPRE_MEMORY_HOST);

   return ierr;
}
/*--------------------------------------------------------------------------
 * hypre_SeqVectorInitialize_v2
 *
 * Allocates the data array (if not already present) in the requested memory
 * location and sets the multivector strides. Returns nonzero for an unknown
 * storage method.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorInitialize_v2( hypre_Vector *vector, HYPRE_Int memory_location )
{
   HYPRE_Int ierr        = 0;
   HYPRE_Int size        = hypre_VectorSize(vector);
   HYPRE_Int num_vectors = hypre_VectorNumVectors(vector);
   HYPRE_Int storage     = hypre_VectorMultiVecStorageMethod(vector);

   hypre_VectorMemoryLocation(vector) = memory_location;

   /* Caveat: for pre-existing data, the memory location must be guaranteed
    * to be consistent with `memory_location'
    * Otherwise, mismatches will exist and problems will be encountered
    * when being used, and freed */
   if (!hypre_VectorData(vector))
   {
      hypre_VectorData(vector) = hypre_CTAlloc(HYPRE_Complex, num_vectors * size,
                                               memory_location);
   }

   /* storage 0: each component vector is a contiguous block;
    * storage 1: components are interleaved entry by entry */
   if (storage == 0)
   {
      hypre_VectorVectorStride(vector) = size;
      hypre_VectorIndexStride(vector)  = 1;
   }
   else if (storage == 1)
   {
      hypre_VectorVectorStride(vector) = 1;
      hypre_VectorIndexStride(vector)  = num_vectors;
   }
   else
   {
      ++ierr; /* unknown storage method */
   }

   return ierr;
}
HYPRE_Int
hypre_SeqVectorInitialize( hypre_Vector *vector )
{
   /* initialize in whatever memory location the vector already records */
   return hypre_SeqVectorInitialize_v2(vector, hypre_VectorMemoryLocation(vector));
}
/*--------------------------------------------------------------------------
 * hypre_SeqVectorSetDataOwner
 *
 * Nonzero owns_data means hypre_SeqVectorDestroy will free the data array.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorSetDataOwner( hypre_Vector *vector,
                             HYPRE_Int     owns_data )
{
   hypre_VectorOwnsData(vector) = owns_data;

   return 0;
}
/*--------------------------------------------------------------------------
* ReadVector
*--------------------------------------------------------------------------*/
hypre_Vector *
hypre_SeqVectorRead( char *file_name )
{
hypre_Vector *vector;
FILE *fp;
HYPRE_Complex *data;
HYPRE_Int size;
HYPRE_Int j;
/*----------------------------------------------------------
* Read in the data
*----------------------------------------------------------*/
fp = fopen(file_name, "r");
hypre_fscanf(fp, "%d", &size);
vector = hypre_SeqVectorCreate(size);
hypre_SeqVectorInitialize(vector);
data = hypre_VectorData(vector);
for (j = 0; j < size; j++)
{
hypre_fscanf(fp, "%le", &data[j]);
}
fclose(fp);
/* multivector code not written yet */
hypre_assert( hypre_VectorNumVectors(vector) == 1 );
return vector;
}
/*--------------------------------------------------------------------------
 * hypre_SeqVectorPrint
 *
 * Writes the vector (or multivector) to an ASCII file. Returns nonzero if
 * the file cannot be opened for writing.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorPrint( hypre_Vector *vector,
                      char         *file_name )
{
   FILE          *fp;

   HYPRE_Complex *data;
   HYPRE_Int      size, num_vectors, vecstride, idxstride;

   HYPRE_Int      i, j;
   HYPRE_Complex  value;

   HYPRE_Int      ierr = 0;

   num_vectors = hypre_VectorNumVectors(vector);
   vecstride   = hypre_VectorVectorStride(vector);
   idxstride   = hypre_VectorIndexStride(vector);

   /*----------------------------------------------------------
    * Print in the data
    *----------------------------------------------------------*/

   data = hypre_VectorData(vector);
   size = hypre_VectorSize(vector);

   fp = fopen(file_name, "w");
   if (!fp)
   {
      /* BUGFIX: previously a failed fopen led to fprintf on a NULL stream */
      return ++ierr;
   }

   /* header: plain size for a single vector, count + size for a multivector */
   if ( hypre_VectorNumVectors(vector) == 1 )
   {
      hypre_fprintf(fp, "%d\n", size);
   }
   else
   {
      hypre_fprintf(fp, "%d vectors of size %d\n", num_vectors, size );
   }

   if ( num_vectors > 1 )
   {
      for ( j = 0; j < num_vectors; ++j )
      {
         hypre_fprintf(fp, "vector %d\n", j );
         for (i = 0; i < size; i++)
         {
            value = data[ j*vecstride + i*idxstride ];
#ifdef HYPRE_COMPLEX
            hypre_fprintf(fp, "%.14e , %.14e\n",
                          hypre_creal(value), hypre_cimag(value));
#else
            hypre_fprintf(fp, "%.14e\n", value);
#endif
         }
      }
   }
   else
   {
      for (i = 0; i < size; i++)
      {
#ifdef HYPRE_COMPLEX
         hypre_fprintf(fp, "%.14e , %.14e\n",
                       hypre_creal(data[i]), hypre_cimag(data[i]));
#else
         hypre_fprintf(fp, "%.14e\n", data[i]);
#endif
      }
   }

   fclose(fp);

   return ierr;
}
/*--------------------------------------------------------------------------
 * hypre_SeqVectorSetConstantValues
 *
 * Sets every entry of v (across all component vectors) to `value`.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorSetConstantValues( hypre_Vector *v,
                                  HYPRE_Complex value )
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif

   HYPRE_Complex *vector_data = hypre_VectorData(v);
   HYPRE_Int      size        = hypre_VectorSize(v);

   HYPRE_Int      ierr  = 0;

   /* total entry count, including all component vectors */
   size *= hypre_VectorNumVectors(v);

   hypre_SeqVectorPrefetch(v, HYPRE_MEMORY_DEVICE);

#if defined(HYPRE_USING_CUDA)
   HYPRE_THRUST_CALL( fill_n, vector_data, size, value );
#else
   HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
#pragma omp target teams distribute parallel for private(i) is_device_ptr(vector_data)
#elif defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < size; i++)
   {
      vector_data[i] = value;
   }
#endif /* defined(HYPRE_USING_CUDA) */

   hypre_SyncCudaComputeStream(hypre_handle);

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif

   return ierr;
}
/*--------------------------------------------------------------------------
 * hypre_SeqVectorSetRandomValues
 *
 * returns vector of values randomly distributed between -1.0 and +1.0
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorSetRandomValues( hypre_Vector *v,
                                HYPRE_Int     seed )
{
   HYPRE_Complex *data  = hypre_VectorData(v);
   HYPRE_Int      total = hypre_VectorSize(v) * hypre_VectorNumVectors(v);
   HYPRE_Int      i;

   hypre_SeedRand(seed);

   /* RDF: threading this loop may cause problems because of hypre_Rand() */
   for (i = 0; i < total; i++)
   {
      data[i] = 2.0 * hypre_Rand() - 1.0;
   }

   return 0;
}
/*--------------------------------------------------------------------------
 * hypre_SeqVectorCopy
 * copies data from x to y
 * if size of x is larger than y only the first size_y elements of x are
 * copied to y
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorCopy( hypre_Vector *x,
                     hypre_Vector *y )
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif

   /* make sure both vectors are resident on the device before copying */
   hypre_SeqVectorPrefetch(x, HYPRE_MEMORY_DEVICE);
   hypre_SeqVectorPrefetch(y, HYPRE_MEMORY_DEVICE);

   HYPRE_Complex *x_data = hypre_VectorData(x);
   HYPRE_Complex *y_data = hypre_VectorData(y);
   HYPRE_Int      size   = hypre_VectorSize(x);
   HYPRE_Int      size_y = hypre_VectorSize(y);

   HYPRE_Int      ierr = 0;

   /* clamp to the destination size so we never write past y */
   if (size > size_y)
   {
      size = size_y;
   }
   size *= hypre_VectorNumVectors(x);

#if defined(HYPRE_USING_CUDA)
#if defined(HYPRE_USING_CUBLAS)
   HYPRE_CUBLAS_CALL( cublasDcopy(hypre_HandleCublasHandle(hypre_handle), size, x_data, 1, y_data, 1) );
#else
   HYPRE_THRUST_CALL( copy_n, x_data, size, y_data );
#endif
#else
   HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
#pragma omp target teams distribute parallel for private(i) is_device_ptr(y_data,x_data)
#elif defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < size; i++)
   {
      y_data[i] = x_data[i];
   }
#endif /* defined(HYPRE_USING_CUDA) */

   hypre_SyncCudaComputeStream(hypre_handle);

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif

   return ierr;
}
/*--------------------------------------------------------------------------
 * hypre_SeqVectorCloneDeep
 * Returns a complete copy of x - a deep copy, with its own copy of the data.
 *--------------------------------------------------------------------------*/
hypre_Vector*
hypre_SeqVectorCloneDeep( hypre_Vector *x )
{
   hypre_Vector *clone = hypre_SeqMultiVectorCreate(hypre_VectorSize(x),
                                                    hypre_VectorNumVectors(x));

   /* replicate the storage layout of the source vector */
   hypre_VectorMultiVecStorageMethod(clone) = hypre_VectorMultiVecStorageMethod(x);
   hypre_VectorVectorStride(clone) = hypre_VectorVectorStride(x);
   hypre_VectorIndexStride(clone)  = hypre_VectorIndexStride(x);

   /* allocate fresh data, then copy the contents over */
   hypre_SeqVectorInitialize(clone);
   hypre_SeqVectorCopy(x, clone);

   return clone;
}
/*--------------------------------------------------------------------------
 * hypre_SeqVectorCloneShallow
 * Returns a complete copy of x - a shallow copy, pointing the data of x
 *--------------------------------------------------------------------------*/
hypre_Vector *
hypre_SeqVectorCloneShallow( hypre_Vector *x )
{
   hypre_Vector *clone = hypre_SeqMultiVectorCreate(hypre_VectorSize(x),
                                                    hypre_VectorNumVectors(x));

   /* replicate the storage layout of the source vector */
   hypre_VectorMultiVecStorageMethod(clone) = hypre_VectorMultiVecStorageMethod(x);
   hypre_VectorVectorStride(clone) = hypre_VectorVectorStride(x);
   hypre_VectorIndexStride(clone)  = hypre_VectorIndexStride(x);

   /* share the data array; the clone must never free it */
   hypre_VectorData(clone) = hypre_VectorData(x);
   hypre_SeqVectorSetDataOwner(clone, 0);

   /* data pointer is already set, so Initialize performs no allocation */
   hypre_SeqVectorInitialize(clone);

   return clone;
}
/*--------------------------------------------------------------------------
 * hypre_SeqVectorScale
 *
 * Multiplies every entry of y (all component vectors) by alpha.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorScale( HYPRE_Complex alpha,
                      hypre_Vector *y )
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif

   HYPRE_Complex *y_data = hypre_VectorData(y);
   HYPRE_Int      size   = hypre_VectorSize(y);

   HYPRE_Int      ierr = 0;

   /* total entry count, including all component vectors */
   size *= hypre_VectorNumVectors(y);

   hypre_SeqVectorPrefetch(y, HYPRE_MEMORY_DEVICE);

#if defined(HYPRE_USING_CUDA)
#if defined(HYPRE_USING_CUBLAS)
   HYPRE_CUBLAS_CALL( cublasDscal(hypre_HandleCublasHandle(hypre_handle), size, &alpha, y_data, 1) );
#else
   HYPRE_THRUST_CALL( transform, y_data, y_data + size, y_data, alpha * _1 );
#endif
#else
   HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
#pragma omp target teams distribute parallel for private(i) is_device_ptr(y_data)
#elif defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < size; i++)
   {
      y_data[i] *= alpha;
   }
#endif /* defined(HYPRE_USING_CUDA) */

   hypre_SyncCudaComputeStream(hypre_handle);

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif

   return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorAxpy
*--------------------------------------------------------------------------*/
/*
 * y <- alpha * x + y (BLAS-1 axpy), applied across all interleaved
 * component vectors of a multivector.
 * Returns 0; the return value carries no error information here.
 */
HYPRE_Int
hypre_SeqVectorAxpy( HYPRE_Complex alpha,
hypre_Vector *x,
hypre_Vector *y )
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif
HYPRE_Complex *x_data = hypre_VectorData(x);
HYPRE_Complex *y_data = hypre_VectorData(y);
HYPRE_Int size = hypre_VectorSize(x);
HYPRE_Int ierr = 0;
/* cover every component of a multivector */
size *= hypre_VectorNumVectors(x);
hypre_SeqVectorPrefetch(x, HYPRE_MEMORY_DEVICE);
hypre_SeqVectorPrefetch(y, HYPRE_MEMORY_DEVICE);
#if defined(HYPRE_USING_CUDA)
#if defined(HYPRE_USING_CUBLAS)
/* NOTE(review): cublasDaxpy operates on doubles — presumably HYPRE_Complex
   is double in CUDA builds; confirm for complex builds. */
HYPRE_CUBLAS_CALL( cublasDaxpy(hypre_HandleCublasHandle(hypre_handle), size, &alpha, x_data, 1, y_data, 1) );
#else
HYPRE_THRUST_CALL( transform, x_data, x_data + size, y_data, y_data, alpha * _1 + _2 );
#endif
#else
HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
#pragma omp target teams distribute parallel for private(i) is_device_ptr(y_data, x_data)
#elif defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
y_data[i] += alpha * x_data[i];
}
#endif /* defined(HYPRE_USING_CUDA) */
hypre_SyncCudaComputeStream(hypre_handle);
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorInnerProd
*--------------------------------------------------------------------------*/
/*
 * Return the inner product of x and y over all interleaved components.
 * On the host path the entries of y are conjugated
 * (result = sum_i conj(y_i) * x_i); the CUDA path is real-only and
 * fails to compile for complex builds (see the #error below).
 */
HYPRE_Real
hypre_SeqVectorInnerProd( hypre_Vector *x,
hypre_Vector *y )
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif
HYPRE_Complex *x_data = hypre_VectorData(x);
HYPRE_Complex *y_data = hypre_VectorData(y);
HYPRE_Int size = hypre_VectorSize(x);
HYPRE_Real result = 0.0;
/* cover every component of a multivector */
size *= hypre_VectorNumVectors(x);
hypre_SeqVectorPrefetch(x, HYPRE_MEMORY_DEVICE);
hypre_SeqVectorPrefetch(y, HYPRE_MEMORY_DEVICE);
#if defined(HYPRE_USING_CUDA)
#ifndef HYPRE_COMPLEX
#if defined(HYPRE_USING_CUBLAS)
HYPRE_CUBLAS_CALL( cublasDdot(hypre_HandleCublasHandle(hypre_handle), size, x_data, 1, y_data, 1, &result) );
#else
result = HYPRE_THRUST_CALL( inner_product, x_data, x_data + size, y_data, 0.0 );
#endif
#else
/* TODO */
#error "Complex inner product"
#endif
#else /* #if defined(HYPRE_USING_CUDA) */
HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
#pragma omp target teams distribute parallel for private(i) reduction(+:result) is_device_ptr(y_data,x_data) map(result)
#elif defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:result) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
/* note: conjugates y_data, not x_data */
result += hypre_conj(y_data[i]) * x_data[i];
}
#endif /* defined(HYPRE_USING_CUDA) */
hypre_SyncCudaComputeStream(hypre_handle);
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif
return result;
}
//TODO
/*--------------------------------------------------------------------------
* hypre_VectorSumElts:
* Returns the sum of all vector elements.
*--------------------------------------------------------------------------*/
/* Return the sum of all elements of vector (host-only reduction). */
HYPRE_Complex hypre_SeqVectorSumElts( hypre_Vector *vector )
{
   HYPRE_Complex *data = hypre_VectorData( vector );
   HYPRE_Int size = hypre_VectorSize( vector );
   HYPRE_Complex total = 0;
   HYPRE_Int k;

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(k) reduction(+:total) HYPRE_SMP_SCHEDULE
#endif
   for ( k = 0; k < size; ++k )
   {
      total += data[k];
   }

   return total;
}
/*
 * Prefetch the vector's data to to_location.
 * Only effective for unified (shared) memory builds; otherwise, or when
 * the vector does not live in shared memory or is empty, it is a no-op.
 * Always returns 0.
 */
HYPRE_Int
hypre_SeqVectorPrefetch( hypre_Vector *x, HYPRE_Int to_location)
{
HYPRE_Int ierr = 0;
#ifdef HYPRE_USING_UNIFIED_MEMORY
HYPRE_Complex *x_data = hypre_VectorData(x);
HYPRE_Int size = hypre_VectorSize(x) * hypre_VectorNumVectors(x);
/* only shared (unified) memory can be prefetched */
if (hypre_GetActualMemLocation(hypre_VectorMemoryLocation(x)) != HYPRE_MEMORY_SHARED)
{
/* hypre_error_w_msg(HYPRE_ERROR_GENERIC," Error! CUDA Prefetch with non-unified momory\n");*/
return ierr;
}
/* nothing to move */
if (size == 0)
{
return ierr;
}
/* special use of TMemcpy for prefetch: src and dst are the same pointer */
hypre_TMemcpy(x_data, x_data, HYPRE_Complex, size, to_location, HYPRE_MEMORY_SHARED);
#endif
return ierr;
}
//hypre_int hypre_SeqVectorIsManaged(hypre_Vector *x)
//{
//}
|
GB_unop__floor_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__floor_fc32_fc32)
// op(A') function: GB (_unop_tran__floor_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = GB_cfloorf (aij)
// type of the entries of A (the input matrix)
#define GB_ATYPE \
GxB_FC32_t
// type of the entries of C (the output matrix)
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
// address of the pth entry of C
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_cfloorf (x) ;
// casting (FC32 -> FC32: a no-op copy)
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = GB_cfloorf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_FLOOR || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = floor (Ax [p]) for every entry p; the FC32 -> FC32 cast
// is a no-op.  For bitmap matrices, entries with Ab [p] == 0 are skipped.
GrB_Info GB (_unop_apply__floor_fc32_fc32)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Ab == NULL means every entry is present (sparse/hyper/full case);
    // otherwise A is bitmap and A->b has already been memcpy'd into C->b.
    const bool A_is_bitmap = (Ab != NULL) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries absent from the bitmap
        if (A_is_bitmap && !Ab [p]) continue ;
        GxB_FC32_t zij = Ax [p] ;
        Cx [p] = GB_cfloorf (zij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = floor (A'): transpose and apply the unary operator.
// The actual work is performed by the shared template
// GB_unop_transpose.c, which is specialized via the GB_* macros
// defined earlier in this file.
GrB_Info GB (_unop_tran__floor_fc32_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__minv_int64_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_int64_uint64
// op(A') function: GB_tran__minv_int64_uint64
// C type: int64_t
// A type: uint64_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 64)
// type of the entries of A (the input matrix)
#define GB_ATYPE \
uint64_t
// type of the entries of C (the output matrix)
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
// address of the pth entry of C
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_SIGNED (x, 64) ;
// casting (uint64_t -> int64_t)
#define GB_CASTING(z, x) \
int64_t z = (int64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_INT64 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = minv ((int64_t) Ax [p]) for all p, in parallel.
GrB_Info GB_unop__minv_int64_uint64
(
    int64_t *restrict Cx,
    const uint64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // the body below is the expansion of GB_CAST_OP (p, p):
        // fetch aij, typecast to int64_t, then apply the unary op
        GB_GETA (aij, Ax, p) ;
        GB_CASTING (x, aij) ;
        GB_OP (GB_CX (p), x) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = minv (A'): transpose, typecast, and apply the unary operator.
// The actual work is performed by the shared template
// GB_unaryop_transpose.c, specialized via the GB_* macros above.
GrB_Info GB_tran__minv_int64_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
declare_simd_aarch64.c | // REQUIRES: aarch64-registered-target
// -fopenmp and -fopenmp-simd behavior are expected to be the same.
// RUN: %clang_cc1 -triple aarch64-linux-gnu -target-feature +neon -fopenmp -x c -emit-llvm %s -o - -femit-all-decls | FileCheck %s --check-prefix=AARCH64
// RUN: %clang_cc1 -triple aarch64-linux-gnu -target-feature +neon -fopenmp-simd -x c -emit-llvm %s -o - -femit-all-decls | FileCheck %s --check-prefix=AARCH64
#pragma omp declare simd
#pragma omp declare simd simdlen(2)
#pragma omp declare simd simdlen(6)
#pragma omp declare simd simdlen(8)
double foo(float x);
// AARCH64: "_ZGVnM2v_foo" "_ZGVnM4v_foo" "_ZGVnM8v_foo" "_ZGVnN2v_foo" "_ZGVnN4v_foo" "_ZGVnN8v_foo"
// AARCH64-NOT: _ZGVnN6v_foo
// Reference foo in a loop so its SIMD variants are emitted
// (with -femit-all-decls) and can be matched by the CHECK lines above.
void foo_loop(double *x, float *y, int N) {
for (int i = 0; i < N; ++i) {
x[i] = foo(y[i]);
}
}
// Make sure that the following two functions by default get generated
// with 4 and 2 lanes, as described in the vector ABI.
#pragma omp declare simd notinbranch
float bar(double x);
#pragma omp declare simd notinbranch
double baz(float x);
// AARCH64: "_ZGVnN2v_baz" "_ZGVnN4v_baz"
// AARCH64-NOT: baz
// AARCH64: "_ZGVnN2v_bar" "_ZGVnN4v_bar"
// AARCH64-NOT: bar
// Reference baz and bar so their default-lane SIMD variants are emitted
// and can be matched by the CHECK lines above.
void baz_bar_loop(double *x, float *y, int N) {
for (int i = 0; i < N; ++i) {
x[i] = baz(y[i]);
y[i] = bar(x[i]);
}
}
/***************************/
/* 32-bit integer tests */
/***************************/
#pragma omp declare simd
#pragma omp declare simd simdlen(2)
#pragma omp declare simd simdlen(6)
#pragma omp declare simd simdlen(8)
long foo_int(int x);
// AARCH64: "_ZGVnN2v_foo_int" "_ZGVnN4v_foo_int" "_ZGVnN8v_foo_int"
// No non power of two
// AARCH64-NOT: _ZGVnN6v_foo_int
// Reference foo_int so its SIMD variants are emitted and can be
// matched by the CHECK lines above.
void foo_int_loop(long *x, int *y, int N) {
for (int i = 0; i < N; ++i) {
x[i] = foo_int(y[i]);
}
}
#pragma omp declare simd
char simple_8bit(char);
// AARCH64: "_ZGVnM16v_simple_8bit" "_ZGVnM8v_simple_8bit" "_ZGVnN16v_simple_8bit" "_ZGVnN8v_simple_8bit"
#pragma omp declare simd
short simple_16bit(short);
// AARCH64: "_ZGVnM4v_simple_16bit" "_ZGVnM8v_simple_16bit" "_ZGVnN4v_simple_16bit" "_ZGVnN8v_simple_16bit"
#pragma omp declare simd
int simple_32bit(int);
// AARCH64: "_ZGVnM2v_simple_32bit" "_ZGVnM4v_simple_32bit" "_ZGVnN2v_simple_32bit" "_ZGVnN4v_simple_32bit"
#pragma omp declare simd
long simple_64bit(long);
// AARCH64: "_ZGVnM2v_simple_64bit" "_ZGVnN2v_simple_64bit"
#pragma omp declare simd
#pragma omp declare simd simdlen(32)
char a01(int x);
// AARCH64: "_ZGVnN16v_a01" "_ZGVnN32v_a01" "_ZGVnN8v_a01"
// AARCH64-NOT: a01
#pragma omp declare simd
#pragma omp declare simd simdlen(2)
long a02(short x);
// AARCH64: "_ZGVnN2v_a02" "_ZGVnN4v_a02" "_ZGVnN8v_a02"
// AARCH64-NOT: a02
/************/
/* pointers */
/************/
#pragma omp declare simd
int b01(int *x);
// AARCH64: "_ZGVnN4v_b01"
// AARCH64-NOT: b01
#pragma omp declare simd
char b02(char *);
// AARCH64: "_ZGVnN16v_b02" "_ZGVnN8v_b02"
// AARCH64-NOT: b02
#pragma omp declare simd
double *b03(double *);
// AARCH64: "_ZGVnN2v_b03"
// AARCH64-NOT: b03
/***********/
/* masking */
/***********/
#pragma omp declare simd inbranch
int c01(double *x, short y);
// AARCH64: "_ZGVnM8vv_c01"
// AARCH64-NOT: c01
#pragma omp declare simd inbranch uniform(x)
double c02(double *x, char y);
// AARCH64: "_ZGVnM16uv_c02" "_ZGVnM8uv_c02"
// AARCH64-NOT: c02
/************************************/
/* Linear with a constant parameter */
/************************************/
#pragma omp declare simd notinbranch linear(i)
double constlinear(const int i);
// AARCH64: "_ZGVnN2l_constlinear" "_ZGVnN4l_constlinear"
// AARCH64-NOT: constlinear
/*************************/
/* sincos-like signature */
/*************************/
#pragma omp declare simd linear(sin) linear(cos)
void sincos(double in, double *sin, double *cos);
// AARCH64: "_ZGVnN2vll_sincos"
// AARCH64-NOT: sincos
#pragma omp declare simd linear(sin : 1) linear(cos : 2)
void SinCos(double in, double *sin, double *cos);
// AARCH64: "_ZGVnN2vll2_SinCos"
// AARCH64-NOT: SinCos
// Selection of tests based on the examples provided in chapter 5 of
// the Vector Function ABI specifications for AArch64, at
// https://developer.arm.com/products/software-development-tools/hpc/arm-compiler-for-hpc/vector-function-abi.
// Listing 2, p. 18
#pragma omp declare simd inbranch uniform(x) linear(val(i) : 4)
int foo2(int *x, int i);
// AARCH64: "_ZGVnM2ul4_foo2" "_ZGVnM4ul4_foo2"
// AARCH64-NOT: foo2
// Listing 3, p. 18
#pragma omp declare simd inbranch uniform(x, c) linear(i \
: c)
int foo3(int *x, int i, unsigned char c);
// AARCH64: "_ZGVnM16uls2u_foo3" "_ZGVnM8uls2u_foo3"
// AARCH64-NOT: foo3
// Listing 6, p. 19
#pragma omp declare simd linear(x) aligned(x : 16) simdlen(4)
int foo4(int *x, float y);
// AARCH64: "_ZGVnM4la16v_foo4" "_ZGVnN4la16v_foo4"
// AARCH64-NOT: foo4
static int *I;
static char *C;
static short *S;
static long *L;
static float *F;
static double *D;
// Call each declared-simd function once so the compiler emits their
// vector-variant mangled names for FileCheck to match.
void do_something() {
simple_8bit(*C);
simple_16bit(*S);
simple_32bit(*I);
simple_64bit(*L);
*C = a01(*I);
*L = a02(*S);
*I = b01(I);
*C = b02(C);
D = b03(D);
*I = c01(D, *S);
*D = c02(D, *S);
constlinear(*I);
sincos(*D, D, D);
SinCos(*D, D, D);
foo2(I, *I);
foo3(I, *I, *C);
foo4(I, *F);
}
// Three-byte aggregate used to check SIMD mangling for functions
// taking and returning a struct.
typedef struct S {
char R, G, B;
} STy;
#pragma omp declare simd notinbranch
STy DoRGB(STy x);
// AARCH64: "_ZGVnN2v_DoRGB"
static STy *RGBData;
// Reference DoRGB so its SIMD variant is emitted for FileCheck.
void do_rgb_stuff() {
DoRGB(*RGBData);
}
|
convolution_3x3_pack8to1_fp16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Transform the 3x3 convolution weights into the 8x8 winograd-F(6,3)
// domain and repack them, fp32 -> fp16, for the pack8-input /
// 1-channel-output kernel used by conv3x3s1_winograd63_pack8to1_fp16sa_neon.
// kernel: flat [outch][inch][9] fp32 weights.
// kernel_tm_pack8to1: destination for the transformed, interleaved fp16 weights.
static void conv3x3s1_winograd63_transform_kernel_pack8to1_fp16sa_neon(const Mat& kernel, Mat& kernel_tm_pack8to1, int inch, int outch, const Option& opt)
{
// winograd63 transform kernel
Mat kernel_tm;
kernel_tm.create(8 * 8, inch, outch);
// G matrix of the F(6x6, 3x3) winograd kernel transform (8 rows x 3 cols)
const float ktm[8][3] = {
{1.0f, 0.0f, 0.0f},
{-2.0f / 9, -2.0f / 9, -2.0f / 9},
{-2.0f / 9, 2.0f / 9, -2.0f / 9},
{1.0f / 90, 1.0f / 45, 2.0f / 45},
{1.0f / 90, -1.0f / 45, 2.0f / 45},
{1.0f / 45, 1.0f / 90, 1.0f / 180},
{1.0f / 45, -1.0f / 90, 1.0f / 180},
{0.0f, 0.0f, 1.0f}
};
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
float* kernel_tm0 = kernel_tm.channel(p).row(q);
// transform kernel, transposed
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
// h : tmp = G * g, an 8x3 intermediate (rows of ktm dotted with kernel columns)
float tmp[8][3];
for (int i = 0; i < 8; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// v : kernel_tm0 = tmp * G^T, the full 8x8 transformed tile
for (int j = 0; j < 8; j++)
{
float* tmpp = &tmp[j][0];
for (int i = 0; i < 8; i++)
{
kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
// interleave
// src = 64-inch-outch
// dst = 8a-inch/8a-64-outch;
kernel_tm_pack8to1.create(8 * inch / 8, 64, outch / 8 + outch % 8, (size_t)2u * 8, 8);
int p = 0;
// groups of 8 output channels: interleave 8 channels per element, fp32 -> fp16
for (; p + 7 < outch; p += 8)
{
const Mat k0 = kernel_tm.channel(p);
const Mat k1 = kernel_tm.channel(p + 1);
const Mat k2 = kernel_tm.channel(p + 2);
const Mat k3 = kernel_tm.channel(p + 3);
const Mat k4 = kernel_tm.channel(p + 4);
const Mat k5 = kernel_tm.channel(p + 5);
const Mat k6 = kernel_tm.channel(p + 6);
const Mat k7 = kernel_tm.channel(p + 7);
Mat g0 = kernel_tm_pack8to1.channel(p / 8);
for (int k = 0; k < 64; k++)
{
__fp16* g00 = g0.row<__fp16>(k);
for (int q = 0; q + 7 < inch; q += 8)
{
for (int i = 0; i < 8; i++)
{
g00[0] = (__fp16)k0.row(q + i)[k];
g00[1] = (__fp16)k1.row(q + i)[k];
g00[2] = (__fp16)k2.row(q + i)[k];
g00[3] = (__fp16)k3.row(q + i)[k];
g00[4] = (__fp16)k4.row(q + i)[k];
g00[5] = (__fp16)k5.row(q + i)[k];
g00[6] = (__fp16)k6.row(q + i)[k];
g00[7] = (__fp16)k7.row(q + i)[k];
g00 += 8;
}
}
}
}
// leftover output channels (outch % 8), one channel at a time
for (; p < outch; p++)
{
const Mat k0 = kernel_tm.channel(p);
Mat g0 = kernel_tm_pack8to1.channel(p / 8 + p % 8);
for (int k = 0; k < 64; k++)
{
__fp16* g00 = g0.row<__fp16>(k);
for (int q = 0; q + 7 < inch; q += 8)
{
for (int i = 0; i < 8; i++)
{
g00[0] = (__fp16)k0.row(q + i)[k];
g00 += 1;
}
}
}
}
}
static void conv3x3s1_winograd63_pack8to1_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 6n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 5) / 6 * 6;
outh = (outh + 5) / 6 * 6;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tiles = outw / 6;
int h_tiles = outh / 6;
const int tiles = w_tiles * h_tiles;
bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
conv3x3s1_winograd63_transform_input_pack8_fp16sa_neon(bottom_blob_bordered, bottom_blob_tm, opt);
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = h_tm / 8 * w_tm / 8;
// permute
// bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
Mat bottom_blob_tm2;
if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + tiles % 4, 64, 2u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 64, 2u * elempack, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 64, 2u * elempack, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 64; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
for (; i + 7 < tiles; i += 8)
{
__fp16* tm2p = tm2.row<__fp16>(i / 8);
const __fp16* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
// transpose 8x8
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n"
"ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0] \n"
"sub %0, %0, #64 \n"
"uzp1 v16.8h, v0.8h, v4.8h \n"
"uzp2 v20.8h, v0.8h, v4.8h \n"
"uzp1 v17.8h, v1.8h, v5.8h \n"
"uzp2 v21.8h, v1.8h, v5.8h \n"
"uzp1 v18.8h, v2.8h, v6.8h \n"
"uzp2 v22.8h, v2.8h, v6.8h \n"
"uzp1 v19.8h, v3.8h, v7.8h \n"
"uzp2 v23.8h, v3.8h, v7.8h \n"
"st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n"
"st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
r0 += bottom_blob_tm.cstep * 8;
}
}
for (; i + 3 < tiles; i += 4)
{
__fp16* tm2p = tm2.row<__fp16>(i / 8 + (i % 8) / 4);
const __fp16* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
// transpose 8x4
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n"
"st4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0", "v1", "v2", "v3");
r0 += bottom_blob_tm.cstep * 8;
}
}
for (; i < tiles; i++)
{
__fp16* tm2p = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + i % 4);
const __fp16* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
asm volatile(
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v0.8h}, [%0] \n"
"st1 {v0.8h}, [%1], #16 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0");
r0 += bottom_blob_tm.cstep * 8;
}
}
}
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(tiles, 64, outch, 2u, 1, opt.workspace_allocator);
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 8;
__fp16* output0_tm = top_blob_tm.channel(p);
__fp16* output1_tm = top_blob_tm.channel(p + 1);
__fp16* output2_tm = top_blob_tm.channel(p + 2);
__fp16* output3_tm = top_blob_tm.channel(p + 3);
__fp16* output4_tm = top_blob_tm.channel(p + 4);
__fp16* output5_tm = top_blob_tm.channel(p + 5);
__fp16* output6_tm = top_blob_tm.channel(p + 6);
__fp16* output7_tm = top_blob_tm.channel(p + 7);
const Mat kernel01_tm = kernel_tm.channel(p / 8);
for (int r = 0; r < 64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
for (; i + 7 < tiles; i += 8)
{
const __fp16* r0 = bb2.row<const __fp16>(i / 8);
const __fp16* kptr = kernel01_tm.row<const __fp16>(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v24.16b, v24.16b, v24.16b \n"
"eor v25.16b, v25.16b, v25.16b \n"
"eor v26.16b, v26.16b, v26.16b \n"
"eor v27.16b, v27.16b, v27.16b \n"
"eor v28.16b, v28.16b, v28.16b \n"
"eor v29.16b, v29.16b, v29.16b \n"
"eor v30.16b, v30.16b, v30.16b \n"
"eor v31.16b, v31.16b, v31.16b \n"
"0: \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%9], #64 \n"
"prfm pldl1keep, [%10, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%10], #64 \n"
"fmla v24.8h, v16.8h, v0.h[0] \n"
"fmla v25.8h, v16.8h, v0.h[1] \n"
"fmla v26.8h, v16.8h, v0.h[2] \n"
"fmla v27.8h, v16.8h, v0.h[3] \n"
"fmla v28.8h, v16.8h, v0.h[4] \n"
"fmla v29.8h, v16.8h, v0.h[5] \n"
"fmla v30.8h, v16.8h, v0.h[6] \n"
"fmla v31.8h, v16.8h, v0.h[7] \n"
"fmla v24.8h, v17.8h, v1.h[0] \n"
"fmla v25.8h, v17.8h, v1.h[1] \n"
"fmla v26.8h, v17.8h, v1.h[2] \n"
"fmla v27.8h, v17.8h, v1.h[3] \n"
"fmla v28.8h, v17.8h, v1.h[4] \n"
"fmla v29.8h, v17.8h, v1.h[5] \n"
"fmla v30.8h, v17.8h, v1.h[6] \n"
"fmla v31.8h, v17.8h, v1.h[7] \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%9], #64 \n"
"fmla v24.8h, v18.8h, v2.h[0] \n"
"fmla v25.8h, v18.8h, v2.h[1] \n"
"fmla v26.8h, v18.8h, v2.h[2] \n"
"fmla v27.8h, v18.8h, v2.h[3] \n"
"fmla v28.8h, v18.8h, v2.h[4] \n"
"fmla v29.8h, v18.8h, v2.h[5] \n"
"fmla v30.8h, v18.8h, v2.h[6] \n"
"fmla v31.8h, v18.8h, v2.h[7] \n"
"prfm pldl1keep, [%10, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%10], #64 \n"
"fmla v24.8h, v19.8h, v3.h[0] \n"
"fmla v25.8h, v19.8h, v3.h[1] \n"
"fmla v26.8h, v19.8h, v3.h[2] \n"
"fmla v27.8h, v19.8h, v3.h[3] \n"
"fmla v28.8h, v19.8h, v3.h[4] \n"
"fmla v29.8h, v19.8h, v3.h[5] \n"
"fmla v30.8h, v19.8h, v3.h[6] \n"
"fmla v31.8h, v19.8h, v3.h[7] \n"
"fmla v24.8h, v20.8h, v4.h[0] \n"
"fmla v25.8h, v20.8h, v4.h[1] \n"
"fmla v26.8h, v20.8h, v4.h[2] \n"
"fmla v27.8h, v20.8h, v4.h[3] \n"
"fmla v28.8h, v20.8h, v4.h[4] \n"
"fmla v29.8h, v20.8h, v4.h[5] \n"
"fmla v30.8h, v20.8h, v4.h[6] \n"
"fmla v31.8h, v20.8h, v4.h[7] \n"
"fmla v24.8h, v21.8h, v5.h[0] \n"
"fmla v25.8h, v21.8h, v5.h[1] \n"
"fmla v26.8h, v21.8h, v5.h[2] \n"
"fmla v27.8h, v21.8h, v5.h[3] \n"
"fmla v28.8h, v21.8h, v5.h[4] \n"
"fmla v29.8h, v21.8h, v5.h[5] \n"
"fmla v30.8h, v21.8h, v5.h[6] \n"
"fmla v31.8h, v21.8h, v5.h[7] \n"
"fmla v24.8h, v22.8h, v6.h[0] \n"
"fmla v25.8h, v22.8h, v6.h[1] \n"
"fmla v26.8h, v22.8h, v6.h[2] \n"
"fmla v27.8h, v22.8h, v6.h[3] \n"
"fmla v28.8h, v22.8h, v6.h[4] \n"
"fmla v29.8h, v22.8h, v6.h[5] \n"
"fmla v30.8h, v22.8h, v6.h[6] \n"
"fmla v31.8h, v22.8h, v6.h[7] \n"
"subs %w0, %w0, #1 \n"
"fmla v24.8h, v23.8h, v7.h[0] \n"
"fmla v25.8h, v23.8h, v7.h[1] \n"
"fmla v26.8h, v23.8h, v7.h[2] \n"
"fmla v27.8h, v23.8h, v7.h[3] \n"
"fmla v28.8h, v23.8h, v7.h[4] \n"
"fmla v29.8h, v23.8h, v7.h[5] \n"
"fmla v30.8h, v23.8h, v7.h[6] \n"
"fmla v31.8h, v23.8h, v7.h[7] \n"
"bne 0b \n"
"st1 {v24.8h}, [%1], #16 \n"
"st1 {v25.8h}, [%2], #16 \n"
"st1 {v26.8h}, [%3], #16 \n"
"st1 {v27.8h}, [%4], #16 \n"
"st1 {v28.8h}, [%5], #16 \n"
"st1 {v29.8h}, [%6], #16 \n"
"st1 {v30.8h}, [%7], #16 \n"
"st1 {v31.8h}, [%8], #16 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(output2_tm), // %3
"=r"(output3_tm), // %4
"=r"(output4_tm), // %5
"=r"(output5_tm), // %6
"=r"(output6_tm), // %7
"=r"(output7_tm), // %8
"=r"(r0), // %9
"=r"(kptr) // %10
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(output2_tm),
"4"(output3_tm),
"5"(output4_tm),
"6"(output5_tm),
"7"(output6_tm),
"8"(output7_tm),
"9"(r0),
"10"(kptr)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 3 < tiles; i += 4)
{
const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4);
const __fp16* kptr = kernel01_tm.row<const __fp16>(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v24.16b, v24.16b, v24.16b \n"
"eor v25.16b, v25.16b, v25.16b \n"
"eor v26.16b, v26.16b, v26.16b \n"
"eor v27.16b, v27.16b, v27.16b \n"
"eor v28.16b, v28.16b, v28.16b \n"
"eor v29.16b, v29.16b, v29.16b \n"
"eor v30.16b, v30.16b, v30.16b \n"
"eor v31.16b, v31.16b, v31.16b \n"
"0: \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%9], #32 \n"
"prfm pldl1keep, [%10, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%10], #64 \n"
"fmla v24.4h, v16.4h, v0.h[0] \n"
"fmla v25.4h, v16.4h, v0.h[1] \n"
"fmla v26.4h, v16.4h, v0.h[2] \n"
"fmla v27.4h, v16.4h, v0.h[3] \n"
"fmla v28.4h, v16.4h, v0.h[4] \n"
"fmla v29.4h, v16.4h, v0.h[5] \n"
"fmla v30.4h, v16.4h, v0.h[6] \n"
"fmla v31.4h, v16.4h, v0.h[7] \n"
"fmla v24.4h, v17.4h, v1.h[0] \n"
"fmla v25.4h, v17.4h, v1.h[1] \n"
"fmla v26.4h, v17.4h, v1.h[2] \n"
"fmla v27.4h, v17.4h, v1.h[3] \n"
"fmla v28.4h, v17.4h, v1.h[4] \n"
"fmla v29.4h, v17.4h, v1.h[5] \n"
"fmla v30.4h, v17.4h, v1.h[6] \n"
"fmla v31.4h, v17.4h, v1.h[7] \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%9], #32 \n"
"fmla v24.4h, v18.4h, v2.h[0] \n"
"fmla v25.4h, v18.4h, v2.h[1] \n"
"fmla v26.4h, v18.4h, v2.h[2] \n"
"fmla v27.4h, v18.4h, v2.h[3] \n"
"fmla v28.4h, v18.4h, v2.h[4] \n"
"fmla v29.4h, v18.4h, v2.h[5] \n"
"fmla v30.4h, v18.4h, v2.h[6] \n"
"fmla v31.4h, v18.4h, v2.h[7] \n"
"prfm pldl1keep, [%10, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%10], #64 \n"
"fmla v24.4h, v19.4h, v3.h[0] \n"
"fmla v25.4h, v19.4h, v3.h[1] \n"
"fmla v26.4h, v19.4h, v3.h[2] \n"
"fmla v27.4h, v19.4h, v3.h[3] \n"
"fmla v28.4h, v19.4h, v3.h[4] \n"
"fmla v29.4h, v19.4h, v3.h[5] \n"
"fmla v30.4h, v19.4h, v3.h[6] \n"
"fmla v31.4h, v19.4h, v3.h[7] \n"
"fmla v24.4h, v20.4h, v4.h[0] \n"
"fmla v25.4h, v20.4h, v4.h[1] \n"
"fmla v26.4h, v20.4h, v4.h[2] \n"
"fmla v27.4h, v20.4h, v4.h[3] \n"
"fmla v28.4h, v20.4h, v4.h[4] \n"
"fmla v29.4h, v20.4h, v4.h[5] \n"
"fmla v30.4h, v20.4h, v4.h[6] \n"
"fmla v31.4h, v20.4h, v4.h[7] \n"
"fmla v24.4h, v21.4h, v5.h[0] \n"
"fmla v25.4h, v21.4h, v5.h[1] \n"
"fmla v26.4h, v21.4h, v5.h[2] \n"
"fmla v27.4h, v21.4h, v5.h[3] \n"
"fmla v28.4h, v21.4h, v5.h[4] \n"
"fmla v29.4h, v21.4h, v5.h[5] \n"
"fmla v30.4h, v21.4h, v5.h[6] \n"
"fmla v31.4h, v21.4h, v5.h[7] \n"
"fmla v24.4h, v22.4h, v6.h[0] \n"
"fmla v25.4h, v22.4h, v6.h[1] \n"
"fmla v26.4h, v22.4h, v6.h[2] \n"
"fmla v27.4h, v22.4h, v6.h[3] \n"
"fmla v28.4h, v22.4h, v6.h[4] \n"
"fmla v29.4h, v22.4h, v6.h[5] \n"
"fmla v30.4h, v22.4h, v6.h[6] \n"
"fmla v31.4h, v22.4h, v6.h[7] \n"
"subs %w0, %w0, #1 \n"
"fmla v24.4h, v23.4h, v7.h[0] \n"
"fmla v25.4h, v23.4h, v7.h[1] \n"
"fmla v26.4h, v23.4h, v7.h[2] \n"
"fmla v27.4h, v23.4h, v7.h[3] \n"
"fmla v28.4h, v23.4h, v7.h[4] \n"
"fmla v29.4h, v23.4h, v7.h[5] \n"
"fmla v30.4h, v23.4h, v7.h[6] \n"
"fmla v31.4h, v23.4h, v7.h[7] \n"
"bne 0b \n"
"st1 {v24.4h}, [%1], #8 \n"
"st1 {v25.4h}, [%2], #8 \n"
"st1 {v26.4h}, [%3], #8 \n"
"st1 {v27.4h}, [%4], #8 \n"
"st1 {v28.4h}, [%5], #8 \n"
"st1 {v29.4h}, [%6], #8 \n"
"st1 {v30.4h}, [%7], #8 \n"
"st1 {v31.4h}, [%8], #8 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(output2_tm), // %3
"=r"(output3_tm), // %4
"=r"(output4_tm), // %5
"=r"(output5_tm), // %6
"=r"(output6_tm), // %7
"=r"(output7_tm), // %8
"=r"(r0), // %9
"=r"(kptr) // %10
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(output2_tm),
"4"(output3_tm),
"5"(output4_tm),
"6"(output5_tm),
"7"(output6_tm),
"8"(output7_tm),
"9"(r0),
"10"(kptr)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i < tiles; i++)
{
const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + i % 4);
const __fp16* kptr = kernel01_tm.row<const __fp16>(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v30.16b, v30.16b, v30.16b \n"
"0: \n"
"prfm pldl1keep, [%9, #128] \n"
"ld1 {v0.8h}, [%9], #16 \n"
"prfm pldl1keep, [%10, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%10], #64 \n"
"fmla v30.8h, v16.8h, v0.h[0] \n"
"fmla v30.8h, v17.8h, v0.h[1] \n"
"prfm pldl1keep, [%10, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%10], #64 \n"
"fmla v30.8h, v18.8h, v0.h[2] \n"
"fmla v30.8h, v19.8h, v0.h[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v30.8h, v20.8h, v0.h[4] \n"
"fmla v30.8h, v21.8h, v0.h[5] \n"
"fmla v30.8h, v22.8h, v0.h[6] \n"
"fmla v30.8h, v23.8h, v0.h[7] \n"
"bne 0b \n"
"st1 {v30.h}[0], [%1], #2 \n"
"st1 {v30.h}[1], [%2], #2 \n"
"st1 {v30.h}[2], [%3], #2 \n"
"st1 {v30.h}[3], [%4], #2 \n"
"st1 {v30.h}[4], [%5], #2 \n"
"st1 {v30.h}[5], [%6], #2 \n"
"st1 {v30.h}[6], [%7], #2 \n"
"st1 {v30.h}[7], [%8], #2 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(output2_tm), // %3
"=r"(output3_tm), // %4
"=r"(output4_tm), // %5
"=r"(output5_tm), // %6
"=r"(output6_tm), // %7
"=r"(output7_tm), // %8
"=r"(r0), // %9
"=r"(kptr) // %10
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(output2_tm),
"4"(output3_tm),
"5"(output4_tm),
"6"(output5_tm),
"7"(output6_tm),
"8"(output7_tm),
"9"(r0),
"10"(kptr)
: "cc", "memory", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v30");
}
}
}
remain_outch_start += nn_outch << 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
__fp16* output0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p / 8 + p % 8);
for (int r = 0; r < 64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
for (; i + 7 < tiles; i += 8)
{
const __fp16* r0 = bb2.row<const __fp16>(i / 8);
const __fp16* kptr = kernel0_tm.row<const __fp16>(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v30.16b, v30.16b, v30.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%2], #64 \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v0.8h}, [%3], #16 \n"
"fmla v30.8h, v16.8h, v0.h[0] \n"
"fmla v30.8h, v17.8h, v0.h[1] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%2], #64 \n"
"fmla v30.8h, v18.8h, v0.h[2] \n"
"fmla v30.8h, v19.8h, v0.h[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v30.8h, v20.8h, v0.h[4] \n"
"fmla v30.8h, v21.8h, v0.h[5] \n"
"fmla v30.8h, v22.8h, v0.h[6] \n"
"fmla v30.8h, v23.8h, v0.h[7] \n"
"bne 0b \n"
"st1 {v30.8h}, [%1], #16 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(kptr)
: "cc", "memory", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v30");
}
for (; i + 3 < tiles; i += 4)
{
const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4);
const __fp16* kptr = kernel0_tm.row<const __fp16>(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v30.16b, v30.16b, v30.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%2], #32 \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v0.8h}, [%3], #16 \n"
"fmla v30.4h, v16.4h, v0.h[0] \n"
"fmla v30.4h, v17.4h, v0.h[1] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2], #32 \n"
"fmla v30.4h, v18.4h, v0.h[2] \n"
"fmla v30.4h, v19.4h, v0.h[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v30.4h, v20.4h, v0.h[4] \n"
"fmla v30.4h, v21.4h, v0.h[5] \n"
"fmla v30.4h, v22.4h, v0.h[6] \n"
"fmla v30.4h, v23.4h, v0.h[7] \n"
"bne 0b \n"
"st1 {v30.4h}, [%1], #8 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(kptr)
: "cc", "memory", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v30");
}
for (; i < tiles; i++)
{
const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + i % 4);
const __fp16* kptr = kernel0_tm.row<const __fp16>(r);
float16x8_t _sum0 = vdupq_n_f16((__fp16)0.f);
for (int q = 0; q < inch; q++)
{
float16x8_t _r0 = vld1q_f16(r0);
float16x8_t _k0 = vld1q_f16(kptr);
_sum0 = vfmaq_f16(_sum0, _r0, _k0);
kptr += 8;
r0 += 8;
}
__fp16 sum0 = vaddvq_f32(vcvt_f32_f16(vadd_f16(vget_low_f16(_sum0), vget_high_f16(_sum0))));
output0_tm[0] = sum0;
output0_tm++;
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, 2u, 1, opt.workspace_allocator);
}
{
conv3x3s1_winograd63_transform_output_fp16sa_neon(top_blob_tm, top_blob_bordered, bias, opt);
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
|
eliminate-branches.h | #ifndef __ELIMINATE_BRANCHES_H__
#define __ELIMINATE_BRANCHES_H__
#include <omp.h>
#include "../data-structures/data-structures.h"
#include "../matrix-formats/matrix-formats.h"
#include "elimitate-variables.h"
namespace __core__ {
namespace __linear_algebra__ {
namespace __cholesky__ {
template <typename T,typename IT,typename Allocator>
void eliminateBranch(MatrixCXSHandler<T,IT> L,MatrixCXSHandler<T,IT> A,MatrixCXSHandler<IT,IT> RP,LRTree<IT,IT,Allocator,int>& tree,Node<IT,IT,int>& root,ArrayHandler<IT,IT> p,
		ArrayHandler<T,IT> c){
	// Eliminate every variable in the subtree rooted at `root` in post-order
	// (children before their parent), then eliminate `root` itself and erase
	// the whole branch from the elimination tree.
	// Removed: unused local alias `typedef Node<IT,IT,int> NT;`.
	int node=root.left_child;
	if(node!=tree.invalidPos) {
		bool down=true;
		size_t i=0;
		// Iterative post-order walk; the i<tree.size() bound guards against
		// runaway traversal on a corrupted tree.
		while((i++)<tree.size()) {
			if(down)
				// Descend to the leftmost leaf of the current subtree.
				while(tree[node].left_child!=tree.invalidPos)
					node=tree[node].left_child;
			down=false;
			// Children of `node` (if any) have already been processed.
			eliminateVariable(L,A,RP,p,c,tree[node].key);
			if(tree[node].right_sibling!=tree.invalidPos) {
				node=tree[node].right_sibling;
				down=true;
			}
			else
				node=tree[node].parent;
			// Stop once we climb back to the branch root (or off the tree).
			if(node==0||node==tree.invalidPos||node==root.self)
				break;
		}
	}
	eliminateVariable(L,A,RP,p,c,root.key);
	// Tree mutation is not thread-safe; serialize across OpenMP threads.
#pragma omp critical
	tree.eraseBranch(root);
}
template <typename T,typename IT,typename Allocator>
void eliminateBranch(MatrixCXSHandler<T,IT> L,MatrixCXSHandler<T,IT> A,MatrixCXSHandler<IT,IT> RP,LRTree<IT,IT,Allocator,int>& tree,Node<IT,IT,int>& root,ArrayHandler<IT,IT> p,
		std::vector<ArrayHandler<T,IT>> &c){
	// Same post-order branch elimination as the single-buffer overload, but
	// each OpenMP thread works with its own scratch buffer
	// c.at(omp_get_thread_num()), so concurrent branch eliminations do not
	// share workspace.
	// Removed: unused local alias `typedef Node<IT,IT,int> NT;`.
	int node=root.left_child;
	if(node!=tree.invalidPos) {
		bool down=true;
		size_t i=0;
		// Bounded iterative post-order walk (children before parent).
		while((i++)<tree.size()) {
			if(down)
				while(tree[node].left_child!=tree.invalidPos)
					node=tree[node].left_child;
			down=false;
			eliminateVariable(L,A,RP,p,c.at(omp_get_thread_num()),tree[node].key);
			if(tree[node].right_sibling!=tree.invalidPos) {
				node=tree[node].right_sibling;
				down=true;
			}
			else
				node=tree[node].parent;
			if(node==0||node==tree.invalidPos||node==root.self)
				break;
		}
	}
	eliminateVariable(L,A,RP,p,c.at(omp_get_thread_num()),root.key);
	// Tree mutation is not thread-safe; serialize across OpenMP threads.
#pragma omp critical
	tree.eraseBranch(root);
}
template <typename T,typename IT,typename Allocator>
void eliminateBranch(MatrixCXSHandler<T,IT> L,MatrixCXSHandler<T,IT> A,MatrixCXSHandler<IT,IT> RP,LRTree<IT,IT,Allocator,int>& tree,Node<IT,IT,int>& root,ArrayHandler<IT,IT> p,
		std::vector<ArrayHandler<T,IT>> &c,int tid){
	// Variant taking an explicit thread id `tid` instead of querying
	// omp_get_thread_num(); the caller selects which scratch buffer c.at(tid)
	// this invocation may use.
	// Removed: unused local alias `typedef Node<IT,IT,int> NT;`.
	int node=root.left_child;
	if(node!=tree.invalidPos) {
		bool down=true;
		size_t i=0;
		// Bounded iterative post-order walk (children before parent).
		while((i++)<tree.size()) {
			if(down)
				while(tree[node].left_child!=tree.invalidPos)
					node=tree[node].left_child;
			down=false;
			eliminateVariable(L,A,RP,p,c.at(tid),tree[node].key);
			if(tree[node].right_sibling!=tree.invalidPos) {
				node=tree[node].right_sibling;
				down=true;
			}
			else
				node=tree[node].parent;
			if(node==0||node==tree.invalidPos||node==root.self)
				break;
		}
	}
	eliminateVariable(L,A,RP,p,c.at(tid),root.key);
	// Tree mutation is not thread-safe; serialize across OpenMP threads.
#pragma omp critical
	tree.eraseBranch(root);
}
template <typename T,typename IT,typename Allocator>
void eliminateBranch(MatrixCXSHandler<T,IT> L,MatrixCXSHandler<T,IT> A,MatrixCXSHandler<IT,IT> RP,LRTree<IT,IT,Allocator,int>& tree,Node<IT,IT,int>& root,ArrayHandler<IT,IT> p,
		std::vector<ArrayHandler<T,IT>> &c,double& time){
	// Timed variant: eliminateVariableT returns the elapsed time of each
	// variable elimination, which is accumulated into `time`.
	// Removed dead code: unused `typedef Node<IT,IT,int> NT;`, the unused
	// `cpu_timer timer;` local, and the commented-out timer-based measurement
	// it belonged to (timing now comes from eliminateVariableT's return).
	// NOTE(review): `time+=` is unsynchronized — confirm each concurrent
	// caller passes its own accumulator.
	int node=root.left_child;
	if(node!=tree.invalidPos) {
		bool down=true;
		size_t i=0;
		// Bounded iterative post-order walk (children before parent).
		while((i++)<tree.size()) {
			if(down)
				while(tree[node].left_child!=tree.invalidPos)
					node=tree[node].left_child;
			down=false;
			time+=eliminateVariableT(L,A,RP,p,c.at(omp_get_thread_num()),tree[node].key);
			if(tree[node].right_sibling!=tree.invalidPos) {
				node=tree[node].right_sibling;
				down=true;
			}
			else
				node=tree[node].parent;
			if(node==0||node==tree.invalidPos||node==root.self)
				break;
		}
	}
	time+=eliminateVariableT(L,A,RP,p,c.at(omp_get_thread_num()),root.key);
	// Tree mutation is not thread-safe; serialize across OpenMP threads.
#pragma omp critical
	tree.eraseBranch(root);
}
}
}
}
#endif
|
thread-limit-1.c | /* { dg-do run } */
/* { dg-set-target-env-var OMP_THREAD_LIMIT "6" } */
#include <stdlib.h>
#include <unistd.h>
#include <omp.h>
int
main ()
{
  /* The harness sets OMP_THREAD_LIMIT=6 (dg-set-target-env-var above).
     If the implementation reports a different limit, the test is moot.  */
  if (omp_get_thread_limit () != 6)
    return 0;
  omp_set_dynamic (0);
  omp_set_nested (1);
  /* Requests at or under the limit must be honored exactly.  */
#pragma omp parallel num_threads (3)
  if (omp_get_num_threads () != 3)
    abort ();
#pragma omp parallel num_threads (3)
  if (omp_get_num_threads () != 3)
    abort ();
  /* A request above the limit may be reduced, but never past the limit.  */
#pragma omp parallel num_threads (8)
  if (omp_get_num_threads () > 6)
    abort ();
#pragma omp parallel num_threads (6)
  if (omp_get_num_threads () != 6)
    abort ();
  /* Nested teams (up to 5*5*2 = 50 requested threads): the total number of
     simultaneously active threads must still never exceed the limit of 6.
     `cnt` tracks the live thread count via atomics; the usleep widens the
     window so over-subscription would actually be observed.  */
  int cnt = 0;
#pragma omp parallel num_threads (5)
#pragma omp parallel num_threads (5)
#pragma omp parallel num_threads (2)
  {
    int v;
#pragma omp atomic capture
    v = ++cnt;
    if (v > 6)
      abort ();
    usleep (10000);
#pragma omp atomic
    --cnt;
  }
  return 0;
}
|
app.c | /**
* cgiannoula: christina.giann@gmail.com
* Christina Giannoula
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <dpu.h>
#include <dpu_log.h>
#include <unistd.h>
#include <getopt.h>
#include <assert.h>
#include <math.h>
#include <omp.h>
#include "../support/common.h"
#include "../support/matrix.h"
#include "../support/params.h"
#include "../support/partition.h"
#include "../support/timer.h"
#include "../support/utils.h"
// Define the DPU Binary path as DPU_BINARY here.
#ifndef DPU_BINARY
#define DPU_BINARY "./bin/spmv_dpu"
#endif
#define DPU_CAPACITY (64 << 20) // A DPU's capacity is 64 MB
/*
* Main Structures:
* 1. Matrices
* 2. Input vector
* 3. Output vector
* 4. Help structures for data partitioning
*/
static struct BDCSRMatrix* A;
static struct COOMatrix* B;
static val_dt* x;
static val_dt* y;
static val_dt* z;
static struct partition_info_t *part_info;
/**
* @brief Specific information for each DPU
*/
struct dpu_info_t {
uint32_t rows_per_dpu;
uint32_t rows_per_dpu_pad;
uint32_t cols_per_dpu;
uint32_t prev_rows_dpu;
uint32_t prev_nnz_dpu;
uint32_t nnz;
uint32_t nnz_pad;
uint32_t ptr_offset;
};
struct dpu_info_t *dpu_info;
/**
 * @brief Compute the number of horizontal partitions (DPUs per vertical partition)
 * @param n total number of DPUs to partition
 * @param horz_partitions output: number of horizontal partitions in the 2D layout
 * @param vert_partitions number of vertical partitions
 */
void find_partitions(uint32_t n, uint32_t *horz_partitions, uint32_t vert_partitions) {
    /* Spread the n DPUs evenly across the vertical partitions; the quotient
       is the number of horizontal partitions of the 2D layout. */
    *horz_partitions = n / vert_partitions;
}
/**
 * @brief Initialize the input vector with a repeating 1,2,3,4 pattern
 * @param vec pointer to the input vector
 * @param size number of elements in the vector
*/
void init_vector(val_dt* vec, uint32_t size) {
    /* Fill the input vector with the repeating pattern 1, 2, 3, 4. */
    unsigned int idx;
    for (idx = 0; idx < size; idx++) {
        vec[idx] = (val_dt) ((idx % 4) + 1);
    }
}
/**
* @brief compute output in the host CPU
*/
/* Reference SpMV on the host CPU: y += A * x for a block-diagonal CSR
   (BDCSR) matrix stored as vertical partitions laid out back to back.
   `total_nnzs` advances linearly through dcolind/dval, so partitions must
   be visited in storage order (c outer, rows inner). */
static void spmv_host(val_dt* y, struct BDCSRMatrix *A, val_dt* x) {
    uint64_t total_nnzs = 0;
    for (uint32_t c = 0; c < A->vert_partitions; c++) {
        for(uint32_t rowIndx = 0; rowIndx < A->nrows; ++rowIndx) {
            val_dt sum = 0;
            /* Each vertical partition has its own (nrows+1)-long rowptr. */
            uint32_t ptr_offset = c * (A->nrows + 1);
            /* Column indices are local to the tile; offset into x. */
            uint32_t col_offset = A->vert_tile_widths[c];
            for(uint32_t n = A->drowptr[ptr_offset + rowIndx]; n < A->drowptr[ptr_offset + rowIndx + 1]; n++) {
                uint32_t colIndx = A->dcolind[total_nnzs];
                val_dt value = A->dval[total_nnzs++];
                sum += x[col_offset + colIndx] * value;
            }
            /* Accumulate across partitions: each contributes to the row. */
            y[rowIndx] += sum;
        }
    }
}
/**
* @brief main of the host application
*/
/* Host entry point: load the sparse matrix, partition it across DPUs,
   transfer matrix + input vector, run the SpMV kernel on the DPUs,
   retrieve and merge partial results, and optionally verify on the CPU. */
int main(int argc, char **argv) {
    struct Params p = input_params(argc, argv);
    struct dpu_set_t dpu_set, dpu;
    uint32_t nr_of_dpus;
    uint32_t nr_of_ranks;
    // Allocate DPUs and load binary
    DPU_ASSERT(dpu_alloc(NR_DPUS, NULL, &dpu_set));
    DPU_ASSERT(dpu_load(dpu_set, DPU_BINARY, NULL));
    DPU_ASSERT(dpu_get_nr_dpus(dpu_set, &nr_of_dpus));
    DPU_ASSERT(dpu_get_nr_ranks(dpu_set, &nr_of_ranks));
    printf("[INFO] Allocated %d DPU(s)\n", nr_of_dpus);
    printf("[INFO] Allocated %d Rank(s)\n", nr_of_ranks);
    printf("[INFO] Allocated %d TASKLET(s) per DPU\n", NR_TASKLETS);
    unsigned int i;
    // Initialize input data
    B = readCOOMatrix(p.fileName);
    sortCOOMatrix(B);
    uint32_t horz_partitions = 0;
    uint32_t vert_partitions = p.vert_partitions;
    find_partitions(nr_of_dpus, &horz_partitions, p.vert_partitions);
    printf("[INFO] %dx%d Matrix Partitioning\n\n", horz_partitions, vert_partitions);
    // Convert to block-diagonal CSR matching the 2D DPU grid; COO no longer needed.
    A = coo2bdcsr(B, horz_partitions, vert_partitions);
    freeCOOMatrix(B);
    // Initialize partition data
    part_info = partition_init(A, nr_of_dpus, p.max_nranks, NR_TASKLETS);
#if FG_TRANS
    // For fine-grained (per-rank) transfers: record how many DPUs each rank
    // holds and build a prefix sum to map rank index -> first DPU index.
    struct dpu_set_t rank;
    uint32_t each_rank;
    DPU_RANK_FOREACH(dpu_set, rank, each_rank){
        uint32_t nr_dpus_in_rank;
        DPU_ASSERT(dpu_get_nr_dpus(rank, &nr_dpus_in_rank));
        part_info->active_dpus_per_rank[each_rank+1] = nr_dpus_in_rank;
    }
    int sum = 0;
    for(int i=0; i < p.max_nranks+1; i++) {
        part_info->accum_dpus_ranks[i] = part_info->active_dpus_per_rank[i] + sum;
        sum += part_info->active_dpus_per_rank[i];
    }
#endif
    // Initialize help data - Padding needed
    // Sizes are rounded up to 8-byte granularity (8 / byte_dt elements) to
    // satisfy DPU transfer alignment requirements.
    uint32_t ncols_pad = A->vert_tile_widths[A->vert_partitions-1] + A->max_tile_width;
    uint32_t tile_width_pad = A->max_tile_width;
    uint32_t nrows_pad = A->nrows;
    if (ncols_pad % (8 / byte_dt) != 0)
        ncols_pad = ncols_pad + ((8 / byte_dt) - (ncols_pad % (8 / byte_dt)));
    if (tile_width_pad % (8 / byte_dt) != 0)
        tile_width_pad = tile_width_pad + ((8 / byte_dt) - (tile_width_pad % (8 / byte_dt)));
    if (nrows_pad % (8 / byte_dt) != 0)
        nrows_pad = nrows_pad + ((8 / byte_dt) - (nrows_pad % (8 / byte_dt)));
    // Allocate input vector
    x = (val_dt *) malloc(ncols_pad * sizeof(val_dt));
    // Allocate output vector
    z = (val_dt *) calloc(nrows_pad, sizeof(val_dt));
    // Initialize input vector with arbitrary data
    init_vector(x, ncols_pad);
    // Load-balance nnzs among DPUs of the same vertical partition
    partition_by_nnz(A, part_info);
    // Initialize help data
    dpu_info = (struct dpu_info_t *) malloc(nr_of_dpus * sizeof(struct dpu_info_t));
    input_args = (dpu_arguments_t *) malloc(nr_of_dpus * sizeof(dpu_arguments_t));
    // Max limits for parallel transfers
    uint64_t max_rows_per_dpu = 0;
    uint64_t max_nnz_ind_per_dpu = 0;
    uint64_t max_nnz_val_per_dpu = 0;
    uint64_t max_rows_per_tasklet = 0;
    // Timer for measurements
    Timer timer;
    uint64_t total_nnzs = 0;
    i = 0;
    DPU_FOREACH(dpu_set, dpu, i) {
        // Find padding for rows and non-zero elements needed for CPU-DPU transfers
        // DPU i handles tile (tile_horz_indx, tile_vert_indx) of the 2D grid.
        uint32_t tile_horz_indx = i % A->horz_partitions;
        uint32_t tile_vert_indx = i / A->horz_partitions;
        uint32_t rows_per_dpu = part_info->row_split[tile_vert_indx * (A->horz_partitions + 1) + tile_horz_indx + 1] - part_info->row_split[tile_vert_indx * (A->horz_partitions + 1) + tile_horz_indx];
        uint32_t prev_rows_dpu = part_info->row_split[tile_vert_indx * (A->horz_partitions + 1) + tile_horz_indx];
        // Pad data to be transfered
        uint32_t rows_per_dpu_pad = rows_per_dpu + 1;
        if (rows_per_dpu_pad % (8 / byte_dt) != 0)
            rows_per_dpu_pad += ((8 / byte_dt) - (rows_per_dpu_pad % (8 / byte_dt)));
#if INT64 || FP64
        // 8-byte data types additionally need an even element count.
        if (rows_per_dpu_pad % 2 == 1)
            rows_per_dpu_pad++;
#endif
        if (rows_per_dpu_pad > max_rows_per_dpu)
            max_rows_per_dpu = rows_per_dpu_pad;
        unsigned int nnz, nnz_ind_pad, nnz_val_pad;
        nnz = A->drowptr[tile_vert_indx * (A->nrows + 1) + prev_rows_dpu + rows_per_dpu] - A->drowptr[tile_vert_indx * (A->nrows + 1) + prev_rows_dpu];
        if (nnz % 2 != 0)
            nnz_ind_pad = nnz + 1;
        else
            nnz_ind_pad = nnz;
        if (nnz % (8 / byte_dt) != 0)
            nnz_val_pad = nnz + ((8 / byte_dt) - (nnz % (8 / byte_dt)));
        else
            nnz_val_pad = nnz;
#if INT64 || FP64
        if (nnz_ind_pad % 2 == 1)
            nnz_ind_pad++;
        if (nnz_val_pad % 2 == 1)
            nnz_val_pad++;
#endif
        if (nnz_ind_pad > max_nnz_ind_per_dpu)
            max_nnz_ind_per_dpu = nnz_ind_pad;
        if (nnz_val_pad > max_nnz_val_per_dpu)
            max_nnz_val_per_dpu = nnz_val_pad;
        uint32_t prev_nnz_dpu = total_nnzs;
        total_nnzs += nnz;
        // Keep information per DPU
        dpu_info[i].rows_per_dpu = rows_per_dpu;
        dpu_info[i].cols_per_dpu = A->vert_tile_widths[tile_vert_indx+1] - A->vert_tile_widths[tile_vert_indx];
        dpu_info[i].prev_rows_dpu = prev_rows_dpu;
        dpu_info[i].prev_nnz_dpu = prev_nnz_dpu;
        dpu_info[i].nnz = nnz;
        dpu_info[i].nnz_pad = nnz_ind_pad;
        dpu_info[i].ptr_offset = tile_vert_indx * (A->nrows + 1) + prev_rows_dpu;
        // Find input arguments per DPU
        input_args[i].nrows = rows_per_dpu;
        input_args[i].tcols = tile_width_pad;
        input_args[i].nnz_pad = nnz_ind_pad;
        input_args[i].nnz_offset = A->drowptr[tile_vert_indx * (A->nrows + 1) + prev_rows_dpu];
#if BLNC_TSKLT_ROW
        // Load-balance rows across tasklets
        partition_tsklt_by_row(part_info, i, rows_per_dpu, NR_TASKLETS);
#else
        // Load-balance nnzs across tasklets
        partition_tsklt_by_nnz(A, part_info, i, rows_per_dpu, nnz, tile_vert_indx * (A->nrows + 1) + prev_rows_dpu, NR_TASKLETS);
#endif
        uint32_t t;
        for (t = 0; t < NR_TASKLETS; t++) {
            // Find input arguments per tasklet
            input_args[i].start_row[t] = part_info->row_split_tasklet[t];
            input_args[i].rows_per_tasklet[t] = part_info->row_split_tasklet[t+1] - part_info->row_split_tasklet[t];
            if (input_args[i].rows_per_tasklet[t] > max_rows_per_tasklet)
                max_rows_per_tasklet = input_args[i].rows_per_tasklet[t];
        }
    }
    // Sanity check: the per-DPU nnz counts must cover the whole matrix.
    assert(A->nnz == total_nnzs && "wrong balancing");
#if FG_TRANS
    // Find max number of rows and columns (subset of elements of the output vector) among DPUs of each rank
    DPU_RANK_FOREACH(dpu_set, rank, each_rank){
        uint32_t max_rows_cur_rank = 0;
        uint32_t max_cols_cur_rank = 0;
        uint32_t nr_dpus_in_rank;
        DPU_ASSERT(dpu_get_nr_dpus(rank, &nr_dpus_in_rank));
        uint32_t start_dpu = part_info->accum_dpus_ranks[each_rank];
        for (uint32_t k = 0; k < nr_dpus_in_rank; k++) {
            if (start_dpu + k >= nr_of_dpus)
                break;
            if (dpu_info[start_dpu + k].rows_per_dpu > max_rows_cur_rank)
                max_rows_cur_rank = dpu_info[start_dpu + k].rows_per_dpu;
            if (dpu_info[start_dpu + k].cols_per_dpu > max_cols_cur_rank)
                max_cols_cur_rank = dpu_info[start_dpu + k].cols_per_dpu;
        }
        // Pad the per-rank maxima to transfer-alignment granularity.
        if (max_rows_cur_rank % 2 != 0)
            max_rows_cur_rank++;
        if (max_rows_cur_rank % (8 / byte_dt) != 0)
            max_rows_cur_rank += ((8 / byte_dt) - (max_rows_cur_rank % (8 / byte_dt)));
        if (max_cols_cur_rank % (8 / byte_dt) != 0)
            max_cols_cur_rank += ((8 / byte_dt) - (max_cols_cur_rank % (8 / byte_dt)));
        part_info->max_rows_per_rank[each_rank] = (uint32_t) max_rows_cur_rank;
        part_info->max_cols_per_rank[each_rank] = (uint32_t) max_cols_cur_rank;
    }
#endif
    // Initializations for parallel transfers with padding needed
    if (max_rows_per_dpu % 2 != 0)
        max_rows_per_dpu++;
    if (max_rows_per_dpu % (8 / byte_dt) != 0)
        max_rows_per_dpu += ((8 / byte_dt) - (max_rows_per_dpu % (8 / byte_dt)));
    if (max_nnz_ind_per_dpu % 2 != 0)
        max_nnz_ind_per_dpu++;
    if (max_nnz_val_per_dpu % (8 / byte_dt) != 0)
        max_nnz_val_per_dpu += ((8 / byte_dt) - (max_nnz_val_per_dpu % (8 / byte_dt)));
    if (max_rows_per_tasklet % (8 / byte_dt) != 0)
        max_rows_per_tasklet += ((8 / byte_dt) - (max_rows_per_tasklet % (8 / byte_dt)));
    // Re-allocations for padding needed
    // All DPUs transfer the same (maximum) amount so one parallel push works.
    A->drowptr = (uint32_t *) realloc(A->drowptr, (max_rows_per_dpu * (uint64_t) nr_of_dpus * sizeof(uint32_t)));
    A->dcolind = (uint32_t *) realloc(A->dcolind, (max_nnz_ind_per_dpu * nr_of_dpus * sizeof(uint32_t)));
    A->dval = (val_dt *) realloc(A->dval, (max_nnz_val_per_dpu * nr_of_dpus * sizeof(val_dt)));
    x = (val_dt *) realloc(x, (uint64_t) ((uint64_t) A->vert_partitions * (uint64_t) tile_width_pad) * (uint64_t) sizeof(val_dt));
    y = (val_dt *) malloc((uint64_t) ((uint64_t) nr_of_dpus * (uint64_t) max_rows_per_dpu) * (uint64_t) sizeof(val_dt));
    // Count total number of bytes to be transfered in MRAM of DPU
    unsigned long int total_bytes;
    total_bytes = ((max_rows_per_dpu) * sizeof(uint32_t)) + (max_nnz_ind_per_dpu * sizeof(uint32_t)) + (max_nnz_val_per_dpu * sizeof(val_dt)) + (tile_width_pad * sizeof(val_dt)) + (max_rows_per_dpu * sizeof(val_dt));
    assert(total_bytes <= DPU_CAPACITY && "Bytes needed exceeded MRAM size");
    // Copy input arguments to DPUs
    i = 0;
    DPU_FOREACH(dpu_set, dpu, i) {
        input_args[i].max_rows = max_rows_per_dpu;
        input_args[i].max_nnz_ind = max_nnz_ind_per_dpu;
        DPU_ASSERT(dpu_prepare_xfer(dpu, input_args + i));
    }
    DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, "DPU_INPUT_ARGUMENTS", 0, sizeof(dpu_arguments_t), DPU_XFER_DEFAULT));
    // Copy input matrix to DPUs
    // MRAM heap layout per DPU: [output rows][input tile of x][rowptr][colind][values];
    // the offsets below walk that layout in order.
    startTimer(&timer, 0);
    // Copy Rowptr
    i = 0;
    DPU_FOREACH(dpu_set, dpu, i) {
        DPU_ASSERT(dpu_prepare_xfer(dpu, A->drowptr + dpu_info[i].ptr_offset));
    }
    DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, (max_rows_per_dpu * sizeof(val_dt) + tile_width_pad * sizeof(val_dt)), max_rows_per_dpu * sizeof(uint32_t), DPU_XFER_DEFAULT));
    // Copy Colind
    i = 0;
    DPU_FOREACH(dpu_set, dpu, i) {
        DPU_ASSERT(dpu_prepare_xfer(dpu, A->dcolind + dpu_info[i].prev_nnz_dpu));
    }
    DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, max_rows_per_dpu * sizeof(val_dt) + tile_width_pad * sizeof(val_dt) + max_rows_per_dpu * sizeof(uint32_t), max_nnz_ind_per_dpu * sizeof(uint32_t), DPU_XFER_DEFAULT));
    // Copy Values
    i = 0;
    DPU_FOREACH(dpu_set, dpu, i) {
        DPU_ASSERT(dpu_prepare_xfer(dpu, A->dval + dpu_info[i].prev_nnz_dpu));
    }
    DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, max_rows_per_dpu * sizeof(val_dt) + tile_width_pad * sizeof(val_dt) + max_rows_per_dpu * sizeof(uint32_t) + max_nnz_ind_per_dpu * sizeof(uint32_t), max_nnz_val_per_dpu * sizeof(val_dt), DPU_XFER_DEFAULT));
    stopTimer(&timer, 0);
    // Copy input vector to DPUs
    startTimer(&timer, 1);
#if CG_TRANS
    // Coarse-grained data transfers in the input vector
    i = 0;
    DPU_FOREACH(dpu_set, dpu, i) {
        uint32_t tile_vert_indx = i / A->horz_partitions;
        DPU_ASSERT(dpu_prepare_xfer(dpu, x + A->vert_tile_widths[tile_vert_indx]));
    }
    DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, max_rows_per_dpu * sizeof(val_dt), tile_width_pad * sizeof(val_dt), DPU_XFER_DEFAULT));
#endif
#if FG_TRANS
    // Fine-grained data transfers in the input vector at rank granularity
    i = 0;
    DPU_FOREACH(dpu_set, dpu, i) {
        uint32_t tile_vert_indx = i / A->horz_partitions;
        DPU_ASSERT(dpu_prepare_xfer(dpu, x + A->vert_tile_widths[tile_vert_indx]));
    }
    i = 0;
    //struct dpu_set_t rank;
    // Async per-rank pushes sized by the rank's max column count, synced once.
    DPU_RANK_FOREACH(dpu_set, rank) {
        DPU_ASSERT(dpu_push_xfer(rank, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, max_rows_per_dpu * sizeof(val_dt), part_info->max_cols_per_rank[i] * sizeof(val_dt), DPU_XFER_ASYNC));
        i++;
    }
    DPU_ASSERT(dpu_sync(dpu_set));
#endif
    stopTimer(&timer, 1);
    // Run kernel on DPUs
    startTimer(&timer, 2);
    DPU_ASSERT(dpu_launch(dpu_set, DPU_SYNCHRONOUS));
    stopTimer(&timer, 2);
#if LOG
    // Display DPU Log (default: disabled)
    DPU_FOREACH(dpu_set, dpu) {
        DPU_ASSERT(dpulog_read_for_dpu(dpu.dpu, stdout));
    }
#endif
    // Retrieve results for output vector from DPUs
    startTimer(&timer, 3);
#if CG_TRANS
    // Coarse-grained data transfers in the output vector
    i = 0;
    DPU_FOREACH(dpu_set, dpu, i) {
        DPU_ASSERT(dpu_prepare_xfer(dpu, y + (i * max_rows_per_dpu)));
    }
    DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_FROM_DPU, DPU_MRAM_HEAP_POINTER_NAME, 0, max_rows_per_dpu * sizeof(val_dt), DPU_XFER_DEFAULT));
#endif
#if FG_TRANS
    // Fine-grained data transfers in the output vector at rank granularity
    i = 0;
    DPU_FOREACH(dpu_set, dpu, i) {
        DPU_ASSERT(dpu_prepare_xfer(dpu, y + (i * max_rows_per_dpu)));
    }
    i = 0;
    DPU_RANK_FOREACH(dpu_set, rank) {
        DPU_ASSERT(dpu_push_xfer(rank, DPU_XFER_FROM_DPU, DPU_MRAM_HEAP_POINTER_NAME, 0, part_info->max_rows_per_rank[i] * sizeof(val_dt), DPU_XFER_ASYNC));
        i++;
    }
    DPU_ASSERT(dpu_sync(dpu_set));
#endif
    stopTimer(&timer, 3);
    // Merge partial results to the host CPU
    // Each DPU produced a partial result for its row range; sum the partial
    // results of all vertical partitions into the final vector z.
    startTimer(&timer, 4);
    uint32_t r, c, t;
    for (c = 0; c < A->vert_partitions; c++) {
        for (r = 0; r < A->horz_partitions; r++) {
#pragma omp parallel for num_threads(p.nthreads) shared(A, z, y, max_rows_per_dpu, r, c) private(t)
            for (t = 0; t < part_info->row_split[c * (A->horz_partitions + 1) + r+1] - part_info->row_split[c * (A->horz_partitions + 1) + r]; t++) {
                z[part_info->row_split[c * (A->horz_partitions + 1) + r] + t] += y[(c * A->horz_partitions + r) * max_rows_per_dpu + t];
            }
        }
    }
    stopTimer(&timer, 4);
    // Print timing results
    printf("\n");
    printf("Load Matrix ");
    printTimer(&timer, 0);
    printf("Load Input Vector ");
    printTimer(&timer, 1);
    printf("Kernel ");
    printTimer(&timer, 2);
    printf("Retrieve Output Vector ");
    printTimer(&timer, 3);
    printf("Merge Partial Results ");
    printTimer(&timer, 4);
    printf("\n\n");
#if CHECK_CORR
    // Check output
    // Compare the merged DPU result z against the sequential CPU reference.
    startTimer(&timer, 4);
    val_dt *y_host = (val_dt *) calloc(nrows_pad, sizeof(val_dt));
    spmv_host(y_host, A, x);
    bool status = true;
    i = 0;
    for (i = 0; i < A->nrows; i++) {
        if(y_host[i] != z[i]) {
            status = false;
        }
    }
    if (status) {
        printf("[" ANSI_COLOR_GREEN "OK" ANSI_COLOR_RESET "] Outputs are equal\n");
    } else {
        printf("[" ANSI_COLOR_RED "ERROR" ANSI_COLOR_RESET "] Outputs differ!\n");
    }
    free(y_host);
#endif
    // Deallocation
    freeBDCSRMatrix(A);
    free(x);
    free(y);
    free(z);
    partition_free(part_info);
    DPU_ASSERT(dpu_free(dpu_set));
    return 0;
}
|
fx.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF X X %
% F X X %
% FFF X %
% F X X %
% F X X %
% %
% %
% MagickCore Image Special Effects Methods %
% %
% Software Design %
% John Cristy %
% October 1996 %
% %
% %
% Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/annotate.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/decorate.h"
#include "magick/distort.h"
#include "magick/draw.h"
#include "magick/effect.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/fx.h"
#include "magick/fx-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/layer.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantum.h"
#include "magick/quantum-private.h"
#include "magick/random_.h"
#include "magick/random-private.h"
#include "magick/resample.h"
#include "magick/resample-private.h"
#include "magick/resize.h"
#include "magick/resource_.h"
#include "magick/splay-tree.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/transform.h"
#include "magick/utility.h"
/*
Define declarations.
*/
#define LeftShiftOperator 0xf5
#define RightShiftOperator 0xf6
#define LessThanEqualOperator 0xf7
#define GreaterThanEqualOperator 0xf8
#define EqualOperator 0xf9
#define NotEqualOperator 0xfa
#define LogicalAndOperator 0xfb
#define LogicalOrOperator 0xfc
#define ExponentialNotation 0xfd
/*
  Per-expression evaluation state for the fx image operator.
*/
struct _FxInfo
{
  const Image
    *images;            /* image list the expression is evaluated against */

  char
    *expression;        /* normalized expression string (owned copy) */

  FILE
    *file;              /* destination for debug output (set to stderr) */

  SplayTreeInfo
    *colors,            /* cache of named colors used by the expression */
    *symbols;           /* symbol table for variables/constants */

  CacheView
    **view;             /* one virtual cache view per image in the list */

  RandomInfo
    *random_info;       /* RNG state for rand()-style expression terms */

  ExceptionInfo
    *exception;         /* collects errors raised during evaluation */
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e F x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireFxInfo() allocates the FxInfo structure.
%
% The format of the AcquireFxInfo method is:
%
% FxInfo *AcquireFxInfo(Image *image,const char *expression)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o expression: the expression.
%
*/
MagickExport FxInfo *AcquireFxInfo(const Image *image,const char *expression)
{
  char
    fx_op[2];

  const Image
    *next;

  FxInfo
    *fx_info;

  register ssize_t
    i;

  fx_info=(FxInfo *) AcquireMagickMemory(sizeof(*fx_info));
  if (fx_info == (FxInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(fx_info,0,sizeof(*fx_info));
  fx_info->exception=AcquireExceptionInfo();
  fx_info->images=image;
  fx_info->colors=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  fx_info->symbols=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  /*
    One virtual cache view per image in the list, indexed in list order.
  */
  fx_info->view=(CacheView **) AcquireQuantumMemory(GetImageListLength(
    fx_info->images),sizeof(*fx_info->view));
  if (fx_info->view == (CacheView **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  i=0;
  next=GetFirstImageInList(fx_info->images);
  for ( ; next != (Image *) NULL; next=next->next)
  {
    fx_info->view[i]=AcquireVirtualCacheView(next,fx_info->exception);
    i++;
  }
  fx_info->random_info=AcquireRandomInfo();
  fx_info->expression=ConstantString(expression);
  fx_info->file=stderr;
  (void) SubstituteString(&fx_info->expression," ",""); /* compact string */
  /*
    Force right-to-left associativity for unary negation.
  */
  /*
    Every "-" becomes "-1.0*"; the three substitutions that follow undo the
    expansion where "-" was binary-exponent syntax (x^-y, 1E-3, 1e-3), so
    order matters here.
  */
  (void) SubstituteString(&fx_info->expression,"-","-1.0*");
  (void) SubstituteString(&fx_info->expression,"^-1.0*","^-");
  (void) SubstituteString(&fx_info->expression,"E-1.0*","E-");
  (void) SubstituteString(&fx_info->expression,"e-1.0*","e-");
  /*
    Convert complex to simple operators.
  */
  /*
    Two-character operators are replaced by single bytes from the 0xf5-0xfd
    private range so the parser can treat each operator as one token.
  */
  fx_op[1]='\0';
  *fx_op=(char) LeftShiftOperator;
  (void) SubstituteString(&fx_info->expression,"<<",fx_op);
  *fx_op=(char) RightShiftOperator;
  (void) SubstituteString(&fx_info->expression,">>",fx_op);
  *fx_op=(char) LessThanEqualOperator;
  (void) SubstituteString(&fx_info->expression,"<=",fx_op);
  *fx_op=(char) GreaterThanEqualOperator;
  (void) SubstituteString(&fx_info->expression,">=",fx_op);
  *fx_op=(char) EqualOperator;
  (void) SubstituteString(&fx_info->expression,"==",fx_op);
  *fx_op=(char) NotEqualOperator;
  (void) SubstituteString(&fx_info->expression,"!=",fx_op);
  *fx_op=(char) LogicalAndOperator;
  (void) SubstituteString(&fx_info->expression,"&&",fx_op);
  *fx_op=(char) LogicalOrOperator;
  (void) SubstituteString(&fx_info->expression,"||",fx_op);
  *fx_op=(char) ExponentialNotation;
  (void) SubstituteString(&fx_info->expression,"**",fx_op);
  return(fx_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d d N o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AddNoiseImage() adds random noise to the image.
%
% The format of the AddNoiseImage method is:
%
% Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
% ExceptionInfo *exception)
% Image *AddNoiseImageChannel(const Image *image,const ChannelType channel,
% const NoiseType noise_type,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o noise_type: The type of noise: Uniform, Gaussian, Multiplicative,
% Impulse, Laplacian, or Poisson.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
  ExceptionInfo *exception)
{
  /*
    Convenience wrapper: add noise to the default channel set.
  */
  return(AddNoiseImageChannel(image,DefaultChannels,noise_type,exception));
}
MagickExport Image *AddNoiseImageChannel(const Image *image,
  const ChannelType channel,const NoiseType noise_type,ExceptionInfo *exception)
{
#define AddNoiseImageTag "AddNoise/Image"
  CacheView
    *image_view,
    *noise_view;
  const char
    *option;
  Image
    *noise_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  MagickRealType
    attenuate;
  RandomInfo
    **restrict random_info;
  ssize_t
    y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif
  /*
    Initialize noise image attributes: clone the source and force DirectClass
    so pixels can be modified independently of any colormap.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  noise_image=CloneImage(image,0,0,MagickTrue,exception);
  if (noise_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(noise_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&noise_image->exception);
      noise_image=DestroyImage(noise_image);
      return((Image *) NULL);
    }
  /*
    Add noise in each row.  The "attenuate" artifact, when present, scales
    the noise amplitude (default 1.0).
  */
  attenuate=1.0;
  option=GetImageArtifact(image,"attenuate");
  if (option != (char *) NULL)
    attenuate=StringToDouble(option,(char **) NULL);
  status=MagickTrue;
  progress=0;
  /* One RandomInfo per thread so rows can generate noise concurrently. */
  random_info=AcquireRandomInfoThreadSet();
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  key=GetRandomSecretKey(random_info[0]);
#endif
  image_view=AcquireVirtualCacheView(image,exception);
  noise_view=AcquireAuthenticCacheView(noise_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /*
    NOTE(review): threading is gated on key == ~0UL — presumably threads are
    disabled when a random seed has been set, to keep output reproducible;
    confirm against GetRandomSecretKey() semantics.
  */
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,noise_image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();
    MagickBooleanType
      sync;
    register const IndexPacket
      *restrict indexes;
    register const PixelPacket
      *restrict p;
    register IndexPacket
      *restrict noise_indexes;
    register ssize_t
      x;
    register PixelPacket
      *restrict q;
    /* Skip remaining rows once any row has failed. */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1,
      exception);
    if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    noise_indexes=GetCacheViewAuthenticIndexQueue(noise_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampToQuantum(GenerateDifferentialNoise(random_info[id],
          GetPixelRed(p),noise_type,attenuate)));
      /* Gray images replicate the noisy red channel so R==G==B holds. */
      if (IsGrayColorspace(image->colorspace) != MagickFalse)
        {
          SetPixelGreen(q,GetPixelRed(q));
          SetPixelBlue(q,GetPixelRed(q));
        }
      else
        {
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(GenerateDifferentialNoise(
              random_info[id],GetPixelGreen(p),noise_type,attenuate)));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(GenerateDifferentialNoise(
              random_info[id],GetPixelBlue(p),noise_type,attenuate)));
        }
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,ClampToQuantum(GenerateDifferentialNoise(
          random_info[id],GetPixelOpacity(p),noise_type,attenuate)));
      /* The index channel (black) only exists for CMYK images. */
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(noise_indexes+x,ClampToQuantum(
          GenerateDifferentialNoise(random_info[id],GetPixelIndex(
          indexes+x),noise_type,attenuate)));
      p++;
      q++;
    }
    sync=SyncCacheViewAuthenticPixels(noise_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        /* progress++ is shared across threads; serialize the update. */
        #pragma omp critical (MagickCore_AddNoiseImage)
#endif
        proceed=SetImageProgress(image,AddNoiseImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  noise_view=DestroyCacheView(noise_view);
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  /* On any failure, release the partial result and return NULL. */
  if (status == MagickFalse)
    noise_image=DestroyImage(noise_image);
  return(noise_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l u e S h i f t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlueShiftImage() mutes the colors of the image to simulate a scene at
% nighttime in the moonlight.
%
% The format of the BlueShiftImage method is:
%
% Image *BlueShiftImage(const Image *image,const double factor,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o factor: the shift factor.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *BlueShiftImage(const Image *image,const double factor,
  ExceptionInfo *exception)
{
#define BlueShiftImageTag "BlueShift/Image"
  CacheView
    *image_view,
    *shift_view;
  Image
    *shift_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  ssize_t
    y;
  /*
    Allocate blue shift image: a DirectClass clone the same size as the
    source.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  shift_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (shift_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(shift_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&shift_image->exception);
      shift_image=DestroyImage(shift_image);
      return((Image *) NULL);
    }
  /*
    Blue-shift DirectClass image row by row.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  shift_view=AcquireAuthenticCacheView(shift_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,shift_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;
    MagickPixelPacket
      pixel;
    Quantum
      quantum;
    register const PixelPacket
      *restrict p;
    register ssize_t
      x;
    register PixelPacket
      *restrict q;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(shift_view,0,y,shift_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        First blend each channel halfway toward factor * min(R,G,B) ...
      */
      quantum=GetPixelRed(p);
      if (GetPixelGreen(p) < quantum)
        quantum=GetPixelGreen(p);
      if (GetPixelBlue(p) < quantum)
        quantum=GetPixelBlue(p);
      pixel.red=0.5*(GetPixelRed(p)+factor*quantum);
      pixel.green=0.5*(GetPixelGreen(p)+factor*quantum);
      pixel.blue=0.5*(GetPixelBlue(p)+factor*quantum);
      /*
        ... then blend the result halfway toward factor * max(R,G,B); the
        combination mutes color, simulating moonlight.
      */
      quantum=GetPixelRed(p);
      if (GetPixelGreen(p) > quantum)
        quantum=GetPixelGreen(p);
      if (GetPixelBlue(p) > quantum)
        quantum=GetPixelBlue(p);
      pixel.red=0.5*(pixel.red+factor*quantum);
      pixel.green=0.5*(pixel.green+factor*quantum);
      pixel.blue=0.5*(pixel.blue+factor*quantum);
      SetPixelRed(q,ClampToQuantum(pixel.red));
      SetPixelGreen(q,ClampToQuantum(pixel.green));
      SetPixelBlue(q,ClampToQuantum(pixel.blue));
      p++;
      q++;
    }
    sync=SyncCacheViewAuthenticPixels(shift_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        /* Serialize the shared progress counter. */
        #pragma omp critical (MagickCore_BlueShiftImage)
#endif
        proceed=SetImageProgress(image,BlueShiftImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  shift_view=DestroyCacheView(shift_view);
  /* On any failure, release the partial result and return NULL. */
  if (status == MagickFalse)
    shift_image=DestroyImage(shift_image);
  return(shift_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h a r c o a l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CharcoalImage() creates a new image that is a copy of an existing one with
% the edge highlighted. It allocates the memory necessary for the new Image
% structure and returns a pointer to the new image.
%
% The format of the CharcoalImage method is:
%
% Image *CharcoalImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CharcoalImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  Image
    *edges,
    *sketch,
    *snapshot;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    Detect edges on a working copy of the source image.
  */
  snapshot=CloneImage(image,0,0,MagickTrue,exception);
  if (snapshot == (Image *) NULL)
    return((Image *) NULL);
  edges=EdgeImage(snapshot,radius,exception);
  snapshot=DestroyImage(snapshot);
  if (edges == (Image *) NULL)
    return((Image *) NULL);
  /*
    Soften the edge image, then normalize, invert, and convert to gray to
    produce the charcoal-sketch look.
  */
  sketch=BlurImage(edges,radius,sigma,exception);
  edges=DestroyImage(edges);
  if (sketch == (Image *) NULL)
    return((Image *) NULL);
  (void) NormalizeImage(sketch);
  (void) NegateImage(sketch,MagickFalse);
  (void) GrayscaleImage(sketch,image->intensity);
  return(sketch);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorizeImage() blends the fill color with each pixel in the image.
% A percentage blend is specified with opacity. Control the application
% of different color components by specifying a different percentage for
% each component (e.g. 90/100/10 is 90% red, 100% green, and 10% blue).
%
% The format of the ColorizeImage method is:
%
% Image *ColorizeImage(const Image *image,const char *opacity,
% const PixelPacket colorize,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o opacity: A character string indicating the level of opacity as a
% percentage.
%
% o colorize: A color value.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ColorizeImage(const Image *image,const char *opacity,
  const PixelPacket colorize,ExceptionInfo *exception)
{
#define ColorizeImageTag "Colorize/Image"
  CacheView
    *colorize_view,
    *image_view;
  GeometryInfo
    geometry_info;
  Image
    *colorize_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  MagickPixelPacket
    pixel;
  MagickStatusType
    flags;
  ssize_t
    y;
  /*
    Allocate colorized image: a DirectClass clone of the source.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  colorize_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (colorize_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(colorize_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&colorize_image->exception);
      colorize_image=DestroyImage(colorize_image);
      return((Image *) NULL);
    }
  /* A gray source or gray fill still yields an sRGB (color) result. */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) ||
      (IsPixelGray(&colorize) != MagickFalse))
    (void) SetImageColorspace(colorize_image,sRGBColorspace);
  /* A translucent fill requires an alpha channel on the result. */
  if ((colorize_image->matte == MagickFalse) &&
      (colorize.opacity != OpaqueOpacity))
    (void) SetImageAlphaChannel(colorize_image,OpaqueAlphaChannel);
  /* No opacity string means no blending: return the plain clone. */
  if (opacity == (const char *) NULL)
    return(colorize_image);
  /*
    Determine RGB values of the pen color.  The opacity geometry maps
    rho/sigma/xi/psi to the red/green/blue/opacity blend percentages; a
    single value applies to all channels.
  */
  flags=ParseGeometry(opacity,&geometry_info);
  pixel.red=geometry_info.rho;
  pixel.green=geometry_info.rho;
  pixel.blue=geometry_info.rho;
  pixel.opacity=geometry_info.rho;
  if ((flags & SigmaValue) != 0)
    pixel.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    pixel.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    pixel.opacity=geometry_info.psi;
  /*
    Colorize DirectClass image row by row.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  colorize_view=AcquireAuthenticCacheView(colorize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,colorize_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;
    register const PixelPacket
      *restrict p;
    register ssize_t
      x;
    register PixelPacket
      *restrict q;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(colorize_view,0,y,colorize_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Per-channel linear blend: (src*(100-pct)+fill*pct)/100. */
      SetPixelRed(q,((GetPixelRed(p)*(100.0-pixel.red)+
        colorize.red*pixel.red)/100.0));
      SetPixelGreen(q,((GetPixelGreen(p)*(100.0-pixel.green)+
        colorize.green*pixel.green)/100.0));
      SetPixelBlue(q,((GetPixelBlue(p)*(100.0-pixel.blue)+
        colorize.blue*pixel.blue)/100.0));
      if (colorize_image->matte == MagickFalse)
        SetPixelOpacity(q,GetPixelOpacity(p));
      else
        SetPixelOpacity(q,((GetPixelOpacity(p)*(100.0-pixel.opacity)+
          colorize.opacity*pixel.opacity)/100.0));
      p++;
      q++;
    }
    sync=SyncCacheViewAuthenticPixels(colorize_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        /* Serialize the shared progress counter. */
        #pragma omp critical (MagickCore_ColorizeImage)
#endif
        proceed=SetImageProgress(image,ColorizeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  colorize_view=DestroyCacheView(colorize_view);
  /* On any failure, release the partial result and return NULL. */
  if (status == MagickFalse)
    colorize_image=DestroyImage(colorize_image);
  return(colorize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r M a t r i x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorMatrixImage() applies color transformation to an image. This method
% permits saturation changes, hue rotation, luminance to alpha, and various
% other effects. Although variable-sized transformation matrices can be used,
% typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA
% (or RGBA with offsets). The matrix is similar to those used by Adobe Flash
% except offsets are in column 6 rather than 5 (in support of CMYKA images)
% and offsets are normalized (divide Flash offset by 255).
%
% The format of the ColorMatrixImage method is:
%
% Image *ColorMatrixImage(const Image *image,
% const KernelInfo *color_matrix,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o color_matrix: the color matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ColorMatrixImage(const Image *image,
  const KernelInfo *color_matrix,ExceptionInfo *exception)
{
#define ColorMatrixImageTag "ColorMatrix/Image"
  CacheView
    *color_view,
    *image_view;
  double
    ColorMatrix[6][6] =
    {
      { 1.0, 0.0, 0.0, 0.0, 0.0, 0.0 },
      { 0.0, 1.0, 0.0, 0.0, 0.0, 0.0 },
      { 0.0, 0.0, 1.0, 0.0, 0.0, 0.0 },
      { 0.0, 0.0, 0.0, 1.0, 0.0, 0.0 },
      { 0.0, 0.0, 0.0, 0.0, 1.0, 0.0 },
      { 0.0, 0.0, 0.0, 0.0, 0.0, 1.0 }
    };
  Image
    *color_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  register ssize_t
    i;
  ssize_t
    u,
    v,
    y;
  /*
    Create color matrix: copy the user kernel into the top-left of a 6x6
    identity so missing rows/columns leave channels unchanged; entries
    beyond 6x6 are ignored.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  i=0;
  for (v=0; v < (ssize_t) color_matrix->height; v++)
    for (u=0; u < (ssize_t) color_matrix->width; u++)
    {
      if ((v < 6) && (u < 6))
        ColorMatrix[v][u]=color_matrix->values[i];
      i++;
    }
  /*
    Initialize color image.
  */
  color_image=CloneImage(image,0,0,MagickTrue,exception);
  if (color_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(color_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&color_image->exception);
      color_image=DestroyImage(color_image);
      return((Image *) NULL);
    }
  /* In debug mode, log the effective 6x6 matrix one row per line. */
  if (image->debug != MagickFalse)
    {
      char
        format[MaxTextExtent],
        *message;
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  ColorMatrix image with color matrix:");
      message=AcquireString("");
      for (v=0; v < 6; v++)
      {
        *message='\0';
        (void) FormatLocaleString(format,MaxTextExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < 6; u++)
        {
          (void) FormatLocaleString(format,MaxTextExtent,"%+f ",
            ColorMatrix[v][u]);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  /*
    ColorMatrix image row by row.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  color_view=AcquireAuthenticCacheView(color_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,color_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickRealType
      pixel;
    register const IndexPacket
      *restrict indexes;
    register const PixelPacket
      *restrict p;
    register ssize_t
      x;
    register IndexPacket
      *restrict color_indexes;
    register PixelPacket
      *restrict q;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(color_view,0,y,color_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    color_indexes=GetCacheViewAuthenticIndexQueue(color_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        v;
      size_t
        height;
      height=color_matrix->height > 6 ? 6UL : color_matrix->height;
      /*
        Row v of the matrix produces output channel v as a weighted sum of
        R, G, B, alpha (if matte), black/index (if CMYK) plus a normalized
        offset in column 5.
      */
      for (v=0; v < (ssize_t) height; v++)
      {
        pixel=ColorMatrix[v][0]*GetPixelRed(p)+ColorMatrix[v][1]*
          GetPixelGreen(p)+ColorMatrix[v][2]*GetPixelBlue(p);
        if (image->matte != MagickFalse)
          pixel+=ColorMatrix[v][3]*(QuantumRange-GetPixelOpacity(p));
        if (image->colorspace == CMYKColorspace)
          pixel+=ColorMatrix[v][4]*GetPixelIndex(indexes+x);
        pixel+=QuantumRange*ColorMatrix[v][5];
        switch (v)
        {
          case 0: SetPixelRed(q,ClampToQuantum(pixel)); break;
          case 1: SetPixelGreen(q,ClampToQuantum(pixel)); break;
          case 2: SetPixelBlue(q,ClampToQuantum(pixel)); break;
          case 3:
          {
            if (image->matte != MagickFalse)
              SetPixelAlpha(q,ClampToQuantum(pixel));
            break;
          }
          case 4:
          {
            if (image->colorspace == CMYKColorspace)
              SetPixelIndex(color_indexes+x,ClampToQuantum(pixel));
            break;
          }
        }
      }
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(color_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        /* Serialize the shared progress counter. */
        #pragma omp critical (MagickCore_ColorMatrixImage)
#endif
        proceed=SetImageProgress(image,ColorMatrixImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  color_view=DestroyCacheView(color_view);
  image_view=DestroyCacheView(image_view);
  /* On any failure, release the partial result and return NULL. */
  if (status == MagickFalse)
    color_image=DestroyImage(color_image);
  return(color_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y F x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyFxInfo() deallocates memory associated with an FxInfo structure.
%
% The format of the DestroyFxInfo method is:
%
%      FxInfo *DestroyFxInfo(FxInfo *fx_info)
%
% A description of each parameter follows:
%
% o fx_info: the fx info.
%
*/
MagickExport FxInfo *DestroyFxInfo(FxInfo *fx_info)
{
  register ssize_t
    i;
  /*
    Release each owned member of the FxInfo structure, then the structure
    itself; always returns NULL.
  */
  fx_info->exception=DestroyExceptionInfo(fx_info->exception);
  fx_info->expression=DestroyString(fx_info->expression);
  fx_info->symbols=DestroySplayTree(fx_info->symbols);
  fx_info->colors=DestroySplayTree(fx_info->colors);
  /* One cache view was acquired per image in the list; release them all. */
  i=(ssize_t) GetImageListLength(fx_info->images);
  while (--i >= 0)
    fx_info->view[i]=DestroyCacheView(fx_info->view[i]);
  fx_info->view=(CacheView **) RelinquishMagickMemory(fx_info->view);
  fx_info->random_info=DestroyRandomInfo(fx_info->random_info);
  return((FxInfo *) RelinquishMagickMemory(fx_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F x E v a l u a t e C h a n n e l E x p r e s s i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FxEvaluateChannelExpression() evaluates an expression and returns the
% results.
%
% The format of the FxEvaluateExpression method is:
%
%      MagickRealType FxEvaluateChannelExpression(FxInfo *fx_info,
%        const ChannelType channel,const ssize_t x,const ssize_t y,
%        MagickRealType *alpha,ExceptionInfo *exception)
%      MagickRealType FxEvaluateExpression(FxInfo *fx_info,
%        MagickRealType *alpha,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o fx_info: the fx info.
%
% o channel: the channel.
%
% o x,y: the pixel position.
%
% o alpha: the result.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Return the larger of two doubles.
*/
static inline double MagickMax(const double x,const double y)
{
  return(x > y ? x : y);
}
/*
  Return the smaller of two doubles.
*/
static inline double MagickMin(const double x,const double y)
{
  return(x < y ? x : y);
}
/*
  FxChannelStatistics() returns a QuantumScale-normalized image statistic
  (depth, kurtosis, maxima, mean, minima, skewness, standard_deviation) for
  the given channel, caching computed values in fx_info->symbols.
*/
static MagickRealType FxChannelStatistics(FxInfo *fx_info,const Image *image,
  ChannelType channel,const char *symbol,ExceptionInfo *exception)
{
  char
    key[MaxTextExtent],
    statistic[MaxTextExtent];
  const char
    *value;
  register const char
    *p;
  /*
    A symbol may carry a channel suffix (e.g. "mean.g"); when present it
    overrides the requested channel.
  */
  for (p=symbol; (*p != '.') && (*p != '\0'); p++) ;
  if (*p == '.')
    {
      ssize_t
        option;
      option=ParseCommandOption(MagickChannelOptions,MagickTrue,p+1);
      if (option >= 0)
        channel=(ChannelType) option;
    }
  /*
    Return the cached statistic when it has been computed before.
  */
  (void) FormatLocaleString(key,MaxTextExtent,"%p.%.20g.%s",(void *) image,
    (double) channel,symbol);
  value=(const char *) GetValueFromSplayTree(fx_info->symbols,key);
  if (value != (const char *) NULL)
    return(QuantumScale*StringToDouble(value,(char **) NULL));
  (void) DeleteNodeFromSplayTree(fx_info->symbols,key);
  /*
    Compute the requested statistic.  Initialize the buffer so that an
    unrecognized symbol yields 0.0 rather than formatting and caching
    indeterminate stack data.
  */
  *statistic='\0';
  if (LocaleNCompare(symbol,"depth",5) == 0)
    {
      size_t
        depth;
      depth=GetImageChannelDepth(image,channel,exception);
      (void) FormatLocaleString(statistic,MaxTextExtent,"%.20g",(double)
        depth);
    }
  if (LocaleNCompare(symbol,"kurtosis",8) == 0)
    {
      double
        kurtosis,
        skewness;
      (void) GetImageChannelKurtosis(image,channel,&kurtosis,&skewness,
        exception);
      (void) FormatLocaleString(statistic,MaxTextExtent,"%g",kurtosis);
    }
  if (LocaleNCompare(symbol,"maxima",6) == 0)
    {
      double
        maxima,
        minima;
      (void) GetImageChannelRange(image,channel,&minima,&maxima,exception);
      (void) FormatLocaleString(statistic,MaxTextExtent,"%g",maxima);
    }
  if (LocaleNCompare(symbol,"mean",4) == 0)
    {
      double
        mean,
        standard_deviation;
      (void) GetImageChannelMean(image,channel,&mean,&standard_deviation,
        exception);
      (void) FormatLocaleString(statistic,MaxTextExtent,"%g",mean);
    }
  if (LocaleNCompare(symbol,"minima",6) == 0)
    {
      double
        maxima,
        minima;
      (void) GetImageChannelRange(image,channel,&minima,&maxima,exception);
      (void) FormatLocaleString(statistic,MaxTextExtent,"%g",minima);
    }
  if (LocaleNCompare(symbol,"skewness",8) == 0)
    {
      double
        kurtosis,
        skewness;
      (void) GetImageChannelKurtosis(image,channel,&kurtosis,&skewness,
        exception);
      (void) FormatLocaleString(statistic,MaxTextExtent,"%g",skewness);
    }
  if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
    {
      double
        mean,
        standard_deviation;
      (void) GetImageChannelMean(image,channel,&mean,&standard_deviation,
        exception);
      (void) FormatLocaleString(statistic,MaxTextExtent,"%g",
        standard_deviation);
    }
  /* Cache the formatted statistic for subsequent lookups. */
  (void) AddValueToSplayTree(fx_info->symbols,ConstantString(key),
    ConstantString(statistic));
  return(QuantumScale*StringToDouble(statistic,(char **) NULL));
}
/*
  Forward declaration: the expression evaluator defined later in this file,
  needed by the symbol/statistics helpers above it.
*/
static MagickRealType
  FxEvaluateSubexpression(FxInfo *,const ChannelType,const ssize_t,
    const ssize_t,const char *,MagickRealType *,ExceptionInfo *);
/*
  Greatest common divisor via the iterative Euclidean algorithm.
*/
static MagickOffsetType FxGCD(MagickOffsetType alpha,MagickOffsetType beta)
{
  while (beta != 0)
  {
    MagickOffsetType
      residue;
    residue=alpha % beta;
    alpha=beta;
    beta=residue;
  }
  return(alpha);
}
/*
  Advance to the ')' that closes the current parenthesized subexpression,
  tracking nesting depth; throws UnbalancedParenthesis when the string ends
  first.  Returns a pointer to the closing parenthesis (or the terminator).
*/
static inline const char *FxSubexpression(const char *expression,
  ExceptionInfo *exception)
{
  const char
    *subexpression;
  register ssize_t
    level;
  level=0;
  subexpression=expression;
  while (*subexpression != '\0')
  {
    if ((level == 1) && (*subexpression == ')'))
      break;
    if (*subexpression == '(')
      level++;
    else
      if (*subexpression == ')')
        level--;
    subexpression++;
  }
  if (*subexpression == '\0')
    (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
      "UnbalancedParenthesis","`%s'",expression);
  return(subexpression);
}
/*
  FxGetSymbol() resolves an fx expression symbol at pixel (x,y): image
  selectors (s, u, v, u[n]), pixel references (p{...}, p[...]), per-channel
  values (r,g,b,a,o and CMYK c,m,y,k), geometry/meta symbols (w,h,i,j,n,t,
  page.*, resolution.*), HSL terms, channel statistics, and user-defined
  symbols cached in fx_info->symbols.

  Fix: the CMYK symbols "m" (magenta) and "y" (yellow) were swapped; in
  the fx pixel model cyan is stored in the red channel, magenta in the
  green channel, and yellow in the blue channel (consistent with "c"
  returning pixel.red here).
*/
static MagickRealType FxGetSymbol(FxInfo *fx_info,const ChannelType channel,
  const ssize_t x,const ssize_t y,const char *expression,
  ExceptionInfo *exception)
{
  char
    *q,
    subexpression[MaxTextExtent],
    symbol[MaxTextExtent];
  const char
    *p,
    *value;
  Image
    *image;
  MagickPixelPacket
    pixel;
  MagickRealType
    alpha,
    beta;
  PointInfo
    point;
  register ssize_t
    i;
  size_t
    length;
  size_t
    level;
  p=expression;
  i=GetImageIndexInList(fx_info->images);
  level=0;
  point.x=(double) x;
  point.y=(double) y;
  if (isalpha((int) *(p+1)) == 0)
    {
      /*
        Image selector: 's' is the current image, 'u' the first, 'v' the
        second; u[expr] selects an arbitrary list index.
      */
      if (strchr("suv",(int) *p) != (char *) NULL)
        {
          switch (*p)
          {
            case 's':
            default:
            {
              i=GetImageIndexInList(fx_info->images);
              break;
            }
            case 'u': i=0; break;
            case 'v': i=1; break;
          }
          p++;
          if (*p == '[')
            {
              level++;
              q=subexpression;
              for (p++; *p != '\0'; )
              {
                if (*p == '[')
                  level++;
                else
                  if (*p == ']')
                    {
                      level--;
                      if (level == 0)
                        break;
                    }
                *q++=(*p++);
              }
              *q='\0';
              alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                &beta,exception);
              /* Round the evaluated index to the nearest integer. */
              i=(ssize_t) (alpha+0.5);
              p++;
            }
          if (*p == '.')
            p++;
        }
      /*
        Pixel reference: p{ex,ey} is an absolute position, p[dx,dy] an
        offset relative to the current pixel.
      */
      if ((*p == 'p') && (isalpha((int) *(p+1)) == 0))
        {
          p++;
          if (*p == '{')
            {
              level++;
              q=subexpression;
              for (p++; *p != '\0'; )
              {
                if (*p == '{')
                  level++;
                else
                  if (*p == '}')
                    {
                      level--;
                      if (level == 0)
                        break;
                    }
                *q++=(*p++);
              }
              *q='\0';
              alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                &beta,exception);
              point.x=alpha;
              point.y=beta;
              p++;
            }
          else
            if (*p == '[')
              {
                level++;
                q=subexpression;
                for (p++; *p != '\0'; )
                {
                  if (*p == '[')
                    level++;
                  else
                    if (*p == ']')
                      {
                        level--;
                        if (level == 0)
                          break;
                      }
                  *q++=(*p++);
                }
                *q='\0';
                alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                  &beta,exception);
                point.x+=alpha;
                point.y+=beta;
                p++;
              }
          if (*p == '.')
            p++;
        }
    }
  /*
    Wrap the image index modulo the list length so negative or oversized
    selectors remain valid.
  */
  length=GetImageListLength(fx_info->images);
  while (i < 0)
    i+=(ssize_t) length;
  i%=length;
  image=GetImageFromList(fx_info->images,i);
  if (image == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "NoSuchImage","`%s'",expression);
      return(0.0);
    }
  GetMagickPixelPacket(image,&pixel);
  (void) InterpolateMagickPixelPacket(image,fx_info->view[i],image->interpolate,
    point.x,point.y,&pixel,exception);
  /*
    The remainder of the symbol may name a color (e.g. "red.r"); resolve it
    via the color cache or QueryMagickColor and consume the color name.
  */
  if ((strlen(p) > 2) &&
      (LocaleCompare(p,"intensity") != 0) &&
      (LocaleCompare(p,"luminance") != 0) &&
      (LocaleCompare(p,"hue") != 0) &&
      (LocaleCompare(p,"saturation") != 0) &&
      (LocaleCompare(p,"lightness") != 0))
    {
      char
        name[MaxTextExtent];
      (void) CopyMagickString(name,p,MaxTextExtent);
      for (q=name+(strlen(name)-1); q > name; q--)
      {
        if (*q == ')')
          break;
        if (*q == '.')
          {
            *q='\0';
            break;
          }
      }
      if ((strlen(name) > 2) &&
          (GetValueFromSplayTree(fx_info->symbols,name) == (const char *) NULL))
        {
          MagickPixelPacket
            *color;
          color=(MagickPixelPacket *) GetValueFromSplayTree(fx_info->colors,
            name);
          if (color != (MagickPixelPacket *) NULL)
            {
              pixel=(*color);
              p+=strlen(name);
            }
          else
            if (QueryMagickColor(name,&pixel,fx_info->exception) != MagickFalse)
              {
                (void) AddValueToSplayTree(fx_info->colors,ConstantString(name),
                  CloneMagickPixelPacket(&pixel));
                p+=strlen(name);
              }
        }
    }
  (void) CopyMagickString(symbol,p,MaxTextExtent);
  StripString(symbol);
  /*
    A bare pixel reference (no trailing symbol) yields the value of the
    requested channel.
  */
  if (*symbol == '\0')
    {
      switch (channel)
      {
        case RedChannel: return(QuantumScale*pixel.red);
        case GreenChannel: return(QuantumScale*pixel.green);
        case BlueChannel: return(QuantumScale*pixel.blue);
        case OpacityChannel:
        {
          MagickRealType
            alpha;
          if (pixel.matte == MagickFalse)
            return(1.0);
          alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(&pixel));
          return(alpha);
        }
        case IndexChannel:
        {
          if (image->colorspace != CMYKColorspace)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                ImageError,"ColorSeparatedImageRequired","`%s'",
                image->filename);
              return(0.0);
            }
          return(QuantumScale*pixel.index);
        }
        case DefaultChannels:
        {
          return(QuantumScale*MagickPixelIntensityToQuantum(&pixel));
        }
        default:
          break;
      }
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "UnableToParseExpression","`%s'",p);
      return(0.0);
    }
  switch (*symbol)
  {
    case 'A':
    case 'a':
    {
      if (LocaleCompare(symbol,"a") == 0)
        return((MagickRealType) (QuantumScale*GetPixelAlpha(&pixel)));
      break;
    }
    case 'B':
    case 'b':
    {
      if (LocaleCompare(symbol,"b") == 0)
        return(QuantumScale*pixel.blue);
      break;
    }
    case 'C':
    case 'c':
    {
      if (LocaleNCompare(symbol,"channel",7) == 0)
        {
          GeometryInfo
            channel_info;
          MagickStatusType
            flags;
          /*
            channel(r,g,b,...) selects the geometry term matching the
            channel currently being evaluated; absent terms yield 0.
          */
          flags=ParseGeometry(symbol+7,&channel_info);
          if (image->colorspace == CMYKColorspace)
            switch (channel)
            {
              case CyanChannel:
              {
                if ((flags & RhoValue) == 0)
                  return(0.0);
                return(channel_info.rho);
              }
              case MagentaChannel:
              {
                if ((flags & SigmaValue) == 0)
                  return(0.0);
                return(channel_info.sigma);
              }
              case YellowChannel:
              {
                if ((flags & XiValue) == 0)
                  return(0.0);
                return(channel_info.xi);
              }
              case BlackChannel:
              {
                if ((flags & PsiValue) == 0)
                  return(0.0);
                return(channel_info.psi);
              }
              case OpacityChannel:
              {
                if ((flags & ChiValue) == 0)
                  return(0.0);
                return(channel_info.chi);
              }
              default:
                return(0.0);
            }
          switch (channel)
          {
            case RedChannel:
            {
              if ((flags & RhoValue) == 0)
                return(0.0);
              return(channel_info.rho);
            }
            case GreenChannel:
            {
              if ((flags & SigmaValue) == 0)
                return(0.0);
              return(channel_info.sigma);
            }
            case BlueChannel:
            {
              if ((flags & XiValue) == 0)
                return(0.0);
              return(channel_info.xi);
            }
            case OpacityChannel:
            {
              if ((flags & PsiValue) == 0)
                return(0.0);
              return(channel_info.psi);
            }
            case IndexChannel:
            {
              if ((flags & ChiValue) == 0)
                return(0.0);
              return(channel_info.chi);
            }
            default:
              return(0.0);
          }
          return(0.0);
        }
      /* Cyan is stored in the red channel. */
      if (LocaleCompare(symbol,"c") == 0)
        return(QuantumScale*pixel.red);
      break;
    }
    case 'D':
    case 'd':
    {
      if (LocaleNCompare(symbol,"depth",5) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      break;
    }
    case 'G':
    case 'g':
    {
      if (LocaleCompare(symbol,"g") == 0)
        return(QuantumScale*pixel.green);
      break;
    }
    case 'K':
    case 'k':
    {
      if (LocaleNCompare(symbol,"kurtosis",8) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleCompare(symbol,"k") == 0)
        {
          if (image->colorspace != CMYKColorspace)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionError,"ColorSeparatedImageRequired","`%s'",
                image->filename);
              return(0.0);
            }
          return(QuantumScale*pixel.index);
        }
      break;
    }
    case 'H':
    case 'h':
    {
      if (LocaleCompare(symbol,"h") == 0)
        return((MagickRealType) image->rows);
      if (LocaleCompare(symbol,"hue") == 0)
        {
          double
            hue,
            lightness,
            saturation;
          ConvertRGBToHSL(ClampToQuantum(pixel.red),ClampToQuantum(pixel.green),
            ClampToQuantum(pixel.blue),&hue,&saturation,&lightness);
          return(hue);
        }
      break;
    }
    case 'I':
    case 'i':
    {
      if ((LocaleCompare(symbol,"image.depth") == 0) ||
          (LocaleCompare(symbol,"image.minima") == 0) ||
          (LocaleCompare(symbol,"image.maxima") == 0) ||
          (LocaleCompare(symbol,"image.mean") == 0) ||
          (LocaleCompare(symbol,"image.kurtosis") == 0) ||
          (LocaleCompare(symbol,"image.skewness") == 0) ||
          (LocaleCompare(symbol,"image.standard_deviation") == 0))
        return(FxChannelStatistics(fx_info,image,channel,symbol+6,exception));
      if (LocaleCompare(symbol,"image.resolution.x") == 0)
        return(image->x_resolution);
      if (LocaleCompare(symbol,"image.resolution.y") == 0)
        return(image->y_resolution);
      if (LocaleCompare(symbol,"intensity") == 0)
        return(QuantumScale*MagickPixelIntensityToQuantum(&pixel));
      if (LocaleCompare(symbol,"i") == 0)
        return((MagickRealType) x);
      break;
    }
    case 'J':
    case 'j':
    {
      if (LocaleCompare(symbol,"j") == 0)
        return((MagickRealType) y);
      break;
    }
    case 'L':
    case 'l':
    {
      if (LocaleCompare(symbol,"lightness") == 0)
        {
          double
            hue,
            lightness,
            saturation;
          ConvertRGBToHSL(ClampToQuantum(pixel.red),ClampToQuantum(pixel.green),
            ClampToQuantum(pixel.blue),&hue,&saturation,&lightness);
          return(lightness);
        }
      if (LocaleCompare(symbol,"luminance") == 0)
        {
          double
            luminence;
          luminence=0.21267f*pixel.red+0.71516f*pixel.green+0.07217f*pixel.blue;
          return(QuantumScale*luminence);
        }
      break;
    }
    case 'M':
    case 'm':
    {
      if (LocaleNCompare(symbol,"maxima",6) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"mean",4) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"minima",6) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      /* Magenta is stored in the green channel (was pixel.blue: bug). */
      if (LocaleCompare(symbol,"m") == 0)
        return(QuantumScale*pixel.green);
      break;
    }
    case 'N':
    case 'n':
    {
      if (LocaleCompare(symbol,"n") == 0)
        return((MagickRealType) GetImageListLength(fx_info->images));
      break;
    }
    case 'O':
    case 'o':
    {
      if (LocaleCompare(symbol,"o") == 0)
        return(QuantumScale*pixel.opacity);
      break;
    }
    case 'P':
    case 'p':
    {
      if (LocaleCompare(symbol,"page.height") == 0)
        return((MagickRealType) image->page.height);
      if (LocaleCompare(symbol,"page.width") == 0)
        return((MagickRealType) image->page.width);
      if (LocaleCompare(symbol,"page.x") == 0)
        return((MagickRealType) image->page.x);
      if (LocaleCompare(symbol,"page.y") == 0)
        return((MagickRealType) image->page.y);
      break;
    }
    case 'R':
    case 'r':
    {
      if (LocaleCompare(symbol,"resolution.x") == 0)
        return(image->x_resolution);
      if (LocaleCompare(symbol,"resolution.y") == 0)
        return(image->y_resolution);
      if (LocaleCompare(symbol,"r") == 0)
        return(QuantumScale*pixel.red);
      break;
    }
    case 'S':
    case 's':
    {
      if (LocaleCompare(symbol,"saturation") == 0)
        {
          double
            hue,
            lightness,
            saturation;
          ConvertRGBToHSL(ClampToQuantum(pixel.red),ClampToQuantum(pixel.green),
            ClampToQuantum(pixel.blue),&hue,&saturation,&lightness);
          return(saturation);
        }
      if (LocaleNCompare(symbol,"skewness",8) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      break;
    }
    case 'T':
    case 't':
    {
      if (LocaleCompare(symbol,"t") == 0)
        return((MagickRealType) GetImageIndexInList(fx_info->images));
      break;
    }
    case 'W':
    case 'w':
    {
      if (LocaleCompare(symbol,"w") == 0)
        return((MagickRealType) image->columns);
      break;
    }
    case 'Y':
    case 'y':
    {
      /* Yellow is stored in the blue channel (was pixel.green: bug). */
      if (LocaleCompare(symbol,"y") == 0)
        return(QuantumScale*pixel.blue);
      break;
    }
    case 'Z':
    case 'z':
    {
      if (LocaleCompare(symbol,"z") == 0)
        {
          MagickRealType
            depth;
          depth=(MagickRealType) GetImageChannelDepth(image,channel,
            fx_info->exception);
          return(depth);
        }
      break;
    }
    default:
      break;
  }
  /* Fall back to user-defined symbols cached in the symbol table. */
  value=(const char *) GetValueFromSplayTree(fx_info->symbols,symbol);
  if (value != (const char *) NULL)
    return((MagickRealType) StringToDouble(value,(char **) NULL));
  (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
    "UnableToParseExpression","`%s'",symbol);
  return(0.0);
}
/*
  FxOperatorPrecedence() scans an Fx expression and returns a pointer to the
  operator, at brace/bracket nesting level zero, at which the expression
  should be split for recursive evaluation (i.e. the operator that binds
  last), or NULL when no such operator is present.
*/
static const char *FxOperatorPrecedence(const char *expression,
ExceptionInfo *exception)
{
typedef enum
{
UndefinedPrecedence,
NullPrecedence,
BitwiseComplementPrecedence,
ExponentPrecedence,
ExponentialNotationPrecedence,
MultiplyPrecedence,
AdditionPrecedence,
ShiftPrecedence,
RelationalPrecedence,
EquivalencyPrecedence,
BitwiseAndPrecedence,
BitwiseOrPrecedence,
LogicalAndPrecedence,
LogicalOrPrecedence,
TernaryPrecedence,
AssignmentPrecedence,
CommaPrecedence,
SeparatorPrecedence
} FxPrecedence;
FxPrecedence
precedence,
target;
register const char
*subexpression;
register int
c;
size_t
level;
c=0;  /* previous significant character; 0 marks start of expression */
level=0;  /* current '{'/'[' nesting depth; only level-0 operators count */
subexpression=(const char *) NULL;
target=NullPrecedence;
while (*expression != '\0')
{
precedence=UndefinedPrecedence;
/* skip whitespace, and everything following the '@' file reference */
if ((isspace((int) ((char) *expression)) != 0) || (c == (int) '@'))
{
expression++;
continue;
}
/* skip over function names whose spelling embeds an operator character
(e.g. the '2' in "atan2", the '+'/'-' of scientific notation) */
switch (*expression)
{
case 'A':
case 'a':
{
#if defined(MAGICKCORE_HAVE_ACOSH)
if (LocaleNCompare(expression,"acosh",5) == 0)
{
expression+=5;
break;
}
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
if (LocaleNCompare(expression,"asinh",5) == 0)
{
expression+=5;
break;
}
#endif
#if defined(MAGICKCORE_HAVE_ATANH)
if (LocaleNCompare(expression,"atanh",5) == 0)
{
expression+=5;
break;
}
#endif
if (LocaleNCompare(expression,"atan2",5) == 0)
{
expression+=5;
break;
}
break;
}
case 'E':
case 'e':
{
if ((LocaleNCompare(expression,"E+",2) == 0) ||
(LocaleNCompare(expression,"E-",2) == 0))
{
expression+=2; /* scientific notation */
break;
}
}
/* NOTE(review): no break above — fall-through from 'E'/'e' into 'J'/'j'
appears intentional (non-scientific 'e' is handled like any letter) */
case 'J':
case 'j':
{
if ((LocaleNCompare(expression,"j0",2) == 0) ||
(LocaleNCompare(expression,"j1",2) == 0))
{
expression+=2;
break;
}
break;
}
case '#':
{
/* skip the digits of a hex constant */
while (isxdigit((int) ((unsigned char) *(expression+1))) != 0)
expression++;
break;
}
default:
break;
}
if ((c == (int) '{') || (c == (int) '['))
level++;
else
if ((c == (int) '}') || (c == (int) ']'))
level--;
if (level == 0)
switch ((unsigned char) *expression)
{
case '~':
case '!':
{
precedence=BitwiseComplementPrecedence;
break;
}
case '^':
case '@':
{
precedence=ExponentPrecedence;
break;
}
default:
{
/* implied multiplication: previous token ended an operand (digit or
')') and the current character starts a new one */
if (((c != 0) && ((isdigit((int) ((char) c)) != 0) ||
(strchr(")",c) != (char *) NULL))) &&
(((islower((int) ((char) *expression)) != 0) ||
(strchr("(",(int) *expression) != (char *) NULL)) ||
((isdigit((int) ((char) c)) == 0) &&
(isdigit((int) ((char) *expression)) != 0))) &&
(strchr("xy",(int) *expression) == (char *) NULL))
precedence=MultiplyPrecedence;
break;
}
case '*':
case '/':
case '%':
{
precedence=MultiplyPrecedence;
break;
}
case '+':
case '-':
{
/* binary only when the previous character ends an operand;
otherwise this is a unary sign */
if ((strchr("(+-/*%:&^|<>~,",c) == (char *) NULL) ||
(isalpha(c) != 0))
precedence=AdditionPrecedence;
break;
}
case LeftShiftOperator:
case RightShiftOperator:
{
precedence=ShiftPrecedence;
break;
}
case '<':
case LessThanEqualOperator:
case GreaterThanEqualOperator:
case '>':
{
precedence=RelationalPrecedence;
break;
}
case EqualOperator:
case NotEqualOperator:
{
precedence=EquivalencyPrecedence;
break;
}
case '&':
{
precedence=BitwiseAndPrecedence;
break;
}
case '|':
{
precedence=BitwiseOrPrecedence;
break;
}
case LogicalAndOperator:
{
precedence=LogicalAndPrecedence;
break;
}
case LogicalOrOperator:
{
precedence=LogicalOrPrecedence;
break;
}
case ExponentialNotation:
{
precedence=ExponentialNotationPrecedence;
break;
}
case ':':
case '?':
{
precedence=TernaryPrecedence;
break;
}
case '=':
{
precedence=AssignmentPrecedence;
break;
}
case ',':
{
precedence=CommaPrecedence;
break;
}
case ';':
{
precedence=SeparatorPrecedence;
break;
}
}
if ((precedence == BitwiseComplementPrecedence) ||
(precedence == TernaryPrecedence) ||
(precedence == AssignmentPrecedence))
{
if (precedence > target)
{
/*
Right-to-left associativity.
*/
target=precedence;
subexpression=expression;
}
}
else
if (precedence >= target)
{
/*
Left-to-right associativity.
*/
target=precedence;
subexpression=expression;
}
/* skip parenthesized subexpressions; operators inside do not split here */
if (strchr("(",(int) *expression) != (char *) NULL)
expression=FxSubexpression(expression,exception);
c=(int) (*expression++);
}
return(subexpression);
}
/*
  FxEvaluateSubexpression() recursively evaluates an Fx expression for the
  given channel at pixel (x,y).  The expression is split at the operator
  located by FxOperatorPrecedence(); the left side is evaluated into alpha,
  the right side into *beta, and the operator is applied.  Leaves are
  parenthesized groups, unary operators, built-in functions, named constants,
  symbols (via FxGetSymbol) and numeric literals.  Returns 0.0 once an
  exception has been raised.
*/
static MagickRealType FxEvaluateSubexpression(FxInfo *fx_info,
const ChannelType channel,const ssize_t x,const ssize_t y,
const char *expression,MagickRealType *beta,ExceptionInfo *exception)
{
char
*q,
subexpression[MaxTextExtent];
MagickRealType
alpha,
gamma;
register const char
*p;
*beta=0.0;
if (exception->severity != UndefinedException)
return(0.0);
while (isspace((int) *expression) != 0)
expression++;
if (*expression == '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"MissingExpression","`%s'",expression);
return(0.0);
}
*subexpression='\0';
p=FxOperatorPrecedence(expression,exception);
if (p != (const char *) NULL)
{
/* binary/ternary split: evaluate the left side first */
(void) CopyMagickString(subexpression,expression,(size_t)
(p-expression+1));
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,beta,
exception);
switch ((unsigned char) *p)
{
case '~':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
*beta=(MagickRealType) (~(size_t) *beta);
return(*beta);
}
case '!':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
return(*beta == 0.0 ? 1.0 : 0.0);
}
case '^':
{
*beta=pow((double) alpha,(double) FxEvaluateSubexpression(fx_info,
channel,x,y,++p,beta,exception));
return(*beta);
}
case '*':
case ExponentialNotation:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
return(alpha*(*beta));
}
case '/':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
if (*beta == 0.0)
{
if (exception->severity == UndefinedException)
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"DivideByZero","`%s'",expression);
return(0.0);
}
return(alpha/(*beta));
}
case '%':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
/* modulus is taken against the rounded absolute divisor */
*beta=fabs(floor(((double) *beta)+0.5));
if (*beta == 0.0)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"DivideByZero","`%s'",expression);
return(0.0);
}
return(fmod((double) alpha,(double) *beta));
}
case '+':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
return(alpha+(*beta));
}
case '-':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
return(alpha-(*beta));
}
case LeftShiftOperator:
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
*beta=(MagickRealType) ((size_t) (alpha+0.5) << (size_t)
(gamma+0.5));
return(*beta);
}
case RightShiftOperator:
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
*beta=(MagickRealType) ((size_t) (alpha+0.5) >> (size_t)
(gamma+0.5));
return(*beta);
}
case '<':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
return(alpha < *beta ? 1.0 : 0.0);
}
case LessThanEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
return(alpha <= *beta ? 1.0 : 0.0);
}
case '>':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
return(alpha > *beta ? 1.0 : 0.0);
}
case GreaterThanEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
return(alpha >= *beta ? 1.0 : 0.0);
}
case EqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
/* NOTE(review): "true" yields MagickEpsilon, not 1.0; this still tests
true under the |value| >= MagickEpsilon truthiness used by '?' below */
return(fabs(alpha-(*beta)) < MagickEpsilon ? MagickEpsilon : 0.0);
}
case NotEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
return(fabs(alpha-(*beta)) >= MagickEpsilon ? 1.0 : 0.0);
}
case '&':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
*beta=(MagickRealType) ((size_t) (alpha+0.5) & (size_t)
(gamma+0.5));
return(*beta);
}
case '|':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
*beta=(MagickRealType) ((size_t) (alpha+0.5) | (size_t)
(gamma+0.5));
return(*beta);
}
case LogicalAndOperator:
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
*beta=(alpha > 0.0) && (gamma > 0.0) ? 1.0 : 0.0;
return(*beta);
}
case LogicalOrOperator:
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
*beta=(alpha > 0.0) || (gamma > 0.0) ? 1.0 : 0.0;
return(*beta);
}
case '?':
{
MagickRealType
gamma;  /* shadows the outer gamma intentionally */
/* split "cond ? p : q" on the first ':' and evaluate one branch only */
(void) CopyMagickString(subexpression,++p,MaxTextExtent);
q=subexpression;
p=StringToken(":",&q);
if (q == (char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
return(0.0);
}
if (fabs((double) alpha) >= MagickEpsilon)
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,beta,exception);
else
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,q,beta,exception);
return(gamma);
}
case '=':
{
char
numeric[MaxTextExtent];
/* the assignment target (left side) must be purely alphabetic */
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
return(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
/* store the value as a string in the per-FxInfo symbol table */
(void) FormatLocaleString(numeric,MaxTextExtent,"%g",(double)
*beta);
(void) DeleteNodeFromSplayTree(fx_info->symbols,subexpression);
(void) AddValueToSplayTree(fx_info->symbols,ConstantString(
subexpression),ConstantString(numeric));
return(*beta);
}
case ',':
{
/* comma yields the LEFT operand; the right lands in *beta */
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
return(alpha);
}
case ';':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,exception);
return(*beta);
}
default:
{
/* implied multiplication (e.g. "2u") */
gamma=alpha*FxEvaluateSubexpression(fx_info,channel,x,y,p,beta,
exception);
return(gamma);
}
}
}
/* no top-level operator: strip surrounding parentheses and recurse */
if (strchr("(",(int) *expression) != (char *) NULL)
{
(void) CopyMagickString(subexpression,expression+1,MaxTextExtent);
subexpression[strlen(subexpression)-1]='\0';
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,beta,
exception);
return(gamma);
}
switch (*expression)
{
case '+':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,beta,
exception);
return(1.0*gamma);
}
case '-':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,beta,
exception);
return(-1.0*gamma);
}
case '~':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,beta,
exception);
return((MagickRealType) (~(size_t) (gamma+0.5)));
}
case 'A':
case 'a':
{
if (LocaleNCompare(expression,"abs",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
exception);
return((MagickRealType) fabs((double) alpha));
}
#if defined(MAGICKCORE_HAVE_ACOSH)
if (LocaleNCompare(expression,"acosh",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta,
exception);
return((MagickRealType) acosh((double) alpha));
}
#endif
if (LocaleNCompare(expression,"acos",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
exception);
return((MagickRealType) acos((double) alpha));
}
#if defined(MAGICKCORE_HAVE_J1)
if (LocaleNCompare(expression,"airy",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
exception);
if (alpha == 0.0)
return(1.0);
gamma=2.0*j1((double) (MagickPI*alpha))/(MagickPI*alpha);
return(gamma*gamma);
}
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
if (LocaleNCompare(expression,"asinh",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta,
exception);
return((MagickRealType) asinh((double) alpha));
}
#endif
if (LocaleNCompare(expression,"asin",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
exception);
return((MagickRealType) asin((double) alpha));
}
if (LocaleNCompare(expression,"alt",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
exception);
return(((ssize_t) alpha) & 0x01 ? -1.0 : 1.0);
}
if (LocaleNCompare(expression,"atan2",5) == 0)
{
/* the two comma-separated arguments arrive as alpha and *beta */
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta,
exception);
return((MagickRealType) atan2((double) alpha,(double) *beta));
}
#if defined(MAGICKCORE_HAVE_ATANH)
if (LocaleNCompare(expression,"atanh",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta,
exception);
return((MagickRealType) atanh((double) alpha));
}
#endif
if (LocaleNCompare(expression,"atan",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
exception);
return((MagickRealType) atan((double) alpha));
}
if (LocaleCompare(expression,"a") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'B':
case 'b':
{
if (LocaleCompare(expression,"b") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'C':
case 'c':
{
if (LocaleNCompare(expression,"ceil",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
exception);
return((MagickRealType) ceil((double) alpha));
}
if (LocaleNCompare(expression,"cosh",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
exception);
return((MagickRealType) cosh((double) alpha));
}
if (LocaleNCompare(expression,"cos",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
exception);
return((MagickRealType) cos((double) alpha));
}
if (LocaleCompare(expression,"c") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'D':
case 'd':
{
if (LocaleNCompare(expression,"debug",5) == 0)
{
const char
*type;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta,
exception);
if (fx_info->images->colorspace == CMYKColorspace)
switch (channel)
{
case CyanChannel: type="cyan"; break;
case MagentaChannel: type="magenta"; break;
case YellowChannel: type="yellow"; break;
case OpacityChannel: type="opacity"; break;
case BlackChannel: type="black"; break;
default: type="unknown"; break;
}
else
switch (channel)
{
case RedChannel: type="red"; break;
case GreenChannel: type="green"; break;
case BlueChannel: type="blue"; break;
case OpacityChannel: type="opacity"; break;
default: type="unknown"; break;
}
(void) CopyMagickString(subexpression,expression+6,MaxTextExtent);
if (strlen(subexpression) > 1)
subexpression[strlen(subexpression)-1]='\0';
/* fx_info->file is NULL during preprocessing; output is suppressed */
if (fx_info->file != (FILE *) NULL)
(void) FormatLocaleFile(fx_info->file,
"%s[%.20g,%.20g].%s: %s=%.*g\n",fx_info->images->filename,
(double) x,(double) y,type,subexpression,GetMagickPrecision(),
(double) alpha);
return(0.0);
}
if (LocaleNCompare(expression,"drc",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
exception);
return((MagickRealType) (alpha/(*beta*(alpha-1.0)+1.0)));
}
break;
}
case 'E':
case 'e':
{
if (LocaleCompare(expression,"epsilon") == 0)
return((MagickRealType) MagickEpsilon);
if (LocaleNCompare(expression,"exp",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
exception);
return((MagickRealType) exp((double) alpha));
}
if (LocaleCompare(expression,"e") == 0)
return((MagickRealType) 2.7182818284590452354);
break;
}
case 'F':
case 'f':
{
if (LocaleNCompare(expression,"floor",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta,
exception);
return((MagickRealType) floor((double) alpha));
}
break;
}
case 'G':
case 'g':
{
if (LocaleNCompare(expression,"gauss",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta,
exception);
gamma=exp((double) (-alpha*alpha/2.0))/sqrt(2.0*MagickPI);
return((MagickRealType) gamma);
}
if (LocaleNCompare(expression,"gcd",3) == 0)
{
MagickOffsetType
gcd;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
exception);
gcd=FxGCD((MagickOffsetType) (alpha+0.5),(MagickOffsetType)
(*beta+0.5));
return((MagickRealType) gcd);
}
if (LocaleCompare(expression,"g") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'H':
case 'h':
{
if (LocaleCompare(expression,"h") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
if (LocaleCompare(expression,"hue") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
if (LocaleNCompare(expression,"hypot",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta,
exception);
return((MagickRealType) hypot((double) alpha,(double) *beta));
}
break;
}
case 'K':
case 'k':
{
if (LocaleCompare(expression,"k") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'I':
case 'i':
{
if (LocaleCompare(expression,"intensity") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
if (LocaleNCompare(expression,"int",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
exception);
return((MagickRealType) floor(alpha));
}
#if defined(MAGICKCORE_HAVE_ISNAN)
if (LocaleNCompare(expression,"isnan",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta,
exception);
return((MagickRealType) !!isnan((double) alpha));
}
#endif
if (LocaleCompare(expression,"i") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'J':
case 'j':
{
if (LocaleCompare(expression,"j") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
#if defined(MAGICKCORE_HAVE_J0)
if (LocaleNCompare(expression,"j0",2) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,beta,
exception);
return((MagickRealType) j0((double) alpha));
}
#endif
#if defined(MAGICKCORE_HAVE_J1)
if (LocaleNCompare(expression,"j1",2) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,beta,
exception);
return((MagickRealType) j1((double) alpha));
}
#endif
#if defined(MAGICKCORE_HAVE_J1)
if (LocaleNCompare(expression,"jinc",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
exception);
if (alpha == 0.0)
return(1.0);
gamma=(MagickRealType) (2.0*j1((double) (MagickPI*alpha))/
(MagickPI*alpha));
return(gamma);
}
#endif
break;
}
case 'L':
case 'l':
{
if (LocaleNCompare(expression,"ln",2) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,beta,
exception);
return((MagickRealType) log((double) alpha));
}
if (LocaleNCompare(expression,"logtwo",6) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,beta,
exception);
/* despite the misleading parenthesization this parses as
return(log10(alpha)/log10(2.0)), i.e. log base 2 */
return((MagickRealType) log10((double) alpha))/log10(2.0);
}
if (LocaleNCompare(expression,"log",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
exception);
return((MagickRealType) log10((double) alpha));
}
if (LocaleCompare(expression,"lightness") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'M':
case 'm':
{
if (LocaleCompare(expression,"MaxRGB") == 0)
return((MagickRealType) QuantumRange);
/* "maxima"/"minima" are channel statistics; fall through to the
symbol lookup at the bottom of the function */
if (LocaleNCompare(expression,"maxima",6) == 0)
break;
if (LocaleNCompare(expression,"max",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
exception);
return(alpha > *beta ? alpha : *beta);
}
if (LocaleNCompare(expression,"minima",6) == 0)
break;
if (LocaleNCompare(expression,"min",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
exception);
return(alpha < *beta ? alpha : *beta);
}
if (LocaleNCompare(expression,"mod",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
exception);
gamma=alpha-floor((double) (alpha/(*beta)))*(*beta);
return(gamma);
}
if (LocaleCompare(expression,"m") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'N':
case 'n':
{
if (LocaleNCompare(expression,"not",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
exception);
return((MagickRealType) (alpha < MagickEpsilon));
}
if (LocaleCompare(expression,"n") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'O':
case 'o':
{
if (LocaleCompare(expression,"Opaque") == 0)
return(1.0);
if (LocaleCompare(expression,"o") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'P':
case 'p':
{
if (LocaleCompare(expression,"phi") == 0)
return((MagickRealType) MagickPHI);
if (LocaleCompare(expression,"pi") == 0)
return((MagickRealType) MagickPI);
if (LocaleNCompare(expression,"pow",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
exception);
return((MagickRealType) pow((double) alpha,(double) *beta));
}
if (LocaleCompare(expression,"p") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'Q':
case 'q':
{
if (LocaleCompare(expression,"QuantumRange") == 0)
return((MagickRealType) QuantumRange);
if (LocaleCompare(expression,"QuantumScale") == 0)
return((MagickRealType) QuantumScale);
break;
}
case 'R':
case 'r':
{
if (LocaleNCompare(expression,"rand",4) == 0)
return((MagickRealType) GetPseudoRandomValue(fx_info->random_info));
if (LocaleNCompare(expression,"round",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta,
exception);
return((MagickRealType) floor((double) alpha+0.5));
}
if (LocaleCompare(expression,"r") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'S':
case 's':
{
if (LocaleCompare(expression,"saturation") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
if (LocaleNCompare(expression,"sign",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
exception);
return(alpha < 0.0 ? -1.0 : 1.0);
}
if (LocaleNCompare(expression,"sinc",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
exception);
if (alpha == 0)
return(1.0);
gamma=(MagickRealType) (sin((double) (MagickPI*alpha))/
(MagickPI*alpha));
return(gamma);
}
if (LocaleNCompare(expression,"sinh",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
exception);
return((MagickRealType) sinh((double) alpha));
}
if (LocaleNCompare(expression,"sin",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
exception);
return((MagickRealType) sin((double) alpha));
}
if (LocaleNCompare(expression,"sqrt",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
exception);
return((MagickRealType) sqrt((double) alpha));
}
if (LocaleNCompare(expression,"squish",6) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,beta,
exception);
return((MagickRealType) (1.0/(1.0+exp((double) (-alpha)))));
}
if (LocaleCompare(expression,"s") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'T':
case 't':
{
if (LocaleNCompare(expression,"tanh",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
exception);
return((MagickRealType) tanh((double) alpha));
}
if (LocaleNCompare(expression,"tan",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
exception);
return((MagickRealType) tan((double) alpha));
}
if (LocaleCompare(expression,"Transparent") == 0)
return(0.0);
if (LocaleNCompare(expression,"trunc",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta,
exception);
if (alpha >= 0.0)
return((MagickRealType) floor((double) alpha));
return((MagickRealType) ceil((double) alpha));
}
if (LocaleCompare(expression,"t") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'U':
case 'u':
{
if (LocaleCompare(expression,"u") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'V':
case 'v':
{
if (LocaleCompare(expression,"v") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'W':
case 'w':
{
if (LocaleNCompare(expression,"while",5) == 0)
{
/* re-evaluates "(condition,body)" until the condition (alpha)
becomes false; the body's last value remains in *beta */
do
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta,
exception);
} while (fabs((double) alpha) >= MagickEpsilon);
return((MagickRealType) *beta);
}
if (LocaleCompare(expression,"w") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'Y':
case 'y':
{
if (LocaleCompare(expression,"y") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'Z':
case 'z':
{
if (LocaleCompare(expression,"z") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
default:
break;
}
/* not a function or constant: try a numeric literal with an optional SI
prefix; otherwise fall back to symbol lookup */
q=(char *) expression;
alpha=InterpretSiPrefixValue(expression,&q);
if (q == expression)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
return(alpha);
}
/*
  FxEvaluateExpression() evaluates the Fx expression at location (0,0) of the
  gray channel and stores the result in *alpha.  Returns MagickTrue unless an
  option error was raised while parsing the expression.
*/
MagickExport MagickBooleanType FxEvaluateExpression(FxInfo *fx_info,
  MagickRealType *alpha,ExceptionInfo *exception)
{
  return(FxEvaluateChannelExpression(fx_info,GrayChannel,0,0,alpha,exception));
}
/*
  FxPreprocessExpression() evaluates the Fx expression once with the debug
  stream detached, so any debug() output is suppressed; used to validate an
  expression before the per-pixel evaluation pass.
*/
MagickExport MagickBooleanType FxPreprocessExpression(FxInfo *fx_info,
  MagickRealType *alpha,ExceptionInfo *exception)
{
  FILE
    *saved_file;

  MagickBooleanType
    status;

  /* detach the debug stream for the duration of the evaluation */
  saved_file=fx_info->file;
  fx_info->file=(FILE *) NULL;
  status=FxEvaluateChannelExpression(fx_info,GrayChannel,0,0,alpha,exception);
  fx_info->file=saved_file;
  return(status);
}
/*
  FxEvaluateChannelExpression() evaluates the Fx expression for the given
  channel at pixel (x,y), storing the result in *alpha.  Returns MagickFalse
  when an option error was raised during evaluation.
*/
MagickExport MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info,
  const ChannelType channel,const ssize_t x,const ssize_t y,
  MagickRealType *alpha,ExceptionInfo *exception)
{
  MagickRealType
    beta=0.0;

  *alpha=FxEvaluateSubexpression(fx_info,channel,x,y,fx_info->expression,&beta,
    exception);
  if (exception->severity == OptionError)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FxImage() applies a mathematical expression to the specified image.
%
% The format of the FxImage method is:
%
% Image *FxImage(const Image *image,const char *expression,
% ExceptionInfo *exception)
% Image *FxImageChannel(const Image *image,const ChannelType channel,
% const char *expression,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o expression: A mathematical expression.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  DestroyFxThreadSet() releases every per-thread FxInfo in the set, then the
  set itself.  Always returns NULL, suitable for assignment back to the
  caller's pointer.
*/
static FxInfo **DestroyFxThreadSet(FxInfo **fx_info)
{
  register ssize_t
    n;

  assert(fx_info != (FxInfo **) NULL);
  for (n=0; n < (ssize_t) GetMagickResourceLimit(ThreadResource); n++)
  {
    if (fx_info[n] == (FxInfo *) NULL)
      continue;
    fx_info[n]=DestroyFxInfo(fx_info[n]);
  }
  return((FxInfo **) RelinquishMagickMemory(fx_info));
}
/*
  AcquireFxThreadSet() allocates one FxInfo per worker thread so expression
  evaluation can run in parallel without shared mutable state.  When the
  expression begins with '@' it is read from the named file.  Each instance
  is preprocessed once to validate the expression.  Returns NULL on
  allocation, file-read, or preprocessing failure (with the exception set).
*/
static FxInfo **AcquireFxThreadSet(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
  char
    *fx_expression;

  FxInfo
    **fx_info;

  MagickRealType
    alpha;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  fx_info=(FxInfo **) AcquireQuantumMemory(number_threads,sizeof(*fx_info));
  if (fx_info == (FxInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return((FxInfo **) NULL);
    }
  (void) ResetMagickMemory(fx_info,0,number_threads*sizeof(*fx_info));
  if (*expression != '@')
    fx_expression=ConstantString(expression);
  else
    fx_expression=FileToString(expression+1,~0,exception);
  if (fx_expression == (char *) NULL)
    {
      /* FileToString() failed (missing or unreadable file) and has already
        set the exception; without this check AcquireFxInfo() would be
        handed a NULL expression below. */
      fx_info=(FxInfo **) RelinquishMagickMemory(fx_info);
      return((FxInfo **) NULL);
    }
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    MagickBooleanType
      status;

    fx_info[i]=AcquireFxInfo(image,fx_expression);
    if (fx_info[i] == (FxInfo *) NULL)
      break;
    /* validate the expression once per instance, debug output suppressed */
    status=FxPreprocessExpression(fx_info[i],&alpha,exception);
    if (status == MagickFalse)
      break;
  }
  fx_expression=DestroyString(fx_expression);
  if (i < (ssize_t) number_threads)
    fx_info=DestroyFxThreadSet(fx_info);
  return(fx_info);
}
/*
  FxImage() applies the mathematical expression to the image using gray
  channel semantics; it is a convenience wrapper around FxImageChannel().
*/
MagickExport Image *FxImage(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
  return(FxImageChannel(image,GrayChannel,expression,exception));
}
/*
  FxImageChannel() clones the image and rewrites every pixel of the selected
  channels with the result of the Fx expression.  Rows are processed in
  parallel; each OpenMP thread evaluates with its own FxInfo instance,
  indexed by thread id.  Returns NULL (and destroys the clone) on failure.
*/
MagickExport Image *FxImageChannel(const Image *image,const ChannelType channel,
const char *expression,ExceptionInfo *exception)
{
#define FxImageTag "Fx/Image"
CacheView
*fx_view;
FxInfo
**restrict fx_info;
Image
*fx_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/* one FxInfo per thread; also validates the expression up front */
fx_info=AcquireFxThreadSet(image,expression,exception);
if (fx_info == (FxInfo **) NULL)
return((Image *) NULL);
fx_image=CloneImage(image,0,0,MagickTrue,exception);
if (fx_image == (Image *) NULL)
{
fx_info=DestroyFxThreadSet(fx_info);
return((Image *) NULL);
}
if (SetImageStorageClass(fx_image,DirectClass) == MagickFalse)
{
InheritException(exception,&fx_image->exception);
fx_info=DestroyFxThreadSet(fx_info);
fx_image=DestroyImage(fx_image);
return((Image *) NULL);
}
/*
Fx image.
*/
status=MagickTrue;
progress=0;
fx_view=AcquireAuthenticCacheView(fx_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,fx_image,fx_image->rows,1)
#endif
for (y=0; y < (ssize_t) fx_image->rows; y++)
{
const int
id = GetOpenMPThreadId();
MagickRealType
alpha;
register IndexPacket
*restrict fx_indexes;
register ssize_t
x;
register PixelPacket
*restrict q;
/* cannot break out of an OpenMP loop; skip remaining rows on failure */
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(fx_view,0,y,fx_image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
fx_indexes=GetCacheViewAuthenticIndexQueue(fx_view);
alpha=0.0;
for (x=0; x < (ssize_t) fx_image->columns; x++)
{
/* evaluate the expression once per selected channel; results are
scaled from [0,1] back to quantum range */
if ((channel & RedChannel) != 0)
{
(void) FxEvaluateChannelExpression(fx_info[id],RedChannel,x,y,
&alpha,exception);
SetPixelRed(q,ClampToQuantum((MagickRealType) QuantumRange*
alpha));
}
if ((channel & GreenChannel) != 0)
{
(void) FxEvaluateChannelExpression(fx_info[id],GreenChannel,x,y,
&alpha,exception);
SetPixelGreen(q,ClampToQuantum((MagickRealType) QuantumRange*
alpha));
}
if ((channel & BlueChannel) != 0)
{
(void) FxEvaluateChannelExpression(fx_info[id],BlueChannel,x,y,
&alpha,exception);
SetPixelBlue(q,ClampToQuantum((MagickRealType) QuantumRange*
alpha));
}
if ((channel & OpacityChannel) != 0)
{
(void) FxEvaluateChannelExpression(fx_info[id],OpacityChannel,x,y,
&alpha,exception);
/* opacity is stored inverted when the image carries a matte */
if (image->matte == MagickFalse)
SetPixelOpacity(q,ClampToQuantum((MagickRealType)
QuantumRange*alpha));
else
SetPixelOpacity(q,ClampToQuantum((MagickRealType)
(QuantumRange-QuantumRange*alpha)));
}
if (((channel & IndexChannel) != 0) &&
(fx_image->colorspace == CMYKColorspace))
{
(void) FxEvaluateChannelExpression(fx_info[id],IndexChannel,x,y,
&alpha,exception);
SetPixelIndex(fx_indexes+x,ClampToQuantum((MagickRealType)
QuantumRange*alpha));
}
q++;
}
if (SyncCacheViewAuthenticPixels(fx_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* serialize progress updates across threads */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_FxImageChannel)
#endif
proceed=SetImageProgress(image,FxImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
fx_view=DestroyCacheView(fx_view);
fx_info=DestroyFxThreadSet(fx_info);
if (status == MagickFalse)
fx_image=DestroyImage(fx_image);
return(fx_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I m p l o d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ImplodeImage() creates a new image that is a copy of an existing
% one with the image pixels imploded by the specified percentage. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ImplodeImage method is:
%
% Image *ImplodeImage(const Image *image,const double amount,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o implode_image: Method ImplodeImage returns a pointer to the image
% after it is imploded. A null image is returned if there is a memory
% shortage.
%
% o image: the image.
%
% o amount: Define the extent of the implosion.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ImplodeImage(const Image *image,const double amount,
  ExceptionInfo *exception)
{
#define ImplodeImageTag "Implode/Image"

  CacheView
    *image_view,
    *implode_view;

  Image
    *implode_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  MagickRealType
    radius;

  PointInfo
    center,
    scale;

  ssize_t
    y;

  /*
    Initialize implode image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  implode_image=CloneImage(image,0,0,MagickTrue,exception);
  if (implode_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(implode_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&implode_image->exception);
      implode_image=DestroyImage(implode_image);
      return((Image *) NULL);
    }
  /* A non-opaque background implies the result needs an alpha channel. */
  if (implode_image->background_color.opacity != OpaqueOpacity)
    implode_image->matte=MagickTrue;
  /*
    Compute scaling factor: normalize the image onto an ellipse centered at
    (center.x,center.y); radius is taken along the shorter dimension.
  */
  scale.x=1.0;
  scale.y=1.0;
  center.x=0.5*image->columns;
  center.y=0.5*image->rows;
  radius=center.x;
  if (image->columns > image->rows)
    scale.y=(double) image->columns/(double) image->rows;
  else
    if (image->columns < image->rows)
      {
        scale.x=(double) image->rows/(double) image->columns;
        radius=center.y;
      }
  /*
    Implode image: every destination pixel inside the ellipse samples the
    source at a radially remapped position.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(implode_image,&zero);
  image_view=AcquireVirtualCacheView(image,exception);
  implode_view=AcquireAuthenticCacheView(implode_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,implode_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    MagickRealType
      distance;

    PointInfo
      delta;

    register IndexPacket
      *restrict implode_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    /* A failure in any thread cancels the remaining rows. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(implode_view,0,y,implode_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    implode_indexes=GetCacheViewAuthenticIndexQueue(implode_view);
    delta.y=scale.y*(double) (y-center.y);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Determine if the pixel is within an ellipse.
      */
      delta.x=scale.x*(double) (x-center.x);
      distance=delta.x*delta.x+delta.y*delta.y;
      if (distance < (radius*radius))
        {
          double
            factor;

          /*
            Implode the pixel: the warp factor grows with amount and with
            proximity to the ellipse edge; pixels outside are untouched.
          */
          factor=1.0;
          if (distance > 0.0)
            factor=pow(sin((double) (MagickPI*sqrt((double) distance)/
              radius/2)),-amount);
          (void) InterpolateMagickPixelPacket(image,image_view,
            UndefinedInterpolatePixel,(double) (factor*delta.x/scale.x+
            center.x),(double) (factor*delta.y/scale.y+center.y),&pixel,
            exception);
          SetPixelPacket(implode_image,&pixel,q,implode_indexes+x);
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(implode_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        /* Serialize the shared progress counter across threads. */
        #pragma omp critical (MagickCore_ImplodeImage)
#endif
        proceed=SetImageProgress(image,ImplodeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  implode_view=DestroyCacheView(implode_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    implode_image=DestroyImage(implode_image);
  return(implode_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The MorphImages() method requires a minimum of two images. The first
% image is transformed into the second by a number of intervening images
% as specified by frames.
%
% The format of the MorphImage method is:
%
% Image *MorphImages(const Image *image,const size_t number_frames,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o number_frames: Define the number of in-between image to generate.
% The more in-between frames, the smoother the morph.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  MorphImages() transforms the first image in the list into the second (and
  each subsequent pair) through number_frames intermediate frames produced by
  cross-resizing and alpha-blending adjacent frames.

  Fixes relative to the previous revision:
    - `status` is initialized before the single-image branch, which could
      previously write it before its first assignment.
    - On SetImageStorageClass() failure the partially built morph_images
      list is now destroyed instead of leaking.
*/
MagickExport Image *MorphImages(const Image *image,
  const size_t number_frames,ExceptionInfo *exception)
{
#define MorphImageTag "Morph/Image"

  Image
    *morph_image,
    *morph_images;

  MagickBooleanType
    status;

  MagickOffsetType
    scene;

  MagickRealType
    alpha,
    beta;

  register const Image
    *next;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Clone first frame in sequence.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  morph_images=CloneImage(image,0,0,MagickTrue,exception);
  if (morph_images == (Image *) NULL)
    return((Image *) NULL);
  status=MagickTrue;
  if (GetNextImageInList(image) == (Image *) NULL)
    {
      /*
        Morph single image: just replicate it number_frames times.
      */
      for (i=1; i < (ssize_t) number_frames; i++)
      {
        morph_image=CloneImage(image,0,0,MagickTrue,exception);
        if (morph_image == (Image *) NULL)
          {
            morph_images=DestroyImageList(morph_images);
            return((Image *) NULL);
          }
        AppendImageToList(&morph_images,morph_image);
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,MorphImageTag,(MagickOffsetType) i,
              number_frames);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      return(GetFirstImageInList(morph_images));
    }
  /*
    Morph image sequence.
  */
  scene=0;
  next=image;
  for ( ; GetNextImageInList(next) != (Image *) NULL; next=GetNextImageInList(next))
  {
    for (i=0; i < (ssize_t) number_frames; i++)
    {
      CacheView
        *image_view,
        *morph_view;

      /* beta ramps from ~0 to ~1 across the in-between frames. */
      beta=(MagickRealType) (i+1.0)/(MagickRealType) (number_frames+1.0);
      alpha=1.0-beta;
      morph_image=ResizeImage(next,(size_t) (alpha*next->columns+beta*
        GetNextImageInList(next)->columns+0.5),(size_t) (alpha*
        next->rows+beta*GetNextImageInList(next)->rows+0.5),
        next->filter,next->blur,exception);
      if (morph_image == (Image *) NULL)
        {
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      if (SetImageStorageClass(morph_image,DirectClass) == MagickFalse)
        {
          InheritException(exception,&morph_image->exception);
          morph_image=DestroyImage(morph_image);
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      AppendImageToList(&morph_images,morph_image);
      morph_images=GetLastImageInList(morph_images);
      /* Resize the following frame to the in-between geometry for blending. */
      morph_image=ResizeImage(GetNextImageInList(next),morph_images->columns,
        morph_images->rows,GetNextImageInList(next)->filter,
        GetNextImageInList(next)->blur,exception);
      if (morph_image == (Image *) NULL)
        {
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      image_view=AcquireVirtualCacheView(morph_image,exception);
      morph_view=AcquireAuthenticCacheView(morph_images,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_threads(morph_image,morph_image,morph_image->rows,1)
#endif
      for (y=0; y < (ssize_t) morph_images->rows; y++)
      {
        MagickBooleanType
          sync;

        register const PixelPacket
          *restrict p;

        register ssize_t
          x;

        register PixelPacket
          *restrict q;

        if (status == MagickFalse)
          continue;
        p=GetCacheViewVirtualPixels(image_view,0,y,morph_image->columns,1,
          exception);
        q=GetCacheViewAuthenticPixels(morph_view,0,y,morph_images->columns,1,
          exception);
        if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) morph_images->columns; x++)
        {
          /* Per-channel linear blend of the two resized frames. */
          SetPixelRed(q,ClampToQuantum(alpha*
            GetPixelRed(q)+beta*GetPixelRed(p)));
          SetPixelGreen(q,ClampToQuantum(alpha*
            GetPixelGreen(q)+beta*GetPixelGreen(p)));
          SetPixelBlue(q,ClampToQuantum(alpha*
            GetPixelBlue(q)+beta*GetPixelBlue(p)));
          SetPixelOpacity(q,ClampToQuantum(alpha*
            GetPixelOpacity(q)+beta*GetPixelOpacity(p)));
          p++;
          q++;
        }
        sync=SyncCacheViewAuthenticPixels(morph_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      morph_view=DestroyCacheView(morph_view);
      image_view=DestroyCacheView(image_view);
      morph_image=DestroyImage(morph_image);
    }
    if (i < (ssize_t) number_frames)
      break;
    /*
      Clone last frame in sequence.
    */
    morph_image=CloneImage(GetNextImageInList(next),0,0,MagickTrue,exception);
    if (morph_image == (Image *) NULL)
      {
        morph_images=DestroyImageList(morph_images);
        return((Image *) NULL);
      }
    AppendImageToList(&morph_images,morph_image);
    morph_images=GetLastImageInList(morph_images);
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_MorphImages)
#endif
        proceed=SetImageProgress(image,MorphImageTag,scene,
          GetImageListLength(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
    scene++;
  }
  if (GetNextImageInList(next) != (Image *) NULL)
    {
      morph_images=DestroyImageList(morph_images);
      return((Image *) NULL);
    }
  return(GetFirstImageInList(morph_images));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P l a s m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PlasmaImage() initializes an image with plasma fractal values. The image
% must be initialized with a base color and the random number generator
% seeded before this method is called.
%
% The format of the PlasmaImage method is:
%
% MagickBooleanType PlasmaImage(Image *image,const SegmentInfo *segment,
% size_t attenuate,size_t depth)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o segment: Define the region to apply plasma fractals values.
%
% o attenuate: Define the plasma attenuation factor.
%
% o depth: Limit the plasma recursion depth.
%
*/
/*
  Perturb the given pixel value by a pseudo-random offset of magnitude
  `noise` centered on zero, clamped to the valid quantum range.
*/
static inline Quantum PlasmaPixel(RandomInfo *random_info,
  const MagickRealType pixel,const MagickRealType noise)
{
  MagickRealType
    perturbed;

  perturbed=pixel+noise*GetPseudoRandomValue(random_info)-noise/2.0;
  return(ClampToQuantum(perturbed));
}
/*
  PlasmaImageProxy() recursively subdivides the given segment into quadrants,
  then seeds the left/right/top/bottom edge midpoints and the center pixel
  with noise-perturbed averages of the segment corners.  Returns MagickTrue
  when the segment is fully resolved (too small to subdivide further).

  Fix: the "Middle pixel" block read the first corner with
  GetOneVirtualPixel(image,...), bypassing the image_view cache view used by
  every other read in this function; it now reads through the view.
*/
MagickExport MagickBooleanType PlasmaImageProxy(Image *image,
  CacheView *image_view,RandomInfo *random_info,const SegmentInfo *segment,
  size_t attenuate,size_t depth)
{
  ExceptionInfo
    *exception;

  MagickRealType
    plasma;

  PixelPacket
    u,
    v;

  ssize_t
    x,
    x_mid,
    y,
    y_mid;

  /* Degenerate (single-point) segment: nothing to do. */
  if (((segment->x2-segment->x1) == 0.0) && ((segment->y2-segment->y1) == 0.0))
    return(MagickTrue);
  if (depth != 0)
    {
      SegmentInfo
        local_info;

      /*
        Divide the area into quadrants and recurse.  Attenuation grows with
        depth so the noise amplitude shrinks for smaller segments.
      */
      depth--;
      attenuate++;
      x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5);
      y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5);
      local_info=(*segment);
      local_info.x2=(double) x_mid;
      local_info.y2=(double) y_mid;
      (void) PlasmaImageProxy(image,image_view,random_info,&local_info,
        attenuate,depth);
      local_info=(*segment);
      local_info.y1=(double) y_mid;
      local_info.x2=(double) x_mid;
      (void) PlasmaImageProxy(image,image_view,random_info,&local_info,
        attenuate,depth);
      local_info=(*segment);
      local_info.x1=(double) x_mid;
      local_info.y2=(double) y_mid;
      (void) PlasmaImageProxy(image,image_view,random_info,&local_info,
        attenuate,depth);
      local_info=(*segment);
      local_info.x1=(double) x_mid;
      local_info.y1=(double) y_mid;
      return(PlasmaImageProxy(image,image_view,random_info,&local_info,
        attenuate,depth));
    }
  x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5);
  y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5);
  if ((segment->x1 == (double) x_mid) && (segment->x2 == (double) x_mid) &&
      (segment->y1 == (double) y_mid) && (segment->y2 == (double) y_mid))
    return(MagickFalse);
  /*
    Average pixels and apply plasma.
  */
  exception=(&image->exception);
  plasma=(MagickRealType) QuantumRange/(2.0*attenuate);
  if ((segment->x1 != (double) x_mid) || (segment->x2 != (double) x_mid))
    {
      register PixelPacket
        *restrict q;

      /*
        Left pixel.
      */
      x=(ssize_t) ceil(segment->x1-0.5);
      (void) GetOneCacheViewVirtualPixel(image_view,x,(ssize_t)
        ceil(segment->y1-0.5),&u,exception);
      (void) GetOneCacheViewVirtualPixel(image_view,x,(ssize_t)
        ceil(segment->y2-0.5),&v,exception);
      q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
      if (q == (PixelPacket *) NULL)
        return(MagickTrue);
      SetPixelRed(q,PlasmaPixel(random_info,(MagickRealType)
        (u.red+v.red)/2.0,plasma));
      SetPixelGreen(q,PlasmaPixel(random_info,(MagickRealType)
        (u.green+v.green)/2.0,plasma));
      SetPixelBlue(q,PlasmaPixel(random_info,(MagickRealType)
        (u.blue+v.blue)/2.0,plasma));
      (void) SyncCacheViewAuthenticPixels(image_view,exception);
      if (segment->x1 != segment->x2)
        {
          /*
            Right pixel.
          */
          x=(ssize_t) ceil(segment->x2-0.5);
          (void) GetOneCacheViewVirtualPixel(image_view,x,(ssize_t)
            ceil(segment->y1-0.5),&u,exception);
          (void) GetOneCacheViewVirtualPixel(image_view,x,(ssize_t)
            ceil(segment->y2-0.5),&v,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
          if (q == (PixelPacket *) NULL)
            return(MagickTrue);
          SetPixelRed(q,PlasmaPixel(random_info,(MagickRealType)
            (u.red+v.red)/2.0,plasma));
          SetPixelGreen(q,PlasmaPixel(random_info,(MagickRealType)
            (u.green+v.green)/2.0,plasma));
          SetPixelBlue(q,PlasmaPixel(random_info,(MagickRealType)
            (u.blue+v.blue)/2.0,plasma));
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
        }
    }
  if ((segment->y1 != (double) y_mid) || (segment->y2 != (double) y_mid))
    {
      if ((segment->x1 != (double) x_mid) || (segment->y2 != (double) y_mid))
        {
          register PixelPacket
            *restrict q;

          /*
            Bottom pixel.
          */
          y=(ssize_t) ceil(segment->y2-0.5);
          (void) GetOneCacheViewVirtualPixel(image_view,(ssize_t)
            ceil(segment->x1-0.5),y,&u,exception);
          (void) GetOneCacheViewVirtualPixel(image_view,(ssize_t)
            ceil(segment->x2-0.5),y,&v,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
          if (q == (PixelPacket *) NULL)
            return(MagickTrue);
          SetPixelRed(q,PlasmaPixel(random_info,(MagickRealType)
            (u.red+v.red)/2.0,plasma));
          SetPixelGreen(q,PlasmaPixel(random_info,(MagickRealType)
            (u.green+v.green)/2.0,plasma));
          SetPixelBlue(q,PlasmaPixel(random_info,(MagickRealType)
            (u.blue+v.blue)/2.0,plasma));
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
        }
      if (segment->y1 != segment->y2)
        {
          register PixelPacket
            *restrict q;

          /*
            Top pixel.
          */
          y=(ssize_t) ceil(segment->y1-0.5);
          (void) GetOneCacheViewVirtualPixel(image_view,(ssize_t)
            ceil(segment->x1-0.5),y,&u,exception);
          (void) GetOneCacheViewVirtualPixel(image_view,(ssize_t)
            ceil(segment->x2-0.5),y,&v,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
          if (q == (PixelPacket *) NULL)
            return(MagickTrue);
          SetPixelRed(q,PlasmaPixel(random_info,(MagickRealType)
            (u.red+v.red)/2.0,plasma));
          SetPixelGreen(q,PlasmaPixel(random_info,(MagickRealType)
            (u.green+v.green)/2.0,plasma));
          SetPixelBlue(q,PlasmaPixel(random_info,(MagickRealType)
            (u.blue+v.blue)/2.0,plasma));
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
        }
    }
  if ((segment->x1 != segment->x2) || (segment->y1 != segment->y2))
    {
      register PixelPacket
        *restrict q;

      /*
        Middle pixel: average the segment's two opposite corners.
      */
      x=(ssize_t) ceil(segment->x1-0.5);
      y=(ssize_t) ceil(segment->y1-0.5);
      (void) GetOneCacheViewVirtualPixel(image_view,x,y,&u,exception);
      x=(ssize_t) ceil(segment->x2-0.5);
      y=(ssize_t) ceil(segment->y2-0.5);
      (void) GetOneCacheViewVirtualPixel(image_view,x,y,&v,exception);
      q=QueueCacheViewAuthenticPixels(image_view,x_mid,y_mid,1,1,exception);
      if (q == (PixelPacket *) NULL)
        return(MagickTrue);
      SetPixelRed(q,PlasmaPixel(random_info,(MagickRealType)
        (u.red+v.red)/2.0,plasma));
      SetPixelGreen(q,PlasmaPixel(random_info,(MagickRealType)
        (u.green+v.green)/2.0,plasma));
      SetPixelBlue(q,PlasmaPixel(random_info,(MagickRealType)
        (u.blue+v.blue)/2.0,plasma));
      (void) SyncCacheViewAuthenticPixels(image_view,exception);
    }
  if (((segment->x2-segment->x1) < 3.0) && ((segment->y2-segment->y1) < 3.0))
    return(MagickTrue);
  return(MagickFalse);
}
/*
  PlasmaImage() initializes an image with plasma fractal values via
  PlasmaImageProxy().  Returns the status of the recursive fill.

  Fixes relative to the previous revision: `image` was dereferenced
  (image->debug) before the NULL assert, and the debug logging block was
  duplicated; the asserts now run first and the log is emitted once.
*/
MagickExport MagickBooleanType PlasmaImage(Image *image,
  const SegmentInfo *segment,size_t attenuate,size_t depth)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  RandomInfo
    *random_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  image_view=AcquireVirtualCacheView(image,&image->exception);
  random_info=AcquireRandomInfo();
  status=PlasmaImageProxy(image,image_view,random_info,segment,attenuate,depth);
  random_info=DestroyRandomInfo(random_info);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o l a r o i d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PolaroidImage() simulates a Polaroid picture.
%
% The format of the PolaroidImage method is:
%
% Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
% const double angle,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o angle: Apply the effect along this angle.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
  const double angle,ExceptionInfo *exception)
{
  const char
    *value;

  Image
    *bend_image,
    *caption_image,
    *flop_image,
    *picture_image,
    *polaroid_image,
    *rotate_image,
    *trim_image;

  size_t
    height;

  ssize_t
    quantum;

  /*
    Simulate a Polaroid picture.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /* Border width: 1/25th of the longer dimension, but at least 10 pixels. */
  quantum=(ssize_t) MagickMax(MagickMax((double) image->columns,(double)
    image->rows)/25.0,10.0);
  height=image->rows+2*quantum;
  caption_image=(Image *) NULL;
  /* An optional "Caption" property is rendered below the picture. */
  value=GetImageProperty(image,"Caption");
  if (value != (const char *) NULL)
    {
      char
        *caption,
        geometry[MaxTextExtent];

      DrawInfo
        *annotate_info;

      MagickBooleanType
        status;

      ssize_t
        count;

      TypeMetric
        metrics;

      /*
        Generate caption image.
      */
      caption_image=CloneImage(image,image->columns,1,MagickTrue,exception);
      if (caption_image == (Image *) NULL)
        return((Image *) NULL);
      annotate_info=CloneDrawInfo((const ImageInfo *) NULL,draw_info);
      caption=InterpretImageProperties((ImageInfo *) NULL,(Image *) image,
        value);
      (void) CloneString(&annotate_info->text,caption);
      /* Word-wrap the caption and measure how many lines it needs. */
      count=FormatMagickCaption(caption_image,annotate_info,MagickTrue,&metrics,
        &caption);
      status=SetImageExtent(caption_image,image->columns,(size_t)
        ((count+1)*(metrics.ascent-metrics.descent)+0.5));
      if (status == MagickFalse)
        caption_image=DestroyImage(caption_image);
      else
        {
          caption_image->background_color=image->border_color;
          (void) SetImageBackgroundColor(caption_image);
          (void) CloneString(&annotate_info->text,caption);
          (void) FormatLocaleString(geometry,MaxTextExtent,"+0+%g",
            metrics.ascent);
          if (annotate_info->gravity == UndefinedGravity)
            (void) CloneString(&annotate_info->geometry,AcquireString(
              geometry));
          (void) AnnotateImage(caption_image,annotate_info);
          height+=caption_image->rows;
        }
      annotate_info=DestroyDrawInfo(annotate_info);
      caption=DestroyString(caption);
    }
  /* Compose the picture onto a bordered canvas (plus caption space). */
  picture_image=CloneImage(image,image->columns+2*quantum,height,MagickTrue,
    exception);
  if (picture_image == (Image *) NULL)
    {
      if (caption_image != (Image *) NULL)
        caption_image=DestroyImage(caption_image);
      return((Image *) NULL);
    }
  picture_image->background_color=image->border_color;
  (void) SetImageBackgroundColor(picture_image);
  (void) CompositeImage(picture_image,OverCompositeOp,image,quantum,quantum);
  if (caption_image != (Image *) NULL)
    {
      (void) CompositeImage(picture_image,OverCompositeOp,caption_image,
        quantum,(ssize_t) (image->rows+3*quantum/2));
      caption_image=DestroyImage(caption_image);
    }
  (void) QueryColorDatabase("none",&picture_image->background_color,exception);
  (void) SetImageAlphaChannel(picture_image,OpaqueAlphaChannel);
  /*
    Bend the picture: rotate 90, apply a gentle wave, rotate back.  Each
    step takes ownership of the previous image and destroys it.
  */
  rotate_image=RotateImage(picture_image,90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  bend_image=WaveImage(picture_image,0.01*picture_image->rows,2.0*
    picture_image->columns,exception);
  picture_image=DestroyImage(picture_image);
  if (bend_image == (Image *) NULL)
    return((Image *) NULL);
  InheritException(&bend_image->exception,exception);
  picture_image=bend_image;
  rotate_image=RotateImage(picture_image,-90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  picture_image->background_color=image->background_color;
  /* Cast a soft drop shadow behind the bent picture. */
  polaroid_image=ShadowImage(picture_image,80.0,2.0,quantum/3,quantum/3,
    exception);
  if (polaroid_image == (Image *) NULL)
    {
      picture_image=DestroyImage(picture_image);
      return(picture_image);
    }
  flop_image=FlopImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (flop_image == (Image *) NULL)
    {
      picture_image=DestroyImage(picture_image);
      return(picture_image);
    }
  polaroid_image=flop_image;
  (void) CompositeImage(polaroid_image,OverCompositeOp,picture_image,
    (ssize_t) (-0.01*picture_image->columns/2.0),0L);
  picture_image=DestroyImage(picture_image);
  (void) QueryColorDatabase("none",&polaroid_image->background_color,exception);
  /* Final rotation by the requested angle, then trim excess border. */
  rotate_image=RotateImage(polaroid_image,angle,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=rotate_image;
  trim_image=TrimImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (trim_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=trim_image;
  return(polaroid_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p i a T o n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SepiaToneImage() applies a special effect to the image, similar to the
% effect achieved in a photo darkroom by sepia toning. Threshold ranges from
% 0 to QuantumRange and is a measure of the extent of the sepia toning. A
% threshold of 80% is a good starting point for a reasonable tone.
%
% The format of the SepiaToneImage method is:
%
% Image *SepiaToneImage(const Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: the tone threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SepiaToneImage(const Image *image,const double threshold,
  ExceptionInfo *exception)
{
#define SepiaToneImageTag "SepiaTone/Image"

  CacheView
    *image_view,
    *sepia_view;

  Image
    *sepia_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Initialize sepia-toned image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  sepia_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (sepia_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(sepia_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&sepia_image->exception);
      sepia_image=DestroyImage(sepia_image);
      return((Image *) NULL);
    }
  /*
    Tone each row of the image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  sepia_view=AcquireAuthenticCacheView(sepia_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,sepia_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(sepia_view,0,y,sepia_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        intensity,
        tone;

      /*
        Map intensity to shifted red/green/blue tones; each channel uses
        a different fraction of the threshold to produce the sepia tint.
      */
      intensity=GetPixelIntensity(image,p);
      tone=intensity > threshold ? (MagickRealType) QuantumRange : intensity+
        (MagickRealType) QuantumRange-threshold;
      SetPixelRed(q,ClampToQuantum(tone));
      tone=intensity > (7.0*threshold/6.0) ? (MagickRealType) QuantumRange :
        intensity+(MagickRealType) QuantumRange-7.0*threshold/6.0;
      SetPixelGreen(q,ClampToQuantum(tone));
      tone=intensity < (threshold/6.0) ? 0 : intensity-threshold/6.0;
      SetPixelBlue(q,ClampToQuantum(tone));
      /* Floor green/blue so shadows keep a warm tint. */
      tone=threshold/7.0;
      if ((MagickRealType) GetPixelGreen(q) < tone)
        SetPixelGreen(q,ClampToQuantum(tone));
      if ((MagickRealType) GetPixelBlue(q) < tone)
        SetPixelBlue(q,ClampToQuantum(tone));
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(sepia_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SepiaToneImage)
#endif
        proceed=SetImageProgress(image,SepiaToneImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  sepia_view=DestroyCacheView(sepia_view);
  image_view=DestroyCacheView(image_view);
  /* Stretch contrast to finish the darkroom look. */
  (void) NormalizeImage(sepia_image);
  (void) ContrastImage(sepia_image,MagickTrue);
  if (status == MagickFalse)
    sepia_image=DestroyImage(sepia_image);
  return(sepia_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a d o w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShadowImage() simulates a shadow from the specified image and returns it.
%
% The format of the ShadowImage method is:
%
% Image *ShadowImage(const Image *image,const double opacity,
% const double sigma,const ssize_t x_offset,const ssize_t y_offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o opacity: percentage transparency.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o x_offset: the shadow x-offset.
%
% o y_offset: the shadow y-offset.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShadowImage(const Image *image,const double opacity,
  const double sigma,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
#define ShadowImageTag "Shadow/Image"

  CacheView
    *image_view;

  Image
    *border_image,
    *clone_image,
    *shadow_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    border_info;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(clone_image,sRGBColorspace);
  (void) SetImageVirtualPixelMethod(clone_image,EdgeVirtualPixelMethod);
  clone_image->compose=OverCompositeOp;
  /*
    Pad the clone with a transparent border wide enough for the blur kernel.
  */
  border_info.width=(size_t) floor(2.0*sigma+0.5);
  border_info.height=(size_t) floor(2.0*sigma+0.5);
  border_info.x=0;
  border_info.y=0;
  (void) QueryColorDatabase("none",&clone_image->border_color,exception);
  border_image=BorderImage(clone_image,&border_info,exception);
  clone_image=DestroyImage(clone_image);
  if (border_image == (Image *) NULL)
    return((Image *) NULL);
  if (border_image->matte == MagickFalse)
    (void) SetImageAlphaChannel(border_image,OpaqueAlphaChannel);
  /*
    Shadow image: flatten every pixel to the background color, scaling its
    alpha by the requested opacity percentage to get a translucent silhouette.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(border_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(border_image,border_image,border_image->rows,1)
#endif
  for (y=0; y < (ssize_t) border_image->rows; y++)
  {
    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,border_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) border_image->columns; x++)
    {
      SetPixelRed(q,border_image->background_color.red);
      SetPixelGreen(q,border_image->background_color.green);
      SetPixelBlue(q,border_image->background_color.blue);
      if (border_image->matte == MagickFalse)
        SetPixelOpacity(q,border_image->background_color.opacity);
      else
        SetPixelOpacity(q,ClampToQuantum((MagickRealType)
          (QuantumRange-GetPixelAlpha(q)*opacity/100.0)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ShadowImage)
#endif
        proceed=SetImageProgress(image,ShadowImageTag,progress++,
          border_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /* Soften the silhouette by blurring only the alpha channel. */
  shadow_image=BlurImageChannel(border_image,AlphaChannel,0.0,sigma,exception);
  border_image=DestroyImage(border_image);
  if (shadow_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Adjust page geometry so the shadow lands at (x_offset,y_offset) relative
    to the original image when composited, compensating for the added border.
  */
  if (shadow_image->page.width == 0)
    shadow_image->page.width=shadow_image->columns;
  if (shadow_image->page.height == 0)
    shadow_image->page.height=shadow_image->rows;
  shadow_image->page.width+=x_offset-(ssize_t) border_info.width;
  shadow_image->page.height+=y_offset-(ssize_t) border_info.height;
  shadow_image->page.x+=x_offset-(ssize_t) border_info.width;
  shadow_image->page.y+=y_offset-(ssize_t) border_info.height;
  return(shadow_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S k e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SketchImage() simulates a pencil sketch. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma). For
% reasonable results, radius should be larger than sigma. Use a radius of 0
% and SketchImage() selects a suitable radius for you. Angle gives the angle
% of the sketch.
%
% The format of the SketchImage method is:
%
% Image *SketchImage(const Image *image,const double radius,
% const double sigma,const double angle,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting
% the center pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o angle: Apply the effect along this angle.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SketchImage(const Image *image,const double radius,
  const double sigma,const double angle,ExceptionInfo *exception)
{
  CacheView
    *random_view;
  Image
    *blend_image,
    *blur_image,
    *dodge_image,
    *random_image,
    *sketch_image;
  MagickBooleanType
    status;
  MagickPixelPacket
    zero;
  RandomInfo
    **restrict random_info;  /* one RNG state per OpenMP thread */
  ssize_t
    y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif
  /*
    Sketch image: fill a double-size canvas with per-pixel gray noise,
    then derive pencil strokes from it (see pipeline comment below).
  */
  random_image=CloneImage(image,image->columns << 1,image->rows << 1,
    MagickTrue,exception);
  if (random_image == (Image *) NULL)
    return((Image *) NULL);
  status=MagickTrue;
  GetMagickPixelPacket(random_image,&zero);
  random_info=AcquireRandomInfoThreadSet();
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /*
    key == ~0UL only for an unseeded RNG; the noise loop is only run in
    parallel in that case so seeded (reproducible) runs stay serial.
  */
  key=GetRandomSecretKey(random_info[0]);
#endif
  random_view=AcquireAuthenticCacheView(random_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(random_image,random_image,random_image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) random_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();
    MagickPixelPacket
      pixel;
    register IndexPacket
      *restrict indexes;
    register ssize_t
      x;
    register PixelPacket
      *restrict q;
    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(random_view,0,y,random_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(random_view);
    pixel=zero;
    for (x=0; x < (ssize_t) random_image->columns; x++)
    {
      /* Gray noise: one random value replicated across all channels. */
      pixel.red=(MagickRealType) (QuantumRange*
        GetPseudoRandomValue(random_info[id]));
      pixel.green=pixel.red;
      pixel.blue=pixel.red;
      if (image->colorspace == CMYKColorspace)
        pixel.index=pixel.red;
      SetPixelPacket(random_image,&pixel,q,indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(random_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  random_view=DestroyCacheView(random_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    {
      /* DestroyImage() returns NULL, so this returns NULL on failure. */
      random_image=DestroyImage(random_image);
      return(random_image);
    }
  /*
    Pipeline: noise -> motion blur (stroke direction = angle) -> edge ->
    normalize/negate/contrast-stretch -> color-dodge onto a clone of the
    original -> blend 20% of the original color back in.
  */
  blur_image=MotionBlurImage(random_image,radius,sigma,angle,exception);
  random_image=DestroyImage(random_image);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  dodge_image=EdgeImage(blur_image,radius,exception);
  blur_image=DestroyImage(blur_image);
  if (dodge_image == (Image *) NULL)
    return((Image *) NULL);
  (void) NormalizeImage(dodge_image);
  (void) NegateImage(dodge_image,MagickFalse);
  (void) TransformImage(&dodge_image,(char *) NULL,"50%");
  sketch_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sketch_image == (Image *) NULL)
    {
      dodge_image=DestroyImage(dodge_image);
      return((Image *) NULL);
    }
  (void) CompositeImage(sketch_image,ColorDodgeCompositeOp,dodge_image,0,0);
  dodge_image=DestroyImage(dodge_image);
  blend_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blend_image == (Image *) NULL)
    {
      sketch_image=DestroyImage(sketch_image);
      return((Image *) NULL);
    }
  /* "compose:args" 20x80: 20% source (original) over 80% destination. */
  (void) SetImageArtifact(blend_image,"compose:args","20x80");
  (void) CompositeImage(sketch_image,BlendCompositeOp,blend_image,0,0);
  blend_image=DestroyImage(blend_image);
  return(sketch_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S o l a r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SolarizeImage() applies a special effect to the image, similar to the effect
% achieved in a photo darkroom by selectively exposing areas of photo
% sensitive paper to light. Threshold ranges from 0 to QuantumRange and is a
% measure of the extent of the solarization.
%
% The format of the SolarizeImage method is:
%
% MagickBooleanType SolarizeImage(Image *image,const double threshold)
% MagickBooleanType SolarizeImageChannel(Image *image,
% const ChannelType channel,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o threshold: Define the extent of the solarization.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SolarizeImage(Image *image,
  const double threshold)
{
  /*
    Convenience wrapper: solarize every default channel, routing errors
    through the image's own exception structure.
  */
  return(SolarizeImageChannel(image,DefaultChannels,threshold,
    &image->exception));
}
MagickExport MagickBooleanType SolarizeImageChannel(Image *image,
  const ChannelType channel,const double threshold,ExceptionInfo *exception)
{
#define SolarizeImageTag "Solarize/Image"
  CacheView
    *image_view;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;
      /*
        Solarize colormap: invert a channel entry only when it exceeds
        the threshold.  Fix: the blue channel previously inverted
        unconditionally, ignoring the threshold, unlike red/green here
        and unlike the DirectClass loop below.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((channel & RedChannel) != 0)
          if ((MagickRealType) image->colormap[i].red > threshold)
            image->colormap[i].red=QuantumRange-image->colormap[i].red;
        if ((channel & GreenChannel) != 0)
          if ((MagickRealType) image->colormap[i].green > threshold)
            image->colormap[i].green=QuantumRange-image->colormap[i].green;
        if ((channel & BlueChannel) != 0)
          if ((MagickRealType) image->colormap[i].blue > threshold)
            image->colormap[i].blue=QuantumRange-image->colormap[i].blue;
      }
    }
  /*
    Solarize image: same threshold-inversion, applied per pixel.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;
    register PixelPacket
      *restrict q;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        if ((MagickRealType) GetPixelRed(q) > threshold)
          SetPixelRed(q,QuantumRange-GetPixelRed(q));
      if ((channel & GreenChannel) != 0)
        if ((MagickRealType) GetPixelGreen(q) > threshold)
          SetPixelGreen(q,QuantumRange-GetPixelGreen(q));
      if ((channel & BlueChannel) != 0)
        if ((MagickRealType) GetPixelBlue(q) > threshold)
          SetPixelBlue(q,QuantumRange-GetPixelBlue(q));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SolarizeImage)
#endif
        proceed=SetImageProgress(image,SolarizeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t e g a n o I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SteganoImage() hides a digital watermark within the image. Recover
% the hidden watermark later to prove that the authenticity of an image.
% Offset defines the start position within the image to hide the watermark.
%
% The format of the SteganoImage method is:
%
% Image *SteganoImage(const Image *image,Image *watermark,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o watermark: the watermark image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SteganoImage(const Image *image,const Image *watermark,
  ExceptionInfo *exception)
{
#define GetBit(alpha,i) ((((size_t) (alpha) >> (size_t) (i)) & 0x01) != 0)
#define SetBit(alpha,i,set) (alpha)=(Quantum) ((set) != 0 ? (size_t) (alpha) \
  | (one << (size_t) (i)) : (size_t) (alpha) & ~(one << (size_t) (i)))
#define SteganoImageTag "Stegano/Image"
  CacheView
    *stegano_view,
    *watermark_view;
  Image
    *stegano_image;
  int
    c;           /* cycles 0..2: which RGB channel receives the next bit */
  MagickBooleanType
    status;
  PixelPacket
    pixel;
  register PixelPacket
    *q;
  register ssize_t
    x;
  size_t
    depth,
    one;
  ssize_t
    i,           /* watermark intensity bit, scanned high to low */
    j,           /* target bit plane in the stegano image */
    k,           /* linear pixel index, starting at image->offset */
    y;
  /*
    Initialize steganographic image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(watermark != (const Image *) NULL);
  assert(watermark->signature == MagickSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  one=1UL;
  stegano_image=CloneImage(image,0,0,MagickTrue,exception);
  if (stegano_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(stegano_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&stegano_image->exception);
      stegano_image=DestroyImage(stegano_image);
      return((Image *) NULL);
    }
  stegano_image->depth=MAGICKCORE_QUANTUM_DEPTH;
  /*
    Hide watermark in low-order bits of image.
  */
  c=0;
  i=0;
  j=0;
  depth=stegano_image->depth;
  k=image->offset;
  status=MagickTrue;
  watermark_view=AcquireVirtualCacheView(watermark,exception);
  stegano_view=AcquireAuthenticCacheView(stegano_image,exception);
  for (i=(ssize_t) depth-1; (i >= 0) && (j < (ssize_t) depth); i--)
  {
    for (y=0; (y < (ssize_t) watermark->rows) && (j < (ssize_t) depth); y++)
    {
      for (x=0; (x < (ssize_t) watermark->columns) && (j < (ssize_t) depth); x++)
      {
        (void) GetOneCacheViewVirtualPixel(watermark_view,x,y,&pixel,exception);
        if ((k/(ssize_t) stegano_image->columns) >= (ssize_t) stegano_image->rows)
          break;
        q=GetCacheViewAuthenticPixels(stegano_view,k % (ssize_t)
          stegano_image->columns,k/(ssize_t) stegano_image->columns,1,1,
          exception);
        if (q == (PixelPacket *) NULL)
          break;
        /* Embed bit i of the watermark intensity into bit plane j of the
           channel selected by c (red, green, blue in rotation). */
        switch (c)
        {
          case 0:
          {
            SetBit(GetPixelRed(q),j,GetBit(ClampToQuantum(GetPixelIntensity(
              image,&pixel)),i));
            break;
          }
          case 1:
          {
            SetBit(GetPixelGreen(q),j,GetBit(ClampToQuantum(GetPixelIntensity(
              image,&pixel)),i));
            break;
          }
          case 2:
          {
            SetBit(GetPixelBlue(q),j,GetBit(ClampToQuantum(GetPixelIntensity(
              image,&pixel)),i));
            break;
          }
        }
        if (SyncCacheViewAuthenticPixels(stegano_view,exception) == MagickFalse)
          break;
        c++;
        if (c == 3)
          c=0;
        k++;
        /*
          Fix: wrap the linear pixel index at the total pixel count
          (columns*rows).  The previous columns*columns wrapped at the
          wrong boundary for non-square images, corrupting the embedding
          once the index passed it.
        */
        if (k == (ssize_t) (stegano_image->columns*stegano_image->rows))
          k=0;
        if (k == image->offset)
          j++;               /* full wrap: advance to the next bit plane */
      }
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
        proceed=SetImageProgress(image,SteganoImageTag,(MagickOffsetType)
          (depth-i),depth);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  stegano_view=DestroyCacheView(stegano_view);
  watermark_view=DestroyCacheView(watermark_view);
  if (stegano_image->storage_class == PseudoClass)
    (void) SyncImage(stegano_image);
  if (status == MagickFalse)
    stegano_image=DestroyImage(stegano_image);
  return(stegano_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t e r e o A n a g l y p h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StereoAnaglyphImage() combines two images and produces a single image that
% is the composite of a left and right image of a stereo pair. Special
% red-green stereo glasses are required to view this effect.
%
% The format of the StereoAnaglyphImage method is:
%
% Image *StereoImage(const Image *left_image,const Image *right_image,
% ExceptionInfo *exception)
% Image *StereoAnaglyphImage(const Image *left_image,
% const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o left_image: the left image.
%
% o right_image: the right image.
%
% o exception: return any errors or warnings in this structure.
%
% o x_offset: amount, in pixels, by which the left image is offset to the
% right of the right image.
%
% o y_offset: amount, in pixels, by which the left image is offset to the
% bottom of the right image.
%
%
*/
MagickExport Image *StereoImage(const Image *left_image,
  const Image *right_image,ExceptionInfo *exception)
{
  /*
    Anaglyph of the stereo pair with no offset between the two frames.
  */
  Image
    *anaglyph_image;

  anaglyph_image=StereoAnaglyphImage(left_image,right_image,0,0,exception);
  return(anaglyph_image);
}
MagickExport Image *StereoAnaglyphImage(const Image *left_image,
  const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
#define StereoImageTag "Stereo/Image"
  const Image
    *image;
  Image
    *stereo_image;
  MagickBooleanType
    status;
  ssize_t
    y;
  assert(left_image != (const Image *) NULL);
  assert(left_image->signature == MagickSignature);
  if (left_image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      left_image->filename);
  assert(right_image != (const Image *) NULL);
  assert(right_image->signature == MagickSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /* NOTE(review): duplicate of the right_image assert above. */
  assert(right_image != (const Image *) NULL);
  image=left_image;
  if ((left_image->columns != right_image->columns) ||
      (left_image->rows != right_image->rows))
    ThrowImageException(ImageError,"LeftAndRightImageSizesDiffer");
  /*
    Initialize stereo image attributes.
  */
  stereo_image=CloneImage(left_image,left_image->columns,left_image->rows,
    MagickTrue,exception);
  if (stereo_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(stereo_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&stereo_image->exception);
      stereo_image=DestroyImage(stereo_image);
      return((Image *) NULL);
    }
  (void) SetImageColorspace(stereo_image,sRGBColorspace);
  /*
    Copy left image to red channel and right image to blue channel.
  */
  status=MagickTrue;
  for (y=0; y < (ssize_t) stereo_image->rows; y++)
  {
    register const PixelPacket
      *restrict p,
      *restrict q;
    register ssize_t
      x;
    register PixelPacket
      *restrict r;
    /* Negative offsets shift the left frame; virtual pixels cover the
       out-of-bounds region. */
    p=GetVirtualPixels(left_image,-x_offset,y-y_offset,image->columns,1,
      exception);
    q=GetVirtualPixels(right_image,0,y,right_image->columns,1,exception);
    r=QueueAuthenticPixels(stereo_image,0,y,stereo_image->columns,1,exception);
    if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL) ||
        (r == (PixelPacket *) NULL))
      break;  /* pixel fault: abandon the loop (status stays MagickTrue) */
    for (x=0; x < (ssize_t) stereo_image->columns; x++)
    {
      /* Red from the left eye, green+blue from the right eye;
         opacity is the average of the two frames. */
      SetPixelRed(r,GetPixelRed(p));
      SetPixelGreen(r,GetPixelGreen(q));
      SetPixelBlue(r,GetPixelBlue(q));
      SetPixelOpacity(r,(GetPixelOpacity(p)+q->opacity)/2);
      p++;
      q++;
      r++;
    }
    if (SyncAuthenticPixels(stereo_image,exception) == MagickFalse)
      break;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
        proceed=SetImageProgress(image,StereoImageTag,(MagickOffsetType) y,
          stereo_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /* status only goes false via a cancelled progress monitor. */
  if (status == MagickFalse)
    stereo_image=DestroyImage(stereo_image);
  return(stereo_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S w i r l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SwirlImage() swirls the pixels about the center of the image, where
% degrees indicates the sweep of the arc through which each pixel is moved.
% You get a more dramatic effect as the degrees move from 1 to 360.
%
% The format of the SwirlImage method is:
%
% Image *SwirlImage(const Image *image,double degrees,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o degrees: Define the tightness of the swirling effect.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SwirlImage(const Image *image,double degrees,
  ExceptionInfo *exception)
{
#define SwirlImageTag "Swirl/Image"
  CacheView
    *image_view,
    *swirl_view;
  Image
    *swirl_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  MagickPixelPacket
    zero;
  MagickRealType
    radius;
  PointInfo
    center,
    scale;
  ssize_t
    y;
  /*
    Initialize swirl image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  swirl_image=CloneImage(image,0,0,MagickTrue,exception);
  if (swirl_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(swirl_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&swirl_image->exception);
      swirl_image=DestroyImage(swirl_image);
      return((Image *) NULL);
    }
  if (swirl_image->background_color.opacity != OpaqueOpacity)
    swirl_image->matte=MagickTrue;
  /*
    Compute scaling factor: the swirl acts inside the largest ellipse
    inscribed in the image, normalized to a circle by scale.x/scale.y.
  */
  center.x=(double) image->columns/2.0;
  center.y=(double) image->rows/2.0;
  radius=MagickMax(center.x,center.y);
  scale.x=1.0;
  scale.y=1.0;
  if (image->columns > image->rows)
    scale.y=(double) image->columns/(double) image->rows;
  else
    if (image->columns < image->rows)
      scale.x=(double) image->rows/(double) image->columns;
  /* From here on "degrees" holds radians. */
  degrees=(double) DegreesToRadians(degrees);
  /*
    Swirl image.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(swirl_image,&zero);
  image_view=AcquireVirtualCacheView(image,exception);
  swirl_view=AcquireAuthenticCacheView(swirl_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,swirl_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;
    MagickRealType
      distance;
    PointInfo
      delta;
    register IndexPacket
      *restrict swirl_indexes;
    register ssize_t
      x;
    register PixelPacket
      *restrict q;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(swirl_view,0,y,swirl_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    swirl_indexes=GetCacheViewAuthenticIndexQueue(swirl_view);
    delta.y=scale.y*(double) (y-center.y);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Determine if the pixel is within an ellipse.
      */
      delta.x=scale.x*(double) (x-center.x);
      distance=delta.x*delta.x+delta.y*delta.y;
      if (distance < (radius*radius))
        {
          MagickRealType
            cosine,
            factor,
            sine;
          /*
            Swirl the pixel: rotation angle falls off quadratically with
            distance from the center (factor^2), so the sweep is strongest
            at the center and zero at the ellipse edge.  Pixels outside
            the ellipse keep the cloned original value.
          */
          factor=1.0-sqrt((double) distance)/radius;
          sine=sin((double) (degrees*factor*factor));
          cosine=cos((double) (degrees*factor*factor));
          (void) InterpolateMagickPixelPacket(image,image_view,
            UndefinedInterpolatePixel,(double) ((cosine*delta.x-sine*delta.y)/
            scale.x+center.x),(double) ((sine*delta.x+cosine*delta.y)/scale.y+
            center.y),&pixel,exception);
          SetPixelPacket(swirl_image,&pixel,q,swirl_indexes+x);
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(swirl_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SwirlImage)
#endif
        proceed=SetImageProgress(image,SwirlImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  swirl_view=DestroyCacheView(swirl_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    swirl_image=DestroyImage(swirl_image);
  return(swirl_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TintImage() applies a color vector to each pixel in the image. The length
% of the vector is 0 for black and white and at its maximum for the midtones.
% The vector weighting function is f(x)=(1-(4.0*((x-0.5)*(x-0.5))))
%
% The format of the TintImage method is:
%
% Image *TintImage(const Image *image,const char *opacity,
% const PixelPacket tint,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o opacity: A color value used for tinting.
%
% o tint: A color value used for tinting.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TintImage(const Image *image,const char *opacity,
  const PixelPacket tint,ExceptionInfo *exception)
{
#define TintImageTag "Tint/Image"
  CacheView
    *image_view,
    *tint_view;
  GeometryInfo
    geometry_info;
  Image
    *tint_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  MagickPixelPacket
    color_vector,  /* per-channel tint displacement applied at midtones */
    pixel;         /* per-channel strength percentages parsed from opacity */
  MagickStatusType
    flags;
  ssize_t
    y;
  /*
    Allocate tint image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  tint_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (tint_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(tint_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&tint_image->exception);
      tint_image=DestroyImage(tint_image);
      return((Image *) NULL);
    }
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      (IsPixelGray(&tint) == MagickFalse))
    (void) SetImageColorspace(tint_image,sRGBColorspace);
  /* No opacity string: return the untinted clone as-is. */
  if (opacity == (const char *) NULL)
    return(tint_image);
  /*
    Determine RGB values of the color.  The opacity geometry string gives
    the tint strength per channel (rho[/sigma[/xi[/psi]]]); missing values
    default to the red strength.
  */
  flags=ParseGeometry(opacity,&geometry_info);
  pixel.red=geometry_info.rho;
  if ((flags & SigmaValue) != 0)
    pixel.green=geometry_info.sigma;
  else
    pixel.green=pixel.red;
  if ((flags & XiValue) != 0)
    pixel.blue=geometry_info.xi;
  else
    pixel.blue=pixel.red;
  if ((flags & PsiValue) != 0)
    pixel.opacity=geometry_info.psi;
  else
    pixel.opacity=(MagickRealType) OpaqueOpacity;
  color_vector.red=(MagickRealType) (pixel.red*tint.red/100.0-
    GetPixelIntensity(tint_image,&tint));
  color_vector.green=(MagickRealType) (pixel.green*tint.green/100.0-
    GetPixelIntensity(tint_image,&tint));
  color_vector.blue=(MagickRealType) (pixel.blue*tint.blue/100.0-
    GetPixelIntensity(tint_image,&tint));
  /*
    Tint image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  tint_view=AcquireAuthenticCacheView(tint_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,tint_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;
    register PixelPacket
      *restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(tint_view,0,y,tint_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickPixelPacket
        pixel;  /* NOTE: shadows the outer parse-result `pixel` */
      MagickRealType
        weight;
      /* Weighting f(w)=1-4*(w-0.5)^2: zero at black/white, maximal at
         midtones, so the tint vector only moves the midtones. */
      weight=QuantumScale*GetPixelRed(p)-0.5;
      pixel.red=(MagickRealType) GetPixelRed(p)+color_vector.red*(1.0-(4.0*
        (weight*weight)));
      SetPixelRed(q,ClampToQuantum(pixel.red));
      weight=QuantumScale*GetPixelGreen(p)-0.5;
      pixel.green=(MagickRealType) GetPixelGreen(p)+color_vector.green*(1.0-(4.0*
        (weight*weight)));
      SetPixelGreen(q,ClampToQuantum(pixel.green));
      weight=QuantumScale*GetPixelBlue(p)-0.5;
      pixel.blue=(MagickRealType) GetPixelBlue(p)+color_vector.blue*(1.0-(4.0*
        (weight*weight)));
      SetPixelBlue(q,ClampToQuantum(pixel.blue));
      SetPixelOpacity(q,GetPixelOpacity(p));
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(tint_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TintImage)
#endif
        proceed=SetImageProgress(image,TintImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  tint_view=DestroyCacheView(tint_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    tint_image=DestroyImage(tint_image);
  return(tint_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% V i g n e t t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% VignetteImage() softens the edges of the image in vignette style.
%
% The format of the VignetteImage method is:
%
% Image *VignetteImage(const Image *image,const double radius,
% const double sigma,const ssize_t x,const ssize_t y,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o x, y: Define the x and y ellipse offset.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *VignetteImage(const Image *image,const double radius,
  const double sigma,const ssize_t x,const ssize_t y,ExceptionInfo *exception)
{
  char
    ellipse[MaxTextExtent];
  DrawInfo
    *draw_info;
  Image
    *canvas_image,
    *blur_image,
    *oval_image,
    *vignette_image;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  canvas_image=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(canvas_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&canvas_image->exception);
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  canvas_image->matte=MagickTrue;
  /*
    Build a black canvas and draw a white ellipse on it; x/y shrink the
    ellipse radii from the image half-extents.
  */
  oval_image=CloneImage(canvas_image,canvas_image->columns,canvas_image->rows,
    MagickTrue,exception);
  if (oval_image == (Image *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  (void) QueryColorDatabase("#000000",&oval_image->background_color,exception);
  (void) SetImageBackgroundColor(oval_image);
  draw_info=CloneDrawInfo((const ImageInfo *) NULL,(const DrawInfo *) NULL);
  (void) QueryColorDatabase("#ffffff",&draw_info->fill,exception);
  (void) QueryColorDatabase("#ffffff",&draw_info->stroke,exception);
  (void) FormatLocaleString(ellipse,MaxTextExtent,
    "ellipse %g,%g,%g,%g,0.0,360.0",image->columns/2.0,
    image->rows/2.0,image->columns/2.0-x,image->rows/2.0-y);
  draw_info->primitive=AcquireString(ellipse);
  (void) DrawImage(oval_image,draw_info);
  draw_info=DestroyDrawInfo(draw_info);
  /*
    Blur the mask (soft edge), use it as the canvas's opacity channel,
    then flatten against the background to produce the vignette.
  */
  blur_image=BlurImage(oval_image,radius,sigma,exception);
  oval_image=DestroyImage(oval_image);
  if (blur_image == (Image *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  /* matte off so CopyOpacity reads the mask's intensity, not its alpha. */
  blur_image->matte=MagickFalse;
  (void) CompositeImage(canvas_image,CopyOpacityCompositeOp,blur_image,0,0);
  blur_image=DestroyImage(blur_image);
  vignette_image=MergeImageLayers(canvas_image,FlattenLayer,exception);
  canvas_image=DestroyImage(canvas_image);
  if (vignette_image != (Image *) NULL)
    (void) TransformImageColorspace(vignette_image,image->colorspace);
  return(vignette_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W a v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WaveImage() creates a "ripple" effect in the image by shifting the pixels
% vertically along a sine wave whose amplitude and wavelength is specified
% by the given parameters.
%
% The format of the WaveImage method is:
%
% Image *WaveImage(const Image *image,const double amplitude,
% const double wave_length,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o amplitude, wave_length: Define the amplitude and wave length of the
% sine wave.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *WaveImage(const Image *image,const double amplitude,
  const double wave_length,ExceptionInfo *exception)
{
#define WaveImageTag "Wave/Image"
  CacheView
    *image_view,
    *wave_view;
  Image
    *wave_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  MagickPixelPacket
    zero;
  MagickRealType
    *sine_map;   /* per-column vertical displacement, precomputed */
  register ssize_t
    i;
  ssize_t
    y;
  /*
    Initialize wave image attributes.  The output is taller than the
    input by 2*|amplitude| so the displaced rows fit.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  wave_image=CloneImage(image,image->columns,(size_t) (image->rows+2.0*
    fabs(amplitude)),MagickTrue,exception);
  if (wave_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(wave_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&wave_image->exception);
      wave_image=DestroyImage(wave_image);
      return((Image *) NULL);
    }
  if (wave_image->background_color.opacity != OpaqueOpacity)
    wave_image->matte=MagickTrue;
  /*
    Allocate sine map.
  */
  sine_map=(MagickRealType *) AcquireQuantumMemory((size_t) wave_image->columns,
    sizeof(*sine_map));
  if (sine_map == (MagickRealType *) NULL)
    {
      wave_image=DestroyImage(wave_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /* Offset by |amplitude| so displacements are always non-negative. */
  for (i=0; i < (ssize_t) wave_image->columns; i++)
    sine_map[i]=fabs(amplitude)+amplitude*sin((double) ((2.0*MagickPI*i)/
      wave_length));
  /*
    Wave image.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(wave_image,&zero);
  image_view=AcquireVirtualCacheView(image,exception);
  wave_view=AcquireAuthenticCacheView(wave_image,exception);
  /* Out-of-range source rows resolve to the background color. */
  (void) SetCacheViewVirtualPixelMethod(image_view,
    BackgroundVirtualPixelMethod);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,wave_image,wave_image->rows,1)
#endif
  for (y=0; y < (ssize_t) wave_image->rows; y++)
  {
    MagickPixelPacket
      pixel;
    register IndexPacket
      *restrict indexes;
    register PixelPacket
      *restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(wave_view,0,y,wave_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(wave_view);
    pixel=zero;
    for (x=0; x < (ssize_t) wave_image->columns; x++)
    {
      /* Sample the source at (x, y - sine_map[x]): a vertical shift that
         varies sinusoidally with the column. */
      (void) InterpolateMagickPixelPacket(image,image_view,
        UndefinedInterpolatePixel,(double) x,(double) (y-sine_map[x]),&pixel,
        exception);
      SetPixelPacket(wave_image,&pixel,q,indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(wave_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_WaveImage)
#endif
        proceed=SetImageProgress(image,WaveImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  wave_view=DestroyCacheView(wave_view);
  image_view=DestroyCacheView(image_view);
  sine_map=(MagickRealType *) RelinquishMagickMemory(sine_map);
  if (status == MagickFalse)
    wave_image=DestroyImage(wave_image);
  return(wave_image);
}
|
is.c | /*************************************************************************
* *
* N A S P A R A L L E L B E N C H M A R K S 3.3 *
* *
* O p e n M P V E R S I O N *
* *
* I S *
* *
*************************************************************************
* *
* This benchmark is an OpenMP version of the NPB IS code. *
* It is described in NAS Technical Report 99-011. *
* *
* Permission to use, copy, distribute and modify this software *
* for any purpose with or without fee is hereby granted. We *
* request, however, that all derived work reference the NAS *
* Parallel Benchmarks 3.3. This software is provided "as is" *
* without express or implied warranty. *
* *
* Information on NPB 3.3, including the technical report, the *
* original specifications, source code, results and information *
* on how to submit new results, is available at: *
* *
* http://www.nas.nasa.gov/Software/NPB/ *
* *
* Send comments or suggestions to npb@nas.nasa.gov *
* *
* NAS Parallel Benchmarks Group *
* NASA Ames Research Center *
* Mail Stop: T27A-1 *
* Moffett Field, CA 94035-1000 *
* *
* E-mail: npb@nas.nasa.gov *
* Fax: (650) 604-3957 *
* *
*************************************************************************
* *
* Author: M. Yarrow *
* H. Jin *
* *
*************************************************************************/
/* NOTE(review): _OPENMP is a compiler-predefined macro reserved to signal
   OpenMP compilation (normally set by -fopenmp/-qopenmp).  Defining it by
   hand here unconditionally enables the #ifdef _OPENMP include of <omp.h>
   below, even in builds compiled without OpenMP support -- confirm this is
   intentional for this benchmark harness. */
#define _OPENMP
#include "npbparams.h"
#include <stdlib.h>
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/*****************************************************************/
/* For serial IS, buckets are not really req'd to solve NPB1 IS */
/* spec, but their use on some machines improves performance, on */
/* other machines the use of buckets compromises performance, */
/* probably because it is extra computation which is not req'd. */
/* (Note: Mechanism not understood, probably cache related) */
/* Example: SP2-66MhzWN: 50% speedup with buckets */
/* Example: SGI Indy5000: 50% slowdown with buckets */
/* Example: SGI O2000: 400% slowdown with buckets (Wow!) */
/*****************************************************************/
/* To disable the use of buckets, comment out the following line */
#define USE_BUCKETS
/* Uncomment below for cyclic schedule */
/*#define SCHED_CYCLIC*/
/******************/
/* default values */
/******************/
#ifndef CLASS
#define CLASS 'S'
#endif
/*************/
/* CLASS S */
/*************/
#if CLASS == 'S'
#define TOTAL_KEYS_LOG_2 16
#define MAX_KEY_LOG_2 11
#define NUM_BUCKETS_LOG_2 9
#endif
/*************/
/* CLASS W */
/*************/
#if CLASS == 'W'
#define TOTAL_KEYS_LOG_2 20
#define MAX_KEY_LOG_2 16
#define NUM_BUCKETS_LOG_2 10
#endif
/*************/
/* CLASS A */
/*************/
#if CLASS == 'A'
#define TOTAL_KEYS_LOG_2 23
#define MAX_KEY_LOG_2 19
#define NUM_BUCKETS_LOG_2 10
#endif
/*************/
/* CLASS B */
/*************/
#if CLASS == 'B'
#define TOTAL_KEYS_LOG_2 25
#define MAX_KEY_LOG_2 21
#define NUM_BUCKETS_LOG_2 10
#endif
/*************/
/* CLASS C */
/*************/
#if CLASS == 'C'
#define TOTAL_KEYS_LOG_2 27
#define MAX_KEY_LOG_2 23
#define NUM_BUCKETS_LOG_2 10
#endif
/*************/
/* CLASS D */
/*************/
#if CLASS == 'D'
#define TOTAL_KEYS_LOG_2 31
#define MAX_KEY_LOG_2 27
#define NUM_BUCKETS_LOG_2 10
#endif
#if CLASS == 'D'
#define TOTAL_KEYS (1L << TOTAL_KEYS_LOG_2)
#else
#define TOTAL_KEYS (1 << TOTAL_KEYS_LOG_2)
#endif
#define MAX_KEY (1 << MAX_KEY_LOG_2)
#define NUM_BUCKETS (1 << NUM_BUCKETS_LOG_2)
#define NUM_KEYS TOTAL_KEYS
#define SIZE_OF_BUFFERS NUM_KEYS
#define MAX_ITERATIONS 10
#define TEST_ARRAY_SIZE 5
/*************************************/
/* Typedef: if necessary, change the */
/* size of int here by changing the */
/* int type to, say, long */
/*************************************/
#if CLASS == 'D'
typedef long INT_TYPE;
#else
typedef int INT_TYPE;
#endif
/********************/
/* Some global info */
/********************/
INT_TYPE *key_buff_ptr_global; /* used by full_verify to get */
/* copies of rank info */
int passed_verification;
/************************************/
/* These are the three main arrays. */
/* See SIZE_OF_BUFFERS def above */
/************************************/
INT_TYPE key_array[SIZE_OF_BUFFERS],
key_buff1[MAX_KEY],
key_buff2[SIZE_OF_BUFFERS],
partial_verify_vals[TEST_ARRAY_SIZE],
**key_buff1_aptr = NULL;
#ifdef USE_BUCKETS
INT_TYPE **bucket_size,
bucket_ptrs[NUM_BUCKETS];
#pragma omp threadprivate(bucket_ptrs)
#endif
/**********************/
/* Partial verif info */
/**********************/
INT_TYPE test_index_array[TEST_ARRAY_SIZE],
test_rank_array[TEST_ARRAY_SIZE],
S_test_index_array[TEST_ARRAY_SIZE] =
{48427,17148,23627,62548,4431},
S_test_rank_array[TEST_ARRAY_SIZE] =
{0,18,346,64917,65463},
W_test_index_array[TEST_ARRAY_SIZE] =
{357773,934767,875723,898999,404505},
W_test_rank_array[TEST_ARRAY_SIZE] =
{1249,11698,1039987,1043896,1048018},
A_test_index_array[TEST_ARRAY_SIZE] =
{2112377,662041,5336171,3642833,4250760},
A_test_rank_array[TEST_ARRAY_SIZE] =
{104,17523,123928,8288932,8388264},
B_test_index_array[TEST_ARRAY_SIZE] =
{41869,812306,5102857,18232239,26860214},
B_test_rank_array[TEST_ARRAY_SIZE] =
{33422937,10244,59149,33135281,99},
C_test_index_array[TEST_ARRAY_SIZE] =
{44172927,72999161,74326391,129606274,21736814},
C_test_rank_array[TEST_ARRAY_SIZE] =
{61147,882988,266290,133997595,133525895},
D_test_index_array[TEST_ARRAY_SIZE] =
{1317351170,995930646,1157283250,1503301535,1453734525},
D_test_rank_array[TEST_ARRAY_SIZE] =
{1,36538729,1978098519,2145192618,2147425337};
/***********************/
/* function prototypes */
/***********************/
double randlc( double *X, double *A );
void full_verify( void );
void c_print_results( char *name,
char class,
int n1,
int n2,
int n3,
int niter,
double t,
double mops,
char *optype,
int passed_verification,
char *npbversion,
char *compiletime,
char *cc,
char *clink,
char *c_lib,
char *c_inc,
char *cflags,
char *clinkflags );
void timer_clear( int n );
void timer_start( int n );
void timer_stop( int n );
double timer_read( int n );
/*
* FUNCTION RANDLC (X, A)
*
* This routine returns a uniform pseudorandom double precision number in the
* range (0, 1) by using the linear congruential generator
*
* x_{k+1} = a x_k (mod 2^46)
*
* where 0 < x_k < 2^46 and 0 < a < 2^46. This scheme generates 2^44 numbers
* before repeating. The argument A is the same as 'a' in the above formula,
* and X is the same as x_0. A and X must be odd double precision integers
* in the range (1, 2^46). The returned value RANDLC is normalized to be
* between 0 and 1, i.e. RANDLC = 2^(-46) * x_1. X is updated to contain
* the new seed x_1, so that subsequent calls to RANDLC using the same
* arguments will generate a continuous sequence.
*
* This routine should produce the same results on any computer with at least
* 48 mantissa bits in double precision floating point data. On Cray systems,
* double precision should be disabled.
*
* David H. Bailey October 26, 1990
*
* IMPLICIT DOUBLE PRECISION (A-H, O-Z)
* SAVE KS, R23, R46, T23, T46
* DATA KS/0/
*
* If this is the first call to RANDLC, compute R23 = 2 ^ -23, R46 = 2 ^ -46,
* T23 = 2 ^ 23, and T46 = 2 ^ 46. These are computed in loops, rather than
* by merely using the ** operator, in order to insure that the results are
* exact on all systems. This code assumes that 0.5D0 is represented exactly.
*/
/*****************************************************************/
/************* R A N D L C ************/
/************* ************/
/************* portable random number generator ************/
/*****************************************************************/
static int KS=0;
static double R23, R46, T23, T46;
#pragma omp threadprivate(KS, R23, R46, T23, T46)
/* Linear congruential PRNG: x_{k+1} = a * x_k (mod 2^46).  Updates *X to
 * the new seed and returns the normalized value 2^-46 * x_{k+1} in (0,1).
 * All arithmetic splits 46-bit integers into 23-bit halves held in
 * doubles, so results are reproducible on any machine with >= 48 mantissa
 * bits (see the header comment above this function).
 * State (KS and the R/T constants) is threadprivate; each thread lazily
 * initializes its own copy on first call (or after KS is reset to 0). */
double randlc( double *X, double *A )
{
double T1, T2, T3, T4;
double A1;
double A2;
double X1;
double X2;
double Z;
int i, j;
if (KS == 0)
{
/* First call on this thread: compute R23 = 2^-23, T23 = 2^23,
 * R46 = 2^-46, T46 = 2^46 by repeated halving/doubling so the
 * values are exact on every platform. */
R23 = 1.0;
R46 = 1.0;
T23 = 1.0;
T46 = 1.0;
#pragma rose_outline
for (i=1; i<=23; i++)
{
R23 = 0.50 * R23;
T23 = 2.0 * T23;
}
#pragma rose_outline
for (i=1; i<=46; i++)
{
R46 = 0.50 * R46;
T46 = 2.0 * T46;
}
KS = 1;
}
/* Break A into two parts such that A = 2^23 * A1 + A2 and set X = N. */
T1 = R23 * *A;
j = T1;
A1 = j;
A2 = *A - T23 * A1;
/* Break X into two parts such that X = 2^23 * X1 + X2, compute
Z = A1 * X2 + A2 * X1 (mod 2^23), and then
X = 2^23 * Z + A2 * X2 (mod 2^46). */
T1 = R23 * *X;
j = T1;
X1 = j;
X2 = *X - T23 * X1;
T1 = A1 * X2 + A2 * X1;
j = R23 * T1;
T2 = j;
Z = T1 - T23 * T2;
T3 = T23 * Z + A2 * X2;
j = R46 * T3;
T4 = j;
*X = T3 - T46 * T4;
/* Normalize the new 46-bit seed into (0,1). */
return(R46 * *X);
}
/*****************************************************************/
/************ F I N D _ M Y _ S E E D ************/
/************ ************/
/************ returns parallel random number seq seed ************/
/*****************************************************************/
/*
* Create a random number sequence of total length nn residing
* on np number of processors. Each processor will therefore have a
* subsequence of length nn/np. This routine returns that random
* number which is the first random number for the subsequence belonging
* to processor rank kn, and which is used as seed for proc kn ran # gen.
*/
/* Return the seed for processor kn's subsequence of the global random
 * stream: the generator state after skipping the first
 * kn * ceil(nn/4/np) * 4 numbers.  The skip is done in O(log skip)
 * randlc calls by repeated squaring of the multiplier. */
double find_my_seed( int kn, /* my processor rank, 0<=kn<=num procs */
int np, /* np = num procs */
long nn, /* total num of ran numbers, all procs */
double s, /* Ran num seed, for ex.: 314159265.00 */
double a ) /* Ran num gen mult, try 1220703125.00 */
{
double seed, mult;
long per_proc, remaining, half;

/* Rank 0 starts at the beginning of the stream. */
if ( kn == 0 ) return s;

per_proc = (nn/4 + np - 1) / np; /* groups of four numbers per proc */
remaining = per_proc * 4 * kn; /* number of rans to be skipped */

seed = s;
mult = a;
#pragma rose_outline
while ( remaining > 1 ) {
half = remaining / 2;
if ( 2 * half == remaining ) {
/* even count: square the multiplier, halve the count */
(void)randlc( &mult, &mult );
remaining = half;
}
else {
/* odd count: advance the seed by one step */
(void)randlc( &seed, &mult );
remaining = remaining - 1;
}
}
/* one final advance accounts for the last skipped number */
(void)randlc( &seed, &mult );
return( seed );
}
/*****************************************************************/
/************* C R E A T E _ S E Q ************/
/*****************************************************************/
/* Fill key_array[0..NUM_KEYS-1] with pseudorandom keys in [0, MAX_KEY).
 * Each thread generates its own contiguous slice [k1,k2) from an
 * independently computed seed (find_my_seed), so the sequence is
 * identical regardless of the number of threads.
 * Fix: the previous version left myid/num_procs uninitialized when
 * _OPENMP was defined (the omp_get_* calls were commented out), which
 * is undefined behavior; they are now given serial defaults and set
 * from the OpenMP runtime when it is present. */
void create_seq( double seed, double a )
{
double x, s;
INT_TYPE i, k;
#pragma omp parallel private(x,s,i,k)
#pragma rose_outline
{
INT_TYPE k1, k2;
double an = a;
int myid = 0, num_procs = 1; /* serial defaults */
INT_TYPE mq;
#ifdef _OPENMP
myid = omp_get_thread_num();
num_procs = omp_get_num_threads();
#endif
/* ceil(NUM_KEYS / num_procs) keys per thread */
mq = (NUM_KEYS + num_procs - 1) / num_procs;
k1 = mq * myid;
k2 = k1 + mq;
if ( k2 > NUM_KEYS ) k2 = NUM_KEYS;
/* reset this thread's randlc state, then jump to its private seed */
KS = 0;
s = find_my_seed( myid, num_procs,
(long)4*NUM_KEYS, seed, an );
k = MAX_KEY/4;
for (i=k1; i<k2; i++)
{
/* each key consumes four random numbers */
x = randlc(&s, &an);
x += randlc(&s, &an);
x += randlc(&s, &an);
x += randlc(&s, &an);
key_array[i] = k*x;
}
} /*omp parallel*/
}
/*****************************************************************/
/***************** Allocate Working Buffer ****************/
/*****************************************************************/
/* malloc wrapper: returns the allocated block, or prints a diagnostic
 * via perror and terminates the program if the allocation fails. */
void *alloc_mem( size_t size )
{
void *ptr = malloc( size );
if (ptr == NULL)
{
perror("Memory allocation error");
exit(1);
}
return ptr;
}
/* Allocate the per-thread working buffers used by rank().
 * With USE_BUCKETS: one NUM_BUCKETS-sized count array per thread, and
 * key_buff2 is zeroed in parallel.  Without: one MAX_KEY-sized rank
 * array per extra thread (thread 0 reuses the global key_buff1).
 * Fix: the previous version read num_procs uninitialized when _OPENMP
 * was defined (omp_get_max_threads() was commented out); it now has a
 * serial default and is set from the OpenMP runtime when present. */
void alloc_key_buff( void )
{
INT_TYPE i;
int num_procs = 1; /* serial default */
#ifdef _OPENMP
num_procs = omp_get_max_threads();
#endif
#ifdef USE_BUCKETS
bucket_size = (INT_TYPE **)alloc_mem(sizeof(INT_TYPE *) * num_procs);
#pragma rose_outline
for (i = 0; i < num_procs; i++) {
bucket_size[i] = (INT_TYPE *)alloc_mem(sizeof(INT_TYPE) * NUM_BUCKETS);
}
#pragma omp parallel for
for( i=0; i<NUM_KEYS; i++ )
key_buff2[i] = 0;
#else /*USE_BUCKETS*/
key_buff1_aptr = (INT_TYPE **)alloc_mem(sizeof(INT_TYPE *) * num_procs);
key_buff1_aptr[0] = key_buff1;
for (i = 1; i < num_procs; i++) {
key_buff1_aptr[i] = (INT_TYPE *)alloc_mem(sizeof(INT_TYPE) * MAX_KEY);
}
#endif /*USE_BUCKETS*/
}
/*****************************************************************/
/************* F U L L _ V E R I F Y ************/
/*****************************************************************/
/* Verify the final ranking by actually sorting key_array and checking
 * that it is nondecreasing; increments passed_verification on success.
 * Placement uses key_buff_ptr_global, the cumulative rank table saved
 * by the last rank() call: pre-decrementing a key's rank yields its
 * final index, so equal keys fill their run from the back. */
void full_verify( void )
{
INT_TYPE i, j;
INT_TYPE k, k1, k2;
/* Now, finally, sort the keys: */
/* Copy keys into work array; keys in key_array will be reassigned. */
#ifdef USE_BUCKETS
/* Buckets are already sorted. Sorting keys within each bucket */
#ifdef SCHED_CYCLIC
#pragma omp parallel for private(i,j,k,k1) schedule(static,1)
#else
#pragma omp parallel for private(i,j,k,k1) schedule(dynamic)
#endif
#pragma rose_outline
for( j=0; j< NUM_BUCKETS; j++ ) {
/* bucket j occupies key_buff2[k1 .. bucket_ptrs[j]-1] */
k1 = (j > 0)? bucket_ptrs[j-1] : 0;
for ( i = k1; i < bucket_ptrs[j]; i++ ) {
k = --key_buff_ptr_global[key_buff2[i]];
key_array[k] = key_buff2[i];
}
}
#else
#pragma omp parallel private(i,j,k,k1,k2)
#pragma rose_outline
{
#pragma omp for
for( i=0; i<NUM_KEYS; i++ )
key_buff2[i] = key_array[i];
/* This is actual sorting. Each thread is responsible for
a subset of key values */
/* NOTE(review): the omp_get_num_threads()/omp_get_thread_num()
calls below are commented out, so j and k1 are read
uninitialized in this (non-USE_BUCKETS) branch -- confirm
against the upstream NPB source before building without
USE_BUCKETS. */
// j = omp_get_num_threads();
j = (MAX_KEY + j - 1) / j;
// k1 = j * omp_get_thread_num();
k2 = k1 + j;
if (k2 > MAX_KEY) k2 = MAX_KEY;
for( i=0; i<NUM_KEYS; i++ ) {
if (key_buff2[i] >= k1 && key_buff2[i] < k2) {
k = --key_buff_ptr_global[key_buff2[i]];
key_array[k] = key_buff2[i];
}
}
} /*omp parallel*/
#endif
/* Confirm keys correctly sorted: count incorrectly sorted keys, if any */
j = 0;
#pragma omp parallel for reduction(+:j)
#pragma rose_outline
for( i=1; i<NUM_KEYS; i++ )
if( key_array[i-1] > key_array[i] )
j++;
if( j != 0 )
printf( "Full_verify: number of keys out of sort: %ld\n", (long)j );
else
passed_verification++;
}
/*****************************************************************/
/************* R A N K ****************/
/*****************************************************************/
/* Core benchmark kernel.  Builds, for every key value, the cumulative
 * count of keys <= that value (the "rank" table) over all NUM_KEYS
 * keys, optionally via a bucket pre-pass for cache locality, then runs
 * the partial verification against the per-class test arrays.
 * 'iteration' perturbs two keys so each pass does distinct work. */
void rank( int iteration )
{
INT_TYPE i, k;
INT_TYPE *key_buff_ptr, *key_buff_ptr2;
#ifdef USE_BUCKETS
/* each bucket covers 2^shift consecutive key values */
int shift = MAX_KEY_LOG_2 - NUM_BUCKETS_LOG_2;
INT_TYPE num_bucket_keys = (1L << shift);
#endif
key_array[iteration] = iteration;
key_array[iteration+MAX_ITERATIONS] = MAX_KEY - iteration;
/* Determine where the partial verify test keys are, load into */
/* top of array bucket_size */
for( i=0; i<TEST_ARRAY_SIZE; i++ )
partial_verify_vals[i] = key_array[test_index_array[i]];
/* Setup pointers to key buffers */
#ifdef USE_BUCKETS
key_buff_ptr2 = key_buff2;
#else
key_buff_ptr2 = key_array;
#endif
key_buff_ptr = key_buff1;
#pragma omp parallel private(i, k)
//#pragma rose_outline
{
INT_TYPE *work_buff, m, k1, k2;
/* NOTE(review): the omp_get_* calls are commented out, so every
thread sees myid=0, num_procs=1.  With more than one thread
this differs from the upstream NPB code -- confirm intent of
the ROSE transformation before running multithreaded. */
int myid = 0, num_procs = 1;
#ifdef _OPENMP
// myid = omp_get_thread_num();
// num_procs = omp_get_num_threads();
#endif
/* Bucket sort is known to improve cache performance on some */
/* cache based systems. But the actual performance may depend */
/* on cache size, problem size. */
#ifdef USE_BUCKETS
work_buff = bucket_size[myid];
/* Initialize */
for( i=0; i<NUM_BUCKETS; i++ )
work_buff[i] = 0;
/* Determine the number of keys in each bucket */
#pragma omp for schedule(static)
for( i=0; i<NUM_KEYS; i++ )
work_buff[key_array[i] >> shift]++;
/* Accumulative bucket sizes are the bucket pointers.
These are global sizes accumulated upon to each bucket */
bucket_ptrs[0] = 0;
for( k=0; k< myid; k++ )
bucket_ptrs[0] += bucket_size[k][0];
#pragma rose_outline
for( i=1; i< NUM_BUCKETS; i++ ) {
bucket_ptrs[i] = bucket_ptrs[i-1];
for( k=0; k< myid; k++ )
bucket_ptrs[i] += bucket_size[k][i];
for( k=myid; k< num_procs; k++ )
bucket_ptrs[i] += bucket_size[k][i-1];
}
/* Sort into appropriate bucket */
#pragma omp for schedule(static)
#pragma rose_outline
for( i=0; i<NUM_KEYS; i++ )
{
k = key_array[i];
key_buff2[bucket_ptrs[k >> shift]++] = k;
}
/* The bucket pointers now point to the final accumulated sizes */
#pragma rose_outline
if (myid < num_procs-1) {
for( i=0; i< NUM_BUCKETS; i++ )
for( k=myid+1; k< num_procs; k++ )
bucket_ptrs[i] += bucket_size[k][i];
}
/* Now, buckets are sorted. We only need to sort keys inside
each bucket, which can be done in parallel. Because the distribution
of the number of keys in the buckets is Gaussian, the use of
a dynamic schedule should improve load balance, thus, performance */
#ifdef SCHED_CYCLIC
#pragma omp for schedule(static,1)
#else
#pragma omp for schedule(dynamic)
#endif
#pragma rose_outline
for( i=0; i< NUM_BUCKETS; i++ ) {
/* Clear the work array section associated with each bucket */
k1 = i * num_bucket_keys;
k2 = k1 + num_bucket_keys;
for ( k = k1; k < k2; k++ )
key_buff_ptr[k] = 0;
/* Ranking of all keys occurs in this section: */
/* In this section, the keys themselves are used as their
own indexes to determine how many of each there are: their
individual population */
m = (i > 0)? bucket_ptrs[i-1] : 0;
for ( k = m; k < bucket_ptrs[i]; k++ )
key_buff_ptr[key_buff_ptr2[k]]++; /* Now they have individual key */
/* population */
/* To obtain ranks of each key, successively add the individual key
population, not forgetting to add m, the total of lesser keys,
to the first key population */
key_buff_ptr[k1] += m;
for ( k = k1+1; k < k2; k++ )
key_buff_ptr[k] += key_buff_ptr[k-1];
}
#else /*USE_BUCKETS*/
work_buff = key_buff1_aptr[myid];
/* Clear the work array */
for( i=0; i<MAX_KEY; i++ )
work_buff[i] = 0;
/* Ranking of all keys occurs in this section: */
/* In this section, the keys themselves are used as their
own indexes to determine how many of each there are: their
individual population */
#pragma omp for nowait schedule(static)
for( i=0; i<NUM_KEYS; i++ )
work_buff[key_buff_ptr2[i]]++; /* Now they have individual key */
/* population */
/* To obtain ranks of each key, successively add the individual key
population */
for( i=0; i<MAX_KEY-1; i++ )
work_buff[i+1] += work_buff[i];
#pragma omp barrier
/* Accumulate the global key population */
#pragma rose_outline
for( k=1; k<num_procs; k++ ) {
#pragma omp for nowait schedule(static)
for( i=0; i<MAX_KEY; i++ )
key_buff_ptr[i] += key_buff1_aptr[k][i];
}
#endif /*USE_BUCKETS*/
} /*omp parallel*/
/* This is the partial verify test section */
/* Observe that test_rank_array vals are */
/* shifted differently for different cases */
/* The per-class iteration offsets below match the values the NPB
spec expects at each of the TEST_ARRAY_SIZE probe positions. */
#pragma rose_outline
for( i=0; i<TEST_ARRAY_SIZE; i++ )
{
k = partial_verify_vals[i]; /* test vals were put here */
if( 0 < k && k <= NUM_KEYS-1 )
{
INT_TYPE key_rank = key_buff_ptr[k-1];
int failed = 0;
switch( CLASS )
{
case 'S':
if( i <= 2 )
{
if( key_rank != test_rank_array[i]+iteration )
failed = 1;
else
passed_verification++;
}
else
{
if( key_rank != test_rank_array[i]-iteration )
failed = 1;
else
passed_verification++;
}
break;
case 'W':
if( i < 2 )
{
if( key_rank != test_rank_array[i]+(iteration-2) )
failed = 1;
else
passed_verification++;
}
else
{
if( key_rank != test_rank_array[i]-iteration )
failed = 1;
else
passed_verification++;
}
break;
case 'A':
if( i <= 2 )
{
if( key_rank != test_rank_array[i]+(iteration-1) )
failed = 1;
else
passed_verification++;
}
else
{
if( key_rank != test_rank_array[i]-(iteration-1) )
failed = 1;
else
passed_verification++;
}
break;
case 'B':
if( i == 1 || i == 2 || i == 4 )
{
if( key_rank != test_rank_array[i]+iteration )
failed = 1;
else
passed_verification++;
}
else
{
if( key_rank != test_rank_array[i]-iteration )
failed = 1;
else
passed_verification++;
}
break;
case 'C':
if( i <= 2 )
{
if( key_rank != test_rank_array[i]+iteration )
failed = 1;
else
passed_verification++;
}
else
{
if( key_rank != test_rank_array[i]-iteration )
failed = 1;
else
passed_verification++;
}
break;
case 'D':
if( i < 2 )
{
if( key_rank != test_rank_array[i]+iteration )
failed = 1;
else
passed_verification++;
}
else
{
if( key_rank != test_rank_array[i]-iteration )
failed = 1;
else
passed_verification++;
}
break;
}
if( failed == 1 )
printf( "Failed partial verification: "
"iteration %d, test key %d\n",
iteration, (int)i );
}
}
/* Make copies of rank info for use by full_verify: these variables
in rank are local; making them global slows down the code, probably
since they cannot be made register by compiler */
if( iteration == MAX_ITERATIONS )
key_buff_ptr_global = key_buff_ptr;
}
/*****************************************************************/
/************* M A I N ****************/
/*****************************************************************/
/* Benchmark driver: selects the verification arrays for the compiled
 * CLASS, generates the key sequence, runs one untimed warm-up rank(),
 * times MAX_ITERATIONS ranking passes, then fully verifies the sort
 * and prints the standard NPB results block.  Optional fine-grained
 * timers are enabled by the presence of a "timer.flag" file. */
int main( int argc, char **argv )
{
int i, iteration, timer_on;
double timecounter;
FILE *fp;
/* Initialize timers */
timer_on = 0;
if ((fp = fopen("timer.flag", "r")) != NULL) {
fclose(fp);
timer_on = 1;
}
timer_clear( 0 );
if (timer_on) {
timer_clear( 1 );
timer_clear( 2 );
timer_clear( 3 );
}
if (timer_on) timer_start( 3 );
/* Initialize the verification arrays if a valid class */
for( i=0; i<TEST_ARRAY_SIZE; i++ )
switch( CLASS )
{
case 'S':
test_index_array[i] = S_test_index_array[i];
test_rank_array[i] = S_test_rank_array[i];
break;
case 'A':
test_index_array[i] = A_test_index_array[i];
test_rank_array[i] = A_test_rank_array[i];
break;
case 'W':
test_index_array[i] = W_test_index_array[i];
test_rank_array[i] = W_test_rank_array[i];
break;
case 'B':
test_index_array[i] = B_test_index_array[i];
test_rank_array[i] = B_test_rank_array[i];
break;
case 'C':
test_index_array[i] = C_test_index_array[i];
test_rank_array[i] = C_test_rank_array[i];
break;
case 'D':
test_index_array[i] = D_test_index_array[i];
test_rank_array[i] = D_test_rank_array[i];
break;
};
/* Printout initial NPB info */
printf
( "\n\n NAS Parallel Benchmarks (NPB3.3-OMP) - IS Benchmark\n\n" );
printf( " Size: %ld (class %c)\n", (long)TOTAL_KEYS, CLASS );
printf( " Iterations: %d\n", MAX_ITERATIONS );
#ifdef _OPENMP
// printf( " Number of available threads: %d\n", omp_get_max_threads() );
#endif
printf( "\n" );
if (timer_on) timer_start( 1 );
/* Generate random number sequence and subsequent keys on all procs */
create_seq( 314159265.00, /* Random number gen seed */
1220703125.00 ); /* Random number gen mult */
alloc_key_buff();
if (timer_on) timer_stop( 1 );
/* Do one interation for free (i.e., untimed) to guarantee initialization of
all data and code pages and respective tables */
rank( 1 );
/* Start verification counter */
passed_verification = 0;
if( CLASS != 'S' ) printf( "\n iteration\n" );
/* Start timer */
timer_start( 0 );
/* This is the main iteration */
for( iteration=1; iteration<=MAX_ITERATIONS; iteration++ )
{
if( CLASS != 'S' ) printf( " %d\n", iteration );
rank( iteration );
}
/* End of timing, obtain maximum time of all processors */
timer_stop( 0 );
timecounter = timer_read( 0 );
/* This tests that keys are in sequence: sorting of last ranked key seq
occurs here, but is an untimed operation */
if (timer_on) timer_start( 2 );
full_verify();
if (timer_on) timer_stop( 2 );
if (timer_on) timer_stop( 3 );
/* The final printout */
/* Expected count: 5 partial checks per iteration plus the full verify. */
if( passed_verification != 5*MAX_ITERATIONS + 1 )
passed_verification = 0;
c_print_results( "IS",
CLASS,
(int)(TOTAL_KEYS/64),
64,
0,
MAX_ITERATIONS,
timecounter,
((double) (MAX_ITERATIONS*TOTAL_KEYS))
/timecounter/1000000.,
"keys ranked",
passed_verification,
NPBVERSION,
COMPILETIME,
CC,
CLINK,
C_LIB,
C_INC,
CFLAGS,
CLINKFLAGS );
/* Print additional timers */
if (timer_on) {
double t_total, t_percent;
t_total = timer_read( 3 );
printf("\nAdditional timers -\n");
printf(" Total execution: %8.3f\n", t_total);
if (t_total == 0.0) t_total = 1.0;
timecounter = timer_read(1);
t_percent = timecounter/t_total * 100.;
printf(" Initialization : %8.3f (%5.2f%%)\n", timecounter, t_percent);
timecounter = timer_read(0);
t_percent = timecounter/t_total * 100.;
printf(" Benchmarking : %8.3f (%5.2f%%)\n", timecounter, t_percent);
timecounter = timer_read(2);
t_percent = timecounter/t_total * 100.;
printf(" Sorting : %8.3f (%5.2f%%)\n", timecounter, t_percent);
}
return 0;
/**************************/
} /* E N D P R O G R A M */
/**************************/
|
fclaw2d_domain.c | /*
Copyright (c) 2012 Carsten Burstedde, Donna Calhoun
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <fclaw_domain.h>
#ifndef P4_TO_P8
#include <fclaw2d_domain.h>
#include <fclaw2d_convenience.h> /* Contains domain_destroy and others */
#include <fclaw2d_patch.h>
#include <fclaw2d_exchange.h>
#include <fclaw2d_global.h>
#else
#include <fclaw3d_domain.h>
#include <fclaw3d_convenience.h> /* Contains domain_destroy and others */
/* when ready include <fclaw3d_patch.h> */
typedef struct fclaw3d_patch_data
{
const fclaw3d_patch_t *real_patch;
}
fclaw3d_patch_data_t;
#endif
/* dimension-independent helper functions first */
#if 0
static fclaw2d_domain_t *
fclaw_domain_get_domain (fclaw_domain_t *d)
{
#ifndef P4_TO_P8
return d->d.d2.domain2;
#else
return d->d.d3.domain3;
#endif
}
#endif
/* Adapter: p4est-style per-patch callback that forwards to the
 * dimension-independent iterator stored in *user.  The wrapped
 * fclaw_patch_t is recovered from patch->user, where it was hooked
 * in by fclaw_domain_new2d. */
void
fclaw2d_domain_iterate_cb
(fclaw2d_domain_t * d2, fclaw2d_patch_t * patch,
int blockno, int patchno, void *user)
{
fclaw_domain_iterate_t *di = (fclaw_domain_iterate_t *) user;
di->iter (di->d, (fclaw_patch_t *) patch->user, blockno, patchno,
di->user);
}
/* Storage unit handed out by the domain's mstamp pool: the
 * dimension-specific patch data and the dimension-independent patch
 * wrapper live side by side in a single allocation. */
typedef struct fcd_allocated_patch
{
#ifndef P4_TO_P8
fclaw2d_patch_data_t pd;
#else
fclaw3d_patch_data_t pd;
#endif
fclaw_patch_t p;
}
fcd_allocated_patch_t;
/* Wrap a dimension-specific domain in a dimension-independent
 * fclaw_domain_t.  For every patch, allocates one pooled record
 * (fcd_allocated_patch_t: patch data + wrapper side by side), links
 * the wrapper through patch->user, and invokes the optional init
 * callback.  Returns the fully constructed wrapper domain. */
fclaw_domain_t *
fclaw_domain_new2d (fclaw2d_domain_t * domain,
fclaw_domain_callback_t init, void *user)
{
int i, j;
fclaw2d_block_t *block;
fclaw2d_patch_t *patch;
fclaw_domain_t *d;
fclaw_patch_t *p;
fcd_allocated_patch_t *ap;
FCLAW_ASSERT (domain != NULL && domain->mpisize > 0);
/* allocate and set domain itself */
d = FCLAW_ALLOC_ZERO (fclaw_domain_t, 1);
d->dim = P4EST_DIM;
#ifndef P4_TO_P8
d->d.d2.dmagic2 = FCLAW2D_DOMAIN_MAGIC;
d->d.d2.domain2 = domain;
#else
d->d.d3.dmagic3 = FCLAW3D_DOMAIN_MAGIC;
d->d.d3.domain3 = domain;
#endif
/* stamp allocator dispenses patch records from ~4 KiB pages */
sc_mstamp_init (&d->pstamp, 4096 - 3 * sizeof (size_t),
sizeof (fcd_allocated_patch_t));
/* iterate over all patches to initialize */
for (i = 0; i < domain->num_blocks; ++i)
{
block = domain->blocks + i;
for (j = 0; j < block->num_patches; ++j)
{
/* hook the new dimension-independent patch into storage */
ap = (fcd_allocated_patch_t *) sc_mstamp_alloc (&d->pstamp);
patch = block->patches + j;
patch->user = p = &ap->p;
#ifndef P4_TO_P8
p->pd.pd2 = &ap->pd;
p->pd.pd2->real_patch = patch;
#else
p->pd.pd3 = &ap->pd;
p->pd.pd3->real_patch = patch;
#endif
if (init != NULL) {
init (d, p, i, j, user);
}
}
}
/* domain fully constructed */
FCLAW_ASSERT (fclaw_domain_is_valid (d));
return d;
}
/* Tear down a wrapper domain built by fclaw_domain_new2d: invokes the
 * optional dele callback on every patch wrapper, destroys the
 * underlying dimension-specific domain, releases the patch-record
 * pool in one sweep (sc_mstamp_reset), and frees the wrapper. */
void
fclaw_domain_destroy2d (fclaw_domain_t * d,
fclaw_domain_callback_t dele, void *user)
{
int i, j;
fclaw2d_domain_t *domain;
fclaw2d_block_t *block;
fclaw2d_patch_t *patch;
fclaw_patch_t *p;
FCLAW_ASSERT (d->dim == P4EST_DIM);
#ifndef P4_TO_P8
domain = d->d.d2.domain2;
#else
domain = d->d.d3.domain3;
#endif
FCLAW_ASSERT (domain != NULL && domain->mpisize > 0);
/* every patch that was set must have been deleted */
FCLAW_ASSERT (d->du.count_set_patch == d->du.count_delete_patch);
/* iterate over all patches to deinitialize */
if (dele != NULL) {
for (i = 0; i < domain->num_blocks; ++i)
{
block = domain->blocks + i;
for (j = 0; j < block->num_patches; ++j)
{
/* free the new dimension-independent patch into storage */
patch = block->patches + j;
p = (fclaw_patch_t *) patch->user;
dele (d, p, i, j, user);
}
}
}
fclaw2d_domain_destroy (domain);
/* all patch records came from the pool; reset releases them at once */
sc_mstamp_reset (&d->pstamp);
FCLAW_FREE (d);
}
#ifndef P4_TO_P8
/* we're holding back with 3d counterparts
since much of this will move into fclaw_domain.c */
/* below follows the previous code unchanged */
/* Allocate a zero-initialized per-domain data record and attach it to
 * domain->user.  Overwrites any previous value of domain->user; the
 * caller must ensure no record is already attached (it would leak).
 * Fix: removed the dead read of domain->user into ddata, which was
 * immediately overwritten by the allocation. */
void fclaw2d_domain_data_new(fclaw2d_domain_t *domain)
{
fclaw2d_domain_data_t* ddata = FCLAW_ALLOC_ZERO (fclaw2d_domain_data_t, 1);
domain->user = ddata;
/* explicit defaults; FCLAW_ALLOC_ZERO already zeroed the struct */
ddata->count_set_patch = ddata->count_delete_patch = 0;
ddata->domain_exchange = NULL;
ddata->domain_indirect = NULL;
}
/* Detach and free the per-domain data record attached by
 * fclaw2d_domain_data_new; leaves domain->user == NULL. */
void fclaw2d_domain_data_delete(fclaw2d_domain_t* domain)
{
fclaw2d_domain_data_t *data = (fclaw2d_domain_data_t *) domain->user;
domain->user = NULL;
FCLAW_FREE (data);
}
/* Accessor: the per-domain data record stored in domain->user
 * (set by fclaw2d_domain_data_new; may be NULL after delete). */
fclaw2d_domain_data_t *fclaw2d_domain_get_data(fclaw2d_domain_t *domain)
{
return (fclaw2d_domain_data_t *) domain->user;
}
/* Prepare a (possibly regridded) domain for use.  When new_domain is
 * the one already installed in glob we are at startup and the
 * simulation clock is reset; otherwise the replacement domain gets a
 * fresh per-domain data record. */
void fclaw2d_domain_setup(fclaw2d_global_t* glob,
fclaw2d_domain_t* new_domain)
{
if (glob->domain == new_domain)
{
/* initial build: start the clock at zero */
fclaw_global_infof("Building initial domain\n");
glob->curr_time = 0;
}
else
{
/* regridded domain: attach its own data record */
fclaw_global_infof("Rebuilding domain\n");
fclaw2d_domain_data_new(new_domain);
}
fclaw_global_infof("Done\n");
}
/* Destroy glob's current domain: deletes every patch's data (covers
 * patches created at initialization rather than regridding), tears
 * down the exchange structures if present, reports any set/delete
 * count mismatch, frees the per-domain data record, destroys the
 * domain itself, and NULLs glob->domain. */
void fclaw2d_domain_reset(fclaw2d_global_t* glob)
{
fclaw2d_domain_t** domain = &glob->domain;
fclaw2d_domain_data_t *ddata = fclaw2d_domain_get_data (*domain);
int i, j;
for(i = 0; i < (*domain)->num_blocks; i++)
{
fclaw2d_block_t *block = (*domain)->blocks + i;
for(j = 0; j < block->num_patches; j++)
{
/* This is here to delete any patches created during
initialization, and not through regridding */
fclaw2d_patch_t *patch = block->patches + j;
fclaw2d_patch_data_delete(glob,patch);
}
block->user = NULL;
}
if (ddata->domain_exchange != NULL)
{
fclaw2d_exchange_delete(glob);
}
/* Output memory discrepancy for the ClawPatch */
if (ddata->count_set_patch != ddata->count_delete_patch)
{
printf ("[%d] This domain had Clawpatch set %d and deleted %d times\n",
(*domain)->mpirank,
ddata->count_set_patch, ddata->count_delete_patch);
}
fclaw2d_domain_data_delete(*domain); // Delete allocated pointers to set of functions.
fclaw2d_domain_destroy(*domain);
*domain = NULL;
}
/* Invoke pcb on every patch at the given refinement level, with the
 * patches of each block visited in parallel by OpenMP threads.  When
 * compiled without OpenMP this entry point is not expected to be
 * reached and only logs an error. */
void fclaw2d_domain_iterate_level_mthread (fclaw2d_domain_t * domain, int level,
fclaw2d_patch_callback_t pcb, void *user)
{
#if (_OPENMP)
int i, j;
fclaw2d_block_t *block;
fclaw2d_patch_t *patch;
for (i = 0; i < domain->num_blocks; i++)
{
block = domain->blocks + i;
/* patches within a block are independent at a fixed level */
#pragma omp parallel for private(patch,j)
for (j = 0; j < block->num_patches; j++)
{
patch = block->patches + j;
if (patch->level == level)
{
pcb (domain, patch, i, j, user);
}
}
}
#else
fclaw_global_essentialf("fclaw2d_patch_iterator_mthread : We should not be here\n");
#endif
}
#endif /* !P4_TO_P8 */
|
GB_unop__minv_int8_int8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__minv_int8_int8)
// op(A') function: GB (_unop_tran__minv_int8_int8)
// C type: int8_t
// A type: int8_t
// cast: int8_t cij = aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 8)
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_SIGNED (x, 8) ;
// casting
#define GB_CAST(z, aij) \
int8_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int8_t z = aij ; \
Cx [pC] = GB_IMINV_SIGNED (z, 8) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the MINV (multiplicative-inverse) unary op elementwise:
// Cx[p] = GB_IMINV_SIGNED (Ax[p], 8) for all anz entries, in parallel.
// When Ab is non-NULL (bitmap case) entries with Ab[p]==0 are skipped.
// Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE).
// Auto-generated file -- comments only; do not hand-edit the logic.
GrB_Info GB (_unop_apply__minv_int8_int8)
(
int8_t *Cx, // Cx and Ax may be aliased
const int8_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
// full/sparse case: every entry is present
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (int8_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int8_t aij = Ax [p] ;
int8_t z = aij ;
Cx [p] = GB_IMINV_SIGNED (z, 8) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int8_t aij = Ax [p] ;
int8_t z = aij ;
Cx [p] = GB_IMINV_SIGNED (z, 8) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = MINV (A'): transpose A, typecast, and apply the unary op.  The
// actual transpose loop is supplied by the shared GB_unop_transpose.c
// template, driven by the GB_* macros defined above in this file.
// Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE).
GrB_Info GB (_unop_tran__minv_int8_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
|
sequentialBinarySearch.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <time.h>
int binarySearch(int left, int right, int t, int token, int *a);
/* Driver: builds a sorted vector a[i] = 2*i, reads a token from stdin
 * and reports its 1-based position found by a threaded binary search.
 * Fixes: scanf and malloc results are now checked, and the grammatically
 * broken "not found" message is corrected. */
int main()
{
int i, token, n, t = 2, result, *a;
printf("Enter the size of the vector a: ");
if (scanf("%d", &n) != 1 || n <= 0)
{
fprintf(stderr, "Invalid vector size.\n");
return 1;
}
a = (int *)malloc(n * sizeof(int));
if (a == NULL)
{
perror("malloc");
return 1;
}
srand(time(NULL));
/* a is sorted by construction (even numbers), as binary search requires */
for (i = 0; i < n; i++)
{
a[i] = 2 * i;
printf("[%d]\t", a[i]);
}
printf("\nEnter the number to search: ");
if (scanf("%d", &token) != 1)
{
fprintf(stderr, "Invalid token.\n");
free(a);
return 1;
}
/*
We pass the following values to the function:
- the far left index (0)
- the far right index (n-1)
- the number of threads
- the token to search for
- vector A
*/
result = binarySearch(0, n - 1, t, token, a);
if (result < 0)
printf("\nThe number is not in the vector.");
else
printf("\nThe number is in the position: %d\n", result+1);
free(a);
return 0;
}
/*
 * binarySearch: searches sorted vector a[left..right] for 'token' using t
 * concurrent binary searches, one per contiguous portion of the vector.
 *
 * Returns the index of token, or -1 if absent (or if the range is empty
 * or t is non-positive).
 *
 * Fixes over the original:
 *  - portion size was (n)/2 with truncating division and a hard-coded 2,
 *    so when n was not evenly divisible the tail of the vector was never
 *    searched (e.g. the last element of a 5-element vector was reported
 *    missing).  Size is now the ceiling of n/t and the last portion is
 *    clamped to 'right'.
 *  - the shared 'index' was written without synchronization; it is now a
 *    max-reduction, which is race-free (and picks the largest match if
 *    duplicates straddle portions).
 *  - the work is expressed as a 'parallel for' over portion ids, so the
 *    function is also correct when compiled without OpenMP (the loop then
 *    simply visits every portion serially).  The omp_set_nested(1) call
 *    was dropped: no nested parallel region exists here.
 */
int binarySearch(int left, int right, int t, int token, int *a)
{
    int index = -1;
    int n = right - left + 1;
    int id;

    if (n <= 0 || t <= 0)
        return -1;

    /* ceiling division so the last (t-1 elements at most) are not dropped */
    int size = (n + t - 1) / t;

#ifdef _OPENMP
    omp_set_num_threads(t);
#pragma omp parallel for reduction(max:index)
#endif
    for (id = 0; id < t; id++)
    {
        /* [lo, hi] is this portion's slice of the vector */
        int lo = left + id * size;
        int hi = lo + size - 1;
        if (hi > right)
            hi = right;   /* clamp the final portion */

        /* ordinary binary search within the portion */
        while (lo <= hi)
        {
            int mid = lo + (hi - lo) / 2;   /* overflow-safe midpoint */
            if (a[mid] == token)
            {
                index = mid;
                break;
            }
            else if (token < a[mid])
                hi = mid - 1;
            else
                lo = mid + 1;
        }
    }
    return index;
}
|
GB_unaryop__identity_fp64_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_fp64_uint32
// op(A') function: GB_tran__identity_fp64_uint32
// C type: double
// A type: uint32_t
// cast: double cij = (double) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
double z = (double) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the IDENTITY operator with a uint32 -> double
// typecast.  Behavior-identical restyle: the GB_CAST_OP macro is expanded
// inline exactly as defined above in this file.
GrB_Info GB_unop__identity_fp64_uint32
(
    double *restrict Cx,
    const uint32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t k = 0 ; k < anz ; k++)
    {
        // Cx [k] = (double) Ax [k]; the identity operator is a no-op
        uint32_t aij = Ax [k] ;
        double z = (double) aij ;
        Cx [k] = z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast uint32 -> double, and apply the
// identity operator.  The loop nest lives in the shared template
// GB_unaryop_transpose.c, specialized by the GB_* macros defined above.
GrB_Info GB_tran__identity_fp64_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
// operator/type combination disabled at compile time; caller falls back
// to the generic kernel
return (GrB_NO_VALUE) ;
#else
// second phase of the two-phase transpose template (the template also
// supports a counting phase; only phase 2 is instantiated here)
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
csr_matvec.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision$
***********************************************************************EHEADER*/
/******************************************************************************
*
* Matvec functions for hypre_CSRMatrix class.
*
*****************************************************************************/
#include "seq_mv.h"
#include <assert.h>
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMatvec
*--------------------------------------------------------------------------*/
/* y[offset:end] = alpha*A[offset:end,:]*x + beta*b[offset:end] */
/*
 * hypre_CSRMatrixMatvecOutOfPlace:
 *   y[offset:end] = alpha * A[offset:end,:] * x + beta * b[offset:end]
 *
 * The return value is informational only: 1 if length(x) != num_cols,
 * 2 if length(y) or length(b) != num_rows, 3 if both; processing always
 * continues.  Dispatches to the CUDA or OpenMP-offload implementation
 * when those builds are enabled; otherwise runs the (optionally
 * OpenMP-threaded) CPU code below.
 */
HYPRE_Int
hypre_CSRMatrixMatvecOutOfPlace( HYPRE_Complex alpha,
hypre_CSRMatrix *A,
hypre_Vector *x,
HYPRE_Complex beta,
hypre_Vector *b,
hypre_Vector *y,
HYPRE_Int offset )
{
#ifdef HYPRE_PROFILE
HYPRE_Real time_begin = hypre_MPI_Wtime();
#endif
#if defined(HYPRE_USING_GPU) && defined(HYPRE_USING_UNIFIED_MEMORY) /* CUDA */
PUSH_RANGE_PAYLOAD("MATVEC",0, hypre_CSRMatrixNumRows(A));
#ifdef HYPRE_BIGINT
HYPRE_Int ierr = hypre_CSRMatrixMatvecDeviceBIGINT( alpha,A,x,beta,b,y,offset );
#else
HYPRE_Int ierr = hypre_CSRMatrixMatvecDevice( alpha,A,x,beta,b,y,offset );
#endif
POP_RANGE;
#elif defined(HYPRE_USING_OPENMP_OFFLOAD) /* OMP 4.5 */
PUSH_RANGE_PAYLOAD("MATVEC-OMP",0, hypre_CSRMatrixNumRows(A));
HYPRE_Int ierr = hypre_CSRMatrixMatvecOutOfPlaceOOMP( alpha,A,x,beta,b,y,offset );
POP_RANGE;
#else /* CPU */
/* Row pointers and vector data are pre-shifted by 'offset' so every loop
   below can run from 0. */
HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
HYPRE_Int *A_i = hypre_CSRMatrixI(A) + offset;
HYPRE_Int *A_j = hypre_CSRMatrixJ(A);
HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A) - offset;
HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A);
/*HYPRE_Int num_nnz = hypre_CSRMatrixNumNonzeros(A);*/
HYPRE_Int *A_rownnz = hypre_CSRMatrixRownnz(A);
HYPRE_Int num_rownnz = hypre_CSRMatrixNumRownnz(A);
HYPRE_Complex *x_data = hypre_VectorData(x);
HYPRE_Complex *b_data = hypre_VectorData(b) + offset;
HYPRE_Complex *y_data = hypre_VectorData(y) + offset;
HYPRE_Int x_size = hypre_VectorSize(x);
HYPRE_Int b_size = hypre_VectorSize(b) - offset;
HYPRE_Int y_size = hypre_VectorSize(y) - offset;
HYPRE_Int num_vectors = hypre_VectorNumVectors(x);
HYPRE_Int idxstride_y = hypre_VectorIndexStride(y);
HYPRE_Int vecstride_y = hypre_VectorVectorStride(y);
/*HYPRE_Int idxstride_b = hypre_VectorIndexStride(b);
HYPRE_Int vecstride_b = hypre_VectorVectorStride(b);*/
HYPRE_Int idxstride_x = hypre_VectorIndexStride(x);
HYPRE_Int vecstride_x = hypre_VectorVectorStride(x);
HYPRE_Complex temp, tempx;
HYPRE_Int i, j, jj, m, ierr=0;
/* threshold fraction: the rownnz-based kernel is only used when fewer
   than 70% of the rows are nonempty */
HYPRE_Real xpar=0.7;
hypre_Vector *x_tmp = NULL;
/*---------------------------------------------------------------------
* Check for size compatibility. Matvec returns ierr = 1 if
* length of X doesn't equal the number of columns of A,
* ierr = 2 if the length of Y doesn't equal the number of rows
* of A, and ierr = 3 if both are true.
*
* Because temporary vectors are often used in Matvec, none of
* these conditions terminates processing, and the ierr flag
* is informational only.
*--------------------------------------------------------------------*/
hypre_assert( num_vectors == hypre_VectorNumVectors(y) );
hypre_assert( num_vectors == hypre_VectorNumVectors(b) );
if (num_cols != x_size)
ierr = 1;
if (num_rows != y_size || num_rows != b_size)
ierr = 2;
if (num_cols != x_size && (num_rows != y_size || num_rows != b_size))
ierr = 3;
/*-----------------------------------------------------------------------
* Do (alpha == 0.0) computation - RDF: USE MACHINE EPS
*-----------------------------------------------------------------------*/
if (alpha == 0.0)
{
/* degenerate case: y = beta*b, no matvec needed */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows*num_vectors; i++)
y_data[i] = beta*b_data[i];
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_MATVEC] += hypre_MPI_Wtime() - time_begin;
#endif
return ierr;
}
/* alias-safe: if x and y are the same vector, work from a deep copy of x */
if (x == y)
{
x_tmp = hypre_SeqVectorCloneDeep(x);
x_data = hypre_VectorData(x_tmp);
}
/*-----------------------------------------------------------------------
* y = (beta/alpha)*y
*-----------------------------------------------------------------------*/
temp = beta / alpha;
/* use rownnz pointer to do the A*x multiplication when num_rownnz is smaller than num_rows */
if (num_rownnz < xpar*(num_rows) || num_vectors > 1)
{
/*-----------------------------------------------------------------------
* y = (beta/alpha)*y
*-----------------------------------------------------------------------*/
if (temp != 1.0)
{
if (temp == 0.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows*num_vectors; i++)
y_data[i] = 0.0;
}
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows*num_vectors; i++)
y_data[i] = b_data[i]*temp;
}
}
else
{
for (i = 0; i < num_rows*num_vectors; i++)
y_data[i] = b_data[i];
}
/*-----------------------------------------------------------------
* y += A*x
*-----------------------------------------------------------------*/
if (num_rownnz < xpar*(num_rows))
{
/* iterate only over the rows known to contain nonzeros */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jj,m,tempx) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rownnz; i++)
{
m = A_rownnz[i];
/*
* for (jj = A_i[m]; jj < A_i[m+1]; jj++)
* {
* j = A_j[jj];
* y_data[m] += A_data[jj] * x_data[j];
* } */
if ( num_vectors==1 )
{
tempx = 0;
for (jj = A_i[m]; jj < A_i[m+1]; jj++)
tempx += A_data[jj] * x_data[A_j[jj]];
y_data[m] += tempx;
}
else
for ( j=0; j<num_vectors; ++j )
{
tempx = 0;
for (jj = A_i[m]; jj < A_i[m+1]; jj++)
tempx += A_data[jj] * x_data[ j*vecstride_x + A_j[jj]*idxstride_x ];
y_data[ j*vecstride_y + m*idxstride_y] += tempx;
}
}
}
else // num_vectors > 1
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jj,tempx) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows; i++)
{
for (j = 0; j < num_vectors; ++j)
{
tempx = 0;
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
tempx += A_data[jj] * x_data[ j*vecstride_x + A_j[jj]*idxstride_x ];
}
y_data[ j*vecstride_y + i*idxstride_y ] += tempx;
}
}
}
/*-----------------------------------------------------------------
* y = alpha*y
*-----------------------------------------------------------------*/
if (alpha != 1.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows*num_vectors; i++)
y_data[i] *= alpha;
}
}
else
{ // JSP: this is currently the only path optimized
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,jj,tempx)
#endif
{
/* each thread processes a precomputed, load-balanced row range */
HYPRE_Int iBegin = hypre_CSRMatrixGetLoadBalancedPartitionBegin(A);
HYPRE_Int iEnd = hypre_CSRMatrixGetLoadBalancedPartitionEnd(A);
hypre_assert(iBegin <= iEnd);
hypre_assert(iBegin >= 0 && iBegin <= num_rows);
hypre_assert(iEnd >= 0 && iEnd <= num_rows);
/* the branches below specialize y = alpha*(A*x + temp*y) for the
common values of temp (= beta/alpha) and alpha, fusing the scaling
into a single pass over each row */
if (0 == temp)
{
if (1 == alpha) // JSP: a common path
{
for (i = iBegin; i < iEnd; i++)
{
tempx = 0.0;
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
tempx += A_data[jj] * x_data[A_j[jj]];
}
y_data[i] = tempx;
}
} // y = A*x
else if (-1 == alpha)
{
for (i = iBegin; i < iEnd; i++)
{
tempx = 0.0;
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
tempx -= A_data[jj] * x_data[A_j[jj]];
}
y_data[i] = tempx;
}
} // y = -A*x
else
{
for (i = iBegin; i < iEnd; i++)
{
tempx = 0.0;
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
tempx += A_data[jj] * x_data[A_j[jj]];
}
y_data[i] = alpha*tempx;
}
} // y = alpha*A*x
} // temp == 0
else if (-1 == temp) // beta == -alpha
{
if (1 == alpha) // JSP: a common path
{
for (i = iBegin; i < iEnd; i++)
{
tempx = -b_data[i];
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
tempx += A_data[jj] * x_data[A_j[jj]];
}
y_data[i] = tempx;
}
} // y = A*x - y
else if (-1 == alpha) // JSP: a common path
{
for (i = iBegin; i < iEnd; i++)
{
tempx = b_data[i];
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
tempx -= A_data[jj] * x_data[A_j[jj]];
}
y_data[i] = tempx;
}
} // y = -A*x + y
else
{
for (i = iBegin; i < iEnd; i++)
{
tempx = -b_data[i];
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
tempx += A_data[jj] * x_data[A_j[jj]];
}
y_data[i] = alpha*tempx;
}
} // y = alpha*(A*x - y)
} // temp == -1
else if (1 == temp)
{
if (1 == alpha) // JSP: a common path
{
for (i = iBegin; i < iEnd; i++)
{
tempx = b_data[i];
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
tempx += A_data[jj] * x_data[A_j[jj]];
}
y_data[i] = tempx;
}
} // y = A*x + y
else if (-1 == alpha)
{
for (i = iBegin; i < iEnd; i++)
{
tempx = -b_data[i];
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
tempx -= A_data[jj] * x_data[A_j[jj]];
}
y_data[i] = tempx;
}
} // y = -A*x - y
else
{
for (i = iBegin; i < iEnd; i++)
{
tempx = b_data[i];
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
tempx += A_data[jj] * x_data[A_j[jj]];
}
y_data[i] = alpha*tempx;
}
} // y = alpha*(A*x + y)
}
else
{
if (1 == alpha) // JSP: a common path
{
for (i = iBegin; i < iEnd; i++)
{
tempx = b_data[i]*temp;
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
tempx += A_data[jj] * x_data[A_j[jj]];
}
y_data[i] = tempx;
}
} // y = A*x + temp*y
else if (-1 == alpha)
{
for (i = iBegin; i < iEnd; i++)
{
tempx = -b_data[i]*temp;
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
tempx -= A_data[jj] * x_data[A_j[jj]];
}
y_data[i] = tempx;
}
} // y = -A*x - temp*y
else
{
for (i = iBegin; i < iEnd; i++)
{
tempx = b_data[i]*temp;
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
tempx += A_data[jj] * x_data[A_j[jj]];
}
y_data[i] = alpha*tempx;
}
} // y = alpha*(A*x + temp*y)
} // temp != 0 && temp != -1 && temp != 1
} // omp parallel
}
if (x == y) hypre_SeqVectorDestroy(x_tmp);
#endif /* CPU */
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_MATVEC] += hypre_MPI_Wtime() - time_begin;
#endif
return ierr;
}
/* y <- alpha*A*x + beta*y (in place in y): thin wrapper around
   hypre_CSRMatrixMatvecOutOfPlace with b == y and zero row offset.
   Returns the same informational size-mismatch code. */
HYPRE_Int
hypre_CSRMatrixMatvec( HYPRE_Complex alpha,
hypre_CSRMatrix *A,
hypre_Vector *x,
HYPRE_Complex beta,
hypre_Vector *y )
{
return hypre_CSRMatrixMatvecOutOfPlace(alpha, A, x, beta, y, y, 0);
}
#if defined (HYPRE_USING_UNIFIED_MEMORY)
/* Unified-memory variant of the in-place matvec: forwards to the
   OpenMP-offload implementation hypre_CSRMatrixMatvecOutOfPlaceOOMP3
   with b == y and zero offset (only built with HYPRE_USING_UNIFIED_MEMORY). */
HYPRE_Int
hypre_CSRMatrixMatvec3( HYPRE_Complex alpha,
hypre_CSRMatrix *A,
hypre_Vector *x,
HYPRE_Complex beta,
hypre_Vector *y )
{
return hypre_CSRMatrixMatvecOutOfPlaceOOMP3(alpha, A, x, beta, y, y, 0);
}
#endif
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMatvecT
*
* This version is using a different (more efficient) threading scheme
* Performs y <- alpha * A^T * x + beta * y
*
* From Van Henson's modification of hypre_CSRMatrixMatvec.
*--------------------------------------------------------------------------*/
/*
 * hypre_CSRMatrixMatvecT: y <- alpha * A^T * x + beta * y.
 * The return value is informational only (1: length(x) != num_rows,
 * 2: length(y) != num_cols, 3: both).
 * Threading scheme: instead of synchronizing scattered updates to y,
 * each thread accumulates into its own private copy of y
 * (y_data_expand), and a second parallel loop sums the copies.
 */
HYPRE_Int
hypre_CSRMatrixMatvecT( HYPRE_Complex alpha,
hypre_CSRMatrix *A,
hypre_Vector *x,
HYPRE_Complex beta,
hypre_Vector *y )
{
HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
HYPRE_Int *A_i = hypre_CSRMatrixI(A);
HYPRE_Int *A_j = hypre_CSRMatrixJ(A);
HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A);
HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A);
HYPRE_Complex *x_data = hypre_VectorData(x);
HYPRE_Complex *y_data = hypre_VectorData(y);
HYPRE_Int x_size = hypre_VectorSize(x);
HYPRE_Int y_size = hypre_VectorSize(y);
HYPRE_Int num_vectors = hypre_VectorNumVectors(x);
HYPRE_Int idxstride_y = hypre_VectorIndexStride(y);
HYPRE_Int vecstride_y = hypre_VectorVectorStride(y);
HYPRE_Int idxstride_x = hypre_VectorIndexStride(x);
HYPRE_Int vecstride_x = hypre_VectorVectorStride(x);
HYPRE_Complex temp;
HYPRE_Complex *y_data_expand;
HYPRE_Int my_thread_num = 0, offset = 0;
HYPRE_Int i, j, jv, jj;
HYPRE_Int num_threads;
HYPRE_Int ierr = 0;
hypre_Vector *x_tmp = NULL;
/*---------------------------------------------------------------------
* Check for size compatibility. MatvecT returns ierr = 1 if
* length of X doesn't equal the number of rows of A,
* ierr = 2 if the length of Y doesn't equal the number of
* columns of A, and ierr = 3 if both are true.
*
* Because temporary vectors are often used in MatvecT, none of
* these conditions terminates processing, and the ierr flag
* is informational only.
*--------------------------------------------------------------------*/
hypre_assert( num_vectors == hypre_VectorNumVectors(y) );
if (num_rows != x_size)
ierr = 1;
if (num_cols != y_size)
ierr = 2;
if (num_rows != x_size && num_cols != y_size)
ierr = 3;
/*-----------------------------------------------------------------------
* Do (alpha == 0.0) computation - RDF: USE MACHINE EPS
*-----------------------------------------------------------------------*/
if (alpha == 0.0)
{
/* degenerate case: y = beta*y only */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_cols*num_vectors; i++)
y_data[i] *= beta;
return ierr;
}
/* alias-safe: if x and y are the same vector, work from a deep copy of x */
if (x == y)
{
x_tmp = hypre_SeqVectorCloneDeep(x);
x_data = hypre_VectorData(x_tmp);
}
/*-----------------------------------------------------------------------
* y = (beta/alpha)*y
*-----------------------------------------------------------------------*/
temp = beta / alpha;
if (temp != 1.0)
{
if (temp == 0.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_cols*num_vectors; i++)
y_data[i] = 0.0;
}
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_cols*num_vectors; i++)
y_data[i] *= temp;
}
}
/*-----------------------------------------------------------------
* y += A^T*x
*-----------------------------------------------------------------*/
num_threads = hypre_NumThreads();
if (num_threads > 1)
{
/* one private copy of y per thread, zero-initialized by hypre_CTAlloc.
NOTE(review): in the num_vectors > 1 branch below this allocation is
unused, since that path is not threaded. */
y_data_expand = hypre_CTAlloc(HYPRE_Complex, num_threads*y_size, HYPRE_MEMORY_HOST);
if ( num_vectors==1 )
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,jj,j,my_thread_num,offset)
#endif
{
my_thread_num = hypre_GetThreadNum();
offset = y_size*my_thread_num;
/* scatter phase: each thread accumulates into its own slice */
#ifdef HYPRE_USING_OPENMP
#pragma omp for HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows; i++)
{
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
j = A_j[jj];
y_data_expand[offset + j] += A_data[jj] * x_data[i];
}
}
/* implied barrier (for threads)*/
/* reduction phase: sum the per-thread copies into y */
#ifdef HYPRE_USING_OPENMP
#pragma omp for HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < y_size; i++)
{
for (j = 0; j < num_threads; j++)
{
y_data[i] += y_data_expand[j*y_size + i];
}
}
} /* end parallel threaded region */
}
else
{
/* multiple vector case is not threaded */
for (i = 0; i < num_rows; i++)
{
for ( jv=0; jv<num_vectors; ++jv )
{
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
j = A_j[jj];
y_data[ j*idxstride_y + jv*vecstride_y ] +=
A_data[jj] * x_data[ i*idxstride_x + jv*vecstride_x];
}
}
}
}
hypre_TFree(y_data_expand, HYPRE_MEMORY_HOST);
}
else
{
/* single-thread path: scatter directly into y */
for (i = 0; i < num_rows; i++)
{
if ( num_vectors==1 )
{
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
j = A_j[jj];
y_data[j] += A_data[jj] * x_data[i];
}
}
else
{
for ( jv=0; jv<num_vectors; ++jv )
{
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
j = A_j[jj];
y_data[ j*idxstride_y + jv*vecstride_y ] +=
A_data[jj] * x_data[ i*idxstride_x + jv*vecstride_x ];
}
}
}
}
}
/*-----------------------------------------------------------------
* y = alpha*y
*-----------------------------------------------------------------*/
if (alpha != 1.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_cols*num_vectors; i++)
y_data[i] *= alpha;
}
if (x == y) hypre_SeqVectorDestroy(x_tmp);
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMatvec_FF
*--------------------------------------------------------------------------*/
/*
 * hypre_CSRMatrixMatvec_FF:
 * restricted matvec y <- alpha*A*x + beta*y, applied only to the rows i
 * with CF_marker_x[i] == fpt, and accumulating only the columns j with
 * CF_marker_y[j] == fpt.  All other entries of y are left untouched.
 * The return value is informational only (1: length(x) != num_cols,
 * 2: length(y) != num_rows, 3: both); processing always continues.
 *
 * Bug fix: 'temp' is reused as the per-row accumulator inside the main
 * OpenMP loop below, but it was missing from the private() clause, so it
 * was shared across threads — a data race producing wrong results with
 * more than one thread.  It is now private, matching how the sibling
 * kernels in this file privatize their accumulator (tempx).
 */
HYPRE_Int
hypre_CSRMatrixMatvec_FF( HYPRE_Complex alpha,
                          hypre_CSRMatrix *A,
                          hypre_Vector *x,
                          HYPRE_Complex beta,
                          hypre_Vector *y,
                          HYPRE_Int *CF_marker_x,
                          HYPRE_Int *CF_marker_y,
                          HYPRE_Int fpt )
{
   HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
   HYPRE_Int *A_i = hypre_CSRMatrixI(A);
   HYPRE_Int *A_j = hypre_CSRMatrixJ(A);
   HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A);
   HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A);
   HYPRE_Complex *x_data = hypre_VectorData(x);
   HYPRE_Complex *y_data = hypre_VectorData(y);
   HYPRE_Int x_size = hypre_VectorSize(x);
   HYPRE_Int y_size = hypre_VectorSize(y);
   HYPRE_Complex temp;
   HYPRE_Int i, jj;
   HYPRE_Int ierr = 0;

   /*---------------------------------------------------------------------
    * Check for size compatibility.  The ierr flag is informational only
    * (temporary vectors are often used here), so processing continues.
    *--------------------------------------------------------------------*/
   if (num_cols != x_size)
      ierr = 1;
   if (num_rows != y_size)
      ierr = 2;
   if (num_cols != x_size && num_rows != y_size)
      ierr = 3;

   /*-----------------------------------------------------------------------
    * Do (alpha == 0.0) computation - RDF: USE MACHINE EPS
    *-----------------------------------------------------------------------*/
   if (alpha == 0.0)
   {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < num_rows; i++)
         if (CF_marker_x[i] == fpt) y_data[i] *= beta;
      return ierr;
   }

   /*-----------------------------------------------------------------------
    * y = (beta/alpha)*y, so a single final scaling by alpha suffices
    *-----------------------------------------------------------------------*/
   temp = beta / alpha;
   if (temp != 1.0)
   {
      if (temp == 0.0)
      {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < num_rows; i++)
            if (CF_marker_x[i] == fpt) y_data[i] = 0.0;
      }
      else
      {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < num_rows; i++)
            if (CF_marker_x[i] == fpt) y_data[i] *= temp;
      }
   }

   /*-----------------------------------------------------------------
    * y += A*x over the marked rows/columns.
    * 'temp' is a per-row accumulator here and MUST be private (see
    * header comment).
    *-----------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,jj,temp) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < num_rows; i++)
   {
      if (CF_marker_x[i] == fpt)
      {
         temp = y_data[i];
         for (jj = A_i[i]; jj < A_i[i+1]; jj++)
            if (CF_marker_y[A_j[jj]] == fpt) temp += A_data[jj] * x_data[A_j[jj]];
         y_data[i] = temp;
      }
   }

   /*-----------------------------------------------------------------
    * y = alpha*y
    *-----------------------------------------------------------------*/
   if (alpha != 1.0)
   {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < num_rows; i++)
         if (CF_marker_x[i] == fpt) y_data[i] *= alpha;
   }
   return ierr;
}
#if defined(HYPRE_USING_GPU) && defined(HYPRE_USING_UNIFIED_MEMORY)
/*
 * hypre_CSRMatrixMatvecDevice: GPU path for the out-of-place matvec,
 * y[offset:] = alpha*A[offset:,:]*x + beta*b[offset:], using cuSPARSE
 * cusparseDcsrmv on unified-memory data.  When b != y, y is first seeded
 * with b (VecCopy) so the in-place beta*y accumulation of csrmv matches
 * the out-of-place contract.  Not usable with HYPRE_BIGINT.
 * NOTE(review): the cuSPARSE handle/descriptor are function-local statics
 * initialized on first call — this looks single-thread-only; confirm.
 */
HYPRE_Int
hypre_CSRMatrixMatvecDevice( HYPRE_Complex alpha,
hypre_CSRMatrix *A,
hypre_Vector *x,
HYPRE_Complex beta,
hypre_Vector *b,
hypre_Vector *y,
HYPRE_Int offset )
{
#ifdef HYPRE_BIGINT
hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ERROR: hypre_CSRMatvecDevice should not be called when bigint is enabled!");
#else
static cusparseHandle_t handle;
static cusparseMatDescr_t descr;
static HYPRE_Int FirstCall=1;
cusparseStatus_t status;
static cudaStream_t s[10];
static HYPRE_Int myid;
if (b!=y){
/* seed y with b so csrmv's beta*y term reads the right-hand side b */
PUSH_RANGE_PAYLOAD("MEMCPY",1,y->size-offset);
VecCopy(y->data,b->data,(y->size-offset),HYPRE_STREAM(4));
POP_RANGE
}
if (x==y) hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ERROR::x and y are the same pointer in hypre_CSRMatrixMatvecDevice\n");
if (FirstCall){
/* one-time setup: cuSPARSE handle, general zero-based matrix
descriptor, and cached CUDA streams */
PUSH_RANGE("FIRST_CALL",4);
handle=getCusparseHandle();
status= cusparseCreateMatDescr(&descr);
if (status != CUSPARSE_STATUS_SUCCESS) {
hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ERROR:: Matrix descriptor initialization failed\n");
return hypre_error_flag;
}
cusparseSetMatType(descr,CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descr,CUSPARSE_INDEX_BASE_ZERO);
FirstCall=0;
hypre_int jj;
for(jj=0;jj<5;jj++)
s[jj]=HYPRE_STREAM(jj);
nvtxNameCudaStreamA(s[4], "HYPRE_COMPUTE_STREAM");
hypre_MPI_Comm_rank(hypre_MPI_COMM_WORLD, &myid );
myid++;
POP_RANGE;
}
PUSH_RANGE("PREFETCH+SPMV",2);
hypre_CSRMatrixPrefetchToDevice(A);
hypre_SeqVectorPrefetchToDevice(x);
hypre_SeqVectorPrefetchToDevice(y);
//if (offset!=0) hypre_printf("WARNING:: Offset is not zero in hypre_CSRMatrixMatvecDevice :: \n");
cusparseErrchk(cusparseDcsrmv(handle ,
CUSPARSE_OPERATION_NON_TRANSPOSE,
A->num_rows-offset, A->num_cols, A->num_nonzeros,
&alpha, descr,
A->data ,A->i+offset,A->j,
x->data, &beta, y->data+offset));
if (!GetAsyncMode()){
/* synchronous mode: wait for the SpMV stream to finish */
hypre_CheckErrorDevice(cudaStreamSynchronize(s[4]));
}
POP_RANGE;
#endif
return hypre_error_flag;
}
/*
 * hypre_CSRMatrixMatvecDeviceBIGINT: HYPRE_BIGINT variant of the cuSPARSE
 * matvec.  cusparseDcsrmv needs 32-bit indices, so on first use the
 * big-int row/column index arrays are copied into 32-bit shadow arrays
 * (A->i_short / A->j_short) which are then passed to csrmv.
 * Compiled to a no-op returning 0 when HYPRE_BIGINT is not defined.
 * NOTE(review): function-local static handle/streams as in
 * hypre_CSRMatrixMatvecDevice — looks single-thread-only; confirm.
 */
HYPRE_Int
hypre_CSRMatrixMatvecDeviceBIGINT( HYPRE_Complex alpha,
hypre_CSRMatrix *A,
hypre_Vector *x,
HYPRE_Complex beta,
hypre_Vector *b,
hypre_Vector *y,
HYPRE_Int offset )
{
#ifdef HYPRE_BIGINT
static cusparseHandle_t handle;
static cusparseMatDescr_t descr;
static HYPRE_Int FirstCall=1;
cusparseStatus_t status;
static cudaStream_t s[10];
static HYPRE_Int myid;
if (b!=y){
/* seed y with b so csrmv's beta*y term reads the right-hand side b */
PUSH_RANGE_PAYLOAD("MEMCPY",1,y->size-offset);
VecCopy(y->data,b->data,(y->size-offset),HYPRE_STREAM(4));
POP_RANGE
}
if (x==y) fprintf(stderr,"ERROR::x and y are the same pointer in hypre_CSRMatrixMatvecDevice\n");
if (FirstCall){
/* one-time setup: cuSPARSE handle, general zero-based matrix
descriptor, and cached CUDA streams */
PUSH_RANGE("FIRST_CALL",4);
handle=getCusparseHandle();
status= cusparseCreateMatDescr(&descr);
if (status != CUSPARSE_STATUS_SUCCESS) {
printf("ERROR:: Matrix descriptor initialization failed\n");
exit(2);
}
cusparseSetMatType(descr,CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descr,CUSPARSE_INDEX_BASE_ZERO);
FirstCall=0;
hypre_int jj;
for(jj=0;jj<5;jj++)
s[jj]=HYPRE_STREAM(jj);
nvtxNameCudaStreamA(s[4], "HYPRE_COMPUTE_STREAM");
hypre_MPI_Comm_rank(hypre_MPI_COMM_WORLD, &myid );
myid++;
POP_RANGE;
}
PUSH_RANGE("PREFETCH+SPMV",2);
hypre_int num_rows = hypre_CSRMatrixNumRows(A);
hypre_int num_cols = hypre_CSRMatrixNumCols(A);
hypre_int num_nonzeros = hypre_CSRMatrixNumNonzeros(A);
if (A->i_short==NULL) {
/* lazily build and cache the 32-bit index shadow arrays */
A->i_short = hypre_CTAlloc(hypre_int, num_rows + 1, HYPRE_MEMORY_SHARED);
A->j_short = hypre_CTAlloc(hypre_int, num_nonzeros, HYPRE_MEMORY_SHARED);
hypre_CSRMatrixPrefetchToDevice(A);
hypre_CSRMatrixPrefetchToDeviceBIGINT(A);
BigToSmallCopy(A->i_short,A->i,num_rows+1,0);
BigToSmallCopy(A->j_short,A->j,num_nonzeros,0);
hypre_CheckErrorDevice(cudaStreamSynchronize(0));
//hypre_printf("BIGINT MOD :: Arrays copied \n");
}
//hypre_CSRMatrixPrefetchToDevice(A);
hypre_SeqVectorPrefetchToDevice(x);
hypre_SeqVectorPrefetchToDevice(y);
if (offset!=0) hypre_error_w_msg(HYPRE_ERROR_GENERIC, "WARNING:: Offset is not zero in hypre_CSRMatrixMatvecDevice \n");
cusparseErrchk(cusparseDcsrmv(handle ,
CUSPARSE_OPERATION_NON_TRANSPOSE,
num_rows-offset, num_cols, num_nonzeros,
&alpha, descr,
A->data ,A->i_short+offset,A->j_short,
x->data, &beta, y->data+offset));
if (!GetAsyncMode()){
/* synchronous mode: wait for the SpMV stream to finish */
hypre_CheckErrorDevice(cudaStreamSynchronize(s[4]));
}
POP_RANGE;
#endif
return 0;
}
#endif
|
Fig_12.17_ompVecPi.c | #include <omp.h>
#include <stdio.h>
static long num_steps = 100000;
/* step is now double (was float): see the accuracy note on main below */
double step;

/*
 * Estimates pi by midpoint-rule integration of 4/(1+x^2) over [0,1],
 * with the loop vectorized via OpenMP SIMD ('x' private per SIMD lane,
 * 'sum' a SIMD reduction).  Prints the estimate and returns 0.
 *
 * Fixes: the original accumulated 100,000 terms into a float 'sum'
 * (total ~3.1e5), where adding each ~4.0f term loses most of its
 * low-order bits, and it computed step as 1.0f/(double)num_steps only
 * to truncate the quotient into a float global.  All arithmetic is now
 * done in double, which recovers full accuracy; the "pi=%lf \n" output
 * format already expects a double.  A missing 'return 0' is added.
 */
int main ()
{
    long i;
    double x, pi, sum = 0.0;

    step = 1.0 / (double) num_steps;
    #pragma omp simd private(x) reduction(+:sum)
    for (i = 0; i < num_steps; i++) {
        x = ((double) i + 0.5) * step;
        sum += 4.0 / (1.0 + x * x);
    }
    pi = step * sum;
    printf("pi=%lf \n", pi);
    return 0;
}
|
LAGraph_tricount.c | //------------------------------------------------------------------------------
// LAGraph_tricount: count the number of triangles in a graph
//------------------------------------------------------------------------------
/*
LAGraph: graph algorithms based on GraphBLAS
Copyright 2020 LAGraph Contributors.
(see Contributors.txt for a full list of Contributors; see
ContributionInstructions.txt for information on how you can Contribute to
this project).
All Rights Reserved.
NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH
CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR
PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF
THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH
RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.
Released under a BSD license, please see the LICENSE file distributed with
this Software or contact permission@sei.cmu.edu for full terms.
Created, in part, with funding and support from the United States
Government. (see Acknowledgments.txt file).
This program includes and/or can make use of certain third party source
code, object code, documentation and other files ("Third Party Software").
See LICENSE file for more details.
*/
//------------------------------------------------------------------------------
// LAGraph_tricount: count the number of triangles in a graph,
// Contributed by Tim Davis, Texas A&M.
// Given a symmetric graph A with no-self edges, LAGraph_tricount counts the
// number of triangles in the graph. A triangle is a clique of size three,
// that is, 3 nodes that are all pairwise connected.
// One of 6 methods is used, defined below where L and U are the strictly
// lower and strictly upper triangular parts of the symmetric matrix A,
// respectively. Each method computes the same result, ntri:
// 1: Burkhardt: ntri = sum (sum ((A^2) .* A)) / 6
// 2: Cohen: ntri = sum (sum ((L * U) .* A)) / 2
// 3: Sandia: ntri = sum (sum ((L * L) .* L))
// 4: Sandia2: ntri = sum (sum ((U * U) .* U))
// 5: SandiaDot: ntri = sum (sum ((L * U') .* L)). Note that L=U'.
// 6: SandiaDot2: ntri = sum (sum ((U * L') .* U)). Note that U=L'.
// A is a square symmetric matrix, of any type. Its values are ignored,
// (assuming v3.2.0 of SuiteSparse:GraphBLAS is used); otherwise, A must be
// binary. Results are undefined for methods 1 and 2 if self-edges exist in A.
// Results are undefined for all methods if A is unsymmetric.
// TODO use an enum for the above methods.
// All matrices are assumed to be in CSR format (GxB_BY_ROW in
// SuiteSparse:GraphBLAS). The 6 methods work fine if the matrices are in CSC
// format; just the underlying algorithms employed inside SuiteSparse:GraphBLAS
// will differ (dot product vs saxpy, for example). If L and U are in CSC
// format, then the "Dot" methods would use an outer product approach, which is
// slow in SuiteSparse:GraphBLAS (requiring an explicit transpose). The
// auto-sort rule probably needs to be reversed, if A is in CSC format (this is
// not yet tested).
// Methods 1 and 2 are much slower than methods 3 to 6 and take more memory.
// Methods 3 to 6 take a little less memory than methods 1 and 2, are by far
// the fastest methods in general. The methods 3 and 5 compute the same
// intermediate matrix (L*L), and differ only in the way the matrix
// multiplication is done. Method 3 uses an outer-product method (Gustavson's
// method). Method 5 uses dot products (assuming both matrices are in CSR
// format) and does not explicitly transpose U. They are called the "Sandia"
// method since matrices in the KokkosKernels are stored in compressed-sparse
// row form, so (L*L).*L in the KokkosKernel method is equivalent to (L*L).*L
// in SuiteSparse:GraphBLAS when the matrices in SuiteSparse:GraphBLAS are in
// their default format (also by row).
// The new GxB_PAIR_INT64 binary operator in SuiteSparse:GraphBLAS v3.2.0 is
// used in the semiring, if available. This is the function f(x,y)=1, so the
// values of A are not accessed. They can have any values and any type. Only
// the structure of A. Otherwise, without this operator, the input matrix A
// must be binary.
// Reference: Wolf, Deveci, Berry, Hammond, Rajamanickam, 'Fast linear algebra-
// based triangle counting with KokkosKernels', IEEE HPEC'17,
// https://dx.doi.org/10.1109/HPEC.2017.8091043,
#include "LAGraph_internal.h"
#include "GB_msort_2.h"
//------------------------------------------------------------------------------
// tricount_prep: construct L and U
//------------------------------------------------------------------------------
#undef LAGRAPH_FREE_ALL
#define LAGRAPH_FREE_ALL \
GrB_free (&thunk) ; \
GrB_free (L) ; \
GrB_free (U) ;
// If L (resp. U) is non-NULL, *L (resp. *U) is created as a boolean n-by-n
// matrix holding the strictly lower (resp. upper) triangular part of A.
// A must be square and symmetric; its values are ignored here.
// Returns GrB_SUCCESS on success.  On failure, the LAGr_*/LAGRAPH_OK macros
// free any partially-built objects (via LAGRAPH_FREE_ALL) and return the
// GraphBLAS error code.
static GrB_Info tricount_prep
(
    GrB_Matrix *L,      // if non-NULL, L = tril (A,-1) is constructed
    GrB_Matrix *U,      // if non-NULL, U = triu (A,1) is constructed
    GrB_Matrix A        // input matrix
)
{
    // 'info' is required by the LAGr_*/LAGRAPH_OK error-handling macros.
    GrB_Info info ;
    GrB_Index n, *I = NULL, *J = NULL ;
    bool *X = NULL ;

#if defined ( GxB_SUITESPARSE_GRAPHBLAS ) \
    && ( GxB_IMPLEMENTATION >= GxB_VERSION (3,0,1) )

    //----------------------------------------------------------------------
    // build L and/or U with GxB_select
    //----------------------------------------------------------------------

    // NULL-initialized so LAGRAPH_FREE_ALL is safe if an early call fails.
    GxB_Scalar thunk = NULL ;
    LAGr_Matrix_nrows (&n, A) ;
    LAGr_Scalar_new (&thunk, GrB_INT64) ;

    if (L != NULL)
    {
        // L = tril (A,-1)
        LAGr_Matrix_new (L, GrB_BOOL, n, n) ;
        LAGr_Scalar_setElement (thunk, -1) ;
        LAGr_select (*L, NULL, NULL, GxB_TRIL, A, thunk, NULL) ;
    }

    if (U != NULL)
    {
        // U = triu (A,1)
        LAGr_Matrix_new (U, GrB_BOOL, n, n) ;
        LAGr_Scalar_setElement (thunk, 1) ;
        LAGr_select (*U, NULL, NULL, GxB_TRIU, A, thunk, NULL) ;
    }

    LAGr_free (&thunk) ;

#else

    //----------------------------------------------------------------------
    // build L and U with extractTuples (slower than GxB_select)
    //----------------------------------------------------------------------

    // NULL-initialized: LAGRAPH_FREE_ALL frees &thunk on any error path.
    GrB_Vector thunk = NULL ;
    LAGr_Matrix_nrows (&n, A) ;

    if (L != NULL || U != NULL)
    {
        GrB_Index nvals ;
        LAGr_Matrix_nvals (&nvals, A) ;
        I = LAGraph_malloc (nvals, sizeof (GrB_Index)) ;
        J = LAGraph_malloc (nvals, sizeof (GrB_Index)) ;
        X = LAGraph_malloc (nvals, sizeof (bool)) ;
        if (I == NULL || J == NULL || X == NULL)
        {
            // release whatever was allocated before reporting the error;
            // LAGRAPH_FREE_ALL does not track these workspace arrays.
            LAGRAPH_FREE (I) ;
            LAGRAPH_FREE (J) ;
            LAGRAPH_FREE (X) ;
            LAGRAPH_ERROR ("out of memory") ;
        }
        LAGr_Matrix_extractTuples (I, J, X, &nvals, A) ;

        // compact the tuples in place, keeping only the strictly lower
        // triangular entries (row index > column index)
        int64_t nedges = 0 ;
        for (int64_t k = 0 ; k < (int64_t) nvals ; k++)
        {
            if (I [k] > J [k])
            {
                // keep this entry
                I [nedges] = I [k] ;
                J [nedges] = J [k] ;
                X [nedges] = X [k] ;
                nedges++ ;
            }
        }

        if (L != NULL)
        {
            LAGr_Matrix_new (L, GrB_BOOL, n, n) ;
            LAGr_Matrix_build (*L, I, J, X, nedges, GrB_LOR) ;
        }

        if (U != NULL)
        {
            // U is built from the same kept entries, transposed (J,I)
            LAGr_Matrix_new (U, GrB_BOOL, n, n) ;
            LAGr_Matrix_build (*U, J, I, X, nedges, GrB_LOR) ;
        }

        LAGRAPH_FREE (I) ;
        LAGRAPH_FREE (J) ;
        LAGRAPH_FREE (X) ;
    }

#endif

    return (GrB_SUCCESS) ;
}
//------------------------------------------------------------------------------
// LAGraph_tricount: count the number of triangles in a graph
//------------------------------------------------------------------------------
#undef LAGRAPH_FREE_ALL
#define LAGRAPH_FREE_ALL \
GrB_free (&C) ; \
GrB_free (&L) ; \
GrB_free (&T) ; \
GrB_free (&U) ; \
LAGRAPH_FREE (W0) ; \
LAGRAPH_FREE (W1) ; \
LAGRAPH_FREE (P) ; \
LAGRAPH_FREE (D) ;
GrB_Info LAGraph_tricount // count # of triangles
(
int64_t *ntri, // # of triangles
const int method, // 1 to 6, see above
int sorting, // 0: no sort
// 1: sort by degree, ascending order
// -1: sort by degree, descending order
// 2: auto selection: no sort if rule is not
// triggered. Otherwise: sort in ascending order
// for methods 3 and 5, descending ordering for
// methods 4 and 6.
const int64_t *degree, // degree of each node, may be NULL if sorting==0.
// of size n, unmodified.
const GrB_Matrix A_in // input matrix, must be symmetric, no diag entries
)
{
//--------------------------------------------------------------------------
// check inputs and initialize
//--------------------------------------------------------------------------
// NOTE(review): every LAGr_* call below is an error-checking macro; on
// failure it runs LAGRAPH_FREE_ALL (freeing C, L, T, U, W0, W1, P, D) and
// returns from this function with the GraphBLAS error code.
GrB_Info info ;
GrB_Index n ;
GrB_Matrix C = NULL, L = NULL, U = NULL, T = NULL, A = NULL ;
int64_t *P = NULL, *D = NULL, *W0 = NULL, *W1 = NULL ;
LAGr_Matrix_nrows (&n, A_in) ;
#if defined ( GxB_SUITESPARSE_GRAPHBLAS ) \
&& ( GxB_IMPLEMENTATION >= GxB_VERSION (3,2,0) )
// the PAIR function is f(x,y)=1, ignoring input values and type
GrB_Descriptor desc_s = GrB_DESC_S ;
GrB_Descriptor desc_st1 = GrB_DESC_ST1 ;
GrB_Semiring semiring = GxB_PLUS_PAIR_INT64 ;
#else
// f(x,y)=x*y, so x and y must be 1 to compute the correct count, and
// thus the input matrix A must be binary.
GrB_Descriptor desc_s = NULL ;
GrB_Descriptor desc_st1 = LAGraph_desc_otoo ;
GrB_Semiring semiring = LAGraph_PLUS_TIMES_INT64 ;
#endif
GrB_Monoid sum = LAGraph_PLUS_INT64_MONOID ;
LAGr_Matrix_new (&C, GrB_INT64, n, n) ;
//--------------------------------------------------------------------------
// heuristic sort rule
//--------------------------------------------------------------------------
if (sorting == 2)
{
// auto selection of sorting method
sorting = 0 ; // default is not to sort
if (method >= 3 && method <= 6)
{
// This rule is very similar to Scott Beamer's rule in the GAP TC
// benchmark, except that it is extended to handle the ascending
// sort needed by methods 3 and 5. It also uses a stricter rule,
// since the performance of triangle counting in GraphBLAS is less
// sensitive to the sorting as compared to the GAP algorithm. This
// is because the dot products in GraphBLAS use binary search if
// one vector is very sparse compared to the other. As a result,
// GraphBLAS needs the sort for fewer matrices, as compared to the
// GAP algorithm.
// With this rule, the GAP-kron and GAP-twitter matrices are
// sorted, and the others remain unsorted. With the rule in the
// GAP tc.cc benchmark, GAP-web is also sorted, but it is not
// sorted here.
#define NSAMPLES 1000
GrB_Index nvals ;
LAGr_Matrix_nvals (&nvals, A_in) ;
// only consider sorting if the graph is large enough and its
// average degree (nvals/n) is at least 10
if (n > NSAMPLES && ((double) nvals / ((double) n)) >= 10)
{
// pick 1000 nodes at random and determine their degree
// struct drand48_data buffer ;
// srand48_r ((long int) n, &buffer) ;
// seeding with n makes the sampling deterministic per graph
uint64_t seed = n ;
int64_t samples [NSAMPLES] ;
int64_t dsum = 0 ;
for (int k = 0 ; k < NSAMPLES ; k++)
{
uint64_t result = LAGraph_rand64 (&seed) ;
// lrand48_r (&buffer, &result) ;
int64_t i = result % n ;
int64_t d = degree [i] ;
samples [k] = d ;
dsum += d ;
}
// find the average degree
double sample_average = ((double) dsum) / NSAMPLES ;
// find the median degree
GB_qsort_1a (samples, NSAMPLES) ;
double sample_median = (double) samples [NSAMPLES/2] ;
// NOTE(review): debug/telemetry output below (several printf
// calls in this function); confirm they are intended to remain
// in library builds.
printf ("average degree: %g\n", sample_average) ;
printf ("median degree: %g\n", sample_median) ;
// sort if the average degree is very high compared to the
// median
if (sample_average > 4 * sample_median)
{
switch (method)
{
case 3: sorting = 1 ; break ; // sort ascending
case 4: sorting = -1 ; break ; // sort descending
case 5: sorting = 1 ; break ; // sort ascending
case 6: sorting = -1 ; break ; // sort descending
default: sorting = 0 ; break ; // no sort
}
}
}
}
printf ("auto sorting: %d: ", sorting) ;
if (sorting == 0) printf ("none") ;
else if (sorting == -1) printf ("descending") ;
else if (sorting == 1) printf ("ascending") ;
printf ("\n") ;
}
//--------------------------------------------------------------------------
// sort the input matrix, if requested
//--------------------------------------------------------------------------
if (sorting != 0)
{
// decide how many threads to use
#define CHUNK (64*1024)
int nthreads = LAGraph_get_nthreads ( ) ;
nthreads = LAGRAPH_MIN (nthreads, n/CHUNK) ;
nthreads = LAGRAPH_MAX (nthreads, 1) ;
// allocate workspace
// P: permutation; D: (signed) degree keys; W0/W1: merge-sort scratch
P = LAGraph_malloc (n, sizeof (int64_t)) ;
D = LAGraph_malloc (n, sizeof (int64_t)) ;
W0 = LAGraph_malloc (n, sizeof (int64_t)) ;
W1 = LAGraph_malloc (n, sizeof (int64_t)) ;
if (P == NULL || D == NULL || W0 == NULL || W1 == NULL)
{
// out of memory
LAGRAPH_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
// construct the pair [D,P] to sort
if (sorting > 0)
{
printf ("sort ascending\n") ;
// sort [D,P] in ascending order of degree, tie-breaking on P
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t k = 0 ; k < n ; k++)
{
D [k] = degree [k] ;
P [k] = k ;
}
}
else
{
printf ("sort descending\n") ;
// sort [D,P] in descending order of degree, tie-breaking on P
// (descending is obtained by negating the keys and sorting ascending)
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t k = 0 ; k < n ; k++)
{
D [k] = -degree [k] ;
P [k] = k ;
}
}
// for (int64_t k = 0 ; k < n ; k++)
// {
// printf ("before [%3ld %3ld]\n", D [k], P [k]) ;
// }
GB_msort_2 (D, P, W0, W1, n, nthreads) ;
// printf ("\n") ;
// for (int64_t k = 0 ; k < n ; k++)
// {
// printf ("after [%3ld %3ld]\n", D [k], P [k]) ;
// }
// T = A_in (P,P) and typecast to boolean
LAGr_Matrix_new (&T, GrB_BOOL, n, n) ;
LAGr_extract (T, NULL, NULL, A_in, P, n, P, n, NULL) ;
A = T ;
}
else
{
// use the input matrix as-is
A = A_in ;
}
#if 0
printf ("permuted:\n") ;
GrB_Index ignore ;
GrB_Matrix_nvals (&ignore, A) ;
GxB_print (A, 3) ;
// compute the degree of each node (TODO: make this an LAGraph utility)
GrB_Vector X, D2 ;
LAGr_Vector_new (&X, GrB_BOOL, n) ;
LAGr_Vector_new (&D2, GrB_INT64, n) ;
LAGr_assign (X, NULL, NULL, 0, GrB_ALL, n, NULL) ;
LAGr_assign (D2, NULL, NULL, 0, GrB_ALL, n, NULL) ;
LAGr_vxm (D2, NULL, GrB_PLUS_INT64, GxB_PLUS_PAIR_INT64, X, A, NULL) ;
GxB_print (D2, 3) ;
GrB_free (&X) ;
GrB_Type type ;
GrB_Index n2, nvals2, *Di ;
int64_t *deg ;
// NOTE(review): the "°" on the next line appears to be a mojibake of
// "&deg" (an HTML entity leaked into the source). Harmless while this
// region is under #if 0, but it must be fixed before enabling it.
LAGr_Vector_export (&D2, &type, &n2, &nvals2, &Di, (void **) °, NULL) ;
if (n != n2 || n != nvals2) { printf ("??\n") ; abort ( ) ; }
printf ("\nNew: sorting %d\n", sorting) ;
for (int i = 0 ; i < 67 ; i++)
{
printf ("node: %d degree %ld\n", i, deg [i]) ;
}
#endif
// free sorting workspace (no longer needed once A, or its permutation T,
// is in place)
LAGRAPH_FREE (W0) ;
LAGRAPH_FREE (W1) ;
LAGRAPH_FREE (D) ;
LAGRAPH_FREE (P) ;
//--------------------------------------------------------------------------
// count triangles
//--------------------------------------------------------------------------
switch (method)
{
#if 0
// case 0: // minitri: ntri = nnz (A*E == 2) / 3
// This method requires the incidence matrix E. It is very slow
// compared to the other methods. The construction of E was done
// in the Test/Tricount/*.c driver, and it hasn't been added here.
LAGr_Matrix_ncols (&ne, E) ;
LAGr_free (&C) ;
LAGr_Matrix_new (&C, GrB_INT64, n, ne) ;
LAGr_mxm (C, NULL, NULL, semiring, A, E, NULL) ;
LAGr_Matrix_new (&S, GrB_BOOL, n, ne) ;
LAGr_apply (S, NULL, NULL, LAGraph_ISTWO_INT64, C, NULL) ;
LAGr_reduce (ntri, NULL, sum, S, NULL) ;
(*ntri) /= 3 ;
break ;
#endif
case 1: // Burkhardt: ntri = sum (sum ((A^2) .* A)) / 6
LAGr_mxm (C, A, NULL, semiring, A, A, desc_s) ;
LAGr_reduce (ntri, NULL, sum, C, NULL) ;
(*ntri) /= 6 ;
break ;
case 2: // Cohen: ntri = sum (sum ((L * U) .* A)) / 2
LAGRAPH_OK (tricount_prep (&L, &U, A)) ;
LAGr_mxm (C, A, NULL, semiring, L, U, desc_s) ;
LAGr_reduce (ntri, NULL, sum, C, NULL) ;
(*ntri) /= 2 ;
break ;
case 3: // Sandia: ntri = sum (sum ((L * L) .* L))
// using the masked saxpy3 method
LAGRAPH_OK (tricount_prep (&L, NULL, A)) ;
LAGr_mxm (C, L, NULL, semiring, L, L, desc_s) ;
LAGr_reduce (ntri, NULL, sum, C, NULL) ;
break ;
case 4: // Sandia2: ntri = sum (sum ((U * U) .* U))
// using the masked saxpy3 method
LAGRAPH_OK (tricount_prep (NULL, &U, A)) ;
LAGr_mxm (C, U, NULL, semiring, U, U, desc_s) ;
LAGr_reduce (ntri, NULL, sum, C, NULL) ;
break ;
case 5: // SandiaDot: ntri = sum (sum ((L * U') .* L))
// This tends to be the fastest method, for most matrices, but the
// Dot2 method is also very fast.
// using the masked dot product
LAGRAPH_OK (tricount_prep (&L, &U, A)) ;
LAGr_mxm (C, L, NULL, semiring, L, U, desc_st1) ;
LAGr_reduce (ntri, NULL, sum, C, NULL) ;
break ;
case 6: // SandiaDot2: ntri = sum (sum ((U * L') .* U))
// using the masked dot product
LAGRAPH_OK (tricount_prep (&L, &U, A)) ;
LAGr_mxm (C, U, NULL, semiring, U, L, desc_st1) ;
LAGr_reduce (ntri, NULL, sum, C, NULL) ;
break ;
default: // invalid method
LAGRAPH_FREE_ALL ;
return (GrB_INVALID_VALUE) ;
break ;
}
//--------------------------------------------------------------------------
// return result
//--------------------------------------------------------------------------
// LAGRAPH_FREE_ALL frees C, L, T, U and the (already-freed, now-NULL)
// workspace arrays; A itself is either A_in (caller-owned) or T (freed).
LAGRAPH_FREE_ALL ;
return (GrB_SUCCESS) ;
}
|
declare_mapper_messages.c | // RUN: %clang_cc1 -verify -fopenmp -ferror-limit 100 %s
// RUN: %clang_cc1 -verify -fopenmp-simd -ferror-limit 100 %s
// clang -verify fixture: each 'expected-*' comment is a diagnostic directive
// matched against the compiler's output; their text must not be edited.
int temp; // expected-note {{'temp' declared here}}
struct vec { // expected-note {{definition of 'struct vec' is not complete until the closing '}'}}
int len;
#pragma omp declare mapper(id: struct vec v) map(v.len) // expected-error {{incomplete definition of type 'struct vec'}}
double *data;
};
#pragma omp declare mapper // expected-error {{expected '(' after 'declare mapper'}}
#pragma omp declare mapper { // expected-error {{expected '(' after 'declare mapper'}}
#pragma omp declare mapper( // expected-error {{expected a type}} expected-error {{expected declarator on 'omp declare mapper' directive}}
#pragma omp declare mapper(# // expected-error {{expected a type}} expected-error {{expected declarator on 'omp declare mapper' directive}}
#pragma omp declare mapper(struct v // expected-error {{expected declarator on 'omp declare mapper' directive}}
#pragma omp declare mapper(struct vec // expected-error {{expected declarator on 'omp declare mapper' directive}}
#pragma omp declare mapper(S v // expected-error {{unknown type name 'S'}}
#pragma omp declare mapper(struct vec v // expected-error {{expected ')'}} expected-note {{to match this '('}}
#pragma omp declare mapper(aa:struct vec v) // expected-error {{expected at least one clause on '#pragma omp declare mapper' directive}}
#pragma omp declare mapper(bb:struct vec v) private(v) // expected-error {{expected at least one clause on '#pragma omp declare mapper' directive}} // expected-error {{unexpected OpenMP clause 'private' in directive '#pragma omp declare mapper'}}
#pragma omp declare mapper(cc:struct vec v) map(v) ( // expected-warning {{extra tokens at the end of '#pragma omp declare mapper' are ignored}}
#pragma omp declare mapper(++: struct vec v) map(v.len) // expected-error {{illegal OpenMP user-defined mapper identifier}}
#pragma omp declare mapper(id1: struct vec v) map(v.len, temp) // expected-error {{only variable 'v' is allowed in map clauses of this 'omp declare mapper' directive}}
#pragma omp declare mapper(default : struct vec kk) map(kk.data[0:2]) // expected-note {{previous definition is here}}
#pragma omp declare mapper(struct vec v) map(v.len) // expected-error {{redefinition of user-defined mapper for type 'struct vec' with name 'default'}}
#pragma omp declare mapper(int v) map(v) // expected-error {{mapper type must be of struct, union or class type}}
// Exercises diagnostics for '#pragma omp declare mapper' declared at block
// scope, and for the 'mapper(...)' modifier in map/to/from clauses of
// 'omp target' and 'omp target update'. Returns its argument unchanged.
// The 'expected-*' comments are clang -verify directives; do not edit them.
int fun(int arg) {
#pragma omp declare mapper(id: struct vec v) map(v.len)
{
#pragma omp declare mapper(id: struct vec v) map(v.len) // expected-note {{previous definition is here}}
#pragma omp declare mapper(id: struct vec v) map(v.len) // expected-error {{redefinition of user-defined mapper for type 'struct vec' with name 'id'}}
{
#pragma omp declare mapper(id: struct vec v) map(v.len) allocate(v) // expected-error {{unexpected OpenMP clause 'allocate' in directive '#pragma omp declare mapper'}}
struct vec vv, v1;
struct vec arr[10];
double d;
#pragma omp target map(mapper) // expected-error {{use of undeclared identifier 'mapper'}}
{}
#pragma omp target map(mapper:vv) // expected-error {{expected '(' after 'mapper'}}
{}
#pragma omp target map(mapper( :vv) // expected-error {{expected expression}} expected-error {{expected ')'}} expected-error {{call to undeclared function 'mapper'}} expected-note {{to match this '('}}
{}
#pragma omp target map(mapper(aa :vv) // expected-error {{use of undeclared identifier 'aa'}} expected-error {{expected ')'}} expected-error {{call to undeclared function 'mapper'}} expected-note {{to match this '('}}
{}
#pragma omp target map(mapper(ab) :vv) // expected-error {{missing map type}} expected-error {{cannot find a valid user-defined mapper for type 'struct vec' with name 'ab'}}
{}
#pragma omp target map(mapper(ab) :arr[0:2]) // expected-error {{missing map type}} expected-error {{cannot find a valid user-defined mapper for type 'struct vec' with name 'ab'}}
{}
#pragma omp target map(mapper(aa) :vv) // expected-error {{missing map type}}
{}
#pragma omp target map(mapper(aa) to:d) // expected-error {{mapper type must be of struct, union or class type}}
{}
#pragma omp target map(mapper(aa) to:vv) map(close mapper(aa) from:v1) map(mapper(aa) to:arr[0])
{}
#pragma omp target update to(mapper) // expected-error {{expected '(' after 'mapper'}} expected-error {{expected expression}} expected-error {{expected at least one 'to' clause or 'from' clause specified to '#pragma omp target update'}}
#pragma omp target update to(mapper() // expected-error {{illegal OpenMP user-defined mapper identifier}} expected-error {{expected expression}} expected-error {{expected at least one 'to' clause or 'from' clause specified to '#pragma omp target update'}}
#pragma omp target update to(mapper:vv) // expected-error {{expected '(' after 'mapper'}} expected-error {{expected at least one 'to' clause or 'from' clause specified to '#pragma omp target update'}}
#pragma omp target update to(mapper(:vv) // expected-error {{illegal OpenMP user-defined mapper identifier}} expected-error {{expected at least one 'to' clause or 'from' clause specified to '#pragma omp target update'}}
#pragma omp target update to(mapper(aa :vv) // expected-error {{expected ')'}} expected-note {{to match this '('}} expected-error {{expected at least one 'to' clause or 'from' clause specified to '#pragma omp target update'}}
#pragma omp target update to(mapper(ab):vv) // expected-error {{cannot find a valid user-defined mapper for type 'struct vec' with name 'ab'}} expected-error {{expected at least one 'to' clause or 'from' clause specified to '#pragma omp target update'}}
#pragma omp target update to(mapper(ab):arr[0:2]) // expected-error {{cannot find a valid user-defined mapper for type 'struct vec' with name 'ab'}} expected-error {{expected at least one 'to' clause or 'from' clause specified to '#pragma omp target update'}}
#pragma omp target update to(mapper(aa) a:vv) // expected-warning {{missing ':' after ) - ignoring}}
#pragma omp target update to(mapper(aa):d) // expected-error {{mapper type must be of struct, union or class type}} expected-error {{expected at least one 'to' clause or 'from' clause specified to '#pragma omp target update'}}
#pragma omp target update to(mapper(aa):vv) to(mapper(aa):arr[0])
#pragma omp target update from(mapper) // expected-error {{expected '(' after 'mapper'}} expected-error {{expected expression}} expected-error {{expected at least one 'to' clause or 'from' clause specified to '#pragma omp target update'}}
#pragma omp target update from(mapper() // expected-error {{illegal OpenMP user-defined mapper identifier}} expected-error {{expected expression}} expected-error {{expected at least one 'to' clause or 'from' clause specified to '#pragma omp target update'}}
#pragma omp target update from(mapper:vv) // expected-error {{expected '(' after 'mapper'}} expected-error {{expected at least one 'to' clause or 'from' clause specified to '#pragma omp target update'}}
#pragma omp target update from(mapper(:vv) // expected-error {{illegal OpenMP user-defined mapper identifier}} expected-error {{expected at least one 'to' clause or 'from' clause specified to '#pragma omp target update'}}
#pragma omp target update from(mapper(aa :vv) // expected-error {{expected ')'}} expected-note {{to match this '('}} expected-error {{expected at least one 'to' clause or 'from' clause specified to '#pragma omp target update'}}
#pragma omp target update from(mapper(ab):vv) // expected-error {{cannot find a valid user-defined mapper for type 'struct vec' with name 'ab'}} expected-error {{expected at least one 'to' clause or 'from' clause specified to '#pragma omp target update'}}
#pragma omp target update from(mapper(ab):arr[0:2]) // expected-error {{cannot find a valid user-defined mapper for type 'struct vec' with name 'ab'}} expected-error {{expected at least one 'to' clause or 'from' clause specified to '#pragma omp target update'}}
#pragma omp target update from(mapper(aa) a:vv) // expected-warning {{missing ':' after ) - ignoring}}
#pragma omp target update from(mapper(aa):d) // expected-error {{mapper type must be of struct, union or class type}} expected-error {{expected at least one 'to' clause or 'from' clause specified to '#pragma omp target update'}}
#pragma omp target update from(mapper(aa):vv) from(mapper(aa):arr[0])
}
}
return arg;
}
|
pt.c | /* Handle parameterized types (templates) for GNU -*- C++ -*-.
Copyright (C) 1992-2020 Free Software Foundation, Inc.
Written by Ken Raeburn (raeburn@cygnus.com) while at Watchmaker Computing.
Rewritten by Jason Merrill (jason@cygnus.com).
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
/* Known bugs or deficiencies include:
all methods must be provided in header files; can't use a source
file that contains only the method templates and "just win". */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "cp-tree.h"
#include "timevar.h"
#include "stringpool.h"
#include "varasm.h"
#include "attribs.h"
#include "stor-layout.h"
#include "intl.h"
#include "c-family/c-objc.h"
#include "cp-objcp-common.h"
#include "toplev.h"
#include "tree-iterator.h"
#include "type-utils.h"
#include "gimplify.h"
#include "gcc-rich-location.h"
#include "selftest.h"
#include "target.h"
/* The type of functions taking a tree, and some additional data, and
returning an int. */
typedef int (*tree_fn_t) (tree, void*);
/* The PENDING_TEMPLATES is a TREE_LIST of templates whose
instantiations have been deferred, either because their definitions
were not yet available, or because we were putting off doing the work. */
struct GTY ((chain_next ("%h.next"))) pending_template
{
struct pending_template *next; /* next entry in the pending-templates list */
struct tinst_level *tinst; /* instantiation context saved for this entry */
};
/* Head and tail of the deferred-instantiation queue (see struct
   pending_template above).  */
static GTY(()) struct pending_template *pending_templates;
static GTY(()) struct pending_template *last_pending_template;
/* Nonzero while a template parameter list is being processed.  */
int processing_template_parmlist;
/* Count of template headers — presumably maintained by
   note_template_header; confirm against its uses.  */
static int template_header_count;
/* GC-rooted saved-trees list; purpose not visible in this chunk.  */
static GTY(()) tree saved_trees;
/* Per-level counts used around inline template parameter pushing —
   presumably paired with push_inline_template_parms_recursive; verify.  */
static vec<int> inline_parm_levels;
/* The innermost entry of the current template-instantiation context
   (see reopen_tinst_level).  */
static GTY(()) struct tinst_level *current_tinst_level;
/* Stack of current_function_decl values saved by push_access_scope and
   restored by pop_access_scope.  */
static GTY(()) vec<tree, va_gc> *saved_access_scope;
/* Live only within one (recursive) call to tsubst_expr. We use
this to pass the statement expression node from the STMT_EXPR
to the EXPR_STMT that is its result. */
static tree cur_stmt_expr;
// -------------------------------------------------------------------------- //
// Local Specialization Stack
//
// Implementation of the RAII helper for creating new local
// specializations.
/* Install the local-specializations table requested by POLICY, remembering
   the previously active table so the destructor can reinstate it:
   lss_nop keeps the current table; lss_blank installs a fresh empty one
   (also used when there is no table yet); lss_copy duplicates the current
   table.  */
local_specialization_stack::local_specialization_stack (lss_policy policy)
  : saved (local_specializations)
{
  if (policy != lss_nop)
    {
      if (policy != lss_blank && saved)
	local_specializations = new hash_map<tree, tree> (*saved);
      else
	local_specializations = new hash_map<tree, tree>;
    }
}
/* Discard any table the constructor installed and reinstate the one that
   was active when this stack entry was created.  */
local_specialization_stack::~local_specialization_stack ()
{
  if (local_specializations == saved)
    return;
  delete local_specializations;
  local_specializations = saved;
}
/* True if we've recursed into fn_type_unification too many times. */
static bool excessive_deduction_depth;
/* One entry in the specialization hash tables: a template, an argument
   vector, and the specialization produced from them.  */
struct GTY((for_user)) spec_entry
{
tree tmpl; /* the template */
tree args; /* the template arguments */
tree spec; /* the resulting specialization */
};
/* Hash/equality callbacks for spec_entry, used by the specialization
   tables declared just below.  */
struct spec_hasher : ggc_ptr_hash<spec_entry>
{
static hashval_t hash (spec_entry *);
static bool equal (spec_entry *, spec_entry *);
};
static GTY (()) hash_table<spec_hasher> *decl_specializations;
static GTY (()) hash_table<spec_hasher> *type_specializations;
/* Contains canonical template parameter types. The vector is indexed by
the TEMPLATE_TYPE_IDX of the template parameter. Each element is a
TREE_LIST, whose TREE_VALUEs contain the canonical template
parameters of various types and levels. */
static GTY(()) vec<tree, va_gc> *canonical_template_parms;
#define UNIFY_ALLOW_NONE 0
#define UNIFY_ALLOW_MORE_CV_QUAL 1
#define UNIFY_ALLOW_LESS_CV_QUAL 2
#define UNIFY_ALLOW_DERIVED 4
#define UNIFY_ALLOW_INTEGER 8
#define UNIFY_ALLOW_OUTER_LEVEL 16
#define UNIFY_ALLOW_OUTER_MORE_CV_QUAL 32
#define UNIFY_ALLOW_OUTER_LESS_CV_QUAL 64
/* Possible outcomes of get_template_base (declared below).  */
enum template_base_result {
tbr_incomplete_type, /* the type examined was incomplete */
tbr_ambiguous_baseclass, /* more than one base matched */
tbr_success /* a unique base was found */
};
static bool resolve_overloaded_unification (tree, tree, tree, tree,
unification_kind_t, int,
bool);
static int try_one_overload (tree, tree, tree, tree, tree,
unification_kind_t, int, bool, bool);
static int unify (tree, tree, tree, tree, int, bool);
static void add_pending_template (tree);
static tree reopen_tinst_level (struct tinst_level *);
static tree tsubst_initializer_list (tree, tree);
static tree get_partial_spec_bindings (tree, tree, tree);
static tree coerce_template_parms (tree, tree, tree, tsubst_flags_t,
bool, bool);
static tree coerce_innermost_template_parms (tree, tree, tree, tsubst_flags_t,
bool, bool);
static void tsubst_enum (tree, tree, tree);
static tree add_to_template_args (tree, tree);
static bool check_instantiated_args (tree, tree, tsubst_flags_t);
static int check_non_deducible_conversion (tree, tree, int, int,
struct conversion **, bool);
static int maybe_adjust_types_for_deduction (unification_kind_t, tree*, tree*,
tree);
static int type_unification_real (tree, tree, tree, const tree *,
unsigned int, int, unification_kind_t,
vec<deferred_access_check, va_gc> **,
bool);
static void note_template_header (int);
static tree convert_nontype_argument_function (tree, tree, tsubst_flags_t);
static tree convert_nontype_argument (tree, tree, tsubst_flags_t);
static tree convert_template_argument (tree, tree, tree,
tsubst_flags_t, int, tree);
static tree for_each_template_parm (tree, tree_fn_t, void*,
hash_set<tree> *, bool, tree_fn_t = NULL);
static tree expand_template_argument_pack (tree);
static tree build_template_parm_index (int, int, int, tree, tree);
static bool inline_needs_template_parms (tree, bool);
static void push_inline_template_parms_recursive (tree, int);
static tree reduce_template_parm_level (tree, tree, int, tree, tsubst_flags_t);
static int mark_template_parm (tree, void *);
static int template_parm_this_level_p (tree, void *);
static tree tsubst_friend_function (tree, tree);
static tree tsubst_friend_class (tree, tree);
static int can_complete_type_without_circularity (tree);
static tree get_bindings (tree, tree, tree, bool);
static int template_decl_level (tree);
static int check_cv_quals_for_unify (int, tree, tree);
static int unify_pack_expansion (tree, tree, tree,
tree, unification_kind_t, bool, bool);
static tree copy_template_args (tree);
static tree tsubst_template_parms (tree, tree, tsubst_flags_t);
tree most_specialized_partial_spec (tree, tsubst_flags_t);
static tree tsubst_aggr_type (tree, tree, tsubst_flags_t, tree, int);
static tree tsubst_arg_types (tree, tree, tree, tsubst_flags_t, tree);
static tree tsubst_function_type (tree, tree, tsubst_flags_t, tree);
static bool check_specialization_scope (void);
static tree process_partial_specialization (tree);
static void set_current_access_from_decl (tree);
static enum template_base_result get_template_base (tree, tree, tree, tree,
bool , tree *);
static tree try_class_unification (tree, tree, tree, tree, bool);
static bool class_nttp_const_wrapper_p (tree t);
static int coerce_template_template_parms (tree, tree, tsubst_flags_t,
tree, tree);
static bool template_template_parm_bindings_ok_p (tree, tree);
static void tsubst_default_arguments (tree, tsubst_flags_t);
static tree for_each_template_parm_r (tree *, int *, void *);
static tree copy_default_args_to_explicit_spec_1 (tree, tree);
static void copy_default_args_to_explicit_spec (tree);
static bool invalid_nontype_parm_type_p (tree, tsubst_flags_t);
static bool dependent_template_arg_p (tree);
static bool any_template_arguments_need_structural_equality_p (tree);
static bool dependent_type_p_r (tree);
static tree tsubst_copy (tree, tree, tsubst_flags_t, tree);
static tree tsubst_decl (tree, tree, tsubst_flags_t);
static void perform_typedefs_access_check (tree tmpl, tree targs);
static void append_type_to_template_for_access_check_1 (tree, tree, tree,
location_t);
static tree listify (tree);
static tree listify_autos (tree, tree);
static tree tsubst_template_parm (tree, tree, tsubst_flags_t);
static tree instantiate_alias_template (tree, tree, tsubst_flags_t);
static bool complex_alias_template_p (const_tree tmpl);
static tree get_underlying_template (tree);
static tree tsubst_attributes (tree, tree, tsubst_flags_t, tree);
static tree canonicalize_expr_argument (tree, tsubst_flags_t);
static tree make_argument_pack (tree);
static void register_parameter_specializations (tree, tree);
static tree enclosing_instantiation_of (tree tctx);
/* Make the current scope suitable for access checking when we are
processing T. T can be FUNCTION_DECL for instantiated function
template, VAR_DECL for static member variable, or TYPE_DECL for
alias template (needed by instantiate_decl). */
void
push_access_scope (tree t)
{
  gcc_assert (VAR_OR_FUNCTION_DECL_P (t) || TREE_CODE (t) == TYPE_DECL);

  /* Enter the class whose accessibility rules should govern: the friend
     context if there is one, otherwise T's own class, otherwise the
     global scope.  */
  tree fctx = DECL_FRIEND_CONTEXT (t);
  if (fctx)
    push_nested_class (fctx);
  else if (DECL_CLASS_SCOPE_P (t))
    push_nested_class (DECL_CONTEXT (t));
  else
    push_to_top_level ();

  /* For functions, also make T the current function, saving the old one
     so pop_access_scope can restore it.  */
  if (TREE_CODE (t) == FUNCTION_DECL)
    {
      vec_safe_push (saved_access_scope, current_function_decl);
      current_function_decl = t;
    }
}
/* Restore the scope set up by push_access_scope. T is the node we
are processing. */
void
pop_access_scope (tree t)
{
  /* Undo the current_function_decl switch made for functions.  */
  if (TREE_CODE (t) == FUNCTION_DECL)
    current_function_decl = saved_access_scope->pop ();

  /* Leave whichever scope push_access_scope entered.  */
  bool was_class_scope = (DECL_FRIEND_CONTEXT (t) || DECL_CLASS_SCOPE_P (t));
  if (was_class_scope)
    pop_nested_class ();
  else
    pop_from_top_level ();
}
/* Do any processing required when DECL (a member template
declaration) is finished. Returns the TEMPLATE_DECL corresponding
to DECL, unless it is a specialization, in which case the DECL
itself is returned. */
tree
finish_member_template_decl (tree decl)
{
  if (decl == error_mark_node)
    return error_mark_node;

  gcc_assert (DECL_P (decl));

  if (TREE_CODE (decl) == TYPE_DECL)
    {
      tree type = TREE_TYPE (decl);
      if (type == error_mark_node)
	return error_mark_node;
      /* Only a non-specialized class template member is of interest
	 here; anything else yields no template.  */
      if (!MAYBE_CLASS_TYPE_P (type)
	  || !CLASSTYPE_TEMPLATE_INFO (type)
	  || CLASSTYPE_TEMPLATE_SPECIALIZATION (type))
	return NULL_TREE;
      tree tmpl = CLASSTYPE_TI_TEMPLATE (type);
      check_member_template (tmpl);
      return tmpl;
    }

  if (TREE_CODE (decl) == FIELD_DECL)
    error_at (DECL_SOURCE_LOCATION (decl),
	      "data member %qD cannot be a member template", decl);
  else if (DECL_TEMPLATE_INFO (decl))
    {
      /* A specialization is returned as-is; otherwise hand back the
	 TEMPLATE_DECL after checking it.  */
      if (DECL_TEMPLATE_SPECIALIZATION (decl))
	return decl;
      check_member_template (DECL_TI_TEMPLATE (decl));
      return DECL_TI_TEMPLATE (decl);
    }
  else
    error_at (DECL_SOURCE_LOCATION (decl),
	      "invalid member template declaration %qD", decl);

  return error_mark_node;
}
/* Create a template info node. */
tree
build_template_info (tree template_decl, tree template_args)
{
  /* A TEMPLATE_INFO node simply pairs a template with its arguments.  */
  tree info = make_node (TEMPLATE_INFO);
  TI_TEMPLATE (info) = template_decl;
  TI_ARGS (info) = template_args;
  return info;
}
/* Return the template info node corresponding to T, whatever T is. */
tree
get_template_info (const_tree t)
{
  /* Namespaces and parameters never carry template info.  */
  if (!t
      || t == error_mark_node
      || TREE_CODE (t) == NAMESPACE_DECL
      || TREE_CODE (t) == PARM_DECL)
    return NULL;

  tree info = NULL_TREE;
  if (DECL_P (t) && DECL_LANG_SPECIFIC (t))
    info = DECL_TEMPLATE_INFO (t);

  /* For an implicit typedef without its own info, look at the type.  */
  if (!info && DECL_IMPLICIT_TYPEDEF_P (t))
    t = TREE_TYPE (t);

  if (OVERLOAD_TYPE_P (t))
    info = TYPE_TEMPLATE_INFO (t);
  else if (TREE_CODE (t) == BOUND_TEMPLATE_TEMPLATE_PARM)
    info = TEMPLATE_TEMPLATE_PARM_TEMPLATE_INFO (t);

  return info;
}
/* Returns the template nesting level of the indicated class TYPE.

   For example, in:
     template <class T>
     struct A
     {
       template <class U>
       struct B {};
     };

   A<T>::B<U> has depth two, while A<T> has depth one.
   Both A<T>::B<int> and A<int>::B<U> have depth one, if
   they are instantiations, not specializations.

   This function is guaranteed to return 0 if passed NULL_TREE so
   that, for example, `template_class_depth (current_class_type)' is
   always safe.  */

int
template_class_depth (tree type)
{
  int depth;

  /* Walk outward through the enclosing contexts, counting each level
     that is a primary template with dependent innermost arguments.  */
  for (depth = 0; type && TREE_CODE (type) != NAMESPACE_DECL; )
    {
      tree tinfo = get_template_info (type);

      if (tinfo && PRIMARY_TEMPLATE_P (TI_TEMPLATE (tinfo))
	  && uses_template_parms (INNERMOST_TEMPLATE_ARGS (TI_ARGS (tinfo))))
	++depth;

      if (DECL_P (type))
	{
	  /* Prefer the friend context when there is one.  */
	  if (tree fctx = DECL_FRIEND_CONTEXT (type))
	    type = fctx;
	  else
	    type = CP_DECL_CONTEXT (type);
	}
      else if (LAMBDA_TYPE_P (type) && LAMBDA_TYPE_EXTRA_SCOPE (type))
	type = LAMBDA_TYPE_EXTRA_SCOPE (type);
      else
	type = CP_TYPE_CONTEXT (type);
    }

  return depth;
}
/* Return TRUE if NODE instantiates a template that has arguments of
   its own, be it directly a primary template or indirectly through a
   partial specializations.  */

static bool
instantiates_primary_template_p (tree node)
{
  tree tinfo = get_template_info (node);
  if (!tinfo)
    return false;

  tree tmpl = TI_TEMPLATE (tinfo);
  if (PRIMARY_TEMPLATE_P (tmpl))
    return true;

  if (!DECL_TEMPLATE_SPECIALIZATION (tmpl))
    return false;

  /* So now we know we have a specialization, but it could be a full
     or a partial specialization.  To tell which, compare the depth of
     its template arguments with those of its context.  A partial
     specialization has deeper arguments than its enclosing context.  */
  tree ctxt = DECL_CONTEXT (tmpl);
  tree ctinfo = get_template_info (ctxt);
  if (!ctinfo)
    return true;

  return (TMPL_ARGS_DEPTH (TI_ARGS (tinfo))
	  > TMPL_ARGS_DEPTH (TI_ARGS (ctinfo)));
}
/* Subroutine of maybe_begin_member_template_processing.
   Returns true if processing DECL needs us to push template parms.
   That is the case when DECL's most general template has more
   parameter levels than are already in effect.  */

static bool
inline_needs_template_parms (tree decl, bool nsdmi)
{
  if (!decl || (!nsdmi && ! DECL_TEMPLATE_INFO (decl)))
    return false;

  return (TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (most_general_template (decl)))
	  > (processing_template_decl + DECL_TEMPLATE_SPECIALIZATION (decl)));
}
/* Subroutine of maybe_begin_member_template_processing.
   Push the template parms in PARMS, starting from LEVELS steps into the
   chain, and ending at the beginning, since template parms are listed
   innermost first.  */

static void
push_inline_template_parms_recursive (tree parmlist, int levels)
{
  tree parms = TREE_VALUE (parmlist);
  int i;

  /* Recurse first so that outer levels are pushed before inner ones.  */
  if (levels > 1)
    push_inline_template_parms_recursive (TREE_CHAIN (parmlist), levels - 1);

  ++processing_template_decl;
  current_template_parms
    = tree_cons (size_int (processing_template_decl),
		 parms, current_template_parms);
  /* Mark this level as re-pushed for an inline definition so it can be
     recognized later.  */
  TEMPLATE_PARMS_FOR_INLINE (current_template_parms) = 1;

  /* An empty parameter vector corresponds to a template<> header.  */
  begin_scope (TREE_VEC_LENGTH (parms) ? sk_template_parms : sk_template_spec,
	       NULL);
  for (i = 0; i < TREE_VEC_LENGTH (parms); ++i)
    {
      tree parm = TREE_VALUE (TREE_VEC_ELT (parms, i));

      if (error_operand_p (parm))
	continue;

      gcc_assert (DECL_P (parm));

      switch (TREE_CODE (parm))
	{
	case TYPE_DECL:
	case TEMPLATE_DECL:
	  pushdecl (parm);
	  break;

	case PARM_DECL:
	  /* Push the CONST_DECL.  */
	  pushdecl (TEMPLATE_PARM_DECL (DECL_INITIAL (parm)));
	  break;

	default:
	  gcc_unreachable ();
	}
    }
}
/* Restore the template parameter context for a member template, a
   friend template defined in a class definition, or a non-template
   member of template class.  */

void
maybe_begin_member_template_processing (tree decl)
{
  tree parms;
  int levels = 0;
  bool nsdmi = TREE_CODE (decl) == FIELD_DECL;

  if (nsdmi)
    {
      /* For an NSDMI, work from the enclosing class's template.  */
      tree ctx = DECL_CONTEXT (decl);
      decl = (CLASSTYPE_TEMPLATE_INFO (ctx)
	      /* Disregard full specializations (c++/60999).  */
	      && uses_template_parms (ctx)
	      ? CLASSTYPE_TI_TEMPLATE (ctx) : NULL_TREE);
    }

  if (inline_needs_template_parms (decl, nsdmi))
    {
      parms = DECL_TEMPLATE_PARMS (most_general_template (decl));
      levels = TMPL_PARMS_DEPTH (parms) - processing_template_decl;

      if (DECL_TEMPLATE_SPECIALIZATION (decl))
	{
	  /* A specialization contributes one level fewer; skip its
	     innermost parameter list.  */
	  --levels;
	  parms = TREE_CHAIN (parms);
	}

      push_inline_template_parms_recursive (parms, levels);
    }

  /* Remember how many levels of template parameters we pushed so that
     we can pop them later.  */
  inline_parm_levels.safe_push (levels);
}
/* Undo the effects of maybe_begin_member_template_processing.  */

void
maybe_end_member_template_processing (void)
{
  int i;
  int last;

  if (inline_parm_levels.length () == 0)
    return;

  /* Pop exactly as many levels as the matching begin call pushed
     (possibly zero).  */
  last = inline_parm_levels.pop ();
  for (i = 0; i < last; ++i)
    {
      --processing_template_decl;
      current_template_parms = TREE_CHAIN (current_template_parms);
      poplevel (0, 0, 0);
    }
}
/* Return a new template argument vector which contains all of ARGS,
   but has as its innermost set of arguments the EXTRA_ARGS.  */

static tree
add_to_template_args (tree args, tree extra_args)
{
  tree new_args;
  int extra_depth;
  int i;
  int j;

  /* With nothing to prepend (or erroneous extra args), just hand back
     EXTRA_ARGS unchanged.  */
  if (args == NULL_TREE || extra_args == error_mark_node)
    return extra_args;

  extra_depth = TMPL_ARGS_DEPTH (extra_args);
  new_args = make_tree_vec (TMPL_ARGS_DEPTH (args) + extra_depth);

  /* Copy the levels of ARGS first (outermost), then the levels of
     EXTRA_ARGS (innermost).  Levels are 1-based.  */
  for (i = 1; i <= TMPL_ARGS_DEPTH (args); ++i)
    SET_TMPL_ARGS_LEVEL (new_args, i, TMPL_ARGS_LEVEL (args, i));

  for (j = 1; j <= extra_depth; ++j, ++i)
    SET_TMPL_ARGS_LEVEL (new_args, i, TMPL_ARGS_LEVEL (extra_args, j));

  return new_args;
}
/* Like add_to_template_args, but only the outermost ARGS are added to
   the EXTRA_ARGS.  In particular, all but TMPL_ARGS_DEPTH
   (EXTRA_ARGS) levels are added.  This function is used to combine
   the template arguments from a partial instantiation with the
   template arguments used to attain the full instantiation from the
   partial instantiation.  */

tree
add_outermost_template_args (tree args, tree extra_args)
{
  tree new_args;

  /* If there are more levels of EXTRA_ARGS than there are ARGS,
     something very fishy is going on.  */
  gcc_assert (TMPL_ARGS_DEPTH (args) >= TMPL_ARGS_DEPTH (extra_args));

  /* If *all* the new arguments will be the EXTRA_ARGS, just return
     them.  */
  if (TMPL_ARGS_DEPTH (args) == TMPL_ARGS_DEPTH (extra_args))
    return extra_args;

  /* For the moment, we make ARGS look like it contains fewer levels.
     NOTE: this temporarily mutates ARGS in place; it is restored
     below, so ARGS is unchanged on return.  */
  TREE_VEC_LENGTH (args) -= TMPL_ARGS_DEPTH (extra_args);

  new_args = add_to_template_args (args, extra_args);

  /* Now, we restore ARGS to its full dimensions.  */
  TREE_VEC_LENGTH (args) += TMPL_ARGS_DEPTH (extra_args);

  return new_args;
}
/* Return the N levels of innermost template arguments from the ARGS.  */

tree
get_innermost_template_args (tree args, int n)
{
  tree new_args;
  int extra_levels;
  int i;

  gcc_assert (n >= 0);

  /* If N is 1, just return the innermost set of template arguments.  */
  if (n == 1)
    return TMPL_ARGS_LEVEL (args, TMPL_ARGS_DEPTH (args));

  /* If we're not removing anything, just return the arguments we were
     given.  */
  extra_levels = TMPL_ARGS_DEPTH (args) - n;
  gcc_assert (extra_levels >= 0);
  if (extra_levels == 0)
    return args;

  /* Make a new set of arguments, not containing the outer arguments.
     Level i of the result is level i + EXTRA_LEVELS of ARGS.  */
  new_args = make_tree_vec (n);
  for (i = 1; i <= n; ++i)
    SET_TMPL_ARGS_LEVEL (new_args, i,
			 TMPL_ARGS_LEVEL (args, i + extra_levels));

  return new_args;
}
/* The inverse of get_innermost_template_args: Return all but the innermost
   EXTRA_LEVELS levels of template arguments from the ARGS.  */

static tree
strip_innermost_template_args (tree args, int extra_levels)
{
  tree new_args;
  int n = TMPL_ARGS_DEPTH (args) - extra_levels;
  int i;

  gcc_assert (n >= 0);

  /* If N is 1, just return the outermost set of template arguments.  */
  if (n == 1)
    return TMPL_ARGS_LEVEL (args, 1);

  /* If we're not removing anything, just return the arguments we were
     given.  */
  gcc_assert (extra_levels >= 0);
  if (extra_levels == 0)
    return args;

  /* Make a new set of arguments, not containing the inner arguments.
     Since levels are ordered outermost-first, the first N levels are
     copied verbatim.  */
  new_args = make_tree_vec (n);
  for (i = 1; i <= n; ++i)
    SET_TMPL_ARGS_LEVEL (new_args, i,
			 TMPL_ARGS_LEVEL (args, i));

  return new_args;
}
/* We've got a template header coming up; push to a new level for storing
   the parms.  */

void
begin_template_parm_list (void)
{
  /* We use a non-tag-transparent scope here, which causes pushtag to
     put tags in this scope, rather than in the enclosing class or
     namespace scope.  This is the right thing, since we want
     TEMPLATE_DECLS, and not TYPE_DECLS for template classes.  For a
     global template class, push_template_decl handles putting the
     TEMPLATE_DECL into top-level scope.  For a nested template class,
     e.g.:

       template <class T> struct S1 {
         template <class T> struct S2 {};
       };

     pushtag contains special code to insert the TEMPLATE_DECL for S2
     at the right scope.  */
  begin_scope (sk_template_parms, NULL);
  ++processing_template_decl;
  ++processing_template_parmlist;
  note_template_header (0);

  /* Add a dummy parameter level while we process the parameter list.
     The empty TREE_VEC is a placeholder; the real parameters are
     filled in when the parameter list is complete.  */
  current_template_parms
    = tree_cons (size_int (processing_template_decl),
		 make_tree_vec (0),
		 current_template_parms);
}
/* This routine is called when a specialization is declared.  If it is
   invalid to declare a specialization here, an error is reported and
   false is returned, otherwise this routine will return true.  */

static bool
check_specialization_scope (void)
{
  tree scope = current_scope ();

  /* [temp.expl.spec]

     An explicit specialization shall be declared in the namespace of
     which the template is a member, or, for member templates, in the
     namespace of which the enclosing class or enclosing class
     template is a member.  An explicit specialization of a member
     function, member class or static data member of a class template
     shall be declared in the namespace of which the class template
     is a member.  */
  if (scope && TREE_CODE (scope) != NAMESPACE_DECL)
    {
      error ("explicit specialization in non-namespace scope %qD", scope);
      return false;
    }

  /* [temp.expl.spec]

     In an explicit specialization declaration for a member of a class
     template or a member template that appears in namespace scope,
     the member template and some of its enclosing class templates may
     remain unspecialized, except that the declaration shall not
     explicitly specialize a class member template if its enclosing
     class templates are not explicitly specialized as well.  */
  if (current_template_parms)
    {
      error ("enclosing class templates are not explicitly specialized");
      return false;
    }

  return true;
}
/* We've just seen template <>.  Enter a specialization scope, record
   the header, and check that specializing here is allowed.  Returns
   the result of that check.  */

bool
begin_specialization (void)
{
  begin_scope (sk_template_spec, NULL);
  note_template_header (1);
  return check_specialization_scope ();
}
/* Called at the end of processing a declaration preceded by
   template<>.  Leaves the scope opened by begin_specialization and
   clears the specialization state.  */

void
end_specialization (void)
{
  finish_scope ();
  reset_specialization ();
}
/* Any template <>'s that we have seen thus far are not referring to a
   function specialization.  Reset the header bookkeeping.  */

void
reset_specialization (void)
{
  processing_specialization = 0;
  template_header_count = 0;
}
/* We've just seen a template header.  If SPECIALIZATION is nonzero,
   it was of the form template <>.  Tracks the count of headers seen
   for the current declaration.  */

static void
note_template_header (int specialization)
{
  processing_specialization = specialization;
  template_header_count++;
}
/* We're beginning an explicit instantiation.  Nesting is not
   permitted, as the assertion enforces.  */

void
begin_explicit_instantiation (void)
{
  gcc_assert (!processing_explicit_instantiation);
  processing_explicit_instantiation = true;
}
/* Counterpart to begin_explicit_instantiation: we're leaving an
   explicit instantiation.  */

void
end_explicit_instantiation (void)
{
  gcc_assert (processing_explicit_instantiation);
  processing_explicit_instantiation = false;
}
/* An explicit specialization or partial specialization of TMPL is being
   declared.  Check that the namespace in which the specialization is
   occurring is permissible.  Returns false iff it is invalid to
   specialize TMPL in the current namespace.  */

static bool
check_specialization_namespace (tree tmpl)
{
  tree tpl_ns = decl_namespace_context (tmpl);

  /* [tmpl.expl.spec]

     An explicit specialization shall be declared in a namespace enclosing the
     specialized template.  An explicit specialization whose declarator-id is
     not qualified shall be declared in the nearest enclosing namespace of the
     template, or, if the namespace is inline (7.3.1), any namespace from its
     enclosing namespace set.  */
  if (current_scope() != DECL_CONTEXT (tmpl)
      && !at_namespace_scope_p ())
    {
      error ("specialization of %qD must appear at namespace scope", tmpl);
      return false;
    }

  /* Before C++11, only the same or an enclosing namespace is allowed;
     the third argument relaxes the check accordingly.  */
  if (is_nested_namespace (current_namespace, tpl_ns, cxx_dialect < cxx11))
    /* Same or enclosing namespace.  */
    return true;
  else
    {
      auto_diagnostic_group d;
      if (permerror (input_location,
		     "specialization of %qD in different namespace", tmpl))
	inform (DECL_SOURCE_LOCATION (tmpl),
		"  from definition of %q#D", tmpl);
      return false;
    }
}
/* SPEC is an explicit instantiation.  Check that it is valid to
   perform this explicit instantiation in the current namespace.
   Issues a permerror (not a hard error) on violation.  */

static void
check_explicit_instantiation_namespace (tree spec)
{
  tree ns;

  /* DR 275: An explicit instantiation shall appear in an enclosing
     namespace of its template.  */
  ns = decl_namespace_context (spec);
  if (!is_nested_namespace (current_namespace, ns))
    permerror (input_location, "explicit instantiation of %qD in namespace %qD "
	       "(which does not enclose namespace %qD)",
	       spec, current_namespace, ns);
}
/* Returns the type of a template specialization only if that
   specialization needs to be defined.  Otherwise (e.g., if the type has
   already been defined), the function returns NULL_TREE.  */

static tree
maybe_new_partial_specialization (tree type)
{
  /* An implicit instantiation of an incomplete type implies
     the definition of a new class template.

	template<typename T>
	  struct S;

	template<typename T>
	  struct S<T*>;

     Here, S<T*> is an implicit instantiation of S whose type
     is incomplete.  */
  if (CLASSTYPE_IMPLICIT_INSTANTIATION (type) && !COMPLETE_TYPE_P (type))
    return type;

  /* It can also be the case that TYPE is a completed specialization.
     Continuing the previous example, suppose we also declare:

	template<typename T>
	  requires Integral<T>
	    struct S<T*>;

     Here, S<T*> refers to the specialization S<T*> defined
     above.  However, we need to differentiate definitions because
     we intend to define a new partial specialization.  In this case,
     we rely on the fact that the constraints are different for
     this declaration than that above.

     Note that we also get here for injected class names and
     late-parsed template definitions.  We must ensure that we
     do not create new type declarations for those cases.  */
  if (flag_concepts && CLASSTYPE_TEMPLATE_SPECIALIZATION (type))
    {
      tree tmpl = CLASSTYPE_TI_TEMPLATE (type);
      tree args = CLASSTYPE_TI_ARGS (type);

      /* If there are no template parameters, this cannot be a new
	 partial template specialization.  */
      if (!current_template_parms)
	return NULL_TREE;

      /* The injected-class-name is not a new partial specialization.  */
      if (DECL_SELF_REFERENCE_P (TYPE_NAME (type)))
	return NULL_TREE;

      /* If the constraints are not the same as those of the primary
	 then, we can probably create a new specialization.  */
      tree type_constr = current_template_constraints ();

      if (type == TREE_TYPE (tmpl))
	{
	  /* Matching the primary template's constraints means this is
	     a redeclaration, not a new partial specialization.  */
	  tree main_constr = get_constraints (tmpl);
	  if (equivalent_constraints (type_constr, main_constr))
	    return NULL_TREE;
	}

      /* Also, if there's a pre-existing specialization with matching
	 constraints, then this also isn't new.  */
      tree specs = DECL_TEMPLATE_SPECIALIZATIONS (tmpl);
      while (specs)
	{
	  tree spec_tmpl = TREE_VALUE (specs);
	  tree spec_args = TREE_PURPOSE (specs);
	  tree spec_constr = get_constraints (spec_tmpl);
	  if (comp_template_args (args, spec_args)
	      && equivalent_constraints (type_constr, spec_constr))
	    return NULL_TREE;
	  specs = TREE_CHAIN (specs);
	}

      /* Create a new type node (and corresponding type decl)
	 for the newly declared specialization.  */
      tree t = make_class_type (TREE_CODE (type));
      CLASSTYPE_DECLARED_CLASS (t) = CLASSTYPE_DECLARED_CLASS (type);
      SET_TYPE_TEMPLATE_INFO (t, build_template_info (tmpl, args));

      /* We only need a separate type node for storing the definition of this
	 partial specialization; uses of S<T*> are unconstrained, so all are
	 equivalent.  So keep TYPE_CANONICAL the same.  */
      TYPE_CANONICAL (t) = TYPE_CANONICAL (type);

      /* Build the corresponding type decl, propagating the current
	 access specifier onto it.  */
      tree d = create_implicit_typedef (DECL_NAME (tmpl), t);
      DECL_CONTEXT (d) = TYPE_CONTEXT (t);
      DECL_SOURCE_LOCATION (d) = input_location;
      TREE_PRIVATE (d) = (current_access_specifier == access_private_node);
      TREE_PROTECTED (d) = (current_access_specifier == access_protected_node);

      return t;
    }

  return NULL_TREE;
}
/* The TYPE is being declared.  If it is a template type, that means it
   is a partial specialization.  Do appropriate error-checking.
   Returns TYPE (possibly a freshly created partial-specialization
   type) on success, or error_mark_node on failure.  */

tree
maybe_process_partial_specialization (tree type)
{
  tree context;

  if (type == error_mark_node)
    return error_mark_node;

  /* A lambda that appears in specialization context is not itself a
     specialization.  */
  if (CLASS_TYPE_P (type) && CLASSTYPE_LAMBDA_EXPR (type))
    return type;

  if (TREE_CODE (type) == BOUND_TEMPLATE_TEMPLATE_PARM)
    {
      error ("name of class shadows template template parameter %qD",
	     TYPE_NAME (type));
      return error_mark_node;
    }

  context = TYPE_CONTEXT (type);

  if (TYPE_ALIAS_P (type))
    {
      /* Alias templates cannot be specialized at all.  */
      tree tinfo = TYPE_ALIAS_TEMPLATE_INFO (type);

      if (tinfo && DECL_ALIAS_TEMPLATE_P (TI_TEMPLATE (tinfo)))
	error ("specialization of alias template %qD",
	       TI_TEMPLATE (tinfo));
      else
	error ("explicit specialization of non-template %qT", type);
      return error_mark_node;
    }
  else if (CLASS_TYPE_P (type) && CLASSTYPE_USE_TEMPLATE (type))
    {
      /* This is for ordinary explicit specialization and partial
	 specialization of a template class such as:

	   template <> class C<int>;

	 or:

	   template <class T> class C<T*>;

	 Make sure that `C<int>' and `C<T*>' are implicit instantiations.  */

      if (tree t = maybe_new_partial_specialization (type))
	{
	  if (!check_specialization_namespace (CLASSTYPE_TI_TEMPLATE (t))
	      && !at_namespace_scope_p ())
	    return error_mark_node;

	  SET_CLASSTYPE_TEMPLATE_SPECIALIZATION (t);
	  DECL_SOURCE_LOCATION (TYPE_MAIN_DECL (t)) = input_location;
	  if (processing_template_decl)
	    {
	      tree decl = push_template_decl (TYPE_MAIN_DECL (t));
	      if (decl == error_mark_node)
		return error_mark_node;
	      return TREE_TYPE (decl);
	    }
	}
      else if (CLASSTYPE_TEMPLATE_INSTANTIATION (type))
	error ("specialization of %qT after instantiation", type);
      else if (errorcount && !processing_specialization
	        && CLASSTYPE_TEMPLATE_SPECIALIZATION (type)
	       && !uses_template_parms (CLASSTYPE_TI_ARGS (type)))
	/* Trying to define a specialization either without a template<> header
	   or in an inappropriate place.  We've already given an error, so just
	   bail now so we don't actually define the specialization.  */
	return error_mark_node;
    }
  else if (CLASS_TYPE_P (type)
	   && !CLASSTYPE_USE_TEMPLATE (type)
	   && CLASSTYPE_TEMPLATE_INFO (type)
	   && context && CLASS_TYPE_P (context)
	   && CLASSTYPE_TEMPLATE_INFO (context))
    {
      /* This is for an explicit specialization of member class
	 template according to [temp.expl.spec/18]:

	   template <> template <class U> class C<int>::D;

	 The context `C<int>' must be an implicit instantiation.
	 Otherwise this is just a member class template declared
	 earlier like:

	   template <> class C<int> { template <class U> class D; };
	   template <> template <class U> class C<int>::D;

	 In the first case, `C<int>::D' is a specialization of `C<T>::D'
	 while in the second case, `C<int>::D' is a primary template
	 and `C<T>::D' may not exist.  */

      if (CLASSTYPE_IMPLICIT_INSTANTIATION (context)
	  && !COMPLETE_TYPE_P (type))
	{
	  tree t;
	  tree tmpl = CLASSTYPE_TI_TEMPLATE (type);

	  if (current_namespace
	      != decl_namespace_context (tmpl))
	    {
	      if (permerror (input_location,
			     "specialization of %qD in different namespace",
			     type))
		inform (DECL_SOURCE_LOCATION (tmpl),
			"from definition of %q#D", tmpl);
	    }

	  /* Check for invalid specialization after instantiation:

	       template <> template <> class C<int>::D<int>;
	       template <> template <class U> class C<int>::D;  */

	  for (t = DECL_TEMPLATE_INSTANTIATIONS (tmpl);
	       t; t = TREE_CHAIN (t))
	    {
	      tree inst = TREE_VALUE (t);
	      if (CLASSTYPE_TEMPLATE_SPECIALIZATION (inst)
		  || !COMPLETE_OR_OPEN_TYPE_P (inst))
		{
		  /* We already have a full specialization of this partial
		     instantiation, or a full specialization has been
		     looked up but not instantiated.  Reassign it to the
		     new member specialization template.  */
		  spec_entry elt;
		  spec_entry *entry;

		  elt.tmpl = most_general_template (tmpl);
		  elt.args = CLASSTYPE_TI_ARGS (inst);
		  elt.spec = inst;

		  type_specializations->remove_elt (&elt);

		  elt.tmpl = tmpl;
		  CLASSTYPE_TI_ARGS (inst)
		    = elt.args = INNERMOST_TEMPLATE_ARGS (elt.args);

		  spec_entry **slot
		    = type_specializations->find_slot (&elt, INSERT);
		  entry = ggc_alloc<spec_entry> ();
		  *entry = elt;
		  *slot = entry;
		}
	      else
		/* But if we've had an implicit instantiation, that's a
		   problem ([temp.expl.spec]/6).  */
		error ("specialization %qT after instantiation %qT",
		       type, inst);
	    }

	  /* Mark TYPE as a specialization.  And as a result, we only
	     have one level of template argument for the innermost
	     class template.  */
	  SET_CLASSTYPE_TEMPLATE_SPECIALIZATION (type);
	  DECL_SOURCE_LOCATION (TYPE_MAIN_DECL (type)) = input_location;
	  CLASSTYPE_TI_ARGS (type)
	    = INNERMOST_TEMPLATE_ARGS (CLASSTYPE_TI_ARGS (type));
	}
    }
  else if (processing_specialization)
    {
       /* Someday C++0x may allow for enum template specialization.  */
      if (cxx_dialect > cxx98 && TREE_CODE (type) == ENUMERAL_TYPE
	  && CLASS_TYPE_P (context) && CLASSTYPE_USE_TEMPLATE (context))
	pedwarn (input_location, OPT_Wpedantic, "template specialization "
		 "of %qD not allowed by ISO C++", type);
      else
	{
	  error ("explicit specialization of non-template %qT", type);
	  return error_mark_node;
	}
    }

  return type;
}
/* Returns nonzero if we can optimize the retrieval of specializations
   for TMPL, a TEMPLATE_DECL.  In particular, for such a template, we
   do not use DECL_TEMPLATE_SPECIALIZATIONS at all.  */

static inline bool
optimize_specialization_lookup_p (tree tmpl)
{
  /* The optimization applies only to member function templates of a
     class template where the template arguments belong entirely to
     the enclosing class; each conjunct below rules out a case where
     that does not hold.  */
  return (DECL_FUNCTION_TEMPLATE_P (tmpl)
	  && DECL_CLASS_SCOPE_P (tmpl)
	  /* DECL_CLASS_SCOPE_P holds of T::f even if T is a template
	     parameter.  */
	  && CLASS_TYPE_P (DECL_CONTEXT (tmpl))
	  /* The optimized lookup depends on the fact that the
	     template arguments for the member function template apply
	     purely to the containing class, which is not true if the
	     containing class is an explicit or partial
	     specialization.  */
	  && !CLASSTYPE_TEMPLATE_SPECIALIZATION (DECL_CONTEXT (tmpl))
	  && !DECL_MEMBER_TEMPLATE_P (tmpl)
	  && !DECL_CONV_FN_P (tmpl)
	  /* It is possible to have a template that is not a member
	     template and is not a member of a template class:

	     template <typename T>
	     struct S { friend A::f(); };

	     Here, the friend function is a template, but the context does
	     not have template information.  The optimized lookup relies
	     on having ARGS be the template arguments for both the class
	     and the function template.  */
	  && !DECL_FRIEND_P (DECL_TEMPLATE_RESULT (tmpl)));
}
/* Make sure ARGS doesn't use any inappropriate typedefs; we should have
   gone through coerce_template_parms by now.  Asserts on violation;
   INNER is a TREE_VEC of template arguments.  */

static void
verify_unstripped_args_1 (tree inner)
{
  for (int i = 0; i < TREE_VEC_LENGTH (inner); ++i)
    {
      tree arg = TREE_VEC_ELT (inner, i);
      if (TREE_CODE (arg) == TEMPLATE_DECL)
	/* OK */;
      else if (TYPE_P (arg))
	gcc_assert (strip_typedefs (arg, NULL) == arg);
      else if (ARGUMENT_PACK_P (arg))
	/* Recurse into the pack's own argument vector.  */
	verify_unstripped_args_1 (ARGUMENT_PACK_ARGS (arg));
      else if (strip_typedefs (TREE_TYPE (arg), NULL) != TREE_TYPE (arg))
	/* Allow typedefs on the type of a non-type argument, since a
	   parameter can have them.  */;
      else
	gcc_assert (strip_typedefs_expr (arg, NULL) == arg);
    }
}
/* Entry point for the check above: verify the innermost level of ARGS
   when the arguments are non-dependent.  Temporarily bumps
   processing_template_decl around the dependency check.  */

static void
verify_unstripped_args (tree args)
{
  ++processing_template_decl;
  if (!any_dependent_template_arguments_p (args))
    verify_unstripped_args_1 (INNERMOST_TEMPLATE_ARGS (args));
  --processing_template_decl;
}
/* Retrieve the specialization (in the sense of [temp.spec] - a
   specialization is either an instantiation or an explicit
   specialization) of TMPL for the given template ARGS.  If there is
   no such specialization, return NULL_TREE.  The ARGS are a vector of
   arguments, or a vector of vectors of arguments, in the case of
   templates with more than one level of parameters.

   If TMPL is a type template and CLASS_SPECIALIZATIONS_P is true,
   then we search for a partial specialization matching ARGS.  This
   parameter is ignored if TMPL is not a class template.

   We can also look up a FIELD_DECL, if it is a lambda capture pack; the
   result is a NONTYPE_ARGUMENT_PACK.

   HASH, if nonzero, is the precomputed hash for TMPL/ARGS, avoiding a
   recomputation.  */

static tree
retrieve_specialization (tree tmpl, tree args, hashval_t hash)
{
  if (tmpl == NULL_TREE)
    return NULL_TREE;

  if (args == error_mark_node)
    return NULL_TREE;

  gcc_assert (TREE_CODE (tmpl) == TEMPLATE_DECL
	      || TREE_CODE (tmpl) == FIELD_DECL);

  /* There should be as many levels of arguments as there are
     levels of parameters.  */
  gcc_assert (TMPL_ARGS_DEPTH (args)
	      == (TREE_CODE (tmpl) == TEMPLATE_DECL
		  ? TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (tmpl))
		  : template_class_depth (DECL_CONTEXT (tmpl))));

  if (flag_checking)
    verify_unstripped_args (args);

  /* Lambda functions in templates aren't instantiated normally, but through
     tsubst_lambda_expr.  */
  if (lambda_fn_in_template_p (tmpl))
    return NULL_TREE;

  if (optimize_specialization_lookup_p (tmpl))
    {
      /* The template arguments actually apply to the containing
	 class.  Find the class specialization with those
	 arguments.  */
      tree class_template = CLASSTYPE_TI_TEMPLATE (DECL_CONTEXT (tmpl));
      tree class_specialization
	= retrieve_specialization (class_template, args, 0);
      if (!class_specialization)
	return NULL_TREE;

      /* Find the instance of TMPL.  */
      tree fns = get_class_binding (class_specialization, DECL_NAME (tmpl));
      for (ovl_iterator iter (fns); iter; ++iter)
	{
	  tree fn = *iter;
	  if (tree ti = get_template_info (fn))
	    if (TI_TEMPLATE (ti) == tmpl
		/* using-declarations can bring in a different
		   instantiation of tmpl as a member of a different
		   instantiation of tmpl's class.  We don't want those
		   here.  */
		&& DECL_CONTEXT (fn) == class_specialization)
	      return fn;
	}
      return NULL_TREE;
    }
  else
    {
      /* General case: hash-table lookup keyed on (TMPL, ARGS).  */
      spec_entry *found;
      spec_entry elt;
      hash_table<spec_hasher> *specializations;

      elt.tmpl = tmpl;
      elt.args = args;
      elt.spec = NULL_TREE;

      if (DECL_CLASS_TEMPLATE_P (tmpl))
	specializations = type_specializations;
      else
	specializations = decl_specializations;

      if (hash == 0)
	hash = spec_hasher::hash (&elt);

      found = specializations->find_with_hash (&elt, hash);
      if (found)
	return found->spec;
    }

  return NULL_TREE;
}
/* Like retrieve_specialization, but for local declarations.  Returns
   NULL_TREE when no local specialization table exists or TMPL has no
   entry in it.  */

tree
retrieve_local_specialization (tree tmpl)
{
  if (local_specializations != NULL)
    if (tree *entry = local_specializations->get (tmpl))
      return *entry;
  return NULL_TREE;
}
/* Returns nonzero iff DECL is a specialization of TMPL.  DECL must be
   a FUNCTION_DECL or a TYPE_DECL.  */

int
is_specialization_of (tree decl, tree tmpl)
{
  tree t;

  if (TREE_CODE (decl) == FUNCTION_DECL)
    {
      /* Walk the chain of templates DECL was instantiated from,
	 looking for TMPL.  */
      for (t = decl;
	   t != NULL_TREE;
	   t = DECL_TEMPLATE_INFO (t) ? DECL_TI_TEMPLATE (t) : NULL_TREE)
	if (t == tmpl)
	  return 1;
    }
  else
    {
      gcc_assert (TREE_CODE (decl) == TYPE_DECL);

      /* For types, walk the instantiated-from chain comparing types
	 rather than decls, ignoring top-level qualifiers.  */
      for (t = TREE_TYPE (decl);
	   t != NULL_TREE;
	   t = CLASSTYPE_USE_TEMPLATE (t)
	     ? TREE_TYPE (CLASSTYPE_TI_TEMPLATE (t)) : NULL_TREE)
	if (same_type_ignoring_top_level_qualifiers_p (t, TREE_TYPE (tmpl)))
	  return 1;
    }

  return 0;
}
/* Returns nonzero iff DECL is a specialization of friend declaration
   FRIEND_DECL according to [temp.friend].  */

bool
is_specialization_of_friend (tree decl, tree friend_decl)
{
  bool need_template = true;
  int template_depth;

  gcc_assert (TREE_CODE (decl) == FUNCTION_DECL
	      || TREE_CODE (decl) == TYPE_DECL);

  /* For [temp.friend/6] when FRIEND_DECL is an ordinary member function
     of a template class, we want to check if DECL is a specialization
     if this.  */
  if (TREE_CODE (friend_decl) == FUNCTION_DECL
      && DECL_TEMPLATE_INFO (friend_decl)
      && !DECL_USE_TEMPLATE (friend_decl))
    {
      /* We want a TEMPLATE_DECL for `is_specialization_of'.  */
      friend_decl = DECL_TI_TEMPLATE (friend_decl);
      need_template = false;
    }
  else if (TREE_CODE (friend_decl) == TEMPLATE_DECL
	   && !PRIMARY_TEMPLATE_P (friend_decl))
    need_template = false;

  /* There is nothing to do if this is not a template friend.  */
  if (TREE_CODE (friend_decl) != TEMPLATE_DECL)
    return false;

  if (is_specialization_of (decl, friend_decl))
    return true;

  /* [temp.friend/6]

     A member of a class template may be declared to be a friend of a
     non-template class.  In this case, the corresponding member of
     every specialization of the class template is a friend of the
     class granting friendship.

     For example, given a template friend declaration

       template <class T> friend void A<T>::f();

     the member function below is considered a friend

       template <> struct A<int> {
	 void f();
       };

     For this type of template friend, TEMPLATE_DEPTH below will be
     nonzero.  To determine if DECL is a friend of FRIEND, we first
     check if the enclosing class is a specialization of another.  */

  template_depth = template_class_depth (CP_DECL_CONTEXT (friend_decl));
  if (template_depth
      && DECL_CLASS_SCOPE_P (decl)
      && is_specialization_of (TYPE_NAME (DECL_CONTEXT (decl)),
			       CLASSTYPE_TI_TEMPLATE (DECL_CONTEXT (friend_decl))))
    {
      /* Next, we check the members themselves.  In order to handle
	 a few tricky cases, such as when FRIEND_DECL's are

	   template <class T> friend void A<T>::g(T t);
	   template <class T> template <T t> friend void A<T>::h();

	 and DECL's are

	   void A<int>::g(int);
	   template <int> void A<int>::h();

	 we need to figure out ARGS, the template arguments from
	 the context of DECL.  This is required for template substitution
	 of `T' in the function parameter of `g' and template parameter
	 of `h' in the above examples.  Here ARGS corresponds to `int'.  */

      tree context = DECL_CONTEXT (decl);
      tree args = NULL_TREE;
      int current_depth = 0;

      /* Collect template arguments from all TEMPLATE_DEPTH enclosing
	 class levels, outermost levels prepended.  */
      while (current_depth < template_depth)
	{
	  if (CLASSTYPE_TEMPLATE_INFO (context))
	    {
	      if (current_depth == 0)
		args = TYPE_TI_ARGS (context);
	      else
		args = add_to_template_args (TYPE_TI_ARGS (context), args);
	      current_depth++;
	    }
	  context = TYPE_CONTEXT (context);
	}

      if (TREE_CODE (decl) == FUNCTION_DECL)
	{
	  bool is_template;
	  tree friend_type;
	  tree decl_type;
	  tree friend_args_type;
	  tree decl_args_type;

	  /* Make sure that both DECL and FRIEND_DECL are templates or
	     non-templates.  */
	  is_template = DECL_TEMPLATE_INFO (decl)
			&& PRIMARY_TEMPLATE_P (DECL_TI_TEMPLATE (decl));
	  if (need_template ^ is_template)
	    return false;
	  else if (is_template)
	    {
	      /* If both are templates, check template parameter list.  */
	      tree friend_parms
		= tsubst_template_parms (DECL_TEMPLATE_PARMS (friend_decl),
					 args, tf_none);
	      if (!comp_template_parms
		     (DECL_TEMPLATE_PARMS (DECL_TI_TEMPLATE (decl)),
		      friend_parms))
		return false;

	      decl_type = TREE_TYPE (DECL_TI_TEMPLATE (decl));
	    }
	  else
	    decl_type = TREE_TYPE (decl);

	  /* Substitute ARGS into the friend's type before comparing.  */
	  friend_type = tsubst_function_type (TREE_TYPE (friend_decl), args,
					      tf_none, NULL_TREE);
	  if (friend_type == error_mark_node)
	    return false;

	  /* Check if return types match.  */
	  if (!same_type_p (TREE_TYPE (decl_type), TREE_TYPE (friend_type)))
	    return false;

	  /* Check if function parameter types match, ignoring the
	     `this' parameter.  */
	  friend_args_type = TYPE_ARG_TYPES (friend_type);
	  decl_args_type = TYPE_ARG_TYPES (decl_type);
	  if (DECL_NONSTATIC_MEMBER_FUNCTION_P (friend_decl))
	    friend_args_type = TREE_CHAIN (friend_args_type);
	  if (DECL_NONSTATIC_MEMBER_FUNCTION_P (decl))
	    decl_args_type = TREE_CHAIN (decl_args_type);

	  return compparms (decl_args_type, friend_args_type);
	}
      else
	{
	  /* DECL is a TYPE_DECL */
	  bool is_template;
	  tree decl_type = TREE_TYPE (decl);

	  /* Make sure that both DECL and FRIEND_DECL are templates or
	     non-templates.  */
	  is_template
	    = CLASSTYPE_TEMPLATE_INFO (decl_type)
	      && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (decl_type));

	  if (need_template ^ is_template)
	    return false;
	  else if (is_template)
	    {
	      tree friend_parms;
	      /* If both are templates, check the name of the two
		 TEMPLATE_DECL's first because is_friend didn't.  */
	      if (DECL_NAME (CLASSTYPE_TI_TEMPLATE (decl_type))
		  != DECL_NAME (friend_decl))
		return false;

	      /* Now check template parameter list.  */
	      friend_parms
		= tsubst_template_parms (DECL_TEMPLATE_PARMS (friend_decl),
					 args, tf_none);
	      return comp_template_parms
		       (DECL_TEMPLATE_PARMS (CLASSTYPE_TI_TEMPLATE (decl_type)),
			friend_parms);
	    }
	  else
	    /* Non-template classes: a simple name match suffices.  */
	    return (DECL_NAME (decl)
		    == DECL_NAME (friend_decl));
	}
    }
  return false;
}
/* Register the specialization SPEC as a specialization of TMPL with
the indicated ARGS. IS_FRIEND indicates whether the specialization
is actually just a friend declaration. ATTRLIST is the list of
attributes that the specialization is declared with or NULL when
it isn't. Returns SPEC, or an equivalent prior declaration, if
available.
We also store instantiations of field packs in the hash table, even
though they are not themselves templates, to make lookup easier. */
static tree
register_specialization (tree spec, tree tmpl, tree args, bool is_friend,
			 hashval_t hash)
{
  tree fn;
  spec_entry **slot = NULL;
  spec_entry elt;
  /* TMPL is normally a TEMPLATE_DECL; the FIELD_DECL/NONTYPE_ARGUMENT_PACK
     pairing is the field-pack case mentioned in the header comment.  */
  gcc_assert ((TREE_CODE (tmpl) == TEMPLATE_DECL && DECL_P (spec))
	      || (TREE_CODE (tmpl) == FIELD_DECL
		  && TREE_CODE (spec) == NONTYPE_ARGUMENT_PACK));
  if (TREE_CODE (spec) == FUNCTION_DECL
      && uses_template_parms (DECL_TI_ARGS (spec)))
    /* This is the FUNCTION_DECL for a partial instantiation.  Don't
       register it; we want the corresponding TEMPLATE_DECL instead.
       We use `uses_template_parms (DECL_TI_ARGS (spec))' rather than
       the more obvious `uses_template_parms (spec)' to avoid problems
       with default function arguments.  In particular, given
       something like this:
	  template <class T> void f(T t1, T t = T())
       the default argument expression is not substituted for in an
       instantiation unless and until it is actually needed.  */
    return spec;
  if (optimize_specialization_lookup_p (tmpl))
    /* We don't put these specializations in the hash table, but we might
       want to give an error about a mismatch.  */
    fn = retrieve_specialization (tmpl, args, 0);
  else
    {
      elt.tmpl = tmpl;
      elt.args = args;
      elt.spec = spec;
      /* The caller may pass a precomputed hash; compute one ourselves
	 otherwise.  */
      if (hash == 0)
	hash = spec_hasher::hash (&elt);
      slot =
	decl_specializations->find_slot_with_hash (&elt, hash, INSERT);
      if (*slot)
	fn = ((spec_entry *) *slot)->spec;
      else
	fn = NULL_TREE;
    }
  /* We can sometimes try to re-register a specialization that we've
     already got.  In particular, regenerate_decl_from_template calls
     duplicate_decls which will update the specialization list.  But,
     we'll still get called again here anyhow.  It's more convenient
     to simply allow this than to try to prevent it.  */
  if (fn == spec)
    return spec;
  else if (fn && DECL_TEMPLATE_SPECIALIZATION (spec))
    {
      if (DECL_TEMPLATE_INSTANTIATION (fn))
	{
	  if (DECL_ODR_USED (fn)
	      || DECL_EXPLICIT_INSTANTIATION (fn))
	    {
	      /* Too late: the instantiation has already been used (or
		 explicitly requested), so a specialization now would
		 change meaning.  */
	      error ("specialization of %qD after instantiation",
		     fn);
	      return error_mark_node;
	    }
	  else
	    {
	      tree clone;
	      /* This situation should occur only if the first
		 specialization is an implicit instantiation, the
		 second is an explicit specialization, and the
		 implicit instantiation has not yet been used.  That
		 situation can occur if we have implicitly
		 instantiated a member function and then specialized
		 it later.
		 We can also wind up here if a friend declaration that
		 looked like an instantiation turns out to be a
		 specialization:
		   template <class T> void foo(T);
		   class S { friend void foo<>(int) };
		   template <> void foo(int);
		 We transform the existing DECL in place so that any
		 pointers to it become pointers to the updated
		 declaration.
		 If there was a definition for the template, but not
		 for the specialization, we want this to look as if
		 there were no definition, and vice versa.  */
	      DECL_INITIAL (fn) = NULL_TREE;
	      duplicate_decls (spec, fn, is_friend);
	      /* The call to duplicate_decls will have applied
		 [temp.expl.spec]:
		   An explicit specialization of a function template
		   is inline only if it is explicitly declared to be,
		   and independently of whether its function template
		   is.
		 to the primary function; now copy the inline bits to
		 the various clones.  */
	      FOR_EACH_CLONE (clone, fn)
		{
		  DECL_DECLARED_INLINE_P (clone)
		    = DECL_DECLARED_INLINE_P (fn);
		  DECL_SOURCE_LOCATION (clone)
		    = DECL_SOURCE_LOCATION (fn);
		  DECL_DELETED_FN (clone)
		    = DECL_DELETED_FN (fn);
		}
	      check_specialization_namespace (tmpl);
	      return fn;
	    }
	}
      else if (DECL_TEMPLATE_SPECIALIZATION (fn))
	{
	  tree dd = duplicate_decls (spec, fn, is_friend);
	  if (dd == error_mark_node)
	    /* We've already complained in duplicate_decls.  */
	    return error_mark_node;
	  if (dd == NULL_TREE && DECL_INITIAL (spec))
	    /* Dup decl failed, but this is a new definition. Set the
	       line number so any errors match this new
	       definition.  */
	    DECL_SOURCE_LOCATION (fn) = DECL_SOURCE_LOCATION (spec);
	  return fn;
	}
    }
  else if (fn)
    return duplicate_decls (spec, fn, is_friend);
  /* A specialization must be declared in the same namespace as the
     template it is specializing.  */
  if (DECL_P (spec) && DECL_TEMPLATE_SPECIALIZATION (spec)
      && !check_specialization_namespace (tmpl))
    /* check_specialization_namespace has diagnosed the mismatch;
       recover by forcing SPEC into the template's namespace.  */
    DECL_CONTEXT (spec) = DECL_CONTEXT (tmpl);
  if (slot != NULL /* !optimize_specialization_lookup_p (tmpl) */)
    {
      spec_entry *entry = ggc_alloc<spec_entry> ();
      gcc_assert (tmpl && args && spec);
      *entry = elt;
      *slot = entry;
      if ((TREE_CODE (spec) == FUNCTION_DECL && DECL_NAMESPACE_SCOPE_P (spec)
	   && PRIMARY_TEMPLATE_P (tmpl)
	   && DECL_SAVED_TREE (DECL_TEMPLATE_RESULT (tmpl)) == NULL_TREE)
	  || variable_template_p (tmpl))
	/* If TMPL is a forward declaration of a template function, keep a list
	   of all specializations in case we need to reassign them to a friend
	   template later in tsubst_friend_function.
	   Also keep a list of all variable template instantiations so that
	   process_partial_specialization can check whether a later partial
	   specialization would have used it.  */
	DECL_TEMPLATE_INSTANTIATIONS (tmpl)
	  = tree_cons (args, spec, DECL_TEMPLATE_INSTANTIATIONS (tmpl));
    }
  return spec;
}
/* Returns true iff two spec_entry nodes are equivalent.  */
int comparing_specializations;
bool
spec_hasher::equal (spec_entry *e1, spec_entry *e2)
{
  ++comparing_specializations;
  /* Two entries match when they refer to the same template with
     equivalent template arguments.  */
  int result = (e1->tmpl == e2->tmpl
		&& comp_template_args (e1->args, e2->args));
  if (result
      && flag_concepts
      /* tmpl could be a FIELD_DECL for a capture pack.  */
      && TREE_CODE (e1->tmpl) == TEMPLATE_DECL
      && VAR_P (DECL_TEMPLATE_RESULT (e1->tmpl))
      && uses_template_parms (e1->args))
    {
      /* Partial specializations of a variable template can be
	 distinguished by constraints.  */
      tree c1 = e1->spec ? get_constraints (e1->spec) : NULL_TREE;
      tree c2 = e2->spec ? get_constraints (e2->spec) : NULL_TREE;
      result = equivalent_constraints (c1, c2);
    }
  --comparing_specializations;
  return result;
}
/* Returns a hash for a template TMPL and template arguments ARGS.  */
static hashval_t
hash_tmpl_and_args (tree tmpl, tree args)
{
  /* Seed the hash with the template's UID, then fold in the arguments.  */
  hashval_t seed = iterative_hash_object (DECL_UID (tmpl), 0);
  return iterative_hash_template_arg (args, seed);
}
/* Returns a hash for a spec_entry node based on the TMPL and ARGS members,
   ignoring SPEC.  */
hashval_t
spec_hasher::hash (spec_entry *e)
{
  /* Only the key fields participate; E->spec is deliberately ignored so
     lookup works before the specialization itself is known.  */
  hashval_t h = hash_tmpl_and_args (e->tmpl, e->args);
  return h;
}
/* Recursively calculate a hash value for a template argument ARG, for use
   in the hash tables of template specializations.   We must be
   careful to (at least) skip the same entities template_args_equal
   does.  */
hashval_t
iterative_hash_template_arg (tree arg, hashval_t val)
{
  if (arg == NULL_TREE)
    return iterative_hash_object (arg, val);
  if (!TYPE_P (arg))
    /* Strip nop-like things, but not the same as STRIP_NOPS.  */
    while (CONVERT_EXPR_P (arg)
	   || TREE_CODE (arg) == NON_LVALUE_EXPR
	   || class_nttp_const_wrapper_p (arg))
      arg = TREE_OPERAND (arg, 0);
  enum tree_code code = TREE_CODE (arg);
  /* The tree code itself always participates in the hash.  */
  val = iterative_hash_object (code, val);
  switch (code)
    {
    case ARGUMENT_PACK_SELECT:
      gcc_unreachable ();
    case ERROR_MARK:
      return val;
    case IDENTIFIER_NODE:
      return iterative_hash_object (IDENTIFIER_HASH_VALUE (arg), val);
    case TREE_VEC:
      for (int i = 0, len = TREE_VEC_LENGTH (arg); i < len; ++i)
	val = iterative_hash_template_arg (TREE_VEC_ELT (arg, i), val);
      return val;
    case TYPE_PACK_EXPANSION:
    case EXPR_PACK_EXPANSION:
      val = iterative_hash_template_arg (PACK_EXPANSION_PATTERN (arg), val);
      return iterative_hash_template_arg (PACK_EXPANSION_EXTRA_ARGS (arg), val);
    case TYPE_ARGUMENT_PACK:
    case NONTYPE_ARGUMENT_PACK:
      return iterative_hash_template_arg (ARGUMENT_PACK_ARGS (arg), val);
    case TREE_LIST:
      for (; arg; arg = TREE_CHAIN (arg))
	val = iterative_hash_template_arg (TREE_VALUE (arg), val);
      return val;
    case OVERLOAD:
      for (lkp_iterator iter (arg); iter; ++iter)
	val = iterative_hash_template_arg (*iter, val);
      return val;
    case CONSTRUCTOR:
      {
	tree field, value;
	unsigned i;
	/* NOTE(review): the return value of this call is discarded, so the
	   constructor's type does not actually affect VAL — confirm whether
	   this was meant to be `val = iterative_hash_template_arg (...)'.
	   (Hashing less than equality distinguishes is safe, just weaker.)  */
	iterative_hash_template_arg (TREE_TYPE (arg), val);
	FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (arg), i, field, value)
	  {
	    val = iterative_hash_template_arg (field, val);
	    val = iterative_hash_template_arg (value, val);
	  }
	return val;
      }
    case PARM_DECL:
      if (!DECL_ARTIFICIAL (arg))
	{
	  val = iterative_hash_object (DECL_PARM_INDEX (arg), val);
	  val = iterative_hash_object (DECL_PARM_LEVEL (arg), val);
	}
      return iterative_hash_template_arg (TREE_TYPE (arg), val);
    case TARGET_EXPR:
      return iterative_hash_template_arg (TARGET_EXPR_INITIAL (arg), val);
    case PTRMEM_CST:
      val = iterative_hash_template_arg (PTRMEM_CST_CLASS (arg), val);
      return iterative_hash_template_arg (PTRMEM_CST_MEMBER (arg), val);
    case TEMPLATE_PARM_INDEX:
      val = iterative_hash_template_arg
	(TREE_TYPE (TEMPLATE_PARM_DECL (arg)), val);
      val = iterative_hash_object (TEMPLATE_PARM_LEVEL (arg), val);
      return iterative_hash_object (TEMPLATE_PARM_IDX (arg), val);
    case TRAIT_EXPR:
      val = iterative_hash_object (TRAIT_EXPR_KIND (arg), val);
      val = iterative_hash_template_arg (TRAIT_EXPR_TYPE1 (arg), val);
      return iterative_hash_template_arg (TRAIT_EXPR_TYPE2 (arg), val);
    case BASELINK:
      val = iterative_hash_template_arg (BINFO_TYPE (BASELINK_BINFO (arg)),
					 val);
      return iterative_hash_template_arg (DECL_NAME (get_first_fn (arg)),
					  val);
    case MODOP_EXPR:
      val = iterative_hash_template_arg (TREE_OPERAND (arg, 0), val);
      /* Hash the code of the compound operator, not the operand tree.  */
      code = TREE_CODE (TREE_OPERAND (arg, 1));
      val = iterative_hash_object (code, val);
      return iterative_hash_template_arg (TREE_OPERAND (arg, 2), val);
    case LAMBDA_EXPR:
      /* [temp.over.link] Two lambda-expressions are never considered
	 equivalent.
         So just hash the closure type.  */
      return iterative_hash_template_arg (TREE_TYPE (arg), val);
    case CAST_EXPR:
    case IMPLICIT_CONV_EXPR:
    case STATIC_CAST_EXPR:
    case REINTERPRET_CAST_EXPR:
    case CONST_CAST_EXPR:
    case DYNAMIC_CAST_EXPR:
    case NEW_EXPR:
      /* For casts and new-expressions the target type matters too.  */
      val = iterative_hash_template_arg (TREE_TYPE (arg), val);
      /* Now hash operands as usual.  */
      break;
    case CALL_EXPR:
      {
	tree fn = CALL_EXPR_FN (arg);
	if (tree name = dependent_name (fn))
	  {
	    if (TREE_CODE (fn) == TEMPLATE_ID_EXPR)
	      val = iterative_hash_template_arg (TREE_OPERAND (fn, 1), val);
	    /* Hash a dependent callee by name only.  */
	    fn = name;
	  }
	val = iterative_hash_template_arg (fn, val);
	call_expr_arg_iterator ai;
	for (tree x = first_call_expr_arg (arg, &ai); x;
	     x = next_call_expr_arg (&ai))
	  val = iterative_hash_template_arg (x, val);
	return val;
      }
    default:
      break;
    }
  /* Generic handling by tree-code class for everything not special-cased
     above.  */
  char tclass = TREE_CODE_CLASS (code);
  switch (tclass)
    {
    case tcc_type:
      if (tree ats = alias_template_specialization_p (arg, nt_transparent))
	{
	  // We want an alias specialization that survived strip_typedefs
	  // to hash differently from its TYPE_CANONICAL, to avoid hash
	  // collisions that compare as different in template_args_equal.
	  // These could be dependent specializations that strip_typedefs
	  // left alone, or untouched specializations because
	  // coerce_template_parms returns the unconverted template
	  // arguments if it sees incomplete argument packs.
	  tree ti = TYPE_ALIAS_TEMPLATE_INFO (ats);
	  return hash_tmpl_and_args (TI_TEMPLATE (ti), TI_ARGS (ti));
	}
      switch (TREE_CODE (arg))
	{
	case TEMPLATE_TEMPLATE_PARM:
	  {
	    tree tpi = TEMPLATE_TYPE_PARM_INDEX (arg);
	    /* Do not recurse with TPI directly, as that is unbounded
	       recursion.  */
	    val = iterative_hash_object (TEMPLATE_PARM_LEVEL (tpi), val);
	    val = iterative_hash_object (TEMPLATE_PARM_IDX (tpi), val);
	  }
	  break;
	case DECLTYPE_TYPE:
	  val = iterative_hash_template_arg (DECLTYPE_TYPE_EXPR (arg), val);
	  break;
	default:
	  if (tree canonical = TYPE_CANONICAL (arg))
	    val = iterative_hash_object (TYPE_HASH (canonical), val);
	  break;
	}
      return val;
    case tcc_declaration:
    case tcc_constant:
      return iterative_hash_expr (arg, val);
    default:
      gcc_assert (IS_EXPR_CODE_CLASS (tclass));
      /* Hash every operand of a generic expression.  */
      for (int i = 0, n = cp_tree_operand_length (arg); i < n; ++i)
	val = iterative_hash_template_arg (TREE_OPERAND (arg, i), val);
      return val;
    }
  gcc_unreachable ();
  return 0;
}
/* Replace the registered specialization SPEC of the template described
   by TINFO with NEW_SPEC.  Returns true if SPEC was listed as a
   specialization of that template, false otherwise.
   Note that SPEC has been ggc_freed, so we can't look inside it.  */
bool
reregister_specialization (tree spec, tree tinfo, tree new_spec)
{
  spec_entry *entry;
  spec_entry elt;
  /* The table is keyed on the most general template and the argument
     vector; the stored specialization is ignored by the hasher.  */
  elt.tmpl = most_general_template (TI_TEMPLATE (tinfo));
  elt.args = TI_ARGS (tinfo);
  elt.spec = NULL_TREE;
  entry = decl_specializations->find (&elt);
  if (entry != NULL)
    {
      /* The entry may already point at NEW_SPEC (e.g. if it was updated
	 on an earlier path); either way, a replacement must be given.  */
      gcc_assert (entry->spec == spec || entry->spec == new_spec);
      gcc_assert (new_spec != NULL_TREE);
      entry->spec = new_spec;
      return true;
    }
  return false;
}
/* Like register_specialization, but for local declarations.  We are
   registering SPEC, an instantiation of TMPL.  */
void
register_local_specialization (tree spec, tree tmpl)
{
  /* A declaration must never be recorded as its own instantiation.  */
  gcc_assert (spec != tmpl);
  local_specializations->put (tmpl, spec);
}
/* TYPE is a class type.  Returns true if TYPE is an explicitly
   specialized class.  */
bool
explicit_class_specialization_p (tree type)
{
  /* An explicit specialization is a template specialization none of
     whose arguments are still dependent.  */
  return (CLASSTYPE_TEMPLATE_SPECIALIZATION (type)
	  && !uses_template_parms (CLASSTYPE_TI_ARGS (type)));
}
/* Print the list of functions at FNS, going through all the overloads
   for each element of the list.  Alternatively, FNS cannot be a
   TREE_LIST, in which case it will be printed together with all the
   overloads.
   MORE and *STR should respectively be FALSE and NULL when the function
   is called from the outside.  They are used internally on recursive
   calls.  print_candidates manages the two parameters and leaves NULL
   in *STR when it ends.  */
static void
print_candidates_1 (tree fns, char **str, bool more = false)
{
  if (TREE_CODE (fns) == TREE_LIST)
    {
      /* Recurse into each overload set on the list; MORE stays set while
	 further list elements remain.  */
      for (; fns; fns = TREE_CHAIN (fns))
	print_candidates_1 (TREE_VALUE (fns), str, more || TREE_CHAIN (fns));
      return;
    }
  for (lkp_iterator iter (fns); iter;)
    {
      tree candidate = *iter;
      /* Advance before testing ITER below, so we know whether this is
	 the last overload.  */
      ++iter;
      const char *prefix = *str;
      if (!prefix)
	{
	  /* First line printed: choose singular/plural wording, then
	     remember a same-width run of spaces for the later lines.  */
	  prefix = (more || iter) ? _("candidates are:") : _("candidate is:");
	  *str = get_spaces (prefix);
	}
      inform (DECL_SOURCE_LOCATION (candidate), "%s %#qD", prefix, candidate);
    }
}
/* Print the list of candidate FNS in an error message.  FNS can also
   be a TREE_LIST of non-functions in the case of an ambiguous lookup.  */
void
print_candidates (tree fns)
{
  char *spaces = NULL;
  print_candidates_1 (fns, &spaces);
  /* print_candidates_1 allocates the alignment string lazily; release
     whatever it produced.  */
  free (spaces);
}
/* Get a (possibly) constrained template declaration for the
   purpose of ordering candidates.  */
static tree
get_template_for_ordering (tree list)
{
  gcc_assert (TREE_CODE (list) == TREE_LIST);
  tree fn = TREE_VALUE (list);
  /* Order by the declaration's template when it has one, otherwise by
     the declaration itself.  */
  tree ti = DECL_TEMPLATE_INFO (fn);
  return ti ? TI_TEMPLATE (ti) : fn;
}
/* Among candidates having the same signature, return the
   most constrained or NULL_TREE if there is no best candidate.
   If the signatures of candidates vary (e.g., template
   specialization vs. member function), then there can be no
   most constrained.
   Note that we don't compare constraints on the functions
   themselves, but rather those of their templates.  */
static tree
most_constrained_function (tree candidates)
{
  /* First pass: a single sweep finds a candidate that is not beaten by
     any candidate appearing after it.  */
  tree best = candidates;
  for (tree rest = TREE_CHAIN (best); rest; rest = TREE_CHAIN (rest))
    {
      int cmp = more_constrained (get_template_for_ordering (best),
				  get_template_for_ordering (rest));
      if (cmp == 0)
	/* Neither dominates the other.  */
	return NULL_TREE;
      if (cmp == -1)
	/* REST is the more constrained of the two.  */
	best = rest;
    }
  /* Second pass: confirm BEST also beats every candidate before it.  */
  for (tree c = candidates; c != best; c = TREE_CHAIN (c))
    if (!more_constrained (get_template_for_ordering (best),
			   get_template_for_ordering (c)))
      return NULL_TREE;
  return best;
}
/* Returns the template (one of the functions given by TEMPLATE_ID)
which can be specialized to match the indicated DECL with the
explicit template args given in TEMPLATE_ID. The DECL may be
NULL_TREE if none is available. In that case, the functions in
TEMPLATE_ID are non-members.
If NEED_MEMBER_TEMPLATE is nonzero the function is known to be a
specialization of a member template.
The TEMPLATE_COUNT is the number of references to qualifying
template classes that appeared in the name of the function. See
check_explicit_specialization for a more accurate description.
TSK indicates what kind of template declaration (if any) is being
declared. TSK_TEMPLATE indicates that the declaration given by
DECL, though a FUNCTION_DECL, has template parameters, and is
therefore a template function.
The template args (those explicitly specified and those deduced)
are output in a newly created vector *TARGS_OUT.
If it is impossible to determine the result, an error message is
issued. The error_mark_node is returned to indicate failure. */
static tree
determine_specialization (tree template_id,
			  tree decl,
			  tree* targs_out,
			  int need_member_template,
			  int template_count,
			  tmpl_spec_kind tsk)
{
  tree fns;
  tree targs;
  tree explicit_targs;
  tree candidates = NULL_TREE;
  /* A TREE_LIST of templates of which DECL may be a specialization.
     The TREE_VALUE of each node is a TEMPLATE_DECL.  The
     corresponding TREE_PURPOSE is the set of template arguments that,
     when used to instantiate the template, would produce a function
     with the signature of DECL.  */
  tree templates = NULL_TREE;
  int header_count;
  cp_binding_level *b;
  *targs_out = NULL_TREE;
  if (template_id == error_mark_node || decl == error_mark_node)
    return error_mark_node;
  /* We shouldn't be specializing a member template of an
     unspecialized class template; we already gave an error in
     check_specialization_scope, now avoid crashing.  */
  if (!VAR_P (decl)
      && template_count && DECL_CLASS_SCOPE_P (decl)
      && template_class_depth (DECL_CONTEXT (decl)) > 0)
    {
      gcc_assert (errorcount);
      return error_mark_node;
    }
  /* Split the TEMPLATE_ID_EXPR into the named function set and the
     explicitly given template arguments.  */
  fns = TREE_OPERAND (template_id, 0);
  explicit_targs = TREE_OPERAND (template_id, 1);
  if (fns == error_mark_node)
    return error_mark_node;
  /* Check for baselinks.  */
  if (BASELINK_P (fns))
    fns = BASELINK_FUNCTIONS (fns);
  if (TREE_CODE (decl) == FUNCTION_DECL && !is_overloaded_fn (fns))
    {
      error_at (DECL_SOURCE_LOCATION (decl),
		"%qD is not a function template", fns);
      return error_mark_node;
    }
  else if (VAR_P (decl) && !variable_template_p (fns))
    {
      error ("%qD is not a variable template", fns);
      return error_mark_node;
    }
  /* Count the number of template headers specified for this
     specialization.  */
  header_count = 0;
  for (b = current_binding_level;
       b->kind == sk_template_parms;
       b = b->level_chain)
    ++header_count;
  /* Remember the full set for diagnostics before we narrow it.  */
  tree orig_fns = fns;
  if (variable_template_p (fns))
    {
      /* For a variable template, coerce the explicit arguments; on
	 success that template is the single candidate.  */
      tree parms = INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (fns));
      targs = coerce_template_parms (parms, explicit_targs, fns,
				     tf_warning_or_error,
				     /*req_all*/true, /*use_defarg*/true);
      if (targs != error_mark_node)
        templates = tree_cons (targs, fns, templates);
    }
  else for (lkp_iterator iter (fns); iter; ++iter)
    {
      tree fn = *iter;
      if (TREE_CODE (fn) == TEMPLATE_DECL)
	{
	  tree decl_arg_types;
	  tree fn_arg_types;
	  tree insttype;
	  /* In case of explicit specialization, we need to check if
	     the number of template headers appearing in the specialization
	     is correct. This is usually done in check_explicit_specialization,
	     but the check done there cannot be exhaustive when specializing
	     member functions. Consider the following code:
	       template <> void A<int>::f(int);
	       template <> template <> void A<int>::f(int);
	     Assuming that A<int> is not itself an explicit specialization
	     already, the first line specializes "f" which is a non-template
	     member function, whilst the second line specializes "f" which
	     is a template member function. So both lines are syntactically
	     correct, and check_explicit_specialization does not reject
	     them.
	     Here, we can do better, as we are matching the specialization
	     against the declarations. We count the number of template
	     headers, and we check if they match TEMPLATE_COUNT + 1
	     (TEMPLATE_COUNT is the number of qualifying template classes,
	     plus there must be another header for the member template
	     itself).
	     Notice that if header_count is zero, this is not a
	     specialization but rather a template instantiation, so there
	     is no check we can perform here.  */
	  if (header_count && header_count != template_count + 1)
	    continue;
	  /* Check that the number of template arguments at the
	     innermost level for DECL is the same as for FN.  */
	  if (current_binding_level->kind == sk_template_parms
	      && !current_binding_level->explicit_spec_p
	      && (TREE_VEC_LENGTH (DECL_INNERMOST_TEMPLATE_PARMS (fn))
		  != TREE_VEC_LENGTH (INNERMOST_TEMPLATE_PARMS
				      (current_template_parms))))
	    continue;
	  /* DECL might be a specialization of FN.  */
	  decl_arg_types = TYPE_ARG_TYPES (TREE_TYPE (decl));
	  fn_arg_types = TYPE_ARG_TYPES (TREE_TYPE (fn));
	  /* For a non-static member function, we need to make sure
	     that the const qualification is the same. Since
	     get_bindings does not try to merge the "this" parameter,
	     we must do the comparison explicitly.  */
	  if (DECL_NONSTATIC_MEMBER_FUNCTION_P (fn))
	    {
	      if (!same_type_p (TREE_VALUE (fn_arg_types),
				TREE_VALUE (decl_arg_types)))
		continue;
	      /* And the ref-qualification.  */
	      if (type_memfn_rqual (TREE_TYPE (decl))
		  != type_memfn_rqual (TREE_TYPE (fn)))
		continue;
	    }
	  /* Skip the "this" parameter and, for constructors of
	     classes with virtual bases, the VTT parameter.  A
	     full specialization of a constructor will have a VTT
	     parameter, but a template never will.  */
	  decl_arg_types
	    = skip_artificial_parms_for (decl, decl_arg_types);
	  fn_arg_types
	    = skip_artificial_parms_for (fn, fn_arg_types);
	  /* Function templates cannot be specializations; there are
	     no partial specializations of functions.  Therefore, if
	     the type of DECL does not match FN, there is no
	     match.
	     Note that it should never be the case that we have both
	     candidates added here, and for regular member functions
	     below. */
	  if (tsk == tsk_template)
	    {
	      if (compparms (fn_arg_types, decl_arg_types))
		candidates = tree_cons (NULL_TREE, fn, candidates);
	      continue;
	    }
	  /* See whether this function might be a specialization of this
	     template.  Suppress access control because we might be trying
	     to make this specialization a friend, and we have already done
	     access control for the declaration of the specialization.  */
	  push_deferring_access_checks (dk_no_check);
	  targs = get_bindings (fn, decl, explicit_targs, /*check_ret=*/true);
	  pop_deferring_access_checks ();
	  if (!targs)
	    /* We cannot deduce template arguments that when used to
	       specialize TMPL will produce DECL.  */
	    continue;
	  if (uses_template_parms (targs))
	    /* We deduced something involving 'auto', which isn't a valid
	       template argument.  */
	    continue;
	  /* Remove, from the set of candidates, all those functions
	     whose constraints are not satisfied. */
	  if (flag_concepts && !constraints_satisfied_p (fn, targs))
	    continue;
	  // Then, try to form the new function type.
	  insttype = tsubst (TREE_TYPE (fn), targs, tf_fndecl_type, NULL_TREE);
	  if (insttype == error_mark_node)
	    continue;
	  fn_arg_types
	    = skip_artificial_parms_for (fn, TYPE_ARG_TYPES (insttype));
	  if (!compparms (fn_arg_types, decl_arg_types))
	    continue;
	  /* Save this template, and the arguments deduced.  */
	  templates = tree_cons (targs, fn, templates);
	}
      else if (need_member_template)
	/* FN is an ordinary member function, and we need a
	   specialization of a member template.  */
	;
      else if (TREE_CODE (fn) != FUNCTION_DECL)
	/* We can get IDENTIFIER_NODEs here in certain erroneous
	   cases.  */
	;
      else if (!DECL_FUNCTION_MEMBER_P (fn))
	/* This is just an ordinary non-member function.  Nothing can
	   be a specialization of that.  */
	;
      else if (DECL_ARTIFICIAL (fn))
	/* Cannot specialize functions that are created implicitly.  */
	;
      else
	{
	  tree decl_arg_types;
	  /* This is an ordinary member function.  However, since
	     we're here, we can assume its enclosing class is a
	     template class.  For example,
	       template <typename T> struct S { void f(); };
	       template <> void S<int>::f() {}
	     Here, S<int>::f is a non-template, but S<int> is a
	     template class.  If FN has the same type as DECL, we
	     might be in business.  */
	  if (!DECL_TEMPLATE_INFO (fn))
	    /* Its enclosing class is an explicit specialization
	       of a template class.  This is not a candidate.  */
	    continue;
	  if (!same_type_p (TREE_TYPE (TREE_TYPE (decl)),
			    TREE_TYPE (TREE_TYPE (fn))))
	    /* The return types differ.  */
	    continue;
	  /* Adjust the type of DECL in case FN is a static member.  */
	  decl_arg_types = TYPE_ARG_TYPES (TREE_TYPE (decl));
	  if (DECL_STATIC_FUNCTION_P (fn)
	      && DECL_NONSTATIC_MEMBER_FUNCTION_P (decl))
	    decl_arg_types = TREE_CHAIN (decl_arg_types);
	  if (!compparms (TYPE_ARG_TYPES (TREE_TYPE (fn)),
			  decl_arg_types))
            continue;
	  if (DECL_NONSTATIC_MEMBER_FUNCTION_P (fn)
	      && (type_memfn_rqual (TREE_TYPE (decl))
		  != type_memfn_rqual (TREE_TYPE (fn))))
	    continue;
	  // If the deduced arguments do not satisfy the constraints,
	  // this is not a candidate.
	  if (flag_concepts && !constraints_satisfied_p (fn))
	    continue;
	  // Add the candidate.
	  candidates = tree_cons (NULL_TREE, fn, candidates);
	}
    }
  if (templates && TREE_CHAIN (templates))
    {
      /* We have:
	 [temp.expl.spec]
	 It is possible for a specialization with a given function
	 signature to be instantiated from more than one function
	 template.  In such cases, explicit specification of the
	 template arguments must be used to uniquely identify the
	 function template specialization being specialized.
	 Note that here, there's no suggestion that we're supposed to
	 determine which of the candidate templates is most
	 specialized.  However, we, also have:
	 [temp.func.order]
	 Partial ordering of overloaded function template
	 declarations is used in the following contexts to select
	 the function template to which a function template
	 specialization refers:
	 -- when an explicit specialization refers to a function
	    template.
	 So, we do use the partial ordering rules, at least for now.
	 This extension can only serve to make invalid programs valid,
	 so it's safe.  And, there is strong anecdotal evidence that
	 the committee intended the partial ordering rules to apply;
	 the EDG front end has that behavior, and John Spicer claims
	 that the committee simply forgot to delete the wording in
	 [temp.expl.spec].  */
      tree tmpl = most_specialized_instantiation (templates);
      if (tmpl != error_mark_node)
	{
	  /* Keep only the most specialized candidate.  */
	  templates = tmpl;
	  TREE_CHAIN (templates) = NULL_TREE;
	}
    }
  // Concepts allows multiple declarations of member functions
  // with the same signature. Like above, we need to rely on
  // on the partial ordering of those candidates to determine which
  // is the best.
  if (flag_concepts && candidates && TREE_CHAIN (candidates))
    {
      if (tree cand = most_constrained_function (candidates))
	{
	  candidates = cand;
	  TREE_CHAIN (cand) = NULL_TREE;
	}
    }
  if (templates == NULL_TREE && candidates == NULL_TREE)
    {
      error ("template-id %qD for %q+D does not match any template "
	     "declaration", template_id, decl);
      if (header_count && header_count != template_count + 1)
	inform (DECL_SOURCE_LOCATION (decl),
		"saw %d %<template<>%>, need %d for "
		"specializing a member function template",
		header_count, template_count + 1);
      else
	print_candidates (orig_fns);
      return error_mark_node;
    }
  else if ((templates && TREE_CHAIN (templates))
	   || (candidates && TREE_CHAIN (candidates))
	   || (templates && candidates))
    {
      error ("ambiguous template specialization %qD for %q+D",
	     template_id, decl);
      candidates = chainon (candidates, templates);
      print_candidates (candidates);
      return error_mark_node;
    }
  /* We have one, and exactly one, match.  */
  if (candidates)
    {
      tree fn = TREE_VALUE (candidates);
      *targs_out = copy_node (DECL_TI_ARGS (fn));
      /* Propagate the candidate's constraints to the declaration.  */
      set_constraints (decl, get_constraints (fn));
      /* DECL is a re-declaration or partial instantiation of a template
	 function.  */
      if (TREE_CODE (fn) == TEMPLATE_DECL)
	return fn;
      /* It was a specialization of an ordinary member function in a
	 template class.  */
      return DECL_TI_TEMPLATE (fn);
    }
  /* It was a specialization of a template.  */
  targs = DECL_TI_ARGS (DECL_TEMPLATE_RESULT (TREE_VALUE (templates)));
  if (TMPL_ARGS_HAVE_MULTIPLE_LEVELS (targs))
    {
      /* Replace only the innermost argument level with the deduced
	 arguments, keeping outer levels from the template.  */
      *targs_out = copy_node (targs);
      SET_TMPL_ARGS_LEVEL (*targs_out,
			   TMPL_ARGS_DEPTH (*targs_out),
			   TREE_PURPOSE (templates));
    }
  else
    *targs_out = TREE_PURPOSE (templates);
  return TREE_VALUE (templates);
}
/* Returns a chain of parameter types, exactly like the SPEC_TYPES,
   but with the default argument values filled in from those in the
   TMPL_TYPES.  */
static tree
copy_default_args_to_explicit_spec_1 (tree spec_types,
				      tree tmpl_types)
{
  if (spec_types == NULL_TREE)
    return NULL_TREE;
  if (spec_types == void_list_node)
    /* Terminator of a non-varargs parameter list: nothing to merge.  */
    return void_list_node;
  /* Build the merged tail first, then rebuild this node, attaching the
     template's default argument (TREE_PURPOSE) to the specialization's
     parameter type (TREE_VALUE).  */
  tree merged_rest
    = copy_default_args_to_explicit_spec_1 (TREE_CHAIN (spec_types),
					    TREE_CHAIN (tmpl_types));
  return hash_tree_cons (TREE_PURPOSE (tmpl_types),
			 TREE_VALUE (spec_types),
			 merged_rest);
}
/* DECL is an explicit specialization. Replicate default arguments
from the template it specializes. (That way, code like:
template <class T> void f(T = 3);
template <> void f(double);
void g () { f (); }
works, as required.) An alternative approach would be to look up
the correct default arguments at the call-site, but this approach
is consistent with how implicit instantiations are handled. */
static void
copy_default_args_to_explicit_spec (tree decl)
{
  tree tmpl;
  tree spec_types;
  tree tmpl_types;
  tree new_spec_types;
  tree old_type;
  tree new_type;
  tree t;
  tree object_type = NULL_TREE;
  tree in_charge = NULL_TREE;
  tree vtt = NULL_TREE;
  /* See if there's anything we need to do.  */
  tmpl = DECL_TI_TEMPLATE (decl);
  tmpl_types = TYPE_ARG_TYPES (TREE_TYPE (DECL_TEMPLATE_RESULT (tmpl)));
  /* Scan for any parameter carrying a default argument (TREE_PURPOSE);
     if none exist there is nothing to copy.  */
  for (t = tmpl_types; t; t = TREE_CHAIN (t))
    if (TREE_PURPOSE (t))
      break;
  if (!t)
    return;
  old_type = TREE_TYPE (decl);
  spec_types = TYPE_ARG_TYPES (old_type);
  if (DECL_NONSTATIC_MEMBER_FUNCTION_P (decl))
    {
      /* Remove the this pointer, but remember the object's type for
	 CV quals.  */
      object_type = TREE_TYPE (TREE_VALUE (spec_types));
      spec_types = TREE_CHAIN (spec_types);
      tmpl_types = TREE_CHAIN (tmpl_types);
      if (DECL_HAS_IN_CHARGE_PARM_P (decl))
	{
	  /* DECL may contain more parameters than TMPL due to the extra
	     in-charge parameter in constructors and destructors.  */
	  in_charge = spec_types;
	  spec_types = TREE_CHAIN (spec_types);
	}
      if (DECL_HAS_VTT_PARM_P (decl))
	{
	  /* Likewise set aside the VTT parameter; it is restored below.  */
	  vtt = spec_types;
	  spec_types = TREE_CHAIN (spec_types);
	}
    }
  /* Compute the merged default arguments.  */
  new_spec_types =
    copy_default_args_to_explicit_spec_1 (spec_types, tmpl_types);
  /* Compute the new FUNCTION_TYPE.  */
  if (object_type)
    {
      /* Re-prepend the artificial parameters that were stripped above,
	 VTT first so the final order is in-charge, VTT, user parms.  */
      if (vtt)
	new_spec_types = hash_tree_cons (TREE_PURPOSE (vtt),
					 TREE_VALUE (vtt),
					 new_spec_types);
      if (in_charge)
	/* Put the in-charge parameter back.  */
	new_spec_types = hash_tree_cons (TREE_PURPOSE (in_charge),
					 TREE_VALUE (in_charge),
					 new_spec_types);
      new_type = build_method_type_directly (object_type,
					     TREE_TYPE (old_type),
					     new_spec_types);
    }
  else
    new_type = build_function_type (TREE_TYPE (old_type),
				    new_spec_types);
  /* Carry the old type's attributes and language-specific qualifiers
     over to the rebuilt type before installing it.  */
  new_type = cp_build_type_attribute_variant (new_type,
					      TYPE_ATTRIBUTES (old_type));
  new_type = cxx_copy_lang_qualifiers (new_type, old_type);
  TREE_TYPE (decl) = new_type;
}
/* Return the number of template headers we expect to see for a definition
   or specialization of CTYPE or one of its non-template members.  */
int
num_template_headers_for_class (tree ctype)
{
  int count = 0;
  for (; ctype && CLASS_TYPE_P (ctype); ctype = TYPE_CONTEXT (ctype))
    {
      /* You're supposed to have one `template <...>' for every
	 template class, but you don't need one for a full
	 specialization.  For example:
	   template <class T> struct S{};
	   template <> struct S<int> { void f(); };
	   void S<int>::f () {}
	 is correct; there shouldn't be a `template <>' for the
	 definition of `S<int>::f'.  */
      if (!CLASSTYPE_TEMPLATE_INFO (ctype))
	/* If CTYPE does not have template information of any kind,
	   then it is not a template, nor is it nested within a
	   template.  */
	break;
      if (explicit_class_specialization_p (ctype))
	/* Full specializations contribute no header.  */
	break;
      if (PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (ctype)))
	++count;
    }
  return count;
}
/* Do a simple sanity check on the template headers that precede the
   variable declaration DECL.  */
void
check_template_variable (tree decl)
{
  tree ctx = CP_DECL_CONTEXT (decl);
  int wanted = num_template_headers_for_class (ctx);
  /* Is DECL itself a primary variable template?  */
  bool primary_var_template
    = (DECL_LANG_SPECIFIC (decl) && DECL_TEMPLATE_INFO (decl)
       && PRIMARY_TEMPLATE_P (DECL_TI_TEMPLATE (decl)));
  if (primary_var_template)
    {
      if (cxx_dialect < cxx14)
	pedwarn (DECL_SOURCE_LOCATION (decl), 0,
		 "variable templates only available with "
		 "%<-std=c++14%> or %<-std=gnu++14%>");
      // Namespace-scope variable templates should have a template header.
      ++wanted;
    }
  if (template_header_count > wanted)
    {
      auto_diagnostic_group d;
      bool warned = pedwarn (DECL_SOURCE_LOCATION (decl), 0,
			     "too many template headers for %qD "
			     "(should be %d)",
			     decl, wanted);
      /* Only add the hint when the warning was actually emitted.  */
      if (warned && CLASS_TYPE_P (ctx)
	  && CLASSTYPE_TEMPLATE_SPECIALIZATION (ctx))
	inform (DECL_SOURCE_LOCATION (decl),
		"members of an explicitly specialized class are defined "
		"without a template header");
    }
}
/* An explicit specialization whose declarator-id or class-head-name is not
   qualified shall be declared in the nearest enclosing namespace of the
   template, or, if the namespace is inline (7.3.1), any namespace from its
   enclosing namespace set.

   If the name declared in the explicit instantiation is an unqualified name,
   the explicit instantiation shall appear in the namespace where its template
   is declared or, if that namespace is inline (7.3.1), any namespace from its
   enclosing namespace set.  */
void
check_unqualified_spec_or_inst (tree t, location_t loc)
{
  tree tmpl = most_general_template (t);

  /* Nothing to diagnose unless TMPL lives at namespace scope and the
     current namespace is outside its enclosing namespace set.  */
  if (!DECL_NAMESPACE_SCOPE_P (tmpl)
      || is_nested_namespace (current_namespace,
			      CP_DECL_CONTEXT (tmpl), true))
    return;

  if (processing_specialization)
    permerror (loc, "explicit specialization of %qD outside its "
	       "namespace must use a nested-name-specifier", tmpl);
  else if (processing_explicit_instantiation
	   && cxx_dialect >= cxx11)
    /* This was allowed in C++98, so only pedwarn.  */
    pedwarn (loc, OPT_Wpedantic, "explicit instantiation of %qD "
	     "outside its namespace must use a nested-name-"
	     "specifier", tmpl);
}
/* Warn for a template specialization SPEC that is missing some of a set
   of function or type attributes that the template TMPL is declared with.
   ATTRLIST is a list of additional attributes that SPEC should be taken
   to ultimately be declared with.  */
static void
warn_spec_missing_attributes (tree tmpl, tree spec, tree attrlist)
{
  if (DECL_FUNCTION_TEMPLATE_P (tmpl))
    tmpl = DECL_TEMPLATE_RESULT (tmpl);

  /* Only a difference in one of the attributes below between the
     primary and the specialization is worth warning about.  */
  const char* const interesting_attrs[] = {
    "alloc_align", "alloc_size", "assume_aligned", "format",
    "format_arg", "malloc", "nonnull", NULL
  };

  /* Collect those of the attributes above that the primary template is
     declared with and the specialization is not, in case it's not
     apparent from the most recent declaration of the primary.  */
  pretty_printer pp;
  unsigned missing = decls_mismatched_attributes (tmpl, spec, attrlist,
						  interesting_attrs, &pp);
  if (missing == 0)
    return;

  auto_diagnostic_group d;
  if (warning_at (DECL_SOURCE_LOCATION (spec), OPT_Wmissing_attributes,
		  "explicit specialization %q#D may be missing attributes",
		  spec))
    inform (DECL_SOURCE_LOCATION (tmpl),
	    missing > 1
	    ? G_("missing primary template attributes %s")
	    : G_("missing primary template attribute %s"),
	    pp_formatted_text (&pp));
}
/* Check to see if the function just declared, as indicated in
DECLARATOR, and in DECL, is a specialization of a function
template. We may also discover that the declaration is an explicit
instantiation at this point.
Returns DECL, or an equivalent declaration that should be used
instead if all goes well. Issues an error message if something is
amiss. Returns error_mark_node if the error is not easily
recoverable.
FLAGS is a bitmask consisting of the following flags:
2: The function has a definition.
4: The function is a friend.
8: The declaration is a concept (read into IS_CONCEPT below).
The TEMPLATE_COUNT is the number of references to qualifying
template classes that appeared in the name of the function. For
example, in
template <class T> struct S { void f(); };
void S<int>::f();
the TEMPLATE_COUNT would be 1. However, explicitly specialized
classes are not counted in the TEMPLATE_COUNT, so that in
template <class T> struct S {};
template <> struct S<int> { void f(); }
template <> void S<int>::f();
the TEMPLATE_COUNT would be 0. (Note that this declaration is
invalid; there should be no template <>.)
If the function is a specialization, it is marked as such via
DECL_TEMPLATE_SPECIALIZATION. Furthermore, its DECL_TEMPLATE_INFO
is set up correctly, and it is added to the list of specializations
for that template. */
tree
check_explicit_specialization (tree declarator,
tree decl,
int template_count,
int flags,
tree attrlist)
{
int have_def = flags & 2;
int is_friend = flags & 4;
bool is_concept = flags & 8;
int specialization = 0;
int explicit_instantiation = 0;
int member_specialization = 0;
tree ctype = DECL_CLASS_CONTEXT (decl);
tree dname = DECL_NAME (decl);
tmpl_spec_kind tsk;
if (is_friend)
{
if (!processing_specialization)
tsk = tsk_none;
else
tsk = tsk_excessive_parms;
}
else
tsk = current_tmpl_spec_kind (template_count);
/* Classify the declaration based on the kind of template header
arrangement found; each case below sets the SPECIALIZATION /
MEMBER_SPECIALIZATION / EXPLICIT_INSTANTIATION flags used later. */
switch (tsk)
{
case tsk_none:
if (processing_specialization && !VAR_P (decl))
{
specialization = 1;
SET_DECL_TEMPLATE_SPECIALIZATION (decl);
}
else if (TREE_CODE (declarator) == TEMPLATE_ID_EXPR)
{
if (is_friend)
/* This could be something like:
template <class T> void f(T);
class S { friend void f<>(int); } */
specialization = 1;
else
{
/* This case handles bogus declarations like template <>
template <class T> void f<int>(); */
error_at (cp_expr_loc_or_input_loc (declarator),
"template-id %qE in declaration of primary template",
declarator);
return decl;
}
}
break;
case tsk_invalid_member_spec:
/* The error has already been reported in
check_specialization_scope. */
return error_mark_node;
case tsk_invalid_expl_inst:
error ("template parameter list used in explicit instantiation");
/* Fall through. */
case tsk_expl_inst:
if (have_def)
error ("definition provided for explicit instantiation");
explicit_instantiation = 1;
break;
case tsk_excessive_parms:
case tsk_insufficient_parms:
if (tsk == tsk_excessive_parms)
error ("too many template parameter lists in declaration of %qD",
decl);
else if (template_header_count)
error("too few template parameter lists in declaration of %qD", decl);
else
error("explicit specialization of %qD must be introduced by "
"%<template <>%>", decl);
/* Fall through. */
case tsk_expl_spec:
if (is_concept)
error ("explicit specialization declared %<concept%>");
if (VAR_P (decl) && TREE_CODE (declarator) != TEMPLATE_ID_EXPR)
/* In cases like template<> constexpr bool v = true;
We'll give an error in check_template_variable. */
break;
SET_DECL_TEMPLATE_SPECIALIZATION (decl);
if (ctype)
member_specialization = 1;
else
specialization = 1;
break;
case tsk_template:
if (TREE_CODE (declarator) == TEMPLATE_ID_EXPR)
{
/* This case handles bogus declarations like template <>
template <class T> void f<int>(); */
if (!uses_template_parms (TREE_OPERAND (declarator, 1)))
error_at (cp_expr_loc_or_input_loc (declarator),
"template-id %qE in declaration of primary template",
declarator);
else if (variable_template_p (TREE_OPERAND (declarator, 0)))
{
/* Partial specialization of variable template. */
SET_DECL_TEMPLATE_SPECIALIZATION (decl);
specialization = 1;
goto ok;
}
else if (cxx_dialect < cxx14)
error_at (cp_expr_loc_or_input_loc (declarator),
"non-type partial specialization %qE "
"is not allowed", declarator);
else
error_at (cp_expr_loc_or_input_loc (declarator),
"non-class, non-variable partial specialization %qE "
"is not allowed", declarator);
return decl;
ok:;
}
if (ctype && CLASSTYPE_TEMPLATE_INSTANTIATION (ctype))
/* This is a specialization of a member template, without
specialization the containing class. Something like:
template <class T> struct S {
template <class U> void f (U);
};
template <> template <class U> void S<int>::f(U) {}
That's a specialization -- but of the entire template.
That is, in this case, a different view onto the same
template. Here we refuse to look at the base view. That
view may even be incomplete at this point. */
specialization = 1;
break;
default:
gcc_unreachable ();
}
if ((specialization || member_specialization)
/* This doesn't apply to variable templates. */
&& FUNC_OR_METHOD_TYPE_P (TREE_TYPE (decl)))
{
tree t = TYPE_ARG_TYPES (TREE_TYPE (decl));
for (; t; t = TREE_CHAIN (t))
if (TREE_PURPOSE (t))
{
permerror (input_location,
"default argument specified in explicit specialization");
break;
}
}
/* At this point the declaration is known to name a specialization or
an explicit instantiation of some template; find that template and
record the result. */
if (specialization || member_specialization || explicit_instantiation)
{
tree tmpl = NULL_TREE;
tree targs = NULL_TREE;
bool was_template_id = (TREE_CODE (declarator) == TEMPLATE_ID_EXPR);
/* Make sure that the declarator is a TEMPLATE_ID_EXPR. */
if (!was_template_id)
{
tree fns;
gcc_assert (identifier_p (declarator));
if (ctype)
fns = dname;
else
{
/* If there is no class context, the explicit instantiation
must be at namespace scope. */
gcc_assert (DECL_NAMESPACE_SCOPE_P (decl));
/* Find the namespace binding, using the declaration
context. */
fns = lookup_qualified_name (CP_DECL_CONTEXT (decl), dname,
false, true);
if (fns == error_mark_node)
/* If lookup fails, look for a friend declaration so we can
give a better diagnostic. */
fns = lookup_qualified_name (CP_DECL_CONTEXT (decl), dname,
/*type*/false, /*complain*/true,
/*hidden*/true);
if (fns == error_mark_node || !is_overloaded_fn (fns))
{
error ("%qD is not a template function", dname);
fns = error_mark_node;
}
}
declarator = lookup_template_function (fns, NULL_TREE);
}
if (declarator == error_mark_node)
return error_mark_node;
if (ctype != NULL_TREE && TYPE_BEING_DEFINED (ctype))
{
if (!explicit_instantiation)
/* A specialization in class scope. This is invalid,
but the error will already have been flagged by
check_specialization_scope. */
return error_mark_node;
else
{
/* It's not valid to write an explicit instantiation in
class scope, e.g.:
class C { template void f(); }
This case is caught by the parser. However, on
something like:
template class C { void f(); };
(which is invalid) we can get here. The error will be
issued later. */
;
}
return decl;
}
else if (ctype != NULL_TREE
&& (identifier_p (TREE_OPERAND (declarator, 0))))
{
// We'll match variable templates in start_decl.
if (VAR_P (decl))
return decl;
/* Find the list of functions in ctype that have the same
name as the declared function. */
tree name = TREE_OPERAND (declarator, 0);
if (constructor_name_p (name, ctype))
{
if (DECL_CONSTRUCTOR_P (decl)
? !TYPE_HAS_USER_CONSTRUCTOR (ctype)
: !CLASSTYPE_DESTRUCTOR (ctype))
{
/* From [temp.expl.spec]:
If such an explicit specialization for the member
of a class template names an implicitly-declared
special member function (clause _special_), the
program is ill-formed.
Similar language is found in [temp.explicit]. */
error ("specialization of implicitly-declared special member function");
return error_mark_node;
}
name = DECL_NAME (decl);
}
/* For a type-conversion operator, We might be looking for
`operator int' which will be a specialization of
`operator T'. Grab all the conversion operators, and
then select from them. */
tree fns = get_class_binding (ctype, IDENTIFIER_CONV_OP_P (name)
? conv_op_identifier : name);
if (fns == NULL_TREE)
{
error ("no member function %qD declared in %qT", name, ctype);
return error_mark_node;
}
else
TREE_OPERAND (declarator, 0) = fns;
}
/* Figure out what exactly is being specialized at this point.
Note that for an explicit instantiation, even one for a
member function, we cannot tell a priori whether the
instantiation is for a member template, or just a member
function of a template class. Even if a member template is
being instantiated, the member template arguments may be
elided if they can be deduced from the rest of the
declaration. */
tmpl = determine_specialization (declarator, decl,
&targs,
member_specialization,
template_count,
tsk);
if (!tmpl || tmpl == error_mark_node)
/* We couldn't figure out what this declaration was
specializing. */
return error_mark_node;
else
{
if (TREE_CODE (decl) == FUNCTION_DECL
&& DECL_HIDDEN_FRIEND_P (tmpl))
{
auto_diagnostic_group d;
if (pedwarn (DECL_SOURCE_LOCATION (decl), 0,
"friend declaration %qD is not visible to "
"explicit specialization", tmpl))
inform (DECL_SOURCE_LOCATION (tmpl),
"friend declaration here");
}
/* Diagnose unqualified specializations/instantiations that are
declared outside the template's namespace (see
check_unqualified_spec_or_inst). */
else if (!ctype && !is_friend
&& CP_DECL_CONTEXT (decl) == current_namespace)
check_unqualified_spec_or_inst (tmpl, DECL_SOURCE_LOCATION (decl));
tree gen_tmpl = most_general_template (tmpl);
if (explicit_instantiation)
{
/* We don't set DECL_EXPLICIT_INSTANTIATION here; that
is done by do_decl_instantiation later. */
int arg_depth = TMPL_ARGS_DEPTH (targs);
int parm_depth = TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (tmpl));
if (arg_depth > parm_depth)
{
/* If TMPL is not the most general template (for
example, if TMPL is a friend template that is
injected into namespace scope), then there will
be too many levels of TARGS. Remove some of them
here. */
int i;
tree new_targs;
new_targs = make_tree_vec (parm_depth);
for (i = arg_depth - parm_depth; i < arg_depth; ++i)
TREE_VEC_ELT (new_targs, i - (arg_depth - parm_depth))
= TREE_VEC_ELT (targs, i);
targs = new_targs;
}
return instantiate_template (tmpl, targs, tf_error);
}
/* If we thought that the DECL was a member function, but it
turns out to be specializing a static member function,
make DECL a static member function as well. */
if (DECL_FUNCTION_TEMPLATE_P (tmpl)
&& DECL_STATIC_FUNCTION_P (tmpl)
&& DECL_NONSTATIC_MEMBER_FUNCTION_P (decl))
revert_static_member_fn (decl);
/* If this is a specialization of a member template of a
template class, we want to return the TEMPLATE_DECL, not
the specialization of it. */
if (tsk == tsk_template && !was_template_id)
{
tree result = DECL_TEMPLATE_RESULT (tmpl);
SET_DECL_TEMPLATE_SPECIALIZATION (tmpl);
DECL_INITIAL (result) = NULL_TREE;
if (have_def)
{
tree parm;
DECL_SOURCE_LOCATION (tmpl) = DECL_SOURCE_LOCATION (decl);
DECL_SOURCE_LOCATION (result)
= DECL_SOURCE_LOCATION (decl);
/* We want to use the argument list specified in the
definition, not in the original declaration. */
DECL_ARGUMENTS (result) = DECL_ARGUMENTS (decl);
for (parm = DECL_ARGUMENTS (result); parm;
parm = DECL_CHAIN (parm))
DECL_CONTEXT (parm) = result;
}
return register_specialization (tmpl, gen_tmpl, targs,
is_friend, 0);
}
/* Set up the DECL_TEMPLATE_INFO for DECL. */
DECL_TEMPLATE_INFO (decl) = build_template_info (tmpl, targs);
if (was_template_id)
TINFO_USED_TEMPLATE_ID (DECL_TEMPLATE_INFO (decl)) = true;
/* Inherit default function arguments from the template
DECL is specializing. */
if (DECL_FUNCTION_TEMPLATE_P (tmpl))
copy_default_args_to_explicit_spec (decl);
/* This specialization has the same protection as the
template it specializes. */
TREE_PRIVATE (decl) = TREE_PRIVATE (gen_tmpl);
TREE_PROTECTED (decl) = TREE_PROTECTED (gen_tmpl);
/* 7.1.1-1 [dcl.stc]
A storage-class-specifier shall not be specified in an
explicit specialization...
The parser rejects these, so unless action is taken here,
explicit function specializations will always appear with
global linkage.
The action recommended by the C++ CWG in response to C++
defect report 605 is to make the storage class and linkage
of the explicit specialization match the templated function:
http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#605
*/
if (tsk == tsk_expl_spec && DECL_FUNCTION_TEMPLATE_P (gen_tmpl))
{
tree tmpl_func = DECL_TEMPLATE_RESULT (gen_tmpl);
gcc_assert (TREE_CODE (tmpl_func) == FUNCTION_DECL);
/* A concept cannot be specialized. */
if (DECL_DECLARED_CONCEPT_P (tmpl_func))
{
error ("explicit specialization of function concept %qD",
gen_tmpl);
return error_mark_node;
}
/* This specialization has the same linkage and visibility as
the function template it specializes. */
TREE_PUBLIC (decl) = TREE_PUBLIC (tmpl_func);
if (! TREE_PUBLIC (decl))
{
DECL_INTERFACE_KNOWN (decl) = 1;
DECL_NOT_REALLY_EXTERN (decl) = 1;
}
DECL_THIS_STATIC (decl) = DECL_THIS_STATIC (tmpl_func);
if (DECL_VISIBILITY_SPECIFIED (tmpl_func))
{
DECL_VISIBILITY_SPECIFIED (decl) = 1;
DECL_VISIBILITY (decl) = DECL_VISIBILITY (tmpl_func);
}
}
/* If DECL is a friend declaration, declared using an
unqualified name, the namespace associated with DECL may
have been set incorrectly. For example, in:
template <typename T> void f(T);
namespace N {
struct S { friend void f<int>(int); }
}
we will have set the DECL_CONTEXT for the friend
declaration to N, rather than to the global namespace. */
if (DECL_NAMESPACE_SCOPE_P (decl))
DECL_CONTEXT (decl) = DECL_CONTEXT (tmpl);
if (is_friend && !have_def)
/* This is not really a declaration of a specialization.
It's just the name of an instantiation. But, it's not
a request for an instantiation, either. */
SET_DECL_IMPLICIT_INSTANTIATION (decl);
else if (TREE_CODE (decl) == FUNCTION_DECL)
/* A specialization is not necessarily COMDAT. */
DECL_COMDAT (decl) = (TREE_PUBLIC (decl)
&& DECL_DECLARED_INLINE_P (decl));
else if (VAR_P (decl))
DECL_COMDAT (decl) = false;
/* If this is a full specialization, register it so that we can find
it again. Partial specializations will be registered in
process_partial_specialization. */
if (!processing_template_decl)
{
warn_spec_missing_attributes (gen_tmpl, decl, attrlist);
decl = register_specialization (decl, gen_tmpl, targs,
is_friend, 0);
}
/* A 'structor should already have clones. */
gcc_assert (decl == error_mark_node
|| variable_template_p (tmpl)
|| !(DECL_CONSTRUCTOR_P (decl)
|| DECL_DESTRUCTOR_P (decl))
|| DECL_CLONED_FUNCTION_P (DECL_CHAIN (decl)));
}
}
return decl;
}
/* Returns 1 iff PARMS1 and PARMS2 are identical sets of template
   parameters.  These are represented in the same format used for
   DECL_TEMPLATE_PARMS.  */
int
comp_template_parms (const_tree parms1, const_tree parms2)
{
  if (parms1 == parms2)
    return 1;

  const_tree l1 = parms1;
  const_tree l2 = parms2;
  while (l1 != NULL_TREE && l2 != NULL_TREE)
    {
      tree vec1 = TREE_VALUE (l1);
      tree vec2 = TREE_VALUE (l2);

      gcc_assert (TREE_CODE (vec1) == TREE_VEC);
      gcc_assert (TREE_CODE (vec2) == TREE_VEC);

      if (TREE_VEC_LENGTH (vec1) != TREE_VEC_LENGTH (vec2))
	return 0;

      for (int i = 0; i < TREE_VEC_LENGTH (vec2); ++i)
	{
	  tree parm1 = TREE_VALUE (TREE_VEC_ELT (vec1, i));
	  tree parm2 = TREE_VALUE (TREE_VEC_ELT (vec2, i));

	  /* If either of the template parameters are invalid, assume
	     they match for the sake of error recovery.  */
	  if (error_operand_p (parm1) || error_operand_p (parm2))
	    return 1;

	  if (TREE_CODE (parm1) != TREE_CODE (parm2))
	    return 0;

	  if (TREE_CODE (parm1) == TEMPLATE_TYPE_PARM
	      && (TEMPLATE_TYPE_PARAMETER_PACK (parm1)
		  == TEMPLATE_TYPE_PARAMETER_PACK (parm2)))
	    continue;
	  else if (!same_type_p (TREE_TYPE (parm1), TREE_TYPE (parm2)))
	    return 0;
	}

      l1 = TREE_CHAIN (l1);
      l2 = TREE_CHAIN (l2);
    }

  if ((l1 != NULL_TREE) != (l2 != NULL_TREE))
    /* One set of parameters has more parameters lists than the
       other.  */
    return 0;

  return 1;
}
/* Returns true if two template parameters are declared with
   equivalent constraints.  */
static bool
template_parameter_constraints_equivalent_p (const_tree parm1, const_tree parm2)
{
  tree req1 = TREE_TYPE (parm1);
  tree req2 = TREE_TYPE (parm2);

  /* If either requirement is absent, they are equivalent only when
     both are.  */
  if (req1 == NULL_TREE || req2 == NULL_TREE)
    return !req1 == !req2;

  return cp_tree_equal (req1, req2);
}
/* Returns true when two template parameters are equivalent.  */
static bool
template_parameters_equivalent_p (const_tree parm1, const_tree parm2)
{
  tree decl1 = TREE_VALUE (parm1);
  tree decl2 = TREE_VALUE (parm2);

  /* If either of the template parameters are invalid, assume
     they match for the sake of error recovery.  */
  if (error_operand_p (decl1) || error_operand_p (decl2))
    return true;

  /* ... they declare parameters of the same kind.  */
  if (TREE_CODE (decl1) != TREE_CODE (decl2))
    return false;

  /* ... if one parameter was introduced by a parameter declaration,
     then both are.  This case arises as a result of eagerly rewriting
     declarations during parsing.  */
  if (DECL_VIRTUAL_P (decl1) != DECL_VIRTUAL_P (decl2))
    return false;

  /* ... if either declares a pack, they both do.  */
  if (template_parameter_pack_p (decl1) != template_parameter_pack_p (decl2))
    return false;

  /* At this point both parameters have the same TREE_CODE, so one
     switch covers the kind-specific checks.  */
  switch (TREE_CODE (decl1))
    {
    case PARM_DECL:
      /* ... non-type parameters must have equivalent types.  */
      if (!same_type_p (TREE_TYPE (decl1), TREE_TYPE (decl2)))
	return false;
      break;

    case TEMPLATE_DECL:
      /* ... template template parameters must have equivalent
	 template parameter lists.  */
      if (!template_heads_equivalent_p (decl1, decl2))
	return false;
      break;

    default:
      break;
    }

  /* ... if they are declared with a qualified-concept name, they both
     are, and those names are equivalent.  */
  return template_parameter_constraints_equivalent_p (parm1, parm2);
}
/* Returns true if two template parameter lists are equivalent.
   Two template parameter lists are equivalent if they have the
   same length and their corresponding parameters are equivalent.

   PARMS1 and PARMS2 are TREE_LISTs containing TREE_VECs: the
   data structure returned by DECL_TEMPLATE_PARMS.

   This is generally the same implementation as comp_template_parms
   except that it also compares the concept names and arguments used
   to introduce parameters.  */
static bool
template_parameter_lists_equivalent_p (const_tree parms1, const_tree parms2)
{
  if (parms1 == parms2)
    return true;

  /* Walk the two lists of levels in lockstep, comparing each level's
     parameter vector element-wise.  */
  const_tree p1 = parms1;
  const_tree p2 = parms2;
  while (p1 != NULL_TREE && p2 != NULL_TREE)
    {
      tree list1 = TREE_VALUE (p1);
      tree list2 = TREE_VALUE (p2);

      if (TREE_VEC_LENGTH (list1) != TREE_VEC_LENGTH (list2))
	/* Fixed: this function returns bool, so use `false' rather
	   than the old `0' for consistency with the other exits.  */
	return false;

      for (int i = 0; i < TREE_VEC_LENGTH (list2); ++i)
	{
	  tree parm1 = TREE_VEC_ELT (list1, i);
	  tree parm2 = TREE_VEC_ELT (list2, i);
	  if (!template_parameters_equivalent_p (parm1, parm2))
	    return false;
	}

      p1 = TREE_CHAIN (p1);
      p2 = TREE_CHAIN (p2);
    }

  /* Lists of differing depth are not equivalent.  */
  if ((p1 != NULL_TREE) != (p2 != NULL_TREE))
    return false;

  return true;
}
/* Return true if the requires-clause of the template parameter lists are
   equivalent and false otherwise.  */
static bool
template_requirements_equivalent_p (const_tree parms1, const_tree parms2)
{
  tree req1 = TEMPLATE_PARMS_CONSTRAINTS (parms1);
  tree req2 = TEMPLATE_PARMS_CONSTRAINTS (parms2);

  /* If only one list has a requires-clause, they differ.  */
  if ((req1 != NULL_TREE) != (req2 != NULL_TREE))
    return false;

  /* Otherwise compare the clauses structurally (both-NULL compares
     equal through cp_tree_equal, exactly as before).  */
  return cp_tree_equal (req1, req2);
}
/* Returns true if two template heads are equivalent.  17.6.6.1p6:
   Two template heads are equivalent if their template parameter
   lists are equivalent and their requires clauses are equivalent.

   In pre-C++20, this is equivalent to calling comp_template_parms
   for the template parameters of TMPL1 and TMPL2.  */
bool
template_heads_equivalent_p (const_tree tmpl1, const_tree tmpl2)
{
  tree parms1 = DECL_TEMPLATE_PARMS (tmpl1);
  tree parms2 = DECL_TEMPLATE_PARMS (tmpl2);

  /* Don't change the matching rules for pre-C++20.  */
  if (cxx_dialect < cxx2a)
    return comp_template_parms (parms1, parms2);

  /* ... the parameter lists are equivalent, and if either has a
     requires-clause, they both do and those clauses are equivalent.  */
  return (template_parameter_lists_equivalent_p (parms1, parms2)
	  && template_requirements_equivalent_p (parms1, parms2));
}
/* Determine whether PARM is a parameter pack.  */
bool
template_parameter_pack_p (const_tree parm)
{
  switch (TREE_CODE (parm))
    {
    case PARM_DECL:
      /* A non-type template parameter pack.  */
      return (DECL_TEMPLATE_PARM_P (parm)
	      && TEMPLATE_PARM_PARAMETER_PACK (DECL_INITIAL (parm)));

    case TEMPLATE_PARM_INDEX:
      return TEMPLATE_PARM_PARAMETER_PACK (parm);

    case TYPE_DECL:
    case TEMPLATE_DECL:
      /* From a list of template parameters we may see the DECL;
	 look through to its type.  */
      parm = TREE_TYPE (parm);
      break;

    default:
      break;
    }

  /* Otherwise it must be a type template parameter.  */
  return ((TREE_CODE (parm) == TEMPLATE_TYPE_PARM
	   || TREE_CODE (parm) == TEMPLATE_TEMPLATE_PARM)
	  && TEMPLATE_TYPE_PARAMETER_PACK (parm));
}
/* Determine if T is a function parameter pack.  */
bool
function_parameter_pack_p (const_tree t)
{
  return t && TREE_CODE (t) == PARM_DECL && DECL_PACK_P (t);
}
/* Return the function template declaration of PRIMARY_FUNC_TMPL_INST.
   PRIMARY_FUNC_TMPL_INST is a primary function template instantiation.  */
tree
get_function_template_decl (const_tree primary_func_tmpl_inst)
{
  if (primary_func_tmpl_inst
      && TREE_CODE (primary_func_tmpl_inst) == FUNCTION_DECL
      && primary_template_specialization_p (primary_func_tmpl_inst))
    return DECL_TEMPLATE_RESULT (DECL_TI_TEMPLATE (primary_func_tmpl_inst));

  return NULL;
}
/* Return true iff the function parameter PARAM_DECL was expanded
   from the function parameter pack PACK.  */
bool
function_parameter_expanded_from_pack_p (tree param_decl, tree pack)
{
  if (!function_parameter_pack_p (pack) || DECL_ARTIFICIAL (param_decl))
    return false;

  /* A pack and the arguments expanded from it share the same
     DECL_PARM_INDEX.  */
  return DECL_PARM_INDEX (pack) == DECL_PARM_INDEX (param_decl);
}
/* Determine whether ARGS describes a variadic template args list,
   i.e., one that is terminated by a template argument pack.  */
static bool
template_args_variadic_p (tree args)
{
  if (args == NULL_TREE)
    return false;

  args = INNERMOST_TEMPLATE_ARGS (args);
  int count = TREE_VEC_LENGTH (args);
  if (count == 0)
    return false;

  /* Only the trailing argument matters.  */
  tree last = TREE_VEC_ELT (args, count - 1);
  return ARGUMENT_PACK_P (last);
}
/* Generate a new name for the parameter pack name NAME (an
   IDENTIFIER_NODE) that incorporates its index I, so that distinct
   elements of an expanded pack get distinct names of the form
   "name#i".  Returns NAME unchanged when it is NULL_TREE.
   (The original header comment was truncated mid-sentence.)  */
static tree
make_ith_pack_parameter_name (tree name, int i)
{
  if (name == NULL_TREE)
    return name;

  /* Munge the name to include the parameter index.  */
#define NUMBUF_LEN 128
  char numbuf[NUMBUF_LEN];
  char* newname;
  int newname_len;

  /* Format the index first only to learn how many digits it needs.  */
  snprintf (numbuf, NUMBUF_LEN, "%i", i);
  /* +2 covers the '#' separator and the trailing NUL.  */
  newname_len = IDENTIFIER_LENGTH (name)
		+ strlen (numbuf) + 2;
  newname = (char*)alloca (newname_len);
  snprintf (newname, newname_len,
	    "%s#%i", IDENTIFIER_POINTER (name), i);
  return get_identifier (newname);
#undef NUMBUF_LEN
}
/* Return true if T is a primary function, class or alias template
   specialization, not including the template pattern.  */
bool
primary_template_specialization_p (const_tree t)
{
  if (!t)
    return false;

  if (TREE_CODE (t) == FUNCTION_DECL || VAR_P (t))
    return (DECL_LANG_SPECIFIC (t)
	    && DECL_USE_TEMPLATE (t)
	    && DECL_TEMPLATE_INFO (t)
	    && PRIMARY_TEMPLATE_P (DECL_TI_TEMPLATE (t)));

  if (CLASS_TYPE_P (t) && !TYPE_DECL_ALIAS_P (TYPE_NAME (t)))
    return (CLASSTYPE_TEMPLATE_INFO (t)
	    && CLASSTYPE_USE_TEMPLATE (t)
	    && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (t)));

  if (alias_template_specialization_p (t, nt_transparent))
    return true;

  return false;
}
/* Return true if PARM is a template template parameter.  */
bool
template_template_parameter_p (const_tree parm)
{
  if (DECL_TEMPLATE_TEMPLATE_PARM_P (parm))
    return true;
  return false;
}
/* Return true iff PARM is a DECL representing a type template
   parameter.  */
bool
template_type_parameter_p (const_tree parm)
{
  if (!parm)
    return false;
  if (TREE_CODE (parm) != TYPE_DECL && TREE_CODE (parm) != TEMPLATE_DECL)
    return false;
  return DECL_TEMPLATE_PARM_P (parm);
}
/* Return the template parameters of T if T is a
   primary template instantiation, NULL otherwise.  */
tree
get_primary_template_innermost_parameters (const_tree t)
{
  tree tinfo = get_template_info (t);
  if (tinfo && primary_template_specialization_p (t))
    return INNERMOST_TEMPLATE_PARMS
      (DECL_TEMPLATE_PARMS (TI_TEMPLATE (tinfo)));
  return NULL;
}
/* Return the template parameters of the LEVELth level from the full list
   of template parameters PARMS.  */
tree
get_template_parms_at_level (tree parms, int level)
{
  if (!parms
      || TREE_CODE (parms) != TREE_LIST
      || level > TMPL_PARMS_DEPTH (parms))
    return NULL_TREE;

  /* Walk outward through the levels until the depth matches.  */
  tree p = parms;
  while (p)
    {
      if (TMPL_PARMS_DEPTH (p) == level)
	return p;
      p = TREE_CHAIN (p);
    }
  return NULL_TREE;
}
/* Returns the template arguments of T if T is a template instantiation,
   NULL otherwise.  */
tree
get_template_innermost_arguments (const_tree t)
{
  tree tinfo = get_template_info (t);
  if (tinfo && TI_ARGS (tinfo))
    return INNERMOST_TEMPLATE_ARGS (TI_ARGS (tinfo));
  return NULL;
}
/* Return the argument pack elements of T if T is a template argument pack,
   NULL otherwise.  */
tree
get_template_argument_pack_elems (const_tree t)
{
  if (TREE_CODE (t) == TYPE_ARGUMENT_PACK
      || TREE_CODE (t) == NONTYPE_ARGUMENT_PACK)
    return ARGUMENT_PACK_ARGS (t);
  return NULL;
}
/* In an ARGUMENT_PACK_SELECT, the actual underlying argument that the
   ARGUMENT_PACK_SELECT represents.  */
static tree
argument_pack_select_arg (tree t)
{
  tree pack_args = ARGUMENT_PACK_ARGS (ARGUMENT_PACK_SELECT_FROM_PACK (t));
  tree selected = TREE_VEC_ELT (pack_args, ARGUMENT_PACK_SELECT_INDEX (t));

  /* If the selected argument is an expansion E, that most likely means we
     were called from gen_elem_of_pack_expansion_instantiation during the
     substituting of an argument pack (of which the Ith element is a pack
     expansion, where I is ARGUMENT_PACK_SELECT_INDEX) into a pack expansion.
     In this case, the Ith element resulting from this substituting is going
     to be a pack expansion, which pattern is the pattern of E.  Let's return
     the pattern of E, and gen_elem_of_pack_expansion_instantiation will
     build the resulting pack expansion from it.  */
  if (PACK_EXPANSION_P (selected))
    {
      /* Make sure we aren't throwing away arg info.  */
      gcc_assert (!PACK_EXPANSION_EXTRA_ARGS (selected));
      selected = PACK_EXPANSION_PATTERN (selected);
    }

  return selected;
}
/* True iff FN is a function representing a built-in variadic parameter
   pack.  */
bool
builtin_pack_fn_p (tree fn)
{
  if (fn == NULL_TREE
      || TREE_CODE (fn) != FUNCTION_DECL
      || !DECL_IS_BUILTIN (fn))
    return false;

  /* Currently __integer_pack is the only such built-in.  */
  return id_equal (DECL_NAME (fn), "__integer_pack");
}
/* True iff CALL is a call to a function representing a built-in variadic
   parameter pack.  */
static bool
builtin_pack_call_p (tree call)
{
  return (TREE_CODE (call) == CALL_EXPR
	  && builtin_pack_fn_p (CALL_EXPR_FN (call)));
}
/* Return a TREE_VEC for the expansion of __integer_pack(HI). */
static tree
expand_integer_pack (tree call, tree args, tsubst_flags_t complain,
tree in_decl)
{
/* Substitute the template arguments ARGS into the pack's bound
expression. */
tree ohi = CALL_EXPR_ARG (call, 0);
tree hi = tsubst_copy_and_build (ohi, args, complain, in_decl,
false/*fn*/, true/*int_cst*/);
if (value_dependent_expression_p (hi))
{
/* HI still depends on template parameters: rebuild the call with the
substituted bound (if it changed) and return a single-element
vector holding a pack expansion of it, deferring the real
expansion to a later substitution. */
if (hi != ohi)
{
call = copy_node (call);
CALL_EXPR_ARG (call, 0) = hi;
}
tree ex = make_pack_expansion (call, complain);
tree vec = make_tree_vec (1);
TREE_VEC_ELT (vec, 0) = ex;
return vec;
}
else
{
/* The bound is non-dependent: evaluate it and build the vector
0, 1, ..., len-1. */
hi = cxx_constant_value (hi);
int len = valid_constant_size_p (hi) ? tree_to_shwi (hi) : -1;
/* Calculate the largest value of len that won't make the size of the vec
overflow an int. The compiler will exceed resource limits long before
this, but it seems a decent place to diagnose. */
int max = ((INT_MAX - sizeof (tree_vec)) / sizeof (tree)) + 1;
if (len < 0 || len > max)
{
if ((complain & tf_error)
&& hi != error_mark_node)
error ("argument to %<__integer_pack%> must be between 0 and %d",
max);
return error_mark_node;
}
tree vec = make_tree_vec (len);
for (int i = 0; i < len; ++i)
TREE_VEC_ELT (vec, i) = size_int (i);
return vec;
}
}
/* Return a TREE_VEC for the expansion of built-in template parameter pack
   CALL.  */
static tree
expand_builtin_pack_call (tree call, tree args, tsubst_flags_t complain,
			  tree in_decl)
{
  /* Dispatch to the expander for the built-in pack being called;
     __integer_pack is the only one at present.  */
  if (builtin_pack_call_p (call)
      && id_equal (DECL_NAME (CALL_EXPR_FN (call)), "__integer_pack"))
    return expand_integer_pack (call, args, complain, in_decl);

  return NULL_TREE;
}
/* Structure used to track the progress of find_parameter_packs_r.
Passed as the DATA argument of the tree walk; packs found are
accumulated through PARAMETER_PACKS. */
struct find_parameter_pack_data
{
/* TREE_LIST that will contain all of the parameter packs found by
the traversal. Points into the caller's list so additions are
visible to it. */
tree* parameter_packs;
/* Set of AST nodes that have been visited by the traversal. */
hash_set<tree> *visited;
/* True iff we're making a type pack expansion. */
bool type_pack_expansion_p;
};
/* Identifies all of the argument packs that occur in a template
   argument and appends them to the TREE_LIST inside DATA, which is a
   find_parameter_pack_data structure.  This is a subroutine of
   make_pack_expansion and uses_parameter_packs.

   TP points at the node being examined, WALK_SUBTREES is cleared to
   stop cp_walk_tree descending further, and DATA is the shared
   find_parameter_pack_data.  Always returns NULL_TREE so the walk
   continues over the whole tree.  */
static tree
find_parameter_packs_r (tree *tp, int *walk_subtrees, void* data)
{
  tree t = *tp;
  struct find_parameter_pack_data* ppd =
    (struct find_parameter_pack_data*)data;
  bool parameter_pack_p = false;

  /* Don't look through typedefs; we are interested in whether a
     parameter pack is actually written in the expression/type we're
     looking at, not the target type.  */
  if (TYPE_P (t) && typedef_variant_p (t))
    {
      /* But do look at arguments for an alias template.  */
      if (tree tinfo = TYPE_ALIAS_TEMPLATE_INFO (t))
	cp_walk_tree (&TI_ARGS (tinfo),
		      &find_parameter_packs_r,
		      ppd, ppd->visited);
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  /* Identify whether this is a parameter pack or not.  */
  switch (TREE_CODE (t))
    {
    case TEMPLATE_PARM_INDEX:
      if (TEMPLATE_PARM_PARAMETER_PACK (t))
	parameter_pack_p = true;
      break;

    case TEMPLATE_TYPE_PARM:
      t = TYPE_MAIN_VARIANT (t);
      /* FALLTHRU */
    case TEMPLATE_TEMPLATE_PARM:
      /* If the placeholder appears in the decl-specifier-seq of a function
	 parameter pack (14.6.3), or the type-specifier-seq of a type-id that
	 is a pack expansion, the invented template parameter is a template
	 parameter pack.  */
      if (ppd->type_pack_expansion_p && is_auto (t))
	TEMPLATE_TYPE_PARAMETER_PACK (t) = true;
      if (TEMPLATE_TYPE_PARAMETER_PACK (t))
	parameter_pack_p = true;
      break;

    case FIELD_DECL:
    case PARM_DECL:
      if (DECL_PACK_P (t))
	{
	  /* We don't want to walk into the type of a PARM_DECL,
	     because we don't want to see the type parameter pack.  */
	  *walk_subtrees = 0;
	  parameter_pack_p = true;
	}
      break;

    case VAR_DECL:
      if (DECL_PACK_P (t))
	{
	  /* We don't want to walk into the type of a variadic capture proxy,
	     because we don't want to see the type parameter pack.  */
	  *walk_subtrees = 0;
	  parameter_pack_p = true;
	}
      else if (variable_template_specialization_p (t))
	{
	  /* A variable template specialization may mention packs only in
	     its template arguments; walk those instead of the decl.  */
	  cp_walk_tree (&DECL_TI_ARGS (t),
			find_parameter_packs_r,
			ppd, ppd->visited);
	  *walk_subtrees = 0;
	}
      break;

    case CALL_EXPR:
      /* A built-in pack call (e.g. __integer_pack) acts as a pack.  */
      if (builtin_pack_call_p (t))
	parameter_pack_p = true;
      break;

    case BASES:
      parameter_pack_p = true;
      break;

    default:
      /* Not a parameter pack.  */
      break;
    }

  if (parameter_pack_p)
    {
      /* Add this parameter pack to the list.  */
      *ppd->parameter_packs = tree_cons (NULL_TREE, t, *ppd->parameter_packs);
    }

  /* A pack may also be mentioned in the context of a type (e.g. a
     nested-name-specifier); walk that too.  */
  if (TYPE_P (t))
    cp_walk_tree (&TYPE_CONTEXT (t),
		  &find_parameter_packs_r, ppd, ppd->visited);

  /* This switch statement will return immediately if we don't find a
     parameter pack.  ??? Should some of these be in cp_walk_subtrees?  */
  switch (TREE_CODE (t))
    {
    case BOUND_TEMPLATE_TEMPLATE_PARM:
      /* Check the template itself.  */
      cp_walk_tree (&TREE_TYPE (TYPE_TI_TEMPLATE (t)),
		    &find_parameter_packs_r, ppd, ppd->visited);
      return NULL_TREE;

    case DECL_EXPR:
      {
	tree decl = DECL_EXPR_DECL (t);
	/* Ignore the declaration of a capture proxy for a parameter pack.  */
	if (is_capture_proxy (decl))
	  *walk_subtrees = 0;
	if (is_typedef_decl (decl))
	  /* Since we stop at typedefs above, we need to look through them at
	     the point of the DECL_EXPR.  */
	  cp_walk_tree (&DECL_ORIGINAL_TYPE (decl),
			&find_parameter_packs_r, ppd, ppd->visited);
	return NULL_TREE;
      }

    case TEMPLATE_DECL:
      /* Only a template template parameter's own type can name packs;
	 a real template's insides are not our business here.  */
      if (!DECL_TEMPLATE_TEMPLATE_PARM_P (t))
	return NULL_TREE;
      cp_walk_tree (&TREE_TYPE (t),
		    &find_parameter_packs_r, ppd, ppd->visited);
      return NULL_TREE;

    case TYPENAME_TYPE:
      cp_walk_tree (&TYPENAME_TYPE_FULLNAME (t), &find_parameter_packs_r,
		    ppd, ppd->visited);
      *walk_subtrees = 0;
      return NULL_TREE;

    case TYPE_PACK_EXPANSION:
    case EXPR_PACK_EXPANSION:
      /* Packs inside an existing expansion are already expanded; don't
	 report them as bare.  */
      *walk_subtrees = 0;
      return NULL_TREE;

    case INTEGER_TYPE:
      /* An array bound may mention a non-type parameter pack.  */
      cp_walk_tree (&TYPE_MAX_VALUE (t), &find_parameter_packs_r,
		    ppd, ppd->visited);
      *walk_subtrees = 0;
      return NULL_TREE;

    case IDENTIFIER_NODE:
      cp_walk_tree (&TREE_TYPE (t), &find_parameter_packs_r, ppd,
		    ppd->visited);
      *walk_subtrees = 0;
      return NULL_TREE;

    case LAMBDA_EXPR:
      {
	/* Look at explicit captures.  */
	for (tree cap = LAMBDA_EXPR_CAPTURE_LIST (t);
	     cap; cap = TREE_CHAIN (cap))
	  cp_walk_tree (&TREE_VALUE (cap), &find_parameter_packs_r, ppd,
			ppd->visited);
	/* Since we defer implicit capture, look in the parms and body.  */
	tree fn = lambda_function (t);
	cp_walk_tree (&TREE_TYPE (fn), &find_parameter_packs_r, ppd,
		      ppd->visited);
	cp_walk_tree (&DECL_SAVED_TREE (fn), &find_parameter_packs_r, ppd,
		      ppd->visited);
	*walk_subtrees = 0;
	return NULL_TREE;
      }

    case DECLTYPE_TYPE:
      {
	/* When traversing a DECLTYPE_TYPE_EXPR, we need to set
	   type_pack_expansion_p to false so that any placeholders
	   within the expression don't get marked as parameter packs.  */
	bool type_pack_expansion_p = ppd->type_pack_expansion_p;
	ppd->type_pack_expansion_p = false;
	cp_walk_tree (&DECLTYPE_TYPE_EXPR (t), &find_parameter_packs_r,
		      ppd, ppd->visited);
	ppd->type_pack_expansion_p = type_pack_expansion_p;
	*walk_subtrees = 0;
	return NULL_TREE;
      }

    case IF_STMT:
      cp_walk_tree (&IF_COND (t), &find_parameter_packs_r,
		    ppd, ppd->visited);
      cp_walk_tree (&THEN_CLAUSE (t), &find_parameter_packs_r,
		    ppd, ppd->visited);
      cp_walk_tree (&ELSE_CLAUSE (t), &find_parameter_packs_r,
		    ppd, ppd->visited);
      /* Don't walk into IF_STMT_EXTRA_ARGS.  */
      *walk_subtrees = 0;
      return NULL_TREE;

    default:
      return NULL_TREE;
    }

  return NULL_TREE;
}
/* Determines if the expression or type T uses any parameter packs.
   Returns a TREE_LIST whose TREE_VALUEs are the packs found, or
   NULL_TREE if T mentions none.  */
tree
uses_parameter_packs (tree t)
{
  tree parameter_packs = NULL_TREE;
  struct find_parameter_pack_data ppd;
  /* Use an automatic hash_set instead of new/delete: RAII releases it
     on every exit path and saves a heap allocation per query.  */
  hash_set<tree> visited;
  ppd.parameter_packs = &parameter_packs;
  ppd.visited = &visited;
  ppd.type_pack_expansion_p = false;
  cp_walk_tree (&t, &find_parameter_packs_r, &ppd, ppd.visited);
  return parameter_packs;
}
/* Turn ARG, which may be an expression, type, or a TREE_LIST
   representation a base-class initializer into a parameter pack
   expansion.  If all goes well, the resulting node will be an
   EXPR_PACK_EXPANSION, TYPE_PACK_EXPANSION, or TREE_LIST,
   respectively.  On failure, returns error_mark_node (diagnosing only
   if COMPLAIN includes tf_error).  */
tree
make_pack_expansion (tree arg, tsubst_flags_t complain)
{
  tree result;
  tree parameter_packs = NULL_TREE;
  bool for_types = false;
  struct find_parameter_pack_data ppd;

  if (!arg || arg == error_mark_node)
    return arg;

  if (TREE_CODE (arg) == TREE_LIST && TREE_PURPOSE (arg))
    {
      /* A TREE_LIST with a non-null TREE_PURPOSE is for a base
	 class initializer.  In this case, the TREE_PURPOSE will be a
	 _TYPE node (representing the base class expansion we're
	 initializing) and the TREE_VALUE will be a TREE_LIST
	 containing the initialization arguments.

	 The resulting expansion looks somewhat different from most
	 expansions.  Rather than returning just one _EXPANSION, we
	 return a TREE_LIST whose TREE_PURPOSE is a
	 TYPE_PACK_EXPANSION containing the bases that will be
	 initialized.  The TREE_VALUE will be identical to the
	 original TREE_VALUE, which is a list of arguments that will
	 be passed to each base.  We do not introduce any new pack
	 expansion nodes into the TREE_VALUE (although it is possible
	 that some already exist), because the TREE_PURPOSE and
	 TREE_VALUE all need to be expanded together with the same
	 _EXPANSION node.  Note that the TYPE_PACK_EXPANSION in the
	 resulting TREE_PURPOSE will mention the parameter packs in
	 both the bases and the arguments to the bases.  */
      tree purpose;
      tree value;
      /* NOTE: deliberately shadows the outer parameter_packs; this
	 branch returns before the outer one is ever used.  */
      tree parameter_packs = NULL_TREE;

      /* Determine which parameter packs will be used by the base
	 class expansion.  */
      ppd.visited = new hash_set<tree>;
      ppd.parameter_packs = &parameter_packs;
      ppd.type_pack_expansion_p = false;
      gcc_assert (TYPE_P (TREE_PURPOSE (arg)));
      cp_walk_tree (&TREE_PURPOSE (arg), &find_parameter_packs_r,
		    &ppd, ppd.visited);

      if (parameter_packs == NULL_TREE)
	{
	  if (complain & tf_error)
	    error ("base initializer expansion %qT contains no parameter packs",
		   arg);
	  delete ppd.visited;
	  return error_mark_node;
	}

      if (TREE_VALUE (arg) != void_type_node)
	{
	  /* Collect the sets of parameter packs used in each of the
	     initialization arguments.  */
	  for (value = TREE_VALUE (arg); value; value = TREE_CHAIN (value))
	    {
	      /* Determine which parameter packs will be expanded in this
		 argument.  */
	      cp_walk_tree (&TREE_VALUE (value), &find_parameter_packs_r,
			    &ppd, ppd.visited);
	    }
	}

      delete ppd.visited;

      /* Create the pack expansion type for the base type.  */
      purpose = cxx_make_type (TYPE_PACK_EXPANSION);
      SET_PACK_EXPANSION_PATTERN (purpose, TREE_PURPOSE (arg));
      PACK_EXPANSION_PARAMETER_PACKS (purpose) = parameter_packs;
      PACK_EXPANSION_LOCAL_P (purpose) = at_function_scope_p ();

      /* Just use structural equality for these TYPE_PACK_EXPANSIONS;
	 they will rarely be compared to anything.  */
      SET_TYPE_STRUCTURAL_EQUALITY (purpose);

      return tree_cons (purpose, TREE_VALUE (arg), NULL_TREE);
    }

  if (TYPE_P (arg) || TREE_CODE (arg) == TEMPLATE_DECL)
    for_types = true;

  /* Build the PACK_EXPANSION_* node.  */
  result = for_types
     ? cxx_make_type (TYPE_PACK_EXPANSION)
     : make_node (EXPR_PACK_EXPANSION);
  SET_PACK_EXPANSION_PATTERN (result, arg);
  if (TREE_CODE (result) == EXPR_PACK_EXPANSION)
    {
      /* Propagate type and const-expression information.  */
      TREE_TYPE (result) = TREE_TYPE (arg);
      TREE_CONSTANT (result) = TREE_CONSTANT (arg);
      /* Mark this read now, since the expansion might be length 0.  */
      mark_exp_read (arg);
    }
  else
    /* Just use structural equality for these TYPE_PACK_EXPANSIONS;
       they will rarely be compared to anything.  */
    SET_TYPE_STRUCTURAL_EQUALITY (result);

  /* Determine which parameter packs will be expanded.  */
  ppd.parameter_packs = &parameter_packs;
  ppd.visited = new hash_set<tree>;
  ppd.type_pack_expansion_p = TYPE_P (arg);
  cp_walk_tree (&arg, &find_parameter_packs_r, &ppd, ppd.visited);
  delete ppd.visited;

  /* Make sure we found some parameter packs.  */
  if (parameter_packs == NULL_TREE)
    {
      if (complain & tf_error)
	{
	  if (TYPE_P (arg))
	    error ("expansion pattern %qT contains no parameter packs", arg);
	  else
	    error ("expansion pattern %qE contains no parameter packs", arg);
	}

      return error_mark_node;
    }
  PACK_EXPANSION_PARAMETER_PACKS (result) = parameter_packs;

  PACK_EXPANSION_LOCAL_P (result) = at_function_scope_p ();

  return result;
}
/* Checks T for any "bare" parameter packs, which have not yet been
   expanded, and issues an error if any are found.  This operation can
   only be done on full expressions or types (e.g., an expression
   statement, "if" condition, etc.), because we could have expressions like:

     foo(f(g(h(args)))...)

   where "args" is a parameter pack.  check_for_bare_parameter_packs
   should not be called for the subexpressions args, h(args),
   g(h(args)), or f(g(h(args))), because we would produce erroneous
   error messages.

   LOC is the location to use for the diagnostic; when it is
   UNKNOWN_LOCATION the location of T itself is used instead.

   Returns TRUE and emits an error if there were bare parameter packs,
   returns FALSE otherwise.  */
bool
check_for_bare_parameter_packs (tree t, location_t loc /* = UNKNOWN_LOCATION */)
{
  tree parameter_packs = NULL_TREE;
  struct find_parameter_pack_data ppd;

  /* Outside a template there can be no unexpanded packs; likewise for
     erroneous or absent input.  */
  if (!processing_template_decl || !t || t == error_mark_node)
    return false;

  /* A lambda might use a parameter pack from the containing context.  */
  if (current_class_type && LAMBDA_TYPE_P (current_class_type)
      && CLASSTYPE_TEMPLATE_INFO (current_class_type))
    return false;

  if (TREE_CODE (t) == TYPE_DECL)
    t = TREE_TYPE (t);

  ppd.parameter_packs = &parameter_packs;
  ppd.visited = new hash_set<tree>;
  ppd.type_pack_expansion_p = false;
  cp_walk_tree (&t, &find_parameter_packs_r, &ppd, ppd.visited);
  delete ppd.visited;

  if (parameter_packs)
    {
      if (loc == UNKNOWN_LOCATION)
	loc = cp_expr_loc_or_input_loc (t);
      error_at (loc, "parameter packs not expanded with %<...%>:");
      /* Name each offending pack; the way to extract the name depends
	 on the kind of node recorded by find_parameter_packs_r.  */
      while (parameter_packs)
	{
	  tree pack = TREE_VALUE (parameter_packs);
	  tree name = NULL_TREE;

	  if (TREE_CODE (pack) == TEMPLATE_TYPE_PARM
	      || TREE_CODE (pack) == TEMPLATE_TEMPLATE_PARM)
	    name = TYPE_NAME (pack);
	  else if (TREE_CODE (pack) == TEMPLATE_PARM_INDEX)
	    name = DECL_NAME (TEMPLATE_PARM_DECL (pack));
	  else if (TREE_CODE (pack) == CALL_EXPR)
	    /* A built-in pack such as __integer_pack.  */
	    name = DECL_NAME (CALL_EXPR_FN (pack));
	  else
	    name = DECL_NAME (pack);

	  if (name)
	    inform (loc, "        %qD", name);
	  else
	    inform (loc, "        %s", "<anonymous>");

	  parameter_packs = TREE_CHAIN (parameter_packs);
	}

      return true;
    }

  return false;
}
/* Expand any parameter packs that occur in the template arguments in
   ARGS.  Returns ARGS unchanged when no element is an argument pack
   (or when some element is NULL_TREE, i.e. the vector is incomplete);
   otherwise returns a fresh, flattened TREE_VEC.  */
tree
expand_template_argument_pack (tree args)
{
  if (args == error_mark_node)
    return error_mark_node;

  tree result_args = NULL_TREE;
  int in_arg, out_arg = 0, nargs = args ? TREE_VEC_LENGTH (args) : 0;
  /* Stays negative until the first pack is seen; afterwards counts the
     total number of flattened slots needed.  */
  int num_result_args = -1;
  int non_default_args_count = -1;

  /* First, determine if we need to expand anything, and the number of
     slots we'll need.  */
  for (in_arg = 0; in_arg < nargs; ++in_arg)
    {
      tree arg = TREE_VEC_ELT (args, in_arg);
      if (arg == NULL_TREE)
	return args;
      if (ARGUMENT_PACK_P (arg))
	{
	  int num_packed = TREE_VEC_LENGTH (ARGUMENT_PACK_ARGS (arg));
	  if (num_result_args < 0)
	    /* First pack: account for all the plain args before it.  */
	    num_result_args = in_arg + num_packed;
	  else
	    num_result_args += num_packed;
	}
      else
	{
	  if (num_result_args >= 0)
	    num_result_args++;
	}
    }

  /* If no expansion is necessary, we're done.  */
  if (num_result_args < 0)
    return args;

  /* Expand arguments.  */
  result_args = make_tree_vec (num_result_args);
  if (NON_DEFAULT_TEMPLATE_ARGS_COUNT (args))
    non_default_args_count =
      GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (args);
  for (in_arg = 0; in_arg < nargs; ++in_arg)
    {
      tree arg = TREE_VEC_ELT (args, in_arg);
      if (ARGUMENT_PACK_P (arg))
	{
	  tree packed = ARGUMENT_PACK_ARGS (arg);
	  int i, num_packed = TREE_VEC_LENGTH (packed);
	  for (i = 0; i < num_packed; ++i, ++out_arg)
	    TREE_VEC_ELT (result_args, out_arg) = TREE_VEC_ELT(packed, i);
	  /* One pack slot became NUM_PACKED slots; adjust the count of
	     explicitly-given (non-default) arguments to match.  */
	  if (non_default_args_count > 0)
	    non_default_args_count += num_packed - 1;
	}
      else
	{
	  TREE_VEC_ELT (result_args, out_arg) = arg;
	  ++out_arg;
	}
    }
  if (non_default_args_count >= 0)
    SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (result_args, non_default_args_count);
  return result_args;
}
/* Checks if DECL shadows a template parameter.

   [temp.local]: A template-parameter shall not be redeclared within its
   scope (including nested scopes).

   Emits an error and returns TRUE if the DECL shadows a parameter,
   returns FALSE otherwise.  */
bool
check_template_shadow (tree decl)
{
  /* Outside of a template nothing can shadow a template parameter.  */
  if (!current_template_parms)
    return true;

  /* Figure out what binding, if any, this declaration hides.  */
  decl = OVL_FIRST (decl);
  tree hidden = innermost_non_namespace_value (DECL_NAME (decl));

  /* Nothing bound under this name means nothing shadowed.  HIDDEN
     might be an OVERLOAD (or perhaps even an ERROR_MARK), so verify it
     is a _DECL before consulting DECL_TEMPLATE_PARM_P.  */
  if (!hidden || !DECL_P (hidden) || !DECL_TEMPLATE_PARM_P (hidden))
    return true;

  /* DECL == HIDDEN avoids bogus errors for using a name inside a
     class.  The TPFI check avoids duplicate errors for inline member
     templates.  */
  if (decl == hidden
      || (DECL_TEMPLATE_PARM_P (decl)
	  && TEMPLATE_PARMS_FOR_INLINE (current_template_parms)))
    return true;

  /* Don't complain about the injected class name, as we've already
     complained about the class itself.  */
  if (DECL_SELF_REFERENCE_P (decl))
    return false;

  if (DECL_TEMPLATE_PARM_P (decl))
    error ("declaration of template parameter %q+D shadows "
	   "template parameter", decl);
  else
    error ("declaration of %q+#D shadows template parameter", decl);
  inform (DECL_SOURCE_LOCATION (hidden),
	  "template parameter %qD declared here", hidden);
  return false;
}
/* Return a new TEMPLATE_PARM_INDEX with the indicated INDEX, LEVEL,
   ORIG_LEVEL, DECL, and TYPE.  The const/readonly flags of the new
   node mirror those of DECL.  */
static tree
build_template_parm_index (int index,
			   int level,
			   int orig_level,
			   tree decl,
			   tree type)
{
  tree parm_index = make_node (TEMPLATE_PARM_INDEX);
  TREE_TYPE (parm_index) = type;
  TEMPLATE_PARM_IDX (parm_index) = index;
  TEMPLATE_PARM_LEVEL (parm_index) = level;
  TEMPLATE_PARM_ORIG_LEVEL (parm_index) = orig_level;
  TEMPLATE_PARM_DECL (parm_index) = decl;
  /* Propagate constness/readonlyness from the underlying decl.  */
  TREE_CONSTANT (parm_index) = TREE_CONSTANT (decl);
  TREE_READONLY (parm_index) = TREE_READONLY (decl);
  return parm_index;
}
/* Find the canonical type parameter for the given template type
   parameter.  Returns the canonical type parameter, which may be TYPE
   if no such parameter existed.  */
static tree
canonical_type_parameter (tree type)
{
  int idx = TEMPLATE_TYPE_IDX (type);

  gcc_assert (TREE_CODE (type) != TEMPLATE_TEMPLATE_PARM);

  /* Make sure the cache vector covers index IDX.  */
  if (!canonical_template_parms)
    vec_alloc (canonical_template_parms, idx + 1);
  if (canonical_template_parms->length () <= (unsigned) idx)
    vec_safe_grow_cleared (canonical_template_parms, idx + 1);

  /* Look for a structurally-equal parameter already recorded at this
     index.  */
  for (tree node = (*canonical_template_parms)[idx];
       node; node = TREE_CHAIN (node))
    if (comptypes (type, TREE_VALUE (node), COMPARE_STRUCTURAL))
      return TREE_VALUE (node);

  /* None found: record TYPE as the canonical parameter for IDX.  */
  (*canonical_template_parms)[idx]
    = tree_cons (NULL_TREE, type, (*canonical_template_parms)[idx]);
  return type;
}
/* Return a TEMPLATE_PARM_INDEX, similar to INDEX, but whose
   TEMPLATE_PARM_LEVEL has been decreased by LEVELS.  If such a
   TEMPLATE_PARM_INDEX already exists, it is returned; otherwise, a
   new one is created.  TYPE is the (possibly substituted) type of the
   parameter; ARGS and COMPLAIN are used when substituting the parms of
   a template template parameter.  */
static tree
reduce_template_parm_level (tree index, tree type, int levels, tree args,
			    tsubst_flags_t complain)
{
  /* The cached descendant is reusable only if it is at exactly the
     requested level and has the same type; otherwise rebuild it.  */
  if (TEMPLATE_PARM_DESCENDANTS (index) == NULL_TREE
      || (TEMPLATE_PARM_LEVEL (TEMPLATE_PARM_DESCENDANTS (index))
	  != TEMPLATE_PARM_LEVEL (index) - levels)
      || !same_type_p (type, TREE_TYPE (TEMPLATE_PARM_DESCENDANTS (index))))
    {
      tree orig_decl = TEMPLATE_PARM_DECL (index);

      /* Clone the parameter's decl at the new level, carrying over the
	 flags that matter for template parameters.  */
      tree decl = build_decl (DECL_SOURCE_LOCATION (orig_decl),
			      TREE_CODE (orig_decl), DECL_NAME (orig_decl),
			      type);
      TREE_CONSTANT (decl) = TREE_CONSTANT (orig_decl);
      TREE_READONLY (decl) = TREE_READONLY (orig_decl);
      DECL_VIRTUAL_P (decl) = DECL_VIRTUAL_P (orig_decl);
      DECL_ARTIFICIAL (decl) = 1;
      SET_DECL_TEMPLATE_PARM_P (decl);

      tree tpi = build_template_parm_index (TEMPLATE_PARM_IDX (index),
					    TEMPLATE_PARM_LEVEL (index) - levels,
					    TEMPLATE_PARM_ORIG_LEVEL (index),
					    decl, type);
      /* Cache the reduced index for subsequent calls.  */
      TEMPLATE_PARM_DESCENDANTS (index) = tpi;
      TEMPLATE_PARM_PARAMETER_PACK (tpi)
	= TEMPLATE_PARM_PARAMETER_PACK (index);

      /* Template template parameters need this.  */
      tree inner = decl;
      if (TREE_CODE (decl) == TEMPLATE_DECL)
	{
	  inner = build_decl (DECL_SOURCE_LOCATION (decl),
			      TYPE_DECL, DECL_NAME (decl), type);
	  DECL_TEMPLATE_RESULT (decl) = inner;
	  DECL_ARTIFICIAL (inner) = true;
	  /* Substitute into the nested parameter list as well.  */
	  DECL_TEMPLATE_PARMS (decl) = tsubst_template_parms
	    (DECL_TEMPLATE_PARMS (orig_decl), args, complain);
	}

      /* Attach the TPI to the decl.  */
      if (TREE_CODE (inner) == TYPE_DECL)
	TEMPLATE_TYPE_PARM_INDEX (type) = tpi;
      else
	DECL_INITIAL (decl) = tpi;
    }

  return TEMPLATE_PARM_DESCENDANTS (index);
}
/* Process information from new template parameter PARM and append it
   to the LIST being built.  This new parameter is a non-type
   parameter iff IS_NON_TYPE is true.  This new parameter is a
   parameter pack iff IS_PARAMETER_PACK is true.  The location of PARM
   is in PARM_LOC.

   PARM is a TREE_LIST whose TREE_PURPOSE is the default argument and
   whose TREE_TYPE is the shorthand constraint (if any).  Returns LIST
   with the processed parameter chained on.  */
tree
process_template_parm (tree list, location_t parm_loc, tree parm,
		       bool is_non_type, bool is_parameter_pack)
{
  tree decl = 0;
  int idx = 0;

  gcc_assert (TREE_CODE (parm) == TREE_LIST);
  tree defval = TREE_PURPOSE (parm);
  tree constr = TREE_TYPE (parm);

  if (list)
    {
      /* Continue numbering from the last parameter already in LIST.  */
      tree p = tree_last (list);

      if (p && TREE_VALUE (p) != error_mark_node)
	{
	  p = TREE_VALUE (p);
	  if (TREE_CODE (p) == TYPE_DECL || TREE_CODE (p) == TEMPLATE_DECL)
	    idx = TEMPLATE_TYPE_IDX (TREE_TYPE (p));
	  else
	    idx = TEMPLATE_PARM_IDX (DECL_INITIAL (p));
	}

      ++idx;
    }

  if (is_non_type)
    {
      parm = TREE_VALUE (parm);

      SET_DECL_TEMPLATE_PARM_P (parm);

      if (TREE_TYPE (parm) != error_mark_node)
	{
	  /* [temp.param]

	     The top-level cv-qualifiers on the template-parameter are
	     ignored when determining its type.  */
	  TREE_TYPE (parm) = TYPE_MAIN_VARIANT (TREE_TYPE (parm));
	  if (invalid_nontype_parm_type_p (TREE_TYPE (parm), 1))
	    TREE_TYPE (parm) = error_mark_node;
	  else if (uses_parameter_packs (TREE_TYPE (parm))
		   && !is_parameter_pack
		   /* If we're in a nested template parameter list, the template
		      template parameter could be a parameter pack.  */
		   && processing_template_parmlist == 1)
	    {
	      /* This template parameter is not a parameter pack, but it
		 should be.  Complain about "bare" parameter packs.  */
	      check_for_bare_parameter_packs (TREE_TYPE (parm));

	      /* Recover by calling this a parameter pack.  */
	      is_parameter_pack = true;
	    }
	}

      /* A template parameter is not modifiable.  */
      TREE_CONSTANT (parm) = 1;
      TREE_READONLY (parm) = 1;
      decl = build_decl (parm_loc,
			 CONST_DECL, DECL_NAME (parm), TREE_TYPE (parm));
      TREE_CONSTANT (decl) = 1;
      TREE_READONLY (decl) = 1;
      /* processing_template_decl is the current nesting depth, which
	 serves as both the level and the original level here.  */
      DECL_INITIAL (parm) = DECL_INITIAL (decl)
	= build_template_parm_index (idx, processing_template_decl,
				     processing_template_decl,
				     decl, TREE_TYPE (parm));

      TEMPLATE_PARM_PARAMETER_PACK (DECL_INITIAL (parm))
	= is_parameter_pack;
    }
  else
    {
      tree t;
      parm = TREE_VALUE (TREE_VALUE (parm));

      if (parm && TREE_CODE (parm) == TEMPLATE_DECL)
	{
	  t = cxx_make_type (TEMPLATE_TEMPLATE_PARM);
	  /* This is for distinguishing between real templates and template
	     template parameters */
	  TREE_TYPE (parm) = t;

	  /* any_template_parm_r expects to be able to get the targs of a
	     DECL_TEMPLATE_RESULT.  */
	  tree result = DECL_TEMPLATE_RESULT (parm);
	  TREE_TYPE (result) = t;
	  tree args = template_parms_to_args (DECL_TEMPLATE_PARMS (parm));
	  tree tinfo = build_template_info (parm, args);
	  retrofit_lang_decl (result);
	  DECL_TEMPLATE_INFO (result) = tinfo;

	  decl = parm;
	}
      else
	{
	  t = cxx_make_type (TEMPLATE_TYPE_PARM);
	  /* parm is either IDENTIFIER_NODE or NULL_TREE.  */
	  decl = build_decl (parm_loc,
			     TYPE_DECL, parm, t);
	}

      TYPE_NAME (t) = decl;
      TYPE_STUB_DECL (t) = decl;
      parm = decl;
      TEMPLATE_TYPE_PARM_INDEX (t)
	= build_template_parm_index (idx, processing_template_decl,
				     processing_template_decl,
				     decl, TREE_TYPE (parm));
      TEMPLATE_TYPE_PARAMETER_PACK (t) = is_parameter_pack;
      /* Template template parms are compared structurally; plain type
	 parms get a canonical representative.  */
      if (TREE_CODE (t) == TEMPLATE_TEMPLATE_PARM)
	SET_TYPE_STRUCTURAL_EQUALITY (t);
      else
	TYPE_CANONICAL (t) = canonical_type_parameter (t);
    }
  DECL_ARTIFICIAL (decl) = 1;
  SET_DECL_TEMPLATE_PARM_P (decl);

  /* Build requirements for the type/template parameter.
     This must be done after SET_DECL_TEMPLATE_PARM_P or
     process_template_parm could fail.  */
  tree reqs = finish_shorthand_constraint (parm, constr);

  decl = pushdecl (decl);
  if (!is_non_type)
    parm = decl;

  /* Build the parameter node linking the parameter declaration,
     its default argument (if any), and its constraints (if any).  */
  parm = build_tree_list (defval, parm);
  TEMPLATE_PARM_CONSTRAINTS (parm) = reqs;
  return chainon (list, parm);
}
/* The end of a template parameter list has been reached.  Process the
   tree list into a parameter vector, converting each parameter into a more
   useful form.  Type parameters are saved as IDENTIFIER_NODEs, and others
   as PARM_DECLs.  */
tree
end_template_parm_list (tree parms)
{
  tree saved_parmlist = make_tree_vec (list_length (parms));

  /* Pop the dummy parameter level and add the real one.  */
  current_template_parms = TREE_CHAIN (current_template_parms);
  current_template_parms
    = tree_cons (size_int (processing_template_decl),
		 saved_parmlist, current_template_parms);

  /* Move each TREE_LIST node of PARMS into the vector, severing the
     chain links as we go.  */
  int nparms = 0;
  while (parms)
    {
      tree rest = TREE_CHAIN (parms);
      TREE_CHAIN (parms) = NULL_TREE;
      TREE_VEC_ELT (saved_parmlist, nparms++) = parms;
      parms = rest;
    }

  --processing_template_parmlist;

  return saved_parmlist;
}
// Explicitly indicate the end of the template parameter list. We assume
// that the current template parameters have been constructed and/or
// managed explicitly, as when creating new template template parameters
// from a shorthand constraint.
//
// Unlike the tree overload above, this only rebalances the parmlist
// nesting counter; no parameter vector is built.
void
end_template_parm_list ()
{
  --processing_template_parmlist;
}
/* end_template_decl is called after a template declaration is seen.
   Undoes the bookkeeping set up when the declaration began: clears any
   pending specialization state and, if we were actually inside a
   template, closes its scope and pops one parameter level.  */
void
end_template_decl (void)
{
  reset_specialization ();

  if (processing_template_decl)
    {
      /* This matches the pushlevel in begin_template_parm_list.  */
      finish_scope ();
      --processing_template_decl;
      current_template_parms = TREE_CHAIN (current_template_parms);
    }
}
/* Takes a TEMPLATE_PARM_P or DECL_TEMPLATE_PARM_P node or a TREE_LIST
   thereof, and converts it into an argument suitable to be passed to
   the type substitution functions.  Note that if the TREE_LIST contains
   an error_mark node, the returned argument is error_mark_node.  A
   parameter pack is wrapped in a one-element *_ARGUMENT_PACK whose sole
   element is a pack expansion of the parameter.  */
tree
template_parm_to_arg (tree t)
{
  if (!t)
    return NULL_TREE;

  if (TREE_CODE (t) == TREE_LIST)
    t = TREE_VALUE (t);

  if (error_operand_p (t))
    return error_mark_node;

  if (DECL_P (t) && DECL_TEMPLATE_PARM_P (t))
    {
      /* Peel the decl down to the underlying parameter node: the type
	 for type/template parms, the TEMPLATE_PARM_INDEX (stored in
	 DECL_INITIAL) for non-type parms.  */
      if (TREE_CODE (t) == TYPE_DECL
	  || TREE_CODE (t) == TEMPLATE_DECL)
	t = TREE_TYPE (t);
      else
	t = DECL_INITIAL (t);
    }

  gcc_assert (TEMPLATE_PARM_P (t));

  if (TREE_CODE (t) == TEMPLATE_TYPE_PARM
      || TREE_CODE (t) == TEMPLATE_TEMPLATE_PARM)
    {
      if (TEMPLATE_TYPE_PARAMETER_PACK (t))
	{
	  /* Turn this argument into a TYPE_ARGUMENT_PACK
	     with a single element, which expands T.  */
	  tree vec = make_tree_vec (1);
	  if (CHECKING_P)
	    SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (vec, TREE_VEC_LENGTH (vec));

	  TREE_VEC_ELT (vec, 0) = make_pack_expansion (t);

	  t = cxx_make_type (TYPE_ARGUMENT_PACK);
	  SET_ARGUMENT_PACK_ARGS (t, vec);
	}
    }
  else
    {
      if (TEMPLATE_PARM_PARAMETER_PACK (t))
	{
	  /* Turn this argument into a NONTYPE_ARGUMENT_PACK
	     with a single element, which expands T.  */
	  tree vec = make_tree_vec (1);
	  if (CHECKING_P)
	    SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (vec, TREE_VEC_LENGTH (vec));

	  t = convert_from_reference (t);
	  TREE_VEC_ELT (vec, 0) = make_pack_expansion (t);

	  t  = make_node (NONTYPE_ARGUMENT_PACK);
	  SET_ARGUMENT_PACK_ARGS (t, vec);
	}
      else
	t = convert_from_reference (t);
    }
  return t;
}
/* Given a single level of template parameters (a TREE_VEC), return it
   as a set of template arguments.  */
tree
template_parms_level_to_args (tree parms)
{
  /* Work on a copy so the parameter vector itself is left intact.  */
  tree args = copy_node (parms);
  TREE_TYPE (args) = NULL_TREE;

  /* Convert each slot in place; the conversions are independent, so the
     iteration order is immaterial.  */
  for (int i = 0; i < TREE_VEC_LENGTH (args); ++i)
    TREE_VEC_ELT (args, i) = template_parm_to_arg (TREE_VEC_ELT (args, i));

  if (CHECKING_P)
    SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (args, TREE_VEC_LENGTH (args));

  return args;
}
/* Given a set of template parameters, return them as a set of template
   arguments.  The template parameters are represented as a TREE_VEC, in
   the form documented in cp-tree.h for template arguments.  PARMS is
   chained innermost-first, while the resulting vector stores the
   outermost level at index 0, hence the reverse fill below.  */
tree
template_parms_to_args (tree parms)
{
  tree header;
  tree args = NULL_TREE;
  int length = TMPL_PARMS_DEPTH (parms);
  int l = length;

  /* If there is only one level of template parameters, we do not
     create a TREE_VEC of TREE_VECs.  Instead, we return a single
     TREE_VEC containing the arguments.  */
  if (length > 1)
    args = make_tree_vec (length);

  for (header = parms; header; header = TREE_CHAIN (header))
    {
      tree a = template_parms_level_to_args (TREE_VALUE (header));

      if (length > 1)
	TREE_VEC_ELT (args, --l) = a;
      else
	args = a;
    }

  return args;
}
/* Within the declaration of a template, return the currently active
   template parameters as an argument TREE_VEC.  Convenience wrapper
   around template_parms_to_args for current_template_parms.  */
static tree
current_template_args (void)
{
  return template_parms_to_args (current_template_parms);
}
/* Return the fully generic arguments for TMPL, i.e. what
   current_template_args would be while parsing it.  */
tree
generic_targs_for (tree tmpl)
{
  if (tmpl == NULL_TREE)
    return NULL_TREE;

  /* DECL_TEMPLATE_RESULT doesn't have the arguments we want.  For a
     template template parameter, it has no TEMPLATE_INFO; for a partial
     specialization, it has the arguments for the primary template, and
     we want the arguments for the partial specialization.  In those
     cases fall through and rebuild the arguments from the parameters.  */
  if (!DECL_TEMPLATE_TEMPLATE_PARM_P (tmpl)
      && !DECL_TEMPLATE_SPECIALIZATION (tmpl))
    if (tree result = DECL_TEMPLATE_RESULT (tmpl))
      if (tree ti = get_template_info (result))
	return TI_ARGS (ti);

  return template_parms_to_args (DECL_TEMPLATE_PARMS (tmpl));
}
/* Update the declared TYPE by doing any lookups which were thought to be
   dependent, but are not now that we know the SCOPE of the declarator.
   ORIG_TYPE may be a TYPE_DECL or a type; the return value matches the
   flavor of ORIG_TYPE.  Returns ORIG_TYPE unchanged on substitution
   failure or when nothing needed resolving.  */
tree
maybe_update_decl_type (tree orig_type, tree scope)
{
  tree type = orig_type;

  if (type == NULL_TREE)
    return type;

  if (TREE_CODE (orig_type) == TYPE_DECL)
    type = TREE_TYPE (type);

  if (scope && TYPE_P (scope) && dependent_type_p (scope)
      && dependent_type_p (type)
      /* Don't bother building up the args in this case.  */
      && TREE_CODE (type) != TEMPLATE_TYPE_PARM)
    {
      /* tsubst in the args corresponding to the template parameters,
	 including auto if present.  Most things will be unchanged, but
	 make_typename_type and tsubst_qualified_id will resolve
	 TYPENAME_TYPEs and SCOPE_REFs that were previously dependent.  */
      tree args = current_template_args ();
      tree auto_node = type_uses_auto (type);
      tree pushed;
      if (auto_node)
	{
	  /* Append an extra level so the auto placeholder maps to
	     itself during substitution.  */
	  tree auto_vec = make_tree_vec (1);
	  TREE_VEC_ELT (auto_vec, 0) = auto_node;
	  args = add_to_template_args (args, auto_vec);
	}
      pushed = push_scope (scope);
      type = tsubst (type, args, tf_warning_or_error, NULL_TREE);
      if (pushed)
	pop_scope (scope);
    }

  if (type == error_mark_node)
    return orig_type;

  if (TREE_CODE (orig_type) == TYPE_DECL)
    {
      /* Keep the original decl when the type is unchanged; otherwise
	 return the resolved type's own decl.  */
      if (same_type_p (type, TREE_TYPE (orig_type)))
	type = orig_type;
      else
	type = TYPE_NAME (type);
    }
  return type;
}
/* Return a TEMPLATE_DECL corresponding to DECL, using the indicated
   template PARMS.  If MEMBER_TEMPLATE_P is true, the new template is a
   member template.  */
static tree
build_template_decl (tree decl, tree parms, bool member_template_p)
{
  tree tmpl = build_lang_decl (TEMPLATE_DECL, DECL_NAME (decl), NULL_TREE);

  /* Inherit language, source location and context from the templated
     declaration, then attach the parameter list.  */
  SET_DECL_LANGUAGE (tmpl, DECL_LANGUAGE (decl));
  DECL_SOURCE_LOCATION (tmpl) = DECL_SOURCE_LOCATION (decl);
  DECL_CONTEXT (tmpl) = DECL_CONTEXT (decl);
  DECL_TEMPLATE_PARMS (tmpl) = parms;
  DECL_MEMBER_TEMPLATE_P (tmpl) = member_template_p;

  return tmpl;
}
/* Bookkeeping shared between process_partial_specialization and
   mark_template_parm (passed through for_each_template_parm's DATA).  */
struct template_parm_data
{
  /* The level of the template parameters we are currently
     processing.  */
  int level;

  /* The index of the specialization argument we are currently
     processing.  */
  int current_arg;

  /* An array whose size is the number of template parameters.  The
     elements are nonzero if the parameter has been used in any one
     of the arguments processed so far.  */
  int* parms;

  /* An array whose size is the number of template arguments.  The
     elements are nonzero if the argument makes use of template
     parameters of this level.  */
  int* arg_uses_template_parms;
};
/* Subroutine of push_template_decl used to see if each template
   parameter in a partial specialization is used in the explicit
   argument list.  If T is of the LEVEL given in DATA (which is
   treated as a template_parm_data*), then DATA->PARMS is marked
   appropriately.  */
static int
mark_template_parm (tree t, void* data)
{
  struct template_parm_data* pd = (struct template_parm_data*) data;
  int level, idx;

  template_parm_level_and_index (t, &level, &idx);

  if (level == pd->level)
    {
      /* Record both that this parameter was seen and that the current
	 argument mentions a parameter of this level.  */
      pd->parms[idx] = 1;
      pd->arg_uses_template_parms[pd->current_arg] = 1;
    }

  /* In C++17 the type of a non-type argument is a deduced context, so
     walk it as well.  */
  if (cxx_dialect >= cxx17
      && TREE_CODE (t) == TEMPLATE_PARM_INDEX)
    for_each_template_parm (TREE_TYPE (t),
			    &mark_template_parm,
			    data,
			    NULL,
			    /*include_nondeduced_p=*/false);

  /* Return zero so that for_each_template_parm will continue the
     traversal of the tree; we want to mark *every* template parm.  */
  return 0;
}
/* Process the partial specialization DECL.  Validates the partial
   specialization (every template parameter deducible, argument counts
   sane, packs only at the end, non-type arguments well-formed),
   registers it on the primary template's specialization list, and
   complains about earlier instantiations it would have matched.
   Returns DECL on success (or for recoverable errors), or
   error_mark_node on a hard error.  */
static tree
process_partial_specialization (tree decl)
{
  tree type = TREE_TYPE (decl);
  tree tinfo = get_template_info (decl);
  tree maintmpl = TI_TEMPLATE (tinfo);
  tree specargs = TI_ARGS (tinfo);
  tree inner_args = INNERMOST_TEMPLATE_ARGS (specargs);
  tree main_inner_parms = DECL_INNERMOST_TEMPLATE_PARMS (maintmpl);
  tree inner_parms;
  tree inst;
  int nargs = TREE_VEC_LENGTH (inner_args);
  int ntparms;
  int i;
  bool did_error_intro = false;
  struct template_parm_data tpd;
  struct template_parm_data tpd2;

  gcc_assert (current_template_parms);

  /* A concept cannot be specialized.  */
  if (flag_concepts && variable_concept_p (maintmpl))
    {
      error ("specialization of variable concept %q#D", maintmpl);
      return error_mark_node;
    }

  inner_parms = INNERMOST_TEMPLATE_PARMS (current_template_parms);
  ntparms = TREE_VEC_LENGTH (inner_parms);

  /* We check that each of the template parameters given in the
     partial specialization is used in the argument list to the
     specialization.  For example:

       template <class T> struct S;
       template <class T> struct S<T*>;

     The second declaration is OK because `T*' uses the template
     parameter T, whereas

       template <class T> struct S<int>;

     is no good.  Even trickier is:

       template <class T>
       struct S1
       {
	 template <class U>
	 struct S2;
	 template <class U>
	 struct S2<T>;
       };

     The S2<T> declaration is actually invalid; it is a
     full-specialization.  Of course,

       template <class U>
       struct S2<T (*)(U)>;

     or some such would have been OK.  */
  tpd.level = TMPL_PARMS_DEPTH (current_template_parms);
  tpd.parms = XALLOCAVEC (int, ntparms);
  memset (tpd.parms, 0, sizeof (int) * ntparms);
  tpd.arg_uses_template_parms = XALLOCAVEC (int, nargs);
  memset (tpd.arg_uses_template_parms, 0, sizeof (int) * nargs);
  /* Walk each specialization argument, recording in TPD which of the
     specialization's own template parameters appear in a deduced
     context.  */
  for (i = 0; i < nargs; ++i)
    {
      tpd.current_arg = i;
      for_each_template_parm (TREE_VEC_ELT (inner_args, i),
			      &mark_template_parm,
			      &tpd,
			      NULL,
			      /*include_nondeduced_p=*/false);
    }
  for (i = 0; i < ntparms; ++i)
    if (tpd.parms[i] == 0)
      {
	/* One of the template parms was not used in a deduced context in the
	   specialization.  */
	if (!did_error_intro)
	  {
	    error ("template parameters not deducible in "
		   "partial specialization:");
	    did_error_intro = true;
	  }
	inform (input_location, " %qD",
		TREE_VALUE (TREE_VEC_ELT (inner_parms, i)));
      }

  if (did_error_intro)
    return error_mark_node;

  /* [temp.class.spec]

     The argument list of the specialization shall not be identical to
     the implicit argument list of the primary template.  */
  tree main_args
    = TI_ARGS (get_template_info (DECL_TEMPLATE_RESULT (maintmpl)));
  if (comp_template_args (inner_args, INNERMOST_TEMPLATE_ARGS (main_args))
      && (!flag_concepts
	  || !strictly_subsumes (current_template_constraints (),
				 main_args, maintmpl)))
    {
      /* With concepts, identical arguments are still OK if the
	 specialization is more constrained than the primary.  */
      if (!flag_concepts)
	error ("partial specialization %q+D does not specialize "
	       "any template arguments; to define the primary template, "
	       "remove the template argument list", decl);
      else
	error ("partial specialization %q+D does not specialize any "
	       "template arguments and is not more constrained than "
	       "the primary template; to define the primary template, "
	       "remove the template argument list", decl);
      inform (DECL_SOURCE_LOCATION (maintmpl), "primary template here");
    }

  /* A partial specialization that replaces multiple parameters of the
     primary template with a pack expansion is less specialized for those
     parameters.  */
  if (nargs < DECL_NTPARMS (maintmpl))
    {
      error ("partial specialization is not more specialized than the "
	     "primary template because it replaces multiple parameters "
	     "with a pack expansion");
      inform (DECL_SOURCE_LOCATION (maintmpl), "primary template here");
      /* Avoid crash in process_partial_specialization.  */
      return decl;
    }
  else if (nargs > DECL_NTPARMS (maintmpl))
    {
      error ("too many arguments for partial specialization %qT", type);
      inform (DECL_SOURCE_LOCATION (maintmpl), "primary template here");
      /* Avoid crash below.  */
      return decl;
    }

  /* If we aren't in a dependent class, we can actually try deduction.  */
  else if (tpd.level == 1
	   /* FIXME we should be able to handle a partial specialization of a
	      partial instantiation, but currently we can't (c++/41727).  */
	   && TMPL_ARGS_DEPTH (specargs) == 1
	   && !get_partial_spec_bindings (maintmpl, maintmpl, specargs))
    {
      auto_diagnostic_group d;
      if (permerror (input_location, "partial specialization %qD is not "
		     "more specialized than", decl))
	inform (DECL_SOURCE_LOCATION (maintmpl), "primary template %qD",
		maintmpl);
    }

  /* [temp.class.spec]

     A partially specialized non-type argument expression shall not
     involve template parameters of the partial specialization except
     when the argument expression is a simple identifier.

     The type of a template parameter corresponding to a specialized
     non-type argument shall not be dependent on a parameter of the
     specialization.

     Also, we verify that pack expansions only occur at the
     end of the argument list.  */
  tpd2.parms = 0;
  for (i = 0; i < nargs; ++i)
    {
      tree parm = TREE_VALUE (TREE_VEC_ELT (main_inner_parms, i));
      tree arg = TREE_VEC_ELT (inner_args, i);
      tree packed_args = NULL_TREE;
      int j, len = 1;

      if (ARGUMENT_PACK_P (arg))
	{
	  /* Extract the arguments from the argument pack.  We'll be
	     iterating over these in the following loop.  */
	  packed_args = ARGUMENT_PACK_ARGS (arg);
	  len = TREE_VEC_LENGTH (packed_args);
	}

      for (j = 0; j < len; j++)
	{
	  if (packed_args)
	    /* Get the Jth argument in the parameter pack.  */
	    arg = TREE_VEC_ELT (packed_args, j);

	  if (PACK_EXPANSION_P (arg))
	    {
	      /* Pack expansions must come at the end of the
		 argument list.  */
	      if ((packed_args && j < len - 1)
		  || (!packed_args && i < nargs - 1))
		{
		  if (TREE_CODE (arg) == EXPR_PACK_EXPANSION)
		    error ("parameter pack argument %qE must be at the "
			   "end of the template argument list", arg);
		  else
		    error ("parameter pack argument %qT must be at the "
			   "end of the template argument list", arg);
		}
	    }

	  if (TREE_CODE (arg) == EXPR_PACK_EXPANSION)
	    /* We only care about the pattern.  */
	    arg = PACK_EXPANSION_PATTERN (arg);

	  if (/* These first two lines are the `non-type' bit.  */
	      !TYPE_P (arg)
	      && TREE_CODE (arg) != TEMPLATE_DECL
	      /* This next two lines are the `argument expression is not just a
		 simple identifier' condition and also the `specialized
		 non-type argument' bit.  */
	      && TREE_CODE (arg) != TEMPLATE_PARM_INDEX
	      && !((REFERENCE_REF_P (arg)
		    || TREE_CODE (arg) == VIEW_CONVERT_EXPR)
		   && TREE_CODE (TREE_OPERAND (arg, 0)) == TEMPLATE_PARM_INDEX))
	    {
	      if ((!packed_args && tpd.arg_uses_template_parms[i])
		  || (packed_args && uses_template_parms (arg)))
		error_at (cp_expr_loc_or_input_loc (arg),
			  "template argument %qE involves template "
			  "parameter(s)", arg);
	      else
		{
		  /* Look at the corresponding template parameter,
		     marking which template parameters its type depends
		     upon.  */
		  tree type = TREE_TYPE (parm);

		  if (!tpd2.parms)
		    {
		      /* We haven't yet initialized TPD2.  Do so now.  */
		      tpd2.arg_uses_template_parms = XALLOCAVEC (int, nargs);
		      /* The number of parameters here is the number in the
			 main template, which, as checked in the assertion
			 above, is NARGS.  */
		      tpd2.parms = XALLOCAVEC (int, nargs);
		      tpd2.level =
			TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (maintmpl));
		    }

		  /* Mark the template parameters.  But this time, we're
		     looking for the template parameters of the main
		     template, not in the specialization.  */
		  tpd2.current_arg = i;
		  tpd2.arg_uses_template_parms[i] = 0;
		  memset (tpd2.parms, 0, sizeof (int) * nargs);
		  for_each_template_parm (type,
					  &mark_template_parm,
					  &tpd2,
					  NULL,
					  /*include_nondeduced_p=*/false);

		  if (tpd2.arg_uses_template_parms [i])
		    {
		      /* The type depended on some template parameters.
			 If they are fully specialized in the
			 specialization, that's OK.  */
		      int j;
		      int count = 0;
		      for (j = 0; j < nargs; ++j)
			if (tpd2.parms[j] != 0
			    && tpd.arg_uses_template_parms [j])
			  ++count;
		      if (count != 0)
			error_n (input_location, count,
				 "type %qT of template argument %qE depends "
				 "on a template parameter",
				 "type %qT of template argument %qE depends "
				 "on template parameters",
				 type,
				 arg);
		    }
		}
	    }
	}
    }

  /* We should only get here once.  */
  if (TREE_CODE (decl) == TYPE_DECL)
    gcc_assert (!COMPLETE_TYPE_P (type));

  // Build the template decl.
  tree tmpl = build_template_decl (decl, current_template_parms,
				   DECL_MEMBER_TEMPLATE_P (maintmpl));
  TREE_TYPE (tmpl) = type;
  DECL_TEMPLATE_RESULT (tmpl) = decl;
  SET_DECL_TEMPLATE_SPECIALIZATION (tmpl);
  DECL_TEMPLATE_INFO (tmpl) = build_template_info (maintmpl, specargs);
  DECL_PRIMARY_TEMPLATE (tmpl) = maintmpl;

  /* Give template template parms a DECL_CONTEXT of the template
     for which they are a parameter.  */
  for (i = 0; i < ntparms; ++i)
    {
      tree parm = TREE_VALUE (TREE_VEC_ELT (inner_parms, i));
      if (TREE_CODE (parm) == TEMPLATE_DECL)
	DECL_CONTEXT (parm) = tmpl;
    }

  if (VAR_P (decl))
    /* We didn't register this in check_explicit_specialization so we could
       wait until the constraints were set.  */
    decl = register_specialization (decl, maintmpl, specargs, false, 0);
  else
    associate_classtype_constraints (type);

  /* Chain this partial specialization onto the primary template's list.  */
  DECL_TEMPLATE_SPECIALIZATIONS (maintmpl)
    = tree_cons (specargs, tmpl,
		 DECL_TEMPLATE_SPECIALIZATIONS (maintmpl));
  TREE_TYPE (DECL_TEMPLATE_SPECIALIZATIONS (maintmpl)) = type;

  /* Complain about any earlier implicit instantiations of the primary
     template that this partial specialization would have matched.  */
  for (inst = DECL_TEMPLATE_INSTANTIATIONS (maintmpl); inst;
       inst = TREE_CHAIN (inst))
    {
      tree instance = TREE_VALUE (inst);
      if (TYPE_P (instance)
	  ? (COMPLETE_TYPE_P (instance)
	     && CLASSTYPE_IMPLICIT_INSTANTIATION (instance))
	  : DECL_TEMPLATE_INSTANTIATION (instance))
	{
	  tree spec = most_specialized_partial_spec (instance, tf_none);
	  tree inst_decl = (DECL_P (instance)
			    ? instance : TYPE_NAME (instance));
	  if (!spec)
	    /* OK */;
	  else if (spec == error_mark_node)
	    permerror (input_location,
		       "declaration of %qD ambiguates earlier template "
		       "instantiation for %qD", decl, inst_decl);
	  else if (TREE_VALUE (spec) == tmpl)
	    permerror (input_location,
		       "partial specialization of %qD after instantiation "
		       "of %qD", decl, inst_decl);
	}
    }

  return decl;
}
/* PARM is a template parameter of some form; return the corresponding
   TEMPLATE_PARM_INDEX.  */
static tree
get_template_parm_index (tree parm)
{
  /* First step from the parameter's declaration down to the node
     that actually carries the index.  */
  switch (TREE_CODE (parm))
    {
    case PARM_DECL:
    case CONST_DECL:
      /* Non-type parameter: the index is stashed in DECL_INITIAL.  */
      parm = DECL_INITIAL (parm);
      break;

    case TYPE_DECL:
    case TEMPLATE_DECL:
      /* Type or template parameter: look at the declared type.  */
      parm = TREE_TYPE (parm);
      break;

    default:
      break;
    }

  /* A type or template template parameter stores its index in the
     TEMPLATE_TYPE_PARM_INDEX field of the parm type itself.  */
  switch (TREE_CODE (parm))
    {
    case TEMPLATE_TYPE_PARM:
    case BOUND_TEMPLATE_TEMPLATE_PARM:
    case TEMPLATE_TEMPLATE_PARM:
      parm = TEMPLATE_TYPE_PARM_INDEX (parm);
      break;

    default:
      break;
    }

  gcc_assert (TREE_CODE (parm) == TEMPLATE_PARM_INDEX);
  return parm;
}
/* Subroutine of fixed_parameter_pack_p below.  Look for any template
   parameter packs used by the template parameter PARM.  */
static void
fixed_parameter_pack_p_1 (tree parm, struct find_parameter_pack_data *ppd)
{
  /* A type parm can't refer to another parm.  */
  if (TREE_CODE (parm) == TYPE_DECL || parm == error_mark_node)
    return;

  if (TREE_CODE (parm) == PARM_DECL)
    {
      /* A non-type parameter: scan its type for parameter packs.  */
      cp_walk_tree (&TREE_TYPE (parm), &find_parameter_packs_r,
		    ppd, ppd->visited);
      return;
    }

  /* Only a template template parameter remains; recurse into its own
     template parameter list.  */
  gcc_assert (TREE_CODE (parm) == TEMPLATE_DECL);

  tree innermost = INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (parm));
  const int len = TREE_VEC_LENGTH (innermost);
  for (int ix = 0; ix < len; ++ix)
    {
      tree sub = TREE_VALUE (TREE_VEC_ELT (innermost, ix));
      /* Any packs in the type of a pack parameter are expanded by that
	 parameter itself, so only recurse into non-pack parameters.  */
      if (!template_parameter_pack_p (sub))
	fixed_parameter_pack_p_1 (sub, ppd);
    }
}
/* PARM is a template parameter pack.  Return any parameter packs used in
   its type or the type of any of its template parameters.  If there are
   any such packs, it will be instantiated into a fixed template parameter
   list by partial instantiation rather than be fully deduced.  */
tree
fixed_parameter_pack_p (tree parm)
{
  /* This can only be true in a member template.  */
  if (TEMPLATE_PARM_ORIG_LEVEL (get_template_parm_index (parm)) < 2)
    return NULL_TREE;
  /* This can only be true for a parameter pack.  */
  if (!template_parameter_pack_p (parm))
    return NULL_TREE;
  /* A type parm can't refer to another parm.  */
  if (TREE_CODE (parm) == TYPE_DECL)
    return NULL_TREE;

  /* Collect referenced packs via the shared pack-finding walker.  */
  tree packs = NULL_TREE;
  struct find_parameter_pack_data ppd;
  ppd.parameter_packs = &packs;
  ppd.type_pack_expansion_p = false;
  ppd.visited = new hash_set<tree>;

  fixed_parameter_pack_p_1 (parm, &ppd);

  delete ppd.visited;
  return packs;
}
/* Check that a template declaration's use of default arguments and
   parameter packs is not invalid.  Here, PARMS are the template
   parameters.  IS_PRIMARY is true if DECL is the thing declared by
   a primary template.  IS_PARTIAL is true if DECL is a partial
   specialization.

   IS_FRIEND_DECL is nonzero if DECL is either a non-defining friend
   function template declaration or a friend class template
   declaration.  In the function case, 1 indicates a declaration, 2
   indicates a redeclaration.  When IS_FRIEND_DECL=2, no errors are
   emitted for extraneous default arguments.

   Returns TRUE if there were no errors found, FALSE otherwise.  */
bool
check_default_tmpl_args (tree decl, tree parms, bool is_primary,
			 bool is_partial, int is_friend_decl)
{
  const char *msg;
  int last_level_to_check;
  tree parm_level;
  bool no_errors = true;

  /* [temp.param]

     A default template-argument shall not be specified in a
     function template declaration or a function template definition, nor
     in the template-parameter-list of the definition of a member of a
     class template.  */

  if (TREE_CODE (CP_DECL_CONTEXT (decl)) == FUNCTION_DECL
      || (TREE_CODE (decl) == FUNCTION_DECL && DECL_LOCAL_FUNCTION_P (decl)))
    /* You can't have a function template declaration in a local
       scope, nor you can you define a member of a class template in a
       local scope.  */
    return true;

  if ((TREE_CODE (decl) == TYPE_DECL
       && TREE_TYPE (decl)
       && LAMBDA_TYPE_P (TREE_TYPE (decl)))
      || (TREE_CODE (decl) == FUNCTION_DECL
	  && LAMBDA_FUNCTION_P (decl)))
    /* A lambda doesn't have an explicit declaration; don't complain
       about the parms of the enclosing class.  */
    return true;

  if (current_class_type
      && !TYPE_BEING_DEFINED (current_class_type)
      && DECL_LANG_SPECIFIC (decl)
      && DECL_DECLARES_FUNCTION_P (decl)
      /* If this is either a friend defined in the scope of the class
	 or a member function.  */
      && (DECL_FUNCTION_MEMBER_P (decl)
	  ? same_type_p (DECL_CONTEXT (decl), current_class_type)
	  : DECL_FRIEND_CONTEXT (decl)
	  ? same_type_p (DECL_FRIEND_CONTEXT (decl), current_class_type)
	  : false)
      /* And, if it was a member function, it really was defined in
	 the scope of the class.  */
      && (!DECL_FUNCTION_MEMBER_P (decl)
	  || DECL_INITIALIZED_IN_CLASS_P (decl)))
    /* We already checked these parameters when the template was
       declared, so there's no need to do it again now.  This function
       was defined in class scope, but we're processing its body now
       that the class is complete.  */
    return true;

  /* Core issue 226 (C++0x only): the following only applies to class
     templates.  */
  if (is_primary
      && ((cxx_dialect == cxx98) || TREE_CODE (decl) != FUNCTION_DECL))
    {
      /* [temp.param]

	 If a template-parameter has a default template-argument, all
	 subsequent template-parameters shall have a default
	 template-argument supplied.  */
      for (parm_level = parms; parm_level; parm_level = TREE_CHAIN (parm_level))
	{
	  tree inner_parms = TREE_VALUE (parm_level);
	  int ntparms = TREE_VEC_LENGTH (inner_parms);
	  /* Set once we have seen a parameter with a default argument;
	     every later non-pack parameter must then also have one.  */
	  int seen_def_arg_p = 0;
	  int i;

	  for (i = 0; i < ntparms; ++i)
	    {
	      tree parm = TREE_VEC_ELT (inner_parms, i);

	      if (parm == error_mark_node)
		continue;

	      if (TREE_PURPOSE (parm))
		seen_def_arg_p = 1;
	      else if (seen_def_arg_p
		       && !template_parameter_pack_p (TREE_VALUE (parm)))
		{
		  error ("no default argument for %qD", TREE_VALUE (parm));
		  /* For better subsequent error-recovery, we indicate that
		     there should have been a default argument.  */
		  TREE_PURPOSE (parm) = error_mark_node;
		  no_errors = false;
		}
	      else if (!is_partial
		       && !is_friend_decl
		       /* Don't complain about an enclosing partial
			  specialization.  */
		       && parm_level == parms
		       && TREE_CODE (decl) == TYPE_DECL
		       && i < ntparms - 1
		       && template_parameter_pack_p (TREE_VALUE (parm))
		       /* A fixed parameter pack will be partially
			  instantiated into a fixed length list.  */
		       && !fixed_parameter_pack_p (TREE_VALUE (parm)))
		{
		  /* A primary class template can only have one
		     parameter pack, at the end of the template
		     parameter list.  */
		  error ("parameter pack %q+D must be at the end of the"
			 " template parameter list", TREE_VALUE (parm));
		  TREE_VALUE (TREE_VEC_ELT (inner_parms, i))
		    = error_mark_node;
		  no_errors = false;
		}
	    }
	}
    }

  if (((cxx_dialect == cxx98) && TREE_CODE (decl) != TYPE_DECL)
      || is_partial
      || !is_primary
      || is_friend_decl)
    /* For an ordinary class template, default template arguments are
       allowed at the innermost level, e.g.:
	 template <class T = int>
	 struct S {};
       but, in a partial specialization, they're not allowed even
       there, as we have in [temp.class.spec]:

	 The template parameter list of a specialization shall not
	 contain default template argument values.

       So, for a partial specialization, or for a function template
       (in C++98/C++03), we look at all of them.  */
    ;
  else
    /* But, for a primary class template that is not a partial
       specialization we look at all template parameters except the
       innermost ones.  */
    parms = TREE_CHAIN (parms);

  /* Figure out what error message to issue.  MSG is latched: it is
     emitted at most once below and then cleared.  */
  if (is_friend_decl == 2)
    msg = G_("default template arguments may not be used in function template "
	     "friend re-declaration");
  else if (is_friend_decl)
    msg = G_("default template arguments may not be used in template "
	     "friend declarations");
  else if (TREE_CODE (decl) == FUNCTION_DECL && (cxx_dialect == cxx98))
    msg = G_("default template arguments may not be used in function templates "
	     "without %<-std=c++11%> or %<-std=gnu++11%>");
  else if (is_partial)
    msg = G_("default template arguments may not be used in "
	     "partial specializations");
  else if (current_class_type && CLASSTYPE_IS_TEMPLATE (current_class_type))
    msg = G_("default argument for template parameter for class enclosing %qD");
  else
    /* Per [temp.param]/9, "A default template-argument shall not be
       specified in the template-parameter-lists of the definition of
       a member of a class template that appears outside of the member's
       class.", thus if we aren't handling a member of a class template
       there is no need to examine the parameters.  */
    return true;

  if (current_class_type && TYPE_BEING_DEFINED (current_class_type))
    /* If we're inside a class definition, there's no need to
       examine the parameters to the class itself.  On the one
       hand, they will be checked when the class is defined, and,
       on the other, default arguments are valid in things like:
	 template <class T = double>
	 struct S { template <class U> void f(U); };
       Here the default argument for `S' has no bearing on the
       declaration of `f'.  */
    last_level_to_check = template_class_depth (current_class_type) + 1;
  else
    /* Check everything.  */
    last_level_to_check = 0;

  for (parm_level = parms;
       parm_level && TMPL_PARMS_DEPTH (parm_level) >= last_level_to_check;
       parm_level = TREE_CHAIN (parm_level))
    {
      tree inner_parms = TREE_VALUE (parm_level);
      int i;
      int ntparms;

      ntparms = TREE_VEC_LENGTH (inner_parms);
      for (i = 0; i < ntparms; ++i)
	{
	  if (TREE_VEC_ELT (inner_parms, i) == error_mark_node)
	    continue;

	  if (TREE_PURPOSE (TREE_VEC_ELT (inner_parms, i)))
	    {
	      if (msg)
		{
		  no_errors = false;
		  /* For a friend re-declaration, record the problem
		     without diagnosing it.  */
		  if (is_friend_decl == 2)
		    return no_errors;

		  error (msg, decl);
		  msg = 0;
		}

	      /* Clear out the default argument so that we are not
		 confused later.  */
	      TREE_PURPOSE (TREE_VEC_ELT (inner_parms, i)) = NULL_TREE;
	    }
	}

      /* At this point, if we're still interested in issuing messages,
	 they must apply to classes surrounding the object declared.  */
      if (msg)
	msg = G_("default argument for template parameter for class "
		 "enclosing %qD");
    }

  return no_errors;
}
/* Worker for push_template_decl_real, called via
   for_each_template_parm.  DATA is really an int, indicating the
   level of the parameters we are interested in.  If T is a template
   parameter of that level, return nonzero.  */
static int
template_parm_this_level_p (tree t, void* data)
{
  const int wanted = *(int *) data;
  /* Non-type parms carry their level in the TEMPLATE_PARM_INDEX;
     type/template parms carry it on the type itself.  */
  const int level = (TREE_CODE (t) == TEMPLATE_PARM_INDEX
		     ? TEMPLATE_PARM_LEVEL (t)
		     : TEMPLATE_TYPE_LEVEL (t));
  return level == wanted;
}
/* Worker for uses_outer_template_parms, called via for_each_template_parm.
   DATA is really an int, indicating the innermost outer level of parameters.
   If T is a template parameter of that level or further out, return
   nonzero.  */
static int
template_parm_outer_level (tree t, void *data)
{
  const int outermost = *(int *) data;
  /* Non-type parms carry their level in the TEMPLATE_PARM_INDEX;
     type/template parms carry it on the type itself.  */
  const int level = (TREE_CODE (t) == TEMPLATE_PARM_INDEX
		     ? TEMPLATE_PARM_LEVEL (t)
		     : TEMPLATE_TYPE_LEVEL (t));
  return level <= outermost;
}
/* Creates a TEMPLATE_DECL for the indicated DECL using the template
   parameters given by current_template_args, or reuses a
   previously existing one, if appropriate.  Returns the DECL, or an
   equivalent one, if it is replaced via a call to duplicate_decls.

   If IS_FRIEND is true, DECL is a friend declaration.  */
tree
push_template_decl_real (tree decl, bool is_friend)
{
  tree tmpl;
  tree args;
  tree info;
  tree ctx;
  bool is_primary;
  bool is_partial;
  int new_template_p = 0;
  /* True if the template is a member template, in the sense of
     [temp.mem].  */
  bool member_template_p = false;

  if (decl == error_mark_node || !current_template_parms)
    return error_mark_node;

  /* See if this is a partial specialization.  */
  is_partial = ((DECL_IMPLICIT_TYPEDEF_P (decl)
		 && TREE_CODE (TREE_TYPE (decl)) != ENUMERAL_TYPE
		 && CLASSTYPE_TEMPLATE_SPECIALIZATION (TREE_TYPE (decl)))
		|| (VAR_P (decl)
		    && DECL_LANG_SPECIFIC (decl)
		    && DECL_TEMPLATE_SPECIALIZATION (decl)
		    && TINFO_USED_TEMPLATE_ID (DECL_TEMPLATE_INFO (decl))));

  if (TREE_CODE (decl) == FUNCTION_DECL && DECL_FRIEND_P (decl))
    is_friend = true;

  if (is_friend)
    /* For a friend, we want the context of the friend, not
       the type of which it is a friend.  */
    ctx = CP_DECL_CONTEXT (decl);
  else if (CP_DECL_CONTEXT (decl)
	   && TREE_CODE (CP_DECL_CONTEXT (decl)) != NAMESPACE_DECL)
    /* In the case of a virtual function, we want the class in which
       it is defined.  */
    ctx = CP_DECL_CONTEXT (decl);
  else
    /* Otherwise, if we're currently defining some class, the DECL
       is assumed to be a member of the class.  */
    ctx = current_scope ();

  /* A namespace context counts as "no class context" below.  */
  if (ctx && TREE_CODE (ctx) == NAMESPACE_DECL)
    ctx = NULL_TREE;

  if (!DECL_CONTEXT (decl))
    DECL_CONTEXT (decl) = FROB_CONTEXT (current_namespace);

  /* See if this is a primary template.  */
  if (is_friend && ctx
      && uses_template_parms_level (ctx, processing_template_decl))
    /* A friend template that specifies a class context, i.e.
	 template <typename T> friend void A<T>::f();
       is not primary.  */
    is_primary = false;
  else if (TREE_CODE (decl) == TYPE_DECL
	   && LAMBDA_TYPE_P (TREE_TYPE (decl)))
    is_primary = false;
  else
    is_primary = template_parm_scope_p ();

  /* For a primary template, enforce the declaration-form restrictions
     of [temp.mem] and [basic.stc.dynamic.allocation].  */
  if (is_primary)
    {
      warning (OPT_Wtemplates, "template %qD declared", decl);

      if (DECL_CLASS_SCOPE_P (decl))
	member_template_p = true;

      if (TREE_CODE (decl) == TYPE_DECL
	  && IDENTIFIER_ANON_P (DECL_NAME (decl)))
	{
	  error ("template class without a name");
	  return error_mark_node;
	}
      else if (TREE_CODE (decl) == FUNCTION_DECL)
	{
	  if (member_template_p)
	    {
	      if (DECL_OVERRIDE_P (decl) || DECL_FINAL_P (decl))
		error ("member template %qD may not have virt-specifiers", decl);
	    }
	  if (DECL_DESTRUCTOR_P (decl))
	    {
	      /* [temp.mem]

		 A destructor shall not be a member template.  */
	      error_at (DECL_SOURCE_LOCATION (decl),
			"destructor %qD declared as member template", decl);
	      return error_mark_node;
	    }
	  if (IDENTIFIER_NEWDEL_OP_P (DECL_NAME (decl))
	      && (!prototype_p (TREE_TYPE (decl))
		  || TYPE_ARG_TYPES (TREE_TYPE (decl)) == void_list_node
		  || !TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (decl)))
		  || (TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (decl)))
		      == void_list_node)))
	    {
	      /* [basic.stc.dynamic.allocation]

		 An allocation function can be a function
		 template.  ...  Template allocation functions shall
		 have two or more parameters.  */
	      error ("invalid template declaration of %qD", decl);
	      return error_mark_node;
	    }
	}
      else if (DECL_IMPLICIT_TYPEDEF_P (decl)
	       && CLASS_TYPE_P (TREE_TYPE (decl)))
	{
	  /* Class template, set TEMPLATE_TYPE_PARM_FOR_CLASS.  */
	  tree parms = INNERMOST_TEMPLATE_PARMS (current_template_parms);
	  for (int i = 0; i < TREE_VEC_LENGTH (parms); ++i)
	    {
	      tree t = TREE_VALUE (TREE_VEC_ELT (parms, i));
	      if (TREE_CODE (t) == TYPE_DECL)
		t = TREE_TYPE (t);
	      if (TREE_CODE (t) == TEMPLATE_TYPE_PARM)
		TEMPLATE_TYPE_PARM_FOR_CLASS (t) = true;
	    }
	}
      else if (TREE_CODE (decl) == TYPE_DECL
	       && TYPE_DECL_ALIAS_P (decl))
	/* alias-declaration */
	gcc_assert (!DECL_ARTIFICIAL (decl));
      else if (VAR_P (decl))
	/* C++14 variable template. */;
      else if (TREE_CODE (decl) == CONCEPT_DECL)
	/* C++2a concept definitions.  */;
      else
	{
	  error ("template declaration of %q#D", decl);
	  return error_mark_node;
	}
    }

  /* Check to see that the rules regarding the use of default
     arguments are not being violated.  We check args for a friend
     functions when we know whether it's a definition, introducing
     declaration or re-declaration.  */
  if (!is_friend || TREE_CODE (decl) != FUNCTION_DECL)
    check_default_tmpl_args (decl, current_template_parms,
			     is_primary, is_partial, is_friend);

  /* Ensure that there are no parameter packs in the type of this
     declaration that have not been expanded.  */
  if (TREE_CODE (decl) == FUNCTION_DECL)
    {
      /* Check each of the arguments individually to see if there are
	 any bare parameter packs.  */
      tree type = TREE_TYPE (decl);
      tree arg = DECL_ARGUMENTS (decl);
      tree argtype = TYPE_ARG_TYPES (type);

      while (arg && argtype)
	{
	  if (!DECL_PACK_P (arg)
	      && check_for_bare_parameter_packs (TREE_TYPE (arg)))
	    {
	      /* This is a PARM_DECL that contains unexpanded parameter
		 packs.  We have already complained about this in the
		 check_for_bare_parameter_packs call, so just replace
		 these types with ERROR_MARK_NODE.  */
	      TREE_TYPE (arg) = error_mark_node;
	      TREE_VALUE (argtype) = error_mark_node;
	    }

	  arg = DECL_CHAIN (arg);
	  argtype = TREE_CHAIN (argtype);
	}

      /* Check for bare parameter packs in the return type and the
	 exception specifiers.  */
      if (check_for_bare_parameter_packs (TREE_TYPE (type)))
	/* Errors were already issued, set return type to int
	   as the frontend doesn't expect error_mark_node as
	   the return type.  */
	TREE_TYPE (type) = integer_type_node;
      if (check_for_bare_parameter_packs (TYPE_RAISES_EXCEPTIONS (type)))
	TYPE_RAISES_EXCEPTIONS (type) = NULL_TREE;
    }
  else if (check_for_bare_parameter_packs (is_typedef_decl (decl)
					   ? DECL_ORIGINAL_TYPE (decl)
					   : TREE_TYPE (decl)))
    {
      TREE_TYPE (decl) = error_mark_node;
      return error_mark_node;
    }

  /* Partial specializations take a completely different path.  */
  if (is_partial)
    return process_partial_specialization (decl);

  args = current_template_args ();

  /* Decide whether we need a fresh TEMPLATE_DECL or can reuse one
     already associated with DECL.  */
  if (!ctx
      || TREE_CODE (ctx) == FUNCTION_DECL
      || (CLASS_TYPE_P (ctx) && TYPE_BEING_DEFINED (ctx))
      || (TREE_CODE (decl) == TYPE_DECL
	  && LAMBDA_TYPE_P (TREE_TYPE (decl)))
      || (is_friend && !DECL_TEMPLATE_INFO (decl)))
    {
      if (DECL_LANG_SPECIFIC (decl)
	  && DECL_TEMPLATE_INFO (decl)
	  && DECL_TI_TEMPLATE (decl))
	tmpl = DECL_TI_TEMPLATE (decl);
      /* If DECL is a TYPE_DECL for a class-template, then there won't
	 be DECL_LANG_SPECIFIC.  The information equivalent to
	 DECL_TEMPLATE_INFO is found in TYPE_TEMPLATE_INFO instead.  */
      else if (DECL_IMPLICIT_TYPEDEF_P (decl)
	       && TYPE_TEMPLATE_INFO (TREE_TYPE (decl))
	       && TYPE_TI_TEMPLATE (TREE_TYPE (decl)))
	{
	  /* Since a template declaration already existed for this
	     class-type, we must be redeclaring it here.  Make sure
	     that the redeclaration is valid.  */
	  redeclare_class_template (TREE_TYPE (decl),
				    current_template_parms,
				    current_template_constraints ());
	  /* We don't need to create a new TEMPLATE_DECL; just use the
	     one we already had.  */
	  tmpl = TYPE_TI_TEMPLATE (TREE_TYPE (decl));
	}
      else
	{
	  tmpl = build_template_decl (decl, current_template_parms,
				      member_template_p);
	  new_template_p = 1;

	  if (DECL_LANG_SPECIFIC (decl)
	      && DECL_TEMPLATE_SPECIALIZATION (decl))
	    {
	      /* A specialization of a member template of a template
		 class.  */
	      SET_DECL_TEMPLATE_SPECIALIZATION (tmpl);
	      DECL_TEMPLATE_INFO (tmpl) = DECL_TEMPLATE_INFO (decl);
	      DECL_TEMPLATE_INFO (decl) = NULL_TREE;
	    }
	}
    }
  else
    {
      tree a, t, current, parms;
      int i;
      tree tinfo = get_template_info (decl);

      if (!tinfo)
	{
	  error ("template definition of non-template %q#D", decl);
	  return error_mark_node;
	}

      tmpl = TI_TEMPLATE (tinfo);

      if (DECL_FUNCTION_TEMPLATE_P (tmpl)
	  && DECL_TEMPLATE_INFO (decl) && DECL_TI_ARGS (decl)
	  && DECL_TEMPLATE_SPECIALIZATION (decl)
	  && DECL_MEMBER_TEMPLATE_P (tmpl))
	{
	  tree new_tmpl;

	  /* The declaration is a specialization of a member
	     template, declared outside the class.  Therefore, the
	     innermost template arguments will be NULL, so we
	     replace them with the arguments determined by the
	     earlier call to check_explicit_specialization.  */
	  args = DECL_TI_ARGS (decl);

	  new_tmpl
	    = build_template_decl (decl, current_template_parms,
				   member_template_p);
	  DECL_TEMPLATE_RESULT (new_tmpl) = decl;
	  TREE_TYPE (new_tmpl) = TREE_TYPE (decl);
	  DECL_TI_TEMPLATE (decl) = new_tmpl;
	  SET_DECL_TEMPLATE_SPECIALIZATION (new_tmpl);
	  DECL_TEMPLATE_INFO (new_tmpl)
	    = build_template_info (tmpl, args);

	  register_specialization (new_tmpl,
				   most_general_template (tmpl),
				   args,
				   is_friend, 0);
	  return decl;
	}

      /* Make sure the template headers we got make sense.  */
      parms = DECL_TEMPLATE_PARMS (tmpl);
      i = TMPL_PARMS_DEPTH (parms);
      if (TMPL_ARGS_DEPTH (args) != i)
	{
	  error ("expected %d levels of template parms for %q#D, got %d",
		 i, decl, TMPL_ARGS_DEPTH (args));
	  DECL_INTERFACE_KNOWN (decl) = 1;
	  return error_mark_node;
	}
      else
	/* Walk outward through the enclosing scopes, checking each
	   parameter level against the corresponding argument level.  */
	for (current = decl; i > 0; --i, parms = TREE_CHAIN (parms))
	  {
	    a = TMPL_ARGS_LEVEL (args, i);
	    t = INNERMOST_TEMPLATE_PARMS (parms);

	    if (TREE_VEC_LENGTH (t) != TREE_VEC_LENGTH (a))
	      {
		if (current == decl)
		  error ("got %d template parameters for %q#D",
			 TREE_VEC_LENGTH (a), decl);
		else
		  error ("got %d template parameters for %q#T",
			 TREE_VEC_LENGTH (a), current);
		error (" but %d required", TREE_VEC_LENGTH (t));
		/* Avoid crash in import_export_decl.  */
		DECL_INTERFACE_KNOWN (decl) = 1;
		return error_mark_node;
	      }

	    if (current == decl)
	      current = ctx;
	    else if (current == NULL_TREE)
	      /* Can happen in erroneous input.  */
	      break;
	    else
	      current = get_containing_scope (current);
	  }

      /* Check that the parms are used in the appropriate qualifying scopes
	 in the declarator.  */
      if (!comp_template_args
	  (TI_ARGS (tinfo),
	   TI_ARGS (get_template_info (DECL_TEMPLATE_RESULT (tmpl)))))
	{
	  error ("template arguments to %qD do not match original "
		 "template %qD", decl, DECL_TEMPLATE_RESULT (tmpl));
	  if (!uses_template_parms (TI_ARGS (tinfo)))
	    inform (input_location, "use %<template<>%> for"
		    " an explicit specialization");
	  /* Avoid crash in import_export_decl.  */
	  DECL_INTERFACE_KNOWN (decl) = 1;
	  return error_mark_node;
	}
    }

  DECL_TEMPLATE_RESULT (tmpl) = decl;
  TREE_TYPE (tmpl) = TREE_TYPE (decl);

  /* Push template declarations for global functions and types.  Note
     that we do not try to push a global template friend declared in a
     template class; such a thing may well depend on the template
     parameters of the class.  */
  if (new_template_p && !ctx
      && !(is_friend && template_class_depth (current_class_type) > 0))
    {
      tmpl = pushdecl_namespace_level (tmpl, is_friend);
      if (tmpl == error_mark_node)
	return error_mark_node;

      /* Hide template friend classes that haven't been declared yet.  */
      if (is_friend && TREE_CODE (decl) == TYPE_DECL)
	{
	  DECL_ANTICIPATED (tmpl) = 1;
	  DECL_FRIEND_P (tmpl) = 1;
	}
    }

  if (is_primary)
    {
      tree parms = DECL_TEMPLATE_PARMS (tmpl);

      DECL_PRIMARY_TEMPLATE (tmpl) = tmpl;

      /* Give template template parms a DECL_CONTEXT of the template
	 for which they are a parameter.  */
      parms = INNERMOST_TEMPLATE_PARMS (parms);
      for (int i = TREE_VEC_LENGTH (parms) - 1; i >= 0; --i)
	{
	  tree parm = TREE_VALUE (TREE_VEC_ELT (parms, i));
	  if (TREE_CODE (parm) == TEMPLATE_DECL)
	    DECL_CONTEXT (parm) = tmpl;
	}

      if (TREE_CODE (decl) == TYPE_DECL
	  && TYPE_DECL_ALIAS_P (decl))
	{
	  if (tree constr
	      = TEMPLATE_PARMS_CONSTRAINTS (DECL_TEMPLATE_PARMS (tmpl)))
	    {
	      /* ??? Why don't we do this here for all templates?  */
	      constr = build_constraints (constr, NULL_TREE);
	      set_constraints (decl, constr);
	    }
	  if (complex_alias_template_p (tmpl))
	    TEMPLATE_DECL_COMPLEX_ALIAS_P (tmpl) = true;
	}
    }

  /* The DECL_TI_ARGS of DECL contains full set of arguments referring
     back to its most general template.  If TMPL is a specialization,
     ARGS may only have the innermost set of arguments.  Add the missing
     argument levels if necessary.  */
  if (DECL_TEMPLATE_INFO (tmpl))
    args = add_outermost_template_args (DECL_TI_ARGS (tmpl), args);

  info = build_template_info (tmpl, args);

  if (DECL_IMPLICIT_TYPEDEF_P (decl))
    SET_TYPE_TEMPLATE_INFO (TREE_TYPE (tmpl), info);
  else
    {
      if (is_primary)
	retrofit_lang_decl (decl);
      if (DECL_LANG_SPECIFIC (decl))
	DECL_TEMPLATE_INFO (decl) = info;
    }

  if (flag_implicit_templates
      && !is_friend
      && TREE_PUBLIC (decl)
      && VAR_OR_FUNCTION_DECL_P (decl))
    /* Set DECL_COMDAT on template instantiations; if we force
       them to be emitted by explicit instantiation,
       mark_needed will tell cgraph to do the right thing.  */
    DECL_COMDAT (decl) = true;

  return DECL_TEMPLATE_RESULT (tmpl);
}
/* Convenience wrapper around push_template_decl_real for the common,
   non-friend case.  */
tree
push_template_decl (tree decl)
{
  const bool friend_p = false;
  return push_template_decl_real (decl, friend_p);
}
/* FN is an inheriting constructor that inherits from the constructor
   template INHERITED; turn FN into a constructor template with a matching
   template header.  */
tree
add_inherited_template_parms (tree fn, tree inherited)
{
  /* Clone the innermost parameter level of the inherited constructor's
     template so FN gets its own copy.  */
  tree inh_level
    = copy_node (INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (inherited)));
  /* Chain the cloned level on top of the current parameter levels.  */
  tree all_parms
    = tree_cons (size_int (processing_template_decl + 1),
		 inh_level, current_template_parms);

  /* Build the artificial member template wrapping FN.  */
  tree tmpl = build_template_decl (fn, all_parms, /*member*/true);
  TREE_TYPE (tmpl) = TREE_TYPE (fn);
  DECL_TEMPLATE_RESULT (tmpl) = fn;
  DECL_ARTIFICIAL (tmpl) = true;
  DECL_PRIMARY_TEMPLATE (tmpl) = tmpl;

  /* Link FN back to its new template via template info.  */
  tree args = template_parms_to_args (all_parms);
  DECL_TEMPLATE_INFO (fn) = build_template_info (tmpl, args);

  return tmpl;
}
/* Called when a class template TYPE is redeclared with the indicated
   template PARMS, e.g.:
     template <class T> struct S;
     template <class T> struct S {};
   CONS is the constraint-info of the new declaration, or NULL_TREE.
   Checks that the redeclaration agrees with the original in parameter
   count, parameter kinds, packness, per-parameter constraints, default
   arguments and template constraints, diagnosing any mismatch; also
   merges default arguments in both directions.  Returns true iff the
   redeclaration is acceptable.  */
bool
redeclare_class_template (tree type, tree parms, tree cons)
{
  tree tmpl;
  tree tmpl_parms;
  int i;
  /* TYPE must already be known as a template.  */
  if (!TYPE_TEMPLATE_INFO (type))
    {
      error ("%qT is not a template type", type);
      return false;
    }
  tmpl = TYPE_TI_TEMPLATE (type);
  if (!PRIMARY_TEMPLATE_P (tmpl))
    /* The type is nested in some template class.  Nothing to worry
       about here; there are no new template parameters for the nested
       type.  */
    return true;
  if (!parms)
    {
      error ("template specifiers not specified in declaration of %qD",
	     tmpl);
      return false;
    }
  parms = INNERMOST_TEMPLATE_PARMS (parms);
  tmpl_parms = DECL_INNERMOST_TEMPLATE_PARMS (tmpl);
  /* Both declarations must have the same number of parameters.  */
  if (TREE_VEC_LENGTH (parms) != TREE_VEC_LENGTH (tmpl_parms))
    {
      error_n (input_location, TREE_VEC_LENGTH (parms),
	       "redeclared with %d template parameter",
	       "redeclared with %d template parameters",
	       TREE_VEC_LENGTH (parms));
      inform_n (DECL_SOURCE_LOCATION (tmpl), TREE_VEC_LENGTH (tmpl_parms),
		"previous declaration %qD used %d template parameter",
		"previous declaration %qD used %d template parameters",
		tmpl, TREE_VEC_LENGTH (tmpl_parms));
      return false;
    }
  /* Compare the parameter lists element-wise.  */
  for (i = 0; i < TREE_VEC_LENGTH (tmpl_parms); ++i)
    {
      tree tmpl_parm;
      tree parm;
      tree tmpl_default;
      tree parm_default;
      /* Skip parameters that were already diagnosed as erroneous.  */
      if (TREE_VEC_ELT (tmpl_parms, i) == error_mark_node
	  || TREE_VEC_ELT (parms, i) == error_mark_node)
	continue;
      tmpl_parm = TREE_VALUE (TREE_VEC_ELT (tmpl_parms, i));
      if (error_operand_p (tmpl_parm))
	return false;
      parm = TREE_VALUE (TREE_VEC_ELT (parms, i));
      tmpl_default = TREE_PURPOSE (TREE_VEC_ELT (tmpl_parms, i));
      parm_default = TREE_PURPOSE (TREE_VEC_ELT (parms, i));
      /* TMPL_PARM and PARM can be either TYPE_DECL, PARM_DECL, or
	 TEMPLATE_DECL.  The kinds must match, non-type parameters must
	 have the same type, and the pack-ness must agree (checked via
	 TEMPLATE_TYPE_PARAMETER_PACK for type/template parameters and
	 TEMPLATE_PARM_PARAMETER_PACK for non-type parameters).  */
      if (TREE_CODE (tmpl_parm) != TREE_CODE (parm)
	  || (TREE_CODE (tmpl_parm) != TYPE_DECL
	      && !same_type_p (TREE_TYPE (tmpl_parm), TREE_TYPE (parm)))
	  || (TREE_CODE (tmpl_parm) != PARM_DECL
	      && (TEMPLATE_TYPE_PARAMETER_PACK (TREE_TYPE (tmpl_parm))
		  != TEMPLATE_TYPE_PARAMETER_PACK (TREE_TYPE (parm))))
	  || (TREE_CODE (tmpl_parm) == PARM_DECL
	      && (TEMPLATE_PARM_PARAMETER_PACK (DECL_INITIAL (tmpl_parm))
		  != TEMPLATE_PARM_PARAMETER_PACK (DECL_INITIAL (parm)))))
	{
	  auto_diagnostic_group d;
	  error ("template parameter %q+#D", tmpl_parm);
	  inform (input_location, "redeclared here as %q#D", parm);
	  return false;
	}
      /* The parameters can be declared to introduce different
	 constraints.  */
      tree p1 = TREE_VEC_ELT (tmpl_parms, i);
      tree p2 = TREE_VEC_ELT (parms, i);
      if (!template_parameter_constraints_equivalent_p (p1, p2))
	{
	  auto_diagnostic_group d;
	  error ("declaration of template parameter %q+#D with different "
		 "constraints", parm);
	  inform (DECL_SOURCE_LOCATION (tmpl_parm),
		  "original declaration appeared here");
	  return false;
	}
      if (tmpl_default != NULL_TREE && parm_default != NULL_TREE)
	{
	  /* We have in [temp.param]:
	     A template-parameter may not be given default arguments
	     by two different declarations in the same scope.  */
	  auto_diagnostic_group d;
	  error_at (input_location, "redefinition of default argument for %q#D", parm);
	  inform (DECL_SOURCE_LOCATION (tmpl_parm),
		  "original definition appeared here");
	  return false;
	}
      if (parm_default != NULL_TREE)
	/* Update the previous template parameters (which are the ones
	   that will really count) with the new default value.  */
	TREE_PURPOSE (TREE_VEC_ELT (tmpl_parms, i)) = parm_default;
      else if (tmpl_default != NULL_TREE)
	/* Update the new parameters, too; they'll be used as the
	   parameters for any members.  */
	TREE_PURPOSE (TREE_VEC_ELT (parms, i)) = tmpl_default;
      /* Give each template template parm in this redeclaration a
	 DECL_CONTEXT of the template for which they are a parameter.  */
      if (TREE_CODE (parm) == TEMPLATE_DECL)
	{
	  gcc_assert (DECL_CONTEXT (parm) == NULL_TREE);
	  DECL_CONTEXT (parm) = tmpl;
	}
      if (TREE_CODE (parm) == TYPE_DECL)
	TEMPLATE_TYPE_PARM_FOR_CLASS (TREE_TYPE (parm)) = true;
    }
  /* Finally compare the requires-clauses of the two declarations.  */
  tree ci = get_constraints (tmpl);
  tree req1 = ci ? CI_TEMPLATE_REQS (ci) : NULL_TREE;
  tree req2 = cons ? CI_TEMPLATE_REQS (cons) : NULL_TREE;
  /* Two classes with different constraints declare different entities.  */
  if (!cp_tree_equal (req1, req2))
    {
      auto_diagnostic_group d;
      error_at (input_location, "redeclaration %q#D with different "
		"constraints", tmpl);
      inform (DECL_SOURCE_LOCATION (tmpl),
	      "original declaration appeared here");
      return false;
    }
  return true;
}
/* The actual substitution part of instantiate_non_dependent_expr_sfinae,
   to be used when the caller has already checked
    (processing_template_decl
     && !instantiation_dependent_expression_p (expr)
     && potential_constant_expression (expr))
   and cleared processing_template_decl.  Substitutes with empty
   arguments, treating EXPR as an integral constant expression; COMPLAIN
   controls diagnostics.  */
tree
instantiate_non_dependent_expr_internal (tree expr, tsubst_flags_t complain)
{
  return tsubst_copy_and_build (expr,
				/*args=*/NULL_TREE,
				complain,
				/*in_decl=*/NULL_TREE,
				/*function_p=*/false,
				/*integral_constant_expression_p=*/true);
}
/* Simplify EXPR if it is a non-dependent expression.  Returns the
   (possibly simplified) expression.  While parsing a template we are
   supposed to treat, e.g.:
     template <typename T> void f(T[1 + 1]);
     template <typename T> void f(T[2]);
   as two declarations of the same function, so non-dependent constant
   expressions are folded here.  COMPLAIN controls diagnostics.  */
tree
instantiate_non_dependent_expr_sfinae (tree expr, tsubst_flags_t complain)
{
  if (expr == NULL_TREE)
    return NULL_TREE;
  /* Leave EXPR alone outside templates, or when it is dependent.  */
  if (!processing_template_decl
      || !is_nondependent_constant_expression (expr))
    return expr;
  /* Temporarily drop out of template processing for the substitution.  */
  processing_template_decl_sentinel s;
  return instantiate_non_dependent_expr_internal (expr, complain);
}
/* Convenience wrapper for instantiate_non_dependent_expr_sfinae that
   emits diagnostics (tf_error).  */
tree
instantiate_non_dependent_expr (tree expr)
{
  return instantiate_non_dependent_expr_sfinae (expr, tf_error);
}
/* Like instantiate_non_dependent_expr, but return NULL_TREE rather than
   an uninstantiated expression.  */
tree
instantiate_non_dependent_or_null (tree expr)
{
  /* Nothing to do for a null expression or outside a template.  */
  if (expr == NULL_TREE || !processing_template_decl)
    return expr;
  /* A dependent expression cannot be instantiated here; signal that by
     returning NULL_TREE.  */
  if (!is_nondependent_constant_expression (expr))
    return NULL_TREE;
  /* Temporarily leave template processing and substitute.  */
  processing_template_decl_sentinel s;
  return instantiate_non_dependent_expr_internal (expr, tf_error);
}
/* True iff T is a specialization of a variable template.  */
bool
variable_template_specialization_p (tree t)
{
  /* T must be a variable with template info whose template is a
     variable template.  */
  return (VAR_P (t)
	  && DECL_LANG_SPECIFIC (t)
	  && DECL_TEMPLATE_INFO (t)
	  && variable_template_p (DECL_TI_TEMPLATE (t)));
}
/* Return TRUE iff T is a type alias, a TEMPLATE_DECL for an alias
   template declaration, or a TYPE_DECL for an alias declaration.  */
bool
alias_type_or_template_p (tree t)
{
  if (t == NULL_TREE)
    return false;
  /* A TYPE_DECL that was introduced by an alias-declaration.  */
  if (TREE_CODE (t) == TYPE_DECL && TYPE_DECL_ALIAS_P (t))
    return true;
  /* A type whose name is such a TYPE_DECL.  */
  if (TYPE_P (t) && TYPE_NAME (t) && TYPE_DECL_ALIAS_P (TYPE_NAME (t)))
    return true;
  /* Otherwise, an alias template declaration.  */
  return DECL_ALIAS_TEMPLATE_P (t);
}
/* If T is a specialization of an alias template, return it; otherwise return
   NULL_TREE.  If TRANSPARENT_TYPEDEFS is true, look through other aliases.  */
tree
alias_template_specialization_p (const_tree t,
				 bool transparent_typedefs)
{
  /* Only a typedef-variant type can be an alias specialization.  */
  if (!TYPE_P (t) || !typedef_variant_p (t))
    return NULL_TREE;
  /* It's an alias template specialization if it's an alias and its
     TYPE_NAME is a specialization of a primary template.  */
  tree tinfo = TYPE_ALIAS_TEMPLATE_INFO (t);
  if (tinfo && PRIMARY_TEMPLATE_P (TI_TEMPLATE (tinfo)))
    return CONST_CAST_TREE (t);
  if (!transparent_typedefs)
    return NULL_TREE;
  /* Recurse through the underlying type of the typedef.  */
  return alias_template_specialization_p (DECL_ORIGINAL_TYPE
					  (TYPE_NAME (t)),
					  transparent_typedefs);
}
/* An alias template is complex from a SFINAE perspective if a template-id
   using that alias can be ill-formed when the expansion is not, as with
   the void_t template.  We determine this by checking whether the
   expansion for the alias template uses all its template parameters.  */
struct uses_all_template_parms_data
{
  /* Template parameter level we are scanning for.  */
  int level;
  /* seen[i] is set when parameter index i at LEVEL has been found
     in the pattern; the array is allocated by the caller.  */
  bool *seen;
};
/* for_each_template_parm callback: record in DATA_->seen each template
   parameter of DATA_->level that occurs in T.  Always returns 0 so the
   walk continues.  */
static int
uses_all_template_parms_r (tree t, void *data_)
{
  uses_all_template_parms_data *data
    = static_cast<uses_all_template_parms_data *> (data_);
  tree idx = get_template_parm_index (t);
  if (TEMPLATE_PARM_LEVEL (idx) == data->level)
    data->seen[TEMPLATE_PARM_IDX (idx)] = true;
  return 0;
}
/* for_each_template_parm any_fn callback for complex_alias_template_p.
   Returns 1 (stop, "complex") when T is a pack expansion that expands a
   parameter pack from an enclosing level; otherwise 0.  */
static int
complex_pack_expansion_r (tree t, void *data_)
{
  /* An alias template with a pack expansion that expands a pack from the
     enclosing class needs to be considered complex, to avoid confusion with
     the same pack being used as an argument to the alias's own template
     parameter (91966).  */
  if (!PACK_EXPANSION_P (t))
    return 0;
  uses_all_template_parms_data *data
    = static_cast<uses_all_template_parms_data *> (data_);
  for (tree pack = PACK_EXPANSION_PARAMETER_PACKS (t); pack;
       pack = TREE_CHAIN (pack))
    {
      tree parm_pack = TREE_VALUE (pack);
      if (TEMPLATE_PARM_P (parm_pack))
	{
	  int idx, level;
	  template_parm_level_and_index (parm_pack, &level, &idx);
	  /* A pack from an outer level makes the alias complex.  */
	  if (level < data->level)
	    return 1;
	}
    }
  return 0;
}
/* Return true if alias template TMPL is "complex" for SFINAE purposes:
   constrained, or not using every one of its innermost template
   parameters in its expansion, or expanding an enclosing parameter
   pack (see complex_pack_expansion_r).  */
static bool
complex_alias_template_p (const_tree tmpl)
{
  /* A renaming alias isn't complex.  */
  if (get_underlying_template (CONST_CAST_TREE (tmpl)) != tmpl)
    return false;
  /* Any other constrained alias is complex.  */
  if (get_constraints (tmpl))
    return true;
  struct uses_all_template_parms_data data;
  /* Walk the aliased type pattern, marking which innermost parameters
     it mentions.  */
  tree pat = DECL_ORIGINAL_TYPE (DECL_TEMPLATE_RESULT (tmpl));
  tree parms = DECL_TEMPLATE_PARMS (tmpl);
  data.level = TMPL_PARMS_DEPTH (parms);
  int len = TREE_VEC_LENGTH (INNERMOST_TEMPLATE_PARMS (parms));
  data.seen = XALLOCAVEC (bool, len);
  for (int i = 0; i < len; ++i)
    data.seen[i] = false;
  /* complex_pack_expansion_r can force an early "complex" answer.  */
  if (for_each_template_parm (pat, uses_all_template_parms_r, &data,
			      NULL, true, complex_pack_expansion_r))
    return true;
  /* Complex iff some parameter was never used.  */
  for (int i = 0; i < len; ++i)
    if (!data.seen[i])
      return true;
  return false;
}
/* If T is a specialization of a complex alias template with dependent
   template-arguments, return it; otherwise return NULL_TREE.  If T is a
   typedef to such a specialization, return the specialization.  */
tree
dependent_alias_template_spec_p (const_tree t, bool transparent_typedefs)
{
  if (!TYPE_P (t) || !typedef_variant_p (t))
    return NULL_TREE;
  tree tinfo = TYPE_ALIAS_TEMPLATE_INFO (t);
  /* T itself qualifies when its template is a complex alias and any of
     its innermost arguments is dependent.  */
  if (tinfo
      && TEMPLATE_DECL_COMPLEX_ALIAS_P (TI_TEMPLATE (tinfo))
      && (any_dependent_template_arguments_p
	  (INNERMOST_TEMPLATE_ARGS (TI_ARGS (tinfo)))))
    return CONST_CAST_TREE (t);
  if (transparent_typedefs)
    {
      /* Look through the typedef to its underlying type and retry.  */
      tree utype = DECL_ORIGINAL_TYPE (TYPE_NAME (t));
      return dependent_alias_template_spec_p (utype, transparent_typedefs);
    }
  return NULL_TREE;
}
/* Return the number of innermost template parameters in TMPL.  */
static int
num_innermost_template_parms (const_tree tmpl)
{
  return TREE_VEC_LENGTH (INNERMOST_TEMPLATE_PARMS
			  (DECL_TEMPLATE_PARMS (tmpl)));
}
/* Return either TMPL or another template that it is equivalent to under DR
   1286: An alias that just changes the name of a template is equivalent to
   the other template.  Strips such "renaming" aliases repeatedly.  */
static tree
get_underlying_template (tree tmpl)
{
  gcc_assert (TREE_CODE (tmpl) == TEMPLATE_DECL);
  while (DECL_ALIAS_TEMPLATE_P (tmpl))
    {
      /* Determine if the alias is equivalent to an underlying template.  */
      tree orig_type = DECL_ORIGINAL_TYPE (DECL_TEMPLATE_RESULT (tmpl));
      /* The underlying type may have been ill-formed. Don't proceed.  */
      if (!orig_type)
	break;
      tree tinfo = TYPE_TEMPLATE_INFO_MAYBE_ALIAS (orig_type);
      if (!tinfo)
	break;
      tree underlying = TI_TEMPLATE (tinfo);
      /* The underlying template must be a primary template with the
	 same number of innermost parameters.  */
      if (!PRIMARY_TEMPLATE_P (underlying)
	  || (num_innermost_template_parms (tmpl)
	      != num_innermost_template_parms (underlying)))
	break;
      /* The alias must pass its own parameters straight through as the
	 arguments of the underlying template.  */
      tree alias_args = INNERMOST_TEMPLATE_ARGS (generic_targs_for (tmpl));
      if (!comp_template_args (TI_ARGS (tinfo), alias_args))
	break;
      /* If TMPL adds or changes any constraints, it isn't equivalent.  I think
	 it's appropriate to treat a less-constrained alias as equivalent.  */
      if (!at_least_as_constrained (underlying, tmpl))
	break;
      /* Alias is equivalent.  Strip it and repeat.  */
      tmpl = underlying;
    }
  return tmpl;
}
/* Subroutine of convert_nontype_argument.  Converts EXPR to TYPE, which
   must be a reference-to-function or a pointer-to-function type, as specified
   in [temp.arg.nontype]: disambiguate EXPR if it is an overload set,
   and check that the resulting function has external linkage.
   Returns the converted argument, error_mark_node on instantiation
   failure, or NULL_TREE when the argument is rejected (diagnosed if
   COMPLAIN includes tf_error).  */
static tree
convert_nontype_argument_function (tree type, tree expr,
				   tsubst_flags_t complain)
{
  tree fns = expr;
  tree fn, fn_no_ptr;
  linkage_kind linkage;
  /* Resolve the overload set against the target TYPE.  */
  fn = instantiate_type (type, fns, tf_none);
  if (fn == error_mark_node)
    return error_mark_node;
  /* A value-dependent function can't be checked yet; accept as-is.  */
  if (value_dependent_expression_p (fn))
    goto accept;
  /* Peel conversions, address-of, and BASELINK wrappers to reach the
     underlying FUNCTION_DECL.  */
  fn_no_ptr = strip_fnptr_conv (fn);
  if (TREE_CODE (fn_no_ptr) == ADDR_EXPR)
    fn_no_ptr = TREE_OPERAND (fn_no_ptr, 0);
  if (BASELINK_P (fn_no_ptr))
    fn_no_ptr = BASELINK_FUNCTIONS (fn_no_ptr);
  /* [temp.arg.nontype]/1
     A template-argument for a non-type, non-template template-parameter
     shall be one of:
     [...]
     -- the address of an object or function with external [C++11: or
        internal] linkage.  */
  STRIP_ANY_LOCATION_WRAPPER (fn_no_ptr);
  if (TREE_CODE (fn_no_ptr) != FUNCTION_DECL)
    {
      if (complain & tf_error)
	{
	  location_t loc = cp_expr_loc_or_input_loc (expr);
	  error_at (loc, "%qE is not a valid template argument for type %qT",
		    expr, type);
	  if (TYPE_PTR_P (type))
	    inform (loc, "it must be the address of a function "
		    "with external linkage");
	  else
	    inform (loc, "it must be the name of a function with "
		    "external linkage");
	}
      return NULL_TREE;
    }
  /* Linkage requirement: C++11 allows internal linkage but not "no
     linkage"; pre-C++11 requires external linkage.  */
  linkage = decl_linkage (fn_no_ptr);
  if (cxx_dialect >= cxx11 ? linkage == lk_none : linkage != lk_external)
    {
      if (complain & tf_error)
	{
	  location_t loc = cp_expr_loc_or_input_loc (expr);
	  if (cxx_dialect >= cxx11)
	    error_at (loc, "%qE is not a valid template argument for type "
		      "%qT because %qD has no linkage",
		      expr, type, fn_no_ptr);
	  else
	    error_at (loc, "%qE is not a valid template argument for type "
		      "%qT because %qD does not have external linkage",
		      expr, type, fn_no_ptr);
	}
      return NULL_TREE;
    }
 accept:
  /* For a reference parameter, strip a REFERENCE_REF or take the
     address so the representation matches the parameter type.  */
  if (TYPE_REF_P (type))
    {
      if (REFERENCE_REF_P (fn))
	fn = TREE_OPERAND (fn, 0);
      else
	fn = build_address (fn);
    }
  if (!same_type_ignoring_top_level_qualifiers_p (type, TREE_TYPE (fn)))
    fn = build_nop (type, fn);
  return fn;
}
/* Subroutine of convert_nontype_argument.
   Check if EXPR of type TYPE is a valid pointer-to-member constant.
   Emit an error otherwise (when COMPLAIN includes tf_error).
   Returns true when EXPR is acceptable.  */
static bool
check_valid_ptrmem_cst_expr (tree type, tree expr,
			     tsubst_flags_t complain)
{
  /* Keep the original for diagnostics; STRIP_NOPS modifies EXPR.  */
  tree orig_expr = expr;
  STRIP_NOPS (expr);
  if (null_ptr_cst_p (expr))
    return true;
  /* A PTRMEM_CST whose class matches the parameter's class.  */
  if (TREE_CODE (expr) == PTRMEM_CST
      && same_type_p (TYPE_PTRMEM_CLASS_TYPE (type),
		      PTRMEM_CST_CLASS (expr)))
    return true;
  /* C++11 also accepts a null member pointer value.  */
  if (cxx_dialect >= cxx11 && null_member_pointer_value_p (expr))
    return true;
  /* Inside a template, &X::Y may still be a syntactic OFFSET_REF.  */
  if (processing_template_decl
      && TREE_CODE (expr) == ADDR_EXPR
      && TREE_CODE (TREE_OPERAND (expr, 0)) == OFFSET_REF)
    return true;
  if (complain & tf_error)
    {
      location_t loc = cp_expr_loc_or_input_loc (orig_expr);
      error_at (loc, "%qE is not a valid template argument for type %qT",
		orig_expr, type);
      if (TREE_CODE (expr) != PTRMEM_CST)
	inform (loc, "it must be a pointer-to-member of the form %<&X::Y%>");
      else
	inform (loc, "because it is a member of %qT", PTRMEM_CST_CLASS (expr));
    }
  return false;
}
/* Returns TRUE iff the address of OP is value-dependent.
   14.6.2.4 [temp.dep.temp]:
   A non-integral non-type template-argument is dependent if its type is
   dependent or it has either of the following forms
     qualified-id
     & qualified-id
   and contains a nested-name-specifier which specifies a class-name that
   names a dependent type.
   We generalize this to just say that the address of a member of a
   dependent class is value-dependent; the above doesn't cover the
   address of a static data member named with an unqualified-id.  */
static bool
has_value_dependent_address (tree op)
{
  STRIP_ANY_LOCATION_WRAPPER (op);
  /* We could use get_inner_reference here, but there's no need;
     this is only relevant for template non-type arguments, which
     can only be expressed as &id-expression.  */
  if (!DECL_P (op))
    return false;
  tree ctx = CP_DECL_CONTEXT (op);
  return TYPE_P (ctx) && dependent_type_p (ctx);
}
/* The next set of functions are used for providing helpful explanatory
   diagnostics for failed overload resolution.  Their messages should be
   indented by two spaces for consistency with the messages in
   call.c */
/* Unification succeeded: return the success code 0.  EXPLAIN_P is
   ignored, since there is nothing to explain.  */
static int
unify_success (bool /*explain_p*/)
{
  return 0;
}
/* Other failure functions should call this one, to provide a single function
   for setting a breakpoint on.  Returns the generic failure code 1.  */
static int
unify_invalid (bool /*explain_p*/)
{
  return 1;
}
/* Unification failure: template parameter PARM could not be deduced.
   Explain when requested; always report failure.  */
static int
unify_parameter_deduction_failure (bool explain_p, tree parm)
{
  if (!explain_p)
    return unify_invalid (explain_p);
  inform (input_location,
	  " couldn%'t deduce template parameter %qD", parm);
  return unify_invalid (explain_p);
}
/* Unification failure: PARM and ARG differ in cv-qualification.  */
static int
unify_cv_qual_mismatch (bool explain_p, tree parm, tree arg)
{
  if (!explain_p)
    return unify_invalid (explain_p);
  inform (input_location,
	  " types %qT and %qT have incompatible cv-qualifiers",
	  parm, arg);
  return unify_invalid (explain_p);
}
/* Unification failure: types PARM and ARG do not match.  */
static int
unify_type_mismatch (bool explain_p, tree parm, tree arg)
{
  if (!explain_p)
    return unify_invalid (explain_p);
  inform (input_location, " mismatched types %qT and %qT", parm, arg);
  return unify_invalid (explain_p);
}
/* Unification failure: ARG is a parameter pack, but PARM is not.  */
static int
unify_parameter_pack_mismatch (bool explain_p, tree parm, tree arg)
{
  if (explain_p)
    inform (input_location,
	    " template parameter %qD is not a parameter pack, but "
	    "argument %qD is",
	    parm, arg);
  return unify_invalid (explain_p);
}
/* Unification failure: ARG doesn't match pointer-to-member constant PARM.  */
static int
unify_ptrmem_cst_mismatch (bool explain_p, tree parm, tree arg)
{
  if (explain_p)
    inform (input_location,
	    " template argument %qE does not match "
	    "pointer-to-member constant %qE",
	    arg, parm);
  return unify_invalid (explain_p);
}
/* Unification failure: expressions PARM and ARG are not equivalent.  */
static int
unify_expression_unequal (bool explain_p, tree parm, tree arg)
{
  if (!explain_p)
    return unify_invalid (explain_p);
  inform (input_location, " %qE is not equivalent to %qE", parm, arg);
  return unify_invalid (explain_p);
}
/* Unification failure: a parameter pack deduced OLD_ARG in one place and
   NEW_ARG in another.  */
static int
unify_parameter_pack_inconsistent (bool explain_p, tree old_arg, tree new_arg)
{
  if (explain_p)
    inform (input_location,
	    " inconsistent parameter pack deduction with %qT and %qT",
	    old_arg, new_arg);
  return unify_invalid (explain_p);
}
/* Unification failure: PARM was deduced as both FIRST and SECOND.  The
   message distinguishes type parameters from non-type parameters.  */
static int
unify_inconsistency (bool explain_p, tree parm, tree first, tree second)
{
  if (explain_p)
    {
      if (TYPE_P (parm))
	inform (input_location,
		" deduced conflicting types for parameter %qT (%qT and %qT)",
		parm, first, second);
      else
	inform (input_location,
		" deduced conflicting values for non-type parameter "
		"%qE (%qE and %qE)", parm, first, second);
    }
  return unify_invalid (explain_p);
}
/* Unification failure: ARG is a variable-length array type, which cannot
   be a template argument.  */
static int
unify_vla_arg (bool explain_p, tree arg)
{
  if (explain_p)
    inform (input_location,
	    " variable-sized array type %qT is not "
	    "a valid template argument",
	    arg);
  return unify_invalid (explain_p);
}
/* Unification failure: ARG is a member function type, which is not a
   valid template argument.  */
static int
unify_method_type_error (bool explain_p, tree arg)
{
  if (explain_p)
    inform (input_location,
	    " member function type %qT is not a valid template argument",
	    arg);
  return unify_invalid (explain_p);
}
/* Unification failure: the candidate takes WANTED arguments (at least
   WANTED when LEAST_P) but HAVE were provided.  */
static int
unify_arity (bool explain_p, int have, int wanted, bool least_p = false)
{
  if (!explain_p)
    return unify_invalid (explain_p);
  if (least_p)
    inform_n (input_location, wanted,
	      " candidate expects at least %d argument, %d provided",
	      " candidate expects at least %d arguments, %d provided",
	      wanted, have);
  else
    inform_n (input_location, wanted,
	      " candidate expects %d argument, %d provided",
	      " candidate expects %d arguments, %d provided",
	      wanted, have);
  return unify_invalid (explain_p);
}
/* Unification failure: HAVE arguments provided for WANTED parameters,
   HAVE > WANTED.  */
static int
unify_too_many_arguments (bool explain_p, int have, int wanted)
{
  return unify_arity (explain_p, have, wanted);
}
/* Unification failure: HAVE arguments provided for WANTED parameters,
   HAVE < WANTED (a lower bound when LEAST_P).  */
static int
unify_too_few_arguments (bool explain_p, int have, int wanted,
			 bool least_p = false)
{
  return unify_arity (explain_p, have, wanted, least_p);
}
/* Unification failure: ARG (of FROM_TYPE) is not convertible to TO_TYPE.
   The message is placed at ARG's location when available.  */
static int
unify_arg_conversion (bool explain_p, tree to_type,
		      tree from_type, tree arg)
{
  if (explain_p)
    inform (cp_expr_loc_or_input_loc (arg),
	    " cannot convert %qE (type %qT) to type %qT",
	    arg, from_type, to_type);
  return unify_invalid (explain_p);
}
/* Unification failure: ARG is not derived from PARM, or (per R) PARM is
   an ambiguous base of ARG.  */
static int
unify_no_common_base (bool explain_p, enum template_base_result r,
		      tree parm, tree arg)
{
  if (explain_p)
    switch (r)
      {
      case tbr_ambiguous_baseclass:
	inform (input_location, " %qT is an ambiguous base class of %qT",
		parm, arg);
	break;
      default:
	inform (input_location, " %qT is not derived from %qT", arg, parm);
	break;
      }
  return unify_invalid (explain_p);
}
/* Unification failure: a template template argument's parameters conflict
   with other deduced template arguments.  */
static int
unify_inconsistent_template_template_parameters (bool explain_p)
{
  if (explain_p)
    inform (input_location,
	    " template parameters of a template template argument are "
	    "inconsistent with other deduced template arguments");
  return unify_invalid (explain_p);
}
/* Unification failure: cannot deduce a template for PARM from the
   non-template type ARG.  */
static int
unify_template_deduction_failure (bool explain_p, tree parm, tree arg)
{
  if (explain_p)
    inform (input_location,
	    " cannot deduce a template for %qT from non-template type %qT",
	    parm, arg);
  return unify_invalid (explain_p);
}
/* Unification failure: template argument ARG does not match PARM.  */
static int
unify_template_argument_mismatch (bool explain_p, tree parm, tree arg)
{
  if (!explain_p)
    return unify_invalid (explain_p);
  inform (input_location,
	  " template argument %qE does not match %qE", arg, parm);
  return unify_invalid (explain_p);
}
/* True if T is a C++20 template parameter object to store the argument for a
   template parameter of class type.  Such objects are artificial VAR_DECLs
   whose mangled name starts with "_ZTA".  */
bool
template_parm_object_p (const_tree t)
{
  if (TREE_CODE (t) != VAR_DECL || !DECL_ARTIFICIAL (t) || !DECL_NAME (t))
    return false;
  return strncmp (IDENTIFIER_POINTER (DECL_NAME (t)), "_ZTA", 4) == 0;
}
/* Subroutine of convert_nontype_argument, to check whether EXPR, as an
   argument for TYPE, points to an unsuitable object.  Returns true (and
   diagnoses when COMPLAIN includes tf_error) if EXPR refers to something
   that may not be named by a non-type template argument.  Recurses
   through conversions, TARGET_EXPRs and CONSTRUCTOR elements.  */
static bool
invalid_tparm_referent_p (tree type, tree expr, tsubst_flags_t complain)
{
  switch (TREE_CODE (expr))
    {
    CASE_CONVERT:
      /* Look through conversions.  */
      return invalid_tparm_referent_p (type, TREE_OPERAND (expr, 0),
				       complain);
    case TARGET_EXPR:
      /* Check the initializer of a temporary.  */
      return invalid_tparm_referent_p (type, TARGET_EXPR_INITIAL (expr),
				       complain);
    case CONSTRUCTOR:
      {
	/* For an aggregate, every element must be acceptable.  */
	unsigned i; tree elt;
	FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (expr), i, elt)
	  if (invalid_tparm_referent_p (TREE_TYPE (elt), elt, complain))
	    return true;
      }
      break;
    case ADDR_EXPR:
      {
	tree decl = TREE_OPERAND (expr, 0);
	/* Only the address of a variable may be used.  */
	if (!VAR_P (decl))
	  {
	    if (complain & tf_error)
	      error_at (cp_expr_loc_or_input_loc (expr),
			"%qE is not a valid template argument of type %qT "
			"because %qE is not a variable", expr, type, decl);
	    return true;
	  }
	/* Pre-C++11 requires external linkage.  */
	else if (cxx_dialect < cxx11 && !DECL_EXTERNAL_LINKAGE_P (decl))
	  {
	    if (complain & tf_error)
	      error_at (cp_expr_loc_or_input_loc (expr),
			"%qE is not a valid template argument of type %qT "
			"in C++98 because %qD does not have external linkage",
			expr, type, decl);
	    return true;
	  }
	/* C++11..14 allow internal linkage, but not "no linkage".  */
	else if ((cxx_dialect >= cxx11 && cxx_dialect < cxx17)
		 && decl_linkage (decl) == lk_none)
	  {
	    if (complain & tf_error)
	      error_at (cp_expr_loc_or_input_loc (expr),
			"%qE is not a valid template argument of type %qT "
			"because %qD has no linkage", expr, type, decl);
	    return true;
	  }
	/* C++17: For a non-type template-parameter of reference or pointer
	   type, the value of the constant expression shall not refer to (or
	   for a pointer type, shall not be the address of):
	   * a subobject (4.5),
	   * a temporary object (15.2),
	   * a string literal (5.13.5),
	   * the result of a typeid expression (8.2.8), or
	   * a predefined __func__ variable (11.4.1).  */
	else if (DECL_ARTIFICIAL (decl))
	  {
	    if (complain & tf_error)
	      error ("the address of %qD is not a valid template argument",
		     decl);
	    return true;
	  }
	/* The variable's type must match the referenced type; otherwise
	   EXPR names a (base or member) subobject.  */
	else if (!same_type_ignoring_top_level_qualifiers_p
		 (strip_array_types (TREE_TYPE (type)),
		  strip_array_types (TREE_TYPE (decl))))
	  {
	    if (complain & tf_error)
	      error ("the address of the %qT subobject of %qD is not a "
		     "valid template argument", TREE_TYPE (type), decl);
	    return true;
	  }
	/* The variable must have static storage duration.  */
	else if (!TREE_STATIC (decl) && !DECL_EXTERNAL (decl))
	  {
	    if (complain & tf_error)
	      error ("the address of %qD is not a valid template argument "
		     "because it does not have static storage duration",
		     decl);
	    return true;
	  }
      }
      break;
    default:
      if (!INDIRECT_TYPE_P (type))
	/* We're only concerned about pointers and references here.  */;
      else if (cxx_dialect >= cxx11 && integer_zerop (expr))
	/* Null pointer values are OK in C++11.  */;
      else
	{
	  /* Anything else isn't an address at all.  */
	  if (VAR_P (expr))
	    {
	      if (complain & tf_error)
		error ("%qD is not a valid template argument "
		       "because %qD is a variable, not the address of "
		       "a variable", expr, expr);
	      return true;
	    }
	  else
	    {
	      if (complain & tf_error)
		error ("%qE is not a valid template argument for %qT "
		       "because it is not the address of a variable",
		       expr, type);
	      return true;
	    }
	}
    }
  return false;
}
/* The template arguments corresponding to template parameter objects of types
   that contain pointers to members.  Populated by get_template_parm_object
   and read back by tparm_object_argument.  GTY-rooted so entries survive
   garbage collection.  */
static GTY(()) hash_map<tree, tree> *tparm_obj_values;
/* Return a VAR_DECL for the C++20 template parameter object corresponding to
   template argument EXPR.  Objects are uniqued by their mangled name, so an
   equal argument yields the same VAR_DECL.  Returns error_mark_node when
   EXPR is not a suitable constant (diagnosed under tf_error).  */
static tree
get_template_parm_object (tree expr, tsubst_flags_t complain)
{
  if (TREE_CODE (expr) == TARGET_EXPR)
    expr = TARGET_EXPR_INITIAL (expr);
  /* The argument must be a constant expression.  */
  if (!TREE_CONSTANT (expr))
    {
      if ((complain & tf_error)
	  && require_rvalue_constant_expression (expr))
	cxx_constant_value (expr);
      return error_mark_node;
    }
  /* The constant must not refer to an unsuitable object.  */
  if (invalid_tparm_referent_p (TREE_TYPE (expr), expr, complain))
    return error_mark_node;
  /* Unique the object by its mangled name.  */
  tree name = mangle_template_parm_object (expr);
  tree decl = get_global_binding (name);
  if (decl)
    return decl;
  /* Build a new constexpr, const, static variable in the global
     namespace with COMDAT linkage.  */
  tree type = cp_build_qualified_type (TREE_TYPE (expr), TYPE_QUAL_CONST);
  decl = create_temporary_var (type);
  TREE_STATIC (decl) = true;
  DECL_DECLARED_CONSTEXPR_P (decl) = true;
  TREE_READONLY (decl) = true;
  DECL_NAME (decl) = name;
  SET_DECL_ASSEMBLER_NAME (decl, name);
  DECL_CONTEXT (decl) = global_namespace;
  comdat_linkage (decl);
  if (!zero_init_p (type))
    {
      /* If EXPR contains any PTRMEM_CST, they will get clobbered by
	 lower_var_init before we're done mangling.  So store the original
	 value elsewhere.  */
      tree copy = unshare_constructor (expr);
      hash_map_safe_put<hm_ggc> (tparm_obj_values, decl, copy);
    }
  pushdecl_top_level_and_finish (decl, expr);
  return decl;
}
/* Return the actual template argument corresponding to template parameter
   object VAR.  */
tree
tparm_object_argument (tree var)
{
  /* Non-zero-init types had their original value stashed in
     tparm_obj_values (see get_template_parm_object); otherwise the
     initializer itself is the argument.  */
  if (!zero_init_p (TREE_TYPE (var)))
    return *(tparm_obj_values->get (var));
  return DECL_INITIAL (var);
}
/* Attempt to convert the non-type template parameter EXPR to the
indicated TYPE. If the conversion is successful, return the
converted value. If the conversion is unsuccessful, return
NULL_TREE if we issued an error message, or error_mark_node if we
did not. We issue error messages for out-and-out bad template
parameters, but not simply because the conversion failed, since we
might be just trying to do argument deduction. Both TYPE and EXPR
must be non-dependent.
The conversion follows the special rules described in
[temp.arg.nontype], and it is much more strict than an implicit
conversion.
This function is called twice for each template argument (see
lookup_template_class for a more accurate description of this
problem). This means that we need to handle expressions which
are not valid in a C++ source, but can be created from the
first call (for instance, casts to perform conversions). These
hacks can go away after we fix the double coercion problem. */
static tree
convert_nontype_argument (tree type, tree expr, tsubst_flags_t complain)
{
tree expr_type;
location_t loc = cp_expr_loc_or_input_loc (expr);
/* Detect immediately string literals as invalid non-type argument.
This special-case is not needed for correctness (we would easily
catch this later), but only to provide better diagnostic for this
common user mistake. As suggested by DR 100, we do not mention
linkage issues in the diagnostic as this is not the point. */
if (TREE_CODE (expr) == STRING_CST && !CLASS_TYPE_P (type))
{
if (complain & tf_error)
error ("%qE is not a valid template argument for type %qT "
"because string literals can never be used in this context",
expr, type);
return NULL_TREE;
}
/* Add the ADDR_EXPR now for the benefit of
value_dependent_expression_p. */
if (TYPE_PTROBV_P (type)
&& TREE_CODE (TREE_TYPE (expr)) == ARRAY_TYPE)
{
expr = decay_conversion (expr, complain);
if (expr == error_mark_node)
return error_mark_node;
}
/* If we are in a template, EXPR may be non-dependent, but still
have a syntactic, rather than semantic, form. For example, EXPR
might be a SCOPE_REF, rather than the VAR_DECL to which the
SCOPE_REF refers. Preserving the qualifying scope is necessary
so that access checking can be performed when the template is
instantiated -- but here we need the resolved form so that we can
convert the argument. */
bool non_dep = false;
if (TYPE_REF_OBJ_P (type)
&& has_value_dependent_address (expr))
/* If we want the address and it's value-dependent, don't fold. */;
else if (processing_template_decl
&& is_nondependent_constant_expression (expr))
non_dep = true;
if (error_operand_p (expr))
return error_mark_node;
expr_type = TREE_TYPE (expr);
/* If the argument is non-dependent, perform any conversions in
non-dependent context as well. */
processing_template_decl_sentinel s (non_dep);
if (non_dep)
expr = instantiate_non_dependent_expr_internal (expr, complain);
const bool val_dep_p = value_dependent_expression_p (expr);
if (val_dep_p)
expr = canonicalize_expr_argument (expr, complain);
/* 14.3.2/5: The null pointer{,-to-member} conversion is applied
to a non-type argument of "nullptr". */
if (NULLPTR_TYPE_P (expr_type) && TYPE_PTR_OR_PTRMEM_P (type))
expr = fold_simple (convert (type, expr));
/* In C++11, integral or enumeration non-type template arguments can be
arbitrary constant expressions. Pointer and pointer to
member arguments can be general constant expressions that evaluate
to a null value, but otherwise still need to be of a specific form. */
if (cxx_dialect >= cxx11)
{
if (TREE_CODE (expr) == PTRMEM_CST && TYPE_PTRMEM_P (type))
/* A PTRMEM_CST is already constant, and a valid template
argument for a parameter of pointer to member type, we just want
to leave it in that form rather than lower it to a
CONSTRUCTOR. */;
else if (INTEGRAL_OR_ENUMERATION_TYPE_P (type)
|| cxx_dialect >= cxx17)
{
/* C++17: A template-argument for a non-type template-parameter shall
be a converted constant expression (8.20) of the type of the
template-parameter. */
expr = build_converted_constant_expr (type, expr, complain);
if (expr == error_mark_node)
/* Make sure we return NULL_TREE only if we have really issued
an error, as described above. */
return (complain & tf_error) ? NULL_TREE : error_mark_node;
else if (TREE_CODE (expr) == IMPLICIT_CONV_EXPR)
{
IMPLICIT_CONV_EXPR_NONTYPE_ARG (expr) = true;
return expr;
}
expr = maybe_constant_value (expr, NULL_TREE,
/*manifestly_const_eval=*/true);
expr = convert_from_reference (expr);
}
else if (TYPE_PTR_OR_PTRMEM_P (type))
{
tree folded = maybe_constant_value (expr, NULL_TREE,
/*manifestly_const_eval=*/true);
if (TYPE_PTR_P (type) ? integer_zerop (folded)
: null_member_pointer_value_p (folded))
expr = folded;
}
}
if (TYPE_REF_P (type))
expr = mark_lvalue_use (expr);
else
expr = mark_rvalue_use (expr);
/* HACK: Due to double coercion, we can get a
NOP_EXPR<REFERENCE_TYPE>(ADDR_EXPR<POINTER_TYPE> (arg)) here,
which is the tree that we built on the first call (see
below when coercing to reference to object or to reference to
function). We just strip everything and get to the arg.
See g++.old-deja/g++.oliva/template4.C and g++.dg/template/nontype9.C
for examples. */
if (TYPE_REF_OBJ_P (type) || TYPE_REFFN_P (type))
{
tree probe_type, probe = expr;
if (REFERENCE_REF_P (probe))
probe = TREE_OPERAND (probe, 0);
probe_type = TREE_TYPE (probe);
if (TREE_CODE (probe) == NOP_EXPR)
{
/* ??? Maybe we could use convert_from_reference here, but we
would need to relax its constraints because the NOP_EXPR
could actually change the type to something more cv-qualified,
and this is not folded by convert_from_reference. */
tree addr = TREE_OPERAND (probe, 0);
if (TYPE_REF_P (probe_type)
&& TREE_CODE (addr) == ADDR_EXPR
&& TYPE_PTR_P (TREE_TYPE (addr))
&& (same_type_ignoring_top_level_qualifiers_p
(TREE_TYPE (probe_type),
TREE_TYPE (TREE_TYPE (addr)))))
{
expr = TREE_OPERAND (addr, 0);
expr_type = TREE_TYPE (probe_type);
}
}
}
/* [temp.arg.nontype]/5, bullet 1
For a non-type template-parameter of integral or enumeration type,
integral promotions (_conv.prom_) and integral conversions
(_conv.integral_) are applied. */
if (INTEGRAL_OR_ENUMERATION_TYPE_P (type))
{
if (cxx_dialect < cxx11)
{
tree t = build_converted_constant_expr (type, expr, complain);
t = maybe_constant_value (t);
if (t != error_mark_node)
expr = t;
}
if (!same_type_ignoring_top_level_qualifiers_p (type, TREE_TYPE (expr)))
return error_mark_node;
/* Notice that there are constant expressions like '4 % 0' which
do not fold into integer constants. */
if (TREE_CODE (expr) != INTEGER_CST && !val_dep_p)
{
if (complain & tf_error)
{
int errs = errorcount, warns = warningcount + werrorcount;
if (!require_potential_constant_expression (expr))
expr = error_mark_node;
else
expr = cxx_constant_value (expr);
if (errorcount > errs || warningcount + werrorcount > warns)
inform (loc, "in template argument for type %qT", type);
if (expr == error_mark_node)
return NULL_TREE;
/* else cxx_constant_value complained but gave us
a real constant, so go ahead. */
if (TREE_CODE (expr) != INTEGER_CST)
{
/* Some assemble time constant expressions like
(intptr_t)&&lab1 - (intptr_t)&&lab2 or
4 + (intptr_t)&&var satisfy reduced_constant_expression_p
as we can emit them into .rodata initializers of
variables, yet they can't fold into an INTEGER_CST at
compile time. Refuse them here. */
gcc_checking_assert (reduced_constant_expression_p (expr));
error_at (loc, "template argument %qE for type %qT not "
"a constant integer", expr, type);
return NULL_TREE;
}
}
else
return NULL_TREE;
}
/* Avoid typedef problems. */
if (TREE_TYPE (expr) != type)
expr = fold_convert (type, expr);
}
/* [temp.arg.nontype]/5, bullet 2
For a non-type template-parameter of type pointer to object,
qualification conversions (_conv.qual_) and the array-to-pointer
conversion (_conv.array_) are applied. */
else if (TYPE_PTROBV_P (type))
{
tree decayed = expr;
/* Look through any NOP_EXPRs around an ADDR_EXPR, whether they come from
decay_conversion or an explicit cast. If it's a problematic cast,
we'll complain about it below. */
if (TREE_CODE (expr) == NOP_EXPR)
{
tree probe = expr;
STRIP_NOPS (probe);
if (TREE_CODE (probe) == ADDR_EXPR
&& TYPE_PTR_P (TREE_TYPE (probe)))
{
expr = probe;
expr_type = TREE_TYPE (expr);
}
}
/* [temp.arg.nontype]/1 (TC1 version, DR 49):
A template-argument for a non-type, non-template template-parameter
shall be one of: [...]
-- the name of a non-type template-parameter;
-- the address of an object or function with external linkage, [...]
expressed as "& id-expression" where the & is optional if the name
refers to a function or array, or if the corresponding
template-parameter is a reference.
Here, we do not care about functions, as they are invalid anyway
for a parameter of type pointer-to-object. */
if (val_dep_p)
/* Non-type template parameters are OK. */
;
else if (cxx_dialect >= cxx11 && integer_zerop (expr))
/* Null pointer values are OK in C++11. */;
else if (TREE_CODE (expr) != ADDR_EXPR
&& !INDIRECT_TYPE_P (expr_type))
/* Other values, like integer constants, might be valid
non-type arguments of some other type. */
return error_mark_node;
else if (invalid_tparm_referent_p (type, expr, complain))
return NULL_TREE;
expr = decayed;
expr = perform_qualification_conversions (type, expr);
if (expr == error_mark_node)
return error_mark_node;
}
/* [temp.arg.nontype]/5, bullet 3
For a non-type template-parameter of type reference to object, no
conversions apply. The type referred to by the reference may be more
cv-qualified than the (otherwise identical) type of the
template-argument. The template-parameter is bound directly to the
template-argument, which must be an lvalue. */
else if (TYPE_REF_OBJ_P (type))
{
if (!same_type_ignoring_top_level_qualifiers_p (TREE_TYPE (type),
expr_type))
return error_mark_node;
if (!at_least_as_qualified_p (TREE_TYPE (type), expr_type))
{
if (complain & tf_error)
error ("%qE is not a valid template argument for type %qT "
"because of conflicts in cv-qualification", expr, type);
return NULL_TREE;
}
if (!lvalue_p (expr))
{
if (complain & tf_error)
error ("%qE is not a valid template argument for type %qT "
"because it is not an lvalue", expr, type);
return NULL_TREE;
}
/* [temp.arg.nontype]/1
A template-argument for a non-type, non-template template-parameter
shall be one of: [...]
-- the address of an object or function with external linkage. */
if (INDIRECT_REF_P (expr)
&& TYPE_REF_OBJ_P (TREE_TYPE (TREE_OPERAND (expr, 0))))
{
expr = TREE_OPERAND (expr, 0);
if (DECL_P (expr))
{
if (complain & tf_error)
error ("%q#D is not a valid template argument for type %qT "
"because a reference variable does not have a constant "
"address", expr, type);
return NULL_TREE;
}
}
if (TYPE_REF_OBJ_P (TREE_TYPE (expr)) && val_dep_p)
/* OK, dependent reference. We don't want to ask whether a DECL is
itself value-dependent, since what we want here is its address. */;
else
{
expr = build_address (expr);
if (invalid_tparm_referent_p (type, expr, complain))
return NULL_TREE;
}
if (!same_type_p (type, TREE_TYPE (expr)))
expr = build_nop (type, expr);
}
/* [temp.arg.nontype]/5, bullet 4
For a non-type template-parameter of type pointer to function, only
the function-to-pointer conversion (_conv.func_) is applied. If the
template-argument represents a set of overloaded functions (or a
pointer to such), the matching function is selected from the set
(_over.over_). */
else if (TYPE_PTRFN_P (type))
{
/* If the argument is a template-id, we might not have enough
context information to decay the pointer. */
if (!type_unknown_p (expr_type))
{
expr = decay_conversion (expr, complain);
if (expr == error_mark_node)
return error_mark_node;
}
if (cxx_dialect >= cxx11 && integer_zerop (expr))
/* Null pointer values are OK in C++11. */
return perform_qualification_conversions (type, expr);
expr = convert_nontype_argument_function (type, expr, complain);
if (!expr || expr == error_mark_node)
return expr;
}
/* [temp.arg.nontype]/5, bullet 5
For a non-type template-parameter of type reference to function, no
conversions apply. If the template-argument represents a set of
overloaded functions, the matching function is selected from the set
(_over.over_). */
else if (TYPE_REFFN_P (type))
{
if (TREE_CODE (expr) == ADDR_EXPR)
{
if (complain & tf_error)
{
error ("%qE is not a valid template argument for type %qT "
"because it is a pointer", expr, type);
inform (input_location, "try using %qE instead",
TREE_OPERAND (expr, 0));
}
return NULL_TREE;
}
expr = convert_nontype_argument_function (type, expr, complain);
if (!expr || expr == error_mark_node)
return expr;
}
/* [temp.arg.nontype]/5, bullet 6
For a non-type template-parameter of type pointer to member function,
no conversions apply. If the template-argument represents a set of
overloaded member functions, the matching member function is selected
from the set (_over.over_). */
else if (TYPE_PTRMEMFUNC_P (type))
{
expr = instantiate_type (type, expr, tf_none);
if (expr == error_mark_node)
return error_mark_node;
/* [temp.arg.nontype] bullet 1 says the pointer to member
expression must be a pointer-to-member constant. */
if (!val_dep_p
&& !check_valid_ptrmem_cst_expr (type, expr, complain))
return NULL_TREE;
/* Repeated conversion can't deal with a conversion that turns PTRMEM_CST
into a CONSTRUCTOR, so build up a new PTRMEM_CST instead. */
if (fnptr_conv_p (type, TREE_TYPE (expr)))
expr = make_ptrmem_cst (type, PTRMEM_CST_MEMBER (expr));
}
/* [temp.arg.nontype]/5, bullet 7
For a non-type template-parameter of type pointer to data member,
qualification conversions (_conv.qual_) are applied. */
else if (TYPE_PTRDATAMEM_P (type))
{
/* [temp.arg.nontype] bullet 1 says the pointer to member
expression must be a pointer-to-member constant. */
if (!val_dep_p
&& !check_valid_ptrmem_cst_expr (type, expr, complain))
return NULL_TREE;
expr = perform_qualification_conversions (type, expr);
if (expr == error_mark_node)
return expr;
}
else if (NULLPTR_TYPE_P (type))
{
if (!NULLPTR_TYPE_P (TREE_TYPE (expr)))
{
if (complain & tf_error)
error ("%qE is not a valid template argument for type %qT "
"because it is of type %qT", expr, type, TREE_TYPE (expr));
return NULL_TREE;
}
return expr;
}
else if (CLASS_TYPE_P (type))
{
/* Replace the argument with a reference to the corresponding template
parameter object. */
if (!val_dep_p)
expr = get_template_parm_object (expr, complain);
if (expr == error_mark_node)
return NULL_TREE;
}
/* A template non-type parameter must be one of the above. */
else
gcc_unreachable ();
/* Sanity check: did we actually convert the argument to the
right type? */
gcc_assert (same_type_ignoring_top_level_qualifiers_p
(type, TREE_TYPE (expr)));
return convert_from_reference (expr);
}
/* Subroutine of coerce_template_template_parms, which returns 1 if
   PARM_PARM and ARG_PARM match using the rule for the template
   parameters of template template parameters.  Both PARM and ARG are
   template parameters; the rest of the arguments are the same as for
   coerce_template_template_parms.
 */
static int
coerce_template_template_parm (tree parm,
                              tree arg,
                              tsubst_flags_t complain,
                              tree in_decl,
                              tree outer_args)
{
  /* A missing or erroneous parameter or argument never matches.  */
  if (arg == NULL_TREE || error_operand_p (arg)
      || parm == NULL_TREE || error_operand_p (parm))
    return 0;

  /* The two parameters must be of the same kind (type, template,
     or non-type).  */
  if (TREE_CODE (arg) != TREE_CODE (parm))
    return 0;

  switch (TREE_CODE (parm))
    {
    case TEMPLATE_DECL:
      /* We encounter instantiations of templates like
	 template <template <template <class> class> class TT>
	 class C;  */
      {
	/* Recursively match the template parameter lists of the two
	   template template parameters.  */
	tree parmparm = DECL_INNERMOST_TEMPLATE_PARMS (parm);
	tree argparm = DECL_INNERMOST_TEMPLATE_PARMS (arg);

	if (!coerce_template_template_parms
	    (parmparm, argparm, complain, in_decl, outer_args))
	  return 0;
      }
      /* Fall through.  */

    case TYPE_DECL:
      if (TEMPLATE_TYPE_PARAMETER_PACK (TREE_TYPE (arg))
	  && !TEMPLATE_TYPE_PARAMETER_PACK (TREE_TYPE (parm)))
	/* Argument is a parameter pack but parameter is not.  */
	return 0;
      break;

    case PARM_DECL:
      /* The tsubst call is used to handle cases such as

	   template <int> class C {};
	   template <class T, template <T> class TT> class D {};
	   D<int, C> d;

	 i.e. the parameter list of TT depends on earlier parameters.  */
      if (!uses_template_parms (TREE_TYPE (arg)))
	{
	  /* Substitute the enclosing template arguments into the
	     parameter's type; once both types are non-dependent they
	     must match exactly.  */
	  tree t = tsubst (TREE_TYPE (parm), outer_args, complain, in_decl);
	  if (!uses_template_parms (t)
	      && !same_type_p (t, TREE_TYPE (arg)))
	    return 0;
	}

      if (TEMPLATE_PARM_PARAMETER_PACK (DECL_INITIAL (arg))
	  && !TEMPLATE_PARM_PARAMETER_PACK (DECL_INITIAL (parm)))
	/* Argument is a parameter pack but parameter is not.  */
	return 0;
      break;

    default:
      gcc_unreachable ();
    }

  return 1;
}
/* Coerce template argument list ARGLIST for use with template
   template-parameter TEMPL.  Returns the coerced argument list, or
   error_mark_node on failure.  */
static tree
coerce_template_args_for_ttp (tree templ, tree arglist,
			      tsubst_flags_t complain)
{
  /* Consider an example where a template template parameter declared as

     template <class T, class U = std::allocator<T> > class TT

     The template parameter level of T and U are one level larger than
     of TT.  To proper process the default argument of U, say when an
     instantiation `TT<int>' is seen, we need to build the full
     arguments containing {int} as the innermost level.  Outer levels,
     available when not appearing as default template argument, can be
     obtained from the arguments of the enclosing template.

     Suppose that TT is later substituted with std::vector.  The above
     instantiation is `TT<int, std::allocator<T> >' with TT at
     level 1, and T at level 2, while the template arguments at level 1
     becomes {std::vector} and the inner level 2 is {int}.  */

  tree outer = DECL_CONTEXT (templ);
  if (outer)
    /* The enclosing arguments come from TEMPL's context.  */
    outer = generic_targs_for (outer);
  else if (current_template_parms)
    {
      /* This is an argument of the current template, so we haven't set
	 DECL_CONTEXT yet.  */
      tree relevant_template_parms;

      /* Parameter levels that are greater than the level of the given
	 template template parm are irrelevant.  Walk out until the
	 depth matches TEMPL's own level.  */
      relevant_template_parms = current_template_parms;
      while (TMPL_PARMS_DEPTH (relevant_template_parms)
	     != TEMPLATE_TYPE_LEVEL (TREE_TYPE (templ)))
	relevant_template_parms = TREE_CHAIN (relevant_template_parms);

      outer = template_parms_to_args (relevant_template_parms);
    }

  if (outer)
    arglist = add_to_template_args (outer, arglist);

  /* Coerce against TEMPL's innermost parameter list, filling in
     default arguments as needed.  */
  tree parmlist = DECL_INNERMOST_TEMPLATE_PARMS (templ);
  return coerce_template_parms (parmlist, arglist, templ,
				complain,
				/*require_all_args=*/true,
				/*use_default_args=*/true);
}
/* A cache of template template parameters with match-all default
   arguments.  */
static GTY((deletable)) hash_map<tree,tree> *defaulted_ttp_cache;

/* T is a bound template template-parameter.  Copy its arguments into default
   arguments of the template template-parameter's template parameters.
   The original parameter is left untouched; a (cached) modified copy is
   returned.  */
static tree
add_defaults_to_ttp (tree otmpl)
{
  /* Reuse a previously built defaulted copy if one exists.  */
  if (tree *c = hash_map_safe_get (defaulted_ttp_cache, otmpl))
    return *c;

  /* Clone both the TEMPLATE_DECL and its TEMPLATE_TEMPLATE_PARM type,
     wiring the copies to each other so they form an independent pair.  */
  tree ntmpl = copy_node (otmpl);
  tree ntype = copy_node (TREE_TYPE (otmpl));
  TYPE_STUB_DECL (ntype) = TYPE_NAME (ntype) = ntmpl;
  TYPE_MAIN_VARIANT (ntype) = ntype;
  TYPE_POINTER_TO (ntype) = TYPE_REFERENCE_TO (ntype) = NULL_TREE;
  TYPE_NAME (ntype) = ntmpl;
  SET_TYPE_STRUCTURAL_EQUALITY (ntype);

  /* Give the copied type its own TEMPLATE_PARM_INDEX pointing at the
     new decl.  */
  tree idx = TEMPLATE_TYPE_PARM_INDEX (ntype)
    = copy_node (TEMPLATE_TYPE_PARM_INDEX (ntype));
  TEMPLATE_PARM_DECL (idx) = ntmpl;
  TREE_TYPE (ntmpl) = TREE_TYPE (idx) = ntype;

  /* Copy the innermost parameter list so we can overwrite the default
     arguments without affecting the original.  */
  tree oparms = DECL_TEMPLATE_PARMS (otmpl);
  tree parms = DECL_TEMPLATE_PARMS (ntmpl) = copy_node (oparms);
  TREE_CHAIN (parms) = TREE_CHAIN (oparms);
  tree vec = TREE_VALUE (parms) = copy_node (TREE_VALUE (parms));
  for (int i = 0; i < TREE_VEC_LENGTH (vec); ++i)
    {
      tree o = TREE_VEC_ELT (vec, i);
      if (!template_parameter_pack_p (TREE_VALUE (o)))
	{
	  /* Each non-pack parameter gets the match-all default
	     argument (any_targ_node).  */
	  tree n = TREE_VEC_ELT (vec, i) = copy_node (o);
	  TREE_PURPOSE (n) = any_targ_node;
	}
    }

  hash_map_safe_put<hm_ggc> (defaulted_ttp_cache, otmpl, ntmpl);
  return ntmpl;
}
/* ARG is a bound potential template template-argument, and PARGS is a list
   of arguments for the corresponding template template-parameter.  Adjust
   PARGS as appropriate for application to ARG's template, and if ARG is a
   BOUND_TEMPLATE_TEMPLATE_PARM, possibly adjust it to add default template
   arguments to the template template parameter.  */
static tree
coerce_ttp_args_for_tta (tree& arg, tree pargs, tsubst_flags_t complain)
{
  ++processing_template_decl;

  tree tmpl = TYPE_TI_TEMPLATE (arg);
  tree result;

  if (!DECL_TEMPLATE_TEMPLATE_PARM_P (tmpl))
    {
      /* ARG's template is an ordinary template: coerce PARGS directly
	 against its innermost parameter list, allowing defaults.  */
      tree tparms
	= INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (tmpl));
      result = coerce_template_parms (tparms, pargs, tmpl, complain,
				      /*require_all*/true,
				      /*use_default*/true);
    }
  else
    {
      /* When comparing two template template-parameters in partial ordering,
	 rewrite the one currently being used as an argument to have default
	 arguments for all parameters.  */
      tmpl = add_defaults_to_ttp (tmpl);
      result = coerce_template_args_for_ttp (tmpl, pargs, complain);
      if (result != error_mark_node)
	arg = bind_template_template_parm (TREE_TYPE (tmpl),
					   TYPE_TI_ARGS (arg));
    }

  --processing_template_decl;
  return result;
}
/* Subroutine of unify for the case when PARM is a
   BOUND_TEMPLATE_TEMPLATE_PARM.  Returns nonzero on deduction failure
   and zero on success, following unify's convention.  ARG may be
   rewritten in place by the P0522 adjustment.  */
static int
unify_bound_ttp_args (tree tparms, tree targs, tree parm, tree& arg,
		      bool explain_p)
{
  tree parmvec = TYPE_TI_ARGS (parm);
  tree argvec = INNERMOST_TEMPLATE_ARGS (TYPE_TI_ARGS (arg));

  /* The template template parm might be variadic and the argument
     not, so flatten both argument lists.  */
  parmvec = expand_template_argument_pack (parmvec);
  argvec = expand_template_argument_pack (argvec);

  if (flag_new_ttp)
    {
      /* In keeping with P0522R0, adjust P's template arguments
	 to apply to A's template; then flatten it again.  */
      tree nparmvec = coerce_ttp_args_for_tta (arg, parmvec, tf_none);
      nparmvec = expand_template_argument_pack (nparmvec);

      if (unify (tparms, targs, nparmvec, argvec,
		 UNIFY_ALLOW_NONE, explain_p))
	return 1;

      /* If the P0522 adjustment eliminated a pack expansion, deduce
	 empty packs.  (We are already inside the flag_new_ttp branch,
	 so there is no need to re-test the flag here.)  */
      if (TREE_VEC_LENGTH (nparmvec) < TREE_VEC_LENGTH (parmvec)
	  && unify_pack_expansion (tparms, targs, parmvec, argvec,
				   DEDUCE_EXACT, /*sub*/true, explain_p))
	return 1;
    }
  else
    {
      /* Deduce arguments T, i from TT<T> or TT<i>.
	 We check each element of PARMVEC and ARGVEC individually
	 rather than the whole TREE_VEC since they can have
	 different number of elements, which is allowed under N2555.  */

      int len = TREE_VEC_LENGTH (parmvec);

      /* Check if the parameters end in a pack, making them
	 variadic.  */
      int parm_variadic_p = 0;
      if (len > 0
	  && PACK_EXPANSION_P (TREE_VEC_ELT (parmvec, len - 1)))
	parm_variadic_p = 1;

      for (int i = 0; i < len - parm_variadic_p; ++i)
	/* If the template argument list of P contains a pack
	   expansion that is not the last template argument, the
	   entire template argument list is a non-deduced
	   context.  */
	if (PACK_EXPANSION_P (TREE_VEC_ELT (parmvec, i)))
	  return unify_success (explain_p);

      if (TREE_VEC_LENGTH (argvec) < len - parm_variadic_p)
	return unify_too_few_arguments (explain_p,
					TREE_VEC_LENGTH (argvec), len);

      /* Unify the fixed (non-pack) parameter positions pairwise.  */
      for (int i = 0; i < len - parm_variadic_p; ++i)
	if (unify (tparms, targs,
		   TREE_VEC_ELT (parmvec, i),
		   TREE_VEC_ELT (argvec, i),
		   UNIFY_ALLOW_NONE, explain_p))
	  return 1;

      /* Any remaining arguments are absorbed by the trailing pack.  */
      if (parm_variadic_p
	  && unify_pack_expansion (tparms, targs,
				   parmvec, argvec,
				   DEDUCE_EXACT,
				   /*subr=*/true, explain_p))
	return 1;
    }

  return 0;
}
/* Return 1 if PARM_PARMS and ARG_PARMS matches using rule for
   template template parameters.  Both PARM_PARMS and ARG_PARMS are
   vectors of TREE_LIST nodes containing TYPE_DECL, TEMPLATE_DECL
   or PARM_DECL.

   Consider the example:
     template <class T> class A;
     template<template <class U> class TT> class B;

   For B<A>, PARM_PARMS are the parameters to TT, while ARG_PARMS are
   the parameters to A, and OUTER_ARGS contains A.  */
static int
coerce_template_template_parms (tree parm_parms,
                               tree arg_parms,
                               tsubst_flags_t complain,
                               tree in_decl,
                               tree outer_args)
{
  int nparms, nargs, i;
  tree parm, arg;
  int variadic_p = 0;

  gcc_assert (TREE_CODE (parm_parms) == TREE_VEC);
  gcc_assert (TREE_CODE (arg_parms) == TREE_VEC);

  nparms = TREE_VEC_LENGTH (parm_parms);
  nargs = TREE_VEC_LENGTH (arg_parms);

  if (flag_new_ttp)
    {
      /* P0522R0: A template template-parameter P is at least as specialized as
	 a template template-argument A if, given the following rewrite to two
	 function templates, the function template corresponding to P is at
	 least as specialized as the function template corresponding to A
	 according to the partial ordering rules for function templates
	 ([temp.func.order]).  Given an invented class template X with the
	 template parameter list of A (including default arguments):

	 * Each of the two function templates has the same template parameters,
	 respectively, as P or A.

	 * Each function template has a single function parameter whose type is
	 a specialization of X with template arguments corresponding to the
	 template parameters from the respective function template where, for
	 each template parameter PP in the template parameter list of the
	 function template, a corresponding template argument AA is formed.  If
	 PP declares a parameter pack, then AA is the pack expansion
	 PP... ([temp.variadic]); otherwise, AA is the id-expression PP.

	 If the rewrite produces an invalid type, then P is not at least as
	 specialized as A.  */

      /* So coerce P's args to apply to A's parms, and then deduce between A's
	 args and the converted args.  If that succeeds, A is at least as
	 specialized as P, so they match.*/
      tree pargs = template_parms_level_to_args (parm_parms);
      pargs = add_outermost_template_args (outer_args, pargs);
      ++processing_template_decl;
      pargs = coerce_template_parms (arg_parms, pargs, NULL_TREE, tf_none,
				     /*require_all*/true, /*use_default*/true);
      --processing_template_decl;
      if (pargs != error_mark_node)
	{
	  /* unify returns 0 on success, so a zero result here means
	     the P0522 match succeeded and we can return early.  */
	  tree targs = make_tree_vec (nargs);
	  tree aargs = template_parms_level_to_args (arg_parms);
	  if (!unify (arg_parms, targs, aargs, pargs, UNIFY_ALLOW_NONE,
		      /*explain*/false))
	    return 1;
	}
      /* Otherwise, fall through to the pre-P0522 exact-match check.  */
    }

  /* Determine whether we have a parameter pack at the end of the
     template template parameter's template parameter list.  */
  if (TREE_VEC_ELT (parm_parms, nparms - 1) != error_mark_node)
    {
      parm = TREE_VALUE (TREE_VEC_ELT (parm_parms, nparms - 1));

      if (error_operand_p (parm))
	return 0;

      switch (TREE_CODE (parm))
	{
	case TEMPLATE_DECL:
	case TYPE_DECL:
	  if (TEMPLATE_TYPE_PARAMETER_PACK (TREE_TYPE (parm)))
	    variadic_p = 1;
	  break;

	case PARM_DECL:
	  if (TEMPLATE_PARM_PARAMETER_PACK (DECL_INITIAL (parm)))
	    variadic_p = 1;
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* Argument/parameter counts must line up, except that a trailing
     pack can absorb zero or more extra arguments.  */
  if (nargs != nparms
      && !(variadic_p && nargs >= nparms - 1))
    return 0;

  /* Check all of the template parameters except the parameter pack at
     the end (if any).  */
  for (i = 0; i < nparms - variadic_p; ++i)
    {
      if (TREE_VEC_ELT (parm_parms, i) == error_mark_node
	  || TREE_VEC_ELT (arg_parms, i) == error_mark_node)
	continue;

      parm = TREE_VALUE (TREE_VEC_ELT (parm_parms, i));
      arg = TREE_VALUE (TREE_VEC_ELT (arg_parms, i));

      if (!coerce_template_template_parm (parm, arg, complain, in_decl,
					  outer_args))
	return 0;
    }

  if (variadic_p)
    {
      /* Check each of the template parameters in the template
	 argument against the template parameter pack at the end of
	 the template template parameter.  */
      if (TREE_VEC_ELT (parm_parms, i) == error_mark_node)
	return 0;

      parm = TREE_VALUE (TREE_VEC_ELT (parm_parms, i));

      for (; i < nargs; ++i)
	{
	  if (TREE_VEC_ELT (arg_parms, i) == error_mark_node)
	    continue;

	  arg = TREE_VALUE (TREE_VEC_ELT (arg_parms, i));

	  if (!coerce_template_template_parm (parm, arg, complain, in_decl,
					      outer_args))
	    return 0;
	}
    }

  return 1;
}
/* Verifies that the deduced template arguments (in TARGS) for the
   template template parameters (in TPARMS) represent valid bindings,
   by comparing the template parameter list of each template argument
   to the template parameter list of its corresponding template
   template parameter, in accordance with DR150.  This
   routine can only be called after all template arguments have been
   deduced.  It will return TRUE if all of the template template
   parameter bindings are okay, FALSE otherwise.  */
bool
template_template_parm_bindings_ok_p (tree tparms, tree targs)
{
  int i, ntparms = TREE_VEC_LENGTH (tparms);
  bool ret = true;

  /* We're dealing with template parms in this process.  */
  ++processing_template_decl;

  targs = INNERMOST_TEMPLATE_ARGS (targs);

  for (i = 0; i < ntparms; ++i)
    {
      tree tparm = TREE_VALUE (TREE_VEC_ELT (tparms, i));
      tree targ = TREE_VEC_ELT (targs, i);

      /* Only template template parameters with a deduced binding need
	 checking; type and non-type parameters are skipped.  */
      if (TREE_CODE (tparm) == TEMPLATE_DECL && targ)
	{
	  tree packed_args = NULL_TREE;
	  int idx, len = 1;

	  if (ARGUMENT_PACK_P (targ))
	    {
	      /* Look inside the argument pack.  */
	      packed_args = ARGUMENT_PACK_ARGS (targ);
	      len = TREE_VEC_LENGTH (packed_args);
	    }

	  /* Check each bound template (one, or every pack element).  */
	  for (idx = 0; idx < len; ++idx)
	    {
	      tree targ_parms = NULL_TREE;

	      if (packed_args)
		/* Extract the next argument from the argument
		   pack.  */
		targ = TREE_VEC_ELT (packed_args, idx);

	      if (PACK_EXPANSION_P (targ))
		/* Look at the pattern of the pack expansion.  */
		targ = PACK_EXPANSION_PATTERN (targ);

	      /* Extract the template parameters from the template
		 argument.  */
	      if (TREE_CODE (targ) == TEMPLATE_DECL)
		targ_parms = DECL_INNERMOST_TEMPLATE_PARMS (targ);
	      else if (TREE_CODE (targ) == TEMPLATE_TEMPLATE_PARM)
		targ_parms = DECL_INNERMOST_TEMPLATE_PARMS (TYPE_NAME (targ));

	      /* Verify that we can coerce the template template
		 parameters from the template argument to the template
		 parameter.  This requires an exact match.  */
	      if (targ_parms
		  && !coerce_template_template_parms
		       (DECL_INNERMOST_TEMPLATE_PARMS (tparm),
			targ_parms,
			tf_none,
			tparm,
			targs))
		{
		  ret = false;
		  goto out;
		}
	    }
	}
    }

 out:

  --processing_template_decl;
  return ret;
}
/* Since type attributes aren't mangled, we need to strip them from
   template type arguments.  Returns ARG unchanged when there is
   nothing to strip; otherwise returns the typedef-stripped type,
   optionally warning about any attributes that were dropped.  */
tree
canonicalize_type_argument (tree arg, tsubst_flags_t complain)
{
  /* Nothing to do for a missing or erroneous argument, or one that is
     already its own canonical type.  */
  if (arg == NULL_TREE
      || arg == error_mark_node
      || TYPE_CANONICAL (arg) == arg)
    return arg;

  bool stripped_attrs = false;
  tree result = strip_typedefs (arg, &stripped_attrs);

  if ((complain & tf_warning) && stripped_attrs)
    warning (OPT_Wignored_attributes,
	     "ignoring attributes on template argument %qT", arg);

  return result;
}
/* And from inside dependent non-type arguments like sizeof(Type).
   Expression analogue of canonicalize_type_argument: strips typedefs
   (and thus attributes) inside ARG, warning when attributes are
   dropped.  */
static tree
canonicalize_expr_argument (tree arg, tsubst_flags_t complain)
{
  /* Pass through null and erroneous arguments untouched.  */
  if (arg == NULL_TREE || arg == error_mark_node)
    return arg;

  bool stripped_attrs = false;
  tree result = strip_typedefs_expr (arg, &stripped_attrs);

  if ((complain & tf_warning) && stripped_attrs)
    warning (OPT_Wignored_attributes,
	     "ignoring attributes in template argument %qE", arg);

  return result;
}
/* A template declaration can be substituted for a constrained
   template template parameter only when the argument is no more
   constrained than the parameter.  PARM is the template template
   parameter; ARG is the candidate TEMPLATE_DECL.  */
static bool
is_compatible_template_arg (tree parm, tree arg)
{
  tree parm_cons = get_constraints (parm);

  /* For now, allow constrained template template arguments
     and unconstrained template template parameters.  */
  if (parm_cons == NULL_TREE)
    return true;

  /* If the template parameter is constrained, we need to rewrite its
     constraints in terms of the ARG's template parameters.  This ensures
     that all of the template parameter types will have the same depth.

     Note that this is only valid when coerce_template_template_parm is
     true for the innermost template parameters of PARM and ARG.  In other
     words, because coercion is successful, this conversion will be valid.
     (The old 'if (parm_cons)' guard here was dead: parm_cons is known
     non-null after the early return above.)  */
  tree aparms = DECL_INNERMOST_TEMPLATE_PARMS (arg);
  tree new_args = template_parms_level_to_args (aparms);
  parm_cons = tsubst_constraint_info (parm_cons, new_args,
				      tf_none, NULL_TREE);
  if (parm_cons == error_mark_node)
    return false;

  return weakly_subsumes (parm_cons, new_args, arg);
}
/* Convert a placeholder argument into a binding to the original
   parameter.  The original parameter is saved as the TREE_TYPE of
   ARG, so it can be recovered later; ARG itself is returned.  */
static inline tree
convert_wildcard_argument (tree parm, tree arg)
{
  /* Record the originating parameter on the wildcard node.  */
  TREE_TYPE (arg) = parm;
  return arg;
}
/* We can't fully resolve ARG given as a non-type template argument to TYPE,
   because one of them is dependent.  But we need to represent the
   conversion for the benefit of cp_tree_equal.  Returns ARG unchanged
   when no conversion marker is needed, otherwise ARG wrapped in an
   IMPLICIT_CONV_EXPR flagged as a non-type argument conversion.  */
static tree
maybe_convert_nontype_argument (tree type, tree arg)
{
  /* Auto parms get no conversion; nor do arguments we're going to use
     for deduction.  */
  if (type_uses_auto (type)
      || value_dependent_expression_p (arg))
    return arg;

  /* Compare against the cv-unqualified parameter type; an argument
     already of that type needs no marker.  */
  tree bare_type = cv_unqualified (type);
  if (same_type_p (bare_type, TREE_TYPE (arg)))
    return arg;

  tree conv = build1 (IMPLICIT_CONV_EXPR, bare_type, arg);
  IMPLICIT_CONV_EXPR_NONTYPE_ARG (conv) = true;
  return conv;
}
/* Convert the indicated template ARG as necessary to match the
   indicated template PARM.  Returns the converted ARG, or
   error_mark_node if the conversion was unsuccessful.  Error and
   warning messages are issued under control of COMPLAIN.  This
   conversion is for the Ith parameter in the parameter list.  ARGS is
   the full set of template arguments deduced so far.  */
static tree
convert_template_argument (tree parm,
                          tree arg,
                          tree args,
                          tsubst_flags_t complain,
                          int i,
                          tree in_decl)
{
  tree orig_arg;
  tree val;
  int is_type, requires_type, is_tmpl_type, requires_tmpl_type;

  if (parm == error_mark_node || error_operand_p (arg))
    return error_mark_node;

  /* Trivially convert placeholders. */
  if (TREE_CODE (arg) == WILDCARD_DECL)
    return convert_wildcard_argument (parm, arg);

  if (arg == any_targ_node)
    return arg;

  if (TREE_CODE (arg) == TREE_LIST
      && TREE_CODE (TREE_VALUE (arg)) == OFFSET_REF)
    {
      /* The template argument was the name of some
	 member function.  That's usually
	 invalid, but static members are OK.  In any
	 case, grab the underlying fields/functions
	 and issue an error later if required.  */
      TREE_TYPE (arg) = unknown_type_node;
    }

  orig_arg = arg;

  /* Classify what kind of argument PARM requires.  */
  requires_tmpl_type = TREE_CODE (parm) == TEMPLATE_DECL;
  requires_type = (TREE_CODE (parm) == TYPE_DECL
		   || requires_tmpl_type);

  /* When determining whether an argument pack expansion is a template,
     look at the pattern.  */
  if (PACK_EXPANSION_P (arg))
    arg = PACK_EXPANSION_PATTERN (arg);

  /* Deal with an injected-class-name used as a template template arg.  */
  if (requires_tmpl_type && CLASS_TYPE_P (arg))
    {
      tree t = maybe_get_template_decl_from_type_decl (TYPE_NAME (arg));
      if (TREE_CODE (t) == TEMPLATE_DECL)
	{
	  if (cxx_dialect >= cxx11)
	    /* OK under DR 1004.  */;
	  else if (complain & tf_warning_or_error)
	    pedwarn (input_location, OPT_Wpedantic, "injected-class-name %qD"
		     " used as template template argument", TYPE_NAME (arg));
	  else if (flag_pedantic_errors)
	    t = arg;

	  arg = t;
	}
    }

  /* Classify what kind of entity ARG actually is.  */
  is_tmpl_type =
    ((TREE_CODE (arg) == TEMPLATE_DECL
      && TREE_CODE (DECL_TEMPLATE_RESULT (arg)) == TYPE_DECL)
     || (requires_tmpl_type && TREE_CODE (arg) == TYPE_ARGUMENT_PACK)
     || TREE_CODE (arg) == TEMPLATE_TEMPLATE_PARM
     || TREE_CODE (arg) == UNBOUND_CLASS_TEMPLATE);

  if (is_tmpl_type
      && (TREE_CODE (arg) == TEMPLATE_TEMPLATE_PARM
	  || TREE_CODE (arg) == UNBOUND_CLASS_TEMPLATE))
    arg = TYPE_STUB_DECL (arg);

  is_type = TYPE_P (arg) || is_tmpl_type;

  /* A dependent qualified-id like T::foo used where a type is required:
     accept it with a permerror suggesting 'typename', per [temp.res].  */
  if (requires_type && ! is_type && TREE_CODE (arg) == SCOPE_REF
      && TREE_CODE (TREE_OPERAND (arg, 0)) == TEMPLATE_TYPE_PARM)
    {
      if (TREE_CODE (TREE_OPERAND (arg, 1)) == BIT_NOT_EXPR)
	{
	  if (complain & tf_error)
	    error ("invalid use of destructor %qE as a type", orig_arg);
	  return error_mark_node;
	}

      permerror (input_location,
		 "to refer to a type member of a template parameter, "
		 "use %<typename %E%>", orig_arg);

      orig_arg = make_typename_type (TREE_OPERAND (arg, 0),
				     TREE_OPERAND (arg, 1),
				     typename_type,
				     complain);
      arg = orig_arg;
      is_type = 1;
    }
  /* Diagnose a type argument where a value was expected, or vice
     versa.  */
  if (is_type != requires_type)
    {
      if (in_decl)
	{
	  if (complain & tf_error)
	    {
	      error ("type/value mismatch at argument %d in template "
		     "parameter list for %qD",
		     i + 1, in_decl);
	      if (is_type)
		{
		  /* The template argument is a type, but we're expecting
		     an expression.  */
		  inform (input_location,
			  " expected a constant of type %qT, got %qT",
			  TREE_TYPE (parm),
			  (DECL_P (arg) ? DECL_NAME (arg) : orig_arg));
		  /* [temp.arg]/2: "In a template-argument, an ambiguity
		     between a type-id and an expression is resolved to a
		     type-id, regardless of the form of the corresponding
		     template-parameter."  So give the user a clue.  */
		  if (TREE_CODE (arg) == FUNCTION_TYPE)
		    inform (input_location, " ambiguous template argument "
			    "for non-type template parameter is treated as "
			    "function type");
		}
	      else if (requires_tmpl_type)
		inform (input_location,
			" expected a class template, got %qE", orig_arg);
	      else
		inform (input_location,
			" expected a type, got %qE", orig_arg);
	    }
	}
      return error_mark_node;
    }
  /* Diagnose a plain type where a template was expected, or vice
     versa.  */
  if (is_tmpl_type ^ requires_tmpl_type)
    {
      if (in_decl && (complain & tf_error))
	{
	  error ("type/value mismatch at argument %d in template "
		 "parameter list for %qD",
		 i + 1, in_decl);
	  if (is_tmpl_type)
	    inform (input_location,
		    " expected a type, got %qT", DECL_NAME (arg));
	  else
	    inform (input_location,
		    " expected a class template, got %qT", orig_arg);
	}
      return error_mark_node;
    }

  if (template_parameter_pack_p (parm) && ARGUMENT_PACK_P (orig_arg))
    /* We already did the appropriate conversion when packing args.  */
    val = orig_arg;
  else if (is_type)
    {
      if (requires_tmpl_type)
	{
	  if (TREE_CODE (TREE_TYPE (arg)) == UNBOUND_CLASS_TEMPLATE)
	    /* The number of argument required is not known yet.
	       Just accept it for now.  */
	    val = orig_arg;
	  else
	    {
	      tree parmparm = DECL_INNERMOST_TEMPLATE_PARMS (parm);
	      tree argparm;

	      /* Strip alias templates that are equivalent to another
		 template.  */
	      arg = get_underlying_template (arg);
	      argparm = DECL_INNERMOST_TEMPLATE_PARMS (arg);

	      if (coerce_template_template_parms (parmparm, argparm,
						  complain, in_decl,
						  args))
		{
		  val = arg;

		  /* TEMPLATE_TEMPLATE_PARM node is preferred over
		     TEMPLATE_DECL.  */
		  if (val != error_mark_node)
		    {
		      if (DECL_TEMPLATE_TEMPLATE_PARM_P (val))
			val = TREE_TYPE (val);
		      if (TREE_CODE (orig_arg) == TYPE_PACK_EXPANSION)
			val = make_pack_expansion (val, complain);
		    }
		}
	      else
		{
		  if (in_decl && (complain & tf_error))
		    {
		      error ("type/value mismatch at argument %d in "
			     "template parameter list for %qD",
			     i + 1, in_decl);
		      inform (input_location,
			      " expected a template of type %qD, got %qT",
			      parm, orig_arg);
		    }

		  val = error_mark_node;
		}

	      // Check that the constraints are compatible before allowing the
	      // substitution.
	      if (val != error_mark_node)
		if (!is_compatible_template_arg (parm, arg))
		  {
		    if (in_decl && (complain & tf_error))
		      {
			error ("constraint mismatch at argument %d in "
			       "template parameter list for %qD",
			       i + 1, in_decl);
			inform (input_location, " expected %qD but got %qD",
				parm, arg);
		      }
		    val = error_mark_node;
		  }
	    }
	}
      else
	val = orig_arg;
      /* We only form one instance of each template specialization.
	 Therefore, if we use a non-canonical variant (i.e., a
	 typedef), any future messages referring to the type will use
	 the typedef, which is confusing if those future uses do not
	 themselves also use the typedef.  */
      if (TYPE_P (val))
	val = canonicalize_type_argument (val, complain);
    }
  else
    {
      /* Non-type argument: first determine the (substituted or
	 deduced) parameter type T, then convert ARG to it.  */
      tree t = TREE_TYPE (parm);

      if (TEMPLATE_PARM_LEVEL (get_template_parm_index (parm))
	  > TMPL_ARGS_DEPTH (args))
	/* We don't have enough levels of args to do any substitution.  This
	   can happen in the context of -fnew-ttp-matching.  */;
      else if (tree a = type_uses_auto (t))
	{
	  /* Deduce the placeholder type from the argument itself.  */
	  t = do_auto_deduction (t, arg, a, complain, adc_unify, args);
	  if (t == error_mark_node)
	    return error_mark_node;
	}
      else
	t = tsubst (t, args, complain, in_decl);

      if (invalid_nontype_parm_type_p (t, complain))
	return error_mark_node;

      /* Drop typedefs from the substituted parameter type.  */
      if (t != TREE_TYPE (parm))
	t = canonicalize_type_argument (t, complain);

      if (!type_dependent_expression_p (orig_arg)
	  && !uses_template_parms (t))
	/* We used to call digest_init here.  However, digest_init
	   will report errors, which we don't want when complain
	   is zero.  More importantly, digest_init will try too
	   hard to convert things: for example, `0' should not be
	   converted to pointer type at this point according to
	   the standard.  Accepting this is not merely an
	   extension, since deciding whether or not these
	   conversions can occur is part of determining which
	   function template to call, or whether a given explicit
	   argument specification is valid.  */
	val = convert_nontype_argument (t, orig_arg, complain);
      else
	{
	  val = canonicalize_expr_argument (orig_arg, complain);
	  val = maybe_convert_nontype_argument (t, val);
	}

      /* convert_nontype_argument distinguishes NULL_TREE (already
	 diagnosed) from error_mark_node (needs a diagnostic here).  */
      if (val == NULL_TREE)
	val = error_mark_node;
      else if (val == error_mark_node && (complain & tf_error))
	error_at (cp_expr_loc_or_input_loc (orig_arg),
		  "could not convert template argument %qE from %qT to %qT",
		  orig_arg, TREE_TYPE (orig_arg), t);

      if (INDIRECT_REF_P (val))
	{
	  /* Reject template arguments that are references to built-in
	     functions with no library fallbacks.  */
	  const_tree inner = TREE_OPERAND (val, 0);
	  const_tree innertype = TREE_TYPE (inner);
	  if (innertype
	      && TYPE_REF_P (innertype)
	      && TREE_CODE (TREE_TYPE (innertype)) == FUNCTION_TYPE
	      && TREE_OPERAND_LENGTH (inner) > 0
	      && reject_gcc_builtin (TREE_OPERAND (inner, 0)))
	      return error_mark_node;
	}

      if (TREE_CODE (val) == SCOPE_REF)
	{
	  /* Strip typedefs from the SCOPE_REF.  */
	  tree type = canonicalize_type_argument (TREE_TYPE (val), complain);
	  tree scope = canonicalize_type_argument (TREE_OPERAND (val, 0),
						   complain);
	  val = build_qualified_name (type, scope, TREE_OPERAND (val, 1),
				      QUALIFIED_NAME_IS_TEMPLATE (val));
	}
    }

  return val;
}
/* Coerces the remaining template arguments in INNER_ARGS (from
ARG_IDX to the end) into the parameter pack at PARM_IDX in PARMS.
Returns the coerced argument pack. PARM_IDX is the position of this
parameter in the template parameter list. ARGS is the original
template argument list. */
static tree
coerce_template_parameter_pack (tree parms,
				int parm_idx,
				tree args,
				tree inner_args,
				int arg_idx,
				tree new_args,
				int* lost,
				tree in_decl,
				tsubst_flags_t complain)
{
  tree parm = TREE_VEC_ELT (parms, parm_idx);
  int nargs = inner_args ? NUM_TMPL_ARGS (inner_args) : 0;
  tree packed_args;
  tree argument_pack;
  tree packed_parms = NULL_TREE;

  /* Clamp ARG_IDX so that when every argument has already been consumed
     the pack built below is simply empty.  */
  if (arg_idx > nargs)
    arg_idx = nargs;

  if (tree packs = fixed_parameter_pack_p (TREE_VALUE (parm)))
    {
      /* When the template parameter is a non-type template parameter pack
	 or template template parameter pack whose type or template
	 parameters use parameter packs, we know exactly how many arguments
	 we are looking for.  Build a vector of the instantiated decls for
	 these template parameters in PACKED_PARMS.  */
      /* We can't use make_pack_expansion here because it would interpret a
	 _DECL as a use rather than a declaration.  */
      tree decl = TREE_VALUE (parm);
      tree exp = cxx_make_type (TYPE_PACK_EXPANSION);
      SET_PACK_EXPANSION_PATTERN (exp, decl);
      PACK_EXPANSION_PARAMETER_PACKS (exp) = packs;
      SET_TYPE_STRUCTURAL_EQUALITY (exp);

      /* NOTE(review): ARGS is temporarily shortened for the substitution,
	 presumably so the not-yet-complete innermost entry is not
	 consulted; it is restored immediately afterwards.  */
      TREE_VEC_LENGTH (args)--;
      packed_parms = tsubst_pack_expansion (exp, args, complain, decl);
      TREE_VEC_LENGTH (args)++;

      if (packed_parms == error_mark_node)
	return error_mark_node;

      /* If we're doing a partial instantiation of a member template,
	 verify that all of the types used for the non-type
	 template parameter pack are, in fact, valid for non-type
	 template parameters.  */
      if (arg_idx < nargs
	  && PACK_EXPANSION_P (TREE_VEC_ELT (inner_args, arg_idx)))
	{
	  int j, len = TREE_VEC_LENGTH (packed_parms);
	  for (j = 0; j < len; ++j)
	    {
	      tree t = TREE_VEC_ELT (packed_parms, j);
	      if (TREE_CODE (t) == PARM_DECL
		  && invalid_nontype_parm_type_p (TREE_TYPE (t), complain))
		return error_mark_node;
	    }
	  /* We don't know how many args we have yet, just
	     use the unconverted ones for now.  */
	  return NULL_TREE;
	}

      packed_args = make_tree_vec (TREE_VEC_LENGTH (packed_parms));
    }
  /* Check if we have a placeholder pack, which indicates we're
     in the context of a introduction list.  In that case we want
     to match this pack to the single placeholder.  */
  else if (arg_idx < nargs
	   && TREE_CODE (TREE_VEC_ELT (inner_args, arg_idx)) == WILDCARD_DECL
	   && WILDCARD_PACK_P (TREE_VEC_ELT (inner_args, arg_idx)))
    {
      nargs = arg_idx + 1;
      packed_args = make_tree_vec (1);
    }
  else
    /* Common case: the pack absorbs all remaining arguments.  */
    packed_args = make_tree_vec (nargs - arg_idx);

  /* Convert the remaining arguments, which will be a part of the
     parameter pack "parm".  */
  int first_pack_arg = arg_idx;
  for (; arg_idx < nargs; ++arg_idx)
    {
      tree arg = TREE_VEC_ELT (inner_args, arg_idx);
      tree actual_parm = TREE_VALUE (parm);
      int pack_idx = arg_idx - first_pack_arg;

      if (packed_parms)
	{
	  /* Once we've packed as many args as we have types, stop.  */
	  if (pack_idx >= TREE_VEC_LENGTH (packed_parms))
	    break;
	  else if (PACK_EXPANSION_P (arg))
	    /* We don't know how many args we have yet, just
	       use the unconverted ones for now.  */
	    return NULL_TREE;
	  else
	    actual_parm = TREE_VEC_ELT (packed_parms, pack_idx);
	}

      if (arg == error_mark_node)
	{
	  if (complain & tf_error)
	    error ("template argument %d is invalid", arg_idx + 1);
	}
      else
	arg = convert_template_argument (actual_parm,
					 arg, new_args, complain, parm_idx,
					 in_decl);
      if (arg == error_mark_node)
	(*lost)++;
      TREE_VEC_ELT (packed_args, pack_idx) = arg;
    }

  /* Diagnose too few arguments: the loop above can break out early when
     a fixed-length pack (PACKED_PARMS) ran out of expected entries.  */
  if (arg_idx - first_pack_arg < TREE_VEC_LENGTH (packed_args)
      && TREE_VEC_LENGTH (packed_args) > 0)
    {
      if (complain & tf_error)
	error ("wrong number of template arguments (%d, should be %d)",
	       arg_idx - first_pack_arg, TREE_VEC_LENGTH (packed_args));
      return error_mark_node;
    }

  /* Build the argument-pack node of the appropriate flavor for the
     parameter kind: a TYPE_ARGUMENT_PACK for type/template parameters,
     a NONTYPE_ARGUMENT_PACK otherwise.  */
  if (TREE_CODE (TREE_VALUE (parm)) == TYPE_DECL
      || TREE_CODE (TREE_VALUE (parm)) == TEMPLATE_DECL)
    argument_pack = cxx_make_type (TYPE_ARGUMENT_PACK);
  else
    {
      argument_pack = make_node (NONTYPE_ARGUMENT_PACK);
      TREE_CONSTANT (argument_pack) = 1;
    }

  SET_ARGUMENT_PACK_ARGS (argument_pack, packed_args);
  if (CHECKING_P)
    SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (packed_args,
					 TREE_VEC_LENGTH (packed_args));
  return argument_pack;
}
/* Returns the number of pack expansions in the template argument vector
ARGS. */
/* Count how many elements of the template argument vector ARGS are
   pack expansions.  A null ARGS counts as zero.  */

static int
pack_expansion_args_count (tree args)
{
  if (!args)
    return 0;

  int n_expansions = 0;
  int len = TREE_VEC_LENGTH (args);
  for (int idx = 0; idx < len; ++idx)
    {
      tree entry = TREE_VEC_ELT (args, idx);
      if (entry != NULL_TREE && PACK_EXPANSION_P (entry))
	++n_expansions;
    }
  return n_expansions;
}
/* Convert all template arguments to their appropriate types, and
return a vector containing the innermost resulting template
arguments. If any error occurs, return error_mark_node. Error and
warning messages are issued under control of COMPLAIN.
If REQUIRE_ALL_ARGS is false, argument deduction will be performed
for arguments not specified in ARGS. Otherwise, if
USE_DEFAULT_ARGS is true, default arguments will be used to fill in
unspecified arguments. If REQUIRE_ALL_ARGS is true, but
USE_DEFAULT_ARGS is false, then all arguments must be specified in
ARGS. */
static tree
coerce_template_parms (tree parms,
		       tree args,
		       tree in_decl,
		       tsubst_flags_t complain,
		       bool require_all_args,
		       bool use_default_args)
{
  int nparms, nargs, parm_idx, arg_idx, lost = 0;
  tree orig_inner_args;
  tree inner_args;
  tree new_args;
  tree new_inner_args;

  /* When used as a boolean value, indicates whether this is a
     variadic template parameter list. Since it's an int, we can also
     subtract it from nparms to get the number of non-variadic
     parameters.  */
  int variadic_p = 0;
  int variadic_args_p = 0;
  int post_variadic_parms = 0;

  /* Adjustment to nparms for fixed parameter packs.  */
  int fixed_pack_adjust = 0;
  int fixed_packs = 0;
  int missing = 0;

  /* Likewise for parameters with default arguments.  */
  int default_p = 0;

  if (args == error_mark_node)
    return error_mark_node;

  nparms = TREE_VEC_LENGTH (parms);

  /* Determine if there are any parameter packs or default arguments.  */
  for (parm_idx = 0; parm_idx < nparms; ++parm_idx)
    {
      tree parm = TREE_VEC_ELT (parms, parm_idx);
      if (variadic_p)
	++post_variadic_parms;
      if (template_parameter_pack_p (TREE_VALUE (parm)))
	++variadic_p;
      if (TREE_PURPOSE (parm))
	++default_p;
    }

  inner_args = orig_inner_args = INNERMOST_TEMPLATE_ARGS (args);
  /* If there are no parameters that follow a parameter pack, we need to
     expand any argument packs so that we can deduce a parameter pack from
     some non-packed args followed by an argument pack, as in variadic85.C.
     If there are such parameters, we need to leave argument packs intact
     so the arguments are assigned properly. This can happen when dealing
     with a nested class inside a partial specialization of a class
     template, as in variadic92.C, or when deducing a template parameter pack
     from a sub-declarator, as in variadic114.C.  */
  if (!post_variadic_parms)
    inner_args = expand_template_argument_pack (inner_args);

  /* Count any pack expansion args.  */
  variadic_args_p = pack_expansion_args_count (inner_args);

  nargs = inner_args ? NUM_TMPL_ARGS (inner_args) : 0;
  /* Early arity check: too many args for a non-variadic list, or too few
     when all args are required and no default can fill the gap.  */
  if ((nargs - variadic_args_p > nparms && !variadic_p)
      || (nargs < nparms - variadic_p
	  && require_all_args
	  && !variadic_args_p
	  && (!use_default_args
	      || (TREE_VEC_ELT (parms, nargs) != error_mark_node
		  && !TREE_PURPOSE (TREE_VEC_ELT (parms, nargs))))))
    {
    bad_nargs:
      if (complain & tf_error)
	{
	  if (variadic_p || default_p)
	    {
	      nparms -= variadic_p + default_p;
	      error ("wrong number of template arguments "
		     "(%d, should be at least %d)", nargs, nparms);
	    }
	  else
	    error ("wrong number of template arguments "
		   "(%d, should be %d)", nargs, nparms);

	  if (in_decl)
	    inform (DECL_SOURCE_LOCATION (in_decl),
		    "provided for %qD", in_decl);
	}

      return error_mark_node;
    }
  /* We can't pass a pack expansion to a non-pack parameter of an alias
     template (DR 1430).  */
  else if (in_decl
	   && (DECL_ALIAS_TEMPLATE_P (in_decl)
	       || concept_definition_p (in_decl))
	   && variadic_args_p
	   && nargs - variadic_args_p < nparms - variadic_p)
    {
      if (complain & tf_error)
	{
	  /* Locate the offending pack-expansion argument for the
	     diagnostic.  */
	  for (int i = 0; i < TREE_VEC_LENGTH (inner_args); ++i)
	    {
	      tree arg = TREE_VEC_ELT (inner_args, i);
	      tree parm = TREE_VALUE (TREE_VEC_ELT (parms, i));

	      if (PACK_EXPANSION_P (arg)
		  && !template_parameter_pack_p (parm))
		{
		  if (DECL_ALIAS_TEMPLATE_P (in_decl))
		    error_at (location_of (arg),
			      "pack expansion argument for non-pack parameter "
			      "%qD of alias template %qD", parm, in_decl);
		  else
		    error_at (location_of (arg),
			      "pack expansion argument for non-pack parameter "
			      "%qD of concept %qD", parm, in_decl);
		  inform (DECL_SOURCE_LOCATION (parm), "declared here");
		  goto found;
		}
	    }
	  gcc_unreachable ();
	found:;
	}
      return error_mark_node;
    }

  /* We need to evaluate the template arguments, even though this
     template-id may be nested within a "sizeof".  */
  cp_evaluated ev;

  new_inner_args = make_tree_vec (nparms);
  new_args = add_outermost_template_args (args, new_inner_args);
  /* PACK_ADJUST tracks how far ARG_IDX runs ahead of PARM_IDX because a
     parameter pack consumed more than one argument.  */
  int pack_adjust = 0;
  for (parm_idx = 0, arg_idx = 0; parm_idx < nparms; parm_idx++, arg_idx++)
    {
      tree arg;
      tree parm;

      /* Get the Ith template parameter.  */
      parm = TREE_VEC_ELT (parms, parm_idx);

      if (parm == error_mark_node)
	{
	  TREE_VEC_ELT (new_inner_args, arg_idx) = error_mark_node;
	  continue;
	}

      /* Calculate the next argument.  */
      if (arg_idx < nargs)
	arg = TREE_VEC_ELT (inner_args, arg_idx);
      else
	arg = NULL_TREE;

      if (template_parameter_pack_p (TREE_VALUE (parm))
	  && (arg || require_all_args || !(complain & tf_partial))
	  && !(arg && ARGUMENT_PACK_P (arg)))
	{
	  /* Some arguments will be placed in the
	     template parameter pack PARM.  */
	  arg = coerce_template_parameter_pack (parms, parm_idx, args,
						inner_args, arg_idx,
						new_args, &lost,
						in_decl, complain);

	  if (arg == NULL_TREE)
	    {
	      /* We don't know how many args we have yet, just use the
		 unconverted (and still packed) ones for now.  */
	      new_inner_args = orig_inner_args;
	      arg_idx = nargs;
	      break;
	    }

	  TREE_VEC_ELT (new_inner_args, parm_idx) = arg;

	  /* Store this argument.  */
	  if (arg == error_mark_node)
	    {
	      lost++;
	      /* We are done with all of the arguments.  */
	      arg_idx = nargs;
	      break;
	    }
	  else
	    {
	      /* Account for how many arguments the pack swallowed.  */
	      pack_adjust = TREE_VEC_LENGTH (ARGUMENT_PACK_ARGS (arg)) - 1;
	      arg_idx += pack_adjust;
	      if (fixed_parameter_pack_p (TREE_VALUE (parm)))
		{
		  ++fixed_packs;
		  fixed_pack_adjust += pack_adjust;
		}
	    }

	  continue;
	}
      else if (arg)
	{
	  if (PACK_EXPANSION_P (arg))
	    {
	      /* "If every valid specialization of a variadic template
		 requires an empty template parameter pack, the template is
		 ill-formed, no diagnostic required."  So check that the
		 pattern works with this parameter.  */
	      tree pattern = PACK_EXPANSION_PATTERN (arg);
	      tree conv = convert_template_argument (TREE_VALUE (parm),
						     pattern, new_args,
						     complain, parm_idx,
						     in_decl);
	      if (conv == error_mark_node)
		{
		  if (complain & tf_error)
		    inform (input_location, "so any instantiation with a "
			    "non-empty parameter pack would be ill-formed");
		  ++lost;
		}
	      else if (TYPE_P (conv) && !TYPE_P (pattern))
		/* Recover from missing typename.  */
		TREE_VEC_ELT (inner_args, arg_idx)
		  = make_pack_expansion (conv, complain);

	      /* We don't know how many args we have yet, just
		 use the unconverted ones for now.  */
	      new_inner_args = inner_args;
	      arg_idx = nargs;
	      break;
	    }
	}
      else if (require_all_args)
	{
	  /* There must be a default arg in this case.  */
	  arg = tsubst_template_arg (TREE_PURPOSE (parm), new_args,
				     complain, in_decl);
	  /* The position of the first default template argument,
	     is also the number of non-defaulted arguments in NEW_INNER_ARGS.
	     Record that.  */
	  if (!NON_DEFAULT_TEMPLATE_ARGS_COUNT (new_inner_args))
	    SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (new_inner_args,
						 arg_idx - pack_adjust);
	}
      else
	break;

      if (arg == error_mark_node)
	{
	  if (complain & tf_error)
	    error ("template argument %d is invalid", arg_idx + 1);
	}
      else if (!arg)
	{
	  /* This can occur if there was an error in the template
	     parameter list itself (which we would already have
	     reported) that we are trying to recover from, e.g., a class
	     template with a parameter list such as
	     template<typename..., typename> (cpp0x/variadic150.C).  */
	  ++lost;

	  /* This can also happen with a fixed parameter pack (71834).  */
	  if (arg_idx >= nargs)
	    ++missing;
	}
      else
	arg = convert_template_argument (TREE_VALUE (parm),
					 arg, new_args, complain,
					 parm_idx, in_decl);

      if (arg == error_mark_node)
	lost++;

      TREE_VEC_ELT (new_inner_args, arg_idx - pack_adjust) = arg;
    }

  if (missing || arg_idx < nargs - variadic_args_p)
    {
      /* If we had fixed parameter packs, we didn't know how many arguments we
	 actually needed earlier; now we do.  */
      nparms += fixed_pack_adjust;
      variadic_p -= fixed_packs;
      goto bad_nargs;
    }

  if (arg_idx < nargs)
    {
      /* We had some pack expansion arguments that will only work if the packs
	 are empty, but wait until instantiation time to complain.
	 See variadic-ttp3.C.  */

      /* Except that we can't provide empty packs to alias templates or
	 concepts when there are no corresponding parameters.  Basically,
	 we can get here with this:

	   template<typename T> concept C = true;

	   template<typename... Args>
	     requires C<Args...>
	   void f();

	 When parsing C<Args...>, we try to form a concept check of
	 C<?, Args...>.  Without the extra check for substituting an empty
	 pack past the last parameter, we can accept the check as valid.

	 FIXME: This may be valid for alias templates (but I doubt it).

	 FIXME: The error could be better also.   */
      if (in_decl && concept_definition_p (in_decl))
	{
	  if (complain & tf_error)
	    error_at (location_of (TREE_VEC_ELT (args, arg_idx)),
		      "too many arguments");
	  return error_mark_node;
	}

      /* Append the leftover (unconverted) arguments after the converted
	 ones so instantiation can look at them later.  */
      int len = nparms + (nargs - arg_idx);
      tree args = make_tree_vec (len);
      int i = 0;
      for (; i < nparms; ++i)
	TREE_VEC_ELT (args, i) = TREE_VEC_ELT (new_inner_args, i);
      for (; i < len; ++i, ++arg_idx)
	TREE_VEC_ELT (args, i) = TREE_VEC_ELT (inner_args,
					       arg_idx - pack_adjust);
      new_inner_args = args;
    }

  if (lost)
    {
      gcc_assert (!(complain & tf_error) || seen_error ());
      return error_mark_node;
    }

  if (CHECKING_P && !NON_DEFAULT_TEMPLATE_ARGS_COUNT (new_inner_args))
    SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (new_inner_args,
					 TREE_VEC_LENGTH (new_inner_args));

  return new_inner_args;
}
/* Convert all template arguments to their appropriate types, and
return a vector containing the innermost resulting template
arguments. If any error occurs, return error_mark_node. Error and
warning messages are not issued.
Note that no function argument deduction is performed, and default
arguments are used to fill in unspecified arguments. */
tree
coerce_template_parms (tree parms, tree args, tree in_decl)
{
  /* Silent variant: coerce with all arguments required and defaults
     filled in, issuing no diagnostics (tf_none).  */
  return coerce_template_parms (parms, args, in_decl, tf_none,
				/*require_all_args=*/true,
				/*use_default_args=*/true);
}
/* Convert all template arguments to their appropriate type, and
instantiate default arguments as needed. This returns a vector
containing the innermost resulting template arguments, or
error_mark_node if unsuccessful. */
tree
coerce_template_parms (tree parms, tree args, tree in_decl,
		       tsubst_flags_t complain)
{
  /* Diagnosing variant: all arguments required, defaults filled in,
     complaints controlled by COMPLAIN.  */
  return coerce_template_parms (parms, args, in_decl, complain,
				/*require_all_args=*/true,
				/*use_default_args=*/true);
}
/* Like coerce_template_parms. If PARMS represents all template
parameters levels, this function returns a vector of vectors
representing all the resulting argument levels. Note that in this
case, only the innermost arguments are coerced because the
outermost ones are supposed to have been coerced already.
Otherwise, if PARMS represents only (the innermost) vector of
parameters, this function returns a vector containing just the
innermost resulting arguments. */
static tree
coerce_innermost_template_parms (tree parms,
				 tree args,
				 tree in_decl,
				 tsubst_flags_t complain,
				 bool require_all_args,
				 bool use_default_args)
{
  int parms_depth = TMPL_PARMS_DEPTH (parms);
  int args_depth = TMPL_ARGS_DEPTH (args);

  /* Single parameter level: coerce the innermost parameters directly
     and return just that vector.  */
  if (parms_depth <= 1)
    return coerce_template_parms (INNERMOST_TEMPLATE_PARMS (parms),
				  args, in_decl, complain,
				  require_all_args,
				  use_default_args);

  /* Multiple levels: build a vector of per-level argument vectors.
     Only the level matching ARGS_DEPTH is coerced; the outer levels
     are assumed to have been coerced already and are copied over.  */
  tree coerced_args = make_tree_vec (parms_depth);
  int depth = parms_depth;
  for (tree level = parms; level != NULL_TREE;
       level = TREE_CHAIN (level), --depth)
    {
      tree level_args;
      if (depth == args_depth)
	level_args = coerce_template_parms (TREE_VALUE (level),
					    args, in_decl, complain,
					    require_all_args,
					    use_default_args);
      else
	level_args = TMPL_ARGS_LEVEL (args, depth);

      if (level_args == error_mark_node)
	return error_mark_node;

      SET_TMPL_ARGS_LEVEL (coerced_args, depth, level_args);
    }
  return coerced_args;
}
/* Returns true if T is a wrapper to make a C++20 template parameter
object const. */
static bool
class_nttp_const_wrapper_p (tree t)
{
  /* These wrappers only exist from C++20 onward.  */
  if (cxx_dialect < cxx2a)
    return false;
  if (TREE_CODE (t) != VIEW_CONVERT_EXPR)
    return false;
  if (!CP_TYPE_CONST_P (TREE_TYPE (t)))
    return false;
  return TREE_CODE (TREE_OPERAND (t, 0)) == TEMPLATE_PARM_INDEX;
}
/* Returns 1 if template args OT and NT are equivalent. */
int
template_args_equal (tree ot, tree nt, bool partial_order /* = false */)
{
  if (nt == ot)
    return 1;
  if (nt == NULL_TREE || ot == NULL_TREE)
    return false;
  /* The wildcard argument compares equal to anything.  */
  if (nt == any_targ_node || ot == any_targ_node)
    return true;

  /* Strip C++20 const wrappers so the underlying operands compare.  */
  if (class_nttp_const_wrapper_p (nt))
    nt = TREE_OPERAND (nt, 0);
  if (class_nttp_const_wrapper_p (ot))
    ot = TREE_OPERAND (ot, 0);

  if (TREE_CODE (nt) == TREE_VEC)
    /* For member templates */
    return TREE_CODE (ot) == TREE_VEC && comp_template_args (ot, nt);
  else if (PACK_EXPANSION_P (ot))
    /* Pack expansions match only other pack expansions with equal
       pattern and extra args.  */
    return (PACK_EXPANSION_P (nt)
	    && template_args_equal (PACK_EXPANSION_PATTERN (ot),
				    PACK_EXPANSION_PATTERN (nt))
	    && template_args_equal (PACK_EXPANSION_EXTRA_ARGS (ot),
				    PACK_EXPANSION_EXTRA_ARGS (nt)));
  else if (ARGUMENT_PACK_P (ot) || ARGUMENT_PACK_P (nt))
    return cp_tree_equal (ot, nt);
  else if (ot && TREE_CODE (ot) == ARGUMENT_PACK_SELECT)
    gcc_unreachable ();
  else if (TYPE_P (nt))
    {
      if (!TYPE_P (ot))
	return false;
      /* Don't treat an alias template specialization with dependent
	 arguments as equivalent to its underlying type when used as a
	 template argument; we need them to be distinct so that we
	 substitute into the specialization arguments at instantiation
	 time.  And aliases can't be equivalent without being ==, so
	 we don't need to look any deeper.

	 During partial ordering, however, we need to treat them normally so
	 that we can order uses of the same alias with different
	 cv-qualification (79960).  */
      if (!partial_order
	  && (TYPE_ALIAS_P (nt) || TYPE_ALIAS_P (ot)))
	return false;
      else
	return same_type_p (ot, nt);
    }
  else if (TREE_CODE (ot) == TREE_VEC || TYPE_P (ot))
    /* Mixed kinds (NT is an expression here, OT is not): unequal.  */
    return 0;
  else
    {
      /* Try to treat a template non-type argument that has been converted
	 to the parameter type as equivalent to one that hasn't yet.  */
      for (enum tree_code code1 = TREE_CODE (ot);
	   CONVERT_EXPR_CODE_P (code1)
	     || code1 == NON_LVALUE_EXPR;
	   code1 = TREE_CODE (ot))
	ot = TREE_OPERAND (ot, 0);

      for (enum tree_code code2 = TREE_CODE (nt);
	   CONVERT_EXPR_CODE_P (code2)
	     || code2 == NON_LVALUE_EXPR;
	   code2 = TREE_CODE (nt))
	nt = TREE_OPERAND (nt, 0);

      return cp_tree_equal (ot, nt);
    }
}
/* Returns 1 iff the OLDARGS and NEWARGS are in fact identical sets of
template arguments. Returns 0 otherwise, and updates OLDARG_PTR and
NEWARG_PTR with the offending arguments if they are non-NULL. */
int
comp_template_args (tree oldargs, tree newargs,
		    tree *oldarg_ptr, tree *newarg_ptr,
		    bool partial_order)
{
  /* Identity (including both NULL) means equal.  */
  if (oldargs == newargs)
    return 1;

  /* Exactly one NULL means unequal.  */
  if (!oldargs || !newargs)
    return 0;

  int len = TREE_VEC_LENGTH (oldargs);
  if (len != TREE_VEC_LENGTH (newargs))
    return 0;

  for (int idx = 0; idx < len; ++idx)
    {
      tree oarg = TREE_VEC_ELT (oldargs, idx);
      tree narg = TREE_VEC_ELT (newargs, idx);
      if (!template_args_equal (oarg, narg, partial_order))
	{
	  /* Report the first mismatching pair, if requested.  */
	  if (oldarg_ptr != NULL)
	    *oldarg_ptr = oarg;
	  if (newarg_ptr != NULL)
	    *newarg_ptr = narg;
	  return 0;
	}
    }
  return 1;
}
/* Compare template argument vectors with partial-ordering semantics,
   discarding any mismatch details.  */
inline bool
comp_template_args_porder (tree oargs, tree nargs)
{
  return comp_template_args (oargs, nargs, /*oldarg_ptr=*/NULL,
			     /*newarg_ptr=*/NULL, /*partial_order=*/true);
}
/* Implement a freelist interface for objects of type T.
Head is a separate object, rather than a regular member, so that we
can define it as a GTY deletable pointer, which is highly
desirable. A data member could be declared that way, but then the
containing object would implicitly get GTY((user)), which would
prevent us from instantiating freelists as global objects.
Although this way we can create freelist global objects, they're
such thin wrappers that instantiating temporaries at every use
loses nothing and saves permanent storage for the freelist object.
Member functions next, anew, poison and reinit have default
implementations that work for most of the types we're interested
in, but if they don't work for some type, they should be explicitly
specialized. See the comments before them for requirements, and
the example specializations for the tree_list_freelist. */
template <typename T>
class freelist
{
  /* Return the next object in a chain.  We could just do type
     punning, but if we access the object with its underlying type, we
     avoid strict-aliasing trouble.  This needs only work between
     poison and reinit.  */
  static T *&next (T *obj) { return obj->next; }

  /* Return a newly allocated, uninitialized or minimally-initialized
     object of type T.  Any initialization performed by anew should
     either remain across the life of the object and the execution of
     poison, or be redone by reinit.  */
  static T *anew () { return ggc_alloc<T> (); }

  /* Optionally scribble all over the bits holding the object, so that
     they become (mostly?) uninitialized memory.  This is called while
     preparing to make the object part of the free list.  */
  static void poison (T *obj) {
    T *p ATTRIBUTE_UNUSED = obj;
    T **q ATTRIBUTE_UNUSED = &next (obj);

#ifdef ENABLE_GC_CHECKING
    /* Poison the data, to indicate the data is garbage.  */
    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, sizeof (*p)));
    memset (p, 0xa5, sizeof (*p));
#endif
    /* Let valgrind know the object is free.  */
    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, sizeof (*p)));

    /* Let valgrind know the next portion of the object is available,
       but uninitialized.  */
    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (q, sizeof (*q)));
  }

  /* Bring an object that underwent at least one lifecycle after anew
     and before the most recent free and poison, back to a usable
     state, reinitializing whatever is needed for it to be
     functionally equivalent to an object just allocated and returned
     by anew.  This may poison or clear the next field, used by
     freelist housekeeping after poison was called.  */
  static void reinit (T *obj) {
    T **q ATTRIBUTE_UNUSED = &next (obj);

#ifdef ENABLE_GC_CHECKING
    memset (q, 0xa5, sizeof (*q));
#endif
    /* Let valgrind know the entire object is available, but
       uninitialized.  */
    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (obj, sizeof (*obj)));
  }

  /* Reference a GTY-deletable pointer that points to the first object
     in the free list proper.  */
  T *&head;
public:
  /* Construct a freelist object chaining objects off of HEAD.  */
  freelist (T *&head) : head(head) {}

  /* Add OBJ to the free object list.  The former head becomes OBJ's
     successor.  */
  void free (T *obj)
  {
    poison (obj);
    next (obj) = head;
    head = obj;
  }

  /* Take an object from the free list, if one is available, or
     allocate a new one.  Objects taken from the free list should be
     regarded as filled with garbage, except for bits that are
     configured to be preserved across free and alloc.  LIFO reuse:
     the most-recently-freed object is handed out first.  */
  T *alloc ()
  {
    if (head)
      {
	T *obj = head;
	head = next (head);
	reinit (obj);
	return obj;
      }
    else
      return anew ();
  }
};
/* Explicitly specialize the interfaces for freelist<tree_node>: we
want to allocate a TREE_LIST using the usual interface, and ensure
TREE_CHAIN remains functional. Alas, we have to duplicate a bit of
build_tree_list logic in reinit, so this could go out of sync. */
template <>
inline tree &
freelist<tree_node>::next (tree obj)
{
  /* TREE_LIST nodes chain through TREE_CHAIN; reuse it as the
     freelist link so no extra storage is needed.  */
  return TREE_CHAIN (obj);
}
template <>
inline tree
freelist<tree_node>::anew ()
{
  /* Allocate a fresh, empty TREE_LIST via the usual constructor.  */
  return build_tree_list (NULL, NULL);
}
template <>
inline void
freelist<tree_node>::poison (tree obj ATTRIBUTE_UNUSED)
{
  /* Scribble over a TREE_LIST being freed, keeping only the pieces the
     freelist needs: the tree code and the TREE_CHAIN link.  */
  int size ATTRIBUTE_UNUSED = sizeof (tree_list);
  tree p ATTRIBUTE_UNUSED = obj;
  tree_base *b ATTRIBUTE_UNUSED = &obj->base;
  tree *q ATTRIBUTE_UNUSED = &next (obj);

#ifdef ENABLE_GC_CHECKING
  gcc_checking_assert (TREE_CODE (obj) == TREE_LIST);
  /* Poison the data, to indicate the data is garbage.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size));
  memset (p, 0xa5, size);
#endif
  /* Let valgrind know the object is free.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size));
  /* But we still want to use the TREE_CODE and TREE_CHAIN parts.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (b, sizeof (*b)));
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (q, sizeof (*q)));

#ifdef ENABLE_GC_CHECKING
  /* With GC checking the base was memset above, so mark it undefined
     and restore the code so TREE_CHAIN keeps working.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (b, sizeof (*b)));
  /* Keep TREE_CHAIN functional.  */
  TREE_SET_CODE (obj, TREE_LIST);
#else
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (b, sizeof (*b)));
#endif
}
template <>
inline void
freelist<tree_node>::reinit (tree obj ATTRIBUTE_UNUSED)
{
  /* Restore a recycled node to a blank TREE_LIST, mirroring part of
     build_tree_list (see the comment above these specializations).  */
  tree_base *b ATTRIBUTE_UNUSED = &obj->base;

#ifdef ENABLE_GC_CHECKING
  gcc_checking_assert (TREE_CODE (obj) == TREE_LIST);
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (obj, sizeof (tree_list)));
  memset (obj, 0, sizeof (tree_list));
#endif

  /* Let valgrind know the entire object is available, but
     uninitialized.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (obj, sizeof (tree_list)));

#ifdef ENABLE_GC_CHECKING
  TREE_SET_CODE (obj, TREE_LIST);
#else
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (b, sizeof (*b)));
#endif
}
/* Point to the first object in the TREE_LIST freelist. */
static GTY((deletable)) tree tree_list_freelist_head;
/* Return the/an actual TREE_LIST freelist. */
static inline freelist<tree_node>
tree_list_freelist ()
{
  /* Wrap the GTY-deletable head in a temporary freelist handle.  */
  return freelist<tree_node> (tree_list_freelist_head);
}
/* Point to the first object in the tinst_level freelist. */
static GTY((deletable)) tinst_level *tinst_level_freelist_head;
/* Return the/an actual tinst_level freelist. */
static inline freelist<tinst_level>
tinst_level_freelist ()
{
  /* Wrap the GTY-deletable head in a temporary freelist handle.  */
  return freelist<tinst_level> (tinst_level_freelist_head);
}
/* Point to the first object in the pending_template freelist. */
static GTY((deletable)) pending_template *pending_template_freelist_head;
/* Return the/an actual pending_template freelist. */
static inline freelist<pending_template>
pending_template_freelist ()
{
  /* Wrap the GTY-deletable head in a temporary freelist handle.  */
  return freelist<pending_template> (pending_template_freelist_head);
}
/* Build the TREE_LIST object out of a split list, store it
permanently, and return it. */
tree
tinst_level::to_list ()
{
  gcc_assert (split_list_p ());
  tree ret = tree_list_freelist ().alloc ();
  TREE_PURPOSE (ret) = tldcl;
  TREE_VALUE (ret) = targs;
  /* Switch this object from split-list to tree-list representation:
     TLDCL now holds the TREE_LIST and TARGS is cleared.  */
  tldcl = ret;
  targs = NULL;
  gcc_assert (tree_list_p ());
  return ret;
}
const unsigned short tinst_level::refcount_infinity;
/* Increment OBJ's refcount unless it is already infinite. */
static tinst_level *
inc_refcount_use (tinst_level *obj)
{
  /* NULL and saturated (infinite-refcount) objects are left alone.  */
  if (obj != NULL && obj->refcount != tinst_level::refcount_infinity)
    obj->refcount++;
  return obj;
}
/* Release storage for OBJ and node, if it's a TREE_LIST. */
void
tinst_level::free (tinst_level *obj)
{
  /* If OBJ carries a TREE_LIST node, return that node to its own
     freelist before recycling OBJ itself.  */
  if (obj->tree_list_p ())
    {
      tree node = obj->get_node ();
      tree_list_freelist ().free (node);
    }
  tinst_level_freelist ().free (obj);
}
/* Decrement OBJ's refcount if not infinite. If it reaches zero, release
OBJ's DECL and OBJ, and start over with the tinst_level object that
used to be referenced by OBJ's NEXT. */
static void
dec_refcount_use (tinst_level *obj)
{
  /* Walk the NEXT chain, releasing each object whose count drops to
     zero; stop at the first survivor (or a saturated refcount).  */
  for (tinst_level *cur = obj; cur != NULL; )
    {
      if (cur->refcount == tinst_level::refcount_infinity)
	break;
      if (--cur->refcount != 0)
	break;
      tinst_level *successor = cur->next;
      tinst_level::free (cur);
      cur = successor;
    }
}
/* Modify PTR so that it points to OBJ, adjusting the refcounts of OBJ
and of the former PTR. Omitting the second argument is equivalent
to passing (T*)NULL; this is allowed because passing the
zero-valued integral constant NULL confuses type deduction and/or
overload resolution. */
template <typename T>
static void
set_refcount_ptr (T *& ptr, T *obj = NULL)
{
  /* Retain the new referent before releasing the old one, so that
     PTR == OBJ (self-assignment) cannot free the object.  */
  T *previous = ptr;
  ptr = inc_refcount_use (obj);
  dec_refcount_use (previous);
}
static void
add_pending_template (tree d)
{
  tree ti = (TYPE_P (d)
	     ? CLASSTYPE_TEMPLATE_INFO (d)
	     : DECL_TEMPLATE_INFO (d));
  struct pending_template *pt;
  int level;

  /* Already queued: nothing to do.  */
  if (TI_PENDING_TEMPLATE_FLAG (ti))
    return;

  /* We are called both from instantiate_decl, where we've already had a
     tinst_level pushed, and instantiate_template, where we haven't.
     Compensate.  */
  gcc_assert (TREE_CODE (d) != TREE_LIST);
  level = !current_tinst_level
	  || current_tinst_level->maybe_get_node () != d;

  if (level)
    push_tinst_level (d);

  pt = pending_template_freelist ().alloc ();
  pt->next = NULL;
  pt->tinst = NULL;
  set_refcount_ptr (pt->tinst, current_tinst_level);
  /* Append PT at the tail of the pending-template list.  */
  if (last_pending_template)
    last_pending_template->next = pt;
  else
    pending_templates = pt;
  last_pending_template = pt;

  TI_PENDING_TEMPLATE_FLAG (ti) = 1;

  if (level)
    pop_tinst_level ();
}
/* Return a TEMPLATE_ID_EXPR corresponding to the indicated FNS and
ARGLIST. Valid choices for FNS are given in the cp-tree.def
documentation for TEMPLATE_ID_EXPR. */
tree
lookup_template_function (tree fns, tree arglist)
{
  /* Propagate earlier errors.  */
  if (fns == error_mark_node || arglist == error_mark_node)
    return error_mark_node;

  gcc_assert (!arglist || TREE_CODE (arglist) == TREE_VEC);

  if (!is_overloaded_fn (fns) && !identifier_p (fns))
    {
      error ("%q#D is not a function template", fns);
      return error_mark_node;
    }

  if (BASELINK_P (fns))
    {
      /* Wrap the baselink's functions in the TEMPLATE_ID_EXPR and
	 return the baselink itself.  */
      tree template_id = build2 (TEMPLATE_ID_EXPR,
				 unknown_type_node,
				 BASELINK_FUNCTIONS (fns),
				 arglist);
      BASELINK_FUNCTIONS (fns) = template_id;
      return fns;
    }

  return build2 (TEMPLATE_ID_EXPR, unknown_type_node, fns, arglist);
}
/* Within the scope of a template class S<T>, the name S gets bound
(in build_self_reference) to a TYPE_DECL for the class, not a
TEMPLATE_DECL. If DECL is a TYPE_DECL for current_class_type,
or one of its enclosing classes, and that type is a template,
return the associated TEMPLATE_DECL. Otherwise, the original
DECL is returned.
Also handle the case when DECL is a TREE_LIST of ambiguous
injected-class-names from different bases. */
tree
maybe_get_template_decl_from_type_decl (tree decl)
{
  if (decl == NULL_TREE)
    return decl;

  /* DR 176: A lookup that finds an injected-class-name (10.2
     [class.member.lookup]) can result in an ambiguity in certain cases
     (for example, if it is found in more than one base class). If all of
     the injected-class-names that are found refer to specializations of
     the same class template, and if the name is followed by a
     template-argument-list, the reference refers to the class template
     itself and not a specialization thereof, and is not ambiguous.  */
  if (TREE_CODE (decl) == TREE_LIST)
    {
      tree common_tmpl = NULL_TREE;
      tree iter;
      for (iter = decl; iter; iter = TREE_CHAIN (iter))
	{
	  tree elt
	    = maybe_get_template_decl_from_type_decl (TREE_VALUE (iter));
	  if (common_tmpl == NULL_TREE)
	    common_tmpl = elt;
	  else if (common_tmpl != elt)
	    break;
	}
      /* All entries agreed on a single template iff the loop ran off
	 the end of the list.  */
      if (common_tmpl && iter == NULL_TREE)
	return common_tmpl;
      return decl;
    }

  if (decl != NULL_TREE
      && DECL_SELF_REFERENCE_P (decl)
      && CLASSTYPE_TEMPLATE_INFO (TREE_TYPE (decl)))
    return CLASSTYPE_TI_TEMPLATE (TREE_TYPE (decl));
  return decl;
}
/* Given an IDENTIFIER_NODE (or type TEMPLATE_DECL) and a chain of
   parameters, find the desired type.

   D1 is the PTYPENAME terminal, and ARGLIST is the list of arguments.

   IN_DECL, if non-NULL, is the template declaration we are trying to
   instantiate.

   If ENTERING_SCOPE is nonzero, we are about to enter the scope of
   the class we are looking up.

   Issue error and warning messages under control of COMPLAIN.

   If the template class is really a local class in a template
   function, then the FUNCTION_CONTEXT is the function in which it is
   being instantiated.

   ??? Note that this function is currently called *twice* for each
   template-id: the first time from the parser, while creating the
   incomplete type (finish_template_type), and the second type during the
   real instantiation (instantiate_template_class).  This is surely something
   that we want to avoid.  It also causes some problems with argument
   coercion (see convert_nontype_argument for more information on this).  */

static tree
lookup_template_class_1 (tree d1, tree arglist, tree in_decl, tree context,
			 int entering_scope, tsubst_flags_t complain)
{
  tree templ = NULL_TREE, parmlist;
  tree t;
  spec_entry **slot;
  spec_entry *entry;
  spec_entry elt;
  hashval_t hash;

  /* Step 1: resolve D1 (which may be an identifier, a TYPE_DECL, a
     type, or a TEMPLATE_DECL) to the TEMPLATE_DECL in TEMPL.  */
  if (identifier_p (d1))
    {
      tree value = innermost_non_namespace_value (d1);
      if (value && DECL_TEMPLATE_TEMPLATE_PARM_P (value))
	templ = value;
      else
	{
	  if (context)
	    push_decl_namespace (context);
	  templ = lookup_name (d1);
	  templ = maybe_get_template_decl_from_type_decl (templ);
	  if (context)
	    pop_decl_namespace ();
	}
      if (templ)
	context = DECL_CONTEXT (templ);
    }
  else if (TREE_CODE (d1) == TYPE_DECL && MAYBE_CLASS_TYPE_P (TREE_TYPE (d1)))
    {
      tree type = TREE_TYPE (d1);

      /* If we are declaring a constructor, say A<T>::A<T>, we will get
	 an implicit typename for the second A.  Deal with it.  */
      if (TREE_CODE (type) == TYPENAME_TYPE && TREE_TYPE (type))
	type = TREE_TYPE (type);

      if (CLASSTYPE_TEMPLATE_INFO (type))
	{
	  templ = CLASSTYPE_TI_TEMPLATE (type);
	  d1 = DECL_NAME (templ);
	}
    }
  else if (TREE_CODE (d1) == ENUMERAL_TYPE
	   || (TYPE_P (d1) && MAYBE_CLASS_TYPE_P (d1)))
    {
      templ = TYPE_TI_TEMPLATE (d1);
      d1 = DECL_NAME (templ);
    }
  else if (DECL_TYPE_TEMPLATE_P (d1))
    {
      templ = d1;
      d1 = DECL_NAME (templ);
      context = DECL_CONTEXT (templ);
    }
  else if (DECL_TEMPLATE_TEMPLATE_PARM_P (d1))
    {
      templ = d1;
      d1 = DECL_NAME (templ);
    }

  /* Issue an error message if we didn't find a template.  */
  if (! templ)
    {
      if (complain & tf_error)
	error ("%qT is not a template", d1);
      return error_mark_node;
    }

  if (TREE_CODE (templ) != TEMPLATE_DECL
	 /* Make sure it's a user visible template, if it was named by
	    the user.  */
      || ((complain & tf_user) && !DECL_TEMPLATE_PARM_P (templ)
	  && !PRIMARY_TEMPLATE_P (templ)))
    {
      if (complain & tf_error)
	{
	  error ("non-template type %qT used as a template", d1);
	  if (in_decl)
	    error ("for template declaration %q+D", in_decl);
	}
      return error_mark_node;
    }

  complain &= ~tf_user;

  /* An alias that just changes the name of a template is equivalent to the
     other template, so if any of the arguments are pack expansions, strip
     the alias to avoid problems with a pack expansion passed to a non-pack
     alias template parameter (DR 1430).  */
  if (pack_expansion_args_count (INNERMOST_TEMPLATE_ARGS (arglist)))
    templ = get_underlying_template (templ);

  /* Step 2: a template template parameter is bound, not instantiated.  */
  if (DECL_TEMPLATE_TEMPLATE_PARM_P (templ))
    {
      tree parm;
      tree arglist2 = coerce_template_args_for_ttp (templ, arglist, complain);
      if (arglist2 == error_mark_node
	  || (!uses_template_parms (arglist2)
	      && check_instantiated_args (templ, arglist2, complain)))
	return error_mark_node;

      parm = bind_template_template_parm (TREE_TYPE (templ), arglist2);
      return parm;
    }
  else
    {
      tree template_type = TREE_TYPE (templ);
      tree gen_tmpl;
      tree type_decl;
      tree found = NULL_TREE;
      int arg_depth;
      int parm_depth;
      int is_dependent_type;
      int use_partial_inst_tmpl = false;

      if (template_type == error_mark_node)
	/* An error occurred while building the template TEMPL, and a
	   diagnostic has most certainly been emitted for that
	   already.  Let's propagate that error.  */
	return error_mark_node;

      gen_tmpl = most_general_template (templ);
      parmlist = DECL_TEMPLATE_PARMS (gen_tmpl);
      parm_depth = TMPL_PARMS_DEPTH (parmlist);
      arg_depth = TMPL_ARGS_DEPTH (arglist);

      if (arg_depth == 1 && parm_depth > 1)
	{
	  /* We've been given an incomplete set of template arguments.
	     For example, given:

	       template <class T> struct S1 {
		 template <class U> struct S2 {};
		 template <class U> struct S2<U*> {};
		};

	     we will be called with an ARGLIST of `U*', but the
	     TEMPLATE will be `template <class T> template
	     <class U> struct S1<T>::S2'.  We must fill in the missing
	     arguments.  */
	  tree ti = TYPE_TEMPLATE_INFO_MAYBE_ALIAS (TREE_TYPE (templ));
	  arglist = add_outermost_template_args (TI_ARGS (ti), arglist);
	  arg_depth = TMPL_ARGS_DEPTH (arglist);
	}

      /* Now we should have enough arguments.  */
      gcc_assert (parm_depth == arg_depth);

      /* From here on, we're only interested in the most general
	 template.  */

      /* Calculate the BOUND_ARGS.  These will be the args that are
	 actually tsubst'd into the definition to create the
	 instantiation.  */
      arglist = coerce_innermost_template_parms (parmlist, arglist, gen_tmpl,
						 complain,
						 /*require_all_args=*/true,
						 /*use_default_args=*/true);

      if (arglist == error_mark_node)
	/* We were unable to bind the arguments.  */
	return error_mark_node;

      /* In the scope of a template class, explicit references to the
	 template class refer to the type of the template, not any
	 instantiation of it.  For example, in:

	   template <class T> class C { void f(C<T>); }

	 the `C<T>' is just the same as `C'.  Outside of the
	 class, however, such a reference is an instantiation.  */
      if (entering_scope
	  || !PRIMARY_TEMPLATE_P (gen_tmpl)
	  || currently_open_class (template_type))
	{
	  tree tinfo = TYPE_TEMPLATE_INFO (template_type);

	  if (tinfo && comp_template_args (TI_ARGS (tinfo), arglist))
	    return template_type;
	}

      /* If we already have this specialization, return it.  */
      elt.tmpl = gen_tmpl;
      elt.args = arglist;
      elt.spec = NULL_TREE;
      hash = spec_hasher::hash (&elt);
      entry = type_specializations->find_with_hash (&elt, hash);

      if (entry)
	return entry->spec;

      /* If the template's constraints are not satisfied,
         then we cannot form a valid type.

         Note that the check is deferred until after the hash
         lookup.  This prevents redundant checks on previously
         instantiated specializations.  */
      if (flag_concepts
	  && !DECL_ALIAS_TEMPLATE_P (gen_tmpl)
	  && !constraints_satisfied_p (gen_tmpl, arglist))
	{
	  if (complain & tf_error)
	    {
	      auto_diagnostic_group d;
	      error ("template constraint failure for %qD", gen_tmpl);
	      diagnose_constraints (input_location, gen_tmpl, arglist);
	    }
	  return error_mark_node;
	}

      is_dependent_type = uses_template_parms (arglist);

      /* If the deduced arguments are invalid, then the binding
	 failed.  */
      if (!is_dependent_type
	  && check_instantiated_args (gen_tmpl,
				      INNERMOST_TEMPLATE_ARGS (arglist),
				      complain))
	return error_mark_node;

      if (!is_dependent_type
	  && !PRIMARY_TEMPLATE_P (gen_tmpl)
	  && !LAMBDA_TYPE_P (TREE_TYPE (gen_tmpl))
	  && TREE_CODE (CP_DECL_CONTEXT (gen_tmpl)) == NAMESPACE_DECL)
	{
	  found = xref_tag_from_type (TREE_TYPE (gen_tmpl),
				      DECL_NAME (gen_tmpl),
				      /*tag_scope=*/ts_global);
	  return found;
	}

      /* Step 3: substitute the arguments into the enclosing context.  */
      context = DECL_CONTEXT (gen_tmpl);
      if (context && TYPE_P (context))
	{
	  context = tsubst_aggr_type (context, arglist, complain, in_decl, true);
	  context = complete_type (context);
	}
      else
	context = tsubst (context, arglist, complain, in_decl);

      if (context == error_mark_node)
	return error_mark_node;

      if (!context)
	context = global_namespace;

      /* Create the type.  */
      if (DECL_ALIAS_TEMPLATE_P (gen_tmpl))
	{
	  /* The user referred to a specialization of an alias
	     template represented by GEN_TMPL.

	     [temp.alias]/2 says:

	         When a template-id refers to the specialization of an
		 alias template, it is equivalent to the associated
		 type obtained by substitution of its
		 template-arguments for the template-parameters in the
		 type-id of the alias template.  */

	  t = tsubst (TREE_TYPE (gen_tmpl), arglist, complain, in_decl);
	  /* Note that the call above (by indirectly calling
	     register_specialization in tsubst_decl) registers the
	     TYPE_DECL representing the specialization of the alias
	     template.  So next time someone substitutes ARGLIST for
	     the template parms into the alias template (GEN_TMPL),
	     she'll get that TYPE_DECL back.  */

	  if (t == error_mark_node)
	    return t;
	}
      else if (TREE_CODE (template_type) == ENUMERAL_TYPE)
	{
	  if (!is_dependent_type)
	    {
	      set_current_access_from_decl (TYPE_NAME (template_type));
	      t = start_enum (TYPE_IDENTIFIER (template_type), NULL_TREE,
			      tsubst (ENUM_UNDERLYING_TYPE (template_type),
				      arglist, complain, in_decl),
			      tsubst_attributes (TYPE_ATTRIBUTES (template_type),
						 arglist, complain, in_decl),
			      SCOPED_ENUM_P (template_type), NULL);

	      if (t == error_mark_node)
		return t;
	    }
	  else
	    {
	      /* We don't want to call start_enum for this type, since
		 the values for the enumeration constants may involve
		 template parameters.  And, no one should be interested
		 in the enumeration constants for such a type.  */
	      t = cxx_make_type (ENUMERAL_TYPE);
	      SET_SCOPED_ENUM_P (t, SCOPED_ENUM_P (template_type));
	    }
	  SET_OPAQUE_ENUM_P (t, OPAQUE_ENUM_P (template_type));
	  ENUM_FIXED_UNDERLYING_TYPE_P (t)
	    = ENUM_FIXED_UNDERLYING_TYPE_P (template_type);
	}
      else if (CLASS_TYPE_P (template_type))
	{
	  /* Lambda closures are regenerated in tsubst_lambda_expr, not
	     instantiated here.  */
	  gcc_assert (!LAMBDA_TYPE_P (template_type));

	  t = make_class_type (TREE_CODE (template_type));
	  CLASSTYPE_DECLARED_CLASS (t)
	    = CLASSTYPE_DECLARED_CLASS (template_type);
	  SET_CLASSTYPE_IMPLICIT_INSTANTIATION (t);

	  /* A local class.  Make sure the decl gets registered properly.  */
	  if (context == current_function_decl)
	    if (pushtag (DECL_NAME (gen_tmpl), t, /*tag_scope=*/ts_current)
		== error_mark_node)
	      return error_mark_node;

	  if (comp_template_args (CLASSTYPE_TI_ARGS (template_type), arglist))
	    /* This instantiation is another name for the primary
	       template type.  Set the TYPE_CANONICAL field
	       appropriately.  */
	    TYPE_CANONICAL (t) = template_type;
	  else if (any_template_arguments_need_structural_equality_p (arglist))
	    /* Some of the template arguments require structural
	       equality testing, so this template class requires
	       structural equality testing.  */
	    SET_TYPE_STRUCTURAL_EQUALITY (t);
	}
      else
	gcc_unreachable ();

      /* If we called start_enum or pushtag above, this information
	 will already be set up.  */
      if (!TYPE_NAME (t))
	{
	  TYPE_CONTEXT (t) = FROB_CONTEXT (context);

	  type_decl = create_implicit_typedef (DECL_NAME (gen_tmpl), t);
	  DECL_CONTEXT (type_decl) = TYPE_CONTEXT (t);
	  DECL_SOURCE_LOCATION (type_decl)
	    = DECL_SOURCE_LOCATION (TYPE_STUB_DECL (template_type));
	}
      else
	type_decl = TYPE_NAME (t);

      if (CLASS_TYPE_P (template_type))
	{
	  /* Copy access and visibility from the template's own decl.  */
	  TREE_PRIVATE (type_decl)
	    = TREE_PRIVATE (TYPE_MAIN_DECL (template_type));
	  TREE_PROTECTED (type_decl)
	    = TREE_PROTECTED (TYPE_MAIN_DECL (template_type));
	  if (CLASSTYPE_VISIBILITY_SPECIFIED (template_type))
	    {
	      DECL_VISIBILITY_SPECIFIED (type_decl) = 1;
	      DECL_VISIBILITY (type_decl) = CLASSTYPE_VISIBILITY (template_type);
	    }
	}

      if (OVERLOAD_TYPE_P (t)
	  && !DECL_ALIAS_TEMPLATE_P (gen_tmpl))
	{
	  /* Propagate the abi_tag and may_alias attributes from the
	     template to the specialization.  */
	  static const char *tags[] = {"abi_tag", "may_alias"};

	  for (unsigned ix = 0; ix != 2; ix++)
	    {
	      tree attributes
		= lookup_attribute (tags[ix], TYPE_ATTRIBUTES (template_type));

	      if (attributes)
		TYPE_ATTRIBUTES (t)
		  = tree_cons (TREE_PURPOSE (attributes),
			       TREE_VALUE (attributes),
			       TYPE_ATTRIBUTES (t));
	    }
	}

      /* Let's consider the explicit specialization of a member
         of a class template specialization that is implicitly instantiated,
	 e.g.:
	     template<class T>
	     struct S
	     {
	       template<class U> struct M {}; //#0
	     };

	     template<>
	     template<>
	     struct S<int>::M<char> //#1
	     {
	       int i;
	     };
	[temp.expl.spec]/4 says this is valid.

	In this case, when we write:
	S<int>::M<char> m;

	M is instantiated from the CLASSTYPE_TI_TEMPLATE of #1, not from
	the one of #0.

	When we encounter #1, we want to store the partial instantiation
	of M (template<class T> S<int>::M<T>) in its CLASSTYPE_TI_TEMPLATE.

	For all cases other than this "explicit specialization of member of a
	class template", we just want to store the most general template into
	the CLASSTYPE_TI_TEMPLATE of M.

	This case of "explicit specialization of member of a class template"
	only happens when:
	1/ the enclosing class is an instantiation of, and therefore not
	the same as, the context of the most general template, and
	2/ we aren't looking at the partial instantiation itself, i.e.
	the innermost arguments are not the same as the innermost parms of
	the most general template.

	So it's only when 1/ and 2/ happens that we want to use the partial
	instantiation of the member template in lieu of its most general
	template.  */

      if (PRIMARY_TEMPLATE_P (gen_tmpl)
	  && TMPL_ARGS_HAVE_MULTIPLE_LEVELS (arglist)
	  /* the enclosing class must be an instantiation...  */
	  && CLASS_TYPE_P (context)
	  && !same_type_p (context, DECL_CONTEXT (gen_tmpl)))
	{
	  /* Temporarily hide the innermost level of ARGLIST while
	     substituting into the partial instantiation's args.  */
	  TREE_VEC_LENGTH (arglist)--;
	  ++processing_template_decl;
	  tree tinfo = TYPE_TEMPLATE_INFO_MAYBE_ALIAS (TREE_TYPE (gen_tmpl));
	  tree partial_inst_args =
	    tsubst (INNERMOST_TEMPLATE_ARGS (TI_ARGS (tinfo)),
		    arglist, complain, NULL_TREE);
	  --processing_template_decl;
	  TREE_VEC_LENGTH (arglist)++;
	  if (partial_inst_args == error_mark_node)
	    return error_mark_node;
	  use_partial_inst_tmpl =
	    /*...and we must not be looking at the partial instantiation
	     itself.  */
	    !comp_template_args (INNERMOST_TEMPLATE_ARGS (arglist),
				 partial_inst_args);
	}

      if (!use_partial_inst_tmpl)
	/* This case is easy; there are no member templates involved.  */
	found = gen_tmpl;
      else
	{
	  /* This is a full instantiation of a member template.  Find
	     the partial instantiation of which this is an instance.  */

	  /* Temporarily reduce by one the number of levels in the ARGLIST
	     so as to avoid comparing the last set of arguments.  */
	  TREE_VEC_LENGTH (arglist)--;
	  /* We don't use COMPLAIN in the following call because this isn't
	     the immediate context of deduction.  For instance, tf_partial
	     could be set here as we might be at the beginning of template
	     argument deduction when any explicitly specified template
	     arguments are substituted into the function type.  tf_partial
	     could lead into trouble because we wouldn't find the partial
	     instantiation that might have been created outside tf_partial
	     context, because the levels of template parameters wouldn't
	     match, because in a tf_partial context, tsubst doesn't reduce
	     TEMPLATE_PARM_LEVEL.  */
	  found = tsubst (gen_tmpl, arglist, tf_none, NULL_TREE);
	  TREE_VEC_LENGTH (arglist)++;
	  /* FOUND is either a proper class type, or an alias
	     template specialization.  In the later case, it's a
	     TYPE_DECL, resulting from the substituting of arguments
	     for parameters in the TYPE_DECL of the alias template
	     done earlier.  So be careful while getting the template
	     of FOUND.  */
	  found = (TREE_CODE (found) == TEMPLATE_DECL
		   ? found
		   : (TREE_CODE (found) == TYPE_DECL
		      ? DECL_TI_TEMPLATE (found)
		      : CLASSTYPE_TI_TEMPLATE (found)));

	  if (DECL_CLASS_TEMPLATE_P (found)
	      && CLASSTYPE_TEMPLATE_SPECIALIZATION (TREE_TYPE (found)))
	    {
	      /* If this partial instantiation is specialized, we want to
		 use it for hash table lookup.  */
	      elt.tmpl = found;
	      elt.args = arglist = INNERMOST_TEMPLATE_ARGS (arglist);
	      hash = spec_hasher::hash (&elt);
	    }
	}

      /* Build template info for the new specialization.  */
      SET_TYPE_TEMPLATE_INFO (t, build_template_info (found, arglist));

      /* Register the new specialization in the hash table so later
	 lookups (including recursive ones from tsubst_enum below)
	 find it.  */
      elt.spec = t;
      slot = type_specializations->find_slot_with_hash (&elt, hash, INSERT);
      gcc_checking_assert (*slot == NULL);
      entry = ggc_alloc<spec_entry> ();
      *entry = elt;
      *slot = entry;

      /* Note this use of the partial instantiation so we can check it
	 later in maybe_process_partial_specialization.  */
      DECL_TEMPLATE_INSTANTIATIONS (found)
	= tree_cons (arglist, t,
		     DECL_TEMPLATE_INSTANTIATIONS (found));

      if (TREE_CODE (template_type) == ENUMERAL_TYPE && !is_dependent_type
	  && !DECL_ALIAS_TEMPLATE_P (gen_tmpl))
	/* Now that the type has been registered on the instantiations
	   list, we set up the enumerators.  Because the enumeration
	   constants may involve the enumeration type itself, we make
	   sure to register the type first, and then create the
	   constants.  That way, doing tsubst_expr for the enumeration
	   constants won't result in recursive calls here; we'll find
	   the instantiation and exit above.  */
	tsubst_enum (template_type, t, arglist);

      if (CLASS_TYPE_P (template_type) && is_dependent_type)
	/* If the type makes use of template parameters, the
	   code that generates debugging information will crash.  */
	DECL_IGNORED_P (TYPE_MAIN_DECL (t)) = 1;

      /* Possibly limit visibility based on template args.  */
      TREE_PUBLIC (type_decl) = 1;
      determine_visibility (type_decl);

      inherit_targ_abi_tags (t);

      return t;
    }
}
/* Wrapper for lookup_template_class_1: performs the same lookup while
   accounting the time spent under the TV_TEMPLATE_INST timer.  */

tree
lookup_template_class (tree d1, tree arglist, tree in_decl, tree context,
		       int entering_scope, tsubst_flags_t complain)
{
  timevar_push (TV_TEMPLATE_INST);
  tree result = lookup_template_class_1 (d1, arglist, in_decl, context,
					 entering_scope, complain);
  timevar_pop (TV_TEMPLATE_INST);
  return result;
}
/* Return a TEMPLATE_ID_EXPR for the given variable template and ARGLIST.  */

tree
lookup_template_variable (tree templ, tree arglist)
{
  if (!flag_concepts || !variable_concept_p (templ))
    /* The type of the expression is NULL_TREE since the template-id could
       refer to an explicit or partial specialization.  */
    return build2 (TEMPLATE_ID_EXPR, NULL_TREE, templ, arglist);

  /* A variable concept is turned into a concept check instead.  */
  return build_concept_check (templ, arglist, tf_none);
}
/* Instantiate a variable declaration from a TEMPLATE_ID_EXPR for use.  */

tree
finish_template_variable (tree var, tsubst_flags_t complain)
{
  tree tmpl = TREE_OPERAND (var, 0);
  tree args = TREE_OPERAND (var, 1);

  /* Prepend the arguments of any enclosing templates, then coerce the
     full set against the most general template's parameter list.  */
  tree outer_args = DECL_TI_ARGS (DECL_TEMPLATE_RESULT (tmpl));
  args = add_outermost_template_args (outer_args, args);

  tmpl = most_general_template (tmpl);
  tree parm_list = DECL_TEMPLATE_PARMS (tmpl);
  args = coerce_innermost_template_parms (parm_list, args, tmpl, complain,
					  /*req_all*/true,
					  /*use_default*/true);

  /* Reject the use if the template's constraints are not satisfied.  */
  if (flag_concepts && !constraints_satisfied_p (tmpl, args))
    {
      if (complain & tf_error)
	{
	  auto_diagnostic_group d;
	  error ("use of invalid variable template %qE", var);
	  diagnose_constraints (location_of (var), tmpl, args);
	}
      return error_mark_node;
    }

  return instantiate_template (tmpl, args, complain);
}
/* Construct a TEMPLATE_ID_EXPR for the given variable template TEMPL having
   TARGS template args, and instantiate it if it's not dependent.  */

tree
lookup_and_finish_template_variable (tree templ, tree targs,
				     tsubst_flags_t complain)
{
  tree var = lookup_template_variable (templ, targs);

  /* A dependent template-id must wait until instantiation time.  */
  if (any_dependent_template_arguments_p (targs))
    return convert_from_reference (var);

  var = finish_template_variable (var, complain);
  mark_used (var);
  return convert_from_reference (var);
}
/* Client data passed through walk_tree to for_each_template_parm_r.  */
struct pair_fn_data
{
  /* Callback invoked on each template parameter found.  */
  tree_fn_t fn;
  /* Optional callback tried on every node before normal handling.  */
  tree_fn_t any_fn;
  /* Opaque client data forwarded to FN and ANY_FN.  */
  void *data;
  /* True when we should also visit template parameters that occur in
     non-deduced contexts.  */
  bool include_nondeduced_p;
  /* Set of already-visited nodes, shared across recursive walks.  */
  hash_set<tree> *visited;
};
/* Called from for_each_template_parm via walk_tree.  Returns a non-null
   tree to stop the walk (the matching node, or error_mark_node for
   constructs rejected when FN is null), or NULL_TREE to continue.  */

static tree
for_each_template_parm_r (tree *tp, int *walk_subtrees, void *d)
{
  tree t = *tp;
  struct pair_fn_data *pfd = (struct pair_fn_data *) d;
  tree_fn_t fn = pfd->fn;
  void *data = pfd->data;
  tree result = NULL_TREE;

  /* Recurse into NODE; if the recursive walk finds something, stash it
     in RESULT and bail out of this node entirely.  */
#define WALK_SUBTREE(NODE)						\
  do									\
    {									\
      result = for_each_template_parm (NODE, fn, data, pfd->visited,	\
				       pfd->include_nondeduced_p,	\
				       pfd->any_fn);			\
      if (result) goto out;						\
    }									\
  while (0)

  /* ANY_FN gets first crack at every node, of any kind.  */
  if (pfd->any_fn && (*pfd->any_fn)(t, data))
    return t;

  if (TYPE_P (t)
      && (pfd->include_nondeduced_p || TREE_CODE (t) != TYPENAME_TYPE))
    WALK_SUBTREE (TYPE_CONTEXT (t));

  switch (TREE_CODE (t))
    {
    case RECORD_TYPE:
      if (TYPE_PTRMEMFUNC_P (t))
	break;
      /* Fall through.  */

    case UNION_TYPE:
    case ENUMERAL_TYPE:
      if (!TYPE_TEMPLATE_INFO (t))
	*walk_subtrees = 0;
      else
	WALK_SUBTREE (TYPE_TI_ARGS (t));
      break;

    case INTEGER_TYPE:
      /* Array bounds and the like may involve template parameters.  */
      WALK_SUBTREE (TYPE_MIN_VALUE (t));
      WALK_SUBTREE (TYPE_MAX_VALUE (t));
      break;

    case METHOD_TYPE:
      /* Since we're not going to walk subtrees, we have to do this
	 explicitly here.  */
      WALK_SUBTREE (TYPE_METHOD_BASETYPE (t));
      /* Fall through.  */

    case FUNCTION_TYPE:
      /* Check the return type.  */
      WALK_SUBTREE (TREE_TYPE (t));

      /* Check the parameter types.  Since default arguments are not
	 instantiated until they are needed, the TYPE_ARG_TYPES may
	 contain expressions that involve template parameters.  But,
	 no-one should be looking at them yet.  And, once they're
	 instantiated, they don't contain template parameters, so
	 there's no point in looking at them then, either.  */
      {
	tree parm;

	for (parm = TYPE_ARG_TYPES (t); parm; parm = TREE_CHAIN (parm))
	  WALK_SUBTREE (TREE_VALUE (parm));

	/* Since we've already handled the TYPE_ARG_TYPES, we don't
	   want walk_tree walking into them itself.  */
	*walk_subtrees = 0;
      }

      if (flag_noexcept_type)
	{
	  /* The exception specification is part of the function type
	     and may itself be dependent.  */
	  tree spec = TYPE_RAISES_EXCEPTIONS (t);
	  if (spec)
	    WALK_SUBTREE (TREE_PURPOSE (spec));
	}
      break;

    case TYPEOF_TYPE:
    case DECLTYPE_TYPE:
    case UNDERLYING_TYPE:
      /* These are non-deduced contexts; only walked when requested.  */
      if (pfd->include_nondeduced_p
	  && for_each_template_parm (TYPE_VALUES_RAW (t), fn, data,
				     pfd->visited,
				     pfd->include_nondeduced_p,
				     pfd->any_fn))
	return error_mark_node;
      *walk_subtrees = false;
      break;

    case FUNCTION_DECL:
    case VAR_DECL:
      if (DECL_LANG_SPECIFIC (t) && DECL_TEMPLATE_INFO (t))
	WALK_SUBTREE (DECL_TI_ARGS (t));
      /* Fall through.  */

    case PARM_DECL:
    case CONST_DECL:
      if (TREE_CODE (t) == CONST_DECL && DECL_TEMPLATE_PARM_P (t))
	WALK_SUBTREE (DECL_INITIAL (t));
      if (DECL_CONTEXT (t)
	  && pfd->include_nondeduced_p)
	WALK_SUBTREE (DECL_CONTEXT (t));
      break;

    case BOUND_TEMPLATE_TEMPLATE_PARM:
      /* Record template parameters such as `T' inside `TT<T>'.  */
      WALK_SUBTREE (TYPE_TI_ARGS (t));
      /* Fall through.  */

    case TEMPLATE_TEMPLATE_PARM:
    case TEMPLATE_TYPE_PARM:
    case TEMPLATE_PARM_INDEX:
      /* An actual template parameter: report it to FN, or (with no FN)
	 treat any parameter at all as a hit.  */
      if (fn && (*fn)(t, data))
	return t;
      else if (!fn)
	return t;
      break;

    case TEMPLATE_DECL:
      /* A template template parameter is encountered.  */
      if (DECL_TEMPLATE_TEMPLATE_PARM_P (t))
	WALK_SUBTREE (TREE_TYPE (t));

      /* Already substituted template template parameter */
      *walk_subtrees = 0;
      break;

    case TYPENAME_TYPE:
      /* A template-id in a TYPENAME_TYPE might be a deduced context after
	 partial instantiation.  */
      WALK_SUBTREE (TYPENAME_TYPE_FULLNAME (t));
      break;

    case CONSTRUCTOR:
      if (TREE_TYPE (t) && TYPE_PTRMEMFUNC_P (TREE_TYPE (t))
	  && pfd->include_nondeduced_p)
	WALK_SUBTREE (TYPE_PTRMEMFUNC_FN_TYPE (TREE_TYPE (t)));
      break;

    case INDIRECT_REF:
    case COMPONENT_REF:
      /* If there's no type, then this thing must be some expression
	 involving template parameters.  */
      if (!fn && !TREE_TYPE (t))
	return error_mark_node;
      break;

    case MODOP_EXPR:
    case CAST_EXPR:
    case IMPLICIT_CONV_EXPR:
    case REINTERPRET_CAST_EXPR:
    case CONST_CAST_EXPR:
    case STATIC_CAST_EXPR:
    case DYNAMIC_CAST_EXPR:
    case ARROW_EXPR:
    case DOTSTAR_EXPR:
    case TYPEID_EXPR:
    case PSEUDO_DTOR_EXPR:
      /* With no FN, these expression forms count as dependent.  */
      if (!fn)
	return error_mark_node;
      break;

    case SCOPE_REF:
      if (pfd->include_nondeduced_p)
	WALK_SUBTREE (TREE_OPERAND (t, 0));
      break;

    case REQUIRES_EXPR:
      {
	if (!fn)
	  return error_mark_node;

	/* Recursively walk the type of each constraint variable.  */
	tree p = TREE_OPERAND (t, 0);
	while (p)
	  {
	    WALK_SUBTREE (TREE_TYPE (p));
	    p = TREE_CHAIN (p);
	  }
      }
      break;

    default:
      break;
    }

#undef WALK_SUBTREE

  /* We didn't find any template parameters we liked.  */
 out:
  return result;
}
/* For each TEMPLATE_TYPE_PARM, TEMPLATE_TEMPLATE_PARM,
   BOUND_TEMPLATE_TEMPLATE_PARM or TEMPLATE_PARM_INDEX in T,
   call FN with the parameter and the DATA.
   If FN returns nonzero, the iteration is terminated, and
   for_each_template_parm returns 1.  Otherwise, the iteration
   continues.  If FN never returns a nonzero value, the value
   returned by for_each_template_parm is 0.  If FN is NULL, it is
   considered to be the function which always returns 1.

   If INCLUDE_NONDEDUCED_P, then this routine will also visit template
   parameters that occur in non-deduced contexts.  When false, only
   visits those template parameters that can be deduced.  */

static tree
for_each_template_parm (tree t, tree_fn_t fn, void* data,
			hash_set<tree> *visited,
			bool include_nondeduced_p,
			tree_fn_t any_fn)
{
  /* Package up the callbacks for for_each_template_parm_r.  */
  struct pair_fn_data pfd;
  pfd.fn = fn;
  pfd.any_fn = any_fn;
  pfd.data = data;
  pfd.include_nondeduced_p = include_nondeduced_p;

  /* Walk the tree.  (Conceptually, we would like to walk without
     duplicates, but for_each_template_parm_r recursively calls
     for_each_template_parm, so we would need to reorganize a fair
     bit to use walk_tree_without_duplicates, so we keep our own
     visited list.)  If the caller didn't supply one, allocate a
     temporary set for the duration of this walk.  */
  pfd.visited = visited ? visited : new hash_set<tree>;

  tree result = cp_walk_tree (&t,
			      for_each_template_parm_r,
			      &pfd,
			      pfd.visited);

  /* Clean up the temporary set, if we made one.  */
  if (!visited)
    {
      delete pfd.visited;
      pfd.visited = 0;
    }
  return result;
}
/* State for find_template_parameters: collects the distinct template
   parameters of a tree whose level does not exceed that of CTX_PARMS.  */
struct find_template_parameter_info
{
  explicit find_template_parameter_info (tree ctx_parms)
    : parm_list (NULL_TREE),
      ctx_parms (ctx_parms),
      max_depth (TMPL_PARMS_DEPTH (ctx_parms))
  {}

  /* Nodes already walked, shared across the recursive walks.  */
  hash_set<tree> visited;
  /* Parameters already recorded, used for de-duplication.  */
  hash_set<tree> parms;
  /* TREE_LIST of collected parameters, most recently found first.  */
  tree parm_list;
  /* The template parameter list in scope at the point of the walk.  */
  tree ctx_parms;
  /* Depth of CTX_PARMS; parameters declared deeper are skipped.  */
  int max_depth;
};
/* Appends the declaration of T to the list in DATA.  */

static int
keep_template_parm (tree t, void* data)
{
  find_template_parameter_info *ftpi = (find_template_parameter_info*)data;

  /* Template parameters declared within the expression are not part of
     the parameter mapping.  For example, in this concept:

       template<typename T>
       concept C = requires { <expr> } -> same_as<int>;

     the return specifier same_as<int> declares a new decltype parameter
     that must not be part of the parameter mapping.  The same is true
     for generic lambda parameters, lambda template parameters, etc.
     Skip any parameter deeper than the context's own depth.  */
  int lvl, idx;
  template_parm_level_and_index (t, &lvl, &idx);
  if (lvl > ftpi->max_depth)
    return 0;

  if (TREE_CODE (t) == BOUND_TEMPLATE_TEMPLATE_PARM)
    /* We want the underlying TEMPLATE_TEMPLATE_PARM, not the
       BOUND_TEMPLATE_TEMPLATE_PARM itself.  */
    t = TREE_TYPE (TEMPLATE_TEMPLATE_PARM_TEMPLATE_DECL (t));

  /* Arguments like const T yield parameters like const T.  This means that
     a template-id like X<T, const T> would yield two distinct parameters:
     T and const T.  Adjust types to their unqualified versions.  */
  if (TYPE_P (t))
    t = TYPE_MAIN_VARIANT (t);

  /* Only the first occurrence of each parameter goes onto the list;
     the PARMS set remembers what has been seen.  */
  if (!ftpi->parms.add (t))
    ftpi->parm_list = tree_cons (NULL_TREE, t, ftpi->parm_list);

  return 0;
}
/* Ensure that we recursively examine certain terms that are not normally
   visited in for_each_template_parm_r.  Always returns 0 so the outer
   walk keeps going; discoveries are recorded via keep_template_parm.  */

static int
any_template_parm_r (tree t, void *data)
{
  find_template_parameter_info *ftpi = (find_template_parameter_info*)data;

  /* Re-enter the main walk on NODE, feeding hits to keep_template_parm
     and re-trying this callback on every node encountered.  */
#define WALK_SUBTREE(NODE)						\
  do									\
    {									\
      for_each_template_parm (NODE, keep_template_parm, data,		\
			      &ftpi->visited, true,			\
			      any_template_parm_r);			\
    }									\
  while (0)

  /* A mention of a member alias/typedef is a use of all of its template
     arguments, including those from the enclosing class, so we don't use
     alias_template_specialization_p here.  */
  if (TYPE_P (t) && typedef_variant_p (t))
    if (tree tinfo = TYPE_ALIAS_TEMPLATE_INFO (t))
      WALK_SUBTREE (TI_ARGS (tinfo));

  switch (TREE_CODE (t))
    {
    case TEMPLATE_TYPE_PARM:
      /* Type constraints of a placeholder type may contain parameters.  */
      if (is_auto (t))
	if (tree constr = PLACEHOLDER_TYPE_CONSTRAINTS (t))
	  WALK_SUBTREE (constr);
      break;

    case TEMPLATE_ID_EXPR:
      /* Search through references to variable templates.  */
      WALK_SUBTREE (TREE_OPERAND (t, 0));
      WALK_SUBTREE (TREE_OPERAND (t, 1));
      break;

    case TEMPLATE_PARM_INDEX:
    case PARM_DECL:
      /* A parameter or constraint variable may also depend on a template
	 parameter without explicitly naming it.  */
      WALK_SUBTREE (TREE_TYPE (t));
      break;

    case TEMPLATE_DECL:
      {
	/* If T is a member template that shares template parameters with
	   ctx_parms, we need to mark all those parameters for mapping.
	   First align the two parameter chains to the same depth, then
	   look for the level at which they coincide.  */
	tree dparms = DECL_TEMPLATE_PARMS (t);
	tree cparms = ftpi->ctx_parms;
	while (TMPL_PARMS_DEPTH (dparms) > ftpi->max_depth)
	  dparms = TREE_CHAIN (dparms);
	while (TMPL_PARMS_DEPTH (cparms) > TMPL_PARMS_DEPTH (dparms))
	  cparms = TREE_CHAIN (cparms);
	while (dparms
	       && (TREE_TYPE (TREE_VALUE (dparms))
		   != TREE_TYPE (TREE_VALUE (cparms))))
	  dparms = TREE_CHAIN (dparms),
	    cparms = TREE_CHAIN (cparms);
	if (dparms)
	  {
	    /* Walk the argument levels corresponding to the shared
	       parameter levels.  */
	    int ddepth = TMPL_PARMS_DEPTH (dparms);
	    tree dargs = TI_ARGS (get_template_info (DECL_TEMPLATE_RESULT (t)));
	    for (int i = 0; i < ddepth; ++i)
	      WALK_SUBTREE (TMPL_ARGS_LEVEL (dargs, i+1));
	  }
      }
      break;

    case LAMBDA_EXPR:
      {
	/* Look in the parms and body.  */
	tree fn = lambda_function (t);
	WALK_SUBTREE (TREE_TYPE (fn));
	WALK_SUBTREE (DECL_SAVED_TREE (fn));
      }
      break;

    case IDENTIFIER_NODE:
      if (IDENTIFIER_CONV_OP_P (t))
	/* The conversion-type-id of a conversion operator may be dependent.  */
	WALK_SUBTREE (TREE_TYPE (t));
      break;

    default:
      break;
    }

  /* Keep walking.  */
  return 0;
}
/* Returns a list of unique template parameters found within T, where CTX_PARMS
   are the template parameters in scope.  */

tree
find_template_parameters (tree t, tree ctx_parms)
{
  /* With no parameters in scope, nothing can be found.  */
  if (!ctx_parms)
    return NULL_TREE;

  find_template_parameter_info info (ctx_parms);
  for_each_template_parm (t, keep_template_parm, &info, &info.visited,
			  /*include_nondeduced*/true, any_template_parm_r);
  return info.parm_list;
}
/* Returns true if T depends on any template parameter.  */

int
uses_template_parms (tree t)
{
  if (t == NULL_TREE)
    return false;

  /* The dependency predicates below are only meaningful inside a
     template, so temporarily enter one if we aren't already, taking
     care to restore the original nesting depth afterwards.  */
  int saved_ptd = processing_template_decl;
  if (!saved_ptd)
    processing_template_decl = 1;

  /* Dispatch on the flavor of tree we were handed.  */
  bool result;
  if (TYPE_P (t))
    result = dependent_type_p (t);
  else if (TREE_CODE (t) == TREE_VEC)
    result = any_dependent_template_arguments_p (t);
  else if (TREE_CODE (t) == TREE_LIST)
    result = (uses_template_parms (TREE_VALUE (t))
	      || uses_template_parms (TREE_CHAIN (t)));
  else if (TREE_CODE (t) == TYPE_DECL)
    result = dependent_type_p (TREE_TYPE (t));
  else if (t == error_mark_node)
    result = false;
  else
    result = value_dependent_expression_p (t);

  processing_template_decl = saved_ptd;
  return result;
}
/* Returns true iff current_function_decl is an incompletely instantiated
   template.  Useful instead of processing_template_decl because the latter
   is set to 0 during instantiate_non_dependent_expr.  */

bool
in_template_function (void)
{
  tree fn = current_function_decl;

  /* Bump processing_template_decl so the dependency check below
     behaves as if inside a template, then restore it.  */
  ++processing_template_decl;
  bool result = (fn && DECL_LANG_SPECIFIC (fn)
		 && DECL_TEMPLATE_INFO (fn)
		 && any_dependent_template_arguments_p (DECL_TI_ARGS (fn)));
  --processing_template_decl;

  return result;
}
/* Returns true if T depends on any template parameter with level LEVEL.  */

bool
uses_template_parms_level (tree t, int level)
{
  tree hit = for_each_template_parm (t, template_parm_this_level_p, &level,
				     NULL, /*include_nondeduced_p=*/true);
  return hit != NULL_TREE;
}
/* Returns true if the signature of DECL depends on any template parameter from
   its enclosing class.  */

bool
uses_outer_template_parms (tree decl)
{
  int depth = template_class_depth (CP_DECL_CONTEXT (decl));
  if (depth == 0)
    return false;

  /* Check DECL's own type...  */
  if (for_each_template_parm (TREE_TYPE (decl), template_parm_outer_level,
			      &depth, NULL, /*include_nondeduced_p=*/true))
    return true;

  /* ...its own template parameter list, if it is a primary template...  */
  if (PRIMARY_TEMPLATE_P (decl)
      && for_each_template_parm (INNERMOST_TEMPLATE_PARMS
				 (DECL_TEMPLATE_PARMS (decl)),
				 template_parm_outer_level,
				 &depth, NULL, /*include_nondeduced_p=*/true))
    return true;

  /* ...and finally its associated constraints, if any.  */
  tree ci = get_constraints (decl);
  tree assoc = ci ? CI_ASSOCIATED_CONSTRAINTS (ci) : NULL_TREE;
  return (assoc
	  && for_each_template_parm (assoc, template_parm_outer_level,
				     &depth, NULL, /*nondeduced*/true));
}
/* Returns TRUE iff INST is an instantiation we don't need to do in an
   ill-formed translation unit, i.e. a variable or function that isn't
   usable in a constant expression.  */

static inline bool
neglectable_inst_p (tree d)
{
  if (!d || !DECL_P (d))
    return false;
  /* An undeduced auto decl still needs instantiating.  */
  if (undeduced_auto_decl (d))
    return false;
  /* Constexpr functions and potentially-constant variables must still
     be instantiated; everything else is neglectable.  */
  if (TREE_CODE (d) == FUNCTION_DECL)
    return !DECL_DECLARED_CONSTEXPR_P (d);
  return !decl_maybe_constant_var_p (d);
}
/* Returns TRUE iff we should refuse to instantiate DECL because it's
   neglectable and instantiated from within an erroneous instantiation.  */

static bool
limit_bad_template_recursion (tree decl)
{
  struct tinst_level *level = current_tinst_level;
  int errs = errorcount + sorrycount;

  /* Nothing to limit unless we are nested inside an instantiation,
     some error has already occurred, and DECL is neglectable.  */
  if (!level || !errs || !neglectable_inst_p (decl))
    return false;

  /* Find the innermost enclosing neglectable instantiation, if any.  */
  while (level && !neglectable_inst_p (level->maybe_get_node ()))
    level = level->next;

  /* Refuse only if errors appeared since that instantiation started.  */
  return level != NULL && errs > level->errors;
}
/* Current nesting depth of template instantiations.  */
static int tinst_depth;
/* Maximum allowed instantiation depth (settable via -ftemplate-depth=,
   per the diagnostic in push_tinst_level_loc).  */
extern int max_tinst_depth;
/* Deepest TINST_DEPTH seen so far; updated only when gathering
   statistics (GATHER_STATISTICS).  */
int depth_reached;
/* NOTE(review): presumably the instantiation context of the last
   reported error, to avoid repeating diagnostics — not referenced in
   this chunk; confirm against its users.  */
static GTY(()) struct tinst_level *last_error_tinst_level;
/* We're starting to instantiate D; record the template instantiation context
   at LOC for diagnostics and to restore it later.  Returns false (without
   pushing) if the instantiation should not proceed at all.  */

static bool
push_tinst_level_loc (tree tldcl, tree targs, location_t loc)
{
  struct tinst_level *new_level;

  /* Nesting deeper than -ftemplate-depth= allows is a fatal error.  */
  if (tinst_depth >= max_tinst_depth)
    {
      /* Tell error.c not to try to instantiate any templates.  */
      at_eof = 2;
      fatal_error (input_location,
		   "template instantiation depth exceeds maximum of %d"
		   " (use %<-ftemplate-depth=%> to increase the maximum)",
		   max_tinst_depth);
      return false;
    }

  /* If the current instantiation caused problems, don't let it instantiate
     anything else.  Do allow deduction substitution and decls usable in
     constant expressions.  */
  if (!targs && limit_bad_template_recursion (tldcl))
    {
      /* Avoid no_linkage_errors and unused function warnings for this
	 decl.  */
      TREE_NO_WARNING (tldcl) = 1;
      return false;
    }

  /* When not -quiet, dump template instantiations other than functions, since
     announce_function will take care of those.  */
  if (!quiet_flag && !targs
      && TREE_CODE (tldcl) != TREE_LIST
      && TREE_CODE (tldcl) != FUNCTION_DECL)
    fprintf (stderr, " %s", decl_as_string (tldcl, TFF_DECL_SPECIFIERS));

  /* Push a new refcounted level onto the chain, snapshotting the error
     counts so limit_bad_template_recursion can later tell whether new
     errors appeared within this level.  */
  new_level = tinst_level_freelist ().alloc ();
  new_level->tldcl = tldcl;
  new_level->targs = targs;
  new_level->locus = loc;
  new_level->errors = errorcount + sorrycount;
  new_level->next = NULL;
  new_level->refcount = 0;
  set_refcount_ptr (new_level->next, current_tinst_level);
  set_refcount_ptr (current_tinst_level, new_level);

  ++tinst_depth;
  if (GATHER_STATISTICS && (tinst_depth > depth_reached))
    depth_reached = tinst_depth;

  return true;
}
/* We're starting substitution of TMPL<ARGS>; record the template
   substitution context for diagnostics and to restore it later.  */

static bool
push_tinst_level (tree tmpl, tree args)
{
  /* Attribute the substitution to wherever we currently are.  */
  location_t loc = input_location;
  return push_tinst_level_loc (tmpl, args, loc);
}
/* We're starting to instantiate D; record INPUT_LOCATION and the
   template instantiation context for diagnostics and to restore it
   later.  */

bool
push_tinst_level (tree d)
{
  /* Use the current source position as the trigger location.  */
  location_t loc = input_location;
  return push_tinst_level_loc (d, loc);
}
/* Likewise, but record LOC as the program location.  */

bool
push_tinst_level_loc (tree d, location_t loc)
{
  /* TREE_LIST arguments are only meaningful for the internal overload
     that also takes template arguments.  */
  gcc_assert (TREE_CODE (d) != TREE_LIST);
  return push_tinst_level_loc (d, /*targs=*/NULL, loc);
}
/* We're done instantiating this template; return to the instantiation
context. */
void
pop_tinst_level (void)
{
/* Restore the filename and line number stashed away when we started
this instantiation. */
input_location = current_tinst_level->locus;
set_refcount_ptr (current_tinst_level, current_tinst_level->next);
--tinst_depth;
}
/* We're instantiating a deferred template; restore the template
   instantiation context in which the instantiation was requested, which
   is one step out from LEVEL.  Return the corresponding DECL or TYPE.  */

static tree
reopen_tinst_level (struct tinst_level *level)
{
  /* Recompute the instantiation depth as the length of the chain.  */
  int depth = 0;
  for (struct tinst_level *t = level; t; t = t->next)
    depth++;
  tinst_depth = depth;

  /* Make LEVEL current, then step one level out to the context the
     instantiation was requested from.  */
  set_refcount_ptr (current_tinst_level, level);
  pop_tinst_level ();
  if (current_tinst_level)
    current_tinst_level->errors = errorcount + sorrycount;
  return level->maybe_get_node ();
}
/* Returns the TINST_LEVEL which gives the original instantiation
   context, i.e. the last element of the chain (or NULL if there is no
   instantiation in progress).  */

struct tinst_level *
outermost_tinst_level (void)
{
  struct tinst_level *outer = current_tinst_level;
  while (outer && outer->next)
    outer = outer->next;
  return outer;
}
/* DECL is a friend FUNCTION_DECL or TEMPLATE_DECL.  ARGS is the
   vector of template arguments, as for tsubst.

   Returns an appropriate tsubst'd friend declaration.  */

static tree
tsubst_friend_function (tree decl, tree args)
{
  tree new_friend;

  if (TREE_CODE (decl) == FUNCTION_DECL
      && DECL_TEMPLATE_INSTANTIATION (decl)
      && TREE_CODE (DECL_TI_TEMPLATE (decl)) != TEMPLATE_DECL)
    /* This was a friend declared with an explicit template
       argument list, e.g.:

       friend void f<>(T);

       to indicate that f was a template instantiation, not a new
       function declaration.  Now, we have to figure out what
       instantiation of what template.  */
    {
      tree template_id, arglist, fns;
      tree new_args;
      tree tmpl;
      tree ns = decl_namespace_context (TYPE_MAIN_DECL (current_class_type));

      /* Friend functions are looked up in the containing namespace scope.
	 We must enter that scope, to avoid finding member functions of the
	 current class with same name.  */
      push_nested_namespace (ns);
      fns = tsubst_expr (DECL_TI_TEMPLATE (decl), args,
			 tf_warning_or_error, NULL_TREE,
			 /*integral_constant_expression_p=*/false);
      pop_nested_namespace (ns);
      /* Substitute into the explicit template argument list, then
	 rebuild the template-id and resolve which specialization it
	 names.  */
      arglist = tsubst (DECL_TI_ARGS (decl), args,
			tf_warning_or_error, NULL_TREE);
      template_id = lookup_template_function (fns, arglist);

      new_friend = tsubst (decl, args, tf_warning_or_error, NULL_TREE);
      tmpl = determine_specialization (template_id, new_friend,
				       &new_args,
				       /*need_member_template=*/0,
				       TREE_VEC_LENGTH (args),
				       tsk_none);
      return instantiate_template (tmpl, new_args, tf_error);
    }

  new_friend = tsubst (decl, args, tf_warning_or_error, NULL_TREE);

  /* The NEW_FRIEND will look like an instantiation, to the
     compiler, but is not an instantiation from the point of view of
     the language.  For example, we might have had:

     template <class T> struct S {
       template <class U> friend void f(T, U);
     };

     Then, in S<int>, template <class U> void f(int, U) is not an
     instantiation of anything.  */

  if (new_friend == error_mark_node)
    return error_mark_node;

  DECL_USE_TEMPLATE (new_friend) = 0;
  if (TREE_CODE (decl) == TEMPLATE_DECL)
    {
      DECL_USE_TEMPLATE (DECL_TEMPLATE_RESULT (new_friend)) = 0;
      DECL_SAVED_TREE (DECL_TEMPLATE_RESULT (new_friend))
	= DECL_SAVED_TREE (DECL_TEMPLATE_RESULT (decl));

      /* Substitute TEMPLATE_PARMS_CONSTRAINTS so that parameter levels will
	 match in decls_match.  */
      tree parms = DECL_TEMPLATE_PARMS (new_friend);
      tree treqs = TEMPLATE_PARMS_CONSTRAINTS (parms);
      treqs = maybe_substitute_reqs_for (treqs, new_friend);
      TEMPLATE_PARMS_CONSTRAINTS (parms) = treqs;
    }

  /* The mangled name for the NEW_FRIEND is incorrect.  The function
     is not a template instantiation and should not be mangled like
     one.  Therefore, we forget the mangling here; we'll recompute it
     later if we need it.  */
  if (TREE_CODE (new_friend) != TEMPLATE_DECL)
    {
      SET_DECL_RTL (new_friend, NULL);
      SET_DECL_ASSEMBLER_NAME (new_friend, NULL_TREE);
    }

  if (DECL_NAMESPACE_SCOPE_P (new_friend))
    {
      /* A namespace-scope friend: push it into the template's
	 namespace and merge it with any previous declaration.  */
      tree old_decl;
      tree new_friend_template_info;
      tree new_friend_result_template_info;
      tree ns;
      int new_friend_is_defn;

      /* We must save some information from NEW_FRIEND before calling
	 duplicate decls since that function will free NEW_FRIEND if
	 possible.  */
      new_friend_template_info = DECL_TEMPLATE_INFO (new_friend);
      new_friend_is_defn =
	(DECL_INITIAL (DECL_TEMPLATE_RESULT
		       (template_for_substitution (new_friend)))
	 != NULL_TREE);
      if (TREE_CODE (new_friend) == TEMPLATE_DECL)
	{
	  /* This declaration is a `primary' template.  */
	  DECL_PRIMARY_TEMPLATE (new_friend) = new_friend;

	  new_friend_result_template_info
	    = DECL_TEMPLATE_INFO (DECL_TEMPLATE_RESULT (new_friend));
	}
      else
	new_friend_result_template_info = NULL_TREE;

      /* Inside pushdecl_namespace_level, we will push into the
	 current namespace. However, the friend function should go
	 into the namespace of the template.  */
      ns = decl_namespace_context (new_friend);
      push_nested_namespace (ns);
      old_decl = pushdecl_namespace_level (new_friend, /*is_friend=*/true);
      pop_nested_namespace (ns);

      if (old_decl == error_mark_node)
	return error_mark_node;

      if (old_decl != new_friend)
	{
	  /* This new friend declaration matched an existing
	     declaration.  For example, given:

	       template <class T> void f(T);
	       template <class U> class C {
		 template <class T> friend void f(T) {}
	       };

	     the friend declaration actually provides the definition
	     of `f', once C has been instantiated for some type.  So,
	     old_decl will be the out-of-class template declaration,
	     while new_friend is the in-class definition.

	     But, if `f' was called before this point, the
	     instantiation of `f' will have DECL_TI_ARGS corresponding
	     to `T' but not to `U', references to which might appear
	     in the definition of `f'.  Previously, the most general
	     template for an instantiation of `f' was the out-of-class
	     version; now it is the in-class version.  Therefore, we
	     run through all specialization of `f', adding to their
	     DECL_TI_ARGS appropriately.  In particular, they need a
	     new set of outer arguments, corresponding to the
	     arguments for this class instantiation.

	     The same situation can arise with something like this:

	       friend void f(int);
	       template <class T> class C {
		 friend void f(T) {}
	       };

	     when `C<int>' is instantiated.  Now, `f(int)' is defined
	     in the class.  */

	  if (!new_friend_is_defn)
	    /* On the other hand, if the in-class declaration does
	       *not* provide a definition, then we don't want to alter
	       existing definitions.  We can just leave everything
	       alone.  */
	    ;
	  else
	    {
	      tree new_template = TI_TEMPLATE (new_friend_template_info);
	      tree new_args = TI_ARGS (new_friend_template_info);

	      /* Overwrite whatever template info was there before, if
		 any, with the new template information pertaining to
		 the declaration.  */
	      DECL_TEMPLATE_INFO (old_decl) = new_friend_template_info;

	      if (TREE_CODE (old_decl) != TEMPLATE_DECL)
		{
		  /* We should have called reregister_specialization in
		     duplicate_decls.  */
		  gcc_assert (retrieve_specialization (new_template,
						       new_args, 0)
			      == old_decl);

		  /* Instantiate it if the global has already been used.  */
		  if (DECL_ODR_USED (old_decl))
		    instantiate_decl (old_decl, /*defer_ok=*/true,
				      /*expl_inst_class_mem_p=*/false);
		}
	      else
		{
		  tree t;

		  /* Indicate that the old function template is a partial
		     instantiation.  */
		  DECL_TEMPLATE_INFO (DECL_TEMPLATE_RESULT (old_decl))
		    = new_friend_result_template_info;

		  gcc_assert (new_template
			      == most_general_template (new_template));
		  gcc_assert (new_template != old_decl);

		  /* Reassign any specializations already in the hash table
		     to the new more general template, and add the
		     additional template args.  */
		  for (t = DECL_TEMPLATE_INSTANTIATIONS (old_decl);
		       t != NULL_TREE;
		       t = TREE_CHAIN (t))
		    {
		      tree spec = TREE_VALUE (t);
		      spec_entry elt;

		      elt.tmpl = old_decl;
		      elt.args = DECL_TI_ARGS (spec);
		      elt.spec = NULL_TREE;

		      decl_specializations->remove_elt (&elt);

		      /* Prepend the enclosing-class arguments so the
			 specialization's args match the new template.  */
		      DECL_TI_ARGS (spec)
			= add_outermost_template_args (new_args,
						       DECL_TI_ARGS (spec));

		      register_specialization
			(spec, new_template, DECL_TI_ARGS (spec), true, 0);

		    }
		  DECL_TEMPLATE_INSTANTIATIONS (old_decl) = NULL_TREE;
		}
	    }

	  /* The information from NEW_FRIEND has been merged into OLD_DECL
	     by duplicate_decls.  */
	  new_friend = old_decl;
	}
    }
  else
    {
      /* A class-scope friend (a member function of some other class).  */
      tree context = DECL_CONTEXT (new_friend);
      bool dependent_p;

      /* In the code
	   template <class T> class C {
	     template <class U> friend void C1<U>::f (); // case 1
	     friend void C2<T>::f ();			 // case 2
	   };
	 we only need to make sure CONTEXT is a complete type for
	 case 2.  To distinguish between the two cases, we note that
	 CONTEXT of case 1 remains dependent type after tsubst while
	 this isn't true for case 2.  */
      ++processing_template_decl;
      dependent_p = dependent_type_p (context);
      --processing_template_decl;

      if (!dependent_p
	  && !complete_type_or_else (context, NULL_TREE))
	return error_mark_node;

      if (COMPLETE_TYPE_P (context))
	{
	  tree fn = new_friend;
	  /* do_friend adds the TEMPLATE_DECL for any member friend
	     template even if it isn't a member template, i.e.
	       template <class T> friend A<T>::f();
	     Look through it in that case.  */
	  if (TREE_CODE (fn) == TEMPLATE_DECL
	      && !PRIMARY_TEMPLATE_P (fn))
	    fn = DECL_TEMPLATE_RESULT (fn);
	  /* Check to see that the declaration is really present, and,
	     possibly obtain an improved declaration.  */
	  fn = check_classfn (context, fn, NULL_TREE);

	  if (fn)
	    new_friend = fn;
	}
    }

  return new_friend;
}
/* FRIEND_TMPL is a friend TEMPLATE_DECL.  ARGS is the vector of
   template arguments, as for tsubst.

   Returns an appropriate tsubst'd friend type or error_mark_node on
   failure.  */

static tree
tsubst_friend_class (tree friend_tmpl, tree args)
{
  tree tmpl;

  if (DECL_TEMPLATE_TEMPLATE_PARM_P (friend_tmpl))
    {
      /* The friend is a template template parameter; substituting the
	 parameter itself yields the corresponding argument.  */
      tmpl = tsubst (TREE_TYPE (friend_tmpl), args, tf_none, NULL_TREE);
      return TREE_TYPE (tmpl);
    }

  tree context = CP_DECL_CONTEXT (friend_tmpl);
  if (TREE_CODE (context) == NAMESPACE_DECL)
    push_nested_namespace (context);
  else
    {
      /* A class-scope friend: substitute into and enter the enclosing
	 class so the lookup below happens in the right scope.  */
      context = tsubst (context, args, tf_error, NULL_TREE);
      push_nested_class (context);
    }

  /* Look for an existing (possibly hidden) declaration of the friend
     template in the scope just entered.  */
  tmpl = lookup_name_real (DECL_NAME (friend_tmpl), /*prefer_type=*/false,
			   /*non_class=*/false, /*block_p=*/false,
			   /*namespaces_only=*/false, LOOKUP_HIDDEN);

  if (tmpl && DECL_CLASS_TEMPLATE_P (tmpl))
    {
      /* The friend template has already been declared.  Just
	 check to see that the declarations match, and install any new
	 default parameters.  We must tsubst the default parameters,
	 of course.  We only need the innermost template parameters
	 because that is all that redeclare_class_template will look
	 at.  */
      if (TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (friend_tmpl))
	  > TMPL_ARGS_DEPTH (args))
	{
	  tree parms = tsubst_template_parms (DECL_TEMPLATE_PARMS (friend_tmpl),
					      args, tf_warning_or_error);
	  /* Point diagnostics at the friend declaration itself while
	     redeclaring.  */
	  location_t saved_input_location = input_location;
	  input_location = DECL_SOURCE_LOCATION (friend_tmpl);
	  tree cons = get_constraints (tmpl);
	  redeclare_class_template (TREE_TYPE (tmpl), parms, cons);
	  input_location = saved_input_location;
	}
    }
  else
    {
      /* The friend template has not already been declared.  In this
	 case, the instantiation of the template class will cause the
	 injection of this template into the namespace scope.  */
      tmpl = tsubst (friend_tmpl, args, tf_warning_or_error, NULL_TREE);

      if (tmpl != error_mark_node)
	{
	  /* The new TMPL is not an instantiation of anything, so we
	     forget its origins.  We don't reset CLASSTYPE_TI_TEMPLATE
	     for the new type because that is supposed to be the
	     corresponding template decl, i.e., TMPL.  */
	  DECL_USE_TEMPLATE (tmpl) = 0;
	  DECL_TEMPLATE_INFO (tmpl) = NULL_TREE;
	  CLASSTYPE_USE_TEMPLATE (TREE_TYPE (tmpl)) = 0;
	  CLASSTYPE_TI_ARGS (TREE_TYPE (tmpl))
	    = INNERMOST_TEMPLATE_ARGS (CLASSTYPE_TI_ARGS (TREE_TYPE (tmpl)));

	  /* It is hidden.  */
	  retrofit_lang_decl (DECL_TEMPLATE_RESULT (tmpl));
	  DECL_ANTICIPATED (tmpl)
	    = DECL_ANTICIPATED (DECL_TEMPLATE_RESULT (tmpl)) = true;

	  /* Substitute into and set the constraints on the new declaration.  */
	  if (tree ci = get_constraints (friend_tmpl))
	    {
	      ++processing_template_decl;
	      ci = tsubst_constraint_info (ci, args, tf_warning_or_error,
					   DECL_FRIEND_CONTEXT (friend_tmpl));
	      --processing_template_decl;
	      set_constraints (tmpl, ci);
	    }

	  /* Inject this template into the enclosing namespace scope.  */
	  tmpl = pushdecl_namespace_level (tmpl, true);
	}
    }

  /* Leave whichever scope was entered above.  */
  if (TREE_CODE (context) == NAMESPACE_DECL)
    pop_nested_namespace (context);
  else
    pop_nested_class ();

  return TREE_TYPE (tmpl);
}
/* Returns zero if TYPE cannot be completed later due to circularity.
   Otherwise returns one.  */

static int
can_complete_type_without_circularity (tree type)
{
  /* Peel off array layers: an array type can be completed exactly when
     its element type can.  */
  while (type != NULL_TREE
	 && type != error_mark_node
	 && !COMPLETE_TYPE_P (type)
	 && TREE_CODE (type) == ARRAY_TYPE)
    type = TREE_TYPE (type);

  if (type == NULL_TREE || type == error_mark_node)
    return 0;
  if (COMPLETE_TYPE_P (type))
    return 1;
  /* A class currently being defined would require completing itself.  */
  if (CLASS_TYPE_P (type)
      && TYPE_BEING_DEFINED (TYPE_MAIN_VARIANT (type)))
    return 0;
  return 1;
}
/* Forward declaration; the definition appears later in this file.  */
static tree tsubst_omp_clauses (tree, enum c_omp_region_type, tree,
				tsubst_flags_t, tree);
/* Instantiate a single dependent attribute T (a TREE_LIST), and return either
   T or a new TREE_LIST, possibly a chain in the case of a pack expansion.

   DECL_P points to the declaration the attribute applies to (used by the
   OpenMP cases below); ARGS/COMPLAIN/IN_DECL are as for tsubst.  */

static tree
tsubst_attribute (tree t, tree *decl_p, tree args,
		  tsubst_flags_t complain, tree in_decl)
{
  gcc_assert (ATTR_IS_DEPENDENT (t));
  tree val = TREE_VALUE (t);
  if (val == NULL_TREE)
    /* Nothing to do.  */;
  else if ((flag_openmp || flag_openmp_simd)
	   && is_attribute_p ("omp declare simd",
			      get_attribute_name (t)))
    {
      /* Substitute into the clause list of an "omp declare simd"
	 attribute and re-finish the clauses against the substituted
	 declaration.  */
      tree clauses = TREE_VALUE (val);
      clauses = tsubst_omp_clauses (clauses, C_ORT_OMP_DECLARE_SIMD, args,
				    complain, in_decl);
      c_omp_declare_simd_clauses_to_decls (*decl_p, clauses);
      clauses = finish_omp_clauses (clauses, C_ORT_OMP_DECLARE_SIMD);
      tree parms = DECL_ARGUMENTS (*decl_p);
      clauses
	= c_omp_declare_simd_clauses_to_numbers (parms, clauses);
      if (clauses)
	val = build_tree_list (NULL_TREE, clauses);
      else
	val = NULL_TREE;
    }
  else if (flag_openmp
	   && is_attribute_p ("omp declare variant base",
			      get_attribute_name (t)))
    {
      /* Substitute into the variant-function id expression without
	 triggering evaluation.  */
      ++cp_unevaluated_operand;
      tree varid
	= tsubst_expr (TREE_PURPOSE (val), args, complain,
		       in_decl, /*integral_constant_expression_p=*/false);
      --cp_unevaluated_operand;
      tree chain = TREE_CHAIN (val);
      location_t match_loc = cp_expr_loc_or_input_loc (TREE_PURPOSE (chain));
      /* Walk the copied context-selector list (selector-set ->
	 selector -> properties) substituting where needed.  */
      tree ctx = copy_list (TREE_VALUE (val));
      tree simd = get_identifier ("simd");
      /* NOTE(review): the leading space presumably keeps this from
	 colliding with a user-spelled identifier -- confirm against the
	 code that builds these selector lists.  */
      tree score = get_identifier (" score");
      tree condition = get_identifier ("condition");
      for (tree t1 = ctx; t1; t1 = TREE_CHAIN (t1))
	{
	  const char *set = IDENTIFIER_POINTER (TREE_PURPOSE (t1));
	  TREE_VALUE (t1) = copy_list (TREE_VALUE (t1));
	  for (tree t2 = TREE_VALUE (t1); t2; t2 = TREE_CHAIN (t2))
	    {
	      /* The "simd" selector in the "construct" set carries a
		 clause list rather than ordinary properties.  */
	      if (TREE_PURPOSE (t2) == simd && set[0] == 'c')
		{
		  tree clauses = TREE_VALUE (t2);
		  clauses = tsubst_omp_clauses (clauses,
						C_ORT_OMP_DECLARE_SIMD, args,
						complain, in_decl);
		  c_omp_declare_simd_clauses_to_decls (*decl_p, clauses);
		  clauses = finish_omp_clauses (clauses, C_ORT_OMP_DECLARE_SIMD);
		  TREE_VALUE (t2) = clauses;
		}
	      else
		{
		  TREE_VALUE (t2) = copy_list (TREE_VALUE (t2));
		  for (tree t3 = TREE_VALUE (t2); t3; t3 = TREE_CHAIN (t3))
		    if (TREE_VALUE (t3))
		      {
			/* String properties are allowed except for the
			   "condition" selector in the "user" set and for
			   score expressions.  */
			bool allow_string
			  = ((TREE_PURPOSE (t2) != condition || set[0] != 'u')
			     && TREE_PURPOSE (t3) != score);
			tree v = TREE_VALUE (t3);
			if (TREE_CODE (v) == STRING_CST && allow_string)
			  continue;
			/* Substitute and fold; the result must be a
			   constant integer (checked below).  */
			v = tsubst_expr (v, args, complain, in_decl, true);
			v = fold_non_dependent_expr (v);
			if (!INTEGRAL_TYPE_P (TREE_TYPE (v))
			    || (TREE_PURPOSE (t3) == score
				? TREE_CODE (v) != INTEGER_CST
				: !tree_fits_shwi_p (v)))
			  {
			    location_t loc
			      = cp_expr_loc_or_loc (TREE_VALUE (t3),
						    match_loc);
			    if (TREE_PURPOSE (t3) == score)
			      error_at (loc, "score argument must be "
					     "constant integer expression");
			    else if (allow_string)
			      error_at (loc, "property must be constant "
					     "integer expression or string "
					     "literal");
			    else
			      error_at (loc, "property must be constant "
					     "integer expression");
			    return NULL_TREE;
			  }
			else if (TREE_PURPOSE (t3) == score
				 && tree_int_cst_sgn (v) < 0)
			  {
			    location_t loc
			      = cp_expr_loc_or_loc (TREE_VALUE (t3),
						    match_loc);
			    error_at (loc, "score argument must be "
					   "non-negative");
			    return NULL_TREE;
			  }
			TREE_VALUE (t3) = v;
		      }
		}
	    }
	}
      val = tree_cons (varid, ctx, chain);
    }
  /* If the first attribute argument is an identifier, don't
     pass it through tsubst.  Attributes like mode, format,
     cleanup and several target specific attributes expect it
     unmodified.  */
  else if (attribute_takes_identifier_p (get_attribute_name (t)))
    {
      tree chain
	= tsubst_expr (TREE_CHAIN (val), args, complain, in_decl,
		       /*integral_constant_expression_p=*/false);
      if (chain != TREE_CHAIN (val))
	val = tree_cons (NULL_TREE, TREE_VALUE (val), chain);
    }
  else if (PACK_EXPANSION_P (val))
    {
      /* An attribute pack expansion.  */
      tree purp = TREE_PURPOSE (t);
      tree pack = tsubst_pack_expansion (val, args, complain, in_decl);
      if (pack == error_mark_node)
	return error_mark_node;
      int len = TREE_VEC_LENGTH (pack);
      /* Turn the expanded TREE_VEC into a TREE_LIST chain, one
	 attribute per element.  */
      tree list = NULL_TREE;
      tree *q = &list;
      for (int i = 0; i < len; ++i)
	{
	  tree elt = TREE_VEC_ELT (pack, i);
	  *q = build_tree_list (purp, elt);
	  q = &TREE_CHAIN (*q);
	}
      return list;
    }
  else
    val = tsubst_expr (val, args, complain, in_decl,
		       /*integral_constant_expression_p=*/false);

  /* Only build a fresh node if substitution changed anything.  */
  if (val != TREE_VALUE (t))
    return build_tree_list (TREE_PURPOSE (t), val);
  return t;
}
/* Instantiate any dependent attributes in ATTRIBUTES, returning either it
   unchanged or a new TREE_LIST chain.  */

static tree
tsubst_attributes (tree attributes, tree args,
		   tsubst_flags_t complain, tree in_decl)
{
  tree last_dep = NULL_TREE;

  /* Copy-on-write: only duplicate the list if at least one attribute
     actually needs substitution.  */
  for (tree t = attributes; t; t = TREE_CHAIN (t))
    if (ATTR_IS_DEPENDENT (t))
      {
	last_dep = t;
	attributes = copy_list (attributes);
	break;
      }

  if (last_dep)
    for (tree *p = &attributes; *p; )
      {
	tree t = *p;
	if (ATTR_IS_DEPENDENT (t))
	  {
	    tree subst = tsubst_attribute (t, NULL, args, complain, in_decl);
	    if (subst != t)
	      {
		/* Splice SUBST (possibly a multi-element chain from a
		   pack expansion) into the list in place of T, then
		   continue after it.  */
		*p = subst;
		while (*p)
		  p = &TREE_CHAIN (*p);
		*p = TREE_CHAIN (t);
		continue;
	      }
	  }
	p = &TREE_CHAIN (*p);
      }

  return attributes;
}
/* Apply any attributes which had to be deferred until instantiation
   time.  DECL_P, ATTRIBUTES and ATTR_FLAGS are as cplus_decl_attributes;
   ARGS, COMPLAIN, IN_DECL are as tsubst.  */

static void
apply_late_template_attributes (tree *decl_p, tree attributes, int attr_flags,
				tree args, tsubst_flags_t complain, tree in_decl)
{
  tree last_dep = NULL_TREE;
  tree t;
  tree *p;

  if (attributes == NULL_TREE)
    return;

  if (DECL_P (*decl_p))
    {
      if (TREE_TYPE (*decl_p) == error_mark_node)
	return;
      p = &DECL_ATTRIBUTES (*decl_p);
      /* DECL_ATTRIBUTES comes from copy_node in tsubst_decl, and is identical
         to our attributes parameter.  */
      gcc_assert (*p == attributes);
    }
  else
    {
      p = &TYPE_ATTRIBUTES (*decl_p);
      /* TYPE_ATTRIBUTES was set up (with abi_tag and may_alias) in
	 lookup_template_class_1, and should be preserved.  */
      gcc_assert (*p != attributes);
      while (*p)
	p = &TREE_CHAIN (*p);
    }

  /* Copy-on-write: only duplicate the list when some attribute is
     actually dependent.  */
  for (t = attributes; t; t = TREE_CHAIN (t))
    if (ATTR_IS_DEPENDENT (t))
      {
	last_dep = t;
	attributes = copy_list (attributes);
	break;
      }
  /* Install the (possibly copied) attributes on the decl/type.  */
  *p = attributes;

  if (last_dep)
    {
      tree late_attrs = NULL_TREE;
      tree *q = &late_attrs;

      /* Move each dependent attribute off the installed list, substitute
	 into it, and collect the results on LATE_ATTRS; non-dependent
	 attributes stay where they are.  */
      for (; *p; )
	{
	  t = *p;
	  if (ATTR_IS_DEPENDENT (t))
	    {
	      *p = TREE_CHAIN (t);
	      TREE_CHAIN (t) = NULL_TREE;
	      *q = tsubst_attribute (t, decl_p, args, complain, in_decl);
	      while (*q)
		q = &TREE_CHAIN (*q);
	    }
	  else
	    p = &TREE_CHAIN (t);
	}

      /* Now run the substituted attributes through the normal
	 attribute-application machinery.  */
      cplus_decl_attributes (decl_p, late_attrs, attr_flags);
    }
}
/* Perform (or defer) access check for typedefs that were referenced
   from within the template TMPL code.
   This is a subroutine of instantiate_decl and instantiate_class_template.
   TMPL is the template to consider and TARGS is the list of arguments of
   that template.  */

static void
perform_typedefs_access_check (tree tmpl, tree targs)
{
  unsigned i;
  qualified_typedef_usage_t *iter;

  /* Only class types and functions record typedef usages; anything
     else has nothing to check.  */
  if (!tmpl
      || (!CLASS_TYPE_P (tmpl)
	  && TREE_CODE (tmpl) != FUNCTION_DECL))
    return;

  FOR_EACH_VEC_SAFE_ELT (get_types_needing_access_check (tmpl), i, iter)
    {
      tree type_decl = iter->typedef_decl;
      tree type_scope = iter->context;

      if (!type_decl || !type_scope || !CLASS_TYPE_P (type_scope))
	continue;

      /* Substitute TARGS into any part of the recorded usage that still
	 mentions template parameters.  */
      if (uses_template_parms (type_decl))
	type_decl = tsubst (type_decl, targs, tf_error, NULL_TREE);
      if (uses_template_parms (type_scope))
	type_scope = tsubst (type_scope, targs, tf_error, NULL_TREE);

      /* Make access check error messages point to the location
	 of the use of the typedef.  */
      iloc_sentinel ils (iter->locus);
      perform_or_defer_access_check (TYPE_BINFO (type_scope),
				     type_decl, type_decl,
				     tf_warning_or_error);
    }
}
static tree
instantiate_class_template_1 (tree type)
{
tree templ, args, pattern, t, member;
tree typedecl;
tree pbinfo;
tree base_list;
unsigned int saved_maximum_field_alignment;
tree fn_context;
if (type == error_mark_node)
return error_mark_node;
if (COMPLETE_OR_OPEN_TYPE_P (type)
|| uses_template_parms (type))
return type;
/* Figure out which template is being instantiated. */
templ = most_general_template (CLASSTYPE_TI_TEMPLATE (type));
gcc_assert (TREE_CODE (templ) == TEMPLATE_DECL);
/* Mark the type as in the process of being defined. */
TYPE_BEING_DEFINED (type) = 1;
/* We may be in the middle of deferred access check. Disable
it now. */
deferring_access_check_sentinel acs (dk_no_deferred);
/* Determine what specialization of the original template to
instantiate. */
t = most_specialized_partial_spec (type, tf_warning_or_error);
if (t == error_mark_node)
return error_mark_node;
else if (t)
{
/* This TYPE is actually an instantiation of a partial
specialization. We replace the innermost set of ARGS with
the arguments appropriate for substitution. For example,
given:
template <class T> struct S {};
template <class T> struct S<T*> {};
and supposing that we are instantiating S<int*>, ARGS will
presently be {int*} -- but we need {int}. */
pattern = TREE_TYPE (t);
args = TREE_PURPOSE (t);
}
else
{
pattern = TREE_TYPE (templ);
args = CLASSTYPE_TI_ARGS (type);
}
/* If the template we're instantiating is incomplete, then clearly
there's nothing we can do. */
if (!COMPLETE_TYPE_P (pattern))
{
/* We can try again later. */
TYPE_BEING_DEFINED (type) = 0;
return type;
}
/* If we've recursively instantiated too many templates, stop. */
if (! push_tinst_level (type))
return type;
int saved_unevaluated_operand = cp_unevaluated_operand;
int saved_inhibit_evaluation_warnings = c_inhibit_evaluation_warnings;
fn_context = decl_function_context (TYPE_MAIN_DECL (type));
/* Also avoid push_to_top_level for a lambda in an NSDMI. */
if (!fn_context && LAMBDA_TYPE_P (type) && TYPE_CLASS_SCOPE_P (type))
fn_context = error_mark_node;
if (!fn_context)
push_to_top_level ();
else
{
cp_unevaluated_operand = 0;
c_inhibit_evaluation_warnings = 0;
}
/* Use #pragma pack from the template context. */
saved_maximum_field_alignment = maximum_field_alignment;
maximum_field_alignment = TYPE_PRECISION (pattern);
SET_CLASSTYPE_INTERFACE_UNKNOWN (type);
/* Set the input location to the most specialized template definition.
This is needed if tsubsting causes an error. */
typedecl = TYPE_MAIN_DECL (pattern);
input_location = DECL_SOURCE_LOCATION (TYPE_NAME (type)) =
DECL_SOURCE_LOCATION (typedecl);
TYPE_PACKED (type) = TYPE_PACKED (pattern);
SET_TYPE_ALIGN (type, TYPE_ALIGN (pattern));
TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (pattern);
CLASSTYPE_NON_AGGREGATE (type) = CLASSTYPE_NON_AGGREGATE (pattern);
if (ANON_AGGR_TYPE_P (pattern))
SET_ANON_AGGR_TYPE_P (type);
if (CLASSTYPE_VISIBILITY_SPECIFIED (pattern))
{
CLASSTYPE_VISIBILITY_SPECIFIED (type) = 1;
CLASSTYPE_VISIBILITY (type) = CLASSTYPE_VISIBILITY (pattern);
/* Adjust visibility for template arguments. */
determine_visibility (TYPE_MAIN_DECL (type));
}
if (CLASS_TYPE_P (type))
CLASSTYPE_FINAL (type) = CLASSTYPE_FINAL (pattern);
pbinfo = TYPE_BINFO (pattern);
/* We should never instantiate a nested class before its enclosing
class; we need to look up the nested class by name before we can
instantiate it, and that lookup should instantiate the enclosing
class. */
gcc_assert (!DECL_CLASS_SCOPE_P (TYPE_MAIN_DECL (pattern))
|| COMPLETE_OR_OPEN_TYPE_P (TYPE_CONTEXT (type)));
base_list = NULL_TREE;
if (BINFO_N_BASE_BINFOS (pbinfo))
{
tree pbase_binfo;
tree pushed_scope;
int i;
/* We must enter the scope containing the type, as that is where
the accessibility of types named in dependent bases are
looked up from. */
pushed_scope = push_scope (CP_TYPE_CONTEXT (type));
/* Substitute into each of the bases to determine the actual
basetypes. */
for (i = 0; BINFO_BASE_ITERATE (pbinfo, i, pbase_binfo); i++)
{
tree base;
tree access = BINFO_BASE_ACCESS (pbinfo, i);
tree expanded_bases = NULL_TREE;
int idx, len = 1;
if (PACK_EXPANSION_P (BINFO_TYPE (pbase_binfo)))
{
expanded_bases =
tsubst_pack_expansion (BINFO_TYPE (pbase_binfo),
args, tf_error, NULL_TREE);
if (expanded_bases == error_mark_node)
continue;
len = TREE_VEC_LENGTH (expanded_bases);
}
for (idx = 0; idx < len; idx++)
{
if (expanded_bases)
/* Extract the already-expanded base class. */
base = TREE_VEC_ELT (expanded_bases, idx);
else
/* Substitute to figure out the base class. */
base = tsubst (BINFO_TYPE (pbase_binfo), args, tf_error,
NULL_TREE);
if (base == error_mark_node)
continue;
base_list = tree_cons (access, base, base_list);
if (BINFO_VIRTUAL_P (pbase_binfo))
TREE_TYPE (base_list) = integer_type_node;
}
}
/* The list is now in reverse order; correct that. */
base_list = nreverse (base_list);
if (pushed_scope)
pop_scope (pushed_scope);
}
/* Now call xref_basetypes to set up all the base-class
information. */
xref_basetypes (type, base_list);
apply_late_template_attributes (&type, TYPE_ATTRIBUTES (pattern),
(int) ATTR_FLAG_TYPE_IN_PLACE,
args, tf_error, NULL_TREE);
fixup_attribute_variants (type);
/* Now that our base classes are set up, enter the scope of the
class, so that name lookups into base classes, etc. will work
correctly. This is precisely analogous to what we do in
begin_class_definition when defining an ordinary non-template
class, except we also need to push the enclosing classes. */
push_nested_class (type);
/* Now members are processed in the order of declaration. */
for (member = CLASSTYPE_DECL_LIST (pattern);
member; member = TREE_CHAIN (member))
{
tree t = TREE_VALUE (member);
if (TREE_PURPOSE (member))
{
if (TYPE_P (t))
{
if (LAMBDA_TYPE_P (t))
/* A closure type for a lambda in an NSDMI or default argument.
Ignore it; it will be regenerated when needed. */
continue;
/* Build new CLASSTYPE_NESTED_UTDS. */
tree newtag;
bool class_template_p;
class_template_p = (TREE_CODE (t) != ENUMERAL_TYPE
&& TYPE_LANG_SPECIFIC (t)
&& CLASSTYPE_IS_TEMPLATE (t));
/* If the member is a class template, then -- even after
substitution -- there may be dependent types in the
template argument list for the class. We increment
PROCESSING_TEMPLATE_DECL so that dependent_type_p, as
that function will assume that no types are dependent
when outside of a template. */
if (class_template_p)
++processing_template_decl;
newtag = tsubst (t, args, tf_error, NULL_TREE);
if (class_template_p)
--processing_template_decl;
if (newtag == error_mark_node)
continue;
if (TREE_CODE (newtag) != ENUMERAL_TYPE)
{
tree name = TYPE_IDENTIFIER (t);
if (class_template_p)
/* Unfortunately, lookup_template_class sets
CLASSTYPE_IMPLICIT_INSTANTIATION for a partial
instantiation (i.e., for the type of a member
template class nested within a template class.)
This behavior is required for
maybe_process_partial_specialization to work
correctly, but is not accurate in this case;
the TAG is not an instantiation of anything.
(The corresponding TEMPLATE_DECL is an
instantiation, but the TYPE is not.) */
CLASSTYPE_USE_TEMPLATE (newtag) = 0;
/* Now, we call pushtag to put this NEWTAG into the scope of
TYPE. We first set up the IDENTIFIER_TYPE_VALUE to avoid
pushtag calling push_template_decl. We don't have to do
this for enums because it will already have been done in
tsubst_enum. */
if (name)
SET_IDENTIFIER_TYPE_VALUE (name, newtag);
pushtag (name, newtag, /*tag_scope=*/ts_current);
}
}
else if (DECL_DECLARES_FUNCTION_P (t))
{
tree r;
if (TREE_CODE (t) == TEMPLATE_DECL)
++processing_template_decl;
r = tsubst (t, args, tf_error, NULL_TREE);
if (TREE_CODE (t) == TEMPLATE_DECL)
--processing_template_decl;
set_current_access_from_decl (r);
finish_member_declaration (r);
/* Instantiate members marked with attribute used. */
if (r != error_mark_node && DECL_PRESERVE_P (r))
mark_used (r);
if (TREE_CODE (r) == FUNCTION_DECL
&& DECL_OMP_DECLARE_REDUCTION_P (r))
cp_check_omp_declare_reduction (r);
}
else if ((DECL_CLASS_TEMPLATE_P (t) || DECL_IMPLICIT_TYPEDEF_P (t))
&& LAMBDA_TYPE_P (TREE_TYPE (t)))
/* A closure type for a lambda in an NSDMI or default argument.
Ignore it; it will be regenerated when needed. */;
else
{
/* Build new TYPE_FIELDS. */
if (TREE_CODE (t) == STATIC_ASSERT)
{
tree condition;
++c_inhibit_evaluation_warnings;
condition =
tsubst_expr (STATIC_ASSERT_CONDITION (t), args,
tf_warning_or_error, NULL_TREE,
/*integral_constant_expression_p=*/true);
--c_inhibit_evaluation_warnings;
finish_static_assert (condition,
STATIC_ASSERT_MESSAGE (t),
STATIC_ASSERT_SOURCE_LOCATION (t),
/*member_p=*/true);
}
else if (TREE_CODE (t) != CONST_DECL)
{
tree r;
tree vec = NULL_TREE;
int len = 1;
/* The file and line for this declaration, to
assist in error message reporting. Since we
called push_tinst_level above, we don't need to
restore these. */
input_location = DECL_SOURCE_LOCATION (t);
if (TREE_CODE (t) == TEMPLATE_DECL)
++processing_template_decl;
r = tsubst (t, args, tf_warning_or_error, NULL_TREE);
if (TREE_CODE (t) == TEMPLATE_DECL)
--processing_template_decl;
if (TREE_CODE (r) == TREE_VEC)
{
/* A capture pack became multiple fields. */
vec = r;
len = TREE_VEC_LENGTH (vec);
}
for (int i = 0; i < len; ++i)
{
if (vec)
r = TREE_VEC_ELT (vec, i);
if (VAR_P (r))
{
/* In [temp.inst]:
[t]he initialization (and any associated
side-effects) of a static data member does
not occur unless the static data member is
itself used in a way that requires the
definition of the static data member to
exist.
Therefore, we do not substitute into the
initialized for the static data member here. */
finish_static_data_member_decl
(r,
/*init=*/NULL_TREE,
/*init_const_expr_p=*/false,
/*asmspec_tree=*/NULL_TREE,
/*flags=*/0);
/* Instantiate members marked with attribute used. */
if (r != error_mark_node && DECL_PRESERVE_P (r))
mark_used (r);
}
else if (TREE_CODE (r) == FIELD_DECL)
{
/* Determine whether R has a valid type and can be
completed later. If R is invalid, then its type
is replaced by error_mark_node. */
tree rtype = TREE_TYPE (r);
if (can_complete_type_without_circularity (rtype))
complete_type (rtype);
if (!complete_or_array_type_p (rtype))
{
/* If R's type couldn't be completed and
it isn't a flexible array member (whose
type is incomplete by definition) give
an error. */
cxx_incomplete_type_error (r, rtype);
TREE_TYPE (r) = error_mark_node;
}
else if (TREE_CODE (rtype) == ARRAY_TYPE
&& TYPE_DOMAIN (rtype) == NULL_TREE
&& (TREE_CODE (type) == UNION_TYPE
|| TREE_CODE (type) == QUAL_UNION_TYPE))
{
error ("flexible array member %qD in union", r);
TREE_TYPE (r) = error_mark_node;
}
else if (!verify_type_context (input_location,
TCTX_FIELD, rtype))
TREE_TYPE (r) = error_mark_node;
}
/* If it is a TYPE_DECL for a class-scoped ENUMERAL_TYPE,
such a thing will already have been added to the field
list by tsubst_enum in finish_member_declaration in the
CLASSTYPE_NESTED_UTDS case above. */
if (!(TREE_CODE (r) == TYPE_DECL
&& TREE_CODE (TREE_TYPE (r)) == ENUMERAL_TYPE
&& DECL_ARTIFICIAL (r)))
{
set_current_access_from_decl (r);
finish_member_declaration (r);
}
}
}
}
}
else
{
if (TYPE_P (t) || DECL_CLASS_TEMPLATE_P (t)
|| DECL_TEMPLATE_TEMPLATE_PARM_P (t))
{
/* Build new CLASSTYPE_FRIEND_CLASSES. */
tree friend_type = t;
bool adjust_processing_template_decl = false;
if (TREE_CODE (friend_type) == TEMPLATE_DECL)
{
/* template <class T> friend class C; */
friend_type = tsubst_friend_class (friend_type, args);
adjust_processing_template_decl = true;
}
else if (TREE_CODE (friend_type) == UNBOUND_CLASS_TEMPLATE)
{
/* template <class T> friend class C::D; */
friend_type = tsubst (friend_type, args,
tf_warning_or_error, NULL_TREE);
if (TREE_CODE (friend_type) == TEMPLATE_DECL)
friend_type = TREE_TYPE (friend_type);
adjust_processing_template_decl = true;
}
else if (TREE_CODE (friend_type) == TYPENAME_TYPE
|| TREE_CODE (friend_type) == TEMPLATE_TYPE_PARM)
{
/* This could be either
friend class T::C;
when dependent_type_p is false or
template <class U> friend class T::C;
otherwise. */
/* Bump processing_template_decl in case this is something like
template <class T> friend struct A<T>::B. */
++processing_template_decl;
friend_type = tsubst (friend_type, args,
tf_warning_or_error, NULL_TREE);
if (dependent_type_p (friend_type))
adjust_processing_template_decl = true;
--processing_template_decl;
}
else if (TREE_CODE (friend_type) != BOUND_TEMPLATE_TEMPLATE_PARM
&& !CLASSTYPE_USE_TEMPLATE (friend_type)
&& TYPE_HIDDEN_P (friend_type))
{
/* friend class C;
where C hasn't been declared yet. Let's lookup name
from namespace scope directly, bypassing any name that
come from dependent base class. */
tree ns = decl_namespace_context (TYPE_MAIN_DECL (friend_type));
/* The call to xref_tag_from_type does injection for friend
classes. */
push_nested_namespace (ns);
friend_type =
xref_tag_from_type (friend_type, NULL_TREE,
/*tag_scope=*/ts_current);
pop_nested_namespace (ns);
}
else if (uses_template_parms (friend_type))
/* friend class C<T>; */
friend_type = tsubst (friend_type, args,
tf_warning_or_error, NULL_TREE);
/* Otherwise it's
friend class C;
where C is already declared or
friend class C<int>;
We don't have to do anything in these cases. */
if (adjust_processing_template_decl)
/* Trick make_friend_class into realizing that the friend
we're adding is a template, not an ordinary class. It's
important that we use make_friend_class since it will
perform some error-checking and output cross-reference
information. */
++processing_template_decl;
if (friend_type != error_mark_node)
make_friend_class (type, friend_type, /*complain=*/false);
if (adjust_processing_template_decl)
--processing_template_decl;
}
else
{
/* Build new DECL_FRIENDLIST. */
tree r;
/* The file and line for this declaration, to
assist in error message reporting. Since we
called push_tinst_level above, we don't need to
restore these. */
input_location = DECL_SOURCE_LOCATION (t);
if (TREE_CODE (t) == TEMPLATE_DECL)
{
++processing_template_decl;
push_deferring_access_checks (dk_no_check);
}
r = tsubst_friend_function (t, args);
add_friend (type, r, /*complain=*/false);
if (TREE_CODE (t) == TEMPLATE_DECL)
{
pop_deferring_access_checks ();
--processing_template_decl;
}
}
}
}
if (fn_context)
{
/* Restore these before substituting into the lambda capture
initializers. */
cp_unevaluated_operand = saved_unevaluated_operand;
c_inhibit_evaluation_warnings = saved_inhibit_evaluation_warnings;
}
/* Set the file and line number information to whatever is given for
the class itself. This puts error messages involving generated
implicit functions at a predictable point, and the same point
that would be used for non-template classes. */
input_location = DECL_SOURCE_LOCATION (typedecl);
unreverse_member_declarations (type);
finish_struct_1 (type);
TYPE_BEING_DEFINED (type) = 0;
/* We don't instantiate default arguments for member functions. 14.7.1:
The implicit instantiation of a class template specialization causes
the implicit instantiation of the declarations, but not of the
definitions or default arguments, of the class member functions,
member classes, static data members and member templates.... */
/* Some typedefs referenced from within the template code need to be access
checked at template instantiation time, i.e now. These types were
added to the template at parsing time. Let's get those and perform
the access checks then. */
perform_typedefs_access_check (pattern, args);
perform_deferred_access_checks (tf_warning_or_error);
pop_nested_class ();
maximum_field_alignment = saved_maximum_field_alignment;
if (!fn_context)
pop_from_top_level ();
pop_tinst_level ();
/* The vtable for a template class can be emitted in any translation
unit in which the class is instantiated. When there is no key
method, however, finish_struct_1 will already have added TYPE to
the keyed_classes. */
if (TYPE_CONTAINS_VPTR_P (type) && CLASSTYPE_KEY_METHOD (type))
vec_safe_push (keyed_classes, type);
return type;
}
/* Wrapper for instantiate_class_template_1: does the real work while
   accounting the time spent to the template-instantiation timer.  */
tree
instantiate_class_template (tree type)
{
  timevar_push (TV_TEMPLATE_INST);
  tree result = instantiate_class_template_1 (type);
  timevar_pop (TV_TEMPLATE_INST);
  return result;
}
/* Substitute ARGS into the single template argument T, which may be a
   type or an expression; COMPLAIN and IN_DECL are as for tsubst.  A
   null T is returned unchanged.  */
tree
tsubst_template_arg (tree t, tree args, tsubst_flags_t complain, tree in_decl)
{
  if (!t)
    return t;
  if (TYPE_P (t))
    return tsubst (t, args, complain, in_decl);

  /* Expression argument: substitute as an integral constant expression,
     and inhibit evaluation warnings unless the caller wants them.  */
  const bool inhibit = !(complain & tf_warning);
  if (inhibit)
    ++c_inhibit_evaluation_warnings;
  tree r = tsubst_expr (t, args, complain, in_decl,
			/*integral_constant_expression_p=*/true);
  if (inhibit)
    --c_inhibit_evaluation_warnings;
  return r;
}
/* Given a function parameter pack TMPL_PARM and some function parameters
   instantiated from it at *SPEC_P, return a NONTYPE_ARGUMENT_PACK of them
   and set *SPEC_P to point at the next point in the list.  */
tree
extract_fnparm_pack (tree tmpl_parm, tree *spec_p)
{
  /* Count how many parameters at *SPEC_P came from TMPL_PARM; with a
     null TMPL_PARM every remaining parameter is taken.  */
  int len = 0;
  for (tree p = *spec_p; p; p = TREE_CHAIN (p))
    {
      if (tmpl_parm
	  && !function_parameter_expanded_from_pack_p (p, tmpl_parm))
	break;
      ++len;
    }

  tree spec_parm = *spec_p;
  tree argpack;
  if (len == 1 && DECL_PACK_P (spec_parm))
    {
      /* The instantiation is still a parameter pack; don't wrap it in a
	 NONTYPE_ARGUMENT_PACK.  */
      argpack = spec_parm;
      spec_parm = DECL_CHAIN (spec_parm);
    }
  else
    {
      /* Gather the LEN "packed" parameters into a fresh argument pack.  */
      tree parmvec = make_tree_vec (len);
      for (int i = 0; i < len; ++i)
	{
	  tree elt = spec_parm;
	  if (DECL_PACK_P (elt))
	    elt = make_pack_expansion (elt);
	  TREE_VEC_ELT (parmvec, i) = elt;
	  spec_parm = DECL_CHAIN (spec_parm);
	}
      argpack = make_node (NONTYPE_ARGUMENT_PACK);
      SET_ARGUMENT_PACK_ARGS (argpack, parmvec);
    }
  *spec_p = spec_parm;
  return argpack;
}
/* Pack the chain SPEC_PARM of PARM_DECLs into a NONTYPE_ARGUMENT_PACK.  */
static tree
make_fnparm_pack (tree spec_parm)
{
  tree rest = spec_parm;
  return extract_fnparm_pack (NULL_TREE, &rest);
}
/* Return 1 if the Ith element of the argument pack ARG_PACK is a
   pack expansion with no extra args, 2 if it has extra args, or 0
   if it is not a pack expansion.  */
static int
argument_pack_element_is_expansion_p (tree arg_pack, int i)
{
  /* We may be called before tsubst_pack_expansion resolves the select
     node; look through it.  */
  if (TREE_CODE (arg_pack) == ARGUMENT_PACK_SELECT)
    arg_pack = ARGUMENT_PACK_SELECT_FROM_PACK (arg_pack);

  tree elts = ARGUMENT_PACK_ARGS (arg_pack);
  if (i >= TREE_VEC_LENGTH (elts))
    return 0;

  tree elt = TREE_VEC_ELT (elts, i);
  if (DECL_P (elt))
    /* A decl pack is itself an expansion; examine its type.  */
    elt = TREE_TYPE (elt);
  if (!PACK_EXPANSION_P (elt))
    return 0;
  return PACK_EXPANSION_EXTRA_ARGS (elt) ? 2 : 1;
}
/* Create and return an ARGUMENT_PACK_SELECT node that picks element
   INDEX out of ARG_PACK.  */
static tree
make_argument_pack_select (tree arg_pack, unsigned index)
{
  tree sel = make_node (ARGUMENT_PACK_SELECT);
  ARGUMENT_PACK_SELECT_FROM_PACK (sel) = arg_pack;
  ARGUMENT_PACK_SELECT_INDEX (sel) = index;
  return sel;
}
/* Subroutine of tsubst_pack_expansion.  Return TRUE when we must fall
   back on the PACK_EXPANSION_EXTRA_ARGS mechanism: stash the (not yet
   complete) substitution arguments and hand back an unsubstituted pack
   expansion, waiting until enough arguments are known to really perform
   the substitution.  */
static bool
use_pack_expansion_extra_args_p (tree parm_packs,
				 int arg_pack_len,
				 bool has_empty_arg)
{
  /* Deferral is needed when one pack has an expansion where another has
     a normal argument, or when one pack has an empty argument and
     another one doesn't.  */
  if (parm_packs == NULL_TREE)
    return false;

  if (has_empty_arg)
    {
      /* An empty pack can still be substituted directly provided every
	 actual pack consists of pack expansions.  */
      for (tree p = parm_packs; p; p = TREE_CHAIN (p))
	{
	  tree a = TREE_VALUE (p);
	  if (TREE_CODE (a) == ARGUMENT_PACK_SELECT)
	    a = ARGUMENT_PACK_SELECT_FROM_PACK (a);
	  a = ARGUMENT_PACK_ARGS (a);
	  if (TREE_VEC_LENGTH (a) == 1)
	    a = TREE_VEC_ELT (a, 0);
	  if (!PACK_EXPANSION_P (a))
	    return true;
	}
      return false;
    }

  bool saw_expansion = false;
  for (int i = 0; i < arg_pack_len; ++i)
    {
      bool saw_plain_arg = false;
      for (tree p = parm_packs; p; p = TREE_CHAIN (p))
	{
	  int exp
	    = argument_pack_element_is_expansion_p (TREE_VALUE (p), i);
	  if (exp == 2)
	    /* We can't substitute a pack expansion with extra args into
	       our pattern.  */
	    return true;
	  else if (exp)
	    saw_expansion = true;
	  else
	    saw_plain_arg = true;
	}
      /* Mixing an expansion with a plain argument forces deferral.  */
      if (saw_expansion && saw_plain_arg)
	return true;
    }
  return false;
}
/* [temp.variadic]/6 says that:

       The instantiation of a pack expansion [...]
       produces a list E1,E2, ..., En, where N is the number of elements
       in the pack expansion parameters.

   This subroutine of tsubst_pack_expansion produces one of these Ei.

   PATTERN is the pattern of the pack expansion.  PARM_PACKS is a
   TREE_LIST in which each TREE_PURPOSE is a parameter pack of
   PATTERN, and each TREE_VALUE is its corresponding argument pack.
   INDEX is the index 'i' of the element Ei to produce.  ARGS,
   COMPLAIN, and IN_DECL are the same parameters as for the
   tsubst_pack_expansion function.

   The function returns the resulting Ei upon successful completion,
   or error_mark_node.

   Note that this function possibly modifies the ARGS parameter, so
   it's the responsibility of the caller to restore it.  */
static tree
gen_elem_of_pack_expansion_instantiation (tree pattern,
					  tree parm_packs,
					  unsigned index,
					  tree args /* This parm gets
						       modified.  */,
					  tsubst_flags_t complain,
					  tree in_decl)
{
  tree t;
  bool ith_elem_is_expansion = false;

  /* For each parameter pack, change the substitution of the parameter
     pack to the ith argument in its argument pack, then expand the
     pattern.  */
  for (tree pack = parm_packs; pack; pack = TREE_CHAIN (pack))
    {
      tree parm = TREE_PURPOSE (pack);
      tree arg_pack = TREE_VALUE (pack);
      tree aps;			/* instance of ARGUMENT_PACK_SELECT.  */

      ith_elem_is_expansion |=
	argument_pack_element_is_expansion_p (arg_pack, index);

      /* Select the Ith argument from the pack.  */
      if (TREE_CODE (parm) == PARM_DECL
	  || VAR_P (parm)
	  || TREE_CODE (parm) == FIELD_DECL)
	{
	  /* Decl packs are mapped through local_specializations rather
	     than through ARGS; create the select node on the first
	     iteration and re-use it for the rest.  */
	  if (index == 0)
	    {
	      aps = make_argument_pack_select (arg_pack, index);
	      if (!mark_used (parm, complain) && !(complain & tf_error))
		return error_mark_node;
	      register_local_specialization (aps, parm);
	    }
	  else
	    aps = retrieve_local_specialization (parm);
	}
      else
	{
	  /* Template parameter packs live in ARGS at (level, idx).  */
	  int idx, level;
	  template_parm_level_and_index (parm, &level, &idx);

	  if (index == 0)
	    {
	      aps = make_argument_pack_select (arg_pack, index);
	      /* Update the corresponding argument.  */
	      TMPL_ARG (args, level, idx) = aps;
	    }
	  else
	    /* Re-use the ARGUMENT_PACK_SELECT.  */
	    aps = TMPL_ARG (args, level, idx);
	}
      /* Point the (new or re-used) select node at element INDEX.  */
      ARGUMENT_PACK_SELECT_INDEX (aps) = index;
    }

  /* Substitute into the PATTERN with the (possibly altered)
     arguments.  */
  if (pattern == in_decl)
    /* Expanding a fixed parameter pack from
       coerce_template_parameter_pack.  */
    t = tsubst_decl (pattern, args, complain);
  else if (pattern == error_mark_node)
    t = error_mark_node;
  else if (!TYPE_P (pattern))
    t = tsubst_expr (pattern, args, complain, in_decl,
		     /*integral_constant_expression_p=*/false);
  else
    t = tsubst (pattern, args, complain, in_decl);

  /* If the Ith argument pack element is a pack expansion, then
     the Ith element resulting from the substituting is going to
     be a pack expansion as well.  */
  if (ith_elem_is_expansion)
    t = make_pack_expansion (t, complain);

  return t;
}
/* When the unexpanded parameter pack in a fold expression expands to an empty
   sequence, the value of the expression is as follows; the program is
   ill-formed if the operator is not listed in this table.

	&&	true
	||	false
	,	void()  */
tree
expand_empty_fold (tree t, tsubst_flags_t complain)
{
  tree_code code = (tree_code)TREE_INT_CST_LOW (TREE_OPERAND (t, 0));

  /* Only plain &&, || and , have an identity element; compound
     assignment folds never do.  */
  if (!FOLD_EXPR_MODIFY_P (t))
    {
      if (code == TRUTH_ANDIF_EXPR)
	return boolean_true_node;
      if (code == TRUTH_ORIF_EXPR)
	return boolean_false_node;
      if (code == COMPOUND_EXPR)
	return void_node;
    }

  if (complain & tf_error)
    error_at (location_of (t),
	      "fold of empty expansion over %O", code);
  return error_mark_node;
}
/* Given a fold-expression T and a current LEFT and RIGHT operand,
   form an expression that combines the two terms using the
   operator of T.  */
static tree
fold_expression (tree t, tree left, tree right, tsubst_flags_t complain)
{
  tree_code code = (tree_code)TREE_INT_CST_LOW (FOLD_EXPR_OP (t));

  /* Compound assignment operators fold via repeated assignment.  */
  if (FOLD_EXPR_MODIFY_P (t))
    return build_x_modify_expr (input_location, left, code, right, complain);

  /* Suppress -Wparentheses while building the combined expression.  */
  warning_sentinel s (warn_parentheses);
  if (code == COMPOUND_EXPR)
    return build_x_compound_expr (input_location, left, right, complain);
  return build_x_binary_op (input_location, code,
			    left, TREE_CODE (left),
			    right, TREE_CODE (right),
			    /*overload=*/NULL,
			    complain);
}
/* Substitute ARGS into the pack operand of fold expression T.  */
static inline tree
tsubst_fold_expr_pack (tree t, tree args, tsubst_flags_t complain, tree in_decl)
{
  tree pack = FOLD_EXPR_PACK (t);
  return tsubst_pack_expansion (pack, args, complain, in_decl);
}
/* Substitute ARGS into the initializer operand of the binary fold
   expression T.  (The pack operand is handled by tsubst_fold_expr_pack;
   the previous comment here was a copy-paste of that one.)  */
static inline tree
tsubst_fold_expr_init (tree t, tree args, tsubst_flags_t complain, tree in_decl)
{
  return tsubst_expr (FOLD_EXPR_INIT (t), args, complain, in_decl, false);
}
/* Expand a PACK of arguments into a grouped as left fold.
   Given a pack containing elements A0, A1, ..., An and an
   operator @, this builds the expression:

      ((A0 @ A1) @ A2) ... @ An

   Note that PACK must not be empty.

   The operator is defined by the original fold expression T.  */
static tree
expand_left_fold (tree t, tree pack, tsubst_flags_t complain)
{
  const int n = TREE_VEC_LENGTH (pack);
  tree result = TREE_VEC_ELT (pack, 0);
  /* Fold the remaining elements in, left to right.  */
  for (int i = 1; i < n; ++i)
    result = fold_expression (t, result, TREE_VEC_ELT (pack, i), complain);
  return result;
}
/* Substitute into a unary left fold expression.  */
static tree
tsubst_unary_left_fold (tree t, tree args, tsubst_flags_t complain,
			tree in_decl)
{
  tree elts = tsubst_fold_expr_pack (t, args, complain, in_decl);
  if (elts == error_mark_node)
    return error_mark_node;

  if (PACK_EXPANSION_P (elts))
    {
      /* Still dependent; rebuild the fold around the new expansion.  */
      tree fold = copy_node (t);
      FOLD_EXPR_PACK (fold) = elts;
      return fold;
    }

  /* An empty expansion yields the operator's identity (or an error).  */
  if (TREE_VEC_LENGTH (elts) == 0)
    return expand_empty_fold (t, complain);
  return expand_left_fold (t, elts, complain);
}
/* Substitute into a binary left fold expression.

   Do this by building a single (non-empty) vector of arguments and
   building the expression from those elements.  */
static tree
tsubst_binary_left_fold (tree t, tree args, tsubst_flags_t complain,
			 tree in_decl)
{
  tree elts = tsubst_fold_expr_pack (t, args, complain, in_decl);
  if (elts == error_mark_node)
    return error_mark_node;
  tree init = tsubst_fold_expr_init (t, args, complain, in_decl);
  if (init == error_mark_node)
    return error_mark_node;

  if (PACK_EXPANSION_P (elts))
    {
      /* Still dependent; rebuild the fold with the substituted operands.  */
      tree fold = copy_node (t);
      FOLD_EXPR_PACK (fold) = elts;
      FOLD_EXPR_INIT (fold) = init;
      return fold;
    }

  /* Prepend INIT so the whole thing is one left fold over N+1 elements.  */
  const int n = TREE_VEC_LENGTH (elts);
  tree all = make_tree_vec (n + 1);
  TREE_VEC_ELT (all, 0) = init;
  for (int i = 0; i < n; ++i)
    TREE_VEC_ELT (all, i + 1) = TREE_VEC_ELT (elts, i);
  return expand_left_fold (t, all, complain);
}
/* Expand a PACK of arguments into a grouped as right fold.
   Given a pack containing elements A0, A1, ..., An and an
   operator @, this builds the expression:

      A0 @ (... (An-2 @ (An-1 @ An)))

   Note that PACK must not be empty.

   The operator is defined by the original fold expression T.  */
tree
expand_right_fold (tree t, tree pack, tsubst_flags_t complain)
{
  /* Start from the last element and fold in the rest, right to left.  */
  int i = TREE_VEC_LENGTH (pack) - 1;
  tree result = TREE_VEC_ELT (pack, i);
  while (i > 0)
    {
      --i;
      result = fold_expression (t, TREE_VEC_ELT (pack, i), result, complain);
    }
  return result;
}
/* Substitute into a unary right fold expression.  */
static tree
tsubst_unary_right_fold (tree t, tree args, tsubst_flags_t complain,
			 tree in_decl)
{
  tree elts = tsubst_fold_expr_pack (t, args, complain, in_decl);
  if (elts == error_mark_node)
    return error_mark_node;

  if (PACK_EXPANSION_P (elts))
    {
      /* Still dependent; rebuild the fold around the new expansion.  */
      tree fold = copy_node (t);
      FOLD_EXPR_PACK (fold) = elts;
      return fold;
    }

  /* An empty expansion yields the operator's identity (or an error).  */
  if (TREE_VEC_LENGTH (elts) == 0)
    return expand_empty_fold (t, complain);
  return expand_right_fold (t, elts, complain);
}
/* Substitute into a binary right fold expression.

   Do this by building a single (non-empty) vector of arguments and
   building the expression from those elements.  */
static tree
tsubst_binary_right_fold (tree t, tree args, tsubst_flags_t complain,
			  tree in_decl)
{
  tree elts = tsubst_fold_expr_pack (t, args, complain, in_decl);
  if (elts == error_mark_node)
    return error_mark_node;
  tree init = tsubst_fold_expr_init (t, args, complain, in_decl);
  if (init == error_mark_node)
    return error_mark_node;

  if (PACK_EXPANSION_P (elts))
    {
      /* Still dependent; rebuild the fold with the substituted operands.  */
      tree fold = copy_node (t);
      FOLD_EXPR_PACK (fold) = elts;
      FOLD_EXPR_INIT (fold) = init;
      return fold;
    }

  /* Append INIT so the whole thing is one right fold over N+1 elements.  */
  const int n = TREE_VEC_LENGTH (elts);
  tree all = make_tree_vec (n + 1);
  for (int i = 0; i < n; ++i)
    TREE_VEC_ELT (all, i) = TREE_VEC_ELT (elts, i);
  TREE_VEC_ELT (all, n) = init;
  return expand_right_fold (t, all, complain);
}
/* Walk through the pattern of a pack expansion, adding everything in
   local_specializations to a list.  */

class el_data
{
public:
  /* Declarations introduced inside the pattern itself (via DECL_EXPR);
     extract_locals_r skips these when collecting.  */
  hash_set<tree> internal;
  /* TREE_LIST accumulating each walked entity and its specialization.  */
  tree extra = NULL_TREE;
  /* Diagnostic flags forwarded to process_outer_var_ref.  */
  tsubst_flags_t complain;

  el_data (tsubst_flags_t c) : complain (c) {}
};
/* Tree-walk callback for extract_local_specs: record in DATA_->extra any
   walked node that has a local specialization, rewriting lambda capture
   packs to their capture proxies along the way.  */
static tree
extract_locals_r (tree *tp, int */*walk_subtrees*/, void *data_)
{
  el_data &data = *reinterpret_cast<el_data*>(data_);
  tree *extra = &data.extra;
  tsubst_flags_t complain = data.complain;

  if (TYPE_P (*tp) && typedef_variant_p (*tp))
    /* Remember local typedefs (85214).  */
    tp = &TYPE_NAME (*tp);

  if (TREE_CODE (*tp) == DECL_EXPR)
    /* A declaration inside the pattern; remember it so we don't treat
       later references to it as captures from the enclosing scope.  */
    data.internal.add (DECL_EXPR_DECL (*tp));
  else if (tree spec = retrieve_local_specialization (*tp))
    {
      if (data.internal.contains (*tp))
	/* Don't mess with variables declared within the pattern.  */
	return NULL_TREE;
      if (TREE_CODE (spec) == NONTYPE_ARGUMENT_PACK)
	{
	  /* Maybe pull out the PARM_DECL for a partial instantiation.  */
	  tree args = ARGUMENT_PACK_ARGS (spec);
	  if (TREE_VEC_LENGTH (args) == 1)
	    {
	      tree elt = TREE_VEC_ELT (args, 0);
	      if (PACK_EXPANSION_P (elt))
		elt = PACK_EXPANSION_PATTERN (elt);
	      if (DECL_PACK_P (elt))
		spec = elt;
	    }
	  if (TREE_CODE (spec) == NONTYPE_ARGUMENT_PACK)
	    {
	      /* Handle lambda capture here, since we aren't doing any
		 substitution now, and so tsubst_copy won't call
		 process_outer_var_ref.  */
	      tree args = ARGUMENT_PACK_ARGS (spec);
	      int len = TREE_VEC_LENGTH (args);
	      for (int i = 0; i < len; ++i)
		{
		  tree arg = TREE_VEC_ELT (args, i);
		  tree carg = arg;
		  if (outer_automatic_var_p (arg))
		    carg = process_outer_var_ref (arg, complain);
		  if (carg != arg)
		    {
		      /* Make a new NONTYPE_ARGUMENT_PACK of the capture
			 proxies.  Copy-on-first-change so an unmodified
			 pack is left alone.  */
		      if (i == 0)
			{
			  spec = copy_node (spec);
			  args = copy_node (args);
			  SET_ARGUMENT_PACK_ARGS (spec, args);
			  register_local_specialization (spec, *tp);
			}
		      TREE_VEC_ELT (args, i) = carg;
		    }
		}
	    }
	}
      if (outer_automatic_var_p (spec))
	spec = process_outer_var_ref (spec, complain);
      /* Record the (entity, specialization) pair on the result list.  */
      *extra = tree_cons (*tp, spec, *extra);
    }
  return NULL_TREE;
}
/* Walk PATTERN and return a TREE_LIST of the local specializations it
   uses, as collected by extract_locals_r; NULL_TREE if none.  */
static tree
extract_local_specs (tree pattern, tsubst_flags_t complain)
{
  el_data data (complain);
  tree walk_root = pattern;
  cp_walk_tree_without_duplicates (&walk_root, extract_locals_r, &data);
  return data.extra;
}
/* Extract any uses of local_specializations from PATTERN and add them to ARGS
   for use in PACK_EXPANSION_EXTRA_ARGS.  */
tree
build_extra_args (tree pattern, tree args, tsubst_flags_t complain)
{
  if (!local_specializations)
    return args;
  tree locals = extract_local_specs (pattern, complain);
  if (!locals)
    return args;
  /* Chain the saved template args onto the front of the locals list;
     add_extra_args unwraps this shape later.  */
  return tree_cons (NULL_TREE, args, locals);
}
/* Apply any local specializations from PACK_EXPANSION_EXTRA_ARGS and add the
   normal template args to ARGS.  */
tree
add_extra_args (tree extra, tree args)
{
  /* A TREE_LIST means build_extra_args saved local specializations
     alongside the args (args in TREE_VALUE of the head, locals in the
     chain); replay them into the current local context.  */
  if (extra && TREE_CODE (extra) == TREE_LIST)
    {
      for (tree elt = TREE_CHAIN (extra); elt; elt = TREE_CHAIN (elt))
	{
	  /* The partial instantiation involved local declarations collected in
	     extract_local_specs; map from the general template to our local
	     context.  */
	  tree gen = TREE_PURPOSE (elt);
	  tree inst = TREE_VALUE (elt);
	  if (DECL_P (inst))
	    if (tree local = retrieve_local_specialization (inst))
	      inst = local;
	  /* else inst is already a full instantiation of the pack.  */
	  register_local_specialization (inst, gen);
	}
      gcc_assert (!TREE_PURPOSE (extra));
      extra = TREE_VALUE (extra);
    }
#if 1
  /* I think we should always be able to substitute dependent args into the
     pattern.  If that turns out to be incorrect in some cases, enable the
     alternate code (and add complain/in_decl parms to this function).  */
  gcc_checking_assert (!uses_template_parms (extra));
#else
  if (!uses_template_parms (extra))
    {
      gcc_unreachable ();
      extra = tsubst_template_args (extra, args, complain, in_decl);
      args = add_outermost_template_args (args, extra);
    }
  else
#endif
    args = add_to_template_args (extra, args);
  return args;
}
/* Substitute ARGS into T, which is a pack expansion
   (i.e. TYPE_PACK_EXPANSION or EXPR_PACK_EXPANSION).  Returns a
   TREE_VEC with the substituted arguments, a PACK_EXPANSION_* node
   (if only a partial substitution could be performed) or
   ERROR_MARK_NODE if there was an error.  */
tree
tsubst_pack_expansion (tree t, tree args, tsubst_flags_t complain,
		       tree in_decl)
{
  tree pattern;
  tree pack, packs = NULL_TREE;
  bool unsubstituted_packs = false;
  int i, len = -1;
  tree result;
  bool need_local_specializations = false;
  int levels;

  gcc_assert (PACK_EXPANSION_P (t));
  pattern = PACK_EXPANSION_PATTERN (t);

  /* Add in any args remembered from an earlier partial instantiation.  */
  args = add_extra_args (PACK_EXPANSION_EXTRA_ARGS (t), args);

  levels = TMPL_ARGS_DEPTH (args);

  /* Determine the argument packs that will instantiate the parameter
     packs used in the expansion expression.  While we're at it,
     compute the number of arguments to be expanded and make sure it
     is consistent.  */
  for (pack = PACK_EXPANSION_PARAMETER_PACKS (t); pack;
       pack = TREE_CHAIN (pack))
    {
      tree parm_pack = TREE_VALUE (pack);
      tree arg_pack = NULL_TREE;
      tree orig_arg = NULL_TREE;
      int level = 0;

      if (TREE_CODE (parm_pack) == BASES)
	{
	  /* __bases/__direct_bases: compute the base list directly.  */
	  gcc_assert (parm_pack == pattern);
	  if (BASES_DIRECT (parm_pack))
	    return calculate_direct_bases (tsubst_expr (BASES_TYPE (parm_pack),
							args, complain,
							in_decl, false),
					   complain);
	  else
	    return calculate_bases (tsubst_expr (BASES_TYPE (parm_pack),
						 args, complain, in_decl,
						 false), complain);
	}
      else if (builtin_pack_call_p (parm_pack))
	{
	  /* A built-in pack (e.g. from a compiler trait) must be the
	     whole pattern.  */
	  if (parm_pack != pattern)
	    {
	      if (complain & tf_error)
		sorry ("%qE is not the entire pattern of the pack expansion",
		       parm_pack);
	      return error_mark_node;
	    }
	  return expand_builtin_pack_call (parm_pack, args,
					   complain, in_decl);
	}
      else if (TREE_CODE (parm_pack) == PARM_DECL)
	{
	  /* We know we have correct local_specializations if this
	     expansion is at function scope, or if we're dealing with a
	     local parameter in a requires expression; for the latter,
	     tsubst_requires_expr set it up appropriately.  */
	  if (PACK_EXPANSION_LOCAL_P (t) || CONSTRAINT_VAR_P (parm_pack))
	    arg_pack = retrieve_local_specialization (parm_pack);
	  else
	    /* We can't rely on local_specializations for a parameter
	       name used later in a function declaration (such as in a
	       late-specified return type).  Even if it exists, it might
	       have the wrong value for a recursive call.  */
	    need_local_specializations = true;

	  if (!arg_pack)
	    {
	      /* This parameter pack was used in an unevaluated context.  Just
		 make a dummy decl, since it's only used for its type.  */
	      ++cp_unevaluated_operand;
	      arg_pack = tsubst_decl (parm_pack, args, complain);
	      --cp_unevaluated_operand;
	      if (arg_pack && DECL_PACK_P (arg_pack))
		/* Partial instantiation of the parm_pack, we can't build
		   up an argument pack yet.  */
		arg_pack = NULL_TREE;
	      else
		arg_pack = make_fnparm_pack (arg_pack);
	    }
	  else if (DECL_PACK_P (arg_pack))
	    /* This argument pack isn't fully instantiated yet.  */
	    arg_pack = NULL_TREE;
	}
      else if (is_capture_proxy (parm_pack))
	{
	  arg_pack = retrieve_local_specialization (parm_pack);
	  if (DECL_PACK_P (arg_pack))
	    arg_pack = NULL_TREE;
	}
      else
	{
	  /* A template parameter pack: look it up in ARGS by its
	     level and index.  */
	  int idx;
	  template_parm_level_and_index (parm_pack, &level, &idx);
	  if (level <= levels)
	    arg_pack = TMPL_ARG (args, level, idx);

	  if (arg_pack && TREE_CODE (arg_pack) == TEMPLATE_TYPE_PARM
	      && TEMPLATE_TYPE_PARAMETER_PACK (arg_pack))
	    arg_pack = NULL_TREE;
	}

      orig_arg = arg_pack;
      if (arg_pack && TREE_CODE (arg_pack) == ARGUMENT_PACK_SELECT)
	arg_pack = ARGUMENT_PACK_SELECT_FROM_PACK (arg_pack);

      if (arg_pack && !ARGUMENT_PACK_P (arg_pack))
	/* This can only happen if we forget to expand an argument
	   pack somewhere else.  Just return an error, silently.  */
	{
	  result = make_tree_vec (1);
	  TREE_VEC_ELT (result, 0) = error_mark_node;
	  return result;
	}

      if (arg_pack)
	{
	  int my_len =
	    TREE_VEC_LENGTH (ARGUMENT_PACK_ARGS (arg_pack));

	  /* Don't bother trying to do a partial substitution with
	     incomplete packs; we'll try again after deduction.  */
	  if (ARGUMENT_PACK_INCOMPLETE_P (arg_pack))
	    return t;

	  if (len < 0)
	    len = my_len;
	  else if (len != my_len)
	    {
	      /* All packs expanded together must have the same length.  */
	      if (!(complain & tf_error))
		/* Fail quietly.  */;
	      else if (TREE_CODE (t) == TYPE_PACK_EXPANSION)
		error ("mismatched argument pack lengths while expanding %qT",
		       pattern);
	      else
		error ("mismatched argument pack lengths while expanding %qE",
		       pattern);
	      return error_mark_node;
	    }

	  /* Keep track of the parameter packs and their corresponding
	     argument packs.  The original (possibly SELECT-wrapped)
	     argument is stashed in TREE_TYPE for later restoration.  */
	  packs = tree_cons (parm_pack, arg_pack, packs);
	  TREE_TYPE (packs) = orig_arg;
	}
      else
	{
	  /* We can't substitute for this parameter pack.  Record that
	     with a flag; function parameter packs don't carry a level
	     we could test instead.  */
	  gcc_assert (processing_template_decl || is_auto (parm_pack));
	  unsubstituted_packs = true;
	}
    }

  /* If the expansion is just T..., return the matching argument pack, unless
     we need to call convert_from_reference on all the elements.  This is an
     important optimization; see c++/68422.  */
  if (!unsubstituted_packs
      && TREE_PURPOSE (packs) == pattern)
    {
      tree args = ARGUMENT_PACK_ARGS (TREE_VALUE (packs));

      /* If the argument pack is a single pack expansion, pull it out.  */
      if (TREE_VEC_LENGTH (args) == 1
	  && pack_expansion_args_count (args))
	return TREE_VEC_ELT (args, 0);

      /* Types need no adjustment, nor does sizeof..., and if we still have
	 some pack expansion args we won't do anything yet.  */
      if (TREE_CODE (t) == TYPE_PACK_EXPANSION
	  || PACK_EXPANSION_SIZEOF_P (t)
	  || pack_expansion_args_count (args))
	return args;

      /* Also optimize expression pack expansions if we can tell that the
	 elements won't have reference type.  */
      tree type = TREE_TYPE (pattern);
      if (type && !TYPE_REF_P (type)
	  && !PACK_EXPANSION_P (type)
	  && !WILDCARD_TYPE_P (type))
	return args;

      /* Otherwise use the normal path so we get convert_from_reference.  */
    }

  /* We cannot expand this expansion expression, because we don't have
     all of the argument packs we need.  */
  if (use_pack_expansion_extra_args_p (packs, len, unsubstituted_packs))
    {
      /* We got some full packs, but we can't substitute them in until we
	 have values for all the packs.  So remember these until then.  */

      t = make_pack_expansion (pattern, complain);
      PACK_EXPANSION_EXTRA_ARGS (t)
	= build_extra_args (pattern, args, complain);
      return t;
    }

  /* If NEED_LOCAL_SPECIALIZATIONS then we're in a late-specified return
     type, so create our own local specializations map; the current map is
     either NULL or (in the case of recursive unification) might have
     bindings that we don't want to use or alter.  */
  local_specialization_stack lss (need_local_specializations
				  ? lss_blank : lss_nop);

  if (unsubstituted_packs)
    {
      /* There were no real arguments, we're just replacing a parameter
	 pack with another version of itself.  Substitute into the
	 pattern and return a PACK_EXPANSION_*.  The caller will need to
	 deal with that.  */
      if (TREE_CODE (t) == EXPR_PACK_EXPANSION)
	t = tsubst_expr (pattern, args, complain, in_decl,
			 /*integral_constant_expression_p=*/false);
      else
	t = tsubst (pattern, args, complain, in_decl);
      t = make_pack_expansion (t, complain);
      return t;
    }

  gcc_assert (len >= 0);

  /* For each argument in each argument pack, substitute into the
     pattern.  Work on a copy of ARGS, since gen_elem_of_... modifies
     its ARGS argument in place.  */
  result = make_tree_vec (len);
  tree elem_args = copy_template_args (args);
  for (i = 0; i < len; ++i)
    {
      t = gen_elem_of_pack_expansion_instantiation (pattern, packs,
						    i,
						    elem_args, complain,
						    in_decl);
      TREE_VEC_ELT (result, i) = t;
      if (t == error_mark_node)
	{
	  result = error_mark_node;
	  break;
	}
    }

  /* Update ARGS to restore the substitution from parameter packs to
     their argument packs (saved in TREE_TYPE above).  */
  for (pack = packs; pack; pack = TREE_CHAIN (pack))
    {
      tree parm = TREE_PURPOSE (pack);

      if (TREE_CODE (parm) == PARM_DECL
	  || VAR_P (parm)
	  || TREE_CODE (parm) == FIELD_DECL)
	register_local_specialization (TREE_TYPE (pack), parm);
      else
	{
	  int idx, level;

	  if (TREE_VALUE (pack) == NULL_TREE)
	    continue;

	  template_parm_level_and_index (parm, &level, &idx);

	  /* Update the corresponding argument.  */
	  if (TMPL_ARGS_HAVE_MULTIPLE_LEVELS (args))
	    TREE_VEC_ELT (TREE_VEC_ELT (args, level - 1), idx) =
	      TREE_TYPE (pack);
	  else
	    TREE_VEC_ELT (args, idx) = TREE_TYPE (pack);
	}
    }

  /* If the dependent pack arguments were such that we end up with only a
     single pack expansion again, there's no need to keep it in a TREE_VEC.  */
  if (len == 1 && TREE_CODE (result) == TREE_VEC
      && PACK_EXPANSION_P (TREE_VEC_ELT (result, 0)))
    return TREE_VEC_ELT (result, 0);

  return result;
}
/* Given PARM_DECL PARM, find the corresponding PARM_DECL in the template
   TMPL.  We do this using DECL_PARM_INDEX, which should work even with
   parameter packs; all parms generated from a function parameter pack will
   have the same DECL_PARM_INDEX.  */
tree
get_pattern_parm (tree parm, tree tmpl)
{
  tree pattern = DECL_TEMPLATE_RESULT (tmpl);

  if (DECL_ARTIFICIAL (parm))
    {
      /* Artificial parameters carry no user index; match by name among
	 the pattern's artificial parameters instead.  */
      tree p = DECL_ARGUMENTS (pattern);
      while (p
	     && !(DECL_ARTIFICIAL (p)
		  && DECL_NAME (parm) == DECL_NAME (p)))
	p = DECL_CHAIN (p);
      return p;
    }

  /* Walk to the user-written parameter that shares PARM's index.  */
  tree p = FUNCTION_FIRST_USER_PARM (pattern);
  p = chain_index (DECL_PARM_INDEX (parm) - 1, p);
  gcc_assert (DECL_PARM_INDEX (p)
	      == DECL_PARM_INDEX (parm));
  return p;
}
/* Make an argument pack out of the TREE_VEC VEC; a TYPE_ARGUMENT_PACK
   if the first element is a type, otherwise a NONTYPE_ARGUMENT_PACK.  */
static tree
make_argument_pack (tree vec)
{
  tree pack;
  if (TYPE_P (TREE_VEC_ELT (vec, 0)))
    pack = cxx_make_type (TYPE_ARGUMENT_PACK);
  else
    {
      pack = make_node (NONTYPE_ARGUMENT_PACK);
      TREE_CONSTANT (pack) = 1;
    }
  SET_ARGUMENT_PACK_ARGS (pack, vec);
  return pack;
}
/* Return an exact copy of template args T that can be modified
   independently.  */
static tree
copy_template_args (tree t)
{
  if (t == error_mark_node)
    return t;

  const int len = TREE_VEC_LENGTH (t);
  tree copy = make_tree_vec (len);
  for (int i = 0; i < len; ++i)
    {
      tree elt = TREE_VEC_ELT (t, i);
      /* Recurse into per-level TREE_VECs of multi-level args.  */
      TREE_VEC_ELT (copy, i)
	= (elt && TREE_CODE (elt) == TREE_VEC
	   ? copy_template_args (elt) : elt);
    }
  NON_DEFAULT_TEMPLATE_ARGS_COUNT (copy)
    = NON_DEFAULT_TEMPLATE_ARGS_COUNT (t);
  return copy;
}
/* Substitute ARGS into the *_ARGUMENT_PACK orig_arg.  */
tree
tsubst_argument_pack (tree orig_arg, tree args, tsubst_flags_t complain,
		      tree in_decl)
{
  /* Build an empty pack node of the same kind as ORIG_ARG.  */
  tree result = TYPE_P (orig_arg)
    ? cxx_make_type (TREE_CODE (orig_arg))
    : make_node (TREE_CODE (orig_arg));
  /* Substitute into each of the pack's arguments.  */
  tree subst_args = tsubst_template_args (ARGUMENT_PACK_ARGS (orig_arg),
					  args, complain, in_decl);
  if (subst_args == error_mark_node)
    result = error_mark_node;
  else
    SET_ARGUMENT_PACK_ARGS (result, subst_args);
  /* Preserve constant-ness for nontype packs.  */
  if (TREE_CODE (result) == NONTYPE_ARGUMENT_PACK)
    TREE_CONSTANT (result) = TREE_CONSTANT (orig_arg);
  return result;
}
/* Substitute ARGS into the vector or list of template arguments T.
   Returns T unchanged when no element changes, ERROR_MARK_NODE on
   failure, and otherwise a fresh TREE_VEC whose length accounts for
   any pack expansions that expanded to a different number of
   arguments.  */
tree
tsubst_template_args (tree t, tree args, tsubst_flags_t complain, tree in_decl)
{
  tree orig_t = t;
  int len, need_new = 0, i, expanded_len_adjust = 0, out;
  tree *elts;
  if (t == error_mark_node)
    return error_mark_node;
  len = TREE_VEC_LENGTH (t);
  /* First pass: substitute each element into a scratch array; T is
     only replaced if at least one element actually changed.  */
  elts = XALLOCAVEC (tree, len);
  for (i = 0; i < len; i++)
    {
      tree orig_arg = TREE_VEC_ELT (t, i);
      tree new_arg;
      if (TREE_CODE (orig_arg) == TREE_VEC)
	/* A nested TREE_VEC is one level of a multi-level argument
	   vector; recurse into it.  */
	new_arg = tsubst_template_args (orig_arg, args, complain, in_decl);
      else if (PACK_EXPANSION_P (orig_arg))
	{
	  /* Substitute into an expansion expression. */
	  new_arg = tsubst_pack_expansion (orig_arg, args, complain, in_decl);
	  if (TREE_CODE (new_arg) == TREE_VEC)
	    /* Add to the expanded length adjustment the number of
	       expanded arguments. We subtract one from this
	       measurement, because the argument pack expression
	       itself is already counted as 1 in
	       LEN. EXPANDED_LEN_ADJUST can actually be negative, if
	       the argument pack is empty. */
	    expanded_len_adjust += TREE_VEC_LENGTH (new_arg) - 1;
	}
      else if (ARGUMENT_PACK_P (orig_arg))
	new_arg = tsubst_argument_pack (orig_arg, args, complain, in_decl);
      else
	new_arg = tsubst_template_arg (orig_arg, args, complain, in_decl);
      if (new_arg == error_mark_node)
	return error_mark_node;
      elts[i] = new_arg;
      if (new_arg != orig_arg)
	need_new = 1;
    }
  if (!need_new)
    return t;
  /* Make space for the expanded arguments coming from template
     argument packs. */
  t = make_tree_vec (len + expanded_len_adjust);
  /* ORIG_T can contain TREE_VECs. That happens if ORIG_T contains the
     arguments for a member template.
     In that case each TREE_VEC in ORIG_T represents a level of template
     arguments, and ORIG_T won't carry any non defaulted argument count.
     It will rather be the nested TREE_VECs that will carry one.
     In other words, ORIG_T carries a non defaulted argument count only
     if it doesn't contain any nested TREE_VEC. */
  if (NON_DEFAULT_TEMPLATE_ARGS_COUNT (orig_t))
    {
      int count = GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (orig_t);
      count += expanded_len_adjust;
      SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (t, count);
    }
  /* Second pass: copy the substituted elements into the new vector.
     OUT is the write index, which diverges from I once a pack has
     expanded to a length other than one.  */
  for (i = 0, out = 0; i < len; i++)
    {
      if ((PACK_EXPANSION_P (TREE_VEC_ELT (orig_t, i))
	   || ARGUMENT_PACK_P (TREE_VEC_ELT (orig_t, i)))
	  && TREE_CODE (elts[i]) == TREE_VEC)
	{
	  int idx;
	  /* Now expand the template argument pack "in place".  */
	  for (idx = 0; idx < TREE_VEC_LENGTH (elts[i]); idx++, out++)
	    TREE_VEC_ELT (t, out) = TREE_VEC_ELT (elts[i], idx);
	}
      else
	{
	  TREE_VEC_ELT (t, out) = elts[i];
	  out++;
	}
    }
  return t;
}
/* Substitute ARGS into one level PARMS of template parameters.  */
static tree
tsubst_template_parms_level (tree parms, tree args, tsubst_flags_t complain)
{
  if (parms == error_mark_node)
    return error_mark_node;
  const int n = TREE_VEC_LENGTH (parms);
  tree result = make_tree_vec (n);
  for (int ix = 0; ix < n; ++ix)
    {
      tree tuple = TREE_VEC_ELT (parms, ix);
      /* Erroneous parameters are skipped, leaving their slot empty.  */
      if (tuple == error_mark_node)
	continue;
      TREE_VEC_ELT (result, ix)
	= tsubst_template_parm (tuple, args, complain);
    }
  return result;
}
/* Return the result of substituting ARGS into the template parameters
   given by PARMS.  If there are m levels of ARGS and m + n levels of
   PARMS, then the result will contain n levels of PARMS.  For
   example, if PARMS is `template <class T> template <class U>
   template <T*, U, class V>' and ARGS is {{int}, {double}} then the
   result will be `template <int*, double, class V>'. */
static tree
tsubst_template_parms (tree parms, tree args, tsubst_flags_t complain)
{
  tree r = NULL_TREE;
  tree* new_parms;
  /* When substituting into a template, we must set
     PROCESSING_TEMPLATE_DECL as the template parameters may be
     dependent if they are based on one-another, and the dependency
     predicates are short-circuit outside of templates. */
  ++processing_template_decl;
  /* Walk only the outermost levels left over after ARGS consumes
     TMPL_ARGS_DEPTH levels, appending each substituted level to the
     result list through the NEW_PARMS tail pointer.  */
  for (new_parms = &r;
       parms && TMPL_PARMS_DEPTH (parms) > TMPL_ARGS_DEPTH (args);
       new_parms = &(TREE_CHAIN (*new_parms)),
       parms = TREE_CHAIN (parms))
    {
      tree new_vec = tsubst_template_parms_level (TREE_VALUE (parms),
						  args, complain);
      /* Each level's TREE_PURPOSE records its remaining depth.  */
      *new_parms =
	tree_cons (size_int (TMPL_PARMS_DEPTH (parms)
			     - TMPL_ARGS_DEPTH (args)),
		   new_vec, NULL_TREE);
      TEMPLATE_PARMS_CONSTRAINTS (*new_parms)
	= TEMPLATE_PARMS_CONSTRAINTS (parms);
    }
  --processing_template_decl;
  return r;
}
/* Return the result of substituting ARGS into one template parameter
   given by T. T Must be a TREE_LIST which TREE_VALUE is the template
   parameter and which TREE_PURPOSE is the default argument of the
   template parameter. */
static tree
tsubst_template_parm (tree t, tree args, tsubst_flags_t complain)
{
  tree default_value, parm_decl;
  if (args == NULL_TREE
      || t == NULL_TREE
      || t == error_mark_node)
    return t;
  gcc_assert (TREE_CODE (t) == TREE_LIST);
  default_value = TREE_PURPOSE (t);
  parm_decl = TREE_VALUE (t);
  tree constraint = TEMPLATE_PARM_CONSTRAINTS (t);
  /* Substitute the parameter declaration itself first.  */
  parm_decl = tsubst (parm_decl, args, complain, NULL_TREE);
  /* A substituted nontype parameter may end up with a type that is
     invalid for a template parameter; diagnose per COMPLAIN.  */
  if (TREE_CODE (parm_decl) == PARM_DECL
      && invalid_nontype_parm_type_p (TREE_TYPE (parm_decl), complain))
    parm_decl = error_mark_node;
  /* Then substitute into the default argument and constraints.  */
  default_value = tsubst_template_arg (default_value, args,
				       complain, NULL_TREE);
  constraint = tsubst_constraint (constraint, args, complain, NULL_TREE);
  /* Rebuild a TREE_LIST with the same layout as the input.  */
  tree r = build_tree_list (default_value, parm_decl);
  TEMPLATE_PARM_CONSTRAINTS (r) = constraint;
  return r;
}
/* Substitute the ARGS into the indicated aggregate (or enumeration)
   type T.  If T is not an aggregate or enumeration type, it is
   handled as if by tsubst.  IN_DECL is as for tsubst.  If
   ENTERING_SCOPE is nonzero, T is the context for a template which
   we are presently tsubst'ing.  Return the substituted value. */
static tree
tsubst_aggr_type (tree t,
		  tree args,
		  tsubst_flags_t complain,
		  tree in_decl,
		  int entering_scope)
{
  if (t == NULL_TREE)
    return NULL_TREE;
  switch (TREE_CODE (t))
    {
    case RECORD_TYPE:
      /* Pointer-to-member-function types are RECORD_TYPEs internally;
	 substitute through the underlying function type instead.  */
      if (TYPE_PTRMEMFUNC_P (t))
	return tsubst (TYPE_PTRMEMFUNC_FN_TYPE (t), args, complain, in_decl);
      /* Fall through.  */
    case ENUMERAL_TYPE:
    case UNION_TYPE:
      if (TYPE_TEMPLATE_INFO (t) && uses_template_parms (t))
	{
	  tree argvec;
	  tree context;
	  tree r;
	  /* In "sizeof(X<I>)" we need to evaluate "I". */
	  cp_evaluated ev;
	  /* First, determine the context for the type we are looking
	     up. */
	  context = TYPE_CONTEXT (t);
	  if (context && TYPE_P (context))
	    {
	      context = tsubst_aggr_type (context, args, complain,
					  in_decl, /*entering_scope=*/1);
	      /* If context is a nested class inside a class template,
		 it may still need to be instantiated (c++/33959).  */
	      context = complete_type (context);
	    }
	  /* Then, figure out what arguments are appropriate for the
	     type we are trying to find.  For example, given:
	       template <class T> struct S;
	       template <class T, class U> void f(T, U) { S<U> su; }
	     and supposing that we are instantiating f<int, double>,
	     then our ARGS will be {int, double}, but, when looking up
	     S we only want {double}. */
	  argvec = tsubst_template_args (TYPE_TI_ARGS (t), args,
					 complain, in_decl);
	  if (argvec == error_mark_node)
	    r = error_mark_node;
	  else if (!entering_scope
		   && cxx_dialect >= cxx2a && dependent_scope_p (context))
	    {
	      /* See maybe_dependent_member_ref.  The context is still
		 dependent, so defer the lookup by building a
		 TYPENAME_TYPE rather than instantiating now.  */
	      tree name = TYPE_IDENTIFIER (t);
	      tree fullname = name;
	      if (instantiates_primary_template_p (t))
		fullname = build_nt (TEMPLATE_ID_EXPR, name,
				     INNERMOST_TEMPLATE_ARGS (argvec));
	      return build_typename_type (context, name, fullname,
					  typename_type);
	    }
	  else
	    {
	      r = lookup_template_class (t, argvec, in_decl, context,
					 entering_scope, complain);
	      /* Re-apply T's cv-qualifiers to the looked-up type.  */
	      r = cp_build_qualified_type_real (r, cp_type_quals (t), complain);
	    }
	  return r;
	}
      else
	/* This is not a template type, so there's nothing to do. */
	return t;
    default:
      return tsubst (t, args, complain, in_decl);
    }
}
/* Cache of already-instantiated default arguments, keyed by the
   PARM_DECL they belong to; created lazily below.  */
static GTY((cache)) decl_tree_cache_map *defarg_inst;
/* Substitute into the default argument ARG (a default argument for
   FN), which has the indicated TYPE.  PARMNUM is the index of the
   parameter among FN's user-written parameters.  Returns the
   instantiated (and checked) default argument, caching the result
   in DEFARG_INST.  */
tree
tsubst_default_argument (tree fn, int parmnum, tree type, tree arg,
			 tsubst_flags_t complain)
{
  int errs = errorcount + sorrycount;
  /* This can happen in invalid code. */
  if (TREE_CODE (arg) == DEFERRED_PARSE)
    return arg;
  tree parm = FUNCTION_FIRST_USER_PARM (fn);
  parm = chain_index (parmnum, parm);
  tree parmtype = TREE_TYPE (parm);
  if (DECL_BY_REFERENCE (parm))
    parmtype = TREE_TYPE (parmtype);
  if (parmtype == error_mark_node)
    return error_mark_node;
  gcc_assert (same_type_ignoring_top_level_qualifiers_p (type, parmtype));
  /* Return a previously-computed result if we have one.  */
  tree *slot;
  if (defarg_inst && (slot = defarg_inst->get (parm)))
    return *slot;
  /* This default argument came from a template.  Instantiate the
     default argument here, not in tsubst.  In the case of
     something like:
       template <class T>
       struct S {
	 static T t();
	 void f(T = t());
       };
     we must be careful to do name lookup in the scope of S<T>,
     rather than in the current class. */
  push_to_top_level ();
  push_access_scope (fn);
  push_deferring_access_checks (dk_no_deferred);
  start_lambda_scope (parm);
  /* The default argument expression may cause implicitly defined
     member functions to be synthesized, which will result in garbage
     collection.  We must treat this situation as if we were within
     the body of function so as to avoid collecting live data on the
     stack. */
  ++function_depth;
  arg = tsubst_expr (arg, DECL_TI_ARGS (fn),
		     complain, NULL_TREE,
		     /*integral_constant_expression_p=*/false);
  --function_depth;
  finish_lambda_scope ();
  /* Make sure the default argument is reasonable. */
  arg = check_default_argument (type, arg, complain);
  /* If errors were emitted during the substitution above, point at
     the function whose default argument triggered them.  */
  if (errorcount+sorrycount > errs
      && (complain & tf_warning_or_error))
    inform (input_location,
	    "  when instantiating default argument for call to %qD", fn);
  pop_deferring_access_checks ();
  pop_access_scope (fn);
  pop_from_top_level ();
  /* Cache the result for subsequent calls, but not if it is an error
     or we are in an unevaluated context.  */
  if (arg != error_mark_node && !cp_unevaluated_operand)
    {
      if (!defarg_inst)
	defarg_inst = decl_tree_cache_map::create_ggc (37);
      defarg_inst->put (parm, arg);
    }
  return arg;
}
/* Substitute into all the default arguments for FN.  */
static void
tsubst_default_arguments (tree fn, tsubst_flags_t complain)
{
  /* If this function is not yet instantiated, we certainly don't need
     its default arguments.  */
  if (uses_template_parms (DECL_TI_ARGS (fn)))
    return;
  /* Don't do this again for clones.  */
  if (DECL_CLONED_FUNCTION_P (fn))
    return;
  /* Walk the parameter-type list; any TREE_PURPOSE is a default
     argument to instantiate in place.  */
  int parmnum = 0;
  for (tree arg = TYPE_ARG_TYPES (TREE_TYPE (fn));
       arg;
       arg = TREE_CHAIN (arg), ++parmnum)
    {
      if (!TREE_PURPOSE (arg))
	continue;
      TREE_PURPOSE (arg)
	= tsubst_default_argument (fn, parmnum,
				   TREE_VALUE (arg),
				   TREE_PURPOSE (arg),
				   complain);
    }
}
/* Hash table mapping a FUNCTION_DECL to its dependent explicit-specifier.
   Created lazily by store_explicit_specifier; read back by
   lookup_explicit_specifier.  */
static GTY((cache)) decl_tree_cache_map *explicit_specifier_map;
/* Store a pair to EXPLICIT_SPECIFIER_MAP.  */
void
store_explicit_specifier (tree v, tree t)
{
  /* Create the map lazily on first use.  */
  if (explicit_specifier_map == NULL)
    explicit_specifier_map = decl_tree_cache_map::create_ggc (37);
  /* Flag V so readers know to consult the map.  */
  DECL_HAS_DEPENDENT_EXPLICIT_SPEC_P (v) = true;
  explicit_specifier_map->put (v, t);
}
/* Lookup an element in EXPLICIT_SPECIFIER_MAP.  */
static tree
lookup_explicit_specifier (tree v)
{
  /* Callers only pass decls previously registered via
     store_explicit_specifier, so the lookup is assumed to succeed.  */
  tree *slot = explicit_specifier_map->get (v);
  return *slot;
}
/* Given T, a FUNCTION_TYPE or METHOD_TYPE, construct and return a corresponding
   FUNCTION_TYPE or METHOD_TYPE whose return type is RETURN_TYPE, argument types
   are ARG_TYPES, and exception specification is RAISES, and otherwise is
   identical to T.  Returns ERROR_MARK_NODE if a METHOD_TYPE would have a
   non-class "this" type.  */
static tree
rebuild_function_or_method_type (tree t, tree return_type, tree arg_types,
				 tree raises, tsubst_flags_t complain)
{
  gcc_assert (FUNC_OR_METHOD_TYPE_P (t));
  tree new_type;
  if (TREE_CODE (t) == FUNCTION_TYPE)
    {
      new_type = build_function_type (return_type, arg_types);
      /* Carry over T's member-function cv-qualifiers.  */
      new_type = apply_memfn_quals (new_type, type_memfn_quals (t));
    }
  else
    {
      /* For a METHOD_TYPE the first entry in ARG_TYPES is the implicit
	 object parameter; its pointee is the class type.  */
      tree r = TREE_TYPE (TREE_VALUE (arg_types));
      /* Don't pick up extra function qualifiers from the basetype. */
      r = cp_build_qualified_type_real (r, type_memfn_quals (t), complain);
      if (! MAYBE_CLASS_TYPE_P (r))
	{
	  /* [temp.deduct]
	     Type deduction may fail for any of the following
	     reasons:
	     -- Attempting to create "pointer to member of T" when T
	     is not a class type. */
	  if (complain & tf_error)
	    error ("creating pointer to member function of non-class type %qT",
		   r);
	  return error_mark_node;
	}
      /* Skip the object parameter when building the method type.  */
      new_type = build_method_type_directly (r, return_type,
					     TREE_CHAIN (arg_types));
    }
  /* Preserve T's attributes, ref-qualifier, exception spec, and
     whether it used a trailing return type.  */
  new_type = cp_build_type_attribute_variant (new_type, TYPE_ATTRIBUTES (t));
  cp_ref_qualifier rqual = type_memfn_rqual (t);
  bool late_return_type_p = TYPE_HAS_LATE_RETURN_TYPE (t);
  return build_cp_fntype_variant (new_type, rqual, raises, late_return_type_p);
}
/* Check if the function type of DECL, a FUNCTION_DECL, agrees with the type of
   each of its formal parameters.  If there is a disagreement then rebuild
   DECL's function type according to its formal parameter types, as part of a
   resolution for Core issues 1001/1322.  */
static void
maybe_rebuild_function_decl_type (tree decl)
{
  /* First pass: cheap scan to see whether any parameter type in the
     function type differs from the (unqualified) type of the
     corresponding PARM_DECL.  */
  bool function_type_needs_rebuilding = false;
  if (tree parm_list = FUNCTION_FIRST_USER_PARM (decl))
    {
      tree parm_type_list = FUNCTION_FIRST_USER_PARMTYPE (decl);
      while (parm_type_list && parm_type_list != void_list_node)
	{
	  tree parm_type = TREE_VALUE (parm_type_list);
	  tree formal_parm_type_unqual = strip_top_quals (TREE_TYPE (parm_list));
	  if (!same_type_p (parm_type, formal_parm_type_unqual))
	    {
	      function_type_needs_rebuilding = true;
	      break;
	    }
	  parm_list = DECL_CHAIN (parm_list);
	  parm_type_list = TREE_CHAIN (parm_type_list);
	}
    }
  if (!function_type_needs_rebuilding)
    return;
  /* Second pass: rebuild the parameter-type list, copying each node
     and patching any mismatched types.  */
  const tree fntype = TREE_TYPE (decl);
  tree parm_list = DECL_ARGUMENTS (decl);
  tree old_parm_type_list = TYPE_ARG_TYPES (fntype);
  tree new_parm_type_list = NULL_TREE;
  tree *q = &new_parm_type_list;
  /* Copy artificial parameters (e.g. 'this', in-charge) verbatim.  */
  for (int skip = num_artificial_parms_for (decl); skip > 0; skip--)
    {
      *q = copy_node (old_parm_type_list);
      parm_list = DECL_CHAIN (parm_list);
      old_parm_type_list = TREE_CHAIN (old_parm_type_list);
      q = &TREE_CHAIN (*q);
    }
  while (old_parm_type_list && old_parm_type_list != void_list_node)
    {
      *q = copy_node (old_parm_type_list);
      tree *new_parm_type = &TREE_VALUE (*q);
      tree formal_parm_type_unqual = strip_top_quals (TREE_TYPE (parm_list));
      if (!same_type_p (*new_parm_type, formal_parm_type_unqual))
	*new_parm_type = formal_parm_type_unqual;
      parm_list = DECL_CHAIN (parm_list);
      old_parm_type_list = TREE_CHAIN (old_parm_type_list);
      q = &TREE_CHAIN (*q);
    }
  /* Reuse the shared terminator for non-varargs functions.  */
  if (old_parm_type_list == void_list_node)
    *q = void_list_node;
  TREE_TYPE (decl)
    = rebuild_function_or_method_type (fntype,
				       TREE_TYPE (fntype), new_parm_type_list,
				       TYPE_RAISES_EXCEPTIONS (fntype), tf_none);
}
/* Subroutine of tsubst_decl for the case when T is a FUNCTION_DECL.
   LAMBDA_FNTYPE, when non-null, is the already-substituted type for a
   lambda call operator being regenerated by tsubst_lambda_expr.
   Returns the (possibly cached) specialization, or ERROR_MARK_NODE.  */
static tree
tsubst_function_decl (tree t, tree args, tsubst_flags_t complain,
		      tree lambda_fntype)
{
  tree gen_tmpl, argvec;
  hashval_t hash = 0;
  tree in_decl = t;
  /* Nobody should be tsubst'ing into non-template functions. */
  gcc_assert (DECL_TEMPLATE_INFO (t) != NULL_TREE);
  if (TREE_CODE (DECL_TI_TEMPLATE (t)) == TEMPLATE_DECL)
    {
      /* If T is not dependent, just return it. */
      if (!uses_template_parms (DECL_TI_ARGS (t))
	  && !LAMBDA_FUNCTION_P (t))
	return t;
      /* Calculate the most general template of which R is a
	 specialization. */
      gen_tmpl = most_general_template (DECL_TI_TEMPLATE (t));
      /* We're substituting a lambda function under tsubst_lambda_expr but not
	 directly from it; find the matching function we're already inside.
	 But don't do this if T is a generic lambda with a single level of
	 template parms, as in that case we're doing a normal instantiation. */
      if (LAMBDA_FUNCTION_P (t) && !lambda_fntype
	  && (!generic_lambda_fn_p (t)
	      || TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (gen_tmpl)) > 1))
	return enclosing_instantiation_of (t);
      /* Calculate the complete set of arguments used to
	 specialize R. */
      argvec = tsubst_template_args (DECL_TI_ARGS
				     (DECL_TEMPLATE_RESULT
				      (DECL_TI_TEMPLATE (t))),
				     args, complain, in_decl);
      if (argvec == error_mark_node)
	return error_mark_node;
      /* Check to see if we already have this specialization. */
      if (!lambda_fntype)
	{
	  hash = hash_tmpl_and_args (gen_tmpl, argvec);
	  if (tree spec = retrieve_specialization (gen_tmpl, argvec, hash))
	    return spec;
	}
      /* We can see more levels of arguments than parameters if
	 there was a specialization of a member template, like
	 this:
	   template <class T> struct S { template <class U> void f(); }
	   template <> template <class U> void S<int>::f(U);
	 Here, we'll be substituting into the specialization,
	 because that's where we can find the code we actually
	 want to generate, but we'll have enough arguments for
	 the most general template.
	 We also deal with the peculiar case:
	   template <class T> struct S {
	     template <class U> friend void f();
	   };
	   template <class U> void f() {}
	   template S<int>;
	   template void f<double>();
	 Here, the ARGS for the instantiation of will be {int,
	 double}.  But, we only need as many ARGS as there are
	 levels of template parameters in CODE_PATTERN.  We are
	 careful not to get fooled into reducing the ARGS in
	 situations like:
	   template <class T> struct S { template <class U> void f(U); }
	   template <class T> template <> void S<T>::f(int) {}
	 which we can spot because the pattern will be a
	 specialization in this case. */
      int args_depth = TMPL_ARGS_DEPTH (args);
      int parms_depth =
	TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (DECL_TI_TEMPLATE (t)));
      if (args_depth > parms_depth && !DECL_TEMPLATE_SPECIALIZATION (t))
	args = get_innermost_template_args (args, parms_depth);
    }
  else
    {
      /* This special case arises when we have something like this:
	   template <class T> struct S {
	     friend void f<int>(int, double);
	   };
	 Here, the DECL_TI_TEMPLATE for the friend declaration
	 will be an IDENTIFIER_NODE.  We are being called from
	 tsubst_friend_function, and we want only to create a
	 new decl (R) with appropriate types so that we can call
	 determine_specialization. */
      gen_tmpl = NULL_TREE;
      argvec = NULL_TREE;
    }
  /* For a regenerated lambda, the context is the substituted closure
     type; otherwise it is T's (possibly substituted) context.  */
  tree closure = (lambda_fntype ? TYPE_METHOD_BASETYPE (lambda_fntype)
		  : NULL_TREE);
  tree ctx = closure ? closure : DECL_CONTEXT (t);
  bool member = ctx && TYPE_P (ctx);
  if (member && !closure)
    ctx = tsubst_aggr_type (ctx, args,
			    complain, t, /*entering_scope=*/1);
  tree type = (lambda_fntype ? lambda_fntype
	       : tsubst (TREE_TYPE (t), args,
			 complain | tf_fndecl_type, in_decl));
  if (type == error_mark_node)
    return error_mark_node;
  /* If we hit excessive deduction depth, the type is bogus even if
     it isn't error_mark_node, so don't build a decl. */
  if (excessive_deduction_depth)
    return error_mark_node;
  /* We do NOT check for matching decls pushed separately at this
     point, as they may not represent instantiations of this
     template, and in any case are considered separate under the
     discrete model. */
  tree r = copy_decl (t);
  DECL_USE_TEMPLATE (r) = 0;
  TREE_TYPE (r) = type;
  /* Clear out the mangled name and RTL for the instantiation. */
  SET_DECL_ASSEMBLER_NAME (r, NULL_TREE);
  SET_DECL_RTL (r, NULL);
  /* Leave DECL_INITIAL set on deleted instantiations. */
  if (!DECL_DELETED_FN (r))
    DECL_INITIAL (r) = NULL_TREE;
  DECL_CONTEXT (r) = ctx;
  /* Handle explicit(dependent-expr). */
  if (DECL_HAS_DEPENDENT_EXPLICIT_SPEC_P (t))
    {
      tree spec = lookup_explicit_specifier (t);
      spec = tsubst_copy_and_build (spec, args, complain, in_decl,
				    /*function_p=*/false,
				    /*i_c_e_p=*/true);
      spec = build_explicit_specifier (spec, complain);
      DECL_NONCONVERTING_P (r) = (spec == boolean_true_node);
    }
  /* OpenMP UDRs have the only argument a reference to the declared
     type.  We want to diagnose if the declared type is a reference,
     which is invalid, but as references to references are usually
     quietly merged, diagnose it here. */
  if (DECL_OMP_DECLARE_REDUCTION_P (t))
    {
      tree argtype
	= TREE_TYPE (TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (t))));
      argtype = tsubst (argtype, args, complain, in_decl);
      if (TYPE_REF_P (argtype))
	error_at (DECL_SOURCE_LOCATION (t),
		  "reference type %qT in "
		  "%<#pragma omp declare reduction%>", argtype);
      if (strchr (IDENTIFIER_POINTER (DECL_NAME (t)), '~') == NULL)
	DECL_NAME (r) = omp_reduction_id (ERROR_MARK, DECL_NAME (t),
					  argtype);
    }
  if (member && DECL_CONV_FN_P (r))
    /* Type-conversion operator.  Reconstruct the name, in
       case it's the name of one of the template's parameters. */
    DECL_NAME (r) = make_conv_op_name (TREE_TYPE (type));
  /* Substitute the parameter list; for a closure, the 'this' parameter
     is rebuilt fresh below rather than substituted.  */
  tree parms = DECL_ARGUMENTS (t);
  if (closure)
    parms = DECL_CHAIN (parms);
  parms = tsubst (parms, args, complain, t);
  for (tree parm = parms; parm; parm = DECL_CHAIN (parm))
    DECL_CONTEXT (parm) = r;
  if (closure)
    {
      tree tparm = build_this_parm (r, closure, type_memfn_quals (type));
      DECL_CHAIN (tparm) = parms;
      parms = tparm;
    }
  DECL_ARGUMENTS (r) = parms;
  DECL_RESULT (r) = NULL_TREE;
  maybe_rebuild_function_decl_type (r);
  /* Reset per-decl state that must not be inherited from the pattern:
     linkage bookkeeping, saved bodies, clone links, etc.  */
  TREE_STATIC (r) = 0;
  TREE_PUBLIC (r) = TREE_PUBLIC (t);
  DECL_EXTERNAL (r) = 1;
  /* If this is an instantiation of a function with internal
     linkage, we already know what object file linkage will be
     assigned to the instantiation. */
  DECL_INTERFACE_KNOWN (r) = !TREE_PUBLIC (r);
  DECL_DEFER_OUTPUT (r) = 0;
  DECL_CHAIN (r) = NULL_TREE;
  DECL_PENDING_INLINE_INFO (r) = 0;
  DECL_PENDING_INLINE_P (r) = 0;
  DECL_SAVED_TREE (r) = NULL_TREE;
  DECL_STRUCT_FUNCTION (r) = NULL;
  TREE_USED (r) = 0;
  /* We'll re-clone as appropriate in instantiate_template. */
  DECL_CLONED_FUNCTION (r) = NULL_TREE;
  /* If we aren't complaining now, return on error before we register
     the specialization so that we'll complain eventually. */
  if ((complain & tf_error) == 0
      && IDENTIFIER_ANY_OP_P (DECL_NAME (r))
      && !grok_op_properties (r, /*complain=*/false))
    return error_mark_node;
  /* Associate the constraints directly with the instantiation.  We
     don't substitute through the constraints; that's only done when
     they are checked. */
  if (tree ci = get_constraints (t))
    /* Unless we're regenerating a lambda, in which case we'll set the
       lambda's constraints in tsubst_lambda_expr. */
    if (!lambda_fntype)
      set_constraints (r, ci);
  if (DECL_FRIEND_P (t) && DECL_FRIEND_CONTEXT (t))
    SET_DECL_FRIEND_CONTEXT (r,
			     tsubst (DECL_FRIEND_CONTEXT (t),
				     args, complain, in_decl));
  /* Set up the DECL_TEMPLATE_INFO for R.  There's no need to do
     this in the special friend case mentioned above where
     GEN_TMPL is NULL. */
  if (gen_tmpl && !closure)
    {
      DECL_TEMPLATE_INFO (r)
	= build_template_info (gen_tmpl, argvec);
      SET_DECL_IMPLICIT_INSTANTIATION (r);
      tree new_r
	= register_specialization (r, gen_tmpl, argvec, false, hash);
      if (new_r != r)
	/* We instantiated this while substituting into
	   the type earlier (template/friend54.C). */
	return new_r;
      /* We're not supposed to instantiate default arguments
	 until they are called, for a template.  But, for a
	 declaration like:
	   template <class T> void f ()
	   { extern void g(int i = T()); }
	 we should do the substitution when the template is
	 instantiated.  We handle the member function case in
	 instantiate_class_template since the default arguments
	 might refer to other members of the class. */
      if (!member
	  && !PRIMARY_TEMPLATE_P (gen_tmpl)
	  && !uses_template_parms (argvec))
	tsubst_default_arguments (r, complain);
    }
  else
    DECL_TEMPLATE_INFO (r) = NULL_TREE;
  /* Copy the list of befriending classes. */
  for (tree *friends = &DECL_BEFRIENDING_CLASSES (r);
       *friends;
       friends = &TREE_CHAIN (*friends))
    {
      *friends = copy_node (*friends);
      TREE_VALUE (*friends)
	= tsubst (TREE_VALUE (*friends), args, complain, in_decl);
    }
  if (DECL_CONSTRUCTOR_P (r) || DECL_DESTRUCTOR_P (r))
    {
      maybe_retrofit_in_chrg (r);
      if (DECL_CONSTRUCTOR_P (r) && !grok_ctor_properties (ctx, r))
	return error_mark_node;
      /* If this is an instantiation of a member template, clone it.
	 If it isn't, that'll be handled by
	 clone_constructors_and_destructors. */
      if (PRIMARY_TEMPLATE_P (gen_tmpl))
	clone_function_decl (r, /*update_methods=*/false);
    }
  else if ((complain & tf_error) != 0
	   && IDENTIFIER_ANY_OP_P (DECL_NAME (r))
	   && !grok_op_properties (r, /*complain=*/true))
    return error_mark_node;
  /* Possibly limit visibility based on template args. */
  DECL_VISIBILITY (r) = VISIBILITY_DEFAULT;
  if (DECL_VISIBILITY_SPECIFIED (t))
    {
      DECL_VISIBILITY_SPECIFIED (r) = 0;
      DECL_ATTRIBUTES (r)
	= remove_attribute ("visibility", DECL_ATTRIBUTES (r));
    }
  determine_visibility (r);
  if (DECL_DEFAULTED_OUTSIDE_CLASS_P (r)
      && !processing_template_decl)
    defaulted_late_check (r);
  apply_late_template_attributes (&r, DECL_ATTRIBUTES (r), 0,
				  args, complain, in_decl);
  if (flag_openmp)
    if (tree attr = lookup_attribute ("omp declare variant base",
				      DECL_ATTRIBUTES (r)))
      omp_declare_variant_finalize (r, attr);
  return r;
}
/* Subroutine of tsubst_decl for the case when T is a TEMPLATE_DECL.
   LAMBDA_FNTYPE, when non-null, is the substituted type of a lambda
   call operator being regenerated; it suppresses specialization
   caching/registration.  */
static tree
tsubst_template_decl (tree t, tree args, tsubst_flags_t complain,
		      tree lambda_fntype)
{
  /* We can get here when processing a member function template,
     member class template, or template template parameter. */
  tree decl = DECL_TEMPLATE_RESULT (t);
  tree in_decl = t;
  tree spec;
  tree tmpl_args;
  tree full_args;
  tree r;
  hashval_t hash = 0;
  if (DECL_TEMPLATE_TEMPLATE_PARM_P (t))
    {
      /* Template template parameter is treated here. */
      tree new_type = tsubst (TREE_TYPE (t), args, complain, in_decl);
      if (new_type == error_mark_node)
	r = error_mark_node;
      /* If we get a real template back, return it.  This can happen in
	 the context of most_specialized_partial_spec. */
      else if (TREE_CODE (new_type) == TEMPLATE_DECL)
	r = new_type;
      else
	/* The new TEMPLATE_DECL was built in
	   reduce_template_parm_level. */
	r = TEMPLATE_TEMPLATE_PARM_TEMPLATE_DECL (new_type);
      return r;
    }
  if (!lambda_fntype)
    {
      /* We might already have an instance of this template.
	 The ARGS are for the surrounding class type, so the
	 full args contain the tsubst'd args for the context,
	 plus the innermost args from the template decl. */
      tmpl_args = DECL_CLASS_TEMPLATE_P (t)
	? CLASSTYPE_TI_ARGS (TREE_TYPE (t))
	: DECL_TI_ARGS (DECL_TEMPLATE_RESULT (t));
      /* Because this is a template, the arguments will still be
	 dependent, even after substitution.  If
	 PROCESSING_TEMPLATE_DECL is not set, the dependency
	 predicates will short-circuit. */
      ++processing_template_decl;
      full_args = tsubst_template_args (tmpl_args, args,
					complain, in_decl);
      --processing_template_decl;
      if (full_args == error_mark_node)
	return error_mark_node;
      /* If this is a default template template argument,
	 tsubst might not have changed anything. */
      if (full_args == tmpl_args)
	return t;
      hash = hash_tmpl_and_args (t, full_args);
      spec = retrieve_specialization (t, full_args, hash);
      if (spec != NULL_TREE)
	{
	  if (TYPE_P (spec))
	    /* Type partial instantiations are stored as the type by
	       lookup_template_class_1, not here as the template. */
	    spec = CLASSTYPE_TI_TEMPLATE (spec);
	  return spec;
	}
    }
  /* Make a new template decl.  It will be similar to the
     original, but will record the current template arguments.
     We also create a new function declaration, which is just
     like the old one, but points to this new template, rather
     than the old one. */
  r = copy_decl (t);
  gcc_assert (DECL_LANG_SPECIFIC (r) != 0);
  DECL_CHAIN (r) = NULL_TREE;
  // Build new template info linking to the original template decl.
  if (!lambda_fntype)
    {
      DECL_TEMPLATE_INFO (r) = build_template_info (t, args);
      SET_DECL_IMPLICIT_INSTANTIATION (r);
    }
  else
    DECL_TEMPLATE_INFO (r) = NULL_TREE;
  /* The template parameters for this new template are all the
     template parameters for the old template, except the
     outermost level of parameters. */
  DECL_TEMPLATE_PARMS (r)
    = tsubst_template_parms (DECL_TEMPLATE_PARMS (t), args,
			     complain);
  if (TREE_CODE (decl) == TYPE_DECL
      && !TYPE_DECL_ALIAS_P (decl))
    {
      /* Class template case: substitute the underlying type and wire
	 the new template and new type to each other.  */
      tree new_type;
      ++processing_template_decl;
      if (CLASS_TYPE_P (TREE_TYPE (t)))
	new_type = tsubst_aggr_type (TREE_TYPE (t), args, complain,
				     in_decl, /*entering*/1);
      else
	new_type = tsubst (TREE_TYPE (t), args, complain, in_decl);
      --processing_template_decl;
      if (new_type == error_mark_node)
	return error_mark_node;
      TREE_TYPE (r) = new_type;
      /* For a partial specialization, we need to keep pointing to
	 the primary template. */
      if (!DECL_TEMPLATE_SPECIALIZATION (t))
	CLASSTYPE_TI_TEMPLATE (new_type) = r;
      DECL_TEMPLATE_RESULT (r) = TYPE_MAIN_DECL (new_type);
      DECL_TI_ARGS (r) = CLASSTYPE_TI_ARGS (new_type);
      DECL_CONTEXT (r) = TYPE_CONTEXT (new_type);
    }
  else
    {
      /* Function (or variable/alias) template case: substitute the
	 result decl and link it back to the new template.  */
      tree new_decl;
      ++processing_template_decl;
      if (TREE_CODE (decl) == FUNCTION_DECL)
	new_decl = tsubst_function_decl (decl, args, complain, lambda_fntype);
      else
	new_decl = tsubst (decl, args, complain, in_decl);
      --processing_template_decl;
      if (new_decl == error_mark_node)
	return error_mark_node;
      DECL_TEMPLATE_RESULT (r) = new_decl;
      TREE_TYPE (r) = TREE_TYPE (new_decl);
      DECL_CONTEXT (r) = DECL_CONTEXT (new_decl);
      if (lambda_fntype)
	{
	  tree args = template_parms_to_args (DECL_TEMPLATE_PARMS (r));
	  DECL_TEMPLATE_INFO (new_decl) = build_template_info (r, args);
	}
      else
	{
	  DECL_TI_TEMPLATE (new_decl) = r;
	  DECL_TI_ARGS (r) = DECL_TI_ARGS (new_decl);
	}
    }
  DECL_TEMPLATE_INSTANTIATIONS (r) = NULL_TREE;
  DECL_TEMPLATE_SPECIALIZATIONS (r) = NULL_TREE;
  if (PRIMARY_TEMPLATE_P (t))
    DECL_PRIMARY_TEMPLATE (r) = r;
  if (TREE_CODE (decl) != TYPE_DECL && !VAR_P (decl)
      && !lambda_fntype)
    /* Record this non-type partial instantiation. */
    register_specialization (r, t,
			     DECL_TI_ARGS (DECL_TEMPLATE_RESULT (r)),
			     false, hash);
  return r;
}
/* True if FN is the op() for a lambda in an uninstantiated template.  */
bool
lambda_fn_in_template_p (tree fn)
{
  if (fn == NULL_TREE || !LAMBDA_FUNCTION_P (fn))
    return false;
  /* The enclosing closure type carries template info exactly when the
     lambda's template context has not yet been instantiated.  */
  return CLASSTYPE_TEMPLATE_INFO (DECL_CONTEXT (fn)) != NULL_TREE;
}
/* True if FN is the substitution (via tsubst_lambda_expr) of a function for
   which the above is true.  */
bool
instantiated_lambda_fn_p (tree fn)
{
  if (fn == NULL_TREE || !LAMBDA_FUNCTION_P (fn))
    return false;
  /* Consult the LAMBDA_EXPR recorded on the closure type.  */
  tree lam = CLASSTYPE_LAMBDA_EXPR (DECL_CONTEXT (fn));
  return LAMBDA_EXPR_INSTANTIATED (lam);
}
/* We're instantiating a variable from template function TCTX.  Return the
   corresponding current enclosing scope.  This gets complicated because lambda
   functions in templates are regenerated rather than instantiated, but generic
   lambda functions are subsequently instantiated.  */
static tree
enclosing_instantiation_of (tree otctx)
{
  tree tctx = otctx;
  tree fn = current_function_decl;
  int lambda_count = 0;
  /* Strip lambda op()s off TCTX, counting how many levels we remove,
     until we reach a non-lambda template context.  */
  for (; tctx && (lambda_fn_in_template_p (tctx)
		  || instantiated_lambda_fn_p (tctx));
       tctx = decl_function_context (tctx))
    ++lambda_count;
  /* Walk outward from the current function, looking for the function
     whose (lambda-stripped) template matches TCTX.  */
  for (; fn; fn = decl_function_context (fn))
    {
      tree ofn = fn;
      int flambda_count = 0;
      /* Likewise strip instantiated lambdas off FN, counting them.  */
      for (; fn && instantiated_lambda_fn_p (fn);
	   fn = decl_function_context (fn))
	++flambda_count;
      if ((fn && DECL_TEMPLATE_INFO (fn))
	  ? most_general_template (fn) != most_general_template (tctx)
	  : fn != tctx)
	continue;
      /* If FN sits under more lambda levels than TCTX, back up OFN to
	 the level that corresponds to TCTX's depth.  */
      if (flambda_count != lambda_count)
	{
	  gcc_assert (flambda_count > lambda_count);
	  for (; flambda_count > lambda_count; --flambda_count)
	    ofn = decl_function_context (ofn);
	}
      gcc_assert (DECL_NAME (ofn) == DECL_NAME (otctx)
		  || DECL_CONV_FN_P (ofn));
      return ofn;
    }
  gcc_unreachable ();
}
/* Substitute the ARGS into the T, which is a _DECL.  Return the
   result of the substitution.  Issue error and warning messages under
   control of COMPLAIN.  */

static tree
tsubst_decl (tree t, tree args, tsubst_flags_t complain)
{
  /* RETURN jumps to OUT rather than returning directly so that
     input_location is restored on every exit path.  */
#define RETURN(EXP) do { r = (EXP); goto out; } while(0)
  location_t saved_loc;
  tree r = NULL_TREE;
  tree in_decl = t;
  hashval_t hash = 0;

  /* Set the filename and linenumber to improve error-reporting.  */
  saved_loc = input_location;
  input_location = DECL_SOURCE_LOCATION (t);

  switch (TREE_CODE (t))
    {
    case TEMPLATE_DECL:
      r = tsubst_template_decl (t, args, complain, /*lambda*/NULL_TREE);
      break;

    case FUNCTION_DECL:
      r = tsubst_function_decl (t, args, complain, /*lambda*/NULL_TREE);
      break;

    case PARM_DECL:
      {
	tree type = NULL_TREE;
	int i, len = 1;
	tree expanded_types = NULL_TREE;
	tree prev_r = NULL_TREE;
	tree first_r = NULL_TREE;

	if (DECL_PACK_P (t))
	  {
	    /* If there is a local specialization that isn't a
	       parameter pack, it means that we're doing a "simple"
	       substitution from inside tsubst_pack_expansion. Just
	       return the local specialization (which will be a single
	       parm).  */
	    tree spec = retrieve_local_specialization (t);
	    if (spec
		&& TREE_CODE (spec) == PARM_DECL
		&& TREE_CODE (TREE_TYPE (spec)) != TYPE_PACK_EXPANSION)
	      RETURN (spec);

	    /* Expand the TYPE_PACK_EXPANSION that provides the types for
	       the parameters in this function parameter pack.  */
	    expanded_types = tsubst_pack_expansion (TREE_TYPE (t), args,
						    complain, in_decl);
	    if (TREE_CODE (expanded_types) == TREE_VEC)
	      {
		len = TREE_VEC_LENGTH (expanded_types);

		/* Zero-length parameter packs are boring. Just substitute
		   into the chain.  */
		if (len == 0 && !cp_unevaluated_operand)
		  RETURN (tsubst (TREE_CHAIN (t), args, complain,
				  TREE_CHAIN (t)));
	      }
	    else
	      {
		/* All we did was update the type. Make a note of that.  */
		type = expanded_types;
		expanded_types = NULL_TREE;
	      }
	  }

	/* Loop through all of the parameters we'll build. When T is
	   a function parameter pack, LEN is the number of expanded
	   types in EXPANDED_TYPES; otherwise, LEN is 1.  */
	r = NULL_TREE;
	for (i = 0; i < len; ++i)
	  {
	    prev_r = r;
	    r = copy_node (t);
	    if (DECL_TEMPLATE_PARM_P (t))
	      SET_DECL_TEMPLATE_PARM_P (r);

	    if (expanded_types)
	      /* We're on the Ith parameter of the function parameter
		 pack.  */
	      {
		/* Get the Ith type.  */
		type = TREE_VEC_ELT (expanded_types, i);

		/* Rename the parameter to include the index.  */
		DECL_NAME (r)
		  = make_ith_pack_parameter_name (DECL_NAME (r), i);
	      }
	    else if (!type)
	      /* We're dealing with a normal parameter.  */
	      type = tsubst (TREE_TYPE (t), args, complain, in_decl);

	    type = type_decays_to (type);
	    TREE_TYPE (r) = type;
	    cp_apply_type_quals_to_decl (cp_type_quals (type), r);

	    if (DECL_INITIAL (r))
	      {
		if (TREE_CODE (DECL_INITIAL (r)) != TEMPLATE_PARM_INDEX)
		  DECL_INITIAL (r) = TREE_TYPE (r);
		else
		  DECL_INITIAL (r) = tsubst (DECL_INITIAL (r), args,
					     complain, in_decl);
	      }

	    DECL_CONTEXT (r) = NULL_TREE;

	    if (!DECL_TEMPLATE_PARM_P (r))
	      DECL_ARG_TYPE (r) = type_passed_as (type);

	    apply_late_template_attributes (&r, DECL_ATTRIBUTES (r), 0,
					    args, complain, in_decl);

	    /* Keep track of the first new parameter we
	       generate. That's what will be returned to the
	       caller.  */
	    if (!first_r)
	      first_r = r;

	    /* Build a proper chain of parameters when substituting
	       into a function parameter pack.  */
	    if (prev_r)
	      DECL_CHAIN (prev_r) = r;
	  }

	/* If cp_unevaluated_operand is set, we're just looking for a
	   single dummy parameter, so don't keep going.  */
	if (DECL_CHAIN (t) && !cp_unevaluated_operand)
	  DECL_CHAIN (r) = tsubst (DECL_CHAIN (t), args,
				   complain, DECL_CHAIN (t));

	/* FIRST_R contains the start of the chain we've built.  */
	r = first_r;
      }
      break;

    case FIELD_DECL:
      {
	tree type = NULL_TREE;
	tree vec = NULL_TREE;
	tree expanded_types = NULL_TREE;
	int len = 1;

	if (PACK_EXPANSION_P (TREE_TYPE (t)))
	  {
	    /* This field is a lambda capture pack.  Return a TREE_VEC of
	       the expanded fields to instantiate_class_template_1.  */
	    expanded_types = tsubst_pack_expansion (TREE_TYPE (t), args,
						    complain, in_decl);
	    if (TREE_CODE (expanded_types) == TREE_VEC)
	      {
		len = TREE_VEC_LENGTH (expanded_types);
		vec = make_tree_vec (len);
	      }
	    else
	      {
		/* All we did was update the type. Make a note of that.  */
		type = expanded_types;
		expanded_types = NULL_TREE;
	      }
	  }

	for (int i = 0; i < len; ++i)
	  {
	    r = copy_decl (t);
	    if (expanded_types)
	      {
		/* Ith element of the expanded capture pack; rename the
		   field to include the index.  */
		type = TREE_VEC_ELT (expanded_types, i);
		DECL_NAME (r)
		  = make_ith_pack_parameter_name (DECL_NAME (r), i);
	      }
	    else if (!type)
	      type = tsubst (TREE_TYPE (t), args, complain, in_decl);

	    if (type == error_mark_node)
	      RETURN (error_mark_node);
	    TREE_TYPE (r) = type;
	    cp_apply_type_quals_to_decl (cp_type_quals (type), r);

	    if (DECL_C_BIT_FIELD (r))
	      /* For bit-fields, DECL_BIT_FIELD_REPRESENTATIVE gives the
		 number of bits.  */
	      DECL_BIT_FIELD_REPRESENTATIVE (r)
		= tsubst_expr (DECL_BIT_FIELD_REPRESENTATIVE (t), args,
			       complain, in_decl,
			       /*integral_constant_expression_p=*/true);

	    if (DECL_INITIAL (t))
	      {
		/* Set up DECL_TEMPLATE_INFO so that we can get at the
		   NSDMI in perform_member_init.  Still set DECL_INITIAL
		   so that we know there is one.  */
		DECL_INITIAL (r) = void_node;
		gcc_assert (DECL_LANG_SPECIFIC (r) == NULL);
		retrofit_lang_decl (r);
		DECL_TEMPLATE_INFO (r) = build_template_info (t, args);
	      }

	    /* We don't have to set DECL_CONTEXT here; it is set by
	       finish_member_declaration.  */
	    DECL_CHAIN (r) = NULL_TREE;

	    apply_late_template_attributes (&r, DECL_ATTRIBUTES (r), 0,
					    args, complain, in_decl);

	    if (vec)
	      TREE_VEC_ELT (vec, i) = r;
	  }

	/* A capture pack yields the whole TREE_VEC of new fields.  */
	if (vec)
	  r = vec;
      }
      break;

    case USING_DECL:
      /* We reach here only for member using decls.  We also need to check
	 uses_template_parms because DECL_DEPENDENT_P is not set for a
	 using-declaration that designates a member of the current
	 instantiation (c++/53549).  */
      if (DECL_DEPENDENT_P (t)
	  || uses_template_parms (USING_DECL_SCOPE (t)))
	{
	  tree scope = USING_DECL_SCOPE (t);
	  tree name = tsubst_copy (DECL_NAME (t), args, complain, in_decl);
	  if (PACK_EXPANSION_P (scope))
	    {
	      /* A pack of base classes: build one using-decl per expanded
		 scope and return them all in a TREE_VEC.  */
	      tree vec = tsubst_pack_expansion (scope, args, complain, in_decl);
	      int len = TREE_VEC_LENGTH (vec);
	      r = make_tree_vec (len);
	      for (int i = 0; i < len; ++i)
		{
		  tree escope = TREE_VEC_ELT (vec, i);
		  tree elt = do_class_using_decl (escope, name);
		  if (!elt)
		    {
		      r = error_mark_node;
		      break;
		    }
		  else
		    {
		      /* Preserve the access of the original decl.  */
		      TREE_PROTECTED (elt) = TREE_PROTECTED (t);
		      TREE_PRIVATE (elt) = TREE_PRIVATE (t);
		    }
		  TREE_VEC_ELT (r, i) = elt;
		}
	    }
	  else
	    {
	      tree inst_scope = tsubst_copy (USING_DECL_SCOPE (t), args,
					     complain, in_decl);
	      r = do_class_using_decl (inst_scope, name);
	      if (!r)
		r = error_mark_node;
	      else
		{
		  /* Preserve the access of the original decl.  */
		  TREE_PROTECTED (r) = TREE_PROTECTED (t);
		  TREE_PRIVATE (r) = TREE_PRIVATE (t);
		}
	    }
	}
      else
	{
	  /* Non-dependent: just copy the node.  */
	  r = copy_node (t);
	  DECL_CHAIN (r) = NULL_TREE;
	}
      break;

    case TYPE_DECL:
    case VAR_DECL:
      {
	tree argvec = NULL_TREE;
	tree gen_tmpl = NULL_TREE;
	tree spec;
	tree tmpl = NULL_TREE;
	tree ctx;
	tree type = NULL_TREE;
	bool local_p;

	if (TREE_TYPE (t) == error_mark_node)
	  RETURN (error_mark_node);

	if (TREE_CODE (t) == TYPE_DECL
	    && t == TYPE_MAIN_DECL (TREE_TYPE (t)))
	  {
	    /* If this is the canonical decl, we don't have to
	       mess with instantiations, and often we can't (for
	       typename, template type parms and such).  Note that
	       TYPE_NAME is not correct for the above test if
	       we've copied the type for a typedef.  */
	    type = tsubst (TREE_TYPE (t), args, complain, in_decl);
	    if (type == error_mark_node)
	      RETURN (error_mark_node);
	    r = TYPE_NAME (type);
	    break;
	  }

	/* Check to see if we already have the specialization we
	   need.  */
	spec = NULL_TREE;
	if (DECL_CLASS_SCOPE_P (t) || DECL_NAMESPACE_SCOPE_P (t))
	  {
	    /* T is a static data member or namespace-scope entity.
	       We have to substitute into namespace-scope variables
	       (not just variable templates) because of cases like:

		 template <class T> void f() { extern T t; }

	       where the entity referenced is not known until
	       instantiation time.  */
	    local_p = false;
	    ctx = DECL_CONTEXT (t);
	    if (DECL_CLASS_SCOPE_P (t))
	      {
		ctx = tsubst_aggr_type (ctx, args,
					complain,
					in_decl, /*entering_scope=*/1);
		/* If CTX is unchanged, then T is in fact the
		   specialization we want.  That situation occurs when
		   referencing a static data member within in its own
		   class.  We can use pointer equality, rather than
		   same_type_p, because DECL_CONTEXT is always
		   canonical...  */
		if (ctx == DECL_CONTEXT (t)
		    /* ... unless T is a member template; in which
		       case our caller can be willing to create a
		       specialization of that template represented
		       by T.  */
		    && !(DECL_TI_TEMPLATE (t)
			 && DECL_MEMBER_TEMPLATE_P (DECL_TI_TEMPLATE (t))))
		  spec = t;
	      }

	    if (!spec)
	      {
		/* Look up a previously created specialization by its
		   template and coerced arguments; HASH is reused below
		   when registering a new one.  */
		tmpl = DECL_TI_TEMPLATE (t);
		gen_tmpl = most_general_template (tmpl);
		argvec = tsubst (DECL_TI_ARGS (t), args, complain, in_decl);
		if (argvec != error_mark_node)
		  argvec = (coerce_innermost_template_parms
			    (DECL_TEMPLATE_PARMS (gen_tmpl),
			     argvec, t, complain,
			     /*all*/true, /*defarg*/true));
		if (argvec == error_mark_node)
		  RETURN (error_mark_node);
		hash = hash_tmpl_and_args (gen_tmpl, argvec);
		spec = retrieve_specialization (gen_tmpl, argvec, hash);
	      }
	  }
	else
	  {
	    /* A local variable.  */
	    local_p = true;
	    /* Subsequent calls to pushdecl will fill this in.  */
	    ctx = NULL_TREE;
	    /* Unless this is a reference to a static variable from an
	       enclosing function, in which case we need to fill it in now.  */
	    if (TREE_STATIC (t))
	      {
		tree fn = enclosing_instantiation_of (DECL_CONTEXT (t));
		if (fn != current_function_decl)
		  ctx = fn;
	      }
	    spec = retrieve_local_specialization (t);
	  }
	/* If we already have the specialization we need, there is
	   nothing more to do.  */
	if (spec)
	  {
	    r = spec;
	    break;
	  }

	/* Create a new node for the specialization we need.  */
	if (type == NULL_TREE)
	  {
	    if (is_typedef_decl (t))
	      type = DECL_ORIGINAL_TYPE (t);
	    else
	      type = TREE_TYPE (t);
	    if (VAR_P (t)
		&& VAR_HAD_UNKNOWN_BOUND (t)
		&& type != error_mark_node)
	      type = strip_array_domain (type);
	    tree sub_args = args;
	    if (tree auto_node = type_uses_auto (type))
	      {
		/* Mask off any template args past the variable's context so we
		   don't replace the auto with an unrelated argument.  */
		int nouter = TEMPLATE_TYPE_LEVEL (auto_node) - 1;
		int extra = TMPL_ARGS_DEPTH (args) - nouter;
		if (extra > 0)
		  /* This should never happen with the new lambda instantiation
		     model, but keep the handling just in case.  */
		  gcc_assert (!CHECKING_P),
		  sub_args = strip_innermost_template_args (args, extra);
	      }
	    type = tsubst (type, sub_args, complain, in_decl);
	    /* Substituting the type might have recursively instantiated this
	       same alias (c++/86171).  */
	    if (gen_tmpl && DECL_ALIAS_TEMPLATE_P (gen_tmpl)
		&& (spec = retrieve_specialization (gen_tmpl, argvec, hash)))
	      {
		r = spec;
		break;
	      }
	  }
	r = copy_decl (t);
	if (VAR_P (r))
	  {
	    DECL_INITIALIZED_P (r) = 0;
	    DECL_TEMPLATE_INSTANTIATED (r) = 0;
	    if (type == error_mark_node)
	      RETURN (error_mark_node);
	    if (TREE_CODE (type) == FUNCTION_TYPE)
	      {
		/* It may seem that this case cannot occur, since:

		     typedef void f();
		     void g() { f x; }

		   declares a function, not a variable.  However:

		     typedef void f();
		     template <typename T> void g() { T t; }
		     template void g<f>();

		   is an attempt to declare a variable with function
		   type.  */
		error ("variable %qD has function type",
		       /* R is not yet sufficiently initialized, so we
			  just use its name.  */
		       DECL_NAME (r));
		RETURN (error_mark_node);
	      }
	    type = complete_type (type);
	    /* Wait until cp_finish_decl to set this again, to handle
	       circular dependency (template/instantiate6.C).  */
	    DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (r) = 0;
	    type = check_var_type (DECL_NAME (r), type,
				   DECL_SOURCE_LOCATION (r));
	    if (DECL_HAS_VALUE_EXPR_P (t))
	      {
		tree ve = DECL_VALUE_EXPR (t);
		/* If the DECL_VALUE_EXPR is converted to the declared type,
		   preserve the identity so that gimplify_type_sizes works.  */
		bool nop = (TREE_CODE (ve) == NOP_EXPR);
		if (nop)
		  ve = TREE_OPERAND (ve, 0);
		ve = tsubst_expr (ve, args, complain, in_decl,
				  /*constant_expression_p=*/false);
		if (REFERENCE_REF_P (ve))
		  {
		    gcc_assert (TYPE_REF_P (type));
		    ve = TREE_OPERAND (ve, 0);
		  }
		if (nop)
		  ve = build_nop (type, ve);
		else
		  gcc_checking_assert (TREE_TYPE (ve) == type);
		SET_DECL_VALUE_EXPR (r, ve);
	      }
	    if (CP_DECL_THREAD_LOCAL_P (r)
		&& !processing_template_decl)
	      set_decl_tls_model (r, decl_default_tls_model (r));
	  }
	else if (DECL_SELF_REFERENCE_P (t))
	  SET_DECL_SELF_REFERENCE_P (r);
	TREE_TYPE (r) = type;
	cp_apply_type_quals_to_decl (cp_type_quals (type), r);
	DECL_CONTEXT (r) = ctx;
	/* Clear out the mangled name and RTL for the instantiation.  */
	SET_DECL_ASSEMBLER_NAME (r, NULL_TREE);
	if (CODE_CONTAINS_STRUCT (TREE_CODE (t), TS_DECL_WRTL))
	  SET_DECL_RTL (r, NULL);
	/* The initializer must not be expanded until it is required;
	   see [temp.inst].  */
	DECL_INITIAL (r) = NULL_TREE;
	DECL_SIZE (r) = DECL_SIZE_UNIT (r) = 0;
	if (VAR_P (r))
	  {
	    if (DECL_LANG_SPECIFIC (r))
	      SET_DECL_DEPENDENT_INIT_P (r, false);

	    SET_DECL_MODE (r, VOIDmode);

	    /* Possibly limit visibility based on template args.  */
	    DECL_VISIBILITY (r) = VISIBILITY_DEFAULT;
	    if (DECL_VISIBILITY_SPECIFIED (t))
	      {
		DECL_VISIBILITY_SPECIFIED (r) = 0;
		DECL_ATTRIBUTES (r)
		  = remove_attribute ("visibility", DECL_ATTRIBUTES (r));
	      }
	    determine_visibility (r);
	  }

	if (!local_p)
	  {
	    /* A static data member declaration is always marked
	       external when it is declared in-class, even if an
	       initializer is present.  We mimic the non-template
	       processing here.  */
	    DECL_EXTERNAL (r) = 1;
	    if (DECL_NAMESPACE_SCOPE_P (t))
	      DECL_NOT_REALLY_EXTERN (r) = 1;

	    DECL_TEMPLATE_INFO (r) = build_template_info (tmpl, argvec);
	    SET_DECL_IMPLICIT_INSTANTIATION (r);
	    /* Remember whether we require constant initialization of
	       a non-constant template variable.  */
	    TINFO_VAR_DECLARED_CONSTINIT (DECL_TEMPLATE_INFO (r))
	      = TINFO_VAR_DECLARED_CONSTINIT (DECL_TEMPLATE_INFO (t));
	    if (!error_operand_p (r) || (complain & tf_error))
	      register_specialization (r, gen_tmpl, argvec, false, hash);
	  }
	else
	  {
	    if (DECL_LANG_SPECIFIC (r))
	      DECL_TEMPLATE_INFO (r) = NULL_TREE;
	    if (!cp_unevaluated_operand)
	      register_local_specialization (r, t);
	  }

	DECL_CHAIN (r) = NULL_TREE;

	apply_late_template_attributes (&r, DECL_ATTRIBUTES (r),
					/*flags=*/0,
					args, complain, in_decl);

	/* Preserve a typedef that names a type.  */
	if (is_typedef_decl (r) && type != error_mark_node)
	  {
	    DECL_ORIGINAL_TYPE (r) = NULL_TREE;
	    set_underlying_type (r);
	    if (TYPE_DECL_ALIAS_P (r))
	      /* An alias template specialization can be dependent
		 even if its underlying type is not.  */
	      TYPE_DEPENDENT_P_VALID (TREE_TYPE (r)) = false;
	  }

	layout_decl (r, 0);
      }
      break;

    default:
      gcc_unreachable ();
    }
#undef RETURN

 out:
  /* Restore the file and line information.  */
  input_location = saved_loc;

  return r;
}
/* Substitute into the complete parameter type list PARMS.  */

tree
tsubst_function_parms (tree parms,
		       tree args,
		       tsubst_flags_t complain,
		       tree in_decl)
{
  /* Delegate to tsubst_arg_types with no END sentinel, so the entire
     parameter list is substituted.  */
  const tree end = NULL_TREE;
  return tsubst_arg_types (parms, args, end, complain, in_decl);
}
/* Substitute into the ARG_TYPES of a function type.
   If END is a TREE_CHAIN, leave it and any following types
   un-substituted.  */

static tree
tsubst_arg_types (tree arg_types,
		  tree args,
		  tree end,
		  tsubst_flags_t complain,
		  tree in_decl)
{
  tree remaining_arg_types;
  tree type = NULL_TREE;
  int i = 1;
  tree expanded_args = NULL_TREE;
  tree default_arg;

  if (!arg_types || arg_types == void_list_node || arg_types == end)
    return arg_types;

  /* Recurse on the tail first, so the list is rebuilt back-to-front by
     consing each substituted type onto REMAINING_ARG_TYPES below.  */
  remaining_arg_types = tsubst_arg_types (TREE_CHAIN (arg_types),
					  args, end, complain, in_decl);
  if (remaining_arg_types == error_mark_node)
    return error_mark_node;

  if (PACK_EXPANSION_P (TREE_VALUE (arg_types)))
    {
      /* For a pack expansion, perform substitution on the
	 entire expression.  Later on, we'll handle the arguments
	 one-by-one.  */
      expanded_args = tsubst_pack_expansion (TREE_VALUE (arg_types),
					     args, complain, in_decl);

      if (TREE_CODE (expanded_args) == TREE_VEC)
	/* So that we'll spin through the parameters, one by one.  */
	i = TREE_VEC_LENGTH (expanded_args);
      else
	{
	  /* We only partially substituted into the parameter
	     pack.  Our type is TYPE_PACK_EXPANSION.  */
	  type = expanded_args;
	  expanded_args = NULL_TREE;
	}
    }

  /* I counts down so pack elements are consed in reverse, which keeps
     the rebuilt (back-to-front) list in the original order.  */
  while (i > 0) {
    --i;

    if (expanded_args)
      type = TREE_VEC_ELT (expanded_args, i);
    else if (!type)
      type = tsubst (TREE_VALUE (arg_types), args, complain, in_decl);

    if (type == error_mark_node)
      return error_mark_node;
    if (VOID_TYPE_P (type))
      {
	if (complain & tf_error)
	  {
	    error ("invalid parameter type %qT", type);
	    if (in_decl)
	      error ("in declaration %q+D", in_decl);
	  }
	return error_mark_node;
      }

    /* DR 657. */
    if (abstract_virtuals_error_sfinae (ACU_PARM, type, complain))
      return error_mark_node;

    /* Do array-to-pointer, function-to-pointer conversion, and ignore
       top-level qualifiers as required.  */
    type = cv_unqualified (type_decays_to (type));

    /* We do not substitute into default arguments here.  The standard
       mandates that they be instantiated only when needed, which is
       done in build_over_call.  */
    default_arg = TREE_PURPOSE (arg_types);

    /* Except that we do substitute default arguments under tsubst_lambda_expr,
       since the new op() won't have any associated template arguments for us
       to refer to later.  */
    if (lambda_fn_in_template_p (in_decl))
      default_arg = tsubst_copy_and_build (default_arg, args, complain, in_decl,
					   false/*fn*/, false/*constexpr*/);

    if (default_arg && TREE_CODE (default_arg) == DEFERRED_PARSE)
      {
	/* We've instantiated a template before its default arguments
	   have been parsed.  This can happen for a nested template
	   class, and is not an error unless we require the default
	   argument in a call of this function.  */
	/* Use a fresh (unshared) cons and record it in the deferred
	   parse node so it can be found once the default argument is
	   actually parsed.  */
	remaining_arg_types =
	  tree_cons (default_arg, type, remaining_arg_types);
	vec_safe_push (DEFPARSE_INSTANTIATIONS (default_arg),
		       remaining_arg_types);
      }
    else
      remaining_arg_types =
	hash_tree_cons (default_arg, type, remaining_arg_types);
  }

  return remaining_arg_types;
}
/* Substitute into a FUNCTION_TYPE or METHOD_TYPE.  This routine does
   *not* handle the exception-specification for FNTYPE, because the
   initial substitution of explicitly provided template parameters
   during argument deduction forbids substitution into the
   exception-specification:

     [temp.deduct]

     All references in the function type of the function template to  the
     corresponding template parameters are replaced by the specified tem-
     plate argument values.  If a substitution in a template parameter or
     in  the function type of the function template results in an invalid
     type, type deduction fails.  [Note: The equivalent  substitution  in
     exception specifications is done only when the function is instanti-
     ated, at which point a program is  ill-formed  if  the  substitution
     results in an invalid type.]  */

static tree
tsubst_function_type (tree t,
		      tree args,
		      tsubst_flags_t complain,
		      tree in_decl)
{
  tree return_type;
  tree arg_types = NULL_TREE;

  /* The TYPE_CONTEXT is not used for function/method types.  */
  gcc_assert (TYPE_CONTEXT (t) == NULL_TREE);

  /* DR 1227: Mixing immediate and non-immediate contexts in deduction
     failure.  */
  bool late_return_type_p = TYPE_HAS_LATE_RETURN_TYPE (t);

  if (late_return_type_p)
    {
      /* For a trailing return type, the arguments must be substituted
	 first, since the return type may refer to them (and, for a
	 method, to 'this').  */
      /* Substitute the argument types.  */
      arg_types = tsubst_arg_types (TYPE_ARG_TYPES (t), args, NULL_TREE,
				    complain, in_decl);
      if (arg_types == error_mark_node)
	return error_mark_node;

      tree save_ccp = current_class_ptr;
      tree save_ccr = current_class_ref;
      tree this_type = (TREE_CODE (t) == METHOD_TYPE
			? TREE_TYPE (TREE_VALUE (arg_types)) : NULL_TREE);
      bool do_inject = this_type && CLASS_TYPE_P (this_type);
      if (do_inject)
	{
	  /* DR 1207: 'this' is in scope in the trailing return type.  */
	  inject_this_parameter (this_type, cp_type_quals (this_type));
	}

      /* Substitute the return type.  */
      return_type = tsubst (TREE_TYPE (t), args, complain, in_decl);

      if (do_inject)
	{
	  /* Restore the 'this' context saved above.  */
	  current_class_ptr = save_ccp;
	  current_class_ref = save_ccr;
	}
    }
  else
    /* Substitute the return type.  */
    return_type = tsubst (TREE_TYPE (t), args, complain, in_decl);

  if (return_type == error_mark_node)
    return error_mark_node;
  /* DR 486 clarifies that creation of a function type with an
     invalid return type is a deduction failure.  */
  if (TREE_CODE (return_type) == ARRAY_TYPE
      || TREE_CODE (return_type) == FUNCTION_TYPE)
    {
      if (complain & tf_error)
	{
	  if (TREE_CODE (return_type) == ARRAY_TYPE)
	    error ("function returning an array");
	  else
	    error ("function returning a function");
	}
      return error_mark_node;
    }
  /* And DR 657. */
  if (abstract_virtuals_error_sfinae (ACU_RETURN, return_type, complain))
    return error_mark_node;

  if (!late_return_type_p)
    {
      /* Substitute the argument types.  */
      arg_types = tsubst_arg_types (TYPE_ARG_TYPES (t), args, NULL_TREE,
				    complain, in_decl);
      if (arg_types == error_mark_node)
	return error_mark_node;
    }

  /* Construct a new type node and return it.  */
  return rebuild_function_or_method_type (t, return_type, arg_types,
					  /*raises=*/NULL_TREE, complain);
}
/* FNTYPE is a FUNCTION_TYPE or METHOD_TYPE.  Substitute the template
   ARGS into that specification, and return the substituted
   specification.  If there is no specification, return NULL_TREE.  */

static tree
tsubst_exception_specification (tree fntype,
				tree args,
				tsubst_flags_t complain,
				tree in_decl,
				bool defer_ok)
{
  tree specs;
  tree new_specs;

  specs = TYPE_RAISES_EXCEPTIONS (fntype);
  new_specs = NULL_TREE;
  if (specs && TREE_PURPOSE (specs))
    {
      /* A noexcept-specifier.  */
      tree expr = TREE_PURPOSE (specs);
      if (TREE_CODE (expr) == INTEGER_CST)
	/* Already a constant; nothing to substitute.  */
	new_specs = expr;
      else if (defer_ok)
	{
	  /* Defer instantiation of noexcept-specifiers to avoid
	     excessive instantiations (c++/49107).  */
	  new_specs = make_node (DEFERRED_NOEXCEPT);
	  if (DEFERRED_NOEXCEPT_SPEC_P (specs))
	    {
	      /* We already partially instantiated this member template,
		 so combine the new args with the old.  */
	      DEFERRED_NOEXCEPT_PATTERN (new_specs)
		= DEFERRED_NOEXCEPT_PATTERN (expr);
	      DEFERRED_NOEXCEPT_ARGS (new_specs)
		= add_to_template_args (DEFERRED_NOEXCEPT_ARGS (expr), args);
	    }
	  else
	    {
	      DEFERRED_NOEXCEPT_PATTERN (new_specs) = expr;
	      DEFERRED_NOEXCEPT_ARGS (new_specs) = args;
	    }
	}
      else
	{
	  /* Instantiate the specifier now.  If it was previously
	     deferred, first recover the original pattern and combine
	     its saved args with ARGS.  */
	  if (DEFERRED_NOEXCEPT_SPEC_P (specs))
	    {
	      args = add_to_template_args (DEFERRED_NOEXCEPT_ARGS (expr),
					   args);
	      expr = DEFERRED_NOEXCEPT_PATTERN (expr);
	    }
	  new_specs = tsubst_copy_and_build
	    (expr, args, complain, in_decl, /*function_p=*/false,
	     /*integral_constant_expression_p=*/true);
	}
      new_specs = build_noexcept_spec (new_specs, complain);
    }
  else if (specs)
    {
      if (! TREE_VALUE (specs))
	/* An empty throw() specification; reuse it unchanged.  */
	new_specs = specs;
      else
	/* A dynamic exception specification: substitute each listed
	   type, expanding any pack expansions along the way.  */
	while (specs)
	  {
	    tree spec;
            int i, len = 1;
            tree expanded_specs = NULL_TREE;

            if (PACK_EXPANSION_P (TREE_VALUE (specs)))
              {
                /* Expand the pack expansion type.  */
                expanded_specs = tsubst_pack_expansion (TREE_VALUE (specs),
                                                       args, complain,
                                                       in_decl);
                if (expanded_specs == error_mark_node)
                  return error_mark_node;
                else if (TREE_CODE (expanded_specs) == TREE_VEC)
                  len = TREE_VEC_LENGTH (expanded_specs);
                else
                  {
                    /* We're substituting into a member template, so
                       we got a TYPE_PACK_EXPANSION back.  Add that
                       expansion and move on.  */
                    gcc_assert (TREE_CODE (expanded_specs)
                                == TYPE_PACK_EXPANSION);
                    new_specs = add_exception_specifier (new_specs,
                                                         expanded_specs,
                                                         complain);
                    specs = TREE_CHAIN (specs);
                    continue;
                  }
              }

            for (i = 0; i < len; ++i)
              {
                if (expanded_specs)
                  spec = TREE_VEC_ELT (expanded_specs, i);
                else
                  spec = tsubst (TREE_VALUE (specs), args, complain, in_decl);
                if (spec == error_mark_node)
                  return spec;
                new_specs = add_exception_specifier (new_specs, spec,
                                                     complain);
              }

            specs = TREE_CHAIN (specs);
	  }
    }
  return new_specs;
}
/* Take the tree structure T and replace template parameters used
therein with the argument vector ARGS. IN_DECL is an associated
decl for diagnostics. If an error occurs, returns ERROR_MARK_NODE.
Issue error and warning messages under control of COMPLAIN. Note
that we must be relatively non-tolerant of extensions here, in
order to preserve conformance; if we allow substitutions that
should not be allowed, we may allow argument deductions that should
not succeed, and therefore report ambiguous overload situations
where there are none. In theory, we could allow the substitution,
but indicate that it should have failed, and allow our caller to
make sure that the right thing happens, but we don't try to do this
yet.
This function is used for dealing with types, decls and the like;
for expressions, use tsubst_expr or tsubst_copy. */
tree
tsubst (tree t, tree args, tsubst_flags_t complain, tree in_decl)
{
enum tree_code code;
tree type, r = NULL_TREE;
if (t == NULL_TREE || t == error_mark_node
|| t == integer_type_node
|| t == void_type_node
|| t == char_type_node
|| t == unknown_type_node
|| TREE_CODE (t) == NAMESPACE_DECL
|| TREE_CODE (t) == TRANSLATION_UNIT_DECL)
return t;
if (DECL_P (t))
return tsubst_decl (t, args, complain);
if (args == NULL_TREE)
return t;
code = TREE_CODE (t);
if (code == IDENTIFIER_NODE)
type = IDENTIFIER_TYPE_VALUE (t);
else
type = TREE_TYPE (t);
gcc_assert (type != unknown_type_node);
/* Reuse typedefs. We need to do this to handle dependent attributes,
such as attribute aligned. */
if (TYPE_P (t)
&& typedef_variant_p (t))
{
tree decl = TYPE_NAME (t);
if (alias_template_specialization_p (t, nt_opaque))
{
/* DECL represents an alias template and we want to
instantiate it. */
tree tmpl = most_general_template (DECL_TI_TEMPLATE (decl));
tree gen_args = tsubst (DECL_TI_ARGS (decl), args, complain, in_decl);
r = instantiate_alias_template (tmpl, gen_args, complain);
}
else if (DECL_CLASS_SCOPE_P (decl)
&& CLASSTYPE_TEMPLATE_INFO (DECL_CONTEXT (decl))
&& uses_template_parms (DECL_CONTEXT (decl)))
{
tree tmpl = most_general_template (DECL_TI_TEMPLATE (decl));
tree gen_args = tsubst (DECL_TI_ARGS (decl), args, complain, in_decl);
r = retrieve_specialization (tmpl, gen_args, 0);
}
else if (DECL_FUNCTION_SCOPE_P (decl)
&& DECL_TEMPLATE_INFO (DECL_CONTEXT (decl))
&& uses_template_parms (DECL_TI_ARGS (DECL_CONTEXT (decl))))
r = retrieve_local_specialization (decl);
else
/* The typedef is from a non-template context. */
return t;
if (r)
{
r = TREE_TYPE (r);
r = cp_build_qualified_type_real
(r, cp_type_quals (t) | cp_type_quals (r),
complain | tf_ignore_bad_quals);
return r;
}
else
{
/* We don't have an instantiation yet, so drop the typedef. */
int quals = cp_type_quals (t);
t = DECL_ORIGINAL_TYPE (decl);
t = cp_build_qualified_type_real (t, quals,
complain | tf_ignore_bad_quals);
}
}
bool fndecl_type = (complain & tf_fndecl_type);
complain &= ~tf_fndecl_type;
if (type
&& code != TYPENAME_TYPE
&& code != TEMPLATE_TYPE_PARM
&& code != TEMPLATE_PARM_INDEX
&& code != IDENTIFIER_NODE
&& code != FUNCTION_TYPE
&& code != METHOD_TYPE)
type = tsubst (type, args, complain, in_decl);
if (type == error_mark_node)
return error_mark_node;
switch (code)
{
case RECORD_TYPE:
case UNION_TYPE:
case ENUMERAL_TYPE:
return tsubst_aggr_type (t, args, complain, in_decl,
/*entering_scope=*/0);
case ERROR_MARK:
case IDENTIFIER_NODE:
case VOID_TYPE:
case REAL_TYPE:
case COMPLEX_TYPE:
case VECTOR_TYPE:
case BOOLEAN_TYPE:
case NULLPTR_TYPE:
case LANG_TYPE:
return t;
case INTEGER_TYPE:
if (t == integer_type_node)
return t;
if (TREE_CODE (TYPE_MIN_VALUE (t)) == INTEGER_CST
&& TREE_CODE (TYPE_MAX_VALUE (t)) == INTEGER_CST)
return t;
{
tree max, omax = TREE_OPERAND (TYPE_MAX_VALUE (t), 0);
max = tsubst_expr (omax, args, complain, in_decl,
/*integral_constant_expression_p=*/false);
/* Fix up type of the magic NOP_EXPR with TREE_SIDE_EFFECTS if
needed. */
if (TREE_CODE (max) == NOP_EXPR
&& TREE_SIDE_EFFECTS (omax)
&& !TREE_TYPE (max))
TREE_TYPE (max) = TREE_TYPE (TREE_OPERAND (max, 0));
/* If we're in a partial instantiation, preserve the magic NOP_EXPR
with TREE_SIDE_EFFECTS that indicates this is not an integral
constant expression. */
if (processing_template_decl
&& TREE_SIDE_EFFECTS (omax) && TREE_CODE (omax) == NOP_EXPR)
{
gcc_assert (TREE_CODE (max) == NOP_EXPR);
TREE_SIDE_EFFECTS (max) = 1;
}
return compute_array_index_type (NULL_TREE, max, complain);
}
case TEMPLATE_TYPE_PARM:
case TEMPLATE_TEMPLATE_PARM:
case BOUND_TEMPLATE_TEMPLATE_PARM:
case TEMPLATE_PARM_INDEX:
{
int idx;
int level;
int levels;
tree arg = NULL_TREE;
r = NULL_TREE;
gcc_assert (TREE_VEC_LENGTH (args) > 0);
template_parm_level_and_index (t, &level, &idx);
levels = TMPL_ARGS_DEPTH (args);
if (level <= levels
&& TREE_VEC_LENGTH (TMPL_ARGS_LEVEL (args, level)) > 0)
{
arg = TMPL_ARG (args, level, idx);
/* See through ARGUMENT_PACK_SELECT arguments. */
if (arg && TREE_CODE (arg) == ARGUMENT_PACK_SELECT)
arg = argument_pack_select_arg (arg);
}
if (arg == error_mark_node)
return error_mark_node;
else if (arg != NULL_TREE)
{
if (ARGUMENT_PACK_P (arg))
/* If ARG is an argument pack, we don't actually want to
perform a substitution here, because substitutions
for argument packs are only done
element-by-element. We can get to this point when
substituting the type of a non-type template
parameter pack, when that type actually contains
template parameter packs from an outer template, e.g.,
template<typename... Types> struct A {
template<Types... Values> struct B { };
}; */
return t;
if (code == TEMPLATE_TYPE_PARM)
{
int quals;
/* When building concept checks for the purpose of
deducing placeholders, we can end up with wildcards
where types are expected. Adjust this to the deduced
value. */
if (TREE_CODE (arg) == WILDCARD_DECL)
arg = TREE_TYPE (TREE_TYPE (arg));
gcc_assert (TYPE_P (arg));
quals = cp_type_quals (arg) | cp_type_quals (t);
return cp_build_qualified_type_real
(arg, quals, complain | tf_ignore_bad_quals);
}
else if (code == BOUND_TEMPLATE_TEMPLATE_PARM)
{
/* We are processing a type constructed from a
template template parameter. */
tree argvec = tsubst (TYPE_TI_ARGS (t),
args, complain, in_decl);
if (argvec == error_mark_node)
return error_mark_node;
gcc_assert (TREE_CODE (arg) == TEMPLATE_TEMPLATE_PARM
|| TREE_CODE (arg) == TEMPLATE_DECL
|| TREE_CODE (arg) == UNBOUND_CLASS_TEMPLATE);
if (TREE_CODE (arg) == UNBOUND_CLASS_TEMPLATE)
/* Consider this code:
template <template <class> class Template>
struct Internal {
template <class Arg> using Bind = Template<Arg>;
};
template <template <class> class Template, class Arg>
using Instantiate = Template<Arg>; //#0
template <template <class> class Template,
class Argument>
using Bind =
Instantiate<Internal<Template>::template Bind,
Argument>; //#1
When #1 is parsed, the
BOUND_TEMPLATE_TEMPLATE_PARM representing the
parameter `Template' in #0 matches the
UNBOUND_CLASS_TEMPLATE representing the argument
`Internal<Template>::template Bind'; We then want
to assemble the type `Bind<Argument>' that can't
be fully created right now, because
`Internal<Template>' not being complete, the Bind
template cannot be looked up in that context. So
we need to "store" `Bind<Argument>' for later
when the context of Bind becomes complete. Let's
store that in a TYPENAME_TYPE. */
return make_typename_type (TYPE_CONTEXT (arg),
build_nt (TEMPLATE_ID_EXPR,
TYPE_IDENTIFIER (arg),
argvec),
typename_type,
complain);
/* We can get a TEMPLATE_TEMPLATE_PARM here when we
are resolving nested-types in the signature of a
member function templates. Otherwise ARG is a
TEMPLATE_DECL and is the real template to be
instantiated. */
if (TREE_CODE (arg) == TEMPLATE_TEMPLATE_PARM)
arg = TYPE_NAME (arg);
r = lookup_template_class (arg,
argvec, in_decl,
DECL_CONTEXT (arg),
/*entering_scope=*/0,
complain);
return cp_build_qualified_type_real
(r, cp_type_quals (t) | cp_type_quals (r), complain);
}
else if (code == TEMPLATE_TEMPLATE_PARM)
return arg;
else
/* TEMPLATE_PARM_INDEX. */
return convert_from_reference (unshare_expr (arg));
}
if (level == 1)
/* This can happen during the attempted tsubst'ing in
unify. This means that we don't yet have any information
about the template parameter in question. */
return t;
/* Early in template argument deduction substitution, we don't
want to reduce the level of 'auto', or it will be confused
with a normal template parm in subsequent deduction.
Similarly, don't reduce the level of template parameters to
avoid mismatches when deducing their types. */
if (complain & tf_partial)
return t;
/* If we get here, we must have been looking at a parm for a
more deeply nested template. Make a new version of this
template parameter, but with a lower level. */
switch (code)
{
case TEMPLATE_TYPE_PARM:
case TEMPLATE_TEMPLATE_PARM:
case BOUND_TEMPLATE_TEMPLATE_PARM:
if (cp_type_quals (t))
{
r = tsubst (TYPE_MAIN_VARIANT (t), args, complain, in_decl);
r = cp_build_qualified_type_real
(r, cp_type_quals (t),
complain | (code == TEMPLATE_TYPE_PARM
? tf_ignore_bad_quals : 0));
}
else if (TREE_CODE (t) == TEMPLATE_TYPE_PARM
&& PLACEHOLDER_TYPE_CONSTRAINTS (t)
&& (r = (TEMPLATE_PARM_DESCENDANTS
(TEMPLATE_TYPE_PARM_INDEX (t))))
&& (r = TREE_TYPE (r))
&& !PLACEHOLDER_TYPE_CONSTRAINTS (r))
/* Break infinite recursion when substituting the constraints
of a constrained placeholder. */;
else if (TREE_CODE (t) == TEMPLATE_TYPE_PARM
&& !PLACEHOLDER_TYPE_CONSTRAINTS (t)
&& !CLASS_PLACEHOLDER_TEMPLATE (t)
&& (arg = TEMPLATE_TYPE_PARM_INDEX (t),
r = TEMPLATE_PARM_DESCENDANTS (arg))
&& (TEMPLATE_PARM_LEVEL (r)
== TEMPLATE_PARM_LEVEL (arg) - levels))
/* Cache the simple case of lowering a type parameter. */
r = TREE_TYPE (r);
else
{
r = copy_type (t);
TEMPLATE_TYPE_PARM_INDEX (r)
= reduce_template_parm_level (TEMPLATE_TYPE_PARM_INDEX (t),
r, levels, args, complain);
TYPE_STUB_DECL (r) = TYPE_NAME (r) = TEMPLATE_TYPE_DECL (r);
TYPE_MAIN_VARIANT (r) = r;
TYPE_POINTER_TO (r) = NULL_TREE;
TYPE_REFERENCE_TO (r) = NULL_TREE;
if (TREE_CODE (t) == TEMPLATE_TYPE_PARM)
{
/* Propagate constraints on placeholders since they are
only instantiated during satisfaction. */
if (tree constr = PLACEHOLDER_TYPE_CONSTRAINTS (t))
PLACEHOLDER_TYPE_CONSTRAINTS (r) = constr;
else if (tree pl = CLASS_PLACEHOLDER_TEMPLATE (t))
{
pl = tsubst_copy (pl, args, complain, in_decl);
CLASS_PLACEHOLDER_TEMPLATE (r) = pl;
}
}
if (TREE_CODE (r) == TEMPLATE_TEMPLATE_PARM)
/* We have reduced the level of the template
template parameter, but not the levels of its
template parameters, so canonical_type_parameter
will not be able to find the canonical template
template parameter for this level. Thus, we
require structural equality checking to compare
TEMPLATE_TEMPLATE_PARMs. */
SET_TYPE_STRUCTURAL_EQUALITY (r);
else if (TYPE_STRUCTURAL_EQUALITY_P (t))
SET_TYPE_STRUCTURAL_EQUALITY (r);
else
TYPE_CANONICAL (r) = canonical_type_parameter (r);
if (code == BOUND_TEMPLATE_TEMPLATE_PARM)
{
tree tinfo = TYPE_TEMPLATE_INFO (t);
/* We might need to substitute into the types of non-type
template parameters. */
tree tmpl = tsubst (TI_TEMPLATE (tinfo), args,
complain, in_decl);
if (tmpl == error_mark_node)
return error_mark_node;
tree argvec = tsubst (TI_ARGS (tinfo), args,
complain, in_decl);
if (argvec == error_mark_node)
return error_mark_node;
TEMPLATE_TEMPLATE_PARM_TEMPLATE_INFO (r)
= build_template_info (tmpl, argvec);
}
}
break;
case TEMPLATE_PARM_INDEX:
/* OK, now substitute the type of the non-type parameter. We
couldn't do it earlier because it might be an auto parameter,
and we wouldn't need to if we had an argument. */
type = tsubst (type, args, complain, in_decl);
if (type == error_mark_node)
return error_mark_node;
r = reduce_template_parm_level (t, type, levels, args, complain);
break;
default:
gcc_unreachable ();
}
return r;
}
case TREE_LIST:
{
tree purpose, value, chain;
if (t == void_list_node)
return t;
if ((TREE_PURPOSE (t) && PACK_EXPANSION_P (TREE_PURPOSE (t)))
|| (TREE_VALUE (t) && PACK_EXPANSION_P (TREE_VALUE (t))))
{
/* We have pack expansions, so expand those and
create a new list out of it. */
/* Expand the argument expressions. */
tree purposevec = NULL_TREE;
if (TREE_PURPOSE (t))
purposevec = tsubst_pack_expansion (TREE_PURPOSE (t), args,
complain, in_decl);
if (purposevec == error_mark_node)
return error_mark_node;
tree valuevec = NULL_TREE;
if (TREE_VALUE (t))
valuevec = tsubst_pack_expansion (TREE_VALUE (t), args,
complain, in_decl);
if (valuevec == error_mark_node)
return error_mark_node;
/* Build the rest of the list. */
tree chain = TREE_CHAIN (t);
if (chain && chain != void_type_node)
chain = tsubst (chain, args, complain, in_decl);
if (chain == error_mark_node)
return error_mark_node;
/* Determine the number of arguments. */
int len = -1;
if (purposevec && TREE_CODE (purposevec) == TREE_VEC)
{
len = TREE_VEC_LENGTH (purposevec);
gcc_assert (!valuevec || len == TREE_VEC_LENGTH (valuevec));
}
else if (TREE_CODE (valuevec) == TREE_VEC)
len = TREE_VEC_LENGTH (valuevec);
else
{
/* Since we only performed a partial substitution into
the argument pack, we only RETURN (a single list
node. */
if (purposevec == TREE_PURPOSE (t)
&& valuevec == TREE_VALUE (t)
&& chain == TREE_CHAIN (t))
return t;
return tree_cons (purposevec, valuevec, chain);
}
/* Convert the argument vectors into a TREE_LIST. */
for (int i = len; i-- > 0; )
{
purpose = (purposevec ? TREE_VEC_ELT (purposevec, i)
: NULL_TREE);
value = (valuevec ? TREE_VEC_ELT (valuevec, i)
: NULL_TREE);
/* Build the list (backwards). */
chain = hash_tree_cons (purpose, value, chain);
}
return chain;
}
purpose = TREE_PURPOSE (t);
if (purpose)
{
purpose = tsubst (purpose, args, complain, in_decl);
if (purpose == error_mark_node)
return error_mark_node;
}
value = TREE_VALUE (t);
if (value)
{
value = tsubst (value, args, complain, in_decl);
if (value == error_mark_node)
return error_mark_node;
}
chain = TREE_CHAIN (t);
if (chain && chain != void_type_node)
{
chain = tsubst (chain, args, complain, in_decl);
if (chain == error_mark_node)
return error_mark_node;
}
if (purpose == TREE_PURPOSE (t)
&& value == TREE_VALUE (t)
&& chain == TREE_CHAIN (t))
return t;
return hash_tree_cons (purpose, value, chain);
}
case TREE_BINFO:
/* We should never be tsubsting a binfo. */
gcc_unreachable ();
case TREE_VEC:
/* A vector of template arguments. */
gcc_assert (!type);
return tsubst_template_args (t, args, complain, in_decl);
case POINTER_TYPE:
case REFERENCE_TYPE:
{
if (type == TREE_TYPE (t) && TREE_CODE (type) != METHOD_TYPE)
return t;
/* [temp.deduct]
Type deduction may fail for any of the following
reasons:
-- Attempting to create a pointer to reference type.
-- Attempting to create a reference to a reference type or
a reference to void.
Core issue 106 says that creating a reference to a reference
during instantiation is no longer a cause for failure. We
only enforce this check in strict C++98 mode. */
if ((TYPE_REF_P (type)
&& (((cxx_dialect == cxx98) && flag_iso) || code != REFERENCE_TYPE))
|| (code == REFERENCE_TYPE && VOID_TYPE_P (type)))
{
static location_t last_loc;
/* We keep track of the last time we issued this error
message to avoid spewing a ton of messages during a
single bad template instantiation. */
if (complain & tf_error
&& last_loc != input_location)
{
if (VOID_TYPE_P (type))
error ("forming reference to void");
else if (code == POINTER_TYPE)
error ("forming pointer to reference type %qT", type);
else
error ("forming reference to reference type %qT", type);
last_loc = input_location;
}
return error_mark_node;
}
else if (TREE_CODE (type) == FUNCTION_TYPE
&& (type_memfn_quals (type) != TYPE_UNQUALIFIED
|| type_memfn_rqual (type) != REF_QUAL_NONE))
{
if (complain & tf_error)
{
if (code == POINTER_TYPE)
error ("forming pointer to qualified function type %qT",
type);
else
error ("forming reference to qualified function type %qT",
type);
}
return error_mark_node;
}
else if (code == POINTER_TYPE)
{
r = build_pointer_type (type);
if (TREE_CODE (type) == METHOD_TYPE)
r = build_ptrmemfunc_type (r);
}
else if (TYPE_REF_P (type))
/* In C++0x, during template argument substitution, when there is an
attempt to create a reference to a reference type, reference
collapsing is applied as described in [14.3.1/4 temp.arg.type]:
"If a template-argument for a template-parameter T names a type
that is a reference to a type A, an attempt to create the type
'lvalue reference to cv T' creates the type 'lvalue reference to
A,' while an attempt to create the type type rvalue reference to
cv T' creates the type T"
*/
r = cp_build_reference_type
(TREE_TYPE (type),
TYPE_REF_IS_RVALUE (t) && TYPE_REF_IS_RVALUE (type));
else
r = cp_build_reference_type (type, TYPE_REF_IS_RVALUE (t));
r = cp_build_qualified_type_real (r, cp_type_quals (t), complain);
if (r != error_mark_node)
/* Will this ever be needed for TYPE_..._TO values? */
layout_type (r);
return r;
}
case OFFSET_TYPE:
{
r = tsubst (TYPE_OFFSET_BASETYPE (t), args, complain, in_decl);
if (r == error_mark_node || !MAYBE_CLASS_TYPE_P (r))
{
/* [temp.deduct]
Type deduction may fail for any of the following
reasons:
-- Attempting to create "pointer to member of T" when T
is not a class type. */
if (complain & tf_error)
error ("creating pointer to member of non-class type %qT", r);
return error_mark_node;
}
if (TYPE_REF_P (type))
{
if (complain & tf_error)
error ("creating pointer to member reference type %qT", type);
return error_mark_node;
}
if (VOID_TYPE_P (type))
{
if (complain & tf_error)
error ("creating pointer to member of type void");
return error_mark_node;
}
gcc_assert (TREE_CODE (type) != METHOD_TYPE);
if (TREE_CODE (type) == FUNCTION_TYPE)
{
/* The type of the implicit object parameter gets its
cv-qualifiers from the FUNCTION_TYPE. */
tree memptr;
tree method_type
= build_memfn_type (type, r, type_memfn_quals (type),
type_memfn_rqual (type));
memptr = build_ptrmemfunc_type (build_pointer_type (method_type));
return cp_build_qualified_type_real (memptr, cp_type_quals (t),
complain);
}
else
return cp_build_qualified_type_real (build_ptrmem_type (r, type),
cp_type_quals (t),
complain);
}
case FUNCTION_TYPE:
case METHOD_TYPE:
{
tree fntype;
tree specs;
fntype = tsubst_function_type (t, args, complain, in_decl);
if (fntype == error_mark_node)
return error_mark_node;
/* Substitute the exception specification. */
specs = tsubst_exception_specification (t, args, complain, in_decl,
/*defer_ok*/fndecl_type);
if (specs == error_mark_node)
return error_mark_node;
if (specs)
fntype = build_exception_variant (fntype, specs);
return fntype;
}
case ARRAY_TYPE:
{
tree domain = tsubst (TYPE_DOMAIN (t), args, complain, in_decl);
if (domain == error_mark_node)
return error_mark_node;
/* As an optimization, we avoid regenerating the array type if
it will obviously be the same as T. */
if (type == TREE_TYPE (t) && domain == TYPE_DOMAIN (t))
return t;
/* These checks should match the ones in create_array_type_for_decl.
[temp.deduct]
The deduction may fail for any of the following reasons:
-- Attempting to create an array with an element type that
is void, a function type, or a reference type, or [DR337]
an abstract class type. */
if (VOID_TYPE_P (type)
|| TREE_CODE (type) == FUNCTION_TYPE
|| (TREE_CODE (type) == ARRAY_TYPE
&& TYPE_DOMAIN (type) == NULL_TREE)
|| TYPE_REF_P (type))
{
if (complain & tf_error)
error ("creating array of %qT", type);
return error_mark_node;
}
if (!verify_type_context (input_location, TCTX_ARRAY_ELEMENT, type,
!(complain & tf_error)))
return error_mark_node;
if (abstract_virtuals_error_sfinae (ACU_ARRAY, type, complain))
return error_mark_node;
r = build_cplus_array_type (type, domain);
if (!valid_array_size_p (input_location, r, in_decl,
(complain & tf_error)))
return error_mark_node;
if (TYPE_USER_ALIGN (t))
{
SET_TYPE_ALIGN (r, TYPE_ALIGN (t));
TYPE_USER_ALIGN (r) = 1;
}
return r;
}
case TYPENAME_TYPE:
{
tree ctx = TYPE_CONTEXT (t);
if (TREE_CODE (ctx) == TYPE_PACK_EXPANSION)
{
ctx = tsubst_pack_expansion (ctx, args, complain, in_decl);
if (ctx == error_mark_node
|| TREE_VEC_LENGTH (ctx) > 1)
return error_mark_node;
if (TREE_VEC_LENGTH (ctx) == 0)
{
if (complain & tf_error)
error ("%qD is instantiated for an empty pack",
TYPENAME_TYPE_FULLNAME (t));
return error_mark_node;
}
ctx = TREE_VEC_ELT (ctx, 0);
}
else
ctx = tsubst_aggr_type (ctx, args, complain, in_decl,
/*entering_scope=*/1);
if (ctx == error_mark_node)
return error_mark_node;
tree f = tsubst_copy (TYPENAME_TYPE_FULLNAME (t), args,
complain, in_decl);
if (f == error_mark_node)
return error_mark_node;
if (!MAYBE_CLASS_TYPE_P (ctx))
{
if (complain & tf_error)
error ("%qT is not a class, struct, or union type", ctx);
return error_mark_node;
}
else if (!uses_template_parms (ctx) && !TYPE_BEING_DEFINED (ctx))
{
/* Normally, make_typename_type does not require that the CTX
have complete type in order to allow things like:
template <class T> struct S { typename S<T>::X Y; };
But, such constructs have already been resolved by this
point, so here CTX really should have complete type, unless
it's a partial instantiation. */
ctx = complete_type (ctx);
if (!COMPLETE_TYPE_P (ctx))
{
if (complain & tf_error)
cxx_incomplete_type_error (NULL_TREE, ctx);
return error_mark_node;
}
}
f = make_typename_type (ctx, f, typename_type,
complain | tf_keep_type_decl);
if (f == error_mark_node)
return f;
if (TREE_CODE (f) == TYPE_DECL)
{
complain |= tf_ignore_bad_quals;
f = TREE_TYPE (f);
}
if (TREE_CODE (f) != TYPENAME_TYPE)
{
if (TYPENAME_IS_ENUM_P (t) && TREE_CODE (f) != ENUMERAL_TYPE)
{
if (complain & tf_error)
error ("%qT resolves to %qT, which is not an enumeration type",
t, f);
else
return error_mark_node;
}
else if (TYPENAME_IS_CLASS_P (t) && !CLASS_TYPE_P (f))
{
if (complain & tf_error)
error ("%qT resolves to %qT, which is not a class type",
t, f);
else
return error_mark_node;
}
}
return cp_build_qualified_type_real
(f, cp_type_quals (f) | cp_type_quals (t), complain);
}
case UNBOUND_CLASS_TEMPLATE:
{
tree ctx = tsubst_aggr_type (TYPE_CONTEXT (t), args, complain,
in_decl, /*entering_scope=*/1);
tree name = TYPE_IDENTIFIER (t);
tree parm_list = DECL_TEMPLATE_PARMS (TYPE_NAME (t));
if (ctx == error_mark_node || name == error_mark_node)
return error_mark_node;
if (parm_list)
parm_list = tsubst_template_parms (parm_list, args, complain);
return make_unbound_class_template (ctx, name, parm_list, complain);
}
case TYPEOF_TYPE:
{
tree type;
++cp_unevaluated_operand;
++c_inhibit_evaluation_warnings;
type = tsubst_expr (TYPEOF_TYPE_EXPR (t), args,
complain, in_decl,
/*integral_constant_expression_p=*/false);
--cp_unevaluated_operand;
--c_inhibit_evaluation_warnings;
type = finish_typeof (type);
return cp_build_qualified_type_real (type,
cp_type_quals (t)
| cp_type_quals (type),
complain);
}
case DECLTYPE_TYPE:
{
tree type;
++cp_unevaluated_operand;
++c_inhibit_evaluation_warnings;
type = tsubst_copy_and_build (DECLTYPE_TYPE_EXPR (t), args,
complain|tf_decltype, in_decl,
/*function_p*/false,
/*integral_constant_expression*/false);
--cp_unevaluated_operand;
--c_inhibit_evaluation_warnings;
if (DECLTYPE_FOR_LAMBDA_CAPTURE (t))
type = lambda_capture_field_type (type,
false /*explicit_init*/,
DECLTYPE_FOR_REF_CAPTURE (t));
else if (DECLTYPE_FOR_LAMBDA_PROXY (t))
type = lambda_proxy_type (type);
else
{
bool id = DECLTYPE_TYPE_ID_EXPR_OR_MEMBER_ACCESS_P (t);
if (id && TREE_CODE (DECLTYPE_TYPE_EXPR (t)) == BIT_NOT_EXPR
&& EXPR_P (type))
/* In a template ~id could be either a complement expression
or an unqualified-id naming a destructor; if instantiating
it produces an expression, it's not an id-expression or
member access. */
id = false;
type = finish_decltype_type (type, id, complain);
}
return cp_build_qualified_type_real (type,
cp_type_quals (t)
| cp_type_quals (type),
complain | tf_ignore_bad_quals);
}
case UNDERLYING_TYPE:
{
tree type = tsubst (UNDERLYING_TYPE_TYPE (t), args,
complain, in_decl);
return finish_underlying_type (type);
}
case TYPE_ARGUMENT_PACK:
case NONTYPE_ARGUMENT_PACK:
{
tree r;
if (code == NONTYPE_ARGUMENT_PACK)
r = make_node (code);
else
r = cxx_make_type (code);
tree pack_args = ARGUMENT_PACK_ARGS (t);
pack_args = tsubst_template_args (pack_args, args, complain, in_decl);
SET_ARGUMENT_PACK_ARGS (r, pack_args);
return r;
}
case VOID_CST:
case INTEGER_CST:
case REAL_CST:
case STRING_CST:
case PLUS_EXPR:
case MINUS_EXPR:
case NEGATE_EXPR:
case NOP_EXPR:
case INDIRECT_REF:
case ADDR_EXPR:
case CALL_EXPR:
case ARRAY_REF:
case SCOPE_REF:
/* We should use one of the expression tsubsts for these codes. */
gcc_unreachable ();
default:
sorry ("use of %qs in template", get_tree_code_name (code));
return error_mark_node;
}
}
/* tsubst a BASELINK.  OBJECT_TYPE, if non-NULL, is the type of the
   expression on the left-hand side of the "." or "->" operator.  We
   only do the lookup if we had a dependent BASELINK.  Otherwise we
   adjust it onto the instantiated hierarchy.  */

static tree
tsubst_baselink (tree baselink, tree object_type,
		 tree args, tsubst_flags_t complain, tree in_decl)
{
  /* Substitute into the scope through which the member was named.  */
  bool qualified_p = BASELINK_QUALIFIED_P (baselink);
  tree qualifying_scope = BINFO_TYPE (BASELINK_ACCESS_BINFO (baselink));
  qualifying_scope = tsubst (qualifying_scope, args, complain, in_decl);

  /* Substitute into the type of a conversion operator, if any.  */
  tree optype = BASELINK_OPTYPE (baselink);
  optype = tsubst (optype, args, complain, in_decl);

  /* Peel off explicit template arguments (f<T>) so that we can
     substitute into them separately and re-attach them below.  */
  tree template_args = NULL_TREE;
  bool template_id_p = false;
  tree fns = BASELINK_FUNCTIONS (baselink);
  if (TREE_CODE (fns) == TEMPLATE_ID_EXPR)
    {
      template_id_p = true;
      template_args = TREE_OPERAND (fns, 1);
      fns = TREE_OPERAND (fns, 0);
      if (template_args)
	template_args = tsubst_template_args (template_args, args,
					      complain, in_decl);
    }

  /* The BASELINK was dependent iff substitution changed the type of
     the binfo through which the functions were found.  */
  tree binfo_type = BINFO_TYPE (BASELINK_BINFO (baselink));
  binfo_type = tsubst (binfo_type, args, complain, in_decl);
  bool dependent_p = binfo_type != BINFO_TYPE (BASELINK_BINFO (baselink));

  if (dependent_p)
    {
      /* Redo the name lookup in the now-instantiated scope.  */
      tree name = OVL_NAME (fns);
      if (IDENTIFIER_CONV_OP_P (name))
	name = make_conv_op_name (optype);

      if (name == complete_dtor_identifier)
	/* Treat as-if non-dependent below.  */
	dependent_p = false;

      baselink = lookup_fnfields (qualifying_scope, name, /*protect=*/1);
      if (!baselink)
	{
	  if ((complain & tf_error)
	      && constructor_name_p (name, qualifying_scope))
	    error ("cannot call constructor %<%T::%D%> directly",
		   qualifying_scope, name);
	  return error_mark_node;
	}

      if (BASELINK_P (baselink))
	fns = BASELINK_FUNCTIONS (baselink);
    }
  else
    /* We're going to overwrite pieces below, make a duplicate.  */
    baselink = copy_node (baselink);

  /* If lookup found a single function, mark it as used at this point.
     (If lookup found multiple functions the one selected later by
     overload resolution will be marked as used at that point.)  */
  if (!template_id_p && !really_overloaded_fn (fns))
    {
      tree fn = OVL_FIRST (fns);
      bool ok = mark_used (fn, complain);
      if (!ok && !(complain & tf_error))
	return error_mark_node;
      if (ok && BASELINK_P (baselink))
	/* We might have instantiated an auto function.  */
	TREE_TYPE (baselink) = TREE_TYPE (fn);
    }

  if (BASELINK_P (baselink))
    {
      /* Add back the template arguments, if present.  */
      if (template_id_p)
	BASELINK_FUNCTIONS (baselink)
	  = build2 (TEMPLATE_ID_EXPR, unknown_type_node, fns, template_args);

      /* Update the conversion operator type.  */
      BASELINK_OPTYPE (baselink) = optype;
    }

  if (!object_type)
    object_type = current_class_type;

  if (qualified_p || !dependent_p)
    {
      baselink = adjust_result_of_qualified_name_lookup (baselink,
							 qualifying_scope,
							 object_type);
      if (!qualified_p)
	/* We need to call adjust_result_of_qualified_name_lookup in case the
	   destructor names a base class, but we unset BASELINK_QUALIFIED_P
	   so that we still get virtual function binding.  */
	BASELINK_QUALIFIED_P (baselink) = false;
    }

  return baselink;
}
/* Like tsubst_expr for a SCOPE_REF, given by QUALIFIED_ID.  DONE is
   true if the qualified-id will be a postfix-expression in-and-of
   itself; false if more of the postfix-expression follows the
   QUALIFIED_ID.  ADDRESS_P is true if the qualified-id is the operand
   of "&".  */

static tree
tsubst_qualified_id (tree qualified_id, tree args,
		     tsubst_flags_t complain, tree in_decl,
		     bool done, bool address_p)
{
  tree expr;
  tree scope;
  tree name;
  bool is_template;
  tree template_args;
  location_t loc = UNKNOWN_LOCATION;

  gcc_assert (TREE_CODE (qualified_id) == SCOPE_REF);

  /* Figure out what name to look up.  A TEMPLATE_ID_EXPR carries
     explicit template arguments which are substituted separately.  */
  name = TREE_OPERAND (qualified_id, 1);
  if (TREE_CODE (name) == TEMPLATE_ID_EXPR)
    {
      is_template = true;
      loc = EXPR_LOCATION (name);
      template_args = TREE_OPERAND (name, 1);
      if (template_args)
	template_args = tsubst_template_args (template_args, args,
					      complain, in_decl);
      if (template_args == error_mark_node)
	return error_mark_node;
      name = TREE_OPERAND (name, 0);
    }
  else
    {
      is_template = false;
      template_args = NULL_TREE;
    }

  /* Substitute into the qualifying scope.  When there are no ARGS, we
     are just trying to simplify a non-dependent expression.  In that
     case the qualifying scope may be dependent, and, in any case,
     substituting will not help.  */
  scope = TREE_OPERAND (qualified_id, 0);
  if (args)
    {
      scope = tsubst (scope, args, complain, in_decl);
      expr = tsubst_copy (name, args, complain, in_decl);
    }
  else
    expr = name;

  if (dependent_scope_p (scope))
    {
      /* The scope is still dependent, so rebuild the SCOPE_REF for a
	 later round of substitution rather than doing the lookup now.  */
      if (is_template)
	expr = build_min_nt_loc (loc, TEMPLATE_ID_EXPR, expr, template_args);
      tree r = build_qualified_name (NULL_TREE, scope, expr,
				     QUALIFIED_NAME_IS_TEMPLATE (qualified_id));
      REF_PARENTHESIZED_P (r) = REF_PARENTHESIZED_P (qualified_id);
      return r;
    }

  if (!BASELINK_P (name) && !DECL_P (expr))
    {
      if (TREE_CODE (expr) == BIT_NOT_EXPR)
	{
	  /* A BIT_NOT_EXPR is used to represent a destructor.  */
	  if (!check_dtor_name (scope, TREE_OPERAND (expr, 0)))
	    {
	      /* NOTE(review): this error is not guarded by
		 (complain & tf_error) like the other diagnostics in
		 this function — confirm whether that is intentional
		 (it may fire even during SFINAE-style substitution).  */
	      error ("qualifying type %qT does not match destructor name ~%qT",
		     scope, TREE_OPERAND (expr, 0));
	      expr = error_mark_node;
	    }
	  else
	    expr = lookup_qualified_name (scope, complete_dtor_identifier,
					  /*is_type_p=*/0, false);
	}
      else
	expr = lookup_qualified_name (scope, expr, /*is_type_p=*/0, false);
      /* For a TEMPLATE_DECL, inspect the underlying declaration to see
	 whether the lookup unexpectedly produced a type.  */
      if (TREE_CODE (TREE_CODE (expr) == TEMPLATE_DECL
		     ? DECL_TEMPLATE_RESULT (expr) : expr) == TYPE_DECL)
	{
	  if (complain & tf_error)
	    {
	      error ("dependent-name %qE is parsed as a non-type, but "
		     "instantiation yields a type", qualified_id);
	      inform (input_location, "say %<typename %E%> if a type is meant", qualified_id);
	    }
	  return error_mark_node;
	}
    }

  if (DECL_P (expr))
    {
      check_accessibility_of_qualified_id (expr, /*object_type=*/NULL_TREE,
					   scope);
      /* Remember that there was a reference to this entity.  */
      if (!mark_used (expr, complain) && !(complain & tf_error))
	return error_mark_node;
    }

  if (expr == error_mark_node || TREE_CODE (expr) == TREE_LIST)
    {
      if (complain & tf_error)
	qualified_name_lookup_error (scope,
				     TREE_OPERAND (qualified_id, 1),
				     expr, input_location);
      return error_mark_node;
    }

  if (is_template)
    {
      /* We may be repeating a check already done during parsing, but
	 if it was well-formed and passed then, it will pass again
	 now, and if it didn't, we wouldn't have got here.  The case
	 we want to catch is when we couldn't tell then, and can now,
	 namely when templ prior to substitution was an
	 identifier.  */
      if (flag_concepts && check_auto_in_tmpl_args (expr, template_args))
	return error_mark_node;

      if (variable_template_p (expr))
	expr = lookup_and_finish_template_variable (expr, template_args,
						    complain);
      else
	expr = lookup_template_function (expr, template_args);
    }

  if (expr == error_mark_node && complain & tf_error)
    qualified_name_lookup_error (scope, TREE_OPERAND (qualified_id, 1),
				 expr, input_location);
  else if (TYPE_P (scope))
    {
      expr = (adjust_result_of_qualified_name_lookup
	      (expr, scope, current_nonlambda_class_type ()));
      expr = (finish_qualified_id_expr
	      (scope, expr, done, address_p && PTRMEM_OK_P (qualified_id),
	       QUALIFIED_NAME_IS_TEMPLATE (qualified_id),
	       /*template_arg_p=*/false, complain));
    }

  /* Expressions do not generally have reference type.  */
  if (TREE_CODE (expr) != SCOPE_REF
      /* However, if we're about to form a pointer-to-member, we just
	 want the referenced member referenced.  */
      && TREE_CODE (expr) != OFFSET_REF)
    expr = convert_from_reference (expr);

  if (REF_PARENTHESIZED_P (qualified_id))
    expr = force_paren_expr (expr);

  return expr;
}
/* tsubst the initializer for a VAR_DECL.  INIT is the unsubstituted
   initializer, DECL is the substituted VAR_DECL.  Other arguments are as
   for tsubst.  */

static tree
tsubst_init (tree init, tree decl, tree args,
	     tsubst_flags_t complain, tree in_decl)
{
  if (init == NULL_TREE)
    return NULL_TREE;

  tree new_init = tsubst_expr (init, args, complain, in_decl, false);
  tree decl_type = TREE_TYPE (decl);

  /* The common case: substitution produced an initializer (or DECL's
     type is erroneous, in which case there is nothing more to do).  */
  if (new_init != NULL_TREE || decl_type == error_mark_node)
    return new_init;

  /* We had an initializer but it instantiated to nothing; this only
     occurs when the initializer was a pack expansion where the
     parameter packs used in that expansion were of length zero.  */
  if (tree auto_node = type_uses_auto (decl_type))
    {
      /* Class template argument deduction placeholders tolerate an
	 empty initializer; a plain auto cannot be deduced from one.  */
      if (CLASS_PLACEHOLDER_TEMPLATE (auto_node))
	return new_init;
      if (complain & tf_error)
	error ("initializer for %q#D expands to an empty list "
	       "of expressions", decl);
      return error_mark_node;
    }

  if (!dependent_type_p (decl_type))
    {
      /* Value-initialize the object instead.  */
      new_init = build_value_init (decl_type, complain);
      if (TREE_CODE (new_init) == AGGR_INIT_EXPR)
	new_init = get_target_expr_sfinae (new_init, complain);
      if (TREE_CODE (new_init) == TARGET_EXPR)
	TARGET_EXPR_DIRECT_INIT_P (new_init) = true;
    }

  return new_init;
}
/* If T is a reference to a dependent member of the current instantiation C and
   we are trying to refer to that member in a partial instantiation of C,
   return a SCOPE_REF; otherwise, return NULL_TREE.

   This can happen when forming a C++20 alias template deduction guide, as in
   PR96199.  */

static tree
maybe_dependent_member_ref (tree t, tree args, tsubst_flags_t complain,
			    tree in_decl)
{
  /* Only relevant for C++20 and later.  */
  if (cxx_dialect >= cxx2a)
    {
      tree scope = context_for_name_lookup (t);
      if (CLASS_TYPE_P (scope))
	{
	  /* Substitute into the enclosing class; if the result is still
	     a dependent scope, defer the member lookup by rebuilding a
	     qualified name instead of resolving T now.  */
	  scope = tsubst (scope, args, complain, in_decl);
	  if (dependent_scope_p (scope))
	    return build_qualified_name (NULL_TREE, scope, DECL_NAME (t),
					 /*template_p=*/false);
	}
    }
  return NULL_TREE;
}
/* Like tsubst, but deals with expressions. This function just replaces
template parms; to finish processing the resultant expression, use
tsubst_copy_and_build or tsubst_expr. */
static tree
tsubst_copy (tree t, tree args, tsubst_flags_t complain, tree in_decl)
{
enum tree_code code;
tree r;
if (t == NULL_TREE || t == error_mark_node || args == NULL_TREE)
return t;
code = TREE_CODE (t);
switch (code)
{
case PARM_DECL:
r = retrieve_local_specialization (t);
if (r == NULL_TREE)
{
/* We get here for a use of 'this' in an NSDMI. */
if (DECL_NAME (t) == this_identifier && current_class_ptr)
return current_class_ptr;
/* This can happen for a parameter name used later in a function
declaration (such as in a late-specified return type). Just
make a dummy decl, since it's only used for its type. */
gcc_assert (cp_unevaluated_operand != 0);
r = tsubst_decl (t, args, complain);
/* Give it the template pattern as its context; its true context
hasn't been instantiated yet and this is good enough for
mangling. */
DECL_CONTEXT (r) = DECL_CONTEXT (t);
}
if (TREE_CODE (r) == ARGUMENT_PACK_SELECT)
r = argument_pack_select_arg (r);
if (!mark_used (r, complain) && !(complain & tf_error))
return error_mark_node;
return r;
case CONST_DECL:
{
tree enum_type;
tree v;
if (DECL_TEMPLATE_PARM_P (t))
return tsubst_copy (DECL_INITIAL (t), args, complain, in_decl);
/* There is no need to substitute into namespace-scope
enumerators. */
if (DECL_NAMESPACE_SCOPE_P (t))
return t;
/* If ARGS is NULL, then T is known to be non-dependent. */
if (args == NULL_TREE)
return scalar_constant_value (t);
if (tree ref = maybe_dependent_member_ref (t, args, complain, in_decl))
return ref;
/* Unfortunately, we cannot just call lookup_name here.
Consider:
template <int I> int f() {
enum E { a = I };
struct S { void g() { E e = a; } };
};
When we instantiate f<7>::S::g(), say, lookup_name is not
clever enough to find f<7>::a. */
enum_type
= tsubst_aggr_type (DECL_CONTEXT (t), args, complain, in_decl,
/*entering_scope=*/0);
for (v = TYPE_VALUES (enum_type);
v != NULL_TREE;
v = TREE_CHAIN (v))
if (TREE_PURPOSE (v) == DECL_NAME (t))
return TREE_VALUE (v);
/* We didn't find the name. That should never happen; if
name-lookup found it during preliminary parsing, we
should find it again here during instantiation. */
gcc_unreachable ();
}
return t;
case FIELD_DECL:
if (DECL_CONTEXT (t))
{
tree ctx;
ctx = tsubst_aggr_type (DECL_CONTEXT (t), args, complain, in_decl,
/*entering_scope=*/1);
if (ctx != DECL_CONTEXT (t))
{
tree r = lookup_field (ctx, DECL_NAME (t), 0, false);
if (!r)
{
if (complain & tf_error)
error ("using invalid field %qD", t);
return error_mark_node;
}
return r;
}
}
return t;
case VAR_DECL:
if (tree ref = maybe_dependent_member_ref (t, args, complain, in_decl))
return ref;
gcc_fallthrough();
case FUNCTION_DECL:
if (DECL_LANG_SPECIFIC (t) && DECL_TEMPLATE_INFO (t))
r = tsubst (t, args, complain, in_decl);
else if (local_variable_p (t)
&& uses_template_parms (DECL_CONTEXT (t)))
{
r = retrieve_local_specialization (t);
if (r == NULL_TREE)
{
/* First try name lookup to find the instantiation. */
r = lookup_name (DECL_NAME (t));
if (r)
{
if (!VAR_P (r))
{
/* During error-recovery we may find a non-variable,
even an OVERLOAD: just bail out and avoid ICEs and
duplicate diagnostics (c++/62207). */
gcc_assert (seen_error ());
return error_mark_node;
}
if (!is_capture_proxy (r))
{
/* Make sure the one we found is the one we want. */
tree ctx = enclosing_instantiation_of (DECL_CONTEXT (t));
if (ctx != DECL_CONTEXT (r))
r = NULL_TREE;
}
}
if (r)
/* OK */;
else
{
/* This can happen for a variable used in a
late-specified return type of a local lambda, or for a
local static or constant. Building a new VAR_DECL
should be OK in all those cases. */
r = tsubst_decl (t, args, complain);
if (local_specializations)
/* Avoid infinite recursion (79640). */
register_local_specialization (r, t);
if (decl_maybe_constant_var_p (r))
{
/* We can't call cp_finish_decl, so handle the
initializer by hand. */
tree init = tsubst_init (DECL_INITIAL (t), r, args,
complain, in_decl);
if (!processing_template_decl)
init = maybe_constant_init (init);
if (processing_template_decl
? potential_constant_expression (init)
: reduced_constant_expression_p (init))
DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (r)
= TREE_CONSTANT (r) = true;
DECL_INITIAL (r) = init;
if (tree auto_node = type_uses_auto (TREE_TYPE (r)))
TREE_TYPE (r)
= do_auto_deduction (TREE_TYPE (r), init, auto_node,
complain, adc_variable_type);
}
gcc_assert (cp_unevaluated_operand || TREE_STATIC (r)
|| decl_constant_var_p (r)
|| seen_error ());
if (!processing_template_decl
&& !TREE_STATIC (r))
r = process_outer_var_ref (r, complain);
}
/* Remember this for subsequent uses. */
if (local_specializations)
register_local_specialization (r, t);
}
if (TREE_CODE (r) == ARGUMENT_PACK_SELECT)
r = argument_pack_select_arg (r);
}
else
r = t;
if (!mark_used (r, complain))
return error_mark_node;
return r;
case NAMESPACE_DECL:
return t;
case OVERLOAD:
return t;
case BASELINK:
return tsubst_baselink (t, current_nonlambda_class_type (),
args, complain, in_decl);
case TEMPLATE_DECL:
if (DECL_TEMPLATE_TEMPLATE_PARM_P (t))
return tsubst (TREE_TYPE (DECL_TEMPLATE_RESULT (t)),
args, complain, in_decl);
else if (DECL_FUNCTION_TEMPLATE_P (t) && DECL_MEMBER_TEMPLATE_P (t))
return tsubst (t, args, complain, in_decl);
else if (DECL_CLASS_SCOPE_P (t)
&& uses_template_parms (DECL_CONTEXT (t)))
{
/* Template template argument like the following example need
special treatment:
template <template <class> class TT> struct C {};
template <class T> struct D {
template <class U> struct E {};
C<E> c; // #1
};
D<int> d; // #2
We are processing the template argument `E' in #1 for
the template instantiation #2. Originally, `E' is a
TEMPLATE_DECL with `D<T>' as its DECL_CONTEXT. Now we
have to substitute this with one having context `D<int>'. */
tree context = tsubst (DECL_CONTEXT (t), args, complain, in_decl);
if (dependent_scope_p (context))
{
/* When rewriting a constructor into a deduction guide, a
non-dependent name can become dependent, so memtmpl<args>
becomes context::template memtmpl<args>. */
tree type = tsubst (TREE_TYPE (t), args, complain, in_decl);
return build_qualified_name (type, context, DECL_NAME (t),
/*template*/true);
}
return lookup_field (context, DECL_NAME(t), 0, false);
}
else
/* Ordinary template template argument. */
return t;
case NON_LVALUE_EXPR:
case VIEW_CONVERT_EXPR:
{
/* Handle location wrappers by substituting the wrapped node
first, *then* reusing the resulting type. Doing the type
first ensures that we handle template parameters and
parameter pack expansions. */
if (location_wrapper_p (t))
{
tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args,
complain, in_decl);
return maybe_wrap_with_location (op0, EXPR_LOCATION (t));
}
tree op = TREE_OPERAND (t, 0);
if (code == VIEW_CONVERT_EXPR
&& TREE_CODE (op) == TEMPLATE_PARM_INDEX)
{
/* Wrapper to make a C++20 template parameter object const. */
op = tsubst_copy (op, args, complain, in_decl);
if (TREE_CODE (op) == TEMPLATE_PARM_INDEX)
{
tree type = tsubst (TREE_TYPE (t), args, complain, in_decl);
return build1 (code, type, op);
}
else if (!CP_TYPE_CONST_P (TREE_TYPE (op)))
{
/* The template argument is not const, presumably because
it is still dependent, and so not the const template parm
object. */
tree type = tsubst (TREE_TYPE (t), args, complain, in_decl);
gcc_checking_assert (same_type_ignoring_top_level_qualifiers_p
(type, TREE_TYPE (op)));
if (TREE_CODE (op) == CONSTRUCTOR
|| TREE_CODE (op) == IMPLICIT_CONV_EXPR)
{
/* Don't add a wrapper to these. */
op = copy_node (op);
TREE_TYPE (op) = type;
}
else
/* Do add a wrapper otherwise. */
op = build1 (code, type, op);
}
return op;
}
/* force_paren_expr can also create a VIEW_CONVERT_EXPR. */
else if (code == VIEW_CONVERT_EXPR && REF_PARENTHESIZED_P (t))
{
op = tsubst_copy (op, args, complain, in_decl);
op = build1 (code, TREE_TYPE (op), op);
REF_PARENTHESIZED_P (op) = true;
return op;
}
/* We shouldn't see any other uses of these in templates. */
gcc_unreachable ();
}
case CAST_EXPR:
case REINTERPRET_CAST_EXPR:
case CONST_CAST_EXPR:
case STATIC_CAST_EXPR:
case DYNAMIC_CAST_EXPR:
case IMPLICIT_CONV_EXPR:
case CONVERT_EXPR:
case NOP_EXPR:
{
tree type = tsubst (TREE_TYPE (t), args, complain, in_decl);
tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl);
return build1 (code, type, op0);
}
case SIZEOF_EXPR:
if (PACK_EXPANSION_P (TREE_OPERAND (t, 0))
|| ARGUMENT_PACK_P (TREE_OPERAND (t, 0)))
{
tree expanded, op = TREE_OPERAND (t, 0);
int len = 0;
if (SIZEOF_EXPR_TYPE_P (t))
op = TREE_TYPE (op);
++cp_unevaluated_operand;
++c_inhibit_evaluation_warnings;
/* We only want to compute the number of arguments. */
if (PACK_EXPANSION_P (op))
expanded = tsubst_pack_expansion (op, args, complain, in_decl);
else
expanded = tsubst_template_args (ARGUMENT_PACK_ARGS (op),
args, complain, in_decl);
--cp_unevaluated_operand;
--c_inhibit_evaluation_warnings;
if (TREE_CODE (expanded) == TREE_VEC)
{
len = TREE_VEC_LENGTH (expanded);
/* Set TREE_USED for the benefit of -Wunused. */
for (int i = 0; i < len; i++)
if (DECL_P (TREE_VEC_ELT (expanded, i)))
TREE_USED (TREE_VEC_ELT (expanded, i)) = true;
}
if (expanded == error_mark_node)
return error_mark_node;
else if (PACK_EXPANSION_P (expanded)
|| (TREE_CODE (expanded) == TREE_VEC
&& pack_expansion_args_count (expanded)))
{
if (PACK_EXPANSION_P (expanded))
/* OK. */;
else if (TREE_VEC_LENGTH (expanded) == 1)
expanded = TREE_VEC_ELT (expanded, 0);
else
expanded = make_argument_pack (expanded);
if (TYPE_P (expanded))
return cxx_sizeof_or_alignof_type (input_location,
expanded, SIZEOF_EXPR,
false,
complain & tf_error);
else
return cxx_sizeof_or_alignof_expr (input_location,
expanded, SIZEOF_EXPR,
complain & tf_error);
}
else
return build_int_cst (size_type_node, len);
}
if (SIZEOF_EXPR_TYPE_P (t))
{
r = tsubst (TREE_TYPE (TREE_OPERAND (t, 0)),
args, complain, in_decl);
r = build1 (NOP_EXPR, r, error_mark_node);
r = build1 (SIZEOF_EXPR,
tsubst (TREE_TYPE (t), args, complain, in_decl), r);
SIZEOF_EXPR_TYPE_P (r) = 1;
return r;
}
/* Fall through */
case INDIRECT_REF:
case NEGATE_EXPR:
case TRUTH_NOT_EXPR:
case BIT_NOT_EXPR:
case ADDR_EXPR:
case UNARY_PLUS_EXPR: /* Unary + */
case ALIGNOF_EXPR:
case AT_ENCODE_EXPR:
case ARROW_EXPR:
case THROW_EXPR:
case TYPEID_EXPR:
case REALPART_EXPR:
case IMAGPART_EXPR:
case PAREN_EXPR:
{
tree type = tsubst (TREE_TYPE (t), args, complain, in_decl);
tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl);
r = build1 (code, type, op0);
if (code == ALIGNOF_EXPR)
ALIGNOF_EXPR_STD_P (r) = ALIGNOF_EXPR_STD_P (t);
return r;
}
case COMPONENT_REF:
{
tree object;
tree name;
object = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl);
name = TREE_OPERAND (t, 1);
if (TREE_CODE (name) == BIT_NOT_EXPR)
{
name = tsubst_copy (TREE_OPERAND (name, 0), args,
complain, in_decl);
name = build1 (BIT_NOT_EXPR, NULL_TREE, name);
}
else if (TREE_CODE (name) == SCOPE_REF
&& TREE_CODE (TREE_OPERAND (name, 1)) == BIT_NOT_EXPR)
{
tree base = tsubst_copy (TREE_OPERAND (name, 0), args,
complain, in_decl);
name = TREE_OPERAND (name, 1);
name = tsubst_copy (TREE_OPERAND (name, 0), args,
complain, in_decl);
name = build1 (BIT_NOT_EXPR, NULL_TREE, name);
name = build_qualified_name (/*type=*/NULL_TREE,
base, name,
/*template_p=*/false);
}
else if (BASELINK_P (name))
name = tsubst_baselink (name,
non_reference (TREE_TYPE (object)),
args, complain,
in_decl);
else
name = tsubst_copy (name, args, complain, in_decl);
return build_nt (COMPONENT_REF, object, name, NULL_TREE);
}
case PLUS_EXPR:
case MINUS_EXPR:
case MULT_EXPR:
case TRUNC_DIV_EXPR:
case CEIL_DIV_EXPR:
case FLOOR_DIV_EXPR:
case ROUND_DIV_EXPR:
case EXACT_DIV_EXPR:
case BIT_AND_EXPR:
case BIT_IOR_EXPR:
case BIT_XOR_EXPR:
case TRUNC_MOD_EXPR:
case FLOOR_MOD_EXPR:
case TRUTH_ANDIF_EXPR:
case TRUTH_ORIF_EXPR:
case TRUTH_AND_EXPR:
case TRUTH_OR_EXPR:
case RSHIFT_EXPR:
case LSHIFT_EXPR:
case EQ_EXPR:
case NE_EXPR:
case MAX_EXPR:
case MIN_EXPR:
case LE_EXPR:
case GE_EXPR:
case LT_EXPR:
case GT_EXPR:
case COMPOUND_EXPR:
case DOTSTAR_EXPR:
case MEMBER_REF:
case PREDECREMENT_EXPR:
case PREINCREMENT_EXPR:
case POSTDECREMENT_EXPR:
case POSTINCREMENT_EXPR:
{
tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl);
tree op1 = tsubst_copy (TREE_OPERAND (t, 1), args, complain, in_decl);
return build_nt (code, op0, op1);
}
case SCOPE_REF:
{
tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl);
tree op1 = tsubst_copy (TREE_OPERAND (t, 1), args, complain, in_decl);
return build_qualified_name (/*type=*/NULL_TREE, op0, op1,
QUALIFIED_NAME_IS_TEMPLATE (t));
}
case ARRAY_REF:
{
tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl);
tree op1 = tsubst_copy (TREE_OPERAND (t, 1), args, complain, in_decl);
return build_nt (ARRAY_REF, op0, op1, NULL_TREE, NULL_TREE);
}
case CALL_EXPR:
{
int n = VL_EXP_OPERAND_LENGTH (t);
tree result = build_vl_exp (CALL_EXPR, n);
int i;
for (i = 0; i < n; i++)
TREE_OPERAND (t, i) = tsubst_copy (TREE_OPERAND (t, i), args,
complain, in_decl);
return result;
}
case COND_EXPR:
case MODOP_EXPR:
case PSEUDO_DTOR_EXPR:
case VEC_PERM_EXPR:
{
tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl);
tree op1 = tsubst_copy (TREE_OPERAND (t, 1), args, complain, in_decl);
tree op2 = tsubst_copy (TREE_OPERAND (t, 2), args, complain, in_decl);
r = build_nt (code, op0, op1, op2);
TREE_NO_WARNING (r) = TREE_NO_WARNING (t);
return r;
}
case NEW_EXPR:
{
tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl);
tree op1 = tsubst_copy (TREE_OPERAND (t, 1), args, complain, in_decl);
tree op2 = tsubst_copy (TREE_OPERAND (t, 2), args, complain, in_decl);
r = build_nt (code, op0, op1, op2);
NEW_EXPR_USE_GLOBAL (r) = NEW_EXPR_USE_GLOBAL (t);
return r;
}
case DELETE_EXPR:
{
tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl);
tree op1 = tsubst_copy (TREE_OPERAND (t, 1), args, complain, in_decl);
r = build_nt (code, op0, op1);
DELETE_EXPR_USE_GLOBAL (r) = DELETE_EXPR_USE_GLOBAL (t);
DELETE_EXPR_USE_VEC (r) = DELETE_EXPR_USE_VEC (t);
return r;
}
case TEMPLATE_ID_EXPR:
{
/* Substituted template arguments */
tree fn = TREE_OPERAND (t, 0);
tree targs = TREE_OPERAND (t, 1);
fn = tsubst_copy (fn, args, complain, in_decl);
if (targs)
targs = tsubst_template_args (targs, args, complain, in_decl);
return lookup_template_function (fn, targs);
}
case TREE_LIST:
{
tree purpose, value, chain;
if (t == void_list_node)
return t;
purpose = TREE_PURPOSE (t);
if (purpose)
purpose = tsubst_copy (purpose, args, complain, in_decl);
value = TREE_VALUE (t);
if (value)
value = tsubst_copy (value, args, complain, in_decl);
chain = TREE_CHAIN (t);
if (chain && chain != void_type_node)
chain = tsubst_copy (chain, args, complain, in_decl);
if (purpose == TREE_PURPOSE (t)
&& value == TREE_VALUE (t)
&& chain == TREE_CHAIN (t))
return t;
return tree_cons (purpose, value, chain);
}
case RECORD_TYPE:
case UNION_TYPE:
case ENUMERAL_TYPE:
case INTEGER_TYPE:
case TEMPLATE_TYPE_PARM:
case TEMPLATE_TEMPLATE_PARM:
case BOUND_TEMPLATE_TEMPLATE_PARM:
case TEMPLATE_PARM_INDEX:
case POINTER_TYPE:
case REFERENCE_TYPE:
case OFFSET_TYPE:
case FUNCTION_TYPE:
case METHOD_TYPE:
case ARRAY_TYPE:
case TYPENAME_TYPE:
case UNBOUND_CLASS_TEMPLATE:
case TYPEOF_TYPE:
case DECLTYPE_TYPE:
case TYPE_DECL:
return tsubst (t, args, complain, in_decl);
case USING_DECL:
t = DECL_NAME (t);
/* Fall through. */
case IDENTIFIER_NODE:
if (IDENTIFIER_CONV_OP_P (t))
{
tree new_type = tsubst (TREE_TYPE (t), args, complain, in_decl);
return make_conv_op_name (new_type);
}
else
return t;
case CONSTRUCTOR:
/* This is handled by tsubst_copy_and_build. */
gcc_unreachable ();
case VA_ARG_EXPR:
{
tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl);
tree type = tsubst (TREE_TYPE (t), args, complain, in_decl);
return build_x_va_arg (EXPR_LOCATION (t), op0, type);
}
case CLEANUP_POINT_EXPR:
/* We shouldn't have built any of these during initial template
generation. Instead, they should be built during instantiation
in response to the saved STMT_IS_FULL_EXPR_P setting. */
gcc_unreachable ();
case OFFSET_REF:
{
tree type = tsubst (TREE_TYPE (t), args, complain, in_decl);
tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl);
tree op1 = tsubst_copy (TREE_OPERAND (t, 1), args, complain, in_decl);
r = build2 (code, type, op0, op1);
PTRMEM_OK_P (r) = PTRMEM_OK_P (t);
if (!mark_used (TREE_OPERAND (r, 1), complain)
&& !(complain & tf_error))
return error_mark_node;
return r;
}
case EXPR_PACK_EXPANSION:
error ("invalid use of pack expansion expression");
return error_mark_node;
case NONTYPE_ARGUMENT_PACK:
error ("use %<...%> to expand argument pack");
return error_mark_node;
case VOID_CST:
gcc_checking_assert (t == void_node && VOID_TYPE_P (TREE_TYPE (t)));
return t;
case INTEGER_CST:
case REAL_CST:
case COMPLEX_CST:
{
/* Instantiate any typedefs in the type. */
tree type = tsubst (TREE_TYPE (t), args, complain, in_decl);
r = fold_convert (type, t);
gcc_assert (TREE_CODE (r) == code);
return r;
}
case STRING_CST:
{
tree type = tsubst (TREE_TYPE (t), args, complain, in_decl);
r = t;
if (type != TREE_TYPE (t))
{
r = copy_node (t);
TREE_TYPE (r) = type;
}
return r;
}
case PTRMEM_CST:
/* These can sometimes show up in a partial instantiation, but never
involve template parms. */
gcc_assert (!uses_template_parms (t));
return t;
case UNARY_LEFT_FOLD_EXPR:
return tsubst_unary_left_fold (t, args, complain, in_decl);
case UNARY_RIGHT_FOLD_EXPR:
return tsubst_unary_right_fold (t, args, complain, in_decl);
case BINARY_LEFT_FOLD_EXPR:
return tsubst_binary_left_fold (t, args, complain, in_decl);
case BINARY_RIGHT_FOLD_EXPR:
return tsubst_binary_right_fold (t, args, complain, in_decl);
case PREDICT_EXPR:
return t;
case DEBUG_BEGIN_STMT:
/* ??? There's no point in copying it for now, but maybe some
day it will contain more information, such as a pointer back
to the containing function, inlined copy or so. */
return t;
case CO_AWAIT_EXPR:
return tsubst_expr (t, args, complain, in_decl,
/*integral_constant_expression_p=*/false);
break;
default:
/* We shouldn't get here, but keep going if !flag_checking. */
if (flag_checking)
gcc_unreachable ();
return t;
}
}
/* Helper function for tsubst_omp_clauses, used for instantiation of
   OMP_CLAUSE_DECL of clauses.

   DECL is the OMP_CLAUSE_DECL of the pattern clause; ARGS, COMPLAIN and
   IN_DECL are the usual tsubst arguments.  ITERATOR_CACHE, when non-NULL,
   is a two-element cache (pattern iterator vector, substituted iterator
   vector) shared across clauses of the same directive so that an OpenMP
   iterator list is only substituted once.  Returns the substituted
   decl/expression, or error_mark_node on failure.  */

static tree
tsubst_omp_clause_decl (tree decl, tree args, tsubst_flags_t complain,
			tree in_decl, tree *iterator_cache)
{
  if (decl == NULL_TREE)
    return NULL_TREE;

  /* Handle OpenMP iterators.  These are represented as a TREE_LIST whose
     TREE_PURPOSE is a TREE_VEC chain of (decl, begin, end, step, ...)
     iterator descriptors.  */
  if (TREE_CODE (decl) == TREE_LIST
      && TREE_PURPOSE (decl)
      && TREE_CODE (TREE_PURPOSE (decl)) == TREE_VEC)
    {
      tree ret;
      /* Reuse the previously substituted iterator vector if this clause
	 shares it with the preceding one.  */
      if (iterator_cache[0] == TREE_PURPOSE (decl))
	ret = iterator_cache[1];
      else
	{
	  tree *tp = &ret;
	  /* The iterator variables live in their own OpenMP scope; the
	     matching poplevel below records the resulting BLOCK in
	     element 5 of the vector.  */
	  begin_scope (sk_omp, NULL);
	  for (tree it = TREE_PURPOSE (decl); it; it = TREE_CHAIN (it))
	    {
	      *tp = copy_node (it);
	      /* Element 0 is the iterator decl, 1-3 are the begin, end
		 and step expressions.  */
	      TREE_VEC_ELT (*tp, 0)
		= tsubst_decl (TREE_VEC_ELT (it, 0), args, complain);
	      TREE_VEC_ELT (*tp, 1)
		= tsubst_expr (TREE_VEC_ELT (it, 1), args, complain, in_decl,
			       /*integral_constant_expression_p=*/false);
	      TREE_VEC_ELT (*tp, 2)
		= tsubst_expr (TREE_VEC_ELT (it, 2), args, complain, in_decl,
			       /*integral_constant_expression_p=*/false);
	      TREE_VEC_ELT (*tp, 3)
		= tsubst_expr (TREE_VEC_ELT (it, 3), args, complain, in_decl,
			       /*integral_constant_expression_p=*/false);
	      TREE_CHAIN (*tp) = NULL_TREE;
	      tp = &TREE_CHAIN (*tp);
	    }
	  TREE_VEC_ELT (ret, 5) = poplevel (1, 1, 0);
	  iterator_cache[0] = TREE_PURPOSE (decl);
	  iterator_cache[1] = ret;
	}
      /* Recurse on the underlying decl; iterator lists do not nest, so no
	 cache is passed down.  */
      return build_tree_list (ret, tsubst_omp_clause_decl (TREE_VALUE (decl),
							   args, complain,
							   in_decl, NULL));
    }

  /* Handle an OpenMP array section represented as a TREE_LIST (or
     OMP_CLAUSE_DEPEND_KIND).  An OMP_CLAUSE_DEPEND (with a depend
     kind of OMP_CLAUSE_DEPEND_SINK) can also be represented as a
     TREE_LIST.  We can handle it exactly the same as an array section
     (purpose, value, and a chain), even though the nomenclature
     (low_bound, length, etc) is different.  */
  if (TREE_CODE (decl) == TREE_LIST)
    {
      tree low_bound
	= tsubst_expr (TREE_PURPOSE (decl), args, complain, in_decl,
		       /*integral_constant_expression_p=*/false);
      tree length = tsubst_expr (TREE_VALUE (decl), args, complain, in_decl,
				 /*integral_constant_expression_p=*/false);
      tree chain = tsubst_omp_clause_decl (TREE_CHAIN (decl), args, complain,
					   in_decl, NULL);
      /* Share the pattern node when substitution changed nothing.  */
      if (TREE_PURPOSE (decl) == low_bound
	  && TREE_VALUE (decl) == length
	  && TREE_CHAIN (decl) == chain)
	return decl;
      tree ret = tree_cons (low_bound, length, chain);
      /* Preserve the depend-sink negativity flag on the new node.  */
      OMP_CLAUSE_DEPEND_SINK_NEGATIVE (ret)
	= OMP_CLAUSE_DEPEND_SINK_NEGATIVE (decl);
      return ret;
    }
  tree ret = tsubst_expr (decl, args, complain, in_decl,
			  /*integral_constant_expression_p=*/false);
  /* Undo convert_from_reference tsubst_expr could have called.  */
  if (decl
      && REFERENCE_REF_P (ret)
      && !REFERENCE_REF_P (decl))
    ret = TREE_OPERAND (ret, 0);
  return ret;
}
/* Like tsubst_copy, but specifically for OpenMP clauses.

   CLAUSES is the pattern clause chain, ORT identifies the kind of OpenMP/
   OpenACC region the clauses belong to, and ARGS/COMPLAIN/IN_DECL are the
   usual tsubst arguments.  Returns the substituted (and, except for
   declare simd, finish_omp_clauses-processed) clause chain.  */

static tree
tsubst_omp_clauses (tree clauses, enum c_omp_region_type ort,
		    tree args, tsubst_flags_t complain, tree in_decl)
{
  tree new_clauses = NULL_TREE, nc, oc;
  /* A linear clause whose step must be restored to NULL_TREE after
     finish_omp_clauses (it belongs to a declare simd combined set).  */
  tree linear_no_step = NULL_TREE;
  /* Shared cache for OpenMP iterator substitution; see
     tsubst_omp_clause_decl.  */
  tree iterator_cache[2] = { NULL_TREE, NULL_TREE };
  /* Copy each clause, prepending to NEW_CLAUSES (reversed back at the
     end), then substitute its operands according to the clause code.  */
  for (oc = clauses; oc ; oc = OMP_CLAUSE_CHAIN (oc))
    {
      nc = copy_node (oc);
      OMP_CLAUSE_CHAIN (nc) = new_clauses;
      new_clauses = nc;

      switch (OMP_CLAUSE_CODE (nc))
	{
	case OMP_CLAUSE_LASTPRIVATE:
	  /* A lastprivate clause may carry an update statement; substitute
	     it into a fresh statement list.  */
	  if (OMP_CLAUSE_LASTPRIVATE_STMT (oc))
	    {
	      OMP_CLAUSE_LASTPRIVATE_STMT (nc) = push_stmt_list ();
	      tsubst_expr (OMP_CLAUSE_LASTPRIVATE_STMT (oc), args, complain,
			   in_decl, /*integral_constant_expression_p=*/false);
	      OMP_CLAUSE_LASTPRIVATE_STMT (nc)
		= pop_stmt_list (OMP_CLAUSE_LASTPRIVATE_STMT (nc));
	    }
	  /* FALLTHRU */
	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_SHARED:
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_UNIFORM:
	case OMP_CLAUSE_DEPEND:
	case OMP_CLAUSE_FROM:
	case OMP_CLAUSE_TO:
	case OMP_CLAUSE_MAP:
	case OMP_CLAUSE__CACHE_:
	case OMP_CLAUSE_NONTEMPORAL:
	case OMP_CLAUSE_USE_DEVICE_PTR:
	case OMP_CLAUSE_USE_DEVICE_ADDR:
	case OMP_CLAUSE_IS_DEVICE_PTR:
	case OMP_CLAUSE_INCLUSIVE:
	case OMP_CLAUSE_EXCLUSIVE:
	  /* Clauses whose only operand is a decl/list/array section.  */
	  OMP_CLAUSE_DECL (nc)
	    = tsubst_omp_clause_decl (OMP_CLAUSE_DECL (oc), args, complain,
				      in_decl, iterator_cache);
	  break;
	case OMP_CLAUSE_TILE:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_DEVICE:
	case OMP_CLAUSE_DIST_SCHEDULE:
	case OMP_CLAUSE_NUM_TEAMS:
	case OMP_CLAUSE_THREAD_LIMIT:
	case OMP_CLAUSE_SAFELEN:
	case OMP_CLAUSE_SIMDLEN:
	case OMP_CLAUSE_NUM_TASKS:
	case OMP_CLAUSE_GRAINSIZE:
	case OMP_CLAUSE_PRIORITY:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_HINT:
	case OMP_CLAUSE_NUM_GANGS:
	case OMP_CLAUSE_NUM_WORKERS:
	case OMP_CLAUSE_VECTOR_LENGTH:
	case OMP_CLAUSE_WORKER:
	case OMP_CLAUSE_VECTOR:
	case OMP_CLAUSE_ASYNC:
	case OMP_CLAUSE_WAIT:
	  /* Clauses whose only operand is an expression.  */
	  OMP_CLAUSE_OPERAND (nc, 0)
	    = tsubst_expr (OMP_CLAUSE_OPERAND (oc, 0), args, complain,
			   in_decl, /*integral_constant_expression_p=*/false);
	  break;
	case OMP_CLAUSE_REDUCTION:
	case OMP_CLAUSE_IN_REDUCTION:
	case OMP_CLAUSE_TASK_REDUCTION:
	  /* A user-defined reduction may name its combiner via a
	     possibly-qualified placeholder id; substitute the scope but
	     keep the unqualified name for later lookup.  */
	  if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (oc))
	    {
	      tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (oc);
	      if (TREE_CODE (placeholder) == SCOPE_REF)
		{
		  tree scope = tsubst (TREE_OPERAND (placeholder, 0), args,
				       complain, in_decl);
		  OMP_CLAUSE_REDUCTION_PLACEHOLDER (nc)
		    = build_qualified_name (NULL_TREE, scope,
					    TREE_OPERAND (placeholder, 1),
					    false);
		}
	      else
		gcc_assert (identifier_p (placeholder));
	    }
	  OMP_CLAUSE_DECL (nc)
	    = tsubst_omp_clause_decl (OMP_CLAUSE_DECL (oc), args, complain,
				      in_decl, NULL);
	  break;
	case OMP_CLAUSE_GANG:
	case OMP_CLAUSE_ALIGNED:
	  /* Decl operand plus one expression operand.  */
	  OMP_CLAUSE_DECL (nc)
	    = tsubst_omp_clause_decl (OMP_CLAUSE_DECL (oc), args, complain,
				      in_decl, NULL);
	  OMP_CLAUSE_OPERAND (nc, 1)
	    = tsubst_expr (OMP_CLAUSE_OPERAND (oc, 1), args, complain,
			   in_decl, /*integral_constant_expression_p=*/false);
	  break;
	case OMP_CLAUSE_LINEAR:
	  OMP_CLAUSE_DECL (nc)
	    = tsubst_omp_clause_decl (OMP_CLAUSE_DECL (oc), args, complain,
				      in_decl, NULL);
	  if (OMP_CLAUSE_LINEAR_STEP (oc) == NULL_TREE)
	    {
	      /* Remember it so the NULL step can be restored after
		 finish_omp_clauses fills in a default; at most one such
		 clause is expected.  */
	      gcc_assert (!linear_no_step);
	      linear_no_step = nc;
	    }
	  else if (OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (oc))
	    /* A variable stride is a decl reference, not an expression.  */
	    OMP_CLAUSE_LINEAR_STEP (nc)
	      = tsubst_omp_clause_decl (OMP_CLAUSE_LINEAR_STEP (oc), args,
					complain, in_decl, NULL);
	  else
	    OMP_CLAUSE_LINEAR_STEP (nc)
	      = tsubst_expr (OMP_CLAUSE_LINEAR_STEP (oc), args, complain,
			     in_decl,
			     /*integral_constant_expression_p=*/false);
	  break;
	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_DEFAULT:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_MERGEABLE:
	case OMP_CLAUSE_INBRANCH:
	case OMP_CLAUSE_NOTINBRANCH:
	case OMP_CLAUSE_PROC_BIND:
	case OMP_CLAUSE_FOR:
	case OMP_CLAUSE_PARALLEL:
	case OMP_CLAUSE_SECTIONS:
	case OMP_CLAUSE_TASKGROUP:
	case OMP_CLAUSE_NOGROUP:
	case OMP_CLAUSE_THREADS:
	case OMP_CLAUSE_SIMD:
	case OMP_CLAUSE_DEFAULTMAP:
	case OMP_CLAUSE_ORDER:
	case OMP_CLAUSE_BIND:
	case OMP_CLAUSE_INDEPENDENT:
	case OMP_CLAUSE_AUTO:
	case OMP_CLAUSE_SEQ:
	case OMP_CLAUSE_IF_PRESENT:
	case OMP_CLAUSE_FINALIZE:
	  /* Clauses with no template-dependent operands: the plain copy
	     made above is already correct.  */
	  break;
	default:
	  gcc_unreachable ();
	}
      /* For real OpenMP regions (not declare simd / OpenACC), clean up
	 member-related artifacts left by tsubst_expr.  */
      if ((ort & C_ORT_OMP_DECLARE_SIMD) == C_ORT_OMP)
	switch (OMP_CLAUSE_CODE (nc))
	  {
	  case OMP_CLAUSE_SHARED:
	  case OMP_CLAUSE_PRIVATE:
	  case OMP_CLAUSE_FIRSTPRIVATE:
	  case OMP_CLAUSE_LASTPRIVATE:
	  case OMP_CLAUSE_COPYPRIVATE:
	  case OMP_CLAUSE_LINEAR:
	  case OMP_CLAUSE_REDUCTION:
	  case OMP_CLAUSE_IN_REDUCTION:
	  case OMP_CLAUSE_TASK_REDUCTION:
	  case OMP_CLAUSE_USE_DEVICE_PTR:
	  case OMP_CLAUSE_USE_DEVICE_ADDR:
	  case OMP_CLAUSE_IS_DEVICE_PTR:
	  case OMP_CLAUSE_INCLUSIVE:
	  case OMP_CLAUSE_EXCLUSIVE:
	    /* tsubst_expr on SCOPE_REF results in returning
	       finish_non_static_data_member result.  Undo that here.  */
	    if (TREE_CODE (OMP_CLAUSE_DECL (oc)) == SCOPE_REF
		&& (TREE_CODE (TREE_OPERAND (OMP_CLAUSE_DECL (oc), 1))
		    == IDENTIFIER_NODE))
	      {
		tree t = OMP_CLAUSE_DECL (nc);
		tree v = t;
		/* Strip component refs etc. looking for an access through
		   the artificial `this' parameter; if found, use the bare
		   member instead of the full member access.  */
		while (v)
		  switch (TREE_CODE (v))
		    {
		    case COMPONENT_REF:
		    case MEM_REF:
		    case INDIRECT_REF:
		    CASE_CONVERT:
		    case POINTER_PLUS_EXPR:
		      v = TREE_OPERAND (v, 0);
		      continue;
		    case PARM_DECL:
		      if (DECL_CONTEXT (v) == current_function_decl
			  && DECL_ARTIFICIAL (v)
			  && DECL_NAME (v) == this_identifier)
			OMP_CLAUSE_DECL (nc) = TREE_OPERAND (t, 1);
		      /* FALLTHRU */
		    default:
		      v = NULL_TREE;
		      break;
		    }
	      }
	    else if (VAR_P (OMP_CLAUSE_DECL (oc))
		     && DECL_HAS_VALUE_EXPR_P (OMP_CLAUSE_DECL (oc))
		     && DECL_ARTIFICIAL (OMP_CLAUSE_DECL (oc))
		     && DECL_LANG_SPECIFIC (OMP_CLAUSE_DECL (oc))
		     && DECL_OMP_PRIVATIZED_MEMBER (OMP_CLAUSE_DECL (oc)))
	      {
		/* Propagate the privatized-member marking from the
		   pattern decl to its instantiation.  */
		tree decl = OMP_CLAUSE_DECL (nc);
		if (VAR_P (decl))
		  {
		    retrofit_lang_decl (decl);
		    DECL_OMP_PRIVATIZED_MEMBER (decl) = 1;
		  }
	      }
	    break;
	  default:
	    break;
	  }
    }

  /* Restore source order (clauses were prepended above).  */
  new_clauses = nreverse (new_clauses);
  if (ort != C_ORT_OMP_DECLARE_SIMD)
    {
      new_clauses = finish_omp_clauses (new_clauses, ort);
      /* finish_omp_clauses may have supplied a default linear step;
	 restore the deliberately-missing one.  */
      if (linear_no_step)
	for (nc = new_clauses; nc; nc = OMP_CLAUSE_CHAIN (nc))
	  if (nc == linear_no_step)
	    {
	      OMP_CLAUSE_LINEAR_STEP (nc) = NULL_TREE;
	      break;
	    }
    }
  return new_clauses;
}
/* Like tsubst_copy_and_build, but unshare TREE_LIST nodes.  Used for the
   operand lists of asm statements; label operands (from asm goto) are
   re-looked-up in the instantiation rather than substituted.  */

static tree
tsubst_copy_asm_operands (tree t, tree args, tsubst_flags_t complain,
			  tree in_decl)
{
  if (t == NULL)
    return t;

  /* Anything that is not a TREE_LIST is an ordinary expression.  */
  if (TREE_CODE (t) != TREE_LIST)
    return tsubst_copy_and_build (t, args, complain, in_decl,
				  /*function_p=*/false,
				  /*integral_constant_expression_p=*/false);

  /* The shared void_list_node terminator must not be unshared.  */
  if (t == void_list_node)
    return t;

  tree new_purpose = TREE_PURPOSE (t);
  if (new_purpose)
    new_purpose = tsubst_copy_asm_operands (new_purpose, args, complain,
					    in_decl);

  tree new_value = TREE_VALUE (t);
  if (new_value)
    {
      if (TREE_CODE (new_value) == LABEL_DECL)
	{
	  /* An asm goto label: find the corresponding label in the
	     function being instantiated and mark it used.  */
	  new_value = lookup_label (DECL_NAME (new_value));
	  gcc_assert (TREE_CODE (new_value) == LABEL_DECL);
	  TREE_USED (new_value) = 1;
	}
      else
	new_value = tsubst_copy_asm_operands (new_value, args, complain,
					      in_decl);
    }

  tree new_chain = TREE_CHAIN (t);
  if (new_chain && new_chain != void_type_node)
    new_chain = tsubst_copy_asm_operands (new_chain, args, complain, in_decl);

  /* Always build a fresh node so the list is unshared.  */
  return tree_cons (new_purpose, new_value, new_chain);
}
/* Used to temporarily communicate the list of #pragma omp parallel
   clauses to #pragma omp for instantiation if they are combined
   together.  */

static tree *omp_parallel_combined_clauses;

/* Forward declaration; defined later in this file.  Instantiates the
   artificial base decl of a structured-binding declaration together with
   the decls for its individual identifiers.  */
static tree tsubst_decomp_names (tree, tree, tree, tsubst_flags_t, tree,
				 tree *, unsigned int *);
/* Substitute one OMP_FOR iterator.

   T is the OMP_FOR (or related loop) pattern, I the index of the
   collapsed-loop level being processed.  DECLV/INITV/CONDV/INCRV are the
   result vectors for the substituted decl, init, cond and incr at level
   I; ORIG_DECLV (possibly created here) records the original user decls.
   CLAUSES points to the OMP_FOR clause chain, which may be extended with
   private/lastprivate clauses for the iteration variable.  Returns true
   iff a range-for at this level required ORIG_DECLV to be (re)created.  */

static bool
tsubst_omp_for_iterator (tree t, int i, tree declv, tree &orig_declv,
			 tree initv, tree condv, tree incrv, tree *clauses,
			 tree args, tsubst_flags_t complain, tree in_decl,
			 bool integral_constant_expression_p)
{
#define RECUR(NODE)				\
  tsubst_expr ((NODE), args, complain, in_decl,	\
	       integral_constant_expression_p)
  tree decl, init, cond = NULL_TREE, incr = NULL_TREE;
  bool ret = false;

  init = TREE_VEC_ELT (OMP_FOR_INIT (t), i);
  gcc_assert (TREE_CODE (init) == MODIFY_EXPR);

  decl = TREE_OPERAND (init, 0);
  init = TREE_OPERAND (init, 1);
  tree decl_expr = NULL_TREE;
  /* A range-based for is marked in the pattern by storing
     global_namespace in the COND slot.  */
  bool range_for = TREE_VEC_ELT (OMP_FOR_COND (t), i) == global_namespace;
  if (range_for)
    {
      bool decomp = false;
      if (decl != error_mark_node && DECL_HAS_VALUE_EXPR_P (decl))
	{
	  /* The range-for variable may itself be a structured binding;
	     its value-expr is then an ARRAY_REF into the artificial
	     decomposition base.  Instantiate the base and all the
	     binding decls.  */
	  tree v = DECL_VALUE_EXPR (decl);
	  if (TREE_CODE (v) == ARRAY_REF
	      && VAR_P (TREE_OPERAND (v, 0))
	      && DECL_DECOMPOSITION_P (TREE_OPERAND (v, 0)))
	    {
	      tree decomp_first = NULL_TREE;
	      unsigned decomp_cnt = 0;
	      tree d = tsubst_decl (TREE_OPERAND (v, 0), args, complain);
	      maybe_push_decl (d);
	      d = tsubst_decomp_names (d, TREE_OPERAND (v, 0), args, complain,
				       in_decl, &decomp_first, &decomp_cnt);
	      decomp = true;
	      if (d == error_mark_node)
		decl = error_mark_node;
	      else
		for (unsigned int i = 0; i < decomp_cnt; i++)
		  {
		    if (!DECL_HAS_VALUE_EXPR_P (decomp_first))
		      {
			/* Bindings without their own value-expr get an
			   ARRAY_REF into the base; indices run from the
			   last binding backwards.  */
			tree v = build_nt (ARRAY_REF, d,
					   size_int (decomp_cnt - i - 1),
					   NULL_TREE, NULL_TREE);
			SET_DECL_VALUE_EXPR (decomp_first, v);
			DECL_HAS_VALUE_EXPR_P (decomp_first) = 1;
		      }
		    fit_decomposition_lang_decl (decomp_first, d);
		    decomp_first = DECL_CHAIN (decomp_first);
		  }
	    }
	}
      decl = tsubst_decl (decl, args, complain);
      if (!decomp)
	maybe_push_decl (decl);
    }
  else if (init && TREE_CODE (init) == DECL_EXPR)
    {
      /* We need to jump through some hoops to handle declarations in the
	 init-statement, since we might need to handle auto deduction,
	 but we need to keep control of initialization.  */
      decl_expr = init;
      init = DECL_INITIAL (DECL_EXPR_DECL (init));
      decl = tsubst_decl (decl, args, complain);
    }
  else
    {
      if (TREE_CODE (decl) == SCOPE_REF)
	{
	  decl = RECUR (decl);
	  if (TREE_CODE (decl) == COMPONENT_REF)
	    {
	      /* The iteration variable named a non-static data member;
		 strip the member access down to the member itself and
		 privatize it when reached through `this'.  */
	      tree v = decl;
	      while (v)
		switch (TREE_CODE (v))
		  {
		  case COMPONENT_REF:
		  case MEM_REF:
		  case INDIRECT_REF:
		  CASE_CONVERT:
		  case POINTER_PLUS_EXPR:
		    v = TREE_OPERAND (v, 0);
		    continue;
		  case PARM_DECL:
		    if (DECL_CONTEXT (v) == current_function_decl
			&& DECL_ARTIFICIAL (v)
			&& DECL_NAME (v) == this_identifier)
		      {
			decl = TREE_OPERAND (decl, 1);
			decl = omp_privatize_field (decl, false);
		      }
		    /* FALLTHRU */
		  default:
		    v = NULL_TREE;
		    break;
		  }
	    }
	}
      else
	decl = RECUR (decl);
    }
  init = RECUR (init);

  /* Substitute the original user-written decls, if recorded.  */
  if (orig_declv && OMP_FOR_ORIG_DECLS (t))
    {
      tree o = TREE_VEC_ELT (OMP_FOR_ORIG_DECLS (t), i);
      if (TREE_CODE (o) == TREE_LIST)
	TREE_VEC_ELT (orig_declv, i)
	  = tree_cons (RECUR (TREE_PURPOSE (o)),
		       RECUR (TREE_VALUE (o)),
		       NULL_TREE);
      else
	TREE_VEC_ELT (orig_declv, i) = RECUR (o);
    }

  if (range_for)
    {
      /* Lower the range-for into an ordinary counted loop, producing
	 decl/init/cond/incr for this level.  */
      tree this_pre_body = NULL_TREE;
      tree orig_init = NULL_TREE;
      tree orig_decl = NULL_TREE;
      cp_convert_omp_range_for (this_pre_body, NULL, decl, orig_decl, init,
				orig_init, cond, incr);
      if (orig_decl)
	{
	  if (orig_declv == NULL_TREE)
	    orig_declv = copy_node (declv);
	  TREE_VEC_ELT (orig_declv, i) = orig_decl;
	  ret = true;
	}
      else if (orig_declv)
	TREE_VEC_ELT (orig_declv, i) = decl;
    }

  /* Deduce auto now that the initializer has been substituted.  */
  tree auto_node = type_uses_auto (TREE_TYPE (decl));
  if (!range_for && auto_node && init)
    TREE_TYPE (decl)
      = do_auto_deduction (TREE_TYPE (decl), init, auto_node, complain);

  gcc_assert (!type_dependent_expression_p (decl));

  if (!CLASS_TYPE_P (TREE_TYPE (decl)) || range_for)
    {
      /* Scalar (or lowered range-for) iteration variable: fill in the
	 vectors directly; finish_omp_for will handle the rest.  */
      if (decl_expr)
	{
	  /* Declare the variable, but don't let that initialize it.  */
	  tree init_sav = DECL_INITIAL (DECL_EXPR_DECL (decl_expr));
	  DECL_INITIAL (DECL_EXPR_DECL (decl_expr)) = NULL_TREE;
	  RECUR (decl_expr);
	  DECL_INITIAL (DECL_EXPR_DECL (decl_expr)) = init_sav;
	}

      if (!range_for)
	{
	  cond = RECUR (TREE_VEC_ELT (OMP_FOR_COND (t), i));
	  incr = TREE_VEC_ELT (OMP_FOR_INCR (t), i);
	  if (TREE_CODE (incr) == MODIFY_EXPR)
	    {
	      tree lhs = RECUR (TREE_OPERAND (incr, 0));
	      tree rhs = RECUR (TREE_OPERAND (incr, 1));
	      incr = build_x_modify_expr (EXPR_LOCATION (incr), lhs,
					  NOP_EXPR, rhs, complain);
	    }
	  else
	    incr = RECUR (incr);
	  if (orig_declv && !OMP_FOR_ORIG_DECLS (t))
	    TREE_VEC_ELT (orig_declv, i) = decl;
	}
      TREE_VEC_ELT (declv, i) = decl;
      TREE_VEC_ELT (initv, i) = init;
      TREE_VEC_ELT (condv, i) = cond;
      TREE_VEC_ELT (incrv, i) = incr;
      return ret;
    }

  /* Class-type iteration variable (e.g. a random access iterator).  */
  if (decl_expr)
    {
      /* Declare and initialize the variable.  */
      RECUR (decl_expr);
      init = NULL_TREE;
    }
  else if (init)
    {
      /* The iteration variable was declared outside the loop; make sure
	 the clause lists privatize it appropriately.  Pass 0 scans the
	 combined parallel clauses (unless this is an OMP_LOOP), pass 1
	 scans the loop's own clauses.  */
      tree *pc;
      int j;
      for (j = ((omp_parallel_combined_clauses == NULL
		 || TREE_CODE (t) == OMP_LOOP) ? 1 : 0); j < 2; j++)
	{
	  for (pc = j ? clauses : omp_parallel_combined_clauses; *pc; )
	    {
	      if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_PRIVATE
		  && OMP_CLAUSE_DECL (*pc) == decl)
		break;
	      else if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_LASTPRIVATE
		       && OMP_CLAUSE_DECL (*pc) == decl)
		{
		  if (j)
		    break;
		  /* Move lastprivate (decl) clause to OMP_FOR_CLAUSES.  */
		  tree c = *pc;
		  *pc = OMP_CLAUSE_CHAIN (c);
		  OMP_CLAUSE_CHAIN (c) = *clauses;
		  *clauses = c;
		}
	      else if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_FIRSTPRIVATE
		       && OMP_CLAUSE_DECL (*pc) == decl)
		{
		  error ("iteration variable %qD should not be firstprivate",
			 decl);
		  *pc = OMP_CLAUSE_CHAIN (*pc);
		}
	      else if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_REDUCTION
		       && OMP_CLAUSE_DECL (*pc) == decl)
		{
		  error ("iteration variable %qD should not be reduction",
			 decl);
		  *pc = OMP_CLAUSE_CHAIN (*pc);
		}
	      else
		pc = &OMP_CLAUSE_CHAIN (*pc);
	    }
	  if (*pc)
	    break;
	}
      if (*pc == NULL_TREE)
	{
	  /* No clause mentions the iteration variable; add an implicit
	     one (lastprivate for OMP_LOOP, private otherwise).  */
	  tree c = build_omp_clause (input_location,
				     TREE_CODE (t) == OMP_LOOP
				     ? OMP_CLAUSE_LASTPRIVATE
				     : OMP_CLAUSE_PRIVATE);
	  OMP_CLAUSE_DECL (c) = decl;
	  c = finish_omp_clauses (c, C_ORT_OMP);
	  if (c)
	    {
	      OMP_CLAUSE_CHAIN (c) = *clauses;
	      *clauses = c;
	    }
	}
    }
  cond = TREE_VEC_ELT (OMP_FOR_COND (t), i);
  if (COMPARISON_CLASS_P (cond))
    {
      tree op0 = RECUR (TREE_OPERAND (cond, 0));
      tree op1 = RECUR (TREE_OPERAND (cond, 1));
      cond = build2 (TREE_CODE (cond), boolean_type_node, op0, op1);
    }
  else
    cond = RECUR (cond);
  incr = TREE_VEC_ELT (OMP_FOR_INCR (t), i);
  /* Keep the increment in one of the canonical shapes finish_omp_for
     expects, substituting its sub-operands individually.  */
  switch (TREE_CODE (incr))
    {
    case PREINCREMENT_EXPR:
    case PREDECREMENT_EXPR:
    case POSTINCREMENT_EXPR:
    case POSTDECREMENT_EXPR:
      incr = build2 (TREE_CODE (incr), TREE_TYPE (decl),
		     RECUR (TREE_OPERAND (incr, 0)), NULL_TREE);
      break;
    case MODIFY_EXPR:
      if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
	  || TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR)
	{
	  tree rhs = TREE_OPERAND (incr, 1);
	  tree lhs = RECUR (TREE_OPERAND (incr, 0));
	  tree rhs0 = RECUR (TREE_OPERAND (rhs, 0));
	  tree rhs1 = RECUR (TREE_OPERAND (rhs, 1));
	  incr = build2 (MODIFY_EXPR, TREE_TYPE (decl), lhs,
			 build2 (TREE_CODE (rhs), TREE_TYPE (decl),
				 rhs0, rhs1));
	}
      else
	incr = RECUR (incr);
      break;
    case MODOP_EXPR:
      if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
	  || TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR)
	{
	  /* i += n / i -= n: rebuild as i = i +/- n.  */
	  tree lhs = RECUR (TREE_OPERAND (incr, 0));
	  incr = build2 (MODIFY_EXPR, TREE_TYPE (decl), lhs,
			 build2 (TREE_CODE (TREE_OPERAND (incr, 1)),
				 TREE_TYPE (decl), lhs,
				 RECUR (TREE_OPERAND (incr, 2))));
	}
      else if (TREE_CODE (TREE_OPERAND (incr, 1)) == NOP_EXPR
	       && (TREE_CODE (TREE_OPERAND (incr, 2)) == PLUS_EXPR
		   || (TREE_CODE (TREE_OPERAND (incr, 2)) == MINUS_EXPR)))
	{
	  tree rhs = TREE_OPERAND (incr, 2);
	  tree lhs = RECUR (TREE_OPERAND (incr, 0));
	  tree rhs0 = RECUR (TREE_OPERAND (rhs, 0));
	  tree rhs1 = RECUR (TREE_OPERAND (rhs, 1));
	  incr = build2 (MODIFY_EXPR, TREE_TYPE (decl), lhs,
			 build2 (TREE_CODE (rhs), TREE_TYPE (decl),
				 rhs0, rhs1));
	}
      else
	incr = RECUR (incr);
      break;
    default:
      incr = RECUR (incr);
      break;
    }

  if (orig_declv && !OMP_FOR_ORIG_DECLS (t))
    TREE_VEC_ELT (orig_declv, i) = decl;
  TREE_VEC_ELT (declv, i) = decl;
  TREE_VEC_ELT (initv, i) = init;
  TREE_VEC_ELT (condv, i) = cond;
  TREE_VEC_ELT (incrv, i) = incr;
  return false;
#undef RECUR
}
/* Helper function of tsubst_expr, find OMP_TEAMS inside
   of OMP_TARGET's body.  walk_tree callback: returns the OMP_TEAMS node
   when found, descending only through BIND_EXPRs and STATEMENT_LISTs.  */

static tree
tsubst_find_omp_teams (tree *tp, int *walk_subtrees, void *)
{
  enum tree_code code = TREE_CODE (*tp);

  /* By default, do not look inside this node.  */
  *walk_subtrees = 0;

  if (code == OMP_TEAMS)
    return *tp;

  /* Only transparent wrappers may sit between the target and the
     teams construct.  */
  if (code == BIND_EXPR || code == STATEMENT_LIST)
    *walk_subtrees = 1;

  return NULL_TREE;
}
/* Helper function for tsubst_expr.  For decomposition declaration
   artificial base DECL, which is tsubsted PATTERN_DECL, tsubst
   also the corresponding decls representing the identifiers
   of the decomposition declaration.  Return DECL if successful
   or error_mark_node otherwise, set *FIRST to the first decl
   in the list chained through DECL_CHAIN and *CNT to the number
   of such decls.  */

static tree
tsubst_decomp_names (tree decl, tree pattern_decl, tree args,
		     tsubst_flags_t complain, tree in_decl, tree *first,
		     unsigned int *cnt)
{
  tree decl2, decl3, prev = decl;
  *cnt = 0;
  /* The artificial base of a structured binding is nameless.  */
  gcc_assert (DECL_NAME (decl) == NULL_TREE);
  /* The named binding decls follow the base on DECL_CHAIN.  */
  for (decl2 = DECL_CHAIN (pattern_decl);
       decl2
       && VAR_P (decl2)
       && DECL_DECOMPOSITION_P (decl2)
       && DECL_NAME (decl2);
       decl2 = DECL_CHAIN (decl2))
    {
      if (TREE_TYPE (decl2) == error_mark_node && *cnt == 0)
	{
	  /* Already-diagnosed erroneous pattern; bail out early.  */
	  gcc_assert (errorcount);
	  return error_mark_node;
	}
      (*cnt)++;
      gcc_assert (DECL_DECOMP_BASE (decl2) == pattern_decl);
      gcc_assert (DECL_HAS_VALUE_EXPR_P (decl2));
      /* Temporarily clear the pattern's value-expr so tsubst does not
	 substitute it; the instantiation's value-expr is set up by the
	 caller.  Restored right after.  */
      tree v = DECL_VALUE_EXPR (decl2);
      DECL_HAS_VALUE_EXPR_P (decl2) = 0;
      SET_DECL_VALUE_EXPR (decl2, NULL_TREE);
      decl3 = tsubst (decl2, args, complain, in_decl);
      SET_DECL_VALUE_EXPR (decl2, v);
      DECL_HAS_VALUE_EXPR_P (decl2) = 1;
      if (VAR_P (decl3))
	DECL_TEMPLATE_INSTANTIATED (decl3) = 1;
      else
	{
	  /* Substitution failed (already diagnosed); keep scanning so
	     *CNT stays consistent, but report failure.  */
	  gcc_assert (errorcount);
	  decl = error_mark_node;
	  continue;
	}
      maybe_push_decl (decl3);
      if (error_operand_p (decl3))
	decl = error_mark_node;
      /* The instantiated bindings must chain onto one another (and
	 finally onto the base); anything else means an error was
	 reported while pushing.  */
      else if (decl != error_mark_node
	       && DECL_CHAIN (decl3) != prev
	       && decl != prev)
	{
	  gcc_assert (errorcount);
	  decl = error_mark_node;
	}
      else
	prev = decl3;
    }
  *first = prev;
  return decl;
}
/* Return the proper local_specialization for init-capture pack DECL.  */

static tree
lookup_init_capture_pack (tree decl)
{
  /* We handle normal pack captures by forwarding to the specialization of
     the captured parameter.  We can't do that for pack init-captures; they
     need their own local_specialization.  The individual VAR_DECLs (if
     any) were created under build_capture_proxy, and we need to collect
     them when we process the DECL_EXPR for the pack init-capture in the
     template.

     How do we find them?  We don't know the capture proxy pack when
     building the individual resulting proxies, and we don't know the
     individual proxies when instantiating the pack.  What both sides have
     in common is the FIELD_DECL.

     So: when the FIELD_DECL is instantiated, the result is stashed in
     local_specializations.  Here, at the DECL_EXPR, we retrieve it, see
     how many elements it has, synthesize the per-element names, and look
     them up.  */
  tree cname = DECL_NAME (decl);
  tree val = DECL_VALUE_EXPR (decl);
  tree field = TREE_OPERAND (val, 1);
  gcc_assert (TREE_CODE (field) == FIELD_DECL);

  tree fpack = retrieve_local_specialization (field);
  if (fpack == error_mark_node)
    return error_mark_node;

  if (TREE_CODE (fpack) != TREE_VEC)
    /* Not an expanded pack: a single proxy under the capture's own
       name.  */
    return lookup_name_real (cname, 0, 0, true, 0, LOOKUP_NORMAL);

  /* Expanded pack: gather the per-element proxies into a
     NONTYPE_ARGUMENT_PACK.  */
  int count = TREE_VEC_LENGTH (fpack);
  tree vec = make_tree_vec (count);
  tree pack = make_node (NONTYPE_ARGUMENT_PACK);
  SET_ARGUMENT_PACK_ARGS (pack, vec);
  for (int idx = 0; idx < count; ++idx)
    {
      tree ename = make_ith_pack_parameter_name (cname, idx);
      TREE_VEC_ELT (vec, idx)
	= lookup_name_real (ename, 0, 0, true, 0, LOOKUP_NORMAL);
    }
  return pack;
}
/* Like tsubst_copy for expressions, etc. but also does semantic
processing. */
tree
tsubst_expr (tree t, tree args, tsubst_flags_t complain, tree in_decl,
	     bool integral_constant_expression_p)
{
  /* RETURN delivers an expression value through the common exit (which
     restores input_location); RECUR substitutes a subtree with the same
     args/complain/in_decl/ICE-context as this call.  */
#define RETURN(EXP) do { r = (EXP); goto out; } while(0)
#define RECUR(NODE)				\
  tsubst_expr ((NODE), args, complain, in_decl,	\
	       integral_constant_expression_p)

  tree stmt, tmp;
  tree r;
  location_t loc;

  if (t == NULL_TREE || t == error_mark_node)
    return t;

  /* Point diagnostics at T while substituting; the saved location is
     restored at 'out'.  */
  loc = input_location;
  if (location_t eloc = cp_expr_location (t))
    input_location = eloc;
  if (STATEMENT_CODE_P (TREE_CODE (t)))
    current_stmt_tree ()->stmts_are_full_exprs_p = STMT_IS_FULL_EXPR_P (t);

  switch (TREE_CODE (t))
    {
    case STATEMENT_LIST:
      {
	tree_stmt_iterator i;
	for (i = tsi_start (t); !tsi_end_p (i); tsi_next (&i))
	  RECUR (tsi_stmt (i));
	break;
      }

    case CTOR_INITIALIZER:
      finish_mem_initializers (tsubst_initializer_list
			       (TREE_OPERAND (t, 0), args));
      break;

    case RETURN_EXPR:
      finish_return_stmt (RECUR (TREE_OPERAND (t, 0)));
      break;

    case CO_RETURN_EXPR:
      finish_co_return_stmt (input_location, RECUR (TREE_OPERAND (t, 0)));
      break;

    case CO_YIELD_EXPR:
      stmt = finish_co_yield_expr (input_location,
				   RECUR (TREE_OPERAND (t, 0)));
      RETURN (stmt);
      break;

    case CO_AWAIT_EXPR:
      stmt = finish_co_await_expr (input_location,
				   RECUR (TREE_OPERAND (t, 0)));
      RETURN (stmt);
      break;

    case EXPR_STMT:
      tmp = RECUR (EXPR_STMT_EXPR (t));
      if (EXPR_STMT_STMT_EXPR_RESULT (t))
	finish_stmt_expr_expr (tmp, cur_stmt_expr);
      else
	finish_expr_stmt (tmp);
      break;

    case USING_STMT:
      finish_using_directive (USING_STMT_NAMESPACE (t), /*attribs=*/NULL_TREE);
      break;

    case DECL_EXPR:
      {
	tree decl, pattern_decl;
	tree init;

	pattern_decl = decl = DECL_EXPR_DECL (t);
	if (TREE_CODE (decl) == LABEL_DECL)
	  finish_label_decl (DECL_NAME (decl));
	else if (TREE_CODE (decl) == USING_DECL)
	  {
	    tree scope = USING_DECL_SCOPE (decl);
	    tree name = DECL_NAME (decl);

	    scope = tsubst (scope, args, complain, in_decl);
	    finish_nonmember_using_decl (scope, name);
	  }
	else if (is_capture_proxy (decl)
		 && !DECL_TEMPLATE_INSTANTIATION (current_function_decl))
	  {
	    /* We're in tsubst_lambda_expr, we've already inserted a new
	       capture proxy, so look it up and register it.  */
	    tree inst;
	    if (!DECL_PACK_P (decl))
	      {
		inst = lookup_name_real (DECL_NAME (decl), /*prefer_type*/0,
					 /*nonclass*/1, /*block_p=*/true,
					 /*ns_only*/0, LOOKUP_HIDDEN);
		gcc_assert (inst != decl && is_capture_proxy (inst));
	      }
	    else if (is_normal_capture_proxy (decl))
	      {
		inst = (retrieve_local_specialization
			(DECL_CAPTURED_VARIABLE (decl)));
		gcc_assert (TREE_CODE (inst) == NONTYPE_ARGUMENT_PACK
			    || DECL_PACK_P (inst));
	      }
	    else
	      inst = lookup_init_capture_pack (decl);
	    register_local_specialization (inst, decl);
	    break;
	  }
	else if (DECL_PRETTY_FUNCTION_P (decl))
	  decl = make_fname_decl (DECL_SOURCE_LOCATION (decl),
				  DECL_NAME (decl),
				  true/*DECL_PRETTY_FUNCTION_P (decl)*/);
	else if (DECL_IMPLICIT_TYPEDEF_P (decl)
		 && LAMBDA_TYPE_P (TREE_TYPE (decl)))
	  /* Don't copy the old closure; we'll create a new one in
	     tsubst_lambda_expr.  */
	  break;
	else
	  {
	    init = DECL_INITIAL (decl);
	    /* The following tsubst call will clear the DECL_TEMPLATE_INFO
	       for local variables, so save if DECL was declared constinit.  */
	    const bool constinit_p
	      = (VAR_P (decl)
		 && DECL_LANG_SPECIFIC (decl)
		 && DECL_TEMPLATE_INFO (decl)
		 && TINFO_VAR_DECLARED_CONSTINIT (DECL_TEMPLATE_INFO (decl)));
	    decl = tsubst (decl, args, complain, in_decl);
	    if (decl != error_mark_node)
	      {
		/* By marking the declaration as instantiated, we avoid
		   trying to instantiate it.  Since instantiate_decl can't
		   handle local variables, and since we've already done
		   all that needs to be done, that's the right thing to
		   do.  */
		if (VAR_P (decl))
		  DECL_TEMPLATE_INSTANTIATED (decl) = 1;
		if (VAR_P (decl) && !DECL_NAME (decl)
		    && ANON_AGGR_TYPE_P (TREE_TYPE (decl)))
		  /* Anonymous aggregates are a special case.  */
		  finish_anon_union (decl);
		else if (is_capture_proxy (DECL_EXPR_DECL (t)))
		  {
		    DECL_CONTEXT (decl) = current_function_decl;
		    if (DECL_NAME (decl) == this_identifier)
		      {
			tree lam = DECL_CONTEXT (current_function_decl);
			lam = CLASSTYPE_LAMBDA_EXPR (lam);
			LAMBDA_EXPR_THIS_CAPTURE (lam) = decl;
		      }
		    insert_capture_proxy (decl);
		  }
		else if (DECL_IMPLICIT_TYPEDEF_P (t))
		  /* We already did a pushtag.  */;
		else if (TREE_CODE (decl) == FUNCTION_DECL
			 && DECL_OMP_DECLARE_REDUCTION_P (decl)
			 && DECL_FUNCTION_SCOPE_P (pattern_decl))
		  {
		    /* A local OpenMP declare-reduction helper: push it at
		       namespace-ish scope, then restore its function
		       context before checking it.  */
		    DECL_CONTEXT (decl) = NULL_TREE;
		    pushdecl (decl);
		    DECL_CONTEXT (decl) = current_function_decl;
		    cp_check_omp_declare_reduction (decl);
		  }
		else
		  {
		    bool const_init = false;
		    unsigned int cnt = 0;
		    tree first = NULL_TREE, ndecl = error_mark_node;
		    tree asmspec_tree = NULL_TREE;

		    maybe_push_decl (decl);

		    if (VAR_P (decl)
			&& DECL_DECOMPOSITION_P (decl)
			&& TREE_TYPE (pattern_decl) != error_mark_node)
		      ndecl = tsubst_decomp_names (decl, pattern_decl, args,
						   complain, in_decl, &first,
						   &cnt);

		    init = tsubst_init (init, decl, args, complain, in_decl);

		    if (VAR_P (decl))
		      const_init = (DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P
				    (pattern_decl));

		    if (ndecl != error_mark_node)
		      cp_maybe_mangle_decomp (ndecl, first, cnt);

		    /* Propagate an explicit asm("...") register name from
		       the pattern (stored as "*NAME").  */
		    if (VAR_P (decl) && DECL_HARD_REGISTER (pattern_decl))
		      {
			tree id = DECL_ASSEMBLER_NAME (pattern_decl);
			const char *asmspec = IDENTIFIER_POINTER (id);
			gcc_assert (asmspec[0] == '*');
			asmspec_tree
			  = build_string (IDENTIFIER_LENGTH (id) - 1,
					  asmspec + 1);
			TREE_TYPE (asmspec_tree) = char_array_type_node;
		      }

		    cp_finish_decl (decl, init, const_init, asmspec_tree,
				    constinit_p ? LOOKUP_CONSTINIT : 0);

		    if (ndecl != error_mark_node)
		      cp_finish_decomp (ndecl, first, cnt);
		  }
	      }
	  }

	break;
      }

    case FOR_STMT:
      stmt = begin_for_stmt (NULL_TREE, NULL_TREE);
      RECUR (FOR_INIT_STMT (t));
      finish_init_stmt (stmt);
      tmp = RECUR (FOR_COND (t));
      finish_for_cond (tmp, stmt, false, 0);
      tmp = RECUR (FOR_EXPR (t));
      finish_for_expr (tmp, stmt);
      {
	bool prev = note_iteration_stmt_body_start ();
	RECUR (FOR_BODY (t));
	note_iteration_stmt_body_end (prev);
      }
      finish_for_stmt (stmt);
      break;

    case RANGE_FOR_STMT:
      {
	/* Construct another range_for, if this is not a final
	   substitution (for inside a generic lambda of a
	   template).  Otherwise convert to a regular for.  */
	tree decl, expr;
	stmt = (processing_template_decl
		? begin_range_for_stmt (NULL_TREE, NULL_TREE)
		: begin_for_stmt (NULL_TREE, NULL_TREE));
	RECUR (RANGE_FOR_INIT_STMT (t));
	decl = RANGE_FOR_DECL (t);
	decl = tsubst (decl, args, complain, in_decl);
	maybe_push_decl (decl);
	expr = RECUR (RANGE_FOR_EXPR (t));

	tree decomp_first = NULL_TREE;
	unsigned decomp_cnt = 0;
	if (VAR_P (decl) && DECL_DECOMPOSITION_P (decl))
	  decl = tsubst_decomp_names (decl, RANGE_FOR_DECL (t), args,
				      complain, in_decl,
				      &decomp_first, &decomp_cnt);

	if (processing_template_decl)
	  {
	    RANGE_FOR_IVDEP (stmt) = RANGE_FOR_IVDEP (t);
	    RANGE_FOR_UNROLL (stmt) = RANGE_FOR_UNROLL (t);
	    finish_range_for_decl (stmt, decl, expr);
	    if (decomp_first && decl != error_mark_node)
	      cp_finish_decomp (decl, decomp_first, decomp_cnt);
	  }
	else
	  {
	    unsigned short unroll = (RANGE_FOR_UNROLL (t)
				     ? tree_to_uhwi (RANGE_FOR_UNROLL (t)) : 0);
	    stmt = cp_convert_range_for (stmt, decl, expr,
					 decomp_first, decomp_cnt,
					 RANGE_FOR_IVDEP (t), unroll);
	  }

	bool prev = note_iteration_stmt_body_start ();
	RECUR (RANGE_FOR_BODY (t));
	note_iteration_stmt_body_end (prev);
	finish_for_stmt (stmt);
      }
      break;

    case WHILE_STMT:
      stmt = begin_while_stmt ();
      tmp = RECUR (WHILE_COND (t));
      finish_while_stmt_cond (tmp, stmt, false, 0);
      {
	bool prev = note_iteration_stmt_body_start ();
	RECUR (WHILE_BODY (t));
	note_iteration_stmt_body_end (prev);
      }
      finish_while_stmt (stmt);
      break;

    case DO_STMT:
      stmt = begin_do_stmt ();
      {
	bool prev = note_iteration_stmt_body_start ();
	RECUR (DO_BODY (t));
	note_iteration_stmt_body_end (prev);
      }
      finish_do_body (stmt);
      tmp = RECUR (DO_COND (t));
      finish_do_stmt (tmp, stmt, false, 0);
      break;

    case IF_STMT:
      stmt = begin_if_stmt ();
      IF_STMT_CONSTEXPR_P (stmt) = IF_STMT_CONSTEXPR_P (t);
      if (IF_STMT_CONSTEXPR_P (t))
	args = add_extra_args (IF_STMT_EXTRA_ARGS (t), args);
      tmp = RECUR (IF_COND (t));
      tmp = finish_if_stmt_cond (tmp, stmt);
      if (IF_STMT_CONSTEXPR_P (t)
	  && instantiation_dependent_expression_p (tmp))
	{
	  /* We're partially instantiating a generic lambda, but the condition
	     of the constexpr if is still dependent.  Don't substitute into the
	     branches now, just remember the template arguments.  */
	  do_poplevel (IF_SCOPE (stmt));
	  IF_COND (stmt) = IF_COND (t);
	  THEN_CLAUSE (stmt) = THEN_CLAUSE (t);
	  ELSE_CLAUSE (stmt) = ELSE_CLAUSE (t);
	  IF_STMT_EXTRA_ARGS (stmt) = build_extra_args (t, args, complain);
	  add_stmt (stmt);
	  break;
	}
      if (IF_STMT_CONSTEXPR_P (t) && integer_zerop (tmp))
	/* Don't instantiate the THEN_CLAUSE. */;
      else
	{
	  /* Suppress evaluation warnings in a branch a constant condition
	     makes unreachable.  */
	  tree folded = fold_non_dependent_expr (tmp, complain);
	  bool inhibit = integer_zerop (folded);
	  if (inhibit)
	    ++c_inhibit_evaluation_warnings;
	  RECUR (THEN_CLAUSE (t));
	  if (inhibit)
	    --c_inhibit_evaluation_warnings;
	}
      finish_then_clause (stmt);

      if (IF_STMT_CONSTEXPR_P (t) && integer_nonzerop (tmp))
	/* Don't instantiate the ELSE_CLAUSE. */;
      else if (ELSE_CLAUSE (t))
	{
	  tree folded = fold_non_dependent_expr (tmp, complain);
	  bool inhibit = integer_nonzerop (folded);
	  begin_else_clause (stmt);
	  if (inhibit)
	    ++c_inhibit_evaluation_warnings;
	  RECUR (ELSE_CLAUSE (t));
	  if (inhibit)
	    --c_inhibit_evaluation_warnings;
	  finish_else_clause (stmt);
	}

      finish_if_stmt (stmt);
      break;

    case BIND_EXPR:
      if (BIND_EXPR_BODY_BLOCK (t))
	stmt = begin_function_body ();
      else
	stmt = begin_compound_stmt (BIND_EXPR_TRY_BLOCK (t)
				    ? BCS_TRY_BLOCK : 0);

      RECUR (BIND_EXPR_BODY (t));

      if (BIND_EXPR_BODY_BLOCK (t))
	finish_function_body (stmt);
      else
	finish_compound_stmt (stmt);
      break;

    case BREAK_STMT:
      finish_break_stmt ();
      break;

    case CONTINUE_STMT:
      finish_continue_stmt ();
      break;

    case SWITCH_STMT:
      stmt = begin_switch_stmt ();
      tmp = RECUR (SWITCH_STMT_COND (t));
      finish_switch_cond (tmp, stmt);
      RECUR (SWITCH_STMT_BODY (t));
      finish_switch_stmt (stmt);
      break;

    case CASE_LABEL_EXPR:
      {
	tree decl = CASE_LABEL (t);
	tree low = RECUR (CASE_LOW (t));
	tree high = RECUR (CASE_HIGH (t));
	tree l = finish_case_label (EXPR_LOCATION (t), low, high);
	if (l && TREE_CODE (l) == CASE_LABEL_EXPR)
	  {
	    tree label = CASE_LABEL (l);
	    FALLTHROUGH_LABEL_P (label) = FALLTHROUGH_LABEL_P (decl);
	    if (DECL_ATTRIBUTES (decl) != NULL_TREE)
	      cplus_decl_attributes (&label, DECL_ATTRIBUTES (decl), 0);
	  }
      }
      break;

    case LABEL_EXPR:
      {
	tree decl = LABEL_EXPR_LABEL (t);
	tree label;

	label = finish_label_stmt (DECL_NAME (decl));
	if (TREE_CODE (label) == LABEL_DECL)
	  FALLTHROUGH_LABEL_P (label) = FALLTHROUGH_LABEL_P (decl);
	if (DECL_ATTRIBUTES (decl) != NULL_TREE)
	  cplus_decl_attributes (&label, DECL_ATTRIBUTES (decl), 0);
      }
      break;

    case GOTO_EXPR:
      tmp = GOTO_DESTINATION (t);
      if (TREE_CODE (tmp) != LABEL_DECL)
	/* Computed goto's must be tsubst'd into.  On the other hand,
	   non-computed gotos must not be; the identifier in question
	   will have no binding.  */
	tmp = RECUR (tmp);
      else
	tmp = DECL_NAME (tmp);
      finish_goto_stmt (tmp);
      break;

    case ASM_EXPR:
      {
	tree string = RECUR (ASM_STRING (t));
	tree outputs = tsubst_copy_asm_operands (ASM_OUTPUTS (t), args,
						 complain, in_decl);
	tree inputs = tsubst_copy_asm_operands (ASM_INPUTS (t), args,
						complain, in_decl);
	tree clobbers = tsubst_copy_asm_operands (ASM_CLOBBERS (t), args,
						  complain, in_decl);
	tree labels = tsubst_copy_asm_operands (ASM_LABELS (t), args,
						complain, in_decl);
	tmp = finish_asm_stmt (EXPR_LOCATION (t), ASM_VOLATILE_P (t), string,
			       outputs, inputs, clobbers, labels,
			       ASM_INLINE_P (t));
	tree asm_expr = tmp;
	if (TREE_CODE (asm_expr) == CLEANUP_POINT_EXPR)
	  asm_expr = TREE_OPERAND (asm_expr, 0);
	ASM_INPUT_P (asm_expr) = ASM_INPUT_P (t);
      }
      break;

    case TRY_BLOCK:
      if (CLEANUP_P (t))
	{
	  stmt = begin_try_block ();
	  RECUR (TRY_STMTS (t));
	  finish_cleanup_try_block (stmt);
	  finish_cleanup (RECUR (TRY_HANDLERS (t)), stmt);
	}
      else
	{
	  tree compound_stmt = NULL_TREE;

	  if (FN_TRY_BLOCK_P (t))
	    stmt = begin_function_try_block (&compound_stmt);
	  else
	    stmt = begin_try_block ();

	  RECUR (TRY_STMTS (t));

	  if (FN_TRY_BLOCK_P (t))
	    finish_function_try_block (stmt);
	  else
	    finish_try_block (stmt);

	  RECUR (TRY_HANDLERS (t));
	  if (FN_TRY_BLOCK_P (t))
	    finish_function_handler_sequence (stmt, compound_stmt);
	  else
	    finish_handler_sequence (stmt);
	}
      break;

    case HANDLER:
      {
	tree decl = HANDLER_PARMS (t);

	if (decl)
	  {
	    decl = tsubst (decl, args, complain, in_decl);
	    /* Prevent instantiate_decl from trying to instantiate
	       this variable.  We've already done all that needs to be
	       done.  */
	    if (decl != error_mark_node)
	      DECL_TEMPLATE_INSTANTIATED (decl) = 1;
	  }
	stmt = begin_handler ();
	finish_handler_parms (decl, stmt);
	RECUR (HANDLER_BODY (t));
	finish_handler (stmt);
      }
      break;

    case TAG_DEFN:
      tmp = tsubst (TREE_TYPE (t), args, complain, NULL_TREE);
      if (CLASS_TYPE_P (tmp))
	{
	  /* Local classes are not independent templates; they are
	     instantiated along with their containing function.  And this
	     way we don't have to deal with pushing out of one local class
	     to instantiate a member of another local class.  */
	  /* Closures are handled by the LAMBDA_EXPR.  */
	  gcc_assert (!LAMBDA_TYPE_P (TREE_TYPE (t)));
	  complete_type (tmp);
	  for (tree fld = TYPE_FIELDS (tmp); fld; fld = DECL_CHAIN (fld))
	    if ((VAR_P (fld)
		 || (TREE_CODE (fld) == FUNCTION_DECL
		     && !DECL_ARTIFICIAL (fld)))
		&& DECL_TEMPLATE_INSTANTIATION (fld))
	      instantiate_decl (fld, /*defer_ok=*/false,
				/*expl_inst_class=*/false);
	}
      break;

    case STATIC_ASSERT:
      {
	tree condition;

	/* The condition is substituted in an evaluation-warning-free
	   context as an integral constant expression.  */
	++c_inhibit_evaluation_warnings;
	condition =
	  tsubst_expr (STATIC_ASSERT_CONDITION (t),
		       args,
		       complain, in_decl,
		       /*integral_constant_expression_p=*/true);
	--c_inhibit_evaluation_warnings;

	finish_static_assert (condition,
			      STATIC_ASSERT_MESSAGE (t),
			      STATIC_ASSERT_SOURCE_LOCATION (t),
			      /*member_p=*/false);
      }
      break;

    /* OpenACC / OpenMP constructs: substitute the clauses, then the
       body, and rebuild the construct with semantics.cc helpers.  */
    case OACC_KERNELS:
    case OACC_PARALLEL:
    case OACC_SERIAL:
      tmp = tsubst_omp_clauses (OMP_CLAUSES (t), C_ORT_ACC, args, complain,
				in_decl);
      stmt = begin_omp_parallel ();
      RECUR (OMP_BODY (t));
      finish_omp_construct (TREE_CODE (t), stmt, tmp);
      break;

    case OMP_PARALLEL:
      r = push_omp_privatization_clauses (OMP_PARALLEL_COMBINED (t));
      tmp = tsubst_omp_clauses (OMP_PARALLEL_CLAUSES (t), C_ORT_OMP, args,
				complain, in_decl);
      if (OMP_PARALLEL_COMBINED (t))
	omp_parallel_combined_clauses = &tmp;
      stmt = begin_omp_parallel ();
      RECUR (OMP_PARALLEL_BODY (t));
      gcc_assert (omp_parallel_combined_clauses == NULL);
      OMP_PARALLEL_COMBINED (finish_omp_parallel (tmp, stmt))
	= OMP_PARALLEL_COMBINED (t);
      pop_omp_privatization_clauses (r);
      break;

    case OMP_TASK:
      if (OMP_TASK_BODY (t) == NULL_TREE)
	{
	  tmp = tsubst_omp_clauses (OMP_TASK_CLAUSES (t), C_ORT_OMP, args,
				    complain, in_decl);
	  t = copy_node (t);
	  OMP_TASK_CLAUSES (t) = tmp;
	  add_stmt (t);
	  break;
	}
      r = push_omp_privatization_clauses (false);
      tmp = tsubst_omp_clauses (OMP_TASK_CLAUSES (t), C_ORT_OMP, args,
				complain, in_decl);
      stmt = begin_omp_task ();
      RECUR (OMP_TASK_BODY (t));
      finish_omp_task (tmp, stmt);
      pop_omp_privatization_clauses (r);
      break;

    case OMP_FOR:
    case OMP_LOOP:
    case OMP_SIMD:
    case OMP_DISTRIBUTE:
    case OMP_TASKLOOP:
    case OACC_LOOP:
      {
	tree clauses, body, pre_body;
	tree declv = NULL_TREE, initv = NULL_TREE, condv = NULL_TREE;
	tree orig_declv = NULL_TREE;
	tree incrv = NULL_TREE;
	enum c_omp_region_type ort = C_ORT_OMP;
	bool any_range_for = false;
	int i;

	if (TREE_CODE (t) == OACC_LOOP)
	  ort = C_ORT_ACC;

	r = push_omp_privatization_clauses (OMP_FOR_INIT (t) == NULL_TREE);
	clauses = tsubst_omp_clauses (OMP_FOR_CLAUSES (t), ort, args, complain,
				      in_decl);
	if (OMP_FOR_INIT (t) != NULL_TREE)
	  {
	    declv = make_tree_vec (TREE_VEC_LENGTH (OMP_FOR_INIT (t)));
	    if (OMP_FOR_ORIG_DECLS (t))
	      orig_declv = make_tree_vec (TREE_VEC_LENGTH (OMP_FOR_INIT (t)));
	    initv = make_tree_vec (TREE_VEC_LENGTH (OMP_FOR_INIT (t)));
	    condv = make_tree_vec (TREE_VEC_LENGTH (OMP_FOR_INIT (t)));
	    incrv = make_tree_vec (TREE_VEC_LENGTH (OMP_FOR_INIT (t)));
	  }

	keep_next_level (true);
	stmt = begin_omp_structured_block ();

	pre_body = push_stmt_list ();
	RECUR (OMP_FOR_PRE_BODY (t));
	pre_body = pop_stmt_list (pre_body);

	if (OMP_FOR_INIT (t) != NULL_TREE)
	  for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (t)); i++)
	    any_range_for
	      |= tsubst_omp_for_iterator (t, i, declv, orig_declv, initv,
					  condv, incrv, &clauses, args,
					  complain, in_decl,
					  integral_constant_expression_p);
	omp_parallel_combined_clauses = NULL;

	if (any_range_for)
	  {
	    gcc_assert (orig_declv);
	    body = begin_omp_structured_block ();
	    for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (t)); i++)
	      if (TREE_VEC_ELT (orig_declv, i) != TREE_VEC_ELT (declv, i)
		  && TREE_CODE (TREE_VEC_ELT (orig_declv, i)) == TREE_LIST
		  && TREE_CHAIN (TREE_VEC_ELT (orig_declv, i)))
		cp_finish_omp_range_for (TREE_VEC_ELT (orig_declv, i),
					 TREE_VEC_ELT (declv, i));
	  }
	else
	  body = push_stmt_list ();
	RECUR (OMP_FOR_BODY (t));
	if (any_range_for)
	  body = finish_omp_structured_block (body);
	else
	  body = pop_stmt_list (body);

	if (OMP_FOR_INIT (t) != NULL_TREE)
	  t = finish_omp_for (EXPR_LOCATION (t), TREE_CODE (t), declv,
			      orig_declv, initv, condv, incrv, body, pre_body,
			      NULL, clauses);
	else
	  {
	    t = make_node (TREE_CODE (t));
	    TREE_TYPE (t) = void_type_node;
	    OMP_FOR_BODY (t) = body;
	    OMP_FOR_PRE_BODY (t) = pre_body;
	    OMP_FOR_CLAUSES (t) = clauses;
	    SET_EXPR_LOCATION (t, EXPR_LOCATION (t));
	    add_stmt (t);
	  }

	add_stmt (finish_omp_for_block (finish_omp_structured_block (stmt),
					t));
	pop_omp_privatization_clauses (r);
      }
      break;

    case OMP_SECTIONS:
      omp_parallel_combined_clauses = NULL;
      /* FALLTHRU */
    case OMP_SINGLE:
    case OMP_TEAMS:
    case OMP_CRITICAL:
    case OMP_TASKGROUP:
    case OMP_SCAN:
      r = push_omp_privatization_clauses (TREE_CODE (t) == OMP_TEAMS
					  && OMP_TEAMS_COMBINED (t));
      tmp = tsubst_omp_clauses (OMP_CLAUSES (t), C_ORT_OMP, args, complain,
				in_decl);
      if (TREE_CODE (t) == OMP_TEAMS)
	{
	  keep_next_level (true);
	  stmt = begin_omp_structured_block ();
	  RECUR (OMP_BODY (t));
	  stmt = finish_omp_structured_block (stmt);
	}
      else
	{
	  stmt = push_stmt_list ();
	  RECUR (OMP_BODY (t));
	  stmt = pop_stmt_list (stmt);
	}

      t = copy_node (t);
      OMP_BODY (t) = stmt;
      OMP_CLAUSES (t) = tmp;
      add_stmt (t);
      pop_omp_privatization_clauses (r);
      break;

    case OMP_DEPOBJ:
      r = RECUR (OMP_DEPOBJ_DEPOBJ (t));
      if (OMP_DEPOBJ_CLAUSES (t) && OMP_DEPOBJ_CLAUSES (t) != error_mark_node)
	{
	  enum omp_clause_depend_kind kind = OMP_CLAUSE_DEPEND_SOURCE;
	  if (TREE_CODE (OMP_DEPOBJ_CLAUSES (t)) == OMP_CLAUSE)
	    {
	      tmp = tsubst_omp_clauses (OMP_DEPOBJ_CLAUSES (t), C_ORT_OMP,
					args, complain, in_decl);
	      if (tmp == NULL_TREE)
		tmp = error_mark_node;
	    }
	  else
	    {
	      /* Clauses were stored as an integer encoding of the
		 depend kind (destroy/update) rather than a clause.  */
	      kind = (enum omp_clause_depend_kind)
		     tree_to_uhwi (OMP_DEPOBJ_CLAUSES (t));
	      tmp = NULL_TREE;
	    }
	  finish_omp_depobj (EXPR_LOCATION (t), r, kind, tmp);
	}
      else
	finish_omp_depobj (EXPR_LOCATION (t), r,
			   OMP_CLAUSE_DEPEND_SOURCE,
			   OMP_DEPOBJ_CLAUSES (t));
      break;

    case OACC_DATA:
    case OMP_TARGET_DATA:
    case OMP_TARGET:
      tmp = tsubst_omp_clauses (OMP_CLAUSES (t), (TREE_CODE (t) == OACC_DATA)
				? C_ORT_ACC : C_ORT_OMP, args, complain,
				in_decl);
      keep_next_level (true);
      stmt = begin_omp_structured_block ();

      RECUR (OMP_BODY (t));
      stmt = finish_omp_structured_block (stmt);

      t = copy_node (t);
      OMP_BODY (t) = stmt;
      OMP_CLAUSES (t) = tmp;
      if (TREE_CODE (t) == OMP_TARGET && OMP_TARGET_COMBINED (t))
	{
	  tree teams = cp_walk_tree (&stmt, tsubst_find_omp_teams, NULL, NULL);
	  if (teams)
	    {
	      /* For combined target teams, ensure the num_teams and
		 thread_limit clause expressions are evaluated on the host,
		 before entering the target construct.  */
	      tree c;
	      for (c = OMP_TEAMS_CLAUSES (teams);
		   c; c = OMP_CLAUSE_CHAIN (c))
		if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_NUM_TEAMS
		     || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_THREAD_LIMIT)
		    && TREE_CODE (OMP_CLAUSE_OPERAND (c, 0)) != INTEGER_CST)
		  {
		    tree expr = OMP_CLAUSE_OPERAND (c, 0);
		    expr = force_target_expr (TREE_TYPE (expr), expr, tf_none);
		    if (expr == error_mark_node)
		      continue;
		    tmp = TARGET_EXPR_SLOT (expr);
		    add_stmt (expr);
		    OMP_CLAUSE_OPERAND (c, 0) = expr;
		    tree tc = build_omp_clause (OMP_CLAUSE_LOCATION (c),
						OMP_CLAUSE_FIRSTPRIVATE);
		    OMP_CLAUSE_DECL (tc) = tmp;
		    OMP_CLAUSE_CHAIN (tc) = OMP_TARGET_CLAUSES (t);
		    OMP_TARGET_CLAUSES (t) = tc;
		  }
	    }
	}
      add_stmt (t);
      break;

    case OACC_DECLARE:
      t = copy_node (t);
      tmp = tsubst_omp_clauses (OACC_DECLARE_CLAUSES (t), C_ORT_ACC, args,
				complain, in_decl);
      OACC_DECLARE_CLAUSES (t) = tmp;
      add_stmt (t);
      break;

    case OMP_TARGET_UPDATE:
    case OMP_TARGET_ENTER_DATA:
    case OMP_TARGET_EXIT_DATA:
      tmp = tsubst_omp_clauses (OMP_STANDALONE_CLAUSES (t), C_ORT_OMP, args,
				complain, in_decl);
      t = copy_node (t);
      OMP_STANDALONE_CLAUSES (t) = tmp;
      add_stmt (t);
      break;

    case OACC_CACHE:
    case OACC_ENTER_DATA:
    case OACC_EXIT_DATA:
    case OACC_UPDATE:
      tmp = tsubst_omp_clauses (OMP_STANDALONE_CLAUSES (t), C_ORT_ACC, args,
				complain, in_decl);
      t = copy_node (t);
      OMP_STANDALONE_CLAUSES (t) = tmp;
      add_stmt (t);
      break;

    case OMP_ORDERED:
      tmp = tsubst_omp_clauses (OMP_ORDERED_CLAUSES (t), C_ORT_OMP, args,
				complain, in_decl);
      stmt = push_stmt_list ();
      RECUR (OMP_BODY (t));
      stmt = pop_stmt_list (stmt);

      t = copy_node (t);
      OMP_BODY (t) = stmt;
      OMP_ORDERED_CLAUSES (t) = tmp;
      add_stmt (t);
      break;

    case OMP_MASTER:
      omp_parallel_combined_clauses = NULL;
      /* FALLTHRU */
    case OMP_SECTION:
      stmt = push_stmt_list ();
      RECUR (OMP_BODY (t));
      stmt = pop_stmt_list (stmt);

      t = copy_node (t);
      OMP_BODY (t) = stmt;
      add_stmt (t);
      break;

    case OMP_ATOMIC:
      gcc_assert (OMP_ATOMIC_DEPENDENT_P (t));
      tmp = NULL_TREE;
      if (TREE_CODE (TREE_OPERAND (t, 0)) == OMP_CLAUSE)
	tmp = tsubst_omp_clauses (TREE_OPERAND (t, 0), C_ORT_OMP, args,
				  complain, in_decl);
      if (TREE_CODE (TREE_OPERAND (t, 1)) != MODIFY_EXPR)
	{
	  tree op1 = TREE_OPERAND (t, 1);
	  tree rhs1 = NULL_TREE;
	  tree lhs, rhs;
	  if (TREE_CODE (op1) == COMPOUND_EXPR)
	    {
	      rhs1 = RECUR (TREE_OPERAND (op1, 0));
	      op1 = TREE_OPERAND (op1, 1);
	    }
	  lhs = RECUR (TREE_OPERAND (op1, 0));
	  rhs = RECUR (TREE_OPERAND (op1, 1));
	  finish_omp_atomic (EXPR_LOCATION (t), OMP_ATOMIC, TREE_CODE (op1),
			     lhs, rhs, NULL_TREE, NULL_TREE, rhs1, tmp,
			     OMP_ATOMIC_MEMORY_ORDER (t));
	}
      else
	{
	  tree op1 = TREE_OPERAND (t, 1);
	  tree v = NULL_TREE, lhs, rhs = NULL_TREE, lhs1 = NULL_TREE;
	  tree rhs1 = NULL_TREE;
	  enum tree_code code = TREE_CODE (TREE_OPERAND (op1, 1));
	  enum tree_code opcode = NOP_EXPR;
	  if (code == OMP_ATOMIC_READ)
	    {
	      v = RECUR (TREE_OPERAND (op1, 0));
	      lhs = RECUR (TREE_OPERAND (TREE_OPERAND (op1, 1), 0));
	    }
	  else if (code == OMP_ATOMIC_CAPTURE_OLD
		   || code == OMP_ATOMIC_CAPTURE_NEW)
	    {
	      tree op11 = TREE_OPERAND (TREE_OPERAND (op1, 1), 1);
	      v = RECUR (TREE_OPERAND (op1, 0));
	      lhs1 = RECUR (TREE_OPERAND (TREE_OPERAND (op1, 1), 0));
	      if (TREE_CODE (op11) == COMPOUND_EXPR)
		{
		  rhs1 = RECUR (TREE_OPERAND (op11, 0));
		  op11 = TREE_OPERAND (op11, 1);
		}
	      lhs = RECUR (TREE_OPERAND (op11, 0));
	      rhs = RECUR (TREE_OPERAND (op11, 1));
	      opcode = TREE_CODE (op11);
	      if (opcode == MODIFY_EXPR)
		opcode = NOP_EXPR;
	    }
	  else
	    {
	      code = OMP_ATOMIC;
	      lhs = RECUR (TREE_OPERAND (op1, 0));
	      rhs = RECUR (TREE_OPERAND (op1, 1));
	    }
	  finish_omp_atomic (EXPR_LOCATION (t), code, opcode, lhs, rhs, v,
			     lhs1, rhs1, tmp, OMP_ATOMIC_MEMORY_ORDER (t));
	}
      break;

    case TRANSACTION_EXPR:
      {
	int flags = 0;
	flags |= (TRANSACTION_EXPR_OUTER (t) ? TM_STMT_ATTR_OUTER : 0);
	flags |= (TRANSACTION_EXPR_RELAXED (t) ? TM_STMT_ATTR_RELAXED : 0);

	if (TRANSACTION_EXPR_IS_STMT (t))
	  {
	    tree body = TRANSACTION_EXPR_BODY (t);
	    tree noex = NULL_TREE;
	    if (TREE_CODE (body) == MUST_NOT_THROW_EXPR)
	      {
		noex = MUST_NOT_THROW_COND (body);
		if (noex == NULL_TREE)
		  noex = boolean_true_node;
		body = TREE_OPERAND (body, 0);
	      }
	    stmt = begin_transaction_stmt (input_location, NULL, flags);
	    RECUR (body);
	    finish_transaction_stmt (stmt, NULL, flags, RECUR (noex));
	  }
	else
	  {
	    stmt = build_transaction_expr (EXPR_LOCATION (t),
					   RECUR (TRANSACTION_EXPR_BODY (t)),
					   flags, NULL_TREE);
	    RETURN (stmt);
	  }
      }
      break;

    case MUST_NOT_THROW_EXPR:
      {
	tree op0 = RECUR (TREE_OPERAND (t, 0));
	tree cond = RECUR (MUST_NOT_THROW_COND (t));
	RETURN (build_must_not_throw_expr (op0, cond));
      }

    case EXPR_PACK_EXPANSION:
      error ("invalid use of pack expansion expression");
      RETURN (error_mark_node);

    case NONTYPE_ARGUMENT_PACK:
      error ("use %<...%> to expand argument pack");
      RETURN (error_mark_node);

    case COMPOUND_EXPR:
      tmp = RECUR (TREE_OPERAND (t, 0));
      if (tmp == NULL_TREE)
	/* If the first operand was a statement, we're done with it.  */
	RETURN (RECUR (TREE_OPERAND (t, 1)));
      RETURN (build_x_compound_expr (EXPR_LOCATION (t), tmp,
				     RECUR (TREE_OPERAND (t, 1)),
				     complain));

    case ANNOTATE_EXPR:
      tmp = RECUR (TREE_OPERAND (t, 0));
      RETURN (build3_loc (EXPR_LOCATION (t), ANNOTATE_EXPR,
			  TREE_TYPE (tmp), tmp,
			  RECUR (TREE_OPERAND (t, 1)),
			  RECUR (TREE_OPERAND (t, 2))));

    case PREDICT_EXPR:
      RETURN (add_stmt (copy_node (t)));

    default:
      /* Anything not handled above must be an expression, not a
	 statement; hand it to the expression substituter.  */
      gcc_assert (!STATEMENT_CODE_P (TREE_CODE (t)));

      RETURN (tsubst_copy_and_build (t, args, complain, in_decl,
				     /*function_p=*/false,
				     integral_constant_expression_p));
    }

  RETURN (NULL_TREE);
 out:
  /* Restore the diagnostic location saved on entry.  */
  input_location = loc;
  return r;
#undef RECUR
#undef RETURN
}
/* Instantiate the special body of the artificial DECL_OMP_DECLARE_REDUCTION
function. For description of the body see comment above
cp_parser_omp_declare_reduction_exprs. */
static void
tsubst_omp_udr (tree t, tree args, tsubst_flags_t complain, tree in_decl)
{
  if (t == NULL_TREE || t == error_mark_node)
    return;

  gcc_assert (TREE_CODE (t) == STATEMENT_LIST);

  /* The artificial body is a STATEMENT_LIST of at most 7 statements laid
     out by the parser (see cp_parser_omp_declare_reduction_exprs):
     stmts[0..2] declare omp_out/omp_in and hold the combiner statement;
     stmts[3..5] declare omp_priv/omp_orig and hold the initializer;
     a 7th statement, when present, re-emits omp_orig.  */
  tree_stmt_iterator tsi;
  int i;
  tree stmts[7];
  memset (stmts, 0, sizeof stmts);
  for (i = 0, tsi = tsi_start (t);
       i < 7 && !tsi_end_p (tsi);
       i++, tsi_next (&tsi))
    stmts[i] = tsi_stmt (tsi);
  gcc_assert (tsi_end_p (tsi));

  if (i >= 3)
    {
      gcc_assert (TREE_CODE (stmts[0]) == DECL_EXPR
		  && TREE_CODE (stmts[1]) == DECL_EXPR);
      /* Substitute the omp_out/omp_in placeholder decls and re-parent
	 them into the function being instantiated.  */
      tree omp_out = tsubst (DECL_EXPR_DECL (stmts[0]),
			     args, complain, in_decl);
      tree omp_in = tsubst (DECL_EXPR_DECL (stmts[1]),
			    args, complain, in_decl);
      DECL_CONTEXT (omp_out) = current_function_decl;
      DECL_CONTEXT (omp_in) = current_function_decl;
      /* The combiner statement is substituted inside its own OMP
	 structured block.  */
      keep_next_level (true);
      tree block = begin_omp_structured_block ();
      tsubst_expr (stmts[2], args, complain, in_decl, false);
      block = finish_omp_structured_block (block);
      block = maybe_cleanup_point_expr_void (block);
      add_decl_expr (omp_out);
      if (TREE_NO_WARNING (DECL_EXPR_DECL (stmts[0])))
	TREE_NO_WARNING (omp_out) = 1;
      add_decl_expr (omp_in);
      finish_expr_stmt (block);
    }
  if (i >= 6)
    {
      gcc_assert (TREE_CODE (stmts[3]) == DECL_EXPR
		  && TREE_CODE (stmts[4]) == DECL_EXPR);
      /* Same treatment for the initializer half: omp_priv/omp_orig
	 decls plus the initializer statement.  */
      tree omp_priv = tsubst (DECL_EXPR_DECL (stmts[3]),
			      args, complain, in_decl);
      tree omp_orig = tsubst (DECL_EXPR_DECL (stmts[4]),
			      args, complain, in_decl);
      DECL_CONTEXT (omp_priv) = current_function_decl;
      DECL_CONTEXT (omp_orig) = current_function_decl;
      keep_next_level (true);
      tree block = begin_omp_structured_block ();
      tsubst_expr (stmts[5], args, complain, in_decl, false);
      block = finish_omp_structured_block (block);
      block = maybe_cleanup_point_expr_void (block);
      /* Strip the cleanup the parser attached to omp_priv; the runtime
	 handles its destruction.  */
      cp_walk_tree (&block, cp_remove_omp_priv_cleanup_stmt, omp_priv, NULL);
      add_decl_expr (omp_priv);
      add_decl_expr (omp_orig);
      finish_expr_stmt (block);
      if (i == 7)
	add_decl_expr (omp_orig);
    }
}
/* T is a postfix-expression that is not being used in a function
call. Return the substituted version of T. */
/* T is a postfix-expression that is not being used in a function
   call.  Return the substituted version of T: qualified-ids go
   through tsubst_qualified_id, everything else through the general
   expression substituter.  */

static tree
tsubst_non_call_postfix_expression (tree t, tree args,
				    tsubst_flags_t complain,
				    tree in_decl)
{
  if (TREE_CODE (t) == SCOPE_REF)
    return tsubst_qualified_id (t, args, complain, in_decl,
				/*done=*/false, /*address_p=*/false);

  return tsubst_copy_and_build (t, args, complain, in_decl,
				/*function_p=*/false,
				/*integral_constant_expression_p=*/false);
}
/* Subroutine of tsubst_lambda_expr: add the FIELD/INIT capture pair to the
LAMBDA_EXPR_CAPTURE_LIST passed in LIST. Do deduction for a previously
dependent init-capture. */
/* Subroutine of tsubst_lambda_expr: prepend the FIELD/INIT capture pair
   to the LAMBDA_EXPR_CAPTURE_LIST passed in LIST.  If FIELD's type still
   uses 'auto' (a previously dependent init-capture), deduce it from INIT
   first and apply the deduced cv-qualifiers to FIELD.  */

static void
prepend_one_capture (tree field, tree init, tree &list,
		     tsubst_flags_t complain)
{
  tree auto_node = type_uses_auto (TREE_TYPE (field));
  if (auto_node)
    {
      /* Normalize INIT before deduction.  */
      if (init == NULL_TREE)
	{
	  if (complain & tf_error)
	    error ("empty initializer in lambda init-capture");
	  init = error_mark_node;
	}
      else if (TREE_CODE (init) == TREE_LIST)
	init = build_x_compound_expr_from_list (init, ELK_INIT, complain);

      tree deduced
	= do_auto_deduction (TREE_TYPE (field), init, auto_node, complain);
      TREE_TYPE (field) = deduced;
      cp_apply_type_quals_to_decl (cp_type_quals (deduced), field);
    }
  list = tree_cons (field, init, list);
}
/* T is a LAMBDA_EXPR. Generate a new LAMBDA_EXPR for the current
instantiation context. Instantiating a pack expansion containing a lambda
might result in multiple lambdas all based on the same lambda in the
template. */
tree
tsubst_lambda_expr (tree t, tree args, tsubst_flags_t complain, tree in_decl)
{
  tree oldfn = lambda_function (t);
  in_decl = oldfn;

  /* Start a fresh LAMBDA_EXPR and copy over the pattern's metadata.  */
  tree r = build_lambda_expr ();

  LAMBDA_EXPR_LOCATION (r)
    = LAMBDA_EXPR_LOCATION (t);
  LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (r)
    = LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (t);
  LAMBDA_EXPR_MUTABLE_P (r) = LAMBDA_EXPR_MUTABLE_P (t);
  LAMBDA_EXPR_INSTANTIATED (r) = true;

  if (LAMBDA_EXPR_EXTRA_SCOPE (t) == NULL_TREE)
    /* A lambda in a default argument outside a class gets no
       LAMBDA_EXPR_EXTRA_SCOPE, as specified by the ABI.  But
       tsubst_default_argument calls start_lambda_scope, so we need to
       specifically ignore it here, and use the global scope.  */
    record_null_lambda_scope (r);
  else
    record_lambda_scope (r);

  gcc_assert (LAMBDA_EXPR_THIS_CAPTURE (t) == NULL_TREE
	      && LAMBDA_EXPR_PENDING_PROXIES (t) == NULL);

  /* Pairs of (pattern FIELD_DECL pack, instantiated fields), pushed two
     at a time; consumed below once local_specializations is set up.  */
  vec<tree,va_gc>* field_packs = NULL;

  /* Substitute each capture: first the initializer, then the closure
     field, and prepend the pair(s) to R's capture list.  */
  for (tree cap = LAMBDA_EXPR_CAPTURE_LIST (t); cap;
       cap = TREE_CHAIN (cap))
    {
      tree ofield = TREE_PURPOSE (cap);
      tree init = TREE_VALUE (cap);
      if (PACK_EXPANSION_P (init))
	init = tsubst_pack_expansion (init, args, complain, in_decl);
      else
	init = tsubst_copy_and_build (init, args, complain, in_decl,
				      /*fn*/false, /*constexpr*/false);

      if (init == error_mark_node)
	return error_mark_node;

      if (init && TREE_CODE (init) == TREE_LIST)
	init = build_x_compound_expr_from_list (init, ELK_INIT, complain);

      if (!processing_template_decl
	  && init && TREE_CODE (init) != TREE_VEC
	  && variably_modified_type_p (TREE_TYPE (init), NULL_TREE))
	{
	  /* For a VLA, simply tsubsting the field type won't work, we need to
	     go through add_capture again.  XXX do we want to do this for all
	     captures?  */
	  tree name = (get_identifier
		       (IDENTIFIER_POINTER (DECL_NAME (ofield)) + 2));
	  tree ftype = TREE_TYPE (ofield);
	  bool by_ref = (TYPE_REF_P (ftype)
			 || (TREE_CODE (ftype) == DECLTYPE_TYPE
			     && DECLTYPE_FOR_REF_CAPTURE (ftype)));
	  add_capture (r, name, init, by_ref, !DECL_NORMAL_CAPTURE_P (ofield));
	  continue;
	}

      if (PACK_EXPANSION_P (ofield))
	ofield = PACK_EXPANSION_PATTERN (ofield);
      tree field = tsubst_decl (ofield, args, complain);

      if (DECL_PACK_P (ofield) && !DECL_NORMAL_CAPTURE_P (ofield))
	{
	  /* Remember these for when we've pushed local_specializations.  */
	  vec_safe_push (field_packs, ofield);
	  vec_safe_push (field_packs, field);
	}

      if (field == error_mark_node)
	return error_mark_node;

      if (TREE_CODE (field) == TREE_VEC)
	{
	  /* A pack capture expanded to several fields; pair each with
	     its corresponding substituted initializer.  */
	  int len = TREE_VEC_LENGTH (field);
	  gcc_assert (TREE_CODE (init) == TREE_VEC
		      && TREE_VEC_LENGTH (init) == len);
	  for (int i = 0; i < len; ++i)
	    prepend_one_capture (TREE_VEC_ELT (field, i),
				 TREE_VEC_ELT (init, i),
				 LAMBDA_EXPR_CAPTURE_LIST (r),
				 complain);
	}
      else
	{
	  prepend_one_capture (field, init, LAMBDA_EXPR_CAPTURE_LIST (r),
			       complain);

	  if (id_equal (DECL_NAME (field), "__this"))
	    LAMBDA_EXPR_THIS_CAPTURE (r) = field;
	}
    }

  tree type = begin_lambda_type (r);
  if (type == error_mark_node)
    return error_mark_node;

  /* Do this again now that LAMBDA_EXPR_EXTRA_SCOPE is set.  */
  determine_visibility (TYPE_NAME (type));

  register_capture_members (LAMBDA_EXPR_CAPTURE_LIST (r));

  tree oldtmpl = (generic_lambda_fn_p (oldfn)
		  ? DECL_TI_TEMPLATE (oldfn)
		  : NULL_TREE);

  /* Substitute the call operator's type; a generic lambda stays a
     template, so bump processing_template_decl around the tsubst.  */
  tree fntype = static_fn_type (oldfn);
  if (oldtmpl)
    ++processing_template_decl;
  fntype = tsubst (fntype, args, complain, in_decl);
  if (oldtmpl)
    --processing_template_decl;

  if (fntype == error_mark_node)
    r = error_mark_node;
  else
    {
      /* The body of a lambda-expression is not a subexpression of the
	 enclosing expression.  Parms are to have DECL_CHAIN tsubsted,
	 which would be skipped if cp_unevaluated_operand.  */
      cp_evaluated ev;

      /* Fix the type of 'this'.  */
      fntype = build_memfn_type (fntype, type,
				 type_memfn_quals (fntype),
				 type_memfn_rqual (fntype));
      tree fn, tmpl;
      if (oldtmpl)
	{
	  tmpl = tsubst_template_decl (oldtmpl, args, complain, fntype);
	  if (tmpl == error_mark_node)
	    {
	      r = error_mark_node;
	      goto out;
	    }
	  fn = DECL_TEMPLATE_RESULT (tmpl);
	  finish_member_declaration (tmpl);
	}
      else
	{
	  tmpl = NULL_TREE;
	  fn = tsubst_function_decl (oldfn, args, complain, fntype);
	  if (fn == error_mark_node)
	    {
	      r = error_mark_node;
	      goto out;
	    }
	  finish_member_declaration (fn);
	}

      if (tree ci = get_constraints (oldfn))
	{
	  /* Substitute into the lambda's constraints.  */
	  if (oldtmpl)
	    ++processing_template_decl;
	  ci = tsubst_constraint_info (ci, args, complain, in_decl);
	  if (oldtmpl)
	    --processing_template_decl;
	  set_constraints (fn, ci);
	}

      /* Let finish_function set this.  */
      DECL_DECLARED_CONSTEXPR_P (fn) = false;

      bool nested = cfun;
      if (nested)
	push_function_context ();
      else
	/* Still increment function_depth so that we don't GC in the
	   middle of an expression.  */
	++function_depth;

      local_specialization_stack s (lss_copy);

      tree body = start_lambda_function (fn, r);

      /* Now record them for lookup_init_capture_pack.  */
      int fplen = vec_safe_length (field_packs);
      for (int i = 0; i < fplen; )
	{
	  tree pack = (*field_packs)[i++];
	  tree inst = (*field_packs)[i++];
	  register_local_specialization (inst, pack);
	}
      release_tree_vector (field_packs);

      register_parameter_specializations (oldfn, fn);

      if (oldtmpl)
	{
	  /* We might not partially instantiate some parts of the function, so
	     copy these flags from the original template.  */
	  language_function *ol = DECL_STRUCT_FUNCTION (oldfn)->language;
	  current_function_returns_value = ol->returns_value;
	  current_function_returns_null = ol->returns_null;
	  current_function_returns_abnormally = ol->returns_abnormally;
	  current_function_infinite_loop = ol->infinite_loop;
	}

      /* [temp.deduct] A lambda-expression appearing in a function type or a
	 template parameter is not considered part of the immediate context for
	 the purposes of template argument deduction.  */
      complain = tf_warning_or_error;

      tsubst_expr (DECL_SAVED_TREE (oldfn), args, complain, r,
		   /*constexpr*/false);

      finish_lambda_function (body);

      if (nested)
	pop_function_context ();
      else
	--function_depth;

      /* The capture list was built up in reverse order; fix that now.  */
      LAMBDA_EXPR_CAPTURE_LIST (r)
	= nreverse (LAMBDA_EXPR_CAPTURE_LIST (r));

      LAMBDA_EXPR_THIS_CAPTURE (r) = NULL_TREE;

      maybe_add_lambda_conv_op (type);
    }

out:
  /* Close the closure type even on the error paths above.  */
  finish_struct (type, /*attr*/NULL_TREE);

  insert_pending_capture_proxies ();

  return r;
}
/* Like tsubst but deals with expressions and performs semantic
analysis. FUNCTION_P is true if T is the "F" in "F (ARGS)" or
"F<TARGS> (ARGS)". */
tree
tsubst_copy_and_build (tree t,
tree args,
tsubst_flags_t complain,
tree in_decl,
bool function_p,
bool integral_constant_expression_p)
{
#define RETURN(EXP) do { retval = (EXP); goto out; } while(0)
#define RECUR(NODE) \
tsubst_copy_and_build (NODE, args, complain, in_decl, \
/*function_p=*/false, \
integral_constant_expression_p)
tree retval, op1;
location_t save_loc;
if (t == NULL_TREE || t == error_mark_node)
return t;
save_loc = input_location;
if (location_t eloc = cp_expr_location (t))
input_location = eloc;
/* N3276 decltype magic only applies to calls at the top level or on the
right side of a comma. */
tsubst_flags_t decltype_flag = (complain & tf_decltype);
complain &= ~tf_decltype;
switch (TREE_CODE (t))
{
case USING_DECL:
t = DECL_NAME (t);
/* Fall through. */
case IDENTIFIER_NODE:
{
tree decl;
cp_id_kind idk;
bool non_integral_constant_expression_p;
const char *error_msg;
if (IDENTIFIER_CONV_OP_P (t))
{
tree new_type = tsubst (TREE_TYPE (t), args, complain, in_decl);
t = make_conv_op_name (new_type);
}
/* Look up the name. */
decl = lookup_name (t);
/* By convention, expressions use ERROR_MARK_NODE to indicate
failure, not NULL_TREE. */
if (decl == NULL_TREE)
decl = error_mark_node;
decl = finish_id_expression (t, decl, NULL_TREE,
&idk,
integral_constant_expression_p,
/*allow_non_integral_constant_expression_p=*/(cxx_dialect >= cxx11),
&non_integral_constant_expression_p,
/*template_p=*/false,
/*done=*/true,
/*address_p=*/false,
/*template_arg_p=*/false,
&error_msg,
input_location);
if (error_msg)
error (error_msg);
if (!function_p && identifier_p (decl))
{
if (complain & tf_error)
unqualified_name_lookup_error (decl);
decl = error_mark_node;
}
RETURN (decl);
}
case TEMPLATE_ID_EXPR:
{
tree object;
tree templ = tsubst_copy_and_build (TREE_OPERAND (t, 0), args,
complain, in_decl,
function_p,
integral_constant_expression_p);
tree targs = TREE_OPERAND (t, 1);
if (targs)
targs = tsubst_template_args (targs, args, complain, in_decl);
if (targs == error_mark_node)
RETURN (error_mark_node);
if (TREE_CODE (templ) == SCOPE_REF)
{
tree name = TREE_OPERAND (templ, 1);
tree tid = lookup_template_function (name, targs);
TREE_OPERAND (templ, 1) = tid;
RETURN (templ);
}
if (concept_definition_p (templ))
{
tree check = build_concept_check (templ, targs, complain);
if (check == error_mark_node)
RETURN (error_mark_node);
tree id = unpack_concept_check (check);
/* If we built a function concept check, return the underlying
template-id. So we can evaluate it as a function call. */
if (function_concept_p (TREE_OPERAND (id, 0)))
RETURN (id);
RETURN (check);
}
if (variable_template_p (templ))
{
tree r = lookup_and_finish_template_variable (templ, targs,
complain);
r = maybe_wrap_with_location (r, EXPR_LOCATION (t));
RETURN (r);
}
if (TREE_CODE (templ) == COMPONENT_REF)
{
object = TREE_OPERAND (templ, 0);
templ = TREE_OPERAND (templ, 1);
}
else
object = NULL_TREE;
tree tid = lookup_template_function (templ, targs);
if (object)
RETURN (build3 (COMPONENT_REF, TREE_TYPE (tid),
object, tid, NULL_TREE));
else if (identifier_p (templ))
{
/* C++20 P0846: we can encounter an IDENTIFIER_NODE here when
name lookup found nothing when parsing the template name. */
gcc_assert (cxx_dialect >= cxx2a || seen_error ());
RETURN (tid);
}
else
RETURN (baselink_for_fns (tid));
}
case INDIRECT_REF:
{
tree r = RECUR (TREE_OPERAND (t, 0));
if (REFERENCE_REF_P (t))
{
/* A type conversion to reference type will be enclosed in
such an indirect ref, but the substitution of the cast
will have also added such an indirect ref. */
r = convert_from_reference (r);
}
else
r = build_x_indirect_ref (input_location, r, RO_UNARY_STAR,
complain|decltype_flag);
if (REF_PARENTHESIZED_P (t))
r = force_paren_expr (r);
RETURN (r);
}
case NOP_EXPR:
{
tree type = tsubst (TREE_TYPE (t), args, complain, in_decl);
tree op0 = RECUR (TREE_OPERAND (t, 0));
RETURN (build_nop (type, op0));
}
case IMPLICIT_CONV_EXPR:
{
tree type = tsubst (TREE_TYPE (t), args, complain, in_decl);
tree expr = RECUR (TREE_OPERAND (t, 0));
if (dependent_type_p (type) || type_dependent_expression_p (expr))
{
retval = copy_node (t);
TREE_TYPE (retval) = type;
TREE_OPERAND (retval, 0) = expr;
RETURN (retval);
}
if (IMPLICIT_CONV_EXPR_NONTYPE_ARG (t))
/* We'll pass this to convert_nontype_argument again, we don't need
to actually perform any conversion here. */
RETURN (expr);
int flags = LOOKUP_IMPLICIT;
if (IMPLICIT_CONV_EXPR_DIRECT_INIT (t))
flags = LOOKUP_NORMAL;
if (IMPLICIT_CONV_EXPR_BRACED_INIT (t))
flags |= LOOKUP_NO_NARROWING;
RETURN (perform_implicit_conversion_flags (type, expr, complain,
flags));
}
case CONVERT_EXPR:
{
tree type = tsubst (TREE_TYPE (t), args, complain, in_decl);
tree op0 = RECUR (TREE_OPERAND (t, 0));
if (op0 == error_mark_node)
RETURN (error_mark_node);
RETURN (build1 (CONVERT_EXPR, type, op0));
}
case CAST_EXPR:
case REINTERPRET_CAST_EXPR:
case CONST_CAST_EXPR:
case DYNAMIC_CAST_EXPR:
case STATIC_CAST_EXPR:
{
tree type;
tree op, r = NULL_TREE;
type = tsubst (TREE_TYPE (t), args, complain, in_decl);
if (integral_constant_expression_p
&& !cast_valid_in_integral_constant_expression_p (type))
{
if (complain & tf_error)
error ("a cast to a type other than an integral or "
"enumeration type cannot appear in a constant-expression");
RETURN (error_mark_node);
}
op = RECUR (TREE_OPERAND (t, 0));
warning_sentinel s(warn_useless_cast);
warning_sentinel s2(warn_ignored_qualifiers);
switch (TREE_CODE (t))
{
case CAST_EXPR:
r = build_functional_cast (input_location, type, op, complain);
break;
case REINTERPRET_CAST_EXPR:
r = build_reinterpret_cast (input_location, type, op, complain);
break;
case CONST_CAST_EXPR:
r = build_const_cast (input_location, type, op, complain);
break;
case DYNAMIC_CAST_EXPR:
r = build_dynamic_cast (input_location, type, op, complain);
break;
case STATIC_CAST_EXPR:
r = build_static_cast (input_location, type, op, complain);
break;
default:
gcc_unreachable ();
}
RETURN (r);
}
case POSTDECREMENT_EXPR:
case POSTINCREMENT_EXPR:
op1 = tsubst_non_call_postfix_expression (TREE_OPERAND (t, 0),
args, complain, in_decl);
RETURN (build_x_unary_op (input_location, TREE_CODE (t), op1,
complain|decltype_flag));
case PREDECREMENT_EXPR:
case PREINCREMENT_EXPR:
case NEGATE_EXPR:
case BIT_NOT_EXPR:
case ABS_EXPR:
case TRUTH_NOT_EXPR:
case UNARY_PLUS_EXPR: /* Unary + */
case REALPART_EXPR:
case IMAGPART_EXPR:
RETURN (build_x_unary_op (input_location, TREE_CODE (t),
RECUR (TREE_OPERAND (t, 0)),
complain|decltype_flag));
case FIX_TRUNC_EXPR:
gcc_unreachable ();
case ADDR_EXPR:
op1 = TREE_OPERAND (t, 0);
if (TREE_CODE (op1) == LABEL_DECL)
RETURN (finish_label_address_expr (DECL_NAME (op1),
EXPR_LOCATION (op1)));
if (TREE_CODE (op1) == SCOPE_REF)
op1 = tsubst_qualified_id (op1, args, complain, in_decl,
/*done=*/true, /*address_p=*/true);
else
op1 = tsubst_non_call_postfix_expression (op1, args, complain,
in_decl);
RETURN (build_x_unary_op (input_location, ADDR_EXPR, op1,
complain|decltype_flag));
case PLUS_EXPR:
case MINUS_EXPR:
case MULT_EXPR:
case TRUNC_DIV_EXPR:
case CEIL_DIV_EXPR:
case FLOOR_DIV_EXPR:
case ROUND_DIV_EXPR:
case EXACT_DIV_EXPR:
case BIT_AND_EXPR:
case BIT_IOR_EXPR:
case BIT_XOR_EXPR:
case TRUNC_MOD_EXPR:
case FLOOR_MOD_EXPR:
case TRUTH_ANDIF_EXPR:
case TRUTH_ORIF_EXPR:
case TRUTH_AND_EXPR:
case TRUTH_OR_EXPR:
case RSHIFT_EXPR:
case LSHIFT_EXPR:
case EQ_EXPR:
case NE_EXPR:
case MAX_EXPR:
case MIN_EXPR:
case LE_EXPR:
case GE_EXPR:
case LT_EXPR:
case GT_EXPR:
case SPACESHIP_EXPR:
case MEMBER_REF:
case DOTSTAR_EXPR:
{
/* If T was type-dependent, suppress warnings that depend on the range
of the types involved. */
++processing_template_decl;
const bool was_dep = (potential_constant_expression (t)
? value_dependent_expression_p (t)
: type_dependent_expression_p (t));
--processing_template_decl;
tree op0 = RECUR (TREE_OPERAND (t, 0));
tree op1 = RECUR (TREE_OPERAND (t, 1));
warning_sentinel s1(warn_type_limits, was_dep);
warning_sentinel s2(warn_div_by_zero, was_dep);
warning_sentinel s3(warn_logical_op, was_dep);
warning_sentinel s4(warn_tautological_compare, was_dep);
tree r = build_x_binary_op
(input_location, TREE_CODE (t),
op0,
(TREE_NO_WARNING (TREE_OPERAND (t, 0))
? ERROR_MARK
: TREE_CODE (TREE_OPERAND (t, 0))),
op1,
(TREE_NO_WARNING (TREE_OPERAND (t, 1))
? ERROR_MARK
: TREE_CODE (TREE_OPERAND (t, 1))),
/*overload=*/NULL,
complain|decltype_flag);
if (EXPR_P (r) && TREE_NO_WARNING (t))
TREE_NO_WARNING (r) = TREE_NO_WARNING (t);
RETURN (r);
}
case POINTER_PLUS_EXPR:
{
tree op0 = RECUR (TREE_OPERAND (t, 0));
if (op0 == error_mark_node)
RETURN (error_mark_node);
tree op1 = RECUR (TREE_OPERAND (t, 1));
if (op1 == error_mark_node)
RETURN (error_mark_node);
RETURN (fold_build_pointer_plus (op0, op1));
}
case SCOPE_REF:
RETURN (tsubst_qualified_id (t, args, complain, in_decl, /*done=*/true,
/*address_p=*/false));
case BASELINK:
RETURN (tsubst_baselink (t, current_nonlambda_class_type (),
args, complain, in_decl));
case ARRAY_REF:
op1 = tsubst_non_call_postfix_expression (TREE_OPERAND (t, 0),
args, complain, in_decl);
RETURN (build_x_array_ref (EXPR_LOCATION (t), op1,
RECUR (TREE_OPERAND (t, 1)),
complain|decltype_flag));
case SIZEOF_EXPR:
if (PACK_EXPANSION_P (TREE_OPERAND (t, 0))
|| ARGUMENT_PACK_P (TREE_OPERAND (t, 0)))
RETURN (tsubst_copy (t, args, complain, in_decl));
/* Fall through */
case ALIGNOF_EXPR:
{
tree r;
op1 = TREE_OPERAND (t, 0);
if (TREE_CODE (t) == SIZEOF_EXPR && SIZEOF_EXPR_TYPE_P (t))
op1 = TREE_TYPE (op1);
bool std_alignof = (TREE_CODE (t) == ALIGNOF_EXPR
&& ALIGNOF_EXPR_STD_P (t));
if (!args)
{
/* When there are no ARGS, we are trying to evaluate a
non-dependent expression from the parser. Trying to do
the substitutions may not work. */
if (!TYPE_P (op1))
op1 = TREE_TYPE (op1);
}
else
{
++cp_unevaluated_operand;
++c_inhibit_evaluation_warnings;
if (TYPE_P (op1))
op1 = tsubst (op1, args, complain, in_decl);
else
op1 = tsubst_copy_and_build (op1, args, complain, in_decl,
/*function_p=*/false,
/*integral_constant_expression_p=*/
false);
--cp_unevaluated_operand;
--c_inhibit_evaluation_warnings;
}
if (TYPE_P (op1))
r = cxx_sizeof_or_alignof_type (input_location,
op1, TREE_CODE (t), std_alignof,
complain & tf_error);
else
r = cxx_sizeof_or_alignof_expr (input_location,
op1, TREE_CODE (t),
complain & tf_error);
if (TREE_CODE (t) == SIZEOF_EXPR && r != error_mark_node)
{
if (TREE_CODE (r) != SIZEOF_EXPR || TYPE_P (op1))
{
if (!processing_template_decl && TYPE_P (op1))
{
r = build_min (SIZEOF_EXPR, size_type_node,
build1 (NOP_EXPR, op1, error_mark_node));
SIZEOF_EXPR_TYPE_P (r) = 1;
}
else
r = build_min (SIZEOF_EXPR, size_type_node, op1);
TREE_SIDE_EFFECTS (r) = 0;
TREE_READONLY (r) = 1;
}
SET_EXPR_LOCATION (r, EXPR_LOCATION (t));
}
RETURN (r);
}
case AT_ENCODE_EXPR:
{
op1 = TREE_OPERAND (t, 0);
++cp_unevaluated_operand;
++c_inhibit_evaluation_warnings;
op1 = tsubst_copy_and_build (op1, args, complain, in_decl,
/*function_p=*/false,
/*integral_constant_expression_p=*/false);
--cp_unevaluated_operand;
--c_inhibit_evaluation_warnings;
RETURN (objc_build_encode_expr (op1));
}
case NOEXCEPT_EXPR:
op1 = TREE_OPERAND (t, 0);
++cp_unevaluated_operand;
++c_inhibit_evaluation_warnings;
++cp_noexcept_operand;
op1 = tsubst_copy_and_build (op1, args, complain, in_decl,
/*function_p=*/false,
/*integral_constant_expression_p=*/false);
--cp_unevaluated_operand;
--c_inhibit_evaluation_warnings;
--cp_noexcept_operand;
RETURN (finish_noexcept_expr (op1, complain));
case MODOP_EXPR:
{
warning_sentinel s(warn_div_by_zero);
tree lhs = RECUR (TREE_OPERAND (t, 0));
tree rhs = RECUR (TREE_OPERAND (t, 2));
tree r = build_x_modify_expr
(EXPR_LOCATION (t), lhs, TREE_CODE (TREE_OPERAND (t, 1)), rhs,
complain|decltype_flag);
/* TREE_NO_WARNING must be set if either the expression was
parenthesized or it uses an operator such as >>= rather
than plain assignment. In the former case, it was already
set and must be copied. In the latter case,
build_x_modify_expr sets it and it must not be reset
here. */
if (TREE_NO_WARNING (t))
TREE_NO_WARNING (r) = TREE_NO_WARNING (t);
RETURN (r);
}
case ARROW_EXPR:
op1 = tsubst_non_call_postfix_expression (TREE_OPERAND (t, 0),
args, complain, in_decl);
/* Remember that there was a reference to this entity. */
if (DECL_P (op1)
&& !mark_used (op1, complain) && !(complain & tf_error))
RETURN (error_mark_node);
RETURN (build_x_arrow (input_location, op1, complain));
case NEW_EXPR:
{
tree placement = RECUR (TREE_OPERAND (t, 0));
tree init = RECUR (TREE_OPERAND (t, 3));
vec<tree, va_gc> *placement_vec;
vec<tree, va_gc> *init_vec;
tree ret;
location_t loc = EXPR_LOCATION (t);
if (placement == NULL_TREE)
placement_vec = NULL;
else
{
placement_vec = make_tree_vector ();
for (; placement != NULL_TREE; placement = TREE_CHAIN (placement))
vec_safe_push (placement_vec, TREE_VALUE (placement));
}
/* If there was an initializer in the original tree, but it
instantiated to an empty list, then we should pass a
non-NULL empty vector to tell build_new that it was an
empty initializer() rather than no initializer. This can
only happen when the initializer is a pack expansion whose
parameter packs are of length zero. */
if (init == NULL_TREE && TREE_OPERAND (t, 3) == NULL_TREE)
init_vec = NULL;
else
{
init_vec = make_tree_vector ();
if (init == void_node)
gcc_assert (init_vec != NULL);
else
{
for (; init != NULL_TREE; init = TREE_CHAIN (init))
vec_safe_push (init_vec, TREE_VALUE (init));
}
}
/* Avoid passing an enclosing decl to valid_array_size_p. */
in_decl = NULL_TREE;
tree op1 = tsubst (TREE_OPERAND (t, 1), args, complain, in_decl);
tree op2 = RECUR (TREE_OPERAND (t, 2));
ret = build_new (loc, &placement_vec, op1, op2,
&init_vec, NEW_EXPR_USE_GLOBAL (t),
complain);
if (placement_vec != NULL)
release_tree_vector (placement_vec);
if (init_vec != NULL)
release_tree_vector (init_vec);
RETURN (ret);
}
case DELETE_EXPR:
{
tree op0 = RECUR (TREE_OPERAND (t, 0));
tree op1 = RECUR (TREE_OPERAND (t, 1));
RETURN (delete_sanity (input_location, op0, op1,
DELETE_EXPR_USE_VEC (t),
DELETE_EXPR_USE_GLOBAL (t),
complain));
}
case COMPOUND_EXPR:
{
tree op0 = tsubst_copy_and_build (TREE_OPERAND (t, 0), args,
complain & ~tf_decltype, in_decl,
/*function_p=*/false,
integral_constant_expression_p);
RETURN (build_x_compound_expr (EXPR_LOCATION (t),
op0,
RECUR (TREE_OPERAND (t, 1)),
complain|decltype_flag));
}
case CALL_EXPR:
{
tree function;
unsigned int nargs, i;
bool qualified_p;
bool koenig_p;
tree ret;
function = CALL_EXPR_FN (t);
/* Internal function with no arguments. */
if (function == NULL_TREE && call_expr_nargs (t) == 0)
RETURN (t);
/* When we parsed the expression, we determined whether or
not Koenig lookup should be performed. */
koenig_p = KOENIG_LOOKUP_P (t);
if (function == NULL_TREE)
{
koenig_p = false;
qualified_p = false;
}
else if (TREE_CODE (function) == SCOPE_REF)
{
qualified_p = true;
function = tsubst_qualified_id (function, args, complain, in_decl,
/*done=*/false,
/*address_p=*/false);
}
else if (koenig_p && identifier_p (function))
{
/* Do nothing; calling tsubst_copy_and_build on an identifier
would incorrectly perform unqualified lookup again.
Note that we can also have an IDENTIFIER_NODE if the earlier
unqualified lookup found a member function; in that case
koenig_p will be false and we do want to do the lookup
again to find the instantiated member function.
FIXME but doing that causes c++/15272, so we need to stop
using IDENTIFIER_NODE in that situation. */
qualified_p = false;
}
else
{
if (TREE_CODE (function) == COMPONENT_REF)
{
tree op = TREE_OPERAND (function, 1);
qualified_p = (TREE_CODE (op) == SCOPE_REF
|| (BASELINK_P (op)
&& BASELINK_QUALIFIED_P (op)));
}
else
qualified_p = false;
if (TREE_CODE (function) == ADDR_EXPR
&& TREE_CODE (TREE_OPERAND (function, 0)) == FUNCTION_DECL)
/* Avoid error about taking the address of a constructor. */
function = TREE_OPERAND (function, 0);
function = tsubst_copy_and_build (function, args, complain,
in_decl,
!qualified_p,
integral_constant_expression_p);
if (BASELINK_P (function))
qualified_p = true;
}
nargs = call_expr_nargs (t);
releasing_vec call_args;
for (i = 0; i < nargs; ++i)
{
tree arg = CALL_EXPR_ARG (t, i);
if (!PACK_EXPANSION_P (arg))
vec_safe_push (call_args, RECUR (CALL_EXPR_ARG (t, i)));
else
{
/* Expand the pack expansion and push each entry onto
CALL_ARGS. */
arg = tsubst_pack_expansion (arg, args, complain, in_decl);
if (TREE_CODE (arg) == TREE_VEC)
{
unsigned int len, j;
len = TREE_VEC_LENGTH (arg);
for (j = 0; j < len; ++j)
{
tree value = TREE_VEC_ELT (arg, j);
if (value != NULL_TREE)
value = convert_from_reference (value);
vec_safe_push (call_args, value);
}
}
else
{
/* A partial substitution. Add one entry. */
vec_safe_push (call_args, arg);
}
}
}
/* Stripped-down processing for a call in a thunk. Specifically, in
the thunk template for a generic lambda. */
if (CALL_FROM_THUNK_P (t))
{
/* Now that we've expanded any packs, the number of call args
might be different. */
unsigned int cargs = call_args->length ();
tree thisarg = NULL_TREE;
if (TREE_CODE (function) == COMPONENT_REF)
{
thisarg = TREE_OPERAND (function, 0);
if (TREE_CODE (thisarg) == INDIRECT_REF)
thisarg = TREE_OPERAND (thisarg, 0);
function = TREE_OPERAND (function, 1);
if (TREE_CODE (function) == BASELINK)
function = BASELINK_FUNCTIONS (function);
}
/* We aren't going to do normal overload resolution, so force the
template-id to resolve. */
function = resolve_nondeduced_context (function, complain);
for (unsigned i = 0; i < cargs; ++i)
{
/* In a thunk, pass through args directly, without any
conversions. */
tree arg = (*call_args)[i];
while (TREE_CODE (arg) != PARM_DECL)
arg = TREE_OPERAND (arg, 0);
(*call_args)[i] = arg;
}
if (thisarg)
{
/* If there are no other args, just push 'this'. */
if (cargs == 0)
vec_safe_push (call_args, thisarg);
else
{
/* Otherwise, shift the other args over to make room. */
tree last = (*call_args)[cargs - 1];
vec_safe_push (call_args, last);
for (int i = cargs - 1; i > 0; --i)
(*call_args)[i] = (*call_args)[i - 1];
(*call_args)[0] = thisarg;
}
}
ret = build_call_a (function, call_args->length (),
call_args->address ());
/* The thunk location is not interesting. */
SET_EXPR_LOCATION (ret, UNKNOWN_LOCATION);
CALL_FROM_THUNK_P (ret) = true;
if (CLASS_TYPE_P (TREE_TYPE (ret)))
CALL_EXPR_RETURN_SLOT_OPT (ret) = true;
RETURN (ret);
}
/* We do not perform argument-dependent lookup if normal
lookup finds a non-function, in accordance with the
resolution of DR 218. */
if (koenig_p
&& ((is_overloaded_fn (function)
/* If lookup found a member function, the Koenig lookup is
not appropriate, even if an unqualified-name was used
to denote the function. */
&& !DECL_FUNCTION_MEMBER_P (get_first_fn (function)))
|| identifier_p (function)
/* C++20 P0846: Lookup found nothing. */
|| (TREE_CODE (function) == TEMPLATE_ID_EXPR
&& identifier_p (TREE_OPERAND (function, 0))))
/* Only do this when substitution turns a dependent call
into a non-dependent call. */
&& type_dependent_expression_p_push (t)
&& !any_type_dependent_arguments_p (call_args))
function = perform_koenig_lookup (function, call_args, tf_none);
if (function != NULL_TREE
&& (identifier_p (function)
|| (TREE_CODE (function) == TEMPLATE_ID_EXPR
&& identifier_p (TREE_OPERAND (function, 0))))
&& !any_type_dependent_arguments_p (call_args))
{
if (TREE_CODE (function) == TEMPLATE_ID_EXPR)
function = TREE_OPERAND (function, 0);
if (koenig_p && (complain & tf_warning_or_error))
{
/* For backwards compatibility and good diagnostics, try
the unqualified lookup again if we aren't in SFINAE
context. */
tree unq = (tsubst_copy_and_build
(function, args, complain, in_decl, true,
integral_constant_expression_p));
if (unq == error_mark_node)
RETURN (error_mark_node);
if (unq != function)
{
/* In a lambda fn, we have to be careful to not
introduce new this captures. Legacy code can't
be using lambdas anyway, so it's ok to be
stricter. */
bool in_lambda = (current_class_type
&& LAMBDA_TYPE_P (current_class_type));
char const *const msg
= G_("%qD was not declared in this scope, "
"and no declarations were found by "
"argument-dependent lookup at the point "
"of instantiation");
bool diag = true;
if (in_lambda)
error_at (cp_expr_loc_or_input_loc (t),
msg, function);
else
diag = permerror (cp_expr_loc_or_input_loc (t),
msg, function);
if (diag)
{
tree fn = unq;
if (INDIRECT_REF_P (fn))
fn = TREE_OPERAND (fn, 0);
if (is_overloaded_fn (fn))
fn = get_first_fn (fn);
if (!DECL_P (fn))
/* Can't say anything more. */;
else if (DECL_CLASS_SCOPE_P (fn))
{
location_t loc = cp_expr_loc_or_input_loc (t);
inform (loc,
"declarations in dependent base %qT are "
"not found by unqualified lookup",
DECL_CLASS_CONTEXT (fn));
if (current_class_ptr)
inform (loc,
"use %<this->%D%> instead", function);
else
inform (loc,
"use %<%T::%D%> instead",
current_class_name, function);
}
else
inform (DECL_SOURCE_LOCATION (fn),
"%qD declared here, later in the "
"translation unit", fn);
if (in_lambda)
RETURN (error_mark_node);
}
function = unq;
}
}
if (identifier_p (function))
{
if (complain & tf_error)
unqualified_name_lookup_error (function);
RETURN (error_mark_node);
}
}
/* Remember that there was a reference to this entity. */
if (function != NULL_TREE
&& DECL_P (function)
&& !mark_used (function, complain) && !(complain & tf_error))
RETURN (error_mark_node);
/* Put back tf_decltype for the actual call. */
complain |= decltype_flag;
if (function == NULL_TREE)
switch (CALL_EXPR_IFN (t))
{
case IFN_LAUNDER:
gcc_assert (nargs == 1);
if (vec_safe_length (call_args) != 1)
{
error_at (cp_expr_loc_or_input_loc (t),
"wrong number of arguments to "
"%<__builtin_launder%>");
ret = error_mark_node;
}
else
ret = finish_builtin_launder (cp_expr_loc_or_input_loc (t),
(*call_args)[0], complain);
break;
case IFN_VEC_CONVERT:
gcc_assert (nargs == 1);
if (vec_safe_length (call_args) != 1)
{
error_at (cp_expr_loc_or_input_loc (t),
"wrong number of arguments to "
"%<__builtin_convertvector%>");
ret = error_mark_node;
break;
}
ret = cp_build_vec_convert ((*call_args)[0], input_location,
tsubst (TREE_TYPE (t), args,
complain, in_decl),
complain);
if (TREE_CODE (ret) == VIEW_CONVERT_EXPR)
RETURN (ret);
break;
default:
/* Unsupported internal function with arguments. */
gcc_unreachable ();
}
else if (TREE_CODE (function) == OFFSET_REF
|| TREE_CODE (function) == DOTSTAR_EXPR
|| TREE_CODE (function) == MEMBER_REF)
ret = build_offset_ref_call_from_tree (function, &call_args,
complain);
else if (TREE_CODE (function) == COMPONENT_REF)
{
tree instance = TREE_OPERAND (function, 0);
tree fn = TREE_OPERAND (function, 1);
if (processing_template_decl
&& (type_dependent_expression_p (instance)
|| (!BASELINK_P (fn)
&& TREE_CODE (fn) != FIELD_DECL)
|| type_dependent_expression_p (fn)
|| any_type_dependent_arguments_p (call_args)))
ret = build_min_nt_call_vec (function, call_args);
else if (!BASELINK_P (fn))
ret = finish_call_expr (function, &call_args,
/*disallow_virtual=*/false,
/*koenig_p=*/false,
complain);
else
ret = (build_new_method_call
(instance, fn,
&call_args, NULL_TREE,
qualified_p ? LOOKUP_NONVIRTUAL : LOOKUP_NORMAL,
/*fn_p=*/NULL,
complain));
}
else if (concept_check_p (function))
{
/* FUNCTION is a template-id referring to a concept definition. */
tree id = unpack_concept_check (function);
tree tmpl = TREE_OPERAND (id, 0);
tree args = TREE_OPERAND (id, 1);
/* Calls to standard and variable concepts should have been
previously diagnosed. */
gcc_assert (function_concept_p (tmpl));
/* Ensure the result is wrapped as a call expression. */
ret = build_concept_check (tmpl, args, tf_warning_or_error);
}
else
ret = finish_call_expr (function, &call_args,
/*disallow_virtual=*/qualified_p,
koenig_p,
complain);
if (ret != error_mark_node)
{
bool op = CALL_EXPR_OPERATOR_SYNTAX (t);
bool ord = CALL_EXPR_ORDERED_ARGS (t);
bool rev = CALL_EXPR_REVERSE_ARGS (t);
if (op || ord || rev)
{
function = extract_call_expr (ret);
CALL_EXPR_OPERATOR_SYNTAX (function) = op;
CALL_EXPR_ORDERED_ARGS (function) = ord;
CALL_EXPR_REVERSE_ARGS (function) = rev;
}
}
RETURN (ret);
}
case COND_EXPR:
{
tree cond = RECUR (TREE_OPERAND (t, 0));
cond = mark_rvalue_use (cond);
tree folded_cond = fold_non_dependent_expr (cond, complain);
tree exp1, exp2;
if (TREE_CODE (folded_cond) == INTEGER_CST)
{
if (integer_zerop (folded_cond))
{
++c_inhibit_evaluation_warnings;
exp1 = RECUR (TREE_OPERAND (t, 1));
--c_inhibit_evaluation_warnings;
exp2 = RECUR (TREE_OPERAND (t, 2));
}
else
{
exp1 = RECUR (TREE_OPERAND (t, 1));
++c_inhibit_evaluation_warnings;
exp2 = RECUR (TREE_OPERAND (t, 2));
--c_inhibit_evaluation_warnings;
}
cond = folded_cond;
}
else
{
exp1 = RECUR (TREE_OPERAND (t, 1));
exp2 = RECUR (TREE_OPERAND (t, 2));
}
warning_sentinel s(warn_duplicated_branches);
RETURN (build_x_conditional_expr (EXPR_LOCATION (t),
cond, exp1, exp2, complain));
}
case PSEUDO_DTOR_EXPR:
{
tree op0 = RECUR (TREE_OPERAND (t, 0));
tree op1 = RECUR (TREE_OPERAND (t, 1));
tree op2 = tsubst (TREE_OPERAND (t, 2), args, complain, in_decl);
RETURN (finish_pseudo_destructor_expr (op0, op1, op2,
input_location));
}
case TREE_LIST:
{
tree purpose, value, chain;
if (t == void_list_node)
RETURN (t);
if ((TREE_PURPOSE (t) && PACK_EXPANSION_P (TREE_PURPOSE (t)))
|| (TREE_VALUE (t) && PACK_EXPANSION_P (TREE_VALUE (t))))
{
/* We have pack expansions, so expand those and
create a new list out of it. */
tree purposevec = NULL_TREE;
tree valuevec = NULL_TREE;
tree chain;
int i, len = -1;
/* Expand the argument expressions. */
if (TREE_PURPOSE (t))
purposevec = tsubst_pack_expansion (TREE_PURPOSE (t), args,
complain, in_decl);
if (TREE_VALUE (t))
valuevec = tsubst_pack_expansion (TREE_VALUE (t), args,
complain, in_decl);
/* Build the rest of the list. */
chain = TREE_CHAIN (t);
if (chain && chain != void_type_node)
chain = RECUR (chain);
/* Determine the number of arguments. */
if (purposevec && TREE_CODE (purposevec) == TREE_VEC)
{
len = TREE_VEC_LENGTH (purposevec);
gcc_assert (!valuevec || len == TREE_VEC_LENGTH (valuevec));
}
else if (TREE_CODE (valuevec) == TREE_VEC)
len = TREE_VEC_LENGTH (valuevec);
else
{
/* Since we only performed a partial substitution into
the argument pack, we only return a single list
node.  */
if (purposevec == TREE_PURPOSE (t)
&& valuevec == TREE_VALUE (t)
&& chain == TREE_CHAIN (t))
RETURN (t);
RETURN (tree_cons (purposevec, valuevec, chain));
}
/* Convert the argument vectors into a TREE_LIST */
i = len;
while (i > 0)
{
/* Grab the Ith values. */
i--;
purpose = purposevec ? TREE_VEC_ELT (purposevec, i)
: NULL_TREE;
value
= valuevec ? convert_from_reference (TREE_VEC_ELT (valuevec, i))
: NULL_TREE;
/* Build the list (backwards). */
chain = tree_cons (purpose, value, chain);
}
RETURN (chain);
}
purpose = TREE_PURPOSE (t);
if (purpose)
purpose = RECUR (purpose);
value = TREE_VALUE (t);
if (value)
value = RECUR (value);
chain = TREE_CHAIN (t);
if (chain && chain != void_type_node)
chain = RECUR (chain);
if (purpose == TREE_PURPOSE (t)
&& value == TREE_VALUE (t)
&& chain == TREE_CHAIN (t))
RETURN (t);
RETURN (tree_cons (purpose, value, chain));
}
case COMPONENT_REF:
{
tree object;
tree object_type;
tree member;
tree r;
object = tsubst_non_call_postfix_expression (TREE_OPERAND (t, 0),
args, complain, in_decl);
/* Remember that there was a reference to this entity. */
if (DECL_P (object)
&& !mark_used (object, complain) && !(complain & tf_error))
RETURN (error_mark_node);
object_type = TREE_TYPE (object);
member = TREE_OPERAND (t, 1);
if (BASELINK_P (member))
member = tsubst_baselink (member,
non_reference (TREE_TYPE (object)),
args, complain, in_decl);
else
member = tsubst_copy (member, args, complain, in_decl);
if (member == error_mark_node)
RETURN (error_mark_node);
if (TREE_CODE (member) == FIELD_DECL)
{
r = finish_non_static_data_member (member, object, NULL_TREE);
if (TREE_CODE (r) == COMPONENT_REF)
REF_PARENTHESIZED_P (r) = REF_PARENTHESIZED_P (t);
RETURN (r);
}
else if (type_dependent_expression_p (object))
/* We can't do much here. */;
else if (!CLASS_TYPE_P (object_type))
{
if (scalarish_type_p (object_type))
{
tree s = NULL_TREE;
tree dtor = member;
if (TREE_CODE (dtor) == SCOPE_REF)
{
s = TREE_OPERAND (dtor, 0);
dtor = TREE_OPERAND (dtor, 1);
}
if (TREE_CODE (dtor) == BIT_NOT_EXPR)
{
dtor = TREE_OPERAND (dtor, 0);
if (TYPE_P (dtor))
RETURN (finish_pseudo_destructor_expr
(object, s, dtor, input_location));
}
}
}
else if (TREE_CODE (member) == SCOPE_REF
&& TREE_CODE (TREE_OPERAND (member, 1)) == TEMPLATE_ID_EXPR)
{
/* Lookup the template functions now that we know what the
scope is. */
tree scope = TREE_OPERAND (member, 0);
tree tmpl = TREE_OPERAND (TREE_OPERAND (member, 1), 0);
tree args = TREE_OPERAND (TREE_OPERAND (member, 1), 1);
member = lookup_qualified_name (scope, tmpl,
/*is_type_p=*/false,
/*complain=*/false);
if (BASELINK_P (member))
{
BASELINK_FUNCTIONS (member)
= build_nt (TEMPLATE_ID_EXPR, BASELINK_FUNCTIONS (member),
args);
member = (adjust_result_of_qualified_name_lookup
(member, BINFO_TYPE (BASELINK_BINFO (member)),
object_type));
}
else
{
qualified_name_lookup_error (scope, tmpl, member,
input_location);
RETURN (error_mark_node);
}
}
else if (TREE_CODE (member) == SCOPE_REF
&& !CLASS_TYPE_P (TREE_OPERAND (member, 0))
&& TREE_CODE (TREE_OPERAND (member, 0)) != NAMESPACE_DECL)
{
if (complain & tf_error)
{
if (TYPE_P (TREE_OPERAND (member, 0)))
error ("%qT is not a class or namespace",
TREE_OPERAND (member, 0));
else
error ("%qD is not a class or namespace",
TREE_OPERAND (member, 0));
}
RETURN (error_mark_node);
}
r = finish_class_member_access_expr (object, member,
/*template_p=*/false,
complain);
if (TREE_CODE (r) == COMPONENT_REF)
REF_PARENTHESIZED_P (r) = REF_PARENTHESIZED_P (t);
RETURN (r);
}
case THROW_EXPR:
RETURN (build_throw
(input_location, RECUR (TREE_OPERAND (t, 0))));
case CONSTRUCTOR:
{
vec<constructor_elt, va_gc> *n;
constructor_elt *ce;
unsigned HOST_WIDE_INT idx;
tree type = tsubst (TREE_TYPE (t), args, complain, in_decl);
bool process_index_p;
int newlen;
bool need_copy_p = false;
tree r;
if (type == error_mark_node)
RETURN (error_mark_node);
/* We do not want to process the index of aggregate
initializers as they are identifier nodes which will be
looked up by digest_init. */
process_index_p = !(type && MAYBE_CLASS_TYPE_P (type));
if (null_member_pointer_value_p (t))
{
gcc_assert (same_type_p (type, TREE_TYPE (t)));
RETURN (t);
}
n = vec_safe_copy (CONSTRUCTOR_ELTS (t));
newlen = vec_safe_length (n);
FOR_EACH_VEC_SAFE_ELT (n, idx, ce)
{
if (ce->index && process_index_p
/* An identifier index is looked up in the type
being initialized, not the current scope. */
&& TREE_CODE (ce->index) != IDENTIFIER_NODE)
ce->index = RECUR (ce->index);
if (PACK_EXPANSION_P (ce->value))
{
/* Substitute into the pack expansion. */
ce->value = tsubst_pack_expansion (ce->value, args, complain,
in_decl);
if (ce->value == error_mark_node
|| PACK_EXPANSION_P (ce->value))
;
else if (TREE_VEC_LENGTH (ce->value) == 1)
/* Just move the argument into place. */
ce->value = TREE_VEC_ELT (ce->value, 0);
else
{
/* Update the length of the final CONSTRUCTOR
arguments vector, and note that we will need to
copy.*/
newlen = newlen + TREE_VEC_LENGTH (ce->value) - 1;
need_copy_p = true;
}
}
else
ce->value = RECUR (ce->value);
}
if (need_copy_p)
{
vec<constructor_elt, va_gc> *old_n = n;
vec_alloc (n, newlen);
FOR_EACH_VEC_ELT (*old_n, idx, ce)
{
if (TREE_CODE (ce->value) == TREE_VEC)
{
int i, len = TREE_VEC_LENGTH (ce->value);
for (i = 0; i < len; ++i)
CONSTRUCTOR_APPEND_ELT (n, 0,
TREE_VEC_ELT (ce->value, i));
}
else
CONSTRUCTOR_APPEND_ELT (n, 0, ce->value);
}
}
r = build_constructor (init_list_type_node, n);
CONSTRUCTOR_IS_DIRECT_INIT (r) = CONSTRUCTOR_IS_DIRECT_INIT (t);
CONSTRUCTOR_IS_DESIGNATED_INIT (r)
= CONSTRUCTOR_IS_DESIGNATED_INIT (t);
if (TREE_HAS_CONSTRUCTOR (t))
{
fcl_t cl = fcl_functional;
if (CONSTRUCTOR_C99_COMPOUND_LITERAL (t))
cl = fcl_c99;
RETURN (finish_compound_literal (type, r, complain, cl));
}
TREE_TYPE (r) = type;
RETURN (r);
}
case TYPEID_EXPR:
{
tree operand_0 = TREE_OPERAND (t, 0);
if (TYPE_P (operand_0))
{
operand_0 = tsubst (operand_0, args, complain, in_decl);
RETURN (get_typeid (operand_0, complain));
}
else
{
operand_0 = RECUR (operand_0);
RETURN (build_typeid (operand_0, complain));
}
}
case VAR_DECL:
if (!args)
RETURN (t);
/* Fall through */
case PARM_DECL:
{
tree r = tsubst_copy (t, args, complain, in_decl);
/* ??? We're doing a subset of finish_id_expression here. */
if (tree wrap = maybe_get_tls_wrapper_call (r))
/* Replace an evaluated use of the thread_local variable with
a call to its wrapper. */
r = wrap;
else if (outer_automatic_var_p (r))
r = process_outer_var_ref (r, complain);
if (!TYPE_REF_P (TREE_TYPE (t)))
/* If the original type was a reference, we'll be wrapped in
the appropriate INDIRECT_REF. */
r = convert_from_reference (r);
RETURN (r);
}
case VA_ARG_EXPR:
{
tree op0 = RECUR (TREE_OPERAND (t, 0));
tree type = tsubst (TREE_TYPE (t), args, complain, in_decl);
RETURN (build_x_va_arg (EXPR_LOCATION (t), op0, type));
}
case OFFSETOF_EXPR:
{
tree object_ptr
= tsubst_copy_and_build (TREE_OPERAND (t, 1), args, complain,
in_decl, /*function_p=*/false,
/*integral_constant_expression_p=*/false);
RETURN (finish_offsetof (object_ptr,
RECUR (TREE_OPERAND (t, 0)),
EXPR_LOCATION (t)));
}
case ADDRESSOF_EXPR:
RETURN (cp_build_addressof (EXPR_LOCATION (t),
RECUR (TREE_OPERAND (t, 0)), complain));
case TRAIT_EXPR:
{
tree type1 = tsubst (TRAIT_EXPR_TYPE1 (t), args,
complain, in_decl);
tree type2 = tsubst (TRAIT_EXPR_TYPE2 (t), args,
complain, in_decl);
RETURN (finish_trait_expr (TRAIT_EXPR_LOCATION (t),
TRAIT_EXPR_KIND (t), type1, type2));
}
case STMT_EXPR:
{
tree old_stmt_expr = cur_stmt_expr;
tree stmt_expr = begin_stmt_expr ();
cur_stmt_expr = stmt_expr;
tsubst_expr (STMT_EXPR_STMT (t), args, complain, in_decl,
integral_constant_expression_p);
stmt_expr = finish_stmt_expr (stmt_expr, false);
cur_stmt_expr = old_stmt_expr;
/* If the resulting list of expression statement is empty,
fold it further into void_node. */
if (empty_expr_stmt_p (stmt_expr))
stmt_expr = void_node;
RETURN (stmt_expr);
}
case LAMBDA_EXPR:
{
if (complain & tf_partial)
{
/* We don't have a full set of template arguments yet; don't touch
the lambda at all. */
gcc_assert (processing_template_decl);
return t;
}
tree r = tsubst_lambda_expr (t, args, complain, in_decl);
RETURN (build_lambda_object (r));
}
case TARGET_EXPR:
/* We can get here for a constant initializer of non-dependent type.
FIXME stop folding in cp_parser_initializer_clause. */
{
tree r = get_target_expr_sfinae (RECUR (TARGET_EXPR_INITIAL (t)),
complain);
RETURN (r);
}
case TRANSACTION_EXPR:
RETURN (tsubst_expr(t, args, complain, in_decl,
integral_constant_expression_p));
case PAREN_EXPR:
RETURN (finish_parenthesized_expr (RECUR (TREE_OPERAND (t, 0))));
case VEC_PERM_EXPR:
{
tree op0 = RECUR (TREE_OPERAND (t, 0));
tree op1 = RECUR (TREE_OPERAND (t, 1));
tree op2 = RECUR (TREE_OPERAND (t, 2));
RETURN (build_x_vec_perm_expr (input_location, op0, op1, op2,
complain));
}
case REQUIRES_EXPR:
{
tree r = tsubst_requires_expr (t, args, tf_none, in_decl);
RETURN (r);
}
case RANGE_EXPR:
/* No need to substitute further, a RANGE_EXPR will always be built
with constant operands. */
RETURN (t);
case NON_LVALUE_EXPR:
case VIEW_CONVERT_EXPR:
if (location_wrapper_p (t))
/* We need to do this here as well as in tsubst_copy so we get the
other tsubst_copy_and_build semantics for a PARM_DECL operand. */
RETURN (maybe_wrap_with_location (RECUR (TREE_OPERAND (t, 0)),
EXPR_LOCATION (t)));
/* fallthrough. */
default:
/* Handle Objective-C++ constructs, if appropriate. */
{
tree subst
= objcp_tsubst_copy_and_build (t, args, complain,
in_decl, /*function_p=*/false);
if (subst)
RETURN (subst);
}
RETURN (tsubst_copy (t, args, complain, in_decl));
}
#undef RECUR
#undef RETURN
out:
input_location = save_loc;
return retval;
}
/* Verify that the instantiated ARGS are valid.  For type arguments,
   make sure that the type's linkage is ok.  For non-type arguments,
   make sure they are constants if they are integral or enumerations.
   Emit an error under control of COMPLAIN, and return TRUE on error.  */

static bool
check_instantiated_arg (tree tmpl, tree t, tsubst_flags_t complain)
{
  /* A still-dependent argument cannot be validated yet; it will be
     checked again once fully instantiated.  */
  if (dependent_template_arg_p (t))
    return false;
  if (ARGUMENT_PACK_P (t))
    {
      /* Recurse into each element of the pack.  Keep going after the
	 first bad element so every error gets reported.  */
      tree vec = ARGUMENT_PACK_ARGS (t);
      int len = TREE_VEC_LENGTH (vec);
      bool result = false;
      int i;

      for (i = 0; i < len; ++i)
	if (check_instantiated_arg (tmpl, TREE_VEC_ELT (vec, i), complain))
	  result = true;
      return result;
    }
  else if (TYPE_P (t))
    {
      /* [basic.link]: A name with no linkage (notably, the name
	 of a class or enumeration declared in a local scope)
	 shall not be used to declare an entity with linkage.
	 This implies that names with no linkage cannot be used as
	 template arguments

	 DR 757 relaxes this restriction for C++0x.  */
      tree nt = (cxx_dialect > cxx98 ? NULL_TREE
		 : no_linkage_check (t, /*relaxed_p=*/false));

      if (nt)
	{
	  /* DR 488 makes use of a type with no linkage cause
	     type deduction to fail.  */
	  if (complain & tf_error)
	    {
	      if (TYPE_UNNAMED_P (nt))
		error ("%qT is/uses unnamed type", t);
	      else
		error ("template argument for %qD uses local type %qT",
		       tmpl, t);
	    }
	  return true;
	}
      /* In order to avoid all sorts of complications, we do not
	 allow variably-modified types as template arguments.  */
      else if (variably_modified_type_p (t, NULL_TREE))
	{
	  if (complain & tf_error)
	    error ("%qT is a variably modified type", t);
	  return true;
	}
    }
  /* Class template and alias template arguments should be OK.  */
  else if (DECL_TYPE_TEMPLATE_P (t))
    ;
  /* A non-type argument of integral or enumerated type must be a
     constant.  */
  else if (TREE_TYPE (t)
	   && INTEGRAL_OR_ENUMERATION_TYPE_P (TREE_TYPE (t))
	   && !REFERENCE_REF_P (t)
	   && !TREE_CONSTANT (t))
    {
      if (complain & tf_error)
	error ("integral expression %qE is not constant", t);
      return true;
    }
  return false;
}
/* Run check_instantiated_arg over every template argument of TMPL in
   ARGS, diagnosing all invalid arguments rather than stopping at the
   first.  Returns true if at least one argument was invalid.  */

static bool
check_instantiated_args (tree tmpl, tree args, tsubst_flags_t complain)
{
  bool saw_error = false;
  const int nparms = DECL_NTPARMS (tmpl);

  for (int i = 0; i < nparms; ++i)
    if (check_instantiated_arg (tmpl, TREE_VEC_ELT (args, i), complain))
      saw_error = true;

  if (saw_error && (complain & tf_error))
    error (" trying to instantiate %qD", tmpl);
  return saw_error;
}
/* We're out of SFINAE context now, so generate diagnostics for the access
   errors we saw earlier when instantiating D from TMPL and ARGS.  */

static void
recheck_decl_substitution (tree d, tree tmpl, tree args)
{
  tree result_pattern = DECL_TEMPLATE_RESULT (tmpl);
  tree pattern_type = TREE_TYPE (result_pattern);
  location_t saved_loc = input_location;

  /* Redo the substitution with access checks enabled and diagnostics on,
     pointing at the template pattern's own location.  */
  push_access_scope (d);
  push_deferring_access_checks (dk_no_deferred);
  input_location = DECL_SOURCE_LOCATION (result_pattern);
  tsubst (pattern_type, args, tf_warning_or_error, d);
  input_location = saved_loc;
  pop_deferring_access_checks ();
  pop_access_scope (d);
}
/* Instantiate the indicated variable, function, or alias template TMPL with
   the template arguments in TARG_PTR.  Returns the specialization decl,
   or error_mark_node on failure.  */

static tree
instantiate_template_1 (tree tmpl, tree orig_args, tsubst_flags_t complain)
{
  tree targ_ptr = orig_args;
  tree fndecl;
  tree gen_tmpl;
  tree spec;
  bool access_ok = true;

  if (tmpl == error_mark_node)
    return error_mark_node;

  gcc_assert (TREE_CODE (tmpl) == TEMPLATE_DECL);

  /* If this function is a clone, handle it specially.  */
  if (DECL_CLONED_FUNCTION_P (tmpl))
    {
      tree spec;
      tree clone;

      /* Use DECL_ABSTRACT_ORIGIN because only FUNCTION_DECLs have
	 DECL_CLONED_FUNCTION.  */
      spec = instantiate_template (DECL_ABSTRACT_ORIGIN (tmpl),
				   targ_ptr, complain);
      if (spec == error_mark_node)
	return error_mark_node;

      /* Look for the clone.  */
      FOR_EACH_CLONE (clone, spec)
	if (DECL_NAME (clone) == DECL_NAME (tmpl))
	  return clone;
      /* We should always have found the clone by now.  */
      gcc_unreachable ();
      return NULL_TREE;
    }

  if (targ_ptr == error_mark_node)
    return error_mark_node;

  /* Check to see if we already have this specialization.  */
  gen_tmpl = most_general_template (tmpl);
  if (TMPL_ARGS_DEPTH (targ_ptr)
      < TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (gen_tmpl)))
    /* targ_ptr only has the innermost template args, so add the outer ones
       from tmpl, which could be either a partial instantiation or gen_tmpl (in
       the case of a non-dependent call within a template definition).  */
    targ_ptr = (add_outermost_template_args
		(DECL_TI_ARGS (DECL_TEMPLATE_RESULT (tmpl)),
		 targ_ptr));

  /* It would be nice to avoid hashing here and then again in tsubst_decl,
     but it doesn't seem to be on the hot path.  */
  spec = retrieve_specialization (gen_tmpl, targ_ptr, 0);

  gcc_assert (tmpl == gen_tmpl
	      || ((fndecl = retrieve_specialization (tmpl, orig_args, 0))
		  == spec)
	      || fndecl == NULL_TREE);

  if (spec != NULL_TREE)
    {
      /* A cached specialization flagged with access errors is
	 re-substituted (we're out of SFINAE now) so the user actually
	 sees the diagnostics; see recheck_decl_substitution.  */
      if (FNDECL_HAS_ACCESS_ERRORS (spec))
	{
	  if (complain & tf_error)
	    recheck_decl_substitution (spec, gen_tmpl, targ_ptr);
	  return error_mark_node;
	}
      return spec;
    }

  if (check_instantiated_args (gen_tmpl, INNERMOST_TEMPLATE_ARGS (targ_ptr),
			       complain))
    return error_mark_node;

  /* We are building a FUNCTION_DECL, during which the access of its
     parameters and return types have to be checked.  However this
     FUNCTION_DECL which is the desired context for access checking
     is not built yet.  We solve this chicken-and-egg problem by
     deferring all checks until we have the FUNCTION_DECL.  */
  push_deferring_access_checks (dk_deferred);

  /* Instantiation of the function happens in the context of the function
     template, not the context of the overload resolution we're doing.  */
  push_to_top_level ();
  /* If there are dependent arguments, e.g. because we're doing partial
     ordering, make sure processing_template_decl stays set.  */
  if (uses_template_parms (targ_ptr))
    ++processing_template_decl;
  if (DECL_CLASS_SCOPE_P (gen_tmpl))
    {
      /* Substitute into the enclosing class so member lookups during the
	 instantiation see the right scope.  */
      tree ctx = tsubst_aggr_type (DECL_CONTEXT (gen_tmpl), targ_ptr,
				   complain, gen_tmpl, true);
      push_nested_class (ctx);
    }

  tree pattern = DECL_TEMPLATE_RESULT (gen_tmpl);

  fndecl = NULL_TREE;
  if (VAR_P (pattern))
    {
      /* We need to determine if we're using a partial or explicit
	 specialization now, because the type of the variable could be
	 different.  */
      tree tid = lookup_template_variable (gen_tmpl, targ_ptr);
      tree elt = most_specialized_partial_spec (tid, complain);
      if (elt == error_mark_node)
	pattern = error_mark_node;
      else if (elt)
	{
	  tree partial_tmpl = TREE_VALUE (elt);
	  tree partial_args = TREE_PURPOSE (elt);
	  tree partial_pat = DECL_TEMPLATE_RESULT (partial_tmpl);
	  fndecl = tsubst (partial_pat, partial_args, complain, gen_tmpl);
	}
    }

  /* Substitute template parameters to obtain the specialization.  */
  if (fndecl == NULL_TREE)
    fndecl = tsubst (pattern, targ_ptr, complain, gen_tmpl);
  if (DECL_CLASS_SCOPE_P (gen_tmpl))
    pop_nested_class ();
  pop_from_top_level ();

  if (fndecl == error_mark_node)
    {
      pop_deferring_access_checks ();
      return error_mark_node;
    }

  /* The DECL_TI_TEMPLATE should always be the immediate parent
     template, not the most general template.  */
  DECL_TI_TEMPLATE (fndecl) = tmpl;
  DECL_TI_ARGS (fndecl) = targ_ptr;

  /* Now we know the specialization, compute access previously
     deferred.  Do no access control for inheriting constructors,
     as we already checked access for the inherited constructor.  */
  if (!(flag_new_inheriting_ctors
	&& DECL_INHERITED_CTOR (fndecl)))
    {
      push_access_scope (fndecl);
      if (!perform_deferred_access_checks (complain))
	access_ok = false;
      pop_access_scope (fndecl);
    }
  pop_deferring_access_checks ();

  /* If we've just instantiated the main entry point for a function,
     instantiate all the alternate entry points as well.  We do this
     by cloning the instantiation of the main entry point, not by
     instantiating the template clones.  */
  if (tree chain = DECL_CHAIN (gen_tmpl))
    if (DECL_P (chain) && DECL_CLONED_FUNCTION_P (chain))
      clone_function_decl (fndecl, /*update_methods=*/false);

  if (!access_ok)
    {
      if (!(complain & tf_error))
	{
	  /* Remember to reinstantiate when we're out of SFINAE so the user
	     can see the errors.  */
	  FNDECL_HAS_ACCESS_ERRORS (fndecl) = true;
	}
      return error_mark_node;
    }
  return fndecl;
}
/* Wrapper for instantiate_template_1 that accounts the time spent
   under the TV_TEMPLATE_INST timer.  */

tree
instantiate_template (tree tmpl, tree orig_args, tsubst_flags_t complain)
{
  timevar_push (TV_TEMPLATE_INST);
  tree result = instantiate_template_1 (tmpl, orig_args, complain);
  timevar_pop (TV_TEMPLATE_INST);
  return result;
}
/* Instantiate the alias template TMPL with ARGS.  Also push a template
   instantiation level, which instantiate_template doesn't do because
   functions and variables have sufficient context established by the
   callers.  Returns the instantiation, or error_mark_node on failure.  */

static tree
instantiate_alias_template (tree tmpl, tree args, tsubst_flags_t complain)
{
  if (tmpl == error_mark_node || args == error_mark_node)
    return error_mark_node;

  /* Coerce ARGS against TMPL's parameter list, requiring a full set of
     arguments and filling in defaults where needed.  */
  args =
    coerce_innermost_template_parms (DECL_TEMPLATE_PARMS (tmpl),
				     args, tmpl, complain,
				     /*require_all_args=*/true,
				     /*use_default_args=*/true);

  /* FIXME check for satisfaction in check_instantiated_args.  */
  if (flag_concepts
      && !any_dependent_template_arguments_p (args)
      && !constraints_satisfied_p (tmpl, args))
    {
      if (complain & tf_error)
	{
	  auto_diagnostic_group d;
	  error ("template constraint failure for %qD", tmpl);
	  diagnose_constraints (input_location, tmpl, args);
	}
      return error_mark_node;
    }

  /* Push a tinst level so errors during the instantiation point back
     at this use of the alias.  */
  if (!push_tinst_level (tmpl, args))
    return error_mark_node;
  tree r = instantiate_template (tmpl, args, complain);
  pop_tinst_level ();

  return r;
}
/* PARM is a template parameter pack for FN.  Returns true iff
   PARM is used in a deducible way in the argument list of FN.  */

static bool
pack_deducible_p (tree parm, tree fn)
{
  for (tree p = FUNCTION_FIRST_USER_PARMTYPE (fn); p; p = TREE_CHAIN (p))
    {
      tree ptype = TREE_VALUE (p);
      if (!PACK_EXPANSION_P (ptype))
	continue;

      for (tree pack = PACK_EXPANSION_PARAMETER_PACKS (ptype);
	   pack; pack = TREE_CHAIN (pack))
	{
	  if (!template_args_equal (TREE_VALUE (pack), parm))
	    continue;
	  /* PARM is used in this function parameter pack.  It is only
	     deducible when the pack sits at the end of the parameter
	     list; otherwise deduction from it would just produce a
	     mismatch, so we treat it as non-deducible.  */
	  return TREE_CHAIN (p) == void_list_node;
	}
    }

  /* The template parameter pack isn't used in any function parameter
     packs, but it might be used deeper, e.g. tuple<Args...>.  */
  return true;
}
/* Subroutine of fn_type_unification: check non-dependent parms for
   convertibility.  Returns 1 (failure) as soon as some argument cannot
   be converted to its non-deducible parameter type, 0 otherwise.  */

static int
check_non_deducible_conversions (tree parms, const tree *args, unsigned nargs,
				 tree fn, unification_kind_t strict, int flags,
				 struct conversion **convs, bool explain_p)
{
  /* Non-constructor methods need to leave a conversion for 'this', which
     isn't included in nargs here.  */
  unsigned offset = (DECL_NONSTATIC_MEMBER_FUNCTION_P (fn)
		     && !DECL_CONSTRUCTOR_P (fn));

  /* Note: IA is advanced only at the bottom of the loop, i.e. when a
     parameter actually pairs with an argument; the 'continue' for a
     mid-list pack deliberately skips the increment.  */
  for (unsigned ia = 0;
       parms && parms != void_list_node && ia < nargs; )
    {
      tree parm = TREE_VALUE (parms);

      if (TREE_CODE (parm) == TYPE_PACK_EXPANSION
	  && (!TREE_CHAIN (parms)
	      || TREE_CHAIN (parms) == void_list_node))
	/* For a function parameter pack that occurs at the end of the
	   parameter-declaration-list, the type A of each remaining
	   argument of the call is compared with the type P of the
	   declarator-id of the function parameter pack.  */
	break;

      parms = TREE_CHAIN (parms);

      if (TREE_CODE (parm) == TYPE_PACK_EXPANSION)
	/* For a function parameter pack that does not occur at the
	   end of the parameter-declaration-list, the type of the
	   parameter pack is a non-deduced context.  */
	continue;

      if (!uses_template_parms (parm))
	{
	  /* PARM contains no template parameters, so its type is fully
	     known; verify the corresponding argument converts to it.  */
	  tree arg = args[ia];
	  conversion **conv_p = convs ? &convs[ia+offset] : NULL;
	  int lflags = conv_flags (ia, nargs, fn, arg, flags);

	  if (check_non_deducible_conversion (parm, arg, strict, lflags,
					      conv_p, explain_p))
	    return 1;
	}

      ++ia;
    }

  return 0;
}
/* The FN is a TEMPLATE_DECL for a function.  ARGS is an array with
   NARGS elements of the arguments that are being used when calling
   it.  TARGS is a vector into which the deduced template arguments
   are placed.

   Returns either a FUNCTION_DECL for the matching specialization of FN or
   NULL_TREE if no suitable specialization can be found.  If EXPLAIN_P is
   true, diagnostics will be printed to explain why it failed.

   If FN is a conversion operator, or we are trying to produce a specific
   specialization, RETURN_TYPE is the return type desired.

   The EXPLICIT_TARGS are explicit template arguments provided via a
   template-id.

   The parameter STRICT is one of:

   DEDUCE_CALL:
     We are deducing arguments for a function call, as in
     [temp.deduct.call].  If RETURN_TYPE is non-null, we are
     deducing arguments for a call to the result of a conversion
     function template, as in [over.call.object].

   DEDUCE_CONV:
     We are deducing arguments for a conversion function, as in
     [temp.deduct.conv].

   DEDUCE_EXACT:
     We are deducing arguments when doing an explicit instantiation
     as in [temp.explicit], when determining an explicit specialization
     as in [temp.expl.spec], or when taking the address of a function
     template, as in [temp.deduct.funcaddr].  */

tree
fn_type_unification (tree fn,
		     tree explicit_targs,
		     tree targs,
		     const tree *args,
		     unsigned int nargs,
		     tree return_type,
		     unification_kind_t strict,
		     int flags,
		     struct conversion **convs,
		     bool explain_p,
		     bool decltype_p)
{
  tree parms;
  tree fntype;
  tree decl = NULL_TREE;
  tsubst_flags_t complain = (explain_p ? tf_warning_or_error : tf_none);
  bool ok;
  /* Function-wide depth counter shared across nested deductions; used
     together with excessive_deduction_depth to bail out of runaway
     recursive deductions (and hence not reentrant).  */
  static int deduction_depth;

  /* type_unification_real will pass back any access checks from default
     template argument substitution.  */
  vec<deferred_access_check, va_gc> *checks = NULL;
  /* We don't have all the template args yet.  */
  bool incomplete = true;

  tree orig_fn = fn;
  if (flag_new_inheriting_ctors)
    fn = strip_inheriting_ctors (fn);

  tree tparms = DECL_INNERMOST_TEMPLATE_PARMS (fn);
  tree r = error_mark_node;
  tree full_targs = targs;
  if (TMPL_ARGS_DEPTH (targs)
      < TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (fn)))
    full_targs = (add_outermost_template_args
		  (DECL_TI_ARGS (DECL_TEMPLATE_RESULT (fn)),
		   targs));

  if (decltype_p)
    complain |= tf_decltype;

  /* In C++0x, it's possible to have a function template whose type depends
     on itself recursively.  This is most obvious with decltype, but can also
     occur with enumeration scope (c++/48969).  So we need to catch infinite
     recursion and reject the substitution at deduction time; this function
     will return error_mark_node for any repeated substitution.

     This also catches excessive recursion such as when f<N> depends on
     f<N-1> across all integers, and returns error_mark_node for all the
     substitutions back up to the initial one.

     This is, of course, not reentrant.  */
  if (excessive_deduction_depth)
    return error_mark_node;
  ++deduction_depth;

  gcc_assert (TREE_CODE (fn) == TEMPLATE_DECL);

  fntype = TREE_TYPE (fn);
  if (explicit_targs)
    {
      /* [temp.deduct]

	 The specified template arguments must match the template
	 parameters in kind (i.e., type, nontype, template), and there
	 must not be more arguments than there are parameters;
	 otherwise type deduction fails.

	 Nontype arguments must match the types of the corresponding
	 nontype template parameters, or must be convertible to the
	 types of the corresponding nontype parameters as specified in
	 _temp.arg.nontype_, otherwise type deduction fails.

	 All references in the function type of the function template
	 to the corresponding template parameters are replaced by the
	 specified template argument values.  If a substitution in a
	 template parameter or in the function type of the function
	 template results in an invalid type, type deduction fails.  */
      int i, len = TREE_VEC_LENGTH (tparms);
      location_t loc = input_location;
      incomplete = false;

      if (explicit_targs == error_mark_node)
	goto fail;

      if (TMPL_ARGS_DEPTH (explicit_targs)
	  < TMPL_ARGS_DEPTH (full_targs))
	explicit_targs = add_outermost_template_args (full_targs,
						      explicit_targs);

      /* Adjust any explicit template arguments before entering the
	 substitution context.  */
      explicit_targs
	= (coerce_template_parms (tparms, explicit_targs, NULL_TREE,
				  complain|tf_partial,
				  /*require_all_args=*/false,
				  /*use_default_args=*/false));
      if (explicit_targs == error_mark_node)
	goto fail;

      /* Substitute the explicit args into the function type.  This is
	 necessary so that, for instance, explicitly declared function
	 arguments can match null pointed constants.  If we were given
	 an incomplete set of explicit args, we must not do semantic
	 processing during substitution as we could create partial
	 instantiations.  */
      for (i = 0; i < len; i++)
	{
	  tree parm = TREE_VALUE (TREE_VEC_ELT (tparms, i));
	  bool parameter_pack = false;
	  tree targ = TREE_VEC_ELT (explicit_targs, i);

	  /* Dig out the actual parm.  */
	  if (TREE_CODE (parm) == TYPE_DECL
	      || TREE_CODE (parm) == TEMPLATE_DECL)
	    {
	      parm = TREE_TYPE (parm);
	      parameter_pack = TEMPLATE_TYPE_PARAMETER_PACK (parm);
	    }
	  else if (TREE_CODE (parm) == PARM_DECL)
	    {
	      parm = DECL_INITIAL (parm);
	      parameter_pack = TEMPLATE_PARM_PARAMETER_PACK (parm);
	    }

	  if (targ == NULL_TREE)
	    /* No explicit argument for this template parameter.  */
	    incomplete = true;
	  else if (parameter_pack && pack_deducible_p (parm, fn))
	    {
	      /* Mark the argument pack as "incomplete".  We could
		 still deduce more arguments during unification.
		 We remove this mark in type_unification_real.  */
	      ARGUMENT_PACK_INCOMPLETE_P(targ) = 1;
	      ARGUMENT_PACK_EXPLICIT_ARGS (targ)
		= ARGUMENT_PACK_ARGS (targ);

	      /* We have some incomplete argument packs.  */
	      incomplete = true;
	    }
	}

      if (incomplete)
	{
	  if (!push_tinst_level (fn, explicit_targs))
	    {
	      excessive_deduction_depth = true;
	      goto fail;
	    }
	  ++processing_template_decl;
	  input_location = DECL_SOURCE_LOCATION (fn);
	  /* Ignore any access checks; we'll see them again in
	     instantiate_template and they might have the wrong
	     access path at this point.  */
	  push_deferring_access_checks (dk_deferred);
	  tsubst_flags_t ecomplain = complain | tf_partial | tf_fndecl_type;
	  fntype = tsubst (TREE_TYPE (fn), explicit_targs, ecomplain, NULL_TREE);
	  pop_deferring_access_checks ();
	  input_location = loc;
	  --processing_template_decl;
	  pop_tinst_level ();

	  if (fntype == error_mark_node)
	    goto fail;
	}

      /* Place the explicitly specified arguments in TARGS.  */
      explicit_targs = INNERMOST_TEMPLATE_ARGS (explicit_targs);
      for (i = NUM_TMPL_ARGS (explicit_targs); i--;)
	TREE_VEC_ELT (targs, i) = TREE_VEC_ELT (explicit_targs, i);
      if (!incomplete && CHECKING_P
	  && !NON_DEFAULT_TEMPLATE_ARGS_COUNT (targs))
	SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT
	  (targs, NUM_TMPL_ARGS (explicit_targs));
    }

  if (return_type && strict != DEDUCE_CALL)
    {
      /* Prepend RETURN_TYPE to ARGS so it participates in deduction
	 like an ordinary argument.  */
      tree *new_args = XALLOCAVEC (tree, nargs + 1);
      new_args[0] = return_type;
      memcpy (new_args + 1, args, nargs * sizeof (tree));
      args = new_args;
      ++nargs;
    }

  if (!incomplete)
    goto deduced;

  /* Never do unification on the 'this' parameter.  */
  parms = skip_artificial_parms_for (fn, TYPE_ARG_TYPES (fntype));

  if (return_type && strict == DEDUCE_CALL)
    {
      /* We're deducing for a call to the result of a template conversion
	 function.  The parms we really want are in return_type.  */
      if (INDIRECT_TYPE_P (return_type))
	return_type = TREE_TYPE (return_type);
      parms = TYPE_ARG_TYPES (return_type);
    }
  else if (return_type)
    {
      parms = tree_cons (NULL_TREE, TREE_TYPE (fntype), parms);
    }

  /* We allow incomplete unification without an error message here
     because the standard doesn't seem to explicitly prohibit it.  Our
     callers must be ready to deal with unification failures in any
     event.  */

  /* If we aren't explaining yet, push tinst context so we can see where
     any errors (e.g. from class instantiations triggered by instantiation
     of default template arguments) come from.  If we are explaining, this
     context is redundant.  */
  if (!explain_p && !push_tinst_level (fn, targs))
    {
      excessive_deduction_depth = true;
      goto fail;
    }

  ok = !type_unification_real (DECL_INNERMOST_TEMPLATE_PARMS (fn),
			       full_targs, parms, args, nargs, /*subr=*/0,
			       strict, &checks, explain_p);
  if (!explain_p)
    pop_tinst_level ();
  if (!ok)
    goto fail;

  /* Now that we have bindings for all of the template arguments,
     ensure that the arguments deduced for the template template
     parameters have compatible template parameter lists.  We cannot
     check this property before we have deduced all template
     arguments, because the template parameter types of a template
     template parameter might depend on prior template parameters
     deduced after the template template parameter.  The following
     ill-formed example illustrates this issue:

       template<typename T, template<T> class C> void f(C<5>, T);

       template<int N> struct X {};

       void g() {
	 f(X<5>(), 5l); // error: template argument deduction fails
       }

     The template parameter list of 'C' depends on the template type
     parameter 'T', but 'C' is deduced to 'X' before 'T' is deduced to
     'long'.  Thus, we can't check that 'C' cannot bind to 'X' at the
     time that we deduce 'C'.  */
  if (!template_template_parm_bindings_ok_p
      (DECL_INNERMOST_TEMPLATE_PARMS (fn), targs))
    {
      unify_inconsistent_template_template_parameters (explain_p);
      goto fail;
    }

  /* DR 1391: All parameters have args, now check non-dependent parms for
     convertibility.  */
  if (check_non_deducible_conversions (parms, args, nargs, fn, strict, flags,
				       convs, explain_p))
    goto fail;

 deduced:

  /* All is well so far.  Now, check:

     [temp.deduct]

     When all template arguments have been deduced, all uses of
     template parameters in nondeduced contexts are replaced with
     the corresponding deduced argument values.  If the
     substitution results in an invalid type, as described above,
     type deduction fails.  */
  if (!push_tinst_level (fn, targs))
    {
      excessive_deduction_depth = true;
      goto fail;
    }

  /* Also collect access checks from the instantiation.  */
  reopen_deferring_access_checks (checks);
  decl = instantiate_template (fn, targs, complain);
  checks = get_deferred_access_checks ();
  pop_deferring_access_checks ();

  pop_tinst_level ();

  if (decl == error_mark_node)
    goto fail;

  /* Now perform any access checks encountered during substitution.  */
  push_access_scope (decl);
  ok = perform_access_checks (checks, complain);
  pop_access_scope (decl);
  if (!ok)
    goto fail;

  /* If we're looking for an exact match, check that what we got
     is indeed an exact match.  It might not be if some template
     parameters are used in non-deduced contexts.  But don't check
     for an exact match if we have dependent template arguments;
     in that case we're doing partial ordering, and we already know
     that we have two candidates that will provide the actual type.  */
  if (strict == DEDUCE_EXACT && !any_dependent_template_arguments_p (targs))
    {
      tree substed = TREE_TYPE (decl);
      unsigned int i;

      tree sarg
	= skip_artificial_parms_for (decl, TYPE_ARG_TYPES (substed));
      if (return_type)
	sarg = tree_cons (NULL_TREE, TREE_TYPE (substed), sarg);
      for (i = 0; i < nargs && sarg; ++i, sarg = TREE_CHAIN (sarg))
	if (!same_type_p (args[i], TREE_VALUE (sarg)))
	  {
	    unify_type_mismatch (explain_p, args[i],
				 TREE_VALUE (sarg));
	    goto fail;
	  }
    }

  /* After doing deduction with the inherited constructor, actually return an
     instantiation of the inheriting constructor.  */
  if (orig_fn != fn)
    decl = instantiate_template (orig_fn, targs, complain);

  r = decl;

 fail:
  --deduction_depth;
  if (excessive_deduction_depth)
    {
      if (deduction_depth == 0)
	/* Reset once we're all the way out.  */
	excessive_deduction_depth = false;
    }

  return r;
}
/* Adjust types before performing type deduction, as described in
   [temp.deduct.call] and [temp.deduct.conv].  The rules in these two
   sections are symmetric.  PARM is the type of a function parameter
   or the return type of the conversion function.  ARG is the type of
   the argument passed to the call, or the type of the value
   initialized with the result of the conversion function.
   ARG_EXPR is the original argument expression, which may be null.

   *PARM and *ARG are adjusted in place.  Returns a bitmask of extra
   UNIFY_ALLOW_* flags for unification (currently only
   UNIFY_ALLOW_OUTER_MORE_CV_QUAL, when a reference on PARM was
   stripped).  */

static int
maybe_adjust_types_for_deduction (unification_kind_t strict,
				  tree* parm,
				  tree* arg,
				  tree arg_expr)
{
  int result = 0;

  switch (strict)
    {
    case DEDUCE_CALL:
      break;

    case DEDUCE_CONV:
      /* Swap PARM and ARG throughout the remainder of this
	 function; the handling is precisely symmetric since PARM
	 will initialize ARG rather than vice versa.  */
      std::swap (parm, arg);
      break;

    case DEDUCE_EXACT:
      /* Core issue #873: Do the DR606 thing (see below) for these cases,
	 too, but here handle it by stripping the reference from PARM
	 rather than by adding it to ARG.  */
      if (TYPE_REF_P (*parm)
	  && TYPE_REF_IS_RVALUE (*parm)
	  && TREE_CODE (TREE_TYPE (*parm)) == TEMPLATE_TYPE_PARM
	  && cp_type_quals (TREE_TYPE (*parm)) == TYPE_UNQUALIFIED
	  && TYPE_REF_P (*arg)
	  && !TYPE_REF_IS_RVALUE (*arg))
	*parm = TREE_TYPE (*parm);
      /* Nothing else to do in this case.  */
      return 0;

    default:
      gcc_unreachable ();
    }

  if (!TYPE_REF_P (*parm))
    {
      /* [temp.deduct.call]

	 If P is not a reference type:

	 --If A is an array type, the pointer type produced by the
	 array-to-pointer standard conversion (_conv.array_) is
	 used in place of A for type deduction; otherwise,

	 --If A is a function type, the pointer type produced by
	 the function-to-pointer standard conversion
	 (_conv.func_) is used in place of A for type deduction;
	 otherwise,

	 --If A is a cv-qualified type, the top level
	 cv-qualifiers of A's type are ignored for type
	 deduction.  */
      if (TREE_CODE (*arg) == ARRAY_TYPE)
	*arg = build_pointer_type (TREE_TYPE (*arg));
      else if (TREE_CODE (*arg) == FUNCTION_TYPE)
	*arg = build_pointer_type (*arg);
      else
	*arg = TYPE_MAIN_VARIANT (*arg);
    }

  /* [14.8.2.1/3 temp.deduct.call], "A forwarding reference is an rvalue
     reference to a cv-unqualified template parameter that does not represent a
     template parameter of a class template (during class template argument
     deduction (13.3.1.8)).  If P is a forwarding reference and the argument is
     an lvalue, the type "lvalue reference to A" is used in place of A for type
     deduction.  */
  if (TYPE_REF_P (*parm)
      && TYPE_REF_IS_RVALUE (*parm)
      && TREE_CODE (TREE_TYPE (*parm)) == TEMPLATE_TYPE_PARM
      && !TEMPLATE_TYPE_PARM_FOR_CLASS (TREE_TYPE (*parm))
      && cp_type_quals (TREE_TYPE (*parm)) == TYPE_UNQUALIFIED
      && (arg_expr ? lvalue_p (arg_expr)
	  /* try_one_overload doesn't provide an arg_expr, but
	     functions are always lvalues.  */
	  : TREE_CODE (*arg) == FUNCTION_TYPE))
    *arg = build_reference_type (*arg);

  /* [temp.deduct.call]

     If P is a cv-qualified type, the top level cv-qualifiers
     of P's type are ignored for type deduction.  If P is a
     reference type, the type referred to by P is used for
     type deduction.  */
  *parm = TYPE_MAIN_VARIANT (*parm);
  if (TYPE_REF_P (*parm))
    {
      *parm = TREE_TYPE (*parm);
      result |= UNIFY_ALLOW_OUTER_MORE_CV_QUAL;
    }

  /* DR 322.  For conversion deduction, remove a reference type on parm
     too (which has been swapped into ARG).  */
  if (strict == DEDUCE_CONV && TYPE_REF_P (*arg))
    *arg = TREE_TYPE (*arg);

  return result;
}
/* Subroutine of fn_type_unification.  PARM is a function parameter of a
   template which doesn't contain any deducible template parameters; check if
   ARG is a suitable match for it.  STRICT, FLAGS and EXPLAIN_P are as in
   unify_one_argument.  */

static int
check_non_deducible_conversion (tree parm, tree arg, int strict,
				int flags, struct conversion **conv_p,
				bool explain_p)
{
  /* ARG may be an expression or already a type.  */
  tree type = TYPE_P (arg) ? arg : TREE_TYPE (arg);

  /* Identical types always match, regardless of deduction kind.  */
  if (same_type_p (parm, type))
    return unify_success (explain_p);

  tsubst_flags_t complain = explain_p ? tf_warning_or_error : tf_none;

  if (strict == DEDUCE_CONV)
    {
      /* Conversion deduction: check convertibility from the parameter
	 toward the argument type.  */
      if (can_convert_arg (type, parm, NULL_TREE, flags, complain))
	return unify_success (explain_p);
    }
  else if (strict != DEDUCE_EXACT)
    {
      /* Call deduction: the argument must convert to the parameter.  */
      tree conv_arg = TYPE_P (arg) ? NULL_TREE : arg;
      bool convertible;
      if (conv_p != NULL)
	{
	  /* Record the conversion so add_function_candidate need not
	     recalculate it.  */
	  *conv_p = good_conversion (parm, type, conv_arg, flags, complain);
	  convertible = (*conv_p != NULL);
	}
      else
	convertible = can_convert_arg (parm, type, conv_arg, flags, complain);
      if (convertible)
	return unify_success (explain_p);
    }

  /* Failure: report it in terms appropriate to the deduction kind.  */
  return (strict == DEDUCE_EXACT
	  ? unify_type_mismatch (explain_p, parm, arg)
	  : unify_arg_conversion (explain_p, parm, type, arg));
}
static bool uses_deducible_template_parms (tree type);
/* Returns true iff the expression EXPR is one from which a template
   argument can be deduced.  In other words, if it's an undecorated
   use of a template non-type parameter.  */

static bool
deducible_expression (tree expr)
{
  /* Look through any implicit conversions wrapped around the
     expression.  */
  tree inner = expr;
  while (CONVERT_EXPR_P (inner) || TREE_CODE (inner) == VIEW_CONVERT_EXPR)
    inner = TREE_OPERAND (inner, 0);
  /* Only a bare template parameter reference is deducible.  */
  return TREE_CODE (inner) == TEMPLATE_PARM_INDEX;
}
/* Returns true iff the array domain DOMAIN uses a template parameter in a
   deducible way; that is, if it has a max value of <PARM> - 1.  */

static bool
deducible_array_bound (tree domain)
{
  /* No domain means there is no bound to deduce from.  */
  if (!domain)
    return false;
  tree max = TYPE_MAX_VALUE (domain);
  /* The bound of T[N] is represented as N - 1; anything that is not a
     MINUS_EXPR of a template parameter is not deducible.  */
  return (TREE_CODE (max) == MINUS_EXPR
	  && deducible_expression (TREE_OPERAND (max, 0)));
}
/* Returns true iff the template arguments ARGS use a template parameter
   in a deducible way.  */

static bool
deducible_template_args (tree args)
{
  int len = TREE_VEC_LENGTH (args);
  for (int ix = 0; ix != len; ++ix)
    {
      tree elt = TREE_VEC_ELT (args, ix);
      bool found;
      if (ARGUMENT_PACK_P (elt))
	/* Recurse into the pack's own argument vector.  */
	found = deducible_template_args (ARGUMENT_PACK_ARGS (elt));
      else
	{
	  /* A pack expansion is deducible iff its pattern is.  */
	  if (PACK_EXPANSION_P (elt))
	    elt = PACK_EXPANSION_PATTERN (elt);
	  if (TREE_CODE (elt) == TEMPLATE_TEMPLATE_PARM)
	    found = true;
	  else if (TYPE_P (elt))
	    found = uses_deducible_template_parms (elt);
	  else
	    found = deducible_expression (elt);
	}
      /* One deducible element is enough.  */
      if (found)
	return true;
    }
  return false;
}
/* Returns true iff TYPE contains any deducible references to template
   parameters, as per 14.8.2.5.  Each clause below corresponds to one of
   the deducible forms that section enumerates.  */

static bool
uses_deducible_template_parms (tree type)
{
  /* Look through a pack expansion to its pattern.  */
  if (PACK_EXPANSION_P (type))
    type = PACK_EXPANSION_PATTERN (type);

  /* T
     cv-list T
     TT<T>
     TT<i>
     TT<> */
  if (TREE_CODE (type) == TEMPLATE_TYPE_PARM
      || TREE_CODE (type) == BOUND_TEMPLATE_TEMPLATE_PARM)
    return true;

  /* T*
     T&
     T&& */
  if (INDIRECT_TYPE_P (type))
    return uses_deducible_template_parms (TREE_TYPE (type));

  /* T[integer-constant ]
     type [i] — the element type or the bound may be deducible.  */
  if (TREE_CODE (type) == ARRAY_TYPE)
    return (uses_deducible_template_parms (TREE_TYPE (type))
	    || deducible_array_bound (TYPE_DOMAIN (type)));

  /* T type ::*
     type T::*
     T T::*
     T (type ::*)()
     type (T::*)()
     type (type ::*)(T)
     type (T::*)(T)
     T (type ::*)(T)
     T (T::*)()
     T (T::*)(T) */
  if (TYPE_PTRMEM_P (type))
    return (uses_deducible_template_parms (TYPE_PTRMEM_CLASS_TYPE (type))
	    || (uses_deducible_template_parms
		(TYPE_PTRMEM_POINTED_TO_TYPE (type))));

  /* template-name <T> (where template-name refers to a class template)
     template-name <i> (where template-name refers to a class template) */
  if (CLASS_TYPE_P (type)
      && CLASSTYPE_TEMPLATE_INFO (type)
      && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (type)))
    return deducible_template_args (INNERMOST_TEMPLATE_ARGS
				    (CLASSTYPE_TI_ARGS (type)));

  /* type (T)
     T()
     T(T) — check the return type, then every parameter type (skipping
     the implicit object parameter of a METHOD_TYPE).  */
  if (FUNC_OR_METHOD_TYPE_P (type))
    {
      if (uses_deducible_template_parms (TREE_TYPE (type)))
	return true;
      tree parm = TYPE_ARG_TYPES (type);
      if (TREE_CODE (type) == METHOD_TYPE)
	parm = TREE_CHAIN (parm);
      for (; parm; parm = TREE_CHAIN (parm))
	if (uses_deducible_template_parms (TREE_VALUE (parm)))
	  return true;
    }

  /* None of the deducible forms matched.  */
  return false;
}
/* Subroutine of type_unification_real and unify_pack_expansion to
   handle unification of a single P/A pair.  Parameters are as
   for those functions.  Returns 0 on success, a nonzero unify_*
   failure code otherwise.  */

static int
unify_one_argument (tree tparms, tree targs, tree parm, tree arg,
		    int subr, unification_kind_t strict,
		    bool explain_p)
{
  tree arg_expr = NULL_TREE;
  int arg_strict;

  if (arg == error_mark_node || parm == error_mark_node)
    return unify_invalid (explain_p);
  if (arg == unknown_type_node)
    /* We can't deduce anything from this, but we might get all the
       template args from other function args.  */
    return unify_success (explain_p);

  /* Implicit conversions (Clause 4) will be performed on a function
     argument to convert it to the type of the corresponding function
     parameter if the parameter type contains no template-parameters that
     participate in template argument deduction.  */
  if (strict != DEDUCE_EXACT
      && TYPE_P (parm) && !uses_deducible_template_parms (parm))
    /* For function parameters with no deducible template parameters,
       just return.  We'll check non-dependent conversions later.  */
    return unify_success (explain_p);

  /* Map the deduction kind onto the base set of UNIFY_ALLOW_* flags.  */
  switch (strict)
    {
    case DEDUCE_CALL:
      arg_strict = (UNIFY_ALLOW_OUTER_LEVEL
		    | UNIFY_ALLOW_MORE_CV_QUAL
		    | UNIFY_ALLOW_DERIVED);
      break;

    case DEDUCE_CONV:
      arg_strict = UNIFY_ALLOW_LESS_CV_QUAL;
      break;

    case DEDUCE_EXACT:
      arg_strict = UNIFY_ALLOW_NONE;
      break;

    default:
      gcc_unreachable ();
    }

  /* We only do these transformations if this is the top-level
     parameter_type_list in a call or declaration matching; in other
     situations (nested function declarators, template argument lists) we
     won't be comparing a type to an expression, and we don't do any type
     adjustments.  */
  if (!subr)
    {
      if (!TYPE_P (arg))
	{
	  gcc_assert (TREE_TYPE (arg) != NULL_TREE);
	  if (type_unknown_p (arg))
	    {
	      /* [temp.deduct.type] A template-argument can be
		 deduced from a pointer to function or pointer
		 to member function argument if the set of
		 overloaded functions does not contain function
		 templates and at most one of a set of
		 overloaded functions provides a unique
		 match.  */
	      resolve_overloaded_unification (tparms, targs, parm,
					      arg, strict,
					      arg_strict, explain_p);
	      /* If a unique match was not found, this is a
		 non-deduced context, so we still succeed.  */
	      return unify_success (explain_p);
	    }

	  /* Remember the expression itself (needed for init-lists and
	     lvalue/rvalue adjustment below) and switch to its type.  */
	  arg_expr = arg;
	  arg = unlowered_expr_type (arg);
	  if (arg == error_mark_node)
	    return unify_invalid (explain_p);
	}

      arg_strict |=
	maybe_adjust_types_for_deduction (strict, &parm, &arg, arg_expr);
    }
  else
    /* In a nested call, PARM and ARG must be the same kind of entity:
       both types/templates or both expressions.  */
    if ((TYPE_P (parm) || TREE_CODE (parm) == TEMPLATE_DECL)
	!= (TYPE_P (arg) || TREE_CODE (arg) == TEMPLATE_DECL))
      return unify_template_argument_mismatch (explain_p, parm, arg);

  /* For deduction from an init-list we need the actual list.  */
  if (arg_expr && BRACE_ENCLOSED_INITIALIZER_P (arg_expr))
    arg = arg_expr;
  return unify (tparms, targs, parm, arg, arg_strict, explain_p);
}
/* for_each_template_parm callback that always returns 0, i.e. "keep
   walking".  Used (e.g. by try_array_deduction) when only the any_fn
   callback is of interest.  */

static int
zero_r (tree, void *)
{
  return 0;
}
/* for_each_template_parm any_fn callback to handle deduction of a template
   type argument from the type of an array bound.  */

static int
array_deduction_r (tree t, void *data)
{
  tree_pair_p pair = (tree_pair_p) data;
  tree &tparms = pair->purpose;
  tree &targs = pair->value;

  if (TREE_CODE (t) == ARRAY_TYPE)
    {
      tree dom = TYPE_DOMAIN (t);
      tree max = dom ? TYPE_MAX_VALUE (dom) : NULL_TREE;
      if (max)
	{
	  /* An array bound N is represented as N - 1; look through
	     that subtraction.  */
	  if (TREE_CODE (max) == MINUS_EXPR)
	    max = TREE_OPERAND (max, 0);
	  /* A bare template parameter as the bound deduces its type
	     to size_t.  */
	  if (TREE_CODE (max) == TEMPLATE_PARM_INDEX)
	    unify (tparms, targs, TREE_TYPE (max), size_type_node,
		   UNIFY_ALLOW_NONE, /*explain*/false);
	}
    }

  /* Keep walking.  */
  return 0;
}
/* Try to deduce any not-yet-deduced template type arguments from the type of
   an array bound.  This is handled separately from unify because 14.8.2.5 says
   "The type of a type parameter is only deduced from an array bound if it is
   not otherwise deduced."  */

static void
try_array_deduction (tree tparms, tree targs, tree parm)
{
  /* Bundle the parameter/argument vectors so array_deduction_r can
     reach them through the walk's opaque data pointer.  */
  tree_pair_s capture = { tparms, targs };
  hash_set<tree> seen;
  for_each_template_parm (parm, zero_r, &capture, &seen,
			  /*nondeduced*/false, array_deduction_r);
}
/* Most parms like fn_type_unification.

   If SUBR is 1, we're being called recursively (to unify the
   arguments of a function or method parameter of a function
   template).

   CHECKS is a pointer to a vector of access checks encountered while
   substituting default template arguments.

   Returns 0 on success, nonzero on deduction failure.  May make up to
   three passes over the parameters (see SAW_UNDEDUCED below) so that
   non-type parameters whose types depend on other parameters can be
   deduced after those other parameters.  */

static int
type_unification_real (tree tparms,
		       tree full_targs,
		       tree xparms,
		       const tree *xargs,
		       unsigned int xnargs,
		       int subr,
		       unification_kind_t strict,
		       vec<deferred_access_check, va_gc> **checks,
		       bool explain_p)
{
  tree parm, arg;
  int i;
  int ntparms = TREE_VEC_LENGTH (tparms);
  int saw_undeduced = 0;
  tree parms;
  const tree *args;
  unsigned int nargs;
  unsigned int ia;

  gcc_assert (TREE_CODE (tparms) == TREE_VEC);
  gcc_assert (xparms == NULL_TREE || TREE_CODE (xparms) == TREE_LIST);
  gcc_assert (ntparms > 0);

  tree targs = INNERMOST_TEMPLATE_ARGS (full_targs);

  /* Reset the number of non-defaulted template arguments contained
     in TARGS.  */
  NON_DEFAULT_TEMPLATE_ARGS_COUNT (targs) = NULL_TREE;

  /* Each pass restarts here with the original parameter/argument
     lists.  */
 again:
  parms = xparms;
  args = xargs;
  nargs = xnargs;

  /* IA indexes the next unconsumed argument.  */
  ia = 0;
  while (parms && parms != void_list_node
	 && ia < nargs)
    {
      parm = TREE_VALUE (parms);

      if (TREE_CODE (parm) == TYPE_PACK_EXPANSION
	  && (!TREE_CHAIN (parms) || TREE_CHAIN (parms) == void_list_node))
	/* For a function parameter pack that occurs at the end of the
	   parameter-declaration-list, the type A of each remaining
	   argument of the call is compared with the type P of the
	   declarator-id of the function parameter pack.  */
	break;

      parms = TREE_CHAIN (parms);

      if (TREE_CODE (parm) == TYPE_PACK_EXPANSION)
	/* For a function parameter pack that does not occur at the
	   end of the parameter-declaration-list, the type of the
	   parameter pack is a non-deduced context.  */
	continue;

      arg = args[ia];
      ++ia;

      if (unify_one_argument (tparms, full_targs, parm, arg, subr, strict,
			      explain_p))
	return 1;
    }

  if (parms
      && parms != void_list_node
      && TREE_CODE (TREE_VALUE (parms)) == TYPE_PACK_EXPANSION)
    {
      /* Unify the remaining arguments with the pack expansion type.  */
      tree argvec;
      tree parmvec = make_tree_vec (1);

      /* Allocate a TREE_VEC and copy in all of the arguments */
      argvec = make_tree_vec (nargs - ia);
      for (i = 0; ia < nargs; ++ia, ++i)
	TREE_VEC_ELT (argvec, i) = args[ia];

      /* Copy the parameter into parmvec.  */
      TREE_VEC_ELT (parmvec, 0) = TREE_VALUE (parms);
      if (unify_pack_expansion (tparms, full_targs, parmvec, argvec, strict,
				/*subr=*/subr, explain_p))
	return 1;

      /* Advance to the end of the list of parameters.  */
      parms = TREE_CHAIN (parms);
    }

  /* Fail if we've reached the end of the parm list, and more args
     are present, and the parm list isn't variadic.  */
  if (ia < nargs && parms == void_list_node)
    return unify_too_many_arguments (explain_p, nargs, ia);
  /* Fail if parms are left and they don't have default values and
     they aren't all deduced as empty packs (c++/57397).  This is
     consistent with sufficient_parms_p.  */
  if (parms && parms != void_list_node
      && TREE_PURPOSE (parms) == NULL_TREE)
    {
      /* COUNT is NARGS plus one per remaining non-pack parameter; if it
	 exceeds NARGS, real parameters went unmatched.  */
      unsigned int count = nargs;
      tree p = parms;
      bool type_pack_p;
      do
	{
	  type_pack_p = TREE_CODE (TREE_VALUE (p)) == TYPE_PACK_EXPANSION;
	  if (!type_pack_p)
	    count++;
	  p = TREE_CHAIN (p);
	}
      while (p && p != void_list_node);
      if (count != nargs)
	return unify_too_few_arguments (explain_p, ia, count,
					type_pack_p);
    }

  if (!subr)
    {
      tsubst_flags_t complain = (explain_p
				 ? tf_warning_or_error
				 : tf_none);
      /* Array-bound deduction (try_array_deduction) only applies before
	 C++17; pretend it was already tried otherwise.  */
      bool tried_array_deduction = (cxx_dialect < cxx17);

      for (i = 0; i < ntparms; i++)
	{
	  tree targ = TREE_VEC_ELT (targs, i);
	  tree tparm = TREE_VEC_ELT (tparms, i);

	  /* Clear the "incomplete" flags on all argument packs now so that
	     substituting them into later default arguments works.  */
	  if (targ && ARGUMENT_PACK_P (targ))
	    {
	      ARGUMENT_PACK_INCOMPLETE_P (targ) = 0;
	      ARGUMENT_PACK_EXPLICIT_ARGS (targ) = NULL_TREE;
	    }

	  if (targ || tparm == error_mark_node)
	    continue;
	  tparm = TREE_VALUE (tparm);

	  if (TREE_CODE (tparm) == TYPE_DECL
	      && !tried_array_deduction)
	    {
	      try_array_deduction (tparms, targs, xparms);
	      tried_array_deduction = true;
	      if (TREE_VEC_ELT (targs, i))
		continue;
	    }

	  /* If this is an undeduced nontype parameter that depends on
	     a type parameter, try another pass; its type may have been
	     deduced from a later argument than the one from which
	     this parameter can be deduced.  */
	  if (TREE_CODE (tparm) == PARM_DECL
	      && uses_template_parms (TREE_TYPE (tparm))
	      && saw_undeduced < 2)
	    {
	      saw_undeduced = 1;
	      continue;
	    }

	  /* Core issue #226 (C++0x) [temp.deduct]:

	     If a template argument has not been deduced, its
	     default template argument, if any, is used.

	     When we are in C++98 mode, TREE_PURPOSE will either
	     be NULL_TREE or ERROR_MARK_NODE, so we do not need
	     to explicitly check cxx_dialect here.  */
	  if (TREE_PURPOSE (TREE_VEC_ELT (tparms, i)))
	    /* OK, there is a default argument.  Wait until after the
	       conversion check to do substitution.  */
	    continue;

	  /* If the type parameter is a parameter pack, then it will
	     be deduced to an empty parameter pack.  */
	  if (template_parameter_pack_p (tparm))
	    {
	      tree arg;

	      if (TREE_CODE (tparm) == TEMPLATE_PARM_INDEX)
		{
		  arg = make_node (NONTYPE_ARGUMENT_PACK);
		  TREE_CONSTANT (arg) = 1;
		}
	      else
		arg = cxx_make_type (TYPE_ARGUMENT_PACK);

	      SET_ARGUMENT_PACK_ARGS (arg, make_tree_vec (0));

	      TREE_VEC_ELT (targs, i) = arg;
	      continue;
	    }

	  return unify_parameter_deduction_failure (explain_p, tparm);
	}

      /* Now substitute into the default template arguments.  */
      for (i = 0; i < ntparms; i++)
	{
	  tree targ = TREE_VEC_ELT (targs, i);
	  tree tparm = TREE_VEC_ELT (tparms, i);

	  if (targ || tparm == error_mark_node)
	    continue;
	  tree parm = TREE_VALUE (tparm);
	  tree arg = TREE_PURPOSE (tparm);
	  reopen_deferring_access_checks (*checks);
	  location_t save_loc = input_location;
	  if (DECL_P (parm))
	    input_location = DECL_SOURCE_LOCATION (parm);

	  if (saw_undeduced == 1
	      && TREE_CODE (parm) == PARM_DECL
	      && uses_template_parms (TREE_TYPE (parm)))
	    {
	      /* The type of this non-type parameter depends on undeduced
		 parameters.  Don't try to use its default argument yet,
		 since we might deduce an argument for it on the next pass,
		 but do check whether the arguments we already have cause
		 substitution failure, so that that happens before we try
		 later default arguments (78489).  */
	      ++processing_template_decl;
	      tree type = tsubst (TREE_TYPE (parm), full_targs, complain,
				  NULL_TREE);
	      --processing_template_decl;
	      if (type == error_mark_node)
		arg = error_mark_node;
	      else
		arg = NULL_TREE;
	    }
	  else
	    {
	      /* Even if the call is happening in template context, getting
		 here means it's non-dependent, and a default argument is
		 considered a separate definition under [temp.decls], so we can
		 do this substitution without processing_template_decl.  This
		 is important if the default argument contains something that
		 might be instantiation-dependent like access (87480).  */
	      processing_template_decl_sentinel s;
	      tree substed = NULL_TREE;

	      if (saw_undeduced == 1)
		{
		  /* First instantiate in template context, in case we still
		     depend on undeduced template parameters.  */
		  ++processing_template_decl;
		  substed = tsubst_template_arg (arg, full_targs, complain,
						 NULL_TREE);
		  --processing_template_decl;
		  if (substed != error_mark_node
		      && !uses_template_parms (substed))
		    /* We replaced all the tparms, substitute again out of
		       template context.  */
		    substed = NULL_TREE;
		}
	      if (!substed)
		substed = tsubst_template_arg (arg, full_targs, complain,
					       NULL_TREE);

	      if (!uses_template_parms (substed))
		arg = convert_template_argument (parm, substed, full_targs,
						 complain, i, NULL_TREE);
	      else if (saw_undeduced == 1)
		arg = NULL_TREE;
	      else
		arg = error_mark_node;
	    }

	  input_location = save_loc;
	  *checks = get_deferred_access_checks ();
	  pop_deferring_access_checks ();

	  if (arg == error_mark_node)
	    return 1;
	  else if (arg)
	    {
	      TREE_VEC_ELT (targs, i) = arg;
	      /* The position of the first default template argument,
		 is also the number of non-defaulted arguments in TARGS.
		 Record that.  */
	      if (!NON_DEFAULT_TEMPLATE_ARGS_COUNT (targs))
		SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (targs, i);
	    }
	}

      /* If pass 1 left something undeduced, go around again (at most
	 twice in total after the first pass).  */
      if (saw_undeduced++ == 1)
	goto again;
    }

  if (CHECKING_P && !NON_DEFAULT_TEMPLATE_ARGS_COUNT (targs))
    SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (targs, TREE_VEC_LENGTH (targs));

  return unify_success (explain_p);
}
/* Subroutine of type_unification_real.  Args are like the variables
   at the call site.  ARG is an overloaded function (or template-id);
   we try deducing template args from each of the overloads, and if
   only one succeeds, we go with that.  Modifies TARGS and returns
   true on success.  */

static bool
resolve_overloaded_unification (tree tparms,
				tree targs,
				tree parm,
				tree arg,
				unification_kind_t strict,
				int sub_strict,
				bool explain_p)
{
  /* Deduce into a scratch copy first; TARGS is only updated at the end,
     and only when exactly one overload matched.  */
  tree tempargs = copy_node (targs);
  int good = 0;
  tree goodfn = NULL_TREE;
  bool addr_p;

  if (TREE_CODE (arg) == ADDR_EXPR)
    {
      arg = TREE_OPERAND (arg, 0);
      addr_p = true;
    }
  else
    addr_p = false;

  if (TREE_CODE (arg) == COMPONENT_REF)
    /* Handle `&x' where `x' is some static or non-static member
       function name.  */
    arg = TREE_OPERAND (arg, 1);

  if (TREE_CODE (arg) == OFFSET_REF)
    arg = TREE_OPERAND (arg, 1);

  /* Strip baselink information.  */
  if (BASELINK_P (arg))
    arg = BASELINK_FUNCTIONS (arg);

  if (TREE_CODE (arg) == TEMPLATE_ID_EXPR)
    {
      /* If we got some explicit template args, we need to plug them into
	 the affected templates before we try to unify, in case the
	 explicit args will completely resolve the templates in question.  */

      int ok = 0;
      tree expl_subargs = TREE_OPERAND (arg, 1);
      arg = TREE_OPERAND (arg, 0);

      for (lkp_iterator iter (arg); iter; ++iter)
	{
	  tree fn = *iter;
	  tree subargs, elem;

	  if (TREE_CODE (fn) != TEMPLATE_DECL)
	    continue;

	  subargs = coerce_template_parms (DECL_INNERMOST_TEMPLATE_PARMS (fn),
					   expl_subargs, NULL_TREE, tf_none,
					   /*require_all_args=*/true,
					   /*use_default_args=*/true);
	  if (subargs != error_mark_node
	      && !any_dependent_template_arguments_p (subargs))
	    {
	      /* The explicit args fully resolve this candidate;
		 instantiate it and try deducing from its type.  */
	      fn = instantiate_template (fn, subargs, tf_none);
	      if (!constraints_satisfied_p (fn))
		continue;
	      if (undeduced_auto_decl (fn))
		{
		  /* Instantiate the function to deduce its return type.  */
		  ++function_depth;
		  instantiate_decl (fn, /*defer*/false, /*class*/false);
		  --function_depth;
		}

	      if (flag_noexcept_type)
		maybe_instantiate_noexcept (fn, tf_none);

	      elem = TREE_TYPE (fn);
	      if (try_one_overload (tparms, targs, tempargs, parm,
				    elem, strict, sub_strict, addr_p, explain_p)
		  && (!goodfn || !same_type_p (goodfn, elem)))
		{
		  goodfn = elem;
		  ++good;
		}
	    }
	  else if (subargs)
	    ++ok;
	}
      /* If no templates (or more than one) are fully resolved by the
	 explicit arguments, this template-id is a non-deduced context; it
	 could still be OK if we deduce all template arguments for the
	 enclosing call through other arguments.  */
      if (good != 1)
	good = ok;
    }
  else if (!OVL_P (arg))
    /* If ARG is, for example, "(0, &f)" then its type will be unknown
       -- but the deduction does not succeed because the expression is
       not just the function on its own.  */
    return false;
  else
    for (lkp_iterator iter (arg); iter; ++iter)
      {
	tree fn = *iter;
	if (try_one_overload (tparms, targs, tempargs, parm, TREE_TYPE (fn),
			      strict, sub_strict, addr_p, explain_p)
	    && (!goodfn || !decls_match (goodfn, fn)))
	  {
	    goodfn = fn;
	    ++good;
	  }
      }

  /* [temp.deduct.type] A template-argument can be deduced from a pointer
     to function or pointer to member function argument if the set of
     overloaded functions does not contain function templates and at most
     one of a set of overloaded functions provides a unique match.

     So if we found multiple possibilities, we return success but don't
     deduce anything.  */

  if (good == 1)
    {
      /* Exactly one match: commit the scratch deductions into TARGS.  */
      int i = TREE_VEC_LENGTH (targs);
      for (; i--; )
	if (TREE_VEC_ELT (tempargs, i))
	  {
	    tree old = TREE_VEC_ELT (targs, i);
	    tree new_ = TREE_VEC_ELT (tempargs, i);
	    if (new_ && old && ARGUMENT_PACK_P (old)
		&& ARGUMENT_PACK_EXPLICIT_ARGS (old))
	      /* Don't forget explicit template arguments in a pack.  */
	      ARGUMENT_PACK_EXPLICIT_ARGS (new_)
		= ARGUMENT_PACK_EXPLICIT_ARGS (old);
	    TREE_VEC_ELT (targs, i) = new_;
	  }
    }
  if (good)
    return true;

  return false;
}
/* Core DR 115: In contexts where deduction is done and fails, or in
   contexts where deduction is not done, if a template argument list is
   specified and it, along with any default template arguments,
   identifies a single function template specialization, then the
   template-id is an lvalue for the function template specialization.

   Returns that specialization (rewrapped in any stripped ADDR_EXPR /
   OFFSET_REF / baselink), or ORIG_EXPR unchanged if it does not resolve
   uniquely.  */

tree
resolve_nondeduced_context (tree orig_expr, tsubst_flags_t complain)
{
  tree expr, offset, baselink;
  bool addr;

  if (!type_unknown_p (orig_expr))
    return orig_expr;

  expr = orig_expr;
  addr = false;
  offset = NULL_TREE;
  baselink = NULL_TREE;

  /* Strip (and remember) the decorations around the template-id so we
     can rebuild them around the resolved function below.  */
  if (TREE_CODE (expr) == ADDR_EXPR)
    {
      expr = TREE_OPERAND (expr, 0);
      addr = true;
    }
  if (TREE_CODE (expr) == OFFSET_REF)
    {
      offset = expr;
      expr = TREE_OPERAND (expr, 1);
    }
  if (BASELINK_P (expr))
    {
      baselink = expr;
      expr = BASELINK_FUNCTIONS (expr);
    }

  if (TREE_CODE (expr) == TEMPLATE_ID_EXPR)
    {
      int good = 0;
      tree goodfn = NULL_TREE;

      /* If we got some explicit template args, we need to plug them into
	 the affected templates before we try to unify, in case the
	 explicit args will completely resolve the templates in question.  */
      tree expl_subargs = TREE_OPERAND (expr, 1);
      tree arg = TREE_OPERAND (expr, 0);
      tree badfn = NULL_TREE;
      tree badargs = NULL_TREE;

      for (lkp_iterator iter (arg); iter; ++iter)
	{
	  tree fn = *iter;
	  tree subargs, elem;

	  if (TREE_CODE (fn) != TEMPLATE_DECL)
	    continue;

	  subargs = coerce_template_parms (DECL_INNERMOST_TEMPLATE_PARMS (fn),
					   expl_subargs, NULL_TREE, tf_none,
					   /*require_all_args=*/true,
					   /*use_default_args=*/true);
	  if (subargs != error_mark_node
	      && !any_dependent_template_arguments_p (subargs))
	    {
	      elem = instantiate_template (fn, subargs, tf_none);
	      if (elem == error_mark_node)
		{
		  /* Remember one failed candidate for diagnostics.  */
		  badfn = fn;
		  badargs = subargs;
		}
	      else if (elem && (!goodfn || !decls_match (goodfn, elem))
		       && constraints_satisfied_p (elem))
		{
		  goodfn = elem;
		  ++good;
		}
	    }
	}
      if (good == 1)
	{
	  /* Unique resolution: rebuild the stripped decorations around
	     the specialization.  */
	  mark_used (goodfn);
	  expr = goodfn;
	  if (baselink)
	    expr = build_baselink (BASELINK_BINFO (baselink),
				   BASELINK_ACCESS_BINFO (baselink),
				   expr, BASELINK_OPTYPE (baselink));
	  if (offset)
	    {
	      tree base
		= TYPE_MAIN_VARIANT (TREE_TYPE (TREE_OPERAND (offset, 0)));
	      expr = build_offset_ref (base, expr, addr, complain);
	    }
	  if (addr)
	    expr = cp_build_addr_expr (expr, complain);
	  return expr;
	}
      else if (good == 0 && badargs && (complain & tf_error))
	/* There were no good options and at least one bad one, so let the
	   user know what the problem is.  */
	instantiate_template (badfn, badargs, complain);
    }
  return orig_expr;
}
/* As above, but error out if the expression remains overloaded.  */

tree
resolve_nondeduced_context_or_error (tree exp, tsubst_flags_t complain)
{
  tree resolved = resolve_nondeduced_context (exp, complain);
  if (!type_unknown_p (resolved))
    return resolved;
  /* Still overloaded: diagnose (when allowed) and fail.  */
  if (complain & tf_error)
    cxx_incomplete_type_error (resolved, TREE_TYPE (resolved));
  return error_mark_node;
}
/* Subroutine of resolve_overloaded_unification; does deduction for a single
   overload.  Fills TARGS with any deduced arguments, or error_mark_node if
   different overloads deduce different arguments for a given parm.
   ADDR_P is true if the expression for which deduction is being
   performed was of the form "& fn" rather than simply "fn".

   Returns 1 on success.  */

static int
try_one_overload (tree tparms,
		  tree orig_targs,
		  tree targs,
		  tree parm,
		  tree arg,
		  unification_kind_t strict,
		  int sub_strict,
		  bool addr_p,
		  bool explain_p)
{
  int nargs;
  tree tempargs;
  int i;

  if (arg == error_mark_node)
    return 0;

  /* [temp.deduct.type] A template-argument can be deduced from a pointer
     to function or pointer to member function argument if the set of
     overloaded functions does not contain function templates and at most
     one of a set of overloaded functions provides a unique match.

     So if this is a template, just return success.  */
  if (uses_template_parms (arg))
    return 1;

  /* Adjust ARG to the form it would have as a function argument.  */
  if (TREE_CODE (arg) == METHOD_TYPE)
    arg = build_ptrmemfunc_type (build_pointer_type (arg));
  else if (addr_p)
    arg = build_pointer_type (arg);

  sub_strict |= maybe_adjust_types_for_deduction (strict, &parm, &arg, NULL);

  /* We don't copy orig_targs for this because if we have already deduced
     some template args from previous args, unify would complain when we
     try to deduce a template parameter for the same argument, even though
     there isn't really a conflict.  */
  nargs = TREE_VEC_LENGTH (targs);
  tempargs = make_tree_vec (nargs);

  if (unify (tparms, tempargs, parm, arg, sub_strict, explain_p))
    return 0;

  /* First make sure we didn't deduce anything that conflicts with
     explicitly specified args.  */
  for (i = nargs; i--; )
    {
      tree elt = TREE_VEC_ELT (tempargs, i);
      tree oldelt = TREE_VEC_ELT (orig_targs, i);

      if (!elt)
	/*NOP*/;
      else if (uses_template_parms (elt))
	/* Since we're unifying against ourselves, we will fill in
	   template args used in the function parm list with our own
	   template parms.  Discard them.  */
	TREE_VEC_ELT (tempargs, i) = NULL_TREE;
      else if (oldelt && ARGUMENT_PACK_P (oldelt))
	{
	  /* Check that the argument at each index of the deduced argument pack
	     is equivalent to the corresponding explicitly specified argument.
	     We may have deduced more arguments than were explicitly specified,
	     and that's OK.  */

	  /* We used to assert ARGUMENT_PACK_INCOMPLETE_P (oldelt) here, but
	     that's wrong if we deduce the same argument pack from multiple
	     function arguments: it's only incomplete the first time.  */

	  tree explicit_pack = ARGUMENT_PACK_ARGS (oldelt);
	  tree deduced_pack = ARGUMENT_PACK_ARGS (elt);

	  if (TREE_VEC_LENGTH (deduced_pack)
	      < TREE_VEC_LENGTH (explicit_pack))
	    return 0;

	  for (int j = 0; j < TREE_VEC_LENGTH (explicit_pack); j++)
	    if (!template_args_equal (TREE_VEC_ELT (explicit_pack, j),
				      TREE_VEC_ELT (deduced_pack, j)))
	      return 0;
	}
      else if (oldelt && !template_args_equal (oldelt, elt))
	return 0;
    }

  /* No conflicts: merge the surviving deductions into TARGS.  */
  for (i = nargs; i--; )
    {
      tree elt = TREE_VEC_ELT (tempargs, i);

      if (elt)
	TREE_VEC_ELT (targs, i) = elt;
    }

  return 1;
}
/* PARM is a template class (perhaps with unbound template
   parameters).  ARG is a fully instantiated type.  If ARG can be
   bound to PARM, return ARG, otherwise return NULL_TREE.  TPARMS and
   TARGS are as for unify.  Any deductions made here are deliberately
   discarded (see the long comment below).  */

static tree
try_class_unification (tree tparms, tree targs, tree parm, tree arg,
		       bool explain_p)
{
  tree copy_of_targs;

  if (!CLASSTYPE_SPECIALIZATION_OF_PRIMARY_TEMPLATE_P (arg))
    return NULL_TREE;
  else if (TREE_CODE (parm) == BOUND_TEMPLATE_TEMPLATE_PARM)
    /* Matches anything.  */;
  else if (most_general_template (CLASSTYPE_TI_TEMPLATE (arg))
	   != most_general_template (CLASSTYPE_TI_TEMPLATE (parm)))
    /* Specializations of different templates can never unify.  */
    return NULL_TREE;

  /* We need to make a new template argument vector for the call to
     unify.  If we used TARGS, we'd clutter it up with the result of
     the attempted unification, even if this class didn't work out.
     We also don't want to commit ourselves to all the unifications
     we've already done, since unification is supposed to be done on
     an argument-by-argument basis.  In other words, consider the
     following pathological case:

       template <int I, int J, int K>
       struct S {};

       template <int I, int J>
       struct S<I, J, 2> : public S<I, I, I>, S<J, J, J> {};

       template <int I, int J, int K>
       void f(S<I, J, K>, S<I, I, I>);

       void g() {
	 S<0, 0, 0> s0;
	 S<0, 1, 2> s2;

	 f(s0, s2);
       }

     Now, by the time we consider the unification involving `s2', we
     already know that we must have `f<0, 0, 0>'.  But, even though
     `S<0, 1, 2>' is derived from `S<0, 0, 0>', the code is invalid
     because there are two ways to unify base classes of S<0, 1, 2>
     with S<I, I, I>.  If we kept the already deduced knowledge, we
     would reject the possibility I=1.  */

  copy_of_targs = make_tree_vec (TREE_VEC_LENGTH (targs));

  if (TREE_CODE (parm) == BOUND_TEMPLATE_TEMPLATE_PARM)
    {
      if (unify_bound_ttp_args (tparms, copy_of_targs, parm, arg, explain_p))
	return NULL_TREE;
      return arg;
    }

  /* If unification failed, we're done.  */
  if (unify (tparms, copy_of_targs, CLASSTYPE_TI_ARGS (parm),
	     CLASSTYPE_TI_ARGS (arg), UNIFY_ALLOW_NONE, explain_p))
    return NULL_TREE;

  return arg;
}
/* Given a template type PARM and a class type ARG, find the unique
   base type in ARG that is an instance of PARM.  We do not examine
   ARG itself; only its base-classes.  If there is not exactly one
   appropriate base class, return NULL_TREE.  PARM may be the type of
   a partial specialization, as well as a plain template type.  Used
   by unify.  The matching base (or NULL_TREE) is stored in *RESULT;
   the return value distinguishes success from incomplete-type and
   ambiguous-base failures.  */

static enum template_base_result
get_template_base (tree tparms, tree targs, tree parm, tree arg,
		   bool explain_p, tree *result)
{
  tree rval = NULL_TREE;
  tree binfo;

  gcc_assert (RECORD_OR_UNION_CODE_P (TREE_CODE (arg)));

  binfo = TYPE_BINFO (complete_type (arg));
  if (!binfo)
    {
      /* The type could not be completed.  */
      *result = NULL_TREE;
      return tbr_incomplete_type;
    }

  /* Walk in inheritance graph order.  The search order is not
     important, and this avoids multiple walks of virtual bases.  */
  for (binfo = TREE_CHAIN (binfo); binfo; binfo = TREE_CHAIN (binfo))
    {
      tree r = try_class_unification (tparms, targs, parm,
				      BINFO_TYPE (binfo), explain_p);

      if (r)
	{
	  /* If there is more than one satisfactory baseclass, then:

	       [temp.deduct.call]

	      If they yield more than one possible deduced A, the type
	      deduction fails.

	     applies.  */
	  if (rval && !same_type_p (r, rval))
	    {
	      *result = NULL_TREE;
	      return tbr_ambiguous_baseclass;
	    }

	  rval = r;
	}
    }

  *result = rval;
  return tbr_success;
}
/* Returns the level of DECL, which declares a template parameter.  */

static int
template_decl_level (tree decl)
{
  enum tree_code code = TREE_CODE (decl);
  if (code == TYPE_DECL || code == TEMPLATE_DECL)
    /* Type and template template parameters carry their level on the
       parameter type itself.  */
    return TEMPLATE_TYPE_LEVEL (TREE_TYPE (decl));
  if (code == PARM_DECL)
    /* Non-type parameters keep a TEMPLATE_PARM_INDEX in DECL_INITIAL.  */
    return TEMPLATE_PARM_LEVEL (DECL_INITIAL (decl));
  /* Anything else is not a template parameter declaration.  */
  gcc_unreachable ();
  return 0;
}
/* Decide whether ARG can be unified with PARM, considering only the
   cv-qualifiers of each type, given STRICT as documented for unify.
   Returns nonzero iff the unification is OK on that basis.  */

static int
check_cv_quals_for_unify (int strict, tree arg, tree parm)
{
  int arg_quals = cp_type_quals (arg);
  int parm_quals = cp_type_quals (parm);

  if (TREE_CODE (parm) == TEMPLATE_TYPE_PARM
      && !(strict & UNIFY_ALLOW_OUTER_MORE_CV_QUAL))
    {
      /* Although a CVR qualifier is ignored when being applied to a
	 substituted template parameter ([8.3.2]/1 for example), that
	 does not allow us to unify "const T" with "int&" because both
	 types are not of the form "cv-list T" [14.8.2.5 temp.deduct.type].
	 It is ok when we're allowing additional CV qualifiers
	 at the outer level [14.8.2.1]/3,1st bullet.  */
      bool ref_or_fn_arg = (TYPE_REF_P (arg)
			    || FUNC_OR_METHOD_TYPE_P (arg));
      if (ref_or_fn_arg
	  && (parm_quals & (TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE)))
	return 0;

      bool restrict_applicable = (INDIRECT_TYPE_P (arg)
				  || TREE_CODE (arg) == TEMPLATE_TYPE_PARM);
      if (!restrict_applicable && (parm_quals & TYPE_QUAL_RESTRICT))
	return 0;
    }

  /* Unless UNIFY_ALLOW_MORE_CV_QUAL (or its outer variant) is in effect,
     every qualifier on PARM must also appear on ARG.  */
  bool more_cv_ok
    = strict & (UNIFY_ALLOW_MORE_CV_QUAL | UNIFY_ALLOW_OUTER_MORE_CV_QUAL);
  if (!more_cv_ok && (arg_quals & parm_quals) != parm_quals)
    return 0;

  /* Unless UNIFY_ALLOW_LESS_CV_QUAL (or its outer variant) is in effect,
     every qualifier on ARG must also appear on PARM.  */
  bool less_cv_ok
    = strict & (UNIFY_ALLOW_LESS_CV_QUAL | UNIFY_ALLOW_OUTER_LESS_CV_QUAL);
  if (!less_cv_ok && (parm_quals & arg_quals) != arg_quals)
    return 0;

  return 1;
}
/* Determines the LEVEL and INDEX for the template parameter PARM.  */

void
template_parm_level_and_index (tree parm, int* level, int* index)
{
  enum tree_code code = TREE_CODE (parm);
  bool type_parm_p = (code == TEMPLATE_TYPE_PARM
		      || code == TEMPLATE_TEMPLATE_PARM
		      || code == BOUND_TEMPLATE_TEMPLATE_PARM);
  if (type_parm_p)
    {
      /* Type and template template parameters use the TEMPLATE_TYPE_*
	 accessors.  */
      *index = TEMPLATE_TYPE_IDX (parm);
      *level = TEMPLATE_TYPE_LEVEL (parm);
    }
  else
    {
      /* Non-type parameters (TEMPLATE_PARM_INDEX nodes) use the
	 TEMPLATE_PARM_* accessors.  */
      *index = TEMPLATE_PARM_IDX (parm);
      *level = TEMPLATE_PARM_LEVEL (parm);
    }
}
/* Convenience wrapper used inside unify: recursively unify P against A
   with strictness S, and propagate any failure (nonzero result) out of
   the enclosing function immediately.  */
#define RECUR_AND_CHECK_FAILURE(TP, TA, P, A, S, EP)			\
  do {									\
    if (unify (TP, TA, P, A, S, EP))					\
      return 1;								\
  } while (0)
/* Unifies the remaining arguments in PACKED_ARGS with the pack
   expansion at the end of PACKED_PARMS.  Returns 0 if the type
   deduction succeeds, 1 otherwise.  STRICT is the same as in
   fn_type_unification.  CALL_ARGS_P is true iff PACKED_ARGS is actually a
   function call argument list.  We'll need to adjust the arguments to make them
   types.  SUBR tells us if this is from a recursive call to
   type_unification_real, or for comparing two template argument
   lists. */
static int
unify_pack_expansion (tree tparms, tree targs, tree packed_parms,
		      tree packed_args, unification_kind_t strict,
		      bool subr, bool explain_p)
{
  /* The pack expansion is the last element of PACKED_PARMS; everything
     before index START has already been unified by the caller.  */
  tree parm
    = TREE_VEC_ELT (packed_parms, TREE_VEC_LENGTH (packed_parms) - 1);
  tree pattern = PACK_EXPANSION_PATTERN (parm);
  tree pack, packs = NULL_TREE;
  int i, start = TREE_VEC_LENGTH (packed_parms) - 1;

  /* Add in any args remembered from an earlier partial instantiation.  */
  targs = add_to_template_args (PACK_EXPANSION_EXTRA_ARGS (parm), targs);
  int levels = TMPL_ARGS_DEPTH (targs);

  packed_args = expand_template_argument_pack (packed_args);

  int len = TREE_VEC_LENGTH (packed_args);

  /* Determine the parameter packs we will be deducing from the
     pattern, and record their current deductions.  */
  for (pack = PACK_EXPANSION_PARAMETER_PACKS (parm);
       pack; pack = TREE_CHAIN (pack))
    {
      tree parm_pack = TREE_VALUE (pack);
      int idx, level;

      /* Only template parameter packs can be deduced, not e.g. function
	 parameter packs or __bases or __integer_pack.  */
      if (!TEMPLATE_PARM_P (parm_pack))
	continue;

      /* Determine the index and level of this parameter pack.  */
      template_parm_level_and_index (parm_pack, &level, &idx);
      if (level < levels)
	continue;

      /* Keep track of the parameter packs and their corresponding
	 argument packs.  PACKS is a TREE_LIST: TREE_PURPOSE is the
	 parm pack, TREE_VALUE its prior deduction (if any), and
	 TREE_TYPE is (ab)used as a scratch vector collecting the value
	 deduced for this pack at each argument index below.  */
      packs = tree_cons (parm_pack, TMPL_ARG (targs, level, idx), packs);
      TREE_TYPE (packs) = make_tree_vec (len - start);
    }

  /* Loop through all of the arguments that have not yet been
     unified and unify each with the pattern.  */
  for (i = start; i < len; i++)
    {
      tree parm;
      bool any_explicit = false;
      tree arg = TREE_VEC_ELT (packed_args, i);

      /* For each parameter pack, set its TMPL_ARG to either NULL_TREE
	 or the element of its argument pack at the current index if
	 this argument was explicitly specified.  */
      for (pack = packs; pack; pack = TREE_CHAIN (pack))
	{
	  int idx, level;
	  tree arg, pargs;
	  template_parm_level_and_index (TREE_PURPOSE (pack), &level, &idx);

	  arg = NULL_TREE;
	  if (TREE_VALUE (pack)
	      && (pargs = ARGUMENT_PACK_EXPLICIT_ARGS (TREE_VALUE (pack)))
	      && (i - start < TREE_VEC_LENGTH (pargs)))
	    {
	      any_explicit = true;
	      arg = TREE_VEC_ELT (pargs, i - start);
	    }
	  TMPL_ARG (targs, level, idx) = arg;
	}

      /* If we had explicit template arguments, substitute them into the
	 pattern before deduction.  */
      if (any_explicit)
	{
	  /* Some arguments might still be unspecified or dependent.
	     Keep processing_template_decl raised across the tsubst in
	     that case so dependent trees are handled correctly.  */
	  bool dependent;
	  ++processing_template_decl;
	  dependent = any_dependent_template_arguments_p (targs);
	  if (!dependent)
	    --processing_template_decl;
	  parm = tsubst (pattern, targs,
			 explain_p ? tf_warning_or_error : tf_none,
			 NULL_TREE);
	  if (dependent)
	    --processing_template_decl;
	  if (parm == error_mark_node)
	    return 1;
	}
      else
	parm = pattern;

      /* Unify the pattern with the current argument.  */
      if (unify_one_argument (tparms, targs, parm, arg, subr, strict,
			      explain_p))
	return 1;

      /* For each parameter pack, collect the deduced value.  */
      for (pack = packs; pack; pack = TREE_CHAIN (pack))
	{
	  int idx, level;
	  template_parm_level_and_index (TREE_PURPOSE (pack), &level, &idx);
	  TREE_VEC_ELT (TREE_TYPE (pack), i - start) =
	    TMPL_ARG (targs, level, idx);
	}
    }

  /* Verify that the results of unification with the parameter packs
     produce results consistent with what we've seen before, and make
     the deduced argument packs available.  */
  for (pack = packs; pack; pack = TREE_CHAIN (pack))
    {
      tree old_pack = TREE_VALUE (pack);
      tree new_args = TREE_TYPE (pack);
      int i, len = TREE_VEC_LENGTH (new_args);
      int idx, level;
      bool nondeduced_p = false;

      /* By default keep the original deduced argument pack.
	 If necessary, more specific code is going to update the
	 resulting deduced argument later down in this function.  */
      template_parm_level_and_index (TREE_PURPOSE (pack), &level, &idx);
      TMPL_ARG (targs, level, idx) = old_pack;

      /* If NEW_ARGS contains any NULL_TREE entries, we didn't
	 actually deduce anything.  */
      for (i = 0; i < len && !nondeduced_p; ++i)
	if (TREE_VEC_ELT (new_args, i) == NULL_TREE)
	  nondeduced_p = true;
      if (nondeduced_p)
	continue;

      if (old_pack && ARGUMENT_PACK_INCOMPLETE_P (old_pack))
	{
	  /* If we had fewer function args than explicit template args,
	     just use the explicits.  */
	  tree explicit_args = ARGUMENT_PACK_EXPLICIT_ARGS (old_pack);
	  int explicit_len = TREE_VEC_LENGTH (explicit_args);
	  if (len < explicit_len)
	    new_args = explicit_args;
	}

      if (!old_pack)
	{
	  tree result;
	  /* Build the deduced *_ARGUMENT_PACK.  */
	  if (TREE_CODE (TREE_PURPOSE (pack)) == TEMPLATE_PARM_INDEX)
	    {
	      result = make_node (NONTYPE_ARGUMENT_PACK);
	      TREE_CONSTANT (result) = 1;
	    }
	  else
	    result = cxx_make_type (TYPE_ARGUMENT_PACK);

	  SET_ARGUMENT_PACK_ARGS (result, new_args);

	  /* Note the deduced argument packs for this parameter
	     pack.  */
	  TMPL_ARG (targs, level, idx) = result;
	}
      else if (ARGUMENT_PACK_INCOMPLETE_P (old_pack)
	       && (ARGUMENT_PACK_ARGS (old_pack)
		   == ARGUMENT_PACK_EXPLICIT_ARGS (old_pack)))
	{
	  /* We only had the explicitly-provided arguments before, but
	     now we have a complete set of arguments.  */
	  tree explicit_args = ARGUMENT_PACK_EXPLICIT_ARGS (old_pack);

	  SET_ARGUMENT_PACK_ARGS (old_pack, new_args);
	  ARGUMENT_PACK_INCOMPLETE_P (old_pack) = 1;
	  ARGUMENT_PACK_EXPLICIT_ARGS (old_pack) = explicit_args;
	}
      else
	{
	  /* An argument pack was already fully deduced; the new
	     deduction must agree with it element-for-element.  */
	  tree bad_old_arg = NULL_TREE, bad_new_arg = NULL_TREE;
	  tree old_args = ARGUMENT_PACK_ARGS (old_pack);

	  if (!comp_template_args (old_args, new_args,
				   &bad_old_arg, &bad_new_arg))
	    /* Inconsistent unification of this parameter pack.  */
	    return unify_parameter_pack_inconsistent (explain_p,
						      bad_old_arg,
						      bad_new_arg);
	}
    }

  return unify_success (explain_p);
}
/* Handle unification of the domain of an array.  PARM_DOM and ARG_DOM are
   INTEGER_TYPEs representing the TYPE_DOMAIN of ARRAY_TYPEs.  The other
   parameters and return value are as for unify.  */
static int
unify_array_domain (tree tparms, tree targs,
		    tree parm_dom, tree arg_dom,
		    bool explain_p)
{
  tree parm_max;
  tree arg_max;
  bool parm_cst;
  bool arg_cst;

  /* Our representation of array types uses "N - 1" as the
     TYPE_MAX_VALUE for an array with "N" elements, if "N" is
     not an integer constant.  We cannot unify arbitrarily
     complex expressions, so we eliminate the MINUS_EXPRs
     here.  */
  parm_max = TYPE_MAX_VALUE (parm_dom);
  parm_cst = TREE_CODE (parm_max) == INTEGER_CST;
  if (!parm_cst)
    {
      gcc_assert (TREE_CODE (parm_max) == MINUS_EXPR);
      /* Strip the "- 1", leaving the element count N.  */
      parm_max = TREE_OPERAND (parm_max, 0);
    }
  arg_max = TYPE_MAX_VALUE (arg_dom);
  arg_cst = TREE_CODE (arg_max) == INTEGER_CST;
  if (!arg_cst)
    {
      /* The ARG_MAX may not be a simple MINUS_EXPR, if we are
	 trying to unify the type of a variable with the type
	 of a template parameter.  For example:

	   template <unsigned int N>
	   void f (char (&) [N]);
	   int g();
	   void h(int i) {
	     char a[g(i)];
	     f(a);
	   }

	 Here, the type of the ARG will be "int [g(i)]", and
	 may be a SAVE_EXPR, etc.  */
      if (TREE_CODE (arg_max) != MINUS_EXPR)
	return unify_vla_arg (explain_p, arg_dom);
      arg_max = TREE_OPERAND (arg_max, 0);
    }

  /* If only one of the bounds used a MINUS_EXPR, compensate
     by adding one to the other bound: one side is now N while the
     other is still N - 1, and both must denote the same count.  */
  if (parm_cst && !arg_cst)
    parm_max = fold_build2_loc (input_location, PLUS_EXPR,
				integer_type_node,
				parm_max,
				integer_one_node);
  else if (arg_cst && !parm_cst)
    arg_max = fold_build2_loc (input_location, PLUS_EXPR,
			       integer_type_node,
			       arg_max,
			       integer_one_node);

  /* Bounds may be deduced into any integral type.  */
  return unify (tparms, targs, parm_max, arg_max,
		UNIFY_ALLOW_INTEGER, explain_p);
}
/* Classify T, a P or A in unify, as a type, a template, or an
   expression; PARM/ARG pairs of different kinds never unify.  */
enum pa_kind_t { pa_type, pa_tmpl, pa_expr };

static pa_kind_t
pa_kind (tree t)
{
  /* A pack expansion is classified by its pattern.  */
  if (PACK_EXPANSION_P (t))
    t = PACK_EXPANSION_PATTERN (t);

  const enum tree_code code = TREE_CODE (t);
  if (code == TEMPLATE_TEMPLATE_PARM
      || code == UNBOUND_CLASS_TEMPLATE
      || DECL_TYPE_TEMPLATE_P (t))
    return pa_tmpl;

  return TYPE_P (t) ? pa_type : pa_expr;
}
/* Deduce the value of template parameters. TPARMS is the (innermost)
set of template parameters to a template. TARGS is the bindings
for those template parameters, as determined thus far; TARGS may
include template arguments for outer levels of template parameters
as well. PARM is a parameter to a template function, or a
subcomponent of that parameter; ARG is the corresponding argument.
This function attempts to match PARM with ARG in a manner
consistent with the existing assignments in TARGS. If more values
are deduced, then TARGS is updated.
Returns 0 if the type deduction succeeds, 1 otherwise. The
parameter STRICT is a bitwise or of the following flags:
UNIFY_ALLOW_NONE:
Require an exact match between PARM and ARG.
UNIFY_ALLOW_MORE_CV_QUAL:
Allow the deduced ARG to be more cv-qualified (by qualification
conversion) than ARG.
UNIFY_ALLOW_LESS_CV_QUAL:
Allow the deduced ARG to be less cv-qualified than ARG.
UNIFY_ALLOW_DERIVED:
Allow the deduced ARG to be a template base class of ARG,
or a pointer to a template base class of the type pointed to by
ARG.
UNIFY_ALLOW_INTEGER:
Allow any integral type to be deduced. See the TEMPLATE_PARM_INDEX
case for more information.
UNIFY_ALLOW_OUTER_LEVEL:
This is the outermost level of a deduction. Used to determine validity
of qualification conversions. A valid qualification conversion must
have const qualified pointers leading up to the inner type which
requires additional CV quals, except at the outer level, where const
is not required [conv.qual]. It would be normal to set this flag in
addition to setting UNIFY_ALLOW_MORE_CV_QUAL.
UNIFY_ALLOW_OUTER_MORE_CV_QUAL:
This is the outermost level of a deduction, and PARM can be more CV
qualified at this point.
UNIFY_ALLOW_OUTER_LESS_CV_QUAL:
This is the outermost level of a deduction, and PARM can be less CV
qualified at this point. */
static int
unify (tree tparms, tree targs, tree parm, tree arg, int strict,
       bool explain_p)
{
  int idx;
  tree targ;
  tree tparm;
  /* Remember the caller-supplied flags; STRICT itself is pruned of the
     outer-level-only bits before recursing.  */
  int strict_in = strict;
  tsubst_flags_t complain = (explain_p
			     ? tf_warning_or_error
			     : tf_none);

  /* I don't think this will do the right thing with respect to types.
     But the only case I've seen it in so far has been array bounds, where
     signedness is the only information lost, and I think that will be
     okay.  VIEW_CONVERT_EXPR can appear with class NTTP, thanks to
     finish_id_expression_1, and are also OK.  */
  while (CONVERT_EXPR_P (parm) || TREE_CODE (parm) == VIEW_CONVERT_EXPR)
    parm = TREE_OPERAND (parm, 0);

  if (arg == error_mark_node)
    return unify_invalid (explain_p);
  if (arg == unknown_type_node
      || arg == init_list_type_node)
    /* We can't deduce anything from this, but we might get all the
       template args from other function args.  */
    return unify_success (explain_p);

  if (parm == any_targ_node || arg == any_targ_node)
    return unify_success (explain_p);

  /* If PARM uses template parameters, then we can't bail out here,
     even if ARG == PARM, since we won't record unifications for the
     template parameters.  We might need them if we're trying to
     figure out which of two things is more specialized.  */
  if (arg == parm && !uses_template_parms (parm))
    return unify_success (explain_p);

  /* Handle init lists early, so the rest of the function can assume
     we're dealing with a type. */
  if (BRACE_ENCLOSED_INITIALIZER_P (arg))
    {
      tree elt, elttype;
      unsigned i;
      tree orig_parm = parm;

      if (!is_std_init_list (parm)
	  && TREE_CODE (parm) != ARRAY_TYPE)
	/* We can only deduce from an initializer list argument if the
	   parameter is std::initializer_list or an array; otherwise this
	   is a non-deduced context. */
	return unify_success (explain_p);

      if (TREE_CODE (parm) == ARRAY_TYPE)
	elttype = TREE_TYPE (parm);
      else
	{
	  elttype = TREE_VEC_ELT (CLASSTYPE_TI_ARGS (parm), 0);
	  /* Deduction is defined in terms of a single type, so just punt
	     on the (bizarre) std::initializer_list<T...>.  */
	  if (PACK_EXPANSION_P (elttype))
	    return unify_success (explain_p);
	}

      if (strict != DEDUCE_EXACT
	  && TYPE_P (elttype)
	  && !uses_deducible_template_parms (elttype))
	/* If ELTTYPE has no deducible template parms, skip deduction from
	   the list elements.  */;
      else
	/* Unify the element type against each element of the list.  */
	FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (arg), i, elt)
	  {
	    int elt_strict = strict;

	    if (elt == error_mark_node)
	      return unify_invalid (explain_p);

	    if (!BRACE_ENCLOSED_INITIALIZER_P (elt))
	      {
		tree type = TREE_TYPE (elt);
		if (type == error_mark_node)
		  return unify_invalid (explain_p);
		/* It should only be possible to get here for a call.  */
		gcc_assert (elt_strict & UNIFY_ALLOW_OUTER_LEVEL);
		elt_strict |= maybe_adjust_types_for_deduction
		  (DEDUCE_CALL, &elttype, &type, elt);
		elt = type;
	      }

	    RECUR_AND_CHECK_FAILURE (tparms, targs, elttype, elt, elt_strict,
				     explain_p);
	  }

      if (TREE_CODE (parm) == ARRAY_TYPE
	  && deducible_array_bound (TYPE_DOMAIN (parm)))
	{
	  /* Also deduce from the length of the initializer list.  */
	  tree max = size_int (CONSTRUCTOR_NELTS (arg));
	  tree idx = compute_array_index_type (NULL_TREE, max, tf_none);
	  if (idx == error_mark_node)
	    return unify_invalid (explain_p);
	  return unify_array_domain (tparms, targs, TYPE_DOMAIN (parm),
				     idx, explain_p);
	}

      /* If the std::initializer_list<T> deduction worked, replace the
	 deduced A with std::initializer_list<A>.  */
      if (orig_parm != parm)
	{
	  idx = TEMPLATE_TYPE_IDX (orig_parm);
	  targ = TREE_VEC_ELT (INNERMOST_TEMPLATE_ARGS (targs), idx);
	  targ = listify (targ);
	  TREE_VEC_ELT (INNERMOST_TEMPLATE_ARGS (targs), idx) = targ;
	}
      return unify_success (explain_p);
    }

  /* If parm and arg aren't the same kind of thing (template, type, or
     expression), fail early.  */
  if (pa_kind (parm) != pa_kind (arg))
    return unify_invalid (explain_p);

  /* Immediately reject some pairs that won't unify because of
     cv-qualification mismatches.  */
  if (TREE_CODE (arg) == TREE_CODE (parm)
      && TYPE_P (arg)
      /* It is the elements of the array which hold the cv quals of an array
	 type, and the elements might be template type parms.  We'll check
	 when we recurse.  */
      && TREE_CODE (arg) != ARRAY_TYPE
      /* We check the cv-qualifiers when unifying with template type
	 parameters below.  We want to allow ARG `const T' to unify with
	 PARM `T' for example, when computing which of two templates
	 is more specialized, for example.  */
      && TREE_CODE (arg) != TEMPLATE_TYPE_PARM
      && !check_cv_quals_for_unify (strict_in, arg, parm))
    return unify_cv_qual_mismatch (explain_p, parm, arg);

  if (!(strict & UNIFY_ALLOW_OUTER_LEVEL)
      && TYPE_P (parm) && !CP_TYPE_CONST_P (parm))
    strict &= ~UNIFY_ALLOW_MORE_CV_QUAL;
  /* The outer-level and derived-to-base permissions apply only at this
     level; clear them from STRICT before any recursion (STRICT_IN still
     holds the original bits where this level needs them).  */
  strict &= ~UNIFY_ALLOW_OUTER_LEVEL;
  strict &= ~UNIFY_ALLOW_DERIVED;
  strict &= ~UNIFY_ALLOW_OUTER_MORE_CV_QUAL;
  strict &= ~UNIFY_ALLOW_OUTER_LESS_CV_QUAL;

  /* Dispatch on the kind of PARM.  */
  switch (TREE_CODE (parm))
    {
    case TYPENAME_TYPE:
    case SCOPE_REF:
    case UNBOUND_CLASS_TEMPLATE:
      /* In a type which contains a nested-name-specifier, template
	 argument values cannot be deduced for template parameters used
	 within the nested-name-specifier.  */
      return unify_success (explain_p);

    case TEMPLATE_TYPE_PARM:
    case TEMPLATE_TEMPLATE_PARM:
    case BOUND_TEMPLATE_TEMPLATE_PARM:
      tparm = TREE_VALUE (TREE_VEC_ELT (tparms, 0));
      if (error_operand_p (tparm))
	return unify_invalid (explain_p);

      if (TEMPLATE_TYPE_LEVEL (parm)
	  != template_decl_level (tparm))
	/* The PARM is not one we're trying to unify.  Just check
	   to see if it matches ARG.  */
	{
	  if (TREE_CODE (arg) == TREE_CODE (parm)
	      && (is_auto (parm) ? is_auto (arg)
		  : same_type_p (parm, arg)))
	    return unify_success (explain_p);
	  else
	    return unify_type_mismatch (explain_p, parm, arg);
	}
      idx = TEMPLATE_TYPE_IDX (parm);
      targ = TREE_VEC_ELT (INNERMOST_TEMPLATE_ARGS (targs), idx);
      tparm = TREE_VALUE (TREE_VEC_ELT (tparms, idx));
      if (error_operand_p (tparm))
	return unify_invalid (explain_p);

      /* Check for mixed types and values.  */
      if ((TREE_CODE (parm) == TEMPLATE_TYPE_PARM
	   && TREE_CODE (tparm) != TYPE_DECL)
	  || (TREE_CODE (parm) == TEMPLATE_TEMPLATE_PARM
	      && TREE_CODE (tparm) != TEMPLATE_DECL))
	gcc_unreachable ();

      if (TREE_CODE (parm) == BOUND_TEMPLATE_TEMPLATE_PARM)
	{
	  if ((strict_in & UNIFY_ALLOW_DERIVED)
	      && CLASS_TYPE_P (arg))
	    {
	      /* First try to match ARG directly.  */
	      tree t = try_class_unification (tparms, targs, parm, arg,
					      explain_p);
	      if (!t)
		{
		  /* Otherwise, look for a suitable base of ARG, as below.  */
		  enum template_base_result r;
		  r = get_template_base (tparms, targs, parm, arg,
					 explain_p, &t);
		  if (!t)
		    return unify_no_common_base (explain_p, r, parm, arg);
		  arg = t;
		}
	    }
	  /* ARG must be constructed from a template class or a template
	     template parameter.  */
	  else if (TREE_CODE (arg) != BOUND_TEMPLATE_TEMPLATE_PARM
		   && !CLASSTYPE_SPECIALIZATION_OF_PRIMARY_TEMPLATE_P (arg))
	    return unify_template_deduction_failure (explain_p, parm, arg);

	  /* Deduce arguments T, i from TT<T> or TT<i>.  */
	  if (unify_bound_ttp_args (tparms, targs, parm, arg, explain_p))
	    return 1;

	  arg = TYPE_TI_TEMPLATE (arg);
	  /* Fall through to deduce template name.  */
	}

      if (TREE_CODE (parm) == TEMPLATE_TEMPLATE_PARM
	  || TREE_CODE (parm) == BOUND_TEMPLATE_TEMPLATE_PARM)
	{
	  /* Deduce template name TT from TT, TT<>, TT<T> and TT<i>.  */

	  /* Simple cases: Value already set, does match or doesn't.  */
	  if (targ != NULL_TREE && template_args_equal (targ, arg))
	    return unify_success (explain_p);
	  else if (targ)
	    return unify_inconsistency (explain_p, parm, targ, arg);
	}
      else
	{
	  /* If PARM is `const T' and ARG is only `int', we don't have
	     a match unless we are allowing additional qualification.
	     If ARG is `const int' and PARM is just `T' that's OK;
	     that binds `const int' to `T'.  */
	  if (!check_cv_quals_for_unify (strict_in | UNIFY_ALLOW_LESS_CV_QUAL,
					 arg, parm))
	    return unify_cv_qual_mismatch (explain_p, parm, arg);

	  /* Consider the case where ARG is `const volatile int' and
	     PARM is `const T'.  Then, T should be `volatile int'.  */
	  arg = cp_build_qualified_type_real
	    (arg, cp_type_quals (arg) & ~cp_type_quals (parm), tf_none);
	  if (arg == error_mark_node)
	    return unify_invalid (explain_p);

	  /* Simple cases: Value already set, does match or doesn't.  */
	  if (targ != NULL_TREE && same_type_p (targ, arg))
	    return unify_success (explain_p);
	  else if (targ)
	    return unify_inconsistency (explain_p, parm, targ, arg);

	  /* Make sure that ARG is not a variable-sized array.  (Note
	     that were talking about variable-sized arrays (like
	     `int[n]'), rather than arrays of unknown size (like
	     `int[]').)  We'll get very confused by such a type since
	     the bound of the array is not constant, and therefore
	     not mangleable.  Besides, such types are not allowed in
	     ISO C++, so we can do as we please here.  We do allow
	     them for 'auto' deduction, since that isn't ABI-exposed.  */
	  if (!is_auto (parm) && variably_modified_type_p (arg, NULL_TREE))
	    return unify_vla_arg (explain_p, arg);

	  /* Strip typedefs as in convert_template_argument.  */
	  arg = canonicalize_type_argument (arg, tf_none);
	}

      /* If ARG is a parameter pack or an expansion, we cannot unify
	 against it unless PARM is also a parameter pack.  */
      if ((template_parameter_pack_p (arg) || PACK_EXPANSION_P (arg))
	  && !template_parameter_pack_p (parm))
	return unify_parameter_pack_mismatch (explain_p, parm, arg);

      /* If the argument deduction results is a METHOD_TYPE,
	 then there is a problem.
	 METHOD_TYPE doesn't map to any real C++ type the result of
	 the deduction cannot be of that type.  */
      if (TREE_CODE (arg) == METHOD_TYPE)
	return unify_method_type_error (explain_p, arg);

      /* Record the deduction.  */
      TREE_VEC_ELT (INNERMOST_TEMPLATE_ARGS (targs), idx) = arg;
      return unify_success (explain_p);

    case TEMPLATE_PARM_INDEX:
      tparm = TREE_VALUE (TREE_VEC_ELT (tparms, 0));
      if (error_operand_p (tparm))
	return unify_invalid (explain_p);

      if (TEMPLATE_PARM_LEVEL (parm)
	  != template_decl_level (tparm))
	{
	  /* The PARM is not one we're trying to unify.  Just check
	     to see if it matches ARG.  */
	  int result = !(TREE_CODE (arg) == TREE_CODE (parm)
			 && cp_tree_equal (parm, arg));
	  if (result)
	    unify_expression_unequal (explain_p, parm, arg);
	  return result;
	}

      idx = TEMPLATE_PARM_IDX (parm);
      targ = TREE_VEC_ELT (INNERMOST_TEMPLATE_ARGS (targs), idx);

      if (targ)
	{
	  /* Already deduced; the new value must agree with it.  */
	  if ((strict & UNIFY_ALLOW_INTEGER)
	      && TREE_TYPE (targ) && TREE_TYPE (arg)
	      && CP_INTEGRAL_TYPE_P (TREE_TYPE (targ)))
	    /* We're deducing from an array bound, the type doesn't matter.  */
	    arg = fold_convert (TREE_TYPE (targ), arg);
	  int x = !cp_tree_equal (targ, arg);
	  if (x)
	    unify_inconsistency (explain_p, parm, targ, arg);
	  return x;
	}

      /* [temp.deduct.type] If, in the declaration of a function template
	 with a non-type template-parameter, the non-type
	 template-parameter is used in an expression in the function
	 parameter-list and, if the corresponding template-argument is
	 deduced, the template-argument type shall match the type of the
	 template-parameter exactly, except that a template-argument
	 deduced from an array bound may be of any integral type.
	 The non-type parameter might use already deduced type parameters.  */
      tparm = TREE_TYPE (parm);
      if (TEMPLATE_PARM_LEVEL (parm) > TMPL_ARGS_DEPTH (targs))
	/* We don't have enough levels of args to do any substitution.  This
	   can happen in the context of -fnew-ttp-matching.  */;
      else
	{
	  ++processing_template_decl;
	  tparm = tsubst (tparm, targs, tf_none, NULL_TREE);
	  --processing_template_decl;

	  if (tree a = type_uses_auto (tparm))
	    {
	      tparm = do_auto_deduction (tparm, arg, a, complain, adc_unify);
	      if (tparm == error_mark_node)
		return 1;
	    }
	}

      if (!TREE_TYPE (arg))
	/* Template-parameter dependent expression.  Just accept it for now.
	   It will later be processed in convert_template_argument.  */
	;
      else if (same_type_ignoring_top_level_qualifiers_p
	       (non_reference (TREE_TYPE (arg)),
		non_reference (tparm)))
	/* OK.  Ignore top-level quals here because a class-type template
	   parameter object is const.  */;
      else if ((strict & UNIFY_ALLOW_INTEGER)
	       && CP_INTEGRAL_TYPE_P (tparm))
	/* Convert the ARG to the type of PARM; the deduced non-type
	   template argument must exactly match the types of the
	   corresponding parameter.  */
	arg = fold (build_nop (tparm, arg));
      else if (uses_template_parms (tparm))
	{
	  /* We haven't deduced the type of this parameter yet.  */
	  if (cxx_dialect >= cxx17
	      /* We deduce from array bounds in try_array_deduction.  */
	      && !(strict & UNIFY_ALLOW_INTEGER))
	    {
	      /* Deduce it from the non-type argument.  */
	      tree atype = TREE_TYPE (arg);
	      RECUR_AND_CHECK_FAILURE (tparms, targs,
				       tparm, atype,
				       UNIFY_ALLOW_NONE, explain_p);
	    }
	  else
	    /* Try again later.  */
	    return unify_success (explain_p);
	}
      else
	return unify_type_mismatch (explain_p, tparm, TREE_TYPE (arg));

      /* If ARG is a parameter pack or an expansion, we cannot unify
	 against it unless PARM is also a parameter pack.  */
      if ((template_parameter_pack_p (arg) || PACK_EXPANSION_P (arg))
	  && !TEMPLATE_PARM_PARAMETER_PACK (parm))
	return unify_parameter_pack_mismatch (explain_p, parm, arg);

      {
	bool removed_attr = false;
	arg = strip_typedefs_expr (arg, &removed_attr);
      }
      /* Record the deduction.  */
      TREE_VEC_ELT (INNERMOST_TEMPLATE_ARGS (targs), idx) = arg;
      return unify_success (explain_p);

    case PTRMEM_CST:
      {
	/* A pointer-to-member constant can be unified only with
	   another constant.  */
	if (TREE_CODE (arg) != PTRMEM_CST)
	  return unify_ptrmem_cst_mismatch (explain_p, parm, arg);

	/* Just unify the class member.  It would be useless (and possibly
	   wrong, depending on the strict flags) to unify also
	   PTRMEM_CST_CLASS, because we want to be sure that both parm and
	   arg refer to the same variable, even if through different
	   classes.  For instance:

	     struct A { int x; };
	     struct B : A { };

	   Unification of &A::x and &B::x must succeed.  */
	return unify (tparms, targs, PTRMEM_CST_MEMBER (parm),
		      PTRMEM_CST_MEMBER (arg), strict, explain_p);
      }

    case POINTER_TYPE:
      {
	if (!TYPE_PTR_P (arg))
	  return unify_type_mismatch (explain_p, parm, arg);

	/* [temp.deduct.call]

	   A can be another pointer or pointer to member type that can
	   be converted to the deduced A via a qualification
	   conversion (_conv.qual_).

	   We pass down STRICT here rather than UNIFY_ALLOW_NONE.
	   This will allow for additional cv-qualification of the
	   pointed-to types if appropriate.  */

	if (TREE_CODE (TREE_TYPE (arg)) == RECORD_TYPE)
	  /* The derived-to-base conversion only persists through one
	     level of pointers.  */
	  strict |= (strict_in & UNIFY_ALLOW_DERIVED);

	return unify (tparms, targs, TREE_TYPE (parm),
		      TREE_TYPE (arg), strict, explain_p);
      }

    case REFERENCE_TYPE:
      if (!TYPE_REF_P (arg))
	return unify_type_mismatch (explain_p, parm, arg);
      /* Unify the referent types; only the MORE_CV_QUAL bit survives.  */
      return unify (tparms, targs, TREE_TYPE (parm), TREE_TYPE (arg),
		    strict & UNIFY_ALLOW_MORE_CV_QUAL, explain_p);

    case ARRAY_TYPE:
      if (TREE_CODE (arg) != ARRAY_TYPE)
	return unify_type_mismatch (explain_p, parm, arg);
      /* Both must have a domain, or neither (i.e. T[] vs T[N] fails).  */
      if ((TYPE_DOMAIN (parm) == NULL_TREE)
	  != (TYPE_DOMAIN (arg) == NULL_TREE))
	return unify_type_mismatch (explain_p, parm, arg);
      RECUR_AND_CHECK_FAILURE (tparms, targs, TREE_TYPE (parm), TREE_TYPE (arg),
			       strict & UNIFY_ALLOW_MORE_CV_QUAL, explain_p);
      if (TYPE_DOMAIN (parm) != NULL_TREE)
	return unify_array_domain (tparms, targs, TYPE_DOMAIN (parm),
				   TYPE_DOMAIN (arg), explain_p);
      return unify_success (explain_p);

    case REAL_TYPE:
    case COMPLEX_TYPE:
    case VECTOR_TYPE:
    case INTEGER_TYPE:
    case BOOLEAN_TYPE:
    case ENUMERAL_TYPE:
    case VOID_TYPE:
    case NULLPTR_TYPE:
      if (TREE_CODE (arg) != TREE_CODE (parm))
	return unify_type_mismatch (explain_p, parm, arg);

      /* We have already checked cv-qualification at the top of the
	 function.  */
      if (!same_type_ignoring_top_level_qualifiers_p (arg, parm))
	return unify_type_mismatch (explain_p, parm, arg);

      /* As far as unification is concerned, this wins.  Later checks
	 will invalidate it if necessary.  */
      return unify_success (explain_p);

      /* Types INTEGER_CST and MINUS_EXPR can come from array bounds.  */
      /* Type INTEGER_CST can come from ordinary constant template args.  */
    case INTEGER_CST:
      while (CONVERT_EXPR_P (arg))
	arg = TREE_OPERAND (arg, 0);

      if (TREE_CODE (arg) != INTEGER_CST)
	return unify_template_argument_mismatch (explain_p, parm, arg);
      return (tree_int_cst_equal (parm, arg)
	      ? unify_success (explain_p)
	      : unify_template_argument_mismatch (explain_p, parm, arg));

    case TREE_VEC:
      {
	/* Unify two template argument lists element-wise, possibly
	   with a trailing pack expansion in PARM.  */
	int i, len, argslen;
	int parm_variadic_p = 0;

	if (TREE_CODE (arg) != TREE_VEC)
	  return unify_template_argument_mismatch (explain_p, parm, arg);

	len = TREE_VEC_LENGTH (parm);
	argslen = TREE_VEC_LENGTH (arg);

	/* Check for pack expansions in the parameters.  */
	for (i = 0; i < len; ++i)
	  {
	    if (PACK_EXPANSION_P (TREE_VEC_ELT (parm, i)))
	      {
		if (i == len - 1)
		  /* We can unify against something with a trailing
		     parameter pack.  */
		  parm_variadic_p = 1;
		else
		  /* [temp.deduct.type]/9: If the template argument list of
		     P contains a pack expansion that is not the last
		     template argument, the entire template argument list
		     is a non-deduced context.  */
		  return unify_success (explain_p);
	      }
	  }

	/* If we don't have enough arguments to satisfy the parameters
	   (not counting the pack expression at the end), or we have
	   too many arguments for a parameter list that doesn't end in
	   a pack expression, we can't unify.  */
	if (parm_variadic_p
	    ? argslen < len - parm_variadic_p
	    : argslen != len)
	  return unify_arity (explain_p, TREE_VEC_LENGTH (arg), len);

	/* Unify all of the parameters that precede the (optional)
	   pack expression.  */
	for (i = 0; i < len - parm_variadic_p; ++i)
	  {
	    RECUR_AND_CHECK_FAILURE (tparms, targs,
				     TREE_VEC_ELT (parm, i),
				     TREE_VEC_ELT (arg, i),
				     UNIFY_ALLOW_NONE, explain_p);
	  }
	if (parm_variadic_p)
	  return unify_pack_expansion (tparms, targs, parm, arg,
				       DEDUCE_EXACT,
				       /*subr=*/true, explain_p);
	return unify_success (explain_p);
      }

    case RECORD_TYPE:
    case UNION_TYPE:
      if (TREE_CODE (arg) != TREE_CODE (parm))
	return unify_type_mismatch (explain_p, parm, arg);

      if (TYPE_PTRMEMFUNC_P (parm))
	{
	  if (!TYPE_PTRMEMFUNC_P (arg))
	    return unify_type_mismatch (explain_p, parm, arg);

	  return unify (tparms, targs,
			TYPE_PTRMEMFUNC_FN_TYPE (parm),
			TYPE_PTRMEMFUNC_FN_TYPE (arg),
			strict, explain_p);
	}
      else if (TYPE_PTRMEMFUNC_P (arg))
	return unify_type_mismatch (explain_p, parm, arg);

      if (CLASSTYPE_TEMPLATE_INFO (parm))
	{
	  tree t = NULL_TREE;

	  if (strict_in & UNIFY_ALLOW_DERIVED)
	    {
	      /* First, we try to unify the PARM and ARG directly.  */
	      t = try_class_unification (tparms, targs,
					 parm, arg, explain_p);

	      if (!t)
		{
		  /* Fallback to the special case allowed in
		     [temp.deduct.call]:

		       If P is a class, and P has the form
		       template-id, then A can be a derived class of
		       the deduced A.  Likewise, if P is a pointer to
		       a class of the form template-id, A can be a
		       pointer to a derived class pointed to by the
		       deduced A.  */
		  enum template_base_result r;
		  r = get_template_base (tparms, targs, parm, arg,
					 explain_p, &t);

		  if (!t)
		    {
		      /* Don't give the derived diagnostic if we're
			 already dealing with the same template.  */
		      bool same_template
			= (CLASSTYPE_TEMPLATE_INFO (arg)
			   && (CLASSTYPE_TI_TEMPLATE (parm)
			       == CLASSTYPE_TI_TEMPLATE (arg)));
		      return unify_no_common_base (explain_p && !same_template,
						   r, parm, arg);
		    }
		}
	    }
	  else if (CLASSTYPE_TEMPLATE_INFO (arg)
		   && (CLASSTYPE_TI_TEMPLATE (parm)
		       == CLASSTYPE_TI_TEMPLATE (arg)))
	    /* Perhaps PARM is something like S<U> and ARG is S<int>.
	       Then, we should unify `int' and `U'.  */
	    t = arg;
	  else
	    /* There's no chance of unification succeeding.  */
	    return unify_type_mismatch (explain_p, parm, arg);

	  return unify (tparms, targs, CLASSTYPE_TI_ARGS (parm),
			CLASSTYPE_TI_ARGS (t), UNIFY_ALLOW_NONE, explain_p);
	}
      else if (!same_type_ignoring_top_level_qualifiers_p (parm, arg))
	return unify_type_mismatch (explain_p, parm, arg);
      return unify_success (explain_p);

    case METHOD_TYPE:
    case FUNCTION_TYPE:
      {
	unsigned int nargs;
	tree *args;
	tree a;
	unsigned int i;

	if (TREE_CODE (arg) != TREE_CODE (parm))
	  return unify_type_mismatch (explain_p, parm, arg);

	/* CV qualifications for methods can never be deduced, they must
	   match exactly.  We need to check them explicitly here,
	   because type_unification_real treats them as any other
	   cv-qualified parameter.  */
	if (TREE_CODE (parm) == METHOD_TYPE
	    && (!check_cv_quals_for_unify
		(UNIFY_ALLOW_NONE,
		 class_of_this_parm (arg),
		 class_of_this_parm (parm))))
	  return unify_cv_qual_mismatch (explain_p, parm, arg);
	if (TREE_CODE (arg) == FUNCTION_TYPE
	    && type_memfn_quals (parm) != type_memfn_quals (arg))
	  return unify_cv_qual_mismatch (explain_p, parm, arg);
	/* Ref-qualifiers must also match exactly.  */
	if (type_memfn_rqual (parm) != type_memfn_rqual (arg))
	  return unify_type_mismatch (explain_p, parm, arg);

	RECUR_AND_CHECK_FAILURE (tparms, targs, TREE_TYPE (parm),
				 TREE_TYPE (arg), UNIFY_ALLOW_NONE, explain_p);

	/* Collect ARG's parameter types into a flat array, stopping at
	   the void_list_node terminator.  */
	nargs = list_length (TYPE_ARG_TYPES (arg));
	args = XALLOCAVEC (tree, nargs);
	for (a = TYPE_ARG_TYPES (arg), i = 0;
	     a != NULL_TREE && a != void_list_node;
	     a = TREE_CHAIN (a), ++i)
	  args[i] = TREE_VALUE (a);
	nargs = i;

	if (type_unification_real (tparms, targs, TYPE_ARG_TYPES (parm),
				   args, nargs, 1, DEDUCE_EXACT,
				   NULL, explain_p))
	  return 1;

	if (flag_noexcept_type)
	  {
	    /* Under -fnoexcept-type the exception specification is part
	       of the function type; deduce from or compare it.  */
	    tree pspec = TYPE_RAISES_EXCEPTIONS (parm);
	    tree aspec = canonical_eh_spec (TYPE_RAISES_EXCEPTIONS (arg));
	    if (pspec == NULL_TREE) pspec = noexcept_false_spec;
	    if (aspec == NULL_TREE) aspec = noexcept_false_spec;
	    if (TREE_PURPOSE (pspec) && TREE_PURPOSE (aspec)
		&& uses_template_parms (TREE_PURPOSE (pspec)))
	      RECUR_AND_CHECK_FAILURE (tparms, targs, TREE_PURPOSE (pspec),
				       TREE_PURPOSE (aspec),
				       UNIFY_ALLOW_NONE, explain_p);
	    else if (nothrow_spec_p (pspec) && !nothrow_spec_p (aspec))
	      return unify_type_mismatch (explain_p, parm, arg);
	  }

	return 0;
      }

    case OFFSET_TYPE:
      /* Unify a pointer to member with a pointer to member function, which
	 deduces the type of the member as a function type. */
      if (TYPE_PTRMEMFUNC_P (arg))
	{
	  /* Check top-level cv qualifiers */
	  if (!check_cv_quals_for_unify (UNIFY_ALLOW_NONE, arg, parm))
	    return unify_cv_qual_mismatch (explain_p, parm, arg);

	  RECUR_AND_CHECK_FAILURE (tparms, targs, TYPE_OFFSET_BASETYPE (parm),
				   TYPE_PTRMEMFUNC_OBJECT_TYPE (arg),
				   UNIFY_ALLOW_NONE, explain_p);

	  /* Determine the type of the function we are unifying against. */
	  tree fntype = static_fn_type (arg);

	  return unify (tparms, targs, TREE_TYPE (parm), fntype, strict, explain_p);
	}

      if (TREE_CODE (arg) != OFFSET_TYPE)
	return unify_type_mismatch (explain_p, parm, arg);
      RECUR_AND_CHECK_FAILURE (tparms, targs, TYPE_OFFSET_BASETYPE (parm),
			       TYPE_OFFSET_BASETYPE (arg),
			       UNIFY_ALLOW_NONE, explain_p);
      return unify (tparms, targs, TREE_TYPE (parm), TREE_TYPE (arg),
		    strict, explain_p);

    case CONST_DECL:
      if (DECL_TEMPLATE_PARM_P (parm))
	return unify (tparms, targs, DECL_INITIAL (parm), arg, strict, explain_p);
      if (arg != scalar_constant_value (parm))
	return unify_template_argument_mismatch (explain_p, parm, arg);
      return unify_success (explain_p);

    case FIELD_DECL:
    case TEMPLATE_DECL:
      /* Matched cases are handled by the ARG == PARM test above.  */
      return unify_template_argument_mismatch (explain_p, parm, arg);

    case VAR_DECL:
      /* We might get a variable as a non-type template argument in parm if the
	 corresponding parameter is type-dependent.  Make any necessary
	 adjustments based on whether arg is a reference.  */
      if (CONSTANT_CLASS_P (arg))
	parm = fold_non_dependent_expr (parm, complain);
      else if (REFERENCE_REF_P (arg))
	{
	  tree sub = TREE_OPERAND (arg, 0);
	  STRIP_NOPS (sub);
	  if (TREE_CODE (sub) == ADDR_EXPR)
	    arg = TREE_OPERAND (sub, 0);
	}
      /* Now use the normal expression code to check whether they match.  */
      goto expr;

    case TYPE_ARGUMENT_PACK:
    case NONTYPE_ARGUMENT_PACK:
      /* Unify the underlying TREE_VECs of the two packs.  */
      return unify (tparms, targs, ARGUMENT_PACK_ARGS (parm),
		    ARGUMENT_PACK_ARGS (arg), strict, explain_p);

    case TYPEOF_TYPE:
    case DECLTYPE_TYPE:
    case UNDERLYING_TYPE:
      /* Cannot deduce anything from TYPEOF_TYPE, DECLTYPE_TYPE,
	 or UNDERLYING_TYPE nodes.  */
      return unify_success (explain_p);

    case ERROR_MARK:
      /* Unification fails if we hit an error node.  */
      return unify_invalid (explain_p);

    case INDIRECT_REF:
      if (REFERENCE_REF_P (parm))
	{
	  /* Look through the implicit INDIRECT_REF on both sides,
	     re-wrapping any pack expansion around the stripped ARG.  */
	  bool pexp = PACK_EXPANSION_P (arg);
	  if (pexp)
	    arg = PACK_EXPANSION_PATTERN (arg);
	  if (REFERENCE_REF_P (arg))
	    arg = TREE_OPERAND (arg, 0);
	  if (pexp)
	    arg = make_pack_expansion (arg, complain);
	  return unify (tparms, targs, TREE_OPERAND (parm, 0), arg,
			strict, explain_p);
	}
      /* FALLTHRU */

    default:
      /* An unresolved overload is a nondeduced context.  */
      if (is_overloaded_fn (parm) || type_unknown_p (parm))
	return unify_success (explain_p);
      gcc_assert (EXPR_P (parm)
		  || COMPOUND_LITERAL_P (parm)
		  || TREE_CODE (parm) == TRAIT_EXPR);
    expr:
      /* We must be looking at an expression.  This can happen with
	 something like:

	   template <int I>
	   void foo(S<I>, S<I + 2>);

	 or

	   template<typename T>
	   void foo(A<T, T{}>);

	 This is a "non-deduced context":

	   [deduct.type]

	   The non-deduced contexts are:

	   --A non-type template argument or an array bound in which
	     a subexpression references a template parameter.

	 In these cases, we assume deduction succeeded, but don't
	 actually infer any unifications.  */

      if (!uses_template_parms (parm)
	  && !template_args_equal (parm, arg))
	return unify_expression_unequal (explain_p, parm, arg);
      else
	return unify_success (explain_p);
    }
}
#undef RECUR_AND_CHECK_FAILURE
/* Note that DECL can be defined in this translation unit, if
   required.  Clears the "really extern" status on DECL itself and on
   each of its clones (e.g. the cloned variants of constructors and
   destructors), so that a body may be emitted here.  */
static void
mark_definable (tree decl)
{
  DECL_NOT_REALLY_EXTERN (decl) = 1;

  tree clone;
  FOR_EACH_CLONE (clone, decl)
    DECL_NOT_REALLY_EXTERN (clone) = 1;
}
/* Called if RESULT is explicitly instantiated, or is a member of an
   explicitly instantiated class.  Records the explicit instantiation
   and adjusts RESULT's linkage.  EXTERN_P nonzero means this was an
   `extern template' instantiation, so RESULT must not be emitted in
   this translation unit.  */
void
mark_decl_instantiated (tree result, int extern_p)
{
  SET_DECL_EXPLICIT_INSTANTIATION (result);

  /* If this entity has already been written out, it's too late to
     make any modifications.  */
  if (TREE_ASM_WRITTEN (result))
    return;

  /* consteval functions are never emitted.  */
  if (TREE_CODE (result) == FUNCTION_DECL
      && DECL_IMMEDIATE_FUNCTION_P (result))
    return;

  /* For anonymous namespace we don't need to do anything.  */
  if (decl_anon_ns_mem_p (result))
    {
      gcc_assert (!TREE_PUBLIC (result));
      return;
    }

  if (TREE_CODE (result) != FUNCTION_DECL)
    /* The TREE_PUBLIC flag for function declarations will have been
       set correctly by tsubst.  */
    TREE_PUBLIC (result) = 1;

  /* This might have been set by an earlier implicit instantiation.  */
  DECL_COMDAT (result) = 0;

  if (extern_p)
    DECL_NOT_REALLY_EXTERN (result) = 0;
  else
    {
      /* Non-extern: the definition will be emitted here.  */
      mark_definable (result);
      mark_needed (result);
      /* Always make artificials weak.  */
      if (DECL_ARTIFICIAL (result) && flag_weak)
	comdat_linkage (result);
      /* For WIN32 we also want to put explicit instantiations in
	 linkonce sections.  */
      else if (TREE_PUBLIC (result))
	maybe_make_one_only (result);
      if (TREE_CODE (result) == FUNCTION_DECL
	  && DECL_TEMPLATE_INSTANTIATED (result))
	/* If the function has already been instantiated, clear DECL_EXTERNAL,
	   since start_preparsed_function wouldn't have if we had an earlier
	   extern explicit instantiation.  */
	DECL_EXTERNAL (result) = 0;
    }

  /* If EXTERN_P, then this function will not be emitted -- unless
     followed by an explicit instantiation, at which point its linkage
     will be adjusted.  If !EXTERN_P, then this function will be
     emitted here.  In neither circumstance do we want
     import_export_decl to adjust the linkage.  */
  DECL_INTERFACE_KNOWN (result) = 1;
}
/* Subroutine of more_specialized_fn: check whether TARGS is missing any
   important template arguments.  If any are missing, we check whether
   they're important by using error_mark_node for substituting into any
   args that were used for partial ordering (the ones between ARGS and END)
   and seeing if it bubbles up.  */
static bool
check_undeduced_parms (tree targs, tree args, tree end)
{
  bool any_undeduced = false;

  /* Fill every still-empty slot with error_mark_node, so that a later
     substitution fails iff the slot is actually referenced.  */
  for (int ix = TREE_VEC_LENGTH (targs); ix-- > 0;)
    if (TREE_VEC_ELT (targs, ix) == NULL_TREE)
      {
	TREE_VEC_ELT (targs, ix) = error_mark_node;
	any_undeduced = true;
      }

  if (!any_undeduced)
    return false;

  /* If the error bubbles up out of the substitution, an undeduced
     parameter was used in the partial-ordering types.  */
  return (tsubst_arg_types (args, targs, end, tf_none, NULL_TREE)
	  == error_mark_node);
}
/* Given two function templates PAT1 and PAT2, return:
   1 if PAT1 is more specialized than PAT2 as described in [temp.func.order].
   -1 if PAT2 is more specialized than PAT1.
   0 if neither is more specialized.
   LEN indicates the number of parameters we should consider
   (defaulted parameters should not be considered).
   The 1998 std underspecified function template partial ordering, and
   DR214 addresses the issue.  We take pairs of arguments, one from
   each of the templates, and deduce them against each other.  One of
   the templates will be more specialized if all the *other*
   template's arguments deduce against its arguments and at least one
   of its arguments *does* *not* deduce against the other template's
   corresponding argument.  Deduction is done as for class templates.
   The arguments used in deduction have reference and top level cv
   qualifiers removed.  Iff both arguments were originally reference
   types *and* deduction succeeds in both directions, an lvalue reference
   wins against an rvalue reference and otherwise the template
   with the more cv-qualified argument wins for that pairing (if
   neither is more cv-qualified, they both are equal).  Unlike regular
   deduction, after all the arguments have been deduced in this way,
   we do *not* verify the deduced template argument values can be
   substituted into non-deduced contexts.
   The logic can be a bit confusing here, because we look at deduce1 and
   targs1 to see if pat2 is at least as specialized, and vice versa; if we
   can find template arguments for pat1 to make arg1 look like arg2, that
   means that arg2 is at least as specialized as arg1.  */
int
more_specialized_fn (tree pat1, tree pat2, int len)
{
  tree decl1 = DECL_TEMPLATE_RESULT (pat1);
  tree decl2 = DECL_TEMPLATE_RESULT (pat2);
  tree targs1 = make_tree_vec (DECL_NTPARMS (pat1));
  tree targs2 = make_tree_vec (DECL_NTPARMS (pat2));
  tree tparms1 = DECL_INNERMOST_TEMPLATE_PARMS (pat1);
  tree tparms2 = DECL_INNERMOST_TEMPLATE_PARMS (pat2);
  tree args1 = TYPE_ARG_TYPES (TREE_TYPE (decl1));
  tree args2 = TYPE_ARG_TYPES (TREE_TYPE (decl2));
  tree origs1, origs2;
  /* LOSE1 set means PAT1 is *not* more specialized; LOSE2 likewise for
     PAT2.  The final answer is derived from the pair at the bottom.  */
  bool lose1 = false;
  bool lose2 = false;

  /* Remove the this parameter from non-static member functions.  If
     one is a non-static member function and the other is not a static
     member function, remove the first parameter from that function
     also.  This situation occurs for operator functions where we
     locate both a member function (with this pointer) and non-member
     operator (with explicit first operand).  */
  if (DECL_NONSTATIC_MEMBER_FUNCTION_P (decl1))
    {
      len--; /* LEN is the number of significant arguments for DECL1 */
      args1 = TREE_CHAIN (args1);
      if (!DECL_STATIC_FUNCTION_P (decl2))
	args2 = TREE_CHAIN (args2);
    }
  else if (DECL_NONSTATIC_MEMBER_FUNCTION_P (decl2))
    {
      args2 = TREE_CHAIN (args2);
      if (!DECL_STATIC_FUNCTION_P (decl1))
	{
	  len--;
	  args1 = TREE_CHAIN (args1);
	}
    }

  /* If only one is a conversion operator, they are unordered.  */
  if (DECL_CONV_FN_P (decl1) != DECL_CONV_FN_P (decl2))
    return 0;

  /* Consider the return type for a conversion function */
  if (DECL_CONV_FN_P (decl1))
    {
      args1 = tree_cons (NULL_TREE, TREE_TYPE (TREE_TYPE (decl1)), args1);
      args2 = tree_cons (NULL_TREE, TREE_TYPE (TREE_TYPE (decl2)), args2);
      len++;
    }

  /* Deduction below may meet dependent types; make the dependency
     predicates behave as if inside a template.  */
  processing_template_decl++;

  origs1 = args1;
  origs2 = args2;

  while (len--
	 /* Stop when an ellipsis is seen.  */
	 && args1 != NULL_TREE && args2 != NULL_TREE)
    {
      tree arg1 = TREE_VALUE (args1);
      tree arg2 = TREE_VALUE (args2);
      int deduce1, deduce2;
      /* -1 means "this argument was not a reference type".  */
      int quals1 = -1;
      int quals2 = -1;
      /* 0 = not a reference, 1 = lvalue reference, 2 = rvalue
	 reference (see TYPE_REF_IS_RVALUE + 1 below).  */
      int ref1 = 0;
      int ref2 = 0;

      if (TREE_CODE (arg1) == TYPE_PACK_EXPANSION
	  && TREE_CODE (arg2) == TYPE_PACK_EXPANSION)
	{
	  /* When both arguments are pack expansions, we need only
	     unify the patterns themselves.  */
	  arg1 = PACK_EXPANSION_PATTERN (arg1);
	  arg2 = PACK_EXPANSION_PATTERN (arg2);

	  /* This is the last comparison we need to do.  */
	  len = 0;
	}

      /* DR 1847: If a particular P contains no template-parameters that
	 participate in template argument deduction, that P is not used to
	 determine the ordering.  */
      if (!uses_deducible_template_parms (arg1)
	  && !uses_deducible_template_parms (arg2))
	goto next;

      /* Strip reference-ness and remember it plus the referent's
	 cv-qualifiers for the tie-break rules below.  */
      if (TYPE_REF_P (arg1))
	{
	  ref1 = TYPE_REF_IS_RVALUE (arg1) + 1;
	  arg1 = TREE_TYPE (arg1);
	  quals1 = cp_type_quals (arg1);
	}

      if (TYPE_REF_P (arg2))
	{
	  ref2 = TYPE_REF_IS_RVALUE (arg2) + 1;
	  arg2 = TREE_TYPE (arg2);
	  quals2 = cp_type_quals (arg2);
	}

      /* Drop top-level cv-qualifiers as the rule requires.  */
      arg1 = TYPE_MAIN_VARIANT (arg1);
      arg2 = TYPE_MAIN_VARIANT (arg2);

      if (TREE_CODE (arg1) == TYPE_PACK_EXPANSION)
	{
	  int i, len2 = remaining_arguments (args2);
	  tree parmvec = make_tree_vec (1);
	  tree argvec = make_tree_vec (len2);
	  tree ta = args2;

	  /* Setup the parameter vector, which contains only ARG1.  */
	  TREE_VEC_ELT (parmvec, 0) = arg1;

	  /* Setup the argument vector, which contains the remaining
	     arguments.  */
	  for (i = 0; i < len2; i++, ta = TREE_CHAIN (ta))
	    TREE_VEC_ELT (argvec, i) = TREE_VALUE (ta);

	  deduce1 = (unify_pack_expansion (tparms1, targs1, parmvec,
					   argvec, DEDUCE_EXACT,
					   /*subr=*/true, /*explain_p=*/false)
		     == 0);

	  /* We cannot deduce in the other direction, because ARG1 is
	     a pack expansion but ARG2 is not.  */
	  deduce2 = 0;
	}
      else if (TREE_CODE (arg2) == TYPE_PACK_EXPANSION)
	{
	  int i, len1 = remaining_arguments (args1);
	  tree parmvec = make_tree_vec (1);
	  tree argvec = make_tree_vec (len1);
	  tree ta = args1;

	  /* Setup the parameter vector, which contains only ARG1.  */
	  TREE_VEC_ELT (parmvec, 0) = arg2;

	  /* Setup the argument vector, which contains the remaining
	     arguments.  */
	  for (i = 0; i < len1; i++, ta = TREE_CHAIN (ta))
	    TREE_VEC_ELT (argvec, i) = TREE_VALUE (ta);

	  deduce2 = (unify_pack_expansion (tparms2, targs2, parmvec,
					   argvec, DEDUCE_EXACT,
					   /*subr=*/true, /*explain_p=*/false)
		     == 0);

	  /* We cannot deduce in the other direction, because ARG2 is
	     a pack expansion but ARG1 is not.*/
	  deduce1 = 0;
	}
      else
	{
	  /* The normal case, where neither argument is a pack
	     expansion.  */
	  deduce1 = (unify (tparms1, targs1, arg1, arg2,
			    UNIFY_ALLOW_NONE, /*explain_p=*/false)
		     == 0);
	  deduce2 = (unify (tparms2, targs2, arg2, arg1,
			    UNIFY_ALLOW_NONE, /*explain_p=*/false)
		     == 0);
	}

      /* If we couldn't deduce arguments for tparms1 to make arg1 match
	 arg2, then arg2 is not as specialized as arg1.  */
      if (!deduce1)
	lose2 = true;
      if (!deduce2)
	lose1 = true;

      /* "If, for a given type, deduction succeeds in both directions
	 (i.e., the types are identical after the transformations above)
	 and both P and A were reference types (before being replaced with
	 the type referred to above):
	 - if the type from the argument template was an lvalue reference and
	 the type from the parameter template was not, the argument type is
	 considered to be more specialized than the other; otherwise,
	 - if the type from the argument template is more cv-qualified
	 than the type from the parameter template (as described above),
	 the argument type is considered to be more specialized than the other;
	 otherwise,
	 - neither type is more specialized than the other."  */
      if (deduce1 && deduce2)
	{
	  if (ref1 && ref2 && ref1 != ref2)
	    {
	      /* ref encoding: larger value means rvalue reference, and
		 the lvalue reference wins this pairing.  */
	      if (ref1 > ref2)
		lose1 = true;
	      else
		lose2 = true;
	    }
	  else if (quals1 != quals2 && quals1 >= 0 && quals2 >= 0)
	    {
	      if ((quals1 & quals2) == quals2)
		lose2 = true;
	      if ((quals1 & quals2) == quals1)
		lose1 = true;
	    }
	}

      if (lose1 && lose2)
	/* We've failed to deduce something in either direction.
	   These must be unordered.  */
	break;

    next:

      if (TREE_CODE (arg1) == TYPE_PACK_EXPANSION
	  || TREE_CODE (arg2) == TYPE_PACK_EXPANSION)
	/* We have already processed all of the arguments in our
	   handing of the pack expansion type.  */
	len = 0;

      args1 = TREE_CHAIN (args1);
      args2 = TREE_CHAIN (args2);
    }

  /* "In most cases, all template parameters must have values in order for
     deduction to succeed, but for partial ordering purposes a template
     parameter may remain without a value provided it is not used in the
     types being used for partial ordering."
     Thus, if we are missing any of the targs1 we need to substitute into
     origs1, then pat2 is not as specialized as pat1.  This can happen when
     there is a nondeduced context.  */
  if (!lose2 && check_undeduced_parms (targs1, origs1, args1))
    lose2 = true;
  if (!lose1 && check_undeduced_parms (targs2, origs2, args2))
    lose1 = true;

  processing_template_decl--;

  /* If both deductions succeed, the partial ordering selects the more
     constrained template.  */
  /* P2113: If the corresponding template-parameters of the
     template-parameter-lists are not equivalent ([temp.over.link]) or if
     the function parameters that positionally correspond between the two
     templates are not of the same type, neither template is more
     specialized than the other.  */
  if (!lose1 && !lose2
      && comp_template_parms (DECL_TEMPLATE_PARMS (pat1),
			      DECL_TEMPLATE_PARMS (pat2))
      && compparms (origs1, origs2))
    {
      int winner = more_constrained (decl1, decl2);
      if (winner > 0)
	lose2 = true;
      else if (winner < 0)
	lose1 = true;
    }

  /* All things being equal, if the next argument is a pack expansion
     for one function but not for the other, prefer the
     non-variadic function.  FIXME this is bogus; see c++/41958.  */
  if (lose1 == lose2
      && args1 && TREE_VALUE (args1)
      && args2 && TREE_VALUE (args2))
    {
      lose1 = TREE_CODE (TREE_VALUE (args1)) == TYPE_PACK_EXPANSION;
      lose2 = TREE_CODE (TREE_VALUE (args2)) == TYPE_PACK_EXPANSION;
    }

  if (lose1 == lose2)
    return 0;
  else if (!lose1)
    return 1;
  else
    return -1;
}
/* Determine which of two partial specializations of TMPL is more
   specialized.
   PAT1 is a TREE_LIST whose TREE_VALUE is the TEMPLATE_DECL corresponding
   to the first partial specialization.  The TREE_PURPOSE is the
   innermost set of template parameters for the partial
   specialization.  PAT2 is similar, but for the second template.
   Return 1 if the first partial specialization is more specialized;
   -1 if the second is more specialized; 0 if neither is more
   specialized.
   See [temp.class.order] for information about determining which of
   two templates is more specialized.  */
static int
more_specialized_partial_spec (tree tmpl, tree pat1, tree pat2)
{
  tree targs;
  int winner = 0;
  bool any_deductions = false;

  tree tmpl1 = TREE_VALUE (pat1);
  tree tmpl2 = TREE_VALUE (pat2);
  tree specargs1 = TI_ARGS (get_template_info (DECL_TEMPLATE_RESULT (tmpl1)));
  tree specargs2 = TI_ARGS (get_template_info (DECL_TEMPLATE_RESULT (tmpl2)));

  /* Just like what happens for functions, if we are ordering between
     different template specializations, we may encounter dependent
     types in the arguments, and we need our dependency check functions
     to behave correctly.  */
  ++processing_template_decl;
  /* If TMPL1's pattern can produce TMPL2's arguments, TMPL2 is at least
     as specialized as TMPL1, so TMPL1 moves toward losing (-1).  */
  targs = get_partial_spec_bindings (tmpl, tmpl1, specargs2);
  if (targs)
    {
      --winner;
      any_deductions = true;
    }

  /* And symmetrically in the other direction (+1).  */
  targs = get_partial_spec_bindings (tmpl, tmpl2, specargs1);
  if (targs)
    {
      ++winner;
      any_deductions = true;
    }
  --processing_template_decl;

  /* If both deductions succeed, the partial ordering selects the more
     constrained template.  */
  if (!winner && any_deductions)
    winner = more_constrained (tmpl1, tmpl2);

  /* In the case of a tie where at least one of the templates
     has a parameter pack at the end, the template with the most
     non-packed parameters wins.  */
  if (winner == 0
      && any_deductions
      && (template_args_variadic_p (TREE_PURPOSE (pat1))
	  || template_args_variadic_p (TREE_PURPOSE (pat2))))
    {
      tree args1 = INNERMOST_TEMPLATE_ARGS (TREE_PURPOSE (pat1));
      tree args2 = INNERMOST_TEMPLATE_ARGS (TREE_PURPOSE (pat2));
      int len1 = TREE_VEC_LENGTH (args1);
      int len2 = TREE_VEC_LENGTH (args2);

      /* We don't count the pack expansion at the end.  */
      if (template_args_variadic_p (TREE_PURPOSE (pat1)))
	--len1;
      if (template_args_variadic_p (TREE_PURPOSE (pat2)))
	--len2;

      if (len1 > len2)
	return 1;
      else if (len1 < len2)
	return -1;
    }

  return winner;
}
/* Return the template arguments that will produce the function signature
   DECL from the function template FN, with the explicit template
   arguments EXPLICIT_ARGS.  If CHECK_RETTYPE is true, the return type must
   also match.  Return NULL_TREE if no satisfactory arguments could be
   found.  */
static tree
get_bindings (tree fn, tree decl, tree explicit_args, bool check_rettype)
{
  int ntparms = DECL_NTPARMS (fn);
  tree targs = make_tree_vec (ntparms);
  tree decl_type = TREE_TYPE (decl);
  tree decl_arg_types;
  tree *args;
  unsigned int nargs, ix;
  tree arg;

  gcc_assert (decl != DECL_TEMPLATE_RESULT (fn));

  /* Never do unification on the 'this' parameter.  */
  decl_arg_types = skip_artificial_parms_for (decl,
					      TYPE_ARG_TYPES (decl_type));

  nargs = list_length (decl_arg_types);
  args = XALLOCAVEC (tree, nargs);

  /* Flatten DECL's parameter-type TREE_LIST into an array, stopping at
     the terminating void_list_node (if any); IX ends as the count of
     real parameters actually passed to unification.  */
  for (arg = decl_arg_types, ix = 0;
       arg != NULL_TREE && arg != void_list_node;
       arg = TREE_CHAIN (arg), ++ix)
    args[ix] = TREE_VALUE (arg);

  /* For a conversion operator the return type always participates,
     regardless of CHECK_RETTYPE.  */
  if (fn_type_unification (fn, explicit_args, targs,
			   args, ix,
			   (check_rettype || DECL_CONV_FN_P (fn)
			    ? TREE_TYPE (decl_type) : NULL_TREE),
			   DEDUCE_EXACT, LOOKUP_NORMAL, NULL,
			   /*explain_p=*/false,
			   /*decltype*/false)
      == error_mark_node)
    return NULL_TREE;

  return targs;
}
/* Return the innermost template arguments that, when applied to a partial
   specialization SPEC_TMPL of TMPL, yield the ARGS.
   For example, suppose we have:
   template <class T, class U> struct S {};
   template <class T> struct S<T*, int> {};
   Then, suppose we want to get `S<double*, int>'.  SPEC_TMPL will be the
   partial specialization and the ARGS will be {double*, int}.  The resulting
   vector will be {double}, indicating that `T' is bound to `double'.
   Returns NULL_TREE on failure.  */
static tree
get_partial_spec_bindings (tree tmpl, tree spec_tmpl, tree args)
{
  tree tparms = DECL_INNERMOST_TEMPLATE_PARMS (spec_tmpl);
  tree spec_args
    = TI_ARGS (get_template_info (DECL_TEMPLATE_RESULT (spec_tmpl)));
  int i, ntparms = TREE_VEC_LENGTH (tparms);
  tree deduced_args;
  tree innermost_deduced_args;

  innermost_deduced_args = make_tree_vec (ntparms);
  if (TMPL_ARGS_HAVE_MULTIPLE_LEVELS (args))
    {
      /* Share the outer levels of ARGS, but deduce into a fresh
	 innermost level.  */
      deduced_args = copy_node (args);
      SET_TMPL_ARGS_LEVEL (deduced_args,
			   TMPL_ARGS_DEPTH (deduced_args),
			   innermost_deduced_args);
    }
  else
    deduced_args = innermost_deduced_args;

  /* Pre-C++17 only: array deduction may be retried once below; for
     C++17 and later the flag starts true so no retry happens.  */
  bool tried_array_deduction = (cxx_dialect < cxx17);
 again:
  if (unify (tparms, deduced_args,
	     INNERMOST_TEMPLATE_ARGS (spec_args),
	     INNERMOST_TEMPLATE_ARGS (args),
	     UNIFY_ALLOW_NONE, /*explain_p=*/false))
    return NULL_TREE;

  /* Every template parameter of the partial specialization must have
     been deduced, possibly after one array-deduction retry.  */
  for (i = 0; i < ntparms; ++i)
    if (! TREE_VEC_ELT (innermost_deduced_args, i))
      {
	if (!tried_array_deduction)
	  {
	    try_array_deduction (tparms, innermost_deduced_args,
				 INNERMOST_TEMPLATE_ARGS (spec_args));
	    tried_array_deduction = true;
	    if (TREE_VEC_ELT (innermost_deduced_args, i))
	      goto again;
	  }
	return NULL_TREE;
      }

  if (!push_tinst_level (spec_tmpl, deduced_args))
    {
      excessive_deduction_depth = true;
      return NULL_TREE;
    }

  /* Verify that nondeduced template arguments agree with the type
     obtained from argument deduction.
     For example:
     struct A { typedef int X; };
     template <class T, class U> struct C {};
     template <class T> struct C<T, typename T::X> {};
     Then with the instantiation `C<A, int>', we can deduce that
     `T' is `A' but unify () does not check whether `typename T::X'
     is `int'.  */
  spec_args = tsubst (spec_args, deduced_args, tf_none, NULL_TREE);
  if (spec_args != error_mark_node)
    spec_args = coerce_template_parms (DECL_INNERMOST_TEMPLATE_PARMS (tmpl),
				       INNERMOST_TEMPLATE_ARGS (spec_args),
				       tmpl, tf_none, false, false);

  pop_tinst_level ();

  if (spec_args == error_mark_node
      /* We only need to check the innermost arguments; the other
	 arguments will always agree.  */
      || !comp_template_args_porder (INNERMOST_TEMPLATE_ARGS (spec_args),
				     INNERMOST_TEMPLATE_ARGS (args)))
    return NULL_TREE;

  /* Now that we have bindings for all of the template arguments,
     ensure that the arguments deduced for the template template
     parameters have compatible template parameter lists.  See the use
     of template_template_parm_bindings_ok_p in fn_type_unification
     for more information.  */
  if (!template_template_parm_bindings_ok_p (tparms, deduced_args))
    return NULL_TREE;

  return deduced_args;
}
// Compare two function templates T1 and T2 by deducing bindings
// from one against the other.  If both deductions succeed, compare
// constraints to see which is more constrained.  Returns 1 if T1 is
// more specialized, -1 if T2 is, 0 if neither.
static int
more_specialized_inst (tree t1, tree t2)
{
  int wins = 0;
  int successes = 0;

  // T2's signature is producible from T1, so T2 is at least as
  // specialized as T1.
  if (get_bindings (t1, DECL_TEMPLATE_RESULT (t2), NULL_TREE, true))
    {
      --wins;
      ++successes;
    }

  // And symmetrically: T1 producible from T2.
  if (get_bindings (t2, DECL_TEMPLATE_RESULT (t1), NULL_TREE, true))
    {
      ++wins;
      ++successes;
    }

  // If both deductions succeed, then one may be more constrained.
  if (successes == 2 && wins == 0)
    wins = more_constrained (t1, t2);

  return wins;
}
/* TEMPLATES is a TREE_LIST.  Each TREE_VALUE is a TEMPLATE_DECL.
   Return the TREE_LIST node with the most specialized template, if
   any.  If there is no most specialized template, the error_mark_node
   is returned.
   Note that this function does not look at, or modify, the
   TREE_PURPOSE or TREE_TYPE of any of the nodes.  Since the node
   returned is one of the elements of INSTANTIATIONS, callers may
   store information in the TREE_PURPOSE or TREE_TYPE of the nodes,
   and retrieve it from the value returned.  */
tree
most_specialized_instantiation (tree templates)
{
  tree fn, champ;

  ++processing_template_decl;

  /* Tournament pass: CHAMP is the best candidate seen so far.  */
  champ = templates;
  for (fn = TREE_CHAIN (templates); fn; fn = TREE_CHAIN (fn))
    {
      gcc_assert (TREE_VALUE (champ) != TREE_VALUE (fn));
      int fate = more_specialized_inst (TREE_VALUE (champ), TREE_VALUE (fn));
      if (fate == -1)
	champ = fn;
      else if (!fate)
	{
	  /* Equally specialized, move to next function.  If there
	     is no next function, nothing's most specialized.  */
	  fn = TREE_CHAIN (fn);
	  champ = fn;
	  if (!fn)
	    break;
	}
    }

  /* Verification pass: the winner must also beat every candidate it
     was never directly compared against (those before it).  */
  if (champ)
    /* Now verify that champ is better than everything earlier in the
       instantiation list.  */
    for (fn = templates; fn != champ; fn = TREE_CHAIN (fn)) {
      if (more_specialized_inst (TREE_VALUE (champ), TREE_VALUE (fn)) != 1)
	{
	  champ = NULL_TREE;
	  break;
	}
    }

  processing_template_decl--;

  if (!champ)
    return error_mark_node;

  return champ;
}
/* If DECL is a specialization of some template, return the most
   general such template.  Otherwise, returns NULL_TREE.
   For example, given:
   template <class T> struct S { template <class U> void f(U); };
   if TMPL is `template <class U> void S<int>::f(U)' this will return
   the full template.  This function will not trace past partial
   specializations, however.  For example, given in addition:
   template <class T> struct S<T*> { template <class U> void f(U); };
   if TMPL is `template <class U> void S<int*>::f(U)' this will return
   `template <class T> template <class U> S<T*>::f(U)'.  */
tree
most_general_template (tree decl)
{
  if (TREE_CODE (decl) != TEMPLATE_DECL)
    {
      if (tree tinfo = get_template_info (decl))
	decl = TI_TEMPLATE (tinfo);
      /* The TI_TEMPLATE can be an IDENTIFIER_NODE for a
	 template friend, or a FIELD_DECL for a capture pack.  */
      if (TREE_CODE (decl) != TEMPLATE_DECL)
	return NULL_TREE;
    }

  /* Look for more and more general templates.  */
  while (DECL_LANG_SPECIFIC (decl) && DECL_TEMPLATE_INFO (decl))
    {
      /* The DECL_TI_TEMPLATE can be an IDENTIFIER_NODE in some cases.
	 (See cp-tree.h for details.)  */
      if (TREE_CODE (DECL_TI_TEMPLATE (decl)) != TEMPLATE_DECL)
	break;

      /* Stop at a class-scope partial/explicit specialization: we do
	 not trace past partial specializations (see comment above).  */
      if (CLASS_TYPE_P (TREE_TYPE (decl))
	  && !TYPE_DECL_ALIAS_P (TYPE_NAME (TREE_TYPE (decl)))
	  && CLASSTYPE_TEMPLATE_SPECIALIZATION (TREE_TYPE (decl)))
	break;

      /* Stop if we run into an explicitly specialized class template.  */
      if (!DECL_NAMESPACE_SCOPE_P (decl)
	  && DECL_CONTEXT (decl)
	  && CLASSTYPE_TEMPLATE_SPECIALIZATION (DECL_CONTEXT (decl)))
	break;

      decl = DECL_TI_TEMPLATE (decl);
    }

  return decl;
}
/* Return the most specialized of the template partial specializations
   which can produce TARGET, a specialization of some class or variable
   template.  The value returned is actually a TREE_LIST; the TREE_VALUE is
   a TEMPLATE_DECL node corresponding to the partial specialization, while
   the TREE_PURPOSE is the set of template arguments that must be
   substituted into the template pattern in order to generate TARGET.
   If the choice of partial specialization is ambiguous, a diagnostic
   is issued, and the error_mark_node is returned.  If there are no
   partial specializations matching TARGET, then NULL_TREE is
   returned, indicating that the primary template should be used.  */
tree
most_specialized_partial_spec (tree target, tsubst_flags_t complain)
{
  tree list = NULL_TREE;
  tree t;
  tree champ;
  int fate;
  bool ambiguous_p;
  tree outer_args = NULL_TREE;
  tree tmpl, args;

  /* TARGET may be a class specialization, a TEMPLATE_ID_EXPR, or a
     variable template specialization; extract template and args.  */
  if (TYPE_P (target))
    {
      tree tinfo = CLASSTYPE_TEMPLATE_INFO (target);
      tmpl = TI_TEMPLATE (tinfo);
      args = TI_ARGS (tinfo);
    }
  else if (TREE_CODE (target) == TEMPLATE_ID_EXPR)
    {
      tmpl = TREE_OPERAND (target, 0);
      args = TREE_OPERAND (target, 1);
    }
  else if (VAR_P (target))
    {
      tree tinfo = DECL_TEMPLATE_INFO (target);
      tmpl = TI_TEMPLATE (tinfo);
      args = TI_ARGS (tinfo);
    }
  else
    gcc_unreachable ();

  tree main_tmpl = most_general_template (tmpl);

  /* For determining which partial specialization to use, only the
     innermost args are interesting.  */
  if (TMPL_ARGS_HAVE_MULTIPLE_LEVELS (args))
    {
      outer_args = strip_innermost_template_args (args, 1);
      args = INNERMOST_TEMPLATE_ARGS (args);
    }

  /* The caller hasn't called push_to_top_level yet, but we need
     get_partial_spec_bindings to be done in non-template context so that we'll
     fully resolve everything.  */
  processing_template_decl_sentinel ptds;

  /* Collect every partial specialization whose pattern can produce
     ARGS (and whose constraints, if any, are satisfied) into LIST.  */
  for (t = DECL_TEMPLATE_SPECIALIZATIONS (main_tmpl); t; t = TREE_CHAIN (t))
    {
      const tree ospec_tmpl = TREE_VALUE (t);

      tree spec_tmpl;
      if (outer_args)
	{
	  /* Substitute in the template args from the enclosing class.  */
	  ++processing_template_decl;
	  spec_tmpl = tsubst (ospec_tmpl, outer_args, tf_none, NULL_TREE);
	  --processing_template_decl;
	  if (spec_tmpl == error_mark_node)
	    return error_mark_node;
	}
      else
	spec_tmpl = ospec_tmpl;

      tree spec_args = get_partial_spec_bindings (tmpl, spec_tmpl, args);
      if (spec_args)
	{
	  if (outer_args)
	    spec_args = add_to_template_args (outer_args, spec_args);

	  /* Keep the candidate only if the constraints are satisfied,
	     or if we're not compiling with concepts.  */
	  if (!flag_concepts
	      || constraints_satisfied_p (ospec_tmpl, spec_args))
	    {
	      list = tree_cons (spec_args, ospec_tmpl, list);
	      TREE_TYPE (list) = TREE_TYPE (t);
	    }
	}
    }

  if (! list)
    return NULL_TREE;

  ambiguous_p = false;
  t = list;
  champ = t;
  t = TREE_CHAIN (t);
  /* Tournament pass over the candidates; CHAMP is the best so far.  */
  for (; t; t = TREE_CHAIN (t))
    {
      fate = more_specialized_partial_spec (tmpl, champ, t);
      if (fate == 1)
	;
      else
	{
	  if (fate == 0)
	    {
	      /* Tied: skip T; if nothing follows, the set is ambiguous.  */
	      t = TREE_CHAIN (t);
	      if (! t)
		{
		  ambiguous_p = true;
		  break;
		}
	    }
	  champ = t;
	}
    }

  /* Verification pass: CHAMP must beat every candidate it skipped.  */
  if (!ambiguous_p)
    for (t = list; t && t != champ; t = TREE_CHAIN (t))
      {
	fate = more_specialized_partial_spec (tmpl, champ, t);
	if (fate != 1)
	  {
	    ambiguous_p = true;
	    break;
	  }
      }

  if (ambiguous_p)
    {
      const char *str;
      char *spaces = NULL;
      if (!(complain & tf_error))
	return error_mark_node;
      if (TYPE_P (target))
	error ("ambiguous template instantiation for %q#T", target);
      else
	error ("ambiguous template instantiation for %q#D", target);
      str = ngettext ("candidate is:", "candidates are:", list_length (list));
      for (t = list; t; t = TREE_CHAIN (t))
	{
	  tree subst = build_tree_list (TREE_VALUE (t), TREE_PURPOSE (t));
	  inform (DECL_SOURCE_LOCATION (TREE_VALUE (t)),
		  "%s %#qS", spaces ? spaces : str, subst);
	  spaces = spaces ? spaces : get_spaces (str);
	}
      free (spaces);
      return error_mark_node;
    }

  return champ;
}
/* Explicitly instantiate DECL, a function or static data member
   declaration.  STORAGE is the storage-class keyword used on the
   explicit instantiation (NULL_TREE, or a ridpointers entry such as
   `extern'); diagnostics are issued for invalid combinations.  */
void
do_decl_instantiation (tree decl, tree storage)
{
  tree result = NULL_TREE;
  int extern_p = 0;

  if (!decl || decl == error_mark_node)
    /* An error occurred, for which grokdeclarator has already issued
       an appropriate message.  */
    return;
  else if (! DECL_LANG_SPECIFIC (decl))
    {
      error ("explicit instantiation of non-template %q#D", decl);
      return;
    }
  else if (DECL_DECLARED_CONCEPT_P (decl))
    {
      if (VAR_P (decl))
	error ("explicit instantiation of variable concept %q#D", decl);
      else
	error ("explicit instantiation of function concept %q#D", decl);
      return;
    }

  bool var_templ = (DECL_TEMPLATE_INFO (decl)
		    && variable_template_p (DECL_TI_TEMPLATE (decl)));

  if (VAR_P (decl) && !var_templ)
    {
      /* There is an asymmetry here in the way VAR_DECLs and
	 FUNCTION_DECLs are handled by grokdeclarator.  In the case of
	 the latter, the DECL we get back will be marked as a
	 template instantiation, and the appropriate
	 DECL_TEMPLATE_INFO will be set up.  This does not happen for
	 VAR_DECLs so we do the lookup here.  Probably, grokdeclarator
	 should handle VAR_DECLs as it currently handles
	 FUNCTION_DECLs.  */
      if (!DECL_CLASS_SCOPE_P (decl))
	{
	  error ("%qD is not a static data member of a class template", decl);
	  return;
	}
      result = lookup_field (DECL_CONTEXT (decl), DECL_NAME (decl), 0, false);
      if (!result || !VAR_P (result))
	{
	  error ("no matching template for %qD found", decl);
	  return;
	}
      if (!same_type_p (TREE_TYPE (result), TREE_TYPE (decl)))
	{
	  error ("type %qT for explicit instantiation %qD does not match "
		 "declared type %qT", TREE_TYPE (result), decl,
		 TREE_TYPE (decl));
	  return;
	}
    }
  else if (TREE_CODE (decl) != FUNCTION_DECL && !var_templ)
    {
      error ("explicit instantiation of %q#D", decl);
      return;
    }
  else
    result = decl;

  /* Check for various error cases.  Note that if the explicit
     instantiation is valid the RESULT will currently be marked as an
     *implicit* instantiation; DECL_EXPLICIT_INSTANTIATION is not set
     until we get here.  */
  if (DECL_TEMPLATE_SPECIALIZATION (result))
    {
      /* DR 259 [temp.spec].
	 Both an explicit instantiation and a declaration of an explicit
	 specialization shall not appear in a program unless the explicit
	 instantiation follows a declaration of the explicit specialization.
	 For a given set of template parameters, if an explicit
	 instantiation of a template appears after a declaration of an
	 explicit specialization for that template, the explicit
	 instantiation has no effect.  */
      return;
    }
  else if (DECL_EXPLICIT_INSTANTIATION (result))
    {
      /* [temp.spec]
	 No program shall explicitly instantiate any template more
	 than once.
	 We check DECL_NOT_REALLY_EXTERN so as not to complain when
	 the first instantiation was `extern' and the second is not,
	 and EXTERN_P for the opposite case.  */
      if (DECL_NOT_REALLY_EXTERN (result) && !extern_p)
	permerror (input_location, "duplicate explicit instantiation of %q#D", result);

      /* If an "extern" explicit instantiation follows an ordinary
	 explicit instantiation, the template is instantiated.  */
      if (extern_p)
	return;
    }
  else if (!DECL_IMPLICIT_INSTANTIATION (result))
    {
      error ("no matching template for %qD found", result);
      return;
    }
  else if (!DECL_TEMPLATE_INFO (result))
    {
      permerror (input_location, "explicit instantiation of non-template %q#D", result);
      return;
    }

  /* Validate the storage-class keyword, if any.  */
  if (storage == NULL_TREE)
    ;
  else if (storage == ridpointers[(int) RID_EXTERN])
    {
      if (cxx_dialect == cxx98)
	pedwarn (input_location, OPT_Wpedantic,
		 "ISO C++ 1998 forbids the use of %<extern%> on explicit "
		 "instantiations");
      extern_p = 1;
    }
  else
    error ("storage class %qD applied to template instantiation", storage);

  check_explicit_instantiation_namespace (result);
  mark_decl_instantiated (result, extern_p);
  /* An `extern template' suppresses the definition here; a plain
     explicit instantiation forces it.  */
  if (! extern_p)
    instantiate_decl (result, /*defer_ok=*/true,
		      /*expl_inst_class_mem_p=*/false);
}
/* Mark class type T as explicitly instantiated.  EXTERN_P nonzero
   means this was an `extern template' instantiation, so T must not be
   emitted (nor debug info generated) in this translation unit.  */
static void
mark_class_instantiated (tree t, int extern_p)
{
  SET_CLASSTYPE_EXPLICIT_INSTANTIATION (t);
  SET_CLASSTYPE_INTERFACE_KNOWN (t);
  CLASSTYPE_INTERFACE_ONLY (t) = extern_p;
  TYPE_DECL_SUPPRESS_DEBUG (TYPE_NAME (t)) = extern_p;

  /* An extern instantiation emits nothing here.  */
  if (extern_p)
    return;

  CLASSTYPE_DEBUG_REQUESTED (t) = 1;
  rest_of_type_compilation (t, 1);
}
/* Called from do_type_instantiation through binding_table_foreach to
   do recursive instantiation for the type bound in ENTRY.  DATA is a
   tree* pointing at the storage-class keyword of the enclosing
   instantiation.  */
static void
bt_instantiate_type_proc (binding_entry entry, void *data)
{
  tree type = entry->type;

  /* Only recurse into class templates whose arguments are fully
     non-dependent.  */
  if (!MAYBE_CLASS_TYPE_P (type)
      || !CLASSTYPE_TEMPLATE_INFO (type)
      || uses_template_parms (CLASSTYPE_TI_ARGS (type)))
    return;

  do_type_instantiation (TYPE_MAIN_DECL (type), *(tree *) data, 0);
}
/* Perform an explicit instantiation of template class T. STORAGE, if
   non-null, is the RID for extern, inline or static. COMPLAIN is
   nonzero if this is called from the parser, zero if called recursively,
   since the standard is unclear (as detailed below). */
void
do_type_instantiation (tree t, tree storage, tsubst_flags_t complain)
{
  int extern_p = 0;
  int nomem_p = 0;
  int static_p = 0;
  int previous_instantiation_extern_p = 0;
  /* Allow the caller to pass either the TYPE_DECL or the type itself.  */
  if (TREE_CODE (t) == TYPE_DECL)
    t = TREE_TYPE (t);
  /* Reject targets that are not class template specializations.  */
  if (! CLASS_TYPE_P (t) || ! CLASSTYPE_TEMPLATE_INFO (t))
    {
      tree tmpl =
	(TYPE_TEMPLATE_INFO (t)) ? TYPE_TI_TEMPLATE (t) : NULL;
      if (tmpl)
	error ("explicit instantiation of non-class template %qD", tmpl);
      else
	error ("explicit instantiation of non-template type %qT", t);
      return;
    }
  /* The template must be defined so we can instantiate it.  */
  complete_type (t);
  if (!COMPLETE_TYPE_P (t))
    {
      if (complain & tf_error)
	error ("explicit instantiation of %q#T before definition of template",
	       t);
      return;
    }
  /* Diagnose the storage-class specifier, if any, and translate it into
     the extern/nomem/static flags used below.  */
  if (storage != NULL_TREE)
    {
      if (storage == ridpointers[(int) RID_EXTERN])
	{
	  if (cxx_dialect == cxx98)
	    pedwarn (input_location, OPT_Wpedantic,
		     "ISO C++ 1998 forbids the use of %<extern%> on "
		     "explicit instantiations");
	}
      else
	pedwarn (input_location, OPT_Wpedantic,
		 "ISO C++ forbids the use of %qE"
		 " on explicit instantiations", storage);
      if (storage == ridpointers[(int) RID_INLINE])
	nomem_p = 1;
      else if (storage == ridpointers[(int) RID_EXTERN])
	extern_p = 1;
      else if (storage == ridpointers[(int) RID_STATIC])
	static_p = 1;
      else
	{
	  error ("storage class %qD applied to template instantiation",
		 storage);
	  extern_p = 0;
	}
    }
  if (CLASSTYPE_TEMPLATE_SPECIALIZATION (t))
    {
      /* DR 259 [temp.spec].
	 Both an explicit instantiation and a declaration of an explicit
	 specialization shall not appear in a program unless the explicit
	 instantiation follows a declaration of the explicit specialization.
	 For a given set of template parameters, if an explicit
	 instantiation of a template appears after a declaration of an
	 explicit specialization for that template, the explicit
	 instantiation has no effect. */
      return;
    }
  else if (CLASSTYPE_EXPLICIT_INSTANTIATION (t))
    {
      /* [temp.spec]
	 No program shall explicitly instantiate any template more
	 than once.
	 If PREVIOUS_INSTANTIATION_EXTERN_P, then the first explicit
	 instantiation was `extern'. If EXTERN_P then the second is.
	 These cases are OK. */
      previous_instantiation_extern_p = CLASSTYPE_INTERFACE_ONLY (t);
      if (!previous_instantiation_extern_p && !extern_p
	  && (complain & tf_error))
	permerror (input_location, "duplicate explicit instantiation of %q#T", t);
      /* If we've already instantiated the template, just return now. */
      if (!CLASSTYPE_INTERFACE_ONLY (t))
	return;
    }
  check_explicit_instantiation_namespace (TYPE_NAME (t));
  mark_class_instantiated (t, extern_p);
  /* `inline' requests that no member definitions be emitted.  */
  if (nomem_p)
    return;
  /* In contrast to implicit instantiation, where only the
     declarations, and not the definitions, of members are
     instantiated, we have here:
	 [temp.explicit]
	 An explicit instantiation that names a class template
	 specialization is also an explicit instantiation of the same
	 kind (declaration or definition) of each of its members (not
	 including members inherited from base classes and members
	 that are templates) that has not been previously explicitly
	 specialized in the translation unit containing the explicit
	 instantiation, provided that the associated constraints, if
	 any, of that member are satisfied by the template arguments
	 of the explicit instantiation. */
  for (tree fld = TYPE_FIELDS (t); fld; fld = DECL_CHAIN (fld))
    if ((VAR_P (fld)
	 || (TREE_CODE (fld) == FUNCTION_DECL
	     && !static_p
	     && user_provided_p (fld)))
	&& DECL_TEMPLATE_INSTANTIATION (fld)
	&& constraints_satisfied_p (fld))
      {
	mark_decl_instantiated (fld, extern_p);
	if (! extern_p)
	  instantiate_decl (fld, /*defer_ok=*/true,
			    /*expl_inst_class_mem_p=*/true);
      }
  /* Recursively instantiate nested class types bound in T's scope.  */
  if (CLASSTYPE_NESTED_UTDS (t))
    binding_table_foreach (CLASSTYPE_NESTED_UTDS (t),
			   bt_instantiate_type_proc, &storage);
}
/* Given a function DECL, which is a specialization of TMPL, modify
   DECL to be a re-instantiation of TMPL with the same template
   arguments. TMPL should be the template into which tsubst'ing
   should occur for DECL, not the most general template.
   One reason for doing this is a scenario like this:
     template <class T>
     void f(const T&, int i);
     void g() { f(3, 7); }
     template <class T>
     void f(const T& t, const int i) { }
   Note that when the template is first instantiated, with
   instantiate_template, the resulting DECL will have no name for the
   first parameter, and the wrong type for the second. So, when we go
   to instantiate the DECL, we regenerate it. */
static void
regenerate_decl_from_template (tree decl, tree tmpl, tree args)
{
  /* The arguments used to instantiate DECL, from the most general
     template. */
  tree code_pattern;
  code_pattern = DECL_TEMPLATE_RESULT (tmpl);
  /* Make sure that we can see identifiers, and compute access
     correctly. */
  push_access_scope (decl);
  if (TREE_CODE (decl) == FUNCTION_DECL)
    {
      tree decl_parm;
      tree pattern_parm;
      tree specs;
      int args_depth;
      int parms_depth;
      /* If ARGS carries more levels than TMPL has parameter levels,
	 keep only the innermost ones.  */
      args_depth = TMPL_ARGS_DEPTH (args);
      parms_depth = TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (tmpl));
      if (args_depth > parms_depth)
	args = get_innermost_template_args (args, parms_depth);
      /* Instantiate a dynamic exception-specification. noexcept will be
	 handled below. */
      if (tree raises = TYPE_RAISES_EXCEPTIONS (TREE_TYPE (code_pattern)))
	if (TREE_VALUE (raises))
	  {
	    specs = tsubst_exception_specification (TREE_TYPE (code_pattern),
						    args, tf_error, NULL_TREE,
						    /*defer_ok*/false);
	    if (specs && specs != error_mark_node)
	      TREE_TYPE (decl) = build_exception_variant (TREE_TYPE (decl),
							  specs);
	  }
      /* Merge parameter declarations. */
      decl_parm = skip_artificial_parms_for (decl,
					     DECL_ARGUMENTS (decl));
      pattern_parm
	= skip_artificial_parms_for (code_pattern,
				     DECL_ARGUMENTS (code_pattern));
      /* Walk both parameter chains in lockstep until the pattern's
	 function parameter pack (if any) is reached.  */
      while (decl_parm && !DECL_PACK_P (pattern_parm))
	{
	  tree parm_type;
	  tree attributes;
	  /* Take the name from the pattern and the type from tsubst'ing
	     the pattern's parameter type with ARGS.  */
	  if (DECL_NAME (decl_parm) != DECL_NAME (pattern_parm))
	    DECL_NAME (decl_parm) = DECL_NAME (pattern_parm);
	  parm_type = tsubst (TREE_TYPE (pattern_parm), args, tf_error,
			      NULL_TREE);
	  parm_type = type_decays_to (parm_type);
	  if (!same_type_p (TREE_TYPE (decl_parm), parm_type))
	    TREE_TYPE (decl_parm) = parm_type;
	  attributes = DECL_ATTRIBUTES (pattern_parm);
	  if (DECL_ATTRIBUTES (decl_parm) != attributes)
	    {
	      DECL_ATTRIBUTES (decl_parm) = attributes;
	      cplus_decl_attributes (&decl_parm, attributes, /*flags=*/0);
	    }
	  decl_parm = DECL_CHAIN (decl_parm);
	  pattern_parm = DECL_CHAIN (pattern_parm);
	}
      /* Merge any parameters that match with the function parameter
	 pack. */
      if (pattern_parm && DECL_PACK_P (pattern_parm))
	{
	  int i, len;
	  tree expanded_types;
	  /* Expand the TYPE_PACK_EXPANSION that provides the types for
	     the parameters in this function parameter pack. */
	  expanded_types = tsubst_pack_expansion (TREE_TYPE (pattern_parm),
						  args, tf_error, NULL_TREE);
	  len = TREE_VEC_LENGTH (expanded_types);
	  for (i = 0; i < len; i++)
	    {
	      tree parm_type;
	      tree attributes;
	      if (DECL_NAME (decl_parm) != DECL_NAME (pattern_parm))
		/* Rename the parameter to include the index. */
		DECL_NAME (decl_parm) =
		  make_ith_pack_parameter_name (DECL_NAME (pattern_parm), i);
	      parm_type = TREE_VEC_ELT (expanded_types, i);
	      parm_type = type_decays_to (parm_type);
	      if (!same_type_p (TREE_TYPE (decl_parm), parm_type))
		TREE_TYPE (decl_parm) = parm_type;
	      attributes = DECL_ATTRIBUTES (pattern_parm);
	      if (DECL_ATTRIBUTES (decl_parm) != attributes)
		{
		  DECL_ATTRIBUTES (decl_parm) = attributes;
		  cplus_decl_attributes (&decl_parm, attributes, /*flags=*/0);
		}
	      decl_parm = DECL_CHAIN (decl_parm);
	    }
	}
      /* Merge additional specifiers from the CODE_PATTERN. */
      if (DECL_DECLARED_INLINE_P (code_pattern)
	  && !DECL_DECLARED_INLINE_P (decl))
	DECL_DECLARED_INLINE_P (decl) = 1;
      maybe_instantiate_noexcept (decl, tf_error);
    }
  else if (VAR_P (decl))
    {
      /* Re-substitute the initializer from the pattern.  The variable
	 may provide a lambda's extra scope, hence the lambda scope
	 push/pop around the substitution.  */
      start_lambda_scope (decl);
      DECL_INITIAL (decl) =
	tsubst_init (DECL_INITIAL (code_pattern), decl, args,
		     tf_error, DECL_TI_TEMPLATE (decl));
      finish_lambda_scope ();
      /* An array whose bound was unknown at declaration time may now
	 have a complete type; recompute it.  */
      if (VAR_HAD_UNKNOWN_BOUND (decl))
	TREE_TYPE (decl) = tsubst (TREE_TYPE (code_pattern), args,
				   tf_error, DECL_TI_TEMPLATE (decl));
    }
  else
    gcc_unreachable ();
  pop_access_scope (decl);
}
/* Return the TEMPLATE_DECL into which DECL_TI_ARGS(DECL) should be
   substituted to get DECL.  */
tree
template_for_substitution (tree decl)
{
  tree tmpl = DECL_TI_TEMPLATE (decl);
  /* Walk toward more general templates until we reach one whose
     DECL_TEMPLATE_RESULT is the pattern for the instantiation.  This
     is not always the most general template.  Consider, for example:
       template <class T>
       struct S { template <class U> void f();
		  template <> void f<int>(); };
     and an instantiation of S<double>::f<int>.  We want the result to
     be the specialization S<T>::f<int>, not the more general
     S<T>::f<U>.  */
  for (;;)
    {
      /* An instantiation cannot have a definition, so in that case we
	 need a more general template.  */
      bool need_more_general = DECL_TEMPLATE_INSTANTIATION (tmpl);
      /* We must also deal with friend templates.  Given:
	   template <class T> struct S {
	     template <class U> friend void f() {};
	   };
	 S<int>::f<U>, say, is not an instantiation of S<T>::f<U> so
	 far as the language is concerned, but that's still where we
	 get the pattern for the instantiation from.  On the other
	 hand, if the definition comes outside the class, say:
	   template <class T> struct S {
	     template <class U> friend void f();
	   };
	   template <class U> friend void f() {}
	 we don't need to look any further.  That's what the check for
	 DECL_INITIAL is for.  */
      if (!need_more_general)
	need_more_general
	  = (TREE_CODE (decl) == FUNCTION_DECL
	     && DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION (tmpl)
	     && !DECL_INITIAL (DECL_TEMPLATE_RESULT (tmpl)));
      if (!need_more_general)
	break;
      /* The present template should not be a definition.  If it were,
	 we should be using it!  Note that we cannot just keep going
	 until we find a template with a definition, since that might
	 go too far if a specialization was declared, but not defined.
	 Fetch the more general template.  */
      tmpl = DECL_TI_TEMPLATE (tmpl);
    }
  return tmpl;
}
/* Returns true if we need to instantiate this template instance even if we
   know we aren't going to emit it.  */
bool
always_instantiate_p (tree decl)
{
  if (TREE_CODE (decl) == FUNCTION_DECL)
    /* We always instantiate inline functions so that we can inline them.
       An explicit instantiation declaration prohibits implicit
       instantiation of non-inline functions.  With high levels of
       optimization, we would normally inline non-inline functions -- but
       we're not allowed to do that for "extern template" functions.
       Therefore, we check DECL_DECLARED_INLINE_P, rather than
       possibly_inlined_p.  A deduced return type likewise forces
       instantiation.  */
    return (DECL_DECLARED_INLINE_P (decl)
	    || type_uses_auto (TREE_TYPE (TREE_TYPE (decl))));
  if (VAR_P (decl))
    /* And we need to instantiate static data members so that their
       initializers are available in integral constant expressions.  */
    return decl_maybe_constant_var_p (decl);
  return false;
}
/* If FN has a noexcept-specifier that hasn't been instantiated yet,
   instantiate it now, modifying TREE_TYPE (fn). Returns false on
   error, true otherwise. */
bool
maybe_instantiate_noexcept (tree fn, tsubst_flags_t complain)
{
  tree fntype, spec, noex, clone;
  if (fn == error_mark_node)
    return false;
  /* Don't instantiate a noexcept-specification from template context. */
  if (processing_template_decl
      && (!flag_noexcept_type || type_dependent_expression_p (fn)))
    return true;
  /* A defaulted function that might be deleted must be synthesized
     first to find out whether it really is deleted.  */
  if (DECL_MAYBE_DELETED (fn))
    {
      if (fn == current_function_decl)
	/* We're in start_preparsed_function, keep going. */
	return true;
      ++function_depth;
      synthesize_method (fn);
      --function_depth;
      return !DECL_MAYBE_DELETED (fn);
    }
  /* Work on the principal function; its clones are updated at the end.  */
  if (DECL_CLONED_FUNCTION_P (fn))
    fn = DECL_CLONED_FUNCTION (fn);
  tree orig_fn = NULL_TREE;
  /* For a member friend template we can get a TEMPLATE_DECL. Let's use
     its FUNCTION_DECL for the rest of this function -- push_access_scope
     doesn't accept TEMPLATE_DECLs. */
  if (DECL_FUNCTION_TEMPLATE_P (fn))
    {
      orig_fn = fn;
      fn = DECL_TEMPLATE_RESULT (fn);
    }
  fntype = TREE_TYPE (fn);
  spec = TYPE_RAISES_EXCEPTIONS (fntype);
  /* Nothing to do unless there is a deferred noexcept-specifier.  */
  if (!spec || !TREE_PURPOSE (spec))
    return true;
  noex = TREE_PURPOSE (spec);
  if (TREE_CODE (noex) == DEFERRED_NOEXCEPT)
    {
      /* FNS records the functions whose noexcept-specifier is currently
	 being instantiated, so self-dependency can be diagnosed.  */
      static hash_set<tree>* fns = new hash_set<tree>;
      bool added = false;
      if (DEFERRED_NOEXCEPT_PATTERN (noex) == NULL_TREE)
	{
	  spec = get_defaulted_eh_spec (fn, complain);
	  if (spec == error_mark_node)
	    /* This might have failed because of an unparsed DMI, so
	       let's try again later. */
	    return false;
	}
      else if (!(added = !fns->add (fn)))
	{
	  /* If hash_set::add returns true, the element was already there. */
	  location_t loc = cp_expr_loc_or_loc (DEFERRED_NOEXCEPT_PATTERN (noex),
					       DECL_SOURCE_LOCATION (fn));
	  error_at (loc,
		    "exception specification of %qD depends on itself",
		    fn);
	  spec = noexcept_false_spec;
	}
      else if (push_tinst_level (fn))
	{
	  push_to_top_level ();
	  push_access_scope (fn);
	  push_deferring_access_checks (dk_no_deferred);
	  input_location = DECL_SOURCE_LOCATION (fn);
	  /* If needed, set current_class_ptr for the benefit of
	     tsubst_copy/PARM_DECL. */
	  tree tdecl = DECL_TEMPLATE_RESULT (DECL_TI_TEMPLATE (fn));
	  if (DECL_NONSTATIC_MEMBER_FUNCTION_P (tdecl))
	    {
	      tree this_parm = DECL_ARGUMENTS (tdecl);
	      current_class_ptr = NULL_TREE;
	      current_class_ref = cp_build_fold_indirect_ref (this_parm);
	      current_class_ptr = this_parm;
	    }
	  /* If this function is represented by a TEMPLATE_DECL, then
	     the deferred noexcept-specification might still contain
	     dependent types, even after substitution. And we need the
	     dependency check functions to work in build_noexcept_spec. */
	  if (orig_fn)
	    ++processing_template_decl;
	  /* Do deferred instantiation of the noexcept-specifier. */
	  noex = tsubst_copy_and_build (DEFERRED_NOEXCEPT_PATTERN (noex),
					DEFERRED_NOEXCEPT_ARGS (noex),
					tf_warning_or_error, fn,
					/*function_p=*/false,
					/*i_c_e_p=*/true);
	  /* Build up the noexcept-specification. */
	  spec = build_noexcept_spec (noex, tf_warning_or_error);
	  if (orig_fn)
	    --processing_template_decl;
	  pop_deferring_access_checks ();
	  pop_access_scope (fn);
	  pop_tinst_level ();
	  pop_from_top_level ();
	}
      else
	spec = noexcept_false_spec;
      if (added)
	fns->remove (fn);
      if (spec == error_mark_node)
	{
	  /* This failed with a hard error, so let's go with false. */
	  gcc_assert (seen_error ());
	  spec = noexcept_false_spec;
	}
      TREE_TYPE (fn) = build_exception_variant (fntype, spec);
      if (orig_fn)
	TREE_TYPE (orig_fn) = TREE_TYPE (fn);
    }
  /* Propagate the now-resolved exception specification to the clones.  */
  FOR_EACH_CLONE (clone, fn)
    {
      if (TREE_TYPE (clone) == fntype)
	TREE_TYPE (clone) = TREE_TYPE (fn);
      else
	TREE_TYPE (clone) = build_exception_variant (TREE_TYPE (clone), spec);
    }
  return true;
}
/* We're starting to process the function INST, an instantiation of PATTERN;
   add their parameters to local_specializations.  */
static void
register_parameter_specializations (tree pattern, tree inst)
{
  tree pat_parm = DECL_ARGUMENTS (pattern);
  tree inst_parm = DECL_ARGUMENTS (inst);
  /* For a non-static member function, map the `this' parameter first,
     then step both chains past the artificial parameters.  */
  if (DECL_NONSTATIC_MEMBER_FUNCTION_P (inst))
    {
      register_local_specialization (inst_parm, pat_parm);
      inst_parm = skip_artificial_parms_for (inst, inst_parm);
      pat_parm = skip_artificial_parms_for (pattern, pat_parm);
    }
  while (pat_parm)
    {
      if (DECL_PACK_P (pat_parm))
	{
	  /* Register the (value) argument pack as a specialization of
	     PAT_PARM; extract_fnparm_pack advances INST_PARM past the
	     pack's members.  */
	  tree argpack = extract_fnparm_pack (pat_parm, &inst_parm);
	  register_local_specialization (argpack, pat_parm);
	}
      else
	{
	  register_local_specialization (inst_parm, pat_parm);
	  inst_parm = DECL_CHAIN (inst_parm);
	}
      pat_parm = DECL_CHAIN (pat_parm);
    }
  /* Every parameter of INST should have been consumed.  */
  gcc_assert (!inst_parm);
}
/* Produce the definition of D, a _DECL generated from a template. If
   DEFER_OK is true, then we don't have to actually do the
   instantiation now; we just have to do it sometime. Normally it is
   an error if this is an explicit instantiation but D is undefined.
   EXPL_INST_CLASS_MEM_P is true iff D is a member of an explicitly
   instantiated class template. */
tree
instantiate_decl (tree d, bool defer_ok, bool expl_inst_class_mem_p)
{
  tree tmpl = DECL_TI_TEMPLATE (d);
  tree gen_args;
  tree args;
  tree td;
  tree code_pattern;
  tree spec;
  tree gen_tmpl;
  bool pattern_defined;
  /* Saved global state, restored on every exit path below.  */
  location_t saved_loc = input_location;
  int saved_unevaluated_operand = cp_unevaluated_operand;
  int saved_inhibit_evaluation_warnings = c_inhibit_evaluation_warnings;
  bool external_p;
  bool deleted_p;
  /* This function should only be used to instantiate templates for
     functions and static member variables. */
  gcc_assert (VAR_OR_FUNCTION_DECL_P (d));
  /* A concept is never instantiated. */
  gcc_assert (!DECL_DECLARED_CONCEPT_P (d));
  /* Variables are never deferred; if instantiation is required, they
     are instantiated right away. That allows for better code in the
     case that an expression refers to the value of the variable --
     if the variable has a constant value the referring expression can
     take advantage of that fact. */
  if (VAR_P (d))
    defer_ok = false;
  /* Don't instantiate cloned functions. Instead, instantiate the
     functions they cloned. */
  if (TREE_CODE (d) == FUNCTION_DECL && DECL_CLONED_FUNCTION_P (d))
    d = DECL_CLONED_FUNCTION (d);
  if (DECL_TEMPLATE_INSTANTIATED (d)
      || (TREE_CODE (d) == FUNCTION_DECL
	  && DECL_DEFAULTED_FN (d) && DECL_INITIAL (d))
      || DECL_TEMPLATE_SPECIALIZATION (d))
    /* D has already been instantiated or explicitly specialized, so
       there's nothing for us to do here.
       It might seem reasonable to check whether or not D is an explicit
       instantiation, and, if so, stop here. But when an explicit
       instantiation is deferred until the end of the compilation,
       DECL_EXPLICIT_INSTANTIATION is set, even though we still need to do
       the instantiation. */
    return d;
  /* Check to see whether we know that this template will be
     instantiated in some other file, as with "extern template"
     extension. */
  external_p = (DECL_INTERFACE_KNOWN (d) && DECL_REALLY_EXTERN (d));
  /* In general, we do not instantiate such templates. */
  if (external_p && !always_instantiate_p (d))
    return d;
  gen_tmpl = most_general_template (tmpl);
  gen_args = DECL_TI_ARGS (d);
  if (tmpl != gen_tmpl)
    /* We should already have the extra args. */
    gcc_assert (TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (gen_tmpl))
		== TMPL_ARGS_DEPTH (gen_args));
  /* And what's in the hash table should match D. */
  gcc_assert ((spec = retrieve_specialization (gen_tmpl, gen_args, 0)) == d
	      || spec == NULL_TREE);
  /* This needs to happen before any tsubsting. */
  if (! push_tinst_level (d))
    return d;
  timevar_push (TV_TEMPLATE_INST);
  /* Set TD to the template whose DECL_TEMPLATE_RESULT is the pattern
     for the instantiation. */
  td = template_for_substitution (d);
  args = gen_args;
  if (VAR_P (d))
    {
      /* Look up an explicit specialization, if any. */
      tree tid = lookup_template_variable (gen_tmpl, gen_args);
      tree elt = most_specialized_partial_spec (tid, tf_warning_or_error);
      if (elt && elt != error_mark_node)
	{
	  td = TREE_VALUE (elt);
	  args = TREE_PURPOSE (elt);
	}
    }
  code_pattern = DECL_TEMPLATE_RESULT (td);
  /* We should never be trying to instantiate a member of a class
     template or partial specialization. */
  gcc_assert (d != code_pattern);
  if ((DECL_NAMESPACE_SCOPE_P (d) && !DECL_INITIALIZED_IN_CLASS_P (d))
      || DECL_TEMPLATE_SPECIALIZATION (td))
    /* In the case of a friend template whose definition is provided
       outside the class, we may have too many arguments. Drop the
       ones we don't need. The same is true for specializations. */
    args = get_innermost_template_args
      (args, TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (td)));
  /* Determine whether the pattern has a definition we can use, and
     whether it is deleted.  */
  if (TREE_CODE (d) == FUNCTION_DECL)
    {
      deleted_p = DECL_DELETED_FN (code_pattern);
      pattern_defined = ((DECL_SAVED_TREE (code_pattern) != NULL_TREE
			  && DECL_INITIAL (code_pattern) != error_mark_node)
			 || DECL_DEFAULTED_FN (code_pattern)
			 || deleted_p);
    }
  else
    {
      deleted_p = false;
      if (DECL_CLASS_SCOPE_P (code_pattern))
	pattern_defined = ! DECL_IN_AGGR_P (code_pattern);
      else
	pattern_defined = ! DECL_EXTERNAL (code_pattern);
    }
  /* We may be in the middle of deferred access check. Disable it now. */
  push_deferring_access_checks (dk_no_deferred);
  /* Unless an explicit instantiation directive has already determined
     the linkage of D, remember that a definition is available for
     this entity. */
  if (pattern_defined
      && !DECL_INTERFACE_KNOWN (d)
      && !DECL_NOT_REALLY_EXTERN (d))
    mark_definable (d);
  DECL_SOURCE_LOCATION (td) = DECL_SOURCE_LOCATION (code_pattern);
  DECL_SOURCE_LOCATION (d) = DECL_SOURCE_LOCATION (code_pattern);
  input_location = DECL_SOURCE_LOCATION (d);
  /* If D is a member of an explicitly instantiated class template,
     and no definition is available, treat it like an implicit
     instantiation. */
  if (!pattern_defined && expl_inst_class_mem_p
      && DECL_EXPLICIT_INSTANTIATION (d))
    {
      /* Leave linkage flags alone on instantiations with anonymous
	 visibility. */
      if (TREE_PUBLIC (d))
	{
	  DECL_NOT_REALLY_EXTERN (d) = 0;
	  DECL_INTERFACE_KNOWN (d) = 0;
	}
      SET_DECL_IMPLICIT_INSTANTIATION (d);
    }
  /* Defer all other templates, unless we have been explicitly
     forbidden from doing so. */
  if (/* If there is no definition, we cannot instantiate the
	 template. */
      ! pattern_defined
      /* If it's OK to postpone instantiation, do so. */
      || defer_ok
      /* If this is a static data member that will be defined
	 elsewhere, we don't want to instantiate the entire data
	 member, but we do want to instantiate the initializer so that
	 we can substitute that elsewhere. */
      || (external_p && VAR_P (d))
      /* Handle here a deleted function too, avoid generating
	 its body (c++/61080). */
      || deleted_p)
    {
      /* The definition of the static data member is now required so
	 we must substitute the initializer. */
      if (VAR_P (d)
	  && !DECL_INITIAL (d)
	  && DECL_INITIAL (code_pattern))
	{
	  tree ns;
	  tree init;
	  bool const_init = false;
	  bool enter_context = DECL_CLASS_SCOPE_P (d);
	  /* Enter D's namespace/class so lookup and access checking in
	     the initializer work correctly.  */
	  ns = decl_namespace_context (d);
	  push_nested_namespace (ns);
	  if (enter_context)
	    push_nested_class (DECL_CONTEXT (d));
	  init = tsubst_expr (DECL_INITIAL (code_pattern),
			      args,
			      tf_warning_or_error, NULL_TREE,
			      /*integral_constant_expression_p=*/false);
	  /* If instantiating the initializer involved instantiating this
	     again, don't call cp_finish_decl twice. */
	  if (!DECL_INITIAL (d))
	    {
	      /* Make sure the initializer is still constant, in case of
		 circular dependency (template/instantiate6.C). */
	      const_init
		= DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (code_pattern);
	      cp_finish_decl (d, init, /*init_const_expr_p=*/const_init,
			      /*asmspec_tree=*/NULL_TREE,
			      LOOKUP_ONLYCONVERTING);
	    }
	  if (enter_context)
	    pop_nested_class ();
	  pop_nested_namespace (ns);
	}
      /* We restore the source position here because it's used by
	 add_pending_template. */
      input_location = saved_loc;
      if (at_eof && !pattern_defined
	  && DECL_EXPLICIT_INSTANTIATION (d)
	  && DECL_NOT_REALLY_EXTERN (d))
	/* [temp.explicit]
	   The definition of a non-exported function template, a
	   non-exported member function template, or a non-exported
	   member function or static data member of a class template
	   shall be present in every translation unit in which it is
	   explicitly instantiated. */
	permerror (input_location, "explicit instantiation of %qD "
		   "but no definition available", d);
      /* If we're in unevaluated context, we just wanted to get the
	 constant value; this isn't an odr use, so don't queue
	 a full instantiation. */
      if (cp_unevaluated_operand != 0)
	goto out;
      /* ??? Historically, we have instantiated inline functions, even
	 when marked as "extern template". */
      if (!(external_p && VAR_P (d)))
	add_pending_template (d);
      goto out;
    }
  /* Past this point we actually instantiate the body of D.  */
  bool push_to_top, nested;
  tree fn_context;
  fn_context = decl_function_context (d);
  if (LAMBDA_FUNCTION_P (d))
    /* tsubst_lambda_expr resolved any references to enclosing functions. */
    fn_context = NULL_TREE;
  nested = current_function_decl != NULL_TREE;
  push_to_top = !(nested && fn_context == current_function_decl);
  vec<tree> omp_privatization_save;
  if (nested)
    save_omp_privatization_clauses (omp_privatization_save);
  if (push_to_top)
    push_to_top_level ();
  else
    {
      gcc_assert (!processing_template_decl);
      push_function_context ();
      cp_unevaluated_operand = 0;
      c_inhibit_evaluation_warnings = 0;
    }
  if (VAR_P (d))
    {
      /* The variable might be a lambda's extra scope, and that
	 lambda's visibility depends on D's. */
      maybe_commonize_var (d);
      determine_visibility (d);
    }
  /* Mark D as instantiated so that recursive calls to
     instantiate_decl do not try to instantiate it again. */
  DECL_TEMPLATE_INSTANTIATED (d) = 1;
  /* Regenerate the declaration in case the template has been modified
     by a subsequent redeclaration. */
  regenerate_decl_from_template (d, td, args);
  /* We already set the file and line above. Reset them now in case
     they changed as a result of calling regenerate_decl_from_template. */
  input_location = DECL_SOURCE_LOCATION (d);
  if (VAR_P (d))
    {
      tree init;
      bool const_init = false;
      /* Clear out DECL_RTL; whatever was there before may not be right
	 since we've reset the type of the declaration. */
      SET_DECL_RTL (d, NULL);
      DECL_IN_AGGR_P (d) = 0;
      /* The initializer is placed in DECL_INITIAL by
	 regenerate_decl_from_template so we don't need to
	 push/pop_access_scope again here. Pull it out so that
	 cp_finish_decl can process it. */
      init = DECL_INITIAL (d);
      DECL_INITIAL (d) = NULL_TREE;
      DECL_INITIALIZED_P (d) = 0;
      /* Clear DECL_EXTERNAL so that cp_finish_decl will process the
	 initializer. That function will defer actual emission until
	 we have a chance to determine linkage. */
      DECL_EXTERNAL (d) = 0;
      /* Enter the scope of D so that access-checking works correctly. */
      bool enter_context = DECL_CLASS_SCOPE_P (d);
      if (enter_context)
	push_nested_class (DECL_CONTEXT (d));
      const_init = DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (code_pattern);
      int flags = (TINFO_VAR_DECLARED_CONSTINIT (DECL_TEMPLATE_INFO (d))
		   ? LOOKUP_CONSTINIT : 0);
      cp_finish_decl (d, init, const_init, NULL_TREE, flags);
      if (enter_context)
	pop_nested_class ();
      if (variable_template_p (gen_tmpl))
	note_variable_template_instantiation (d);
    }
  else if (TREE_CODE (d) == FUNCTION_DECL && DECL_DEFAULTED_FN (code_pattern))
    synthesize_method (d);
  else if (TREE_CODE (d) == FUNCTION_DECL)
    {
      /* Set up the list of local specializations. */
      local_specialization_stack lss (push_to_top ? lss_blank : lss_copy);
      tree block = NULL_TREE;
      /* Set up context. */
      if (DECL_OMP_DECLARE_REDUCTION_P (code_pattern)
	  && TREE_CODE (DECL_CONTEXT (code_pattern)) == FUNCTION_DECL)
	block = push_stmt_list ();
      else
	start_preparsed_function (d, NULL_TREE, SF_PRE_PARSED);
      /* Some typedefs referenced from within the template code need to be
	 access checked at template instantiation time, i.e now. These
	 types were added to the template at parsing time. Let's get those
	 and perform the access checks then. */
      perform_typedefs_access_check (DECL_TEMPLATE_RESULT (td),
				     args);
      /* Create substitution entries for the parameters. */
      register_parameter_specializations (code_pattern, d);
      /* Substitute into the body of the function. */
      if (DECL_OMP_DECLARE_REDUCTION_P (code_pattern))
	tsubst_omp_udr (DECL_SAVED_TREE (code_pattern), args,
			tf_warning_or_error, tmpl);
      else
	{
	  tsubst_expr (DECL_SAVED_TREE (code_pattern), args,
		       tf_warning_or_error, tmpl,
		       /*integral_constant_expression_p=*/false);
	  /* Set the current input_location to the end of the function
	     so that finish_function knows where we are. */
	  input_location
	    = DECL_STRUCT_FUNCTION (code_pattern)->function_end_locus;
	  /* Remember if we saw an infinite loop in the template. */
	  current_function_infinite_loop
	    = DECL_STRUCT_FUNCTION (code_pattern)->language->infinite_loop;
	}
      /* Finish the function. */
      if (DECL_OMP_DECLARE_REDUCTION_P (code_pattern)
	  && TREE_CODE (DECL_CONTEXT (code_pattern)) == FUNCTION_DECL)
	DECL_SAVED_TREE (d) = pop_stmt_list (block);
      else
	{
	  d = finish_function (/*inline_p=*/false);
	  expand_or_defer_fn (d);
	}
      if (DECL_OMP_DECLARE_REDUCTION_P (code_pattern))
	cp_check_omp_declare_reduction (d);
    }
  /* We're not deferring instantiation any more. */
  TI_PENDING_TEMPLATE_FLAG (DECL_TEMPLATE_INFO (d)) = 0;
  if (push_to_top)
    pop_from_top_level ();
  else
    pop_function_context ();
  if (nested)
    restore_omp_privatization_clauses (omp_privatization_save);
out:
  /* Common exit: restore the saved global state.  */
  pop_deferring_access_checks ();
  timevar_pop (TV_TEMPLATE_INST);
  pop_tinst_level ();
  input_location = saved_loc;
  cp_unevaluated_operand = saved_unevaluated_operand;
  c_inhibit_evaluation_warnings = saved_inhibit_evaluation_warnings;
  return d;
}
/* Run through the list of templates that we wish we could
   instantiate, and instantiate any we can. RETRIES is the
   number of times we retry pending template instantiation. */
void
instantiate_pending_templates (int retries)
{
  int reconsider;
  location_t saved_loc = input_location;
  /* Instantiating templates may trigger vtable generation. This in turn
     may require further template instantiations. We place a limit here
     to avoid infinite loop. */
  if (pending_templates && retries >= max_tinst_depth)
    {
      tree decl = pending_templates->tinst->maybe_get_node ();
      fatal_error (input_location,
		   "template instantiation depth exceeds maximum of %d"
		   " instantiating %q+D, possibly from virtual table generation"
		   " (use %<-ftemplate-depth=%> to increase the maximum)",
		   max_tinst_depth, decl);
      if (TREE_CODE (decl) == FUNCTION_DECL)
	/* Pretend that we defined it. */
	DECL_INITIAL (decl) = error_mark_node;
      return;
    }
  /* Keep sweeping the pending list until a full pass makes no
     progress; each completed entry is unlinked as we go.  */
  do
    {
      struct pending_template **t = &pending_templates;
      struct pending_template *last = NULL;
      reconsider = 0;
      while (*t)
	{
	  tree instantiation = reopen_tinst_level ((*t)->tinst);
	  bool complete = false;
	  if (TYPE_P (instantiation))
	    {
	      /* A pending class template: instantiate the class and
		 then any instantiable members it acquired.  */
	      if (!COMPLETE_TYPE_P (instantiation))
		{
		  instantiate_class_template (instantiation);
		  if (CLASSTYPE_TEMPLATE_INSTANTIATION (instantiation))
		    for (tree fld = TYPE_FIELDS (instantiation);
			 fld; fld = TREE_CHAIN (fld))
		      if ((VAR_P (fld)
			   || (TREE_CODE (fld) == FUNCTION_DECL
			       && !DECL_ARTIFICIAL (fld)))
			  && DECL_TEMPLATE_INSTANTIATION (fld))
			instantiate_decl (fld,
					  /*defer_ok=*/false,
					  /*expl_inst_class_mem_p=*/false);
		  if (COMPLETE_TYPE_P (instantiation))
		    reconsider = 1;
		}
	      complete = COMPLETE_TYPE_P (instantiation);
	    }
	  else
	    {
	      /* A pending function or variable instantiation.  */
	      if (!DECL_TEMPLATE_SPECIALIZATION (instantiation)
		  && !DECL_TEMPLATE_INSTANTIATED (instantiation))
		{
		  instantiation
		    = instantiate_decl (instantiation,
					/*defer_ok=*/false,
					/*expl_inst_class_mem_p=*/false);
		  if (DECL_TEMPLATE_INSTANTIATED (instantiation))
		    reconsider = 1;
		}
	      complete = (DECL_TEMPLATE_SPECIALIZATION (instantiation)
			  || DECL_TEMPLATE_INSTANTIATED (instantiation));
	    }
	  if (complete)
	    {
	      /* If INSTANTIATION has been instantiated, then we don't
		 need to consider it again in the future. */
	      struct pending_template *drop = *t;
	      *t = (*t)->next;
	      set_refcount_ptr (drop->tinst);
	      pending_template_freelist ().free (drop);
	    }
	  else
	    {
	      last = *t;
	      t = &(*t)->next;
	    }
	  /* Release the reopened tinst level before the next entry.  */
	  tinst_depth = 0;
	  set_refcount_ptr (current_tinst_level);
	}
      last_pending_template = last;
    }
  while (reconsider);
  input_location = saved_loc;
}
/* Substitute ARGVEC into T, which is a list of initializers for
   either base class or a non-static data member.  The TREE_PURPOSEs
   are DECLs, and the TREE_VALUEs are the initializer values.  Used by
   instantiate_decl.  Returns a chain of TREE_LISTs (built in reverse
   order) whose TREE_PURPOSE is the member/base and whose TREE_VALUE is
   the instantiated initializer.  */

static tree
tsubst_initializer_list (tree t, tree argvec)
{
  tree inits = NULL_TREE;
  /* error_mark_node doubles as "no delegating (target) constructor seen
     yet"; once one is found, its initializer is stored here so any
     following mem-initializer can be diagnosed.  */
  tree target_ctor = error_mark_node;

  for (; t; t = TREE_CHAIN (t))
    {
      tree decl;
      tree init;
      tree expanded_bases = NULL_TREE;
      tree expanded_arguments = NULL_TREE;
      /* LEN stays 1 unless the mem-initializer is a pack expansion over
	 base classes, in which case it becomes the pack length.  */
      int i, len = 1;

      if (TREE_CODE (TREE_PURPOSE (t)) == TYPE_PACK_EXPANSION)
	{
	  tree expr;
	  tree arg;

	  /* Expand the base class expansion type into separate base
	     classes.  */
	  expanded_bases = tsubst_pack_expansion (TREE_PURPOSE (t), argvec,
						  tf_warning_or_error,
						  NULL_TREE);
	  if (expanded_bases == error_mark_node)
	    continue;

	  /* We'll be building separate TREE_LISTs of arguments for
	     each base.  */
	  len = TREE_VEC_LENGTH (expanded_bases);
	  expanded_arguments = make_tree_vec (len);
	  for (i = 0; i < len; i++)
	    TREE_VEC_ELT (expanded_arguments, i) = NULL_TREE;

	  /* Build a dummy EXPR_PACK_EXPANSION that will be used to
	     expand each argument in the TREE_VALUE of t.  */
	  expr = make_node (EXPR_PACK_EXPANSION);
	  PACK_EXPANSION_LOCAL_P (expr) = true;
	  PACK_EXPANSION_PARAMETER_PACKS (expr) =
	    PACK_EXPANSION_PARAMETER_PACKS (TREE_PURPOSE (t));

	  if (TREE_VALUE (t) == void_type_node)
	    /* VOID_TYPE_NODE is used to indicate
	       value-initialization.  */
	    {
	      for (i = 0; i < len; i++)
		TREE_VEC_ELT (expanded_arguments, i) = void_type_node;
	    }
	  else
	    {
	      /* Substitute parameter packs into each argument in the
		 TREE_LIST.  */
	      in_base_initializer = 1;
	      for (arg = TREE_VALUE (t); arg; arg = TREE_CHAIN (arg))
		{
		  tree expanded_exprs;

		  /* Expand the argument.  */
		  SET_PACK_EXPANSION_PATTERN (expr, TREE_VALUE (arg));
		  expanded_exprs
		    = tsubst_pack_expansion (expr, argvec,
					     tf_warning_or_error,
					     NULL_TREE);
		  if (expanded_exprs == error_mark_node)
		    continue;

		  /* Prepend each of the expanded expressions to the
		     corresponding TREE_LIST in EXPANDED_ARGUMENTS.  */
		  for (i = 0; i < len; i++)
		    {
		      TREE_VEC_ELT (expanded_arguments, i) =
			tree_cons (NULL_TREE,
				   TREE_VEC_ELT (expanded_exprs, i),
				   TREE_VEC_ELT (expanded_arguments, i));
		    }
		}
	      in_base_initializer = 0;

	      /* Reverse all of the TREE_LISTs in EXPANDED_ARGUMENTS,
		 since we built them backwards.  */
	      for (i = 0; i < len; i++)
		{
		  TREE_VEC_ELT (expanded_arguments, i) =
		    nreverse (TREE_VEC_ELT (expanded_arguments, i));
		}
	    }
	}

      for (i = 0; i < len; ++i)
	{
	  if (expanded_bases)
	    {
	      decl = TREE_VEC_ELT (expanded_bases, i);
	      decl = expand_member_init (decl);
	      init = TREE_VEC_ELT (expanded_arguments, i);
	    }
	  else
	    {
	      tree tmp;
	      decl = tsubst_copy (TREE_PURPOSE (t), argvec,
				  tf_warning_or_error, NULL_TREE);

	      decl = expand_member_init (decl);
	      if (decl && !DECL_P (decl))
		in_base_initializer = 1;

	      init = TREE_VALUE (t);
	      /* Remember the pre-substitution initializer so we can tell
		 "instantiated to nothing" apart from "was nothing".  */
	      tmp = init;
	      if (init != void_type_node)
		init = tsubst_expr (init, argvec,
				    tf_warning_or_error, NULL_TREE,
				    /*integral_constant_expression_p=*/false);
	      if (init == NULL_TREE && tmp != NULL_TREE)
		/* If we had an initializer but it instantiated to nothing,
		   value-initialize the object.  This will only occur when
		   the initializer was a pack expansion where the parameter
		   packs used in that expansion were of length zero.  */
		init = void_type_node;
	      in_base_initializer = 0;
	    }

	  /* [class.base.init]: a delegating constructor must be the only
	     mem-initializer.  */
	  if (target_ctor != error_mark_node
	      && init != error_mark_node)
	    {
	      error ("mem-initializer for %qD follows constructor delegation",
		     decl);
	      return inits;
	    }
	  /* Look for a target constructor.  */
	  if (init != error_mark_node
	      && decl && CLASS_TYPE_P (decl)
	      && same_type_p (decl, current_class_type))
	    {
	      maybe_warn_cpp0x (CPP0X_DELEGATING_CTORS);
	      if (inits)
		{
		  error ("constructor delegation follows mem-initializer for %qD",
			 TREE_PURPOSE (inits));
		  continue;
		}
	      target_ctor = init;
	    }

	  if (decl)
	    {
	      init = build_tree_list (decl, init);
	      TREE_CHAIN (init) = inits;
	      inits = init;
	    }
	}
    }
  return inits;
}
/* Make CURRENT_ACCESS_SPECIFIER mirror the access protection recorded
   on DECL: private, protected, or (by default) public.  */

static void
set_current_access_from_decl (tree decl)
{
  current_access_specifier
    = (TREE_PRIVATE (decl) ? access_private_node
       : TREE_PROTECTED (decl) ? access_protected_node
       : access_public_node);
}
/* Instantiate an enumerated type.  TAG is the template type, NEWTAG
   is the instantiation (which should have been created with
   start_enum) and ARGS are the template arguments to use.  Each
   enumerator of TAG is substituted and rebuilt in NEWTAG.  */

static void
tsubst_enum (tree tag, tree newtag, tree args)
{
  tree e;

  /* A scoped enum's enumerators live in their own scope; open it so
     build_enumerator places them correctly (closed below).  */
  if (SCOPED_ENUM_P (newtag))
    begin_scope (sk_scoped_enum, newtag);

  for (e = TYPE_VALUES (tag); e; e = TREE_CHAIN (e))
    {
      tree value;
      tree decl;

      decl = TREE_VALUE (e);
      /* Note that in a template enum, the TREE_VALUE is the
	 CONST_DECL, not the corresponding INTEGER_CST.  */
      value = tsubst_expr (DECL_INITIAL (decl),
			   args, tf_warning_or_error, NULL_TREE,
			   /*integral_constant_expression_p=*/true);

      /* Give this enumeration constant the correct access.  */
      set_current_access_from_decl (decl);

      /* Actually build the enumerator itself.  Here we're assuming that
	 enumerators can't have dependent attributes.  */
      build_enumerator (DECL_NAME (decl), value, newtag,
			DECL_ATTRIBUTES (decl), DECL_SOURCE_LOCATION (decl));
    }

  if (SCOPED_ENUM_P (newtag))
    finish_scope ();

  finish_enum_value_list (newtag);
  finish_enum (newtag);

  /* Point the instantiation's TYPE_DECL at the template enum's source
     location so diagnostics refer to the original definition.  */
  DECL_SOURCE_LOCATION (TYPE_NAME (newtag))
    = DECL_SOURCE_LOCATION (TYPE_NAME (tag));
}
/* DECL is a FUNCTION_DECL that is a template specialization.  Return
   its type -- but without substituting the innermost set of template
   arguments.  So, innermost set of template parameters will appear in
   the type.  */

tree
get_mostly_instantiated_function_type (tree decl)
{
  /* The function's DECL_TI_TEMPLATE is already partially instantiated;
     its type is exactly what we need.  */
  tree partial_tmpl = DECL_TI_TEMPLATE (decl);
  return TREE_TYPE (partial_tmpl);
}
/* Return truthvalue if we're processing a template different from
   the last one involved in diagnostics.  */

bool
problematic_instantiation_changed (void)
{
  if (current_tinst_level == last_error_tinst_level)
    return false;
  return true;
}
/* Remember current template involved in diagnostics, so that
   problematic_instantiation_changed can detect when a later diagnostic
   concerns a different instantiation.  */
void
record_last_problematic_instantiation (void)
{
  /* NOTE: set_refcount_ptr presumably releases the old reference held by
     last_error_tinst_level and acquires one on current_tinst_level --
     confirm against its definition.  */
  set_refcount_ptr (last_error_tinst_level, current_tinst_level);
}
/* Return the current template instantiation level (NULL when no
   instantiation is in progress).  */
struct tinst_level *
current_instantiation (void)
{
  return current_tinst_level;
}
/* Return TRUE if current_function_decl is being instantiated, false
otherwise. */
bool
instantiating_current_function_p (void)
{
return (current_instantiation ()
&& (current_instantiation ()->maybe_get_node ()
== current_function_decl));
}
/* [temp.param] Check that template non-type parm TYPE is of an allowable
   type.  Return false for ok, true for disallowed.  Issue error and
   inform messages under control of COMPLAIN.  The checks are ordered:
   the always-valid forms come first, the dialect-gated forms after.  */

static bool
invalid_nontype_parm_type_p (tree type, tsubst_flags_t complain)
{
  if (INTEGRAL_OR_ENUMERATION_TYPE_P (type))
    return false;
  else if (TYPE_PTR_P (type))
    return false;
  /* Only lvalue references are allowed here.  */
  else if (TYPE_REF_P (type)
	   && !TYPE_REF_IS_RVALUE (type))
    return false;
  else if (TYPE_PTRMEM_P (type))
    return false;
  else if (TREE_CODE (type) == TEMPLATE_TYPE_PARM)
    {
      if (CLASS_PLACEHOLDER_TEMPLATE (type) && cxx_dialect < cxx2a)
	{
	  if (complain & tf_error)
	    error ("non-type template parameters of deduced class type only "
		   "available with %<-std=c++2a%> or %<-std=gnu++2a%>");
	  return true;
	}
      return false;
    }
  else if (TREE_CODE (type) == TYPENAME_TYPE)
    return false;
  else if (TREE_CODE (type) == DECLTYPE_TYPE)
    return false;
  else if (TREE_CODE (type) == NULLPTR_TYPE)
    return false;
  /* A bound template template parm could later be instantiated to have a valid
     nontype parm type via an alias template.  */
  else if (cxx_dialect >= cxx11
	   && TREE_CODE (type) == BOUND_TEMPLATE_TEMPLATE_PARM)
    return false;
  else if (CLASS_TYPE_P (type))
    {
      if (cxx_dialect < cxx2a)
	{
	  if (complain & tf_error)
	    error ("non-type template parameters of class type only available "
		   "with %<-std=c++2a%> or %<-std=gnu++2a%>");
	  return true;
	}
      /* A dependent class type may become structural on instantiation;
	 defer the judgement.  */
      if (dependent_type_p (type))
	return false;
      if (!complete_type_or_else (type, NULL_TREE))
	return true;
      if (!structural_type_p (type))
	{
	  if (complain & tf_error)
	    {
	      auto_diagnostic_group d;
	      error ("%qT is not a valid type for a template non-type "
		     "parameter because it is not structural", type);
	      /* The second call with its extra argument presumably emits
		 the explanatory notes -- see structural_type_p.  */
	      structural_type_p (type, true);
	    }
	  return true;
	}
      return false;
    }

  if (complain & tf_error)
    {
      if (type == error_mark_node)
	inform (input_location, "invalid template non-type parameter");
      else
	error ("%q#T is not a valid type for a template non-type parameter",
	       type);
    }
  return true;
}
/* Returns TRUE if TYPE is dependent, in the sense of [temp.dep.type].
   Assumes that TYPE really is a type, and not the ERROR_MARK_NODE.
   Helper for dependent_type_p, which caches the answer per type.  */

static bool
dependent_type_p_r (tree type)
{
  tree scope;

  /* [temp.dep.type]

     A type is dependent if it is:

     -- a template parameter. Template template parameters are types
	for us (since TYPE_P holds true for them) so we handle
	them here.  */
  if (TREE_CODE (type) == TEMPLATE_TYPE_PARM
      || TREE_CODE (type) == TEMPLATE_TEMPLATE_PARM)
    return true;
  /* -- a qualified-id with a nested-name-specifier which contains a
	class-name that names a dependent type or whose unqualified-id
	names a dependent type.  */
  if (TREE_CODE (type) == TYPENAME_TYPE)
    return true;

  /* An alias template specialization can be dependent even if the
     resulting type is not.  */
  if (dependent_alias_template_spec_p (type, nt_transparent))
    return true;

  /* -- a cv-qualified type where the cv-unqualified type is
	dependent.

     No code is necessary for this bullet; the code below handles
     cv-qualified types, and we don't want to strip aliases with
     TYPE_MAIN_VARIANT because of DR 1558.  */
  /* -- a compound type constructed from any dependent type.  */
  if (TYPE_PTRMEM_P (type))
    return (dependent_type_p (TYPE_PTRMEM_CLASS_TYPE (type))
	    || dependent_type_p (TYPE_PTRMEM_POINTED_TO_TYPE
				 (type)));
  else if (INDIRECT_TYPE_P (type))
    return dependent_type_p (TREE_TYPE (type));
  else if (FUNC_OR_METHOD_TYPE_P (type))
    {
      tree arg_type;

      /* A function type is dependent if its return type or any of its
	 parameter types is.  */
      if (dependent_type_p (TREE_TYPE (type)))
	return true;
      for (arg_type = TYPE_ARG_TYPES (type);
	   arg_type;
	   arg_type = TREE_CHAIN (arg_type))
	if (dependent_type_p (TREE_VALUE (arg_type)))
	  return true;
      if (cxx_dialect >= cxx17)
	/* A value-dependent noexcept-specifier makes the type dependent.  */
	if (tree spec = TYPE_RAISES_EXCEPTIONS (type))
	  if (tree noex = TREE_PURPOSE (spec))
	    /* Treat DEFERRED_NOEXCEPT as non-dependent, since it doesn't
	       affect overload resolution and treating it as dependent breaks
	       things.  Same for an unparsed noexcept expression.  */
	    if (TREE_CODE (noex) != DEFERRED_NOEXCEPT
		&& TREE_CODE (noex) != DEFERRED_PARSE
		&& value_dependent_expression_p (noex))
	      return true;
      return false;
    }
  /* -- an array type constructed from any dependent type or whose
	size is specified by a constant expression that is
	value-dependent.

     We checked for type- and value-dependence of the bounds in
     compute_array_index_type, so TYPE_DEPENDENT_P is already set.  */
  if (TREE_CODE (type) == ARRAY_TYPE)
    {
      if (TYPE_DOMAIN (type)
	  && dependent_type_p (TYPE_DOMAIN (type)))
	return true;
      return dependent_type_p (TREE_TYPE (type));
    }

  /* -- a template-id in which either the template name is a template
     parameter ...  */
  if (TREE_CODE (type) == BOUND_TEMPLATE_TEMPLATE_PARM)
    return true;
  /* ... or any of the template arguments is a dependent type or
	an expression that is type-dependent or value-dependent.  */
  else if (CLASS_TYPE_P (type) && CLASSTYPE_TEMPLATE_INFO (type)
	   && (any_dependent_template_arguments_p
	       (INNERMOST_TEMPLATE_ARGS (CLASSTYPE_TI_ARGS (type)))))
    return true;

  /* All TYPEOF_TYPEs, DECLTYPE_TYPEs, and UNDERLYING_TYPEs are
     dependent; if the argument of the `typeof' expression is not
     type-dependent, then it should already been have resolved.  */
  if (TREE_CODE (type) == TYPEOF_TYPE
      || TREE_CODE (type) == DECLTYPE_TYPE
      || TREE_CODE (type) == UNDERLYING_TYPE)
    return true;

  /* A template argument pack is dependent if any of its packed
     arguments are.  */
  if (TREE_CODE (type) == TYPE_ARGUMENT_PACK)
    {
      tree args = ARGUMENT_PACK_ARGS (type);
      int i, len = TREE_VEC_LENGTH (args);
      for (i = 0; i < len; ++i)
	if (dependent_template_arg_p (TREE_VEC_ELT (args, i)))
	  return true;
    }

  /* All TYPE_PACK_EXPANSIONs are dependent, because parameter packs must
     be template parameters.  */
  if (TREE_CODE (type) == TYPE_PACK_EXPANSION)
    return true;

  if (any_dependent_type_attributes_p (TYPE_ATTRIBUTES (type)))
    return true;

  /* The standard does not specifically mention types that are local
     to template functions or local classes, but they should be
     considered dependent too.  For example:

       template <int I> void f() {
	 enum E { a = I };
	 S<sizeof (E)> s;
       }

     The size of `E' cannot be known until the value of `I' has been
     determined.  Therefore, `E' must be considered dependent.  */
  scope = TYPE_CONTEXT (type);
  if (scope && TYPE_P (scope))
    return dependent_type_p (scope);
  /* Don't use type_dependent_expression_p here, as it can lead
     to infinite recursion trying to determine whether a lambda
     nested in a lambda is dependent (c++/47687).  */
  else if (scope && TREE_CODE (scope) == FUNCTION_DECL
	   && DECL_LANG_SPECIFIC (scope)
	   && DECL_TEMPLATE_INFO (scope)
	   && (any_dependent_template_arguments_p
	       (INNERMOST_TEMPLATE_ARGS (DECL_TI_ARGS (scope)))))
    return true;

  /* Other types are non-dependent.  */
  return false;
}
/* Returns TRUE if TYPE is dependent, in the sense of
   [temp.dep.type].  Note that a NULL type is considered dependent.
   Caching wrapper around dependent_type_p_r.  */

bool
dependent_type_p (tree type)
{
  /* If there are no template parameters in scope, then there can't be
     any dependent types.  */
  if (!processing_template_decl)
    {
      /* If we are not processing a template, then nobody should be
	 providing us with a dependent type.  */
      gcc_assert (type);
      gcc_assert (TREE_CODE (type) != TEMPLATE_TYPE_PARM || is_auto (type));
      return false;
    }

  /* If the type is NULL, we have not computed a type for the entity
     in question; in that case, the type is dependent.  */
  if (!type)
    return true;

  /* Erroneous types can be considered non-dependent.  */
  if (type == error_mark_node)
    return false;

  /* Getting here with global_type_node means we improperly called this
     function on the TREE_TYPE of an IDENTIFIER_NODE.  */
  gcc_checking_assert (type != global_type_node);

  /* If we have not already computed the appropriate value for TYPE,
     do so now.  The result is cached in TYPE_DEPENDENT_P so the
     (potentially recursive) computation runs only once per type.  */
  if (!TYPE_DEPENDENT_P_VALID (type))
    {
      TYPE_DEPENDENT_P (type) = dependent_type_p_r (type);
      TYPE_DEPENDENT_P_VALID (type) = 1;
    }

  return TYPE_DEPENDENT_P (type);
}
/* Returns TRUE if SCOPE is a dependent scope, in which we can't do any
   lookup.  In other words, a dependent type that is not the current
   instantiation.  */

bool
dependent_scope_p (tree scope)
{
  if (!scope || !TYPE_P (scope))
    return false;
  if (!dependent_type_p (scope))
    return false;
  return !currently_open_class (scope);
}
/* T is a SCOPE_REF.  Return whether it represents a non-static member of
   an unknown base of 'this' (and is therefore instantiation-dependent).  */

static bool
unknown_base_ref_p (tree t)
{
  /* Without a 'this', there is no base to be unknown.  */
  if (!current_class_ptr)
    return false;

  if (shared_member_p (TREE_OPERAND (t, 1)))
    return false;

  tree enclosing = current_nonlambda_class_type ();
  if (!any_dependent_bases_p (enclosing))
    return false;

  /* The qualifying scope being a known base resolves the member.  */
  return !DERIVED_FROM_P (TREE_OPERAND (t, 0), enclosing);
}
/* T is a SCOPE_REF; return whether we need to consider it
   instantiation-dependent so that we can check access at instantiation
   time even though we know which member it resolves to.  */

static bool
instantiation_dependent_scope_ref_p (tree t)
{
  tree scope = TREE_OPERAND (t, 0);
  tree member = TREE_OPERAND (t, 1);
  /* Only a reference that is fully resolved to an accessible member of
     a known class needs no instantiation-time handling.  */
  bool resolved = (DECL_P (member)
		   && CLASS_TYPE_P (scope)
		   && !unknown_base_ref_p (t)
		   && accessible_in_template_p (scope, member));
  return !resolved;
}
/* Returns TRUE if the EXPRESSION is value-dependent, in the sense of
   [temp.dep.constexpr].  EXPRESSION is already known to be a constant
   expression.  */

/* Note that this predicate is not appropriate for general expressions;
   only constant expressions (that satisfy potential_constant_expression)
   can be tested for value dependence.  */

bool
value_dependent_expression_p (tree expression)
{
  if (!processing_template_decl || expression == NULL_TREE)
    return false;

  /* A type-dependent expression is also value-dependent.  */
  if (type_dependent_expression_p (expression))
    return true;

  switch (TREE_CODE (expression))
    {
    case BASELINK:
      /* A dependent member function of the current instantiation.  */
      return dependent_type_p (BINFO_TYPE (BASELINK_BINFO (expression)));

    case FUNCTION_DECL:
      /* A dependent member function of the current instantiation.  */
      if (DECL_CLASS_SCOPE_P (expression)
	  && dependent_type_p (DECL_CONTEXT (expression)))
	return true;
      break;

    case IDENTIFIER_NODE:
      /* A name that has not been looked up -- must be dependent.  */
      return true;

    case TEMPLATE_PARM_INDEX:
      /* A non-type template parm.  */
      return true;

    case CONST_DECL:
      /* A non-type template parm.  */
      if (DECL_TEMPLATE_PARM_P (expression))
	return true;
      /* Otherwise (e.g. an enumerator), look at its initializer.  */
      return value_dependent_expression_p (DECL_INITIAL (expression));

    case VAR_DECL:
      /* A constant with literal type and is initialized
	 with an expression that is value-dependent.  */
      if (DECL_DEPENDENT_INIT_P (expression)
	  /* FIXME cp_finish_decl doesn't fold reference initializers.  */
	  || TYPE_REF_P (TREE_TYPE (expression)))
	return true;
      if (DECL_HAS_VALUE_EXPR_P (expression))
	{
	  tree value_expr = DECL_VALUE_EXPR (expression);
	  if (value_dependent_expression_p (value_expr)
	      /* __PRETTY_FUNCTION__ inside a template function is dependent
		 on the name of the function.  */
	      || (DECL_PRETTY_FUNCTION_P (expression)
		  /* It might be used in a template, but not a template
		     function, in which case its DECL_VALUE_EXPR will be
		     "top level".  */
		  && value_expr == error_mark_node))
	    return true;
	}
      return false;

    case DYNAMIC_CAST_EXPR:
    case STATIC_CAST_EXPR:
    case CONST_CAST_EXPR:
    case REINTERPRET_CAST_EXPR:
    case CAST_EXPR:
    case IMPLICIT_CONV_EXPR:
      /* These expressions are value-dependent if the type to which
	 the cast occurs is dependent or the expression being casted
	 is value-dependent.  */
      {
	tree type = TREE_TYPE (expression);

	if (dependent_type_p (type))
	  return true;

	/* A functional cast has a list of operands.  */
	expression = TREE_OPERAND (expression, 0);
	if (!expression)
	  {
	    /* If there are no operands, it must be an expression such
	       as "int()". This should not happen for aggregate types
	       because it would form non-constant expressions.  */
	    gcc_assert (cxx_dialect >= cxx11
			|| INTEGRAL_OR_ENUMERATION_TYPE_P (type));

	    return false;
	  }

	if (TREE_CODE (expression) == TREE_LIST)
	  return any_value_dependent_elements_p (expression);

	return value_dependent_expression_p (expression);
      }

    case SIZEOF_EXPR:
      if (SIZEOF_EXPR_TYPE_P (expression))
	return dependent_type_p (TREE_TYPE (TREE_OPERAND (expression, 0)));
      /* FALLTHRU */
    case ALIGNOF_EXPR:
    case TYPEID_EXPR:
      /* A `sizeof' expression is value-dependent if the operand is
	 type-dependent or is a pack expansion.  */
      expression = TREE_OPERAND (expression, 0);
      if (PACK_EXPANSION_P (expression))
	return true;
      else if (TYPE_P (expression))
	return dependent_type_p (expression);
      return instantiation_dependent_uneval_expression_p (expression);

    case AT_ENCODE_EXPR:
      /* An 'encode' expression is value-dependent if the operand is
	 type-dependent.  */
      expression = TREE_OPERAND (expression, 0);
      return dependent_type_p (expression);

    case NOEXCEPT_EXPR:
      /* A noexcept expression is value-dependent if its (unevaluated)
	 operand is instantiation-dependent.  */
      expression = TREE_OPERAND (expression, 0);
      return instantiation_dependent_uneval_expression_p (expression);

    case SCOPE_REF:
      /* All instantiation-dependent expressions should also be considered
	 value-dependent.  */
      return instantiation_dependent_scope_ref_p (expression);

    case COMPONENT_REF:
      return (value_dependent_expression_p (TREE_OPERAND (expression, 0))
	      || value_dependent_expression_p (TREE_OPERAND (expression, 1)));

    case NONTYPE_ARGUMENT_PACK:
      /* A NONTYPE_ARGUMENT_PACK is value-dependent if any packed argument
	 is value-dependent.  */
      {
	tree values = ARGUMENT_PACK_ARGS (expression);
	int i, len = TREE_VEC_LENGTH (values);

	for (i = 0; i < len; ++i)
	  if (value_dependent_expression_p (TREE_VEC_ELT (values, i)))
	    return true;

	return false;
      }

    case TRAIT_EXPR:
      {
	/* A trait is value-dependent if either of its type operands is
	   dependent; the second operand may also be a list of types.  */
	tree type2 = TRAIT_EXPR_TYPE2 (expression);

	if (dependent_type_p (TRAIT_EXPR_TYPE1 (expression)))
	  return true;

	if (!type2)
	  return false;

	if (TREE_CODE (type2) != TREE_LIST)
	  return dependent_type_p (type2);

	for (; type2; type2 = TREE_CHAIN (type2))
	  if (dependent_type_p (TREE_VALUE (type2)))
	    return true;

	return false;
      }

    case MODOP_EXPR:
      return ((value_dependent_expression_p (TREE_OPERAND (expression, 0)))
	      || (value_dependent_expression_p (TREE_OPERAND (expression, 2))));

    case ARRAY_REF:
      return ((value_dependent_expression_p (TREE_OPERAND (expression, 0)))
	      || (value_dependent_expression_p (TREE_OPERAND (expression, 1))));

    case ADDR_EXPR:
      {
	tree op = TREE_OPERAND (expression, 0);
	return (value_dependent_expression_p (op)
		|| has_value_dependent_address (op));
      }

    case REQUIRES_EXPR:
      /* Treat all requires-expressions as value-dependent so
	 we don't try to fold them.  */
      return true;

    case TYPE_REQ:
      /* A type requirement depends on its named type.  */
      return dependent_type_p (TREE_OPERAND (expression, 0));

    case CALL_EXPR:
      {
	if (value_dependent_expression_p (CALL_EXPR_FN (expression)))
	  return true;
	tree fn = get_callee_fndecl (expression);
	int i, nargs;
	nargs = call_expr_nargs (expression);
	for (i = 0; i < nargs; ++i)
	  {
	    tree op = CALL_EXPR_ARG (expression, i);
	    /* In a call to a constexpr member function, look through the
	       implicit ADDR_EXPR on the object argument so that it doesn't
	       cause the call to be considered value-dependent.  We also
	       look through it in potential_constant_expression.  */
	    if (i == 0 && fn && DECL_DECLARED_CONSTEXPR_P (fn)
		&& DECL_NONSTATIC_MEMBER_FUNCTION_P (fn)
		&& TREE_CODE (op) == ADDR_EXPR)
	      op = TREE_OPERAND (op, 0);
	    if (value_dependent_expression_p (op))
	      return true;
	  }
	return false;
      }

    case TEMPLATE_ID_EXPR:
      /* Treat a template-id naming a concept (a concept check) as
	 value-dependent.  */
      return concept_definition_p (TREE_OPERAND (expression, 0));

    case CONSTRUCTOR:
      {
	unsigned ix;
	tree val;
	if (dependent_type_p (TREE_TYPE (expression)))
	  return true;
	FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (expression), ix, val)
	  if (value_dependent_expression_p (val))
	    return true;
	return false;
      }

    case STMT_EXPR:
      /* Treat a GNU statement expression as dependent to avoid crashing
	 under instantiate_non_dependent_expr; it can't be constant.  */
      return true;

    default:
      /* A constant expression is value-dependent if any subexpression is
	 value-dependent.  */
      switch (TREE_CODE_CLASS (TREE_CODE (expression)))
	{
	case tcc_reference:
	case tcc_unary:
	case tcc_comparison:
	case tcc_binary:
	case tcc_expression:
	case tcc_vl_exp:
	  {
	    int i, len = cp_tree_operand_length (expression);

	    for (i = 0; i < len; i++)
	      {
		tree t = TREE_OPERAND (expression, i);

		/* In some cases, some of the operands may be missing.
		   (For example, in the case of PREDECREMENT_EXPR, the
		   amount to increment by may be missing.)  That doesn't
		   make the expression dependent. */
		if (t && value_dependent_expression_p (t))
		  return true;
	      }
	  }
	  break;

	default:
	  break;
	}
      break;
    }

  /* The expression is not value-dependent.  */
  return false;
}
/* Returns TRUE if the EXPRESSION is type-dependent, in the sense of
   [temp.dep.expr].  Note that an expression with no type is
   considered dependent.  Other parts of the compiler arrange for an
   expression with type-dependent subexpressions to have no type, so
   this function doesn't have to be fully recursive.  */

bool
type_dependent_expression_p (tree expression)
{
  if (!processing_template_decl)
    return false;

  if (expression == NULL_TREE || expression == error_mark_node)
    return false;

  /* Look through any location wrapper to the wrapped expression.  */
  STRIP_ANY_LOCATION_WRAPPER (expression);

  /* An unresolved name is always dependent.  */
  if (identifier_p (expression)
      || TREE_CODE (expression) == USING_DECL
      || TREE_CODE (expression) == WILDCARD_DECL)
    return true;

  /* A lambda-expression in template context is dependent.  dependent_type_p is
     true for a lambda in the scope of a class or function template, but that
     doesn't cover all template contexts, like a default template argument.  */
  if (TREE_CODE (expression) == LAMBDA_EXPR)
    return true;

  /* A fold expression is type-dependent.  */
  if (TREE_CODE (expression) == UNARY_LEFT_FOLD_EXPR
      || TREE_CODE (expression) == UNARY_RIGHT_FOLD_EXPR
      || TREE_CODE (expression) == BINARY_LEFT_FOLD_EXPR
      || TREE_CODE (expression) == BINARY_RIGHT_FOLD_EXPR)
    return true;

  /* Some expression forms are never type-dependent.  */
  if (TREE_CODE (expression) == PSEUDO_DTOR_EXPR
      || TREE_CODE (expression) == SIZEOF_EXPR
      || TREE_CODE (expression) == ALIGNOF_EXPR
      || TREE_CODE (expression) == AT_ENCODE_EXPR
      || TREE_CODE (expression) == NOEXCEPT_EXPR
      || TREE_CODE (expression) == TRAIT_EXPR
      || TREE_CODE (expression) == TYPEID_EXPR
      || TREE_CODE (expression) == DELETE_EXPR
      || TREE_CODE (expression) == VEC_DELETE_EXPR
      || TREE_CODE (expression) == THROW_EXPR
      || TREE_CODE (expression) == REQUIRES_EXPR)
    return false;

  /* The types of these expressions depends only on the type to which
     the cast occurs.  */
  if (TREE_CODE (expression) == DYNAMIC_CAST_EXPR
      || TREE_CODE (expression) == STATIC_CAST_EXPR
      || TREE_CODE (expression) == CONST_CAST_EXPR
      || TREE_CODE (expression) == REINTERPRET_CAST_EXPR
      || TREE_CODE (expression) == IMPLICIT_CONV_EXPR
      || TREE_CODE (expression) == CAST_EXPR)
    return dependent_type_p (TREE_TYPE (expression));

  /* The types of these expressions depends only on the type created
     by the expression.  */
  if (TREE_CODE (expression) == NEW_EXPR
      || TREE_CODE (expression) == VEC_NEW_EXPR)
    {
      /* For NEW_EXPR tree nodes created inside a template, either
	 the object type itself or a TREE_LIST may appear as the
	 operand 1.  */
      tree type = TREE_OPERAND (expression, 1);
      if (TREE_CODE (type) == TREE_LIST)
	/* This is an array type.  We need to check array dimensions
	   as well.  */
	return dependent_type_p (TREE_VALUE (TREE_PURPOSE (type)))
	       || value_dependent_expression_p
		  (TREE_OPERAND (TREE_VALUE (type), 1));
      else
	return dependent_type_p (type);
    }

  if (TREE_CODE (expression) == SCOPE_REF)
    {
      tree scope = TREE_OPERAND (expression, 0);
      tree name = TREE_OPERAND (expression, 1);

      /* 14.6.2.2 [temp.dep.expr]: An id-expression is type-dependent if it
	 contains an identifier associated by name lookup with one or more
	 declarations declared with a dependent type, or...a
	 nested-name-specifier or qualified-id that names a member of an
	 unknown specialization.  */
      return (type_dependent_expression_p (name)
	      || dependent_scope_p (scope));
    }

  if (TREE_CODE (expression) == TEMPLATE_DECL
      && !DECL_TEMPLATE_TEMPLATE_PARM_P (expression))
    return uses_outer_template_parms (expression);

  if (TREE_CODE (expression) == STMT_EXPR)
    expression = stmt_expr_value_expr (expression);

  /* A braced init-list is dependent if any of its elements is.  */
  if (BRACE_ENCLOSED_INITIALIZER_P (expression))
    {
      tree elt;
      unsigned i;

      FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (expression), i, elt)
	{
	  if (type_dependent_expression_p (elt))
	    return true;
	}
      return false;
    }

  /* A static data member of the current instantiation with incomplete
     array type is type-dependent, as the definition and specializations
     can have different bounds.  */
  if (VAR_P (expression)
      && DECL_CLASS_SCOPE_P (expression)
      && dependent_type_p (DECL_CONTEXT (expression))
      && VAR_HAD_UNKNOWN_BOUND (expression))
    return true;

  /* An array of unknown bound depending on a variadic parameter, eg:

     template<typename... Args>
       void foo (Args... args)
       {
	 int arr[] = { args... };
       }

     template<int... vals>
       void bar ()
       {
	 int arr[] = { vals... };
       }

     If the array has no length and has an initializer, it must be that
     we couldn't determine its length in cp_complete_array_type because
     it is dependent.  */
  if (VAR_P (expression)
      && TREE_TYPE (expression) != NULL_TREE
      && TREE_CODE (TREE_TYPE (expression)) == ARRAY_TYPE
      && !TYPE_DOMAIN (TREE_TYPE (expression))
      && DECL_INITIAL (expression))
    return true;

  /* A function or variable template-id is type-dependent if it has any
     dependent template arguments.  */
  if (VAR_OR_FUNCTION_DECL_P (expression)
      && DECL_LANG_SPECIFIC (expression)
      && DECL_TEMPLATE_INFO (expression))
    {
      /* Consider the innermost template arguments, since those are the ones
	 that come from the template-id; the template arguments for the
	 enclosing class do not make it type-dependent unless they are used in
	 the type of the decl.  */
      if (instantiates_primary_template_p (expression)
	  && (any_dependent_template_arguments_p
	      (INNERMOST_TEMPLATE_ARGS (DECL_TI_ARGS (expression)))))
	return true;
    }

  /* Otherwise, if the function decl isn't from a dependent scope, it can't be
     type-dependent.  Checking this is important for functions with auto return
     type, which looks like a dependent type.  */
  if (TREE_CODE (expression) == FUNCTION_DECL
      && !(DECL_CLASS_SCOPE_P (expression)
	   && dependent_type_p (DECL_CONTEXT (expression)))
      && !(DECL_LANG_SPECIFIC (expression)
	   && DECL_FRIEND_P (expression)
	   && (!DECL_FRIEND_CONTEXT (expression)
	       || dependent_type_p (DECL_FRIEND_CONTEXT (expression))))
      && !DECL_LOCAL_FUNCTION_P (expression))
    {
      gcc_assert (!dependent_type_p (TREE_TYPE (expression))
		  || undeduced_auto_decl (expression));
      return false;
    }

  /* Always dependent, on the number of arguments if nothing else.  */
  if (TREE_CODE (expression) == EXPR_PACK_EXPANSION)
    return true;

  /* unknown_type_node marks expressions (overload sets, member
     references, ...) whose type has yet to be determined; examine
     their structure instead.  */
  if (TREE_TYPE (expression) == unknown_type_node)
    {
      if (TREE_CODE (expression) == ADDR_EXPR)
	return type_dependent_expression_p (TREE_OPERAND (expression, 0));
      if (TREE_CODE (expression) == COMPONENT_REF
	  || TREE_CODE (expression) == OFFSET_REF)
	{
	  if (type_dependent_expression_p (TREE_OPERAND (expression, 0)))
	    return true;
	  expression = TREE_OPERAND (expression, 1);
	  if (identifier_p (expression))
	    return false;
	}
      /* SCOPE_REF with non-null TREE_TYPE is always non-dependent.  */
      if (TREE_CODE (expression) == SCOPE_REF)
	return false;

      /* CO_AWAIT/YIELD_EXPR with unknown type is always dependent.  */
      if (TREE_CODE (expression) == CO_AWAIT_EXPR
	  || TREE_CODE (expression) == CO_YIELD_EXPR)
	return true;

      if (BASELINK_P (expression))
	{
	  if (BASELINK_OPTYPE (expression)
	      && dependent_type_p (BASELINK_OPTYPE (expression)))
	    return true;
	  expression = BASELINK_FUNCTIONS (expression);
	}

      if (TREE_CODE (expression) == TEMPLATE_ID_EXPR)
	{
	  if (any_dependent_template_arguments_p
	      (TREE_OPERAND (expression, 1)))
	    return true;
	  expression = TREE_OPERAND (expression, 0);
	  if (identifier_p (expression))
	    return true;
	}

      gcc_assert (OVL_P (expression));

      /* An overload set is dependent if any of its members is.  */
      for (lkp_iterator iter (expression); iter; ++iter)
	if (type_dependent_expression_p (*iter))
	  return true;

      return false;
    }

  /* The type of a non-type template parm declared with a placeholder type
     depends on the corresponding template argument, even though
     placeholders are not normally considered dependent.  */
  if (TREE_CODE (expression) == TEMPLATE_PARM_INDEX
      && is_auto (TREE_TYPE (expression)))
    return true;

  gcc_assert (TREE_CODE (expression) != TYPE_DECL);

  /* Dependent type attributes might not have made it from the decl to
     the type yet.  */
  if (DECL_P (expression)
      && any_dependent_type_attributes_p (DECL_ATTRIBUTES (expression)))
    return true;

  /* Otherwise the expression is dependent iff its type is.  */
  return (dependent_type_p (TREE_TYPE (expression)));
}
/* [temp.dep.expr]/5: A class member access expression (5.2.5) is
   type-dependent if the expression refers to a member of the current
   instantiation and the type of the referenced member is dependent, or the
   class member access expression refers to a member of an unknown
   specialization.
   This function returns true if the OBJECT in such a class member access
   expression is of an unknown specialization.  */

bool
type_dependent_object_expression_p (tree object)
{
  /* An IDENTIFIER_NODE can sometimes have a TREE_TYPE, but it's still
     dependent.  */
  if (TREE_CODE (object) == IDENTIFIER_NODE)
    return true;

  tree scope = TREE_TYPE (object);
  if (scope == NULL_TREE)
    return true;

  return dependent_scope_p (scope);
}
/* walk_tree callback function for instantiation_dependent_expression_p,
   below.  Returns non-zero if a dependent subexpression is found.
   Returning *TP stops the walk with a "dependent" verdict; NULL_TREE
   lets it continue (or, with *WALK_SUBTREES cleared, skips the node's
   children).  */

static tree
instantiation_dependent_r (tree *tp, int *walk_subtrees,
			   void * /*data*/)
{
  if (TYPE_P (*tp))
    {
      /* We don't have to worry about decltype currently because decltype
	 of an instantiation-dependent expr is a dependent type.  This
	 might change depending on the resolution of DR 1172.  */
      *walk_subtrees = false;
      return NULL_TREE;
    }
  enum tree_code code = TREE_CODE (*tp);
  switch (code)
    {
      /* Don't treat an argument list as dependent just because it has no
	 TREE_TYPE.  */
    case TREE_LIST:
    case TREE_VEC:
    case NONTYPE_ARGUMENT_PACK:
      return NULL_TREE;

    case TEMPLATE_PARM_INDEX:
      if (dependent_type_p (TREE_TYPE (*tp)))
	return *tp;
      if (TEMPLATE_PARM_PARAMETER_PACK (*tp))
	return *tp;
      /* We'll check value-dependence separately.  */
      return NULL_TREE;

      /* Handle expressions with type operands.  */
    case SIZEOF_EXPR:
    case ALIGNOF_EXPR:
    case TYPEID_EXPR:
    case AT_ENCODE_EXPR:
      {
	tree op = TREE_OPERAND (*tp, 0);
	if (code == SIZEOF_EXPR && SIZEOF_EXPR_TYPE_P (*tp))
	  op = TREE_TYPE (op);
	if (TYPE_P (op))
	  {
	    if (dependent_type_p (op))
	      return *tp;
	    else
	      {
		*walk_subtrees = false;
		return NULL_TREE;
	      }
	  }
	break;
      }

    case COMPONENT_REF:
      if (identifier_p (TREE_OPERAND (*tp, 1)))
	/* In a template, finish_class_member_access_expr creates a
	   COMPONENT_REF with an IDENTIFIER_NODE for op1 even if it isn't
	   type-dependent, so that we can check access control at
	   instantiation time (PR 42277).  See also Core issue 1273.  */
	return *tp;
      break;

    case SCOPE_REF:
      if (instantiation_dependent_scope_ref_p (*tp))
	return *tp;
      else
	break;

      /* Treat statement-expressions as dependent.  */
    case BIND_EXPR:
      return *tp;

      /* Treat requires-expressions as dependent.  */
    case REQUIRES_EXPR:
      return *tp;

    case CALL_EXPR:
      /* Treat concept checks as dependent.  */
      if (concept_check_p (*tp))
	return *tp;
      break;

    case TEMPLATE_ID_EXPR:
      /* Treat concept checks as dependent.  */
      if (concept_check_p (*tp))
	return *tp;
      break;

    case CONSTRUCTOR:
      if (CONSTRUCTOR_IS_DEPENDENT (*tp))
	return *tp;
      break;

    default:
      break;
    }

  /* For all other nodes, fall back to type-dependence of the node
     itself.  */
  if (type_dependent_expression_p (*tp))
    return *tp;
  else
    return NULL_TREE;
}
/* Returns TRUE if the EXPRESSION is instantiation-dependent, in the
sense defined by the ABI:
"An expression is instantiation-dependent if it is type-dependent
or value-dependent, or it has a subexpression that is type-dependent
or value-dependent."
Except don't actually check value-dependence for unevaluated expressions,
because in sizeof(i) we don't care about the value of i. Checking
type-dependence will in turn check value-dependence of array bounds/template
arguments as needed. */
bool
instantiation_dependent_uneval_expression_p (tree expression)
{
  /* Outside a template nothing is instantiation-dependent; neither is an
     erroneous expression.  */
  if (!processing_template_decl || expression == error_mark_node)
    return false;

  /* Walk the expression; the callback returns the offending subtree (if
     any), which we only need as a yes/no answer.  */
  tree witness
    = cp_walk_tree_without_duplicates (&expression,
				       instantiation_dependent_r, NULL);
  return witness != NULL_TREE;
}
/* As above, but also check value-dependence of the expression as a whole. */
bool
instantiation_dependent_expression_p (tree expression)
{
  /* As the uneval variant, plus value-dependence of the whole
     expression.  */
  if (instantiation_dependent_uneval_expression_p (expression))
    return true;
  return value_dependent_expression_p (expression);
}
/* Like type_dependent_expression_p, but it also works while not processing
a template definition, i.e. during substitution or mangling. */
bool
type_dependent_expression_p_push (tree expr)
{
  /* Temporarily pretend we are inside a template so the dependency
     predicates do their work even during substitution or mangling.  */
  ++processing_template_decl;
  const bool dependent = type_dependent_expression_p (expr);
  --processing_template_decl;
  return dependent;
}
/* Returns TRUE if ARGS contains a type-dependent expression. */
bool
any_type_dependent_arguments_p (const vec<tree, va_gc> *args)
{
unsigned int i;
tree arg;
FOR_EACH_VEC_SAFE_ELT (args, i, arg)
{
if (type_dependent_expression_p (arg))
return true;
}
return false;
}
/* Returns TRUE if LIST (a TREE_LIST whose TREE_VALUEs are
expressions) contains any type-dependent expressions. */
bool
any_type_dependent_elements_p (const_tree list)
{
  /* Walk the TREE_LIST; a single type-dependent TREE_VALUE suffices.  */
  for (const_tree node = list; node; node = TREE_CHAIN (node))
    {
      if (type_dependent_expression_p (TREE_VALUE (node)))
	return true;
    }
  return false;
}
/* Returns TRUE if LIST (a TREE_LIST whose TREE_VALUEs are
expressions) contains any value-dependent expressions. */
bool
any_value_dependent_elements_p (const_tree list)
{
  /* Walk the TREE_LIST; a single value-dependent TREE_VALUE suffices.  */
  for (const_tree node = list; node; node = TREE_CHAIN (node))
    {
      if (value_dependent_expression_p (TREE_VALUE (node)))
	return true;
    }
  return false;
}
/* Returns TRUE if the ARG (a template argument) is dependent. */
bool
dependent_template_arg_p (tree arg)
{
  if (!processing_template_decl)
    return false;

  /* Assume a template argument that was wrongly written by the user
     is dependent.  This is consistent with what
     any_dependent_template_arguments_p [that calls this function]
     does.  */
  if (!arg || arg == error_mark_node)
    return true;

  if (TREE_CODE (arg) == ARGUMENT_PACK_SELECT)
    arg = argument_pack_select_arg (arg);

  if (TREE_CODE (arg) == TEMPLATE_TEMPLATE_PARM)
    return true;

  if (TREE_CODE (arg) == TEMPLATE_DECL)
    {
      if (DECL_TEMPLATE_PARM_P (arg))
	return true;
      /* A member template of a dependent class is not necessarily
	 type-dependent, but it is a dependent template argument because it
	 will be a member of an unknown specialization to that template.  */
      tree ctx = CP_DECL_CONTEXT (arg);
      return TYPE_P (ctx) && dependent_type_p (ctx);
    }

  if (ARGUMENT_PACK_P (arg))
    {
      /* A pack is dependent if any of its elements is.  */
      tree packed = ARGUMENT_PACK_ARGS (arg);
      const int count = TREE_VEC_LENGTH (packed);
      for (int i = 0; i < count; ++i)
	if (dependent_template_arg_p (TREE_VEC_ELT (packed, i)))
	  return true;
      return false;
    }

  if (TYPE_P (arg))
    return dependent_type_p (arg);

  return value_dependent_expression_p (arg);
}
/* Returns true if ARGS (a collection of template arguments) contains
any types that require structural equality testing. */
bool
any_template_arguments_need_structural_equality_p (tree args)
{
  if (!args)
    return false;
  if (args == error_mark_node)
    return true;

  for (int lvl = 0; lvl < TMPL_ARGS_DEPTH (args); ++lvl)
    {
      tree level = TMPL_ARGS_LEVEL (args, lvl + 1);
      for (int j = 0; j < TREE_VEC_LENGTH (level); ++j)
	{
	  tree arg = TREE_VEC_ELT (level, j);
	  tree packed_args = NULL_TREE;
	  int len = 1;

	  if (ARGUMENT_PACK_P (arg))
	    {
	      /* Look inside the argument pack.  */
	      packed_args = ARGUMENT_PACK_ARGS (arg);
	      len = TREE_VEC_LENGTH (packed_args);
	    }

	  for (int k = 0; k < len; ++k)
	    {
	      if (packed_args)
		arg = TREE_VEC_ELT (packed_args, k);

	      if (error_operand_p (arg))
		return true;
	      if (TREE_CODE (arg) == TEMPLATE_DECL)
		continue;
	      if (TYPE_P (arg))
		{
		  if (TYPE_STRUCTURAL_EQUALITY_P (arg))
		    return true;
		}
	      else if (TREE_TYPE (arg)
		       && TYPE_STRUCTURAL_EQUALITY_P (TREE_TYPE (arg)))
		return true;
	    }
	}
    }
  return false;
}
/* Returns true if ARGS (a collection of template arguments) contains
any dependent arguments. */
bool
any_dependent_template_arguments_p (const_tree args)
{
  if (!args)
    return false;
  if (args == error_mark_node)
    return true;

  /* Check each argument of each level.  */
  for (int lvl = 0; lvl < TMPL_ARGS_DEPTH (args); ++lvl)
    {
      const_tree level = TMPL_ARGS_LEVEL (args, lvl + 1);
      for (int j = 0; j < TREE_VEC_LENGTH (level); ++j)
	if (dependent_template_arg_p (TREE_VEC_ELT (level, j)))
	  return true;
    }
  return false;
}
/* Returns true if ARGS contains any errors. */
bool
any_erroneous_template_args_p (const_tree args)
{
  if (args == error_mark_node)
    return true;

  /* Accept a decl or type as well: look through to the args of its
     template info, if any.  */
  if (args && TREE_CODE (args) != TREE_VEC)
    {
      tree ti = get_template_info (args);
      args = ti ? TI_ARGS (ti) : NULL_TREE;
    }

  if (!args)
    return false;

  for (int lvl = 0; lvl < TMPL_ARGS_DEPTH (args); ++lvl)
    {
      const_tree level = TMPL_ARGS_LEVEL (args, lvl + 1);
      for (int j = 0; j < TREE_VEC_LENGTH (level); ++j)
	if (error_operand_p (TREE_VEC_ELT (level, j)))
	  return true;
    }
  return false;
}
/* Returns TRUE if the template TMPL is type-dependent. */
bool
dependent_template_p (tree tmpl)
{
  /* An overload set is dependent if any member is.  */
  if (TREE_CODE (tmpl) == OVERLOAD)
    {
      for (lkp_iterator iter (tmpl); iter; ++iter)
	if (dependent_template_p (*iter))
	  return true;
      return false;
    }

  /* Template template parameters are dependent.  */
  if (DECL_TEMPLATE_TEMPLATE_PARM_P (tmpl))
    return true;
  if (TREE_CODE (tmpl) == TEMPLATE_TEMPLATE_PARM)
    return true;

  /* So are names that have not been looked up.  */
  if (TREE_CODE (tmpl) == SCOPE_REF)
    return true;
  if (identifier_p (tmpl))
    return true;

  return false;
}
/* Returns TRUE if the specialization TMPL<ARGS> is dependent. */
bool
dependent_template_id_p (tree tmpl, tree args)
{
  /* Dependent if either the template or any argument is.  */
  if (dependent_template_p (tmpl))
    return true;
  return any_dependent_template_arguments_p (args);
}
/* Returns TRUE if OMP_FOR with DECLV, INITV, CONDV and INCRV vectors
are dependent. */
bool
dependent_omp_for_p (tree declv, tree initv, tree condv, tree incrv)
{
  int i;

  /* Nothing is dependent outside a template.  */
  if (!processing_template_decl)
    return false;

  /* Check each collapsed loop dimension in turn.  */
  for (i = 0; i < TREE_VEC_LENGTH (declv); i++)
    {
      tree decl = TREE_VEC_ELT (declv, i);
      tree init = TREE_VEC_ELT (initv, i);
      tree cond = TREE_VEC_ELT (condv, i);
      tree incr = TREE_VEC_ELT (incrv, i);

      if (type_dependent_expression_p (decl)
	  || TREE_CODE (decl) == SCOPE_REF)
	return true;

      if (init && type_dependent_expression_p (init))
	return true;

      /* NOTE(review): global_namespace appears to be used as a sentinel
	 for a not-yet-analyzable condition here -- confirm against the
	 parser code that builds CONDV.  */
      if (cond == global_namespace)
	return true;

      if (type_dependent_expression_p (cond))
	return true;

      if (COMPARISON_CLASS_P (cond)
	  && (type_dependent_expression_p (TREE_OPERAND (cond, 0))
	      || type_dependent_expression_p (TREE_OPERAND (cond, 1))))
	return true;

      if (TREE_CODE (incr) == MODOP_EXPR)
	{
	  if (type_dependent_expression_p (TREE_OPERAND (incr, 0))
	      || type_dependent_expression_p (TREE_OPERAND (incr, 2)))
	    return true;
	}
      else if (type_dependent_expression_p (incr))
	return true;
      else if (TREE_CODE (incr) == MODIFY_EXPR)
	{
	  if (type_dependent_expression_p (TREE_OPERAND (incr, 0)))
	    return true;
	  else if (BINARY_CLASS_P (TREE_OPERAND (incr, 1)))
	    {
	      tree t = TREE_OPERAND (incr, 1);
	      if (type_dependent_expression_p (TREE_OPERAND (t, 0))
		  || type_dependent_expression_p (TREE_OPERAND (t, 1)))
		return true;

	      /* If this loop has a class iterator with != comparison
		 with increment other than i++/++i/i--/--i, make sure the
		 increment is constant.  */
	      if (CLASS_TYPE_P (TREE_TYPE (decl))
		  && TREE_CODE (cond) == NE_EXPR)
		{
		  /* T is the operand of the increment that is not the
		     iteration variable itself.  */
		  if (TREE_OPERAND (t, 0) == decl)
		    t = TREE_OPERAND (t, 1);
		  else
		    t = TREE_OPERAND (t, 0);
		  if (TREE_CODE (t) != INTEGER_CST)
		    return true;
		}
	    }
	}
    }
  return false;
}
/* TYPE is a TYPENAME_TYPE. Returns the ordinary TYPE to which the
TYPENAME_TYPE corresponds. Returns the original TYPENAME_TYPE if
no such TYPE can be found. Note that this function peers inside
uninstantiated templates and therefore should be used only in
extremely limited situations. ONLY_CURRENT_P restricts this
peering to the currently open classes hierarchy (which is required
when comparing types). */
tree
resolve_typename_type (tree type, bool only_current_p)
{
  tree scope;
  tree name;
  tree decl;
  int quals;
  tree pushed_scope;
  tree result;

  gcc_assert (TREE_CODE (type) == TYPENAME_TYPE);

  scope = TYPE_CONTEXT (type);
  /* We shouldn't have built a TYPENAME_TYPE with a non-dependent scope.  */
  gcc_checking_assert (uses_template_parms (scope));

  /* Usually the non-qualified identifier of a TYPENAME_TYPE is
     TYPE_IDENTIFIER (type).  But when 'type' is a typedef variant of
     a TYPENAME_TYPE node, then TYPE_NAME (type) is set to the TYPE_DECL
     representing the typedef.  In that case TYPE_IDENTIFIER (type) is not
     the non-qualified identifier of the TYPENAME_TYPE anymore.
     So by getting the TYPE_IDENTIFIER of the _main declaration_ of the
     TYPENAME_TYPE instead, we avoid messing up with a possible
     typedef variant case.  */
  name = TYPE_IDENTIFIER (TYPE_MAIN_VARIANT (type));

  /* If the SCOPE is itself a TYPENAME_TYPE, then we need to resolve
     it first before we can figure out what NAME refers to.  */
  if (TREE_CODE (scope) == TYPENAME_TYPE)
    {
      if (TYPENAME_IS_RESOLVING_P (scope))
	/* Given a class template A with a dependent base with nested type C,
	   typedef typename A::C::C C will land us here, as trying to resolve
	   the initial A::C leads to the local C typedef, which leads back to
	   A::C::C.  So we break the recursion now.  */
	return type;
      else
	scope = resolve_typename_type (scope, only_current_p);
    }
  /* If we don't know what SCOPE refers to, then we cannot resolve the
     TYPENAME_TYPE.  */
  if (!CLASS_TYPE_P (scope))
    return type;
  /* If this is a typedef, we don't want to look inside (c++/11987).  */
  if (typedef_variant_p (type))
    return type;
  /* If SCOPE isn't the template itself, it will not have a valid
     TYPE_FIELDS list.  */
  if (same_type_p (scope, CLASSTYPE_PRIMARY_TEMPLATE_TYPE (scope)))
    /* scope is either the template itself or a compatible instantiation
       like X<T>, so look up the name in the original template.  */
    scope = CLASSTYPE_PRIMARY_TEMPLATE_TYPE (scope);
  /* If scope has no fields, it can't be a current instantiation.  Check this
     before currently_open_class to avoid infinite recursion (71515).  */
  if (!TYPE_FIELDS (scope))
    return type;
  /* If the SCOPE is not the current instantiation, there's no reason
     to look inside it.  */
  if (only_current_p && !currently_open_class (scope))
    return type;
  /* Enter the SCOPE so that name lookup will be resolved as if we
     were in the class definition.  In particular, SCOPE will no
     longer be considered a dependent type.  */
  pushed_scope = push_scope (scope);
  /* Look up the declaration.  */
  decl = lookup_member (scope, name, /*protect=*/0, /*want_type=*/true,
			tf_warning_or_error);

  result = NULL_TREE;

  /* For a TYPENAME_TYPE like "typename X::template Y<T>", we want to
     find a TEMPLATE_DECL.  Otherwise, we want to find a TYPE_DECL.  */
  tree fullname = TYPENAME_TYPE_FULLNAME (type);
  if (!decl)
    /*nop*/;
  else if (identifier_p (fullname)
	   && TREE_CODE (decl) == TYPE_DECL)
    {
      result = TREE_TYPE (decl);
      if (result == error_mark_node)
	result = NULL_TREE;
    }
  else if (TREE_CODE (fullname) == TEMPLATE_ID_EXPR
	   && DECL_CLASS_TEMPLATE_P (decl))
    {
      /* Obtain the template and the arguments.  */
      tree tmpl = TREE_OPERAND (fullname, 0);
      if (TREE_CODE (tmpl) == IDENTIFIER_NODE)
	{
	  /* We get here with a plain identifier because a previous tentative
	     parse of the nested-name-specifier as part of a ptr-operator saw
	     ::template X<A>.  The use of ::template is necessary in a
	     ptr-operator, but wrong in a declarator-id.
	     [temp.names]: In a qualified-id of a declarator-id, the keyword
	     template shall not appear at the top level.  */
	  pedwarn (cp_expr_loc_or_input_loc (fullname), OPT_Wpedantic,
		   "keyword %<template%> not allowed in declarator-id");
	  tmpl = decl;
	}
      tree args = TREE_OPERAND (fullname, 1);
      /* Instantiate the template.  */
      result = lookup_template_class (tmpl, args, NULL_TREE, NULL_TREE,
				      /*entering_scope=*/true,
				      tf_error | tf_user);
      if (result == error_mark_node)
	result = NULL_TREE;
    }

  /* Leave the SCOPE.  */
  if (pushed_scope)
    pop_scope (pushed_scope);

  /* If we failed to resolve it, return the original typename.  */
  if (!result)
    return type;

  /* If lookup found a typename type, resolve that too.  */
  if (TREE_CODE (result) == TYPENAME_TYPE && !TYPENAME_IS_RESOLVING_P (result))
    {
      /* Ill-formed programs can cause infinite recursion here, so we
	 must catch that.  */
      TYPENAME_IS_RESOLVING_P (result) = 1;
      result = resolve_typename_type (result, only_current_p);
      TYPENAME_IS_RESOLVING_P (result) = 0;
    }

  /* Qualify the resulting type with any cv-qualifiers of the original
     TYPENAME_TYPE.  */
  quals = cp_type_quals (type);
  if (quals)
    result = cp_build_qualified_type (result, cp_type_quals (result) | quals);

  return result;
}
/* EXPR is an expression which is not type-dependent. Return a proxy
for EXPR that can be used to compute the types of larger
expressions containing EXPR. */
tree
build_non_dependent_expr (tree expr)
{
  tree orig_expr = expr;
  tree inner_expr;

  /* When checking, try to get a constant value for all non-dependent
     expressions in order to expose bugs in *_dependent_expression_p
     and constexpr.  This can affect code generation, see PR70704, so
     only do this for -fchecking=2.  */
  if (flag_checking > 1
      && cxx_dialect >= cxx11
      /* Don't do this during nsdmi parsing as it can lead to
	 unexpected recursive instantiations.  */
      && !parsing_nsdmi ()
      /* Don't do this during concept processing either and for
	 the same reason.  */
      && !processing_constraint_expression_p ())
    fold_non_dependent_expr (expr, tf_none);

  STRIP_ANY_LOCATION_WRAPPER (expr);

  /* Preserve OVERLOADs; the functions must be available to resolve
     types.  Peel off wrappers (statement-exprs, address-of, member
     access) to find a possible overload underneath.  */
  inner_expr = expr;
  if (TREE_CODE (inner_expr) == STMT_EXPR)
    inner_expr = stmt_expr_value_expr (inner_expr);
  if (TREE_CODE (inner_expr) == ADDR_EXPR)
    inner_expr = TREE_OPERAND (inner_expr, 0);
  if (TREE_CODE (inner_expr) == COMPONENT_REF)
    inner_expr = TREE_OPERAND (inner_expr, 1);
  if (is_overloaded_fn (inner_expr)
      || TREE_CODE (inner_expr) == OFFSET_REF)
    return orig_expr;
  /* There is no need to return a proxy for a variable or enumerator.  */
  if (VAR_P (expr) || TREE_CODE (expr) == CONST_DECL)
    return orig_expr;
  /* Preserve string constants; conversions from string constants to
     "char *" are allowed, even though normally a "const char *"
     cannot be used to initialize a "char *".  */
  if (TREE_CODE (expr) == STRING_CST)
    return orig_expr;
  /* Preserve void and arithmetic constants, as an optimization -- there is no
     reason to create a new node.  */
  if (TREE_CODE (expr) == VOID_CST
      || TREE_CODE (expr) == INTEGER_CST
      || TREE_CODE (expr) == REAL_CST)
    return orig_expr;
  /* Preserve THROW_EXPRs -- all throw-expressions have type "void".
     There is at least one place where we want to know that a
     particular expression is a throw-expression: when checking a ?:
     expression, there are special rules if the second or third
     argument is a throw-expression.  */
  if (TREE_CODE (expr) == THROW_EXPR)
    return orig_expr;

  /* Don't wrap an initializer list, we need to be able to look inside.  */
  if (BRACE_ENCLOSED_INITIALIZER_P (expr))
    return orig_expr;

  /* Don't wrap a dummy object, we need to be able to test for it.  */
  if (is_dummy_object (expr))
    return orig_expr;

  /* For a conditional expression, wrap each operand separately; when
     operand 1 is absent the condition is reused in its place.  */
  if (TREE_CODE (expr) == COND_EXPR)
    return build3 (COND_EXPR,
		   TREE_TYPE (expr),
		   build_non_dependent_expr (TREE_OPERAND (expr, 0)),
		   (TREE_OPERAND (expr, 1)
		    ? build_non_dependent_expr (TREE_OPERAND (expr, 1))
		    : build_non_dependent_expr (TREE_OPERAND (expr, 0))),
		   build_non_dependent_expr (TREE_OPERAND (expr, 2)));
  if (TREE_CODE (expr) == COMPOUND_EXPR
      && !COMPOUND_EXPR_OVERLOADED (expr))
    return build2 (COMPOUND_EXPR,
		   TREE_TYPE (expr),
		   TREE_OPERAND (expr, 0),
		   build_non_dependent_expr (TREE_OPERAND (expr, 1)));

  /* If the type is unknown, it can't really be non-dependent */
  gcc_assert (TREE_TYPE (expr) != unknown_type_node);

  /* Otherwise, build a NON_DEPENDENT_EXPR.  */
  return build1_loc (EXPR_LOCATION (orig_expr), NON_DEPENDENT_EXPR,
		     TREE_TYPE (expr), expr);
}
/* ARGS is a vector of expressions as arguments to a function call.
Replace the arguments with equivalent non-dependent expressions.
This modifies ARGS in place. */
void
make_args_non_dependent (vec<tree, va_gc> *args)
{
  unsigned int ix;
  tree arg;

  /* Replace each argument in place with its non-dependent proxy.  */
  FOR_EACH_VEC_SAFE_ELT (args, ix, arg)
    {
      tree replacement = build_non_dependent_expr (arg);
      if (replacement != arg)
	(*args)[ix] = replacement;
    }
}
/* Returns a type which represents 'auto' or 'decltype(auto)'. We use a
TEMPLATE_TYPE_PARM with a level one deeper than the actual template
parms. If set_canonical is true, we set TYPE_CANONICAL on it. */
static tree
make_auto_1 (tree name, bool set_canonical)
{
  /* Represent the placeholder as a template type parameter one level
     deeper than the current template parms.  */
  tree au = cxx_make_type (TEMPLATE_TYPE_PARM);
  TYPE_NAME (au) = build_decl (input_location, TYPE_DECL, name, au);
  TYPE_STUB_DECL (au) = TYPE_NAME (au);
  TEMPLATE_TYPE_PARM_INDEX (au) = build_template_parm_index
    (0, processing_template_decl + 1, processing_template_decl + 1,
     TYPE_NAME (au), NULL_TREE);
  if (set_canonical)
    TYPE_CANONICAL (au) = canonical_type_parameter (au);
  DECL_ARTIFICIAL (TYPE_NAME (au)) = 1;
  SET_DECL_TEMPLATE_PARM_P (TYPE_NAME (au));
  /* Flag 'decltype(auto)' so deduction can distinguish it from plain
     'auto'.  */
  if (name == decltype_auto_identifier)
    AUTO_IS_DECLTYPE (au) = true;
  return au;
}
/* Returns a fresh 'decltype(auto)' placeholder with its canonical type
   set.  */
tree
make_decltype_auto (void)
{
  return make_auto_1 (decltype_auto_identifier, true);
}
/* Returns a fresh 'auto' placeholder with its canonical type set.  */
tree
make_auto (void)
{
  return make_auto_1 (auto_identifier, true);
}
/* Return a C++17 deduction placeholder for class template TMPL. */
tree
make_template_placeholder (tree tmpl)
{
  /* Don't set the canonical type yet: it depends on the placeholder
     template, which is recorded first.  */
  tree t = make_auto_1 (auto_identifier, false);
  CLASS_PLACEHOLDER_TEMPLATE (t) = tmpl;
  /* Our canonical type depends on the placeholder.  */
  TYPE_CANONICAL (t) = canonical_type_parameter (t);
  return t;
}
/* True iff T is a C++17 class template deduction placeholder. */
bool
template_placeholder_p (tree t)
{
  /* A deduction placeholder is an 'auto' carrying a class template.  */
  if (!is_auto (t))
    return false;
  return CLASS_PLACEHOLDER_TEMPLATE (t) != NULL_TREE;
}
/* Make a "constrained auto" type-specifier. This is an auto or
decltype(auto) type with constraints that must be associated after
deduction. The constraint is formed from the given concept CON
and its optional sequence of template arguments ARGS.
TYPE must be the result of make_auto_type or make_decltype_auto_type. */
static tree
make_constrained_placeholder_type (tree type, tree con, tree args)
{
  /* Build the constraint.  */
  tree tmpl = DECL_TI_TEMPLATE (con);
  tree expr = tmpl;
  if (TREE_CODE (con) == FUNCTION_DECL)
    /* A function concept: wrap it as an overload for the check.  */
    expr = ovl_make (tmpl);
  expr = build_concept_check (expr, type, args, tf_warning_or_error);

  PLACEHOLDER_TYPE_CONSTRAINTS (type) = expr;

  /* Our canonical type depends on the constraint.  */
  TYPE_CANONICAL (type) = canonical_type_parameter (type);

  /* Attach the constraint to the type declaration.  */
  return TYPE_NAME (type);
}
/* Make a "constrained auto" type-specifier. */
tree
make_constrained_auto (tree con, tree args)
{
  /* Canonical type is set later, once the constraint is attached.  */
  tree type = make_auto_1 (auto_identifier, false);
  return make_constrained_placeholder_type (type, con, args);
}
/* Make a "constrained decltype(auto)" type-specifier. */
tree
make_constrained_decltype_auto (tree con, tree args)
{
  /* Canonical type is set later, once the constraint is attached.  */
  tree type = make_auto_1 (decltype_auto_identifier, false);
  return make_constrained_placeholder_type (type, con, args);
}
/* Build and return a concept definition. Like other templates, the
   CONCEPT_DECL node is wrapped by a TEMPLATE_DECL.  This returns the
   TEMPLATE_DECL. */
tree
finish_concept_definition (cp_expr id, tree init)
{
  gcc_assert (identifier_p (id));
  gcc_assert (processing_template_decl);

  location_t loc = id.get_location();

  /* A concept-definition shall not have associated constraints.  */
  if (TEMPLATE_PARMS_CONSTRAINTS (current_template_parms))
    {
      /* Diagnose, then drop the constraints so processing can continue.  */
      error_at (loc, "a concept cannot be constrained");
      TEMPLATE_PARMS_CONSTRAINTS (current_template_parms) = NULL_TREE;
    }

  /* A concept-definition shall appear in namespace scope.  Templates
     aren't allowed in block scope, so we only need to check for class
     scope.  */
  if (TYPE_P (current_scope()) || !DECL_NAMESPACE_SCOPE_P (current_scope ()))
    {
      error_at (loc, "concept %qE not in namespace scope", *id);
      return error_mark_node;
    }

  /* Initially build the concept declaration; its type is bool.  */
  tree decl = build_lang_decl_loc (loc, CONCEPT_DECL, *id, boolean_type_node);
  DECL_CONTEXT (decl) = current_scope ();
  DECL_INITIAL (decl) = init;

  /* Push the enclosing template.  */
  return push_template_decl (decl);
}
/* Given type ARG, return std::initializer_list<ARG>. */
static tree
listify (tree arg)
{
  tree std_init_list = get_namespace_binding (std_node, init_list_identifier);

  if (!std_init_list || !DECL_CLASS_TEMPLATE_P (std_init_list))
    {
      /* std::initializer_list is not visible: diagnose with a fix-it
	 suggesting the missing include.  */
      gcc_rich_location richloc (input_location);
      maybe_add_include_fixit (&richloc, "<initializer_list>", false);
      error_at (&richloc,
		"deducing from brace-enclosed initializer list"
		" requires %<#include <initializer_list>%>");
      return error_mark_node;
    }
  /* Instantiate std::initializer_list<ARG>.  */
  tree argvec = make_tree_vec (1);
  TREE_VEC_ELT (argvec, 0) = arg;
  return lookup_template_class (std_init_list, argvec, NULL_TREE,
				NULL_TREE, 0, tf_warning_or_error);
}
/* Replace auto in TYPE with std::initializer_list<auto>. */
static tree
listify_autos (tree type, tree auto_node)
{
  /* Build std::initializer_list<auto> ...  */
  tree init_auto = listify (strip_top_quals (auto_node));
  tree argvec = make_tree_vec (1);
  TREE_VEC_ELT (argvec, 0) = init_auto;
  if (processing_template_decl)
    argvec = add_to_template_args (current_template_args (), argvec);
  /* ... and substitute it for the bare auto in TYPE.  */
  return tsubst (type, argvec, tf_warning_or_error, NULL_TREE);
}
/* Hash traits for hashing possibly constrained 'auto'
TEMPLATE_TYPE_PARMs for use by do_auto_deduction. */
struct auto_hash : default_hash_traits<tree>
{
  static inline hashval_t hash (tree);   /* Hash a (possibly constrained) auto.  */
  static inline bool equal (tree, tree); /* Compare two autos for identity.  */
};
/* Hash the 'auto' T. */
inline hashval_t
auto_hash::hash (tree t)
{
  tree c = NON_ERROR (PLACEHOLDER_TYPE_CONSTRAINTS (t));
  if (c)
    /* Matching constrained-type-specifiers denote the same template
       parameter, so hash the constraint.  */
    return hash_placeholder_constraint (c);
  /* But unconstrained autos are all separate, so just hash the pointer.  */
  return iterative_hash_object (t, 0);
}
/* Compare two 'auto's. */
inline bool
auto_hash::equal (tree t1, tree t2)
{
  if (t1 == t2)
    return true;

  tree c1 = PLACEHOLDER_TYPE_CONSTRAINTS (t1);
  tree c2 = PLACEHOLDER_TYPE_CONSTRAINTS (t2);

  /* An unconstrained auto is equal only to itself.  */
  if (c1 == NULL_TREE || c2 == NULL_TREE)
    return false;

  return equivalent_placeholder_constraints (c1, c2);
}
/* for_each_template_parm callback for extract_autos: if t is a (possibly
constrained) auto, add it to the vector. */
static int
extract_autos_r (tree t, void *data)
{
  hash_table<auto_hash> &hash = *(hash_table<auto_hash>*)data;
  if (is_auto (t))
    {
      /* All the autos were built with index 0; fix that up now.  */
      tree *slot = hash.find_slot (t, INSERT);
      unsigned idx;
      if (*slot)
	/* If this is a repeated constrained-type-specifier, use the index
	   chosen before.  */
	idx = TEMPLATE_PARM_IDX (TEMPLATE_TYPE_PARM_INDEX (*slot));
      else
	{
	  /* Otherwise this is new, so use the current count.  */
	  *slot = t;
	  idx = hash.elements () - 1;
	}
      TEMPLATE_PARM_IDX (TEMPLATE_TYPE_PARM_INDEX (t)) = idx;
    }

  /* Always keep walking.  */
  return 0;
}
/* Return a TREE_VEC of the 'auto's used in type under the Concepts TS, which
says they can appear anywhere in the type. */
static tree
extract_autos (tree type)
{
  hash_set<tree> visited;
  hash_table<auto_hash> hash (2);

  /* Collect the autos and fix up their parm indices.  */
  for_each_template_parm (type, extract_autos_r, &hash, &visited, true);

  /* Build a TREE_VEC indexed by each auto's (fixed-up) index.  */
  tree tree_vec = make_tree_vec (hash.elements());
  for (hash_table<auto_hash>::iterator iter = hash.begin();
       iter != hash.end(); ++iter)
    {
      tree elt = *iter;
      unsigned i = TEMPLATE_PARM_IDX (TEMPLATE_TYPE_PARM_INDEX (elt));
      TREE_VEC_ELT (tree_vec, i)
	= build_tree_list (NULL_TREE, TYPE_NAME (elt));
    }

  return tree_vec;
}
/* The stem for deduction guide names: a guide for class template X is
   named "__dguide_X" (see dguide_name).  */
const char *const dguide_base = "__dguide_";
/* Return the name for a deduction guide for class template TMPL. */
tree
dguide_name (tree tmpl)
{
  tree type = (TYPE_P (tmpl) ? tmpl : TREE_TYPE (tmpl));
  tree tname = TYPE_IDENTIFIER (type);
  const size_t base_len = strlen (dguide_base);

  /* Concatenate the stem and the class name (plus NUL).  */
  char *buf = (char *) alloca (1 + base_len + IDENTIFIER_LENGTH (tname));
  memcpy (buf, dguide_base, base_len);
  memcpy (buf + base_len, IDENTIFIER_POINTER (tname),
	  IDENTIFIER_LENGTH (tname) + 1);

  tree dname = get_identifier (buf);
  /* Stash the class type on the identifier; dguide_name_p and
     copy_guide_p rely on it.  */
  TREE_TYPE (dname) = type;
  return dname;
}
/* True if NAME is the name of a deduction guide. */
bool
dguide_name_p (tree name)
{
  /* A deduction guide name is an identifier with a stashed type whose
     text starts with the dguide stem.  */
  if (TREE_CODE (name) != IDENTIFIER_NODE || !TREE_TYPE (name))
    return false;
  return strncmp (IDENTIFIER_POINTER (name), dguide_base,
		  strlen (dguide_base)) == 0;
}
/* True if FN is a deduction guide. */
bool
deduction_guide_p (const_tree fn)
{
  if (!DECL_P (fn))
    return false;
  tree name = DECL_NAME (fn);
  return name && dguide_name_p (name);
}
/* True if FN is the copy deduction guide, i.e. A(A)->A. */
bool
copy_guide_p (const_tree fn)
{
  gcc_assert (deduction_guide_p (fn));
  if (!DECL_ARTIFICIAL (fn))
    return false;
  /* The copy guide takes exactly one parameter, of the class type
     itself (which dguide_name stashed on the guide's name).  */
  tree parms = FUNCTION_FIRST_USER_PARMTYPE (DECL_TI_TEMPLATE (fn));
  if (TREE_CHAIN (parms) != void_list_node)
    return false;
  return same_type_p (TREE_VALUE (parms), TREE_TYPE (DECL_NAME (fn)));
}
/* True if FN is a guide generated from a constructor template. */
bool
template_guide_p (const_tree fn)
{
  gcc_assert (deduction_guide_p (fn));
  if (!DECL_ARTIFICIAL (fn))
    return false;
  tree tmpl = DECL_TI_TEMPLATE (fn);
  tree org = DECL_ABSTRACT_ORIGIN (tmpl);
  return org && PRIMARY_TEMPLATE_P (org);
}
/* OLDDECL is a _DECL for a template parameter. Return a similar parameter at
LEVEL:INDEX, using tsubst_args and complain for substitution into non-type
template parameter types. Note that the handling of template template
parameters relies on current_template_parms being set appropriately for the
new template. */
static tree
rewrite_template_parm (tree olddecl, unsigned index, unsigned level,
		       tree tsubst_args, tsubst_flags_t complain)
{
  if (olddecl == error_mark_node)
    return error_mark_node;

  tree oldidx = get_template_parm_index (olddecl);

  /* First compute the new parameter's type.  */
  tree newtype;
  if (TREE_CODE (olddecl) == TYPE_DECL
      || TREE_CODE (olddecl) == TEMPLATE_DECL)
    {
      /* A type or template template parameter: make a fresh parm type of
	 the same kind.  */
      tree oldtype = TREE_TYPE (olddecl);
      newtype = cxx_make_type (TREE_CODE (oldtype));
      TYPE_MAIN_VARIANT (newtype) = newtype;
      if (TREE_CODE (oldtype) == TEMPLATE_TYPE_PARM)
	TEMPLATE_TYPE_PARM_FOR_CLASS (newtype)
	  = TEMPLATE_TYPE_PARM_FOR_CLASS (oldtype);
    }
  else
    {
      /* A non-type parameter: substitute into its type.  */
      newtype = TREE_TYPE (olddecl);
      if (type_uses_auto (newtype))
	{
	  // Substitute once to fix references to other template parameters.
	  newtype = tsubst (newtype, tsubst_args,
			    complain|tf_partial, NULL_TREE);
	  // Now substitute again to reduce the level of the auto.
	  newtype = tsubst (newtype, current_template_args (),
			    complain, NULL_TREE);
	}
      else
	newtype = tsubst (newtype, tsubst_args,
			  complain, NULL_TREE);
    }

  tree newdecl
    = build_decl (DECL_SOURCE_LOCATION (olddecl), TREE_CODE (olddecl),
		  DECL_NAME (olddecl), newtype);
  SET_DECL_TEMPLATE_PARM_P (newdecl);

  tree newidx;
  if (TREE_CODE (olddecl) == TYPE_DECL
      || TREE_CODE (olddecl) == TEMPLATE_DECL)
    {
      newidx = TEMPLATE_TYPE_PARM_INDEX (newtype)
	= build_template_parm_index (index, level, level,
				     newdecl, newtype);
      TEMPLATE_PARM_PARAMETER_PACK (newidx)
	= TEMPLATE_PARM_PARAMETER_PACK (oldidx);
      TYPE_STUB_DECL (newtype) = TYPE_NAME (newtype) = newdecl;

      if (TYPE_STRUCTURAL_EQUALITY_P (TREE_TYPE (olddecl)))
	SET_TYPE_STRUCTURAL_EQUALITY (newtype);
      else
	TYPE_CANONICAL (newtype) = canonical_type_parameter (newtype);

      if (TREE_CODE (olddecl) == TEMPLATE_DECL)
	{
	  DECL_TEMPLATE_RESULT (newdecl)
	    = build_decl (DECL_SOURCE_LOCATION (olddecl), TYPE_DECL,
			  DECL_NAME (olddecl), newtype);
	  DECL_ARTIFICIAL (DECL_TEMPLATE_RESULT (newdecl)) = true;
	  // First create a copy (ttargs) of tsubst_args with an
	  // additional level for the template template parameter's own
	  // template parameters (ttparms).
	  tree ttparms = (INNERMOST_TEMPLATE_PARMS
			  (DECL_TEMPLATE_PARMS (olddecl)));
	  const int depth = TMPL_ARGS_DEPTH (tsubst_args);
	  tree ttargs = make_tree_vec (depth + 1);
	  for (int i = 0; i < depth; ++i)
	    TREE_VEC_ELT (ttargs, i) = TREE_VEC_ELT (tsubst_args, i);
	  TREE_VEC_ELT (ttargs, depth)
	    = template_parms_level_to_args (ttparms);
	  // Substitute ttargs into ttparms to fix references to
	  // other template parameters.
	  ttparms = tsubst_template_parms_level (ttparms, ttargs,
						 complain|tf_partial);
	  // Now substitute again with args based on tparms, to reduce
	  // the level of the ttparms.
	  ttargs = current_template_args ();
	  ttparms = tsubst_template_parms_level (ttparms, ttargs,
						 complain);
	  // Finally, tack the adjusted parms onto tparms.
	  ttparms = tree_cons (size_int (depth), ttparms,
			       current_template_parms);
	  DECL_TEMPLATE_PARMS (newdecl) = ttparms;
	}
    }
  else
    {
      /* Build the CONST_DECL that represents the non-type parameter.  */
      tree oldconst = TEMPLATE_PARM_DECL (oldidx);
      tree newconst
	= build_decl (DECL_SOURCE_LOCATION (oldconst),
		      TREE_CODE (oldconst),
		      DECL_NAME (oldconst), newtype);
      TREE_CONSTANT (newconst) = TREE_CONSTANT (newdecl)
	= TREE_READONLY (newconst) = TREE_READONLY (newdecl) = true;
      SET_DECL_TEMPLATE_PARM_P (newconst);
      newidx = build_template_parm_index (index, level, level,
					  newconst, newtype);
      TEMPLATE_PARM_PARAMETER_PACK (newidx)
	= TEMPLATE_PARM_PARAMETER_PACK (oldidx);
      DECL_INITIAL (newdecl) = DECL_INITIAL (newconst) = newidx;
    }

  return newdecl;
}
/* As rewrite_template_parm, but for the whole TREE_LIST representing a
template parameter. */
static tree
rewrite_tparm_list (tree oldelt, unsigned index, unsigned level,
		    tree targs, unsigned targs_index, tsubst_flags_t complain)
{
  tree olddecl = TREE_VALUE (oldelt);
  tree newdecl = rewrite_template_parm (olddecl, index, level,
					targs, complain);
  if (newdecl == error_mark_node)
    return error_mark_node;
  /* Substitute into the parameter's default argument (TREE_PURPOSE).  */
  tree newdef = tsubst_template_arg (TREE_PURPOSE (oldelt),
				     targs, complain, NULL_TREE);
  tree list = build_tree_list (newdef, newdecl);
  /* Carry over the parameter's constraints, substituted.  */
  TEMPLATE_PARM_CONSTRAINTS (list)
    = tsubst_constraint_info (TEMPLATE_PARM_CONSTRAINTS (oldelt),
			      targs, complain, NULL_TREE);
  int depth = TMPL_ARGS_DEPTH (targs);
  /* Record the rewritten parm as an argument at TARGS_INDEX so later
     parms can refer to it during substitution.  */
  TMPL_ARG (targs, depth, targs_index) = template_parm_to_arg (list);
  return list;
}
/* Returns a C++17 class deduction guide template based on the constructor
   CTOR.  As a special case, CTOR can be a RECORD_TYPE for an implicit default
   guide, REFERENCE_TYPE for an implicit copy/move guide, or TREE_LIST for an
   aggregate initialization guide.

   OUTER_ARGS are the template arguments of the enclosing class when TYPE is
   a member class template instantiation, NULL_TREE otherwise.  COMPLAIN
   controls diagnostics.  Returns a TEMPLATE_DECL for an artificial guide
   function, or error_mark_node.  */
static tree
build_deduction_guide (tree type, tree ctor, tree outer_args, tsubst_flags_t complain)
{
  tree tparms, targs, fparms, fargs, ci;
  bool memtmpl = false;
  bool explicit_p;
  location_t loc;
  tree fn_tmpl = NULL_TREE;
  if (outer_args)
    {
      /* Substitute the enclosing class's arguments so the guide refers to
	 the right member specialization.  */
      ++processing_template_decl;
      type = tsubst (type, outer_args, complain, CLASSTYPE_TI_TEMPLATE (type));
      --processing_template_decl;
    }
  if (!DECL_DECLARES_FUNCTION_P (ctor))
    {
      /* Implicit or aggregate guide: CTOR only describes the desired
	 parameter list, there is no real constructor decl.  */
      if (TYPE_P (ctor))
	{
	  /* REFERENCE_TYPE means the copy/move guide (one parameter of
	     reference-to-TYPE); otherwise the default guide (no parms).  */
	  bool copy_p = TYPE_REF_P (ctor);
	  if (copy_p)
	    fparms = tree_cons (NULL_TREE, type, void_list_node);
	  else
	    fparms = void_list_node;
	}
      else if (TREE_CODE (ctor) == TREE_LIST)
	/* Aggregate guide: CTOR is already the parameter-type list.  */
	fparms = ctor;
      else
	gcc_unreachable ();
      tree ctmpl = CLASSTYPE_TI_TEMPLATE (type);
      tparms = DECL_TEMPLATE_PARMS (ctmpl);
      targs = CLASSTYPE_TI_ARGS (type);
      ci = NULL_TREE;
      fargs = NULL_TREE;
      loc = DECL_SOURCE_LOCATION (ctmpl);
      explicit_p = false;
    }
  else
    {
      ++processing_template_decl;
      bool ok = true;
      fn_tmpl
	= (TREE_CODE (ctor) == TEMPLATE_DECL ? ctor
	   : DECL_TI_TEMPLATE (ctor));
      if (outer_args)
	fn_tmpl = tsubst (fn_tmpl, outer_args, complain, ctor);
      ctor = DECL_TEMPLATE_RESULT (fn_tmpl);
      tparms = DECL_TEMPLATE_PARMS (fn_tmpl);
      /* If type is a member class template, DECL_TI_ARGS (ctor) will have
	 fully specialized args for the enclosing class.  Strip those off, as
	 the deduction guide won't have those template parameters.  */
      targs = get_innermost_template_args (DECL_TI_ARGS (ctor),
					   TMPL_PARMS_DEPTH (tparms));
      /* Discard the 'this' parameter.  */
      fparms = FUNCTION_ARG_CHAIN (ctor);
      fargs = TREE_CHAIN (DECL_ARGUMENTS (ctor));
      ci = get_constraints (ctor);
      loc = DECL_SOURCE_LOCATION (ctor);
      explicit_p = DECL_NONCONVERTING_P (ctor);
      if (PRIMARY_TEMPLATE_P (fn_tmpl))
	{
	  memtmpl = true;
	  /* For a member template constructor, we need to flatten the two
	     template parameter lists into one, and then adjust the function
	     signature accordingly.  This gets...complicated.  */
	  tree save_parms = current_template_parms;
	  /* For a member template we should have two levels of parms/args, one
	     for the class and one for the constructor.  We stripped
	     specialized args for further enclosing classes above.  */
	  const int depth = 2;
	  gcc_assert (TMPL_ARGS_DEPTH (targs) == depth);
	  /* Template args for translating references to the two-level template
	     parameters into references to the one-level template parameters we
	     are creating.  */
	  tree tsubst_args = copy_node (targs);
	  TMPL_ARGS_LEVEL (tsubst_args, depth)
	    = copy_node (TMPL_ARGS_LEVEL (tsubst_args, depth));
	  /* Template parms for the constructor template.  */
	  tree ftparms = TREE_VALUE (tparms);
	  unsigned flen = TREE_VEC_LENGTH (ftparms);
	  /* Template parms for the class template.  */
	  tparms = TREE_CHAIN (tparms);
	  tree ctparms = TREE_VALUE (tparms);
	  unsigned clen = TREE_VEC_LENGTH (ctparms);
	  /* Template parms for the deduction guide start as a copy of the
	     template parms for the class.  We set current_template_parms for
	     lookup_template_class_1.  */
	  current_template_parms = tparms = copy_node (tparms);
	  tree new_vec = TREE_VALUE (tparms) = make_tree_vec (flen + clen);
	  for (unsigned i = 0; i < clen; ++i)
	    TREE_VEC_ELT (new_vec, i) = TREE_VEC_ELT (ctparms, i);
	  /* Now we need to rewrite the constructor parms to append them to the
	     class parms.  */
	  for (unsigned i = 0; i < flen; ++i)
	    {
	      unsigned index = i + clen;
	      unsigned level = 1;
	      tree oldelt = TREE_VEC_ELT (ftparms, i);
	      tree newelt
		= rewrite_tparm_list (oldelt, index, level,
				      tsubst_args, i, complain);
	      if (newelt == error_mark_node)
		ok = false;
	      TREE_VEC_ELT (new_vec, index) = newelt;
	    }
	  /* Now we have a final set of template parms to substitute into the
	     function signature.  */
	  targs = template_parms_to_args (tparms);
	  fparms = tsubst_arg_types (fparms, tsubst_args, NULL_TREE,
				     complain, ctor);
	  if (fparms == error_mark_node)
	    ok = false;
	  if (ci)
	    ci = tsubst_constraint_info (ci, tsubst_args, complain, ctor);
	  /* Parms are to have DECL_CHAIN tsubsted, which would be skipped if
	     cp_unevaluated_operand.  */
	  cp_evaluated ev;
	  fargs = tsubst (fargs, tsubst_args, complain, ctor);
	  current_template_parms = save_parms;
	}
      --processing_template_decl;
      if (!ok)
	return error_mark_node;
    }
  if (!memtmpl)
    {
      /* Copy the parms so we can set DECL_PRIMARY_TEMPLATE.  */
      tparms = copy_node (tparms);
      INNERMOST_TEMPLATE_PARMS (tparms)
	= copy_node (INNERMOST_TEMPLATE_PARMS (tparms));
    }
  /* Build the artificial guide function and wrap it in a TEMPLATE_DECL.  */
  tree fntype = build_function_type (type, fparms);
  tree ded_fn = build_lang_decl_loc (loc,
				     FUNCTION_DECL,
				     dguide_name (type), fntype);
  DECL_ARGUMENTS (ded_fn) = fargs;
  DECL_ARTIFICIAL (ded_fn) = true;
  DECL_NONCONVERTING_P (ded_fn) = explicit_p;
  tree ded_tmpl = build_template_decl (ded_fn, tparms, /*member*/false);
  DECL_ARTIFICIAL (ded_tmpl) = true;
  DECL_TEMPLATE_RESULT (ded_tmpl) = ded_fn;
  TREE_TYPE (ded_tmpl) = TREE_TYPE (ded_fn);
  DECL_TEMPLATE_INFO (ded_fn) = build_template_info (ded_tmpl, targs);
  DECL_PRIMARY_TEMPLATE (ded_tmpl) = ded_tmpl;
  if (DECL_P (ctor))
    DECL_ABSTRACT_ORIGIN (ded_tmpl) = fn_tmpl;
  if (ci)
    set_constraints (ded_tmpl, ci);
  return ded_tmpl;
}
/* Add to LIST the member types for the reshaped initializer CTOR, for use as
   deduction-guide parameter types.  The list is built in reverse order; the
   caller (maybe_aggr_guide) nreverses it.  */
static tree
collect_ctor_idx_types (tree ctor, tree list)
{
  vec<constructor_elt, va_gc> *v = CONSTRUCTOR_ELTS (ctor);
  tree idx, val; unsigned i;
  FOR_EACH_CONSTRUCTOR_ELT (v, i, idx, val)
    {
      /* A nested braced initializer whose first element is keyed by a
	 FIELD_DECL initializes a subaggregate; recurse into it rather than
	 recording the aggregate's own type.  */
      if (BRACE_ENCLOSED_INITIALIZER_P (val)
	  && CONSTRUCTOR_NELTS (val))
	if (tree subidx = CONSTRUCTOR_ELT (val, 0)->index)
	  if (TREE_CODE (subidx) == FIELD_DECL)
	    {
	      list = collect_ctor_idx_types (val, list);
	      continue;
	    }
      /* Otherwise record the decltype of the initialized member.  */
      tree ftype = finish_decltype_type (idx, true, tf_none);
      list = tree_cons (NULL_TREE, ftype, list);
    }
  return list;
}
/* Return whether ETYPE is, or is derived from, a specialization of TMPL.  */
static bool
is_spec_or_derived (tree etype, tree tmpl)
{
  if (!etype || !CLASS_TYPE_P (etype))
    return false;
  /* Try to deduce TMPL's own parameters from ETYPE against the template's
     type pattern; allowing derived-to-base conversion covers derivation.  */
  tree pattern = TREE_TYPE (tmpl);
  tree parms = (INNERMOST_TEMPLATE_PARMS
		(DECL_TEMPLATE_PARMS (tmpl)));
  tree args = make_tree_vec (TREE_VEC_LENGTH (parms));
  bool deduced = (unify (parms, args, pattern, etype,
			 UNIFY_ALLOW_DERIVED, /*explain*/false) == 0);
  ggc_free (args);
  return deduced;
}
/* Return a C++20 aggregate deduction candidate for TYPE initialized from
   INIT, or NULL_TREE when none applies.  TMPL is the class template being
   deduced; ARGS are the resolved call arguments.  */
static tree
maybe_aggr_guide (tree tmpl, tree init, vec<tree,va_gc> *args)
{
  if (cxx_dialect < cxx2a)
    return NULL_TREE;
  if (init == NULL_TREE)
    return NULL_TREE;
  tree type = TREE_TYPE (tmpl);
  if (!CP_AGGREGATE_TYPE_P (type))
    return NULL_TREE;
  /* No aggregate candidate for copy-initialization.  */
  if (args->length() == 1)
    {
      tree val = (*args)[0];
      /* is_spec_or_derived takes (etype, tmpl); the candidate type comes
	 first, matching the call in do_class_deduction.  (The previous code
	 passed the arguments swapped, so this check never fired.)  */
      if (is_spec_or_derived (TREE_TYPE (val), tmpl))
	return NULL_TREE;
    }
  /* If we encounter a problem, we just won't add the candidate.  */
  tsubst_flags_t complain = tf_none;
  tree parms = NULL_TREE;
  if (BRACE_ENCLOSED_INITIALIZER_P (init))
    {
      /* Line the initializers up with the members, then take one parameter
	 type per initialized member.  */
      init = reshape_init (type, init, complain);
      if (init == error_mark_node)
	return NULL_TREE;
      parms = collect_ctor_idx_types (init, parms);
    }
  else if (TREE_CODE (init) == TREE_LIST)
    {
      int len = list_length (init);
      for (tree field = TYPE_FIELDS (type);
	   len;
	   --len, field = DECL_CHAIN (field))
	{
	  field = next_initializable_field (field);
	  if (!field)
	    return NULL_TREE;
	  tree ftype = finish_decltype_type (field, true, complain);
	  parms = tree_cons (NULL_TREE, ftype, parms);
	}
    }
  else
    /* Aggregate initialization doesn't apply to an initializer expression.  */
    return NULL_TREE;
  if (parms)
    {
      /* PARMS was built in reverse; restore source order and terminate it
	 like a parameter-type list.  */
      tree last = parms;
      parms = nreverse (parms);
      TREE_CHAIN (last) = void_list_node;
      tree guide = build_deduction_guide (type, parms, NULL_TREE, complain);
      return guide;
    }
  return NULL_TREE;
}
/* UGUIDES are the deduction guides for the underlying template of alias
   template TMPL; adjust them to be deduction guides for TMPL.  Returns the
   adjusted overload set, or error_mark_node.  */
static tree
alias_ctad_tweaks (tree tmpl, tree uguides)
{
  /* [over.match.class.deduct]: When resolving a placeholder for a deduced
     class type (9.2.8.2) where the template-name names an alias template A,
     the defining-type-id of A must be of the form
      typename(opt) nested-name-specifier(opt) template(opt) simple-template-id
     as specified in 9.2.8.2.  The guides of A are the set of functions or
     function templates formed as follows.  For each function or function
     template f in the guides of the template named by the simple-template-id
     of the defining-type-id, the template arguments of the return type of f
     are deduced from the defining-type-id of A according to the process in
     13.10.2.5 with the exception that deduction does not fail if not all
     template arguments are deduced.  Let g denote the result of substituting
     these deductions into f.  If substitution succeeds, form a function or
     function template f' with the following properties and add it to the set
     of guides of A:
     * The function type of f' is the function type of g.
     * If f is a function template, f' is a function template whose template
     parameter list consists of all the template parameters of A (including
     their default template arguments) that appear in the above deductions or
     (recursively) in their default template arguments, followed by the
     template parameters of f that were not deduced (including their default
     template arguments), otherwise f' is not a function template.
     * The associated constraints (13.5.2) are the conjunction of the
     associated constraints of g and a constraint that is satisfied if and only
     if the arguments of A are deducible (see below) from the return type.
     * If f is a copy deduction candidate (12.4.1.8), then f' is considered to
     be so as well.
     * If f was generated from a deduction-guide (12.4.1.8), then f' is
     considered to be so as well.
     * The explicit-specifier of f' is the explicit-specifier of g (if
     any).  */
  /* This implementation differs from the above in two significant ways:
     1) We include all template parameters of A, not just some.
     2) The added constraint is same_type instead of deducible.
     I believe that while it's probably possible to construct a testcase that
     behaves differently with this simplification, it should have the same
     effect for real uses.  Including all template parameters means that we
     deduce all parameters of A when resolving the call, so when we're in the
     constraint we don't need to deduce them again, we can just check whether
     the deduction produced the desired result.  */
  tsubst_flags_t complain = tf_warning_or_error;
  tree atype = TREE_TYPE (tmpl);
  tree aguides = NULL_TREE;
  tree atparms = INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (tmpl));
  unsigned natparms = TREE_VEC_LENGTH (atparms);
  tree utype = DECL_ORIGINAL_TYPE (DECL_TEMPLATE_RESULT (tmpl));
  for (ovl_iterator iter (uguides); iter; ++iter)
    {
      tree f = *iter;
      tree in_decl = f;
      location_t loc = DECL_SOURCE_LOCATION (f);
      tree ret = TREE_TYPE (TREE_TYPE (f));
      tree fprime = f;
      if (TREE_CODE (f) == TEMPLATE_DECL)
	{
	  processing_template_decl_sentinel ptds (/*reset*/false);
	  ++processing_template_decl;
	  /* Deduce template arguments for f from the type-id of A.  */
	  tree ftparms = INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (f));
	  unsigned len = TREE_VEC_LENGTH (ftparms);
	  tree targs = make_tree_vec (len);
	  int err = unify (ftparms, targs, ret, utype, UNIFY_ALLOW_NONE, false);
	  gcc_assert (!err);
	  /* The number of parms for f' is the number of parms for A plus
	     non-deduced parms of f.  */
	  unsigned ndlen = 0;
	  unsigned j;
	  for (unsigned i = 0; i < len; ++i)
	    if (TREE_VEC_ELT (targs, i) == NULL_TREE)
	      ++ndlen;
	  tree gtparms = make_tree_vec (natparms + ndlen);
	  /* First copy over the parms of A.  */
	  for (j = 0; j < natparms; ++j)
	    TREE_VEC_ELT (gtparms, j) = TREE_VEC_ELT (atparms, j);
	  /* Now rewrite the non-deduced parms of f.  */
	  for (unsigned i = 0; ndlen && i < len; ++i)
	    if (TREE_VEC_ELT (targs, i) == NULL_TREE)
	      {
		--ndlen;
		unsigned index = j++;
		unsigned level = 1;
		tree oldlist = TREE_VEC_ELT (ftparms, i);
		tree list = rewrite_tparm_list (oldlist, index, level,
						targs, i, complain);
		TREE_VEC_ELT (gtparms, index) = list;
	      }
	  gtparms = build_tree_list (size_one_node, gtparms);
	  /* Substitute the deduced arguments plus the rewritten template
	     parameters into f to get g.  This covers the type, copyness,
	     guideness, and explicit-specifier.  */
	  tree g = tsubst_decl (DECL_TEMPLATE_RESULT (f), targs, complain);
	  if (g == error_mark_node)
	    return error_mark_node;
	  DECL_USE_TEMPLATE (g) = 0;
	  fprime = build_template_decl (g, gtparms, false);
	  DECL_TEMPLATE_RESULT (fprime) = g;
	  TREE_TYPE (fprime) = TREE_TYPE (g);
	  tree gtargs = template_parms_to_args (gtparms);
	  DECL_TEMPLATE_INFO (g) = build_template_info (fprime, gtargs);
	  DECL_PRIMARY_TEMPLATE (fprime) = fprime;
	  /* Substitute the associated constraints.  */
	  tree ci = get_constraints (f);
	  if (ci)
	    ci = tsubst_constraint_info (ci, targs, complain, in_decl);
	  if (ci == error_mark_node)
	    return error_mark_node;
	  /* Add a constraint that the return type matches the instantiation of
	     A with the same template arguments.  */
	  ret = TREE_TYPE (TREE_TYPE (fprime));
	  if (!same_type_p (atype, ret)
	      /* FIXME this should mean they don't compare as equivalent.  */
	      || dependent_alias_template_spec_p (atype, nt_opaque))
	    {
	      tree same = finish_trait_expr (loc, CPTK_IS_SAME_AS, atype, ret);
	      ci = append_constraint (ci, same);
	    }
	  if (ci)
	    set_constraints (fprime, ci);
	}
      else
	{
	  /* For a non-template deduction guide, if the arguments of A aren't
	     deducible from the return type, don't add the candidate.  */
	  tree targs = make_tree_vec (natparms);
	  int err = unify (atparms, targs, utype, ret, UNIFY_ALLOW_NONE, false);
	  for (unsigned i = 0; !err && i < natparms; ++i)
	    if (TREE_VEC_ELT (targs, i) == NULL_TREE)
	      err = true;
	  if (err)
	    continue;
	}
      aguides = lookup_add (fprime, aguides);
    }
  return aguides;
}
/* Return artificial deduction guides built from the constructors of class
   template TMPL.  */
static tree
ctor_deduction_guides_for (tree tmpl, tsubst_flags_t complain)
{
  tree outer_args = NULL_TREE;
  tree type = TREE_TYPE (tmpl);
  /* For a member class template of a class template instantiation, build
     guides from the most general template, remembering the enclosing
     arguments so they can be substituted back in.  */
  if (DECL_CLASS_SCOPE_P (tmpl)
      && CLASSTYPE_TEMPLATE_INSTANTIATION (DECL_CONTEXT (tmpl)))
    {
      outer_args = CLASSTYPE_TI_ARGS (DECL_CONTEXT (tmpl));
      type = TREE_TYPE (most_general_template (tmpl));
    }

  tree cands = NULL_TREE;
  /* One guide per constructor, skipping inherited ones.  */
  for (ovl_iterator iter (CLASSTYPE_CONSTRUCTORS (type)); iter; ++iter)
    if (!iter.using_p ())
      {
	tree guide = build_deduction_guide (type, *iter, outer_args, complain);
	cands = lookup_add (guide, cands);
      }

  /* Implicit default-constructor guide, when there is no user-declared
     constructor.  */
  if (!TYPE_HAS_USER_CONSTRUCTOR (type))
    cands = lookup_add (build_deduction_guide (type, type, outer_args,
					       complain),
			cands);

  /* Implicit copy guide: a REFERENCE_TYPE stands in for the constructor.  */
  cands = lookup_add (build_deduction_guide (type,
					     build_reference_type (type),
					     outer_args, complain),
		      cands);
  return cands;
}
static GTY((deletable)) hash_map<tree, tree_pair_p> *dguide_cache;
/* Return the non-aggregate deduction guides for deducible template TMPL.  The
   aggregate candidate is added separately because it depends on the
   initializer.  Set ANY_DGUIDES_P if we find a non-implicit deduction
   guide.  */
static tree
deduction_guides_for (tree tmpl, bool &any_dguides_p, tsubst_flags_t complain)
{
  tree guides = NULL_TREE;
  if (DECL_ALIAS_TEMPLATE_P (tmpl))
    {
      /* For an alias template, start from the guides of the underlying
	 template; they are adjusted by alias_ctad_tweaks below.  */
      tree under = DECL_ORIGINAL_TYPE (DECL_TEMPLATE_RESULT (tmpl));
      tree tinfo = get_template_info (under);
      guides = deduction_guides_for (TI_TEMPLATE (tinfo), any_dguides_p,
				     complain);
    }
  else
    {
      /* User-declared deduction guides live in the template's scope under a
	 special name (dguide_name).  */
      guides = lookup_qualified_name (CP_DECL_CONTEXT (tmpl),
				      dguide_name (tmpl),
				      /*type*/false, /*complain*/false,
				      /*hidden*/false);
      if (guides == error_mark_node)
	guides = NULL_TREE;
      else
	any_dguides_p = true;
    }
  /* Cache the deduction guides for a template.  We also remember the result of
     lookup, and rebuild everything if it changes; should be very rare.  */
  tree_pair_p cache = NULL;
  if (tree_pair_p &r
      = hash_map_safe_get_or_insert<hm_ggc> (dguide_cache, tmpl))
    {
      cache = r;
      if (cache->purpose == guides)
	return cache->value;
    }
  else
    {
      r = cache = ggc_cleared_alloc<tree_pair_s> ();
      cache->purpose = guides;
    }
  tree cands = NULL_TREE;
  if (DECL_ALIAS_TEMPLATE_P (tmpl))
    cands = alias_ctad_tweaks (tmpl, guides);
  else
    {
      /* Implicit guides from constructors, plus whatever lookup found.  */
      cands = ctor_deduction_guides_for (tmpl, complain);
      for (ovl_iterator it (guides); it; ++it)
	cands = lookup_add (*it, cands);
    }
  cache->value = cands;
  return cands;
}
/* Return whether TMPL is a (class template argument-) deducible template.  */
bool
ctad_template_p (tree tmpl)
{
  /* A deducible template is either a class template or is an alias template
     whose defining-type-id is of the form
      typename(opt) nested-name-specifier(opt) template(opt) simple-template-id
     where the nested-name-specifier (if any) is non-dependent and the
     template-name of the simple-template-id names a deducible template.  */
  if (DECL_CLASS_TEMPLATE_P (tmpl)
      || DECL_TEMPLATE_TEMPLATE_PARM_P (tmpl))
    return true;
  if (!DECL_ALIAS_TEMPLATE_P (tmpl))
    return false;
  /* For an alias template, recurse into the underlying template.  */
  tree orig = DECL_ORIGINAL_TYPE (DECL_TEMPLATE_RESULT (tmpl));
  tree tinfo = get_template_info (orig);
  return tinfo && ctad_template_p (TI_TEMPLATE (tinfo));
}
/* Deduce template arguments for the class template placeholder PTYPE for
   template TMPL based on the initializer INIT, and return the resulting
   type.  FLAGS are LOOKUP_* flags from the initialization context; COMPLAIN
   controls diagnostics.  Returns error_mark_node on failure.  */
static tree
do_class_deduction (tree ptype, tree tmpl, tree init,
		    int flags, tsubst_flags_t complain)
{
  /* We should have handled this in the caller.  */
  if (DECL_TEMPLATE_TEMPLATE_PARM_P (tmpl))
    return ptype;
  /* Initializing one placeholder from another.  */
  if (init && TREE_CODE (init) == TEMPLATE_PARM_INDEX
      && is_auto (TREE_TYPE (init))
      && CLASS_PLACEHOLDER_TEMPLATE (TREE_TYPE (init)) == tmpl)
    return cp_build_qualified_type (TREE_TYPE (init), cp_type_quals (ptype));
  /* Look through alias templates that just rename another template.  */
  tmpl = get_underlying_template (tmpl);
  if (!ctad_template_p (tmpl))
    {
      if (complain & tf_error)
	error ("non-deducible template %qT used without template arguments", tmpl);
      return error_mark_node;
    }
  else if (cxx_dialect < cxx2a && DECL_ALIAS_TEMPLATE_P (tmpl))
    {
      /* This doesn't affect conforming C++17 code, so just pedwarn.  */
      if (complain & tf_warning_or_error)
	pedwarn (input_location, 0, "alias template deduction only available "
		 "with %<-std=c++2a%> or %<-std=gnu++2a%>");
    }
  tree type = TREE_TYPE (tmpl);
  bool try_list_ctor = false;
  /* Build the argument vector from INIT; rv_args releases it on return.  */
  releasing_vec rv_args = NULL;
  vec<tree,va_gc> *&args = *&rv_args;
  if (init == NULL_TREE)
    args = make_tree_vector ();
  else if (BRACE_ENCLOSED_INITIALIZER_P (init))
    {
      try_list_ctor = TYPE_HAS_LIST_CTOR (type);
      if (try_list_ctor && CONSTRUCTOR_NELTS (init) == 1)
	{
	  /* As an exception, the first phase in 16.3.1.7 (considering the
	     initializer list as a single argument) is omitted if the
	     initializer list consists of a single expression of type cv U,
	     where U is a specialization of C or a class derived from a
	     specialization of C.  */
	  tree elt = CONSTRUCTOR_ELT (init, 0)->value;
	  if (is_spec_or_derived (TREE_TYPE (elt), tmpl))
	    try_list_ctor = false;
	}
      if (try_list_ctor || is_std_init_list (type))
	args = make_tree_vector_single (init);
      else
	args = make_tree_vector_from_ctor (init);
    }
  else if (TREE_CODE (init) == TREE_LIST)
    args = make_tree_vector_from_list (init);
  else
    args = make_tree_vector_single (init);
  /* Do this now to avoid problems with erroneous args later on.  */
  args = resolve_args (args, complain);
  if (args == NULL)
    return error_mark_node;
  bool any_dguides_p = false;
  tree cands = deduction_guides_for (tmpl, any_dguides_p, complain);
  if (cands == error_mark_node)
    return error_mark_node;
  /* Prune explicit deduction guides in copy-initialization context.  */
  bool elided = false;
  if (flags & LOOKUP_ONLYCONVERTING)
    {
      for (lkp_iterator iter (cands); !elided && iter; ++iter)
	if (DECL_NONCONVERTING_P (STRIP_TEMPLATE (*iter)))
	  elided = true;
      if (elided)
	{
	  /* Found a nonconverting guide, prune the candidates.  */
	  tree pruned = NULL_TREE;
	  for (lkp_iterator iter (cands); iter; ++iter)
	    if (!DECL_NONCONVERTING_P (STRIP_TEMPLATE (*iter)))
	      pruned = lookup_add (*iter, pruned);
	  cands = pruned;
	}
    }
  /* The aggregate deduction candidate is only considered when no
     user-written deduction guide was found.  */
  if (!any_dguides_p)
    if (tree guide = maybe_aggr_guide (tmpl, init, args))
      cands = lookup_add (guide, cands);
  tree call = error_mark_node;
  /* If this is list-initialization and the class has a list constructor, first
     try deducing from the list as a single argument, as [over.match.list].  */
  tree list_cands = NULL_TREE;
  if (try_list_ctor && cands)
    for (lkp_iterator iter (cands); iter; ++iter)
      {
	tree dg = *iter;
	if (is_list_ctor (dg))
	  list_cands = lookup_add (dg, list_cands);
      }
  if (list_cands)
    {
      ++cp_unevaluated_operand;
      call = build_new_function_call (list_cands, &args, tf_decltype);
      --cp_unevaluated_operand;
      if (call == error_mark_node)
	{
	  /* That didn't work, now try treating the list as a sequence of
	     arguments.  */
	  release_tree_vector (args);
	  args = make_tree_vector_from_ctor (init);
	}
    }
  if (elided && !cands)
    {
      error ("cannot deduce template arguments for copy-initialization"
	     " of %qT, as it has no non-explicit deduction guides or "
	     "user-declared constructors", type);
      return error_mark_node;
    }
  else if (!cands && call == error_mark_node)
    {
      error ("cannot deduce template arguments of %qT, as it has no viable "
	     "deduction guides", type);
      return error_mark_node;
    }
  if (call == error_mark_node)
    {
      ++cp_unevaluated_operand;
      call = build_new_function_call (cands, &args, tf_decltype);
      --cp_unevaluated_operand;
    }
  if (call == error_mark_node
      && (complain & tf_warning_or_error))
    {
      /* Redo overload resolution with diagnostics enabled so the user sees
	 why each candidate was rejected.  */
      error ("class template argument deduction failed:");
      ++cp_unevaluated_operand;
      call = build_new_function_call (cands, &args, complain | tf_decltype);
      --cp_unevaluated_operand;
      if (elided)
	inform (input_location, "explicit deduction guides not considered "
		"for copy-initialization");
    }
  return cp_build_qualified_type (TREE_TYPE (call), cp_type_quals (ptype));
}
/* Replace occurrences of 'auto' in TYPE with the appropriate type deduced
   from INIT.  AUTO_NODE is the TEMPLATE_TYPE_PARM used for 'auto' in TYPE.
   The CONTEXT determines the context in which auto deduction is performed
   and is used to control error diagnostics.  FLAGS are the LOOKUP_* flags.
   OUTER_TARGS are used during template argument deduction
   (context == adc_unify) to properly substitute the result, and is ignored
   in other contexts.
   For partial-concept-ids, extra args may be appended to the list of deduced
   template arguments prior to determining constraint satisfaction.
   Returns the deduced type, TYPE unchanged when deduction must be deferred,
   or error_mark_node on failure.  */
tree
do_auto_deduction (tree type, tree init, tree auto_node,
		   tsubst_flags_t complain, auto_deduction_context context,
		   tree outer_targs, int flags)
{
  tree targs;
  if (init == error_mark_node)
    return error_mark_node;
  if (init && type_dependent_expression_p (init)
      && context != adc_unify)
    /* Defining a subset of type-dependent expressions that we can deduce
       from ahead of time isn't worth the trouble.  */
    return type;
  /* Similarly, we can't deduce from another undeduced decl.  */
  if (init && undeduced_auto_decl (init))
    return type;
  /* We may be doing a partial substitution, but we still want to replace
     auto_node.  */
  complain &= ~tf_partial;
  if (tree tmpl = CLASS_PLACEHOLDER_TEMPLATE (auto_node))
    /* C++17 class template argument deduction.  */
    return do_class_deduction (type, tmpl, init, flags, complain);
  if (init == NULL_TREE || TREE_TYPE (init) == NULL_TREE)
    /* Nothing we can do with this, even in deduction context.  */
    return type;
  /* [dcl.spec.auto]: Obtain P from T by replacing the occurrences of auto
     with either a new invented type template parameter U or, if the
     initializer is a braced-init-list (8.5.4), with
     std::initializer_list<U>.  */
  if (BRACE_ENCLOSED_INITIALIZER_P (init))
    {
      if (!DIRECT_LIST_INIT_P (init))
	type = listify_autos (type, auto_node);
      else if (CONSTRUCTOR_NELTS (init) == 1)
	/* Direct-list-init from one element deduces from that element.  */
	init = CONSTRUCTOR_ELT (init, 0)->value;
      else
	{
	  if (complain & tf_warning_or_error)
	    {
	      if (permerror (input_location, "direct-list-initialization of "
			     "%<auto%> requires exactly one element"))
		inform (input_location,
			"for deduction to %<std::initializer_list%>, use copy-"
			"list-initialization (i.e. add %<=%> before the %<{%>)");
	    }
	  type = listify_autos (type, auto_node);
	}
    }
  if (type == error_mark_node)
    return error_mark_node;
  init = resolve_nondeduced_context (init, complain);
  if (context == adc_decomp_type
      && auto_node == type
      && init != error_mark_node
      && TREE_CODE (TREE_TYPE (init)) == ARRAY_TYPE)
    /* [dcl.decomp]/1 - if decomposition declaration has no ref-qualifiers
       and initializer has array type, deduce cv-qualified array type.  */
    return cp_build_qualified_type_real (TREE_TYPE (init), TYPE_QUALS (type),
					 complain);
  else if (AUTO_IS_DECLTYPE (auto_node))
    {
      /* decltype(auto): deduce via decltype semantics; an unparenthesized
	 id-expression or member access yields the declared type.  */
      tree stripped_init = tree_strip_any_location_wrapper (init);
      bool id = (DECL_P (stripped_init)
		 || ((TREE_CODE (init) == COMPONENT_REF
		      || TREE_CODE (init) == SCOPE_REF)
		     && !REF_PARENTHESIZED_P (init)));
      targs = make_tree_vec (1);
      TREE_VEC_ELT (targs, 0)
	= finish_decltype_type (init, id, tf_warning_or_error);
      if (type != auto_node)
	{
	  if (complain & tf_error)
	    error ("%qT as type rather than plain %<decltype(auto)%>", type);
	  return error_mark_node;
	}
    }
  else
    {
      if (error_operand_p (init))
	return error_mark_node;
      /* Plain auto: deduce as if calling f(INIT) with f taking TYPE.  */
      tree parms = build_tree_list (NULL_TREE, type);
      tree tparms;
      if (flag_concepts)
	tparms = extract_autos (type);
      else
	{
	  tparms = make_tree_vec (1);
	  TREE_VEC_ELT (tparms, 0)
	    = build_tree_list (NULL_TREE, TYPE_NAME (auto_node));
	}
      targs = make_tree_vec (TREE_VEC_LENGTH (tparms));
      int val = type_unification_real (tparms, targs, parms, &init, 1, 0,
				       DEDUCE_CALL,
				       NULL, /*explain_p=*/false);
      if (val > 0)
	{
	  if (processing_template_decl)
	    /* Try again at instantiation time.  */
	    return type;
	  if (type && type != error_mark_node
	      && (complain & tf_error))
	    /* If type is error_mark_node a diagnostic must have been
	       emitted by now.  Also, having a mention to '<type error>'
	       in the diagnostic is not really useful to the user.  */
	    {
	      if (cfun
		  && FNDECL_USED_AUTO (current_function_decl)
		  && (auto_node
		      == DECL_SAVED_AUTO_RETURN_TYPE (current_function_decl))
		  && LAMBDA_FUNCTION_P (current_function_decl))
		error ("unable to deduce lambda return type from %qE", init);
	      else
		error ("unable to deduce %qT from %qE", type, init);
	      /* Rerun deduction with explain_p so the failure reason is
		 diagnosed.  */
	      type_unification_real (tparms, targs, parms, &init, 1, 0,
				     DEDUCE_CALL,
				     NULL, /*explain_p=*/true);
	    }
	  return error_mark_node;
	}
    }
  /* Check any placeholder constraints against the deduced type.  */
  if (flag_concepts && !processing_template_decl)
    if (tree check = NON_ERROR (PLACEHOLDER_TYPE_CONSTRAINTS (auto_node)))
      {
	/* Use the deduced type to check the associated constraints.  If we
	   have a partial-concept-id, rebuild the argument list so that
	   we check using the extra arguments.  */
	check = unpack_concept_check (check);
	gcc_assert (TREE_CODE (check) == TEMPLATE_ID_EXPR);
	tree cdecl = TREE_OPERAND (check, 0);
	if (OVL_P (cdecl))
	  cdecl = OVL_FIRST (cdecl);
	tree cargs = TREE_OPERAND (check, 1);
	if (TREE_VEC_LENGTH (cargs) > 1)
	  {
	    cargs = copy_node (cargs);
	    TREE_VEC_ELT (cargs, 0) = TREE_VEC_ELT (targs, 0);
	  }
	else
	  cargs = targs;
	/* Rebuild the check using the deduced arguments.  */
	check = build_concept_check (cdecl, cargs, tf_none);
	if (!constraints_satisfied_p (check))
	  {
	    if (complain & tf_warning_or_error)
	      {
		auto_diagnostic_group d;
		/* Tailor the diagnostic to the deduction context.  */
		switch (context)
		  {
		  case adc_unspecified:
		  case adc_unify:
		    error("placeholder constraints not satisfied");
		    break;
		  case adc_variable_type:
		  case adc_decomp_type:
		    error ("deduced initializer does not satisfy "
			   "placeholder constraints");
		    break;
		  case adc_return_type:
		    error ("deduced return type does not satisfy "
			   "placeholder constraints");
		    break;
		  case adc_requirement:
		    error ("deduced expression type does not satisfy "
			   "placeholder constraints");
		    break;
		  }
		diagnose_constraints (input_location, check, targs);
	      }
	    return error_mark_node;
	  }
      }
  if (processing_template_decl && context != adc_unify)
    outer_targs = current_template_args ();
  targs = add_to_template_args (outer_targs, targs);
  return tsubst (type, targs, complain, NULL_TREE);
}
/* Substitutes LATE_RETURN_TYPE for 'auto' in TYPE and returns the
   result.  When LATE_RETURN_TYPE is NULL_TREE, TYPE is returned unchanged
   except that in an abbreviated function template the 'auto' is rebuilt at
   the correct template level.  */
tree
splice_late_return_type (tree type, tree late_return_type)
{
  if (late_return_type)
    {
      /* An explicit trailing return type simply replaces the auto.  */
      gcc_assert (is_auto (type) || seen_error ());
      return late_return_type;
    }
  if (tree auto_node = find_type_usage (type, is_auto))
    if (TEMPLATE_TYPE_LEVEL (auto_node) <= processing_template_decl)
      {
	/* In an abbreviated function template we didn't know we were dealing
	   with a function template when we saw the auto return type, so rebuild
	   the return type using an auto with the correct level.  */
	tree new_auto = make_auto_1 (TYPE_IDENTIFIER (auto_node), false);
	tree auto_vec = make_tree_vec (1);
	TREE_VEC_ELT (auto_vec, 0) = new_auto;
	tree targs = add_outermost_template_args (current_template_args (),
						  auto_vec);
	/* FIXME: We should also rebuild the constraint to refer to the new
	   auto.  */
	PLACEHOLDER_TYPE_CONSTRAINTS (new_auto)
	  = PLACEHOLDER_TYPE_CONSTRAINTS (auto_node);
	TYPE_CANONICAL (new_auto) = canonical_type_parameter (new_auto);
	return tsubst (type, targs, tf_none, NULL_TREE);
      }
  return type;
}
/* Returns true iff TYPE is a TEMPLATE_TYPE_PARM representing 'auto' or
   'decltype(auto)' or a deduced class template.  */
bool
is_auto (const_tree type)
{
  if (TREE_CODE (type) != TEMPLATE_TYPE_PARM)
    return false;
  tree name = TYPE_IDENTIFIER (type);
  return (name == auto_identifier
	  || name == decltype_auto_identifier);
}
/* for_each_template_parm callback for type_uses_auto: returns nonzero
   (terminating the walk) when TP is an 'auto' node.  */
int
is_auto_r (tree tp, void */*data*/)
{
  return is_auto (tp);
}
/* Returns the TEMPLATE_TYPE_PARM in TYPE representing `auto' iff TYPE contains
   a use of `auto'.  Returns NULL_TREE otherwise.  */
tree
type_uses_auto (tree type)
{
  if (type == NULL_TREE)
    return NULL_TREE;
  if (!flag_concepts)
    return find_type_usage (type, is_auto);
  /* The Concepts TS allows multiple autos in one type-specifier; just
     return the first one we find, do_auto_deduction will collect all of
     them.  */
  if (!uses_template_parms (type))
    return NULL_TREE;
  return for_each_template_parm (type, is_auto_r, /*data*/NULL,
				 /*visited*/NULL, /*nondeduced*/false);
}
/* Report ill-formed occurrences of auto types in ARGUMENTS.  If
   concepts are enabled, auto is acceptable in template arguments, but
   only when TEMPL identifies a template class.  Return TRUE if any
   such errors were reported.  */
bool
check_auto_in_tmpl_args (tree tmpl, tree args)
{
  /* If there were previous errors, nevermind.  */
  if (!args || TREE_CODE (args) != TREE_VEC)
    return false;
  /* If TMPL is an identifier, we're parsing and we can't tell yet
     whether TMPL is supposed to be a type, a function or a variable.
     We'll only be able to tell during template substitution, so we
     expect to be called again then.  If concepts are enabled and we
     know we have a type, we're ok.  */
  if (flag_concepts)
    {
      if (identifier_p (tmpl))
	return false;
      if (DECL_P (tmpl)
	  && (DECL_TYPE_TEMPLATE_P (tmpl)
	      || DECL_TEMPLATE_TEMPLATE_PARM_P (tmpl)))
	return false;
    }
  /* Quickly search for any occurrences of auto; usually there won't
     be any, and then we'll avoid allocating the vector.  */
  if (!type_uses_auto (args))
    return false;
  tree autos = extract_autos (args);
  bool errors = false;
  for (int i = 0, len = TREE_VEC_LENGTH (autos); i < len; ++i)
    {
      tree xauto = TREE_VALUE (TREE_VEC_ELT (autos, i));
      error_at (DECL_SOURCE_LOCATION (xauto),
		"invalid use of %qT in template argument", xauto);
      errors = true;
    }
  return errors;
}
/* For a given template T, return the vector of typedefs referenced
   in T for which access check is needed at T instantiation time.
   T is either a FUNCTION_DECL or a RECORD_TYPE.
   Those typedefs were added to T by the function
   append_type_to_template_for_access_check.  */
vec<qualified_typedef_usage_t, va_gc> *
get_types_needing_access_check (tree t)
{
  if (!t || t == error_mark_node)
    return NULL;

  tree ti = get_template_info (t);
  if (!ti)
    return NULL;

  /* Only class types and function decls carry this list.  */
  if (!CLASS_TYPE_P (t) && TREE_CODE (t) != FUNCTION_DECL)
    return NULL;
  if (!TI_TEMPLATE (ti))
    return NULL;

  return TI_TYPEDEFS_NEEDING_ACCESS_CHECKING (ti);
}
/* Append the typedef TYPE_DECL used in template T to a list of typedefs
   tied to T.  That list of typedefs will be access checked at
   T instantiation time.
   T is either a FUNCTION_DECL or a RECORD_TYPE.
   TYPE_DECL is a TYPE_DECL node representing a typedef.
   SCOPE is the scope through which TYPE_DECL is accessed.
   LOCATION is the location of the usage point of TYPE_DECL.
   This function is a subroutine of
   append_type_to_template_for_access_check.  */
static void
append_type_to_template_for_access_check_1 (tree t,
					    tree type_decl,
					    tree scope,
					    location_t location)
{
  if (!t || t == error_mark_node)
    return;

  gcc_assert ((TREE_CODE (t) == FUNCTION_DECL
	       || CLASS_TYPE_P (t))
	      && type_decl
	      && TREE_CODE (type_decl) == TYPE_DECL
	      && scope);

  tree ti = get_template_info (t);
  if (!ti)
    return;
  gcc_assert (TI_TEMPLATE (ti));

  /* Record the typedef together with its access scope and usage location
     for the deferred access check.  */
  qualified_typedef_usage_t usage;
  usage.typedef_decl = type_decl;
  usage.context = scope;
  usage.locus = location;
  vec_safe_push (TI_TYPEDEFS_NEEDING_ACCESS_CHECKING (ti), usage);
}
/* Record that the typedef TYPE_DECL, accessed through SCOPE at
   LOCATION, must be access-checked when TEMPL is instantiated.
   TEMPL is either a class type, a FUNCTION_DECL or a TEMPLATE_DECL.

   E.g. consider:

     class C
     {
       typedef int myint;
     };

     template<class U> struct S
     {
       C::myint mi;  // <-- usage point of the typedef C::myint
     };

     S<char> s;

   At S<char> instantiation time the access of the myint typedef
   through the C scope must be checked; this function queues that
   (typedef, scope) pair on the template S so the check can be
   performed then.  (The snippet above should in fact yield an error,
   because myint is private to C.)  */

void
append_type_to_template_for_access_check (tree templ,
					  tree type_decl,
					  tree scope,
					  location_t location)
{
  gcc_assert (type_decl && (TREE_CODE (type_decl) == TYPE_DECL));

  /* Don't queue the same (typedef, scope) pair twice.  */
  unsigned ix;
  qualified_typedef_usage_t *usage;
  FOR_EACH_VEC_SAFE_ELT (get_types_needing_access_check (templ), ix, usage)
    if (usage->typedef_decl == type_decl && scope == usage->context)
      return;

  append_type_to_template_for_access_check_1 (templ, type_decl,
					      scope, location);
}
/* Recursively walk the &&-tree rooted at *T looking for the
   subexpression KEY.  Return the address of the matching operand so
   the caller can overwrite it in place, or NULL if KEY does not
   occur.  */

static tree *
find_template_requirement (tree *t, tree key)
{
  if (*t == key)
    return t;
  if (TREE_CODE (*t) != TRUTH_ANDIF_EXPR)
    return NULL;

  /* Search the left conjunct first, then the right one.  */
  tree *found = find_template_requirement (&TREE_OPERAND (*t, 0), key);
  if (!found)
    found = find_template_requirement (&TREE_OPERAND (*t, 1), key);
  return found;
}
/* Convert the generic type parameters in PARM that match the types given in the
   range [START_IDX, END_IDX) from the current_template_parms into generic type
   packs.  Parameters outside that range are carried over unchanged.  Returns
   the result of substituting the adjusted parameters into PARM.  */

tree
convert_generic_types_to_packs (tree parm, int start_idx, int end_idx)
{
  tree current = current_template_parms;
  int depth = TMPL_PARMS_DEPTH (current);
  current = INNERMOST_TEMPLATE_PARMS (current);
  /* REPLACEMENT collects one argument per innermost template parameter;
     it is used as the argument vector for the tsubst at the end.  */
  tree replacement = make_tree_vec (TREE_VEC_LENGTH (current));

  /* Parameters before START_IDX map to themselves.  */
  for (int i = 0; i < start_idx; ++i)
    TREE_VEC_ELT (replacement, i)
      = TREE_TYPE (TREE_VALUE (TREE_VEC_ELT (current, i)));

  for (int i = start_idx; i < end_idx; ++i)
    {
      /* Create a distinct parameter pack type from the current parm and add it
	 to the replacement args to tsubst below into the generic function
	 parameter.  */
      tree node = TREE_VEC_ELT (current, i);
      tree o = TREE_TYPE (TREE_VALUE (node));
      tree t = copy_type (o);
      TEMPLATE_TYPE_PARM_INDEX (t)
	= reduce_template_parm_level (TEMPLATE_TYPE_PARM_INDEX (o),
				      t, 0, 0, tf_none);
      TREE_TYPE (TEMPLATE_TYPE_DECL (t)) = t;
      TYPE_STUB_DECL (t) = TYPE_NAME (t) = TEMPLATE_TYPE_DECL (t);
      TYPE_MAIN_VARIANT (t) = t;
      TEMPLATE_TYPE_PARAMETER_PACK (t) = true;
      TYPE_CANONICAL (t) = canonical_type_parameter (t);
      TREE_VEC_ELT (replacement, i) = t;

      /* Replace the current template parameter with new pack.  */
      TREE_VALUE (node) = TREE_CHAIN (t);

      /* Surgically adjust the associated constraint of adjusted parameter
	 and its corresponding contribution to the current template
	 requirements.  */
      if (tree constr = TEMPLATE_PARM_CONSTRAINTS (node))
	{
	  /* Re-target the concept check at the new pack type and refold
	     it as a left unary fold over &&.  */
	  tree id = unpack_concept_check (constr);
	  TREE_VEC_ELT (TREE_OPERAND (id, 1), 0) = t;
	  tree fold = finish_left_unary_fold_expr (constr, TRUTH_ANDIF_EXPR);
	  TEMPLATE_PARM_CONSTRAINTS (node) = fold;

	  /* If there was a constraint, we also need to replace that in
	     the template requirements, which we've already built.  */
	  tree *reqs = &TEMPLATE_PARMS_CONSTRAINTS (current_template_parms);
	  reqs = find_template_requirement (reqs, constr);
	  *reqs = fold;
	}
    }

  /* Parameters at or after END_IDX also map to themselves.  */
  for (int i = end_idx, e = TREE_VEC_LENGTH (current); i < e; ++i)
    TREE_VEC_ELT (replacement, i)
      = TREE_TYPE (TREE_VALUE (TREE_VEC_ELT (current, i)));

  /* If there are more levels then build up the replacement with the outer
     template parms.  */
  if (depth > 1)
    replacement = add_to_template_args (template_parms_to_args
					(TREE_CHAIN (current_template_parms)),
					replacement);

  return tsubst (parm, replacement, tf_none, NULL_TREE);
}
/* __integer_pack(N) in a pack expansion expands to a sequence of numbers from
0..N-1. */
void
declare_integer_pack (void)
{
tree ipfn = push_library_fn (get_identifier ("__integer_pack"),
build_function_type_list (integer_type_node,
integer_type_node,
NULL_TREE),
NULL_TREE, ECF_CONST);
DECL_DECLARED_CONSTEXPR_P (ipfn) = true;
set_decl_built_in_function (ipfn, BUILT_IN_FRONTEND,
CP_BUILT_IN_INTEGER_PACK);
}
/* Initialize the hash tables used for template specializations, and
   declare builtins that only exist from C++11 on.  */

void
init_template_processing (void)
{
  /* FIXME: enable sanitization (PR87847) */
  type_specializations = hash_table<spec_hasher>::create_ggc (37, false);
  decl_specializations = hash_table<spec_hasher>::create_ggc (37, false);

  if (cxx_dialect >= cxx11)
    declare_integer_pack ();
}
/* Print stats about the template hash tables for -fstats. */
void
print_template_statistics (void)
{
fprintf (stderr, "decl_specializations: size %ld, %ld elements, "
"%f collisions\n", (long) decl_specializations->size (),
(long) decl_specializations->elements (),
decl_specializations->collisions ());
fprintf (stderr, "type_specializations: size %ld, %ld elements, "
"%f collisions\n", (long) type_specializations->size (),
(long) type_specializations->elements (),
type_specializations->collisions ());
}
#if CHECKING_P

namespace selftest {

/* Verify that build_non_dependent_expr () works, for various expressions,
   and that location wrappers don't affect the results.  */

static void
test_build_non_dependent_expr ()
{
  location_t loc = BUILTINS_LOCATION;

  /* Verify constants, without and with location wrappers.  */
  tree int_cst = build_int_cst (integer_type_node, 42);
  ASSERT_EQ (int_cst, build_non_dependent_expr (int_cst));

  /* A wrapped constant should be returned unchanged, not stripped.  */
  tree wrapped_int_cst = maybe_wrap_with_location (int_cst, loc);
  ASSERT_TRUE (location_wrapper_p (wrapped_int_cst));
  ASSERT_EQ (wrapped_int_cst, build_non_dependent_expr (wrapped_int_cst));

  /* Same checks for a string literal.  */
  tree string_lit = build_string (4, "foo");
  TREE_TYPE (string_lit) = char_array_type_node;
  string_lit = fix_string_type (string_lit);
  ASSERT_EQ (string_lit, build_non_dependent_expr (string_lit));

  tree wrapped_string_lit = maybe_wrap_with_location (string_lit, loc);
  ASSERT_TRUE (location_wrapper_p (wrapped_string_lit));
  ASSERT_EQ (wrapped_string_lit,
	     build_non_dependent_expr (wrapped_string_lit));
}

/* Verify that type_dependent_expression_p () works correctly, even
   in the presence of location wrapper nodes.  */

static void
test_type_dependent_expression_p ()
{
  location_t loc = BUILTINS_LOCATION;

  tree name = get_identifier ("foo");

  /* If no templates are involved, nothing is type-dependent.  */
  gcc_assert (!processing_template_decl);
  ASSERT_FALSE (type_dependent_expression_p (name));

  ++processing_template_decl;

  /* Within a template, an unresolved name is always type-dependent.  */
  ASSERT_TRUE (type_dependent_expression_p (name));

  /* Ensure it copes with NULL_TREE and errors.  */
  ASSERT_FALSE (type_dependent_expression_p (NULL_TREE));
  ASSERT_FALSE (type_dependent_expression_p (error_mark_node));

  /* A USING_DECL in a template should be type-dependent, even if wrapped
     with a location wrapper (PR c++/83799).  */
  tree using_decl = build_lang_decl (USING_DECL, name, NULL_TREE);
  TREE_TYPE (using_decl) = integer_type_node;
  ASSERT_TRUE (type_dependent_expression_p (using_decl));
  tree wrapped_using_decl = maybe_wrap_with_location (using_decl, loc);
  ASSERT_TRUE (location_wrapper_p (wrapped_using_decl));
  ASSERT_TRUE (type_dependent_expression_p (wrapped_using_decl));

  /* Restore the global state changed above.  */
  --processing_template_decl;
}

/* Run all of the selftests within this file.  */

void
cp_pt_c_tests ()
{
  test_build_non_dependent_expr ();
  test_type_dependent_expression_p ();
}

} // namespace selftest

#endif /* #if CHECKING_P */
#include "gt-cp-pt.h"
|
shader.h | #ifndef SHADER_H
#define SHADER_H
// ===============================
// AUTHOR : Angel Ortiz (angelo12 AT vt DOT edu)
// CREATE DATE : 2018-07-12
// PURPOSE : Emulate modern programmable vertex and fragment shaders. Allow texture
// reading and full Physically based rendering models.
// ===============================
// SPECIAL NOTES: I kept the older shaders that I wrote while working towards
// building the final PBR model because I thought it would be nice to see the progression
// Although using pure interface classes would seem to incur a performance
// penalty through pointer chasing, I did not measure it through profiling.
// ===============================
//Headers
#include "vector3D.h"
#include "matrix.h"
#include "texture.h"
#include <math.h>
//Shader Interface for a class that emulates modern GPU fragment and vertex shaders
struct IShader {
    virtual ~IShader() {};
    //Processes one triangle vertex ('index' selects which of the three, 0..2)
    //and returns its transformed position. 'light' defaults to a constant
    //direction so callers that don't care about lighting may omit it.
    virtual Vector3f vertex(const Vector3f &vertex, const Vector3f &normal,
                            const Vector3f &textureVals,const Vector3f &tangent,
                            int index, const Vector3f &light = Vector3f{1,1,1}) = 0;
    //Shades one fragment; (u, v) are the interpolation weights of vertices 1
    //and 2 relative to vertex 0 (barycentric-style), as used by every
    //implementation below. Returns the fragment color.
    virtual Vector3f fragment(float u, float v) = 0;
};
//Simplest shader. Calculates light intensity per triangle.
struct FlatShader : public IShader {
    Matrix4 MVP, MV;        //model-view-projection and model-view matrices
    float varIntensity;     //per-triangle intensity, set in vertex(), read in fragment()
    Vector3f rgb{255,255,255};

    //Computes a single clamped Lambert term from the face normal and returns
    //the vertex transformed into projected space.
    Vector3f vertex(const Vector3f &vertex, const Vector3f &normal,
                    const Vector3f &textureVals,const Vector3f &tangent,
                    int index, const Vector3f &light) override
    {
        float lambert = normal.dotProduct(light);
        varIntensity = (lambert > 0.0f) ? lambert : 0.0f; //clamp negatives to zero
        return MVP.matMultVec(vertex); //Transforms verts into projected space
    }

    //Flat shading: every fragment of the triangle gets the same color.
    Vector3f fragment(float u, float v) override{
        return rgb * varIntensity;
    }
};
//More Complex shader that calculates a per-vertex intensity and interpolates
//through the fragments of the triangle
struct GouraudShader : public IShader {
    //Per object data
    Matrix4 MVP, MV, V, N;
    //NOTE(review): three separate colors feed the ambient/diffuse/specular
    //terms below — confirm the asymmetric defaults are intentional.
    Vector3f lightColor1{1,1,1}, lightColor2{0,0,1}, lightColor3{1,1,1};
    float ambientStrength = 0.05, diffStrength = 0, specularStrength = 0.5, spec = 0;
    Vector3f rgb{255,255,255};
    //Per vertex data: the x/y/z components of varying_* hold the intensity of
    //vertices 0/1/2 respectively (written via .data[index] in vertex()).
    Vector3f varying_diffuse, varying_specular, reflectDir, viewDir, lightDir, correctNormal;
    //Per pixel data
    Vector3f ambient, diffuse, specular;

    //Computes per-vertex diffuse and specular intensities and returns the
    //projected vertex position.
    Vector3f vertex(const Vector3f &vertex, const Vector3f &normal,
                    const Vector3f &textureVals,const Vector3f &tangent,
                    int index, const Vector3f &light) override
    {
        //Vertex attributes
        correctNormal = N.matMultDir(normal).normalized();
        lightDir = V.matMultDir(light).normalized();
        reflectDir = Vector3f::reflect(-lightDir, correctNormal);
        viewDir = MV.matMultVec(vertex).normalized();
        //Values to be interpolated
        varying_specular.data[index] = std::pow( std::max( -viewDir.dotProduct(reflectDir), 0.0f), 32.0f);
        //NOTE(review): the diffuse term dots against -lightDir here, whereas
        //PhongShader uses +lightDir — confirm the intended sign convention.
        varying_diffuse.data[index] = std::max(0.0f, (correctNormal).dotProduct(-lightDir));
        return MVP.matMultVec(vertex);
    }

    //Interpolates the per-vertex intensities with weights (u, v) and applies
    //the Phong reflection model.
    Vector3f fragment(float u, float v) override{
        //Interpolating
        diffStrength = varying_diffuse.x + u*(varying_diffuse.y - varying_diffuse.x) + v*(varying_diffuse.z - varying_diffuse.x);
        spec = varying_specular.x + u*(varying_specular.y - varying_specular.x) + v*(varying_specular.z - varying_specular.x);
        //Phong reflection model
        ambient = lightColor1 * ambientStrength;
        diffuse = lightColor2 * diffStrength;
        specular = lightColor3 * (specularStrength * spec);
        return (ambient + diffuse + specular) * rgb;
    }
};
//Even more complex shader that interpolates normals and calculates intensities per fragment instead
//instead of per vertex.
struct PhongShader : public IShader {
    //Per object data
    Matrix4 MVP, MV, V, N;
    float ambientStrength = 0.05, diffStrength = 0, specularStrength = 0.9, spec = 0;
    Vector3f lightColor{0,0.1,1},lightColorSpec{1,1,1};
    Vector3f rgb{255,255,255};
    //Per vertex data (one array slot per triangle vertex)
    Vector3f normals[3], viewDir[3];
    Vector3f varying_diffuse, varying_specular, reflectDir, lightDir;
    //Per pixel data
    Vector3f ambient, diffuse, specular, interpNormal, interpViewDir;

    //Only stores the transformed normal and view direction for vertex
    //'index'; all lighting is deferred to the fragment stage.
    Vector3f vertex(const Vector3f &vertex, const Vector3f &normal,
                    const Vector3f &textureVals,const Vector3f &tangent,
                    int index, const Vector3f &light) override
    {
        //Vertex attributes
        normals[index] = N.matMultDir(normal).normalized();
        viewDir[index] = MV.matMultVec(vertex).normalized();
        lightDir = V.matMultDir(light).normalized();
        return MVP.matMultVec(vertex);
    }

    //Interpolates normal and view direction with weights (u, v), then
    //evaluates the Phong reflection model per fragment.
    Vector3f fragment(float u, float v) override{
        //Interpolated stuff
        interpNormal = normals[0] + (normals[1] - normals[0])* u + (normals[2] - normals[0]) * v;
        interpViewDir = viewDir[0] + (viewDir[1] - viewDir[0])* u + (viewDir[2] - viewDir[0]) * v;
        //Ambient
        ambient = lightColor * ambientStrength;
        //Diffuse
        diffStrength = std::max(0.0f, (interpNormal.normalized()).dotProduct(lightDir));
        diffuse = lightColor * diffStrength;
        //Specular
        //NOTE(review): reflect() receives the unnormalized interpolated
        //normal (the diffuse term above normalizes it) — confirm intended.
        reflectDir = Vector3f::reflect(-lightDir, interpNormal);
        spec = std::pow( std::max( (-interpViewDir.normalized()).dotProduct(reflectDir), 0.0f), 50.0f);
        specular = lightColorSpec * (specularStrength * spec);
        return (ambient + diffuse + specular) * rgb;
    }
};
//Optimized version of Phong shader that uses a half angle instead of individual reflection
//angles
struct BlinnPhongShader : public IShader {
    //Per object data
    Texture *albedoT;  //albedo (base color) texture, sampled per fragment
    Matrix4 MVP, MV, V, N;
    float ambientStrength = 0.05, diffStrength=1 , specularStrength= 0.5;
    Vector3f lightColor{1,1,1};
    //Per vertex data (one array slot per triangle vertex)
    Vector3f normals[3], viewDir[3], UV[3];
    float diff, spec, shininess = 128;
    //Per fragment data
    Vector3f ambient, diffuse, specular, interpNormal, interpViewDir, interpUV;
    Vector3f halfwayDir, lightDir;
    Vector3f interpCol, white{255,255,255};

    //Stores per-vertex normal, UV and view direction; returns the projected position.
    Vector3f vertex(const Vector3f &vertex, const Vector3f &normal,
                    const Vector3f &textureVals,const Vector3f &tangent,
                    int index, const Vector3f &light) override{
        normals[index] = N.matMultDir(normal).normalized();
        UV[index] = textureVals;
        viewDir[index] = MV.matMultVec(vertex).normalized();
        lightDir = V.matMultDir(light).normalized();
        return MVP.matMultVec(vertex);
    }

    //Blinn-Phong: the specular term uses the halfway vector between light
    //and view directions instead of a reflection vector.
    Vector3f fragment(float u, float v) override{
        //Interpolated stuff
        interpNormal = (normals[0] + (normals[1] - normals[0])* u + (normals[2] - normals[0]) * v).normalized();
        interpViewDir = viewDir[0] + (viewDir[1] - viewDir[0])* u + (viewDir[2] - viewDir[0]) * v;
        interpUV = UV[0] + (UV[1] - UV[0])* u + (UV[2] - UV[0]) * v;
        //Albedo
        interpCol = albedoT->getPixelVal(interpUV.x, interpUV.y);
        //Ambient
        ambient = lightColor * ambientStrength;
        //Diffuse
        diff = std::max(0.0f, interpNormal.dotProduct(lightDir));
        diffuse = lightColor * diff * diffStrength;
        //Specular: viewDir holds the view-space vertex position (a vector
        //away from the eye), so lightDir - interpViewDir is light + toCamera.
        halfwayDir = (lightDir - interpViewDir).normalized();
        spec = std::pow(std::max(0.0f, interpNormal.dotProduct(halfwayDir)), shininess);
        specular = lightColor * spec * specularStrength;
        return (ambient + diffuse) * interpCol + specular * white;
    }
};
// Shader that uses texture mapping extensively
struct TextureMapShader : public IShader {
    //Variables set per model
    Texture *albedoT, *normalT, *ambientOT;  //albedo, normal and ambient-occlusion maps
    Matrix4 MVP, MV, V, M, N;
    Vector3f cameraPos;  //camera position, combined with the M-transformed vertex below
    //Light Variables
    Vector3f lightColor{1,1,1}, white{1,1,1};
    float ambientStrength = 0.1, diffStrength = 0.9, specularStrength = 0.8;
    float diff, spec, shininess = 128;
    Vector3f lightDir[3];
    //Variables set per vertex
    Vector3f viewDir[3], texCoords[3];
    Vector3f normal_WS, tangent_WS, biTangent_WS;  //TBN basis vectors
    Matrix4 TBN;
    //Interpolated variables
    Vector3f interpCoords, interpLightDir, interpNormal,
             interpViewDir, interpCol, interpAO;
    //Per fragment
    Vector3f ambient, diffuse, specular ;
    Vector3f halfwayDir;

    //Builds the TBN matrix from normal and tangent, then moves the light and
    //view vectors into tangent space so the fragment stage can use the
    //normal-map sample directly.
    Vector3f vertex(const Vector3f &vertex, const Vector3f &normal,
                    const Vector3f &textureVals, const Vector3f &tangent,
                    int index, const Vector3f &light) override{
        //Creating TBN matrix
        normal_WS = N.matMultDir(normal).normalized();
        tangent_WS = N.matMultDir(tangent).normalized();
        biTangent_WS = normal_WS.crossProduct(tangent_WS);
        TBN = Matrix4::TBNMatrix(tangent_WS, biTangent_WS, normal_WS);
        //Getting UV coordinates for use in both albedo and normal textures
        texCoords[index] = textureVals;
        //Passing all lighting related data to tangent space
        //NOTE(review): matMultVec (point transform) is used on direction
        //vectors here, while PBRShader uses matMultDir for the same job —
        //confirm which is intended.
        lightDir[index] = TBN.matMultVec(light);
        viewDir[index] = TBN.matMultVec(cameraPos - M.matMultVec(vertex));
        return MVP.matMultVec(vertex);
    }

    //Blinn-Phong lighting in tangent space driven by albedo, AO and normal maps.
    Vector3f fragment(float u, float v) override{
        //Interpolated attributes
        interpCoords = texCoords[0] + (texCoords[1] - texCoords[0])* u + (texCoords[2] - texCoords[0]) * v;
        interpLightDir = lightDir[0] + (lightDir[1] - lightDir[0])* u + (lightDir[2] - lightDir[0]) * v;
        interpViewDir = viewDir[0] + (viewDir[1] - viewDir[0])* u + (viewDir[2] - viewDir[0]) * v;
        //Reading albedo and normal data from textures
        interpCol = albedoT->getPixelVal(interpCoords.x, interpCoords.y);
        interpAO = ambientOT->getIntensityVal(interpCoords.x, interpCoords.y);
        //NOTE(review): the sampled normal is used as-is; confirm getPixelVal
        //already remaps it from [0,1] storage into the [-1,1] vector range.
        interpNormal = normalT->getPixelVal(interpCoords.x, interpCoords.y);
        interpNormal = interpNormal.normalized();
        //Ambient
        ambient = lightColor * ambientStrength * interpAO;
        //Diffuse
        diff = std::max(0.0f, interpNormal.dotProduct(interpLightDir));
        diffuse = lightColor * diff * diffStrength;
        //Specular
        halfwayDir = (interpLightDir + interpViewDir).normalized();
        spec = std::pow(std::max(0.0f, interpNormal.dotProduct(halfwayDir)), shininess);
        specular = lightColor * spec * specularStrength;
        return (ambient + diffuse) * interpCol + specular * white;
    }
};
// Shader that uses texture mapping and a PBR approach for shading
// Uses a cook-torrance BRDF for direct light sources.
struct PBRShader : public IShader {
    //Variables set per model
    Texture *albedoT, *normalT, *ambientOT, *roughT, *metalT;
    Matrix4 MVP, MV, V, M, N;
    Vector3f cameraPos;
    //Light Variables
    Vector3f F0{0.04, 0.04, 0.04}, F0corrected; //Default value dielectric
    //Caller-owned arrays; lightDirVal stores numLights*3 tangent-space light
    //vectors (3 per triangle vertex, see the indexing in vertex()/fragment()).
    Vector3f *lightDirVal, *lightCol, *lightPos;
    //NOTE(review): the member nDotL is shadowed by a local array of the same
    //name inside fragment() — the member appears unused there.
    float nDotL, nDotV, ambientInt = 0.01;
    int numLights;
    //Variables set per vertex
    Vector3f viewDir[3], texCoords[3];
    Vector3f normal_WS, tangent_WS, biTangent_WS;
    Matrix4 TBN;
    //Interpolated variables
    Vector3f interpCoords, interpNormal, interpViewDir, interpCol;
    //Per fragment
    Vector3f radianceOut, ambient;
    float interpRough, interpAO, interpMetal;
    float uTexture, vTexture, intPart;

    //BRDF functions
    //Schlick approximation of the Fresnel term: F0 + (1 - F0)(1 - cosTheta)^5.
    Vector3f fresnelSchlick(float cosTheta, Vector3f &fresnel0 ){
        float invcCosTheta = 1.0 - cosTheta;
        return fresnel0 + (Vector3f(1.0)- fresnel0) * (invcCosTheta * invcCosTheta * invcCosTheta * invcCosTheta * invcCosTheta);
    }
    //GGX/Trowbridge-Reitz normal distribution function; the 1/pi factor is
    //folded in here via M_1_PIf32.
    float distributionGGX(Vector3f normal, Vector3f halfway, float roughness){
        float a = roughness*roughness;
        float a2 = a*a;
        float NdotH = std::max(normal.dotProduct(halfway), 0.0f);
        float NdotH2 = NdotH*NdotH;
        float denom = (NdotH2 * (a2 - 1.0f) + 1.0f);
        denom = M_1_PIf32/ (denom * denom);
        return a2 * denom;
    }
    //Schlick-GGX geometry term for a single direction.
    float GeometrySchlickGGX(float Ndot, float roughness){
        float r = (roughness + 1.0f);
        float k = (r*r) / 8.0f; //Only useful for direct lighting must be changed in ibr
        float denom = 1.0f / (Ndot * (1.0f- k) + k);
        return Ndot * denom;
    }
    //Smith's method: combined geometric obstruction (light) and shadowing (view).
    float GeometrySmith(float roughness, float nDL, float nDV){
        return GeometrySchlickGGX(nDL, roughness) * GeometrySchlickGGX(nDV, roughness);
    }

    //Vertex shader: builds the TBN basis and moves per-light and view
    //vectors into tangent space; returns the projected position.
    Vector3f vertex(const Vector3f &vertex, const Vector3f &normal,
                    const Vector3f &textureVals, const Vector3f &tangent,
                    int index, const Vector3f &light = Vector3f{1,1,1}) override
    {
        //Creating TBN matrix
        normal_WS = N.matMultDir(normal).normalized();
        tangent_WS = N.matMultDir(tangent).normalized();
        biTangent_WS = normal_WS.crossProduct(tangent_WS);
        TBN = Matrix4::TBNMatrix(tangent_WS, biTangent_WS, normal_WS);
        //Getting UV coordinates for use in all textures
        texCoords[index] = textureVals;
        //Passing all lighting related data to tangent space
        for(int lIndex = 0; lIndex < numLights; ++lIndex){
            //Layout: 3 consecutive entries per light, one per triangle vertex.
            int indc2 = (lIndex*3) + index;
            lightDirVal[indc2] = TBN.matMultDir(lightPos[lIndex]);
        }
        viewDir[index] = TBN.matMultDir(cameraPos - M.matMultVec(vertex));
        return MVP.matMultVec(vertex);
    }

    //Fragment shader: Cook-Torrance direct lighting summed over all lights,
    //plus a simple AO-scaled ambient term.
    Vector3f fragment(float u, float v) override{
        //Interpolated attributes
        interpCoords = texCoords[0] + (texCoords[1] - texCoords[0])* u + (texCoords[2] - texCoords[0]) * v;
        interpViewDir = viewDir[0] + (viewDir[1] - viewDir[0])* u + (viewDir[2] - viewDir[0]) * v;
        //Correcting UV's for tiling (keep only the fractional part)
        uTexture = std::modf(interpCoords.x, &intPart);
        vTexture = std::modf(interpCoords.y, &intPart);
        //Reading data from textures for use in lighting calculations
        interpCol = albedoT->getPixelVal(uTexture, vTexture);
        interpAO = ambientOT->getIntensityVal(uTexture, vTexture);
        interpRough = roughT->getIntensityVal(uTexture, vTexture);;
        interpMetal = metalT->getIntensityVal(uTexture, vTexture);
        interpNormal = normalT->getPixelVal(uTexture, vTexture);
        interpNormal = interpNormal.normalized();
        interpViewDir = interpViewDir.normalized();
        //Varying f0 based on metallicness of surface
        float invMetal = (1.0f-interpMetal);
        F0corrected = (F0 * invMetal) + (interpCol * interpMetal);
        nDotV = std::max(interpNormal.dotProduct(interpViewDir), 0.0f);
        //Setting up Direct Lighting variables
        const int maxLights = numLights;
        //NOTE(review): the runtime-sized stack arrays below are VLAs — a
        //GCC/Clang extension, not standard C++.
        //Fresnel, normal distribution function and geometry occlusion
        Vector3f F[maxLights];
        float NDF[maxLights];
        float G[maxLights];
        //Storing in array for vectorizing
        Vector3f radianceLights[maxLights];
        Vector3f interpLightDir[maxLights];
        Vector3f halfwayDir[maxLights];
        float nDotL[maxLights];
        Vector3f numerator[maxLights];
        float invDenominator[maxLights];
        Vector3f specular[maxLights];
        Vector3f kD[maxLights];
        //Interpolating each light direction for every light
        int val;
        for(int i = 0; i < maxLights; ++i ){
            val = i*3;
            interpLightDir[i] = (lightDirVal[val] + (lightDirVal[val + 1] - lightDirVal[val])* u + (lightDirVal[val + 2] - lightDirVal[val]) * v).normalized();
        }
        //Per light illumination calculations that can be simdified
        //Currently uses widest SIMD array to perform all light iterations in one trip
        //Which I believe leaves some extra
        #pragma omp simd
        for(int light = 0; light < maxLights; ++light ){
            halfwayDir[light] = (interpLightDir[light] + interpViewDir);
            halfwayDir[light] = halfwayDir[light].normalized();
            nDotL[light] = std::fmax(interpNormal.dotProduct(interpLightDir[light]), 0.0f);
            //No problem vectorizing these functions because they are inlined by the compiler
            //And also only consist of math operations to the vectors
            F[light] = fresnelSchlick(std::fmax(halfwayDir[light].dotProduct(interpViewDir), 0.0f), F0corrected);
            NDF[light] = distributionGGX(interpNormal, halfwayDir[light], interpRough);
            G[light] = GeometrySmith(interpRough, nDotL[light] , nDotV);
            //Cook-Torrance specular: (F * G * NDF) / (4 * nDotL * nDotV),
            //with the denominator clamped to avoid division by zero.
            numerator[light] = F[light] * G[light]*NDF[light];
            invDenominator[light] = 1.0f / std::fmax(4.0f * (nDotL[light] * nDotV), 0.001f);
            specular[light] = numerator[light] * invDenominator[light];
            //kD is 1 - F (energy conservation), scaled down for metals
            kD[light] = (Vector3f(1.0f) - F[light])*invMetal;
            //The rendering equation result for a given light
            radianceLights[light] = (kD[light] * (interpCol * (M_1_PIf32)) + specular[light] ) * nDotL[light] * lightCol[light];
        }
        //Summing up all radiance values since SIMD won't work if I do this within the
        //previous loop
        radianceOut.zero();
        for(int i = 0; i < maxLights; ++i) {
            radianceOut += radianceLights[i];
        }
        //Simplistic ambient term
        ambient = interpCol * (ambientInt * interpAO);
        return ambient + radianceOut;
    }
};
#endif |
DRB090-static-local-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
For a variable declared in a scope inside an OpenMP construct:
* private if the variable has an automatic storage duration
* shared if the variable has a static storage duration.
Dependence pairs:
tmp@73:5 vs. tmp@73:5
tmp@73:5 vs. tmp@74:12
*/
#include<stdio.h>
/* DataRaceBench kernel: demonstrates that a block-scope variable with
   STATIC storage duration is shared inside an OpenMP construct, while an
   automatic one is private.  The first parallel region below therefore
   contains an intentional data race — do not "fix" it.  */
int main(int argc, char* argv[])
{
  int i;
  int len=100;
  int a[len], b[len];

  /* initialize both arrays to 0..len-1 */
  for (i=0;i<len;i++)
  { a[i]=i; b[i]=i;}

  /* static storage for a local variable */
#pragma omp parallel
  {
    /* 'tmp' has static storage duration, so one instance is SHARED by
       all threads: the write/read pair below races across iterations
       (this is the documented race of this benchmark). */
    static int tmp;
#pragma omp for schedule(dynamic)
    for (i=0;i<len;i++)
    {
      tmp = a[i]+i;
      a[i] = tmp;
    }
  }

  /* automatic storage for a local variable */
#pragma omp parallel
  {
    /* here 'tmp' is automatic, hence private to each thread: no race. */
    int tmp;
#pragma omp for schedule(dynamic)
    for (i=0;i<len;i++)
    {
      tmp = b[i]+i;
      b[i] = tmp;
    }
  }
  printf("a[50]=%d b[50]=%d\n", a[50], b[50]);
  return 0;
}
|
GB_unaryop__ainv_uint64_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_uint64_int64
// op(A') function:  GB_tran__ainv_uint64_int64

// C type:   uint64_t
// A type:   int64_t
// cast:     uint64_t cij = (uint64_t) aij
// unaryop:  cij = -aij

// type of the A matrix entries
#define GB_ATYPE \
    int64_t

// type of the C matrix entries
#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    int64_t aij = Ax [pA]

// access entry p of the C value array
#define GB_CX(p) Cx [p]

// unary operator: additive inverse (modular for the unsigned result type)
#define GB_OP(z, x) \
    z = -x ;

// casting
#define GB_CASTING(z, x) \
    uint64_t z = (uint64_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_UINT64 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__ainv_uint64_int64
(
    uint64_t *restrict Cx,        // output values, length anz
    const int64_t *restrict Ax,   // input values, length anz
    int64_t anz,                  // number of entries
    int nthreads                  // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // each entry is independent, so a static schedule parallelizes cleanly
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;   // Cx [p] = (uint64_t) (- Ax [p])
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__ainv_uint64_int64
(
    GrB_Matrix C,                      // output matrix
    const GrB_Matrix A,                // input matrix, transposed on the fly
    int64_t **Rowcounts,               // precomputed row counts (phase 1)
    GBI_single_iterator Iter,          // iterator over the slices of A
    const int64_t *restrict A_slice,   // slice boundaries of A
    int naslice                        // number of slices
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template does all the work, using the GB_* macros
    // defined above for type, cast and operator
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
spatial_index.h | /*
* Copyright (c) 2018
* Markus Goetz
*
* This software may be modified and distributed under the terms of MIT-style license.
*
* Description: Indexes the features space to allow fast neighborhood queries
*
* Maintainer: m.goetz
*
* Email: markus.goetz@kit.edu
*/
#ifndef SPATIAL_INDEX_H
#define SPATIAL_INDEX_H
#include <algorithm>
#include <atomic>
#include <cmath>
#include <functional>
#include <hdf5.h>
#include <limits>
#include <numeric>
#include <omp.h>
#include <parallel/algorithm>
#include <vector>
#ifdef WITH_OUTPUT
#include <iostream>
#endif
#include "constants.h"
#include "dataset.h"
#ifdef WITH_MPI
#include <mpi.h>
#include "mpi_util.h"
#endif
// Search-radius configuration for the index.
// NOTE(review): 'dimensions' presumably lists the feature dimensions the
// epsilon applies to — confirm against the callers of this header.
struct EPSConfig {
    std::vector<size_t> dimensions;
    float epsilon = 0.0;  // neighborhood radius used to size the grid cells
};
template <typename T>
class SpatialIndex {
Dataset& m_data;
std::vector<float> m_epsilon_map;
std::vector<T> m_minimums;
std::vector<T> m_maximums;
std::vector<size_t> m_cell_dimensions;
size_t m_total_cells;
Cell m_last_cell;
Cells m_cells;
CellHistogram m_cell_histogram;
CellIndex m_cell_index;
std::vector<size_t> m_swapped_dimensions;
size_t m_halo;
size_t m_global_point_offset;
std::vector<size_t> m_initial_order;
#ifdef WITH_MPI
int m_rank;
int m_size;
std::vector<CellBounds> m_cell_bounds;
std::vector<ComputeBounds> m_compute_bounds;
#endif
public:
// implementations of the custom omp reduction operations
// Element-wise minimum: folds omp_in into omp_out (same semantics as
// std::min per element, including ties/NaN keeping the current value).
static void vector_min(std::vector<T>& omp_in, std::vector<T>& omp_out) {
    const size_t count = omp_out.size();
    for (size_t i = 0; i < count; ++i) {
        if (omp_in[i] < omp_out[i]) {
            omp_out[i] = omp_in[i];
        }
    }
}
#pragma omp declare reduction(vector_min: std::vector<T>: vector_min(omp_in, omp_out)) initializer(omp_priv = omp_orig)
static void vector_max(std::vector<T>& omp_in, std::vector<T>& omp_out) {
for (size_t index = 0; index < omp_out.size(); ++index) {
omp_out[index] = std::max(omp_in[index], omp_out[index]);
}
}
#pragma omp declare reduction(vector_max: std::vector<T>: vector_max(omp_in, omp_out)) initializer(omp_priv = omp_orig)
// Merge two per-thread cell histograms by summing the counts per cell id.
static void merge_histograms(CellHistogram& omp_in, CellHistogram& omp_out) {
    for (const auto& entry : omp_in) {
        omp_out[entry.first] += entry.second;
    }
}
#pragma omp declare reduction(merge_histograms: CellHistogram: merge_histograms(omp_in, omp_out)) initializer(omp_priv = omp_orig)
private:
// Assign each locally stored point its global index: chunk-local index i
// plus this rank's global row offset.
void compute_initial_order() {
    #pragma omp parallel for
    for (size_t i = 0; i < m_data.m_chunk[0]; ++i) {
        // NOTE(review): '+=' only yields the global index if m_initial_order
        // was zero-initialized beforehand; otherwise this should be '='.
        // Confirm against the constructor (not visible here).
        m_initial_order[i] += i + m_data.m_offset[0];
    }
}
void compute_space_dimensions() {
    // Determines the per-dimension minimum/maximum of the local point chunk,
    // then (with MPI) reduces them so every rank knows the global extents.
    const size_t dimensions = m_minimums.size();
    // NOTE(review): despite the name, "bytes" counts T elements (points x
    // dimensions), not bytes; end_point is an element-wise end pointer
    const size_t bytes = m_cells.size() * dimensions;
    const T* end_point = static_cast<T*>(m_data.m_p) + bytes;
    // compute the local feature space minimums and maximums in parallel
    // (local references are needed because the OpenMP reduction clauses
    // cannot name class members directly)
    auto& minimums = m_minimums;
    auto& maximums = m_maximums;
    #pragma omp parallel for reduction(vector_min: minimums) reduction(vector_max: maximums)
    for (T* point = static_cast<T*>(m_data.m_p); point < end_point; point += dimensions) {
        for (size_t d = 0; d < dimensions; ++d) {
            const T& coordinate = point[d];
            minimums[d] = std::min(minimums[d], coordinate);
            maximums[d] = std::max(maximums[d], coordinate);
        }
    }
    // exchange globally, if necessary
#ifdef WITH_MPI
    MPI_Allreduce(MPI_IN_PLACE, m_minimums.data(), dimensions, MPI_Types<T>::map(), MPI_MIN, MPI_COMM_WORLD);
    MPI_Allreduce(MPI_IN_PLACE, m_maximums.data(), dimensions, MPI_Types<T>::map(), MPI_MAX, MPI_COMM_WORLD);
#endif
}
void compute_cell_dimensions() {
    // Derives the cell count of each feature dimension from the global
    // extents and the per-dimension epsilon; accumulates the grid's total
    // cell count and records the end sentinel.
    const size_t dimension_count = m_cell_dimensions.size();
    for (size_t d = 0; d != dimension_count; ++d) {
        const size_t cells_in_dimension =
            static_cast<size_t>(std::ceil((m_maximums[d] - m_minimums[d]) / m_epsilon_map[d])) + 1;
        m_cell_dimensions[d] = cells_in_dimension;
        m_total_cells *= cells_in_dimension;
    }
    m_last_cell = m_total_cells;
}
void swap_dimensions() {
    // Orders the feature dimensions by their cell counts and derives the
    // halo slab size used for the MPI domain decomposition.
    // fill the dimensions with an initially correct order
    std::iota(m_swapped_dimensions.begin(), m_swapped_dimensions.end(), 0);
    // sort the dimensions ASCENDING by their cell counts (the comparator is
    // operator<, so the dimension with the most cells ends up last)
    std::sort(m_swapped_dimensions.begin(), m_swapped_dimensions.end(), [&] (size_t a, size_t b) {
        return m_cell_dimensions[a] < m_cell_dimensions[b];
    });
    // determine the halo size: one "slab" of the grid, i.e. the total cell
    // count divided by the extent of the largest (last sorted) dimension
    m_halo = m_total_cells / m_cell_dimensions[m_swapped_dimensions.back()];
}
void compute_cells() {
    // Maps every local point to its linearized grid cell id (mixed-radix
    // encoding in swapped-dimension order) and builds the points-per-cell
    // histogram, merged across threads via the custom OpenMP reduction.
    CellHistogram histogram;
    const size_t dimensions = m_data.m_chunk[1];
    #pragma omp parallel for reduction(merge_histograms: histogram)
    for (size_t i = 0; i < m_data.m_chunk[0]; ++i) {
        const T* point = static_cast<T*>(m_data.m_p) + i * dimensions;
        size_t cell = 0;
        size_t accumulator = 1;
        // cell = sum over dims of (coordinate index * product of prior extents)
        for (size_t d : m_swapped_dimensions) {
            size_t index = static_cast<size_t>(std::floor((point[d] - m_minimums[d]) / m_epsilon_map[d]));
            cell += index * accumulator;
            accumulator *= m_cell_dimensions[d];
        }
        m_cells[i] = cell;
        ++histogram[cell];
    }
    m_cell_histogram.swap(histogram);
}
void compute_cell_index() {
    // Prefix-sums the cell histogram into per-cell (offset, count) locators
    // describing where each cell's points will live once sorted.
    size_t running_offset = 0;
    for (auto& bucket : m_cell_histogram) {
        auto& locator = m_cell_index[bucket.first];
        locator.first = running_offset;
        locator.second = bucket.second;
        running_offset += bucket.second;
    }
    // sentinel entry marking the end of the point array
    auto& sentinel = m_cell_index[m_last_cell];
    sentinel.first = m_cells.size();
    sentinel.second = 0;
}
void sort_by_cell() {
    // Reorders points, cell ids, and global-order ids so that points of the
    // same cell are contiguous, at the offsets recorded in m_cell_index
    // (a parallel counting-sort scatter).
    const hsize_t items = m_data.m_chunk[0];
    const hsize_t dimensions = m_data.m_chunk[1];
    // initialize out-of-place buffers
    Cells reordered_cells(items);
    std::vector<size_t> reordered_indices(items);
    std::vector<T> reordered_points(items * dimensions);
    // memory for offset of already placed items (one atomic cursor per cell)
    std::unordered_map<Cell, std::atomic<size_t>> offsets;
    for (const auto& cell_index : m_cell_index) {
        offsets[cell_index.first].store(0);
    }
    // sorting the points and cells out-of-place, memorize the original order
    // NOTE(review): operator[] on offsets/m_cell_index inside the parallel
    // loop is only race-free because every key already exists (inserted above
    // and in compute_cell_index), so no rehash/insert can occur — confirm.
    // The atomic post-increment hands each point a unique destination slot;
    // the order of points WITHIN a cell is therefore nondeterministic.
    #pragma omp parallel for
    for (size_t i = 0; i < items; ++i) {
        const Cell cell = m_cells[i];
        const auto& locator = m_cell_index[cell];
        const size_t copy_to = locator.first + (offsets[cell]++);
        reordered_cells[copy_to] = m_cells[i];
        reordered_indices[copy_to] = m_initial_order[i];
        for (size_t d = 0; d < dimensions; ++d) {
            reordered_points[copy_to * dimensions + d] = static_cast<T*>(m_data.m_p)[i * dimensions + d];
        }
    }
    // move the out-of-place results into the correct in-place buffers
    m_cells.swap(reordered_cells);
    m_initial_order.swap(reordered_indices);
    std::copy(reordered_points.begin(), reordered_points.end(), static_cast<T*>(m_data.m_p));
}
#ifdef WITH_MPI
CellHistogram compute_global_histogram() {
    // Gathers every rank's local cell histogram and sums them into a global
    // one, so all ranks can derive identical load-balancing bounds.
    // fetch cell histograms across all nodes
    // NOTE(review): variable-length arrays are a GNU extension, not ISO C++
    int send_counts[m_size];
    int send_displs[m_size];
    int recv_counts[m_size];
    int recv_displs[m_size];
    // determine the number of entries in each process' histogram
    // (each entry is flattened to two size_t values: cell id and count)
    for (int i = 0; i < m_size; ++i) {
        send_counts[i] = m_cell_histogram.size() * 2;
        send_displs[i] = 0;
    }
    MPI_Alltoall(send_counts, 1, MPI_INT, recv_counts, 1, MPI_INT, MPI_COMM_WORLD);
    // ... based on this information we can calculate the displacements into the buffer
    size_t entries_count = 0;
    for (int i = 0; i < m_size; ++i) {
        recv_displs[i] = entries_count;
        entries_count += recv_counts[i];
    }
    // serialize the local histogram into a flat buffer
    std::vector<size_t> send_buffer(m_cell_histogram.size() * 2);
    size_t send_buffer_index = 0;
    for (const auto& item : m_cell_histogram) {
        send_buffer[send_buffer_index++] = item.first;
        send_buffer[send_buffer_index++] = item.second;
    }
    // exchange the histograms
    std::vector<size_t> recv_buffer(entries_count);
    MPI_Alltoallv(
        send_buffer.data(), send_counts, send_displs, MPI_UNSIGNED_LONG,
        recv_buffer.data(), recv_counts, recv_displs, MPI_UNSIGNED_LONG, MPI_COMM_WORLD
    );
    // sum-up the entries into a global histogram
    CellHistogram global_histogram;
    for (size_t i = 0; i < entries_count; i += 2) {
        global_histogram[recv_buffer[i]] += recv_buffer[i + 1];
    }
    // remember the new globally last cell
    // (rbegin assumes CellHistogram is an ordered, non-empty map — TODO confirm)
    m_last_cell = global_histogram.rbegin()->first + 1;
    return global_histogram;
}
size_t compute_score(const Cell cell_id, const CellHistogram& cell_histogram) {
    // Load-balancing cost estimate for one cell: (points in the cell) times
    // (points in the cell and all of its adjacent cells) — proportional to
    // the pairwise distance computations a rank owning this cell must do.
    const hsize_t dimensions = m_data.m_chunk[1];
    // allocate buffer for the dimensions steps
    Cells neighboring_cells;
    neighboring_cells.reserve(std::pow(3, dimensions));
    neighboring_cells.push_back(cell_id);
    // accumulators for sub-space traversal
    size_t cells_in_lower_space = 1;
    size_t cells_in_current_space = 1;
    size_t points_in_cell = cell_histogram.find(cell_id)->second;
    size_t number_of_points = points_in_cell;
    // iterate through all neighboring cells and up the number of points stored there:
    // grow the neighbor set one dimension at a time, adding the -1/+1 cell
    // along that dimension for every cell found so far
    for (size_t d : m_swapped_dimensions) {
        cells_in_current_space *= m_cell_dimensions[d];
        for (size_t i = 0, end = neighboring_cells.size(); i < end; ++i) {
            const Cell current = neighboring_cells[i];
            // cell to the left (the modulo test rejects wrap past the grid edge)
            const Cell left = current - cells_in_lower_space;
            if (current % cells_in_current_space >= cells_in_lower_space) {
                const auto& locator = cell_histogram.find(left);
                number_of_points += locator != cell_histogram.end() ? locator->second : 0;
                neighboring_cells.push_back(left);
            }
            // cell to the right
            const Cell right = current + cells_in_lower_space;
            if (current % cells_in_current_space < cells_in_current_space - cells_in_lower_space) {
                const auto& locator = cell_histogram.find(right);
                number_of_points += locator != cell_histogram.end() ? locator->second : 0;
                neighboring_cells.push_back(right);
            }
        }
        cells_in_lower_space = cells_in_current_space;
    }
    return points_in_cell * number_of_points;
}
void compute_bounds(const CellHistogram& cell_histogram) {
    // Splits the global cell range into m_size chunks of roughly equal score
    // (see compute_score), filling m_compute_bounds (the point sub-range a
    // rank owns) and m_cell_bounds (owned cells plus halo cells either side).
    // make space in for the values in the bounds variables
    m_cell_bounds.resize(m_size);
    m_compute_bounds.resize(m_size);
    // compute the score value for each cell and accumulate the total score first...
    std::vector<size_t> scores(cell_histogram.size(), 0);
    size_t total_score = 0;
    size_t score_index = 0;
    for (auto& pair : cell_histogram) {
        const Cell cell = pair.first;
        const size_t score = compute_score(cell, cell_histogram);
        scores[score_index++] = score;
        total_score += score;
    }
    // ...to determine the actual bounds
    const size_t score_per_chunk = total_score / m_size + 1;
    size_t accumulator = 0;
    size_t target_rank = 0;
    size_t lower_split_point = 0;
    size_t bound_lower_start = 0;
    auto cell_buckets = cell_histogram.begin();
    // iterate over the score array and find the point where the score per chunk is exceeded
    for (size_t i = 0; i < scores.size(); ++ i) {
        // post-increment yields the iterator for THIS entry and advances
        const auto& cell_bucket = cell_buckets++;
        const Cell cell = cell_bucket->first;
        const size_t score = scores[i];
        accumulator += score;
        // a single cell's score may span several chunks, hence the while
        while (accumulator > score_per_chunk) {
            // proportional split of the cell's points at the chunk boundary;
            // score / count >= 1 here since score >= count^2, so no div-by-0
            const size_t split_point = (accumulator - score_per_chunk) / (score / cell_bucket->second);
            // we have have identified the bounds in which the rank needs to compute locally
            m_compute_bounds[target_rank][0] = lower_split_point;
            m_compute_bounds[target_rank][1] = split_point;
            lower_split_point = split_point;
            // determine the cell bounds, i.e. all cells that we need including halo
            auto& bound = m_cell_bounds[target_rank];
            const size_t cell_offset = (bound_lower_start % m_halo) + m_halo;
            bound[0] = cell_offset > bound_lower_start ? 0 : bound_lower_start - cell_offset;
            bound[1] = bound_lower_start;
            bound[2] = cell + 1;
            bound[3] = std::min((bound[2] / m_halo) * m_halo + (m_halo * 2), m_last_cell);
            // update the state, a whole chunk has been assigned
            bound_lower_start = bound[2];
            accumulator = split_point * score / cell_bucket->second;
            // start assigning to the next rank
            ++target_rank;
        }
        // the left-overs are assigned to the current rank
        if (static_cast<int>(target_rank) == m_size - 1 or i == cell_histogram.size() - 1) {
            // compute bounds first
            m_compute_bounds[target_rank][0] = lower_split_point;
            m_compute_bounds[target_rank][1] = 0;
            // cell bounds including halo next
            auto& bound = m_cell_bounds[target_rank];
            const size_t cell_offset = (bound_lower_start % m_halo) + m_halo;
            bound[0] = cell_offset > bound_lower_start ? 0 : bound_lower_start - cell_offset;
            bound[1] = bound_lower_start;
            bound[2] = m_last_cell;
            bound[3] = m_last_cell;
            // we are done here
            break;
        }
    }
}
void redistribute_dataset() {
    // Ships every rank the slice of our cell-sorted points that falls into
    // its assigned cell bounds (including halo cells); slices for different
    // ranks may overlap because neighboring ranks share halo cells.
    const size_t dimensions = m_data.m_chunk[1];
    // calculate the send number of points to be transmitted to each rank
    // NOTE(review): variable-length arrays are a GNU extension, not ISO C++
    int send_counts[m_size];
    int send_displs[m_size];
    int recv_counts[m_size];
    int recv_displs[m_size];
    for (int i = 0; i < m_size; ++i) {
        const auto& bound = m_cell_bounds[i];
        // point offsets of the first/last cell in rank i's halo-extended range
        const size_t lower = m_cell_index.lower_bound(bound[0])->second.first;
        const size_t upper = m_cell_index.lower_bound(bound[3])->second.first;
        send_displs[i] = lower * dimensions;
        send_counts[i] = upper * dimensions - send_displs[i];
    }
    // exchange how much data we send/receive to and from each rank
    MPI_Alltoall(send_counts, 1, MPI_INT, recv_counts, 1, MPI_INT, MPI_COMM_WORLD);
    for (int i = 0; i < m_size; ++i) {
        recv_displs[i] = (i == 0) ? 0 : (recv_displs[i - 1] + recv_counts[i - 1]);
    }
    // calculate the corresponding send and receive counts for the label/order vectors
    // (counts in T elements divided by the dimensionality give item counts)
    size_t total_recv_items = 0;
    int send_counts_labels[m_size];
    int send_displs_labels[m_size];
    int recv_counts_labels[m_size];
    int recv_displs_labels[m_size];
    for (int i = 0; i < m_size; ++i) {
        total_recv_items += recv_counts[i];
        send_counts_labels[i] = send_counts[i] / dimensions;
        send_displs_labels[i] = send_displs[i] / dimensions;
        recv_counts_labels[i] = recv_counts[i] / dimensions;
        recv_displs_labels[i] = recv_displs[i] / dimensions;
    }
    // allocate new buffers for the points and the order vectors
    T* point_buffer = new T[total_recv_items];
    std::vector<size_t> order_buffer(total_recv_items / dimensions);
    // actually transmit the data
    MPI_Alltoallv(
        static_cast<T*>(m_data.m_p), send_counts, send_displs, MPI_Types<T>::map(),
        point_buffer, recv_counts, recv_displs, MPI_Types<T>::map(), MPI_COMM_WORLD
    );
    MPI_Alltoallv(
        m_initial_order.data(), send_counts_labels, send_displs_labels, MPI_UNSIGNED_LONG,
        order_buffer.data(), recv_counts_labels, recv_displs_labels, MPI_UNSIGNED_LONG, MPI_COMM_WORLD
    );
    // clean up the previous data (cells and index must be recomputed by the caller)
    delete[] static_cast<T*>(m_data.m_p);
    m_cells.clear();
    m_cell_index.clear();
    // assign the new data
    const hsize_t new_item_count = total_recv_items / dimensions;
    m_data.m_chunk[0] = new_item_count;
    m_cells.resize(new_item_count);
    m_data.m_p = point_buffer;
    m_initial_order.swap(order_buffer);
}
void compute_global_point_offset() {
    // Exclusive prefix sum across ranks of the locally held point count
    // (as delimited by the halo bounds), yielding this rank's first global
    // point id. MPI_Exscan leaves rank 0's receive buffer undefined, hence
    // the explicit reset to 0.
    m_global_point_offset = upper_halo_bound() - lower_halo_bound();
    MPI_Exscan(MPI_IN_PLACE, &m_global_point_offset, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD);
    if (m_rank == 0) m_global_point_offset = 0;
}
void sort_by_order(Clusters& clusters) {
    // LSD radix sort of this rank's own points (halos excluded) by their
    // original global index, reordering the cluster labels, order ids, and
    // raw point data in lockstep. Prepares the inverse redistribution in
    // recover_initial_order.
    // allocate the radix buckets (one digit histogram per radix pass)
    const size_t maximum_digit_count = static_cast<size_t>(std::ceil(std::log10(m_data.m_shape[0])));
    std::vector<std::vector<size_t>> buckets(maximum_digit_count, std::vector<size_t>(RADIX_BUCKETS));
    // count the items per bucket
    size_t lower_bound = lower_halo_bound();
    size_t upper_bound = upper_halo_bound();
    const size_t items = upper_bound - lower_bound;
    #pragma omp parallel for schedule(static)
    for (size_t i = 0; i < items; ++i) {
        for (size_t j = 0; j < maximum_digit_count; ++j) {
            const size_t base = RADIX_POWERS[j];
            const size_t digit = m_initial_order[i + lower_bound] / base % RADIX_BUCKETS;
            #pragma omp atomic
            ++buckets[j][digit];
        }
    }
    // accumulate the bucket entries to get the offsets (per-digit prefix sums)
    #pragma omp parallel for shared(buckets)
    for (size_t j = 0; j < maximum_digit_count; ++j) {
        for (size_t f = 1; f < RADIX_BUCKETS; ++f) {
            buckets[j][f] += buckets[j][f-1];
        }
    }
    // actually reorder the points out-of-place
    const hsize_t dimensions = m_data.m_chunk[1];
    Clusters cluster_buffer(items);
    std::vector<size_t> order_buffer(items);
    T* point_buffer = new T[items * dimensions];
    for (size_t j = 0; j < maximum_digit_count; ++j) {
        const size_t base = RADIX_POWERS[j];
        const size_t point_offset = lower_bound * dimensions;
        // assign the number to the respective radix bucket; the loop walks
        // backwards for stability and terminates via unsigned wrap-around
        // (i becomes SIZE_MAX, which fails i < items)
        for (size_t i = items - 1; i < items; --i) {
            size_t unit = m_initial_order[i + lower_bound] / base % RADIX_BUCKETS;
            size_t pos = --buckets[j][unit];
            order_buffer[pos] = m_initial_order[i + lower_bound];
            cluster_buffer[pos] = clusters[i + lower_bound];
            for (size_t d = 0; d < dimensions; ++d) {
                point_buffer[pos * dimensions + d] = static_cast<T*>(m_data.m_p)[i * dimensions + d + point_offset];
            }
        }
        // swap the buffers (ping-pong between old and new storage per pass)
        clusters.swap(cluster_buffer);
        m_initial_order.swap(order_buffer);
        T* temp = static_cast<T*>(m_data.m_p);
        m_data.m_p = point_buffer;
        point_buffer = temp;
        // this is somewhat hacky, in the first round we have the original buffers including(!) halos
        // after the first swap, we do not anymore, since we reduced all the elements done to the non-halo zone
        // we can easily adjust to that by removing the initial halo lower_bound offset
        if (j == 0) {
            lower_bound = 0;
        }
    }
    // clean up (point_buffer holds whichever buffer m_data.m_p does not)
    delete[] point_buffer;
    m_data.m_chunk[0] = items;
}
#endif
public:
/**
 * Builds the spatial cell index over the given dataset chunk: assigns global
 * point ids, computes the feature-space extents and the cell grid, maps every
 * point to a cell, and sorts the points by cell for O(1) neighborhood access.
 * With MPI enabled the points are additionally redistributed across the ranks
 * according to a score-based load balancing (including halo cells).
 *
 * @param data        the dataset chunk handled by this process (held by reference)
 * @param epsilon_map search radius (cell edge length) per feature dimension
 */
SpatialIndex(Dataset& data, std::vector<float> epsilon_map)
    : m_data(data),
      m_epsilon_map(std::move(epsilon_map)),
      m_minimums(data.m_chunk[1], std::numeric_limits<T>::max()),
      // BUGFIX: was std::numeric_limits<T>::min() — for floating-point T that
      // is the smallest POSITIVE value, so the maximum of an all-negative
      // feature column would never be found by compute_space_dimensions;
      // lowest() is the true most-negative value (identical to min() for
      // integral T, so the fix is behavior-preserving there)
      m_maximums(data.m_chunk[1], std::numeric_limits<T>::lowest()),
      m_cell_dimensions(data.m_chunk[1], 0),
      m_total_cells(1),
      m_last_cell(0),
      m_cells(data.m_chunk[0], 0),
      m_swapped_dimensions(data.m_chunk[1], 0),
      m_halo(0),
      m_global_point_offset(0),
      m_initial_order(data.m_chunk[0]) {
    // determine the space dimensions, the corresponding number of cells for each feature dimension
#ifdef WITH_MPI
    MPI_Comm_rank(MPI_COMM_WORLD, &m_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &m_size);
#endif
#ifdef WITH_OUTPUT
    double start = omp_get_wtime();
#ifdef WITH_MPI
    if (m_rank == 0) {
#endif
    std::cout << "Computing cell space" << std::endl;
    std::cout << "\tComputing dimensions..." << std::flush;
#ifdef WITH_MPI
    }
#endif
#endif
    compute_initial_order();
    compute_space_dimensions();
    compute_cell_dimensions();
    swap_dimensions();
    // determine the cell for each point, compute the cell histogram and index the points as if they were sorted...
#ifdef WITH_OUTPUT
#ifdef WITH_MPI
    if (m_rank == 0) {
#endif
    std::cout << "[OK] in " << omp_get_wtime() - start << std::endl;
    std::cout << "\tComputing cells... " << std::flush;
#ifdef WITH_MPI
    }
#endif
    start = omp_get_wtime();
#endif
    compute_cells();
    compute_cell_index();
    // ... actually sort the points to allow for O(1) access during neighborhood queries
#ifdef WITH_OUTPUT
#ifdef WITH_MPI
    if (m_rank == 0) {
#endif
    std::cout << "[OK] in " << omp_get_wtime() - start << std::endl;
    std::cout << "\tSorting points... " << std::flush;
#ifdef WITH_MPI
    }
#endif
    start = omp_get_wtime();
#endif
    sort_by_cell();
#ifdef WITH_OUTPUT
#ifdef WITH_MPI
    if (m_rank == 0) {
#endif
    std::cout << "[OK] in " << omp_get_wtime() - start << std::endl;
#ifdef WITH_MPI
    }
#endif
#endif
    // communicate the cell histograms and redistribute the points - only necessary when MPI is turned on
#ifdef WITH_MPI
#ifdef WITH_OUTPUT
    start = omp_get_wtime();
    if (m_rank == 0) {
        std::cout << "\tDistributing points... " << std::flush;
    }
#endif
    // compute a global histogram and redistribute the points based on that
    CellHistogram global_histogram = compute_global_histogram();
    compute_bounds(global_histogram);
    global_histogram.clear();
    redistribute_dataset();
    // after the redistribution we have to reindex the new data yet again
    compute_cells();
    compute_cell_index();
    compute_global_point_offset();
    sort_by_cell();
#ifdef WITH_OUTPUT
    if (m_rank == 0) {
        std::cout << "[OK] in " << omp_get_wtime() - start << std::endl;
    }
#endif
#endif
}
#ifdef WITH_MPI
size_t lower_halo_bound() const {
    // First point index of this rank's own compute region: the offset of its
    // first owned cell, adjusted by the rank's lower compute bound.
    const auto locator = m_cell_index.lower_bound(m_cell_bounds[m_rank][1]);
    return locator->second.first - m_compute_bounds[m_rank][0];
}
size_t upper_halo_bound() const {
    // One-past-the-last point index of this rank's own compute region: the
    // offset of the first cell beyond it, adjusted by the upper compute bound.
    const auto locator = m_cell_index.lower_bound(m_cell_bounds[m_rank][2]);
    return locator->second.first - m_compute_bounds[m_rank][1];
}
Cuts compute_cuts() const {
    // For every other rank, determines the [first, last) range of OUR sorted
    // point indices that overlaps that rank's cell bounds — i.e. which local
    // points need to be exchanged with which rank.
    Cuts cuts(m_size, Locator(0, 0));
    for (int i = 0; i < m_size; ++i) {
        // skip own rank
        if (i == m_rank) {
            continue;
        }
        const auto& cell_bound = m_cell_bounds[i];
        const auto& compute_bound = m_compute_bounds[i];
        // lower bound: offset of rank i's first owned cell in our index
        const auto& lower_cut = m_cell_index.lower_bound(cell_bound[1]);
        cuts[i].first = lower_cut->second.first;
        // shift by the rank's split point unless we ran off the end of the
        // index (the sentinel m_last_cell entry), clamping at zero
        if (lower_cut->first != m_last_cell or m_cell_index.find(m_last_cell - 1) != m_cell_index.end()) {
            cuts[i].first = cuts[i].first < compute_bound[0] ? 0 : cuts[i].first - compute_bound[0];
        }
        // upper bound: offset of the first cell past rank i's owned range
        const auto& upper_cut = m_cell_index.lower_bound(cell_bound[2]);
        cuts[i].second = upper_cut->second.first;
        if (upper_cut->first != m_last_cell || m_cell_index.find(m_last_cell - 1) != m_cell_index.end()) {
            cuts[i].second = cuts[i].second < compute_bound[1] ? 0 : cuts[i].second - compute_bound[1];
        }
    }
    return cuts;
}
#else
// Without MPI there are no halo points: the own region starts at index 0.
size_t lower_halo_bound() const {
    return 0;
}
// Without MPI the own region spans the entire local chunk.
size_t upper_halo_bound() const {
    return m_data.m_chunk[0];
}
#endif
// Returns the linearized cell id of the point at the given (sorted) index.
inline Cell cell_of(size_t index) const {
    return m_cells[index];
}
std::vector<size_t> get_neighbors(const Cell cell) const {
    // Collects the sorted-point indices of all points in the given cell and
    // every directly adjacent cell (-1/+1 along each feature dimension).
    const hsize_t dimensions = m_data.m_chunk[1];
    // allocate some space for the neighboring cells, be pessimistic and reserve 3^dims for possibly all neighbors
    Cells neighboring_cells;
    neighboring_cells.reserve(std::pow(3, dimensions));
    neighboring_cells.push_back(cell);
    // cell accumulators
    size_t cells_in_lower_space = 1;
    size_t cells_in_current_space = 1;
    size_t number_of_points = m_cell_index.find(cell)->second.second;
    // fetch all existing neighboring cells: grow the set one dimension at a
    // time, adding the -1/+1 neighbor along that dimension per found cell
    for (size_t d : m_swapped_dimensions) {
        cells_in_current_space *= m_cell_dimensions[d];
        for (size_t i = 0, end = neighboring_cells.size(); i < end; ++i) {
            const Cell current_cell = neighboring_cells[i];
            // check "left" neighbor - a.k.a the cell in the current dimension that has a lower number
            // (the find may probe a wrapped-around id; the modulo test below
            // then discards it, so the stray lookup is harmless)
            const Cell left = current_cell - cells_in_lower_space;
            const auto found_left = m_cell_index.find(left);
            if (current_cell % cells_in_current_space >= cells_in_lower_space) {
                neighboring_cells.push_back(left);
                number_of_points += found_left != m_cell_index.end() ? found_left->second.second : 0;
            }
            // check "right" neighbor - a.k.a the cell in the current dimension that has a higher number
            const Cell right = current_cell + cells_in_lower_space;
            const auto found_right = m_cell_index.find(right);
            if (current_cell % cells_in_current_space < cells_in_current_space - cells_in_lower_space) {
                neighboring_cells.push_back(right);
                number_of_points += found_right != m_cell_index.end() ? found_right->second.second : 0;
            }
        }
        cells_in_lower_space = cells_in_current_space;
    }
    // copy the points from the neighboring cells over
    std::vector<size_t> neighboring_points;
    neighboring_points.reserve(number_of_points);
    for (size_t neighbor_cell : neighboring_cells) {
        const auto found = m_cell_index.find(neighbor_cell);
        // skip empty cells
        if (found == m_cell_index.end()) {
            continue;
        }
        // ... otherwise copy the points over; a cell's points are contiguous
        // after sort_by_cell, so its indices are locator.first .. +count-1
        const Locator& locator = found->second;
        neighboring_points.resize(neighboring_points.size() + locator.second);
        std::iota(neighboring_points.end() - locator.second, neighboring_points.end(), locator.first);
    }
    return neighboring_points;
}
/**
 * Scans the candidate neighbors of the given point and appends those within
 * the squared epsilon radius EPS2 to min_points_area.
 *
 * @param point_index       index of the query point in the sorted point array
 * @param neighboring_points candidate indices (see get_neighbors)
 * @param EPS2              squared search radius
 * @param clusters          current cluster labels (negative = core point)
 * @param min_points_area   out: indices of all in-range neighbors
 * @return the smallest core-cluster label magnitude seen among the in-range
 *         neighbors, or the point's own provisional label (global id + 1)
 */
Cluster region_query(const size_t point_index, const std::vector<size_t>& neighboring_points, const float EPS2,
        const Clusters& clusters, std::vector<size_t>& min_points_area) const {
    const size_t dimensions = m_data.m_chunk[1];
    const T* point = static_cast<T*>(m_data.m_p) + point_index * dimensions;
    Cluster cluster_label = m_global_point_offset + point_index + 1;
    // iterate through all neighboring points and check whether they are in range
    for (size_t neighbor: neighboring_points) {
        double offset = 0.0;
        const T* other_point = static_cast<T*>(m_data.m_p) + neighbor * dimensions;
        // determine the squared euclidean distance to the other point
        for (size_t d = 0; d < dimensions; ++d) {
            // BUGFIX: the coordinate difference must be computed in a signed
            // floating type; the previous `const size_t distance` truncated
            // fractional differences and wrapped around for negative ones,
            // producing huge bogus distances
            const double distance = static_cast<double>(point[d]) - static_cast<double>(other_point[d]);
            offset += distance * distance;
        }
        // .. if in range, add it to the vector with in range points
        if (offset <= EPS2) {
            const Cluster neighbor_label = clusters[neighbor];
            min_points_area.push_back(neighbor);
            // if neighbor point has an assigned label and it is a core, determine what label to take
            if (neighbor_label != NOT_VISITED and neighbor_label < 0) {
                cluster_label = std::min(cluster_label, std::abs(neighbor_label));
            }
        }
    }
    return cluster_label;
}
// Number of feature dimensions per point in the underlying dataset chunk.
size_t dimension() const {
    return m_data.m_chunk[1];
}
/**
 * Multi-criteria variant of region_query: a neighbor is in range only if it
 * lies within the (squared) epsilon of EVERY EPSConfig, each evaluated over
 * that config's own dimension subset. In-range neighbors are appended to
 * min_points_area; returns the smallest core label magnitude seen, or the
 * point's own provisional label (global id + 1).
 */
Cluster region_query_multi_crit(const size_t point_index, const std::vector<size_t> &neighboring_points, const std::vector<EPSConfig> &all_eps,
        const Clusters &clusters, std::vector<size_t> &min_points_area) const {
    const size_t dimensions = m_data.m_chunk[1];
    const T *point = static_cast<T *>(m_data.m_p) + point_index * dimensions;
    Cluster cluster_label = m_global_point_offset + point_index + 1;
    // iterate through all neighboring points and check whether they are in range
    for (size_t neighbor : neighboring_points) {
        const T *other_point = static_cast<T *>(m_data.m_p) + neighbor * dimensions;
        // squared distance per criterion; stored as double (was float) so the
        // accumulated offset is not narrowed before the comparison
        std::vector<double> offsets;
        offsets.reserve(all_eps.size());
        for (auto &&eps_conf: all_eps) {
            double offset = 0.0;
            // determine the squared euclidean distance over this config's dimensions
            for (auto &&d: eps_conf.dimensions) {
                // BUGFIX: compute the difference as a signed floating value;
                // the previous `const size_t distance` truncated fractions and
                // wrapped around for negative differences
                const double distance = static_cast<double>(point[d]) - static_cast<double>(other_point[d]);
                offset += distance * distance;
            }
            offsets.push_back(offset);
        }
        // .. if in range for ALL criteria, record the neighbor
        bool in_range = true;
        for (size_t i = 0; i < all_eps.size(); ++i) {
            if (offsets[i] > all_eps[i].epsilon * all_eps[i].epsilon) {
                in_range = false;
                break;
            }
        }
        if (in_range) {
            const Cluster neighbor_label = clusters[neighbor];
            min_points_area.push_back(neighbor);
            // if neighbor point has an assigned label and it is a core, determine what label to take
            if (neighbor_label != NOT_VISITED and neighbor_label < 0) {
                cluster_label = std::min(cluster_label, std::abs(neighbor_label));
            }
        }
    }
    return cluster_label;
}
void recover_initial_order(Clusters& clusters) {
    // Restores the dataset (points, order ids, cluster labels) to its
    // original global order: radix-sort the owned items, ship every item
    // back to its original owner rank (MPI only), then scatter locally.
    const hsize_t dimensions = m_data.m_chunk[1];
#ifdef WITH_MPI
    sort_by_order(clusters);
    // allocate buffers to do an inverse exchange
    // NOTE(review): variable-length arrays are a GNU extension, not ISO C++
    int send_counts[m_size];
    int send_displs[m_size];
    int recv_counts[m_size];
    int recv_displs[m_size];
    const size_t lower_bound = lower_halo_bound();
    const size_t upper_bound = upper_halo_bound();
    const size_t items = upper_bound - lower_bound;
    // block distribution of the original file: chunk_size items per rank
    // plus a share of the remainder
    const size_t chunk_size = m_data.m_shape[0] / m_size;
    const size_t remainder = m_data.m_shape[0] % static_cast<size_t>(m_size);
    size_t previous_offset = 0;
    // find all the points that have a global index less than each rank's chunk size
    // (m_initial_order is sorted after sort_by_order, so binary search works)
    for (size_t i = 1; i < static_cast<size_t>(m_size) + 1; ++i) {
        const size_t chunk_end = chunk_size * i + (remainder > i ? i : remainder);
        const auto split_iter = std::lower_bound(m_initial_order.begin(), m_initial_order.begin() + items, chunk_end);
        size_t split_index = split_iter - m_initial_order.begin();
        send_counts[i - 1] = static_cast<int>(split_index - previous_offset);
        send_displs[i - 1] = static_cast<int>(previous_offset);
        previous_offset = split_index;
    }
    // exchange the resulting item counts and displacements to get the incoming items for this rank
    MPI_Alltoall(send_counts, 1, MPI_INT, recv_counts, 1, MPI_INT, MPI_COMM_WORLD);
    for (int i = 0; i < m_size; ++i) {
        recv_displs[i] = (i == 0) ? 0 : recv_displs[i - 1] + recv_counts[i - 1];
    }
    // redistribute the dataset to their original owner ranks
    size_t total_recv_items = 0;
    int send_counts_points[m_size];
    int send_displs_points[m_size];
    int recv_counts_points[m_size];
    int recv_displs_points[m_size];
    for (int i = 0; i < m_size; ++i) {
        total_recv_items += recv_counts[i];
        send_counts_points[i] = send_counts[i] * dimensions;
        send_displs_points[i] = send_displs[i] * dimensions;
        recv_counts_points[i] = recv_counts[i] * dimensions;
        recv_displs_points[i] = recv_displs[i] * dimensions;
    }
    // allocate new buffers for the points and the order vectors
    T* point_buffer = new T[total_recv_items * dimensions];
    std::vector<size_t> order_buffer(total_recv_items);
    Clusters cluster_buffer(total_recv_items);
    // actually transmit the data
    MPI_Alltoallv(
        static_cast<T*>(m_data.m_p), send_counts_points, send_displs_points, MPI_Types<T>::map(),
        point_buffer, recv_counts_points, recv_displs_points, MPI_Types<T>::map(), MPI_COMM_WORLD
    );
    // NOTE(review): the next two exchanges send with the size_t type mapping
    // but receive as MPI_LONG (signed); the widths match on LP64 platforms,
    // but send/recv datatypes should agree — confirm against MPI_Types and
    // the actual Cluster element type
    MPI_Alltoallv(
        m_initial_order.data(), send_counts, send_displs, MPI_Types<size_t>::map(),
        order_buffer.data(), recv_counts, recv_displs, MPI_LONG, MPI_COMM_WORLD
    );
    MPI_Alltoallv(
        clusters.data(), send_counts, send_displs, MPI_Types<size_t>::map(),
        cluster_buffer.data(), recv_counts, recv_displs, MPI_LONG, MPI_COMM_WORLD
    );
    // assign the new data
    delete[] static_cast<T*>(m_data.m_p);
    m_data.m_p = point_buffer;
    point_buffer = nullptr;
    m_data.m_chunk[0] = total_recv_items;
    m_initial_order.swap(order_buffer);
    order_buffer.clear();
    clusters.swap(cluster_buffer);
    cluster_buffer.clear();
#endif
    // only reordering step needed for non-MPI implementation and final local reordering for MPI version
    // out-of-place rearranging of items
    T* local_point_buffer = new T[m_initial_order.size() * dimensions];
    std::vector<size_t> local_order_buffer(m_initial_order.size());
    Clusters local_cluster_buffer(m_initial_order.size());
    // each item's global index minus the chunk offset gives its final slot
    #pragma omp parallel for
    for (size_t i = 0; i < m_initial_order.size(); ++i) {
        const size_t copy_to = m_initial_order[i] - m_data.m_offset[0];
        local_order_buffer[copy_to] = m_initial_order[i];
        local_cluster_buffer[copy_to] = clusters[i];
        for (size_t d = 0; d < dimensions; ++d) {
            local_point_buffer[copy_to * dimensions + d] = static_cast<T*>(m_data.m_p)[i * dimensions + d];
        }
    }
    clusters.swap(local_cluster_buffer);
    m_initial_order.swap(local_order_buffer);
    delete[] static_cast<T*>(m_data.m_p);
    m_data.m_p = local_point_buffer;
}
};
#endif // SPATIAL_INDEX_H
|
yescrypt-simd_c.h | /*-
* Copyright 2009 Colin Percival
* Copyright 2012-2014 Alexander Peslyak
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* This file was originally written by Colin Percival as part of the Tarsnap
* online backup system.
*/
/*
* On 64-bit, enabling SSE4.1 helps our pwxform code indirectly, via avoiding
* gcc bug 54349 (fixed for gcc 4.9+). On 32-bit, it's of direct help. AVX
* and XOP are of further help either way.
*/
#ifndef __SSE4_1__
#warning "Consider enabling SSE4.1, AVX, or XOP in the C compiler for significantly better performance"
#endif
#include <emmintrin.h>
#ifdef __XOP__
#include <x86intrin.h>
#endif
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include "sha256.h"
#include "sysendian.h"
#include "yescrypt.h"
#include "yescrypt-platform_c.h"
#if __STDC_VERSION__ >= 199901L
/* have restrict */
#elif defined(__GNUC__)
#define restrict __restrict
#else
#define restrict
#endif
/* Prefetch an input cache line into the hinted cache level; prefetching of
 * output locations is compiled out. */
#define PREFETCH(x, hint) _mm_prefetch((const char *)(x), (hint));
#define PREFETCH_OUT(x, hint) /* disabled */
/*
 * ARX (add-rotate-xor) step of Salsa20 on four 32-bit lanes at once:
 * out ^= ((in1 + in2) <<< s)
 */
#ifdef __XOP__
/* AMD XOP provides a native SIMD rotate instruction */
#define ARX(out, in1, in2, s) \
	out = _mm_xor_si128(out, _mm_roti_epi32(_mm_add_epi32(in1, in2), s));
#else
/* plain SSE2: emulate the rotate with a left/right shift pair */
#define ARX(out, in1, in2, s) \
	{ \
		__m128i T = _mm_add_epi32(in1, in2); \
		out = _mm_xor_si128(out, _mm_slli_epi32(T, s)); \
		out = _mm_xor_si128(out, _mm_srli_epi32(T, 32-s)); \
	}
#endif
/*
 * One Salsa20 double-round (a column round followed by a row round) on the
 * state held in the four SSE registers X0..X3; the shuffles realign the
 * diagonals between the two half-rounds.
 */
#define SALSA20_2ROUNDS \
	/* Operate on "columns" */ \
	ARX(X1, X0, X3, 7) \
	ARX(X2, X1, X0, 9) \
	ARX(X3, X2, X1, 13) \
	ARX(X0, X3, X2, 18) \
	\
	/* Rearrange data */ \
	X1 = _mm_shuffle_epi32(X1, 0x93); \
	X2 = _mm_shuffle_epi32(X2, 0x4E); \
	X3 = _mm_shuffle_epi32(X3, 0x39); \
	\
	/* Operate on "rows" */ \
	ARX(X3, X0, X1, 7) \
	ARX(X2, X3, X0, 9) \
	ARX(X1, X2, X3, 13) \
	ARX(X0, X1, X2, 18) \
	\
	/* Rearrange data */ \
	X1 = _mm_shuffle_epi32(X1, 0x39); \
	X2 = _mm_shuffle_epi32(X2, 0x4E); \
	X3 = _mm_shuffle_epi32(X3, 0x93);
/**
 * Apply the salsa20/8 core to the block provided in (X0 ... X3).
 * maybe_decl lets the body either declare the Y feedforward copies (__m128i)
 * or reuse ones already in scope (empty); four double-rounds = 8 rounds, then
 * the initial state is added back in and the result stored to out.
 */
#define SALSA20_8_BASE(maybe_decl, out) \
	{ \
		maybe_decl Y0 = X0; \
		maybe_decl Y1 = X1; \
		maybe_decl Y2 = X2; \
		maybe_decl Y3 = X3; \
		SALSA20_2ROUNDS \
		SALSA20_2ROUNDS \
		SALSA20_2ROUNDS \
		SALSA20_2ROUNDS \
		(out)[0] = X0 = _mm_add_epi32(X0, Y0); \
		(out)[1] = X1 = _mm_add_epi32(X1, Y1); \
		(out)[2] = X2 = _mm_add_epi32(X2, Y2); \
		(out)[3] = X3 = _mm_add_epi32(X3, Y3); \
	}
#define SALSA20_8(out) \
	SALSA20_8_BASE(__m128i, out)
/**
 * Apply the salsa20/8 core to the block provided in (X0 ... X3) ^ (Z0 ... Z3).
 */
#define SALSA20_8_XOR_ANY(maybe_decl, Z0, Z1, Z2, Z3, out) \
	X0 = _mm_xor_si128(X0, Z0); \
	X1 = _mm_xor_si128(X1, Z1); \
	X2 = _mm_xor_si128(X2, Z2); \
	X3 = _mm_xor_si128(X3, Z3); \
	SALSA20_8_BASE(maybe_decl, out)
/* XOR with a block from memory, or with the Y0..Y3 registers respectively */
#define SALSA20_8_XOR_MEM(in, out) \
	SALSA20_8_XOR_ANY(__m128i, (in)[0], (in)[1], (in)[2], (in)[3], out)
#define SALSA20_8_XOR_REG(out) \
	SALSA20_8_XOR_ANY(/* empty */, Y0, Y1, Y2, Y3, out)
/* One 64-byte Salsa20 block, viewable as 16 words or as 4 SSE registers. */
typedef union {
	uint32_t w[16];
	__m128i q[4];
} salsa20_blk_t;
/**
 * blockmix_salsa8(Bin, Bout, r):
 * Compute Bout = BlockMix_{salsa20/8, r}(Bin). The input Bin must be 128r
 * bytes in length; the output Bout must also be the same size.
 */
static inline void
blockmix_salsa8(const salsa20_blk_t *restrict Bin,
    salsa20_blk_t *restrict Bout, size_t r)
{
	__m128i X0, X1, X2, X3;
	size_t i;
	/* after the decrement, r indexes the last even sub-block after halving;
	 * the even input blocks land in Bout[0..r] and the odd ones in
	 * Bout[r+1..2r+1], implementing the standard scrypt output shuffle */
	r--;
	PREFETCH(&Bin[r * 2 + 1], _MM_HINT_T0)
	for (i = 0; i < r; i++) {
		PREFETCH(&Bin[i * 2], _MM_HINT_T0)
		PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
		PREFETCH(&Bin[i * 2 + 1], _MM_HINT_T0)
		PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
	}
	PREFETCH(&Bin[r * 2], _MM_HINT_T0)
	PREFETCH_OUT(&Bout[r], _MM_HINT_T0)
	PREFETCH_OUT(&Bout[r * 2 + 1], _MM_HINT_T0)
	/* 1: X <-- B_{2r - 1} */
	X0 = Bin[r * 2 + 1].q[0];
	X1 = Bin[r * 2 + 1].q[1];
	X2 = Bin[r * 2 + 1].q[2];
	X3 = Bin[r * 2 + 1].q[3];
	/* 3: X <-- H(X \xor B_i) */
	/* 4: Y_i <-- X */
	/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
	SALSA20_8_XOR_MEM(Bin[0].q, Bout[0].q)
	/* 2: for i = 0 to 2r - 1 do (two sub-blocks per iteration) */
	for (i = 0; i < r;) {
		/* 3: X <-- H(X \xor B_i) */
		/* 4: Y_i <-- X */
		/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
		SALSA20_8_XOR_MEM(Bin[i * 2 + 1].q, Bout[r + 1 + i].q)
		i++;
		/* 3: X <-- H(X \xor B_i) */
		/* 4: Y_i <-- X */
		/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
		SALSA20_8_XOR_MEM(Bin[i * 2].q, Bout[i].q)
	}
	/* 3: X <-- H(X \xor B_i) */
	/* 4: Y_i <-- X */
	/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
	SALSA20_8_XOR_MEM(Bin[r * 2 + 1].q, Bout[r * 2 + 1].q)
}
/*
 * (V)PSRLDQ and (V)PSHUFD have higher throughput than (V)PSRLQ on some CPUs
 * starting with Sandy Bridge. Additionally, PSHUFD uses separate source and
 * destination registers, whereas the shifts would require an extra move
 * instruction for our code when building without AVX. Unfortunately, PSHUFD
 * is much slower on Conroe (4 cycles latency vs. 1 cycle latency for PSRLQ)
 * and somewhat slower on some non-Intel CPUs (luckily not including AMD
 * Bulldozer and Piledriver). Since for many other CPUs using (V)PSHUFD is a
 * win in terms of throughput or/and not needing a move instruction, we
 * currently use it despite of the higher latency on some older CPUs. As an
 * alternative, the #if below may be patched to only enable use of (V)PSHUFD
 * when building with SSE4.1 or newer, which is not available on older CPUs
 * where this instruction has higher latency.
 */
/* HI32(X): bring the high 32 bits of each 64-bit lane into the low half. */
#if 1
/* Swaps the two 32-bit halves within each 64-bit lane. */
#define HI32(X) \
    _mm_shuffle_epi32((X), _MM_SHUFFLE(2,3,0,1))
#elif 0
/* Disabled alternative: whole-register byte shift (see comment above). */
#define HI32(X) \
    _mm_srli_si128((X), 4)
#else
/* Disabled alternative: per-lane logical shift right by 32. */
#define HI32(X) \
    _mm_srli_epi64((X), 32)
#endif
/* EXTRACT64(X): read the low 64 bits of an __m128i as a scalar.  The ladder
 * below works around per-compiler naming differences and known bugs. */
#if defined(__x86_64__) && (defined(__ICC) || defined(__llvm__))
/* Intel's name, also supported by recent gcc */
#define EXTRACT64(X) _mm_cvtsi128_si64(X)
#elif defined(__x86_64__) && !defined(_MSC_VER) && !defined(__OPEN64__)
/* gcc got the 'x' name earlier than non-'x', MSVC and Open64 had bugs */
#define EXTRACT64(X) _mm_cvtsi128_si64x(X)
#elif defined(__x86_64__) && defined(__SSE4_1__)
/* No known bugs for this intrinsic */
#include <smmintrin.h>
#define EXTRACT64(X) _mm_extract_epi64((X), 0)
#elif defined(__SSE4_1__)
/* 32-bit */
#include <smmintrin.h>
#if 0
/* This is currently unused by the code below, which instead uses these two
 * intrinsics explicitly when (!defined(__x86_64__) && defined(__SSE4_1__)) */
#define EXTRACT64(X) \
    ((uint64_t)(uint32_t)_mm_cvtsi128_si32(X) | \
    ((uint64_t)(uint32_t)_mm_extract_epi32((X), 1) << 32))
#endif
#else
/* 32-bit or compilers with known past bugs in _mm_cvtsi128_si64*() */
#define EXTRACT64(X) \
    ((uint64_t)(uint32_t)_mm_cvtsi128_si32(X) | \
    ((uint64_t)(uint32_t)_mm_cvtsi128_si32(HI32(X)) << 32))
#endif
/* This is tunable */
#define S_BITS 8
/* Not tunable in this implementation, hard-coded in a few places */
#define S_SIMD 2
#define S_P 4
/* Number of S-boxes. Not tunable by design, hard-coded in a few places. */
#define S_N 2
/* Derived values. Not tunable except via S_BITS above. */
/* Entries per S-box. */
#define S_SIZE1 (1 << S_BITS)
/* Byte-offset mask used to index one S-box from 32 bits of state. */
#define S_MASK ((S_SIZE1 - 1) * S_SIMD * 8)
/* S_MASK replicated into both 32-bit halves of a 64-bit value. */
#define S_MASK2 (((uint64_t)S_MASK << 32) | S_MASK)
/* Total size in bytes of all S-boxes (both halves, S0 and S1). */
#define S_SIZE_ALL (S_N * S_SIZE1 * S_SIMD * 8)
/*
 * PWXFORM_SIMD(X, x, s0, s1): one pwxform step on a single 128-bit lane:
 * derive two S-box byte offsets from X, gather s0/s1, then
 * X = (hi32(X) * lo32(X) per 64-bit lane) + s0, followed by X ^= s1.
 * Two variants differ only in how the offsets are extracted from X.
 */
#if !defined(__x86_64__) && defined(__SSE4_1__)
/* 32-bit with SSE4.1 */
#define PWXFORM_X_T __m128i
#define PWXFORM_SIMD(X, x, s0, s1) \
    x = _mm_and_si128(X, _mm_set1_epi64x(S_MASK2)); \
    s0 = *(const __m128i *)(S0 + (uint32_t)_mm_cvtsi128_si32(x)); \
    s1 = *(const __m128i *)(S1 + (uint32_t)_mm_extract_epi32(x, 1)); \
    X = _mm_mul_epu32(HI32(X), X); \
    X = _mm_add_epi64(X, s0); \
    X = _mm_xor_si128(X, s1);
#else
/* 64-bit, or 32-bit without SSE4.1 */
#define PWXFORM_X_T uint64_t
#define PWXFORM_SIMD(X, x, s0, s1) \
    x = EXTRACT64(X) & S_MASK2; \
    s0 = *(const __m128i *)(S0 + (uint32_t)x); \
    s1 = *(const __m128i *)(S1 + (x >> 32)); \
    X = _mm_mul_epu32(HI32(X), X); \
    X = _mm_add_epi64(X, s0); \
    X = _mm_xor_si128(X, s1);
#endif
/* One pwxform round over all four lanes X0..X3. */
#define PWXFORM_ROUND \
    PWXFORM_SIMD(X0, x0, s00, s01) \
    PWXFORM_SIMD(X1, x1, s10, s11) \
    PWXFORM_SIMD(X2, x2, s20, s21) \
    PWXFORM_SIMD(X3, x3, s30, s31)
/* Full pwxform: six rounds over the 64-byte state held in X0..X3. */
#define PWXFORM \
{ \
    PWXFORM_X_T x0, x1, x2, x3; \
    __m128i s00, s01, s10, s11, s20, s21, s30, s31; \
    PWXFORM_ROUND PWXFORM_ROUND \
    PWXFORM_ROUND PWXFORM_ROUND \
    PWXFORM_ROUND PWXFORM_ROUND \
}
/* XOR a 64-byte block (four __m128i) into the X0..X3 registers. */
#define XOR4(in) \
    X0 = _mm_xor_si128(X0, (in)[0]); \
    X1 = _mm_xor_si128(X1, (in)[1]); \
    X2 = _mm_xor_si128(X2, (in)[2]); \
    X3 = _mm_xor_si128(X3, (in)[3]);
/* Store the X0..X3 registers to a 64-byte block. */
#define OUT(out) \
    (out)[0] = X0; \
    (out)[1] = X1; \
    (out)[2] = X2; \
    (out)[3] = X3;
/**
 * blockmix(Bin, Bout, r, S):
 * Compute Bout = BlockMix_pwxform{salsa20/8, r, S}(Bin), or classic scrypt
 * BlockMix_{salsa20/8, r}(Bin) when S is NULL.  The input Bin must be 128r
 * bytes in length; the output Bout must also be the same size.
 */
static void
blockmix(const salsa20_blk_t *restrict Bin, salsa20_blk_t *restrict Bout,
    size_t r, const __m128i *restrict S)
{
    const uint8_t * S0, * S1;
    __m128i X0, X1, X2, X3;
    size_t i;

    /* No S-boxes supplied: fall back to classic scrypt BlockMix. */
    if (!S) {
        blockmix_salsa8(Bin, Bout, r);
        return;
    }

    /* The two halves of S back the two table lookups in PWXFORM_SIMD. */
    S0 = (const uint8_t *)S;
    S1 = (const uint8_t *)S + S_SIZE_ALL / 2;

    /* Convert 128-byte blocks to 64-byte blocks */
    /* After this, sub-block indices run over [0, r] inclusive. */
    r *= 2;
    r--;

    /* Warm the caches for all input and output sub-blocks. */
    PREFETCH(&Bin[r], _MM_HINT_T0)
    for (i = 0; i < r; i++) {
        PREFETCH(&Bin[i], _MM_HINT_T0)
        PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
    }
    PREFETCH_OUT(&Bout[r], _MM_HINT_T0)

    /* X <-- B_{r1 - 1} */
    X0 = Bin[r].q[0];
    X1 = Bin[r].q[1];
    X2 = Bin[r].q[2];
    X3 = Bin[r].q[3];

    /* for i = 0 to r1 - 1 do */
    for (i = 0; i < r; i++) {
        /* X <-- H'(X \xor B_i) */
        XOR4(Bin[i].q)
        PWXFORM
        /* B'_i <-- X */
        OUT(Bout[i].q)
    }

    /* Last iteration of the loop above: the final sub-block additionally
     * goes through Salsa20/8 (NOTE(review): presumably so the output block
     * used by integerify() gets full Salsa20 mixing -- confirm against the
     * yescrypt specification). */
    XOR4(Bin[i].q)
    PWXFORM
    /* B'_i <-- H(B'_i) */
    SALSA20_8(Bout[i].q)
}
/* Load the XOR of two 64-byte blocks directly into X0..X3 (overwrites X). */
#define XOR4_2(in1, in2) \
    X0 = _mm_xor_si128((in1)[0], (in2)[0]); \
    X1 = _mm_xor_si128((in1)[1], (in2)[1]); \
    X2 = _mm_xor_si128((in1)[2], (in2)[2]); \
    X3 = _mm_xor_si128((in1)[3], (in2)[3]);
/*
 * blockmix_salsa8_xor(Bin1, Bin2, Bout, r, Bin2_in_ROM):
 * Compute Bout = BlockMix_{salsa20/8, r}(Bin1 xor Bin2) and return the low
 * 32 bits of the final X0 register (used by the callers as the Integerify
 * result).  Bin2_in_ROM selects non-temporal prefetch hints for Bin2, for
 * the case where Bin2 lives in a large read-once ROM region.
 */
static inline uint32_t
blockmix_salsa8_xor(const salsa20_blk_t *restrict Bin1,
    const salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
    size_t r, int Bin2_in_ROM)
{
    __m128i X0, X1, X2, X3;
    size_t i;

    r--;
    if (Bin2_in_ROM) {
        /* ROM data will not be revisited soon: prefetch non-temporally. */
        PREFETCH(&Bin2[r * 2 + 1], _MM_HINT_NTA)
        PREFETCH(&Bin1[r * 2 + 1], _MM_HINT_T0)
        for (i = 0; i < r; i++) {
            PREFETCH(&Bin2[i * 2], _MM_HINT_NTA)
            PREFETCH(&Bin1[i * 2], _MM_HINT_T0)
            PREFETCH(&Bin2[i * 2 + 1], _MM_HINT_NTA)
            PREFETCH(&Bin1[i * 2 + 1], _MM_HINT_T0)
            PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
            PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
        }
        /* NOTE(review): T0 here, unlike the NTA hints used for the other
         * Bin2 prefetches in this branch -- confirm against upstream. */
        PREFETCH(&Bin2[r * 2], _MM_HINT_T0)
    } else {
        PREFETCH(&Bin2[r * 2 + 1], _MM_HINT_T0)
        PREFETCH(&Bin1[r * 2 + 1], _MM_HINT_T0)
        for (i = 0; i < r; i++) {
            PREFETCH(&Bin2[i * 2], _MM_HINT_T0)
            PREFETCH(&Bin1[i * 2], _MM_HINT_T0)
            PREFETCH(&Bin2[i * 2 + 1], _MM_HINT_T0)
            PREFETCH(&Bin1[i * 2 + 1], _MM_HINT_T0)
            PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
            PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
        }
        PREFETCH(&Bin2[r * 2], _MM_HINT_T0)
    }
    PREFETCH(&Bin1[r * 2], _MM_HINT_T0)
    PREFETCH_OUT(&Bout[r], _MM_HINT_T0)
    PREFETCH_OUT(&Bout[r * 2 + 1], _MM_HINT_T0)

    /* 1: X <-- B_{2r - 1} */
    XOR4_2(Bin1[r * 2 + 1].q, Bin2[r * 2 + 1].q)

    /* 3: X <-- H(X \xor B_i) */
    /* 4: Y_i <-- X */
    /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1})
     * i.e. even sub-blocks go to Bout[0..r], odd ones to Bout[r+1..2r+1];
     * the loop below alternates odd/even to realize that interleave. */
    XOR4(Bin1[0].q)
    SALSA20_8_XOR_MEM(Bin2[0].q, Bout[0].q)

    /* 2: for i = 0 to 2r - 1 do */
    for (i = 0; i < r;) {
        /* Odd sub-block 2i+1 -> second half of Bout. */
        XOR4(Bin1[i * 2 + 1].q)
        SALSA20_8_XOR_MEM(Bin2[i * 2 + 1].q, Bout[r + 1 + i].q)

        i++;

        /* Even sub-block 2i -> first half of Bout. */
        XOR4(Bin1[i * 2].q)
        SALSA20_8_XOR_MEM(Bin2[i * 2].q, Bout[i].q)
    }

    /* Final odd sub-block 2r - 1. */
    XOR4(Bin1[r * 2 + 1].q)
    SALSA20_8_XOR_MEM(Bin2[r * 2 + 1].q, Bout[r * 2 + 1].q)

    return _mm_cvtsi128_si32(X0);
}
/*
 * blockmix_xor(Bin1, Bin2, Bout, r, Bin2_in_ROM, S):
 * Compute Bout = BlockMix_pwxform{salsa20/8, r, S}(Bin1 xor Bin2) and return
 * the low 32 bits of the final X0 register.  Falls back to the plain
 * salsa20/8 variant when no S-boxes are supplied.
 */
static uint32_t
blockmix_xor(const salsa20_blk_t *restrict Bin1,
    const salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
    size_t r, int Bin2_in_ROM, const __m128i *restrict S)
{
    const uint8_t * S0, * S1;
    __m128i X0, X1, X2, X3;
    size_t i;

    /* No S-boxes: classic scrypt BlockMix of the XOR of the two inputs. */
    if (!S)
        return blockmix_salsa8_xor(Bin1, Bin2, Bout, r, Bin2_in_ROM);

    S0 = (const uint8_t *)S;
    S1 = (const uint8_t *)S + S_SIZE_ALL / 2;

    /* Convert 128-byte blocks to 64-byte blocks */
    r *= 2;
    r--;

    /* Prefetch both inputs and the output; NTA for read-once ROM data. */
    if (Bin2_in_ROM) {
        PREFETCH(&Bin2[r], _MM_HINT_NTA)
        PREFETCH(&Bin1[r], _MM_HINT_T0)
        for (i = 0; i < r; i++) {
            PREFETCH(&Bin2[i], _MM_HINT_NTA)
            PREFETCH(&Bin1[i], _MM_HINT_T0)
            PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
        }
    } else {
        PREFETCH(&Bin2[r], _MM_HINT_T0)
        PREFETCH(&Bin1[r], _MM_HINT_T0)
        for (i = 0; i < r; i++) {
            PREFETCH(&Bin2[i], _MM_HINT_T0)
            PREFETCH(&Bin1[i], _MM_HINT_T0)
            PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
        }
    }
    PREFETCH_OUT(&Bout[r], _MM_HINT_T0);

    /* X <-- B_{r1 - 1} */
    XOR4_2(Bin1[r].q, Bin2[r].q)

    /* for i = 0 to r1 - 1 do */
    for (i = 0; i < r; i++) {
        /* X <-- H'(X \xor B_i) */
        XOR4(Bin1[i].q)
        XOR4(Bin2[i].q)
        PWXFORM
        /* B'_i <-- X */
        OUT(Bout[i].q)
    }

    /* Last iteration of the loop above: final sub-block also gets
     * Salsa20/8, matching blockmix(). */
    XOR4(Bin1[i].q)
    XOR4(Bin2[i].q)
    PWXFORM
    /* B'_i <-- H(B'_i) */
    SALSA20_8(Bout[i].q)

    return _mm_cvtsi128_si32(X0);
}
#undef XOR4
/* "Save" variant of XOR4: XOR block `in` into block `out` in place (this is
 * the V_j <-- Xprev xor V_j write-back of the _save blockmix functions) and
 * keep the updated values in the Y0..Y3 registers for subsequent use. */
#define XOR4(in, out) \
    (out)[0] = Y0 = _mm_xor_si128((in)[0], (out)[0]); \
    (out)[1] = Y1 = _mm_xor_si128((in)[1], (out)[1]); \
    (out)[2] = Y2 = _mm_xor_si128((in)[2], (out)[2]); \
    (out)[3] = Y3 = _mm_xor_si128((in)[3], (out)[3]);
/*
 * blockmix_salsa8_xor_save(Bin1, Bin2, Bout, r):
 * Like blockmix_salsa8_xor(), but additionally updates Bin2 in place:
 * Bin2 <-- Bin1 xor Bin2 (via the redefined XOR4 above), which implements
 * the V_j write-back of yescrypt's read-write mode.  Returns the low 32
 * bits of the final X0 register.
 */
static inline uint32_t
blockmix_salsa8_xor_save(const salsa20_blk_t *restrict Bin1,
    salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
    size_t r)
{
    __m128i X0, X1, X2, X3, Y0, Y1, Y2, Y3;
    size_t i;

    r--;

    /* Prefetch both inputs and the output (Bin2 is written, hence T0). */
    PREFETCH(&Bin2[r * 2 + 1], _MM_HINT_T0)
    PREFETCH(&Bin1[r * 2 + 1], _MM_HINT_T0)
    for (i = 0; i < r; i++) {
        PREFETCH(&Bin2[i * 2], _MM_HINT_T0)
        PREFETCH(&Bin1[i * 2], _MM_HINT_T0)
        PREFETCH(&Bin2[i * 2 + 1], _MM_HINT_T0)
        PREFETCH(&Bin1[i * 2 + 1], _MM_HINT_T0)
        PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
        PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
    }
    PREFETCH(&Bin2[r * 2], _MM_HINT_T0)
    PREFETCH(&Bin1[r * 2], _MM_HINT_T0)
    PREFETCH_OUT(&Bout[r], _MM_HINT_T0)
    PREFETCH_OUT(&Bout[r * 2 + 1], _MM_HINT_T0)

    /* 1: X <-- B_{2r - 1} */
    XOR4_2(Bin1[r * 2 + 1].q, Bin2[r * 2 + 1].q)

    /* 3: X <-- H(X \xor B_i) */
    /* 4: Y_i <-- X */
    /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1})
     * Each XOR4 updates Bin2 in place and leaves the result in Y0..Y3,
     * which SALSA20_8_XOR_REG consumes. */
    XOR4(Bin1[0].q, Bin2[0].q)
    SALSA20_8_XOR_REG(Bout[0].q)

    /* 2: for i = 0 to 2r - 1 do (odd/even sub-blocks interleaved) */
    for (i = 0; i < r;) {
        XOR4(Bin1[i * 2 + 1].q, Bin2[i * 2 + 1].q)
        SALSA20_8_XOR_REG(Bout[r + 1 + i].q)

        i++;

        XOR4(Bin1[i * 2].q, Bin2[i * 2].q)
        SALSA20_8_XOR_REG(Bout[i].q)
    }

    /* Final odd sub-block 2r - 1. */
    XOR4(Bin1[r * 2 + 1].q, Bin2[r * 2 + 1].q)
    SALSA20_8_XOR_REG(Bout[r * 2 + 1].q)

    return _mm_cvtsi128_si32(X0);
}
/* XOR the saved Y0..Y3 registers (set by the XOR4 save variant) into X. */
#define XOR4_Y \
    X0 = _mm_xor_si128(X0, Y0); \
    X1 = _mm_xor_si128(X1, Y1); \
    X2 = _mm_xor_si128(X2, Y2); \
    X3 = _mm_xor_si128(X3, Y3);
/*
 * blockmix_xor_save(Bin1, Bin2, Bout, r, S):
 * pwxform analogue of blockmix_salsa8_xor_save(): computes
 * Bout = BlockMix_pwxform{salsa20/8, r, S}(Bin1 xor Bin2) while updating
 * Bin2 <-- Bin1 xor Bin2 in place.  Returns the low 32 bits of the final
 * X0 register.  Falls back to the salsa20/8 variant when S is NULL.
 */
static uint32_t
blockmix_xor_save(const salsa20_blk_t *restrict Bin1,
    salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
    size_t r, const __m128i *restrict S)
{
    const uint8_t * S0, * S1;
    __m128i X0, X1, X2, X3, Y0, Y1, Y2, Y3;
    size_t i;

    if (!S)
        return blockmix_salsa8_xor_save(Bin1, Bin2, Bout, r);

    S0 = (const uint8_t *)S;
    S1 = (const uint8_t *)S + S_SIZE_ALL / 2;

    /* Convert 128-byte blocks to 64-byte blocks */
    r *= 2;
    r--;

    PREFETCH(&Bin2[r], _MM_HINT_T0)
    PREFETCH(&Bin1[r], _MM_HINT_T0)
    for (i = 0; i < r; i++) {
        PREFETCH(&Bin2[i], _MM_HINT_T0)
        PREFETCH(&Bin1[i], _MM_HINT_T0)
        PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
    }
    PREFETCH_OUT(&Bout[r], _MM_HINT_T0);

    /* X <-- B_{r1 - 1} */
    XOR4_2(Bin1[r].q, Bin2[r].q)

    /* for i = 0 to r1 - 1 do */
    for (i = 0; i < r; i++) {
        /* Bin2_i <-- Bin1_i xor Bin2_i (kept in Y0..Y3) */
        XOR4(Bin1[i].q, Bin2[i].q)
        /* X <-- H'(X \xor B_i) */
        XOR4_Y
        PWXFORM
        /* B'_i <-- X */
        OUT(Bout[i].q)
    }

    /* Last iteration of the loop above: final sub-block also gets
     * Salsa20/8, matching blockmix(). */
    XOR4(Bin1[i].q, Bin2[i].q)
    XOR4_Y
    PWXFORM
    /* B'_i <-- H(B'_i) */
    SALSA20_8(Bout[i].q)

    return _mm_cvtsi128_si32(X0);
}
/*
 * Undefine the helper macros so later code (e.g. other scalar/SIMD variants
 * in a combined build) can redefine them without -Wmacro-redefinition noise.
 * Fix: the original list undefined PWXFORM_SIMD_1/PWXFORM_SIMD_2 (which this
 * SSE variant never defines -- #undef of an undefined name is harmless) but
 * forgot PWXFORM_SIMD and PWXFORM_X_T, which ARE defined above; both are now
 * undefined here as well.
 */
#undef ARX
#undef SALSA20_2ROUNDS
#undef SALSA20_8
#undef SALSA20_8_XOR_ANY
#undef SALSA20_8_XOR_MEM
#undef SALSA20_8_XOR_REG
#undef PWXFORM_SIMD
#undef PWXFORM_X_T
#undef PWXFORM_SIMD_1
#undef PWXFORM_SIMD_2
#undef PWXFORM_ROUND
#undef PWXFORM
#undef OUT
#undef XOR4
#undef XOR4_2
#undef XOR4_Y
/**
 * integerify(B, r):
 * Return the result of parsing B_{2r-1} as a little-endian integer,
 * i.e. the first 32-bit word of the last 64-byte sub-block of B.
 */
static inline uint32_t
integerify(const salsa20_blk_t * B, size_t r)
{
    const salsa20_blk_t * last_blk = &B[2 * r - 1];
    return last_blk->w[0];
}
/**
* smix1(B, r, N, flags, V, NROM, shared, XY, S):
* Compute first loop of B = SMix_r(B, N). The input B must be 128r bytes in
* length; the temporary storage V must be 128rN bytes in length; the temporary
* storage XY must be 128r bytes in length. The value N must be even and no
* smaller than 2. The array V must be aligned to a multiple of 64 bytes, and
* arrays B and XY to a multiple of at least 16 bytes (aligning them to 64
* bytes as well saves cache lines, but might result in cache bank conflicts).
*/
static void
smix1(uint8_t * B, size_t r, uint32_t N, yescrypt_flags_t flags,
    salsa20_blk_t * V, uint32_t NROM, const yescrypt_shared_t * shared,
    salsa20_blk_t * XY, void * S)
{
    const salsa20_blk_t * VROM = shared->shared1.aligned;
    uint32_t VROM_mask = shared->mask1;
    /* 64-byte sub-blocks per 128r-byte block; V is indexed in units of s. */
    size_t s = 2 * r;
    salsa20_blk_t * X = V, * Y;
    uint32_t i, j;
    size_t k;

    /* 1: X <-- B */
    /* 3: V_i <-- X */
    /* Decode B (little-endian) into V_0.  The (i * 5 % 16) permutation is
     * this implementation's internal word shuffle; it is inverted by the
     * identical index in the output loop at the bottom. */
    for (k = 0; k < 2 * r; k++) {
        for (i = 0; i < 16; i++) {
            X[k].w[i] = le32dec(&B[(k * 16 + (i * 5 % 16)) * 4]);
        }
    }

    if (NROM && (VROM_mask & 1)) {
        /* ROM-enabled path: fill V while periodically (per VROM_mask)
         * mixing in a ROM block selected by Integerify mod NROM. */
        uint32_t n;
        salsa20_blk_t * V_n;
        const salsa20_blk_t * V_j;

        /* 4: X <-- H(X) */
        /* 3: V_i <-- X */
        Y = &V[s];
        blockmix(X, Y, r, S);

        X = &V[2 * s];
        /* NOTE(review): given the enclosing (VROM_mask & 1) test, this
         * condition looks always true and the else branch unreachable --
         * confirm against upstream yescrypt before touching. */
        if ((1 & VROM_mask) == 1) {
            /* j <-- Integerify(X) mod NROM */
            j = integerify(Y, r) & (NROM - 1);
            V_j = &VROM[j * s];

            /* X <-- H(X \xor VROM_j) */
            j = blockmix_xor(Y, V_j, X, r, 1, S);
        } else {
            /* X <-- H(X) */
            blockmix(Y, X, r, S);
            j = integerify(X, r);
        }

        /* 2: for i = 0 to N - 1 do -- two blocks per inner iteration; the
         * window n of already-written blocks doubles each outer pass, and
         * j is wrapped into that window (Wrap(Integerify(X), i)). */
        for (n = 2; n < N; n <<= 1) {
            uint32_t m = (n < N / 2) ? n : (N - 1 - n);

            V_n = &V[n * s];

            for (i = 1; i < m; i += 2) {
                /* j <-- Wrap(Integerify(X), i) */
                j &= n - 1;
                j += i - 1;
                V_j = &V[j * s];

                /* X <-- X \xor V_j */
                /* 4: X <-- H(X) */
                /* 3: V_i <-- X */
                Y = &V_n[i * s];
                j = blockmix_xor(X, V_j, Y, r, 0, S);

                if (((n + i) & VROM_mask) == 1) {
                    /* j <-- Integerify(X) mod NROM */
                    j &= NROM - 1;
                    V_j = &VROM[j * s];
                } else {
                    /* j <-- Wrap(Integerify(X), i) */
                    j &= n - 1;
                    j += i;
                    V_j = &V[j * s];
                }

                /* X <-- H(X \xor VROM_j) */
                X = &V_n[(i + 1) * s];
                j = blockmix_xor(Y, V_j, X, r, 1, S);
            }
        }

        /* Final pair: write V_{N-1}, then leave the result in XY. */
        n >>= 1;

        /* j <-- Wrap(Integerify(X), i) */
        j &= n - 1;
        j += N - 2 - n;
        V_j = &V[j * s];

        /* X <-- X \xor V_j */
        /* 4: X <-- H(X) */
        /* 3: V_i <-- X */
        Y = &V[(N - 1) * s];
        j = blockmix_xor(X, V_j, Y, r, 0, S);

        if (((N - 1) & VROM_mask) == 1) {
            /* j <-- Integerify(X) mod NROM */
            j &= NROM - 1;
            V_j = &VROM[j * s];
        } else {
            /* j <-- Wrap(Integerify(X), i) */
            j &= n - 1;
            j += N - 1 - n;
            V_j = &V[j * s];
        }

        /* X <-- X \xor V_j */
        /* 4: X <-- H(X) */
        X = XY;
        blockmix_xor(Y, V_j, X, r, 1, S);
    } else if (flags & YESCRYPT_RW) {
        /* yescrypt RW path: same doubling-window Wrap() indexing as above,
         * but all extra reads hit RAM (V), never the ROM. */
        uint32_t n;
        salsa20_blk_t * V_n, * V_j;

        /* 4: X <-- H(X) */
        /* 3: V_i <-- X */
        Y = &V[s];
        blockmix(X, Y, r, S);

        /* 4: X <-- H(X) */
        /* 3: V_i <-- X */
        X = &V[2 * s];
        blockmix(Y, X, r, S);
        j = integerify(X, r);

        for (n = 2; n < N; n <<= 1) {
            uint32_t m = (n < N / 2) ? n : (N - 1 - n);

            V_n = &V[n * s];

            /* 2: for i = 0 to N - 1 do */
            for (i = 1; i < m; i += 2) {
                Y = &V_n[i * s];

                /* j <-- Wrap(Integerify(X), i) */
                j &= n - 1;
                j += i - 1;
                V_j = &V[j * s];

                /* X <-- X \xor V_j */
                /* 4: X <-- H(X) */
                /* 3: V_i <-- X */
                j = blockmix_xor(X, V_j, Y, r, 0, S);

                /* j <-- Wrap(Integerify(X), i) */
                j &= n - 1;
                j += i;
                V_j = &V[j * s];

                /* X <-- X \xor V_j */
                /* 4: X <-- H(X) */
                /* 3: V_i <-- X */
                X = &V_n[(i + 1) * s];
                j = blockmix_xor(Y, V_j, X, r, 0, S);
            }
        }

        /* Final pair: write V_{N-1}, then leave the result in XY. */
        n >>= 1;

        /* j <-- Wrap(Integerify(X), i) */
        j &= n - 1;
        j += N - 2 - n;
        V_j = &V[j * s];

        /* X <-- X \xor V_j */
        /* 4: X <-- H(X) */
        /* 3: V_i <-- X */
        Y = &V[(N - 1) * s];
        j = blockmix_xor(X, V_j, Y, r, 0, S);

        /* j <-- Wrap(Integerify(X), i) */
        j &= n - 1;
        j += N - 1 - n;
        V_j = &V[j * s];

        /* X <-- X \xor V_j */
        /* 4: X <-- H(X) */
        X = XY;
        blockmix_xor(Y, V_j, X, r, 0, S);
    } else {
        /* Classic scrypt: fill V_1 .. V_{N-1} sequentially, two blocks
         * per iteration, then leave the final state in XY. */
        /* 2: for i = 0 to N - 1 do */
        for (i = 1; i < N - 1; i += 2) {
            /* 4: X <-- H(X) */
            /* 3: V_i <-- X */
            Y = &V[i * s];
            blockmix(X, Y, r, S);

            /* 4: X <-- H(X) */
            /* 3: V_i <-- X */
            X = &V[(i + 1) * s];
            blockmix(Y, X, r, S);
        }

        /* 4: X <-- H(X) */
        /* 3: V_i <-- X */
        Y = &V[i * s];
        blockmix(X, Y, r, S);

        /* 4: X <-- H(X) */
        X = XY;
        blockmix(Y, X, r, S);
    }

    /* B' <-- X -- re-encode, applying the same word shuffle in reverse. */
    for (k = 0; k < 2 * r; k++) {
        for (i = 0; i < 16; i++) {
            le32enc(&B[(k * 16 + (i * 5 % 16)) * 4], X[k].w[i]);
        }
    }
}
/**
* smix2(B, r, N, Nloop, flags, V, NROM, shared, XY, S):
* Compute second loop of B = SMix_r(B, N). The input B must be 128r bytes in
* length; the temporary storage V must be 128rN bytes in length; the temporary
* storage XY must be 256r bytes in length. The value N must be a power of 2
* greater than 1. The value Nloop must be even. The array V must be aligned
* to a multiple of 64 bytes, and arrays B and XY to a multiple of at least 16
* bytes (aligning them to 64 bytes as well saves cache lines, but might result
* in cache bank conflicts).
*/
static void
smix2(uint8_t * B, size_t r, uint32_t N, uint64_t Nloop,
    yescrypt_flags_t flags, salsa20_blk_t * V, uint32_t NROM,
    const yescrypt_shared_t * shared, salsa20_blk_t * XY, void * S)
{
    const salsa20_blk_t * VROM = shared->shared1.aligned;
    uint32_t VROM_mask = shared->mask1;
    /* 64-byte sub-blocks per 128r-byte block; V is indexed in units of s. */
    size_t s = 2 * r;
    /* X and Y ping-pong within the 256r-byte XY scratch area. */
    salsa20_blk_t * X = XY, * Y = &XY[s];
    uint64_t i;
    uint32_t j;
    size_t k;

    if (Nloop == 0)
        return;

    /* X <-- B' */
    /* 3: V_i <-- X */
    /* Decode B with the implementation's word shuffle (see smix1). */
    for (k = 0; k < 2 * r; k++) {
        for (i = 0; i < 16; i++) {
            X[k].w[i] = le32dec(&B[(k * 16 + (i * 5 % 16)) * 4]);
        }
    }

    i = Nloop / 2;

    /* 7: j <-- Integerify(X) mod N */
    j = integerify(X, r) & (N - 1);

/*
 * Normally, NROM implies YESCRYPT_RW, but we check for these separately
 * because YESCRYPT_PARALLEL_SMIX resets YESCRYPT_RW for the smix2() calls
 * operating on the entire V.
 */
    if (NROM && (flags & YESCRYPT_RW)) {
        /* RAM+ROM with write-back: odd iterations may hit the ROM
         * (per VROM_mask); RAM lookups update V_j in place. */
        /* 6: for i = 0 to N - 1 do */
        for (i = 0; i < Nloop; i += 2) {
            salsa20_blk_t * V_j = &V[j * s];

            /* 8: X <-- H(X \xor V_j) */
            /* V_j <-- Xprev \xor V_j */
            /* j <-- Integerify(X) mod NROM */
            j = blockmix_xor_save(X, V_j, Y, r, S);

            if (((i + 1) & VROM_mask) == 1) {
                const salsa20_blk_t * VROM_j;

                j &= NROM - 1;
                VROM_j = &VROM[j * s];

                /* X <-- H(X \xor VROM_j) */
                /* 7: j <-- Integerify(X) mod N */
                j = blockmix_xor(Y, VROM_j, X, r, 1, S);
            } else {
                j &= N - 1;
                V_j = &V[j * s];

                /* 8: X <-- H(X \xor V_j) */
                /* V_j <-- Xprev \xor V_j */
                /* j <-- Integerify(X) mod NROM */
                j = blockmix_xor_save(Y, V_j, X, r, S);
            }
            j &= N - 1;
            V_j = &V[j * s];
        }
    } else if (NROM) {
        /* RAM+ROM, read-only: same schedule, but V is never written. */
        /* 6: for i = 0 to N - 1 do */
        for (i = 0; i < Nloop; i += 2) {
            const salsa20_blk_t * V_j = &V[j * s];

            /* 8: X <-- H(X \xor V_j) */
            /* V_j <-- Xprev \xor V_j */
            /* j <-- Integerify(X) mod NROM */
            j = blockmix_xor(X, V_j, Y, r, 0, S);

            if (((i + 1) & VROM_mask) == 1) {
                j &= NROM - 1;
                V_j = &VROM[j * s];
            } else {
                j &= N - 1;
                V_j = &V[j * s];
            }

            /* X <-- H(X \xor VROM_j) */
            /* 7: j <-- Integerify(X) mod N */
            j = blockmix_xor(Y, V_j, X, r, 1, S);
            j &= N - 1;
            V_j = &V[j * s];
        }
    } else if (flags & YESCRYPT_RW) {
        /* RAM only, with V_j write-back (yescrypt RW mode).  Two blockmix
         * calls per pass; i was preset to Nloop / 2 above (Nloop is even). */
        /* 6: for i = 0 to N - 1 do */
        do {
            salsa20_blk_t * V_j = &V[j * s];

            /* 8: X <-- H(X \xor V_j) */
            /* V_j <-- Xprev \xor V_j */
            /* 7: j <-- Integerify(X) mod N */
            j = blockmix_xor_save(X, V_j, Y, r, S);
            j &= N - 1;
            V_j = &V[j * s];

            /* 8: X <-- H(X \xor V_j) */
            /* V_j <-- Xprev \xor V_j */
            /* 7: j <-- Integerify(X) mod N */
            j = blockmix_xor_save(Y, V_j, X, r, S);
            j &= N - 1;
        } while (--i);
    } else {
        /* Classic scrypt second loop: read-only lookups into V. */
        /* 6: for i = 0 to N - 1 do */
        do {
            const salsa20_blk_t * V_j = &V[j * s];

            /* 8: X <-- H(X \xor V_j) */
            /* 7: j <-- Integerify(X) mod N */
            j = blockmix_xor(X, V_j, Y, r, 0, S);
            j &= N - 1;
            V_j = &V[j * s];

            /* 8: X <-- H(X \xor V_j) */
            /* 7: j <-- Integerify(X) mod N */
            j = blockmix_xor(Y, V_j, X, r, 0, S);
            j &= N - 1;
        } while (--i);
    }

    /* 10: B' <-- X -- re-encode with the same word shuffle. */
    for (k = 0; k < 2 * r; k++) {
        for (i = 0; i < 16; i++) {
            le32enc(&B[(k * 16 + (i * 5 % 16)) * 4], X[k].w[i]);
        }
    }
}
/**
 * p2floor(x):
 * Largest power of 2 not greater than the argument (0 for x == 0).
 * Repeatedly clears the lowest set bit; the last non-zero value seen
 * consists of the highest set bit alone.
 */
static uint64_t
p2floor(uint64_t x)
{
    uint64_t prev;

    do {
        prev = x;
        x &= x - 1; /* drop the lowest set bit */
    } while (x != 0);

    return prev;
}
/**
* smix(B, r, N, p, t, flags, V, NROM, shared, XY, S):
* Compute B = SMix_r(B, N). The input B must be 128rp bytes in length; the
* temporary storage V must be 128rN bytes in length; the temporary storage XY
* must be 256r or 256rp bytes in length (the larger size is required with
* OpenMP-enabled builds). The value N must be a power of 2 greater than 1.
* The array V must be aligned to a multiple of 64 bytes, and arrays B and
* XY to a multiple of at least 16 bytes (aligning them to 64 bytes as well
* saves cache lines and helps avoid false sharing in OpenMP-enabled builds
* when p > 1, but it might also result in cache bank conflicts).
*/
static void
smix(uint8_t * B, size_t r, uint32_t N, uint32_t p, uint32_t t,
    yescrypt_flags_t flags,
    salsa20_blk_t * V, uint32_t NROM, const yescrypt_shared_t * shared,
    salsa20_blk_t * XY, void * S)
{
    size_t s = 2 * r;
    /* Each of the p lanes owns a contiguous chunk of N/p blocks of V. */
    uint32_t Nchunk = N / p;
    uint64_t Nloop_all, Nloop_rw;
    uint32_t i;

    /* Total number of smix2 iterations, scaled by the time parameter t. */
    Nloop_all = Nchunk;
    if (flags & YESCRYPT_RW) {
        if (t <= 1) {
            if (t)
                Nloop_all *= 2; /* 2/3 */
            Nloop_all = (Nloop_all + 2) / 3; /* 1/3, round up */
        } else {
            Nloop_all *= t - 1;
        }
    } else if (t) {
        if (t == 1)
            Nloop_all += (Nloop_all + 1) / 2; /* 1.5, round up */
        Nloop_all *= t;
    }

    /* Portion of those iterations run with read-write access to the lane's
     * own chunk; the remainder runs over all of V with RW cleared. */
    Nloop_rw = 0;
    if (flags & __YESCRYPT_INIT_SHARED)
        Nloop_rw = Nloop_all;
    else if (flags & YESCRYPT_RW)
        Nloop_rw = Nloop_all / p;

    Nchunk &= ~(uint32_t)1; /* round down to even */
    Nloop_all++; Nloop_all &= ~(uint64_t)1; /* round up to even */
    Nloop_rw &= ~(uint64_t)1; /* round down to even */

#ifdef _OPENMP
#pragma omp parallel if (p > 1) default(none) private(i) shared(B, r, N, p, flags, V, NROM, shared, XY, S, s, Nchunk, Nloop_all, Nloop_rw)
    {
#pragma omp for
#endif
    /* Phase 1: each lane fills its own chunk of V and runs its RW share
     * of smix2 over that chunk. */
    for (i = 0; i < p; i++) {
        uint32_t Vchunk = i * Nchunk;
        uint8_t * Bp = &B[128 * r * i];
        salsa20_blk_t * Vp = &V[Vchunk * s];
#ifdef _OPENMP
        /* Per-thread scratch to avoid sharing XY across lanes. */
        salsa20_blk_t * XYp = &XY[i * (2 * s)];
#else
        salsa20_blk_t * XYp = XY;
#endif
        /* The last lane absorbs the remainder when p does not divide N. */
        uint32_t Np = (i < p - 1) ? Nchunk : (N - Vchunk);
        void * Sp = S ? ((uint8_t *)S + i * S_SIZE_ALL) : S;
        if (Sp)
            /* Initialize this lane's S-boxes by running a small scrypt
             * over the S region itself (pwxform disabled for that). */
            smix1(Bp, 1, S_SIZE_ALL / 128,
                flags & ~YESCRYPT_PWXFORM,
                Sp, NROM, shared, XYp, NULL);
        if (!(flags & __YESCRYPT_INIT_SHARED_2))
            smix1(Bp, r, Np, flags, Vp, NROM, shared, XYp, Sp);
        smix2(Bp, r, p2floor(Np), Nloop_rw, flags, Vp,
            NROM, shared, XYp, Sp);
    }

    /* Phase 2: remaining smix2 iterations over the entire V, read-only
     * (YESCRYPT_RW cleared) so lanes may run concurrently. */
    if (Nloop_all > Nloop_rw) {
#ifdef _OPENMP
#pragma omp for
#endif
        for (i = 0; i < p; i++) {
            uint8_t * Bp = &B[128 * r * i];
#ifdef _OPENMP
            salsa20_blk_t * XYp = &XY[i * (2 * s)];
#else
            salsa20_blk_t * XYp = XY;
#endif
            void * Sp = S ? ((uint8_t *)S + i * S_SIZE_ALL) : S;
            smix2(Bp, r, N, Nloop_all - Nloop_rw,
                flags & ~YESCRYPT_RW, V, NROM, shared, XYp, Sp);
        }
    }
#ifdef _OPENMP
    }
#endif
}
/**
* yescrypt_kdf(shared, local, passwd, passwdlen, salt, saltlen,
* N, r, p, t, flags, buf, buflen):
* Compute scrypt(passwd[0 .. passwdlen - 1], salt[0 .. saltlen - 1], N, r,
* p, buflen), or a revision of scrypt as requested by flags and shared, and
* write the result into buf. The parameters r, p, and buflen must satisfy
* r * p < 2^30 and buflen <= (2^32 - 1) * 32. The parameter N must be a power
* of 2 greater than 1. (This optimized implementation currently additionally
* limits N to the range from 8 to 2^31, but other implementation might not.)
*
* t controls computation time while not affecting peak memory usage. shared
* and flags may request special modes as described in yescrypt.h. local is
* the thread-local data structure, allowing to preserve and reuse a memory
* allocation across calls, thereby reducing its overhead.
*
* Return 0 on success; or -1 on error.
*/
static int
yescrypt_kdf(const yescrypt_shared_t * shared, yescrypt_local_t * local,
    const uint8_t * passwd, size_t passwdlen,
    const uint8_t * salt, size_t saltlen,
    uint64_t N, uint32_t r, uint32_t p, uint32_t t, yescrypt_flags_t flags,
    uint8_t * buf, size_t buflen)
{
    yescrypt_region_t tmp;
    uint64_t NROM;
    size_t B_size, V_size, XY_size, need;
    uint8_t * B, * S;
    salsa20_blk_t * V, * XY;
    uint8_t sha256[32];

/*
 * YESCRYPT_PARALLEL_SMIX is a no-op at p = 1 for its intended purpose,
 * so don't let it have side-effects.  Without this adjustment, it'd
 * enable the SHA-256 password pre-hashing and output post-hashing,
 * because any deviation from classic scrypt implies those.
 */
    if (p == 1)
        flags &= ~YESCRYPT_PARALLEL_SMIX;

    /* Sanity-check parameters */
    if (flags & ~YESCRYPT_KNOWN_FLAGS) {
        errno = EINVAL;
        return -1;
    }
#if SIZE_MAX > UINT32_MAX
    /* buflen <= (2^32 - 1) * 32, the PBKDF2-SHA256 output limit. */
    if (buflen > (((uint64_t)(1) << 32) - 1) * 32) {
        errno = EFBIG;
        return -1;
    }
#endif
    if ((uint64_t)(r) * (uint64_t)(p) >= (1 << 30)) {
        errno = EFBIG;
        return -1;
    }
    if (N > UINT32_MAX) {
        errno = EFBIG;
        return -1;
    }
    /* N must be a power of 2, at least 8. */
    if (((N & (N - 1)) != 0) || (N <= 7) || (r < 1) || (p < 1)) {
        errno = EINVAL;
        return -1;
    }
    /* With parallel smix, each lane's chunk must still be at least 8. */
    if ((flags & YESCRYPT_PARALLEL_SMIX) && (N / p <= 7)) {
        errno = EINVAL;
        return -1;
    }
    /* Guard the size computations below against size_t overflow. */
    if ((r > SIZE_MAX / 256 / p) ||
        (N > SIZE_MAX / 128 / r)) {
        errno = ENOMEM;
        return -1;
    }
#ifdef _OPENMP
    if (!(flags & YESCRYPT_PARALLEL_SMIX) &&
        (N > SIZE_MAX / 128 / (r * p))) {
        errno = ENOMEM;
        return -1;
    }
#endif
    if ((flags & YESCRYPT_PWXFORM) &&
#ifndef _OPENMP
        (flags & YESCRYPT_PARALLEL_SMIX) &&
#endif
        p > SIZE_MAX / S_SIZE_ALL) {
        errno = ENOMEM;
        return -1;
    }

    /* Derive the ROM block count from the shared region, if one is set. */
    NROM = 0;
    if (shared->shared1.aligned) {
        NROM = shared->shared1.aligned_size / ((size_t)128 * r);
        if (NROM > UINT32_MAX) {
            errno = EFBIG;
            return -1;
        }
        /* NROM must be a power of 2 >= 8, and a ROM requires RW mode. */
        if (((NROM & (NROM - 1)) != 0) || (NROM <= 7) ||
            !(flags & YESCRYPT_RW)) {
            errno = EINVAL;
            return -1;
        }
    }

    /* Allocate memory */
    V = NULL;
    V_size = (size_t)128 * r * N;
#ifdef _OPENMP
    if (!(flags & YESCRYPT_PARALLEL_SMIX))
        V_size *= p;
#endif
    need = V_size;
    if (flags & __YESCRYPT_INIT_SHARED) {
        /* ROM initialization: V lives in the caller's local region (which
         * becomes the ROM); B and XY go into a temporary region below. */
        if (local->aligned_size < need) {
            if (local->base || local->aligned ||
                local->base_size || local->aligned_size) {
                errno = EINVAL;
                return -1;
            }
            if (!alloc_region(local, need))
                return -1;
        }
        V = (salsa20_blk_t *)local->aligned;
        need = 0;
    }
    /* Each `need < X_size` test detects overflow of the running sum. */
    B_size = (size_t)128 * r * p;
    need += B_size;
    if (need < B_size) {
        errno = ENOMEM;
        return -1;
    }
    XY_size = (size_t)256 * r;
#ifdef _OPENMP
    XY_size *= p;
#endif
    need += XY_size;
    if (need < XY_size) {
        errno = ENOMEM;
        return -1;
    }
    if (flags & YESCRYPT_PWXFORM) {
        size_t S_size = S_SIZE_ALL;
#ifdef _OPENMP
        S_size *= p;
#else
        if (flags & YESCRYPT_PARALLEL_SMIX)
            S_size *= p;
#endif
        need += S_size;
        if (need < S_size) {
            errno = ENOMEM;
            return -1;
        }
    }
    if (flags & __YESCRYPT_INIT_SHARED) {
        if (!alloc_region(&tmp, need))
            return -1;
        B = (uint8_t *)tmp.aligned;
        XY = (salsa20_blk_t *)((uint8_t *)B + B_size);
    } else {
        /* Normal case: carve B, V, XY (and S) out of one reusable region. */
        init_region(&tmp);
        if (local->aligned_size < need) {
            if (free_region(local))
                return -1;
            if (!alloc_region(local, need))
                return -1;
        }
        B = (uint8_t *)local->aligned;
        V = (salsa20_blk_t *)((uint8_t *)B + B_size);
        XY = (salsa20_blk_t *)((uint8_t *)V + V_size);
    }
    S = NULL;
    if (flags & YESCRYPT_PWXFORM)
        S = (uint8_t *)XY + XY_size;

    /* Any deviation from classic scrypt (t or any flag set) enables the
     * SHA-256 password pre-hashing ... */
    if (t || flags) {
        SHA256_CTX ctx;
        SHA256_Init(&ctx);
        SHA256_Update(&ctx, passwd, passwdlen);
        SHA256_Final(sha256, &ctx);
        passwd = sha256;
        passwdlen = sizeof(sha256);
    }

    /* 1: (B_0 ... B_{p-1}) <-- PBKDF2(P, S, 1, p * MFLen) */
    PBKDF2_SHA256(passwd, passwdlen, salt, saltlen, 1, B, B_size);

    /* ... and in that case the password for the final PBKDF2 becomes the
     * first 32 bytes of B (passwd already points at sha256 here). */
    if (t || flags)
        memcpy(sha256, B, sizeof(sha256));

    if (p == 1 || (flags & YESCRYPT_PARALLEL_SMIX)) {
        smix(B, r, N, p, t, flags, V, NROM, shared, XY, S);
    } else {
        uint32_t i;

        /* 2: for i = 0 to p - 1 do -- independent classic-scrypt lanes */
#ifdef _OPENMP
#pragma omp parallel for default(none) private(i) shared(B, r, N, p, t, flags, V, NROM, shared, XY, S)
#endif
        for (i = 0; i < p; i++) {
            /* 3: B_i <-- MF(B_i, N) */
#ifdef _OPENMP
            smix(&B[(size_t)128 * r * i], r, N, 1, t, flags,
                &V[(size_t)2 * r * i * N],
                NROM, shared,
                &XY[(size_t)4 * r * i],
                S ? &S[S_SIZE_ALL * i] : S);
#else
            smix(&B[(size_t)128 * r * i], r, N, 1, t, flags, V,
                NROM, shared, XY, S);
#endif
        }
    }

    /* 5: DK <-- PBKDF2(P, B, 1, dkLen) */
    PBKDF2_SHA256(passwd, passwdlen, B, B_size, 1, buf, buflen);

/*
 * Except when computing classic scrypt, allow all computation so far
 * to be performed on the client.  The final steps below match those of
 * SCRAM (RFC 5802), so that an extension of SCRAM (with the steps so
 * far in place of SCRAM's use of PBKDF2 and with SHA-256 in place of
 * SCRAM's use of SHA-1) would be usable with yescrypt hashes.
 */
    if ((t || flags) && buflen == sizeof(sha256)) {
        /* Compute ClientKey */
        {
            HMAC_SHA256_CTX ctx;
            HMAC_SHA256_Init(&ctx, buf, buflen);
            HMAC_SHA256_Update(&ctx, salt, saltlen);
            HMAC_SHA256_Final(sha256, &ctx);
        }
        /* Compute StoredKey */
        {
            SHA256_CTX ctx;
            SHA256_Init(&ctx);
            SHA256_Update(&ctx, sha256, sizeof(sha256));
            SHA256_Final(buf, &ctx);
        }
    }

    /* Only non-NULL after an __YESCRYPT_INIT_SHARED allocation above. */
    if (free_region(&tmp))
        return -1;

    /* Success! */
    return 0;
}
|
GB_binop__rdiv_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__rdiv_int64
// A.*B function (eWiseMult): GB_AemultB__rdiv_int64
// A*D function (colscale): GB_AxD__rdiv_int64
// D*A function (rowscale): GB_DxB__rdiv_int64
// C+=B function (dense accum): GB_Cdense_accumB__rdiv_int64
// C+=b function (dense accum): GB_Cdense_accumb__rdiv_int64
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__rdiv_int64
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__rdiv_int64
// C=scalar+B GB_bind1st__rdiv_int64
// C=scalar+B' GB_bind1st_tran__rdiv_int64
// C=A+scalar GB_bind2nd__rdiv_int64
// C=A'+scalar GB_bind2nd_tran__rdiv_int64
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = GB_IDIV_SIGNED (bij, aij, 64)
/* Macros that specialize the generic templates included below for the
 * RDIV operator on int64_t.  This file is generator output (see the header
 * above): edits belong in the generator, not here. */
#define GB_ATYPE \
    int64_t

#define GB_BTYPE \
    int64_t

#define GB_CTYPE \
    int64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int64_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator: rdiv is division with the operands reversed, z = y / x
// (signed-integer edge cases are handled inside GB_IDIV_SIGNED)
#define GB_BINOP(z, x, y, i, j) \
    z = GB_IDIV_SIGNED (y, x, 64) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
// "(none)" is a placeholder -- NOTE(review): presumably never expanded when
// no CBLAS gateway exists for the operator; confirm in the templates.
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_RDIV || GxB_NO_INT64 || GxB_NO_RDIV_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense; the loop is supplied by the
// shared template, specialized via the GB_* macros above.
// NOTE(review): unlike the other kernels in this file there is no
// GB_DISABLE guard here -- presumably the caller checks it; confirm.
void GB_Cdense_ewise3_accum__rdiv_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense.  Returns GrB_NO_VALUE when this
// specialization is compiled out (GB_DISABLE), so the caller can fall back
// to the generic kernel.
GrB_Info GB_Cdense_ewise3_noaccum__rdiv_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C.  The slice
// arrays partition B's entries across ntasks tasks (produced by
// GB_ek_slice).  Returns GrB_NO_VALUE when compiled out via GB_DISABLE.
GrB_Info GB_Cdense_accumB__rdiv_int64
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b (passed type-erased via p_bwork) into the
// dense matrix C.  Returns GrB_NO_VALUE when compiled out via GB_DISABLE.
GrB_Info GB_Cdense_accumb__rdiv_int64
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable -- the braced block above always returns.
    // Harmless dead code emitted by the generator.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// The slice arrays partition A's entries over 'ntasks' parallel tasks; the
// typed output pointer Cx is consumed by the colscale template.
GrB_Info GB_AxD__rdiv_int64
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Row-scale counterpart of GB_AxD; the loop body comes from the rowscale
// template with Cx typed as int64_t.
GrB_Info GB_DxB__rdiv_int64
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// GB_FREE_ALL releases the ek-slice workspaces; all pointers start NULL so it
// is safe to invoke even if the template allocated none of them.
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__rdiv_int64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace slice pointers, allocated (if needed) inside the template
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// Same structure as GB_AaddB above: slice workspaces start NULL, the emult
// template does the work, and GB_FREE_ALL (defined above) releases them.
GrB_Info GB_AemultB__rdiv_int64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// For RDIV, z = f(x,bij) computes bij / x using signed 64-bit integer division
// semantics (GB_IDIV_SIGNED handles the divide-by-zero cases).
GrB_Info GB_bind1st__rdiv_int64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t x = (*((int64_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only operate on entries present in the bitmap Bb
        if (GBB (Bb, p))
        {
            int64_t bij = Bx [p] ;
            Cx [p] = GB_IDIV_SIGNED (bij, x, 64) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// For RDIV, z = f(aij,y) computes y / aij using signed 64-bit integer division
// semantics (GB_IDIV_SIGNED handles the divide-by-zero cases).
GrB_Info GB_bind2nd__rdiv_int64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t y = (*((int64_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only operate on entries present in the bitmap Ab
        if (GBB (Ab, p))
        {
            int64_t aij = Ax [p] ;
            Cx [p] = GB_IDIV_SIGNED (y, aij, 64) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is the per-entry kernel consumed by GB_unop_transpose.c.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = GB_IDIV_SIGNED (aij, x, 64) ; \
}
GrB_Info GB_bind1st_tran__rdiv_int64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// re-establish GB_ATYPE for code that follows (generated boilerplate; the
// redefinition is identical here, so this is a no-op for this operator)
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// For RDIV this computes y / aij per transposed entry.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = GB_IDIV_SIGNED (y, aij, 64) ; \
}
GrB_Info GB_bind2nd_tran__rdiv_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
rnn_helpers.h | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <algorithm>
#include <functional>
#include <future>
#include <string>
#include <vector>
#include "gsl/span"
#include "gsl/gsl_algorithm"
#include "core/common/common.h"
#include "core/common/task_thread_pool.h"
#include "core/common/logging/logging.h"
#include "core/framework/allocator.h"
#include "core/util/math.h"
#include "core/util/math_cpuonly.h"
namespace onnxruntime {
class Tensor;
class OpKernelContext;
namespace rnn {
namespace detail {
// RNN processing direction, matching the ONNX 'direction' attribute values.
enum Direction {
kForward = 0,
kReverse = 1,
kBidirectional = 2
};
// Convert the ONNX 'direction' attribute string to a Direction enum value.
// Throws (ORT_THROW) for any string other than the three accepted values.
inline Direction MakeDirection(const std::string& direction) {
  if (direction == "forward")
    return kForward;
  if (direction == "reverse")
    return kReverse;
  if (direction == "bidirectional")
    return kBidirectional;
  ORT_THROW("Invalid 'direction' argument of '", direction,
            "'. Must be one of 'forward', 'reverse', or 'bidirectional'.");
}
/** Allocate a unique_ptr using allocator_, and return a span to the allocated memory so usage is safe
@param allocator IAllocator to use for the allocation.
@param size Allocation size. Number of elements of type TAlloc, or total size if TAlloc is 'void'.
@param unique_ptr unique_ptr that will control the lifetime of the allocated memory.
@param fill If true, fill the allocated memory with fill_value.
@param fill_value Value to use if 'fill' is true.
@returns A span to provide bounds checked access to the allocated memory.
@remarks The returned span is only valid while 'unique_ptr' owns the memory.
*/
template <typename TAlloc>
gsl::span<TAlloc> Allocate(std::shared_ptr<IAllocator> allocator,
size_t size,
IAllocatorUniquePtr<TAlloc>& unique_ptr,
bool fill = false, TAlloc fill_value = TAlloc{}) {
unique_ptr = IAllocator::MakeUniquePtr<TAlloc>(allocator, size);
auto span = gsl::make_span(unique_ptr.get(), size);
if (fill) {
// Don't use span.begin() here: it can cause a performance issue and stop the compiler optimizing the fill
std::fill_n(unique_ptr.get(), size, fill_value);
}
return span;
}
// validate the common inputs to RNN, LSTM and GRU operators
// (shape/consistency checks only; defined in the corresponding .cc file)
Status ValidateCommonRnnInputs(const Tensor& X,
const Tensor& W,
const Tensor& R,
const Tensor* B,
int WRB_dim_1_multipler, // multiplier used with hidden_size for W, R and B inputs
const Tensor* sequence_lens,
const Tensor* initial_h,
int64_t num_directions,
int64_t hidden_size);
/// Copy the input range [input_begin, input_end) into 'output' repeatedly.
/// @param input_begin Beginning of input
/// @param input_end End of input
/// @param output Output iterator
/// @param repetitions Number of times to repeat the copy. The caller must ensure
///        the output has room for repetitions * distance(input_begin, input_end).
/// @returns The output iterator positioned one past the last element written
template <typename TInIter, typename TOutIter>
TOutIter RepeatVectorToConstructArray(TInIter input_begin,
                                      TInIter input_end,
                                      TOutIter output,
                                      int64_t repetitions) {
  int64_t remaining = repetitions;
  while (remaining > 0) {
    output = std::copy(input_begin, input_end, output);
    --remaining;
  }
  return output;
}
// reverse an LSTM or GRU sequence which has shape [seq_length, batch_size, hidden_size]
// and output to shape [seq_length, num_directions, batch_size, hidden_size]
// Per batch entry, the first sequence_lengths[i] steps are written reversed;
// steps past the sequence length are copied through in their original order.
template <typename T>
void ReverseSequence(gsl::span<const T> inputs,
gsl::span<T> inputs_reverse,
gsl::span<const int> sequence_lengths,
const int max_sequence_length,
const int batch_size,
const int input_size,
const int num_directions) {
for (int i = 0; i < batch_size; i++) {
int seq_len = sequence_lengths[i];
if (seq_len == 0)
continue;
// Parallel execute the loop.
// NOTE(review): '#pragma omp for' without an enclosing 'parallel' is an
// orphaned worksharing directive — it only parallelizes when the caller is
// already inside an omp parallel region; otherwise it runs serially. Confirm
// callers invoke this from within a parallel region.
#pragma omp for
for (int j = 0; j < seq_len; j++) {
gsl::span<const T> src = inputs.subspan(j * batch_size * input_size + i * input_size, input_size);
gsl::span<T> dest = inputs_reverse.subspan(num_directions * (seq_len - j - 1) * batch_size * input_size + i * input_size, input_size);
// Use gsl::copy instead of std::copy() to allow compiler to optimize the code
gsl::copy(src, dest);
}
#pragma omp for
for (int j = seq_len; j < max_sequence_length; j++) {
gsl::span<const T> src = inputs.subspan(j * batch_size * input_size + i * input_size, input_size);
gsl::span<T> dest = inputs_reverse.subspan(num_directions * j * batch_size * input_size + i * input_size, input_size);
// Use gsl::copy instead of std::copy() to allow compiler to optimize the code
gsl::copy(src, dest);
}
}
}
// A has size M x K, B has size N x K (transposed), and C has size M x N
// We check that A, B and C are large enough before calling the lower level GEMM implementation
// Computes C = alpha * A * B^T + beta * C via GemmEx (note CblasTrans on B).
template <typename TSpanAIter, typename TSpanBIter, typename TSpanCIter>
void ComputeGemm(const int M,
const int N,
const int K,
const float alpha,
TSpanAIter A,
TSpanAIter A_end,
const int lda,
TSpanBIter B,
TSpanBIter B_end,
const int ldb,
const float beta,
TSpanCIter C,
TSpanCIter C_end,
const int ldc) {
// validate all the inputs
// need to use the lda/ldb/ldc strides which should be >= the columns for the span
// the expressions below compute the index of the last element each GEMM will touch
ORT_ENFORCE(lda >= K && ldb >= K && ldc >= N);
ORT_ENFORCE(A + (M * lda - (lda - K)) <= A_end);
ORT_ENFORCE(B + (N * ldb - (ldb - K)) <= B_end);
ORT_ENFORCE(C + (M * ldc - (ldc - N)) <= C_end);
::onnxruntime::math::GemmEx<float, CPUMathUtil>(
CblasNoTrans, CblasTrans,
M, N, K, alpha,
&*A, lda,
&*B, ldb, beta,
&*C, ldc, &CPUMathUtil::Instance());
}
// helper to convert a span to a raw pointer
// after validating the memory covered by the span supports the size required
// (iterator form: returns the address of the element 'cur' refers to)
template <typename T>
const T* SafeRawConstPointer(typename gsl::span<T>::const_iterator cur,
typename gsl::span<T>::const_iterator end,
size_t size) {
ORT_ENFORCE(cur + size <= end);
return &*cur;
}
// helper to convert a span to a raw pointer
// after validating the memory covered by the span supports the size required
// (span + offset form: returns a pointer to span[offset])
template <typename T>
const T* SafeRawConstPointer(gsl::span<T> span, size_t offset, size_t size) {
  ORT_ENFORCE(offset + size <= size_t(span.size()));
  // Bug fix: advance by 'offset'. Previously this returned span.data() and
  // silently ignored the offset, which is inconsistent with the non-const
  // SafeRawPointer overload and returns the wrong address whenever offset != 0.
  return span.data() + offset;
}
// helper to convert a span to a raw pointer
// after validating the memory covered by the span supports the size required
// (mutable iterator form of SafeRawConstPointer above)
template <typename T>
T* SafeRawPointer(typename gsl::span<T>::iterator cur,
typename gsl::span<T>::iterator end,
size_t size) {
ORT_ENFORCE(cur + size <= end);
return &*cur;
}
// helper to convert a span to a raw pointer
// after validating the memory covered by the span supports the size required
// NOTE(review): the 'typename' before the non-dependent parameter type is
// unusual; some compilers only accept it as an extension — confirm intended.
template <typename T>
T* SafeRawPointer(typename gsl::span<T> span, size_t offset, size_t size) {
ORT_ENFORCE(offset + size <= size_t(span.size()));
return span.data() + offset;
}
// Run lambda(i) for i = 0, step, 2*step, ... < max. With NOTHREADS defined the
// calls run inline and in order; otherwise each call is queued on the thread
// pool and this function waits for completion, logging and rethrowing the
// first exception encountered.
template <typename TLambda>
void ExecuteLambdaInParallel(const std::string& name, TLambda lambda, int max, int step,
                             TaskThreadPool& ttp, const ::onnxruntime::logging::Logger& logger) {
  // #define NOTHREADS to execute the lambdas directly and in order if you need to do that to debug
#ifdef NOTHREADS
  ORT_UNUSED_PARAMETER(ttp);
  ORT_UNUSED_PARAMETER(logger);
  for (int i = 0; i < max; i += step) {
    (void)name;
    std::bind(lambda, i)();
  }
#else
  std::vector<std::future<void> > task_results{};
  // Fix: reserve ceil(max/step) slots. The previous std::ceil(max / step)
  // truncated first (integer division), under-reserving whenever step does
  // not evenly divide max.
  task_results.reserve(static_cast<size_t>((max + step - 1) / step));
  for (int i = 0; i < max; i += step) {
    std::packaged_task<void()> task{std::bind(lambda, i)};
    task_results.emplace_back(task.get_future());
    ttp.RunTask(std::move(task));
  }
  try {
    // wait for all and propagate any exceptions
    for (auto& future : task_results)
      future.get();
  } catch (const std::exception& ex) {
    LOGS(logger, ERROR) << name << " - exception running tasks: " << ex.what();
    throw;
  }
#endif
}
// Debug helper: dump a row x col float matrix. Defined in the .cc file;
// 'offset' and 'col_width' presumably control the starting element and print
// width — confirm against the implementation before relying on them.
void DumpMatrixImpl(const std::string& name, const float* src, int row, int col,
int offset = 0, int col_width = -1);
// Helper class to wrap the processing of the activation funcs and any alpha/beta values.
// The alpha/beta values are consumed in the order of the activation funcs. once they run out
// defaults will be used as needed.
// The Entries property contains the normalized function names and the alpha/beta value to use.
class ActivationFuncs {
 public:
  // One normalized activation: name plus the alpha/beta parameters to apply.
struct Entry {
const std::string name;
const float alpha;
const float beta;
};
ActivationFuncs() = default;
// Pairs each function name with the next available alpha/beta (defaults once exhausted).
ActivationFuncs(const std::vector<std::string>& funcs,
const std::vector<float>& alphas,
const std::vector<float>& betas);
// Read-only access to the normalized entries, in input order.
const std::vector<Entry>& Entries() const {
return entries_;
}
private:
std::vector<Entry> entries_;
};
namespace deepcpu {
// Function-pointer aliases for the low-level bias / clip / activation / gate
// kernels implemented in the .cc file.
using AddBiasIntoFuncPtr = void (*)(const float*, float*, const int);
using ClipWithBiasFuncPtr = void (*)(const float, const float*, float*, const int);
using ActivationFuncPtr = void (*)(float*, const int, const float, const float);
using ActivationFuncBPtr = void (*)(const float*, float*, const int, const float, const float);
using LstmMergeGatesFuncPtr = void (*)(const float*, float*, const float*, float*, const int, const float, const float);
using GruResetGateFuncPtr = void (*)(const float*, float*, float*, const int, const float, const float);
using GruOutputGateFuncPtr = void (*)(float*, const float*, const float*, float*, const int, const float, const float);
// Lookup of a kernel by its (normalized) activation name.
ActivationFuncPtr ActivationFuncByName(const std::string& func);
LstmMergeGatesFuncPtr LstmMergeGatesFuncByName(const std::string& func);
GruResetGateFuncPtr GruResetGateFuncByName(const std::string& func);
GruOutputGateFuncPtr GruOutputGateFuncByName(const std::string& func);
// Bias / clipping primitives.
void add_bias_into_ignore(const float* ignored, float* pd, const int c);
void add_bias_into(const float* ps, float* pd, const int c);
void clip(const float b, float* pd, const int c);
void clip_add_bias(const float b, const float* pb, float* pd, const int c);
void clip_ignore_bias(const float b, const float* pb, float* pd, const int c);
// Activation kernels ('_m' variants operate on merged gate buffers).
void sigmoid_m(const float* ps1, float* ps1_c, const float* ps2, float* pd, int c, const float alpha, const float beta);
void tanh_m(const float* ps1, float* ps1_c, const float* ps2, float* pd, int c, const float alpha, const float beta);
void relu_m(const float* ps1, float* ps1_c, const float* ps2, float* pd, int c, const float alpha, const float beta);
void sigmoid_exact_m(const float* ps1, float* ps1_c, const float* ps2, float* pd, int c, const float alpha, const float beta);
void tanh_exact_m(const float* ps1, float* ps1_c, const float* ps2, float* pd, int c, const float alpha, const float beta);
void sigmoid(float* pd, int c, const float alpha, const float beta);
void tanh(float* pd, int c, const float alpha, const float beta);
void relu(float* pd, int c, const float alpha, const float beta);
void sigmoid_exact(float* pd, int c, const float alpha, const float beta);
void tanh_exact(float* pd, int c, const float alpha, const float beta);
// LSTM / GRU gate fusions.
void merge_lstm_gates_to_memory(const float* pprev, const float* pi, const float* pf, const float* pg, float* pcurr, const int c);
void gru_reset_gate_tanh(const float* ps1, float* ps2, float* pd, const int c, const float alpha, const float beta);
void gru_reset_gate_sigmoid(const float* ps1, float* ps2, float* pd, const int c, const float alpha, const float beta);
void gru_reset_gate_relu(const float* ps1, float* ps2, float* pd, const int c, const float alpha, const float beta);
void gru_output_gate_tanh(float* ph, const float* pz, const float* ps, float* po, const int c, const float alpha, const float beta);
void gru_output_gate_sigmoid(float* ph, const float* pz, const float* ps, float* po, const int c, const float alpha, const float beta);
void gru_output_gate_relu(float* ph, const float* pz, const float* ps, float* po, const int c, const float alpha, const float beta);
// Accumulate the element-wise product into dest: dest[i] += op1[i] * op2[i].
// Note this adds to dest rather than overwriting it.
inline void elementwise_product(const float* op1, const float* op2, float* dest, const int size) {
  for (int idx = 0; idx != size; ++idx) {
    dest[idx] += op1[idx] * op2[idx];
  }
}
// Accumulate src into dest: dest[i] += src[i].
inline void elementwise_sum1(const float* src, float* dest, const int size) {
  int idx = 0;
  while (idx < size) {
    dest[idx] += src[idx];
    ++idx;
  }
}
// Accumulate the sum of two sources into dest: dest[i] += src1[i] + src2[i].
inline void elementwise_sum2(const float* src1, const float* src2, float* dest, const int size) {
  for (int idx = 0; idx < size; ++idx) {
    const float increment = src1[idx] + src2[idx];
    dest[idx] += increment;
  }
}
} // namespace deepcpu
} // namespace detail
} // namespace rnn
} // namespace onnxruntime
|
vectorPrimitives.c | /*
The MIT License (MIT)
Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus, Rajesh Gandham
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "agmg.h"
// Euclidean (L2) norm of the n-vector a: sqrt(sum a[i]^2), summed in parallel.
dfloat norm(dlong n, dfloat *a){
  dfloat sumsq = 0.;
  #pragma omp parallel for reduction(+:sumsq)
  for(dlong idx=0; idx<n; idx++)
    sumsq += a[idx]*a[idx];
  return sqrt(sumsq);
}
// Dot product of the n-vectors a and b, summed in parallel.
dfloat innerProd(dlong n, dfloat *a, dfloat *b){
  dfloat dot = 0.;
  #pragma omp parallel for reduction(+:dot)
  for(dlong idx=0; idx<n; idx++){
    dot += a[idx]*b[idx];
  }
  return dot;
}
// Fused pair of dot products sharing one pass over a:
// aDotbc[0] = a.b, aDotbc[1] = a.c.
void doubleInnerProd(dlong n, dfloat *aDotbc, dfloat *a, dfloat *b, dfloat *c) {
dfloat aDotb = 0.;
dfloat aDotc = 0.;
#pragma omp parallel for reduction(+:aDotb) reduction(+:aDotc)
for(dlong i=0; i<n; i++) {
aDotb += a[i]*b[i];
aDotc += a[i]*c[i];
}
aDotbc[0] = aDotb;
aDotbc[1] = aDotc;
}
// returns aDotbc[0] = a\dot b, aDotbc[1] = a\dot c, aDotbc[2] = b\dot b,
// optionally weighted entry-wise by w (used by the k-cycle AMG solver).
void kcycleCombinedOp1(dlong n, dfloat *aDotbc, dfloat *a,
dfloat *b, dfloat *c, dfloat* w, bool weighted) {
dfloat aDotb = 0.;
dfloat aDotc = 0.;
dfloat bDotb = 0.;
if (weighted) {
#pragma omp parallel for reduction(+:aDotb) reduction(+:aDotc) reduction(+:bDotb)
for(dlong i=0; i<n; i++) {
aDotb += w[i]*a[i]*b[i];
aDotc += w[i]*a[i]*c[i];
bDotb += w[i]*b[i]*b[i];
}
} else {
#pragma omp parallel for reduction(+:aDotb) reduction(+:aDotc) reduction(+:bDotb)
for(dlong i=0; i<n; i++) {
aDotb += a[i]*b[i];
aDotc += a[i]*c[i];
bDotb += b[i]*b[i];
}
}
aDotbc[0] = aDotb;
aDotbc[1] = aDotc;
aDotbc[2] = bDotb;
}
// returns aDotbcd[0] = a\dot b, aDotbcd[1] = a\dot c, aDotbcd[2] = a\dot d,
// optionally weighted entry-wise by w.
void kcycleCombinedOp2(dlong n, dfloat *aDotbcd, dfloat *a, dfloat *b,
dfloat *c, dfloat* d, dfloat *w, bool weighted) {
dfloat aDotb = 0.;
dfloat aDotc = 0.;
dfloat aDotd = 0.;
if (weighted) {
#pragma omp parallel for reduction(+:aDotb) reduction(+:aDotc) reduction(+:aDotd)
for(dlong i=0; i<n; i++) {
aDotb += w[i]*a[i]*b[i];
aDotc += w[i]*a[i]*c[i];
aDotd += w[i]*a[i]*d[i];
}
} else {
#pragma omp parallel for reduction(+:aDotb) reduction(+:aDotc) reduction(+:aDotd)
for(dlong i=0; i<n; i++) {
aDotb += a[i]*b[i];
aDotc += a[i]*c[i];
aDotd += a[i]*d[i];
}
}
aDotbcd[0] = aDotb;
aDotbcd[1] = aDotc;
aDotbcd[2] = aDotd;
}
// In-place AXPBY update: y = beta*y + alpha*x.
void vectorAdd(dlong n, dfloat alpha, dfloat *x, dfloat beta, dfloat *y){
  #pragma omp parallel for
  for(dlong idx=0; idx<n; idx++){
    y[idx] = alpha*x[idx] + beta*y[idx];
  }
}
// y = beta*y + alpha*x, and return y\dot y
// (optionally weighted: returns sum w[i]*y[i]*y[i] when 'weighted' is set)
dfloat vectorAddInnerProd(dlong n, dfloat alpha, dfloat *x, dfloat beta, dfloat *y,
dfloat *w, bool weighted){
dfloat result = 0.;
if (weighted) {
#pragma omp parallel for reduction(+:result)
for(dlong i=0; i<n; i++) {
y[i] = beta*y[i] + alpha*x[i];
result += w[i]*y[i]*y[i];
}
} else {
#pragma omp parallel for reduction(+:result)
for(dlong i=0; i<n; i++) {
y[i] = beta*y[i] + alpha*x[i];
result += y[i]*y[i];
}
}
return result;
}
// Element-wise (Hadamard) scaling in place: b[i] *= a[i].
void dotStar(dlong m, dfloat *a, dfloat *b){
  #pragma omp parallel for
  for(dlong idx=0; idx<m; idx++)
    b[idx] = b[idx]*a[idx];
}
// Scale every entry of a by alpha in place.
void scaleVector(dlong m, dfloat *a, dfloat alpha){
  #pragma omp parallel for
  for(dlong idx=0; idx<m; idx++)
    a[idx] = alpha*a[idx];
}
// Fill a with the constant alpha.
void setVector(dlong m, dfloat *a, dfloat alpha){
  #pragma omp parallel for
  for(dlong idx=0; idx<m; idx++)
    a[idx] = alpha;
}
// Add the scalar alpha to every entry of a in place.
void addScalar(dlong m, dfloat alpha, dfloat *a){
  #pragma omp parallel for
  for(dlong idx=0; idx<m; idx++)
    a[idx] = a[idx] + alpha;
}
// Sum of the entries of a, accumulated in parallel.
dfloat sumVector(dlong m, dfloat *a){
  dfloat total = 0.;
  #pragma omp parallel for reduction(+:total)
  for (dlong idx=0; idx<m; idx++)
    total += a[idx];
  return total;
}
// Fill a with uniform [0,1) pseudo-random values. Deliberately serial:
// drand48() has hidden global state and is not thread-safe.
void randomize(dlong m, dfloat *a){
for(dlong i=0; i<m; i++)
a[i] = (dfloat) drand48();
}
// Largest absolute value among the entries of a (0 for an empty vector).
dfloat maxEntry(dlong n, dfloat *a){
  if(n == 0)
    return 0;
  dfloat best = 0.;
  for(dlong idx=0; idx<n; idx++){
    const dfloat magnitude = (a[idx] < 0) ? -a[idx] : a[idx];
    if(best < magnitude)
      best = magnitude;
  }
  return best;
}
// Device (OCCA) wrappers: each launches the corresponding parAlmond kernel,
// skipping the launch entirely for zero-length vectors.
void scaleVector(parAlmond_t *parAlmond, dlong N, occa::memory o_a, dfloat alpha){
if (N) parAlmond->scaleVectorKernel(N, alpha, o_a);
}
void setVector(parAlmond_t *parAlmond, dlong N, occa::memory o_a, dfloat alpha){
if (N) parAlmond->setVectorKernel(N, alpha, o_a);
}
// Device sum: one partial result per block is produced in o_rho, copied back,
// and reduced on the host.
dfloat sumVector(parAlmond_t *parAlmond, dlong N, occa::memory o_a){
dlong numBlocks = ((N+RDIMX*RDIMY-1)/(RDIMX*RDIMY))/RLOAD;
if(!numBlocks) numBlocks = 1;
if (N) parAlmond->sumVectorKernel(numBlocks,N,o_a,parAlmond->o_rho);
// NOTE(review): o_rho is copied back even when N==0, in which case the kernel
// never ran — confirm callers never pass N==0 or that o_rho starts zeroed.
parAlmond->o_rho.copyTo(parAlmond->rho,numBlocks*sizeof(dfloat),0);
dfloat alpha =0.;
#pragma omp parallel for reduction(+:alpha)
for (dlong i=0; i<numBlocks; i++) {
alpha += parAlmond->rho[i];
}
return alpha;
}
void addScalar(parAlmond_t *parAlmond, dlong N, dfloat alpha, occa::memory o_a){
if (N) parAlmond->addScalarKernel(N, alpha, o_a);
}
void dotStar(parAlmond_t *parAlmond, dlong N, occa::memory o_a, occa::memory o_b){
if (N) parAlmond->simpleDotStarKernel(N, o_a, o_b);
}
// Three-operand variant; presumably o_c = beta*o_c + alpha*(o_a .* o_b) —
// confirm against the dotStarKernel source.
void dotStar(parAlmond_t *parAlmond, dlong N, dfloat alpha, occa::memory o_a,
occa::memory o_b, dfloat beta, occa::memory o_c){
if (N) parAlmond->dotStarKernel(N, alpha, beta, o_a, o_b, o_c);
}
// Device dot product of o_x and o_y: per-block partial sums are copied back
// and reduced on the host.
dfloat innerProd(parAlmond_t *parAlmond, dlong N,
occa::memory o_x, occa::memory o_y){
dlong numBlocks = ((N+RDIMX*RDIMY-1)/(RDIMX*RDIMY))/RLOAD;
if(!numBlocks) numBlocks = 1;
// NOTE(review): unlike the other wrappers in this file there is no 'if (N)'
// guard on this launch — confirm the kernel tolerates N==0.
parAlmond->innerProdKernel(numBlocks,N,o_x,o_y,parAlmond->o_rho);
parAlmond->o_rho.copyTo(parAlmond->rho,numBlocks*sizeof(dfloat),0);
dfloat result =0.;
#pragma omp parallel for reduction(+:result)
for (dlong i=0; i<numBlocks; i++) {
result += parAlmond->rho[i];
}
return result;
}
// returns aDotbc[0] = a\dot b, aDotbc[1] = a\dot c, aDotbc[2] = b\dot b,
// computed on the device; each block writes 3 partial sums to o_rho which are
// reduced on the host.
void kcycleCombinedOp1(parAlmond_t *parAlmond, dlong N, dfloat *aDotbc,
occa::memory o_a, occa::memory o_b,
occa::memory o_c, occa::memory o_w, bool weighted) {
dlong numBlocks = ((N+RDIMX*RDIMY-1)/(RDIMX*RDIMY))/RLOAD;
if(!numBlocks) numBlocks = 1;
if (weighted) {
parAlmond->kcycleWeightedCombinedOp1Kernel(numBlocks,N,o_a,o_b,o_c,o_w,parAlmond->o_rho);
} else {
parAlmond->kcycleCombinedOp1Kernel(numBlocks,N,o_a,o_b,o_c,parAlmond->o_rho);
}
parAlmond->o_rho.copyTo(parAlmond->rho,3*numBlocks*sizeof(dfloat),0);
dfloat aDotb = 0., aDotc = 0., bDotb = 0.;
#pragma omp parallel for reduction(+:aDotb) reduction(+:aDotc) reduction(+:bDotb)
for(dlong i=0; i<numBlocks; i++) {
aDotb += parAlmond->rho[3*i+0];
aDotc += parAlmond->rho[3*i+1];
bDotb += parAlmond->rho[3*i+2];
}
aDotbc[0] = aDotb;
aDotbc[1] = aDotc;
aDotbc[2] = bDotb;
}
// returns aDotbcd[0] = a\dot b, aDotbcd[1] = a\dot c, aDotbcd[2] = a\dot d,
// device counterpart of the host kcycleCombinedOp2 above.
void kcycleCombinedOp2(parAlmond_t *parAlmond, dlong N, dfloat *aDotbcd,
occa::memory o_a, occa::memory o_b,
occa::memory o_c, occa::memory o_d,
occa::memory o_w, bool weighted) {
dlong numBlocks = ((N+RDIMX*RDIMY-1)/(RDIMX*RDIMY))/RLOAD;
if(!numBlocks) numBlocks = 1;
if (weighted) {
parAlmond->kcycleWeightedCombinedOp2Kernel(numBlocks,N,o_a,o_b,o_c,o_d,o_w,parAlmond->o_rho);
} else {
parAlmond->kcycleCombinedOp2Kernel(numBlocks,N,o_a,o_b,o_c,o_d,parAlmond->o_rho);
}
parAlmond->o_rho.copyTo(parAlmond->rho,3*numBlocks*sizeof(dfloat),0);
dfloat aDotb = 0., aDotc = 0., aDotd = 0.;
#pragma omp parallel for reduction(+:aDotb) reduction(+:aDotc) reduction(+:aDotd)
for(dlong i=0; i<numBlocks; i++) {
aDotb += parAlmond->rho[3*i+0];
aDotc += parAlmond->rho[3*i+1];
aDotd += parAlmond->rho[3*i+2];
}
aDotbcd[0] = aDotb;
aDotbcd[1] = aDotc;
aDotbcd[2] = aDotd;
}
// y = beta*y + alpha*x, and return y\dot y
// (device version; the fused kernel also writes per-block partial sums of the
// (optionally weighted) inner product to o_rho, reduced on the host)
dfloat vectorAddInnerProd(parAlmond_t *parAlmond, dlong N, dfloat alpha, occa::memory o_x,
dfloat beta, occa::memory o_y,
occa::memory o_w, bool weighted){
dlong numBlocks = ((N+RDIMX*RDIMY-1)/(RDIMX*RDIMY))/RLOAD;
if(!numBlocks) numBlocks = 1;
if (weighted) {
parAlmond->vectorAddWeightedInnerProdKernel(numBlocks,N,alpha,beta,o_x,o_y,o_w,parAlmond->o_rho);
} else {
parAlmond->vectorAddInnerProdKernel(numBlocks,N,alpha,beta,o_x,o_y,parAlmond->o_rho);
}
parAlmond->o_rho.copyTo(parAlmond->rho,numBlocks*sizeof(dfloat),0);
dfloat result =0.;
#pragma omp parallel for reduction(+:result)
for (dlong i=0; i<numBlocks; i++) {
result += parAlmond->rho[i];
}
return result;
}
// o_y = beta*o_y + alpha*o_x on the device.
void vectorAdd(parAlmond_t *parAlmond, dlong N, dfloat alpha, occa::memory o_x, dfloat beta, occa::memory o_y){
parAlmond->vectorAddKernel(N, alpha, beta, o_x, o_y);
}
// Three-operand device variant writing into o_z (see vectorAddKernel2).
void vectorAdd(parAlmond_t *parAlmond, dlong N, dfloat alpha, occa::memory o_x,
dfloat beta, occa::memory o_y, occa::memory o_z){
parAlmond->vectorAddKernel2(N, alpha, beta, o_x, o_y, o_z);
}
|
lotus5_fmt_plug.c | //original work by Jeff Fay
//some optimisations by bartavelle at bandecon.com
/* OpenMP support and further optimizations (including some code rewrites)
* by Solar Designer */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_lotus5;
#elif FMT_REGISTERS_H
john_register_one(&fmt_lotus5);
#else
#include <stdio.h>
#include <string.h>
#include "misc.h"
#include "formats.h"
#include "common.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include "memdbg.h"
/* Number of passwords processed per inner-loop interleave: 3 on x86-64
 * (more registers available), otherwise 2. */
#ifdef __x86_64__
#define LOTUS_N 3
#define LOTUS_N_STR " X3"
#else
#define LOTUS_N 2
#define LOTUS_N_STR " X2"
#endif
/*preprocessor constants that John The Ripper likes*/
#define FORMAT_LABEL "lotus5"
#define FORMAT_NAME "Lotus Notes/Domino 5"
#define ALGORITHM_NAME "8/" ARCH_BITS_STR LOTUS_N_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 16
#define CIPHERTEXT_LENGTH 32
#define BINARY_SIZE 16
#define SALT_SIZE 0
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT LOTUS_N
/* Must be divisible by any LOTUS_N (thus, by 2 and 3) */
#define MAX_KEYS_PER_CRYPT 0x900
/*A struct used for JTR's benchmarks*/
/* Known digest/password pairs used as self-tests; NULL-terminated. */
static struct fmt_tests tests[] = {
{"06E0A50B579AD2CD5FFDC48564627EE7", "secret"},
{"355E98E7C7B59BD810ED845AD0FD2FC4", "password"},
{"CD2D90E8E00D8A2A63A81F531EA8A9A3", "lotus"},
{"69D90B46B1AC0912E5CCF858094BBBFC", "dirtydog"},
{NULL}
};
/* The 256-entry Lotus substitution box. The first 48 bytes are repeated at
 * the end (304 entries total), presumably so lookups of the form
 * magic[x + small_offset] need no masking — confirm against the users of
 * this table in the hashing loop. */
static const unsigned char lotus_magic_table[] = {
0xbd, 0x56, 0xea, 0xf2, 0xa2, 0xf1, 0xac, 0x2a,
0xb0, 0x93, 0xd1, 0x9c, 0x1b, 0x33, 0xfd, 0xd0,
0x30, 0x04, 0xb6, 0xdc, 0x7d, 0xdf, 0x32, 0x4b,
0xf7, 0xcb, 0x45, 0x9b, 0x31, 0xbb, 0x21, 0x5a,
0x41, 0x9f, 0xe1, 0xd9, 0x4a, 0x4d, 0x9e, 0xda,
0xa0, 0x68, 0x2c, 0xc3, 0x27, 0x5f, 0x80, 0x36,
0x3e, 0xee, 0xfb, 0x95, 0x1a, 0xfe, 0xce, 0xa8,
0x34, 0xa9, 0x13, 0xf0, 0xa6, 0x3f, 0xd8, 0x0c,
0x78, 0x24, 0xaf, 0x23, 0x52, 0xc1, 0x67, 0x17,
0xf5, 0x66, 0x90, 0xe7, 0xe8, 0x07, 0xb8, 0x60,
0x48, 0xe6, 0x1e, 0x53, 0xf3, 0x92, 0xa4, 0x72,
0x8c, 0x08, 0x15, 0x6e, 0x86, 0x00, 0x84, 0xfa,
0xf4, 0x7f, 0x8a, 0x42, 0x19, 0xf6, 0xdb, 0xcd,
0x14, 0x8d, 0x50, 0x12, 0xba, 0x3c, 0x06, 0x4e,
0xec, 0xb3, 0x35, 0x11, 0xa1, 0x88, 0x8e, 0x2b,
0x94, 0x99, 0xb7, 0x71, 0x74, 0xd3, 0xe4, 0xbf,
0x3a, 0xde, 0x96, 0x0e, 0xbc, 0x0a, 0xed, 0x77,
0xfc, 0x37, 0x6b, 0x03, 0x79, 0x89, 0x62, 0xc6,
0xd7, 0xc0, 0xd2, 0x7c, 0x6a, 0x8b, 0x22, 0xa3,
0x5b, 0x05, 0x5d, 0x02, 0x75, 0xd5, 0x61, 0xe3,
0x18, 0x8f, 0x55, 0x51, 0xad, 0x1f, 0x0b, 0x5e,
0x85, 0xe5, 0xc2, 0x57, 0x63, 0xca, 0x3d, 0x6c,
0xb4, 0xc5, 0xcc, 0x70, 0xb2, 0x91, 0x59, 0x0d,
0x47, 0x20, 0xc8, 0x4f, 0x58, 0xe0, 0x01, 0xe2,
0x16, 0x38, 0xc4, 0x6f, 0x3b, 0x0f, 0x65, 0x46,
0xbe, 0x7e, 0x2d, 0x7b, 0x82, 0xf9, 0x40, 0xb5,
0x1d, 0x73, 0xf8, 0xeb, 0x26, 0xc7, 0x87, 0x97,
0x25, 0x54, 0xb1, 0x28, 0xaa, 0x98, 0x9d, 0xa5,
0x64, 0x6d, 0x7a, 0xd4, 0x10, 0x81, 0x44, 0xef,
0x49, 0xd6, 0xae, 0x2e, 0xdd, 0x76, 0x5c, 0x2f,
0xa7, 0x1c, 0xc9, 0x09, 0x69, 0x9a, 0x83, 0xcf,
0x29, 0x39, 0xb9, 0xe9, 0x4c, 0xff, 0x43, 0xab,
0xbd, 0x56, 0xea, 0xf2, 0xa2, 0xf1, 0xac, 0x2a,
0xb0, 0x93, 0xd1, 0x9c, 0x1b, 0x33, 0xfd, 0xd0,
0x30, 0x04, 0xb6, 0xdc, 0x7d, 0xdf, 0x32, 0x4b,
0xf7, 0xcb, 0x45, 0x9b, 0x31, 0xbb, 0x21, 0x5a,
0x41, 0x9f, 0xe1, 0xd9, 0x4a, 0x4d, 0x9e, 0xda,
0xa0, 0x68, 0x2c, 0xc3, 0x27, 0x5f, 0x80, 0x36
};
/*Some more JTR variables*/
/* Per-candidate output digests and saved plaintexts; allocated in init(),
 * sized by max_keys_per_crypt, released in done(). */
static ARCH_WORD_32 (*crypt_key)[BINARY_SIZE / 4];
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
/* Allocate the key/digest buffers and, with OpenMP, raise the minimum batch
 * size to twice the thread count (clamped to the format maximum). */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int n = omp_get_max_threads();
if (n < 1)
n = 1;
/* two keys per thread, presumably to keep all threads busy — see format docs */
n *= 2;
if (n > self->params.max_keys_per_crypt)
n = self->params.max_keys_per_crypt;
self->params.min_keys_per_crypt = n;
#endif
crypt_key = mem_calloc_align(sizeof(*crypt_key),
self->params.max_keys_per_crypt, MEM_ALIGN_CACHE);
saved_key = mem_calloc_align(sizeof(*saved_key),
self->params.max_keys_per_crypt, MEM_ALIGN_CACHE);
}
/* Teardown hook: release the buffers acquired in init(). */
static void done(void)
{
	MEM_FREE(saved_key);
	MEM_FREE(crypt_key);
}
/*
 * Decode the hex ciphertext into raw binary. Returns a pointer to a
 * static buffer (standard JtR get_binary contract), so the result is
 * only valid until the next call.
 */
static void * get_binary(char *ciphertext)
{
	static ARCH_WORD_32 out[BINARY_SIZE/4];
	char *dst = (char*)out;
	const char *src = ciphertext;
	int i;

	for (i = 0; i < BINARY_SIZE; i++) {
		/* Two hex digits per output byte: high nibble then low nibble. */
		int hi = atoi16[ARCH_INDEX(src[0])];
		int lo = atoi16[ARCH_INDEX(src[1])];
		dst[i] = hi * 16 + lo;
		src += 2;
	}
	return (void*)out;
}
/*
 * JtR validity hook: accept only ciphertexts that are exactly
 * CIPHERTEXT_LENGTH characters of upper-case hexadecimal.
 * Lower-case a-f is deliberately rejected (as in the original,
 * where that clause is commented out).
 */
static int
valid (char *ciphertext, struct fmt_main *self)
{
	int pos;

	for (pos = 0; pos < CIPHERTEXT_LENGTH; pos++) {
		char c = ciphertext[pos];
		int is_digit = (c >= '0') && (c <= '9');
		int is_upper_hex = (c >= 'A') && (c <= 'F');

		/* A NUL here also fails both tests, rejecting short input. */
		if (!is_digit && !is_upper_hex)
			return 0;
	}
	/* Reject any trailing characters beyond the expected length. */
	return ciphertext[pos] == '\0';
}
/*sets the value of saved_key so we can play with it*/
/* Copies at most PLAINTEXT_LENGTH bytes and always NUL-terminates. */
static void set_key (char *key, int index)
{
strnzcpy (saved_key[index], key, PLAINTEXT_LENGTH + 1);
}
/*retrieves the saved key; used by JTR*/
/* Returns the candidate stored by set_key() for this slot. */
static char * get_key (int index)
{
return saved_key[index];
}
/*
 * Fast scan: does the target binary hash match ANY of the `count`
 * hashes produced by the last crypt_all() call?
 */
static int cmp_all (void *binary, int count)
{
	int i = 0;

	while (i < count) {
		if (memcmp(binary, crypt_key[i], BINARY_SIZE) == 0)
			return 1;
		i++;
	}
	return 0;
}
/* Exact comparison against one computed hash slot. */
static int cmp_one (void *binary, int index)
{
return !memcmp(binary, crypt_key[index], BINARY_SIZE);
}
/* Full BINARY_SIZE is already compared in cmp_one(), so nothing
 * further to verify here; always confirm the match. */
static int cmp_exact (char *source, int index)
{
return 1;
}
/*Beginning of private functions*/
/* Takes the plaintext password and generates the second row of our
 * working matrix for the final call to the mixing function*/
/* Processes LOTUS_N (2 or 3) independent 16-byte inputs in lockstep:
 * each output byte is magic_table[input_byte ^ previous_output_byte],
 * with the chaining value t0/t1/t2 seeded to 0. Each chain is
 * inherently sequential; interleaving the independent chains is done
 * for instruction-level parallelism. The loop body is unrolled x2
 * (two bytes per iteration, 8 iterations = 16 bytes). */
static void MAYBE_INLINE
#if LOTUS_N == 3
lotus_transform_password (unsigned char *i0, unsigned char *o0,
unsigned char *i1, unsigned char *o1,
unsigned char *i2, unsigned char *o2)
#else
lotus_transform_password (unsigned char *i0, unsigned char *o0,
unsigned char *i1, unsigned char *o1)
#endif
{
unsigned char t0, t1;
#if LOTUS_N == 3
unsigned char t2;
#endif
int i;
#if LOTUS_N == 3
t0 = t1 = t2 = 0;
#else
t0 = t1 = 0;
#endif
for (i = 0; i < 8; i++)
{
t0 = *o0++ = lotus_magic_table[ARCH_INDEX(*i0++ ^ t0)];
t1 = *o1++ = lotus_magic_table[ARCH_INDEX(*i1++ ^ t1)];
#if LOTUS_N == 3
t2 = *o2++ = lotus_magic_table[ARCH_INDEX(*i2++ ^ t2)];
#endif
t0 = *o0++ = lotus_magic_table[ARCH_INDEX(*i0++ ^ t0)];
t1 = *o1++ = lotus_magic_table[ARCH_INDEX(*i1++ ^ t1)];
#if LOTUS_N == 3
t2 = *o2++ = lotus_magic_table[ARCH_INDEX(*i2++ ^ t2)];
#endif
}
}
/* The mixing function: perturbs the first three rows of the matrix*/
/* 18 outer rounds over the 48-byte state m0 (and m1/m2 in lockstep
 * for the other interleaved instances). Each byte is XORed in place
 * with magic_table[j + t], where j counts down 48..1 and t is the
 * previous result — so the order of operations is load-bearing.
 * The inner loop is unrolled x2 (note the mid-body j--). */
#if LOTUS_N == 3
static void lotus_mix (unsigned char *m0, unsigned char *m1,
unsigned char *m2)
#else
static void lotus_mix (unsigned char *m0, unsigned char *m1)
#endif
{
unsigned char t0, t1;
unsigned char *p0, *p1;
#if LOTUS_N == 3
unsigned char t2;
unsigned char *p2;
#endif
int i, j;
#if LOTUS_N == 3
t0 = t1 = t2 = 0;
#else
t0 = t1 = 0;
#endif
for (i = 18; i > 0; i--)
{
p0 = m0;
p1 = m1;
#if LOTUS_N == 3
p2 = m2;
#endif
for (j = 48; j > 0; j--)
{
t0 = p0[0] ^= lotus_magic_table[ARCH_INDEX(j + t0)];
t1 = p1[0] ^= lotus_magic_table[ARCH_INDEX(j + t1)];
#if LOTUS_N == 3
t2 = p2[0] ^= lotus_magic_table[ARCH_INDEX(j + t2)];
#endif
j--;
t0 = p0[1] ^= lotus_magic_table[ARCH_INDEX(j + t0)];
p0 += 2;
t1 = p1[1] ^= lotus_magic_table[ARCH_INDEX(j + t1)];
p1 += 2;
#if LOTUS_N == 3
t2 = p2[1] ^= lotus_magic_table[ARCH_INDEX(j + t2)];
p2 += 2;
#endif
}
}
}
/*the last public function; generates ciphertext*/
/* Hashes `count` saved keys, LOTUS_N candidates per loop iteration
 * (OpenMP splits iterations across threads; ctx and password_length
 * are declared inside the loop body, hence thread-private).
 * Assumes count is a multiple of LOTUS_N — presumably guaranteed by
 * the format's min/max_keys_per_crypt settings; verify against the
 * JtR core if changing those.
 * Per candidate, the 4x16 working matrix u.m4 is laid out as:
 *   row 0: output accumulator (zeroed)
 *   row 1: password, padded with (16 - length) bytes of that value
 *   row 2: copy of row 1 (later XOR-combined with row 0)
 *   row 3: transformed password */
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index += LOTUS_N) {
struct {
union {
unsigned char m[64];
unsigned char m4[4][16];
ARCH_WORD m4w[4][16 / ARCH_SIZE];
} u;
} ctx[LOTUS_N];
int password_length;
/* Candidate 0: zero row 0, build padded password rows 1 and 2. */
memset(ctx[0].u.m4[0], 0, 16);
password_length = strlen(saved_key[index]);
memset(ctx[0].u.m4[1], (PLAINTEXT_LENGTH - password_length), PLAINTEXT_LENGTH);
memcpy(ctx[0].u.m4[1], saved_key[index], password_length);
memcpy(ctx[0].u.m4[2], ctx[0].u.m4[1], 16);
/* Candidate 1: same setup. */
memset(ctx[1].u.m4[0], 0, 16);
password_length = strlen(saved_key[index + 1]);
memset(ctx[1].u.m4[1], (PLAINTEXT_LENGTH - password_length), PLAINTEXT_LENGTH);
memcpy(ctx[1].u.m4[1], saved_key[index + 1], password_length);
memcpy(ctx[1].u.m4[2], ctx[1].u.m4[1], 16);
#if LOTUS_N == 3
memset(ctx[2].u.m4[0], 0, 16);
password_length = strlen(saved_key[index + 2]);
memset(ctx[2].u.m4[1], (PLAINTEXT_LENGTH - password_length), PLAINTEXT_LENGTH);
memcpy(ctx[2].u.m4[1], saved_key[index + 2], password_length);
memcpy(ctx[2].u.m4[2], ctx[2].u.m4[1], 16);
/* Transform row 1 -> row 3, then mix rows 0-2 (first pass). */
lotus_transform_password(ctx[0].u.m4[1], ctx[0].u.m4[3],
ctx[1].u.m4[1], ctx[1].u.m4[3],
ctx[2].u.m4[1], ctx[2].u.m4[3]);
lotus_mix(ctx[0].u.m, ctx[1].u.m, ctx[2].u.m);
#else
lotus_transform_password(ctx[0].u.m4[1], ctx[0].u.m4[3],
ctx[1].u.m4[1], ctx[1].u.m4[3]);
lotus_mix(ctx[0].u.m, ctx[1].u.m);
#endif
/* Replace row 1 with the transformed password. */
memcpy(ctx[0].u.m4[1], ctx[0].u.m4[3], 16);
memcpy(ctx[1].u.m4[1], ctx[1].u.m4[3], 16);
#if LOTUS_N == 3
memcpy(ctx[2].u.m4[1], ctx[2].u.m4[3], 16);
#endif
{
int i;
/* Row 2 = row 0 XOR row 1, done word-at-a-time. */
for (i = 0; i < 16 / ARCH_SIZE; i++) {
ctx[0].u.m4w[2][i] = ctx[0].u.m4w[0][i] ^ ctx[0].u.m4w[1][i];
ctx[1].u.m4w[2][i] = ctx[1].u.m4w[0][i] ^ ctx[1].u.m4w[1][i];
#if LOTUS_N == 3
ctx[2].u.m4w[2][i] = ctx[2].u.m4w[0][i] ^ ctx[2].u.m4w[1][i];
#endif
}
}
/* Second mixing pass; row 0 then holds the final digest. */
#if LOTUS_N == 3
lotus_mix(ctx[0].u.m, ctx[1].u.m, ctx[2].u.m);
#else
lotus_mix(ctx[0].u.m, ctx[1].u.m);
#endif
memcpy(crypt_key[index], ctx[0].u.m4[0], BINARY_SIZE);
memcpy(crypt_key[index + 1], ctx[1].u.m4[0], BINARY_SIZE);
#if LOTUS_N == 3
memcpy(crypt_key[index + 2], ctx[2].u.m4[0], BINARY_SIZE);
#endif
}
return count;
}
/* Partial-hash accessors over the computed digests (first 32-bit word,
 * masked to increasing widths) — used by JtR's hash-table lookups. */
static int get_hash_0(int index) { return crypt_key[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_key[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_key[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_key[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_key[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_key[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_key[index][0] & PH_MASK_6; }
/* Same masks applied to a loaded binary hash, for bucket matching. */
static int binary_hash_0(void * binary) { return *(ARCH_WORD_32 *)binary & PH_MASK_0; }
static int binary_hash_1(void * binary) { return *(ARCH_WORD_32 *)binary & PH_MASK_1; }
static int binary_hash_2(void * binary) { return *(ARCH_WORD_32 *)binary & PH_MASK_2; }
static int binary_hash_3(void * binary) { return *(ARCH_WORD_32 *)binary & PH_MASK_3; }
static int binary_hash_4(void * binary) { return *(ARCH_WORD_32 *)binary & PH_MASK_4; }
static int binary_hash_5(void * binary) { return *(ARCH_WORD_32 *)binary & PH_MASK_5; }
static int binary_hash_6(void * binary) { return *(ARCH_WORD_32 *)binary & PH_MASK_6; }
/* C's version of a class specifier */
/* Format descriptor registered with the JtR core: first the static
 * parameters, then the method (callback) table. Field order follows
 * struct fmt_main — NOTE(review): the bare 0 after BENCHMARK_LENGTH
 * presumably fills a numeric params field (tunable-cost related);
 * confirm against formats.h before reordering anything here. */
struct fmt_main fmt_lotus5 = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP, /* case-sensitive, 8-bit clean, OpenMP-capable */
{ NULL },
tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
fmt_default_salt, /* unsalted format: default (no-op) salt handling */
{ NULL },
fmt_default_source,
{
binary_hash_0,
binary_hash_1,
binary_hash_2,
binary_hash_3,
binary_hash_4,
binary_hash_5,
binary_hash_6
},
fmt_default_salt_hash,
NULL,
fmt_default_set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
omp-parallel-if.c | /* { dg-do compile } */
extern int foo(void);
extern void bar(void);
int main ()
{
/* Malformed uses of 'if' and 'num_threads'. */
/* Each OpenMP clause below is duplicated, which the compiler must
 * diagnose; the dg-error annotations assert the expected message. */
#pragma omp parallel if (foo () > 10) if (foo () == 3) /* { dg-error "too many" } */
{
bar ();
}
#pragma omp parallel num_threads (3) num_threads (20) /* { dg-error "too many" } */
{
bar ();
}
/* Valid uses of 'if' and 'num_threads'. */
/* One of each clause is allowed: no diagnostic expected here. */
#pragma omp parallel if (foo () == 10) num_threads (foo ())
{
bar ();
}
}
|
DRB035-truedepscalar-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Loop carried true dep between tmp =.. and ..= tmp.
Data race pair: tmp@66:12 vs. tmp@67:5
*/
#include "omprace.h"
#include <omp.h>
#include <stdlib.h>
#include <stdio.h>
int main(int argc, char* argv[])
{
omprace_init();
int i;
int tmp;
tmp = 10;
int len=100;
int a[100];
/* INTENTIONAL data race (this is a DataRaceBench positive case):
 * tmp carries a loop-carried true dependence — each iteration reads
 * the tmp written by the previous one — so the parallel for is
 * incorrect by design. Do not "fix" with firstprivate/private. */
#pragma omp parallel for
for (i=0;i<len;i++)
{
a[i] = tmp;
tmp =a[i]+i;
}
printf("a[50]=%d\n", a[50]);
omprace_fini();
return 0;
}
|
threshold.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% TTTTT H H RRRR EEEEE SSSSS H H OOO L DDDD %
% T H H R R E SS H H O O L D D %
% T HHHHH RRRR EEE SSS HHHHH O O L D D %
% T H H R R E SS H H O O L D D %
% T H H R R EEEEE SSSSS H H OOO LLLLL DDDD %
% %
% %
% MagickCore Image Threshold Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/property.h"
#include "magick/blob.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/configure.h"
#include "magick/constitute.h"
#include "magick/decorate.h"
#include "magick/draw.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/effect.h"
#include "magick/fx.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/montage.h"
#include "magick/option.h"
#include "magick/pixel-private.h"
#include "magick/quantize.h"
#include "magick/quantum.h"
#include "magick/random_.h"
#include "magick/random-private.h"
#include "magick/resize.h"
#include "magick/resource_.h"
#include "magick/segment.h"
#include "magick/shear.h"
#include "magick/signature-private.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/threshold.h"
#include "magick/transform.h"
#include "magick/xml-tree.h"
/*
Define declarations.
*/
#define ThresholdsFilename "thresholds.xml"
/*
Typedef declarations.
*/
/* A dither/threshold map parsed from thresholds.xml: a width x height
 * matrix of levels, each compared against intensity scaled by divisor. */
struct _ThresholdMap
{
char
*map_id,
*description;
size_t
width,
height;
ssize_t
divisor,
*levels;
};
/*
Static declarations.
*/
/* Built-in fallback threshold maps used when thresholds.xml cannot be
 * loaded: a 1x1 non-dither map and a checkerboard map.
 * NOTE(review): the "checks" entry is aliased "2x1" but declares
 * width/height of 2x2 — presumably intentional upstream content;
 * confirm against the shipped thresholds.xml before changing. */
static const char
*MinimalThresholdMap =
"<?xml version=\"1.0\"?>"
"<thresholds>"
" <threshold map=\"threshold\" alias=\"1x1\">"
" <description>Threshold 1x1 (non-dither)</description>"
" <levels width=\"1\" height=\"1\" divisor=\"2\">"
" 1"
" </levels>"
" </threshold>"
" <threshold map=\"checks\" alias=\"2x1\">"
" <description>Checkerboard 2x1 (dither)</description>"
" <levels width=\"2\" height=\"2\" divisor=\"3\">"
" 1 2"
" 2 1"
" </levels>"
" </threshold>"
"</thresholds>";
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveThresholdImage() selects an individual threshold for each pixel
% based on the range of intensity values in its local neighborhood. This
% allows for thresholding of an image whose global intensity histogram
% doesn't contain distinctive peaks.
%
% The format of the AdaptiveThresholdImage method is:
%
% Image *AdaptiveThresholdImage(const Image *image,
% const size_t width,const size_t height,
% const ssize_t offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the width of the local neighborhood.
%
% o height: the height of the local neighborhood.
%
% o offset: the mean offset.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveThresholdImage(const Image *image,
const size_t width,const size_t height,const ssize_t offset,
ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"
CacheView
*image_view,
*threshold_view;
Image
*threshold_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
zero;
MagickRealType
number_pixels;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
threshold_image=CloneImage(image,0,0,MagickTrue,exception);
if (threshold_image == (Image *) NULL)
return((Image *) NULL);
/* A degenerate neighborhood means no thresholding: return the clone. */
if ((width == 0) || (height == 0))
return(threshold_image);
if (SetImageStorageClass(threshold_image,DirectClass) == MagickFalse)
{
InheritException(exception,&threshold_image->exception);
threshold_image=DestroyImage(threshold_image);
return((Image *) NULL);
}
/*
Local adaptive threshold.
*/
status=MagickTrue;
progress=0;
GetMagickPixelPacket(image,&zero);
number_pixels=(MagickRealType) (width*height);
image_view=AcquireVirtualCacheView(image,exception);
threshold_view=AcquireAuthenticCacheView(threshold_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,threshold_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
MagickPixelPacket
channel_bias,
channel_sum;
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p,
*magick_restrict r;
register IndexPacket
*magick_restrict threshold_indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
ssize_t
u,
v;
if (status == MagickFalse)
continue;
/* Read a (columns+width) x height window centered on row y; the
virtual cache view supplies out-of-bounds pixels at the edges. */
p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t)
height/2L,image->columns+width,height,exception);
q=GetCacheViewAuthenticPixels(threshold_view,0,y,threshold_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
threshold_indexes=GetCacheViewAuthenticIndexQueue(threshold_view);
/* Seed the sliding-window sum for x == 0; channel_bias accumulates
the window's rightmost column so it can be retired as the window
slides one pixel to the right. */
channel_bias=zero;
channel_sum=zero;
r=p;
for (v=0; v < (ssize_t) height; v++)
{
for (u=0; u < (ssize_t) width; u++)
{
if (u == (ssize_t) (width-1))
{
channel_bias.red+=r[u].red;
channel_bias.green+=r[u].green;
channel_bias.blue+=r[u].blue;
channel_bias.opacity+=r[u].opacity;
if (image->colorspace == CMYKColorspace)
channel_bias.index=(MagickRealType)
GetPixelIndex(indexes+(r-p)+u);
}
channel_sum.red+=r[u].red;
channel_sum.green+=r[u].green;
channel_sum.blue+=r[u].blue;
channel_sum.opacity+=r[u].opacity;
if (image->colorspace == CMYKColorspace)
channel_sum.index=(MagickRealType) GetPixelIndex(indexes+(r-p)+u);
}
r+=image->columns+width;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickPixelPacket
mean;
mean=zero;
r=p;
/* Slide the window right: drop the old rightmost column (bias),
then add the outgoing left column to bias and the new right
column to the sum, one row at a time. */
channel_sum.red-=channel_bias.red;
channel_sum.green-=channel_bias.green;
channel_sum.blue-=channel_bias.blue;
channel_sum.opacity-=channel_bias.opacity;
channel_sum.index-=channel_bias.index;
channel_bias=zero;
for (v=0; v < (ssize_t) height; v++)
{
channel_bias.red+=r[0].red;
channel_bias.green+=r[0].green;
channel_bias.blue+=r[0].blue;
channel_bias.opacity+=r[0].opacity;
if (image->colorspace == CMYKColorspace)
channel_bias.index=(MagickRealType) GetPixelIndex(indexes+x+(r-p)+0);
channel_sum.red+=r[width-1].red;
channel_sum.green+=r[width-1].green;
channel_sum.blue+=r[width-1].blue;
channel_sum.opacity+=r[width-1].opacity;
if (image->colorspace == CMYKColorspace)
channel_sum.index=(MagickRealType) GetPixelIndex(indexes+x+(r-p)+
width-1);
r+=image->columns+width;
}
/* Per-channel local mean plus the caller-supplied offset forms
the adaptive threshold for this pixel. */
mean.red=(MagickRealType) (channel_sum.red/number_pixels+offset);
mean.green=(MagickRealType) (channel_sum.green/number_pixels+offset);
mean.blue=(MagickRealType) (channel_sum.blue/number_pixels+offset);
mean.opacity=(MagickRealType) (channel_sum.opacity/number_pixels+offset);
if (image->colorspace == CMYKColorspace)
mean.index=(MagickRealType) (channel_sum.index/number_pixels+offset);
SetPixelRed(q,((MagickRealType) GetPixelRed(q) <= mean.red) ?
0 : QuantumRange);
SetPixelGreen(q,((MagickRealType) GetPixelGreen(q) <= mean.green) ?
0 : QuantumRange);
SetPixelBlue(q,((MagickRealType) GetPixelBlue(q) <= mean.blue) ?
0 : QuantumRange);
SetPixelOpacity(q,((MagickRealType) GetPixelOpacity(q) <= mean.opacity) ?
0 : QuantumRange);
if (image->colorspace == CMYKColorspace)
SetPixelIndex(threshold_indexes+x,(((MagickRealType) GetPixelIndex(
threshold_indexes+x) <= mean.index) ? 0 : QuantumRange));
p++;
q++;
}
sync=SyncCacheViewAuthenticPixels(threshold_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ThresholdImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
threshold_view=DestroyCacheView(threshold_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
threshold_image=DestroyImage(threshold_image);
return(threshold_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoThresholdImage() automatically performs image thresholding
% dependent on which method you specify.
%
% The format of the AutoThresholdImage method is:
%
% MagickBooleanType AutoThresholdImage(Image *image,
% const AutoThresholdMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image to auto-threshold.
%
% o method: choose from Kapur, OTSU, or Triangle.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Kapur maximum-entropy thresholding: choose the histogram bin that
 * maximizes the sum of the entropies of the "black" (<= bin) and
 * "white" (> bin) partitions. Returns the threshold as a percentage
 * of MaxIntensity, or -1.0 on allocation failure. */
static double KapurThreshold(const Image *image,const double *histogram,
ExceptionInfo *exception)
{
#define MaxIntensity 255
double
*black_entropy,
*cumulative_histogram,
entropy,
epsilon,
maximum_entropy,
*white_entropy;
register ssize_t
i,
j;
size_t
threshold;
/*
Compute optimal threshold from the entropy of the histogram.
*/
cumulative_histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
sizeof(*cumulative_histogram));
black_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
sizeof(*black_entropy));
white_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
sizeof(*white_entropy));
if ((cumulative_histogram == (double *) NULL) ||
(black_entropy == (double *) NULL) || (white_entropy == (double *) NULL))
{
/* Partial allocation: release whatever succeeded before bailing. */
if (white_entropy != (double *) NULL)
white_entropy=(double *) RelinquishMagickMemory(white_entropy);
if (black_entropy != (double *) NULL)
black_entropy=(double *) RelinquishMagickMemory(black_entropy);
if (cumulative_histogram != (double *) NULL)
cumulative_histogram=(double *)
RelinquishMagickMemory(cumulative_histogram);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(-1.0);
}
/*
Entropy for black and white parts of the histogram.
*/
cumulative_histogram[0]=histogram[0];
for (i=1; i <= MaxIntensity; i++)
cumulative_histogram[i]=cumulative_histogram[i-1]+histogram[i];
epsilon=MagickMinimumValue;
for (j=0; j <= MaxIntensity; j++)
{
/*
Black entropy.
*/
black_entropy[j]=0.0;
if (cumulative_histogram[j] > epsilon)
{
entropy=0.0;
for (i=0; i <= j; i++)
if (histogram[i] > epsilon)
entropy-=histogram[i]/cumulative_histogram[j]*
log(histogram[i]/cumulative_histogram[j]);
black_entropy[j]=entropy;
}
/*
White entropy.
*/
white_entropy[j]=0.0;
if ((1.0-cumulative_histogram[j]) > epsilon)
{
entropy=0.0;
for (i=j+1; i <= MaxIntensity; i++)
if (histogram[i] > epsilon)
entropy-=histogram[i]/(1.0-cumulative_histogram[j])*
log(histogram[i]/(1.0-cumulative_histogram[j]));
white_entropy[j]=entropy;
}
}
/*
Find histogram bin with maximum entropy.
*/
maximum_entropy=black_entropy[0]+white_entropy[0];
threshold=0;
for (j=1; j <= MaxIntensity; j++)
if ((black_entropy[j]+white_entropy[j]) > maximum_entropy)
{
maximum_entropy=black_entropy[j]+white_entropy[j];
threshold=(size_t) j;
}
/*
Free resources.
*/
white_entropy=(double *) RelinquishMagickMemory(white_entropy);
black_entropy=(double *) RelinquishMagickMemory(black_entropy);
cumulative_histogram=(double *) RelinquishMagickMemory(cumulative_histogram);
return(100.0*threshold/MaxIntensity);
}
/* Otsu thresholding: choose the bin maximizing inter-class variance
 * sigma between foreground and background. Expects a normalized
 * histogram. Returns the threshold as a percentage of MaxIntensity,
 * or -1.0 on allocation failure. */
static double OTSUThreshold(const Image *image,const double *histogram,
ExceptionInfo *exception)
{
double
max_sigma,
*myu,
*omega,
*probability,
*sigma,
threshold;
register ssize_t
i;
/*
Compute optimal threshold from maximization of inter-class variance.
*/
myu=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*myu));
omega=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*omega));
probability=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
sizeof(*probability));
sigma=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*sigma));
if ((myu == (double *) NULL) || (omega == (double *) NULL) ||
(probability == (double *) NULL) || (sigma == (double *) NULL))
{
/* Partial allocation: release whatever succeeded before bailing. */
if (sigma != (double *) NULL)
sigma=(double *) RelinquishMagickMemory(sigma);
if (probability != (double *) NULL)
probability=(double *) RelinquishMagickMemory(probability);
if (omega != (double *) NULL)
omega=(double *) RelinquishMagickMemory(omega);
if (myu != (double *) NULL)
myu=(double *) RelinquishMagickMemory(myu);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(-1.0);
}
/*
Calculate probability density.
*/
for (i=0; i <= (ssize_t) MaxIntensity; i++)
probability[i]=histogram[i];
/*
Generate probability of graylevels and mean value for separation.
*/
omega[0]=probability[0];
myu[0]=0.0;
for (i=1; i <= (ssize_t) MaxIntensity; i++)
{
/* omega = cumulative probability; myu = cumulative mean. */
omega[i]=omega[i-1]+probability[i];
myu[i]=myu[i-1]+i*probability[i];
}
/*
Sigma maximization: inter-class variance and compute optimal threshold.
*/
threshold=0;
max_sigma=0.0;
for (i=0; i < (ssize_t) MaxIntensity; i++)
{
sigma[i]=0.0;
if ((omega[i] != 0.0) && (omega[i] != 1.0))
sigma[i]=pow(myu[MaxIntensity]*omega[i]-myu[i],2.0)/(omega[i]*(1.0-
omega[i]));
if (sigma[i] > max_sigma)
{
max_sigma=sigma[i];
threshold=(double) i;
}
}
/*
Free resources.
*/
myu=(double *) RelinquishMagickMemory(myu);
omega=(double *) RelinquishMagickMemory(omega);
probability=(double *) RelinquishMagickMemory(probability);
sigma=(double *) RelinquishMagickMemory(sigma);
return(100.0*threshold/MaxIntensity);
}
/* Triangle thresholding: draw a line from the histogram peak to the
 * far non-empty end and pick the bin with the greatest perpendicular
 * distance from that line. Returns the threshold as a percentage of
 * MaxIntensity. */
static double TriangleThreshold(const Image *image,const double *histogram)
{
double
a,
b,
c,
count,
distance,
inverse_ratio,
max_distance,
segment,
x1,
x2,
y1,
y2;
register ssize_t
i;
ssize_t
end,
max,
start,
threshold;
/*
Compute optimal threshold with triangle algorithm.
*/
magick_unreferenced(image);
start=0; /* find start bin, first bin not zero count */
for (i=0; i <= (ssize_t) MaxIntensity; i++)
if (histogram[i] > 0.0)
{
start=i;
break;
}
end=0; /* find end bin, last bin not zero count */
for (i=(ssize_t) MaxIntensity; i >= 0; i--)
if (histogram[i] > 0.0)
{
end=i;
break;
}
max=0; /* find max bin, bin with largest count */
count=0.0;
for (i=0; i <= (ssize_t) MaxIntensity; i++)
if (histogram[i] > count)
{
max=i;
count=histogram[i];
}
/*
Compute threshold at split point.
*/
x1=(double) max;
y1=histogram[max];
/* Search toward whichever side of the peak holds the longer tail. */
x2=(double) end;
if ((max-start) >= (end-max))
x2=(double) start;
y2=0.0;
/* Line through (x1,y1)-(x2,y2) in implicit form a*x+b*y+c=0. */
a=y1-y2;
b=x2-x1;
c=(-1.0)*(a*x1+b*y1);
/* NOTE(review): the c*c term makes this differ from the standard
point-to-line normalization 1/sqrt(a*a+b*b); since only relative
distances are compared, the argmax is presumably unaffected —
confirm before changing. */
inverse_ratio=1.0/sqrt(a*a+b*b+c*c);
threshold=0;
max_distance=0.0;
/* The sign of `segment` restricts candidates to the correct side
of the peak-to-tail line. */
if (x2 == (double) start)
for (i=start; i < max; i++)
{
segment=inverse_ratio*(a*i+b*histogram[i]+c);
distance=sqrt(segment*segment);
if ((distance > max_distance) && (segment > 0.0))
{
threshold=i;
max_distance=distance;
}
}
else
for (i=end; i > max; i--)
{
segment=inverse_ratio*(a*i+b*histogram[i]+c);
distance=sqrt(segment*segment);
if ((distance > max_distance) && (segment < 0.0))
{
threshold=i;
max_distance=distance;
}
}
return(100.0*threshold/MaxIntensity);
}
MagickExport MagickBooleanType AutoThresholdImage(Image *image,
const AutoThresholdMethod method,ExceptionInfo *exception)
{
CacheView
*image_view;
char
property[MagickPathExtent];
double
gamma,
*histogram,
sum,
threshold;
MagickBooleanType
status;
register ssize_t
i;
ssize_t
y;
/*
Form histogram.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
sizeof(*histogram));
if (histogram == (double *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
status=MagickTrue;
(void) memset(histogram,0,(MaxIntensity+1UL)*sizeof(*histogram));
image_view=AcquireVirtualCacheView(image,exception);
/* Build a 256-bin intensity histogram over the whole image. */
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
double intensity = GetPixelIntensity(image,p);
histogram[ScaleQuantumToChar(ClampToQuantum(intensity))]++;
p++;
}
}
image_view=DestroyCacheView(image_view);
/*
Normalize histogram.
*/
sum=0.0;
for (i=0; i <= (ssize_t) MaxIntensity; i++)
sum+=histogram[i];
/* PerceptibleReciprocal avoids division by zero for empty images. */
gamma=PerceptibleReciprocal(sum);
for (i=0; i <= (ssize_t) MaxIntensity; i++)
histogram[i]=gamma*histogram[i];
/*
Discover threshold from histogram.
*/
switch (method)
{
case KapurThresholdMethod:
{
threshold=KapurThreshold(image,histogram,exception);
break;
}
case OTSUThresholdMethod:
default:
{
threshold=OTSUThreshold(image,histogram,exception);
break;
}
case TriangleThresholdMethod:
{
threshold=TriangleThreshold(image,histogram);
break;
}
}
histogram=(double *) RelinquishMagickMemory(histogram);
/* Negative threshold signals allocation failure inside the method. */
if (threshold < 0.0)
status=MagickFalse;
if (status == MagickFalse)
return(MagickFalse);
/*
Threshold image.
*/
/* Record the chosen threshold (percent) as an image property, then
apply it via a global bilevel threshold. */
(void) FormatLocaleString(property,MagickPathExtent,"%g%%",threshold);
(void) SetImageProperty(image,"auto-threshold:threshold",property);
return(BilevelImage(image,QuantumRange*threshold/100.0));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B i l e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BilevelImage() changes the value of individual pixels based on the
% intensity of each pixel channel. The result is a high-contrast image.
%
% More precisely each channel value of the image is 'thresholded' so that if
% it is equal to or less than the given value it is set to zero, while any
% value greater than that give is set to it maximum or QuantumRange.
%
% This function is what is used to implement the "-threshold" operator for
% the command line API.
%
% If the default channel setting is given the image is thresholded using just
% the gray 'intensity' of the image, rather than the individual channels.
%
% The format of the BilevelImageChannel method is:
%
% MagickBooleanType BilevelImage(Image *image,const double threshold)
% MagickBooleanType BilevelImageChannel(Image *image,
% const ChannelType channel,const double threshold)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o threshold: define the threshold values.
%
% Aside: You can get the same results as operator using LevelImageChannels()
% with the 'threshold' value for both the black_point and the white_point.
%
*/
/* Global bilevel threshold: convenience wrapper that thresholds the
 * default channel set via BilevelImageChannel(). */
MagickExport MagickBooleanType BilevelImage(Image *image,const double threshold)
{
  return(BilevelImageChannel(image,DefaultChannels,threshold));
}
MagickExport MagickBooleanType BilevelImageChannel(Image *image,
const ChannelType channel,const double threshold)
{
#define ThresholdImageTag "Threshold/Image"
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
if (IsGrayColorspace(image->colorspace) == MagickFalse)
(void) SetImageColorspace(image,sRGBColorspace);
/*
Bilevel threshold image.
*/
status=MagickTrue;
progress=0;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*magick_restrict indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
if ((channel & SyncChannels) != 0)
{
/* Default channel set: threshold on gray intensity and write the
same value to R, G and B. */
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,GetPixelIntensity(image,q) <= threshold ? 0 :
QuantumRange);
SetPixelGreen(q,GetPixelRed(q));
SetPixelBlue(q,GetPixelRed(q));
q++;
}
}
else
/* Explicit channel mask: threshold each requested channel
independently. */
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((channel & RedChannel) != 0)
SetPixelRed(q,(MagickRealType) GetPixelRed(q) <= threshold ? 0 :
QuantumRange);
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,(MagickRealType) GetPixelGreen(q) <= threshold ? 0 :
QuantumRange);
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,(MagickRealType) GetPixelBlue(q) <= threshold ? 0 :
QuantumRange);
if ((channel & OpacityChannel) != 0)
{
/* With a matte channel, threshold alpha to fully opaque or
fully transparent instead of raw opacity values. */
if (image->matte == MagickFalse)
SetPixelOpacity(q,(MagickRealType) GetPixelOpacity(q) <=
threshold ? 0 : QuantumRange);
else
SetPixelAlpha(q,(MagickRealType) GetPixelAlpha(q) <= threshold ?
OpaqueOpacity : TransparentOpacity);
}
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(indexes+x,(MagickRealType) GetPixelIndex(indexes+x) <=
threshold ? 0 : QuantumRange);
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ThresholdImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l a c k T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlackThresholdImage() is like ThresholdImage() but forces all pixels below
% the threshold into black while leaving all pixels at or above the threshold
% unchanged.
%
% The format of the BlackThresholdImage method is:
%
% MagickBooleanType BlackThresholdImage(Image *image,const char *threshold)
% MagickBooleanType BlackThresholdImageChannel(Image *image,
% const ChannelType channel,const char *threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel or channels to be thresholded.
%
% o threshold: Define the threshold value.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  BlackThresholdImage() black-thresholds the default channel set, reporting
  errors through the image's own exception structure.  It simply forwards to
  BlackThresholdImageChannel().
*/
MagickExport MagickBooleanType BlackThresholdImage(Image *image,
  const char *threshold)
{
  return(BlackThresholdImageChannel(image,DefaultChannels,threshold,
    &image->exception));
}
/*
  BlackThresholdImageChannel() forces every selected channel sample strictly
  below its per-channel threshold to 0 (black) while leaving samples at or
  above the threshold unchanged.  The threshold string is parsed as a
  geometry so each channel may carry its own value.  Returns MagickTrue on
  success (including a NULL threshold string, which is a no-op).
*/
MagickExport MagickBooleanType BlackThresholdImageChannel(Image *image,
  const ChannelType channel,const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  GetMagickPixelPacket(image,&threshold);
  /*
    Parse the threshold string as a geometry: rho/sigma/xi/psi/chi map to
    red/green/blue/opacity/index.  Any channel without an explicit value
    defaults to the red (first) value.
  */
  flags=ParseGeometry(thresholds,&geometry_info);
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    threshold.green=threshold.red;
  threshold.blue=geometry_info.xi;
  if ((flags & XiValue) == 0)
    threshold.blue=threshold.red;
  threshold.opacity=geometry_info.psi;
  if ((flags & PsiValue) == 0)
    threshold.opacity=threshold.red;
  threshold.index=geometry_info.chi;
  if ((flags & ChiValue) == 0)
    threshold.index=threshold.red;
  if ((flags & PercentValue) != 0)
    {
      /* A '%' suffix expresses thresholds as percentages of QuantumRange. */
      threshold.red*=(MagickRealType) (QuantumRange/100.0);
      threshold.green*=(MagickRealType) (QuantumRange/100.0);
      threshold.blue*=(MagickRealType) (QuantumRange/100.0);
      threshold.opacity*=(MagickRealType) (QuantumRange/100.0);
      threshold.index*=(MagickRealType) (QuantumRange/100.0);
    }
  /*
    A non-gray threshold applied to a gray image forces conversion to sRGB
    so the per-channel values can take effect.
  */
  if ((IsMagickGray(&threshold) == MagickFalse) &&
      (IsGrayColorspace(image->colorspace) != MagickFalse))
    (void) SetImageColorspace(image,sRGBColorspace);
  /*
    Black threshold image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    /* Once any row fails, skip the remaining rows quickly. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Zero each selected channel whose sample falls below its threshold. */
      if (((channel & RedChannel) != 0) &&
          ((MagickRealType) GetPixelRed(q) < threshold.red))
        SetPixelRed(q,0);
      if (((channel & GreenChannel) != 0) &&
          ((MagickRealType) GetPixelGreen(q) < threshold.green))
        SetPixelGreen(q,0);
      if (((channel & BlueChannel) != 0) &&
          ((MagickRealType) GetPixelBlue(q) < threshold.blue))
        SetPixelBlue(q,0);
      if (((channel & OpacityChannel) != 0) &&
          ((MagickRealType) GetPixelOpacity(q) < threshold.opacity))
        SetPixelOpacity(q,0);
      /* The index (black) channel only exists for CMYK images. */
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          ((MagickRealType) GetPixelIndex(indexes+x) < threshold.index))
        SetPixelIndex(indexes+x,0);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ThresholdImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l a m p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClampImage() sets each pixel whose value is below zero to zero and any
% pixel whose value is above the quantum range to the quantum range (e.g.
% 65535) otherwise the pixel value remains unchanged.
%
% The format of the ClampImageChannel method is:
%
% MagickBooleanType ClampImage(Image *image)
% MagickBooleanType ClampImageChannel(Image *image,
% const ChannelType channel)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
*/
/*
  ClampImage() clamps all channels of every pixel to the valid quantum
  range; a thin convenience wrapper around ClampImageChannel().
*/
MagickExport MagickBooleanType ClampImage(Image *image)
{
  return(ClampImageChannel(image,DefaultChannels));
}
/*
  ClampImageChannel() restricts each selected channel sample of every pixel
  to the valid quantum range [0, QuantumRange]; in-range samples are left
  unchanged.  PseudoClass images are handled by clamping the colormap
  entries directly.  Returns MagickTrue on success.
*/
MagickExport MagickBooleanType ClampImageChannel(Image *image,
  const ChannelType channel)
{
#define ClampImageTag "Clamp/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      register PixelPacket
        *magick_restrict q;

      /*
        PseudoClass: clamp the colormap entries and re-sync the pixels;
        note the channel selection is ignored on this path.
      */
      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        SetPixelRed(q,ClampPixel((MagickRealType) GetPixelRed(q)));
        SetPixelGreen(q,ClampPixel((MagickRealType) GetPixelGreen(q)));
        SetPixelBlue(q,ClampPixel((MagickRealType) GetPixelBlue(q)));
        SetPixelOpacity(q,ClampPixel((MagickRealType) GetPixelOpacity(q)));
        q++;
      }
      return(SyncImage(image));
    }
  /*
    Clamp image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    /* Once any row fails, skip the remaining rows quickly. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampPixel((MagickRealType) GetPixelRed(q)));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampPixel((MagickRealType) GetPixelGreen(q)));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampPixel((MagickRealType) GetPixelBlue(q)));
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,ClampPixel((MagickRealType) GetPixelOpacity(q)));
      /* The index (black) channel only exists for CMYK images. */
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,ClampPixel((MagickRealType) GetPixelIndex(
          indexes+x)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ClampImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y T h r e s h o l d M a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyThresholdMap() de-allocates the given ThresholdMap.
%
% The format of the DestroyThresholdMap method is:
%
% ThresholdMap *DestroyThresholdMap(ThresholdMap *map)
%
% A description of each parameter follows.
%
% o map: Pointer to the Threshold map to destroy
%
*/
/*
  DestroyThresholdMap() releases every buffer owned by the map (levels
  array, description, and id strings) and then the map structure itself.
  Always returns NULL so callers can clear their pointer in one step.
*/
MagickExport ThresholdMap *DestroyThresholdMap(ThresholdMap *map)
{
  assert(map != (ThresholdMap *) NULL);
  if (map->levels != (ssize_t *) NULL)
    map->levels=(ssize_t *) RelinquishMagickMemory(map->levels);
  if (map->description != (char *) NULL)
    map->description=DestroyString(map->description);
  if (map->map_id != (char *) NULL)
    map->map_id=DestroyString(map->map_id);
  return((ThresholdMap *) RelinquishMagickMemory(map));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t T h r e s h o l d M a p F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetThresholdMapFile() looks for a given threshold map name or alias in the
% given XML file data, and returns the allocated map when found.
%
% The format of the GetThresholdMapFile method is:
%
% ThresholdMap *GetThresholdMapFile(const char *xml,const char *filename,
% const char *map_id,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o xml: The threshold map list in XML format.
%
% o filename: The threshold map XML filename.
%
% o map_id: ID of the map to look for in XML list.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetThresholdMapFile() searches the XML threshold-map list for a
  <threshold> element whose "map" or "alias" attribute matches map_id.
  When found it allocates and returns a fully-populated ThresholdMap
  (id, description, width, height, divisor, and levels array); otherwise
  it returns NULL, recording an exception for any malformed entry.
*/
MagickExport ThresholdMap *GetThresholdMapFile(const char *xml,
  const char *filename,const char *map_id,ExceptionInfo *exception)
{
  const char
    *attribute,
    *content;

  double
    value;

  ThresholdMap
    *map;

  XMLTreeInfo
    *description,
    *levels,
    *threshold,
    *thresholds;

  map = (ThresholdMap *) NULL;
  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  thresholds=NewXMLTree(xml,exception);
  if ( thresholds == (XMLTreeInfo *) NULL )
    return(map);
  /*
    Locate the <threshold> element whose map or alias attribute matches.
  */
  for (threshold = GetXMLTreeChild(thresholds,"threshold");
       threshold != (XMLTreeInfo *) NULL;
       threshold = GetNextXMLTreeTag(threshold) )
  {
    attribute=GetXMLTreeAttribute(threshold, "map");
    if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
      break;
    attribute=GetXMLTreeAttribute(threshold, "alias");
    if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
      break;
  }
  if (threshold == (XMLTreeInfo *) NULL)
    {
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  /*
    Both <description> and <levels> children are mandatory.
  */
  description=GetXMLTreeChild(threshold,"description");
  if (description == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement", "<description>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  levels=GetXMLTreeChild(threshold,"levels");
  if (levels == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement", "<levels>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  /*
    The map has been found -- allocate a Threshold Map to return
  */
  map=(ThresholdMap *) AcquireMagickMemory(sizeof(ThresholdMap));
  if (map == (ThresholdMap *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap");
  map->map_id=(char *) NULL;
  map->description=(char *) NULL;
  map->levels=(ssize_t *) NULL;
  /*
    Assign basic attributes.
  */
  attribute=GetXMLTreeAttribute(threshold,"map");
  if (attribute != (char *) NULL)
    map->map_id=ConstantString(attribute);
  content=GetXMLTreeContent(description);
  if (content != (char *) NULL)
    map->description=ConstantString(content);
  /*
    Parse and validate the levels geometry: width and height must be
    positive, and the divisor must be at least 2.
  */
  attribute=GetXMLTreeAttribute(levels,"width");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels width>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->width=StringToUnsignedLong(attribute);
  if (map->width == 0)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels width>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  attribute=GetXMLTreeAttribute(levels,"height");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels height>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->height=StringToUnsignedLong(attribute);
  if (map->height == 0)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels height>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  attribute=GetXMLTreeAttribute(levels, "divisor");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels divisor>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->divisor=(ssize_t) StringToLong(attribute);
  if (map->divisor < 2)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels divisor>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  /*
    Allocate threshold levels array.
  */
  content=GetXMLTreeContent(levels);
  if (content == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingContent", "<levels>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->levels=(ssize_t *) AcquireQuantumMemory((size_t) map->width,map->height*
    sizeof(*map->levels));
  if (map->levels == (ssize_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap");
  {
    char
      *p;

    register ssize_t
      i;

    /*
      Parse levels into integer array.
    */
    for (i=0; i< (ssize_t) (map->width*map->height); i++)
    {
      map->levels[i]=(ssize_t) strtol(content,&p,10);
      /* strtol leaves p == content when no digits were consumed. */
      if (p == content)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
            "XmlInvalidContent", "<level> too few values, map \"%s\"", map_id);
          thresholds=DestroyXMLTree(thresholds);
          map=DestroyThresholdMap(map);
          return(map);
        }
      /* Every level must lie within [0, divisor]. */
      if ((map->levels[i] < 0) || (map->levels[i] > map->divisor))
        {
          (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
            "XmlInvalidContent", "<level> %.20g out of range, map \"%s\"",
            (double) map->levels[i],map_id);
          thresholds=DestroyXMLTree(thresholds);
          map=DestroyThresholdMap(map);
          return(map);
        }
      content=p;
    }
    /*
      Exactly width*height values are expected; any trailing number is an
      error.
    */
    value=(double) strtol(content,&p,10);
    (void) value;
    if (p != content)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlInvalidContent", "<level> too many values, map \"%s\"", map_id);
        thresholds=DestroyXMLTree(thresholds);
        map=DestroyThresholdMap(map);
        return(map);
      }
  }
  thresholds=DestroyXMLTree(thresholds);
  return(map);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t T h r e s h o l d M a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetThresholdMap() loads and searches one or more threshold map files for
% a map matching the given name or alias.
%
% The format of the GetThresholdMap method is:
%
% ThresholdMap *GetThresholdMap(const char *map_id,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o map_id: ID of the map to look for.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetThresholdMap() resolves a threshold map by name or alias.  The
  built-in minimal map set is consulted first; otherwise each configured
  "thresholds" XML file is searched in turn until a match is found.
  Returns NULL when no file defines the requested map.
*/
MagickExport ThresholdMap *GetThresholdMap(const char *map_id,
  ExceptionInfo *exception)
{
  const StringInfo
    *option;

  LinkedListInfo
    *options;

  ThresholdMap
    *map;

  map=GetThresholdMapFile(MinimalThresholdMap,"built-in",map_id,exception);
  if (map != (ThresholdMap *) NULL)
    return(map);
  options=GetConfigureOptions(ThresholdsFilename,exception);
  for (option=(const StringInfo *) GetNextValueInLinkedList(options);
       option != (const StringInfo *) NULL;
       option=(const StringInfo *) GetNextValueInLinkedList(options))
  {
    map=GetThresholdMapFile((const char *) GetStringInfoDatum(option),
      GetStringInfoPath(option),map_id,exception);
    if (map != (ThresholdMap *) NULL)
      break;
  }
  options=DestroyConfigureOptions(options);
  return(map);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ L i s t T h r e s h o l d M a p F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ListThresholdMapFile() lists the threshold maps and their descriptions
% in the given XML file data.
%
% The format of the ListThresholdMapFile method is:
%
% MagickBooleanType ListThresholdMapFile(FILE *file,const char*xml,
% const char *filename,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o file: A pointer to the output FILE.
%
% o xml: The threshold map list in XML format.
%
% o filename: The threshold map XML filename.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ListThresholdMapFile() writes a three-column table (map, alias,
  description) for every <threshold> entry found in the given XML data.
  Returns MagickFalse when the XML cannot be parsed or a mandatory
  attribute/element is missing, MagickTrue otherwise.
*/
MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml,
  const char *filename,ExceptionInfo *exception)
{
  const char
    *alias,
    *content,
    *map;

  XMLTreeInfo
    *description,
    *threshold,
    *thresholds;

  assert(xml != (char *) NULL);
  assert(file != (FILE *) NULL);
  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  thresholds=NewXMLTree(xml,exception);
  if (thresholds == (XMLTreeInfo *) NULL)
    return(MagickFalse);
  (void) FormatLocaleFile(file,"%-16s %-12s %s\n","Map","Alias","Description");
  (void) FormatLocaleFile(file,
    "----------------------------------------------------\n");
  for (threshold=GetXMLTreeChild(thresholds,"threshold");
       threshold != (XMLTreeInfo *) NULL;
       threshold=GetNextXMLTreeTag(threshold))
  {
    map=GetXMLTreeAttribute(threshold, "map");
    if (map == (char *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingAttribute", "<map>");
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    /* The alias attribute is optional; a missing one prints as blank. */
    alias=GetXMLTreeAttribute(threshold, "alias");
    description=GetXMLTreeChild(threshold,"description");
    if (description == (XMLTreeInfo *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingElement", "<description>, map \"%s\"", map);
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    content=GetXMLTreeContent(description);
    if (content == (char *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingContent", "<description>, map \"%s\"", map);
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    (void) FormatLocaleFile(file,"%-16s %-12s %s\n",map,
      alias != (const char *) NULL ? alias : "",content);
  }
  thresholds=DestroyXMLTree(thresholds);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i s t T h r e s h o l d M a p s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ListThresholdMaps() lists the threshold maps and their descriptions
% as defined by "threshold.xml" to a file.
%
% The format of the ListThresholdMaps method is:
%
% MagickBooleanType ListThresholdMaps(FILE *file,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o file: A pointer to the output FILE.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ListThresholdMaps() lists the threshold maps defined by every configured
  "thresholds" file to the given FILE (stdout when NULL).  The result is
  the conjunction of the per-file listings: MagickFalse if any file failed
  to list, MagickTrue otherwise.
*/
MagickExport MagickBooleanType ListThresholdMaps(FILE *file,
  ExceptionInfo *exception)
{
  const StringInfo
    *option;

  LinkedListInfo
    *options;

  MagickStatusType
    status;

  status=MagickTrue;
  if (file == (FILE *) NULL)
    file=stdout;
  options=GetConfigureOptions(ThresholdsFilename,exception);
  (void) FormatLocaleFile(file,
    "\n Threshold Maps for Ordered Dither Operations\n");
  for (option=(const StringInfo *) GetNextValueInLinkedList(options);
       option != (const StringInfo *) NULL;
       option=(const StringInfo *) GetNextValueInLinkedList(options))
  {
    (void) FormatLocaleFile(file,"\nPath: %s\n\n",GetStringInfoPath(option));
    status&=ListThresholdMapFile(file,(const char *) GetStringInfoDatum(option),
      GetStringInfoPath(option),exception);
  }
  options=DestroyConfigureOptions(options);
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O r d e r e d D i t h e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OrderedDitherImage() uses the ordered dithering technique of reducing color
% images to monochrome using positional information to retain as much
% information as possible.
%
% WARNING: This function is deprecated, and is now just a call to
% the more powerful OrderedPosterizeImage() function.
%
% The format of the OrderedDitherImage method is:
%
% MagickBooleanType OrderedDitherImage(Image *image)
% MagickBooleanType OrderedDitherImageChannel(Image *image,
% const ChannelType channel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel or channels to be thresholded.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  OrderedDitherImage() applies the classic ordered dither to the default
  channel set; a thin wrapper around OrderedDitherImageChannel().
*/
MagickExport MagickBooleanType OrderedDitherImage(Image *image)
{
  return(OrderedDitherImageChannel(image,DefaultChannels,&image->exception));
}
/*
  OrderedDitherImageChannel() is retained for backward compatibility: it
  forwards to OrderedPosterizeImageChannel() with the standard 8x8 ordered
  dither map ("o8x8"), which reproduces the historical behavior.
*/
MagickExport MagickBooleanType OrderedDitherImageChannel(Image *image,
  const ChannelType channel,ExceptionInfo *exception)
{
  return(OrderedPosterizeImageChannel(image,channel,"o8x8",exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O r d e r e d P o s t e r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OrderedPosterizeImage() will perform a ordered dither based on a number
% of pre-defined dithering threshold maps, but over multiple intensity
% levels, which can be different for different channels, according to the
% input argument.
%
% The format of the OrderedPosterizeImage method is:
%
% MagickBooleanType OrderedPosterizeImage(Image *image,
% const char *threshold_map,ExceptionInfo *exception)
% MagickBooleanType OrderedPosterizeImageChannel(Image *image,
% const ChannelType channel,const char *threshold_map,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel or channels to be thresholded.
%
% o threshold_map: A string containing the name of the threshold dither
% map to use, followed by zero or more numbers representing the number
% of color levels to dither between.
%
% Any level number less than 2 will be equivalent to 2, and means only
% binary dithering will be applied to each color channel.
%
% No numbers also means a 2 level (bitmap) dither will be applied to all
% channels, while a single number is the number of levels applied to each
% channel in sequence. More numbers will be applied in turn to each of
% the color channels.
%
% For example: "o3x3,6" will generate a 6 level posterization of the
% image with a ordered 3x3 diffused pixel dither being applied between
% each level. While checker,8,8,4 will produce a 332 colormaped image
% with only a single checkerboard hash pattern (50% grey) between each
% color level, to basically double the number of color levels with
% a bare minimim of dithering.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  OrderedPosterizeImage() ordered-dithers/posterizes the default channel
  set; a thin wrapper around OrderedPosterizeImageChannel().
*/
MagickExport MagickBooleanType OrderedPosterizeImage(Image *image,
  const char *threshold_map,ExceptionInfo *exception)
{
  return(OrderedPosterizeImageChannel(image,DefaultChannels,threshold_map,
    exception));
}
/*
  OrderedPosterizeImageChannel() performs an ordered dither over multiple
  intensity levels using a named pre-defined threshold map.  The
  threshold_map argument is "<map-name>[,levels[,levels...]]": the map name
  selects the dither pattern and the optional comma-separated integers give
  the number of posterization levels, either one value for all channels or
  one value per channel in sequence.

  Fixes relative to the previous revision: the ThresholdMap is no longer
  leaked when SetImageStorageClass() fails, and the function now returns
  the accumulated status instead of unconditionally returning MagickTrue,
  so pixel-cache failures and progress-monitor cancellation propagate to
  the caller.
*/
MagickExport MagickBooleanType OrderedPosterizeImageChannel(Image *image,
  const ChannelType channel,const char *threshold_map,ExceptionInfo *exception)
{
#define DitherImageTag "Dither/Image"

  CacheView
    *image_view;

  LongPixelPacket
    levels;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  ThresholdMap
    *map;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* A NULL map specification is a documented no-op. */
  if (threshold_map == (const char *) NULL)
    return(MagickTrue);
  {
    char
      token[MaxTextExtent];

    register const char
      *p;

    /*
      Extract the map name: skip leading spaces/commas, then copy up to the
      next space, comma, or end-of-string (bounded by MaxTextExtent).
    */
    p=(char *) threshold_map;
    while (((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) &&
           (*p != '\0'))
      p++;
    threshold_map=p;
    while (((isspace((int) ((unsigned char) *p)) == 0) && (*p != ',')) &&
           (*p != '\0'))
    {
      if ((p-threshold_map) >= (MaxTextExtent-1))
        break;
      token[p-threshold_map]=(*p);
      p++;
    }
    token[p-threshold_map]='\0';
    map=GetThresholdMap(token,exception);
    if (map == (ThresholdMap *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "InvalidArgument","%s : '%s'","ordered-dither",threshold_map);
        return(MagickFalse);
      }
  }
  /* Set channel levels from extra comma separated arguments
     Default to 2, the single value given, or individual channel values
  */
#if 1
  { /* parse directly as a comma separated list of integers */
    char *p;

    p = strchr((char *) threshold_map,',');
    if ( p != (char *) NULL && isdigit((int) ((unsigned char) *(++p))) )
      levels.index = (unsigned int) strtoul(p, &p, 10);
    else
      levels.index = 2;
    levels.red = ((channel & RedChannel ) != 0) ? levels.index : 0;
    levels.green = ((channel & GreenChannel) != 0) ? levels.index : 0;
    levels.blue = ((channel & BlueChannel) != 0) ? levels.index : 0;
    levels.opacity = ((channel & OpacityChannel) != 0) ? levels.index : 0;
    levels.index = ((channel & IndexChannel) != 0
      && (image->colorspace == CMYKColorspace)) ? levels.index : 0;
    /* if more than a single number, each channel has a separate value */
    if ( p != (char *) NULL && *p == ',' ) {
      p=strchr((char *) threshold_map,',');
      p++;
      if ((channel & RedChannel) != 0)
        levels.red = (unsigned int) strtoul(p, &p, 10), (void)(*p == ',' && p++);
      if ((channel & GreenChannel) != 0)
        levels.green = (unsigned int) strtoul(p, &p, 10), (void)(*p == ',' && p++);
      if ((channel & BlueChannel) != 0)
        levels.blue = (unsigned int) strtoul(p, &p, 10), (void)(*p == ',' && p++);
      if ((channel & IndexChannel) != 0 && image->colorspace == CMYKColorspace)
        levels.index=(unsigned int) strtoul(p, &p, 10), (void)(*p == ',' && p++);
      if ((channel & OpacityChannel) != 0)
        levels.opacity = (unsigned int) strtoul(p, &p, 10), (void)(*p == ',' && p++);
    }
  }
#else
  /* Parse level values as a geometry */
  /* This difficult!
   * How to map GeometryInfo structure elements into
   * LongPixelPacket structure elements, but according to channel?
   * Note the channels list may skip elements!!!!
   * EG -channel BA -ordered-dither map,2,3
   * will need to map g.rho -> l.blue, and g.sigma -> l.opacity
   * A simpler way is needed, probably converting geometry to a temporary
   * array, then using channel to advance the index into ssize_t pixel packet.
   */
#endif
  { /* Do the posterized ordered dithering of the image */
    ssize_t
      d;

    /* d = number of pseudo-level divisions added between color levels */
    d = map->divisor-1;
    /* reduce levels to levels - 1 */
    levels.red = levels.red ? levels.red-1 : 0;
    levels.green = levels.green ? levels.green-1 : 0;
    levels.blue = levels.blue ? levels.blue-1 : 0;
    levels.opacity = levels.opacity ? levels.opacity-1 : 0;
    levels.index = levels.index ? levels.index-1 : 0;
    if (SetImageStorageClass(image,DirectClass) == MagickFalse)
      {
        InheritException(exception,&image->exception);
        map=DestroyThresholdMap(map);  /* fix: do not leak the map */
        return(MagickFalse);
      }
    status=MagickTrue;
    progress=0;
    image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) shared(progress,status) \
      magick_number_threads(image,image,image->rows,1)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      register IndexPacket
        *magick_restrict indexes;

      register ssize_t
        x;

      register PixelPacket
        *magick_restrict q;

      /* Once any row fails, skip the remaining rows quickly. */
      if (status == MagickFalse)
        continue;
      q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
      if (q == (PixelPacket *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      indexes=GetCacheViewAuthenticIndexQueue(image_view);
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        register ssize_t
          threshold,
          t,
          l;

        /*
          Figure out the dither threshold for this pixel
          This must be a integer from 1 to map->divisor-1
        */
        threshold = map->levels[(x%map->width) +map->width*(y%map->height)];
        /* Dither each channel in the image as appropriate
          Notes on the integer Math...
              total number of divisions = (levels-1)*(divisor-1)+1)
              t1 = this colors pseudo_level =
                      q->red * total_divisions / (QuantumRange+1)
              l = posterization level       0..levels
              t = dither threshold level    0..divisor-1  NB: 0 only on last
              Each color_level is of size   QuantumRange / (levels-1)
              NB: All input levels and divisor are already had 1 subtracted
              Opacity is inverted so 'off' represents transparent.
        */
        if (levels.red) {
          t = (ssize_t) (QuantumScale*GetPixelRed(q)*(levels.red*d+1));
          l = t/d;  t = t-l*d;
          SetPixelRed(q,ClampToQuantum((MagickRealType)
            ((l+(t >= threshold))*(MagickRealType) QuantumRange/levels.red)));
        }
        if (levels.green) {
          t = (ssize_t) (QuantumScale*GetPixelGreen(q)*
            (levels.green*d+1));
          l = t/d;  t = t-l*d;
          SetPixelGreen(q,ClampToQuantum((MagickRealType)
            ((l+(t >= threshold))*(MagickRealType) QuantumRange/levels.green)));
        }
        if (levels.blue) {
          t = (ssize_t) (QuantumScale*GetPixelBlue(q)*
            (levels.blue*d+1));
          l = t/d;  t = t-l*d;
          SetPixelBlue(q,ClampToQuantum((MagickRealType)
            ((l+(t >= threshold))*(MagickRealType) QuantumRange/levels.blue)));
        }
        if (levels.opacity) {
          t = (ssize_t) ((1.0-QuantumScale*GetPixelOpacity(q))*
            (levels.opacity*d+1));
          l = t/d;  t = t-l*d;
          SetPixelOpacity(q,ClampToQuantum((MagickRealType)
            ((1.0-l-(t >= threshold))*(MagickRealType) QuantumRange/
            levels.opacity)));
        }
        if (levels.index) {
          t = (ssize_t) (QuantumScale*GetPixelIndex(indexes+x)*
            (levels.index*d+1));
          l = t/d;  t = t-l*d;
          SetPixelIndex(indexes+x,ClampToQuantum((MagickRealType) ((l+
            (t>=threshold))*(MagickRealType) QuantumRange/levels.index)));
        }
        q++;
      }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        status=MagickFalse;
      if (image->progress_monitor != (MagickProgressMonitor) NULL)
        {
          MagickBooleanType
            proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp atomic
#endif
          progress++;
          proceed=SetImageProgress(image,DitherImageTag,progress,image->rows);
          if (proceed == MagickFalse)
            status=MagickFalse;
        }
    }
    image_view=DestroyCacheView(image_view);
  }
  map=DestroyThresholdMap(map);
  /* fix: propagate failures instead of returning MagickTrue blindly */
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P e r c e p t i b l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PerceptibleImage() sets each pixel whose value is less than |epsilon| to
% epsilon or -epsilon (whichever is closer) otherwise the pixel value remains
% unchanged.
%
% The format of the PerceptibleImageChannel method is:
%
% MagickBooleanType PerceptibleImage(Image *image,const double epsilon)
% MagickBooleanType PerceptibleImageChannel(Image *image,
% const ChannelType channel,const double epsilon)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o epsilon: the epsilon threshold (e.g. 1.0e-9).
%
*/
static inline Quantum PerceptibleThreshold(const Quantum quantum,
  const double epsilon)
{
  /*
    Clamp a quantum whose magnitude is below epsilon to +/-epsilon,
    preserving its sign; values at or above the threshold pass unchanged.
  */
  double
    sign;

  sign=1.0;
  if ((double) quantum < 0.0)
    sign=(-1.0);
  if ((sign*quantum) < epsilon)
    return((Quantum) (sign*epsilon));
  return(quantum);
}
MagickExport MagickBooleanType PerceptibleImage(Image *image,
  const double epsilon)
{
  /*
    Convenience wrapper: apply the perceptible threshold to the default
    channels of the image.
  */
  return(PerceptibleImageChannel(image,DefaultChannels,epsilon));
}
MagickExport MagickBooleanType PerceptibleImageChannel(Image *image,
  const ChannelType channel,const double epsilon)
{
#define PerceptibleImageTag "Perceptible/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      /*
        PseudoClass image: thresholding the colormap entries is sufficient;
        SyncImage() propagates the updated colormap back onto the pixels.
      */
      register ssize_t
        i;

      register PixelPacket
        *magick_restrict q;

      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        SetPixelRed(q,PerceptibleThreshold(GetPixelRed(q),epsilon));
        SetPixelGreen(q,PerceptibleThreshold(GetPixelGreen(q),epsilon));
        SetPixelBlue(q,PerceptibleThreshold(GetPixelBlue(q),epsilon));
        SetPixelOpacity(q,PerceptibleThreshold(GetPixelOpacity(q),epsilon));
        q++;
      }
      return(SyncImage(image));
    }
  /*
    Perceptible image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    /* a failure in any row aborts the remaining (parallel) iterations */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,PerceptibleThreshold(GetPixelRed(q),epsilon));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,PerceptibleThreshold(GetPixelGreen(q),epsilon));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,PerceptibleThreshold(GetPixelBlue(q),epsilon));
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,PerceptibleThreshold(GetPixelOpacity(q),epsilon));
      /* the index channel only carries black ink for CMYK images */
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,PerceptibleThreshold(GetPixelIndex(indexes+x),
          epsilon));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress is shared between threads; increment it atomically */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,PerceptibleImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a n d o m T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RandomThresholdImage() changes the value of individual pixels based on the
% intensity of each pixel compared to a random threshold. The result is a
% low-contrast, two color image.
%
% The format of the RandomThresholdImage method is:
%
% MagickBooleanType RandomThresholdImage(Image *image,
% const char *thresholds,ExceptionInfo *exception)
% MagickBooleanType RandomThresholdImageChannel(Image *image,
% const ChannelType channel,const char *thresholds,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel or channels to be thresholded.
%
% o thresholds: a geometry string containing low,high thresholds. If the
% string contains 2x2, 3x3, or 4x4, an ordered dither of order 2, 3, or 4
% is performed instead.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RandomThresholdImage(Image *image,
  const char *thresholds,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: random-threshold the default channels.
  */
  return(RandomThresholdImageChannel(image,DefaultChannels,thresholds,
    exception));
}
MagickExport MagickBooleanType RandomThresholdImageChannel(Image *image,
  const ChannelType channel,const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickStatusType
    flags;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    threshold;

  MagickRealType
    min_threshold,
    max_threshold;

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* a NULL threshold string is a no-op, not an error */
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  /*
    Parse the threshold geometry: rho is the low threshold, sigma the high
    one; a missing sigma collapses the band to a single value.
  */
  GetMagickPixelPacket(image,&threshold);
  min_threshold=0.0;
  max_threshold=(MagickRealType) QuantumRange;
  flags=ParseGeometry(thresholds,&geometry_info);
  min_threshold=geometry_info.rho;
  max_threshold=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    max_threshold=min_threshold;
  if (strchr(thresholds,'%') != (char *) NULL)
    {
      /* percent form: scale thresholds into the quantum range */
      max_threshold*=(MagickRealType) (0.01*QuantumRange);
      min_threshold*=(MagickRealType) (0.01*QuantumRange);
    }
  else
    if (((max_threshold == min_threshold) || (max_threshold == 1)) &&
        (min_threshold <= 8))
      {
        /*
          Backward Compatibility -- ordered-dither -- IM v 6.2.9-6.
        */
        status=OrderedPosterizeImageChannel(image,channel,thresholds,exception);
        return(status);
      }
  /*
    Random threshold image.
  */
  status=MagickTrue;
  progress=0;
  if (channel == CompositeChannels)
    {
      /*
        All channels requested: reduce to a two-color image keyed off pixel
        intensity versus a per-pixel random threshold.
      */
      if (AcquireImageColormap(image,2) == MagickFalse)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      random_info=AcquireRandomInfoThreadSet();
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      key=GetRandomSecretKey(random_info[0]);
      #pragma omp parallel for schedule(static) shared(progress,status) \
        magick_number_threads(image,image,image->rows,key == ~0UL)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        MagickBooleanType
          sync;

        register IndexPacket
          *magick_restrict indexes;

        register ssize_t
          x;

        register PixelPacket
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          IndexPacket
            index;

          MagickRealType
            intensity;

          /*
            Intensities outside [min,max] are compared against the nearest
            band edge; inside the band the threshold is random.
          */
          intensity=GetPixelIntensity(image,q);
          if (intensity < min_threshold)
            threshold.index=min_threshold;
          else if (intensity > max_threshold)
            threshold.index=max_threshold;
          else
            threshold.index=(MagickRealType)(QuantumRange*
              GetPseudoRandomValue(random_info[id]));
          index=(IndexPacket) (intensity <= threshold.index ? 0 : 1);
          SetPixelIndex(indexes+x,index);
          SetPixelRGBO(q,image->colormap+(ssize_t) index);
          q++;
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            /* progress is shared between threads; increment atomically */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp atomic
#endif
            progress++;
            proceed=SetImageProgress(image,ThresholdImageTag,progress,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
      random_info=DestroyRandomInfoThreadSet(random_info);
      return(status);
    }
  /*
    Channel subset requested: threshold each selected channel independently.
  */
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&image->exception);
      return(MagickFalse);
    }
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        For each selected channel: clamp the comparison threshold to the
        band edge when the sample is outside [min,max], otherwise pick a
        random threshold; then binarize the sample against it below.
      */
      if ((channel & RedChannel) != 0)
        {
          if ((MagickRealType) GetPixelRed(q) < min_threshold)
            threshold.red=min_threshold;
          else
            if ((MagickRealType) GetPixelRed(q) > max_threshold)
              threshold.red=max_threshold;
            else
              threshold.red=(MagickRealType) (QuantumRange*
                GetPseudoRandomValue(random_info[id]));
        }
      if ((channel & GreenChannel) != 0)
        {
          if ((MagickRealType) GetPixelGreen(q) < min_threshold)
            threshold.green=min_threshold;
          else
            if ((MagickRealType) GetPixelGreen(q) > max_threshold)
              threshold.green=max_threshold;
            else
              threshold.green=(MagickRealType) (QuantumRange*
                GetPseudoRandomValue(random_info[id]));
        }
      if ((channel & BlueChannel) != 0)
        {
          if ((MagickRealType) GetPixelBlue(q) < min_threshold)
            threshold.blue=min_threshold;
          else
            if ((MagickRealType) GetPixelBlue(q) > max_threshold)
              threshold.blue=max_threshold;
            else
              threshold.blue=(MagickRealType) (QuantumRange*
                GetPseudoRandomValue(random_info[id]));
        }
      if ((channel & OpacityChannel) != 0)
        {
          if ((MagickRealType) GetPixelOpacity(q) < min_threshold)
            threshold.opacity=min_threshold;
          else
            if ((MagickRealType) GetPixelOpacity(q) > max_threshold)
              threshold.opacity=max_threshold;
            else
              threshold.opacity=(MagickRealType) (QuantumRange*
                GetPseudoRandomValue(random_info[id]));
        }
      /* the index channel only carries black ink for CMYK images */
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          if ((MagickRealType) GetPixelIndex(indexes+x) < min_threshold)
            threshold.index=min_threshold;
          else
            if ((MagickRealType) GetPixelIndex(indexes+x) > max_threshold)
              threshold.index=max_threshold;
            else
              threshold.index=(MagickRealType) (QuantumRange*
                GetPseudoRandomValue(random_info[id]));
        }
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,(MagickRealType) GetPixelRed(q) <= threshold.red ?
          0 : QuantumRange);
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,(MagickRealType) GetPixelGreen(q) <= threshold.green ?
          0 : QuantumRange);
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,(MagickRealType) GetPixelBlue(q) <= threshold.blue ?
          0 : QuantumRange);
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,(MagickRealType) GetPixelOpacity(q) <=
          threshold.opacity ? 0 : QuantumRange);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,(MagickRealType) GetPixelIndex(indexes+x) <=
          threshold.index ? 0 : QuantumRange);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress is shared between threads; increment atomically */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ThresholdImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W h i t e T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WhiteThresholdImage() is like ThresholdImage() but forces all pixels above
% the threshold into white while leaving all pixels at or below the threshold
% unchanged.
%
% The format of the WhiteThresholdImage method is:
%
% MagickBooleanType WhiteThresholdImage(Image *image,const char *threshold)
% MagickBooleanType WhiteThresholdImageChannel(Image *image,
% const ChannelType channel,const char *threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel or channels to be thresholded.
%
% o threshold: Define the threshold value.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WhiteThresholdImage(Image *image,
  const char *threshold)
{
  /*
    Convenience wrapper: white-threshold the default channels, reporting
    errors through the image's own exception structure.
  */
  return(WhiteThresholdImageChannel(image,DefaultChannels,threshold,
    &image->exception));
}
MagickExport MagickBooleanType WhiteThresholdImageChannel(Image *image,
  const ChannelType channel,const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* a NULL threshold string is a no-op, not an error */
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /*
    Parse per-channel thresholds: rho=red, sigma=green, xi=blue,
    psi=opacity, chi=index; any component not given falls back to the red
    threshold.
  */
  flags=ParseGeometry(thresholds,&geometry_info);
  GetMagickPixelPacket(image,&threshold);
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    threshold.green=threshold.red;
  threshold.blue=geometry_info.xi;
  if ((flags & XiValue) == 0)
    threshold.blue=threshold.red;
  threshold.opacity=geometry_info.psi;
  if ((flags & PsiValue) == 0)
    threshold.opacity=threshold.red;
  threshold.index=geometry_info.chi;
  if ((flags & ChiValue) == 0)
    threshold.index=threshold.red;
  if ((flags & PercentValue) != 0)
    {
      /* percent form: scale each threshold into the quantum range */
      threshold.red*=(MagickRealType) (QuantumRange/100.0);
      threshold.green*=(MagickRealType) (QuantumRange/100.0);
      threshold.blue*=(MagickRealType) (QuantumRange/100.0);
      threshold.opacity*=(MagickRealType) (QuantumRange/100.0);
      threshold.index*=(MagickRealType) (QuantumRange/100.0);
    }
  /* a non-gray threshold on a gray image forces promotion to sRGB */
  if ((IsMagickGray(&threshold) == MagickFalse) &&
      (IsGrayColorspace(image->colorspace) != MagickFalse))
    (void) SetImageColorspace(image,sRGBColorspace);
  /*
    White threshold image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* force samples above their threshold to white; others unchanged */
      if (((channel & RedChannel) != 0) &&
          ((MagickRealType) GetPixelRed(q) > threshold.red))
        SetPixelRed(q,QuantumRange);
      if (((channel & GreenChannel) != 0) &&
          ((MagickRealType) GetPixelGreen(q) > threshold.green))
        SetPixelGreen(q,QuantumRange);
      if (((channel & BlueChannel) != 0) &&
          ((MagickRealType) GetPixelBlue(q) > threshold.blue))
        SetPixelBlue(q,QuantumRange);
      if (((channel & OpacityChannel) != 0) &&
          ((MagickRealType) GetPixelOpacity(q) > threshold.opacity))
        SetPixelOpacity(q,QuantumRange);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          ((MagickRealType) GetPixelIndex(indexes+x)) > threshold.index)
        SetPixelIndex(indexes+x,QuantumRange);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress is shared between threads; increment atomically */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ThresholdImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
|
updater_basemaker-inl.h | /*!
* Copyright 2014 by Contributors
* \file updater_basemaker-inl.h
* \brief implement a common tree constructor
* \author Tianqi Chen
*/
#ifndef XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
#define XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
#include <vector>
#include <algorithm>
#include <string>
#include <limits>
#include <utility>
#include <rabit/rabit.h>
#include "xgboost/base.h"
#include "xgboost/json.h"
#include "xgboost/tree_updater.h"
#include "param.h"
#include "constraints.h"
#include "../common/io.h"
#include "../common/random.h"
#include "../common/quantile.h"
namespace xgboost {
namespace tree {
/*!
* \brief base tree maker class that defines common operation
* needed in tree making
*/
class BaseMaker: public TreeUpdater {
 public:
  // Update training parameters from key/value argument pairs.
  void Configure(const Args& args) override {
    param_.UpdateAllowUnknown(args);
  }

  // Restore the training parameters from a JSON configuration object.
  void LoadConfig(Json const& in) override {
    auto const& config = get<Object const>(in);
    FromJson(config.at("train_param"), &this->param_);
  }
  // Serialize the training parameters into a JSON configuration object.
  void SaveConfig(Json* p_out) const override {
    auto& out = *p_out;
    out["train_param"] = ToJson(param_);
  }

 protected:
  // helper to collect and query feature meta information
  struct FMetaHelper {
   public:
    /*! \brief find type of each feature, use column format */
    // fminmax_[fid*2] holds the NEGATED minimum feature value and
    // fminmax_[fid*2+1] the maximum, so both halves can later be merged
    // with a single max-Allreduce in SyncInfo().
    inline void InitByCol(DMatrix* p_fmat,
                          const RegTree& tree) {
      fminmax_.resize(tree.param.num_feature * 2);
      std::fill(fminmax_.begin(), fminmax_.end(),
                -std::numeric_limits<bst_float>::max());
      // start accumulating statistics
      for (const auto &batch : p_fmat->GetBatches<SortedCSCPage>()) {
        for (bst_uint fid = 0; fid < batch.Size(); ++fid) {
          auto c = batch[fid];
          if (c.size() != 0) {
            // columns are sorted, so c[0] is the minimum and
            // c[c.size()-1] the maximum value of this feature
            CHECK_LT(fid * 2, fminmax_.size());
            fminmax_[fid * 2 + 0] =
                std::max(-c[0].fvalue, fminmax_[fid * 2 + 0]);
            fminmax_[fid * 2 + 1] =
                std::max(c[c.size() - 1].fvalue, fminmax_[fid * 2 + 1]);
          }
        }
      }
    }
    /*! \brief synchronize the information */
    inline void SyncInfo() {
      rabit::Allreduce<rabit::op::Max>(dmlc::BeginPtr(fminmax_), fminmax_.size());
    }
    // get feature type, 0:empty 1:binary 2:real
    inline int Type(bst_uint fid) const {
      CHECK_LT(fid * 2 + 1, fminmax_.size())
          << "FeatHelper fid exceed query bound ";
      bst_float a = fminmax_[fid * 2];
      bst_float b = fminmax_[fid * 2 + 1];
      // a still at its initial sentinel means no value was ever observed
      if (a == -std::numeric_limits<bst_float>::max()) return 0;
      // min == max (remember a is the negated minimum): single-valued
      if (-a == b) {
        return 1;
      } else {
        return 2;
      }
    }
    // Largest value observed for the given feature.
    bst_float MaxValue(bst_uint fid) const {
      return fminmax_[fid *2 + 1];
    }
    // Sample a fraction p of the non-empty features into *p_findex;
    // rank 0's selection is broadcast so all workers agree on the subset.
    void SampleCol(float p, std::vector<bst_feature_t> *p_findex) const {
      std::vector<bst_feature_t> &findex = *p_findex;
      findex.clear();
      for (size_t i = 0; i < fminmax_.size(); i += 2) {
        const auto fid = static_cast<bst_uint>(i / 2);
        if (this->Type(fid) != 0) findex.push_back(fid);
      }
      auto n = static_cast<unsigned>(p * findex.size());
      std::shuffle(findex.begin(), findex.end(), common::GlobalRandom());
      findex.resize(n);
      // sync the findex if it is subsample
      std::string s_cache;
      common::MemoryBufferStream fc(&s_cache);
      dmlc::Stream& fs = fc;
      if (rabit::GetRank() == 0) {
        fs.Write(findex);
      }
      rabit::Broadcast(&s_cache, 0);
      fs.Read(&findex);
    }

   private:
    std::vector<bst_float> fminmax_;
  };
  // ------static helper functions ------
  // helper function to get to next level of the tree
  /*! \brief this is helper function for row based data*/
  inline static int NextLevel(const SparsePage::Inst &inst, const RegTree &tree, int nid) {
    const RegTree::Node &n = tree[nid];
    bst_uint findex = n.SplitIndex();
    // scan the sparse instance for the split feature; if the feature is
    // missing, fall through to the node's default child
    for (const auto& ins : inst) {
      if (findex == ins.index) {
        if (ins.fvalue < n.SplitCond()) {
          return n.LeftChild();
        } else {
          return n.RightChild();
        }
      }
    }
    return n.DefaultChild();
  }
  // ------class member helpers---------
  /*! \brief initialize temp data structure */
  inline void InitData(const std::vector<GradientPair> &gpair,
                       const DMatrix &fmat,
                       const RegTree &tree) {
    {
      // setup position: all rows start at the root (node 0)
      position_.resize(gpair.size());
      std::fill(position_.begin(), position_.end(), 0);
      // mark delete for the deleted datas
      // (a negative hessian flags a row as removed; ~nid encodes "inactive")
      for (size_t i = 0; i < position_.size(); ++i) {
        if (gpair[i].GetHess() < 0.0f) position_[i] = ~position_[i];
      }
      // mark subsample
      if (param_.subsample < 1.0f) {
        CHECK_EQ(param_.sampling_method, TrainParam::kUniform)
          << "Only uniform sampling is supported, "
          << "gradient-based sampling is only support by GPU Hist.";
        std::bernoulli_distribution coin_flip(param_.subsample);
        auto& rnd = common::GlobalRandom();
        for (size_t i = 0; i < position_.size(); ++i) {
          if (gpair[i].GetHess() < 0.0f) continue;
          if (!coin_flip(rnd)) position_[i] = ~position_[i];
        }
      }
    }
    {
      // expand query: start expanding from the root only
      qexpand_.reserve(256); qexpand_.clear();
      qexpand_.push_back(0);
      this->UpdateNode2WorkIndex(tree);
    }
    this->interaction_constraints_.Configure(param_, fmat.Info().num_col_);
  }
  /*! \brief update queue expand add in new leaves */
  inline void UpdateQueueExpand(const RegTree &tree) {
    std::vector<int> newnodes;
    for (int nid : qexpand_) {
      if (!tree[nid].IsLeaf()) {
        newnodes.push_back(tree[nid].LeftChild());
        newnodes.push_back(tree[nid].RightChild());
      }
    }
    // use new nodes for qexpand
    qexpand_ = newnodes;
    this->UpdateNode2WorkIndex(tree);
  }
  // return decoded position
  inline int DecodePosition(bst_uint ridx) const {
    // negative values encode "inactive at ~nid"; recover the node id
    const int pid = position_[ridx];
    return pid < 0 ? ~pid : pid;
  }
  // encode the encoded position value for ridx
  inline void SetEncodePosition(bst_uint ridx, int nid) {
    // preserve the inactive (negative) encoding while moving the row to nid
    if (position_[ridx] < 0) {
      position_[ridx] = ~nid;
    } else {
      position_[ridx] = nid;
    }
  }
  /*!
   * \brief this is helper function uses column based data structure,
   *  reset the positions to the lastest one
   * \param nodes the set of nodes that contains the split to be used
   * \param p_fmat feature matrix needed for tree construction
   * \param tree the regression tree structure
   */
  inline void ResetPositionCol(const std::vector<int> &nodes,
                               DMatrix *p_fmat,
                               const RegTree &tree) {
    // set the positions in the nondefault
    this->SetNonDefaultPositionCol(nodes, p_fmat, tree);
    this->SetDefaultPostion(p_fmat, tree);
  }
  /*!
   * \brief helper function to set the non-leaf positions to default direction.
   *  This function can be applied multiple times and will get the same result.
   * \param p_fmat feature matrix needed for tree construction
   * \param tree the regression tree structure
   */
  inline void SetDefaultPostion(DMatrix *p_fmat,
                                const RegTree &tree) {
    // set default direct nodes to default
    // for leaf nodes that are not fresh, mark then to ~nid,
    // so that they are ignored in future statistics collection
    const auto ndata = static_cast<bst_omp_uint>(p_fmat->Info().num_row_);

    #pragma omp parallel for schedule(static)
    for (bst_omp_uint ridx = 0; ridx < ndata; ++ridx) {
      const int nid = this->DecodePosition(ridx);
      if (tree[nid].IsLeaf()) {
        // mark finish when it is not a fresh leaf
        if (tree[nid].RightChild() == -1) {
          position_[ridx] = ~nid;
        }
      } else {
        // push to default branch
        if (tree[nid].DefaultLeft()) {
          this->SetEncodePosition(ridx, tree[nid].LeftChild());
        } else {
          this->SetEncodePosition(ridx, tree[nid].RightChild());
        }
      }
    }
  }
  /*!
   * \brief this is helper function uses column based data structure,
   *  to CORRECT the positions of non-default directions that WAS set to default
   *  before calling this function.
   * \param batch The column batch
   * \param sorted_split_set The set of index that contains split solutions.
   * \param tree the regression tree structure
   */
  inline void CorrectNonDefaultPositionByBatch(
      const SparsePage &batch, const std::vector<bst_uint> &sorted_split_set,
      const RegTree &tree) {
    for (size_t fid = 0; fid < batch.Size(); ++fid) {
      auto col = batch[fid];
      auto it = std::lower_bound(sorted_split_set.begin(), sorted_split_set.end(), fid);

      if (it != sorted_split_set.end() && *it == fid) {
        const auto ndata = static_cast<bst_omp_uint>(col.size());
        #pragma omp parallel for schedule(static)
        for (bst_omp_uint j = 0; j < ndata; ++j) {
          const bst_uint ridx = col[j].index;
          const bst_float fvalue = col[j].fvalue;
          const int nid = this->DecodePosition(ridx);
          CHECK(tree[nid].IsLeaf());
          int pid = tree[nid].Parent();

          // go back to parent, correct those who are not default
          if (!tree[nid].IsRoot() && tree[pid].SplitIndex() == fid) {
            if (fvalue < tree[pid].SplitCond()) {
              this->SetEncodePosition(ridx, tree[pid].LeftChild());
            } else {
              this->SetEncodePosition(ridx, tree[pid].RightChild());
            }
          }
        }
      }
    }
  }
  /*!
   * \brief this is helper function uses column based data structure,
   * \param nodes the set of nodes that contains the split to be used
   * \param tree the regression tree structure
   * \param out_split_set The split index set
   */
  inline void GetSplitSet(const std::vector<int> &nodes,
                          const RegTree &tree,
                          std::vector<unsigned>* out_split_set) {
    std::vector<unsigned>& fsplits = *out_split_set;
    fsplits.clear();
    // step 1, classify the non-default data into right places
    for (int nid : nodes) {
      if (!tree[nid].IsLeaf()) {
        fsplits.push_back(tree[nid].SplitIndex());
      }
    }
    // deduplicate the collected split feature indices
    std::sort(fsplits.begin(), fsplits.end());
    fsplits.resize(std::unique(fsplits.begin(), fsplits.end()) - fsplits.begin());
  }
  /*!
   * \brief this is helper function uses column based data structure,
   *  update all positions into nondefault branch, if any, ignore the default branch
   * \param nodes the set of nodes that contains the split to be used
   * \param p_fmat feature matrix needed for tree construction
   * \param tree the regression tree structure
   */
  virtual void SetNonDefaultPositionCol(const std::vector<int> &nodes,
                                        DMatrix *p_fmat,
                                        const RegTree &tree) {
    std::vector<unsigned> fsplits;
    this->GetSplitSet(nodes, tree, &fsplits);

    for (const auto &batch : p_fmat->GetBatches<SortedCSCPage>()) {
      for (auto fid : fsplits) {
        auto col = batch[fid];
        const auto ndata = static_cast<bst_omp_uint>(col.size());
        #pragma omp parallel for schedule(static)
        for (bst_omp_uint j = 0; j < ndata; ++j) {
          const bst_uint ridx = col[j].index;
          const bst_float fvalue = col[j].fvalue;
          const int nid = this->DecodePosition(ridx);
          // go back to parent, correct those who are not default
          if (!tree[nid].IsLeaf() && tree[nid].SplitIndex() == fid) {
            if (fvalue < tree[nid].SplitCond()) {
              this->SetEncodePosition(ridx, tree[nid].LeftChild());
            } else {
              this->SetEncodePosition(ridx, tree[nid].RightChild());
            }
          }
        }
      }
    }
  }
  /*! \brief helper function to get statistics from a tree */
  template<typename TStats>
  inline void GetNodeStats(const std::vector<GradientPair> &gpair,
                           const DMatrix &fmat,
                           const RegTree &tree,
                           std::vector< std::vector<TStats> > *p_thread_temp,
                           std::vector<TStats> *p_node_stats) {
    std::vector< std::vector<TStats> > &thread_temp = *p_thread_temp;
    thread_temp.resize(omp_get_max_threads());
    p_node_stats->resize(tree.param.num_nodes);
    // per-thread accumulators avoid synchronization in the row loop below
    #pragma omp parallel
    {
      const int tid = omp_get_thread_num();
      thread_temp[tid].resize(tree.param.num_nodes, TStats());
      for (unsigned int nid : qexpand_) {
        thread_temp[tid][nid] = TStats();
      }
    }
    // setup position
    const auto ndata = static_cast<bst_omp_uint>(fmat.Info().num_row_);
    #pragma omp parallel for schedule(static)
    for (bst_omp_uint ridx = 0; ridx < ndata; ++ridx) {
      const int nid = position_[ridx];
      const int tid = omp_get_thread_num();
      // negative nid means the row is inactive; skip it
      if (nid >= 0) {
        thread_temp[tid][nid].Add(gpair[ridx]);
      }
    }
    // sum the per thread statistics together
    for (int nid : qexpand_) {
      TStats &s = (*p_node_stats)[nid];
      s = TStats();
      for (size_t tid = 0; tid < thread_temp.size(); ++tid) {
        s.Add(thread_temp[tid][nid]);
      }
    }
  }
  /*! \brief common helper data structure to build sketch */
  struct SketchEntry {
    /*! \brief total sum of amount to be met */
    double sum_total;
    /*! \brief statistics used in the sketch */
    double rmin, wmin;
    /*! \brief last seen feature value */
    bst_float last_fvalue;
    /*! \brief current size of sketch */
    double next_goal;
    // pointer to the sketch to put things in
    common::WXQuantileSketch<bst_float, bst_float> *sketch;
    // initialize the space
    inline void Init(unsigned max_size) {
      // next_goal == -1 marks "no element pushed yet"; see Push()
      next_goal = -1.0f;
      rmin = wmin = 0.0f;
      sketch->temp.Reserve(max_size + 1);
      sketch->temp.size = 0;
    }
    /*!
     * \brief push a new element to sketch
     * \param fvalue feature value, comes in sorted ascending order
     * \param w weight
     * \param max_size
     */
    inline void Push(bst_float fvalue, bst_float w, unsigned max_size) {
      if (next_goal == -1.0f) {
        // first element: just record it
        next_goal = 0.0f;
        last_fvalue = fvalue;
        wmin = w;
        return;
      }
      if (last_fvalue != fvalue) {
        // a new distinct value: decide whether to emit the previous one
        double rmax = rmin + wmin;
        if (rmax >= next_goal && sketch->temp.size != max_size) {
          if (sketch->temp.size == 0 ||
              last_fvalue > sketch->temp.data[sketch->temp.size-1].value) {
            // push to sketch
            sketch->temp.data[sketch->temp.size] =
                common::WXQuantileSketch<bst_float, bst_float>::
                Entry(static_cast<bst_float>(rmin),
                      static_cast<bst_float>(rmax),
                      static_cast<bst_float>(wmin), last_fvalue);
            CHECK_LT(sketch->temp.size, max_size)
                << "invalid maximum size max_size=" << max_size
                << ", stemp.size" << sketch->temp.size;
            ++sketch->temp.size;
          }
          if (sketch->temp.size == max_size) {
            // sketch full: set an unreachable goal to stop further pushes
            next_goal = sum_total * 2.0f + 1e-5f;
          } else {
            next_goal = static_cast<bst_float>(sketch->temp.size * sum_total / max_size);
          }
        } else {
          if (rmax >= next_goal) {
            LOG(TRACKER) << "INFO: rmax=" << rmax
                         << ", sum_total=" << sum_total
                         << ", naxt_goal=" << next_goal
                         << ", size=" << sketch->temp.size;
          }
        }
        rmin = rmax;
        wmin = w;
        last_fvalue = fvalue;
      } else {
        // same value as last time: accumulate its weight
        wmin += w;
      }
    }
    /*! \brief push final unfinished value to the sketch */
    inline void Finalize(unsigned max_size) {
      double rmax = rmin + wmin;
      if (sketch->temp.size == 0 || last_fvalue > sketch->temp.data[sketch->temp.size-1].value) {
        CHECK_LE(sketch->temp.size, max_size)
            << "Finalize: invalid maximum size, max_size=" << max_size
            << ", stemp.size=" << sketch->temp.size;
        // push to sketch
        sketch->temp.data[sketch->temp.size] =
            common::WXQuantileSketch<bst_float, bst_float>::
            Entry(static_cast<bst_float>(rmin),
                  static_cast<bst_float>(rmax),
                  static_cast<bst_float>(wmin), last_fvalue);
        ++sketch->temp.size;
      }
      sketch->PushTemp();
    }
  };
  /*! \brief training parameter of tree grower */
  TrainParam param_;
  /*! \brief queue of nodes to be expanded */
  std::vector<int> qexpand_;
  /*!
   * \brief map active node to is working index offset in qexpand,
   *   can be -1, which means the node is node actively expanding
   */
  std::vector<int> node2workindex_;
  /*!
   * \brief position of each instance in the tree
   *   can be negative, which means this position is no longer expanding
   *   see also Decode/EncodePosition
   */
  std::vector<int> position_;

  FeatureInteractionConstraintHost interaction_constraints_;

 private:
  inline void UpdateNode2WorkIndex(const RegTree &tree) {
    // update the node2workindex
    std::fill(node2workindex_.begin(), node2workindex_.end(), -1);
    node2workindex_.resize(tree.param.num_nodes);
    for (size_t i = 0; i < qexpand_.size(); ++i) {
      node2workindex_[qexpand_[i]] = static_cast<int>(i);
    }
  }
};
} // namespace tree
} // namespace xgboost
#endif // XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
|
thread_scheduling.c | // RUN: %libomp-compile && env KMP_ABT_NUM_ESS=4 %libomp-run
// REQUIRES: abt
#include "omp_testsuite.h"
#include "bolt_scheduling_util.h"
// Verifies that both a parallel region and a parallel for loop get scheduled
// across all four execution streams: each slot of vals is bumped by 1 in the
// region and by 2 in the loop, so every slot must end up exactly 3.
// Returns 1 on success, 0 on failure.
int test_thread_scheduling(int num_threads) {
  int idx;
  int vals[num_threads];
  timeout_barrier_t barrier;
  for (idx = 0; idx < num_threads; idx++)
    vals[idx] = 0;
  timeout_barrier_init(&barrier);
#pragma omp parallel num_threads(num_threads)
  {
    check_num_ess(4);
    // All four ESs must take part in the barrier.
    timeout_barrier_wait(&barrier, 4);
    vals[omp_get_thread_num()] += 1;
  }
#pragma omp parallel for num_threads(num_threads)
  for (idx = 0; idx < num_threads; idx++) {
    check_num_ess(4);
    // All four ESs must take part in the barrier.
    timeout_barrier_wait(&barrier, 4);
    vals[idx] += 2;
  }
  for (idx = 0; idx < num_threads; idx++) {
    if (vals[idx] != 3) {
      printf("vals[%d] == %d\n", idx, vals[idx]);
      return 0;
    }
  }
  return 1;
}
// Runs the scheduling test with 4, 8, and 12 threads; the process exit code
// is the number of failed runs (0 == all passed).
int main() {
  int trial;
  int failures = 0;
  for (trial = 1; trial < 4; trial++) {
    if (!test_thread_scheduling(trial * 4))
      failures++;
  }
  return failures;
}
|
DRB111-linearmissing-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
/*
* loop missing the linear clause
* Data race pair: j@67:7 vs. j@68:5
*/
int main()
{
  int len=100;
  // VLAs sized by 'len'; a/b hold inputs, c accumulates products.
  double a[len], b[len], c[len];
  int i,j=0;
  // Initialization loop: each iteration touches only its own index, so
  // privatizing i is sufficient here.
#pragma omp parallel for private(i)
  for (i=0;i<len;i++)
  {
    a[i]=((double)i)/2.0;
    b[i]=((double)i)/3.0;
    c[i]=((double)i)/7.0;
  }
  // This is a DataRaceBench kernel: the race on j is the point of the test —
  // do not "fix" it.
  // NOTE(review): the file header says the linear clause is MISSING
  // ("DRB111-linearmissing"), yet linear(j) appears below; verify this chunk
  // against the upstream DRB111 (no linear) vs DRB112 (linear present) sources.
#pragma omp parallel for private(i) linear(j)
  for (i=0;i<len;i++)
  {
    c[j]+=a[i]*b[i];
    j++;
  }
  printf ("c[50]=%f\n",c[50]);
  return 0;
}
|
omp_atomic.c | <ompts:test>
<ompts:testdescription>Test which checks the omp atomic directive by counting up a variable in a parallelized loop with an atomic directive.</ompts:testdescription>
<ompts:ompversion>2.0</ompts:ompversion>
<ompts:directive>omp atomic</ompts:directive>
<ompts:testcode>
#include <stdio.h>
#include <unistd.h>
#include <math.h>
#include "omp_testsuite.h"
#include "omp_my_sleep.h"
/* Exercises "#pragma omp atomic" with every supported update form:
 * +=, -=, *=, /=, ++, --, &=, |=, ^=, <<=, >>= on int and double operands.
 * Each sub-test runs the update concurrently and compares against the
 * sequentially known result. Returns 1 (true) when all sub-tests pass. */
int <ompts:testcode:functionname>omp_atomic</ompts:testcode:functionname> (FILE * logFile)
{
    <ompts:orphan:vars>
    int sum;
    int diff;
    double dsum = 0;
    double dt = 0.5; /* base of geometric row for + and - test*/
    double ddiff;
    int product;
    int x;
    int *logics;
    int bit_and = 1;
    int bit_or = 0;
    int exclusiv_bit_or = 0;
    </ompts:orphan:vars>
#define DOUBLE_DIGITS 20 /* dt^DOUBLE_DIGITS */
#define MAX_FACTOR 10
#define KNOWN_PRODUCT 3628800 /* 10! */
    int j;
    int known_sum;
    int known_diff;
    int known_product;
    int result = 0;
    int logic_and = 1;
    int logic_or = 0;
    double dknown_sum;
    double rounding_error = 1.E-9;
    double dpt, div;
    int logicsArray[LOOPCOUNT];
    logics = logicsArray;
    sum = 0;
    diff = 0;
    product = 1;
    /* Test 1: atomic integer addition (sum += i). */
#pragma omp parallel
    {
	<ompts:orphan>
	int i;
#pragma omp for
	for (i = 1; i <= LOOPCOUNT; i++)
	  {
	    <ompts:check>#pragma omp atomic</ompts:check>
	    sum += i;
	  }
	</ompts:orphan>
    }
    /* Gauss sum 1..LOOPCOUNT gives the expected value. */
    known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
    if (known_sum != sum)
    {
	fprintf (logFile,
		 "Error in sum with integers: Result was %d instead of %d.\n",
		 sum, known_sum);
	result++;
    }
    /* Test 2: atomic integer subtraction (diff -= i). */
#pragma omp parallel
    {
	<ompts:orphan>
	int i;
#pragma omp for
	for (i = 0; i < LOOPCOUNT; i++)
	  {
	    <ompts:check>#pragma omp atomic</ompts:check>
	    diff -= i;
	  }
	</ompts:orphan>
    }
    known_diff = ((LOOPCOUNT - 1) * LOOPCOUNT) / 2 * -1;
    if (diff != known_diff)
    {
	fprintf (logFile,
		 "Error in difference with integers: Result was %d instead of 0.\n",
		 diff);
	result++;
    }
    /* Tests for doubles */
    /* Test 3: atomic double addition of a geometric series sum. */
    dsum = 0;
    dpt = 1;
    for (j = 0; j < DOUBLE_DIGITS; ++j)
    {
	dpt *= dt;
    }
    /* Closed form of the geometric series: (1 - dt^n) / (1 - dt). */
    dknown_sum = (1 - dpt) / (1 -dt);
#pragma omp parallel
    {
	<ompts:orphan>
	int i;
#pragma omp for
	for (i = 0; i < DOUBLE_DIGITS; ++i)
	  {
	    <ompts:check>#pragma omp atomic</ompts:check>
	    dsum += pow (dt, i);
	  }
	</ompts:orphan>
    }
    /* Floating-point compare allows for accumulated rounding error. */
    if (dsum != dknown_sum && (fabs (dsum - dknown_sum) > rounding_error))
    {
	fprintf (logFile,
		 "Error in sum with doubles: Result was %f instead of: %f (Difference: %E)\n",
		 dsum, dknown_sum, dsum - dknown_sum);
	result++;
    }
    /* Test 4: atomic double subtraction; should drive ddiff back to ~0. */
    dpt = 1;
    for (j = 0; j < DOUBLE_DIGITS; ++j)
    {
	dpt *= dt;
    }
    ddiff = (1 - dpt) / (1 - dt);
#pragma omp parallel
    {
	<ompts:orphan>
	int i;
#pragma omp for
	for (i = 0; i < DOUBLE_DIGITS; ++i)
	  {
	    <ompts:check>#pragma omp atomic</ompts:check>
	    ddiff -= pow (dt, i);
	  }
	</ompts:orphan>
    }
    if (fabs (ddiff) > rounding_error)
    {
	fprintf (logFile,
		 "Error in difference with doubles: Result was %E instead of 0.0\n",
		 ddiff);
	result++;
    }
    /* Test 5: atomic integer multiplication (product *= i), expecting 10!. */
#pragma omp parallel
    {
	<ompts:orphan>
	int i;
#pragma omp for
	for (i = 1; i <= MAX_FACTOR; i++)
	  {
	    <ompts:check>#pragma omp atomic</ompts:check>
	    product *= i;
	  }
	</ompts:orphan>
    }
    known_product = KNOWN_PRODUCT;
    if (known_product != product)
    {
	fprintf (logFile,
		 "Error in product with integers: Result was %d instead of %d\n",
		 product, known_product);
	result++;
    }
    /* Test 6: atomic integer division (product /= i); 10! / 10! == 1. */
    product = KNOWN_PRODUCT;
#pragma omp parallel
    {
	<ompts:orphan>
	int i;
#pragma omp for
	for (i = 1; i <= MAX_FACTOR; ++i)
	  {
	    <ompts:check>#pragma omp atomic</ompts:check>
	    product /= i;
	  }
	</ompts:orphan>
    }
    if (product != 1)
    {
	fprintf (logFile,
		 "Error in product division with integers: Result was %d instead of 1\n",
		 product);
	result++;
    }
    /* Test 7: atomic double division (div /= i); 5.0E+5 / 10! ~= 0.137787. */
    div = 5.0E+5;
#pragma omp parallel
    {
	int i;
#pragma omp for
	for (i = 1; i <= MAX_FACTOR; i++)
	  {
	    <ompts:check>#pragma omp atomic</ompts:check>
	    div /= i;
	  }
    }
    if (fabs(div-0.137787) >= 1.0E-4 )
    {
	result++;
	fprintf (logFile,
		 "Error in division with double: Result was %f instead of 0.137787\n", div);
    }
    /* Test 8: atomic increment. */
    x = 0;
#pragma omp parallel
    {
	<ompts:orphan>
	int i;
#pragma omp for
	for (i = 0; i < LOOPCOUNT; ++i)
	  {
	    <ompts:check>#pragma omp atomic</ompts:check>
	    x++;
	  }
	</ompts:orphan>
    }
    if (x != LOOPCOUNT)
    {
	result++;
	fprintf (logFile, "Error in ++\n");
    }
    /* Test 9: atomic decrement; brings x back to 0. */
#pragma omp parallel
    {
	<ompts:orphan>
	int i;
#pragma omp for
	for (i = 0; i < LOOPCOUNT; ++i)
	  {
	    <ompts:check>#pragma omp atomic</ompts:check>
	    x--;
	  }
	</ompts:orphan>
    }
    if (x != 0)
    {
	result++;
	fprintf (logFile, "Error in --\n");
    }
    /* Test 10: atomic bitwise AND over an all-ones array (stays 1). */
    for (j = 0; j < LOOPCOUNT; ++j)
    {
	logics[j] = 1;
    }
    bit_and = 1;
#pragma omp parallel
    {
	<ompts:orphan>
	int i;
#pragma omp for
	for (i = 0; i < LOOPCOUNT; ++i)
	  {
	    <ompts:check>#pragma omp atomic</ompts:check>
	    bit_and &= logics[i];
	  }
	</ompts:orphan>
    }
    if (!bit_and)
    {
	result++;
	fprintf (logFile, "Error in BIT AND part 1\n");
    }
    /* Test 11: one zero in the array must clear the AND result. */
    bit_and = 1;
    logics[LOOPCOUNT / 2] = 0;
#pragma omp parallel
    {
	<ompts:orphan>
	int i;
#pragma omp for
	for (i = 0; i < LOOPCOUNT; ++i)
	  {
	    <ompts:check>#pragma omp atomic</ompts:check>
	    bit_and &= logics[i];
	  }
	</ompts:orphan>
    }
    if (bit_and)
    {
	result++;
	fprintf (logFile, "Error in BIT AND part 2\n");
    }
    /* Test 12: atomic bitwise OR over an all-zero array (stays 0). */
    for (j = 0; j < LOOPCOUNT; j++)
    {
	logics[j] = 0;
    }
    bit_or = 0;
#pragma omp parallel
    {
	<ompts:orphan>
	int i;
#pragma omp for
	for (i = 0; i < LOOPCOUNT; ++i)
	  {
	    <ompts:check>#pragma omp atomic</ompts:check>
	    bit_or |= logics[i];
	  }
	</ompts:orphan>
    }
    if (bit_or)
    {
	result++;
	fprintf (logFile, "Error in BIT OR part 1\n");
    }
    /* Test 13: one set bit in the array must set the OR result. */
    bit_or = 0;
    logics[LOOPCOUNT / 2] = 1;
#pragma omp parallel
    {
	<ompts:orphan>
	int i;
#pragma omp for
	for (i = 0; i < LOOPCOUNT; ++i)
	  {
	    <ompts:check>#pragma omp atomic</ompts:check>
	    bit_or |= logics[i];
	  }
	</ompts:orphan>
    }
    if (!bit_or)
    {
	result++;
	fprintf (logFile, "Error in BIT OR part 2\n");
    }
    /* Test 14: atomic bitwise XOR over an all-zero array (stays 0). */
    for (j = 0; j < LOOPCOUNT; j++)
    {
	logics[j] = 0;
    }
    exclusiv_bit_or = 0;
#pragma omp parallel
    {
	<ompts:orphan>
	int i;
#pragma omp for
	for (i = 0; i < LOOPCOUNT; ++i)
	  {
	    <ompts:check>#pragma omp atomic</ompts:check>
	    exclusiv_bit_or ^= logics[i];
	  }
	</ompts:orphan>
    }
    if (exclusiv_bit_or)
    {
	result++;
	fprintf (logFile, "Error in EXCLUSIV BIT OR part 1\n");
    }
    /* Test 15: exactly one set bit must flip the XOR result to 1. */
    exclusiv_bit_or = 0;
    logics[LOOPCOUNT / 2] = 1;
#pragma omp parallel
    {
	<ompts:orphan>
	int i;
#pragma omp for
	for (i = 0; i < LOOPCOUNT; ++i)
	  {
	    <ompts:check>#pragma omp atomic</ompts:check>
	    exclusiv_bit_or ^= logics[i];
	  }
	</ompts:orphan>
    }
    if (!exclusiv_bit_or)
    {
	result++;
	fprintf (logFile, "Error in EXCLUSIV BIT OR part 2\n");
    }
    /* Test 16: atomic left shift; ten doublings give 2^10 == 1024. */
    x = 1;
#pragma omp parallel
    {
	<ompts:orphan>
	int i;
#pragma omp for
	for (i = 0; i < 10; ++i)
	  {
	    <ompts:check>#pragma omp atomic</ompts:check>
	    x <<= 1;
	  }
	</ompts:orphan>
    }
    if ( x != 1024)
    {
	result++;
	fprintf (logFile, "Error in <<\n");
	/* reset so the following >> test still starts from 1024 */
	x = 1024;
    }
    /* Test 17: atomic right shift; ten halvings bring x back to 1. */
#pragma omp parallel
    {
	<ompts:orphan>
	int i;
#pragma omp for
	for (i = 0; i < 10; ++i)
	  {
	    <ompts:check>#pragma omp atomic</ompts:check>
	    x >>= 1;
	  }
	</ompts:orphan>
    }
    if (x != 1)
    {
	result++;
	fprintf (logFile, "Error in >>\n");
    }
    return (result == 0);
}
</ompts:testcode>
</ompts:test>
|
GB_unaryop__abs_fp32_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_fp32_int8
// op(A') function: GB_tran__abs_fp32_int8
// C type: float
// A type: int8_t
// cast: float cij = (float) aij
// unaryop: cij = fabsf (aij)
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = fabsf (x) ;
// casting
#define GB_CASTING(z, x) \
float z = (float) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_FP32 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = fabsf ((float) Ax [p]) for all p in [0, anz); Cx and Ax must not
// alias (both are declared restrict). Work is split over 'nthreads' OpenMP
// threads with a static schedule. Returns GrB_NO_VALUE when this kernel is
// compiled out via GB_DISABLE, GrB_SUCCESS otherwise.
GrB_Info GB_unop__abs_fp32_int8
(
    float *restrict Cx,
    const int8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): the transpose/typecast/apply worker; the actual loop
// body is instantiated from the shared template GB_unaryop_transpose.c using
// the GB_* macros defined above. Returns GrB_NO_VALUE when compiled out via
// GB_DISABLE, GrB_SUCCESS otherwise.
GrB_Info GB_tran__abs_fp32_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
CPUMatrixImpl.h | //
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
//
// CPUMatrix.h : template implementation of all matrix functions on the CPU side
//
#pragma once
#include "Basics.h"
#include "File.h"
#include "CPUMatrix.h"
#include "TensorOps.h"
#include <assert.h>
#include <stdexcept>
#include <omp.h>
#include <math.h>
#include <random>
#include <chrono>
#include <exception>
#include <thread>
#include <iostream>
#include <algorithm>
#include <numeric>
#pragma warning(push)
#pragma warning(disable:4244) // 'conversion' conversion from 'type1' to 'type2', possible loss of data
#include <boost/random/normal_distribution.hpp>
#pragma warning(pop)
#include <boost/random/uniform_real_distribution.hpp>
#ifdef _WIN32
#define NOMINMAX
#include "Windows.h"
#else
#include <cfloat>
#endif
#ifdef LEAKDETECT
#include <vld.h>
#endif
#pragma warning(disable : 4100) // unreferenced formal parameter; "struct TensorOpReduction<ElemType, OPFN, typename ReductionOp, N, -1>" trigger this
#pragma warning(disable : 4127) // conditional expression is constant; "if (sizeof(ElemType)==sizeof(float))" triggers this
#pragma warning(disable : 4244) // unreachable code; triggered for unknown reasons
#pragma warning(disable : 4702) // conversion from 'double' to 'float'
#ifdef USE_MKL
// requires MKLML 0.11 and above
#include <mkl_cblas.h>
#include <mkl_lapacke.h>
#include <mkl_service.h>
#else
#ifdef _MSC_VER
// Visual Studio doesn't define standard complex types properly
#define HAVE_LAPACK_CONFIG_H
#define LAPACK_COMPLEX_STRUCTURE
#endif
#include <cblas.h>
#include <lapacke.h>
#endif
// Swap two lvalues through a temporary.
// Fix: the previous XOR-swap zeroed the value whenever both arguments aliased
// the same object (SWAP(x, x) -> 0) and only worked for integer types. The
// macro body stays a braced block so existing call sites `SWAP(a, b);` keep
// compiling unchanged.
#define SWAP(a, b)            \
    {                         \
        auto _swapTmp = (a);  \
        (a) = (b);            \
        (b) = _swapTmp;       \
    }
#define IDX2C(i, j, ld) (((j) * (ld)) + (i)) // 0 based indexing
namespace Microsoft { namespace MSR { namespace CNTK {
#pragma region Helpful Enum Definitions
// Storage order of a dense matrix (values chosen to match the CBLAS enum).
enum class MatrixOrder
{
    RowMajor = 101, // row-major arrays
    ColMajor = 102  // column-major arrays
};

// Transposition flag passed to BLAS-style routines.
enum class MatrixTranspose : char
{
    NoTrans = 'N',   // trans='N'
    Trans = 'T',     // trans='T'
    ConjTrans = 'C'  // trans='C'
};

// How a symmetric matrix is stored.
enum class SymMatrixType : char
{
    Up = 'U',          // symmetric matrix is stored in the upper part
    Low = 'L',         // symmetric matrix is stored in the lower part
    Full = 'F',        // fully populated
    NotSymmetric = 'N' // not a symmetric matrix
};

// Side on which an operand is applied in a product.
enum class MatrixOpSide : char
{
    Left = 'L',  // left multiply
    Right = 'R', // right multiply
};
#pragma endregion Helpful Enum Definitions
#pragma region Constructors and Destructor
// Default constructor: creates an empty matrix with zeroed bookkeeping state
// (no buffer is allocated).
template <class ElemType>
CPUMatrix<ElemType>::CPUMatrix()
{
    ZeroInit();
}
// helper to allocate an array of ElemType
// Use this instead of new[] to get NaN initialization for debugging.
template <class ElemType>
static ElemType* NewArray(size_t n)
{
// We need to allocate possibly one more element for the following reason.
// At some point we might want to fill a buffer with the result of a random
// number generator. The RNG is oblivious to whether the buffer is on the
// CPU or GPU but it needs to keep an accurate tally of how many numbers it
// has generated. The trouble stems from the fact that generating an odd
// number gaussians on the GPU is not supported so we must always
// generate an even number. So since we wouldn't know how to update the tally
// we are making this allocate one more element in the worst case.
ElemType* p = new ElemType[AsMultipleOf(n, 2)]();
#if 0 // _DEBUG
ElemType nan = Matrix<ElemType>::MakeNan(__LINE__);
for (size_t i = 0; i < n; i++)
p[i] = nan;
#endif
return p;
}
// Constructs a numRows x numCols matrix; allocates a zero-initialized buffer
// unless the matrix is empty.
template <class ElemType>
CPUMatrix<ElemType>::CPUMatrix(const size_t numRows, const size_t numCols)
{
    ZeroInit();

    m_numRows = numRows;
    m_numCols = numCols;
    SetSizeAllocated(GetNumElements());

    if (GetNumElements() != 0)
    {
        SetBuffer(NewArray<ElemType>(GetNumElements()), GetNumElements() * sizeof(ElemType));
    }
}
// Constructs a matrix from an existing array; copy-vs-wrap semantics are
// controlled by matrixFlags (see SetValue).
template <class ElemType>
CPUMatrix<ElemType>::CPUMatrix(const size_t numRows, const size_t numCols, ElemType* pArray, const size_t matrixFlags)
{
    ZeroInit();
    SetValue(numRows, numCols, pArray, matrixFlags);
}
//copy constructor, deep copy
template <class ElemType>
CPUMatrix<ElemType>::CPUMatrix(const CPUMatrix<ElemType>& deepCopyFrom)
{
    ZeroInit();
    SetValue(deepCopyFrom);
}
//assignment operator, deep copy
// Note: self-assignment is handled inside SetValue.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator=(const CPUMatrix<ElemType>& deepCopyFrom)
{
    SetValue(deepCopyFrom);
    return *this;
}
//move constructor, shallow copy
// Takes over moveFrom's buffer and zeroes the source so its destructor will
// not free the transferred storage.
template <class ElemType>
CPUMatrix<ElemType>::CPUMatrix(CPUMatrix<ElemType>&& moveFrom)
    : Base(/* shallow */ true)
{
    ShallowCopyFrom(moveFrom);
    moveFrom.ZeroValues();
}
// Shortcut of default constructor + shallow copy, to avoid one initialization
// Creates a non-owning view of shallowCopyFrom's storage.
template <class ElemType>
CPUMatrix<ElemType>::CPUMatrix(const CPUMatrix<ElemType>& shallowCopyFrom, bool shallow)
    : Base(shallow)
{
    ShallowCopyFrom(shallowCopyFrom);
}
//move assignment operator, shallow copy
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator=(CPUMatrix<ElemType>&& moveFrom)
{
    if (this != &moveFrom)
    {
        ShallowCopyFrom(moveFrom);
        // release the pointer from the source object so that the destructor won't release it twice
        moveFrom.ZeroValues();
    }
    return *this;
}
// Resets the matrix to the empty, default-constructed state.
template <class ElemType>
void CPUMatrix<ElemType>::Clear()
{
    ZeroInit();
}
#pragma endregion Constructors and Destructor
#pragma region Basic Operators
// Returns a shallow (non-owning) view of numCols columns starting at
// startColumn; no data is copied, only the view offset and width change.
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::ColumnSlice(size_t startColumn, size_t numCols) const
{
    if (startColumn + numCols > m_numCols)
        InvalidArgument("The slice (%d+%d) is out of range of the source matrix (%d).", (int) startColumn, (int) numCols, (int) m_numCols);

    CPUMatrix<ElemType> slice(*this, /* shallow= */ true);
    slice.m_numCols = numCols;
    slice.m_sliceViewOffset = m_sliceViewOffset + startColumn * m_numRows;
    return slice;
}
// set this(:, 0:numCols-1) = fromMatrix(:, startColumn : startColumn+numCols-1)
// TODO: why not say *this = ColumnSlice()?
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignColumnSlice(const CPUMatrix<ElemType>& fromMatrix, size_t startColumn, size_t numCols)
{
if (startColumn + numCols > fromMatrix.m_numCols)
InvalidArgument("The slice (%d+%d) is out of range of the source matrix (%d).", (int) startColumn, (int) numCols, (int) fromMatrix.m_numCols);
Clear();
ShallowCopyFrom(fromMatrix);
m_numCols = numCols;
m_sliceViewOffset = fromMatrix.m_sliceViewOffset + startColumn * m_numRows;
return *this;
}
// set this(: , startColumn:startColumn+numCols-1)= fromMatrix;
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::SetColumnSlice(const CPUMatrix<ElemType>& fromMatrix, size_t startColumn, size_t numCols)
{
if (startColumn + numCols > m_numCols)
LogicError("The slice is out of range of the destination matrix.");
if (numCols > fromMatrix.GetNumCols())
InvalidArgument("The slice (%d) is out of range of the source matrix (%d).", (int) numCols, (int) fromMatrix.GetNumCols());
if (m_numRows != fromMatrix.m_numRows)
LogicError("The number of rows in source and destination matrices do not match");
memcpy(Data() + startColumn * m_numRows, fromMatrix.Data(), numCols * m_numRows * sizeof(ElemType));
return *this;
}
// Copies 'numCols' columns from fromMatrix into *this, reading every
// srcNumColsStride-th source column and writing every destNumColsStride-th
// destination column. Row counts must match and both strided ranges must fit
// inside the respective matrices.
template <class ElemType>
void CPUMatrix<ElemType>::CopyColumnsStrided(const CPUMatrix<ElemType>& fromMatrix, size_t numCols, size_t srcNumColsStride, size_t destNumColsStride)
{
    if ((((numCols - 1) * srcNumColsStride) + 1) > fromMatrix.m_numCols)
        LogicError("The numCols to copy and srcNumColsStride specified is out of range of the source matrix.");
    // Fixed diagnostic: this check is about the DESTINATION stride.
    if ((((numCols - 1) * destNumColsStride) + 1) > m_numCols)
        LogicError("The numCols to copy and destNumColsStride specified is out of range of the destination matrix.");
    if (m_numRows != fromMatrix.m_numRows)
        LogicError("The number of rows in source and destination matrices do not match");

    long n = (long) numCols, m = (long) m_numRows;

    auto& us = *this;

#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        // four-way unrolling; use 'long' indices (as the sibling methods do)
        // to avoid signed/unsigned comparisons against the 'long' bound m.
        for (long i = 0; i < (m & ~3); i += 4)
        {
            us(i, j * destNumColsStride) = fromMatrix(i, j * srcNumColsStride);
            us(i + 1, j * destNumColsStride) = fromMatrix(i + 1, j * srcNumColsStride);
            us(i + 2, j * destNumColsStride) = fromMatrix(i + 2, j * srcNumColsStride);
            us(i + 3, j * destNumColsStride) = fromMatrix(i + 3, j * srcNumColsStride);
        }

        // handle the remaining rows
        for (long i = m & ~3; i < m; i++)
        {
            us(i, j * destNumColsStride) = fromMatrix(i, j * srcNumColsStride);
        }
    }
}
//for each column of a, we add all rows of a to this starting from startIndex
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignToRowSliceValuesOf(const CPUMatrix<ElemType>& a, const size_t startIndex, const size_t numRows)
{
if (a.GetNumRows() != numRows)
LogicError("AddToRowSliceValuesOf: a.GetNumRows() != numRows.");
if (startIndex + numRows > GetNumRows())
LogicError("AddToRowSliceValuesOf: startIndex + numRows exceeds GetNumRows().");
if (a.GetNumCols() != GetNumCols())
LogicError("AddToRowSliceValuesOf: columns does not match.");
long n = (long) a.GetNumCols(), m = (long) numRows;
auto& us = *this;
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (size_t i = 0, startRow = startIndex; i < (m & ~3); i += 4, startRow += 4)
{
us(startRow, j) = a(i, j);
us(startRow + 1, j) = a(i + 1, j);
us(startRow + 2, j) = a(i + 2, j);
us(startRow + 3, j) = a(i + 3, j);
}
// handle remaining stuffs
for (size_t i = m & ~3, startRow = startIndex + (m & ~3); i < m; i++, startRow++)
{
us(startRow, j) = a(i, j);
}
}
return *this;
}
//for each column of a, we assign numRows starting from startIndex to this
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignRowSliceValuesOf(const CPUMatrix<ElemType>& a, const size_t startIndex, const size_t numRows)
{
if (startIndex + numRows > a.GetNumRows())
LogicError("AssignRowSliceValuesOf: startIndex + numRows exceeds a.GetNumRows().");
RequireSize(numRows, a.GetNumCols());
long n = (long) a.GetNumCols(); // note: OpenMP requires loop indices to be long, not size_t
long k = (long) a.GetNumRows();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// memory copy might be faster?
memcpy(Data() + j * numRows, a.Data() + j * k + startIndex, sizeof(ElemType) * numRows);
// //four-way unrolling
// for (long i=0, startRow = startIndex; i<(m & ~3); i+=4, startRow+=4)
// {
// us(i,j) = a(startRow,j);
// us(i+1,j) = a(startRow+1,j);
// us(i+2,j) = a(startRow+2,j);
// us(i+3,j) = a(startRow+3,j);
// }
// //handle remaining stuffs
// for (long i=m & ~3, startRow = startIndex+(m & ~3); i<m; i++, startRow++)
// {
// us(i,j) = a(startRow,j);
// }
}
return *this;
}
//for the row slice of this starting from startIndex we add a to it.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddToRowSliceValuesOf(const CPUMatrix<ElemType>& a, const size_t startIndex, const size_t numRows)
{
if (a.IsEmpty())
LogicError("AddToRowSliceValuesOf: input matrix a is empty.");
if (a.GetNumRows() != numRows)
LogicError("AddToRowSliceValuesOf: a.GetNumRows() != numRows.");
if (startIndex + numRows > GetNumRows())
LogicError("AddToRowSliceValuesOf: startIndex + numRows exceeds GetNumRows().");
if (a.GetNumCols() != GetNumCols())
LogicError("AddToRowSliceValuesOf: columns does not match.");
long n = (long) a.GetNumCols(), m = (long) numRows;
auto& us = *this;
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0, startRow = (long) startIndex; i < (m & ~3); i += 4, startRow += 4)
{
us(startRow, j) += a(i, j);
us(startRow + 1, j) += a(i + 1, j);
us(startRow + 2, j) += a(i + 2, j);
us(startRow + 3, j) += a(i + 3, j);
}
// handle remaining stuffs
for (long i = m & ~3, startRow = (long) startIndex + (m & ~3); i < m; i++, startRow++)
{
us(startRow, j) += a(i, j);
}
}
return *this;
}
//for each column of this, we add row slice of a starting from startIndex
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddWithRowSliceValuesOf(const CPUMatrix<ElemType>& a, const size_t startIndex, const size_t numRows)
{
if (a.IsEmpty())
LogicError("AddWithRowSliceValuesOf: input matrix a is empty.");
if (GetNumRows() != numRows)
LogicError("AddWithRowSliceValuesOf: GetNumRows() != numRows.");
if (startIndex + numRows > a.GetNumRows())
LogicError("AddWithRowSliceValuesOf: startIndex + numRows exceeds a.GetNumRows().");
if (a.GetNumCols() != GetNumCols())
LogicError("AddWithRowSliceValuesOf: columns does not match.");
long n = (long) a.GetNumCols(), m = (long) numRows;
auto& us = *this;
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0, startRow = (long) startIndex; i < (m & ~3); i += 4, startRow += 4)
{
us(i, j) += a(startRow, j);
us(i + 1, j) += a(startRow + 1, j);
us(i + 2, j) += a(startRow + 2, j);
us(i + 3, j) += a(startRow + 3, j);
}
// handle remaining stuffs
for (long i = m & ~3, startRow = (long) startIndex + (m & ~3); i < m; i++, startRow++)
{
us(i, j) += a(startRow, j);
}
}
return *this;
}
// Returns the main diagonal of a square matrix as a 1 x numCols row vector.
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::Diagonal() const
{
    if (m_numRows != m_numCols)
        LogicError("Diagonal can be called only for square matrix. (rows=%d, cols=%d)", (int) m_numRows, (int) m_numCols);

    CPUMatrix<ElemType> diag(1, m_numCols);

    auto& us = *this;

    // Snapshot the row count as 'long' (like the sibling methods) so the OMP
    // loop does not compare a signed index against the unsigned m_numRows.
    long m = (long) m_numRows;
#pragma omp parallel for
    for (long i = 0; i < m; i++)
    {
        diag(0, (size_t) i) = us(i, i);
    }

    return diag;
}
// Subtracts 1 from the element at linear index 'position' of c; raises a
// runtime error when the index is outside the matrix.
template <class ElemType>
void CPUMatrix<ElemType>::MinusOneAt(CPUMatrix<ElemType>& c, const size_t position)
{
    // Guard clause first: reject out-of-range positions, then mutate.
    if (position >= c.GetNumElements())
        RuntimeError("MinusOneAt: position is out of CPU matrix size");
    c.Data()[position] -= 1.0;
}
// Tiles matrix a numRowRepeats times vertically and numColRepeats times
// horizontally into *this (which is resized accordingly). In-place use
// (a == *this) is not supported.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignRepeatOf(const CPUMatrix<ElemType>& a, const size_t numRowRepeats, const size_t numColRepeats)
{
    if (this == &a)
        LogicError("AssignRepeatOf: a is the same as [this]. Does not support inplace repeat.");

    if (a.IsEmpty())
        LogicError("AssignRepeatOf: Matrix a is empty.");

    RequireSize(a.GetNumRows() * numRowRepeats, a.GetNumCols() * numColRepeats);

    long n = (long) a.GetNumCols(), m = (long) a.GetNumRows();
    // Snapshot the repeat counts as 'long' so the loops below do not compare
    // signed indices against unsigned size_t bounds.
    long colRepeats = (long) numColRepeats, rowRepeats = (long) numRowRepeats;
    auto& us = *this;

#pragma omp parallel for
    for (long q = 0; q < colRepeats; q++)
    {
        for (long p = 0; p < rowRepeats; p++)
        {
            long colOffset = q * n;

            for (long j = 0; j < n; j++, colOffset++)
            {
                long rowOffset = p * m;

                // four-way unrolling
                for (long i = 0; i < (m & ~3); i += 4, rowOffset += 4)
                {
                    us(rowOffset, colOffset) = a(i, j);
                    us(rowOffset + 1, colOffset) = a(i + 1, j);
                    us(rowOffset + 2, colOffset) = a(i + 2, j);
                    us(rowOffset + 3, colOffset) = a(i + 3, j);
                }
                // handle remaining stuffs
                for (long i = m & ~3; i < m; i++, rowOffset++)
                {
                    us(rowOffset, colOffset) = a(i, j);
                }
            }
        }
    }

    return *this;
}
// Folds a vertically-tiled matrix a back onto *this: for each destination
// element, sums the numRepeats stacked copies of the corresponding row of a.
// Requires a.GetNumRows() == GetNumRows() * numRepeats.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddToRowRepeatValuesOf(const CPUMatrix<ElemType>& a, const size_t numRepeats)
{
    if (a.IsEmpty())
        LogicError("AddToRowRepeatValuesOf: input matrix a is empty.");

    if (a.GetNumRows() != GetNumRows() * numRepeats)
        LogicError("AddToRowRepeatValuesOf: a.GetNumRows() != GetNumRows() * numRepeats.");

    long n = (long) a.GetNumCols(), m = (long) GetNumRows();
    // Snapshot the repeat count as 'long' to avoid the signed/unsigned
    // comparison 'k < numRepeats' in the inner loops.
    long repeats = (long) numRepeats;

    auto& us = *this;

#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        // four-way unrolling
        for (long i = 0; i < (m & ~3); i += 4)
        {
            for (long k = 0; k < repeats; k++)
            {
                us(i, j) += a(k * m + i, j);
                us(i + 1, j) += a(k * m + i + 1, j);
                us(i + 2, j) += a(k * m + i + 2, j);
                us(i + 3, j) += a(k * m + i + 3, j);
            }
        }
        // handle remaining stuffs
        for (long i = m & ~3; i < m; i++)
        {
            for (long k = 0; k < repeats; k++)
            {
                us(i, j) += a(k * m + i, j);
            }
        }
    }

    return *this;
}
// Not implemented on the CPU; the bare parameter-name statements only
// suppress unused-parameter warnings before NOT_IMPLEMENTED throws.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignPositiveAndShiftedNegSample(const CPUMatrix<ElemType>& a, const size_t posNumber, const size_t negNumber, const size_t shiftNumber)
{
    a;
    posNumber;
    negNumber;
    shiftNumber;
    NOT_IMPLEMENTED;
}
// Not implemented on the CPU; the bare parameter-name statements only
// suppress unused-parameter warnings before NOT_IMPLEMENTED throws.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddFoldedPositiveAndShiftedNegSample(const CPUMatrix<ElemType>& a, const size_t posNumber, const size_t negNumber, const size_t shiftNumber)
{
    a;
    posNumber;
    negNumber;
    shiftNumber;
    NOT_IMPLEMENTED;
}
// Returns a new matrix holding the transpose of *this; *this is unchanged.
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::Transpose()
{
    if (IsEmpty())
        LogicError("Transpose: Matrix is empty.");

    // Delegate the element shuffling to AssignTransposeOf.
    CPUMatrix<ElemType> result;
    result.AssignTransposeOf(*this);
    return result;
}
// *this = a' (out-of-place transpose); *this is resized to a.cols x a.rows.
// In-place transpose (a == *this) is not supported.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignTransposeOf(const CPUMatrix<ElemType>& a)
{
    if (this == &a)
        LogicError("AssignTransposeOf: a is the same as [this]. Does not support inplace transpose.");

    if (a.IsEmpty())
        LogicError("AssignTransposeOf: Matrix a is empty.");

    RequireSize(a.GetNumCols(), a.GetNumRows());

    long n = (long) a.GetNumCols(), m = (long) a.GetNumRows();

    auto& us = *this;

#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        // four-way unrolling
        for (long i = 0; i < (m & ~3); i += 4)
        {
            us(j, i) = a(i, j);
            us(j, i + 1) = a(i + 1, j);
            us(j, i + 2) = a(i + 2, j);
            us(j, i + 3) = a(i + 3, j);
        }
        // handle remaining stuffs
        for (long i = m & ~3; i < m; i++)
        {
            us(j, i) = a(i, j);
        }
    }

    return *this;
}
// dst[i] = src[i] * alpha + dst[i] * beta
// scale a column vector and add it to another
// The usual special case: If beta = 0, then dst[] is not read, and may be uninitialized or NaN.
// dst[i] = src[i] * alpha + dst[i] * beta
// Scales one column vector and accumulates it into another, with fast paths
// for the common coefficient combinations.
// The usual special case: If beta = 0, then dst[] is not read, and may be
// uninitialized or NaN.
template <class ElemType>
static void ScaleAndAddColumn(ElemType beta, ElemType* dst, const ElemType* src, size_t numRows, ElemType alpha)
{
    // Rare case: general alpha — do the full multiply-accumulate.
    if (alpha != 1)
    {
        for (size_t k = 0; k < numRows; k++)
            dst[k] = beta * dst[k] + alpha * src[k];
        return;
    }
    // alpha == 1, beta == 1: plain accumulation (used in backprop).
    if (beta == 1)
    {
        for (size_t k = 0; k < numRows; k++)
            dst[k] += src[k];
        return;
    }
    // alpha == 1, beta == 0: plain assignment; dst is never read.
    if (beta == 0)
    {
        memcpy(dst, src, sizeof(ElemType) * numRows);
        return;
    }
    // alpha == 1, arbitrary beta: also a rare case.
    for (size_t k = 0; k < numRows; k++)
        dst[k] = beta * dst[k] + src[k];
}
// *this[:,j] = a[:,idx[j]] * alpha + *this[:,j] * beta
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::DoGatherColumnsOf(ElemType beta, const CPUMatrix<ElemType>& idx, const CPUMatrix<ElemType>& a, ElemType alpha)
{
if (idx.GetNumRows() != 1) // index is 1-dimensional only
InvalidArgument("DoGatherColumnsOf: Map must be a row vector.");
if (beta)
VerifySize(a.GetNumRows(), idx.GetNumCols());
else
Resize(a.GetNumRows(), idx.GetNumCols());
auto& us = *this;
// race-condition consideration: Since this loops over independent output columns, this has no race condition. Cf. DoScatterColumnsOf().
#pragma omp parallel for // TODO: Depending in circumstance, it may be more efficient to parallelize over rows.
foreach_column(jOut, us)
{
auto jInF = idx(0, jOut); // this is the column we need to get
if (std::isnan(jInF) || jInF < 0) // negative index means gap
continue;
size_t jIn = (size_t)jInF;
if (jIn >= a.GetNumCols())
InvalidArgument("DoGatherColumnsOf: Map out of bounds. %ld >= %ld", (long int)jIn, (long int)a.GetNumCols());
ScaleAndAddColumn(beta, &us(0,jOut), &a(0,jIn), us.GetNumRows(), alpha);
}
return *this;
}
// *this[:,idx[j]] = a[:,j] * alpha + *this[:,idx[j]] * beta
// Scatters the columns of 'a' into this matrix at the positions given by 'idx'.
// Multiple source columns may map to the same target column, in which case they accumulate.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::DoScatterColumnsOf(ElemType beta, const CPUMatrix<ElemType>& idx, const CPUMatrix<ElemType>& a, ElemType alpha)
{
    if (idx.GetNumRows() != 1) // index is 1-dimensional only
        InvalidArgument("DoScatterColumnsOf: Map must be a row vector.");
    if (idx.GetNumCols() != a.GetNumCols())
        InvalidArgument("DoScatterColumnsOf: Map must have width of input vector.");
    if (a.GetNumRows() != GetNumRows())
        InvalidArgument("DoScatterColumnsOf: Output must have same height as input vector.");
    auto& us = *this;
    // pre-scale with beta upfront
    // Scatter may add more than one source column to the same target, so we must pre-scale with beta, and then just keep adding.
    Scale(beta, us); // if beta is 0, then this will be a memset()
    ScatterValues(idx.Data(), a.Data(), us.Data(), alpha, idx.GetNumCols(), a.GetNumRows(), GetNumCols(), idx.GetNumRows());
    return *this;
}
// Sets every element of the matrix to the scalar v.
// Uses memset for the common finite v == 0 case (all-zero bytes represent 0 for
// both IEEE floats and integers); otherwise assigns element-wise with light
// (2-thread) OpenMP parallelism, since the operation is memory-bound.
template <class ElemType>
void CPUMatrix<ElemType>::SetValue(const ElemType v)
{
    if (IsEmpty())
        LogicError("SetValue: Matrix is empty.");
    bool isFinite = std::numeric_limits<ElemType>::is_integer || std::isfinite((double) v);
    if (isFinite && v == 0)
    {
        memset(Data(), 0, sizeof(ElemType) * GetNumElements());
    }
    else
    {
        ElemType* bufPtr = Data();
        long m = (long) GetNumElements();
        // 2-way thread parallelism is sufficient for the memory bound
        // operation of just setting the values of an array.
        const unsigned SETVALUE_NUM_THREADS = 2;
        UNUSED(SETVALUE_NUM_THREADS); // in case OMP is turned off.
#pragma omp parallel for num_threads(SETVALUE_NUM_THREADS)
        // four-way unrolling
        for (long i = 0; i < (m & ~3); i += 4)
        {
            bufPtr[i] = v;
            bufPtr[i + 1] = v;
            bufPtr[i + 2] = v;
            bufPtr[i + 3] = v;
        }
        // handle the tail elements (count not divisible by 4) serially
        for (long i = m & ~3; i < m; i++)
        {
            bufPtr[i] = v;
        }
    }
}
// Overwrites with 'val' every column whose corresponding mask entry is not 1.
// Each mask column governs 'numColsPerMaskEntry' consecutive data columns, so
// this matrix must have exactly mask-cols * numColsPerMaskEntry columns.
template <class ElemType>
void CPUMatrix<ElemType>::MaskColumnsValue(const CPUMatrix<char>& columnsMask, ElemType val, size_t numColsPerMaskEntry)
{
    if (GetNumCols() != (columnsMask.GetNumCols() * numColsPerMaskEntry))
        RuntimeError("MaskColumnsValue: Matrix number of columns must equal 'column mask number of columns * numColsPerMaskEntry'.");
    auto& us = *this;
    long n = (long)columnsMask.GetNumCols(), m = (long) GetNumRows();
#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        if (columnsMask(0, j) == 1) // mask value 1 means "keep" -- skip this column group
            continue;
        for (long k = 0; k < numColsPerMaskEntry; ++k)
        {
            // four-way unrolling
            for (size_t i = 0; i < (m & ~3); i += 4)
            {
                us(i, (j * numColsPerMaskEntry) + k) = val;
                us(i + 1, (j * numColsPerMaskEntry) + k) = val;
                us(i + 2, (j * numColsPerMaskEntry) + k) = val;
                us(i + 3, (j * numColsPerMaskEntry) + k) = val;
            }
            // handle remaining
            for (size_t i = m & ~3; i < m; i++)
            {
                us(i, (j * numColsPerMaskEntry) + k) = val;
            }
        }
    }
}
// Copies GetNumRows() values from colPointer into column j.
// A null colPointer is a silent no-op.
template <class ElemType>
void CPUMatrix<ElemType>::SetColumn(const ElemType* colPointer, size_t j)
{
    if (IsEmpty())
        LogicError("SetColumn: Matrix is empty.");
    if (colPointer == NULL)
        return;
    auto& us = *this;
    long m = (long) GetNumRows();
#pragma omp parallel for
    // four-way unrolling
    for (long i = 0; i < (m & ~3); i += 4)
    {
        us(i, j) = colPointer[i];
        us(i + 1, j) = colPointer[i + 1];
        us(i + 2, j) = colPointer[i + 2];
        us(i + 3, j) = colPointer[i + 3];
    }
    // handle the tail elements (row count not divisible by 4)
    for (long i = m & ~3; i < m; i++)
    {
        us(i, j) = colPointer[i];
    }
}
// Sets every element of column j to the constant val.
template <class ElemType>
void CPUMatrix<ElemType>::SetColumn(const ElemType val, size_t j)
{
    if (IsEmpty())
        LogicError("SetColumn: Matrix is empty.");
    auto& us = *this;
    long m = (long) GetNumRows();
#pragma omp parallel for
    // four-way unrolling
    for (long i = 0; i < (m & ~3); i += 4)
    {
        us(i, j) = val;
        us(i + 1, j) = val;
        us(i + 2, j) = val;
        us(i + 3, j) = val;
    }
    // handle the tail elements (row count not divisible by 4)
    for (long i = m & ~3; i < m; i++)
    {
        us(i, j) = val;
    }
}
// Copies the single-column matrix valMat into column j.
// valMat must be a column vector with the same number of rows as this matrix.
template <class ElemType>
void CPUMatrix<ElemType>::SetColumn(const CPUMatrix<ElemType>& valMat, size_t j)
{
    if (IsEmpty())
        LogicError("SetColumn: Matrix is empty.");
    if (valMat.GetNumRows() != GetNumRows() || valMat.GetNumCols() != 1)
        LogicError("The valMat matrix has incorrect number of rows or columns.");
    auto& us = *this;
    long m = (long) GetNumRows();
#pragma omp parallel for
    // four-way unrolling
    for (long i = 0; i < (m & ~3); i += 4)
    {
        us(i, j) = valMat(i, 0);
        us(i + 1, j) = valMat(i + 1, 0);
        us(i + 2, j) = valMat(i + 2, 0);
        us(i + 3, j) = valMat(i + 3, 0);
    }
    // handle the tail elements (row count not divisible by 4)
    for (long i = m & ~3; i < m; i++)
    {
        us(i, j) = valMat(i, 0);
    }
}
// Deep-copies deepCopyFrom into this matrix. Self-assignment is a no-op.
template <class ElemType>
void CPUMatrix<ElemType>::SetValue(const CPUMatrix<ElemType>& deepCopyFrom)
{
    if (this != &deepCopyFrom)
        SetValue(deepCopyFrom.GetNumRows(), deepCopyFrom.GetNumCols(), deepCopyFrom.Data(), 0);
}
// The cross-device/sparse SetValue overloads below are compiled out (#if 0).
#if 0
template <class ElemType>
void CPUMatrix<ElemType>::SetValue(const GPUMatrix<ElemType>& /*deepCopyFrom*/)
{
    NOT_IMPLEMENTED;
}
template <class ElemType>
void CPUMatrix<ElemType>::SetValue(const CPUSparseMatrix<ElemType>& deepCopyFrom)
{
    deepCopyFrom.AssignColumnSliceToDense(*this, 0, deepCopyFrom.GetNumCols());
}
template <class ElemType>
void CPUMatrix<ElemType>::SetValue(const GPUSparseMatrix<ElemType>& /*deepCopyFrom*/)
{
    NOT_IMPLEMENTED;
}
#endif
// Populates this matrix from a raw array of numRows * numCols elements.
// If matrixFlagDontOwnBuffer is set, the matrix adopts pArray as an external,
// non-owned buffer; otherwise the data is copied in. Row-major input
// (matrixFormatRowMajor) is transposed into the internal column-major layout
// using strided BLAS copies.
template <class ElemType>
void CPUMatrix<ElemType>::SetValue(const size_t numRows, const size_t numCols, ElemType* pArray, const size_t matrixFlags)
{
    if (pArray == nullptr && numRows * numCols > 0)
        InvalidArgument("Invalid pArray. pArray == nullptr, but matrix is of size %d * %d = %d.", (int)numRows, (int)numCols, (int)(numRows * numCols));
    SetFormat(matrixFormatDense);
    SetComputeDeviceId(CPUDEVICE);
    // if it's externally managed, then populate the structure
    if (matrixFlags & matrixFlagDontOwnBuffer)
    {
        // free previous array allocation if any before overwriting
        delete[] Buffer();
        m_numRows = numRows;
        m_numCols = numCols;
        SetBuffer(pArray, GetNumElements() * sizeof(ElemType), true);
        SetSizeAllocated(GetNumElements());
    }
    else
    {
        RequireSize(numRows, numCols);
        if (!IsEmpty())
        {
            if (!(matrixFlags & matrixFormatRowMajor)) // compatible to internal structure
                memcpy(Data(), pArray, GetNumElements() * sizeof(ElemType));
            else // need to transpose
            {
                ElemType* bufPtr = Data();
                auto& us = *this;
                if (std::is_same<ElemType, double>::value)
                {
#pragma omp parallel for
                    foreach_column (j, us)
                    {
                        // source stride numCols walks one logical column of the row-major input
                        cblas_dcopy((int) numRows, reinterpret_cast<double*>(pArray + j), (int) numCols, reinterpret_cast<double*>(bufPtr + LocateColumn(j)), 1);
                    }
                }
                else if (std::is_same<ElemType, float>::value)
                {
#pragma omp parallel for
                    foreach_column (j, us)
                    {
                        {
#pragma warning(suppress : 4244)
                            cblas_scopy((int) numRows, reinterpret_cast<float*>(pArray + j), (int) numCols, reinterpret_cast<float*>(bufPtr + LocateColumn(j)), 1);
                        }
                    }
                }
                else
                {
                    RuntimeError("Unsupported data format");
                }
            }
        }
    }
}
// Sets every element on the main diagonal to v. The matrix must be square.
// Off-diagonal elements are left untouched.
template <class ElemType>
void CPUMatrix<ElemType>::SetDiagonalValue(const ElemType v)
{
    if (GetNumRows() != GetNumCols())
        LogicError("SetDiagonalValue: NumRows and NumCols do not agree.");
    auto& us = *this;
    long m = (long) GetNumRows();
#pragma omp parallel for
    // four-way unrolling
    for (long i = 0; i < (m & ~3); i += 4)
    {
        us(i, i) = v;
        us(i + 1, i + 1) = v;
        us(i + 2, i + 2) = v;
        us(i + 3, i + 3) = v;
    }
    // handle the tail elements (diagonal length not divisible by 4)
    for (long i = m & ~3; i < m; i++)
    {
        us(i, i) = v;
    }
}
// Sets the main diagonal from a vector (row or column orientation both accepted).
// A 1x1 input degenerates to the scalar overload. The matrix must be square and
// the vector's length must equal the matrix dimension.
template <class ElemType>
void CPUMatrix<ElemType>::SetDiagonalValue(const CPUMatrix<ElemType>& vector)
{
    if (IsEmpty() || vector.IsEmpty())
        LogicError("SetDiagonalValue: Matrix is empty.");
    if (GetNumRows() != GetNumCols())
        LogicError("SetDiagonalValue: NumRows and NumCols do not agree.");
    if (vector.GetNumRows() != 1 && vector.GetNumCols() != 1)
        LogicError("SetDiagonalValue: input vector must be a vector.");
    if (vector.GetNumElements() == 1) // reduce to simple form
        SetDiagonalValue(vector(0, 0));
    else if (vector.GetNumRows() != GetNumRows() && vector.GetNumCols() != GetNumRows())
        LogicError("SetDiagonalValue: input vector's dimension does not agree with [this].");
    else
    {
        auto& us = *this;
        long m = (long) GetNumRows();
        if (vector.GetNumRows() == 1) // row vector
        {
#pragma omp parallel for
            // four-way unrolling
            for (long i = 0; i < (m & ~3); i += 4)
            {
                us(i, i) = vector(0, i);
                us(i + 1, i + 1) = vector(0, i + 1);
                us(i + 2, i + 2) = vector(0, i + 2);
                us(i + 3, i + 3) = vector(0, i + 3);
            }
            // handle the tail elements
            for (long i = m & ~3; i < m; i++)
            {
                us(i, i) = vector(0, i);
            }
        }
        else // column vector
        {
#pragma omp parallel for
            // four-way unrolling
            for (long i = 0; i < (m & ~3); i += 4)
            {
                us(i, i) = vector(i, 0);
                us(i + 1, i + 1) = vector(i + 1, 0);
                us(i + 2, i + 2) = vector(i + 2, 0);
                us(i + 3, i + 3) = vector(i + 3, 0);
            }
            // handle the tail elements
            for (long i = m & ~3; i < m; i++)
            {
                us(i, i) = vector(i, 0);
            }
        }
    }
}
// Fills the matrix with samples drawn uniformly from [low, high).
// Runs single-threaded so results are reproducible for a given seed.
template <class ElemType>
void CPUMatrix<ElemType>::SetUniformRandomValue(const ElemType low, const ElemType high, unsigned long seed)
{
    if (IsEmpty())
        LogicError("SetUniformRandomValue: Matrix is empty.");
    std::mt19937_64 generator;
    generator.seed(seed == USE_TIME_BASED_SEED ? (unsigned long) time(NULL) : seed);
    boost::random::uniform_real_distribution<double> r((double)low, (double)high);
    ElemType* bufPtr = Data();
    long m = (long) GetNumElements();
    // four-way unrolling
    for (long i = 0; i < (m & ~3); i += 4)
    {
        bufPtr[i] = (ElemType)r(generator);
        bufPtr[i + 1] = (ElemType)r(generator);
        bufPtr[i + 2] = (ElemType)r(generator);
        bufPtr[i + 3] = (ElemType)r(generator);
    }
    // handle the tail elements (count not divisible by 4)
    for (long i = m & ~3; i < m; i++)
    {
        bufPtr[i] = (ElemType)r(generator);
    }
}
// Fills the matrix with samples drawn uniformly from [low, high) using an
// externally supplied RNG handle, which must be a CPURNGHandle.
template <class ElemType>
void CPUMatrix<ElemType>::SetUniformRandomValue(RNGHandle& rngHandle, const ElemType low, const ElemType high)
{
    if (IsEmpty())
        LogicError("SetUniformRandomValue: Matrix is empty.");
    CPURNGHandle* cpuRNGHandle = dynamic_cast<CPURNGHandle*>(&rngHandle);
    if (cpuRNGHandle == nullptr)
        LogicError("rngHandle must be a CPURNGHandle.");
    boost::random::uniform_real_distribution<double> r((double)low, (double)high);
    std::generate(Data(), Data() + GetNumElements(), [&cpuRNGHandle, &r]() {return (ElemType)r(cpuRNGHandle->Generator()); });
}
// Fills the matrix with samples from N(mean, stdev^2) using an external RNG handle.
// NOTE(review): only the largest even prefix (AsMultipleOf(count, 2)) is filled;
// for an odd element count the last element is left unmodified -- confirm this is
// intentional (e.g. to match a pairwise GPU generator) before relying on it.
template <class ElemType>
void CPUMatrix<ElemType>::SetGaussianRandomValue(RNGHandle& rngHandle, const ElemType mean, const ElemType stdev)
{
    if (IsEmpty())
        LogicError("SetGaussianRandomValue: Matrix is empty.");
    CPURNGHandle* cpuRNGHandle = dynamic_cast<CPURNGHandle*>(&rngHandle);
    if (cpuRNGHandle == nullptr)
        LogicError("rngHandle must be a CPURNGHandle.");
    boost::random::normal_distribution<double> r((double)mean, (double)stdev);
    auto n = AsMultipleOf(GetNumElements(), 2);
    std::generate(Data(), Data() + n, [&cpuRNGHandle, &r]() {return (ElemType)r(cpuRNGHandle->Generator()); });
}
// Fills the matrix with Gumbel(loc, scale) samples via inverse-transform
// sampling: x = loc - scale * log(-log(u)) for u uniform in (0, 1).
// log1p(-u) is used for numerical accuracy near u == 0.
template <class ElemType>
void CPUMatrix<ElemType>::SetGumbelRandomValue(RNGHandle& rngHandle, const ElemType loc, const ElemType scale)
{
    if (IsEmpty())
        LogicError("SetGumbelRandomValue: Matrix is empty.");
    CPURNGHandle* cpuRNGHandle = dynamic_cast<CPURNGHandle*>(&rngHandle);
    if (cpuRNGHandle == nullptr)
        LogicError("rngHandle must be a CPURNGHandle.");
    boost::random::uniform_real_distribution<double> r(0, 1);
    std::generate(Data(), Data() + GetNumElements(), [&cpuRNGHandle, &r, loc, scale]() {return (ElemType)(loc - scale * log(-log1p(-r(cpuRNGHandle->Generator())))); });
}
// Fills the matrix with samples from N(mean, sigma^2) using a seeded mt19937_64.
template <class ElemType>
void CPUMatrix<ElemType>::SetGaussianRandomValue(const ElemType mean, const ElemType sigma, unsigned long seed)
{
    if (sigma <= 0)
        InvalidArgument("SetGaussianRandomValue: sigma must be a positive value.");
    if (IsEmpty())
        LogicError("SetGaussianRandomValue: Matrix is empty.");
    auto& us = *this;
    std::mt19937_64 generator(seed == USE_TIME_BASED_SEED ? (unsigned long) time(NULL) : seed);
    boost::random::normal_distribution<double> r((double)mean, (double)sigma);
    // #pragma omp parallel for is not thread safe. Also the results would not be deterministic
    foreach_coord (i, j, us)
    {
        us(i, j) = (ElemType)r(generator);
    }
}
// Fills the matrix with N(mean, sigma^2) samples truncated to +/- 2 sigma,
// using rejection sampling (acceptance probability ~0.9545 per draw).
template <class ElemType>
void CPUMatrix<ElemType>::SetTruncatedNormalRandomValue(const ElemType mean, const ElemType sigma, unsigned long seed)
{
    if (sigma <= 0)
        InvalidArgument("SetTruncatedNormalRandomValue: sigma must be a positive value.");
    if (IsEmpty())
        LogicError("SetTruncatedNormalRandomValue: Matrix is empty.");
    auto& us = *this;
    std::mt19937_64 generator(seed == USE_TIME_BASED_SEED ? (unsigned long)time(NULL) : seed);
    boost::random::normal_distribution<double> r((double)mean, (double)sigma);
    const ElemType high = mean + 2 * sigma;
    const ElemType low = mean - 2 * sigma;
    // #pragma omp parallel for is not thread safe. Also the results would not be deterministic
    foreach_coord(i, j, us)
    {
        ElemType tmp = 0;
        do
            tmp = (ElemType)r(generator);
        while (tmp < low || tmp > high ); // Rejection sampling is fine here because the acceptance probability is about 0.9545
        us(i, j) = tmp;
    }
}
// Fills this matrix with samples drawn from N(mean, sigma^2) using a seeded mt19937_64.
// NOTE(review): despite the "Add" in the name, the loops below *assign* the samples
// (us(i, j) = sample) rather than accumulating into the existing contents --
// confirm the intended semantics against callers before changing '=' to '+='.
template <class ElemType>
void CPUMatrix<ElemType>::AddGaussianRandomValue(const ElemType mean, const ElemType sigma, unsigned long seed)
{
    // BUG FIX: both error messages previously named "SetUniformRandomValue",
    // which misattributed failures raised from this function.
    if (sigma <= 0)
        InvalidArgument("AddGaussianRandomValue: sigma must be a positive value.");
    if (IsEmpty())
        LogicError("AddGaussianRandomValue: Matrix is empty.");
    auto& us = *this;
    std::mt19937_64 generator;
    generator.seed(seed == USE_TIME_BASED_SEED ? (unsigned long) time(NULL) : seed);
    boost::random::normal_distribution<double> r((double)mean, (double)sigma);
    long m = (long) GetNumRows(), n = (long) GetNumCols();
    for (long j = 0; j < n; j++)
    {
        // four-way unrolling
        for (long i = 0; i < (m & ~3); i += 4)
        {
            us(i, j) = (ElemType)r(generator);
            us(i + 1, j) = (ElemType)r(generator);
            us(i + 2, j) = (ElemType)r(generator);
            us(i + 3, j) = (ElemType)r(generator);
        }
        // handle the tail rows (row count not divisible by 4)
        for (long i = m & ~3; i < m; i++)
        {
            // BUG FIX: the (ElemType) cast was missing here, unlike every other
            // assignment, causing an implicit double->ElemType narrowing.
            us(i, j) = (ElemType)r(generator);
        }
    }
}
//maskRate: percentage of values masked out (similar to dropout rate)
//scaleValue: which scale value to set to the left ones (unmasked items).
// Each element is independently set to 0 with probability maskRate, and to
// scaleValue otherwise, using the supplied RNG handle (must be a CPURNGHandle).
template <class ElemType>
void CPUMatrix<ElemType>::SetUniformRandomMask(const ElemType maskRate, const ElemType scaleValue, RNGHandle& rngHandle)
{
    if (IsEmpty())
        LogicError("SetUniformRandomMask: Matrix is empty."); // BUG FIX: message previously named "SetUniformRandomValue"
    CPURNGHandle* cpuRNGHandle = dynamic_cast<CPURNGHandle*>(&rngHandle);
    if (cpuRNGHandle == nullptr)
        LogicError("rngHandle must be a CPURNGHandle.");
    auto& us = *this;
    boost::random::uniform_real_distribution<double> r(0, 1);
    long m = (long) GetNumRows(), n = (long) GetNumCols();
    ElemType v;
    for (long j = 0; j < n; j++)
    {
        // four-way unrolling
        for (long i = 0; i < (m & ~3); i += 4)
        {
            v = (ElemType)r(cpuRNGHandle->Generator());
            us(i, j) = v <= maskRate ? (ElemType)0 : scaleValue;
            // BUG FIX: the three draws below were missing the (ElemType) cast that
            // the first and the tail-loop draws have, causing inconsistent implicit
            // double->ElemType narrowing.
            v = (ElemType)r(cpuRNGHandle->Generator());
            us(i + 1, j) = v <= maskRate ? (ElemType)0 : scaleValue;
            v = (ElemType)r(cpuRNGHandle->Generator());
            us(i + 2, j) = v <= maskRate ? (ElemType)0 : scaleValue;
            v = (ElemType)r(cpuRNGHandle->Generator());
            us(i + 3, j) = v <= maskRate ? (ElemType)0 : scaleValue;
        }
        // handle the tail rows (row count not divisible by 4)
        for (long i = m & ~3; i < m; i++)
        {
            v = (ElemType)r(cpuRNGHandle->Generator());
            us(i, j) = v <= maskRate ? (ElemType)0 : scaleValue;
        }
    }
}
// Adagrad update. *this holds the per-element accumulator of squared gradients;
// 'gradients' is rescaled in place by 1/sqrt(accumulator + floor).
// Returns the mean of the applied multipliers when needAveMultiplier is set,
// otherwise 1.
template <class ElemType>
ElemType CPUMatrix<ElemType>::Adagrad(CPUMatrix<ElemType>& gradients, const bool needAveMultiplier)
{
    ElemType aveMultiplier = 0;
    // lazily (re)initialize the accumulator state to zeros if it is missing or mismatched
    if (IsEmpty() || gradients.GetNumCols() != GetNumCols() || gradients.GetNumRows() != GetNumRows())
    {
        RequireSize(gradients.GetNumRows(), gradients.GetNumCols());
        SetValue(0.0);
    }
    if (GetNumRows() != gradients.GetNumRows() || GetNumCols() != gradients.GetNumCols())
        LogicError("The matrix gradients must have the same rows and columns as this matrix.");
    ElemType *a = Data(), *d_v = gradients.Data();
    size_t n = GetNumElements();
    const ElemType floor = 1e-16f; // keeps the divisor strictly positive
    ElemType a0, a1, a2, a3;
    // disable omp here because aveMultiper needs to be added atomically. however, it seems the result is incorrect even if rmp atomic and amp critical are used.
    // #pragma omp parallel for
    for (long i = 0; i < (n & ~3); i += 4) // four-way unrolling
    {
        a[i] += d_v[i] * d_v[i];
        a[i + 1] += d_v[i + 1] * d_v[i + 1];
        a[i + 2] += d_v[i + 2] * d_v[i + 2];
        a[i + 3] += d_v[i + 3] * d_v[i + 3];
        a0 = sqrt(a[i] + floor);
        a1 = sqrt(a[i + 1] + floor);
        a2 = sqrt(a[i + 2] + floor);
        a3 = sqrt(a[i + 3] + floor);
        d_v[i] /= a0;
        d_v[i + 1] /= a1;
        d_v[i + 2] /= a2;
        d_v[i + 3] /= a3;
        if (needAveMultiplier)
        {
            aveMultiplier += 1 / a0 + 1 / a1 + 1 / a2 + 1 / a3;
        }
    }
    // get the last few elements if any
    for (long i = n & ~3; i < n; i++)
    {
        a[i] += d_v[i] * d_v[i];
        a0 = sqrt(a[i] + floor);
        d_v[i] /= a0;
        if (needAveMultiplier)
        {
            aveMultiplier += 1 / a0;
        }
    }
    if (needAveMultiplier && n > 0)
        return aveMultiplier / n;
    else
        return 1;
}
// FSAdagrad update. *this stores two state buffers side by side: the smoothed
// squared-gradient accumulator in the first gradients-sized half, and the
// momentum-smoothed gradient in the second half. 'functionValues' (the model
// parameters) is updated in place.
template <class ElemType>
void CPUMatrix<ElemType>::FSAdagrad(CPUMatrix<ElemType>& gradients,
                                    CPUMatrix<ElemType>& functionValues,
                                    ElemType learnRatePerSample,
                                    ElemType momentum,
                                    ElemType adaWeight,
                                    ElemType adaMul,
                                    ElemType unitGainFactor)
{
    size_t numColsNeeded = 2 * gradients.GetNumCols(); // two state buffers, laid out column-wise
    if (IsEmpty() || (GetNumCols() < numColsNeeded))
    {
        RequireSize(gradients.GetNumRows(), numColsNeeded);
        SetValue(0.0);
    }
    if (GetNumRows() != gradients.GetNumRows() || GetNumCols() != numColsNeeded)
        LogicError("The matrix gradients does not have expected dimensions.");
    size_t n = gradients.GetNumElements();
    ElemType* grad = gradients.Data();
    ElemType* smoothAda = Data();     // exponential moving average of g^2
    ElemType* smoothMom = Data() + n; // momentum-smoothed gradient
    ElemType* val = functionValues.Data();
#pragma omp parallel for
    // TODO: Unroll 4-times for better performance leveraging vectorization
    for (long i = 0; i < n; i++)
    {
        ElemType g = grad[i];
        ElemType adaSqr = adaWeight * smoothAda[i] + (1.0f - adaWeight) * g * g;
        smoothAda[i] = adaSqr;
        if (adaSqr != 0.0f)
        {
            ElemType ada = sqrt(adaSqr);
            ElemType w = adaMul * ((ElemType) 1.0 / ada);
            if (w > 10.0f) // clip the adaptive multiplier
                w = 10.0f;
            g *= w;
        }
        if (momentum > 0.0f)
        {
            g = momentum * smoothMom[i] + unitGainFactor * g;
            smoothMom[i] = g;
        }
        g *= learnRatePerSample;
        val[i] -= g; // gradient-descent step on the parameters
    }
}
// Adam / AdaMax update. *this stores two state buffers side by side: the second
// moment (or, for adamax, the decayed infinity norm) in the first half, and the
// first-moment (momentum) estimate in the second half. 'functionValues' (the
// model parameters) is updated in place.
template <class ElemType>
void CPUMatrix<ElemType>::Adam(CPUMatrix<ElemType>& gradients, CPUMatrix<ElemType>& functionValues, ElemType learnRatePerSample,
                               ElemType momentum, ElemType adaWeight, ElemType adaMul, ElemType epsilon, ElemType unitGainFactor, bool adamax)
{
    size_t numColsNeeded = 2 * gradients.GetNumCols(); // two state buffers, laid out column-wise
    if (IsEmpty() || (GetNumCols() < numColsNeeded))
    {
        RequireSize(gradients.GetNumRows(), numColsNeeded);
        SetValue(0.0);
    }
    if (GetNumRows() != gradients.GetNumRows() || GetNumCols() != numColsNeeded)
        LogicError("The matrix gradients does not have expected dimensions.");
    size_t n = gradients.GetNumElements();
    ElemType* grad = gradients.Data();
    ElemType* smoothAda = Data();     // second-moment estimate (adamax: running max)
    ElemType* smoothMom = Data() + n; // first-moment (momentum) estimate
    ElemType* val = functionValues.Data();
#pragma omp parallel for
    // TODO: Unroll 4-times for better performance leveraging vectorization
    for (long i = 0; i < n; i++)
    {
        ElemType g = grad[i];
        ElemType ada;
        if (!adamax)
        {
            ElemType adaSqr = adaWeight * smoothAda[i] + (1.0f - adaWeight) * g * g;
            smoothAda[i] = adaSqr;
            ada = sqrt(adaSqr);
        }
        else
            ada = smoothAda[i] = std::max(adaWeight * smoothAda[i], fabs_(g)); // infinity-norm variant
        ElemType w = adaMul * (ElemType)( 1.0 / (ada + epsilon));
        g = momentum * smoothMom[i] + unitGainFactor * g;
        smoothMom[i] = g;
        val[i] -= g * w * learnRatePerSample;
    }
}
// RmsProp update with Rprop-style step adaptation. *this stores three state
// buffers side by side: the moving average of squared gradients (avars), the
// sign of the previous gradient (signs), and the per-element step size (steps).
// 'gradients' is rescaled in place; returns the mean applied multiplier when
// needAveMultiplier is set, otherwise 1.
template <class ElemType>
ElemType CPUMatrix<ElemType>::RmsProp(CPUMatrix<ElemType>& gradients,
                                      ElemType RMS_GAMMA,
                                      ElemType RMS_WGT_INC,
                                      ElemType RMS_WGT_MAX,
                                      ElemType RMS_WGT_DEC,
                                      ElemType RMS_WGT_MIN,
                                      const bool needAveMultiplier,
                                      const bool initialized)
{
    const ElemType floor = 1e-6f; // keeps the divisor strictly positive
    size_t n = gradients.GetNumElements();
    ElemType* curr_grad = gradients.Data();
    // (re)initialize the three state buffers when missing, mismatched, or explicitly uninitialized
    if (IsEmpty() || GetNumCols() < gradients.GetNumCols() * 3 || !initialized)
    {
        RequireSize(gradients.GetNumRows(), gradients.GetNumCols() * 3);
        SetValue(0.0); // also zeroes the 'signs' buffer
        ElemType* avars = Data(); // accumulated variances for RMS scaling
        ElemType* steps = Data() + 2 * n; // current step size
        // initialize moving average of gradient-squared
        for (long i = 0; i < n; i++)
            avars[i] = curr_grad[i] * curr_grad[i];
        // initialize starting step size
        for (long i = 0; i < n; i++)
            steps[i] = ElemType(0.02);
    }
    ElemType* avars = Data(); // accumulated variances for RMS scaling
    ElemType* signs = Data() + n; // sign of previous gradient
    ElemType* steps = Data() + 2 * n; // current step size
    if (GetNumRows() != gradients.GetNumRows() || GetNumCols() != gradients.GetNumCols() * 3)
        LogicError("The matrix gradients does not have expected dimensions.");
    ElemType ONE_MINUS_GAMMA = ElemType(1.0) - RMS_GAMMA;
    // Earlier three-state (base-3 sign history) variant, kept for reference:
    // int upd[] = {
    //     2,2,0,
    //     2,2,0,
    //     1,1,1,
    //     2,2,0,
    //     1,2,1,
    //     0,2,2,
    //     1,1,1,
    //     0,2,2,
    //     0,2,2,
    // };
    //  for (long i=0; i<n; i++)
    //  {
    //      avars[i] = RMS_GAMMA * avars[i] + ONE_MINUS_GAMMA * (curr_grad[i] * curr_grad[i]);
    //      // grad sign base 3: 0->neg, 1->zero, 2->pos
    //      const int grad_sign = 1 + (ElemType(0) < curr_grad[i]) - (curr_grad[i] < ElemType(0));
    //      // signs[i] contains three consecutive grad_sign
    //      signs[i] = 3*(int(signs[i]) % 9) + grad_sign;
    //      switch(upd[int(signs[i])])
    //      {
    //      case 0:
    //          steps[i] = max(steps[i] * RMS_WGT_DEC, RMS_WGT_MIN);
    //          break;
    //      case 2:
    //          steps[i] = min(steps[i] * RMS_WGT_INC, RMS_WGT_MAX);
    //          break;
    //      }
    //      curr_grad[i] *= steps[i] / sqrt(avars[i] + floor);
    //  }
    ElemType aveMultiplier = 0, a;
    for (long i = 0; i < n; i++)
    {
        avars[i] = RMS_GAMMA * avars[i] + ONE_MINUS_GAMMA * (curr_grad[i] * curr_grad[i]);
        const int grad_sign = (ElemType(0) < curr_grad[i]) - (curr_grad[i] < ElemType(0)); // -1, 0, or +1
        // grow the step while the gradient keeps its sign, shrink it on a sign flip
        if (signs[i] * grad_sign > 0)
            steps[i] = std::min(steps[i] * RMS_WGT_INC, RMS_WGT_MAX);
        else
            steps[i] = std::max(steps[i] * RMS_WGT_DEC, RMS_WGT_MIN);
        a = steps[i] / sqrt(avars[i] + floor);
        curr_grad[i] *= a;
        signs[i] = (ElemType) grad_sign;
        if (needAveMultiplier)
            aveMultiplier += a;
    }
    if (needAveMultiplier)
        return aveMultiplier / n;
    else
        return 1;
}
// AdaDelta update. *this stores two state buffers side by side: the smoothed
// squared gradient (smoothAda) and the smoothed squared update (smoothX2).
// 'functionValues' (the model parameters) is updated in place.
template <class ElemType>
template <typename GradType>
void CPUMatrix<ElemType>::AdaDelta(CPUMatrix<GradType>& gradients, CPUMatrix<ElemType>& functionValues, ElemType learningRate, ElemType rho, ElemType epsilon)
{
    size_t numColsNeeded = 2 * gradients.GetNumCols(); // two state buffers, laid out column-wise
    if (IsEmpty() || (GetNumCols() < numColsNeeded))
    {
        RequireSize(gradients.GetNumRows(), numColsNeeded);
        SetValue(0.0);
    }
    if (GetNumRows() != gradients.GetNumRows() || GetNumCols() != numColsNeeded)
        LogicError("The matrix gradients does not have expected dimensions.");
    size_t n = gradients.GetNumElements();
    GradType* grad = gradients.Data();
    ElemType* smoothAda = Data();    // E[g^2]
    ElemType* smoothX2 = Data() + n; // E[dx^2]
    ElemType* val = functionValues.Data();
#pragma omp parallel for
    // TODO: Unroll 4-times for better performance leveraging vectorization
    for (long i = 0; i < n; i++)
    {
        ElemType g = (ElemType)grad[i];
        ElemType adaSqr = rho * smoothAda[i] + (1 - rho) * g * g;
        smoothAda[i] = adaSqr;
        ElemType x2 = smoothX2[i];
        ElemType deltaX = -sqrt(x2 + epsilon) / sqrt(adaSqr + epsilon) * g; // RMS(dx)/RMS(g) scaled step
        smoothX2[i] = rho * smoothX2[i] + (1 - rho) * deltaX * deltaX;
        val[i] += learningRate * deltaX;
    }
}
// Applies the decay that lazily-updated (sparse) AdaDelta columns missed and
// resets their timestamps.
template <class ElemType>
void CPUMatrix<ElemType>::AdaDeltaFlushTimestamps(size_t cols, ElemType rho, int* timestamps, int currentTimestamp)
{
    // Sets all timestamps to 0 and updates the two logical buffers that this object holds
    // so that their values are the same as if a dense implementation of adadelta had been used.
    // This basically means that the values of these buffers are set to decay * original value
    // where decay is rho ** (currentTimestamp - timestamp for that column)
    auto rows = GetNumRows();
    auto smoothAda = Data();
    auto smoothX2 = Data() + cols * rows;
#pragma omp parallel for
    for (auto col = 0; col < cols; ++col)
    {
        ElemType decay = std::pow(rho, ElemType(currentTimestamp - timestamps[col]));
        auto offset = rows * col;
        timestamps[col] = 0;
        for (auto row = 0; row < rows; ++row)
        {
            smoothAda[offset + row] *= decay;
            smoothX2[offset + row] *= decay;
        }
    }
}
// Reinterprets the existing buffer with a new geometry; the total element
// count must be preserved (no data is moved or reallocated).
template <class ElemType>
void CPUMatrix<ElemType>::Reshape(const size_t numRows, const size_t numCols)
{
    if (GetNumElements() != numRows * numCols)
        InvalidArgument("Reshape: Total number of elements does not match.");
    m_numRows = numRows;
    m_numCols = numCols;
}
// RequireSize() -- Tests if the matrix is the right size. If not, resizes the matrix. This avoids the VerifyResizable check if we're already the right size.
template <class ElemType>
void CPUMatrix<ElemType>::RequireSize(const size_t numRows, const size_t numCols, bool growOnly /*=true*/)
{
    if (GetNumRows() != numRows || GetNumCols() != numCols)
        Resize(numRows, numCols, growOnly);
}
// Resize() -- change matrix size
// This function is cheap if the matrix size does not change.
// Current content is not preserved.
// If growOnly is true, resize will not reallocate memory if the current memory is large enough (i.e., will not shrink).
// If this object does not own its memory then new memory cannot be allocated (one can still shrink and/or reshape).
template <class ElemType>
void CPUMatrix<ElemType>::Resize(const size_t numRows, const size_t numCols, bool growOnly /*=true*/)
{
    if (GetNumRows() == numRows && GetNumCols() == numCols)
        return;
    VerifyResizable(__func__);
    size_t numElements = numRows * numCols;
    if (numElements > GetSizeAllocated() ||                 // grow allocation
        (!growOnly && (numElements != GetSizeAllocated()))) // shrink allocation (not if 'growOnly')
    {
        // reallocate buffer
        ElemType* pArray = nullptr;
        if (numElements > 0)
        {
            pArray = NewArray<ElemType>(numElements);
        }
        // success: update the object
        delete[] Buffer(); // free the old buffer only after the new one was allocated
        SetBuffer(pArray, numElements * sizeof(ElemType));
        SetSizeAllocated(numElements);
    }
    // success
    m_sliceViewOffset = 0; // a resized matrix is no longer a column slice
    m_numRows = numRows;
    m_numCols = numCols;
}
// Returns a newly allocated copy of the matrix contents (nullptr if empty).
// The array is allocated here but must be deleted by the caller.
// TODO: change to use STL vector instead
template <class ElemType>
ElemType* CPUMatrix<ElemType>::CopyToArray() const
{
    const size_t count = GetNumElements();
    if (count == 0)
        return nullptr;
    ElemType* arrayCopy = NewArray<ElemType>(count);
    memcpy(arrayCopy, Data(), sizeof(ElemType) * count);
    return arrayCopy;
}
//memory will be allocated by the callee if not enough but need to be deleted by the caller after it's done
//return number of elements copied
// Reuses the caller's buffer when it is large enough; otherwise replaces it
// with a bigger one and updates currentArraySize accordingly.
template <class ElemType>
size_t CPUMatrix<ElemType>::CopyToArray(ElemType*& arrayCopyTo, size_t& currentArraySize) const
{
    size_t numElements = GetNumElements();
    if (numElements > currentArraySize)
    {
        // BUG FIX: the buffer is allocated as an array (NewArray / new[]), so it
        // must be released with delete[]; plain 'delete' on an array is undefined
        // behavior.
        delete[] arrayCopyTo;
        arrayCopyTo = NewArray<ElemType>(numElements);
        currentArraySize = numElements;
    }
    if (numElements != 0)
    {
        memcpy(arrayCopyTo, Data(), sizeof(ElemType) * numElements);
    }
    return numElements;
}
// Copies a rectangular section into 'dst' -- not implemented for CPU matrices.
template <typename ElemType>
void CPUMatrix<ElemType>::CopySection(size_t /*numRows*/, size_t /*numCols*/, ElemType* /*dst*/, size_t /*colStride*/) const
{
    // REVIEW alexeyk: currently not used by CPU, but implement when possible.
    RuntimeError("Not implemented.");
}
// Returns the linear buffer offset of the first element of column 'col'
// (column-major storage). col == 0 is allowed even on an empty matrix.
template <class ElemType>
inline size_t CPUMatrix<ElemType>::LocateColumn(const size_t col) const
{
    // For performance reason avoid extra validation in release.
    assert(col == 0 || col < GetNumCols());
    return col * m_numRows; // matrix in column-wise storage
}
// Returns the linear buffer offset of element (row, col) in column-major storage.
template <class ElemType>
inline size_t CPUMatrix<ElemType>::LocateElement(const size_t row, const size_t col) const
{
    // For performance reason avoid extra validation in release.
    assert(row < m_numRows);
    return LocateColumn(col) + row; // matrix in column-wise storage
}
#pragma endregion Basic Operators
#pragma region Member BLAS Functions
// Adds the scalar alpha to every element of this matrix, in place.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator+=(ElemType alpha)
{
    AssignSumOf(alpha, *this);
    return *this;
}
// Returns a new matrix equal to this one with alpha added to every element.
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator+(ElemType alpha) const
{
    CPUMatrix<ElemType> result(GetNumRows(), GetNumCols());
    result.AssignSumOf(alpha, *this);
    return result;
}
// [this] = alpha + a, element-wise. Resizes this matrix to match 'a' unless
// assigning in place (this == &a).
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSumOf(const ElemType alpha, const CPUMatrix<ElemType>& a)
{
    if (a.IsEmpty())
        LogicError("AssignSumOf: Matrix a is empty.");
    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());
    long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        // four-way unrolling
        for (long i = 0; i < (m & ~3); i += 4)
        {
            us(i, j) = alpha + a(i, j);
            us(i + 1, j) = alpha + a(i + 1, j);
            us(i + 2, j) = alpha + a(i + 2, j);
            us(i + 3, j) = alpha + a(i + 3, j);
        }
        // handle the tail rows (row count not divisible by 4)
        for (long i = m & ~3; i < m; i++)
        {
            us(i, j) = alpha + a(i, j);
        }
    }
    return *this;
}
//if [this] and a have same dimension then [this]=[this]+a
//if a is a column vector, add to all columns of [this]
//if a is a row vector, add to all rows of [this]
//if a is a scalar, add it to all elements.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator+=(const CPUMatrix<ElemType>& a)
{
    // ScaleAndAdd already handles the 1x1/row-vector/column-vector broadcast
    // cases, so no special-casing is needed here.
    // if (a.GetNumElements() == 1)
    //    *this += a(0,0);
    // else
    ScaleAndAdd(1, a, *this);
    return *this;
}
//if [this] and a have same dimension then OUTPUT=[this]+a
//if a is a column vector, add to all columns of [this]
//if a is a row vector, add to all rows of [this]
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator+(const CPUMatrix<ElemType>& a) const
{
    if (GetNumElements() == 1) // 1x1 lhs broadcasts as a scalar
    {
        CPUMatrix<ElemType> c(a);
        c += (*this)(0, 0);
        return c;
    }
    else if (a.GetNumElements() == 1) // 1x1 rhs broadcasts as a scalar
    {
        CPUMatrix<ElemType> c(*this);
        c += a(0, 0);
        return c;
    }
    else
    {
        CPUMatrix<ElemType> c(*this); // this implementation will introduce a copy overhead. but makes reuse of the code
        c += a;
        return c;
    }
}
// [this] = a + b, where a 1x1 operand broadcasts as a scalar.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSumOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
    // Copy the non-scalar operand first, then let operator+= handle broadcasting.
    const bool aIsScalar = (a.GetNumElements() == 1);
    SetValue(aIsScalar ? b : a);
    (*this) += (aIsScalar ? a : b);
    return *this;
}
// Subtracts the scalar alpha from every element of this matrix, in place.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator-=(ElemType alpha)
{
    AssignDifferenceOf(*this, alpha);
    return *this;
}
// Returns a new matrix equal to this one with alpha subtracted from every element.
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator-(ElemType alpha) const
{
    CPUMatrix<ElemType> result(GetNumRows(), GetNumCols());
    result.AssignDifferenceOf(*this, alpha);
    return result;
}
// [this] = alpha - a, element-wise. Resizes this matrix to match 'a' unless
// assigning in place (this == &a).
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignDifferenceOf(const ElemType alpha, const CPUMatrix<ElemType>& a)
{
    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());
    long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        // four-way unrolling
        for (long i = 0; i < (m & ~3); i += 4)
        {
            us(i, j) = alpha - a(i, j);
            us(i + 1, j) = alpha - a(i + 1, j);
            us(i + 2, j) = alpha - a(i + 2, j);
            us(i + 3, j) = alpha - a(i + 3, j);
        }
        // handle the tail rows (row count not divisible by 4)
        for (long i = m & ~3; i < m; i++)
        {
            us(i, j) = alpha - a(i, j);
        }
    }
    return *this;
}
// [this] = a - alpha, element-wise. Resizes this matrix to match 'a' unless
// assigning in place (this == &a).
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignDifferenceOf(const CPUMatrix<ElemType>& a, const ElemType alpha)
{
    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());
    long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        // four-way unrolling
        for (long i = 0; i < (m & ~3); i += 4)
        {
            us(i, j) = a(i, j) - alpha;
            us(i + 1, j) = a(i + 1, j) - alpha;
            us(i + 2, j) = a(i + 2, j) - alpha;
            us(i + 3, j) = a(i + 3, j) - alpha;
        }
        // handle the tail rows (row count not divisible by 4)
        for (long i = m & ~3; i < m; i++)
        {
            us(i, j) = a(i, j) - alpha;
        }
    }
    return *this;
}
//if [this] and a have same dimension then [this]=[this]-a
//if a is a column vector, minus it from all columns of [this]
//if a is a row vector, minus it from all rows of [this]
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator-=(const CPUMatrix<ElemType>& a)
{
    // Subtraction is addition with factor -1; ScaleAndAdd handles broadcasting.
    ScaleAndAdd(-1, a, *this);
    return *this;
}
//if [this] and a have same dimension then output=[this]-a
//if a is a column vector, minus it from all columns of [this]
//if a is a row vector, minus it from all rows of [this]
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator-(const CPUMatrix<ElemType>& a) const
{
    CPUMatrix<ElemType> c(*this); // this implementation will introduce a copy overhead. but makes reuse of the code
    c -= a;
    return c;
}
// [this] = a - b. Skips the copy when assigning in place (this == &a);
// operator-= handles the vector/scalar broadcast cases for b.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignDifferenceOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
    if (this != &a)
    {
        RequireSize(a.GetNumRows(), a.GetNumCols());
        SetValue(a);
    }
    (*this) -= b;
    return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator*=(ElemType alpha)
{
Scale(alpha, *this);
return *this;
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator*(ElemType alpha) const
{
CPUMatrix<ElemType> c(GetNumRows(), GetNumCols());
Scale(alpha, *this, c);
return c;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignProductOf(const ElemType alpha, const CPUMatrix<ElemType>& a)
{
Scale(alpha, a, *this);
return *this;
}
// [this]=a*b
// Matrix product with optional transposition of either operand.
// If either operand is a 1x1 scalar, a scale is performed instead of a GEMM.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignProductOf(const CPUMatrix<ElemType>& a, const bool transposeA, const CPUMatrix<ElemType>& b, const bool transposeB)
{
if (a.GetNumElements() == 1)
{
// NOTE(review): when a is scalar and transposeB is false, b is never copied
// into [this]; the scale below acts on the CURRENT contents of [this].
// This looks like it assumes [this] already holds b — confirm against callers.
if (transposeB)
AssignTransposeOf(b);
(*this) *= a(0, 0);
}
else if (b.GetNumElements() == 1)
{
// Same asymmetry here for scalar b with transposeA == false.
if (transposeA)
AssignTransposeOf(a);
(*this) *= b(0, 0);
}
else
Multiply(a, transposeA, b, transposeB, *this);
return *this;
}
// Matrix product [this] * a, returned as a new matrix.
// A 1x1 operand on either side degenerates to a scalar scale of the other.
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator*(const CPUMatrix<ElemType>& a) const
{
auto& us = *this;
if (GetNumElements() == 1)
{
CPUMatrix<ElemType> c;
c.AssignProductOf(us(0, 0), a);
return c;
}
else if (a.GetNumElements() == 1)
{
CPUMatrix<ElemType> c;
c.AssignProductOf(a(0, 0), us);
return c;
}
else
{
CPUMatrix<ElemType> c;
Multiply(*this, a, c);
return c;
}
}
// [this] /= alpha, implemented as multiplication by the reciprocal.
// Note: for integral-like ElemType 1/alpha would truncate; ElemType is float/double here.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator/=(ElemType alpha)
{
(*this) *= 1 / alpha;
return (*this);
}
// Returns [this] / alpha as a new matrix.
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator/(ElemType alpha) const
{
return ((*this) * (1 / alpha));
}
//element-wise power
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator^=(ElemType alpha)
{
auto& us = *this;
ElementWisePower(alpha, us, us);
return us;
}
//element-wise power
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator^(ElemType alpha) const
{
CPUMatrix<ElemType> c(GetNumRows(), GetNumCols());
ElementWisePower(alpha, *this, c);
return c;
}
// [this] = a .^ power (element-wise power of a, written into [this]).
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementPowerOf(const CPUMatrix<ElemType>& a, const ElemType power)
{
ElementWisePower(power, a, *this);
return *this;
}
//[this]=[this] .* a (we cannot override operator .* in c++)
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::ElementMultiplyWith(const CPUMatrix<ElemType>& a)
{
return AssignElementProductOf(*this, a);
}
//[this]=[this] ./ a (we cannot override operator ./ in c++)
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::ElementDivideBy(const CPUMatrix<ElemType>& a)
{
return AssignElementDivisionOf(*this, a);
}
//[this]=a .* b
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementProductOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
    // Element-wise (Hadamard) product. a and b must have identical dimensions;
    // [this] is resized to match unless it aliases a.
    if (a.IsEmpty() || b.IsEmpty())
        LogicError("AssignElementProductOf: Matrix is empty.");
    if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols()))
        InvalidArgument("AssignElementProductOf: The input matrix dimensions do not match.");
    auto& self = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());
    const long rows = (long) GetNumRows();
    const long cols = (long) GetNumCols();
#pragma omp parallel for
    for (long col = 0; col < cols; col++)
    {
        for (long row = 0; row < rows; row++)
        {
            self(row, col) = a(row, col) * b(row, col);
        }
    }
    return *this;
}
//[this] +=a .* b
// Accumulates the element-wise product of a and b into [this].
// All three matrices must have identical dimensions ([this] is NOT resized).
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddElementProductOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
if (a.IsEmpty() || b.IsEmpty())
LogicError("AddElementProductOf: Matrix is empty.");
if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols()))
InvalidArgument("AddElementProductOf : The input matrix dimensions do not match.");
if (!(a.GetNumRows() == GetNumRows() && a.GetNumCols() == GetNumCols()))
InvalidArgument("AddElementProductOf : The input matrix dimensions do not match [this].");
auto& us = *this;
long m = (long) GetNumRows(), n = (long) GetNumCols();
// Parallel over columns; inner loop manually unrolled 4x over rows.
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) += a(i, j) * b(i, j);
us(i + 1, j) += a(i + 1, j) * b(i + 1, j);
us(i + 2, j) += a(i + 2, j) * b(i + 2, j);
us(i + 3, j) += a(i + 3, j) * b(i + 3, j);
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, j) += a(i, j) * b(i, j);
}
}
return *this;
}
//[this]=a ./ b
// TODO: This clips the divisor by a small value. Is that really what one would want?
// Divisors with magnitude below EPS_IN_INVERSE are replaced by +/-EPS_IN_INVERSE
// (sign-preserving) to avoid division by ~zero.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementDivisionOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
if (a.IsEmpty() || b.IsEmpty())
LogicError("AssignElementDivisionOf: Matrix is empty.");
if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols()))
InvalidArgument("AssignElementDivisionOf : The input matrix dimensions do not match.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
ElemType smallValue = EPS_IN_INVERSE;
#pragma omp parallel for
foreach_coord (i, j, us)
{
ElemType v = b(i, j);
if (v >= 0 && v < smallValue)
us(i, j) = a(i, j) / smallValue;
else if (v < 0 && v > -smallValue)
us(i, j) = a(i, j) / (-smallValue);
else
us(i, j) = a(i, j) / v;
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::ColumnElementMultiplyWith(const CPUMatrix<ElemType>& a)
{
    // Multiplies every column of [this] element-wise by the column vector a.
    // a must be (rows x 1) with rows matching [this].
    if (a.IsEmpty() || IsEmpty())
        LogicError("ColumnElementMultiplyWith: Matrix is empty.");
    if (!(a.GetNumRows() == GetNumRows() && a.GetNumCols() == 1))
        InvalidArgument("ColumnElementMultiplyWith: The input matrix should be a col vector and match [this]'s rows.");
    auto& self = *this;
    const long rows = (long) GetNumRows();
    const long cols = (long) GetNumCols();
#pragma omp parallel for
    for (long col = 0; col < cols; col++)
    {
        for (long row = 0; row < rows; row++)
        {
            self(row, col) *= a(row, 0);
        }
    }
    return *this;
}
// Multiplies every row of [this] element-wise by the row vector a (1 x cols).
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::RowElementMultiplyWith(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty() || IsEmpty())
LogicError("RowElementMultiplyWith: Matrix is empty.");
if (!(a.GetNumRows() == 1 && a.GetNumCols() == GetNumCols()))
InvalidArgument("RowElementMultiplyWith: The input matrix should be a row vector and match [this]'s columns.");
auto& us = *this;
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// The scale factor is constant per column, so hoist it out of the row loop.
ElemType v = a(0, j);
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) *= v;
us(i + 1, j) *= v;
us(i + 2, j) *= v;
us(i + 3, j) *= v;
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, j) *= v;
}
}
return *this;
}
// Divides every row of [this] element-wise by the row vector a (1 x cols).
// Divisors with magnitude below EPS_IN_INVERSE are clipped (sign-preserving).
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::RowElementDivideBy(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty() || IsEmpty())
LogicError("RowElementDivideBy: Matrix is empty.");
if (!(a.GetNumRows() == 1 && a.GetNumCols() == GetNumCols()))
InvalidArgument("RowElementDivideBy: The input matrix should be a row vector and match [this]'s columns.");
auto& us = *this;
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// Clip the per-column divisor once, outside the row loop.
ElemType v = a(0, j);
if (v >= 0 && v < EPS_IN_INVERSE)
v = EPS_IN_INVERSE;
else if (v < 0 && v > -EPS_IN_INVERSE)
v = (-EPS_IN_INVERSE);
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) /= v;
us(i + 1, j) /= v;
us(i + 2, j) /= v;
us(i + 3, j) /= v;
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, j) /= v;
}
}
return *this;
}
// Divides every column of [this] element-wise by the column vector a (rows x 1).
// Divisors with magnitude below EPS_IN_INVERSE are clipped (sign-preserving).
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::ColumnElementDivideBy(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty() || IsEmpty())
LogicError("ColumnElementDivideBy: Matrix is empty.");
if (!(a.GetNumRows() == GetNumRows() && a.GetNumCols() == 1))
InvalidArgument("ColumnElementDivideBy: The input matrix should be a col vector and match [this]'s rows.");
auto& us = *this;
long m = (long) GetNumRows(), n = (long) GetNumCols();
ElemType smallValue = EPS_IN_INVERSE;
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
for (long i = 0; i < m; i++)
{
ElemType v = a(i, 0);
if (v >= 0 && v < smallValue)
us(i, j) /= smallValue;
else if (v < 0 && v > -smallValue)
us(i, j) /= (-smallValue);
else
us(i, j) /= v;
}
}
return *this;
}
//[this]=1 ./ a
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::ElementInverse()
{
return AssignElementInverseOf(*this);
}
// [this] = 1 ./ a element-wise; inputs with magnitude below EPS_IN_INVERSE
// are clipped (sign-preserving) before inversion to avoid division by ~zero.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementInverseOf(const CPUMatrix<ElemType>& a)
{
ElemType smallValue = EPS_IN_INVERSE;
if (a.IsEmpty())
LogicError("AssignElementInverseOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, us)
{
if (a(i, j) < 0 && a(i, j) > -smallValue)
us(i, j) = 1 / (-smallValue);
else if (a(i, j) >= 0 && a(i, j) < smallValue)
us(i, j) = 1 / smallValue;
else
us(i, j) = 1 / a(i, j);
}
return *this;
}
//[this]=sigmoid([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceSigmoid()
{
return AssignSigmoidOf(*this);
}
// [this] = sigmoid(a) element-wise, using the numerically stable two-branch
// form: for x >= 0 compute 1/(1+exp(-x)); for x < 0 compute exp(x)/(1+exp(x)).
// Both branches avoid computing exp of a large positive argument (overflow).
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSigmoidOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignSigmoidOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, us)
{
if (a(i, j) >= 0)
us(i, j) = 1 / (1 + exp(-a(i, j)));
else
{
ElemType v = exp(a(i, j));
us(i, j) = v / (1 + v);
}
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceLinearRectifierDerivative()
{
    // In-place ReLU derivative: [this](i,j) = ([this](i,j) > 0) ? 1 : 0.
    return AssignLinearRectifierDerivativeOf(*this);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignLinearRectifierDerivativeOf(const CPUMatrix<ElemType>& a)
{
    // [this] = ReLU'(a) element-wise: 1 where a > 0, else 0.
    if (a.IsEmpty())
        LogicError("AssignLinearRectifierDerivativeOf: Matrix a is empty.");
    auto& self = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());
    const long rows = (long) GetNumRows();
    const long cols = (long) GetNumCols();
#pragma omp parallel for
    for (long col = 0; col < cols; col++)
    {
        for (long row = 0; row < rows; row++)
        {
            self(row, col) = a(row, col) > 0.0f ? 1.0f : 0.0f;
        }
    }
    return *this;
}
// In-place sigmoid derivative; see AssignSigmoidDerivativeOf for semantics.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceSigmoidDerivative()
{
return AssignSigmoidDerivativeOf(*this);
}
// [this] = a .* (1 - a) element-wise. Note: a is expected to already hold
// sigmoid OUTPUT values (the derivative is expressed in terms of the output).
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSigmoidDerivativeOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignSigmoidDerivativeOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
ElemType v = a(i, j);
us(i, j) = v * (1 - v);
ElemType v1 = a(i + 1, j);
us(i + 1, j) = v1 * (1 - v1);
ElemType v2 = a(i + 2, j);
us(i + 2, j) = v2 * (1 - v2);
ElemType v3 = a(i + 3, j);
us(i + 3, j) = v3 * (1 - v3);
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
ElemType v = a(i, j);
us(i, j) = v * (1 - v);
}
}
return *this;
}
//[this]=tanh([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceTanh()
{
    return AssignTanhOf(*this);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignTanhOf(const CPUMatrix<ElemType>& a)
{
    // [this] = tanh(a) element-wise; resizes to match a unless in place.
    if (a.IsEmpty())
        LogicError("AssignTanhOf: Matrix a is empty.");
    auto& self = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());
    const long rows = (long) GetNumRows();
    const long cols = (long) GetNumCols();
#pragma omp parallel for
    for (long col = 0; col < cols; col++)
    {
        for (long row = 0; row < rows; row++)
        {
            self(row, col) = tanh(a(row, col));
        }
    }
    return *this;
}
//[this]=atanh([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceAtanh()
{
return AssignAtanhOf(*this);
}
// [this] = atanh(a) element-wise; resizes to match a unless in place.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignAtanhOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignAtanhOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = atanh(a(i, j));
us(i + 1, j) = atanh(a(i + 1, j));
us(i + 2, j) = atanh(a(i + 2, j));
us(i + 3, j) = atanh(a(i + 3, j));
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, j) = atanh(a(i, j));
}
}
return *this;
}
//[this]=logsoftmax([this]) element wise (per column or per row)
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceLogSoftmax(const bool isColWise)
{
return AssignLogSoftmaxOf(*this, isColWise);
}
// [this] = log-softmax of a, computed column-wise or row-wise.
// Uses the standard max-shift trick: subtract the per-column (or per-row)
// maximum before exponentiating so exp() cannot overflow, then subtract
// log(sum(exp(shifted))) to finish the log-softmax.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignLogSoftmaxOf(const CPUMatrix<ElemType>& a, const bool isColWise)
{
if (a.IsEmpty())
LogicError("AssignLogSoftmaxOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
if (isColWise)
{
#pragma omp parallel for
foreach_column (j, a)
{
// we need to extract max before applying exp to avoid overflow
ElemType maxV = a(0, j);
foreach_row (i, a)
maxV = std::max(maxV, a(i, j));
ElemType sum = 0;
foreach_row (i, a)
sum += exp(us(i, j) = a(i, j) - maxV);
sum = log(sum);
foreach_row (i, us)
us(i, j) -= sum;
}
}
else
{
#pragma omp parallel for
foreach_row (i, a)
{
// we need to extract max before applying exp to avoid overflow
ElemType maxV = a(i, 0);
foreach_column (j, a)
maxV = std::max(maxV, a(i, j));
ElemType sum = 0;
foreach_column (j, a)
sum += exp(us(i, j) = a(i, j) - maxV);
sum = log(sum);
foreach_column (j, us)
us(i, j) -= sum;
}
}
return *this;
}
//[this]=hardmax([this])
//the max element is 1 else is 0
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceHardmax(const bool isColWise)
{
return AssignHardmaxOf(*this, isColWise);
}
// [this] = hardmax of a: per column (or row), the FIRST occurrence of the
// maximum becomes 1 and all other entries become 0. Not parallelized.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignHardmaxOf(const CPUMatrix<ElemType>& a, const bool isColWise)
{
if (a.IsEmpty())
LogicError("AssignHardmaxOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
// When not in place, the output buffer can be zeroed wholesale up front;
// in place we must zero lazily (per column/row) AFTER finding the max,
// since zeroing first would destroy the values being searched.
bool isInplace = (us.Data() == a.Data());
if (!isInplace)
memset(us.Data(), 0, a.GetNumElements() * sizeof(ElemType));
if (isColWise)
{
foreach_column (j, a)
{
// we need to extract max
ElemType maxV = a(0, j);
long maxI = 0;
foreach_row (i, a)
{
if (maxV < a(i, j))
{
maxV = a(i, j);
maxI = i;
}
}
// Columns are contiguous (column-major storage), so a single memset works here.
if (isInplace)
memset(us.Data() + j * a.GetNumRows(), 0, a.GetNumRows() * sizeof(ElemType));
us(maxI, j) = 1.0f;
}
}
else
{
foreach_row (i, a)
{
// we need to extract max
ElemType maxV = a(i, 0);
long maxJ = 0;
foreach_column (j, a)
{
if (maxV < a(i, j))
{
maxV = a(i, j);
maxJ = j;
}
}
// Rows are strided, so in-place zeroing must be done element by element.
if (isInplace)
{
foreach_column(j, us)
us(i, j) = (j == maxJ) ? 1.0f : 0.0f;
}
else
us(i, maxJ) = 1.0f;
}
}
return *this;
}
//[this]=sqrt([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceSqrt()
{
return AssignSqrtOf(*this);
}
//to prevent negative values caused by floating operations, we force inputs to be >=0
//this may, however, hide problems in the caller.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSqrtOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignSqrtOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = sqrt(max((ElemType)0, a(i, j)));
us(i + 1, j) = sqrt(max((ElemType)0, a(i + 1, j)));
us(i + 2, j) = sqrt(max((ElemType)0, a(i + 2, j)));
us(i + 3, j) = sqrt(max((ElemType)0, a(i + 3, j)));
}
// remaining
for (long i = m & ~3; i < m; i++)
{
us(i, j) = sqrt(max((ElemType)0, a(i, j)));
}
}
return *this;
}
//[this]=exp([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceExp()
{
    return AssignExpOf(*this);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignExpOf(const CPUMatrix<ElemType>& a)
{
    // [this] = exp(a) element-wise; resizes to match a unless in place.
    if (a.IsEmpty())
        LogicError("AssignExpOf: Matrix a is empty.");
    auto& self = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());
    const long rows = (long) GetNumRows();
    const long cols = (long) GetNumCols();
#pragma omp parallel for
    for (long col = 0; col < cols; col++)
    {
        for (long row = 0; row < rows; row++)
        {
            self(row, col) = exp(a(row, col));
        }
    }
    return *this;
}
//[this]=abs([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceAbs()
{
return AssignAbsOf(*this);
}
// [this] = abs(a) element-wise; resizes to match a unless in place.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignAbsOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignAbsOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = abs(a(i, j));
us(i + 1, j) = abs(a(i + 1, j));
us(i + 2, j) = abs(a(i + 2, j));
us(i + 3, j) = abs(a(i + 3, j));
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, j) = abs(a(i, j));
}
}
return *this;
}
//[this]=log([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceLog()
{
return AssignLogOf(*this);
}
//[this]=log10([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceLog10()
{
return AssignLog10Of(*this);
}
// [this] = log(a) element-wise. Inputs below EPS_IN_LOG are clamped to
// LOG_OF_EPS_IN_LOG instead of producing -inf/NaN.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignLogOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignLogOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, a)
{
const ElemType v = a(i, j);
if (v < EPS_IN_LOG)
{
us(i, j) = LOG_OF_EPS_IN_LOG;
}
else
us(i, j) = log(v);
}
return *this;
}
// [this] = log10(a) element-wise. Non-positive inputs are an error; inputs
// below EPS_IN_LOG are clamped to LOG10_OF_EPS_IN_LOG instead of underflowing.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignLog10Of(const CPUMatrix<ElemType>& a)
{
    if (a.IsEmpty())
        LogicError("AssignLog10Of: Matrix a is empty."); // fixed: message previously said "AssignLogOf"
    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
    foreach_coord (i, j, a)
    {
        const ElemType v = a(i, j);
        if (v <= 0)
            LogicError("AssignLog10Of: Log10 can only be applied to numbers larger than 0."); // fixed: message previously said "AssignLogOf"
        else if (v < EPS_IN_LOG)
        {
            us(i, j) = LOG10_OF_EPS_IN_LOG;
        }
        else
            us(i, j) = log10(v);
    }
    return *this;
}
//[this]=cos([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceCosine()
{
return AssignCosineOf(*this);
}
// [this] = cos(a) element-wise; resizes to match a unless in place.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignCosineOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignCosineOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, a)
{
const ElemType v = a(i, j);
us(i, j) = cos(v);
}
return *this;
}
//[this]=-sin([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceNegativeSine()
{
    return AssignNegativeSineOf(*this);
}
// [this] = -sin(a) element-wise (the derivative of cos); resizes to match a
// unless assigning in place.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignNegativeSineOf(const CPUMatrix<ElemType>& a)
{
    if (a.IsEmpty())
        LogicError("AssignNegativeSineOf: Matrix a is empty."); // fixed: message previously said "AssignCosineOf" (copy-paste)
    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
    foreach_coord (i, j, a)
    {
        const ElemType v = a(i, j);
        us(i, j) = -sin(v);
    }
    return *this;
}
//[this]=acos([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceAcos()
{
return AssignAcosOf(*this);
}
// [this] = acos(a) element-wise; resizes to match a unless in place.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignAcosOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignAcosOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, a)
{
const ElemType v = a(i, j);
us(i, j) = acos(v);
}
return *this;
}
//[this]=asin([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceAsin()
{
return AssignAsinOf(*this);
}
// [this] = asin(a) element-wise; resizes to match a unless in place.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignAsinOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignAsinOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, a)
{
const ElemType v = a(i, j);
us(i, j) = asin(v);
}
return *this;
}
//[this]=cosh([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceCosh()
{
return AssignCoshOf(*this);
}
// [this] = cosh(a) element-wise; resizes to match a unless in place.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignCoshOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignCoshOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, a)
{
const ElemType v = a(i, j);
us(i, j) = cosh(v);
}
return *this;
}
//[this]=sinh([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceSinh()
{
return AssignSinhOf(*this);
}
// [this] = sinh(a) element-wise; resizes to match a unless in place.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSinhOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignSinhOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, a)
{
const ElemType v = a(i, j);
us(i, j) = sinh(v);
}
return *this;
}
//[this]=asinh([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceAsinh()
{
return AssignAsinhOf(*this);
}
// [this] = asinh(a) element-wise; resizes to match a unless in place.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignAsinhOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignAsinhOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, a)
{
const ElemType v = a(i, j);
us(i, j) = asinh(v);
}
return *this;
}
//Threshold truncating: this[i] = max( this[i], threshold )
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceTruncateBottom(const ElemType threshold)
{
if (IsEmpty())
LogicError("InplaceTruncateBottom: Matrix is empty.");
auto& us = *this;
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
if (us(i, j) < threshold)
us(i, j) = threshold;
if (us(i + 1, j) < threshold)
us(i + 1, j) = threshold;
if (us(i + 2, j) < threshold)
us(i + 2, j) = threshold;
if (us(i + 3, j) < threshold)
us(i + 3, j) = threshold;
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
if (us(i, j) < threshold)
us(i, j) = threshold;
}
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceTruncate(const ElemType threshold)
{
    // Symmetric clipping: each element is clamped into [-|threshold|, +|threshold|].
    // NaN elements are left unchanged (both comparisons are false for NaN).
    if (IsEmpty())
        LogicError("InplaceTruncate: Matrix is empty.");
    auto& self = *this;
    const ElemType upperBound = abs(threshold);
    const ElemType lowerBound = -upperBound;
    const long rows = (long) GetNumRows();
    const long cols = (long) GetNumCols();
#pragma omp parallel for
    for (long col = 0; col < cols; col++)
    {
        for (long row = 0; row < rows; row++)
        {
            if (self(row, col) > upperBound)
                self(row, col) = upperBound;
            else if (self(row, col) < lowerBound)
                self(row, col) = lowerBound;
        }
    }
    return *this;
}
//x= x-threshold if x>threshold, x+threshold if x<-threshold, 0 otherwise
// (the soft-thresholding / shrinkage operator used e.g. in L1 proximal steps)
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceSoftThreshold(const ElemType threshold)
{
    if (IsEmpty())
        LogicError("InplaceSoftThreshold: Matrix is empty."); // fixed: message previously said "InplaceTruncate" (copy-paste)
    // Operates on the flat buffer; element order does not matter here.
    long m = (long) GetNumElements();
    ElemType* bufPtr = Data();
#pragma omp parallel for
    for (long i = 0; i < m; i++)
    {
        if (bufPtr[i] > threshold)
            bufPtr[i] -= threshold;
        else if (bufPtr[i] < -threshold)
            bufPtr[i] += threshold;
        else
            bufPtr[i] = 0;
    }
    return *this;
}
//Threshold truncating: this[i] = max( a[i], threshold )
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignTruncateBottomOf(const CPUMatrix<ElemType>& a, const ElemType threshold)
{
if (a.IsEmpty())
LogicError("AssignTruncateBottomOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, a)
{
if (a(i, j) < threshold)
us(i, j) = threshold;
else
us(i, j) = a(i, j);
}
return *this;
}
//Threshold truncating: this[i] = min( this[i], threshold )
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceTruncateTop(const ElemType threshold)
{
if (IsEmpty())
LogicError("InplaceTruncateTop: Matrix is empty.");
auto& us = *this;
#pragma omp parallel for
foreach_coord (i, j, us)
{
if (us(i, j) > threshold)
us(i, j) = threshold;
}
return *this;
}
//Threshold truncating: this[i] = min( a[i], threshold )
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignTruncateTopOf(const CPUMatrix<ElemType>& a, const ElemType threshold)
{
if (a.IsEmpty())
LogicError("AssignTruncateTopOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, a)
{
if (a(i, j) > threshold)
us(i, j) = threshold;
else
us(i, j) = a(i, j);
}
return *this;
}
//Threshold truncating: this[i] = 0 if abs(this[i]) < threshold.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::SetToZeroIfAbsLessThan(const ElemType threshold)
{
if (IsEmpty())
LogicError("SetToZeroIfAbsLessThan: Matrix is empty.");
auto& us = *this;
#pragma omp parallel for
foreach_coord (i, j, us)
{
if (abs(us(i, j)) < threshold)
us(i, j) = 0;
}
return *this;
}
//sum of all abs(elements)
// Delegates to the BLAS asum routine for the matching element type.
template <class ElemType>
ElemType CPUMatrix<ElemType>::SumOfAbsElements() const
{
if (IsEmpty())
LogicError("SumOfAbsElements: Matrix is empty.");
if (std::is_same<ElemType, double>::value)
{
// reinterpret_cast is safe here: this branch only runs when ElemType IS double.
return (ElemType) cblas_dasum((int) GetNumElements(), reinterpret_cast<double*>(Data()), 1);
}
else if (std::is_same<ElemType, float>::value)
{
#pragma warning(suppress : 4244)
return cblas_sasum((int) GetNumElements(), reinterpret_cast<float*>(Data()), 1);
}
else
{
// NOTE(review): no return after this call — presumably RuntimeError is
// marked noreturn (throws); confirm, otherwise this path is UB.
RuntimeError("Unsupported data format");
}
}
//sum of all elements
// Parallel reduction over the flat buffer with manual four-way unrolling.
// Note: the floating-point summation order (and thus the exact result bits)
// depends on the unrolling and the OpenMP reduction partitioning.
template <class ElemType>
ElemType CPUMatrix<ElemType>::SumOfElements() const
{
if (IsEmpty())
LogicError("SumOfElements: Matrix is empty.");
ElemType sum = 0;
long m = (long) GetNumElements(); // note: OpenMP requires loop indices to be long, not size_t
ElemType* bufPtr = Data();
//four-way unrolling
#pragma omp parallel for reduction(+ : sum)
for (long i = 0; i < (m & ~3); i += 4)
{
sum += bufPtr[i] + bufPtr[i + 1] + bufPtr[i + 2] + bufPtr[i + 3];
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
sum += bufPtr[i];
}
return sum;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSumOfElements(const CPUMatrix<ElemType>& a)
{
    // [this] becomes a 1x1 matrix holding the sum of all elements of a.
    if (a.IsEmpty())
        LogicError("AssignSumOfElements: Matrix a is empty.");
    RequireSize(1, 1);
    (*this)(0, 0) = a.SumOfElements();
    return *this;
}
// Expands a into a one-hot encoding along the given axis of 'shape'.
// [this] is resized to (num_class * a.rows) x a.cols, zeroed, and for each
// element of a with an in-range class index, the corresponding one-hot slot
// is set to 1; out-of-range indices are silently skipped.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignOneHot(const CPUMatrix<ElemType>& a, vector<size_t>& shape, size_t axis)
{
    if (a.IsEmpty())
        LogicError("AssignOneHot: Matrix a is empty.");
    if (axis >= shape.size())
        LogicError("AssignOneHot: axis is not correct");
    // item_size = product of the dimensions before the one-hot axis.
    size_t item_size = 1;
    for (size_t i = 0; i < shape.size() && i < axis; i++)
        item_size *= shape[i];
    size_t num_class = shape[axis];
    auto& us = *this;
    auto nCols = a.GetNumCols();
    auto nRows = num_class * a.GetNumRows();
    us.RequireSize(nRows, nCols);
    ElemType* bufPtr = Data();
    ElemType* aBufPtr = a.Data();
    memset(bufPtr, 0, sizeof(ElemType) * nRows * nCols);
    // Hoist the element count as a signed long: OpenMP needs a signed index,
    // and comparing 'long i' directly against size_t GetNumElements() was a
    // signed/unsigned comparison (fixed).
    const long numElements = (long) a.GetNumElements();
#pragma omp parallel for
    for (long i = 0; i < numElements; i++)
    {
        // Compare the class index in ElemType space to avoid an implicit
        // float-vs-size_t comparison (fixed).
        if (aBufPtr[i] >= 0 && aBufPtr[i] < (ElemType) num_class)
        {
            size_t block_id = i / item_size;
            size_t item_id = i % item_size;
            bufPtr[block_id * num_class * item_size + item_id + item_size * (size_t) aBufPtr[i]] = 1;
        }
    }
    return *this;
}
// Gathers rows of 'target' into [this] according to 'indices'.
// For every index element k, copies row_elements contiguous values starting
// at target[k * row_elements]. [this] is resized to
// (indices.rows * row_elements) x indices.cols.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::GatherFromTarget(const CPUMatrix<ElemType>& indices, const CPUMatrix<ElemType>& target, size_t row_elements)
{
    if (indices.IsEmpty() || target.IsEmpty())
        LogicError("GatherFromTarget: input matrix is empty.");
    if (row_elements == 0)
        LogicError("GatherFromTarget: target matrix at least need 1 dim.");
    auto nCols = indices.GetNumCols();
    auto nRows = indices.GetNumRows() * row_elements;
    this->RequireSize(nRows, nCols);
    ElemType* indicesBufPtr = indices.Data();
    ElemType* targetBufPtr = target.Data();
    ElemType* buffer = Data();
    // Fixed: the loop index was 'int' compared against a size_t count — a
    // signed/unsigned mismatch that would overflow for matrices with more
    // than INT_MAX elements. Use a hoisted signed long as elsewhere in this file.
    const long numIndices = (long) indices.GetNumElements();
#pragma omp parallel for
    for (long i = 0; i < numIndices; i++)
    {
        memcpy(buffer + i * row_elements, targetBufPtr + ((size_t) indicesBufPtr[i] * row_elements), sizeof(ElemType) * row_elements);
    }
    return *this;
}
// Scatter-adds rows of 'values' into [this] at positions given by 'indices',
// delegating the actual accumulation to the ScatterValues helper.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::ScatterToIndices(const CPUMatrix<ElemType>& values, const CPUMatrix<ElemType>& indices, size_t row_elements)
{
if (indices.IsEmpty() || values.IsEmpty())
LogicError("ScatterToIndices: input matrix is empty.");
ElemType* indicesBufPtr = indices.Data();
ElemType* valueBufPtr = values.Data();
ElemType* buffer = Data();
ScatterValues(indicesBufPtr, valueBufPtr, buffer, (ElemType)1, indices.GetNumElements(), row_elements, this->GetNumCols());
return *this;
}
template <class ElemType>
bool CPUMatrix<ElemType>::IsEqualTo(const CPUMatrix<ElemType>& a, const ElemType threshold /*= 1e-8*/) const
{
    // Element-wise comparison against 'a' within 'threshold', delegated to AreEqual.
    const bool matricesMatch = AreEqual(*this, a, threshold);
    return matricesMatch;
}
template <class ElemType>
void CPUMatrix<ElemType>::VectorSum(const CPUMatrix<ElemType>& a, CPUMatrix<ElemType>& c, const bool isColWise)
{
    // Sums 'a' along one dimension:
    //   isColWise == true : one sum per column, c becomes 1 x n
    //   isColWise == false: one sum per row,    c becomes m x 1
    if (a.IsEmpty())
        LogicError("VectorSum: Input matrix a is empty.");
    const int m = (int) a.GetNumRows();
    const int n = (int) a.GetNumCols();
    assert(m > 0 && n > 0); // converting from size_t to int may cause overflow
    if (isColWise) // col-wise
    {
        c.RequireSize(1, n);
#pragma omp parallel for
        foreach_column (j, a)
        {
            // 'v' is private to each parallel iteration and the inner loop is
            // sequential, so no '#pragma omp atomic' is needed; the atomic the
            // original carried on every addition only added overhead.
            ElemType v = 0;
            foreach_row (i, a)
            {
                v += a(i, j);
            }
            c(0, j) = v;
        }
    }
    else
    {
        c.RequireSize(m, 1);
#pragma omp parallel for
        foreach_row (i, a)
        {
            ElemType v = 0;
            foreach_column (j, a)
            {
                v += a(i, j);
            }
            c(i, 0) = v;
        }
    }
}
template <class ElemType>
void CPUMatrix<ElemType>::VectorNorm1(CPUMatrix<ElemType>& c, const bool isColWise) const
{
    // L1 norm (sum of absolute values) per column (c: 1 x n) or per row (c: m x 1).
    if (IsEmpty())
        LogicError("VectorNorm1: Matrix is empty.");
    auto& us = *this;
    const int m = (int) us.GetNumRows();
    const int n = (int) us.GetNumCols();
    assert(m > 0 && n > 0); // converting from size_t to int may cause overflow
    if (isColWise) // col-wise
    {
        c.RequireSize(1, n);
#pragma omp parallel for
        foreach_column (j, us)
        {
            // 'v' is private to each parallel iteration and the inner loop is
            // sequential, so the '#pragma omp atomic' the original placed on
            // every addition was unnecessary overhead and has been removed.
            ElemType v = 0;
            foreach_row (i, us)
            {
                v += abs(us(i, j));
            }
            c(0, j) = v;
        }
    }
    else
    {
        c.RequireSize(m, 1);
#pragma omp parallel for
        foreach_row (i, us)
        {
            ElemType v = 0;
            foreach_column (j, us)
            {
                v += abs(us(i, j));
            }
            c(i, 0) = v;
        }
    }
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignVectorNorm1Of(CPUMatrix<ElemType>& a, const bool isColWise)
{
    // Stores the per-column (or per-row) L1 norm of 'a' into this matrix.
    a.VectorNorm1(*this, isColWise);
    return *this;
}
template <class ElemType>
void CPUMatrix<ElemType>::VectorNorm2(CPUMatrix<ElemType>& c, const bool isColWise) const
{
    // Euclidean (L2) norm per column (c: 1 x n) or per row (c: m x 1),
    // computed with the BLAS nrm2 routines. Row-wise access uses stride m.
    if (IsEmpty())
        LogicError("VectorNorm2: Matrix is empty.");
    auto& us = *this;
    const int m = (int) us.GetNumRows();
    const int n = (int) us.GetNumCols();
    assert(m > 0 && n > 0); // converting from size_t to int may cause overflow
    ElemType* bufPtr = us.Data();
    if (isColWise) // col-wise
    {
        c.RequireSize(1, n);
        // Runtime type dispatch: both branches are compiled for both ElemTypes,
        // hence the casts/suppressions to silence conversion warnings.
        if (std::is_same<ElemType, double>::value)
        {
#pragma omp parallel for
            foreach_column (j, c)
            {
                c(0, j) = (ElemType) cblas_dnrm2(m, reinterpret_cast<double*>(bufPtr + us.LocateColumn(j)), 1);
            }
        }
        else if (std::is_same<ElemType, float>::value)
        {
#pragma omp parallel for
            foreach_column (j, c)
            {
#pragma warning(suppress : 4244)
                c(0, j) = cblas_snrm2(m, reinterpret_cast<float*>(bufPtr + us.LocateColumn(j)), 1);
            }
        }
        else
        {
            RuntimeError("Unsupported data format");
        }
    }
    else
    {
        c.RequireSize(m, 1);
        if (std::is_same<ElemType, double>::value)
        {
#pragma omp parallel for
            foreach_row (i, c)
            {
                // (ElemType) cast added for consistency with the col-wise branch;
                // avoids an implicit double->ElemType narrowing when ElemType==float.
                c(i, 0) = (ElemType) cblas_dnrm2(n, reinterpret_cast<double*>(bufPtr + i), m);
            }
        }
        else if (std::is_same<ElemType, float>::value)
        {
#pragma omp parallel for
            foreach_row (i, c)
            {
#pragma warning(suppress : 4244)
                c(i, 0) = cblas_snrm2(n, reinterpret_cast<float*>(bufPtr + i), m);
            }
        }
        else
        {
            RuntimeError("Unsupported data format");
        }
    }
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignVectorNorm2Of(CPUMatrix<ElemType>& a, const bool isColWise)
{
    // Stores the per-column (or per-row) L2 norm of 'a' into this matrix.
    a.VectorNorm2(*this, isColWise);
    return *this;
}
template <class ElemType>
void CPUMatrix<ElemType>::VectorNormInf(CPUMatrix<ElemType>& c, const bool isColWise) const
{
    // Infinity norm (maximum absolute value) per column (c: 1 x n) or
    // per row (c: m x 1).
    if (IsEmpty())
        LogicError("VectorNormInf: Matrix is empty.");
    auto& us = *this;
    const int m = (int) us.GetNumRows();
    const int n = (int) us.GetNumCols();
    assert(m > 0 && n > 0); // converting from size_t to int may cause overflow
    if (isColWise) // col-wise
    {
        c.RequireSize(1, n);
        // NOTE: kept sequential; parallelization was commented out in the
        // original implementation as well.
        foreach_column (j, us)
        {
            ElemType maxAbs = 0;
            foreach_row (i, us)
            {
                maxAbs = std::max(maxAbs, fabs_(us(i, j)));
            }
            c(0, j) = maxAbs;
        }
    }
    else
    {
        c.RequireSize(m, 1);
        foreach_row (i, us)
        {
            ElemType maxAbs = 0;
            foreach_column (j, us)
            {
                maxAbs = std::max(maxAbs, fabs_(us(i, j)));
            }
            c(i, 0) = maxAbs;
        }
    }
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignVectorNormInfOf(CPUMatrix<ElemType>& a, const bool isColWise)
{
    // Stores the per-column (or per-row) infinity norm of 'a' into this matrix.
    a.VectorNormInf(*this, isColWise);
    return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignInnerProductOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, const bool isColWise)
{
    // Stores the per-column (or per-row) inner product of 'a' and 'b' into this
    // matrix, delegating to the static InnerProduct helper.
    InnerProduct(a, b, *this, isColWise);
    return *this;
}
// column-wise crossproduct
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignKhatriRaoProductOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
    // Column-wise Khatri-Rao product: result column k is the Kronecker product
    // of a(:,k) and b(:,k), giving a (rowsA*rowsB) x cols matrix.
    if (a.IsEmpty() || b.IsEmpty())
        LogicError("AssignKhatriRaoProductOf: Matrix is empty.");
    long cols = (long) a.GetNumCols();
    if (cols != b.GetNumCols())
        InvalidArgument("a.GetNumCols() != b.GetNumCols()");
    long rowsA = (long) a.GetNumRows();
    long rowsB = (long) b.GetNumRows();
    RequireSize(rowsA * rowsB, cols);
#ifdef __INTEL_COMPILER // TODO: check this
#pragma simd statement
#endif
#pragma omp parallel for
    for (long col = 0; col < cols; col++)
    {
        // Output row index is j * rowsA + i: the 'a' index varies fastest.
        for (long j = 0; j < rowsB; j++)
        {
            const ElemType bVal = b(j, col);
            for (long i = 0; i < rowsA; i++)
            {
                (*this)(j * rowsA + i, col) = a(i, col) * bVal;
            }
        }
    }
    return *this;
}
// column-wise reshaped product. Used to compute KhatriRaoProduct Gradient
// this = reshape each column of a from (K1xK2,1) to (K1, K2)
// if each column of a is not transposed, each (K1, K2) times each column of b (K2, frames).
// the output is a (K1, frames) matrix
// if each column of a is transposed, each (K1, K2)^T times each column of b(K1, frames) and output is (K2, frames)
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddColumnReshapeProductOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, const bool transposeAColumn)
{
// Accumulates (does NOT overwrite) into this matrix; this must already have
// size (rowsA/rowsB) x cols, validated below.
if (a.IsEmpty() || b.IsEmpty())
LogicError("AddColumnReshapeProductOf: Matrix is empty.");
long cols = (long) a.GetNumCols();
if (cols != b.GetNumCols())
InvalidArgument("AddColumnReshapeProductOf: a.GetNumCols() != b.GetNumCols()");
long rowsA = (long) a.GetNumRows();
long rowsB = (long) b.GetNumRows();
// Each column of 'a' is interpreted as a reshaped (rowsC x rowsB) matrix,
// so rowsA must factor exactly.
if (rowsA % rowsB != 0)
InvalidArgument("AddColumnReshapeProductOf: number of rows in a should be multiples of that in b.");
long rowsC = rowsA / rowsB;
if (rowsC != GetNumRows() || cols != GetNumCols())
InvalidArgument("AddColumnReshapeProductOf: This matrix does not have the right size.");
auto& us = *this;
if (transposeAColumn)
{
// find nrows and ncols of the reshaped a
long nrows = rowsB;
long ncols = rowsC;
#ifdef __INTEL_COMPILER // TODO: check this
#pragma simd statement
#endif
#pragma omp parallel for
foreach_column (t, a)
{
// 'k' walks down column t of 'a' linearly; because the column is
// traversed in (j outer, i inner) order this realizes the
// transposed-reshape product without materializing the reshape.
size_t k = 0;
for (size_t j = 0; j < ncols; j++) // row and col is transposed
{
ElemType v = 0;
for (size_t i = 0; i < nrows; i++)
{
v += a(k, t) * b(i, t);
k++;
}
us(j, t) += v;
}
}
}
else
{
size_t ncols = rowsB;
size_t nrows = rowsC;
#ifdef __INTEL_COMPILER // TODO: check this
#pragma simd statement
#endif
#pragma omp parallel for
foreach_column (t, a)
{
// Non-transposed case: each contiguous run of 'nrows' entries of
// column t of 'a' is scaled by one entry of b and accumulated.
size_t k = 0;
for (size_t j = 0; j < ncols; j++)
{
for (size_t i = 0; i < nrows; i++)
{
us(i, t) += a(k, t) * b(j, t);
k++;
}
}
}
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddWithScaleOf(ElemType alpha, const CPUMatrix<ElemType>& a)
{
    // this += alpha * a, delegated to the static ScaleAndAdd helper.
    ScaleAndAdd(alpha, a, *this);
    return *this;
}
template <class ElemType>
ElemType CPUMatrix<ElemType>::FrobeniusNorm() const
{
    // Square root of the sum of squares over all elements.
    if (IsEmpty())
        LogicError("FrobeniusNorm: Matrix is empty.");
    ElemType sumSq = 0;
    const long total = (long) GetNumElements();
    ElemType* p = Data();
    // Main loop consumes four elements per iteration (manual unrolling) under
    // an OpenMP sum reduction; the serial tail loop picks up the remainder.
#pragma omp parallel for reduction(+ : sumSq)
    for (long i = 0; i < (total & ~3); i += 4)
    {
        sumSq += p[i] * p[i] + p[i + 1] * p[i + 1] + p[i + 2] * p[i + 2] + p[i + 3] * p[i + 3];
    }
    for (long i = total & ~3; i < total; i++)
    {
        sumSq += p[i] * p[i];
    }
    return sqrt(sumSq);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignFrobeniusNormOf(const CPUMatrix<ElemType>& a)
{
    // Resizes this matrix to 1x1 and stores the Frobenius norm of 'a' in it.
    if (a.IsEmpty())
        LogicError("AssignFrobeniusNormOf: Matrix a is empty.");
    RequireSize(1, 1);
    (*this)(0, 0) = a.FrobeniusNorm();
    return *this;
}
template <class ElemType>
ElemType CPUMatrix<ElemType>::MatrixNormInf() const
{
    // Maximum absolute value over all elements (entry-wise infinity norm).
    if (IsEmpty())
        LogicError("MatrixNormInf: Matrix is empty.");
    auto& us = *this;
    ElemType v = 0;
    // The previous implementation entered an 'omp critical' section for every
    // element, which serialized the whole parallel loop. Instead, each thread
    // tracks its own maximum and merges it into 'v' exactly once.
#pragma omp parallel
    {
        ElemType localMax = 0;
#pragma omp for nowait
        for (long j = 0; j < (long) us.GetNumCols(); j++)
        {
            for (long i = 0; i < (long) us.GetNumRows(); i++)
            {
                localMax = std::max(localMax, fabs_(us(i, j)));
            }
        }
#pragma omp critical
        {
            v = std::max(v, localMax);
        }
    }
    return v;
}
template <class ElemType>
ElemType CPUMatrix<ElemType>::MatrixNorm0() const
{
    // Number of non-zero elements (L0 "norm"), returned as ElemType.
    if (IsEmpty())
        LogicError("MatrixNorm0: Matrix is empty.");
    auto& us = *this;
    ElemType v = 0;
    // A sum reduction replaces the former per-element 'omp critical' section,
    // which serialized the counting; this also matches MatrixNorm1's style.
#pragma omp parallel for reduction(+ : v)
    foreach_coord (i, j, us)
    {
        if (us(i, j) != 0)
            ++v;
    }
    return v;
}
template <class ElemType>
ElemType CPUMatrix<ElemType>::MatrixNorm1() const
{
    // Sum of absolute values over all elements (entry-wise L1 norm).
    if (IsEmpty())
        LogicError("MatrixNorm1: Matrix is empty.");
    auto& us = *this;
    ElemType total = 0;
#pragma omp parallel for reduction(+ : total)
    foreach_coord (i, j, us)
    {
        total += abs(us(i, j));
    }
    return total;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSignOf(const CPUMatrix<ElemType>& a)
{
    // Writes sign(a) into this matrix: +1, -1, or 0 per element;
    // NaN entries are copied through unchanged.
    if (a.IsEmpty())
        LogicError("AssignSignOf: Matrix a is empty.");
    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
    foreach_column (j, us)
    {
        foreach_row (i, us)
        {
            const ElemType v = a(i, j);
            if (std::isnan(v))
                us(i, j) = v;              // propagate NaN as-is
            else if (v > 0)
                us(i, j) = (ElemType) 1;
            else if (v < 0)
                us(i, j) = (ElemType)(-1);
            else
                us(i, j) = (ElemType) 0;   // covers +0 and -0
        }
    }
    return us;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddSignOf(const CPUMatrix<ElemType>& a)
{
    // Adds sign(a) (+1, -1, or 0 per element) into this matrix;
    // NaN entries of 'a' overwrite (not add to) the corresponding entry.
    if (a.IsEmpty())
        LogicError("AddSignOf: Matrix a is empty.");
    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
    foreach_column (j, us)
    {
        foreach_row (i, us)
        {
            const ElemType v = a(i, j);
            if (std::isnan(v))
                us(i, j) = v;               // NaN replaces the destination value
            else if (v > 0)
                us(i, j) += (ElemType) 1;
            else if (v < 0)
                us(i, j) += (ElemType)(-1);
            else
                us(i, j) += (ElemType) 0;   // keep the += 0 so -0 destinations normalize as before
        }
    }
    return us;
}
// I decided to use CPUMatrix<ElemType>& maxIndexes instead of integer vector because the result may be used to do additional calculation
template <class ElemType>
void CPUMatrix<ElemType>::VectorMax(CPUMatrix<ElemType>& maxIndexes, CPUMatrix<ElemType>& maxValues, const bool isColWise, int topK) const
{
// Finds the top-K maximum values and their (row) indices per column when
// isColWise is true (outputs are topK x n), or the single maximum per row
// otherwise (row-wise supports topK == 1 only; outputs are m x 1).
if (IsEmpty())
LogicError("VectorMax: Matrix is empty.");
auto& us = *this;
const int m = (int) GetNumRows();
const int n = (int) GetNumCols();
if (topK > m)
InvalidArgument("VectorMax: TopK must be less or equal than the number of rows");
assert(m > 0 && n > 0); // converting from size_t to int may cause overflow
if (isColWise) // col-wise
{
maxValues.RequireSize(topK, n);
maxIndexes.RequireSize(topK, n);
if (topK == 1)
{
// Fast path: simple parallel argmax per column; keeps the FIRST
// occurrence on ties (strict '<' comparison).
#pragma omp parallel for
for (int j = 0; j < n; j++)
{
ElemType v = us(0, j);
size_t index = 0;
foreach_row (i, us)
{
if (v < us(i, j))
{
index = i;
v = us(i, j);
}
}
maxValues(0, j) = v;
maxIndexes(0, j) = (ElemType) index;
}
}
else
{
// General top-K path: sequential over columns (the shared 'indices'
// scratch vector would race under OpenMP), partial sort descending.
std::vector<int> indices(m);
const ElemType* curVal = Data();
ElemType* curIdx = maxIndexes.Data();
ElemType* curMax = maxValues.Data();
for (int icol = 0; icol < n; icol++, curVal += m, curIdx += topK, curMax += topK)
{
std::iota(indices.begin(), indices.end(), 0);
// Partial sort, descending order.
std::partial_sort(indices.begin(), indices.begin() + topK, indices.end(),
[curVal](const int& a, const int& b)
{
return curVal[a] > curVal[b];
});
// REVIEW alexeyk: the following produces warning (see SCL_SECURE_NO_WARNINGS) so use loop instead.
// std::transform(indices.begin(), indices.begin() + topK, curIdx, [](const int& a) { return static_cast<ElemType>(a); });
for (int i2 = 0; i2 < topK; i2++)
{
curIdx[i2] = static_cast<ElemType>(indices[i2]);
curMax[i2] = curVal[indices[i2]];
}
}
}
}
else
{
if (topK > 1)
RuntimeError("Row-wise TopK max is not supported.");
maxValues.RequireSize(m, 1);
maxIndexes.RequireSize(m, 1);
#pragma omp parallel for
for (int i = 0; i < m; i++)
{
ElemType v = us(i, 0);
size_t index = 0;
foreach_column (j, us)
{
if (v < us(i, j))
{
index = j;
v = us(i, j);
}
}
maxValues(i, 0) = v;
maxIndexes(i, 0) = (ElemType) index;
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::VectorMin(CPUMatrix<ElemType>& minIndexes, CPUMatrix<ElemType>& minValues, const bool isColWise) const
{
    // Per-column (isColWise) or per-row minimum value together with the index
    // of its FIRST occurrence (strict comparison keeps the earliest tie).
    if (IsEmpty())
        LogicError("VectorMin: Matrix is empty.");
    auto& us = *this;
    const int m = (int) GetNumRows();
    const int n = (int) GetNumCols();
    assert(m > 0 && n > 0); // converting from size_t to int may cause overflow
    if (isColWise) // col-wise
    {
        minValues.RequireSize(1, n);
        minIndexes.RequireSize(1, n);
#pragma omp parallel for
        for (int j = 0; j < n; j++)
        {
            ElemType best = us(0, j);
            size_t bestIdx = 0;
            foreach_row (i, us)
            {
                if (us(i, j) < best)
                {
                    bestIdx = i;
                    best = us(i, j);
                }
            }
            minValues(0, j) = best;
            minIndexes(0, j) = (ElemType) bestIdx;
        }
    }
    else
    {
        minValues.RequireSize(m, 1);
        minIndexes.RequireSize(m, 1);
#pragma omp parallel for
        for (int i = 0; i < m; i++)
        {
            ElemType best = us(i, 0);
            size_t bestIdx = 0;
            foreach_column (j, us)
            {
                if (us(i, j) < best)
                {
                    bestIdx = j;
                    best = us(i, j);
                }
            }
            minValues(i, 0) = best;
            minIndexes(i, 0) = (ElemType) bestIdx;
        }
    }
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignNumOfDiff(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, bool searchInCol)
{
    // Counts differences between 'a' and 'b' and stores the count in this 1x1 matrix.
    //   searchInCol == false: element-wise inequality count (same shape required).
    //   searchInCol == true : for each column j, count 1 if a(0, j) occurs nowhere
    //                         in column j of 'b'.
    // NOTE: switched from raw 'throw std::invalid_argument' to the project's
    // InvalidArgument helper for consistency with the rest of this file.
    if (a.GetNumCols() != b.GetNumCols())
        InvalidArgument("AssignNumOfDiff: a and b must have the same number of columns.");
    if (!searchInCol && a.GetNumRows() != b.GetNumRows())
        InvalidArgument("AssignNumOfDiff: a and b must have the same number of rows.");
    ElemType n = 0;
    if (!searchInCol)
    {
        foreach_coord (i, j, a)
        {
            n += (a(i, j) != b(i, j));
        }
    }
    else
    {
        size_t crow = b.GetNumRows();
        const ElemType* curCol = b.Data();
        for (size_t icol = 0; icol < a.GetNumCols(); icol++, curCol += crow)
        {
            auto res = std::find(curCol, curCol + crow, a(0, icol));
            if (res == curCol + crow)
                n++;
        }
    }
    RequireSize(1, 1); // result should be one element
    (*this)(0, 0) = n;
    return *this;
}
#pragma endregion Member BLAS Functions
#pragma region Other helper Functions
struct PrintRange
{
// Describes which indices of one dimension to print: the ranges
// [begin, skipBegin) and [skipEnd, end), with a "..." gap between them.
// skipBegin == end means a single contiguous range (no gap).
size_t begin;
size_t skipBegin;
size_t skipEnd;
size_t end;
bool IsEmpty() const { return end <= begin; }
// examples:
// * 3..10: a single range of indices 3 through 10 inclusive
// * -3..-3: include end-3..end and 0..3 (first and last three entries)
PrintRange(ptrdiff_t first, ptrdiff_t last, size_t total)
{
if (first >= 0 && last >= 0)
{
// Both bounds non-negative: one contiguous range, clamped to 'total'.
begin = (size_t)first;
end = (size_t)last + 1;
if (end > total) // allow INT_MAX, meaning to end
end = total;
skipBegin = end;
skipEnd = end;
}
else if (first < 0 && last < 0)
{
// Both bounds negative: print the leading (-last) entries and the
// trailing (-first) entries, skipping the middle.
begin = 0;
skipBegin = (size_t)(-last);
skipEnd = (size_t)(total + first);
if (skipEnd <= skipBegin) // ranges overlap: print everything, no gap
skipBegin = skipEnd = total;
end = total;
}
else // if other combinations are ever of interest then implement them here
LogicError("Print: Bounds must be either both positive or both negative.");
}
};
// use negative ranges to print corners, e.g. Print("name", -3, -3, -3, -3) will print the first 3 and last 3 rows/cols
template <class ElemType>
void CPUMatrix<ElemType>::Print(const char* matrixName, ptrdiff_t rowFirst, ptrdiff_t rowLast, ptrdiff_t colFirst, ptrdiff_t colLast) const
{
// Header: name, full dimensions, and the requested sub-range (if not the whole matrix).
fprintf(stderr, "\n###### ");
if (matrixName != nullptr)
fprintf(stderr, "%s ", matrixName);
fprintf(stderr, "(%lu, %lu)", (unsigned long)GetNumRows(), (unsigned long)GetNumCols());
if (rowFirst != 0 || colFirst != 0 || (size_t)(rowLast + 1) != GetNumRows() || (size_t)(colLast + 1) != GetNumCols())
fprintf(stderr, " [%ld:%ld, %ld:%ld]", (long)rowFirst, (long)rowLast, (long)colFirst, (long)colLast);
fprintf(stderr, " ######\n\n");
// Empty matrix must bail out before PrintRange construction (which would
// reject the mixed-sign bounds produced by size 0).
if (IsEmpty())
{
fprintf(stderr, "(empty)\n");
return;
}
PrintRange rowRange(rowFirst, rowLast, GetNumRows());
PrintRange colRange(colFirst, colLast, GetNumCols());
if (rowRange.IsEmpty() || colRange.IsEmpty())
{
fprintf(stderr, "(empty)\n");
return;
}
const auto& us = *this;
if (rowRange.begin > 0)
fprintf(stderr, "...\n");
for (size_t i = rowRange.begin; i < rowRange.end; i++)
{
if (i == rowRange.skipBegin) // insert ... between the two blocks if any
{
fprintf(stderr, "...\n");
i = rowRange.skipEnd; // jump over the skipped rows; this row prints next
}
if (colRange.begin > 0) // ... at line start
fprintf(stderr, "...\t");
for (size_t j = colRange.begin; j < colRange.end; j++)
{
if (j == colRange.skipBegin)
{
fprintf(stderr, "...\t");
j = colRange.skipEnd; // jump over the skipped columns
}
fprintf(stderr, "%.10f\t", (double)us(i, j));
}
if (colRange.end < GetNumCols()) // ... at line end
fprintf(stderr, "...");
fprintf(stderr, "\n");
}
if (rowRange.end < GetNumRows())
fprintf(stderr, "...\n");
}
template <class ElemType>
void CPUMatrix<ElemType>::Print(const char* matrixName /*=nullptr*/) const
{
    // Convenience overload: print the entire matrix via the ranged Print.
    const ptrdiff_t lastRow = (ptrdiff_t) GetNumRows() - 1;
    const ptrdiff_t lastCol = (ptrdiff_t) GetNumCols() - 1;
    Print(matrixName, 0, lastRow, 0, lastCol);
}
// file I/O
// matrixName is used to verify that correct matrix is read.
template <class ElemType>
void CPUMatrix<ElemType>::ReadFromFile(FILE*, const char* /*matrixName*/)
{
// Deserialization is not implemented for CPUMatrix; always raises.
RuntimeError("not implemented.");
}
// matrixName is used to verify that correct matrix is read.
template <class ElemType>
void CPUMatrix<ElemType>::WriteToFile(FILE*, const char* /*matrixName*/)
{
// Serialization is not implemented for CPUMatrix; always raises.
RuntimeError("not implemented.");
}
// assume each column is an input sample. Each sample is stored in [channel, row, col] (r00, g00, b00, r01, g01, b01, r10, g10, b10, r11, g11, b11)
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignPackedConvolutionInput(const CPUMatrix<ElemType>& inputSubBatch,
const size_t inputWidth, const size_t inputHeight, const size_t inputChannels,
const size_t outputWidth, const size_t outputHeight, const size_t /*outputChannels*/,
const size_t kernelWidth, const size_t kernelHeight, const size_t horizontalSubsample, const size_t verticalSubsample,
const bool zeroPadding)
{
// im2col-style packing: every input element is replicated into each kernel
// position/output location it contributes to, so convolution becomes a GEMM.
// Result layout: (kernelW*kernelH*channels) x (outputW*outputH*batchSize).
if (verticalSubsample > kernelHeight || horizontalSubsample > kernelWidth)
LogicError("Arguments verticalSubsample (or horitzontalSubsample) must be less or equal than kernelHeight (or kernelWidth).");
const size_t packedInputRows = kernelWidth * kernelHeight * inputChannels;
const size_t packedInputColsPerSample = outputWidth * outputHeight; // output size per channel
const size_t inputDim = inputWidth * inputHeight * inputChannels;
const size_t smallBatchSize = inputSubBatch.GetNumCols();
const long inputHeightTimesChannel = (long) (inputHeight * inputChannels);
RequireSize(packedInputRows, packedInputColsPerSample * smallBatchSize);
// With zero padding, positions never written below must read as zero.
if (zeroPadding)
SetValue((ElemType) 0);
const long halfKernelWidth = (long) kernelWidth / 2;
const long halfKernelHeight = (long) kernelHeight / 2;
#pragma omp parallel for // each input element is copied to many places
for (long sample = 0; sample < smallBatchSize; sample++)
{
for (long id = 0; id < inputDim; id++)
{
// IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * inputChannels)
// IN_ELEM_COLPOS = sample
// Decompose the flat index back into (channel, row, col).
const long y = id / inputHeightTimesChannel;   // inputCol
const long nXC = id % inputHeightTimesChannel; // channel + inputRow*inputChannels
const long x = nXC / (long) inputChannels;     // inputRow
const long c = nXC % (long) inputChannels;     // channel
// (x0, y0): first output window that covers this input element;
// (x1, y1): this element's position within that first kernel window.
long x0 = 0, y0 = 0, x1 = 0, y1 = 0;
if (zeroPadding)
{
x0 = (long) max((ElemType)0, ceil((x - (ElemType)kernelHeight + 1.0f + halfKernelHeight) / (ElemType)verticalSubsample)); // row : first wrow in which x is in
x1 = (long) (x + halfKernelHeight - x0 * verticalSubsample); // first posxInKernel
y0 = (long) max((ElemType)0, ceil((y - (ElemType)kernelWidth + 1.0f + halfKernelWidth) / (ElemType)horizontalSubsample)); // col : first wcol in which y is in
y1 = (long) (y + halfKernelWidth - y0 * horizontalSubsample); // first posyInKernel
}
else
{
x0 = (long) max((ElemType)0, ceil((x - (ElemType)kernelHeight + 1) / (ElemType)verticalSubsample)); // row : first wrow in which x is in
x1 = (long) (x - x0 * verticalSubsample); // first posxInKernel
y0 = (long) max((ElemType)0, ceil((y - (ElemType)kernelWidth + 1) / (ElemType)horizontalSubsample)); // col : first wcol in which y is in
y1 = (long) (y - y0 * horizontalSubsample); // first posyInKernel
}
assert(x1 >= 0 && x1 < kernelHeight && y1 >= 0 && y1 < kernelWidth);
// PACK_ELEM_ROWPOS(channel, posxInKernel, posyInKernel) = (channel * kernelWidth * kernelHeight + posxInKernel + posyInKernel * kernelHeight)
// PACK_ELEM_COLPOS(sample, wrow, wcol) = (sample*packedInputColsPerSample + outputHeight*wcol + wrow
ElemType currentInputValue = inputSubBatch(id, sample);
long packColBase = (long) (sample * packedInputColsPerSample + y0 * outputHeight);
// Walk every output window (wrow, wcol) this element falls into; the
// in-kernel position decreases by the subsample stride each step.
for (long wcol = y0, posyInKernel = y1; wcol < (long) outputWidth && posyInKernel >= 0; wcol++, posyInKernel -= (long) horizontalSubsample)
{
long packRowBase = (long) (c * kernelWidth * kernelHeight + posyInKernel * kernelHeight);
for (long wrow = x0, posxInKernel = x1; wrow < (long) outputHeight && posxInKernel >= 0; wrow++, posxInKernel -= (long) verticalSubsample)
{
const long packRow = packRowBase + posxInKernel;
const long packCol = packColBase + wrow;
(*this)(packRow, packCol) = currentInputValue;
}
packColBase += (long) outputHeight;
}
}
}
return *this;
}
// assume each column is an input sample. Each sample is stored in [channel, row, col] (r00, g00, b00, r01, g01, b01, r10, g10, b10, r11, g11, b11)
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::UnpackConvolutionInput(CPUMatrix<ElemType>& inputSubBatch,
const size_t inputWidth, const size_t inputHeight, const size_t inputChannels,
const size_t outputWidth, const size_t outputHeight, const size_t /*outputChannels*/,
const size_t kernelWidth, const size_t kernelHeight, const size_t horizontalSubsample, const size_t verticalSubsample,
const bool zeroPadding) const
{
// Inverse of AssignPackedConvolutionInput (col2im): every packed copy of an
// input element is ACCUMULATED back into inputSubBatch (note the '+=' below),
// as needed for the convolution input gradient.
if (verticalSubsample > kernelHeight || horizontalSubsample > kernelWidth)
LogicError("Arguments verticalSubsample (or horizonSubsample) must be less than or equal to kernelHeight (or kernelWidth).");
const size_t packedInputColsPerSample = outputWidth * outputHeight; // output size per channel
const size_t inputDim = inputWidth * inputHeight * inputChannels;
const size_t smallBatchSize = inputSubBatch.GetNumCols();
const long inputHeightTimesChannel = (long) (inputHeight * inputChannels);
const long halfKernelWidth = (long) kernelWidth / 2;
const long halfKernelHeight = (long) kernelHeight / 2;
#pragma omp parallel for // each input element is copied to many places
for (long sample = 0; sample < smallBatchSize; sample++)
{
for (long id = 0; id < inputDim; id++)
{
// IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * inputChannels)
// IN_ELEM_COLPOS = sample
// Decompose the flat index back into (channel, row, col).
const long y = id / inputHeightTimesChannel;   // inputCol
const long nXC = id % inputHeightTimesChannel; // channel + inputRow*inputChannels
const long x = nXC / (long) inputChannels;     // inputRow
const long c = nXC % (long) inputChannels;     // channel
// (x0, y0): first output window covering this element; (x1, y1): its
// position inside that first kernel window (same math as the pack step).
long x0 = 0, y0 = 0, x1 = 0, y1 = 0;
if (zeroPadding)
{
x0 = (long) max((ElemType)0, ceil((x - (ElemType) kernelHeight + 1.0f + halfKernelHeight) / (ElemType) verticalSubsample)); // row : first wrow in which x is in
x1 = (long) (x + halfKernelHeight - x0 * verticalSubsample); // first posxInKernel
y0 = (long) max((ElemType)0, ceil((y - (ElemType) kernelWidth + 1.0f + halfKernelWidth) / (ElemType) horizontalSubsample)); // col : first wcol in which y is in
y1 = (long) (y + halfKernelWidth - y0 * horizontalSubsample); // first posyInKernel
}
else
{
x0 = (long) max((ElemType)0, ceil((x - (ElemType) kernelHeight + 1) / (ElemType) verticalSubsample)); // row : first wrow in which x is in
x1 = (long) (x - x0 * verticalSubsample); // first posxInKernel
y0 = (long) max((ElemType)0, ceil((y - (ElemType) kernelWidth + 1) / (ElemType) horizontalSubsample)); // col : first wcol in which y is in
y1 = (long) (y - y0 * horizontalSubsample); // first posyInKernel
}
assert(x1 >= 0 && x1 < kernelHeight && y1 >= 0 && y1 < kernelWidth);
// PACK_ELEM_ROWPOS(channel, posxInKernel, posyInKernel) = (channel * kernelWidth * kernelHeight + posxInKernel + posyInKernel * kernelHeight)
// PACK_ELEM_COLPOS(sample, wrow, wcol) = (sample*packedInputColsPerSample + outputHeight*wcol + wrow
ElemType currentInputValue = inputSubBatch(id, sample);
long packColBase = (long) (sample * packedInputColsPerSample + y0 * outputHeight);
// Sum the packed copies from every output window this element fed.
for (long wcol = y0, posyInKernel = y1; wcol < (long) outputWidth && posyInKernel >= 0; wcol++, posyInKernel -= (long) horizontalSubsample)
{
long packRowBase = (long) (c * kernelWidth * kernelHeight + posyInKernel * kernelHeight);
for (long wrow = x0, posxInKernel = x1; wrow < (long) outputHeight && posxInKernel >= 0; wrow++, posxInKernel -= (long) verticalSubsample)
{
const long packRow = packRowBase + posxInKernel;
const long packCol = packColBase + wrow;
currentInputValue += (*this)(packRow, packCol);
}
packColBase += (long) outputHeight;
}
inputSubBatch(id, sample) = currentInputValue;
}
}
return inputSubBatch;
}
// assume each column is an input sample. Each sample is stored in (r00, g00, b00, r01, g01, b01, r10, g10, b10, r11, g11, b11)
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignMaxPoolingResult(const CPUMatrix<ElemType>& inputBatch, const size_t channels,
                                                                 const size_t /*inputWidth*/, const size_t inputHeight, const size_t /*inputSizePerSample*/,
                                                                 const size_t /*outputWidth*/, const size_t outputHeight, const size_t outputSizePerSample,
                                                                 const size_t windowWidth, const size_t windowHeight, const size_t horizontalSubsample, const size_t verticalSubsample)
{
    // Max pooling: for every output position of every sample (column), writes
    // the maximum over its windowWidth x windowHeight input window.
    const long inputHeightTimesChannel = (long) (inputHeight * channels);
    const long outputHeightTimesChannel = (long) (outputHeight * channels);
    const size_t batchSize = inputBatch.GetNumCols();
    RequireSize(outputSizePerSample, batchSize);
    // IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * channels)
    // IN_ELEM_COLPOS = sample
    // OUT_ELEM_ROWPOS(channel, wrow, wcol) = (channel + (wrow + wcol * outputHeight) * channels)
    // OUT_ELEM_COLPOS = sample
#pragma omp parallel for
    for (long sample = 0; sample < (long) batchSize; sample++)
    {
        for (long outputIndexWithinSample = 0; outputIndexWithinSample < (long) outputSizePerSample; outputIndexWithinSample++)
        {
            // Decompose the flat output index into (channel, wrow, wcol).
            const long y = outputIndexWithinSample / outputHeightTimesChannel;   // wcol
            const long nXC = outputIndexWithinSample % outputHeightTimesChannel; // channel + wrow*channels
            const long x = (long) (nXC / channels);                              // wrow
            const long c = (long) (nXC % channels);                              // channel
            // NOTE: removed the unused minVal/std::min tracking the original
            // computed and then discarded for every window.
            ElemType maxVal = -FLT_MAX;
            const long rowInWindowBase = (long) ((x * verticalSubsample + y * horizontalSubsample * inputHeight) * channels + c);
            for (long colInWindow = 0; colInWindow < (long) windowWidth; colInWindow++)
            {
                long rowInInput = rowInWindowBase + colInWindow * inputHeightTimesChannel;
                for (long rowInWindow = 0; rowInWindow < (long) windowHeight; rowInWindow++)
                {
                    const ElemType val = inputBatch(rowInInput, sample); // pf[rowInWindow*channels];
                    maxVal = std::max(maxVal, val);
                    rowInInput += (long) channels;
                }
            }
            (*this)(outputIndexWithinSample, sample) = maxVal;
        }
    }
    return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddMaxPoolingGradient(const CPUMatrix<ElemType>& outputGradientBatch, const CPUMatrix<ElemType>& inputBatch, const CPUMatrix<ElemType>& outputBatch,
const size_t channels,
const size_t /*inputWidth*/, const size_t inputHeight, const size_t inputSizePerSample,
const size_t outputWidth, const size_t outputHeight, const size_t /*outputSizePerSample*/,
const size_t windowWidth, const size_t windowHeight, const size_t horizontalSubsample, const size_t verticalSubsample)
{
// Accumulates the max-pooling gradient into this matrix: for each input
// element, the output gradient of every window whose maximum equals that
// input value is added. NOTE: if several inputs in a window tie for the
// maximum, each of them receives the gradient (value-equality routing).
size_t batchSize = inputBatch.GetNumCols();
const long inputHeightTimesChannel = (long) (inputHeight * channels);
const long outputHeightTimesChannel = (long) (outputHeight * channels);
// IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * channels)
// IN_ELEM_COLPOS = sample
// OUT_ELEM_ROWPOS(channel, wrow, wcol) = (channel + (wrow + wcol * outputHeight) * channels)
// OUT_ELEM_COLPOS = sample
#pragma omp parallel for
for (long sample = 0; sample < batchSize; sample++)
{
for (long inputIndexWithinSample = 0; inputIndexWithinSample < inputSizePerSample; inputIndexWithinSample++)
{
// Decompose the flat input index into (channel, row, col).
const long y = inputIndexWithinSample / inputHeightTimesChannel; // col in input
const long nXC = inputIndexWithinSample % inputHeightTimesChannel; // channel + row*chanels
const long x = (long) (nXC / channels); // row in input
const long c = (long) (nXC % channels); // channel
// Range of output positions whose pooling window covers this input element.
long startOutX = (long) max((ElemType)0, ceil((x - (ElemType) windowHeight + 1) / (ElemType) verticalSubsample)); // inclusive start
long endOutX = (long) ((x / verticalSubsample < outputHeight - 1) ? x / verticalSubsample : outputHeight - 1); // inclusive end
long startOutY = (long) max((ElemType)0, ceil((y - (ElemType) windowWidth + 1) / (ElemType) horizontalSubsample)); // inclusive start
long endOutY = (long) ((y / horizontalSubsample < outputWidth - 1) ? y / horizontalSubsample : outputWidth - 1); // inclusive end
ElemType inputValue = inputBatch(inputIndexWithinSample, sample);
for (long outY = startOutY; outY <= endOutY; outY++)
{
for (long outX = startOutX; outX <= endOutX; outX++)
{
long outputIndex = (long) (outY * outputHeightTimesChannel + outX * channels + c);
// Route the gradient to this input only if it was the window's maximum.
if (inputValue == outputBatch(outputIndex, sample))
(*this)(inputIndexWithinSample, sample) += outputGradientBatch(outputIndex, sample);
}
}
}
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignAveragePoolingResult(const CPUMatrix<ElemType>& inputBatch, const size_t channels,
const size_t /*inputWidth*/, const size_t inputHeight, const size_t /*inputSizePerSample*/,
const size_t /*outputWidth*/, const size_t outputHeight, const size_t outputSizePerSample,
const size_t windowWidth, const size_t windowHeight, const size_t horizontalSubsample, const size_t verticalSubsample)
{
// Average pooling: for every output position of every sample (column), writes
// the mean over its windowWidth x windowHeight input window.
const long inputHeightTimesChannel = (long) (inputHeight * channels);
const long outputHeightTimesChannel = (long) (outputHeight * channels);
const size_t batchSize = inputBatch.GetNumCols();
const size_t windowSize = windowWidth * windowHeight;
RequireSize(outputSizePerSample, batchSize);
// IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * channels)
// IN_ELEM_COLPOS = sample
// OUT_ELEM_ROWPOS(channel, wrow, wcol) = (channel + (wrow + wcol * outputHeight) * channels)
// OUT_ELEM_COLPOS = sample
#pragma omp parallel for
for (long sample = 0; sample < batchSize; sample++)
{
for (long outputIndexWithinSample = 0; outputIndexWithinSample < outputSizePerSample; outputIndexWithinSample++)
{
// Decompose the flat output index into (channel, wrow, wcol).
const long y = outputIndexWithinSample / outputHeightTimesChannel; // wcol
const long nXC = outputIndexWithinSample % outputHeightTimesChannel; // channel + wrow*channels
const long x = (long) (nXC / channels); // wrow
const long c = (long) (nXC % channels); // channel
ElemType sum = 0;
const long rowInWindowBase = (long) ((x * verticalSubsample + y * horizontalSubsample * inputHeight) * channels + c);
for (long colInWindow = 0; colInWindow < windowWidth; colInWindow++)
{
long rowInInput = rowInWindowBase + colInWindow * inputHeightTimesChannel;
for (long rowInWindow = 0; rowInWindow < windowHeight; rowInWindow++)
{
sum += inputBatch(rowInInput, sample);
rowInInput += (long) channels; // step one row within the same channel
}
}
// Always divides by the full window size (no edge renormalization).
(*this)(outputIndexWithinSample, sample) = sum / windowSize;
}
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddAveragePoolingGradient(const CPUMatrix<ElemType>& outputGradientBatch,
const size_t channels,
const size_t /*inputWidth*/, const size_t inputHeight, const size_t inputSizePerSample,
const size_t outputWidth, const size_t outputHeight, const size_t /*outputSizePerSample*/,
const size_t windowWidth, const size_t windowHeight, const size_t horizontalSubsample, const size_t verticalSubsample)
{
// Accumulates the average-pooling gradient into this matrix: each input
// element receives 1/windowSize of the output gradient of every window
// that covers it.
size_t batchSize = outputGradientBatch.GetNumCols();
const long inputHeightTimesChannel = (long) (inputHeight * channels);
const long outputHeightTimesChannel = (long) (outputHeight * channels);
const long windowSize = (long) (windowWidth * windowHeight);
// IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * channels)
// IN_ELEM_COLPOS = sample
// OUT_ELEM_ROWPOS(channel, wrow, wcol) = (channel + (wrow + wcol * outputHeight) * channels)
// OUT_ELEM_COLPOS = sample
#pragma omp parallel for
for (long sample = 0; sample < batchSize; sample++)
{
for (long inputIndexWithinSample = 0; inputIndexWithinSample < inputSizePerSample; inputIndexWithinSample++)
{
// Decompose the flat input index into (channel, row, col).
const long y = inputIndexWithinSample / inputHeightTimesChannel; // col in input
const long nXC = inputIndexWithinSample % inputHeightTimesChannel; // channel + row*chanels
const long x = nXC / (long) channels; // row in input
const long c = nXC % (long) channels; // channel
// Range of output positions whose pooling window covers this input element.
long startOutX = (long) max((ElemType)0, ceil((x - (ElemType) windowHeight + 1) / (ElemType) verticalSubsample)); // inclusive start
long endOutX = (long) ((x / verticalSubsample < outputHeight - 1) ? x / (long) verticalSubsample : outputHeight - 1); // inclusive end
long startOutY = (long) max((ElemType)0, ceil((y - (ElemType) windowWidth + 1) / (ElemType) horizontalSubsample)); // inclusive start
long endOutY = (long) ((y / horizontalSubsample < outputWidth - 1) ? y / horizontalSubsample : outputWidth - 1); // inclusive end
for (long outY = startOutY; outY <= endOutY; outY++)
{
for (long outX = startOutX; outX <= endOutX; outX++)
{
long outputIndex = outY * outputHeightTimesChannel + outX * (long) channels + c;
// Integer division by windowSize happens after ElemType promotion
// of the gradient value, so no precision is lost here.
(*this)(inputIndexWithinSample, sample) += outputGradientBatch(outputIndex, sample) / windowSize;
}
}
}
}
return *this;
}
#pragma endregion Other Helper Functions
template <class ElemType>
// Forward convolution over a precomputed, run-length-encoded kernel geometry:
//   mpRowCol(row, 0)  - base input row for output row 'row'
//   mpRowIwht(row, 0) - base offset of that row's weights inside 'kernel'
//   mpRowRun(row, 0)  - start of the row's run descriptor inside 'runs'
//   runs layout       - [skip, size, 'size' input-row offsets, 'size' mask flags];
//                       a zero mask flag marks a position to skip (e.g. padding).
// One sample per column; the outer loop parallelizes over samples.
void CPUMatrix<ElemType>::ConvolutionForward(const CPUMatrix<ElemType>& kernel, const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIwht,
                                             const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& output) const
{
#pragma omp parallel for
    for (int64_t sample = 0; sample < (int64_t)output.GetNumCols(); sample++)
    {
        for (size_t row = 0; row < output.GetNumRows(); row++)
        {
            int colBase = mpRowCol(row, 0);
            int ivBase = mpRowIwht(row, 0);
            assert(0 <= colBase && colBase < GetNumRows());

            ElemType sum = 0;
            int i0 = mpRowRun(row, 0);
            int skip = runs(i0++, 0); // offset applied to the kernel-weight index below
            int size = runs(i0++, 0); // number of positions in this run
            int imask = i0 + size;    // mask flags follow the offsets
            for (int i = 0; i < size; i++)
            {
                if (runs(imask + i, 0) == 0) // masked-out position
                    continue;
                int dcol = runs(i0 + i, 0); // input-row offset relative to colBase
                assert(0 <= colBase + dcol && colBase + dcol < GetNumRows());
                sum += kernel.Data()[ivBase + skip + i] * (*this)(colBase + dcol, sample);
            }
            output(row, sample) = sum;
        }
    }
}
template <class ElemType>
// Backward convolution w.r.t. the input data: scatters each output gradient in
// *this back to the input positions that produced it, weighted by the kernel.
// See ConvolutionForward for the meaning of mpRowCol/mpRowIwht/mpRowRun/runs.
void CPUMatrix<ElemType>::ConvolutionBackwardData(const CPUMatrix<ElemType>& kernel, const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIwht,
                                                  const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& grad) const
{
    // Parallel over samples: each thread writes only its own column of 'grad',
    // so the += below cannot race across threads.
#pragma omp parallel for
    for (int64_t sample = 0; sample < (int64_t)GetNumCols(); sample++)
    {
        for (size_t row = 0; row < GetNumRows(); row++)
        {
            int colBase = mpRowCol(row, 0);
            int ivBase = mpRowIwht(row, 0);
            assert(0 <= colBase && colBase < grad.GetNumRows());

            ElemType curGrad = (*this)(row, sample);

            int i0 = mpRowRun(row, 0);
            int skip = runs(i0++, 0); // offset applied to the kernel-weight index below
            int size = runs(i0++, 0); // number of positions in this run
            int imask = i0 + size;    // mask flags follow the offsets
            for (int i = 0; i < size; i++)
            {
                if (runs(imask + i, 0) == 0) // masked-out position
                    continue;
                int dcol = runs(i0 + i, 0); // input-row offset relative to colBase
                assert(0 <= colBase + dcol && colBase + dcol < grad.GetNumRows());
                grad(colBase + dcol, sample) += curGrad * kernel.Data()[ivBase + skip + i];
            }
        }
    }
}
template <class ElemType>
// Accumulates the kernel (weight) gradient:
// kernelGrad[ivBase + skip + i] += outputGrad(row, sample) * in(colBase + dcol, sample),
// summed over all samples and output rows.
// See ConvolutionForward for the run-encoding of mpRowCol/mpRowIwht/mpRowRun/runs.
void CPUMatrix<ElemType>::ConvolutionBackwardKernel(const CPUMatrix<ElemType>& in, const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIwht,
                                                    const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& kernelGrad) const
{
    // Do NOT parallelize these loops!
    // Every sample (and many rows) accumulates into the same kernelGrad entries,
    // so parallelizing either loop would introduce data races on kernelGrad.
    for (size_t sample = 0; sample < GetNumCols(); sample++)
    {
        for (size_t row = 0; row < GetNumRows(); row++)
        {
            int colBase = mpRowCol(row, 0);
            int ivBase = mpRowIwht(row, 0);
            assert(0 <= colBase && colBase < in.GetNumRows());

            ElemType curGrad = (*this)(row, sample);

            int i0 = mpRowRun(row, 0);
            int skip = runs(i0++, 0); // offset applied to the kernel-gradient index below
            int size = runs(i0++, 0); // number of positions in this run
            int imask = i0 + size;    // mask flags follow the offsets
            for (int i = 0; i < size; i++)
            {
                if (runs(imask + i, 0) == 0) // masked-out position
                    continue;
                int dcol = runs(i0 + i, 0); // input-row offset relative to colBase
                assert(0 <= colBase + dcol && colBase + dcol < in.GetNumRows());
                kernelGrad.Data()[ivBase + skip + i] += curGrad * in(colBase + dcol, sample);
            }
        }
    }
}
template <class ElemType>
// Unrolls the convolution input (*this) into 'output' so the convolution can be
// computed as a dense matrix product: input element (colBase + dcol, sample) is
// copied to flat slot (row * batchSize + sample) * unrollCols + skip + i.
// See ConvolutionForward for the run-encoding of mpRowCol/mpRowRun/runs.
void CPUMatrix<ElemType>::UnrollConvolutionInput(size_t unrollCols, size_t mapOutSize, const CPUMatrix<int>& mpRowCol,
                                                 const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& output) const
{
    size_t batchSize = GetNumCols();

#pragma omp parallel for
    for (int64_t sample = 0; sample < (int64_t)batchSize; sample++)
    {
        for (size_t row = 0; row < mapOutSize; row++)
        {
            int colBase = mpRowCol(row, 0);
            assert(0 <= colBase && colBase < GetNumRows());

            int i0 = mpRowRun(row, 0);
            int skip = runs(i0++, 0); // offset into this row's unrolled slot range
            int size = runs(i0++, 0); // number of positions in this run
            int imask = i0 + size;    // mask flags follow the offsets
            for (int i = 0; i < size; i++)
            {
                if (runs(imask + i, 0) == 0) // masked-out position stays zero in 'output'
                    continue;
                int dcol = runs(i0 + i, 0); // input-row offset relative to colBase
                assert(0 <= colBase + dcol && colBase + dcol < GetNumRows());
                output.Data()[(row * batchSize + sample) * unrollCols + skip + i] = (*this)(colBase + dcol, sample);
            }
        }
    }
}
template <class ElemType>
// Unrolls the convolution output gradient (*this) into 'output' for computing the
// kernel gradient as a dense matrix product. runs(1, 0) holds the kernel size,
// which must split into mapInCount groups of kernelMapSize slots.
void CPUMatrix<ElemType>::UnrollConvolutionOutput(size_t unrollCols, size_t mapInCount, size_t mapOutCount, const CPUMatrix<int>& mpRowCol,
                                                  const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& output) const
{
    if (mpRowCol.GetNumRows() % mapOutCount != 0)
        InvalidArgument("The number of rows in mpRowCol must be multiple of mapOutCount.");
    size_t mapOutSize = mpRowCol.GetNumRows() / mapOutCount; // rows per output map
    size_t batchSize = GetNumCols();

    size_t kernelSize = runs(1, 0);
    if (kernelSize % mapInCount != 0)
        InvalidArgument("kernelSize must be multiple of mapInCount.");
    size_t kernelMapSize = kernelSize / mapInCount;

#pragma omp parallel for
    for (int64_t sample = 0; sample < (int64_t)GetNumCols(); sample++)
    {
        for (size_t row = 0; row < mapOutSize; row++)
        {
            int colBase = mpRowCol(row, 0);

            int i0 = mpRowRun(row, 0);
            int skip = runs(i0++, 0); // offset into the kernel slots for this row
            int size = runs(i0++, 0); // number of positions in this run
            int imask = i0 + size;    // mask flags follow the offsets
            // Only the first kernelMapSize positions belong to one input map.
            for (int i = 0; i < std::min(size, (int)kernelMapSize); i++)
            {
                if (runs(imask + i, 0) == 0) // masked-out position
                    continue;
                int dcol = runs(i0 + i, 0);
                size_t isrc = row;
                size_t idst = ((colBase + dcol) * batchSize + sample) * unrollCols + ((skip + i) % kernelMapSize) * mapOutCount;
                // Copy this position for every output map: maps lie mapOutSize apart
                // in the source rows and contiguously in the destination slots.
                for (size_t outMap = 0; outMap < mapOutCount; outMap++, isrc += mapOutSize)
                {
                    assert(isrc < GetNumElements());
                    assert(idst + outMap < output.GetNumElements());

                    output.Data()[idst + outMap] = (*this)(isrc, sample);
                }
            }
        }
    }
}
template <class ElemType>
// Unrolls the convolution input (*this) into 'output' in the layout needed for the
// kernel-gradient matrix product: input element (colBase + dcol, sample) goes to
// flat slot (skip + i) * (mapOutSize * batchSize) + row * batchSize + sample.
// See ConvolutionForward for the run-encoding of mpRowCol/mpRowRun/runs.
void CPUMatrix<ElemType>::UnrollConvolutionInputForKernelBackprop(size_t mapOutSize, const CPUMatrix<int>& mpRowCol,
                                                                  const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& output) const
{
    size_t batchSize = GetNumCols();
    size_t unrollCols = mapOutSize * batchSize; // stride between kernel slots in 'output'

#pragma omp parallel for
    for (int64_t sample = 0; sample < (int64_t)batchSize; sample++)
    {
        for (size_t row = 0; row < mapOutSize; row++)
        {
            int colBase = mpRowCol(row, 0);
            assert(0 <= colBase && colBase < GetNumRows());

            int i0 = mpRowRun(row, 0);
            int skip = runs(i0++, 0); // offset into the kernel slots for this row
            int size = runs(i0++, 0); // number of positions in this run
            int imask = i0 + size;    // mask flags follow the offsets
            for (int i = 0; i < size; i++)
            {
                if (runs(imask + i, 0) == 0) // masked-out position stays zero in 'output'
                    continue;
                int dcol = runs(i0 + i, 0); // input-row offset relative to colBase
                assert(0 <= colBase + dcol && colBase + dcol < GetNumRows());
                size_t idst = (skip + i) * unrollCols + row * batchSize + sample;
                assert(idst < output.GetNumElements());
                output.Data()[idst] = (*this)(colBase + dcol, sample);
            }
        }
    }
}
template <class ElemType>
// Max pooling: each output cell receives the maximum of the input values listed
// in the 'indices' table, whose offsets are relative to mpRowCol(row, 0).
void CPUMatrix<ElemType>::MaxPoolingForward(const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIndices, const CPUMatrix<int>& indices, CPUMatrix<ElemType>& output) const
{
#pragma omp parallel for
    for (int64_t sample = 0; sample < (int64_t)output.GetNumCols(); sample++)
    {
        for (size_t row = 0; row < output.GetNumRows(); row++)
        {
            const int baseCol = mpRowCol(row, 0);
            assert(0 <= baseCol && baseCol < GetNumRows());

            int idx = mpRowIndices(row, 0);
            const int windowCount = indices(idx++, 0); // window size precedes the offsets
            assert(windowCount > 0);

            // Start from -inf so any finite input wins the comparison.
            assert(std::numeric_limits<ElemType>::has_infinity);
            ElemType best = -std::numeric_limits<ElemType>::infinity();
            for (int i = 0; i < windowCount; i++)
            {
                const int offset = indices(idx + i, 0);
                assert(0 <= baseCol + offset && baseCol + offset < GetNumRows());
                best = std::max(best, (*this)(baseCol + offset, sample));
            }
            output(row, sample) = best;
        }
    }
}
template <class ElemType>
// Max-pooling backprop: routes each pooled gradient in *this to the FIRST input
// position inside its window whose value reaches the pooled maximum 'out'.
void CPUMatrix<ElemType>::MaxPoolingBackward(const CPUMatrix<ElemType>& out, const CPUMatrix<ElemType>& in,
                                             const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIndices, const CPUMatrix<int>& indices,
                                             CPUMatrix<ElemType>& grad, bool accumulateGradient) const
{
    if (!accumulateGradient)
        grad.SetValue((ElemType)0); // start from zero unless the caller accumulates

#pragma omp parallel for
    for (int64_t sample = 0; sample < (int64_t)GetNumCols(); sample++)
    {
        for (size_t row = 0; row < GetNumRows(); row++)
        {
            int colBase = mpRowCol(row, 0);
            assert(0 <= colBase && colBase < grad.GetNumRows());

            int i0 = mpRowIndices(row, 0);
            int size = indices(i0++, 0); // window size precedes the offsets
            assert(size > 0);
            ElemType g = (*this)(row, sample);   // incoming gradient for this output cell
            ElemType m = out(row, sample);       // pooled maximum from the forward pass
            for (int i = 0; i < size; i++)
            {
                int dcol = indices(i0 + i, 0);
                assert(0 <= colBase + dcol && colBase + dcol < grad.GetNumRows());
                if (in(colBase + dcol, sample) >= m)
                {
                    // NOTE(review): parallelism is over samples only and each thread
                    // writes its own 'grad' column, so this atomic looks defensive
                    // rather than required -- confirm before removing.
#pragma omp atomic
                    grad(colBase + dcol, sample) += g;
                    break; // credit only the first position that attains the max
                }
            }
        }
    }
}
// For each image, for each ROI, this function treats that ROI as an image
// and does max pooling so that it has output size pooledHeight x pooledWidth.
// It loops over each location in the output tensor, computes which ROI
// and image should populate that location, computes the subset of the image
// corresponding to the ROI and which pixels in that subset should go into the
// output location, then takes the max value over that window.
// src: Images [W x H x C x N]
// roiData: ROIs [4 x numROIs x N],
// dst: Pooled ROIs [PW x PH x C x numROIs x N]
// argmax: max positions [PW x PH x C x numROIs x N]
// spatialScale ratio of input feature map to the original image.
// where PW = Pooled Width, PH = Pooled Height, C = Channels, N = Batch Size
template <class ElemType>
// Max ROI pooling forward. For each image and each ROI, treats the ROI as an image
// and max-pools it down to pooledHeight x pooledWidth per channel, writing the
// maxima to 'output' and the (channel-relative) argmax positions to 'argmax'.
// roiData: [4 x numRois x numImg], each ROI is (x, y, w, h) in original-image
// coordinates; spatialScale maps them onto this feature map.
void CPUMatrix<ElemType>::MaxROIPoolingForward(const size_t numRois, const size_t numImg, const size_t channels, const size_t width, const size_t height,
                                               const size_t pooledWidth, const size_t pooledHeight, const CPUMatrix<ElemType>& roiData, CPUMatrix<ElemType>& output,
                                               CPUMatrix<ElemType>& argmax, double spatialScale) const
{
    size_t roiOutputSize = pooledHeight * pooledWidth * channels;

    // Parallelize over images only. The previous nested "#pragma omp parallel for"
    // directives (on the ROI and output-location loops) opened nested parallel
    // regions, which are disabled by default and therefore only created
    // single-thread teams and added overhead.
#pragma omp parallel for
    for (int imgIdx = 0; imgIdx < (int)numImg; imgIdx++)
    {
        auto img = ColumnSlice(imgIdx, 1);
        auto rois = roiData.ColumnSlice(imgIdx, 1);
        for (int roiIdx = 0; roiIdx < (int)numRois; roiIdx++)
        {
            // each ROI is 4 elements: (x, y, w, h).
            int base = roiIdx * 4;

            // roi points represent the absolute location of the roi in the original
            // image. The row indices are integral, so index directly instead of
            // round-tripping the constants through ElemType as before.
            ElemType scX1 = rois(base, 0);
            ElemType scY1 = rois(base + 1, 0);
            ElemType scX2 = rois(base + 2, 0);
            ElemType scY2 = rois(base + 3, 0);

            // compute actual spatial location of the ROI in our featuremap.
            size_t x1 = (size_t)round(scX1 * spatialScale);
            size_t y1 = (size_t)round(scY1 * spatialScale);
            size_t x2 = (size_t)round(scX2 * spatialScale);
            size_t y2 = (size_t)round(scY2 * spatialScale);

            // ROI extent, at least 1 pixel in each dimension.
            ElemType roiW = (ElemType)max(x2 - x1 + 1, (size_t)1);
            ElemType roiH = (ElemType)max(y2 - y1 + 1, (size_t)1);

            // Fractional pooling-window extent.
            const ElemType winW = roiW / (ElemType)pooledWidth;
            const ElemType winH = roiH / (ElemType)pooledHeight;

            // inspired by Ross Girshick fast-rcnn caffe cpu: https://github.com/rbgirshick/fast-rcnn
            // loop over spatial locations in output.
            for (int outw = 0; outw < (int)pooledWidth; outw++)
            {
                for (int outh = 0; outh < (int)pooledHeight; outh++)
                {
                    // compute the top left corner of the input
                    // spatial window corresponding to this output unit
                    size_t hstart = (size_t)floor(outh * winH);
                    size_t wstart = (size_t)floor(outw * winW);

                    // compute bottom right corner (not included)
                    size_t hend = (size_t)ceil((outh + 1) * winH);
                    size_t wend = (size_t)ceil((outw + 1) * winW);

                    // offset window based on ROI top left corner.
                    // these indices are into the input slice.
                    hstart = min(max(hstart + y1, (size_t)0), height);
                    wstart = min(max(wstart + x1, (size_t)0), width);
                    hend = min(max(hend + y1, (size_t)0), height);
                    wend = min(max(wend + x1, (size_t)0), width);

                    bool isempty = (hend <= hstart) || (wend <= wstart);

                    for (size_t c = 0; c < channels; c++)
                    {
                        // [W x H x C x R x N]; R = ROIs per image
                        size_t outputIdx = roiIdx * roiOutputSize + outw + outh * pooledWidth + c * pooledHeight * pooledWidth;
                        size_t maxidx = 0;
                        ElemType maxval = isempty ? (ElemType)0 : (ElemType)-FLT_MAX;
                        size_t baseIdx = c * height * width;

                        for (size_t h = hstart; h < hend; h++)
                        {
                            for (size_t w = wstart; w < wend; w++)
                            {
                                // stored argmax indices are relative to the current channel.
                                size_t dataIdx = w + h * width;
                                if (img(baseIdx + dataIdx, 0) > maxval)
                                {
                                    maxval = img(baseIdx + dataIdx, 0);
                                    maxidx = dataIdx;
                                }
                            }
                        }
                        output(outputIdx, imgIdx) = maxval;
                        argmax(outputIdx, imgIdx) = maxidx;
                    }
                }
            }
        }
    }
}
// This function loops over locations in the input to the ROIPoolingNode (image locations).
// It loops over the ROIs corresponding to that image, seeing which ones could contain the current location
// in their output. For each ROI, it checks the argmax data to see if that ROI indeed chose
// this pixel location as the maximum. If so, it increments the gradient term for the input location.
template <class ElemType>
// Max ROI pooling backprop. Loops over every input location of the feature map,
// finds the ROIs whose windows cover it, and adds the pooled gradient wherever the
// stored argmax says this location produced the forward maximum.
void CPUMatrix<ElemType>::MaxROIPoolingBackward(const size_t numRois, const size_t numImg, const size_t channels, const size_t width, const size_t height,
                                                const size_t pooledWidth, const size_t pooledHeight, const CPUMatrix<ElemType>& roiData, CPUMatrix<ElemType>& grad,
                                                CPUMatrix<ElemType>& argmax, double spatialScale) const
{
    // loop over images in the batch. Parallelize over images only: the previous
    // inner "#pragma omp parallel for" directives opened nested parallel regions,
    // which are disabled by default and added overhead without parallelism.
#pragma omp parallel for
    for (int imgIdx = 0; imgIdx < (int)numImg; imgIdx++)
    {
        // ROIs for this image. length 4*numRois;
        auto rois = roiData.ColumnSlice(imgIdx, 1).Data();
        // gradient values for all ROIs from this image. length numRois*pooledHeight*pooledWidth*channels;
        auto pooledGrad = ColumnSlice(imgIdx, 1).Data();
        auto argmaxCol = argmax.ColumnSlice(imgIdx, 1).Data();

        // loop over spatial locations in the image.
        for (int w = 0; w < (int)width; w++)
        {
            // BUGFIX: this loop previously iterated "h < width"; it must cover the
            // image HEIGHT (it skipped rows when height > width and indexed past
            // the image when height < width).
            for (int h = 0; h < (int)height; h++)
            {
                // loop over the ROIs seeing which ones contain this location.
                for (int roiN = 0; roiN < (int)numRois; roiN++)
                {
                    // each ROI is 4 elements: (x, y, w, h).
                    int roiOffset = roiN * 4;

                    // ROI data points represent the absolute location of the roi
                    // in the original image.
                    size_t roiStartW = (size_t)round(rois[roiOffset + 0] * spatialScale);
                    size_t roiStartH = (size_t)round(rois[roiOffset + 1] * spatialScale);
                    size_t roiEndW = (size_t)round(rois[roiOffset + 2] * spatialScale);
                    size_t roiEndH = (size_t)round(rois[roiOffset + 3] * spatialScale);

                    size_t roiWidth = max(roiEndW - roiStartW + 1, (size_t)1);
                    size_t roiHeight = max(roiEndH - roiStartH + 1, (size_t)1);

                    // skip this ROI if it doesn't contain the current input location.
                    const bool inROI = (w >= roiStartW && w < roiStartW + roiWidth &&
                                        h >= roiStartH && h < roiStartH + roiHeight);
                    if (!inROI)
                        continue;

                    ElemType winH = (ElemType)roiHeight / (ElemType)pooledHeight;
                    ElemType winW = (ElemType)roiWidth / (ElemType)pooledWidth;

                    // what pooled nodes in the output for this ROI could have pooled this input location?
                    size_t phstart = (size_t)((h - roiStartH) / winH);
                    size_t pwstart = (size_t)((w - roiStartW) / winW);
                    size_t phend = (size_t)(ceil((h - roiStartH + 1) / winH));
                    size_t pwend = (size_t)(ceil((w - roiStartW + 1) / winW));

                    phstart = min(max(phstart, (size_t)0), pooledHeight);
                    phend = min(max(phend, (size_t)0), pooledHeight);
                    pwstart = min(max(pwstart, (size_t)0), pooledWidth);
                    pwend = min(max(pwend, (size_t)0), pooledWidth);

                    for (size_t c = 0; c < channels; c++)
                    {
                        ElemType gradient = 0;
                        // [W x H x C x N]
                        size_t index = w + h*width + c*height*width;
                        // go right up to channel c of the current ROI.
                        size_t offset = (roiN * channels + c) * pooledWidth * pooledHeight;
                        const ElemType* offsetPoolGrad = pooledGrad + offset;
                        const ElemType* offsetArgmax = argmaxCol + offset;
                        for (size_t ph = phstart; ph < phend; ph++)
                        {
                            for (size_t pw = pwstart; pw < pwend; pw++)
                            {
                                if ((size_t)offsetArgmax[ph * pooledWidth + pw] == (w + h * width))
                                {
                                    gradient += offsetPoolGrad[ph * pooledWidth + pw];
                                }
                            }
                        }
                        // With parallelism only over images this column is owned by one
                        // thread; the atomic is kept as cheap insurance.
#pragma omp atomic
                        grad(index, imgIdx) += gradient;
                    }
                }
            }
        }
    }
}
template <class ElemType>
// Max unpooling: routes each pooled value in *this back to the input position that
// produced the forward maximum. The argmax is recomputed here from 'poolInput'
// (the first window position attaining the maximum wins).
void CPUMatrix<ElemType>::MaxUnpooling(const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIndices,
                                       const CPUMatrix<int>& indices, const CPUMatrix<ElemType>& poolInput,
                                       CPUMatrix<ElemType>& input) const
{
#pragma omp parallel for
    for (int64_t sample = 0; sample < (int64_t)GetNumCols(); sample++)
    {
        for (size_t row = 0; row < GetNumRows(); row++)
        {
            const int baseCol = mpRowCol(row, 0);
            assert(0 <= baseCol && baseCol < input.GetNumRows());

            int idx = mpRowIndices(row, 0);
            const int windowCount = indices(idx++, 0); // window size precedes the offsets
            assert(windowCount > 0);

            // Locate the first window position holding the maximum of poolInput.
            int bestPos = 0;
            ElemType bestVal = poolInput(baseCol + indices(idx, 0), sample);
            for (int i = 1; i < windowCount; i++)
            {
                const int offset = indices(idx + i, 0);
                assert(0 <= baseCol + offset && baseCol + offset < poolInput.GetNumRows());
                const ElemType val = poolInput(baseCol + offset, sample);
                if (val > bestVal)
                {
                    bestVal = val;
                    bestPos = i;
                }
            }

            const int bestOffset = indices(idx + bestPos, 0);
            assert(0 <= baseCol + bestOffset && baseCol + bestOffset < input.GetNumRows());
            input(baseCol + bestOffset, sample) = (*this)(row, sample);
        }
    }
}
template <class ElemType>
// Average pooling: each output cell is the mean of the input values its window
// covers. The divisor excludes padding unless poolIncludePad is set, in which case
// the full window size stored at indices(0, 0) is used.
void CPUMatrix<ElemType>::AveragePoolingForward(const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIndices, const CPUMatrix<int>& indices, CPUMatrix<ElemType>& output, const bool poolIncludePad) const
{
#pragma omp parallel for
    for (int64_t sample = 0; sample < (int64_t)output.GetNumCols(); sample++)
    {
        for (size_t row = 0; row < output.GetNumRows(); row++)
        {
            const int baseCol = mpRowCol(row, 0);
            assert(0 <= baseCol && baseCol < GetNumRows());

            int idx = mpRowIndices(row, 0);
            int count = indices(idx++, 0); // actual (non-padding) window elements
            assert(count > 0);

            ElemType total = 0;
            for (int i = 0; i < count; i++)
            {
                const int offset = indices(idx + i, 0);
                assert(0 <= baseCol + offset && baseCol + offset < GetNumRows());
                total += (*this)(baseCol + offset, sample);
            }

            // Switch the divisor to the full window size when padding counts.
            if (poolIncludePad)
                count = indices(0, 0);
            output(row, sample) = total / count;
        }
    }
}
template <class ElemType>
// Average-pooling backprop: distributes each pooled gradient evenly over the input
// positions of its window.
void CPUMatrix<ElemType>::AveragePoolingBackward(const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIndices, const CPUMatrix<int>& indices, CPUMatrix<ElemType>& grad, const bool poolIncludePad, bool accumulateGradient) const
{
    if (!accumulateGradient)
        grad.SetValue((ElemType)0); // start from zero unless the caller accumulates

#pragma omp parallel for
    for (int64_t sample = 0; sample < (int64_t)GetNumCols(); sample++)
    {
        for (size_t row = 0; row < GetNumRows(); row++)
        {
            const int baseCol = mpRowCol(row, 0);
            assert(0 <= baseCol && baseCol < grad.GetNumRows());

            int idx = mpRowIndices(row, 0);
            const int count = indices(idx++, 0); // actual (non-padding) window elements
            // Divide by the full window size (indices(0, 0)) when padding counts,
            // otherwise by the number of real elements.
            const int divisor = poolIncludePad ? indices(0, 0) : count;
            assert(divisor > 0);
            const ElemType share = (*this)(row, sample) / divisor;
            for (int i = 0; i < count; i++)
            {
                const int offset = indices(idx + i, 0);
                assert(0 <= baseCol + offset && baseCol + offset < grad.GetNumRows());
#pragma omp atomic
                grad(baseCol + offset, sample) += share;
            }
        }
    }
}
template <class ElemType>
template <class StatType>
// Inference-only batch normalization:
//   out = scale * (x - runMean) / sqrt(runVariance + epsilon) + bias
// In spatial mode (scale has fewer rows than *this) each statistic covers a block
// of spatialSize consecutive rows. Training on CPU is not implemented.
void CPUMatrix<ElemType>::BatchNormalizationForward(const CPUMatrix<StatType>& scale, const CPUMatrix<StatType>& bias, bool inferenceOnly, double expAvgFactor, double blendFactor,
                                                    CPUMatrix<StatType>& runMean, CPUMatrix<StatType>& runVariance, CPUMatrix<ElemType>& out, double epsilon,
                                                    CPUMatrix<StatType>& saveMean, CPUMatrix<StatType>& saveInvStdDev) const
{
    if (GetNumRows() % scale.GetNumRows() != 0)
        LogicError("The number of rows of this matrx must be multiple of the number of rows of the scale matrix.");

    if (!inferenceOnly || expAvgFactor != 0 || blendFactor != 1)
        RuntimeError("Batch normalization training on CPU is not yet implemented.");

    // Inference does not produce these outputs.
    saveMean.Resize(0, 0);
    saveInvStdDev.Resize(0, 0);

    const bool spatial = GetNumRows() != scale.GetNumRows();
    const size_t spatialSize = spatial ? GetNumRows() / scale.GetNumRows() : 1;

#pragma omp parallel for
    for (long icol = 0; icol < (long)out.GetNumCols(); icol++)
    {
        for (long irow = 0; irow < (long)out.GetNumRows(); irow++)
        {
            // In spatial mode a block of rows shares one statistics row.
            const size_t imap = spatial ? irow / spatialSize : (size_t)irow;
            const ElemType stdDev = sqrt(runVariance(imap, 0) + epsilon);
            out(irow, icol) = (ElemType)(scale(imap, 0) * ((*this)(irow, icol) - runMean(imap, 0)) / stdDev + bias(imap, 0));
        }
    }
}
template <class ElemType>
template <class StatType>
// Batch-normalization backprop is not supported on CPU: silence unused-parameter
// warnings, then fail loudly.
void CPUMatrix<ElemType>::BatchNormalizationBackward(const CPUMatrix<ElemType>& in, CPUMatrix<ElemType>& grad, const CPUMatrix<StatType>& scale, double blendFactor,
                                                     const CPUMatrix<StatType>& saveMean, const CPUMatrix<StatType>& saveInvStdDev,
                                                     CPUMatrix<StatType>& scaleGrad, CPUMatrix<StatType>& biasGrad) const
{
    UNUSED(in);
    UNUSED(grad);
    UNUSED(scale);
    UNUSED(blendFactor);
    UNUSED(saveMean);
    UNUSED(saveInvStdDev);
    UNUSED(scaleGrad);
    UNUSED(biasGrad);
    RuntimeError("Batch normalization training on CPU is not yet implemented.");
}
#pragma region Static BLAS Functions
/// <summary>Matrix-matrix multiply with col-major matrices (a and b may be transposed): c = alpha * op(a) * op(b) + beta*c</summary>
/// <param name="alpha">Scalar</param>
/// <param name="a">Input matrix</param>
/// <param name="transposeA">Whether matrix a is transposed</param>
/// <param name="b">Input matrix</param>
/// <param name="transposeB">Whether matrix b is transposed</param>
/// <param name="beta">Scalar</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
template <class ElemType>
void CPUMatrix<ElemType>::MultiplyAndWeightedAdd(ElemType alpha, const CPUMatrix<ElemType>& a, const bool transposeA, const CPUMatrix<ElemType>& b, const bool transposeB,
                                                 ElemType beta, CPUMatrix<ElemType>& c, shared_ptr<QuantizedMultiplier<ElemType>> pQuantizedMultiplier)
{
    // GEMM: c (m x n) = alpha * op(a) (m x k) * op(b) (l x n) + beta * c, routed to
    // cblas_?gemm unless a quantized multiplier is supplied.
    if (a.IsEmpty() || b.IsEmpty())
        return;

    int m, n, k, l;
    int lda, ldb, ldc;
    CBLAS_TRANSPOSE mklTransA;
    CBLAS_TRANSPOSE mklTransB;

    // Matrices are stored column-major, so the leading dimension of an operand is
    // its stored (untransposed) row count.
    if (transposeA)
    {
        m = (int) a.GetNumCols();
        k = (int) a.GetNumRows();
        lda = k;
        mklTransA = CBLAS_TRANSPOSE::CblasTrans;
    }
    else
    {
        m = (int) a.GetNumRows();
        k = (int) a.GetNumCols();
        lda = m;
        mklTransA = CBLAS_TRANSPOSE::CblasNoTrans;
    }

    if (transposeB)
    {
        l = (int) b.GetNumCols();
        n = (int) b.GetNumRows();
        ldb = n;
        mklTransB = CBLAS_TRANSPOSE::CblasTrans;
    }
    else
    {
        l = (int) b.GetNumRows();
        n = (int) b.GetNumCols();
        ldb = l;
        mklTransB = CBLAS_TRANSPOSE::CblasNoTrans;
    }

    assert(m > 0 && k > 0 && l > 0 && n > 0); // converting from size_t to int may cause overflow
    if (k != l)
        InvalidArgument("CPUMatrix<ElemType>::MultiplyAndWeightedAdd : The inner dimensions of a and b must match.");

    if (beta == 0)
        c.RequireSize(m, n); // beta == 0: c's old contents are discarded, so resizing is safe
    else
        c.VerifySize(m, n); // Can't resize if beta != 0

    ldc = (int) c.GetNumRows();

    if (pQuantizedMultiplier == nullptr)
    {
        // Dispatch on the element type to the matching BLAS precision.
        if (std::is_same<ElemType, double>::value)
        {
            cblas_dgemm((CBLAS_ORDER) (int)MatrixOrder::ColMajor, mklTransA, mklTransB, m, n, k, alpha, reinterpret_cast<double*>(a.Data()), lda, reinterpret_cast<double*>(b.Data()), ldb, beta, reinterpret_cast<double*>(c.Data()), ldc);
        }
        else if (std::is_same<ElemType, float>::value)
        {
#pragma warning(suppress : 4244)
            cblas_sgemm((CBLAS_ORDER) (int)MatrixOrder::ColMajor, mklTransA, mklTransB, m, n, k, alpha, reinterpret_cast<float*>(a.Data()), lda, reinterpret_cast<float*>(b.Data()), ldb, beta, reinterpret_cast<float*>(c.Data()), ldc);
        }
        else
        {
            RuntimeError("Unsupported data format");
        }
    }
    else
    {
        // TODO: support transpose product
        if (mklTransA == CBLAS_TRANSPOSE::CblasTrans || mklTransB == CBLAS_TRANSPOSE::CblasTrans)
            LogicError("Quantized multiplier currently doesn't support transpose.");

        pQuantizedMultiplier->Multiply(m, n, k, a.Data(), b.Data(), c.Data());
    }
}
template <class ElemType>
// c = (alpha * a) * b + beta * c, where 'a' is a 1x1 matrix used as a scalar.
void CPUMatrix<ElemType>::Multiply1x1AndWeightedAdd(ElemType alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b,
                                                    ElemType beta, CPUMatrix<ElemType>& c)
{
    if (a.GetNumElements() != 1)
        InvalidArgument("the argument a must be a scalar"); // a is a scalar

    const ElemType scaleFactor = alpha * a.Get00Element();
    if (beta == 0)
    {
        // beta == 0: skip reading c's (possibly uninitialized) memory entirely.
#pragma omp parallel for
        foreach_coord (i, j, c)
            c(i, j) = b(i, j) * scaleFactor;
    }
    else
    {
#pragma omp parallel for
        foreach_coord (i, j, c)
            c(i, j) = b(i, j) * scaleFactor + c(i, j) * beta;
    }
}
template <class ElemType>
// c(i, j) = alpha * a(i, j) * v[j] + beta * c(i, j), where 'v' is a vector holding
// one scale factor per column of 'a'.
void CPUMatrix<ElemType>::ColumnwiseScaleAndWeightedAdd(ElemType alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& v, ElemType beta, CPUMatrix<ElemType>& c)
{
    if (v.GetNumRows() != 1 && v.GetNumCols() != 1)
        InvalidArgument("the argument v must be a vector"); // v is a vector

    if (beta == 0)
        c.RequireSize(a.GetNumRows(), a.GetNumCols());
    else
        c.VerifySize(a.GetNumRows(), a.GetNumCols()); // resizing is illegal when beta != 0

    const ElemType* columnScales = v.Data();
    if (beta == 0)
    {
        // beta == 0: skip reading c's (possibly uninitialized) memory entirely.
#pragma omp parallel for
        foreach_coord(i, j, c)
            c(i, j) = alpha * a(i, j) * columnScales[j];
    }
    else
    {
#pragma omp parallel for
        foreach_coord(i, j, c)
            c(i, j) = alpha * a(i, j) * columnScales[j] + c(i, j) * beta;
    }
}
/* compute singular value decomposition as
A = U*SIGMA*VT
W is used as temp working memory
*/
template <class ElemType>
void CPUMatrix<ElemType>::SVD(const CPUMatrix<ElemType>& A, CPUMatrix<ElemType>& SIGMA, CPUMatrix<ElemType>& U, CPUMatrix<ElemType>& VT, CPUMatrix<ElemType>& W)
{
    // Full singular value decomposition A = U * SIGMA * VT via LAPACKE_?gesvd
    // ('A'/'A': all columns of U and all rows of VT are computed).
    // NOTE: gesvd overwrites its input; A's contents are destroyed by the call.
    if (A.IsEmpty())
        LogicError("SVD: input matrix is empty.");

    int info;
    int m, n, lda, ldu, ldvt;
    m = (int) A.GetNumRows();
    n = (int) A.GetNumCols();
    // NOTE(review): this statement has no effect; W (scratch memory per the header
    // comment above this function) is otherwise unused here.
    W.GetNumRows(); // W is used as temp working memory

    lda = m;
    ldu = m;
    ldvt = n;
    U.RequireSize(m, m);
    SIGMA.RequireSize(std::min(m, n), 1); // singular values, as a column vector
    VT.RequireSize(n, n);

#if CNTK_UWP
    RuntimeError("Error, LAPACKE_*gesvd is not supported for UWP.\n");
#else
    if (std::is_same<ElemType, double>::value)
    {
        // 'superb' receives the unconverged superdiagonal elements on failure.
        std::vector<double> superb(std::max(std::min(m, n) - 1, 1));
        info = LAPACKE_dgesvd((int) MatrixOrder::ColMajor, 'A', 'A', (int) m, (int) n, reinterpret_cast<double*>(A.Data()), (int) lda, reinterpret_cast<double*>(SIGMA.Data()),
            reinterpret_cast<double*>(U.Data()), (int) ldu, reinterpret_cast<double*>(VT.Data()), (int) ldvt, &superb[0]);
    }
    else if (std::is_same<ElemType, float>::value)
    {
        std::vector<float> superb(std::max(std::min(m, n) - 1, 1));
        info = LAPACKE_sgesvd((int) MatrixOrder::ColMajor, 'A', 'A', (int) m, (int) n, reinterpret_cast<float*>(A.Data()), (int) lda, reinterpret_cast<float*>(SIGMA.Data()),
            reinterpret_cast<float*>(U.Data()), (int) ldu, reinterpret_cast<float*>(VT.Data()), (int) ldvt, &superb[0]);
    }
    else
    {
        RuntimeError("Unsupported data format");
    }
#endif

    // info > 0 means ?gesvd did not converge (see LAPACK documentation).
    if (info > 0)
    {
        RuntimeError("The algorithm computing SVD failed to converge.\n");
    }
}
/// <summary>Matrix-matrix multiply with col-major matrices (a and b may be transposed): c = op(a) * op(b) + c</summary>
/// <param name="a">Input matrix</param>
/// <param name="transposeA">Whether matrix a is transposed</param>
/// <param name="b">Input matrix</param>
/// <param name="transposeB">Whether matrix b is transposed</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
template <class ElemType>
// Accumulating product: c += op(a) * op(b), i.e. GEMM with alpha = beta = 1.
void CPUMatrix<ElemType>::MultiplyAndAdd(const CPUMatrix<ElemType>& a, const bool transposeA, const CPUMatrix<ElemType>& b, const bool transposeB,
                                         CPUMatrix<ElemType>& c)
{
    CPUMatrix<ElemType>::MultiplyAndWeightedAdd(1.0, a, transposeA, b, transposeB, 1.0, c);
}
template <class ElemType>
// Sums softmax(instance, label) over the batch, where row 0 of *this holds the
// label index for each column (instance), and stores the negated sum in c(0, 0).
void CPUMatrix<ElemType>::AssignSoftmaxSum(const CPUMatrix<ElemType>& softmax, CPUMatrix<ElemType>& c)
{
    const size_t batchSize = GetNumCols();
    ElemType logLikelihoodSum = 0.0;
#pragma omp parallel for reduction(+ : logLikelihoodSum)
    for (int col = 0; col < (int)batchSize; col++)
    {
        const int label = (int)(*this)(0, col);
        logLikelihoodSum += softmax(col, label);
    }
    c(0, 0) = -logLikelihoodSum;
}
template <class ElemType>
void CPUMatrix<ElemType>::AssignNCEUnnormalizedEval(const CPUMatrix<ElemType>& a,
                                                    const CPUMatrix<ElemType>& b, const CPUMatrix<ElemType>& bias, CPUMatrix<ElemType>& c)
//this: samples+probs
// a: hidden
// b: embedding
// tmp: softmax
// c: loglikelihood
{
    // Sums the unnormalized score
    //   score(instance) = bias(sample, 0) + b(:, sample) . a(:, instance)
    // over the batch and stores the negated total in c(0, 0).
    ElemType log_likelihood = 0.0;
    size_t batch_size = GetNumCols();
#pragma omp parallel for reduction(+ : log_likelihood)
    for (int instance_id = 0; instance_id < batch_size; instance_id++)
    {
        // NOTE(review): the stored id is negated before use (compare AssignSoftmaxSum,
        // which reads it directly) -- presumably ids are stored negative in this
        // matrix; confirm against the code that fills it.
        int sample = -(int) (*this)(0, instance_id);
        ElemType score = bias(sample, 0);
        for (int dim = 0; dim < b.GetNumRows(); dim++)
            score += b(dim, sample) * a(dim, instance_id);
        log_likelihood += score;
    }
    c(0, 0) = -log_likelihood;
}
//samples+prob gradient hidden embedding embedding/hidden
//a.m_CPUMatrix->AssignNCEDerivative(*tmp.m_CPUMatrix, *a.m_CPUMatrix, *b.m_CPUMatrix, inputIndex, *c.m_CPUMatrix);
template <class ElemType>
// Computes NCE gradients. *this packs (sample id, log prob) pairs per instance
// (2 * sample_size rows); 'tmp' holds the per-sample weights produced by
// AssignNoiseContrastiveEstimation. inputIndex selects the gradient target in c:
//   1 - w.r.t. the hidden activations (one column per instance)
//   2 - w.r.t. the embedding matrix (one column per vocabulary sample)
//   3 - w.r.t. the bias (row 0, one entry per vocabulary sample)
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignNCEDerivative(const CPUMatrix<ElemType>& tmp, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, size_t inputIndex, CPUMatrix<ElemType>& c)
{
    size_t sample_size = GetNumRows() / 2;
    size_t batch_size = GetNumCols();
    if (inputIndex == 1)
    {
        // Each thread owns one instance column of c, so no synchronization is needed.
#pragma omp parallel for
        for (int instance_id = 0; instance_id < (int)batch_size; instance_id++)
            for (int sample_id = 0; sample_id < (int)sample_size; sample_id++)
            {
                int sample = (int)(*this)(2 * sample_id, instance_id);
                for (int dim = 0; dim < (int)b.GetNumRows(); dim++)
                    c(dim, instance_id) -= b(dim, sample) * tmp(sample_id, instance_id);
            }
    }
    else if (inputIndex == 2)
    {
        // BUGFIX: omp_get_num_threads() returns 1 outside a parallel region, which
        // collapsed the intended per-thread blocking to a constant 16 blocks.
        // omp_get_max_threads() reports the team size the region below will use.
        // Results are unchanged -- each sample column is still handled by exactly
        // one block (sample % i_blocks == ib) -- only load distribution improves.
        int i_blocks = omp_get_max_threads() * 16;

        // Assume only one block in k direction.
        // We don't need to explicitly block in the j direction.
#pragma omp parallel for
        for (int ib = 0; ib < i_blocks; ib++)
            for (int instance_id = 0; instance_id < (int)batch_size; instance_id++)
                for (int sample_id = 0; sample_id < (int)sample_size; sample_id++)
                {
                    int sample = (int)(*this)(2 * sample_id, instance_id);
                    if (sample % i_blocks == ib) // this block owns the sample's column
                        for (int dim = 0; dim < (int)b.GetNumRows(); dim++)
                            c(dim, sample) -= a(dim, instance_id) * tmp(sample_id, instance_id);
                }
    }
    else if (inputIndex == 3)
    {
        // Serial: several instances may accumulate into the same sample entry.
        for (int instance_id = 0; instance_id < (int)batch_size; instance_id++)
            for (int sample_id = 0; sample_id < (int)sample_size; sample_id++)
            {
                int sample = (int)(*this)(2 * sample_id, instance_id);
                c(0, sample) -= tmp(sample_id, instance_id);
            }
    }
    else
        InvalidArgument("The argument inputIndex must be 1 or 2 or 3.");
    return *this;
}
template <class ElemType>
void CPUMatrix<ElemType>::AssignNoiseContrastiveEstimation(const CPUMatrix<ElemType>& a,
                                                           const CPUMatrix<ElemType>& b, const CPUMatrix<ElemType>& bias, CPUMatrix<ElemType>& tmp, CPUMatrix<ElemType>& c)
//this: samples+probs
// a: hidden
// b: embedding
// tmp: softmax
// c: loglikelihood
{
    // Noise-contrastive estimation objective. Rows of *this alternate (id, prob):
    // (*this)(2*s, i) is the sample id and (*this)(2*s + 1, i) the stored noise
    // probability for sample s of instance i; sample_id 0 is treated as the true
    // sample, the remaining (sample_size - 1) entries as noise samples.
    double log_likelihood = 0.0;
    size_t sample_size = GetNumRows() / 2;
    size_t batch_size = GetNumCols();
    size_t num_noise_samples = sample_size - 1;
    double log_num_noise_samples = std::log(num_noise_samples);
#pragma omp parallel for reduction(+ : log_likelihood)
    for (int instance_id = 0; instance_id < batch_size; instance_id++)
        for (int sample_id = 0; sample_id < sample_size; sample_id++)
        {
            int sample = (int) (*this)(2 * sample_id, instance_id);

            // Model score: bias + dot(hidden, embedding column).
            double score = bias(0, sample);
            for (int dim = 0; dim < b.GetNumRows(); dim++)
                score += (double)(a(dim, instance_id) * b(dim, sample));

            // NOTE(review): the stored probability is sign-flipped for noise samples
            // and flipped back for the true sample (sample_id == 0) -- confirm the
            // sign convention against the code that fills this matrix.
            double sample_prob = -(*this)(2 * sample_id + 1, instance_id);
            if (sample_id == 0)
                sample_prob = -sample_prob;
            // Noise score: log(k * Pn(sample)) with k noise samples.
            double score_noise = log_num_noise_samples + sample_prob;

            // Posterior of "came from the model" vs "came from noise".
            double z = LogAdd(score, score_noise);
            double logprob = score - z;
            double logprob_noise = score_noise - z;

            // Gradient weight: -P(model | sample), plus 1 for the true sample.
            tmp(sample_id, instance_id) = (ElemType) -std::exp(logprob);
            if (sample_id == 0)
                tmp(sample_id, instance_id) += 1;
            // True sample contributes logprob, noise samples logprob_noise.
            log_likelihood += sample_id == 0 ? logprob : logprob_noise;
        }

    c(0, 0) = (ElemType) -log_likelihood;
}
/// <summary>Matrix-matrix multiply with col-major matrices (a and b may be transposed): c = op(a) * op(b)</summary>
/// <param name="a">Input matrix</param>
/// <param name="transposeA">Whether matrix a is transposed</param>
/// <param name="b">Input matrix</param>
/// <param name="transposeB">Whether matrix b is transposed</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
template <class ElemType>
void CPUMatrix<ElemType>::Multiply(const CPUMatrix<ElemType>& a, const bool transposeA, const CPUMatrix<ElemType>& b, const bool transposeB,
                                   CPUMatrix<ElemType>& c)
{
    // c = op(a) * op(b): delegate to the general GEMM entry point with
    // alpha = 1 and beta = 0 (overwrite c, no accumulation).
    CPUMatrix<ElemType>::MultiplyAndWeightedAdd(1.0, a, transposeA, b, transposeB, 0.0, c);
}
/// <summary>Matrix-matrix multiply with col-major matrices (a and b are not transposed): c = a * b</summary>
/// <param name="a">Input matrix</param>
/// <param name="b">Input matrix</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
template <class ElemType>
void CPUMatrix<ElemType>::Multiply(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c)
{
    // Plain (non-transposed) product c = a * b, expressed through the
    // general weighted-add routine with alpha = 1, beta = 0.
    CPUMatrix<ElemType>::MultiplyAndWeightedAdd(1.0, a, false, b, false, 0.0, c);
}
/// <summary>Matrix-scalar multiply with col-major matrices: c = alpha * a + c</summary>
/// if a is a column vector, add to all columns of c
/// if a is a row vector, add to all rows of c
/// if a is a scalar, add to all rows of c
/// <param name="alpha">Scalar</param>
/// <param name="a">Input matrix</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
template <class ElemType>
void CPUMatrix<ElemType>::ScaleAndAdd(ElemType alpha, const CPUMatrix<ElemType>& a, CPUMatrix<ElemType>& c)
{
    // c += alpha * a, with broadcasting depending on the shape of a:
    //  - full matrix: elementwise axpy over the flat buffers (dims must match),
    //  - 1x1 scalar:  add alpha*a(0,0) to every element of c,
    //  - column vector: add to every column of c,
    //  - row vector:    add to every row of c.
    if (a.IsEmpty() || c.IsEmpty())
        LogicError("ScaleAndAdd: one of the input matrices is empty.");
    if (a.GetNumRows() != 1 && a.GetNumCols() != 1) // a is not a col or row vector
    {
        const int m = (int) a.GetNumRows();
        const int n = (int) a.GetNumCols();
        const int len = m * n;
        const int incx = 1;
        const int incy = 1;
        assert(m > 0 && n > 0 && len > 0); // converting from size_t to int may cause overflow
        if ((int) c.GetNumRows() != m || (int) c.GetNumCols() != n)
            InvalidArgument("Dimension of matrix c does not match dimension of matrix a.");
        // Dispatch on ElemType so the matching BLAS precision is called; the
        // reinterpret_cast is only reached in the branch where it is a no-op.
        if (std::is_same<ElemType, double>::value)
        {
            cblas_daxpy(len, alpha, reinterpret_cast<double*>(a.Data()), incx, reinterpret_cast<double*>(c.Data()), incy);
        }
        else if (std::is_same<ElemType, float>::value)
        {
#pragma warning(suppress : 4244)
            cblas_saxpy(len, alpha, reinterpret_cast<float*>(a.Data()), incx, reinterpret_cast<float*>(c.Data()), incy);
        }
        else
        {
            RuntimeError("Unsupported data format");
        }
    }
    else if (a.GetNumElements() == 1) // scalar, add to all elements
    {
        ElemType v = alpha * a(0, 0);
        long m = (long) c.GetNumRows(), n = (long) c.GetNumCols();
#pragma omp parallel for
        for (long j = 0; j < n; j++)
        {
            // four-way unrolling
            for (long i = 0; i < (m & ~3); i += 4)
            {
                c(i, j) += v;
                c(i + 1, j) += v;
                c(i + 2, j) += v;
                c(i + 3, j) += v;
            }
            // handle remaining stuffs
            for (long i = m & ~3; i < m; i++)
            {
                c(i, j) += v;
            }
        }
    }
    else if (a.GetNumCols() == 1) // col vector, add it to all columns
    {
        int m = (int) c.GetNumRows();
        if (m != (int) a.GetNumRows())
            InvalidArgument("To add column vector, rows should match.");
        ElemType* aBufPtr = a.Data();
        ElemType* cBufPtr = c.Data();
        if (std::is_same<ElemType, double>::value)
        {
            // One axpy per column; columns are independent, so this parallelizes.
#pragma omp parallel for
            foreach_column (j, c)
            {
                cblas_daxpy(m, alpha, reinterpret_cast<double*>(aBufPtr), 1, reinterpret_cast<double*>(cBufPtr + c.LocateColumn(j)), 1);
            }
        }
        else if (std::is_same<ElemType, float>::value)
        {
#pragma omp parallel for
            foreach_column (j, c)
            {
#pragma warning(suppress : 4244)
                cblas_saxpy(m, alpha, reinterpret_cast<float*>(aBufPtr), 1, reinterpret_cast<float*>(cBufPtr + c.LocateColumn(j)), 1);
            }
        }
        else
        {
            RuntimeError("Unsupported data format");
        }
    }
    else // row vector, add it to all rows
    {
        int m = (int) c.GetNumRows();
        int n = (int) c.GetNumCols();
        if (n != (int) a.GetNumCols())
            InvalidArgument("To add row vector, cols should match.");
        ElemType* aBufPtr = a.Data();
        ElemType* cBufPtr = c.Data();
        if (std::is_same<ElemType, double>::value)
        {
            // Stride m walks along one row of the col-major buffer of c.
#pragma omp parallel for
            foreach_row (i, c)
            {
                cblas_daxpy(n, alpha, reinterpret_cast<double*>(aBufPtr), 1, reinterpret_cast<double*>(cBufPtr + i), m);
            }
        }
        else if (std::is_same<ElemType, float>::value)
        {
#pragma omp parallel for
            foreach_row (i, c)
            {
#pragma warning(suppress : 4244)
                cblas_saxpy(n, alpha, reinterpret_cast<float*>(aBufPtr), 1, reinterpret_cast<float*>(cBufPtr + i), m);
            }
        }
        else
        {
            RuntimeError("Unsupported data format");
        }
    }
}
/// <summary>c += alpha * (a-b)</summary>
/// if a, b, c must have same dim
/// <param name="alpha">Scalar</param>
/// <param name="a">Input matrix</param>
/// <param name="b">Input matrix</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
template <class ElemType>
void CPUMatrix<ElemType>::AddScaledDifference(const ElemType alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c)
{
    // c += alpha * (a - b), elementwise; all three matrices must share a shape.
    if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumRows() == c.GetNumRows() &&
          a.GetNumCols() == b.GetNumCols() && a.GetNumCols() == c.GetNumCols()))
    {
        InvalidArgument("AddScaledDifference: a, b, and c must have same dimension.");
    }
    if (a.IsEmpty())
        LogicError("AddScaledDifference: Input matrix a is empty.");
    ElemType* pa = a.Data();
    ElemType* pb = b.Data();
    ElemType* pc = c.Data();
    const long count = (long) c.GetNumElements();
    // Treat the matrices as flat arrays; every element is independent, so the
    // loop parallelizes trivially.
#pragma omp parallel for
    for (long idx = 0; idx < count; idx++)
        pc[idx] += alpha * (pa[idx] - pb[idx]);
}
/// <summary> c = alpha * (a-b)</summary>
/// if a, b, c must have same dim
/// <param name="alpha">Scalar</param>
/// <param name="a">Input matrix</param>
/// <param name="b">Input matrix</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
template <class ElemType>
void CPUMatrix<ElemType>::AssignScaledDifference(const ElemType alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c)
{
    // c = alpha * (a - b), elementwise.
    if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols()))
    {
        InvalidArgument("AssignScaledDifference: a, b must have same dimension.");
    }
    if (a.IsEmpty())
        LogicError("AssignScaledDifference: Input matrix a is empty.");
    // Only resize the destination when it does not alias one of the inputs.
    if (&c != &a && &c != &b)
        c.RequireSize(a.GetNumRows(), a.GetNumCols());
    ElemType* pa = a.Data();
    ElemType* pb = b.Data();
    ElemType* pc = c.Data();
    const long count = (long) c.GetNumElements();
    // Flat, independent elementwise assignment — safe to parallelize.
#pragma omp parallel for
    for (long idx = 0; idx < count; idx++)
        pc[idx] = alpha * (pa[idx] - pb[idx]);
}
// c[ci,cj] += a[ai,aj]
template <class ElemType>
void CPUMatrix<ElemType>::AddElementToElement(ElemType beta, const CPUMatrix<ElemType>& a, const size_t ai, const size_t aj, CPUMatrix<ElemType>& c, const size_t ci, const size_t cj)
{
    // c(ci, cj) = beta * c(ci, cj) + a(ai, aj).
    // Validate both coordinates before touching any element.
    const bool aInRange = ai < a.GetNumRows() && aj < a.GetNumCols();
    const bool cInRange = ci < c.GetNumRows() && cj < c.GetNumCols();
    if (!aInRange || !cInRange)
        InvalidArgument("AddElementToElement: index out of range.");
    // When beta is zero, skip reading c(ci, cj): it may hold a NaN that would
    // survive even after multiplying by zero.
    ElemType result = beta ? beta * c(ci, cj) : (ElemType) 0;
    result += a(ai, aj);
    c(ci, cj) = result;
}
////c[ci,cj] += a[ai,aj]
//template<class ElemType>
//void CPUMatrix<ElemType>::AddLogElementToElement(const CPUMatrix<ElemType>& a, const size_t ai, const size_t aj, CPUMatrix<ElemType>& c, const size_t ci, const size_t cj)
//{
// if (ai >= a.GetNumRows() || aj >=a.GetNumCols() ||
// ci >= c.GetNumRows() || cj >=c.GetNumCols())
// InvalidArgument("AddElementToElement: index out of range.");
//
// ElemType v = a(ai,aj);
// c(ci, cj) += ((v < EPS_IN_LOG) ? LOG_OF_EPS_IN_LOG : log(v));
//}
#if 0 // now done as AddElementToElement (beta=0)
// c[ci,cj] = a[ai,aj]
template <class ElemType>
void CPUMatrix<ElemType>::AssignElementToElement(const CPUMatrix<ElemType>& a, const size_t ai, const size_t aj, CPUMatrix<ElemType>& c, const size_t ci, const size_t cj)
{
if (ai >= a.GetNumRows() || aj >= a.GetNumCols() ||
ci >= c.GetNumRows() || cj >= c.GetNumCols())
InvalidArgument("AssignElementToElement: index out of range.");
c(ci, cj) = a(ai, aj);
}
#endif
/// <summary>c += alpha * (a-b)</summary>
/// if a, b, c must have same dim
/// <param name="alpha">1X1 matrix</param>
/// <param name="a">Input matrix</param>
/// <param name="b">Input matrix</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
template <class ElemType>
void CPUMatrix<ElemType>::AddScaledDifference(const CPUMatrix<ElemType>& alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c)
{
    // Convenience overload: alpha carries the scalar inside a 1x1 matrix.
    if (alpha.GetNumElements() != 1)
        InvalidArgument("AddScaledDifference: alpha must be a 1X1 matrix.");
    // Unwrap the scalar and forward to the scalar overload.
    AddScaledDifference(alpha(0, 0), a, b, c);
}
/// <summary> c = alpha * (a-b)</summary>
/// if a, b, c must have same dim
/// <param name="alpha">1X1 matrix</param>
/// <param name="a">Input matrix</param>
/// <param name="b">Input matrix</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
template <class ElemType>
void CPUMatrix<ElemType>::AssignScaledDifference(const CPUMatrix<ElemType>& alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c)
{
    // c = alpha(0,0) * (a - b); alpha must be a 1x1 matrix holding the scalar.
    if (alpha.GetNumElements() != 1)
        InvalidArgument("AssignScaledDifference: alpha must be a 1X1 matrix."); // fixed: message previously named "AddScaledDifference"
    // Unwrap the scalar and forward to the scalar overload.
    AssignScaledDifference(alpha(0, 0), a, b, c);
}
/// <summary>Matrix-scalar multiply with col-major matrices: c = alpha * a</summary>
/// <param name="alpha">Scalar</param>
/// <param name="a">Input matrix</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
template <class ElemType>
/*static*/ void CPUMatrix<ElemType>::Scale(ElemType alpha, const CPUMatrix<ElemType>& a, CPUMatrix<ElemType>& c)
{
    // c = alpha * a; c is resized to a's shape.
    if (a.IsEmpty())
        LogicError("Scale: Input matrix a is empty.");
    const int numRows = (int) a.GetNumRows();
    const int numCols = (int) a.GetNumCols();
    assert(numRows > 0 && numCols > 0); // converting from size_t to int may cause overflow
    c.RequireSize(numRows, numCols);
    ElemType* src = a.Data();
    ElemType* dst = c.Data();
    if (alpha == 0)
    {
        // Scaling by zero: just clear the destination, no reads needed.
        memset(dst, 0, sizeof(ElemType) * c.GetNumElements());
        return;
    }
    const long count = (long) c.GetNumElements();
    // Flat, independent elementwise scaling — safe to parallelize.
#pragma omp parallel for
    for (long idx = 0; idx < count; idx++)
        dst[idx] = alpha * src[idx];
}
/// <summary>Matrix-scalar multiply with col-major matrices: a = alpha * a</summary>
/// <param name="alpha">Scalar</param>
/// <param name="a">Input matrix</param>
template <class ElemType>
/*static*/ void CPUMatrix<ElemType>::Scale(ElemType alpha, CPUMatrix<ElemType>& a)
{
    // In-place scaling a *= alpha via BLAS ?scal; alpha == 0 short-circuits to memset.
    if (a.IsEmpty())
        LogicError("Scale: Input matrix a is empty.");
    const int m = (int) a.GetNumRows();
    const int n = (int) a.GetNumCols();
    const int len = m * n;
    const int incx = 1;
    assert(m > 0 && n > 0 && len > 0); // converting from size_t to int may cause overflow
    // incx is a compile-time constant 1, so the second condition always holds;
    // the memset branch effectively fires for every alpha == 0.
    if (alpha == 0 && incx == 1)
    {
        memset(a.Data(), 0, sizeof(ElemType) * len);
    }
    else if (std::is_same<ElemType, double>::value)
    {
        cblas_dscal(len, alpha, reinterpret_cast<double*>(a.Data()), incx);
    }
    else if (std::is_same<ElemType, float>::value)
    {
#pragma warning(suppress : 4244)
        cblas_sscal(len, alpha, reinterpret_cast<float*>(a.Data()), incx);
    }
    else
    {
        RuntimeError("Unsupported data format");
    }
}
/// <summary>Matrix multiply with col-major matrices: a = alpha[1,1] * a</summary>
/// <param name="alpha">1x1 matrix</param>
/// <param name="a">Input matrix</param>
template <class ElemType>
/*static*/ void CPUMatrix<ElemType>::Scale(CPUMatrix<ElemType> alpha, CPUMatrix<ElemType>& a)
{
    // In-place a *= alpha(0,0); 'alpha' carries the scalar inside a 1x1 matrix.
    if (a.IsEmpty())
        LogicError("Scale: Input matrix a is empty.");
    if (alpha.GetNumElements() != 1)
        LogicError("Matrix alpha must be 1x1");
    // Unwrap the scalar and reuse the scalar overload.
    CPUMatrix<ElemType>::Scale(alpha(0, 0), a);
}
template <class ElemType>
void CPUMatrix<ElemType>::InnerProduct(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c, const bool isColWise)
{
    // Pairwise inner products between corresponding vectors of a and b:
    //   isColWise == true : c is 1 x n, c(0,j) = dot(a(:,j), b(:,j)).
    //   isColWise == false: c is m x 1, c(i,0) = dot(a(i,:), b(i,:)).
    if (a.IsEmpty() || b.IsEmpty())
        LogicError("InnerProduct: one of the input matrices is empty.");
    const int m = (int) a.GetNumRows();
    const int n = (int) a.GetNumCols();
    const int k = (int) b.GetNumRows();
    const int l = (int) b.GetNumCols();
    assert(m > 0 && n > 0 && k > 0 && l > 0); // converting from size_t to int may cause overflow
    if (m != k || n != l)
        InvalidArgument("InnerProduct: Matrices a and b should have same dimension.");
    if ((isColWise && m == 1) || !isColWise && n == 1) // in this case it's equivalent to element-wise product
    {
        c.AssignElementProductOf(a, b);
    }
    else if (isColWise) // col-wise
    {
        c.RequireSize(1, n);
        ElemType* aBufPtr = a.Data();
        ElemType* bBufPtr = b.Data();
        if (std::is_same<ElemType, double>::value)
        {
            // One BLAS dot product per column; columns are independent.
#pragma omp parallel for
            foreach_column (j, c)
            {
                c(0, j) = (ElemType) cblas_ddot(m, reinterpret_cast<double*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<double*>(bBufPtr + b.LocateColumn(j)), 1);
            }
        }
        else if (std::is_same<ElemType, float>::value)
        {
#pragma omp parallel for
            foreach_column (j, c)
            {
#pragma warning(suppress : 4244)
                c(0, j) = (ElemType) cblas_sdot(m, reinterpret_cast<float*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<float*>(bBufPtr + b.LocateColumn(j)), 1);
            }
        }
        else
        {
            RuntimeError("Unsupported data format");
        }
    }
    else
    {
        c.RequireSize(m, 1);
        ElemType* aBufPtr = a.Data();
        ElemType* bBufPtr = b.Data();
        if (std::is_same<ElemType, double>::value)
        {
            // Row vectors live at stride m in the col-major buffers.
#pragma omp parallel for
            foreach_row (i, c)
            {
                c(i, 0) = cblas_ddot(n, reinterpret_cast<double*>(aBufPtr + i), m, reinterpret_cast<double*>(bBufPtr + i), m);
            }
        }
        else if (std::is_same<ElemType, float>::value)
        {
#pragma omp parallel for
            foreach_row (i, c)
            {
#pragma warning(suppress : 4244)
                c(i, 0) = cblas_sdot(n, reinterpret_cast<float*>(aBufPtr + i), m, reinterpret_cast<float*>(bBufPtr + i), m);
            }
        }
        else
        {
            RuntimeError("Unsupported data format");
        }
    }
}
// treat matrices as vectors. do vec(a)^T vec(b)
template <class ElemType>
ElemType CPUMatrix<ElemType>::InnerProductOfMatrices(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
    // Treats both matrices as flat vectors and returns dot(vec(a), vec(b)).
    if (a.IsEmpty() || b.IsEmpty())
        LogicError("InnerProductOfMatrices: one of the input matrices is empty.");
    const int m = (int) a.GetNumRows();
    const int n = (int) a.GetNumCols();
    const int k = (int) b.GetNumRows();
    const int l = (int) b.GetNumCols();
    assert(m > 0 && n > 0 && k > 0 && l > 0); // converting from size_t to int may cause overflow
    if (m != k || n != l)
        InvalidArgument("InnerProductOfMatrices: Matrices a and b should have same dimension.");
    if (std::is_same<ElemType, double>::value)
    {
        return (ElemType) cblas_ddot((int) a.GetNumElements(), reinterpret_cast<double*>(a.Data()), 1, reinterpret_cast<double*>(b.Data()), 1);
    }
    else if (std::is_same<ElemType, float>::value)
    {
#pragma warning(suppress : 4244)
        return (ElemType) cblas_sdot((int) a.GetNumElements(), reinterpret_cast<float*>(a.Data()), 1, reinterpret_cast<float*>(b.Data()), 1);
    }
    else
    {
        // NOTE(review): this path has no return statement; it is only correct if
        // RuntimeError never returns (e.g. always throws) — confirm.
        RuntimeError("Unsupported data format");
    }
}
template <class ElemType>
void CPUMatrix<ElemType>::ElementWisePower(ElemType alpha, const CPUMatrix<ElemType>& a, CPUMatrix<ElemType>& c)
{
    // c(i,j) = a(i,j) ^ alpha, with fast paths for squaring and cubing that
    // avoid the general pow() call.
    if (a.IsEmpty())
        LogicError("ElementWisePower: The input matrix a is empty."); // fixed: message previously named "Scale"
    c.RequireSize(a.GetNumRows(), a.GetNumCols());
    if (alpha == 2)
    {
        // Fast path: x^2 as a single multiply.
#pragma omp parallel for
        foreach_coord (i, j, c)
        {
            c(i, j) = a(i, j) * a(i, j);
        }
    }
    else if (alpha == 3)
    {
        // Fast path: x^3 as two multiplies.
#pragma omp parallel for
        foreach_coord (i, j, c)
        {
            c(i, j) = a(i, j) * a(i, j) * a(i, j);
        }
    }
    else
    {
        // General case: per-element pow().
#pragma omp parallel for
        foreach_coord (i, j, c)
        {
            c(i, j) = pow(a(i, j), alpha);
        }
    }
}
template <class ElemType>
void CPUMatrix<ElemType>::BatchMatMul(ElemType beta, const CPUMatrix<ElemType>& a, const bool transposeA, const int m, const CPUMatrix<ElemType>& b, const bool transposeB, const int n, CPUMatrix<ElemType>& c, const bool isColWise)
{
    // Batched GEMM over the columns of a and b: each column of a holds one
    // (m x k) matrix and each column of b one (k x n) matrix, flattened
    // col-major; column i of c receives op(A_i) * op(B_i) + beta * C_i,
    // flattened to m*n rows. k is inferred from the row counts.
    if (a.IsEmpty() || b.IsEmpty())
        LogicError("BatchMatMul: one of the input matrices is empty.");
    if (!isColWise)
        LogicError("Only column wise is supported.");
    const int aSampleElemNum = (int)a.GetNumRows();
    const int aBatchSize = (int)a.GetNumCols();
    const int bSampleElemNum = (int)b.GetNumRows();
    const int bBatchSize = (int)b.GetNumCols();
    assert(aSampleElemNum > 0 && aBatchSize > 0 && bSampleElemNum > 0 && bBatchSize > 0);
    if (aBatchSize != bBatchSize)
        InvalidArgument("BatchMatMul: Matrices a and b should have same batch size.");
    // Shared inner dimension must agree between the two operands.
    int k = aSampleElemNum / m;
    int kb = bSampleElemNum / n;
    if (k != kb)
        InvalidArgument("BatchMatMul: Matrices a's cols number should match Matrices b's rows number.");
    size_t cSampleElemNum = m * n;
    if (beta == 0)
        c.RequireSize(cSampleElemNum, aBatchSize);
    else
        c.VerifySize(cSampleElemNum, aBatchSize); // Can't resize if beta != 0
#ifdef USE_OPENBLAS
    // OpenBLAS has no batched GEMM entry point, so issue one gemm per batch item.
    int lda, ldb, ldc;
    CBLAS_TRANSPOSE blasTransA;
    CBLAS_TRANSPOSE blasTransB;
    lda = transposeA ? k : m;
    ldb = transposeB ? n : k;
    blasTransA = transposeA ? CblasTrans : CblasNoTrans;
    blasTransB = transposeB ? CblasTrans : CblasNoTrans;
    ldc = m;
    std::vector<const ElemType *> a_array;
    std::vector<const ElemType *> b_array;
    std::vector<ElemType *> c_array;
    a_array.reserve(aBatchSize);
    b_array.reserve(aBatchSize);
    c_array.reserve(aBatchSize);
    ElemType* aBufPtr = a.Data();
    ElemType* bBufPtr = b.Data();
    ElemType* cBufPtr = c.Data();
    // Collect the start of each per-sample matrix (one per column).
    for (size_t i = 0; i < aBatchSize; i++)
    {
        a_array.push_back(aBufPtr + a.LocateColumn(i));
        b_array.push_back(bBufPtr + b.LocateColumn(i));
        c_array.push_back(cBufPtr + c.LocateColumn(i));
    }
    for (size_t i = 0; i < aBatchSize; i++)
    {
        // Precision dispatch via sizeof: double and float are the only ElemTypes used here.
        if (sizeof(ElemType) == sizeof(double))
        {
            double alpha = 1.0;
            cblas_dgemm((CBLAS_ORDER)(int)MatrixOrder::ColMajor, blasTransA, blasTransB, m, n, k, alpha, reinterpret_cast<const double*>(a_array[i]), lda, reinterpret_cast<const double*>(b_array[i]), ldb, double(beta), reinterpret_cast<double*>(c_array[i]), ldc);
        }
        else
        {
            float alpha = 1.0f;
            cblas_sgemm((CBLAS_ORDER)(int)MatrixOrder::ColMajor, blasTransA, blasTransB, m, n, k, alpha, reinterpret_cast<const float*>(a_array[i]), lda, reinterpret_cast<const float*>(b_array[i]), ldb, float(beta), reinterpret_cast<float*>(c_array[i]), ldc);
        }
    }
#else
    // Batched path (e.g. MKL): one cblas_?gemm_batch call with a single group
    // whose per-item parameters are all identical.
    std::vector<int> m_array(aBatchSize, m);
    std::vector<int> n_array(aBatchSize, n);
    std::vector<int> k_array(aBatchSize, k);
    std::vector<int> lda_array(aBatchSize, transposeA ? k : m);
    std::vector<int> ldb_array(aBatchSize, transposeB ? n : k);
    std::vector<int> ldc_array(aBatchSize, m);
    std::vector<int> group_size(1, aBatchSize);
    std::vector<CBLAS_TRANSPOSE> transa_array(aBatchSize, transposeA ? CblasTrans : CblasNoTrans);
    std::vector<CBLAS_TRANSPOSE> transb_array(aBatchSize, transposeB ? CblasTrans : CblasNoTrans);
    std::vector<const ElemType *> a_array;
    std::vector<const ElemType *> b_array;
    std::vector<ElemType *> c_array;
    a_array.reserve(aBatchSize);
    b_array.reserve(aBatchSize);
    c_array.reserve(aBatchSize);
    ElemType* aBufPtr = a.Data();
    ElemType* bBufPtr = b.Data();
    ElemType* cBufPtr = c.Data();
    // Collect the start of each per-sample matrix (one per column).
    for (size_t i = 0; i < aBatchSize; i++)
    {
        a_array.push_back(aBufPtr + a.LocateColumn(i));
        b_array.push_back(bBufPtr + b.LocateColumn(i));
        c_array.push_back(cBufPtr + c.LocateColumn(i));
    }
    if (sizeof(ElemType) == sizeof(double))
    {
        std::vector<double> alpha_array(group_size[0], 1.0);
        std::vector<double> beta_array(group_size[0], double(beta));
        cblas_dgemm_batch(CblasColMajor, &transa_array[0], &transb_array[0], &m_array[0], &n_array[0], &k_array[0], &alpha_array[0],
                          reinterpret_cast<const double**>(&a_array[0]), &lda_array[0], reinterpret_cast<const double**>(&b_array[0]), &ldb_array[0], &beta_array[0],
                          reinterpret_cast<double**>(&c_array[0]), &ldc_array[0], 1, &group_size[0]);
    }
    else
    {
        std::vector<float> alpha_array(group_size[0], 1.0f);
        std::vector<float> beta_array(group_size[0], float(beta));
        cblas_sgemm_batch(CblasColMajor, &transa_array[0], &transb_array[0], &m_array[0], &n_array[0], &k_array[0], &alpha_array[0],
                          reinterpret_cast<const float**>(&a_array[0]), &lda_array[0], reinterpret_cast<const float**>(&b_array[0]), &ldb_array[0], &beta_array[0],
                          reinterpret_cast<float**>(&c_array[0]), &ldc_array[0], 1, &group_size[0]);
    }
#endif
}
template <class ElemType>
bool CPUMatrix<ElemType>::AreEqual(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, const ElemType threshold /*= 1e-8*/)
{
    // Returns true when a and b have the same shape and every pair of elements
    // differs by no more than 'threshold'.
    if (a.GetNumRows() != b.GetNumRows() || a.GetNumCols() != b.GetNumCols())
        return false;
    bool result = true;
#pragma omp parallel for
    foreach_coord (i, j, a)
    {
        // 'break' cannot leave an OpenMP worksharing loop (the original code's
        // break only exited the inner loop of the macro), so once a mismatch is
        // seen we simply skip the remaining comparisons. Concurrent writes are
        // benign: result only ever transitions true -> false.
        if (result && abs(a(i, j) - b(i, j)) > threshold)
            result = false;
    }
    return result;
}
// see Matrix<ElemType>::TensorShuffleScaleAndAdd() for comments
template <class ElemType>
void CPUMatrix<ElemType>::TensorShuffleScaleAndAdd(ElemType keepWeight, const CPUMatrix<ElemType>& a, size_t D, size_t S, size_t M, size_t K, size_t T, ElemType scaleFactor, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c)
{
    // c = keepWeight * b + scaleFactor * shuffle(a), where shuffle swaps the
    // S and K axes of a (D x S x M x K x T) tensor stored flat in col-major order.
    size_t N = D * S * M * K * T;
    const auto pa = a.Data();
    const auto pb = b.Data();
    auto pc = c.Data();
    // Note: This code is written to match a GPU implementation. It is not super-efficient on the CPU.
    for (size_t na = 0; na < N; na++) // loop over all elements
    {
        // recover the 5 indices from the loop counter
        size_t d = na % D;
        size_t s = (na / D) % S;
        size_t m = (na / D / S) % M;
        size_t k = (na / D / S / M) % K;
        size_t t = (na / D / S / M / K) % T;
        // compute index for the a and b/c tensors
        assert(na == (((t * K + k) * M + m) * S + s) * D + d); // input tensor of dimension (D x S x M x K x T)
        size_t nb = (((t * S + s) * M + m) * K + k) * D + d; // output tensor of dimension (D x K x M x S x T): k/K and s/S swapped
        assert(nb < N);
        // perform the computation
        ElemType cval = keepWeight ? keepWeight * pb[nb] : (ElemType)0; // if weight is 0 then don't bother to read memory (efficiency) or to multiply (NaN-safe)
        cval += scaleFactor * pa[na];
        pc[nb] = cval;
    }
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::Ones(const size_t rows, const size_t cols)
{
    // Factory: build a rows x cols matrix and fill every entry with 1.
    CPUMatrix<ElemType> result(rows, cols);
    result.SetValue(1);
    return result;
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::Zeros(const size_t rows, const size_t cols)
{
    // Factory: rows x cols matrix of zeros. SetValue(0) is kept for symmetry
    // with the other factories even though the constructor zero-initializes.
    CPUMatrix<ElemType> result(rows, cols);
    result.SetValue(0);
    return result;
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::Eye(const size_t rows)
{
    // Factory: rows x rows identity matrix (zero-initialized, then unit diagonal).
    CPUMatrix<ElemType> result(rows, rows);
    result.SetDiagonalValue(1);
    return result;
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::RandomUniform(const size_t rows, const size_t cols, const ElemType low, const ElemType high, unsigned long seed)
{
    // Factory: rows x cols matrix filled with uniform random values in [low, high),
    // deterministic for a given seed.
    CPUMatrix<ElemType> result(rows, cols);
    result.SetUniformRandomValue(low, high, seed);
    return result;
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::RandomGaussian(const size_t rows, const size_t cols, const ElemType mean, const ElemType sigma, unsigned long seed)
{
    // Factory: rows x cols matrix of Gaussian samples with the given mean and
    // standard deviation, deterministic for a given seed.
    CPUMatrix<ElemType> result(rows, cols);
    result.SetGaussianRandomValue(mean, sigma, seed);
    return result;
}
template <class ElemType>
bool CPUMatrix<ElemType>::HasElement(const CPUMatrix<ElemType>& mat, const ElemType v)
{
    // Returns true if any element of mat "equals" v, where NaN matches NaN and
    // +/-inf matches an inf of the same sign (a plain == would miss both cases).
    bool bHas = false;
    bool isvFinite = std::isfinite(v);
#pragma omp parallel for
    for (long j = 0; j < mat.GetNumElements(); j++)
    {
        // The flush keeps threads reasonably up to date on bHas so they can skip
        // work once a match is found; a stale read only costs extra comparisons,
        // never correctness, since bHas transitions one way (false -> true).
#pragma omp flush(bHas)
        if (!bHas)
        {
            ElemType cur = mat.Data()[j];
            if (isvFinite && std::isfinite(cur))
            {
                if (cur == v)
                    bHas = true;
            }
            else if (std::isnan(v) && std::isnan(cur))
                bHas = true;
            else if (std::isinf(v) && std::isinf(cur) && std::signbit(v) == std::signbit(cur))
                bHas = true;
        }
    }
    return bHas;
}
// CPUMatrix<ElemType>& AssignElementProductOfWithShiftNeg(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, size_t shift, size_t negnumber);
//[this]=a .* b
// here, a and b must be two row vectors of the same size, i.e. [1,m]
// the inputs are two rwo vectors
// the output is a matrix of size(neg+1, col)
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementProductOfWithShiftNeg(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, size_t shift, size_t negnumber)
{
    // a and b are 1 x n row vectors. The result is (negnumber+1) x n:
    //   row 0:   plain elementwise product a .* b;
    //   row i>0: a(0,j) times b at the cyclically shifted column
    //            (j + shift + i - 1) mod n (used to pair against shifted
    //            "negative" columns of b).
    if (a.IsEmpty() || b.IsEmpty())
        LogicError("AssignElementProductOfWithShiftNeg: Matrix is empty.");
    if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols()))
        InvalidArgument("AssignElementProductOfWithShiftNeg: The input matrix dimensions do not match.");
    if (a.GetNumRows() != 1)
        InvalidArgument("AssignElementProductOfWithShiftNeg: The input matrix must be a row vector.");
    auto& us = *this;
    // Resize only when the target is not the first input itself.
    if (this != &a)
    {
        RequireSize(negnumber + 1, a.GetNumCols());
        // RequireSize(a.GetNumRows(), a.GetNumCols());
    }
    long m = (long) GetNumRows(), n = (long) GetNumCols(); // a and b are of size (1,n)
    // #pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        us(0, j) = a(0, j) * b(0, j);
    }
    for (long j = 0; j < n; j++)
    {
        for (long i = 1; i < m; i++)
        {
            us(i, j) = a(0, j) * b(0, (j + shift + i - 1) % n);
        }
    }
    return *this;
}
template <class ElemType>
void CPUMatrix<ElemType>::InnerProductWithShiftNeg(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c, const bool isColWise, size_t shift, size_t negnumber)
{
    // Column-wise inner products of a against b and against 'negnumber'
    // cyclically shifted column sets of b:
    //   c(0, j)   = dot(a(:,j), b(:,j))
    //   c(i, j)   = dot(a(:,j), b(:, (j + shift + i - 1) mod n)) for i in [1, negnumber].
    // Only isColWise == true is supported.
    if (a.IsEmpty() || b.IsEmpty())
        LogicError("InnerProduct: one of the input matrices is empty.");
    const int m = (int) a.GetNumRows();
    const int n = (int) a.GetNumCols();
    const int k = (int) b.GetNumRows();
    const int l = (int) b.GetNumCols();
    assert(m > 0 && n > 0 && k > 0 && l > 0); // converting from size_t to int may cause overflow
    if (m != k || n != l)
        InvalidArgument("InnerProduct: Matrices a and b should have same dimension.");
    if ((isColWise && m == 1) || !isColWise && n == 1) // in this case it's equivalent to element-wise product
    {
        InvalidArgument("InnerProduct: Both matrices should be normal ones, not vectors");
        // c.AssignElementProductOf(a, b);
    }
    else if (isColWise) // col-wise
    {
        c.RequireSize(negnumber + 1, n); // this line ischanged
        ElemType* aBufPtr = a.Data();
        ElemType* bBufPtr = b.Data();
        if (std::is_same<ElemType, double>::value)
        {
            // Row 0: aligned columns.
            for (long j = 0; j < n; j++)
            {
                c(0, j) = (ElemType) cblas_ddot(m, reinterpret_cast<double*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<double*>(bBufPtr + b.LocateColumn(j)), 1);
            }
            // Rows 1..negnumber: shifted columns of b.
            for (long j = 0; j < n; j++)
            {
                for (long i = 1; i < negnumber + 1; i++)
                {
                    c(i, j) = (ElemType) cblas_ddot(m, reinterpret_cast<double*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<double*>(bBufPtr + b.LocateColumn((j + shift + i - 1) % n)), 1);
                }
            }
        }
        else if (std::is_same<ElemType, float>::value)
        {
            for (long j = 0; j < n; j++)
            {
                c(0, j) = (ElemType) cblas_sdot(m, reinterpret_cast<float*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<float*>(bBufPtr + b.LocateColumn(j)), 1);
            }
            for (long j = 0; j < n; j++)
            {
                for (long i = 1; i < negnumber + 1; i++)
                {
                    c(i, j) = (ElemType) cblas_sdot(m, reinterpret_cast<float*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<float*>(bBufPtr + b.LocateColumn((j + shift + i - 1) % n)), 1);
                }
            }
        }
        else
        {
            RuntimeError("Unsupported data format");
        }
    }
    else
    {
        InvalidArgument("InnerProduct: Rowwise is not supported yet");
        // NOTE(review): the code below is presumably unreachable if
        // InvalidArgument always throws — confirm; kept as-is.
        c.RequireSize(m, 1);
        ElemType* aBufPtr = a.Data();
        ElemType* bBufPtr = b.Data();
        if (std::is_same<ElemType, double>::value)
        {
#pragma omp parallel for
            foreach_row (i, c)
            {
                c(i, 0) = (ElemType) cblas_ddot(n, reinterpret_cast<double*>(aBufPtr + i), m, reinterpret_cast<double*>(bBufPtr + i), m);
            }
        }
        else if (std::is_same<ElemType, float>::value)
        {
#pragma omp parallel for
            foreach_row (i, c)
            {
#pragma warning(suppress : 4244)
                c(i, 0) = cblas_sdot(n, reinterpret_cast<float*>(aBufPtr + i), m, reinterpret_cast<float*>(bBufPtr + i), m);
            }
        }
        else
        {
            RuntimeError("Unsupported data format");
        }
    }
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::GetARowByIndex(const CPUMatrix<ElemType>& a, size_t index)
{
    // Copies row 'index' of a into this matrix, which is resized to a 1 x n row vector.
    if (a.IsEmpty())
        LogicError("GetARowByIndex: the input matrix is empty."); // fixed grammar ("matrices is")
    const int m = (int) a.GetNumRows();
    const int n = (int) a.GetNumCols();
    assert(m > 0 && n > 0); // converting from size_t to int may cause overflow
    // 'index' is unsigned, so the original 'index < 0' test could never fire;
    // only the upper bound needs checking.
    if (index >= (size_t) m)
        LogicError("GetARowByIndex: the row index is out of range.");
    auto& us = *this;
    RequireSize(1, n);
    for (long j = 0; j < n; j++)
    {
        us(0, j) = a(index, j);
    }
    return *this;
}
// input: a, a row vector
// input: b, a matrix. b.col == a.col
// input firstmatrixfixed: If true, keep a's order. Otherwise, keep b's order
// output: c, a matrix. c.size == b.size
/*
Example, a = [a1 a2 a3]
b = [b11 b12 b13;
b21 b22 b23 ]
if true:
shift = 1
then c = [a1*b12 a2*b13 a3*b11
a1*b22 a2*b23 a3*b21]
if shift = 2
then c = [ a1*b13 a2*b11 a3*b12
a1*b23 a2*b21 a3*b22]
i.e. we do column-wise shift
if false:
shift = 1
then c = [a2*b11 a3*b12 a1*b13
a2*b21 a3*b22 a1*b23]
shift = 2
then c = [ a3*b11 a1*b12 a2*b13
a3*b21 a1*b22 a2*b23]
*/
template <class ElemType>
void CPUMatrix<ElemType>::ConductRowElementMultiplyWithShift(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c, size_t shift, bool bFirstmatrixfixed)
{
    // c(i, j) is the product of one entry of the row vector a and one entry of b,
    // where one operand is read at a column cyclically shifted by 'shift':
    //   bFirstmatrixfixed == true : keep a's ordering, shift b's columns;
    //   bFirstmatrixfixed == false: shift a's columns, keep b's ordering.
    if (a.IsEmpty() || b.IsEmpty())
        LogicError("InnerProduct: one of the input matrices is empty.");
    const int m = (int) a.GetNumRows();
    const int n = (int) a.GetNumCols();
    const int k = (int) b.GetNumRows();
    const int l = (int) b.GetNumCols();
    assert(m > 0 && n > 0 && k > 0 && l > 0); // converting from size_t to int may cause overflow
    if (m != 1 || n != l)
        InvalidArgument("InnerProduct: Matrices a and b should have same dimension.");
    c.RequireSize(k, l); // c must the same size of b
    for (long col = 0; col < l; col++)
    {
        // Hoist the shifted column index; it is constant within a column.
        const long shiftedCol = (long) ((col + shift) % l);
        for (long row = 0; row < k; row++)
        {
            c(row, col) = bFirstmatrixfixed ? a(0, col) * b(row, shiftedCol)
                                            : a(0, shiftedCol) * b(row, col);
        }
    }
}
// CPUMatrix<ElemType>& AssignElementProductOfWithShift(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, size_t shift);
//[this]=a .* b
// here, a and b must be two row vectors of the same size, i.e. [1,m]. We will do element product with shift.
// inputs are 2 row vectors
// output is a row vector
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementProductOfWithShift(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, size_t shift)
{
    // [this] = a .* (b cyclically shifted by 'shift' columns); a and b are
    // 1 x n row vectors and the result is a 1 x n row vector.
    if (a.IsEmpty() || b.IsEmpty())
        LogicError("AssignElementProductOfWithShiftNeg: Matrix is empty.");
    if (a.GetNumRows() != b.GetNumRows() || a.GetNumCols() != b.GetNumCols())
        InvalidArgument("AssignElementProductOfWithShiftNeg: The input matrix dimensions do not match.");
    if (a.GetNumRows() != 1)
        InvalidArgument("AssignElementProductOfWithShiftNeg: The input matrix must be a row vector.");
    auto& us = *this;
    // Resize only when the target is not the first input itself.
    if (this != &a)
        RequireSize(1, a.GetNumCols());
    const long n = (long) GetNumCols(); // a and b are of size (1,n)
    // Each output column is independent — safe to parallelize.
#pragma omp parallel for
    for (long col = 0; col < n; col++)
    {
        us(0, col) = a(0, col) * b(0, (col + shift) % n);
    }
    return *this;
}
#pragma endregion Static BLAS Functions
// 'double' version of LogAdd.
// Forwards to LogAdd so that log-domain accumulation can be carried out in
// double precision even when ElemType is float.
inline double LogAddD(double x, double y)
{
    return LogAdd(x, y);
}
// Returns log(sum_i exp(element_i)) over all elements of the matrix,
// accumulated in double precision via LogAddD, starting from LZERO
// (the log-domain representation of probability zero).
template <class ElemType>
ElemType CPUMatrix<ElemType>::LogSumOfElements() const
{
    ElemType fAlpha = (ElemType) LZERO;
    const ElemType* bufPtr = Data();
    // BUGFIX: loop index was `int`, silently comparing against the size_t
    // returned by GetNumElements() and overflowing for > INT_MAX elements.
    const size_t numElements = GetNumElements();
    for (size_t k = 0; k < numElements; k++)
        fAlpha = (ElemType) LogAddD(fAlpha, bufPtr[k]);
    return fAlpha;
}
// Backward (beta) pass of the RCRF: fills `beta` column by column from the
// last time position down to the first, delegating the per-label recursion to
// _rcrfBackwardCompute.
// NOTE: `lbls` is kept for interface compatibility; it is only used for its
// dimensions here. (The original code also searched it for the last active
// label, but never used the result — that dead computation was removed.)
template <class ElemType>
void CPUMatrix<ElemType>::RCRFBackwardCompute(const CPUMatrix<ElemType>& alpha, CPUMatrix<ElemType>& beta,
                                              const CPUMatrix<ElemType>& lbls,
                                              const CPUMatrix<ElemType>& pair_scores)
{
    int iNumPos = (int) lbls.GetNumCols();
    int iNumLab = (int) lbls.GetNumRows();

    beta.RequireSize(iNumLab, iNumPos);

    // Walk time positions backwards; within one position the labels are
    // independent of each other and can be processed in parallel.
    for (int t = iNumPos - 1; t >= 0; t--)
    {
#pragma omp parallel for
        for (int k = 0; k < iNumLab; k++)
        {
            _rcrfBackwardCompute(t, k, alpha, beta, pair_scores);
        }
    }
}
// Calculate alpha in forward-backward calculation. equation (6), (7) in ftp://ftp.idsia.ch/pub/juergen/icml2006.pdf
// GPU x dimension corresponds to utterances, y dimension corresponds to phone sequence in each utterance
// prob (input): the posterior output from the network
// alpha (output): alpha for forward-backward calculation.
// phoneSeq (input): phone ID sequence for each utterance in this minibatch, each col is one utterance
// phoneBound (input): phone boundary (frame index) of each phone for each utterance in this minibatch, each col is one utterance
// uttToChanInd (input): map from utterance ID to minibatch channel ID. We need this because each channel may contain more than one utterance.
// uttFrameNum (input): the frame number of each utterance. The size of this vector = the number of all utterances in this minibatch
// uttBeginFrame(input): the position of the first frame of each utterance in the minibatch channel. We need this because each channel may contain more than one utterance.
// uttPhoneNum (input): the phone number of each utterance. The size of this vector = the number of all utterances in this minibatch
// numChannels (input): channel number in this minibatch
// uttNum (input): number of utterances
// t (input): time stamp to process
// maxPhoneNum (input): the max number of phones between utterances
// totalPhoneNum (input): the total number of phones of all utterances
// blankTokenId (input): id of the CTC blank token
// delayConstraint -- label output delay constraint introduced during training that allows to have shorter delay during inference.
//      Alpha and Beta scores outside of the delay boundary are set to zero.
//      Setting this parameter smaller will result in shorted delay between label output during decoding.
//      delayConstraint=-1 means no constraint
template<class ElemType>
void _assignAlphaScore(
    const ElemType *prob,
    ElemType *alphaScore,
    ElemType *phoneSeq,
    ElemType *phoneBound,
    const std::vector<size_t>& uttToChanInd,
    const std::vector<size_t>& uttFrameNum,
    const std::vector<size_t>& uttBeginFrame,
    const std::vector<size_t>& uttPhoneNum,
    size_t numChannels,
    const size_t uttNum,
    const size_t t,
    const size_t maxPhoneNum, // Maximum length of utterance in this MB
    const size_t totalPhoneNum, // Total number of phones
    const size_t blankTokenId,
    const int delayConstraint)
{
    // One call processes a single time stamp t for every utterance.
    for (size_t uttId = 0;uttId < uttNum;uttId++) {

        // Number of phones and frames in this utterance
        size_t frameNum = uttFrameNum[uttId];
        if (t >= frameNum) continue;
        size_t phoneNum = uttPhoneNum[uttId];

        // NOTE(review): phoneNum is size_t, so `phoneNum - 1` wraps if phoneNum == 0;
        // callers presumably guarantee phoneNum >= 2 (blank-padded sequence) — TODO confirm.
#pragma omp parallel for
        for (int phoneSeqId = 1;phoneSeqId < phoneNum - 1;phoneSeqId++) {
            // Index of the label in the sequence
            // Current and previous phone indices in phoneSeq matrix
            size_t labelid = uttId*maxPhoneNum + phoneSeqId;

            // Actual current phone label
            size_t phoneId = (size_t)(phoneSeq[labelid]);

            // Index of the current frame in minibatch
            size_t timeId = (t + uttBeginFrame[uttId])*numChannels + uttToChanInd[uttId];

            // Index of probability of observing phoneId at frame timeId
            size_t probId = timeId*totalPhoneNum + phoneId;

            size_t alphaId = maxPhoneNum* timeId + phoneSeqId; // alpha_t(s)

            if (t == 0)
            {
                // Initialize recursion: only the first two symbols (blank and
                // first phone) can start a path.
                if (phoneSeqId == 1 || phoneSeqId == 2)
                {
                    alphaScore[alphaId] = prob[probId];
                }
            }
            else
            {
                if (phoneSeqId >= 1)
                {
                    size_t timeId_1 = timeId - numChannels; // Index corresponding to (t-1)
                    size_t alphaId_0 = maxPhoneNum* timeId_1 + phoneSeqId; // alpha_{t-1}(s)
                    size_t alphaId_1 = alphaId_0 - 1; // alpha_{t-1}(s-1)
                    size_t alphaId_2 = alphaId_0 - 2; // alpha_{t-1}(s-2)

                    ElemType x = LZERO;
                    ElemType ascore;

                    // Skip-transition from s-2 is allowed only when current label
                    // is not blank and differs from the previous non-blank label.
                    if (phoneSeqId > 2)
                    {
                        size_t labelid_2 = labelid - 2;
                        // if current label is not blank and not equal prev non-blank label
                        if ((size_t)(phoneSeq[labelid]) != blankTokenId && phoneId != (size_t)(phoneSeq[labelid_2]))
                        {
                            x = LogAdd(x, alphaScore[alphaId_2]);
                        }
                    }

                    if (phoneSeqId > 1)
                    {
                        x = LogAdd(x, alphaScore[alphaId_1]);
                    }

                    x = LogAdd(x, alphaScore[alphaId_0]);

                    if (phoneId != SIZE_MAX)
                        ascore = prob[probId]; // Probability of observing given label at given time
                    else
                        ascore = 0;
                    alphaScore[alphaId] = (ElemType)x + ascore;

                    // Optional delay constraint: zero out alpha outside the
                    // allowed window around the phone's right boundary.
                    if (delayConstraint != -1)
                    {
                        size_t labelid_r = labelid + 2;
                        size_t phoneBoundId_r = (size_t)(phoneBound[labelid_r]);
                        if (phoneId == blankTokenId)
                        {
                            // only constraint right side
                            if (t > phoneBoundId_r + delayConstraint - 1)
                                alphaScore[alphaId] = LZERO;
                        }
                        else if (phoneId != blankTokenId)
                        {
                            if (t > phoneBoundId_r + delayConstraint)
                                alphaScore[alphaId] = LZERO;
                        }
                    }
                }
            }
        }
    }
}
// Calculate beta in forward-backward calculation, equation (10), (11) in ftp://ftp.idsia.ch/pub/juergen/icml2006.pdf
// See _assignAlphaScore for the explanation of parameters.
// This is the mirror of the alpha recursion: it runs over the same layout but
// looks one frame AHEAD (t+1) and at the NEXT sequence positions (s+1, s+2).
template<class ElemType>
void _assignBetaScore(
    const ElemType *prob,
    ElemType *betaScore,
    ElemType *phoneSeq,
    ElemType *phoneBound,
    const std::vector<size_t>& uttToChanInd,
    const std::vector<size_t>& uttFrameNum,
    const std::vector<size_t>& uttBeginFrame,
    const std::vector<size_t>& uttPhoneNum,
    const size_t numChannels,
    const size_t uttNum,
    const long t,
    const size_t maxPhoneNum,
    const size_t totalPhoneNum,
    const size_t blankTokenId,
    const int delayConstraint)
{
    for (size_t uttId = 0;uttId < uttNum;uttId++) {

        // Number of phones and frames in this utterance
        size_t frameNum = uttFrameNum[uttId];
        // NOTE(review): `t` is long and frameNum is size_t — the comparison
        // promotes t to unsigned, so a negative t also takes this `continue`.
        // Callers presumably pass t in [0, maxFrameNum) — TODO confirm.
        if (t >= frameNum) continue;
        size_t phoneNum = uttPhoneNum[uttId];

#pragma omp parallel for
        for (int phoneSeqId = 1;phoneSeqId < phoneNum - 1;phoneSeqId++) {
            size_t labelid = uttId*maxPhoneNum + phoneSeqId;
            size_t labelid_2 = labelid + 2;
            size_t phoneId = (LONG64)(phoneSeq[labelid]);
            size_t timeId = (t + uttBeginFrame[uttId])*numChannels + uttToChanInd[uttId];
            size_t probId = timeId*totalPhoneNum + phoneId;
            size_t betaid = maxPhoneNum* timeId + phoneSeqId;
            size_t timeId_1 = timeId + numChannels; // index of frame (t+1) in the same channel
            size_t betaid_0 = maxPhoneNum* timeId_1 + phoneSeqId; // beta_{t+1}(s)
            size_t betaid_1 = betaid_0 + 1; // beta_{t+1}(s+1)
            size_t betaid_2 = betaid_0 + 2; // beta_{t+1}(s+2)

            if (t == frameNum - 1)
            {
                // Initialization: paths can only end on the last phone or the
                // trailing blank.
                if (phoneSeqId == phoneNum - 3 || phoneSeqId == phoneNum - 2)
                {
                    betaScore[betaid] = prob[probId];
                }
            }
            else
            {
                if (phoneSeqId >= 1)
                {
                    ElemType x = LZERO;
                    ElemType ascore;

                    // Skip-transition to s+2 only when the current label is not
                    // blank and differs from the next non-blank label.
                    if (phoneSeqId < phoneNum - 3)
                    {
                        if (phoneSeq[labelid] != blankTokenId && phoneId != phoneSeq[labelid_2])
                        {
                            x = LogAdd(x, betaScore[betaid_2]);
                        }
                    }

                    if (phoneSeqId < phoneNum - 2)
                    {
                        x = LogAdd(x, betaScore[betaid_1]);
                    }

                    x = LogAdd(x, betaScore[betaid_0]);

                    if (phoneId != SIZE_MAX)
                        ascore = prob[probId];
                    else
                        ascore = 0;
                    betaScore[betaid] = (ElemType)x + ascore;

                    // Same delay-constraint windowing as in _assignAlphaScore.
                    if (delayConstraint != -1)
                    {
                        size_t phoneBoundId_r = (size_t)(phoneBound[labelid_2]);
                        if (phoneId == blankTokenId)
                        {
                            if (t > phoneBoundId_r + delayConstraint - 1)
                                betaScore[betaid] = LZERO;
                        }
                        else if (phoneId != blankTokenId)
                        {
                            if (t > phoneBoundId_r + delayConstraint)
                                betaScore[betaid] = LZERO;
                        }
                    }
                }
            }
        }
    }
}
// Calculate CTC score. equation (8) in ftp://ftp.idsia.ch/pub/juergen/icml2006.pdf
// For each utterance, folds the first frame's beta at sequence positions 1 and 2
// into the total path score:
//   totalScore[uttId] = LogAdd(beta[first+1], beta[first+2])
// The combined value is also written back into betaScore at the utterance's
// first slot (later read by _assignCTCScore as P(l|x)).
template<class ElemType>
void _assignTotalScore(ElemType *betaScore,
    std::vector<ElemType>& totalScore,
    const size_t uttNum,
    const std::vector<size_t>& uttToChanInd,
    const std::vector<size_t>& uttBeginFrame,
    const size_t numChannels,
    const size_t maxPhoneNum)
{
    // int loop index: OpenMP 2.0 `parallel for` requires a signed induction
    // variable; cast the bound instead of mixing signed/unsigned.
#pragma omp parallel for
    for (int uttId = 0; uttId < (int) uttNum; uttId++)
    {
        // (removed a redundant `if (uttId < uttNum)` guard — the loop bound
        // already guarantees it)
        LONG64 alphaId_0 = (uttBeginFrame[uttId] * numChannels + uttToChanInd[uttId]) * maxPhoneNum;

        betaScore[alphaId_0] = LogAdd(betaScore[alphaId_0 + 1], betaScore[alphaId_0 + 2]);
        totalScore[uttId] = betaScore[alphaId_0];
    }
}
// Calculate derivative, equation (15) in ftp://ftp.idsia.ch/pub/juergen/icml2006.pdf
// See _assignAlphaScore for the explanation of parameters.
// Writes per-frame label occupancy probabilities into CTCscore:
//   gamma_t(s) = exp(alpha_t(s) + beta_t(s) - prob - log P(l|x))
// accumulated (LogAdd) over all sequence positions sharing a phone id, then
// exponentiated (values below LZERO are flushed to 0).
template<class ElemType>
void _assignCTCScore(
    ElemType *CTCscore,
    ElemType *prob,
    ElemType *alphaScore,
    ElemType *betaScore,
    ElemType *phoneSeq,
    const size_t uttNum,
    const std::vector<size_t>& uttToChanInd,
    const std::vector<size_t>& uttBeginFrame,
    const std::vector<size_t>& uttPhoneNum,
    const std::vector<size_t>& uttFrameNum,
    const size_t numChannels,
    const size_t maxPhoneNum,
    const size_t totalPhoneNum)
{
    for (size_t uttId = 0;uttId < uttNum;uttId++) {
        // Frames of one utterance are independent — parallelize over time.
#pragma omp parallel for
        for (int t = 0; t < uttFrameNum[uttId]; t++) {
            size_t phoneNum = uttPhoneNum[uttId];
            size_t alphaId_0 = (uttBeginFrame[uttId] * numChannels + uttToChanInd[uttId]) * maxPhoneNum;
            size_t timeId = (t + uttBeginFrame[uttId])*numChannels + uttToChanInd[uttId];
            // Total utterance score log P(l|x), stored by _assignTotalScore.
            ElemType P_lx = betaScore[alphaId_0];

            // Accumulate occupancy for each sequence position into its phone's slot.
            for (int s = 1; s < phoneNum - 1; s++)
            {
                long phoneId = phoneSeq[uttId*maxPhoneNum + s];
                size_t alphaId = maxPhoneNum* timeId + s;
                size_t probId = timeId*totalPhoneNum + phoneId;

                // NOTE(review): phoneId is long compared against SIZE_MAX — the
                // comparison promotes to unsigned, so phoneId == -1 matches
                // SIZE_MAX and is skipped. Presumably -1 marks padding; confirm.
                if (phoneId != SIZE_MAX)
                {
                    ElemType logoccu = alphaScore[alphaId] + betaScore[alphaId] - prob[probId] - (ElemType)P_lx;
                    CTCscore[probId] = LogAdd(CTCscore[probId], logoccu);
                }
            }

            // Convert this frame's accumulated log-occupancies to linear domain.
            for (int s = 0; s < totalPhoneNum; s++)
            {
                size_t probId = timeId*totalPhoneNum + s;
                ElemType logoccu = CTCscore[probId];
                if (logoccu < LZERO)
                    CTCscore[probId] = 0.0f;
                else
                    CTCscore[probId] = exp(logoccu);
            }
        }
    }
}
// Driver for the CTC forward-backward computation on the CPU.
// Runs the alpha pass forward over all frames, the beta pass backward, derives
// the per-utterance total scores and finally the per-frame occupancy scores
// into *this. totalScore(0,0) receives the summed negative log-likelihood over
// all utterances. Only the column-wise (one utterance per column) layout is
// supported.
template<class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignCTCScore(
    const CPUMatrix<ElemType>& prob, CPUMatrix<ElemType>& alpha, CPUMatrix<ElemType>& beta,
    const CPUMatrix<ElemType>& phoneSeq, const CPUMatrix<ElemType>& phoneBoundary, CPUMatrix<ElemType> & totalScore, const std::vector<size_t>& uttToChanInd, const std::vector<size_t> & uttBeginFrame, const std::vector<size_t> & uttFrameNum,
    const std::vector<size_t> & uttPhoneNum, const size_t numParallelSequences, const size_t maxFrameNum, const size_t blankTokenId, const int delayConstraint, const bool isColWise)
{
    // Column wise representation of sequences in input matrices (each column is one sequence/utterance)
    if (isColWise)
    {
        // Total number of phones
        size_t totalPhoneNum = prob.GetNumRows();
        size_t uttNum = uttFrameNum.size();

        // Max number of phones in utterances in this minibatch
        size_t maxPhoneNum = phoneSeq.GetNumRows();

        // Forward pass: alpha recursion, one time stamp at a time.
        for (size_t t = 0; t < maxFrameNum; t++)
        {
            _assignAlphaScore(prob.Data(), alpha.Data(), phoneSeq.Data(), phoneBoundary.Data(), uttToChanInd,
                uttFrameNum, uttBeginFrame, uttPhoneNum, numParallelSequences, uttNum, t, maxPhoneNum, totalPhoneNum, blankTokenId, delayConstraint);
        }

        // Backward pass: beta recursion, from the last frame down (signed index
        // so the t >= 0 loop condition terminates).
        for (LONG64 t = maxFrameNum - 1; t >= 0; t--)
        {
            _assignBetaScore(prob.Data(), beta.Data(), phoneSeq.Data(), phoneBoundary.Data(), uttToChanInd,
                uttFrameNum, uttBeginFrame, uttPhoneNum, numParallelSequences, uttNum, t, maxPhoneNum, totalPhoneNum, blankTokenId, delayConstraint);
        }

        std::vector<ElemType> scores(uttNum);
        _assignTotalScore(beta.Data(), scores, uttNum, uttToChanInd, uttBeginFrame, numParallelSequences, maxPhoneNum);

        // Occupancy scores (the derivative inputs) go into this matrix.
        _assignCTCScore(Data(), prob.Data(), alpha.Data(), beta.Data(), phoneSeq.Data(), uttNum, uttToChanInd,
            uttBeginFrame, uttPhoneNum, uttFrameNum, numParallelSequences, maxPhoneNum, totalPhoneNum);

        // Sum negative per-utterance log scores into a single scalar.
        totalScore(0, 0) = 0.0;
        for (size_t utt = 0; utt < uttNum; utt++)
        {
            totalScore(0,0) -= scores[utt];
        }

        return *this;
    }
    else {
        LogicError("Only ColWise minibatch layout is supported.");
    }

    // Unreachable when LogicError throws (presumably it does — confirm);
    // retained to satisfy the compiler's return-path analysis.
    return *this;
}
/// the kernel function for RCRF backward computation
/// Computes beta(k, t) for one (label k, position t) pair in the log domain:
/// - at the last position, beta is alpha normalized by the log-sum over labels;
/// - otherwise, beta combines (LogAdd) each successor label j's beta(j, t+1)
///   with the transition score pair_scores(j, k), normalized per-j by the
///   log-sum of alpha(m, t) + pair_scores(j, m) over all labels m.
template <class ElemType>
void CPUMatrix<ElemType>::_rcrfBackwardCompute(size_t t, size_t k, const CPUMatrix<ElemType>& alpha,
                                               CPUMatrix<ElemType>& beta,
                                               const CPUMatrix<ElemType>& pair_scores)
{
    size_t iNumLab = alpha.GetNumRows();
    size_t iNumPos = alpha.GetNumCols();

    ElemType fSum;
    ElemType fTmp = (ElemType) LZERO; // log-domain accumulator, starts at "log 0"
    if (t == iNumPos - 1)
    {
        // Terminal position: normalize alpha over all labels.
        fSum = (ElemType) LZERO;
        for (int j = 0; j < iNumLab; j++)
        {
            fSum = (ElemType) LogAddD(fSum, alpha(j, t));
        }

        fTmp = alpha(k, t) - fSum;
        beta(k, t) = fTmp;
    }
    else
    {
        // Interior position: accumulate over successor labels j.
        for (int j = 0; j < iNumLab; j++)
        {
            fSum = (ElemType) LZERO;
            for (int m = 0; m < iNumLab; m++)
            {
                fSum = (ElemType) LogAddD(fSum, alpha(m, t) + pair_scores(j, m));
            }

            fTmp = (ElemType) LogAddD(fTmp, beta(j, t + 1) + alpha(k, t) + pair_scores(j, k) - fSum);
        }
        beta(k, t) = fTmp;
    }
}
// Gradient of the RCRF transition scores.
// For every time position, accumulates the model's expected transition counts
// into `grd` (via _rcrfTransGrdCompute) and subtracts 1 for the transition
// actually observed in `lbls` (columns are assumed one-hot).
template <class ElemType>
void CPUMatrix<ElemType>::RCRFTransGrdCompute(const CPUMatrix<ElemType>& lbls,
                                              const CPUMatrix<ElemType>& alpha,
                                              const CPUMatrix<ElemType>& beta,
                                              const CPUMatrix<ElemType>& pair_scores,
                                              CPUMatrix<ElemType>& grd)
{
    int iNumPos = (int) alpha.GetNumCols();
    int iNumLab = (int) alpha.GetNumRows();

    // Row index of the active label in the first column.
    int firstLbl = -1;
    for (int ik = 0; ik < (int) lbls.GetNumRows(); ik++)
        if (lbls(ik, 0) != 0)
        {
            firstLbl = ik;
            break;
        }

    // int loop index to avoid the original size_t-vs-int comparison.
    for (int tPos = 0; tPos < iNumPos; tPos++)
    {
        // (removed two unused ColumnSlice locals the original recreated every
        // iteration — the helper below slices alpha/beta itself)

        // Expected-count part of the gradient; labels are independent.
#pragma omp parallel for
        for (int i = 0; i < iNumLab; i++)
        {
            _rcrfTransGrdCompute(i, lbls, alpha, beta, pair_scores, grd, tPos);
        }

        // Observed transition (i -> j) at this position: source label i...
        int i = -1;
        if (tPos == 0)
            i = firstLbl;
        else
        {
            for (int ik = 0; ik < (int) lbls.GetNumRows(); ik++)
                if (lbls(ik, tPos - 1) != 0)
                {
                    i = ik;
                    break;
                }
        }

        // ...and destination label j.
        int j = -1;
        for (int ik = 0; ik < (int) lbls.GetNumRows(); ik++)
        {
            if (lbls(ik, tPos) != 0)
            {
                j = ik;
                break;
            }
        }

        // NOTE(review): if a lbls column has no nonzero entry, i or j stays -1
        // and grd(j, i) indexes out of bounds — callers presumably guarantee
        // one-hot label columns. TODO confirm.
        grd(j, i) -= 1.0;
    }
}
// Per-source-label kernel for RCRFTransGrdCompute.
// For source label i at position tPos, adds the expected transition
// probability exp(alpha-term + pair_scores(j, i) - normalizer + beta(j, tPos))
// into grd(j, i) for every destination label j. At tPos == 0 the "alpha" term
// is 0 for the first observed label and LZERO (log 0) for all others.
template <class ElemType>
void CPUMatrix<ElemType>::_rcrfTransGrdCompute(size_t i,
                                               const CPUMatrix<ElemType>& lbls,
                                               const CPUMatrix<ElemType>& alpha,
                                               const CPUMatrix<ElemType>& beta,
                                               const CPUMatrix<ElemType>& pair_scores,
                                               CPUMatrix<ElemType>& grd,
                                               const size_t tPos // position
                                               )
{
    int iNumLab = (int) alpha.GetNumRows();

    // Row index of the active label in the first column.
    int firstLbl = -1;
    for (int ik = 0; ik < lbls.GetNumRows(); ik++)
        if (lbls(ik, 0) != 0)
        {
            firstLbl = ik;
            break;
        }

    // b = beta column at tPos; a = alpha column at tPos-1 (unused when tPos==0).
    CPUMatrix<ElemType> b = beta.ColumnSlice(tPos, 1);
    CPUMatrix<ElemType> a;
    if (tPos > 0)
        a = alpha.ColumnSlice(tPos - 1, 1);

    {
        ElemType fTmp = (ElemType) LZERO;
        for (int j = 0; j < iNumLab; j++)
        {
            // Log-prob of being in source label i before the transition.
            if (tPos == 0)
            {
                if (i == firstLbl)
                {
                    fTmp = 0;
                }
                else
                {
                    fTmp = (ElemType) LZERO;
                }
            }
            else
            {
                fTmp = a(i, 0);
            }
            fTmp += pair_scores(j, i);

            // Normalizer: log-sum over all possible source labels k.
            ElemType fSum = (ElemType) LZERO;
            for (int k = 0; k < iNumLab; k++)
            {
                ElemType fTmp2;
                if (tPos == 0)
                {
                    if (k == firstLbl)
                    {
                        fTmp2 = 0;
                    }
                    else
                    {
                        fTmp2 = (ElemType) LZERO;
                    }
                }
                else
                {
                    fTmp2 = a(k, 0);
                }
                fSum = (ElemType) LogAddD(fSum, fTmp2 + pair_scores(j, k));
            }

            fTmp -= fSum;
            fTmp += b(j, 0);

            grd(j, i) += exp(fTmp);
        }
    }
};
// Zeroes ("drops") the columns (frames) of this matrix whose reference label
// has too low a posterior: a frame is flagged when the row carrying the 1.0
// entry in `label` has gamma below `threshhold`.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::DropFrame(const CPUMatrix<ElemType>& label, const CPUMatrix<ElemType>& gamma, const ElemType& threshhold)
{
    auto& us = *this;
    if (us.GetNumCols() != gamma.GetNumCols() || us.GetNumRows() != gamma.GetNumRows())
        LogicError("DropFrame: target matrix is not in the same size as gamma matrix."); // (fixed "gamm" typo)

#pragma omp parallel for
    foreach_column (j, label)
    {
        bool dropframe = false;
        // Locate the (one-hot) label row of this frame and test its posterior.
        foreach_row (i, label)
        {
            if (fabs(label(i, j) - 1.0f) < 0.1)
            {
                if (gamma(i, j) < threshhold)
                    dropframe = true;
                break;
            }
        }

        // BUGFIX: the original zeroed every column unconditionally — the
        // computed dropframe flag was never consulted, so no frame was ever
        // kept. Only flagged frames are cleared now.
        if (dropframe)
        {
            foreach_row (i, label)
            {
                us(i, j) = 0.0f;
            }
        }
    }

    return *this;
}
// Accumulates the sequence-training error signal into this matrix:
//   us(i,j) += alpha * (label - (1 - hsmoothingWeight) * dnnoutput
//                             - hsmoothingWeight * gamma)
// hsmoothingWeight interpolates between the frame-level (dnnoutput) and
// sequence-level (gamma) terms; alpha scales the whole update.
// NOTE(review): all four matrices are assumed to share this matrix's
// dimensions — no size check is performed here; confirm at call sites.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSequenceError(const ElemType hsmoothingWeight, const CPUMatrix<ElemType>& label,
                                                              const CPUMatrix<ElemType>& dnnoutput, const CPUMatrix<ElemType>& gamma, ElemType alpha)
{
    auto& us = *this;
    foreach_coord (i, j, us)
        us(i, j) += alpha * (label(i, j) - (1 - hsmoothingWeight) * dnnoutput(i, j) - hsmoothingWeight * gamma(i, j));
    return *this;
}
// note: this function does not depend on the <ElemType> parameter
// Configures the OpenMP (and MKL/OpenBLAS, when built in) worker thread count.
// numThreads == 0 leaves the current setting untouched; a negative value is
// interpreted relative to the hardware thread count. Returns the thread count
// actually in effect.
template <class ElemType>
int CPUMatrix<ElemType>::SetNumThreads(int numThreads)
{
    if (numThreads == 0) // use default
        return numThreads;

    const int hwThreads = (int) std::thread::hardware_concurrency();
    if (numThreads <= 0)
        numThreads = std::max(1, hwThreads + numThreads); // relative to the hardware count
    numThreads = std::min(numThreads, hwThreads);         // never exceed the hardware count

#ifdef _OPENMP
    omp_set_num_threads(numThreads);
    numThreads = omp_get_max_threads();

#ifdef USE_MKL
    mkl_set_num_threads(numThreads);
#elif defined(USE_OPENBLAS)
    openblas_set_num_threads(numThreads);
#endif
#endif
    return numThreads;
}
// Returns the maximum number of worker threads available: the OpenMP limit
// when compiled with OpenMP, otherwise the hardware concurrency.
template <class ElemType>
int CPUMatrix<ElemType>::GetMaxNumThreads()
{
#ifdef _OPENMP
    return omp_get_max_threads();
#else
    return (int) std::thread::hardware_concurrency();
#endif
}
// To ensure Intel MKL calls return the same results on all Intel or Intel compatible CPUs,
// the function set CBWR compatible mode.
// Currently a no-op: the call below is commented out because mkl_cbwr_set is
// not supported by the MKLML distribution in use.
template <class ElemType>
void CPUMatrix<ElemType>::SetCompatibleMode()
{
    // mkl_cbwr_set not supported in MKLML yet
    // Explanation on numeric diff: https://software.intel.com/en-us/articles/introduction-to-the-conditional-numerical-reproducibility-cnr
    // #ifdef USE_MKL
    //     if (mkl_cbwr_set(MKL_CBWR_COMPATIBLE) != MKL_CBWR_SUCCESS)
    //         RuntimeError("Could not set MKL compatible mode.");
    // #endif
}
// Stores the optimization flag bitmask consulted by optimized code paths.
template <class ElemType>
void CPUMatrix<ElemType>::SetOptimizationFlags(int flags)
{
    m_optimizationFlags = flags;
}
// Returns the optimization flag bitmask set via SetOptimizationFlags.
template <class ElemType>
int CPUMatrix<ElemType>::GetOptimizationFlags()
{
    return m_optimizationFlags;
}
// -----------------------------------------------------------------------
// entry points from Matrix.cpp; calls into CPUMatrixTensorOpImpl
// -----------------------------------------------------------------------

// perform unary operation 'op' on a giving 'this', reinterpreting the matrices as tensors as specified by the dims and strides
// This maps 'op' to a lambda.
// beta scales the existing contents of 'this'; alpha scales the result of 'op'.
template <class ElemType>
void CPUMatrix<ElemType>::TensorOp(ElemType beta, const CPUMatrix<ElemType>& a, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp,
                                   const array<size_t, 2>& offsets,
                                   const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 2>& regularStrides,
                                   const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 2>& reducingStrides)
{
    CPUMatrixTensorOpImpl<ElemType>(beta, a, *this, alpha, op, reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
}
// perform binary operation 'op' on a and b giving 'this', reinterpreting the matrices as tensors as specified by the dims and strides
// This maps 'op' to a lambda.
// beta scales the existing contents of 'this'; alpha scales the result of 'op'.
template <class ElemType>
void CPUMatrix<ElemType>::TensorOp(ElemType beta, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp,
                                   const array<size_t, 3>& offsets,
                                   const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 3>& regularStrides,
                                   const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 3>& reducingStrides)
{
    CPUMatrixTensorOpImpl<ElemType>(beta, a, b, *this, alpha, op, reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
}
// perform ternary operation 'op' on a, b, and c giving 'this', reinterpreting the matrices as tensors as specified by the dims and strides
// This maps 'op' to a lambda.
// beta scales the existing contents of 'this'; alpha scales the result of 'op'.
template <class ElemType>
void CPUMatrix<ElemType>::TensorOp(ElemType beta, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, const CPUMatrix<ElemType>& c, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp,
                                   const array<size_t, 4>& offsets,
                                   const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 4>& regularStrides,
                                   const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 4>& reducingStrides)
{
    CPUMatrixTensorOpImpl<ElemType>(beta, a, b, c, *this, alpha, op, reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
}
// Returns the linear index of the smallest element, ties resolved to the
// lowest index; returns -1 for an empty matrix.
template <class ElemType>
int CPUMatrix<ElemType>::Argmin() const
{
    int minArg = -1;
    ElemType minValue = std::numeric_limits<ElemType>::max();

#pragma omp parallel
    {
        // Each thread scans its share of the elements...
        int localMinArg = -1;
        ElemType localMinValue = std::numeric_limits<ElemType>::max();

#pragma omp for
        for (int index = 0; index < (int) GetNumElements(); ++index)
        {
            if (localMinValue > Data()[index])
            {
                localMinArg = index;
                localMinValue = Data()[index];
            }
            // If we have more then one min value, select the one with lower index.
            else if ((localMinValue == Data()[index]) && (localMinArg > index))
            {
                localMinArg = index;
            }
        }

        // ...and the per-thread results are merged under a lock.
#pragma omp critical
        {
            if (minValue > localMinValue)
            {
                minArg = localMinArg;
                minValue = localMinValue;
            }
            // If we have more then one min value, select the one with lower index.
            // BUGFIX: skip threads that found nothing (localMinArg == -1);
            // previously such a thread could reset minArg back to -1 whenever
            // minValue happened to equal the sentinel initial value.
            else if ((minValue == localMinValue) && (localMinArg != -1) && (minArg > localMinArg))
            {
                minArg = localMinArg;
            }
        }
    }
    return minArg;
}
// Returns the linear index of the largest element, ties resolved to the
// lowest index; returns -1 for an empty matrix.
template <class ElemType>
int CPUMatrix<ElemType>::Argmax() const
{
    int maxArg = -1;
    ElemType maxValue = std::numeric_limits<ElemType>::lowest();

#pragma omp parallel
    {
        // Each thread scans its share of the elements...
        int localMaxArg = -1;
        ElemType localMaxValue = std::numeric_limits<ElemType>::lowest();

#pragma omp for
        for (int index = 0; index < (int) GetNumElements(); ++index)
        {
            if (localMaxValue < Data()[index])
            {
                localMaxArg = index;
                localMaxValue = Data()[index];
            }
            // If we have more then one max value, select the one with lower index.
            else if ((localMaxValue == Data()[index]) && (localMaxArg > index))
            {
                localMaxArg = index;
            }
        }

        // ...and the per-thread results are merged under a lock.
#pragma omp critical
        {
            if (maxValue < localMaxValue)
            {
                maxArg = localMaxArg;
                maxValue = localMaxValue;
            }
            // If we have more then one max value, select the one with lower index.
            // BUGFIX: skip threads that found nothing (localMaxArg == -1);
            // previously such a thread could reset maxArg back to -1 whenever
            // maxValue happened to equal the sentinel initial value.
            else if ((maxValue == localMaxValue) && (localMaxArg != -1) && (maxArg > localMaxArg))
            {
                maxArg = localMaxArg;
            }
        }
    }
    return maxArg;
}
// Dispatches an arg-reduction: opArgmin -> Argmin(), opArgmax -> Argmax().
// Any other operator raises InvalidArgument.
template <class ElemType>
int CPUMatrix<ElemType>::ArgOp(ElementWiseOperator reductionOp) const
{
    if (reductionOp == ElementWiseOperator::opArgmin)
        return Argmin();
    if (reductionOp == ElementWiseOperator::opArgmax)
        return Argmax();

    InvalidArgument("ArgOp: Arg reduction operations other than opArgmax, and opArgmin are not implemented.");
    return -1;
}
// Tensor-shaped arg-reduction entry point; forwards to CPUMatrixTensorArgOpImpl
// with the given dims/strides interpretation of the matrices.
template <class ElemType>
void CPUMatrix<ElemType>::TensorArgOp(const CPUMatrix<ElemType>& a, ElementWiseOperator reductionOp,
                                      const array<size_t, 2>& offsets,
                                      const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 2>& regularStrides,
                                      const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 2>& reducingStrides)
{
    CPUMatrixTensorArgOpImpl<ElemType>(a, *this, reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
}
// Scatter-adds columns of `value` into `data`:
//   data[:, indices[i * indices_step]] += alpha * value[:, i]
// NaN or negative index entries are skipped. Work is partitioned across
// threads by destination column (col % nthread), so no two threads ever write
// the same column and no locking is needed.
template <class ElemType>
void CPUMatrix<ElemType>::ScatterValues(ElemType* indices, ElemType* value, ElemType* data, ElemType alpha, size_t num_indices, size_t rows, size_t cols, size_t indices_step)
{
    if (!indices || !value || !data)
        LogicError("ScatterValues: input data is null.");

#pragma omp parallel
    {
        // BUGFIX: guard the omp_* calls with _OPENMP like the rest of this
        // file, so the function still compiles when OpenMP is disabled.
#ifdef _OPENMP
        const size_t ithread = (size_t) omp_get_thread_num();
        const size_t nthread = (size_t) omp_get_num_threads();
#else
        const size_t ithread = 0;
        const size_t nthread = 1;
#endif
        for (size_t i = 0; i < num_indices; i++) // size_t index: avoids signed/unsigned comparison
        {
            auto col_r = indices[i * indices_step];
            if (std::isnan(col_r) || col_r < 0)
                continue;
            auto col = (size_t) col_r;
            // ignore the elements that are not partitioned into this thread
            if (col % nthread != ithread)
                continue;

            if (col >= cols)
                InvalidArgument("ScatterValues: Indices map out of bounds. %ld >= %ld", (long int) col, (long int) cols);

            auto index = col * rows;
            auto offset = i * rows;
            for (size_t j = 0; j < rows; j++)
                data[index + j] = data[index + j] + alpha * value[offset + j];
        }
    }
}
// We use Matrix<char> as the backing store for QuantizedMatrix
// Let's explicitly instantiate the methods we need for that purpose
template CPUMatrix<char>::CPUMatrix(const size_t numRows, const size_t numCols);
template CPUMatrix<char>::CPUMatrix(const size_t numRows, const size_t numCols, char* pArray, const size_t matrixFlags);
template CPUMatrix<char>::CPUMatrix();
template CPUMatrix<char>::CPUMatrix(CPUMatrix<char> const&);
template CPUMatrix<char>::CPUMatrix(CPUMatrix<char>&&);
template size_t CPUMatrix<char>::LocateElement(size_t, size_t) const;
template CPUMatrix<char> CPUMatrix<char>::ColumnSlice(size_t startColumn, size_t numCols) const;
template CPUMatrix<char>& CPUMatrix<char>::operator=(CPUMatrix<char>&&);
template void CPUMatrix<char>::SetValue(const char);
template void CPUMatrix<char>::SetValue(const size_t numRows, const size_t numCols, char* pArray, size_t matrixFlags);
template void CPUMatrix<char>::SetValue(CPUMatrix<char> const&);
//template void CPUMatrix<char>::SetValue(GPUMatrix<char> const&);
//template void CPUMatrix<char>::SetValue(CPUSparseMatrix<char> const&);
//template void CPUMatrix<char>::SetValue(GPUSparseMatrix<char> const&);
template void CPUMatrix<char>::RequireSize(const size_t numRows, const size_t numCols, bool growOnly);
template void CPUMatrix<char>::Resize(const size_t numRows, const size_t numCols, bool growOnly);
template char* CPUMatrix<char>::CopyToArray(void) const;
template void CPUMatrix<char>::CopySection(size_t numRows, size_t numCols, char* dst, size_t colStride) const;
template void CPUMatrix<char>::Reshape(const size_t, const size_t);

// Support <short> — same minimal method set as the <char> instantiation above.
template CPUMatrix<short>::CPUMatrix(const size_t numRows, const size_t numCols);
template CPUMatrix<short>::CPUMatrix(const size_t numRows, const size_t numCols, short* pArray, const size_t matrixFlags);
template CPUMatrix<short>::CPUMatrix();
template CPUMatrix<short>::CPUMatrix(CPUMatrix<short> const&);
template CPUMatrix<short>::CPUMatrix(CPUMatrix<short>&&);
template size_t CPUMatrix<short>::LocateElement(size_t, size_t) const;
template CPUMatrix<short> CPUMatrix<short>::ColumnSlice(size_t startColumn, size_t numCols) const;
template CPUMatrix<short>& CPUMatrix<short>::operator=(CPUMatrix<short>&&);
template void CPUMatrix<short>::SetValue(const short);
template void CPUMatrix<short>::SetValue(const size_t numRows, const size_t numCols, short* pArray, size_t matrixFlags);
template void CPUMatrix<short>::SetValue(CPUMatrix<short> const&);
//template void CPUMatrix<short>::SetValue(GPUMatrix<short> const&);
//template void CPUMatrix<short>::SetValue(CPUSparseMatrix<short> const&);
//template void CPUMatrix<short>::SetValue(GPUSparseMatrix<short> const&);
template void CPUMatrix<short>::RequireSize(const size_t numRows, const size_t numCols, bool growOnly);
template void CPUMatrix<short>::Resize(const size_t numRows, const size_t numCols, bool growOnly);
template short* CPUMatrix<short>::CopyToArray(void) const;
template void CPUMatrix<short>::CopySection(size_t numRows, size_t numCols, short* dst, size_t colStride) const;
template void CPUMatrix<short>::Reshape(const size_t, const size_t);

// Support <int> — only this single constructor is instantiated.
template CPUMatrix<int>::CPUMatrix(const size_t, const size_t, int*, const size_t);
}}}
|
needle.c | #define LIMIT -999
#define TRACE
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "openacc.h"
//#define OPENMP
//#define NUM_THREAD 4
#define DEBUG
#ifndef VERIFICATION
#define VERIFICATION 1
#endif
#ifndef _MAX_ROWS_
#define _MAX_ROWS_ 2049
#ifdef _OPENARC_
#pragma openarc #define _MAX_ROWS_ 2049
#endif
#endif
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
/* Return the largest of the three integers a, b and c. */
int maximum( int a,
             int b,
             int c){

    int best = (a <= b) ? b : a;   /* larger of a and b */
    return (best <= c) ? c : best; /* compare the winner against c */
}
/* BLOSUM62 amino-acid substitution score matrix (24 x 24), used as the
 * reference scores for the Needleman-Wunsch alignment. */
int blosum62[24][24] = {
{ 4, -1, -2, -2,  0, -1, -1,  0, -2, -1, -1, -1, -1, -2, -1,  1,  0, -3, -2,  0, -2, -1,  0, -4},
{-1,  5,  0, -2, -3,  1,  0, -2,  0, -3, -2,  2, -1, -3, -2, -1, -1, -3, -2, -3, -1,  0, -1, -4},
{-2,  0,  6,  1, -3,  0,  0,  0,  1, -3, -3,  0, -2, -3, -2,  1,  0, -4, -2, -3,  3,  0, -1, -4},
{-2, -2,  1,  6, -3,  0,  2, -1, -1, -3, -4, -1, -3, -3, -1,  0, -1, -4, -3, -3,  4,  1, -1, -4},
{ 0, -3, -3, -3,  9, -3, -4, -3, -3, -1, -1, -3, -1, -2, -3, -1, -1, -2, -2, -1, -3, -3, -2, -4},
{-1,  1,  0,  0, -3,  5,  2, -2,  0, -3, -2,  1,  0, -3, -1,  0, -1, -2, -1, -2,  0,  3, -1, -4},
{-1,  0,  0,  2, -4,  2,  5, -2,  0, -3, -3,  1, -2, -3, -1,  0, -1, -3, -2, -2,  1,  4, -1, -4},
{ 0, -2,  0, -1, -3, -2, -2,  6, -2, -4, -4, -2, -3, -3, -2,  0, -2, -2, -3, -3, -1, -2, -1, -4},
{-2,  0,  1, -1, -3,  0,  0, -2,  8, -3, -3, -1, -2, -1, -2, -1, -2, -2,  2, -3,  0,  0, -1, -4},
{-1, -3, -3, -3, -1, -3, -3, -4, -3,  4,  2, -3,  1,  0, -3, -2, -1, -3, -1,  3, -3, -3, -1, -4},
{-1, -2, -3, -4, -1, -2, -3, -4, -3,  2,  4, -2,  2,  0, -3, -2, -1, -2, -1,  1, -4, -3, -1, -4},
{-1,  2,  0, -1, -3,  1,  1, -2, -1, -3, -2,  5, -1, -3, -1,  0, -1, -3, -2, -2,  0,  1, -1, -4},
{-1, -1, -2, -3, -1,  0, -2, -3, -2,  1,  2, -1,  5,  0, -2, -1, -1, -1, -1,  1, -3, -1, -1, -4},
{-2, -3, -3, -3, -2, -3, -3, -3, -1,  0,  0, -3,  0,  6, -4, -2, -2,  1,  3, -1, -3, -3, -1, -4},
{-1, -2, -2, -1, -3, -1, -1, -2, -2, -3, -3, -1, -2, -4,  7, -1, -1, -4, -3, -2, -2, -1, -2, -4},
{ 1, -1,  1,  0, -1,  0,  0,  0, -1, -2, -2,  0, -1, -2, -1,  4,  1, -3, -2, -2,  0,  0,  0, -4},
{ 0, -1,  0, -1, -1, -1, -1, -2, -2, -1, -1, -1, -1, -2, -1,  1,  5, -2, -2,  0, -1, -1,  0, -4},
{-3, -3, -4, -4, -2, -2, -3, -2, -2, -3, -2, -3, -1,  1, -4, -3, -2, 11,  2, -3, -4, -3, -2, -4},
{-2, -2, -2, -3, -2, -1, -2, -3,  2, -1, -1, -2, -1,  3, -3, -2, -2,  2,  7, -1, -3, -2, -1, -4},
{ 0, -3, -3, -3, -1, -2, -2, -3, -3,  3,  1, -2,  1, -1, -2, -2,  0, -3, -1,  4, -3, -2, -1, -4},
{-2, -1,  3,  4, -3,  0,  1, -1,  0, -3, -4,  0, -3, -3, -2,  0, -1, -4, -3, -3,  4,  1, -1, -4},
{-1,  0,  0,  1, -3,  3,  4, -2,  0, -3, -3,  1, -1, -3, -1,  0, -1, -3, -2, -2,  1,  4, -1, -4},
{ 0, -1, -1, -1, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2,  0,  0, -2, -1, -1, -1, -1, -1, -4},
{-4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4,  1}
};

/* Problem size (matrix dimensions) and gap penalty, set from the command line. */
int max_rows, max_cols, penalty;
/* Requested OpenMP thread count (CPU path), set from the command line. */
int omp_num_threads;
/* Current wall-clock time in seconds, with microsecond resolution. */
double gettime() {
    struct timeval now;
    gettimeofday(&now, 0);
    return (double) now.tv_sec + (double) now.tv_usec * 1e-6;
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Runs the benchmark (runTest) and reports the total wall-clock time.
int
main( int argc, char** argv)
{
    double start_time, end_time;
    start_time = gettime();
    runTest( argc, argv);
    end_time = gettime();
    printf("Total Execution Time %lf sec. \n", end_time - start_time);
    return EXIT_SUCCESS;
}
/* Print command-line usage to stderr and terminate with exit status 1. */
void usage(int argc, char **argv)
{
    static const char *details[] = {
        "\t<dimension> - x and y dimensions\n",
        "\t<penalty> - penalty(positive integer)\n",
        "\t<num_threads> - no. of threads\n",
    };
    size_t d;

    fprintf(stderr, "Usage: %s <max_rows/max_cols> <penalty> <num_threads>\n", argv[0]);
    for (d = 0; d < sizeof(details) / sizeof(details[0]); d++)
        fputs(details[d], stderr);
    exit(1);
}
/* OpenACC (accelerator) version of the Needleman-Wunsch score computation.
 * input_itemsets: the (max_rows x max_cols) DP score matrix (borders prefilled
 *                 by the caller); updated in place.
 * referrence:     per-cell substitution scores (blosum62 lookups).
 * Each DP cell depends on its N, W and NW neighbours, so the matrix is swept
 * along anti-diagonals: all cells on one diagonal are independent and form one
 * parallel kernel launch. */
void mainComp(int input_itemsets[_MAX_ROWS_*_MAX_ROWS_], int referrence[_MAX_ROWS_*_MAX_ROWS_])
{
    int i, idx, index;
    /////////////////////////////////
    // Used for inlining maximum() //
    /////////////////////////////////
    int a, b, c, k;
    long int iSum;

#pragma acc data \
    copy(input_itemsets[0:_MAX_ROWS_*_MAX_ROWS_]) \
    copyin(referrence[0:_MAX_ROWS_*_MAX_ROWS_])
    {
        /* Top-left triangle: diagonal i contains i+1 independent cells.
         * NOTE(review): a, b, c, k and index are declared outside the
         * `independent` loop; the OpenACC compiler presumably privatizes these
         * scalars per iteration — confirm, otherwise this would be a race. */
        for( i = 0 ; i < max_cols-2 ; i++){
#pragma acc kernels loop gang worker independent
            for( idx = 0 ; idx <= i ; idx++){
                index = (idx + 1) * max_cols + (i + 1 - idx);
                // input_itemsets[index]= maximum( input_itemsets[index-1-max_cols]+ referrence[index],
                //                                 input_itemsets[index-1] - penalty,
                //                                 input_itemsets[index-max_cols] - penalty);
                /* Inlined maximum() of the three DP candidates. */
                a = input_itemsets[index-1-max_cols]+ referrence[index];
                b = input_itemsets[index-1] - penalty;
                c = input_itemsets[index-max_cols] - penalty;
                if( a <= b )
                    k = b;
                else
                    k = a;
                if( k <=c )
                    input_itemsets[index] = c;
                else
                    input_itemsets[index] = k;
            }
        }
        printf("Processing bottom-right matrix\n");
        //Compute bottom-right matrix
        /* Bottom-right triangle: diagonals shrink back down to a single cell. */
        for( i = max_cols - 4 ; i >= 0 ; i--){
#pragma acc kernels loop gang worker independent
            for( idx = 0 ; idx <= i ; idx++){
                index = ( max_cols - idx - 2 ) * max_cols + idx + max_cols - i - 2 ;
                //input_itemsets[index]= maximum( input_itemsets[index-1-max_cols]+ referrence[index],
                //                                input_itemsets[index-1] - penalty,
                //                                input_itemsets[index-max_cols] - penalty);
                /* Inlined maximum() of the three DP candidates. */
                a = input_itemsets[index-1-max_cols]+ referrence[index];
                b = input_itemsets[index-1] - penalty;
                c = input_itemsets[index-max_cols] - penalty;
                if( a <= b )
                    k = b;
                else
                    k = a;
                if( k <=c )
                    input_itemsets[index] = c;
                else
                    input_itemsets[index] = k;
            }
        }
    }

    //Fake computation to measure timing of unified memory version.
    iSum = 0;
    for( i=0; i<_MAX_ROWS_*_MAX_ROWS_; i++ )
        iSum += input_itemsets[i];
    printf("Sum of input_itemsets: %ld\n", iSum);
}
/*
 * CPU (OpenMP) reference version of the Needleman-Wunsch score computation,
 * used to verify the accelerator results.  Same anti-diagonal sweep as
 * mainComp(): cells on one anti-diagonal are independent and are computed
 * in parallel.
 *
 * input_itemsets : score matrix, boundary row/column pre-initialized.
 * referrence     : per-cell match scores.
 *
 * Uses the file-scope globals max_cols and penalty.
 */
void mainCompCPU(int input_itemsets[_MAX_ROWS_*_MAX_ROWS_], int referrence[_MAX_ROWS_*_MAX_ROWS_])
{
    int i, idx, index;
    /////////////////////////////////
    // Used for inlining maximum() //
    /////////////////////////////////
    int a, b, c, k;
    //Compute top-left matrix
    for( i = 0 ; i < max_cols-2 ; i++){
#ifdef _OPENMP
        //omp_set_num_threads(omp_num_threads);
        /* FIX: a, b, c and k were missing from the private clause; as
           function-scope scalars they defaulted to shared, so concurrent
           iterations raced on them and could compute wrong maxima. */
#pragma omp parallel for shared(input_itemsets) firstprivate(i,max_cols,penalty) private(idx, index, a, b, c, k)
#endif
        for( idx = 0 ; idx <= i ; idx++){
            index = (idx + 1) * max_cols + (i + 1 - idx);
            // input_itemsets[index]= maximum( input_itemsets[index-1-max_cols]+ referrence[index],
            //                                 input_itemsets[index-1] - penalty,
            //                                 input_itemsets[index-max_cols] - penalty);
            a = input_itemsets[index-1-max_cols]+ referrence[index];
            b = input_itemsets[index-1] - penalty;
            c = input_itemsets[index-max_cols] - penalty;
            if( a <= b )
                k = b;
            else
                k = a;
            if( k <=c )
                input_itemsets[index] = c;
            else
                input_itemsets[index] = k;
        }
    }
    //Compute bottom-right matrix
    for( i = max_cols - 4 ; i >= 0 ; i--){
#ifdef _OPENMP
        //omp_set_num_threads(omp_num_threads);
        /* FIX: same missing privatization as above. */
#pragma omp parallel for shared(input_itemsets) firstprivate(i,max_cols,penalty) private(idx, index, a, b, c, k)
#endif
        for( idx = 0 ; idx <= i ; idx++){
            index = ( max_cols - idx - 2 ) * max_cols + idx + max_cols - i - 2 ;
            a = input_itemsets[index-1-max_cols]+ referrence[index];
            b = input_itemsets[index-1] - penalty;
            c = input_itemsets[index-max_cols] - penalty;
            if( a <= b )
                k = b;
            else
                k = a;
            if( k <=c )
                input_itemsets[index] = c;
            else
                input_itemsets[index] = k;
        }
    }
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
/*
 * Driver for the Needleman-Wunsch benchmark: parse arguments, build the
 * input sequences and BLOSUM62 reference matrix, run the accelerated
 * computation (mainComp), optionally verify against the CPU version and
 * dump the traceback (TRACE).
 *
 * argv: <max_rows/max_cols> <penalty> <num_threads>; max_rows must equal
 * _MAX_ROWS_ - 1 because the buffers are statically sized by that macro.
 */
void
runTest( int argc, char** argv)
{
    int *input_itemsets, *output_itemsets, *referrence;
    int i,j;
#ifdef DEBUG
    double start_time, end_time, init_time;
#endif
#ifdef TRACE
    FILE *fp;
#endif
    // the lengths of the two sequences should be able to divided by 16.
    // And at current stage max_rows needs to equal max_cols
    if (argc == 4)
    {
        max_rows = atoi(argv[1]);
        max_cols = atoi(argv[1]);
        penalty = atoi(argv[2]);
        omp_num_threads = atoi(argv[3]);
        if( max_rows != (_MAX_ROWS_-1) ) {
            printf("Wrong value (%d) for macro, _MAX_ROWS_!\n", _MAX_ROWS_);
            return;
        }
    }
    else{
        usage(argc, argv);
    }
    // One extra row/column holds the boundary (gap-penalty) cells.
    max_rows = max_rows + 1;
    max_cols = max_cols + 1;
#ifdef DEBUG
    start_time = gettime();
#endif
    //referrence = (int *)malloc( max_rows * max_cols * sizeof(int) );
    //input_itemsets = (int *)malloc( max_rows * max_cols * sizeof(int) );
    referrence = (int *)acc_create_unified(NULL, max_rows * max_cols * sizeof(int) );
    input_itemsets = (int *)acc_create_unified(NULL, max_rows * max_cols * sizeof(int) );
#ifdef DEBUG
    init_time = gettime() - start_time;
#endif
    output_itemsets = (int *)malloc( max_rows * max_cols * sizeof(int) );
    /* FIX: abort on any failed allocation; the original only checked
       input_itemsets and then fell through into a crash. */
    if (!referrence || !input_itemsets || !output_itemsets) {
        fprintf(stderr, "error: can not allocate memory");
        return;
    }
    srand ( 7 );
    // Zero the whole matrix (max_rows == max_cols here, so the i/j bounds
    // are interchangeable).
    for (i = 0 ; i < max_cols; i++){
        for (j = 0 ; j < max_rows; j++){
            input_itemsets[i*max_cols+j] = 0;
        }
    }
    printf("Start Needleman-Wunsch\n");
    // Random residues for the two sequences, stored in column 0 and row 0.
    for( i=1; i< max_rows ; i++){    //please define your own sequence.
        input_itemsets[i*max_cols] = rand() % 10 + 1;
    }
    for( j=1; j< max_cols ; j++){    //please define your own sequence.
        input_itemsets[j] = rand() % 10 + 1;
    }
    // Substitution scores from the BLOSUM62 table for every cell.
    for (i = 1 ; i < max_cols; i++){
        for (j = 1 ; j < max_rows; j++){
            referrence[i*max_cols+j] = blosum62[input_itemsets[i*max_cols]][input_itemsets[j]];
        }
    }
    // Boundary cells carry cumulative gap penalties.
    for( i = 1; i< max_rows ; i++)
        input_itemsets[i*max_cols] = -i * penalty;
    for( j = 1; j< max_cols ; j++)
        input_itemsets[j] = -j * penalty;
    //Compute top-left matrix
    printf("Num of threads: %d\n", omp_num_threads);
    printf("Processing top-left matrix\n");
#ifdef DEBUG
    start_time = gettime();
#endif
    mainComp(input_itemsets, referrence);
#ifdef DEBUG
    end_time = gettime();
    printf("Accelerator Elapsed Time = %lf sec. \n", end_time - start_time + init_time);
#endif
    if(VERIFICATION) {
        int *input_itemsets_CPU;
        double deltaL2Norm = 0;
        double nonAccL2Norm = 0;
        double L2Norm;
        input_itemsets_CPU = (int *)malloc( max_rows * max_cols * sizeof(int) );
        // Rebuild the same input (same seed) for the CPU reference run.
        srand ( 7 );
        for (i = 0 ; i < max_cols; i++){
            for (j = 0 ; j < max_rows; j++){
                input_itemsets_CPU[i*max_cols+j] = 0;
            }
        }
        for( i=1; i< max_rows ; i++){    //please define your own sequence.
            input_itemsets_CPU[i*max_cols] = rand() % 10 + 1;
        }
        for( j=1; j< max_cols ; j++){    //please define your own sequence.
            input_itemsets_CPU[j] = rand() % 10 + 1;
        }
        for( i = 1; i< max_rows ; i++)
            input_itemsets_CPU[i*max_cols] = -i * penalty;
        for( j = 1; j< max_cols ; j++)
            input_itemsets_CPU[j] = -j * penalty;
#ifdef DEBUG
        start_time = gettime();
#endif
        mainCompCPU(input_itemsets_CPU, referrence);
#ifdef DEBUG
        end_time = gettime();
        printf("Main Comp. Time CPU = %lf sec. \n", end_time - start_time);
#endif
        // Relative L2 norm of the accelerator/CPU difference.
        for (i = 0; i < max_rows * max_cols; ++i) {
            double d = input_itemsets_CPU[i] - input_itemsets[i];
            deltaL2Norm += d * d;
            nonAccL2Norm += input_itemsets_CPU[i] * input_itemsets_CPU[i];
        }
        L2Norm = sqrt(deltaL2Norm / nonAccL2Norm);
        if (L2Norm < 1e-9) {
            printf("Verification: Successful\n");
        } else {
            printf("Verification: Failed\n");
        }
        printf("L2Norm = %lf\n", L2Norm);
        free(input_itemsets_CPU);
    }
#ifdef TRACE
    printf("print traceback value CPU:\n");
    if( (fp = fopen("nwTrace.txt", "w")) == 0 ) {
        printf("Can not open %s\n", "nwTrace.txt");
        return;
    }
    //int i, j;
    /* FIX: condition was "i>=0, j>=0" -- the comma operator discarded the
       i>=0 test; both indices must stay non-negative. */
    for (i = j = max_rows - 2; i >= 0 && j >= 0;){
        int nw, n, w, traceback;
        if ( i == max_rows - 2 && j == max_rows - 2 )
            fprintf(fp, "%d ", input_itemsets[ i * max_cols + j]); //print the first element
        if ( i == 0 && j == 0 )
            break;
        if ( i > 0 && j > 0 ){
            nw = input_itemsets[(i - 1) * max_cols + j - 1];
            w = input_itemsets[ i * max_cols + j - 1 ];
            n = input_itemsets[(i - 1) * max_cols + j];
        }
        else if ( i == 0 ){
            nw = n = LIMIT;
            w = input_itemsets[ i * max_cols + j - 1 ];
        }
        else if ( j == 0 ){
            nw = w = LIMIT;
            n = input_itemsets[(i - 1) * max_cols + j];
        }
        else{
            // unreachable: the (0,0) case breaks out of the loop above
        }
        traceback = maximum(nw, w, n);
        fprintf(fp, "%d ", traceback);
        if(traceback == nw )
        {i--; j--; continue;}
        else if(traceback == w )
        {j--; continue;}
        else if(traceback == n )
        {i--; continue;}
        else
            ;
    }
    fprintf(fp, "\n");
    fclose(fp);
#endif
    /* FIX: output_itemsets was allocated but never used nor freed.  The
       unified buffers come from acc_create_unified(); no matching release
       API is visible here, so they are left to the OpenACC runtime. */
    free(output_itemsets);
}
|
app_main.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "bmp_interface.h"
extern int __htc_get_unit_count();
extern int global_radius;
/*
 * Stencil-convolution demo: load a BMP image, run a 3x3 Laplacian stencil
 * on the host and on the coprocessor (OpenMP target region), compare the
 * results, then save and display the coprocessor output as "new_<name>".
 *
 * argv[1] (optional): input BMP file name; defaults to "lena512.bmp".
 * Returns 0 on completion (verification mismatch is reported only), 1 on
 * allocation failure.
 */
int app_main(int argc, char **argv) {
  global_radius = 1;

  char *filename;
  char *newfilename;
  char *system_command;

  if (argc != 2) {
    filename = "lena512.bmp";
  } else {
    filename = argv[1];
  }

  /* "new_" + filename + NUL; snprintf replaces the old strcpy/strcpy pair
     and the allocation is now checked. */
  size_t newname_size = strlen("new_") + strlen(filename) + 1;
  newfilename = (char *)malloc(newname_size);
  if (!newfilename) {
    fprintf(stderr, "error: out of memory\n");
    return 1;
  }
  snprintf(newfilename, newname_size, "new_%s", filename);

  // Load image
  bmap_t bmap = load_bmp(filename);
  uint32_t rows = get_bmp_num_rows(bmap);
  uint32_t cols = get_bmp_num_cols(bmap);
  uint32_t arows = rows+2*global_radius;   // image padded by the halo radius
  uint32_t acols = cols+2*global_radius;
  uint32_t bufsize = arows*acols;
  printf("dimensions are %d by %d\n", rows, cols);

  // Get image buffer
  uint8_t *image = get_bmp_buffer(bmap);

  // Process
  // NOTE(review): the kernel is uint8_t, so -1 is stored as 255; presumably
  // rhomp_stencil_conv2ds reinterprets the taps as signed -- confirm.
  uint8_t kernel3x3[] = {
    // Identity (sanity test)
    // 0, 0, 0,
    // 0, 1, 0,
    // 0, 0, 0
    //
    // Sobel 3x3
    // -1, 0, 1,
    // -2, 0, 2,
    // -1, 0, 1
    // Laplace (common discrete approx 2)
    -1, -1, -1,
    -1, 8, -1,
    -1, -1, -1
  };

  // Allocate target temp buffer.
  extern void *stencil_cp_alloc(size_t);
  uint8_t *unew = (uint8_t *)stencil_cp_alloc(bufsize);
  uint8_t *uhost = (uint8_t *)malloc(bufsize);
  if (!unew || !uhost) {
    fprintf(stderr, "error: out of memory\n");
    free(uhost);
    free(newfilename);
    return 1;
  }
  memset(unew, 0xff, bufsize); // Make cg bugs obvious in output image.
  memset(uhost, 0xff, bufsize);

  // Generate a host version of the stencil for expected results testing.
  rhomp_stencil_conv2ds(uhost, image, acols, arows, 3, kernel3x3, 8192);

#pragma omp target teams num_teams(1)
  {
    rhomp_stencil_conv2ds(unew, image, acols, arows, 3, kernel3x3, 2);
  } /* end omp_target */

  if (memcmp(uhost, unew, bufsize) == 0) {
    printf("Coproc matches host.\nPASSED\n");
  } else {
    printf("FAILED: coproc does not match host.\n");
  }

  // Update and Save
  set_bmp_from_buffer(bmap, unew);
  save_bmp(bmap, newfilename);

  // Display
  char *viewer = "firefox ";   /* trailing space separates command and file */
  //char *viewer = "shotwell ";
  size_t cmd_size = strlen(viewer) + strlen(newfilename) + 1;
  system_command = (char *)malloc(cmd_size);
  if (system_command) {
    snprintf(system_command, cmd_size, "%s%s", viewer, newfilename);
    /* SECURITY: newfilename derives from argv[1]; passing it to system()
       permits shell injection if the name is untrusted. */
    system(system_command);
  }

  /* FIX: release heap buffers that previously leaked.  unew comes from
     stencil_cp_alloc(); no matching release API is visible here, so it is
     intentionally left alone. */
  free(uhost);
  free(system_command);
  free(newfilename);
  return 0;
}
|
polybench.c | #include "polybench.h"
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <assert.h>
#include <time.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sched.h>
#include <math.h>
#ifdef _OPENMP
# include <omp.h>
#endif
/* By default, collect PAPI counters on thread 0. */
#ifndef POLYBENCH_THREAD_MONITOR
# define POLYBENCH_THREAD_MONITOR 0
#endif
/* Total LLC cache size. By default 32+MB.. */
#ifndef POLYBENCH_CACHE_SIZE_KB
# define POLYBENCH_CACHE_SIZE_KB 32770
#endif
int polybench_papi_counters_threadid = POLYBENCH_THREAD_MONITOR;
double polybench_program_total_flops = 0;
#ifdef POLYBENCH_PAPI
# include <papi.h>
# define POLYBENCH_MAX_NB_PAPI_COUNTERS 96
char* _polybench_papi_eventlist[] = {
#include "papi_counters.list"
NULL
};
int polybench_papi_eventset;
int polybench_papi_eventlist[POLYBENCH_MAX_NB_PAPI_COUNTERS];
long_long polybench_papi_values[POLYBENCH_MAX_NB_PAPI_COUNTERS];
#endif
/* Timer code (gettimeofday). */
double polybench_t_start, polybench_t_end;
/* Timer code (RDTSC). */
unsigned long long int polybench_c_start, polybench_c_end;
/* Wall-clock time in seconds (gettimeofday); compiled down to a constant 0
   unless POLYBENCH_TIME or POLYBENCH_GFLOPS is enabled. */
static
double rtclock()
{
#if defined(POLYBENCH_TIME) || defined(POLYBENCH_GFLOPS)
    struct timeval tv;
    const int rc = gettimeofday (&tv, NULL);
    if (rc != 0)
      printf ("Error return from gettimeofday: %d", rc);
    return (tv.tv_sec + tv.tv_usec * 1.0e-6);
#else
    return 0;
#endif
}
#ifdef POLYBENCH_CYCLE_ACCURATE_TIMER
/* Read the x86 time-stamp counter (cycles since reset).  x86-only: RDTSC
 * places the low/high 32 bits in EAX/EDX, recombined into a 64-bit value. */
static
unsigned long long int rdtsc()
{
  unsigned long long int ret = 0;
  unsigned int cycles_lo;
  unsigned int cycles_hi;
  __asm__ volatile ("RDTSC" : "=a" (cycles_lo), "=d" (cycles_hi));
  ret = (unsigned long long int)cycles_hi << 32 | cycles_lo;
  return ret;
}
#endif
/* Evict the last-level cache before a measurement: stream over a buffer
 * larger than the LLC and consume the sum so the loop is not elided. */
void polybench_flush_cache()
{
  int count = POLYBENCH_CACHE_SIZE_KB * 1024 / sizeof(double);
  double* scratch = (double*) calloc (count, sizeof(double));
  double acc = 0.0;
  int idx;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:acc) private(idx)
#endif
  for (idx = 0; idx < count; idx++)
    acc += scratch[idx];
  assert (acc <= 10.0);
  free (scratch);
}
#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
/* Switch this process to the real-time FIFO scheduling class to limit OS
 * interference during measurement.  Requires root; Linux-only. */
void polybench_linux_fifo_scheduler()
{
  struct sched_param sp;
  sp.sched_priority = sched_get_priority_max (SCHED_FIFO);
  sched_setscheduler (0, SCHED_FIFO, &sp);
}
/* Restore the default (SCHED_OTHER) scheduling policy after measurement. */
void polybench_linux_standard_scheduler()
{
  struct sched_param sp;
  sp.sched_priority = sched_get_priority_max (SCHED_OTHER);
  sched_setscheduler (0, SCHED_OTHER, &sp);
}
#endif
#ifdef POLYBENCH_PAPI
static
void test_fail(char *file, int line, char *call, int retval)
{
  /* Report a failed PAPI call and terminate the program.
   * file/line identify the call site, call names the PAPI function,
   * retval is the PAPI return code being diagnosed. */
  char buf[128];
  memset(buf, '\0', sizeof(buf));
  /* Banner: any nonzero code is reported as FAILED, zero as SKIPPED. */
  if (retval != 0)
    fprintf (stdout,"%-40s FAILED\nLine # %d\n", file, line);
  else
    {
      fprintf (stdout,"%-40s SKIPPED\n", file);
      fprintf (stdout,"Line # %d\n", line);
    }
  if (retval == PAPI_ESYS)
    {
      /* errno-backed failure: let perror append the system message. */
      sprintf (buf, "System error in %s", call);
      perror (buf);
    }
  else if (retval > 0)
    fprintf (stdout,"Error: %s\n", call);
  else if (retval == 0)
    fprintf (stdout,"Error: %s\n", call);
  else
    {
      /* Other negative codes: translate through the PAPI error string. */
      char errstring[PAPI_MAX_STR_LEN];
      PAPI_perror (retval, errstring, PAPI_MAX_STR_LEN);
      fprintf (stdout,"Error in %s: %s\n", call, errstring);
    }
  fprintf (stdout,"\n");
  /* Release PAPI state before exiting, if it was ever initialized. */
  if (PAPI_is_initialized ())
    PAPI_shutdown ();
  exit (1);
}
/* Initialize the PAPI library and build the event-code list from
 * papi_counters.list.  Under OpenMP only the designated monitor thread
 * (polybench_papi_counters_threadid, clamped to the team size first)
 * performs the initialization. */
void polybench_papi_init()
{
# ifdef _OPENMP
#pragma omp parallel
  {
#pragma omp master
    {
      /* Clamp the monitor thread id to the number of available threads. */
      if (omp_get_max_threads () < polybench_papi_counters_threadid)
        polybench_papi_counters_threadid = omp_get_max_threads () - 1;
    }
#pragma omp barrier
    if (omp_get_thread_num () == polybench_papi_counters_threadid)
      {
# endif
        int retval;
        polybench_papi_eventset = PAPI_NULL;
        if ((retval = PAPI_library_init (PAPI_VER_CURRENT)) != PAPI_VER_CURRENT)
          test_fail (__FILE__, __LINE__, "PAPI_library_init", retval);
        if ((retval = PAPI_create_eventset (&polybench_papi_eventset))
            != PAPI_OK)
          test_fail (__FILE__, __LINE__, "PAPI_create_eventset", retval);
        int k;
        /* Translate each configured event name into its PAPI event code. */
        for (k = 0; _polybench_papi_eventlist[k]; ++k)
          {
            if ((retval =
                 PAPI_event_name_to_code (_polybench_papi_eventlist[k],
                                          &(polybench_papi_eventlist[k])))
                != PAPI_OK)
              test_fail (__FILE__, __LINE__, "PAPI_event_name_to_code", retval);
          }
        /* Zero-terminate the code list (iterated in polybench_papi_print). */
        polybench_papi_eventlist[k] = 0;
# ifdef _OPENMP
      }
  }
#pragma omp barrier
# endif
}
/* Tear down PAPI: destroy the event set and shut the library down.
 * Under OpenMP only the monitor thread performs the teardown. */
void polybench_papi_close()
{
# ifdef _OPENMP
#pragma omp parallel
  {
    if (omp_get_thread_num () == polybench_papi_counters_threadid)
      {
# endif
        int retval;
        if ((retval = PAPI_destroy_eventset (&polybench_papi_eventset))
            != PAPI_OK)
          test_fail (__FILE__, __LINE__, "PAPI_destroy_eventset", retval);
        if (PAPI_is_initialized ())
          PAPI_shutdown ();
# ifdef _OPENMP
      }
  }
#pragma omp barrier
# endif
}
/* Start counting event number evid (index into polybench_papi_eventlist).
 * The cache is flushed first so every measurement starts cold.  Under
 * OpenMP only the monitor thread touches the event set.  Returns 0. */
int polybench_papi_start_counter(int evid)
{
# ifndef POLYBENCH_NO_FLUSH_CACHE
  polybench_flush_cache();
# endif
# ifdef _OPENMP
# pragma omp parallel
  {
    if (omp_get_thread_num () == polybench_papi_counters_threadid)
      {
# endif
        int retval = 1;
        char descr[PAPI_MAX_STR_LEN];
        PAPI_event_info_t evinfo;
        /* descr is resolved for diagnostics; only the code is added below. */
        PAPI_event_code_to_name (polybench_papi_eventlist[evid], descr);
        if (PAPI_add_event (polybench_papi_eventset,
                            polybench_papi_eventlist[evid]) != PAPI_OK)
          test_fail (__FILE__, __LINE__, "PAPI_add_event", 1);
        if (PAPI_get_event_info (polybench_papi_eventlist[evid], &evinfo)
            != PAPI_OK)
          test_fail (__FILE__, __LINE__, "PAPI_get_event_info", retval);
        if ((retval = PAPI_start (polybench_papi_eventset)) != PAPI_OK)
          test_fail (__FILE__, __LINE__, "PAPI_start", retval);
# ifdef _OPENMP
      }
  }
#pragma omp barrier
# endif
  return 0;
}
/* Stop counting event evid: read the counter into polybench_papi_values,
 * stop the event set and remove the event so the next counter can be
 * added.  Under OpenMP only the monitor thread touches the event set. */
void polybench_papi_stop_counter(int evid)
{
# ifdef _OPENMP
# pragma omp parallel
  {
    if (omp_get_thread_num () == polybench_papi_counters_threadid)
      {
# endif
        int retval;
        long_long values[1];
        values[0] = 0;
        if ((retval = PAPI_read (polybench_papi_eventset, &values[0]))
            != PAPI_OK)
          test_fail (__FILE__, __LINE__, "PAPI_read", retval);
        if ((retval = PAPI_stop (polybench_papi_eventset, NULL)) != PAPI_OK)
          test_fail (__FILE__, __LINE__, "PAPI_stop", retval);
        /* Record the counter value for polybench_papi_print. */
        polybench_papi_values[evid] = values[0];
        if ((retval = PAPI_remove_event
             (polybench_papi_eventset,
              polybench_papi_eventlist[evid])) != PAPI_OK)
          test_fail (__FILE__, __LINE__, "PAPI_remove_event", retval);
# ifdef _OPENMP
      }
  }
#pragma omp barrier
# endif
}
/* Print all collected counter values on one line; with
 * POLYBENCH_PAPI_VERBOSE each value is prefixed by its event name.
 * Under OpenMP only the monitor thread prints. */
void polybench_papi_print()
{
  int verbose = 0;
# ifdef _OPENMP
# pragma omp parallel
  {
    if (omp_get_thread_num() == polybench_papi_counters_threadid)
      {
#ifdef POLYBENCH_PAPI_VERBOSE
        verbose = 1;
#endif
        if (verbose)
          printf ("On thread %d:\n", polybench_papi_counters_threadid);
#endif
        int evid;
        /* polybench_papi_eventlist is zero-terminated (set in *_init). */
        for (evid = 0; polybench_papi_eventlist[evid] != 0; ++evid)
          {
            if (verbose)
              printf ("%s=", _polybench_papi_eventlist[evid]);
            printf ("%llu ", polybench_papi_values[evid]);
            if (verbose)
              printf ("\n");
          }
        printf ("\n");
# ifdef _OPENMP
      }
  }
#pragma omp barrier
# endif
}
#endif
/* ! POLYBENCH_PAPI */
/* Pre-measurement hook: flush the LLC so timings start from a cold cache,
 * and (optionally) switch to the FIFO scheduler to reduce OS noise. */
void polybench_prepare_instruments()
{
#ifndef POLYBENCH_NO_FLUSH_CACHE
  polybench_flush_cache ();
#endif
#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
  polybench_linux_fifo_scheduler ();
#endif
}
/* Begin a measurement: prepare the machine, then record the start stamp
 * (seconds by default, raw cycles with POLYBENCH_CYCLE_ACCURATE_TIMER). */
void polybench_timer_start()
{
  polybench_prepare_instruments ();
#ifndef POLYBENCH_CYCLE_ACCURATE_TIMER
  polybench_t_start = rtclock ();
#else
  polybench_c_start = rdtsc ();
#endif
}
/* End a measurement: record the stop stamp and restore the standard
 * scheduler if the FIFO scheduler was engaged at start. */
void polybench_timer_stop()
{
#ifndef POLYBENCH_CYCLE_ACCURATE_TIMER
  polybench_t_end = rtclock ();
#else
  polybench_c_end = rdtsc ();
#endif
#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
  polybench_linux_standard_scheduler ();
#endif
}
/* Report the measurement taken by polybench_timer_start/stop on stdout:
 * - with POLYBENCH_GFLOPS: GFLOP/s (requires polybench_set_program_flops);
 * - otherwise: elapsed seconds, or raw cycles with the RDTSC timer. */
void polybench_timer_print()
{
#ifdef POLYBENCH_GFLOPS
  if (polybench_program_total_flops == 0)
    {
      printf ("[PolyBench][WARNING] Program flops not defined, use polybench_set_program_flops(value)\n");
      printf ("%0.6lf\n", polybench_t_end - polybench_t_start);
    }
  else
    printf ("%0.2lf\n",
            (polybench_program_total_flops /
             (double)(polybench_t_end - polybench_t_start)) / 1000000000);
#else
# ifndef POLYBENCH_CYCLE_ACCURATE_TIMER
  printf ("%0.6f\n", polybench_t_end - polybench_t_start);
# else
  /* FIX: "%Ld" is not a standard integer conversion ('L' applies only to
     long double); the cycle difference is unsigned long long -> "%llu". */
  printf ("%llu\n", polybench_c_end - polybench_c_start);
# endif
#endif
}
/* Allocate num bytes aligned to a 32-byte boundary; print a diagnostic and
 * terminate the program if the allocation fails. */
static
void *
xmalloc (size_t num)
{
  void* ptr = NULL;
  const int status = posix_memalign (&ptr, 32, num);
  if (status != 0 || ptr == NULL)
    {
      fprintf (stderr, "[PolyBench] posix_memalign: cannot allocate memory");
      exit (1);
    }
  return ptr;
}
/* Allocate an array of n elements of elt_size bytes each, 32-byte aligned
 * (via xmalloc, which exits on allocation failure). */
void* polybench_alloc_data(unsigned long long int n, int elt_size)
{
  /* FIX (resolves the old "FIXME: detect overflow!"): refuse sizes whose
     product would overflow size_t instead of allocating a wrapped-around,
     too-small buffer. */
  if (elt_size > 0 && n > ((size_t)-1) / (size_t) elt_size)
    {
      fprintf (stderr, "[PolyBench] polybench_alloc_data: size overflow");
      exit (1);
    }
  size_t val = n;
  val *= elt_size;
  void* ret = xmalloc (val);
  return ret;
}
|
GB_reduce_each_index.c | //------------------------------------------------------------------------------
// GB_reduce_each_index: T(i)=reduce(A(i,:)), reduce a matrix to a vector
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// Reduce a matrix to a vector. All entries in A(i,:) are reduced to T(i).
// First, all threads reduce their slice to their own workspace, operating on
// roughly the same number of entries each. The vectors in A are ignored; the
// reduction only depends on the indices. Next, the threads cooperate to
// reduce all workspaces to the workspace of thread 0. Finally, this last
// workspace is collected into T.
// If an out-of-memory condition occurs, the macro GB_FREE_ALL frees any
// workspace. This has no effect on the built-in workers (GB_FREE_ALL does
// nothing), and the workspace is freed in the caller. For the generic worker,
// the GB_FREE_ALL macro defined in GB_reduce_to_vector frees all workspace.
{
    // NOTE(review): this is a code fragment #include'd into a worker; it
    // assumes A, ttype, nthreads, nth, pstart_slice, T, info and Context are
    // in scope in the including file -- confirm against GB_reduce_to_vector.

    //--------------------------------------------------------------------------
    // get A
    //--------------------------------------------------------------------------

    const GB_ATYPE *GB_RESTRICT Ax = A->x ;
    const int64_t *GB_RESTRICT Ai = A->i ;
    const int64_t n = A->vlen ;
    size_t zsize = ttype->size ;

    //--------------------------------------------------------------------------
    // allocate workspace for each thread
    //--------------------------------------------------------------------------

    // 256 tasks per thread for load balancing of the final gather, clipped
    // to n since a task needs at least one index.
    int ntasks = 256 * nthreads ;
    ntasks = GB_IMIN (ntasks, n) ;

    GB_CTYPE *GB_RESTRICT *Works = NULL ;       // size nth
    bool *GB_RESTRICT *Marks = NULL ;           // size nth
    int64_t *GB_RESTRICT Tnz = NULL ;           // size nth
    int64_t *GB_RESTRICT Count = NULL ;         // size ntasks+1

    GB_CALLOC_MEMORY (Works, nth, sizeof (GB_CTYPE *)) ;
    GB_CALLOC_MEMORY (Marks, nth, sizeof (bool *)) ;
    GB_CALLOC_MEMORY (Tnz, nth, sizeof (int64_t)) ;
    GB_CALLOC_MEMORY (Count, ntasks+1, sizeof (int64_t)) ;
    bool ok = (Works != NULL && Marks != NULL && Tnz != NULL && Count != NULL) ;

    // This does not need to be parallel.  The calloc does not take O(n) time.
    if (ok)
    {
        // per-thread dense accumulator (Works) and occupancy flags (Marks)
        for (int tid = 0 ; tid < nth ; tid++)
        {
            GB_MALLOC_MEMORY (Works [tid], n, zsize) ;
            GB_CALLOC_MEMORY (Marks [tid], n, sizeof (bool)) ;
            ok = ok && (Works [tid] != NULL && Marks [tid] != NULL) ;
        }
    }

    if (!ok)
    {
        // out of memory
        if (Works != NULL)
        {
            for (int tid = 0 ; tid < nth ; tid++)
            {
                GB_FREE_MEMORY (Works [tid], n, zsize) ;
            }
        }
        if (Marks != NULL)
        {
            for (int tid = 0 ; tid < nth ; tid++)
            {
                GB_FREE_MEMORY (Marks [tid], n, sizeof (bool)) ;
            }
        }
        GB_FREE_MEMORY (Works, nth, sizeof (GB_CTYPE *)) ;
        GB_FREE_MEMORY (Marks, nth, sizeof (bool *)) ;
        GB_FREE_MEMORY (Tnz, nth, sizeof (int64_t)) ;
        GB_FREE_MEMORY (Count, ntasks+1, sizeof (int64_t)) ;
        GB_FREE_ALL ;
        return (GB_OUT_OF_MEMORY) ;
    }

    //--------------------------------------------------------------------------
    // reduce each slice in its own workspace
    //--------------------------------------------------------------------------

    // each thread reduces its own slice in parallel
    // (one thread per slice: num_threads(nth), not nthreads)
    int tid ;
    #pragma omp parallel for num_threads(nth) schedule(static)
    for (tid = 0 ; tid < nth ; tid++)
    {

        //----------------------------------------------------------------------
        // get the workspace for this thread
        //----------------------------------------------------------------------

        GB_CTYPE *GB_RESTRICT Work = Works [tid] ;
        bool *GB_RESTRICT Mark = Marks [tid] ;
        int64_t my_tnz = 0 ;

        //----------------------------------------------------------------------
        // reduce the entries
        //----------------------------------------------------------------------

        // scan this thread's range of entries; vectors are irrelevant, only
        // the row index Ai [p] matters
        for (int64_t p = pstart_slice [tid] ; p < pstart_slice [tid+1] ;p++)
        {
            int64_t i = Ai [p] ;
            // ztype aij = (ztype) Ax [p], with typecast
            GB_SCALAR (aij) ;
            GB_CAST_ARRAY_TO_SCALAR (aij, Ax, p) ;
            if (!Mark [i])
            {
                // first time index i has been seen
                // Work [i] = aij ; no typecast
                GB_COPY_SCALAR_TO_ARRAY (Work, i, aij) ;
                Mark [i] = true ;
                my_tnz++ ;
            }
            else
            {
                // Work [i] += aij ; no typecast
                GB_ADD_SCALAR_TO_ARRAY (Work, i, aij) ;
            }
        }
        Tnz [tid] = my_tnz ;
    }

    //--------------------------------------------------------------------------
    // reduce all workspace to Work [0] and count # entries in T
    --------------------------------------------------------------------------

    GB_CTYPE *GB_RESTRICT Work0 = Works [0] ;
    bool *GB_RESTRICT Mark0 = Marks [0] ;
    int64_t tnz = Tnz [0] ;

    if (nth > 1)
    {
        // NOTE(review): this n-sized loop uses num_threads(nthreads) while
        // the slice loops above use nth -- presumably intentional (all
        // threads cooperate here); confirm against the caller's definitions.
        int64_t i ;
        #pragma omp parallel for num_threads(nthreads) schedule(static) \
            reduction(+:tnz)
        for (i = 0 ; i < n ; i++)
        {
            for (int tid = 1 ; tid < nth ; tid++)
            {
                const bool *GB_RESTRICT Mark = Marks [tid] ;
                if (Mark [i])
                {
                    // thread tid has a contribution to index i
                    const GB_CTYPE *GB_RESTRICT Work = Works [tid] ;
                    if (!Mark0 [i])
                    {
                        // first time index i has been seen
                        // Work0 [i] = Work [i] ; no typecast
                        GB_COPY_ARRAY_TO_ARRAY (Work0, i, Work, i) ;
                        Mark0 [i] = true ;
                        tnz++ ;
                    }
                    else
                    {
                        // Work0 [i] += Work [i] ; no typecast
                        GB_ADD_ARRAY_TO_ARRAY (Work0, i, Work, i) ;
                    }
                }
            }
        }

        // free all but workspace for thread 0
        for (int tid = 1 ; tid < nth ; tid++)
        {
            GB_FREE_MEMORY (Works [tid], n, zsize) ;
            GB_FREE_MEMORY (Marks [tid], n, sizeof (bool)) ;
        }
    }

    //--------------------------------------------------------------------------
    // free workspace
    //--------------------------------------------------------------------------

    GB_FREE_MEMORY (Works, nth, sizeof (GB_CTYPE *)) ;
    GB_FREE_MEMORY (Marks, nth, sizeof (bool *)) ;
    GB_FREE_MEMORY (Tnz, nth, sizeof (int64_t)) ;

    //--------------------------------------------------------------------------
    // allocate T
    //--------------------------------------------------------------------------

    // since T is a GrB_Vector, it is CSC and not hypersparse
    GB_CREATE (&T, ttype, n, 1, GB_Ap_calloc, true,
        GB_FORCE_NONHYPER, GB_HYPER_DEFAULT, 1, tnz, true, Context) ;
    if (info != GrB_SUCCESS)
    {
        // out of memory
        GB_FREE_MEMORY (Work0, n, zsize) ;
        GB_FREE_MEMORY (Mark0, n, sizeof (bool)) ;
        GB_FREE_MEMORY (Count, ntasks+1, sizeof (int64_t)) ;
        GB_FREE_ALL ;
        return (GB_OUT_OF_MEMORY) ;
    }

    // T holds a single vector with tnz entries
    T->p [0] = 0 ;
    T->p [1] = tnz ;
    int64_t *GB_RESTRICT Ti = T->i ;
    GB_CTYPE *GB_RESTRICT Tx = T->x ;
    T->nvec_nonempty = (tnz > 0) ? 1 : 0 ;

    //--------------------------------------------------------------------------
    // gather the results into T
    //--------------------------------------------------------------------------

    if (tnz == n)
    {

        //----------------------------------------------------------------------
        // T is dense: transplant Work0 into T->x
        //----------------------------------------------------------------------

        int64_t i ;
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (i = 0 ; i < n ; i++)
        {
            Ti [i] = i ;
        }
        // no copy needed: take ownership of Work0 as T->x
        GB_FREE_MEMORY (T->x, n, zsize) ;
        T->x = Work0 ;
        Work0 = NULL ;

    }
    else
    {

        //----------------------------------------------------------------------
        // T is sparse: gather from Work0 and Mark0
        --------------------------------------------------------------------

        if (nthreads == 1)
        {

            //------------------------------------------------------------------
            // gather sparse T using a single thread
            //------------------------------------------------------------------

            int64_t p = 0 ;
            for (int64_t i = 0 ; i < n ; i++)
            {
                if (Mark0 [i])
                {
                    Ti [p] = i ;
                    // Tx [p] = Work0 [i], no typecast
                    GB_COPY_ARRAY_TO_ARRAY (Tx, p, Work0, i) ;
                    p++ ;
                }
            }
            ASSERT (p == tnz) ;

        }
        else
        {

            //------------------------------------------------------------------
            // gather sparse T using multiple threads
            //------------------------------------------------------------------

            // Some tasks may be completely empty and thus take no time at all;
            // 256 tasks per thread are created for better load balancing.

            // first pass: count the entries in each task's index range
            int taskid ;
            #pragma omp parallel for num_threads(nthreads) schedule(dynamic)
            for (taskid = 0 ; taskid < ntasks ; taskid++)
            {
                int64_t ifirst, ilast, p = 0 ;
                GB_PARTITION (ifirst, ilast, n, taskid, ntasks) ;
                for (int64_t i = ifirst ; i < ilast ; i++)
                {
                    p += Mark0 [i] ;
                }
                Count [taskid] = p ;
            }

            // cumulative sum gives each task its offset into Ti/Tx
            GB_cumsum (Count, ntasks, NULL, 1) ;

            // second pass: each task gathers its entries at its offset
            #pragma omp parallel for num_threads(nthreads) schedule(dynamic)
            for (taskid = 0 ; taskid < ntasks ; taskid++)
            {
                int64_t ifirst, ilast, p = Count [taskid] ;
                int64_t my_count = (Count [taskid+1] - p) ;
                GB_PARTITION (ifirst, ilast, n, taskid, ntasks) ;
                if (my_count > 0)
                {
                    for (int64_t i = ifirst ; i < ilast ; i++)
                    {
                        if (Mark0 [i])
                        {
                            Ti [p] = i ;
                            // Tx [p] = Work0 [i], no typecast
                            GB_COPY_ARRAY_TO_ARRAY (Tx, p, Work0, i) ;
                            p++ ;
                        }
                    }
                }
            }

            #ifdef GB_DEBUG
            // check result using a single thread
            int64_t p = 0 ;
            for (int64_t i = 0 ; i < n ; i++)
            {
                if (Mark0 [i])
                {
                    ASSERT (Ti [p] == i) ;
                    p++ ;
                }
            }
            ASSERT (p == tnz) ;
            #endif
        }
    }

    //--------------------------------------------------------------------------
    // free workspace
    //--------------------------------------------------------------------------

    GB_FREE_MEMORY (Count, ntasks+1, sizeof (int64_t)) ;
    GB_FREE_MEMORY (Work0, n, zsize) ;
    GB_FREE_MEMORY (Mark0, n, sizeof (bool)) ;
}
|
J2OrbitalSoA.h | //////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
// Amrita Mathuriya, amrita.mathuriya@intel.com, Intel Corp.
// Ye Luo, yeluo@anl.gov, Argonne National Laboratory
//
// File created by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
//////////////////////////////////////////////////////////////////////////////////////
// -*- C++ -*-
#ifndef QMCPLUSPLUS_TWOBODYJASTROW_OPTIMIZED_SOA_H
#define QMCPLUSPLUS_TWOBODYJASTROW_OPTIMIZED_SOA_H
#include <map>
#include <numeric>
#include "Configuration.h"
#if !defined(QMC_BUILD_SANDBOX_ONLY)
#include "QMCWaveFunctions/WaveFunctionComponent.h"
#include "QMCWaveFunctions/Jastrow/DiffTwoBodyJastrowOrbital.h"
#endif
#include "Particle/DistanceTableData.h"
#include "LongRange/StructFact.h"
#include <CPU/SIMD/aligned_allocator.hpp>
#include <CPU/SIMD/algorithm.hpp>
namespace qmcplusplus
{
// helper class to activate KEcorr during optimizing Jastrow
template<typename RT, class FT>
class J2KECorrection
{
  /// number of particle groups in the target particle set
  size_t num_groups_;
  /// number of particles in each group
  std::vector<size_t> num_elec_in_groups_;
  /// total number of particles
  RT num_elecs_;
  /// simulation-cell volume
  RT vol;
  /// magnitude of the smallest k vector (only set when SK is available)
  RT G0mag;
  /// pair functors, indexed by [ig * num_groups_ + jg]
  const std::vector<FT*>& F_;
  /// true when the structure factor (k-space data) exists for the cell
  bool SK_enabled;

public:
  J2KECorrection(const ParticleSet& targetPtcl, const std::vector<FT*>& F)
      : num_groups_(targetPtcl.groups()),
        num_elecs_(targetPtcl.getTotalNum()),
        vol(targetPtcl.Lattice.Volume),
        F_(F),
        SK_enabled(targetPtcl.SK != nullptr)
  {
    // compute num_elec_in_groups_
    // (reserve(3) is just a typical-size hint; push_back still grows it)
    num_elec_in_groups_.reserve(3);
    for (int i = 0; i < num_groups_; i++)
      num_elec_in_groups_.push_back(targetPtcl.last(i) - targetPtcl.first(i));

    // G0mag is read only by computeKEcorr(), which returns early when
    // SK_enabled is false, so leaving it unset there is safe.
    if (SK_enabled)
      G0mag = std::sqrt(targetPtcl.SK->KLists.ksq[0]);
  }

  // Kinetic-energy correction for the two-body Jastrow.  Numerically
  // integrates each pair functor u(r) against a spherical sin(kr)/k weight
  // out to its cutoff, then solves a small fixed-point equation for the
  // parameter 'a' before forming the final correction.
  RT computeKEcorr()
  {
    if (!SK_enabled)
      return 0;

    const int numPoints = 1000;   // radial quadrature points per functor
    RT uk = 0.0;
    RT a  = 1.0;

    for (int i = 0; i < num_groups_; i++)
    {
      int Ni = num_elec_in_groups_[i];
      for (int j = 0; j < num_groups_; j++)
      {
        int Nj = num_elec_in_groups_[j];
        if (F_[i * num_groups_ + j])
        {
          FT& ufunc = *(F_[i * num_groups_ + j]);
          RT radius = ufunc.cutoff_radius;
          RT k      = G0mag;
          RT dr     = radius / (RT)(numPoints - 1);
          // trapezoid-like accumulation of u(r) * 4*pi*r*sin(kr)/k,
          // weighted by the relative population of group j
          for (int ir = 0; ir < numPoints; ir++)
          {
            RT r = dr * (RT)ir;
            RT u = ufunc.evaluate(r);
            uk += 0.5 * 4.0 * M_PI * r * std::sin(k * r) / k * u * dr * (RT)Nj / (RT)(Ni + Nj);
          }
        }
      }
    }
    // fixed-point iteration for 'a' (20 iterations, no convergence test)
    for (int iter = 0; iter < 20; iter++)
      a = uk / (4.0 * M_PI * (1.0 / (G0mag * G0mag) - 1.0 / (G0mag * G0mag + 1.0 / a)));
    return 4.0 * M_PI * a / (4.0 * vol) * num_elecs_;
  }
};
/** @ingroup WaveFunctionComponent
* @brief Specialization for two-body Jastrow function using multiple functors
*
* Each pair-type can have distinct function \f$u(r_{ij})\f$.
* For electrons, distinct pair correlation functions are used
* for spins up-up/down-down and up-down/down-up.
*
* Based on J2OrbitalSoA.h with these considerations
* - DistanceTableData using SoA containers
* - support mixed precision: FT::real_type != OHMMS_PRECISION
* - loops over the groups: elminated PairID
* - support simd function
* - double the loop counts
* - Memory use is O(N).
*/
template<class FT>
class J2OrbitalSoA : public WaveFunctionComponent
{
public:
///alias FuncType
using FuncType = FT;
///type of each component U, dU, d2U;
using valT = typename FT::real_type;
///element position type
using posT = TinyVector<valT, OHMMS_DIM>;
///use the same container
using DistRow = DistanceTableData::DistRow;
using DisplRow = DistanceTableData::DisplRow;
using gContainer_type = VectorSoaContainer<valT, OHMMS_DIM>;
// Ye: leaving this public is bad but currently used by unit tests.
///Container for \f$F[ig*NumGroups+jg]\f$.
std::vector<FT*> F;
protected:
///number of particles
size_t N;
///number of particles + padded
size_t N_padded;
///number of groups of the target particleset
size_t NumGroups;
///diff value
RealType DiffVal;
///Correction
RealType KEcorr;
///\f$Uat[i] = sum_(j) u_{i,j}\f$
Vector<valT> Uat;
///\f$dUat[i] = sum_(j) du_{i,j}\f$
gContainer_type dUat;
///\f$d2Uat[i] = sum_(j) d2u_{i,j}\f$
Vector<valT> d2Uat;
valT cur_Uat;
aligned_vector<valT> cur_u, cur_du, cur_d2u;
aligned_vector<valT> old_u, old_du, old_d2u;
aligned_vector<valT> DistCompressed;
aligned_vector<int> DistIndice;
///Uniquue J2 set for cleanup
std::map<std::string, FT*> J2Unique;
/// e-e table ID
const int my_table_ID_;
// helper for compute J2 Chiesa KE correction
J2KECorrection<RealType, FT> j2_ke_corr_helper;
public:
J2OrbitalSoA(const std::string& obj_name, ParticleSet& p, int tid);
J2OrbitalSoA(const J2OrbitalSoA& rhs) = delete;
~J2OrbitalSoA();
/* initialize storage */
void init(ParticleSet& p);
/** add functor for (ia,ib) pair */
void addFunc(int ia, int ib, FT* j);
/** check in an optimizable parameter
* @param o a super set of optimizable variables
*/
void checkInVariables(opt_variables_type& active)
{
myVars.clear();
typename std::map<std::string, FT*>::iterator it(J2Unique.begin()), it_end(J2Unique.end());
while (it != it_end)
{
(*it).second->checkInVariables(active);
(*it).second->checkInVariables(myVars);
++it;
}
}
/** check out optimizable variables
*/
void checkOutVariables(const opt_variables_type& active)
{
myVars.getIndex(active);
Optimizable = myVars.is_optimizable();
typename std::map<std::string, FT*>::iterator it(J2Unique.begin()), it_end(J2Unique.end());
while (it != it_end)
{
(*it).second->checkOutVariables(active);
++it;
}
if (dPsi)
dPsi->checkOutVariables(active);
}
///reset the value of all the unique Two-Body Jastrow functions
void resetParameters(const opt_variables_type& active)
{
if (!Optimizable)
return;
typename std::map<std::string, FT*>::iterator it(J2Unique.begin()), it_end(J2Unique.end());
while (it != it_end)
{
(*it).second->resetParameters(active);
++it;
}
if (dPsi)
dPsi->resetParameters(active);
for (int i = 0; i < myVars.size(); ++i)
{
int ii = myVars.Index[i];
if (ii >= 0)
myVars[i] = active[ii];
}
}
void finalizeOptimization() { KEcorr = j2_ke_corr_helper.computeKEcorr(); }
/** print the state, e.g., optimizables */
void reportStatus(std::ostream& os)
{
typename std::map<std::string, FT*>::iterator it(J2Unique.begin()), it_end(J2Unique.end());
while (it != it_end)
{
(*it).second->myVars.print(os);
++it;
}
}
WaveFunctionComponentPtr makeClone(ParticleSet& tqp) const;
LogValueType evaluateLog(ParticleSet& P, ParticleSet::ParticleGradient_t& G, ParticleSet::ParticleLaplacian_t& L);
void evaluateHessian(ParticleSet& P, HessVector_t& grad_grad_psi);
/** recompute internal data assuming distance table is fully ready */
void recompute(ParticleSet& P);
PsiValueType ratio(ParticleSet& P, int iat);
void evaluateRatios(const VirtualParticleSet& VP, std::vector<ValueType>& ratios)
{
for (int k = 0; k < ratios.size(); ++k)
ratios[k] =
std::exp(Uat[VP.refPtcl] - computeU(VP.refPS, VP.refPtcl, VP.getDistTable(my_table_ID_).getDistRow(k)));
}
void evaluateRatiosAlltoOne(ParticleSet& P, std::vector<ValueType>& ratios);
GradType evalGrad(ParticleSet& P, int iat);
PsiValueType ratioGrad(ParticleSet& P, int iat, GradType& grad_iat);
void acceptMove(ParticleSet& P, int iat, bool safe_to_delay = false);
inline void restore(int iat) {}
/** compute G and L after the sweep
*/
void evaluateGL(ParticleSet& P,
ParticleSet::ParticleGradient_t& G,
ParticleSet::ParticleLaplacian_t& L,
bool fromscratch = false);
/** register per-particle arrays (Uat, dUat, d2Uat) in the walker buffer
 * On the first call the arrays are copied into the buffer, their byte span is
 * recorded, and the local copies are freed; afterwards the data live only in
 * the buffer (see copyFromBuffer, which re-attaches references to it).
 */
inline void registerData(ParticleSet& P, WFBufferType& buf)
{
  if (Bytes_in_WFBuffer == 0)
  {
    // Record the start cursor, append all arrays, then convert to a byte count.
    Bytes_in_WFBuffer = buf.current();
    buf.add(Uat.begin(), Uat.end());
    buf.add(dUat.data(), dUat.end());
    buf.add(d2Uat.begin(), d2Uat.end());
    Bytes_in_WFBuffer = buf.current() - Bytes_in_WFBuffer;
    // free local space
    Uat.free();
    dUat.free();
    d2Uat.free();
  }
  else
  {
    // Already registered: just advance the buffer cursor past our slice.
    buf.forward(Bytes_in_WFBuffer);
  }
}
/** re-attach Uat/dUat/d2Uat as references into the walker buffer
 * Must mirror the registration order in registerData (Uat, dUat, d2Uat).
 */
inline void copyFromBuffer(ParticleSet& P, WFBufferType& buf)
{
  Uat.attachReference(buf.lendReference<valT>(N), N);
  // dUat is SoA: N logical entries padded to N_padded per dimension.
  dUat.attachReference(N, N_padded, buf.lendReference<valT>(N_padded * OHMMS_DIM));
  d2Uat.attachReference(buf.lendReference<valT>(N), N);
}
/** update G and L and advance the buffer cursor past this component's slice
 * @note evaluateGL is called with fromscratch=false regardless of the
 *       fromscratch argument here; the internal tables are assumed current.
 * @return the (updated) log value
 */
LogValueType updateBuffer(ParticleSet& P, WFBufferType& buf, bool fromscratch = false)
{
  evaluateGL(P, P.G, P.L, false);
  buf.forward(Bytes_in_WFBuffer);
  return LogValue;
}
/*@{ internal compute engines*/
/** accumulate \f$\sum_j u(r_{iat,j})\f$ for particle iat from a row of distances
 * @param P target particleset (supplies group ranges and GroupID)
 * @param iat particle whose group selects the row of functors in F
 * @param dist distances from iat to all particles
 * @return the summed pair value
 */
inline valT computeU(const ParticleSet& P, int iat, const DistRow& dist)
{
  valT curUat(0);
  // Row offset into the flattened NumGroups x NumGroups functor table.
  const int igt = P.GroupID[iat] * NumGroups;
  for (int jg = 0; jg < NumGroups; ++jg)
  {
    const FuncType& f2(*F[igt + jg]);
    int iStart = P.first(jg);
    int iEnd = P.last(jg);
    // Functor sums u(r) over [iStart,iEnd), skipping index iat itself.
    curUat += f2.evaluateV(iat, iStart, iEnd, dist.data(), DistCompressed.data());
  }
  return curUat;
}
inline void computeU3(const ParticleSet& P,
int iat,
const DistRow& dist,
RealType* restrict u,
RealType* restrict du,
RealType* restrict d2u,
bool triangle = false);
/** compute gradient
*/
/** compute the gradient contribution \f$\sum_j du_j \cdot \hat{r}_{ij}\f$
 * @param du per-particle radial derivatives (length N)
 * @param displ SoA displacement row for the moved particle
 * @return accumulated gradient vector
 * @note the simd `aligned` clause assumes du and the displacement columns are
 *       aligned allocations — presumably guaranteed by aligned_vector /
 *       VectorSoaContainer; TODO confirm.
 */
inline posT accumulateG(const valT* restrict du, const DisplRow& displ) const
{
  posT grad;
  for (int idim = 0; idim < OHMMS_DIM; ++idim)
  {
    const valT* restrict dX = displ.data(idim);
    valT s = valT();
#pragma omp simd reduction(+ : s) aligned(du, dX)
    for (int jat = 0; jat < N; ++jat)
      s += du[jat] * dX[jat];
    grad[idim] = s;
  }
  return grad;
}
/**@} */
/// Recompute, cache, and return the Chiesa kinetic-energy correction.
RealType ChiesaKEcorrection() { return KEcorr = j2_ke_corr_helper.computeKEcorr(); }
/// Return the cached kinetic-energy correction (set by ChiesaKEcorrection/finalizeOptimization).
RealType KECorrection() { return KEcorr; }
};
/** constructor
 * @param obj_name user-visible object name (must be non-empty)
 * @param p target particleset; an e-e distance table is registered on it
 * @param tid thread id (unused here; kept for interface compatibility)
 */
template<typename FT>
J2OrbitalSoA<FT>::J2OrbitalSoA(const std::string& obj_name, ParticleSet& p, int tid)
    : WaveFunctionComponent("J2OrbitalSoA", obj_name), my_table_ID_(p.addTable(p)), j2_ke_corr_helper(p, F)
{
  if (myName.empty())
    throw std::runtime_error("J2OrbitalSoA object name cannot be empty!");
  init(p);
  KEcorr = 0.0;
}
template<typename FT>
J2OrbitalSoA<FT>::~J2OrbitalSoA()
{
  // J2Unique owns one entry per distinct functor registered via addFunc;
  // release them all here.
  for (auto& entry : J2Unique)
    delete entry.second;
}
/** size all per-particle working storage from the particleset
 * @param p target particleset supplying particle count and group count
 */
template<typename FT>
void J2OrbitalSoA<FT>::init(ParticleSet& p)
{
  N = p.getTotalNum();
  // padded count for aligned SIMD access of SoA data
  N_padded = getAlignedSize<valT>(N);
  NumGroups = p.groups();
  Uat.resize(N);
  dUat.resize(N);
  d2Uat.resize(N);
  cur_u.resize(N);
  cur_du.resize(N);
  cur_d2u.resize(N);
  old_u.resize(N);
  old_du.resize(N);
  old_d2u.resize(N);
  // flattened NumGroups x NumGroups functor table, filled by addFunc
  F.resize(NumGroups * NumGroups, nullptr);
  DistCompressed.resize(N);
  DistIndice.resize(N);
}
/** register a radial functor for the particle-group pair (ia, ib)
 * @param ia group index of the first species
 * @param ib group index of the second species
 * @param j functor; ownership is taken (released in the destructor via J2Unique)
 */
template<typename FT>
void J2OrbitalSoA<FT>::addFunc(int ia, int ib, FT* j)
{
  if (ia == ib)
  {
    if (ia == 0) //first time, assign everything
    {
      int ij = 0;
      for (int ig = 0; ig < NumGroups; ++ig)
        for (int jg = 0; jg < NumGroups; ++jg, ++ij)
          if (F[ij] == nullptr)
            F[ij] = j;
    }
    else
      F[ia * NumGroups + ib] = j;
  }
  else
  {
    if (N == 2)
    {
      // a very special case, 1 up + 1 down
      // uu/dd was prevented by the builder
      for (int ig = 0; ig < NumGroups; ++ig)
        for (int jg = 0; jg < NumGroups; ++jg)
          F[ig * NumGroups + jg] = j;
    }
    else
    {
      // generic case: the pair table is symmetric
      F[ia * NumGroups + ib] = j;
      F[ib * NumGroups + ia] = j;
    }
  }
  // Key the ownership map on "ia_ib" rather than bare concatenation: with
  // more than 10 groups, plain "ia<<ib" makes e.g. (1,11) and (11,1) both
  // map to "111", silently overwriting (and leaking) a stored functor.
  std::stringstream aname;
  aname << ia << '_' << ib;
  J2Unique[aname.str()] = j;
}
/** deep-clone this component for a new particleset
 * @param tqp target quantum particleset of the clone
 * @return owning pointer to the clone (caller takes ownership)
 *
 * Unique functors are cloned once and shared across all (ig,jg) slots that
 * referenced the same source functor, preserving the sharing structure of F.
 */
template<typename FT>
WaveFunctionComponentPtr J2OrbitalSoA<FT>::makeClone(ParticleSet& tqp) const
{
  J2OrbitalSoA<FT>* j2copy = new J2OrbitalSoA<FT>(myName, tqp, -1);
  if (dPsi)
    j2copy->dPsi = dPsi->makeClone(tqp);
  // source functor -> cloned functor, so shared slots stay shared in the copy
  std::map<const FT*, FT*> fcmap;
  for (int ig = 0; ig < NumGroups; ++ig)
    for (int jg = ig; jg < NumGroups; ++jg)
    {
      const int ij = ig * NumGroups + jg;
      if (F[ij] == nullptr) // prefer nullptr over 0 for pointer comparison
        continue;
      auto fit = fcmap.find(F[ij]);
      if (fit == fcmap.end())
      {
        FT* fc = new FT(*F[ij]);
        // addFunc fills both (ig,jg) and (jg,ig) and registers ownership
        j2copy->addFunc(ig, jg, fc);
        fcmap[F[ij]] = fc;
      }
    }
  j2copy->KEcorr = KEcorr;
  j2copy->Optimizable = Optimizable;
  return j2copy;
}
/** intenal function to compute \f$\sum_j u(r_j), du/dr, d2u/dr2\f$
* @param P particleset
* @param iat particle index
* @param dist starting distance
* @param u starting value
* @param du starting first deriv
* @param d2u starting second deriv
*/
template<typename FT>
inline void J2OrbitalSoA<FT>::computeU3(const ParticleSet& P,
                                        int iat,
                                        const DistRow& dist,
                                        RealType* restrict u,
                                        RealType* restrict du,
                                        RealType* restrict d2u,
                                        bool triangle)
{
  // With triangle=true only j < iat is evaluated (lower triangle), which is
  // what recompute() uses to avoid double work.
  const int jelmax = triangle ? iat : N;
  constexpr valT czero(0);
  std::fill_n(u, jelmax, czero);
  std::fill_n(du, jelmax, czero);
  std::fill_n(d2u, jelmax, czero);
  const int igt = P.GroupID[iat] * NumGroups;
  for (int jg = 0; jg < NumGroups; ++jg)
  {
    const FuncType& f2(*F[igt + jg]);
    int iStart = P.first(jg);
    // clamp the group range to the triangle limit
    int iEnd = std::min(jelmax, P.last(jg));
    f2.evaluateVGL(iat, iStart, iEnd, dist.data(), u, du, d2u, DistCompressed.data(), DistIndice.data());
  }
  //u[iat]=czero;
  //du[iat]=czero;
  //d2u[iat]=czero;
}
/** compute the wavefunction ratio for a proposed move of particle iat
 * Caches the new per-particle sum in cur_Uat; derivatives are deferred
 * (acceptMove recomputes them when UpdateMode is ORB_PBYP_RATIO).
 */
template<typename FT>
typename J2OrbitalSoA<FT>::PsiValueType J2OrbitalSoA<FT>::ratio(ParticleSet& P, int iat)
{
  //only ratio, ready to compute it again
  UpdateMode = ORB_PBYP_RATIO;
  cur_Uat = computeU(P, iat, P.getDistTable(my_table_ID_).getTempDists());
  return std::exp(static_cast<PsiValueType>(Uat[iat] - cur_Uat));
}
/** compute ratios for moving every particle to one common (temp) position
 * @param P particleset whose temp distance row holds the target position
 * @param ratios output, one ratio per particle
 */
template<typename FT>
inline void J2OrbitalSoA<FT>::evaluateRatiosAlltoOne(ParticleSet& P, std::vector<ValueType>& ratios)
{
  const auto& d_table = P.getDistTable(my_table_ID_);
  const auto& dist = d_table.getTempDists();
  for (int ig = 0; ig < NumGroups; ++ig)
  {
    const int igt = ig * NumGroups;
    // sum of u(r) from the temp position to all particles, as seen by group ig
    valT sumU(0);
    for (int jg = 0; jg < NumGroups; ++jg)
    {
      const FuncType& f2(*F[igt + jg]);
      int iStart = P.first(jg);
      int iEnd = P.last(jg);
      // iat = -1: no particle is excluded from the sum here
      sumU += f2.evaluateV(-1, iStart, iEnd, dist.data(), DistCompressed.data());
    }
    for (int i = P.first(ig); i < P.last(ig); ++i)
    {
      // remove self-interaction
      const valT Uself = F[igt + ig]->evaluate(dist[i]);
      ratios[i] = std::exp(Uat[i] + Uself - sumU);
    }
  }
}
template<typename FT>
typename J2OrbitalSoA<FT>::GradType J2OrbitalSoA<FT>::evalGrad(ParticleSet& P, int iat)
{
  // The per-particle gradient is maintained incrementally in dUat; no
  // recomputation is needed here.
  return GradType(dUat[iat]);
}
/** compute ratio and gradient for a proposed move of particle iat
 * Fills cur_u/cur_du/cur_d2u so acceptMove can reuse them.
 */
template<typename FT>
typename J2OrbitalSoA<FT>::PsiValueType J2OrbitalSoA<FT>::ratioGrad(ParticleSet& P, int iat, GradType& grad_iat)
{
  UpdateMode = ORB_PBYP_PARTIAL;
  computeU3(P, iat, P.getDistTable(my_table_ID_).getTempDists(), cur_u.data(), cur_du.data(), cur_d2u.data());
  cur_Uat = simd::accumulate_n(cur_u.data(), N, valT());
  DiffVal = Uat[iat] - cur_Uat;
  // gradient contribution from the proposed position
  grad_iat += accumulateG(cur_du.data(), P.getDistTable(my_table_ID_).getTempDispls());
  return std::exp(static_cast<PsiValueType>(DiffVal));
}
/** accept the proposed move of particle iat and update cached tables
 * Updates Uat/dUat/d2Uat incrementally: subtract the old-pair contributions
 * and add the new ones for every other particle, then overwrite the moved
 * particle's own entries.
 */
template<typename FT>
void J2OrbitalSoA<FT>::acceptMove(ParticleSet& P, int iat, bool safe_to_delay)
{
  // get the old u, du, d2u
  const auto& d_table = P.getDistTable(my_table_ID_);
  computeU3(P, iat, d_table.getOldDists(), old_u.data(), old_du.data(), old_d2u.data());
  if (UpdateMode == ORB_PBYP_RATIO)
  { //ratio-only during the move; need to compute derivatives
    const auto& dist = d_table.getTempDists();
    computeU3(P, iat, dist, cur_u.data(), cur_du.data(), cur_d2u.data());
  }
  valT cur_d2Uat(0);
  const auto& new_dr = d_table.getTempDispls();
  const auto& old_dr = d_table.getOldDispls();
  // (D-1) factor of the radial Laplacian in D dimensions; presumably du
  // already carries a 1/r from the functor — confirm against FT::evaluateVGL.
  constexpr valT lapfac = OHMMS_DIM - RealType(1);
#pragma omp simd reduction(+ : cur_d2Uat)
  for (int jat = 0; jat < N; jat++)
  {
    const valT du = cur_u[jat] - old_u[jat];
    const valT newl = cur_d2u[jat] + lapfac * cur_du[jat];
    const valT dl = old_d2u[jat] + lapfac * old_du[jat] - newl;
    Uat[jat] += du;
    d2Uat[jat] += dl;
    // accumulate the moved particle's own Laplacian (negated convention)
    cur_d2Uat -= newl;
  }
  posT cur_dUat;
  for (int idim = 0; idim < OHMMS_DIM; ++idim)
  {
    const valT* restrict new_dX = new_dr.data(idim);
    const valT* restrict old_dX = old_dr.data(idim);
    const valT* restrict cur_du_pt = cur_du.data();
    const valT* restrict old_du_pt = old_du.data();
    valT* restrict save_g = dUat.data(idim);
    valT cur_g = cur_dUat[idim];
#pragma omp simd reduction(+ : cur_g) aligned(old_dX, new_dX, save_g, cur_du_pt, old_du_pt)
    for (int jat = 0; jat < N; jat++)
    {
      const valT newg = cur_du_pt[jat] * new_dX[jat];
      const valT dg = newg - old_du_pt[jat] * old_dX[jat];
      // other particles: replace old-pair gradient piece with the new one
      save_g[jat] -= dg;
      cur_g += newg;
    }
    cur_dUat[idim] = cur_g;
  }
  // finally overwrite the moved particle's own cached values
  LogValue += Uat[iat] - cur_Uat;
  Uat[iat] = cur_Uat;
  dUat(iat) = cur_dUat;
  d2Uat[iat] = cur_d2Uat;
}
/** rebuild Uat/dUat/d2Uat from scratch, assuming the distance table is current
 * Works on the lower triangle (j < iat) and mirrors each pair's contribution
 * to the upper-triangle partner to avoid evaluating pairs twice.
 */
template<typename FT>
void J2OrbitalSoA<FT>::recompute(ParticleSet& P)
{
  const auto& d_table = P.getDistTable(my_table_ID_);
  for (int ig = 0; ig < NumGroups; ++ig)
  {
    for (int iat = P.first(ig), last = P.last(ig); iat < last; ++iat)
    {
      // triangle=true: only pairs with jat < iat are evaluated here
      computeU3(P, iat, d_table.getDistRow(iat), cur_u.data(), cur_du.data(), cur_d2u.data(), true);
      Uat[iat] = simd::accumulate_n(cur_u.data(), iat, valT());
      posT grad;
      valT lap(0);
      const valT* restrict u = cur_u.data();
      const valT* restrict du = cur_du.data();
      const valT* restrict d2u = cur_d2u.data();
      const auto& displ = d_table.getDisplRow(iat);
      // (D-1) factor of the radial Laplacian in D dimensions
      constexpr valT lapfac = OHMMS_DIM - RealType(1);
#pragma omp simd reduction(+ : lap) aligned(du, d2u)
      for (int jat = 0; jat < iat; ++jat)
        lap += d2u[jat] + lapfac * du[jat];
      for (int idim = 0; idim < OHMMS_DIM; ++idim)
      {
        const valT* restrict dX = displ.data(idim);
        valT s = valT();
#pragma omp simd reduction(+ : s) aligned(du, dX)
        for (int jat = 0; jat < iat; ++jat)
          s += du[jat] * dX[jat];
        grad[idim] = s;
      }
      dUat(iat) = grad;
      d2Uat[iat] = -lap;
      // add the contribution from the upper triangle
#pragma omp simd aligned(u, du, d2u)
      for (int jat = 0; jat < iat; jat++)
      {
        Uat[jat] += u[jat];
        d2Uat[jat] -= d2u[jat] + lapfac * du[jat];
      }
      for (int idim = 0; idim < OHMMS_DIM; ++idim)
      {
        valT* restrict save_g = dUat.data(idim);
        const valT* restrict dX = displ.data(idim);
        // mirrored gradient piece has opposite sign (displacement flips)
#pragma omp simd aligned(save_g, du, dX)
        for (int jat = 0; jat < iat; jat++)
          save_g[jat] -= du[jat] * dX[jat];
      }
    }
  }
}
template<typename FT>
typename J2OrbitalSoA<FT>::LogValueType J2OrbitalSoA<FT>::evaluateLog(ParticleSet& P,
                                                                      ParticleSet::ParticleGradient_t& G,
                                                                      ParticleSet::ParticleLaplacian_t& L)
{
  // Full recomputation (fromscratch=true) followed by accumulation into G/L.
  evaluateGL(P, G, L, true);
  return LogValue;
}
/** accumulate gradient/Laplacian contributions into G and L
 * @param fromscratch when true, rebuild all internal tables first
 */
template<typename FT>
void J2OrbitalSoA<FT>::evaluateGL(ParticleSet& P,
                                  ParticleSet::ParticleGradient_t& G,
                                  ParticleSet::ParticleLaplacian_t& L,
                                  bool fromscratch)
{
  if (fromscratch)
    recompute(P);
  LogValue = valT(0);
  for (int iat = 0; iat < N; ++iat)
  {
    LogValue += Uat[iat];
    G[iat] += dUat[iat];
    L[iat] += d2Uat[iat];
  }
  // Uat double-counts every pair (once per member), hence the 0.5; the
  // Jastrow is exp(-sum u), hence the minus sign.
  LogValue = -LogValue * 0.5;
}
/** accumulate the Hessian of log(psi) for every particle
 * @param grad_grad_psi output tensor per particle, zeroed here first
 * Loops over unique pairs (j < i) and adds the symmetric pair Hessian to both.
 */
template<typename FT>
void J2OrbitalSoA<FT>::evaluateHessian(ParticleSet& P, HessVector_t& grad_grad_psi)
{
  LogValue = 0.0;
  const DistanceTableData& d_ee(P.getDistTable(my_table_ID_));
  valT dudr, d2udr2;
  Tensor<valT, DIM> ident;
  grad_grad_psi = 0.0;
  ident.diagonal(1.0);
  for (int i = 1; i < N; ++i)
  {
    const auto& dist = d_ee.getDistRow(i);
    const auto& displ = d_ee.getDisplRow(i);
    auto ig = P.GroupID[i];
    const int igt = ig * NumGroups;
    for (int j = 0; j < i; ++j)
    {
      auto r = dist[j];
      auto rinv = 1.0 / r;
      auto dr = displ[j];
      auto jg = P.GroupID[j];
      auto uij = F[igt + jg]->evaluate(r, dudr, d2udr2);
      // LogValue accumulates -sum u over unique pairs
      LogValue -= uij;
      // radial Hessian: (d2u - du/r) rhat rhat^T + (du/r) I
      auto hess = rinv * rinv * outerProduct(dr, dr) * (d2udr2 - dudr * rinv) + ident * dudr * rinv;
      grad_grad_psi[i] -= hess;
      grad_grad_psi[j] -= hess;
    }
  }
}
} // namespace qmcplusplus
#endif
|
RCCE_malloc.c | //***************************************************************************************
// MPB memory allocation routines.
//***************************************************************************************
//
// Author: Rob F. Van der Wijngaart
// Intel Corporation
// Date: 008/30/2010
//
//***************************************************************************************
//
// Copyright 2010 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "RCCE_lib.h"
//......................................................................................
// GLOBAL VARIABLES USED BY THE LIBRARY
//......................................................................................
static RCCE_BLOCK_S RCCE_space; // data structure used for trscking MPB memory blocks
static RCCE_BLOCK_S *RCCE_spacep; // pointer to RCCE_space
#ifdef _OPENMP
#pragma omp threadprivate (RCCE_space, RCCE_spacep)
#endif
// END GLOBAL VARIABLES USED BY THE LIBRARY
//......................................................................................
//--------------------------------------------------------------------------------------
// FUNCTION: RCCE_malloc_init
//--------------------------------------------------------------------------------------
// initialize memory allocator
//--------------------------------------------------------------------------------------
void RCCE_malloc_init(
  t_vcharp mem,  // pointer to MPB space that is to be managed by allocator
  size_t size    // size (bytes) of managed space
  ) {
#ifndef GORY
  // in the simplified API MPB memory allocation merely uses running pointers:
  // flags grow upward from the start of the managed space; RCCE_chunk tracks
  // the bytes still available for message payload
  RCCE_flags_start = mem;
  RCCE_chunk       = size;
  RCCE_buff_ptr    = mem;
#else
  // create one block containing all memory for truly dynamic memory allocator
  RCCE_spacep = &RCCE_space;
  RCCE_spacep->tail = (RCCE_BLOCK *) malloc(sizeof(RCCE_BLOCK));
  RCCE_spacep->tail->free_size = size;
  RCCE_spacep->tail->space     = mem;
  /* make a circular list by connecting tail to itself */
  RCCE_spacep->tail->next = RCCE_spacep->tail;
#endif
}
//--------------------------------------------------------------------------------------
// FUNCTION: RCCE_malloc
//--------------------------------------------------------------------------------------
// Allocate memory inside MPB. In restricted mode we only use it to allocate new
// flags prompted by the creation of new communicators. Since communicators are never
// deleted, we do not need to deallocate MPB memory, so we can simply keep running
// pointers of where the next flag will be stored, and where payload data can go. In
// GORY mode we need to support fully dynamic memory allocation and deallocation.
//--------------------------------------------------------------------------------------
t_vcharp RCCE_malloc(
  size_t size   // requested space
  ) {
  t_vcharp result;
#ifndef GORY
  // new flag takes exactly one cache line, whether using single bit flags or not
  if (size != RCCE_LINE_SIZE) {
    fprintf(stderr, "ERROR in RCCE_malloc(): size != RCCE_LINE_SIZE!\n");
    exit(-1);
    return(0);
  }
  // If less than (or exactly) one line remains, we have allocated too many
  // flags.  NOTE: the original test `!(RCCE_chunk-RCCE_LINE_SIZE)` only fired
  // when RCCE_chunk == RCCE_LINE_SIZE; for RCCE_chunk < RCCE_LINE_SIZE the
  // unsigned subtraction wrapped, the check passed, and the decrement below
  // underflowed RCCE_chunk.  An ordered comparison is safe in all cases.
  if (RCCE_chunk <= RCCE_LINE_SIZE) {
    fprintf(stderr, "ERROR in RCCE_malloc(): No more MPB space left!\n");
    exit(-1);
    return(0);
  }
  result = RCCE_flags_start;
  // reduce maximum size of message payload chunk
  RCCE_chunk -= RCCE_LINE_SIZE;
  // move running pointer to next available flags line
  RCCE_flags_start += RCCE_LINE_SIZE;
  // move running pointer to new start of payload data area
  RCCE_buff_ptr += RCCE_LINE_SIZE;
  return(result);
#else
  // simple memory allocator, loosely based on public domain code developed by
  // Michael B. Allen and published on "The Scripts--IT /Developers Network".
  // Approach:
  // - maintain linked list of pointers to memory. A block is either completely
  //   malloced (free_size = 0), or completely free (free_size > 0).
  //   The space field always points to the beginning of the block
  // - malloc: traverse linked list for first block that has enough space
  // - free: Check if pointer exists. If yes, check if the new block should be
  //   merged with neighbors. Could be one or two neighbors.
  RCCE_BLOCK *b1, *b2, *b3;  // running pointers for blocks

  // only whole multiples of the cache-line size can be allocated
  if (size==0 || size%RCCE_LINE_SIZE!=0) return 0;

  // always first check if the tail block has enough space, because that
  // is the most likely. If it does and it is exactly enough, we still
  // create a new block that will be the new tail, whose free space is
  // zero. This acts as a marker of where free space of predecessor ends
  b1 = RCCE_spacep->tail;
  if (b1->free_size >= size) {
    // need to insert new block; new order is: b1->b2 (= new tail)
    b2 = (RCCE_BLOCK *) malloc(sizeof(RCCE_BLOCK));
    b2->next      = b1->next;
    b1->next      = b2;
    b2->free_size = b1->free_size-size;
    b2->space     = b1->space + size;
    b1->free_size = 0;
    // need to update the tail
    RCCE_spacep->tail = b2;
    return(b1->space);
  }
  // tail didn't have enough space; loop over whole list from beginning
  while (b1->next->free_size < size) {
    if (b1->next == RCCE_spacep->tail) {
      return NULL;  // we came full circle
    }
    b1 = b1->next;
  }
  b2 = b1->next;
  if (b2->free_size > size) {  // split block; new block order: b1->b2->b3
    b3 = (RCCE_BLOCK *) malloc(sizeof(RCCE_BLOCK));
    b3->next      = b2->next;             // reconnect pointers to add block b3
    b2->next      = b3;                   //     "         "     "  "     "   "
    b3->free_size = b2->free_size - size; // b3 gets remainder free space
    b3->space     = b2->space + size;     // need to shift space pointer
  }
  b2->free_size = 0;                      // block b2 is completely used
  return (b2->space);
#endif
}
//--------------------------------------------------------------------------------------
// FUNCTION: RCCE_palloc
//--------------------------------------------------------------------------------------
// Allocate MPB space and translate the resulting pointer from the calling
// core's comm buffer into the address space of core CoreID. Returns NULL if
// the underlying allocation fails.
//--------------------------------------------------------------------------------------
t_vcharp RCCE_palloc(
  size_t size,  // requested space
  int CoreID    // location
  ) {
  t_vcharp result = RCCE_malloc(size);
  // offset within our own buffer, rebased onto CoreID's buffer
  if (result)
    result = RCCE_comm_buffer[CoreID]+(result-RCCE_comm_buffer[RCCE_IAM]);
  return result;
}
//--------------------------------------------------------------------------------------
// FUNCTION: RCCE_free
//--------------------------------------------------------------------------------------
// Deallocate memory in MPB; only used in GORY mode
//--------------------------------------------------------------------------------------
void RCCE_free(
  t_vcharp ptr  // pointer to data to be freed
  ) {
  RCCE_BLOCK *b1, *b2, *b3; // running block pointers
  int j1, j2;               // booleans determining merging of blocks

  // loop over whole (circular) list from the beginning until we locate space ptr
  b1 = RCCE_spacep->tail;
  while (b1->next->space != ptr && b1->next != RCCE_spacep->tail) {
    b1 = b1->next;
  }
  // b2 is target block whose space must be freed
  b2 = b1->next;
  // tail either has zero free space, or hasn't been malloc'ed; either way it
  // is never a freeable block (also reached when ptr was not found)
  if (b2 == RCCE_spacep->tail) return;
  // reset free space for target block (entire block); the successor's space
  // pointer marks where this block ends
  b3 = b2->next;
  b2->free_size = b3->space - b2->space;
  // determine with what non-empty blocks the target block can be merged
  j1 = (b1->free_size>0 && b1!=RCCE_spacep->tail); // predecessor block
  j2 = (b3->free_size>0 || b3==RCCE_spacep->tail); // successor block
  if (j1) {
    if (j2) { // splice all three blocks together: (b1,b2,b3) into b1
      b1->next = b3->next;
      b1->free_size += b3->free_size + b2->free_size;
      // if the successor was the tail, b1 becomes the new tail
      if (b3==RCCE_spacep->tail) RCCE_spacep->tail = b1;
      free(b3);
    }
    else { // only merge (b1,b2) into b1
      b1->free_size += b2->free_size;
      b1->next = b3;
    }
    free(b2);
  }
  else {
    if (j2) { // only merge (b2,b3) into b2
      b2->next = b3->next;
      b2->free_size += b3->free_size;
      if (b3==RCCE_spacep->tail) RCCE_spacep->tail = b2;
      free(b3);
    }
  }
}
//--------------------------------------------------------------------------------------
// FUNCTION: RCCE_malloc_request
//--------------------------------------------------------------------------------------
// this function tries to return a (padded) amount of space in the MPB of size
// "size" bytes. If not available, the function keeps halving space until it fits
//--------------------------------------------------------------------------------------
t_vcharp RCCE_malloc_request(
  size_t size,  // requested number of bytes
  size_t *chunk // number of bytes of space returned
  ) {
  t_vcharp combuf;
  combuf = 0;
  *chunk = PAD32byte(size);
  while (!combuf && *chunk >= RCCE_LINE_SIZE) {
    combuf = RCCE_malloc(*chunk);
    if (!combuf) {
      // Halve and re-pad the request.  Because PAD32byte rounds up, halving
      // can reproduce the same value (e.g. 32 -> 16 -> padded 32), which made
      // the original loop spin forever once the MPB was exhausted; bail out
      // when the request can no longer shrink.
      size_t half = PAD32byte(*chunk/2);
      if (half >= *chunk) break;
      *chunk = half;
    }
  }
  return (combuf);
}
|
LaplacianSmoothing.h | /*
* LaplacianSmoothing.h
*
* Created on: Nov 7, 2018
* Author: sebastian
*/
#ifndef LAPLACIANSMOOTHING_H_
#define LAPLACIANSMOOTHING_H_
#include <unordered_set>
#include "marchingCubes.h"
using namespace std;
// Element-wise accumulation of `input` into `output`; used as the combiner of
// the point3D_vec_reduction OpenMP reduction declared below.
static inline void reduce_point3D_vector(vector<point3D> & output, vector<point3D> const & input) {
	const size_t count = output.size();
	for (size_t idx = 0; idx < count; ++idx)
		output[idx] += input[idx];
}
#pragma omp declare reduction(point3D_vec_reduction : vector<point3D> : reduce_point3D_vector(omp_out, omp_in)) initializer(omp_priv(omp_orig))
// Compute per-vertex normals as the (normalized) sum of the adjacent face
// normals. Face normals are weighted by face area since the cross product is
// not normalized before accumulation.
static inline void computeNormals(vector<faceinfo> const & faces, vector<point3D> const & vertices,
		vector<point3D> & normals) {
	normals = vector<point3D>(vertices.size(), point3D(0, 0, 0));
	// each thread accumulates into a private copy of `normals`, merged by the
	// point3D_vec_reduction combiner declared above
#pragma omp parallel for schedule(runtime) reduction(point3D_vec_reduction:normals)
	for (size_t ii = 0; ii < faces.size(); ++ii) {
		point3D const & p1 = vertices[faces[ii].a];
		point3D const & p2 = vertices[faces[ii].b];
		point3D const & p3 = vertices[faces[ii].c];
		// face normal via cross product of two edge vectors
		point3D n = (p1 - p2) * (p1 - p3);
		normals[faces[ii].a] += n;
		normals[faces[ii].b] += n;
		normals[faces[ii].c] += n;
	}
#pragma omp parallel for schedule(runtime)
	for (size_t ii = 0; ii < normals.size(); ++ii)
		normals[ii].normalize();
}
// Merge two per-thread adjacency tables (combiner for vertex_nb_reduction).
// Layout: row 0 holds the neighbour count per vertex; rows 1..N hold the
// neighbour indices. Only entries of `input` not already present in `output`
// are appended.
// NOTE(review): output rows are assumed pre-sized to the maximum valence
// (20 in oneRingNeighbourhood) — no bounds check is performed here.
static inline void reduce_vertex_nb(vector<vector<uint32_t>> & output, vector<vector<uint32_t>> const & input) {
	size_t vertnumber = output[0].size();
	for (size_t ii = 0; ii < vertnumber; ++ii) {
		// snapshot of output's count; new appends need not be re-checked since
		// input's list is itself duplicate-free
		size_t outvert = output[0][ii];
		for (size_t j = 0; j < input[0][ii]; ++j) {
			bool newvert = true;
			for (size_t k = 0; k < outvert; ++k) {
				if (input[j+1][ii] == output[k+1][ii]) {
					newvert = false;
					break;
				}
			}
			if (newvert) {
				output[0][ii]++;
				output[output[0][ii]][ii] = input[j+1][ii];
			}
		}
	}
}
#pragma omp declare reduction(vertex_nb_reduction : vector<vector<uint32_t>> : reduce_vertex_nb(omp_out, omp_in)) initializer(omp_priv(omp_orig))
// Build the one-ring (immediate) neighbourhood of every vertex from the face
// list. Output layout: vertex_nb[0][i] is the neighbour count of vertex i;
// vertex_nb[1..N][i] are the neighbour indices.
// Each thread fills a private table over its share of the faces; the tables
// are then merged with duplicate elimination.
// NOTE(review): tables are fixed at 20 rows, i.e. a maximum valence of 19 is
// assumed (matches the original code) — confirm this bound for the input mesh.
static inline void oneRingNeighbourhood(vector<faceinfo> const & faces, vector<point3D> const & vertices,
		vector<vector<uint32_t>> & vertex_nb) {
	size_t vertnumber = vertices.size();
	size_t facenumber = faces.size();
	const int numthreads = omp_get_max_threads();
	vector<vector<vector<uint32_t>>> vertex_nb_th(numthreads);
#pragma omp parallel
	{
		const int ithread = omp_get_thread_num();
		vertex_nb_th[ithread] = vector<vector<uint32_t>>(20, vector<uint32_t>(vertnumber, 0));
		auto & nb = vertex_nb_th[ithread];
		// append `neighbour` to the adjacency list of `vert` unless present;
		// replaces six copy-pasted duplicate-check insertion blocks
		auto insert_neighbour = [&nb](uint32_t vert, uint32_t neighbour) {
			const uint32_t count = nb[0][vert];
			for (uint32_t j = 0; j < count; ++j) {
				if (nb[j + 1][vert] == neighbour)
					return;
			}
			nb[0][vert]++;
			nb[nb[0][vert]][vert] = neighbour;
		};
#pragma omp for schedule(runtime)
		for (size_t i = 0; i < facenumber; i++) {
			// each face edge contributes a neighbour relation in both directions
			insert_neighbour(faces[i].a, faces[i].b);
			insert_neighbour(faces[i].a, faces[i].c);
			insert_neighbour(faces[i].b, faces[i].a);
			insert_neighbour(faces[i].b, faces[i].c);
			insert_neighbour(faces[i].c, faces[i].a);
			insert_neighbour(faces[i].c, faces[i].b);
		}
	}
	// merge the per-thread tables into thread 0's copy, skipping duplicates
	vertex_nb = vertex_nb_th[0];
#pragma omp parallel for schedule(runtime)
	for (size_t ii = 0; ii < vertnumber; ++ii) {
		// ithread is int to match numthreads (avoids signed/unsigned compare)
		for (int ithread = 1; ithread < numthreads; ++ithread) {
			// snapshot: entries appended from this same thread are distinct
			// by construction, so they need not be re-checked
			size_t outvert = vertex_nb[0][ii];
			for (size_t j = 0; j < vertex_nb_th[ithread][0][ii]; ++j) {
				bool newvert = true;
				for (size_t k = 0; k < outvert; ++k) {
					if (vertex_nb_th[ithread][j+1][ii] == vertex_nb[k+1][ii]) {
						newvert = false;
						break;
					}
				}
				if (newvert) {
					vertex_nb[0][ii]++;
					vertex_nb[vertex_nb[0][ii]][ii] = vertex_nb_th[ithread][j+1][ii];
				}
			}
		}
	}
}
// Taubin-style lambda/mu smoothing: each iteration applies a shrink step
// (lambda) followed by an inflate step (mu, typically negative).
static inline void LaplacianSmoothing(int smoothing_iterations, float lambda, float mu,
		vector<faceinfo> & faces, vector<point3D> & vertices,
		float resolution) {
	size_t vertnumber = vertices.size();
	size_t facenumber = faces.size();
	/**
	 * vertex_nb is a 2D array that indicates the immediate neighbours of all vertices in the mesh.
	 * vertex_nb[0][i] contains the number of neighbours for vertex i, i.e. vertices[i]
	 * say vertex_nb[0][i] is N, then the indices of the neighbours of vertices[i] are
	 * in vertex_nb[1][i], vertex_nb[2][i], ..., vertex_nb[N][i]
	 */
	vector<vector<uint32_t>> vertex_nb;
	oneRingNeighbourhood(faces, vertices, vertex_nb);
	// loop counter is int to match smoothing_iterations (no signed/unsigned mix)
	for (int k = 0; k < smoothing_iterations; k++) {
		vector<point3D> tps(vertnumber, point3D(0, 0, 0));
		// lambda pass: displacement towards the neighbour centroid
#pragma omp parallel for schedule(runtime)
		for (size_t i = 0; i < vertnumber; i++) {
			for (uint32_t j = 0; j < vertex_nb[0][i]; ++j) {
				tps[i] += vertices[vertex_nb[j+1][i]];
			}
			tps[i] /= vertex_nb[0][i];
			tps[i] -= vertices[i];
			tps[i] *= lambda;
		}
#pragma omp parallel for schedule(runtime)
		for (size_t i = 0; i < vertnumber; i++) {
			vertices[i] += tps[i];
		}
		// BUG FIX: tps must be reset before the mu pass; previously the stale
		// lambda-step displacements were accumulated into (and divided with)
		// the new centroid sums, corrupting the inflate step. The sibling
		// LaplacianSmoothing2 already performs this reset.
		tps = vector<point3D>(vertnumber, point3D(0, 0, 0));
		// mu pass: displacement computed from the updated vertex positions
#pragma omp parallel for schedule(runtime)
		for (size_t i = 0; i < vertnumber; i++) {
			for (uint32_t j = 0; j < vertex_nb[0][i]; ++j) {
				tps[i] += vertices[vertex_nb[j+1][i]];
			}
			tps[i] /= vertex_nb[0][i];
			tps[i] -= vertices[i];
			tps[i] *= mu;
		}
#pragma omp parallel for schedule(runtime)
		for (size_t i = 0; i < vertnumber; i++) {
			vertices[i] += tps[i];
		}
	}
}
// Variant of LaplacianSmoothing where the mu (inflate) step is only applied
// to vertices with more than 5 neighbours.
static inline void LaplacianSmoothing2(int smoothing_iterations, float lambda, float mu,
		vector<faceinfo> & faces, vector<point3D> & vertices,
		float resolution) {
	size_t vertnumber = vertices.size();
	size_t facenumber = faces.size();
	/**
	 * vertex_nb is a 2D array that indicates the immediate neighbours of all vertices in the mesh.
	 * vertex_nb[0][i] contains the number of neighbours for vertex i, i.e. vertices[i]
	 * say vertex_nb[0][i] is N, then the indices of the neighbours of vertices[i] are
	 * in vertex_nb[1][i], vertex_nb[2][i], ..., vertex_nb[N][i]
	 */
	vector<vector<uint32_t>> vertex_nb;
	oneRingNeighbourhood(faces, vertices, vertex_nb);
	// loop counter is int to match smoothing_iterations (no signed/unsigned mix)
	for (int k = 0; k < smoothing_iterations; k++) {
		vector<point3D> tps(vertnumber, point3D(0, 0, 0));
		// lambda pass: displacement towards the neighbour centroid
#pragma omp parallel for schedule(runtime)
		for (size_t i = 0; i < vertnumber; i++) {
			for (uint32_t j = 0; j < vertex_nb[0][i]; ++j) {
				tps[i] += vertices[vertex_nb[j + 1][i]];
			}
			tps[i] /= vertex_nb[0][i];
			tps[i] -= vertices[i];
			tps[i] *= lambda;
		}
#pragma omp parallel for schedule(runtime)
		for (size_t i = 0; i < vertnumber; i++) {
			vertices[i] += tps[i];
		}
		// reset displacements before the mu pass
		tps = vector<point3D>(vertnumber, point3D(0, 0, 0));
		// mu pass, restricted to vertices of valence > 5
#pragma omp parallel for schedule(runtime)
		for (size_t i = 0; i < vertnumber; i++) {
			if (vertex_nb[0][i] > 5) {
				for (uint32_t j = 0; j < vertex_nb[0][i]; ++j) {
					tps[i] += vertices[vertex_nb[j + 1][i]];
				}
				tps[i] /= vertex_nb[0][i];
				tps[i] -= vertices[i];
				tps[i] *= mu;
			}
		}
#pragma omp parallel for schedule(runtime)
		for (size_t i = 0; i < vertnumber; i++) {
			vertices[i] += tps[i];
		}
	}
}
// Variant of LaplacianSmoothing operating on the two-ring neighbourhood,
// rebuilt every iteration from the one-ring table.
static inline void LaplacianSmoothing3(int smoothing_iterations, float lambda, float mu,
		vector<faceinfo> & faces, vector<point3D> & vertices,
		float resolution) {
	size_t vertnumber = vertices.size();
	size_t facenumber = faces.size();
	/**
	 * vertex_nb is a 2D array that indicates the immediate neighbours of all vertices in the mesh.
	 * vertex_nb[0][i] contains the number of neighbours for vertex i, i.e. vertices[i]
	 * say vertex_nb[0][i] is N, then the indices of the neighbours of vertices[i] are
	 * in vertex_nb[1][i], vertex_nb[2][i], ..., vertex_nb[N][i]
	 */
	vector<vector<uint32_t>> vertex_nb;
	oneRingNeighbourhood(faces, vertices, vertex_nb);
	// loop counter is int to match smoothing_iterations (no signed/unsigned mix)
	for (int iter = 0; iter < smoothing_iterations; iter++) {
		vector<point3D> tps(vertnumber, point3D(0, 0, 0));
		vector<unordered_set<uint32_t>> twoRingNeighbourhood(vertnumber);
		// lambda pass over the two-ring neighbourhood
#pragma omp parallel for schedule(runtime)
		for (size_t i = 0; i < vertnumber; i++) {
			// collect neighbours-of-neighbours; inner counters renamed so they
			// no longer shadow the iteration counter (previously both were 'k')
			for (uint32_t j = 1; j <= vertex_nb[0][i]; ++j) {
				for (uint32_t m = 1; m <= vertex_nb[0][vertex_nb[j][i]]; ++m) {
					twoRingNeighbourhood[i].insert(vertex_nb[m][vertex_nb[j][i]]);
				}
			}
			for (auto const & j : twoRingNeighbourhood[i]) {
				tps[i] += vertices[j];
			}
			tps[i] /= twoRingNeighbourhood[i].size();
			tps[i] -= vertices[i];
			tps[i] *= lambda;
		}
#pragma omp parallel for schedule(runtime)
		for (size_t i = 0; i < vertnumber; i++) {
			vertices[i] += tps[i];
		}
		// reset displacements before the mu pass
		tps = vector<point3D>(vertnumber, point3D(0, 0, 0));
#pragma omp parallel for schedule(runtime)
		for (size_t i = 0; i < vertnumber; i++) {
			for (auto const & j : twoRingNeighbourhood[i]) {
				tps[i] += vertices[j];
			}
			tps[i] /= twoRingNeighbourhood[i].size();
			tps[i] -= vertices[i];
			tps[i] *= mu;
		}
#pragma omp parallel for schedule(runtime)
		for (size_t i = 0; i < vertnumber; i++) {
			vertices[i] += tps[i];
		}
	}
}
// Single-weight smoothing: both passes use the same factor beta, and only
// vertices with more than 4 neighbours are scaled (first pass) or moved at
// all (second pass).
static inline void newSmoothing(int smoothing_iterations, float beta,
		vector<faceinfo> & faces, vector<point3D> & vertices,
		float resolution) {
	size_t vertnumber = vertices.size();
	size_t facenumber = faces.size();
	/**
	 * vertex_nb is a 2D array that indicates the immediate neighbours of all vertices in the mesh.
	 * vertex_nb[0][i] contains the number of neighbours for vertex i, i.e. vertices[i]
	 * say vertex_nb[0][i] is N, then the indices of the neighbours of vertices[i] are
	 * in vertex_nb[1][i], vertex_nb[2][i], ..., vertex_nb[N][i]
	 */
	vector<vector<uint32_t>> vertex_nb;
	oneRingNeighbourhood(faces, vertices, vertex_nb);
	// loop counter is int to match smoothing_iterations (no signed/unsigned mix)
	for (int k = 0; k < smoothing_iterations; k++) {
		vector<point3D> tps(vertnumber, point3D(0, 0, 0));
#pragma omp parallel for schedule(runtime)
		for (size_t i = 0; i < vertnumber; i++) {
			for (uint32_t j = 1; j <= vertex_nb[0][i]; ++j) {
				tps[i] += vertices[vertex_nb[j][i]];
			}
			tps[i] /= vertex_nb[0][i];
			tps[i] -= vertices[i];
			// full centroid displacement for low-valence vertices, damped by
			// beta otherwise
			if (vertex_nb[0][i] > 4)
				tps[i] *= beta;
		}
#pragma omp parallel for schedule(runtime)
		for (size_t i = 0; i < vertnumber; i++) {
			vertices[i] += tps[i];
		}
		// reset displacements before the second pass
		tps = vector<point3D>(vertnumber, point3D(0, 0, 0));
#pragma omp parallel for schedule(runtime)
		for (size_t i = 0; i < vertnumber; i++) {
			if (vertex_nb[0][i] > 4) {
				for (uint32_t j = 1; j <= vertex_nb[0][i]; ++j) {
					tps[i] += vertices[vertex_nb[j][i]];
				}
				tps[i] /= vertex_nb[0][i];
				tps[i] -= vertices[i];
				tps[i] *= beta;
			}
		}
#pragma omp parallel for schedule(runtime)
		for (size_t i = 0; i < vertnumber; i++) {
			vertices[i] += tps[i];
		}
	}
}
#endif /* LAPLACIANSMOOTHING_H_ */
|
psd.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP SSSSS DDDD %
% P P SS D D %
% PPPP SSS D D %
% P SS D D %
% P SSSSS DDDD %
% %
% %
% Read/Write Adobe Photoshop Image Format %
% %
% Software Design %
% Cristy %
% Leonard Rosenthol %
% July 1992 %
% Dirk Lemstra %
% December 2013 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Photoshop spec @ https://www.adobe.com/devnet-apps/photoshop/fileformatashtml
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/channel.h"
#include "magick/colormap.h"
#include "magick/colormap-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/constitute.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/module.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel.h"
#include "magick/pixel-accessor.h"
#include "magick/policy.h"
#include "magick/profile.h"
#include "magick/property.h"
#include "magick/registry.h"
#include "magick/quantum-private.h"
#include "magick/static.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#ifdef MAGICKCORE_ZLIB_DELEGATE
#include <zlib.h>
#endif
#include "psd-private.h"
/*
  Define declarations.
*/
#define MaxPSDChannels 56
#define PSDQuantum(x) (((ssize_t) (x)+1) & -2)
/*
  Enumerated declarations.
*/
/* Per-channel compression schemes defined by the PSD format. */
typedef enum
{
  Raw = 0,                   /* uncompressed scanlines */
  RLE = 1,                   /* PackBits run-length encoding */
  ZipWithoutPrediction = 2,  /* zlib deflate */
  ZipWithPrediction = 3      /* zlib deflate of per-row horizontal deltas */
} PSDCompressionType;
/* Color modes stored in the PSD file header. */
typedef enum
{
  BitmapMode = 0,
  GrayscaleMode = 1,
  IndexedMode = 2,
  RGBMode = 3,
  CMYKMode = 4,
  MultichannelMode = 7,  /* note: values 5 and 6 are not used here */
  DuotoneMode = 8,
  LabMode = 9
} PSDImageType;
/*
  Typedef declarations.
*/
/* Identifier and stored byte length of one layer channel. */
typedef struct _ChannelInfo
{
  short
    type;  /* channel id: >= 0 color channels; negative ids are alpha/mask */
  size_t
    size;  /* channel data length in bytes as recorded in the layer record */
} ChannelInfo;
/* A layer's opacity mask raster and its placement. */
typedef struct _MaskInfo
{
  Image
    *image;  /* decoded mask image, NULL when the layer has no mask */
  RectangleInfo
    page;  /* mask position and extent relative to the canvas */
  unsigned char
    background,  /* mask background value used when applying the mask */
    flags;  /* PSD mask flags; values > 2 are treated as unsupported */
} MaskInfo;
/* Everything parsed from a single PSD layer record. */
typedef struct _LayerInfo
{
  ChannelInfo
    channel_info[MaxPSDChannels];  /* per-channel id and byte count */
  char
    blendkey[4];  /* 4-byte blend-mode signature, e.g. "norm" */
  Image
    *image;  /* decoded layer raster */
  MaskInfo
    mask;  /* optional layer opacity mask */
  Quantum
    opacity;  /* layer opacity; QuantumRange means fully opaque */
  RectangleInfo
    page;  /* layer placement on the canvas */
  size_t
    offset_x,
    offset_y;
  unsigned char
    clipping,
    flags,
    name[257],  /* layer name; presumably Pascal-string storage -- verify */
    visible;  /* zero when the layer is hidden */
  unsigned short
    channels;  /* number of valid entries in channel_info */
  StringInfo
    *info;  /* raw additional-layer-information block, may be NULL */
} LayerInfo;
/*
Forward declarations.
*/
static MagickBooleanType
WritePSDImage(const ImageInfo *,Image *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P S D %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsPSD() returns MagickTrue if the image format type, identified by the
% magick string, is PSD.
%
% The format of the IsPSD method is:
%
% MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
/*
  Return MagickTrue when the leading bytes carry the PSD signature "8BPS".
*/
static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
{
  MagickBooleanType
    is_psd;

  is_psd=MagickFalse;
  if ((length >= 4) &&
      (LocaleNCompare((const char *) magick,"8BPS",4) == 0))
    is_psd=MagickTrue;
  return(is_psd);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPSDImage() reads an Adobe Photoshop image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadPSDImage method is:
%
% Image *ReadPSDImage(image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Map the image's composite operator onto the 4-byte PSD blend-mode
  signature.  The signature is stored big-endian in the file, so for
  little-endian images the byte-reversed spelling is returned (e.g.
  "norm" becomes "mron").  Unmapped operators are written as normal
  blending.
*/
static const char *CompositeOperatorToPSDBlendMode(Image *image)
{
  switch (image->compose)
  {
    case ColorBurnCompositeOp:
      return(image->endian == LSBEndian ? "vidi" : "idiv");
    case ColorDodgeCompositeOp:
      return(image->endian == LSBEndian ? " vid" : "div ");
    case ColorizeCompositeOp:
      return(image->endian == LSBEndian ? "rloc" : "colr");
    case DarkenCompositeOp:
      return(image->endian == LSBEndian ? "krad" : "dark");
    case DifferenceCompositeOp:
      return(image->endian == LSBEndian ? "ffid" : "diff");
    case DissolveCompositeOp:
      return(image->endian == LSBEndian ? "ssid" : "diss");
    case ExclusionCompositeOp:
      return(image->endian == LSBEndian ? "dums" : "smud");
    case HardLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLh" : "hLit");
    case HardMixCompositeOp:
      return(image->endian == LSBEndian ? "xiMh" : "hMix");
    case HueCompositeOp:
      return(image->endian == LSBEndian ? " euh" : "hue ");
    case LightenCompositeOp:
      return(image->endian == LSBEndian ? "etil" : "lite");
    case LinearBurnCompositeOp:
      return(image->endian == LSBEndian ? "nrbl" : "lbrn");
    case LinearDodgeCompositeOp:
      return(image->endian == LSBEndian ? "gddl" : "lddg");
    case LinearLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLl" : "lLit");
    case LuminizeCompositeOp:
      return(image->endian == LSBEndian ? " mul" : "lum ");
    case MultiplyCompositeOp:
      return(image->endian == LSBEndian ? " lum" : "mul ");
    case OverlayCompositeOp:
      return(image->endian == LSBEndian ? "revo" : "over");
    case PinLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLp" : "pLit");
    case SaturateCompositeOp:
      return(image->endian == LSBEndian ? " tas" : "sat ");
    case ScreenCompositeOp:
      return(image->endian == LSBEndian ? "nrcs" : "scrn");
    case SoftLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLs" : "sLit");
    case VividLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLv" : "vLit");
    case OverCompositeOp:
    default:
      return(image->endian == LSBEndian ? "mron" : "norm");
  }
}
/*
For some reason Photoshop seems to blend semi-transparent pixels with white.
This method reverts the blending. This can be disabled by setting the
option 'psd:alpha-unblend' to off.
*/
/*
  Undo Photoshop's blending of semi-transparent pixels against white:
  for every pixel with 0 < alpha < 1, c' = (c - (1-alpha)*QuantumRange)/alpha.
  No-op for opaque or non-sRGB images, or when 'psd:alpha-unblend' is off.
  Returns MagickFalse only when the pixel cache cannot be accessed.
*/
static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info,
  Image *image,ExceptionInfo* exception)
{
  const char
    *option;
  MagickBooleanType
    status;
  ssize_t
    y;
  if ((image->matte == MagickFalse) || (image->colorspace != sRGBColorspace))
    return(MagickTrue);
  option=GetImageOption(image_info,"psd:alpha-unblend");
  if (IsStringNotFalse(option) == MagickFalse)
    return(MagickTrue);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelPacket
      *magick_restrict q;
    ssize_t
      x;
    /* a prior row failed: skip remaining rows (cannot break in OpenMP) */
    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        gamma;
      /* gamma is the normalized alpha; skip fully transparent/opaque */
      gamma=QuantumScale*GetPixelAlpha(q);
      if (gamma != 0.0 && gamma != 1.0)
        {
          SetPixelRed(q,(GetPixelRed(q)-((1.0-gamma)*QuantumRange))/gamma);
          SetPixelGreen(q,(GetPixelGreen(q)-((1.0-gamma)*QuantumRange))/gamma);
          SetPixelBlue(q,(GetPixelBlue(q)-((1.0-gamma)*QuantumRange))/gamma);
        }
      q++;
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  return(status);
}
/*
  Translate a PSD compression tag into the equivalent ImageMagick
  CompressionType; unknown tags degrade to NoCompression.
*/
static inline CompressionType ConvertPSDCompression(
  PSDCompressionType compression)
{
  if (compression == RLE)
    return RLECompression;
  if ((compression == ZipWithPrediction) ||
      (compression == ZipWithoutPrediction))
    return ZipCompression;
  return NoCompression;
}
/*
  Fold a layer's global opacity into its alpha channel: every pixel's alpha
  is scaled by opacity/QuantumRange (or divided back out when revert is set,
  as done before writing a PSD).  Fully opaque layers are left untouched.
  Returns MagickFalse only on a pixel-cache failure.
*/
static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity,
  MagickBooleanType revert,ExceptionInfo *exception)
{
  MagickBooleanType
    status;
  ssize_t
    y;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " applying layer opacity %.20g", (double) opacity);
  if (opacity == QuantumRange)
    return(MagickTrue);
  /* make sure there is an alpha channel to modulate */
  if (image->matte != MagickTrue)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelPacket
      *magick_restrict q;
    ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (revert == MagickFalse)
        SetPixelAlpha(q,(Quantum) (QuantumScale*(GetPixelAlpha(q)*opacity)));
      else if (opacity > 0)
        SetPixelAlpha(q,(Quantum) (QuantumRange*(GetPixelAlpha(q)/
          (MagickRealType) opacity)));
      q++;
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  return(status);
}
/*
  Modulate the image's alpha channel by a layer mask: a full-canvas copy is
  filled with the mask background value, the (possibly smaller) mask is
  composited onto it at its recorded page offset, and each pixel's alpha is
  multiplied by the resulting mask intensity (or divided back out when
  revert is set).  No-op for images without an alpha channel.
*/
static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask,
  Quantum background,MagickBooleanType revert,ExceptionInfo *exception)
{
  Image
    *complete_mask;
  MagickBooleanType
    status;
  MagickPixelPacket
    color;
  ssize_t
    y;
  if (image->matte == MagickFalse)
    return(MagickTrue);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " applying opacity mask");
  complete_mask=CloneImage(image,0,0,MagickTrue,exception);
  if (complete_mask == (Image *) NULL)
    return(MagickFalse);
  complete_mask->matte=MagickTrue;
  /* flood the canvas-sized mask with the background gray value */
  GetMagickPixelPacket(complete_mask,&color);
  color.red=(MagickRealType) background;
  (void) SetImageColor(complete_mask,&color);
  status=CompositeImage(complete_mask,OverCompositeOp,mask,
    mask->page.x-image->page.x,mask->page.y-image->page.y);
  if (status == MagickFalse)
    {
      complete_mask=DestroyImage(complete_mask);
      return(status);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelPacket
      *magick_restrict q;
    PixelPacket
      *p;
    ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception);
    if ((q == (PixelPacket *) NULL) || (p == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        alpha,
        intensity;
      alpha=(MagickRealType) GetPixelAlpha(q);
      intensity=GetPixelIntensity(complete_mask,p);
      if (revert == MagickFalse)
        SetPixelAlpha(q,ClampToQuantum(intensity*(QuantumScale*alpha)));
      else if (intensity > 0)
        SetPixelAlpha(q,ClampToQuantum((alpha/intensity)*QuantumRange));
      q++;
      p++;
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  complete_mask=DestroyImage(complete_mask);
  return(status);
}
/*
  Stash the layer's opacity mask image in the global image registry under a
  short random key, and record that key as the "psd:opacity-mask" artifact
  on the layer image so the writer can recover the mask later.
*/
static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info,
  ExceptionInfo *exception)
{
  char
    *key;
  RandomInfo
    *random_info;
  StringInfo
    *key_info;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " preserving opacity mask");
  random_info=AcquireRandomInfo();
  key_info=GetRandomKey(random_info,2+1);
  key=(char *) GetStringInfoDatum(key_info);
  /*
    The key is two random characters plus the mask background byte and a
    terminating NUL.  Indices must stay within the 3-byte key requested
    above: writing at [8]/[9] runs past the random key and leaves bytes
    [3..7] of the registry key uninitialized.
  */
  key[2]=(char) layer_info->mask.background;
  key[3]='\0';
  layer_info->mask.image->page.x+=layer_info->page.x;
  layer_info->mask.image->page.y+=layer_info->page.y;
  (void) SetImageRegistry(ImageRegistryType,(const char *) key,
    layer_info->mask.image,exception);
  (void) SetImageArtifact(layer_info->image,"psd:opacity-mask",
    (const char *) key);
  key_info=DestroyStringInfo(key_info);
  random_info=DestroyRandomInfo(random_info);
}
/*
  Expand one source byte into the destination buffer according to the image
  depth: depth 1 yields 8 bilevel pixels (set bit -> 0, clear bit -> 255),
  depth 2 yields 4 two-bit samples, depth 4 yields 2 nibbles, and any other
  depth copies the byte through unchanged.  Returns the number of
  destination bytes written, or -1 when fewer than required remain.
*/
static inline ssize_t PSDExpandByte(const ssize_t depth,const int value,
  unsigned char *destination,const ssize_t remaining)
{
  switch (depth)
  {
    case 1:
    {
      ssize_t
        bit;

      if (remaining < 8)
        return(-1);
      for (bit=7; bit >= 0; bit--)
        *destination++=((value >> bit) & 0x01) ? 0U : 255U;
      return(8);
    }
    case 2:
    {
      if (remaining < 4)
        return(-1);
      *destination++=(unsigned char) ((value >> 6) & 0x03);
      *destination++=(unsigned char) ((value >> 4) & 0x03);
      *destination++=(unsigned char) ((value >> 2) & 0x03);
      *destination=(unsigned char) (value & 0x03);
      return(4);
    }
    case 4:
    {
      if (remaining < 2)
        return(-1);
      *destination++=(unsigned char) ((value >> 4) & 0x0f);
      *destination=(unsigned char) (value & 0x0f);
      return(2);
    }
    default:
    {
      if (remaining < 1)
        return(-1);
      *destination=(unsigned char) value;
      return(1);
    }
  }
}

/*
  Decode a PackBits (RLE) compressed stream: a control byte below 128
  introduces a literal run of control+1 source bytes, a control byte above
  128 repeats the next source byte 257-control times, and 128 is a no-op
  filler.  Each source byte is expanded per the image depth.  Decoding
  stops, returning the number of destination bytes produced so far, as
  soon as either buffer would be overrun.
*/
static ssize_t DecodePSDPixels(const size_t number_compact_pixels,
  const unsigned char *compact_pixels,const ssize_t depth,
  const size_t number_pixels,unsigned char *pixels)
{
  ssize_t
    produced,
    source_left;

  source_left=(ssize_t) number_compact_pixels;
  produced=0;
  while ((source_left > 1) && (produced < (ssize_t) number_pixels))
  {
    size_t
      j,
      run;

    int
      value;

    ssize_t
      written;

    source_left--;
    run=(size_t) (*compact_pixels++);
    if (run == 128)
      continue;
    if (run > 128)
      {
        /* replicate run: one source byte repeated 257-run times */
        run=256-run+1;
        if (source_left == 0)
          return(produced);
        source_left--;
        value=(int) (*compact_pixels++);
        for (j=0; j < run; j++)
        {
          written=PSDExpandByte(depth,value,pixels+produced,
            (ssize_t) number_pixels-produced);
          if (written < 0)
            return(produced);
          produced+=written;
        }
        continue;
      }
    /* literal run: run+1 source bytes copied through */
    run++;
    for (j=0; j < run; j++)
    {
      if (source_left == 0)
        return(produced);
      source_left--;
      written=PSDExpandByte(depth,(int) *compact_pixels,pixels+produced,
        (ssize_t) number_pixels-produced);
      if (written < 0)
        return(produced);
      produced+=written;
      compact_pixels++;
    }
  }
  return(produced);
}
/*
  Release the images and extra-info blobs attached to each layer, then free
  the layer array itself.  Returns NULL for convenient assignment.
*/
static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info,
  const ssize_t number_layers)
{
  ssize_t
    layer;

  for (layer=0; layer < number_layers; layer++)
  {
    LayerInfo
      *current;

    current=layer_info+layer;
    if (current->image != (Image *) NULL)
      current->image=DestroyImage(current->image);
    if (current->mask.image != (Image *) NULL)
      current->mask.image=DestroyImage(current->mask.image);
    if (current->info != (StringInfo *) NULL)
      current->info=DestroyStringInfo(current->info);
  }
  return (LayerInfo *) RelinquishMagickMemory(layer_info);
}
/*
  Bytes per stored sample: colormapped images with more than 256 entries
  need 2-byte indexes; otherwise the size follows the image depth.
*/
static inline size_t GetPSDPacketSize(const Image *image)
{
  if ((image->storage_class == PseudoClass) && (image->colors > 256))
    return(2);
  if (image->depth > 16)
    return(4);
  return(image->depth > 8 ? 2 : 1);
}
/*
  Read a section length from the blob: 32-bit for PSD (version 1),
  64-bit for PSB.
*/
static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image)
{
  MagickSizeType
    size;

  size=(psd_info->version == 1) ? (MagickSizeType) ReadBlobLong(image) :
    (MagickSizeType) ReadBlobLongLong(image);
  return(size);
}
/*
  Bytes in one uncompressed channel row; bilevel rows pack 8 pixels per
  byte before the per-sample packet size is applied.
*/
static inline size_t GetPSDRowSize(Image *image)
{
  size_t
    samples;

  samples=image->columns;
  if (image->depth == 1)
    samples=(samples+7)/8;
  return(samples*GetPSDPacketSize(image));
}
/*
  Human-readable name of a PSD color mode (used for logging).
*/
static const char *ModeToString(PSDImageType type)
{
  const char
    *name;

  switch (type)
  {
    case BitmapMode: name="Bitmap"; break;
    case GrayscaleMode: name="Grayscale"; break;
    case IndexedMode: name="Indexed"; break;
    case RGBMode: name="RGB"; break;
    case CMYKMode: name="CMYK"; break;
    case MultichannelMode: name="Multichannel"; break;
    case DuotoneMode: name="Duotone"; break;
    case LabMode: name="L*A*B"; break;
    default: name="unknown"; break;
  }
  return(name);
}
/*
  Walk the 8BIM image-resource blocks, capturing the whole section as an
  "8bim" profile and extracting the resources this coder cares about:
  0x03ed (resolution info) and 0x0421 (version info, whose 5th byte tells
  whether a merged composite image is present).  Each block is: "8BIM"
  signature, 2-byte id, Pascal-style name padded to an even length, 4-byte
  payload size, payload padded to an even length.
*/
static StringInfo *ParseImageResourceBlocks(Image *image,
  const unsigned char *blocks,size_t length,
  MagickBooleanType *has_merged_image)
{
  const unsigned char
    *p;
  ssize_t
    offset;
  StringInfo
    *profile;
  unsigned char
    name_length;
  unsigned int
    count;
  unsigned short
    id,
    short_sans;
  if (length < 16)
    return((StringInfo *) NULL);
  profile=BlobToStringInfo((const void *) NULL,length);
  SetStringInfoDatum(profile,blocks);
  SetStringInfoName(profile,"8bim");
  /* 7 is the minimum bytes needed for signature+id+name before a payload */
  for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); )
  {
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushCharPixel(p,&name_length);
    /* name (with its length byte) is padded to an even byte count */
    if ((name_length % 2) == 0)
      name_length++;
    p+=name_length;
    if (p > (blocks+length-4))
      break;
    p=PushLongPixel(MSBEndian,p,&count);
    offset=(ssize_t) count;
    /* reject payloads that would run outside the resource section */
    if (((p+offset) < blocks) || ((p+offset) > (blocks+length)))
      break;
    switch (id)
    {
      case 0x03ed:
      {
        char
          value[MaxTextExtent];
        unsigned short
          resolution;
        /*
          Resolution info.
        */
        if (offset < 16)
          break;
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->x_resolution=(double) resolution;
        (void) FormatLocaleString(value,MaxTextExtent,"%g",
          image->x_resolution);
        (void) SetImageProperty(image,"tiff:XResolution",value);
        /* skip display unit and width fields */
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->y_resolution=(double) resolution;
        (void) FormatLocaleString(value,MaxTextExtent,"%g",
          image->y_resolution);
        (void) SetImageProperty(image,"tiff:YResolution",value);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        image->units=PixelsPerInchResolution;
        break;
      }
      case 0x0421:
      {
        /* version info: a zero here means no merged composite stored */
        if ((offset > 4) && (*(p+4) == 0))
          *has_merged_image=MagickFalse;
        p+=offset;
        break;
      }
      default:
      {
        p+=offset;
        break;
      }
    }
    /* payloads are padded to even lengths */
    if ((offset & 0x01) != 0)
      p++;
  }
  return(profile);
}
/*
  Translate a 4-byte PSD blend-mode signature (big-endian spelling, as
  stored in the file) into the matching composite operator.  Missing or
  unrecognized signatures fall back to OverCompositeOp.
*/
static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode)
{
  static const struct
  {
    const char
      *key;

    CompositeOperator
      compose;
  } blend_map[] =
  {
    { "norm", OverCompositeOp },
    { "mul ", MultiplyCompositeOp },
    { "diss", DissolveCompositeOp },
    { "diff", DifferenceCompositeOp },
    { "dark", DarkenCompositeOp },
    { "lite", LightenCompositeOp },
    { "hue ", HueCompositeOp },
    { "sat ", SaturateCompositeOp },
    { "colr", ColorizeCompositeOp },
    { "lum ", LuminizeCompositeOp },
    { "scrn", ScreenCompositeOp },
    { "over", OverlayCompositeOp },
    { "hLit", HardLightCompositeOp },
    { "sLit", SoftLightCompositeOp },
    { "smud", ExclusionCompositeOp },
    { "div ", ColorDodgeCompositeOp },
    { "idiv", ColorBurnCompositeOp },
    { "lbrn", LinearBurnCompositeOp },
    { "lddg", LinearDodgeCompositeOp },
    { "lLit", LinearLightCompositeOp },
    { "vLit", VividLightCompositeOp },
    { "pLit", PinLightCompositeOp },
    { "hMix", HardMixCompositeOp }
  };

  size_t
    i;

  if (mode == (const char *) NULL)
    return(OverCompositeOp);
  for (i=0; i < (sizeof(blend_map)/sizeof(blend_map[0])); i++)
    if (LocaleNCompare(mode,blend_map[i].key,4) == 0)
      return(blend_map[i].compose);
  return(OverCompositeOp);
}
/*
  Read length bytes into p; on little-endian images the signature bytes
  arrive reversed, so restore file (big-endian) order by mirroring the
  buffer in place.  Returns the number of bytes actually read.
*/
static inline ssize_t ReadPSDString(Image *image,char *p,const size_t length)
{
  ssize_t
    count;

  count=ReadBlob(image,length,(unsigned char *) p);
  if ((count == (ssize_t) length) && (image->endian != MSBEndian))
    {
      char
        *head,
        *tail;

      head=p;
      tail=p+length-1;
      while (head < tail)
      {
        char
          swap;

        swap=*head;
        *head++=*tail;
        *tail--=swap;
      }
    }
  return(count);
}
/*
  Store one decoded sample into the pixel (and colormap index) arrays at
  column x.  `type` is the PSD channel id: 0,1,2,... select the color
  channels in order, -1 is the transparency channel, and -2 (handled as a
  gray value here) is presumably the user layer mask -- see the type < -1
  handling in ReadPSDChannel.  `packet_size` is the stored bytes per
  sample (1, 2 or 4).
*/
static inline void SetPSDPixel(Image *image,const size_t channels,
  const ssize_t type,const size_t packet_size,const Quantum pixel,PixelPacket *q,
  IndexPacket *indexes,ssize_t x)
{
  if (image->storage_class == PseudoClass)
    {
      PixelPacket
        *color;
      IndexPacket
        index;
      index=(IndexPacket) pixel;
      /* 1-byte samples arrive scaled to Quantum; scale back to 0..255 */
      if (packet_size == 1)
        index=(IndexPacket) ScaleQuantumToChar(index);
      index=ConstrainColormapIndex(image,(ssize_t) index);
      if (type == 0)
        SetPixelIndex(indexes+x,index);
      /* with more channels to come, defer resolving the colormap color */
      if ((type == 0) && (channels > 1))
        return;
      color=image->colormap+(ssize_t) GetPixelIndex(indexes+x);
      if (type != 0)
        SetPixelAlpha(color,pixel);
      SetPixelRGBO(q,color);
      return;
    }
  switch (type)
  {
    case -1:
    {
      SetPixelAlpha(q,pixel);
      break;
    }
    case -2:
    case 0:
    {
      SetPixelRed(q,pixel);
      /* grayscale images and mask channels replicate red to green/blue */
      if ((channels < 3) || (type == -2))
        {
          SetPixelGreen(q,GetPixelRed(q));
          SetPixelBlue(q,GetPixelRed(q));
        }
      break;
    }
    case -3:
    case 1:
    {
      SetPixelGreen(q,pixel);
      break;
    }
    case -4:
    case 2:
    {
      SetPixelBlue(q,pixel);
      break;
    }
    case 3:
    {
      /* 4th channel: black for CMYK, otherwise alpha when present */
      if (image->colorspace == CMYKColorspace)
        SetPixelIndex(indexes+x,pixel);
      else
        if (image->matte != MagickFalse)
          SetPixelAlpha(q,pixel);
      break;
    }
    case 4:
    {
      if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) &&
          (channels > 3))
        break;
      if (image->matte != MagickFalse)
        SetPixelAlpha(q,pixel);
      break;
    }
  }
}
/*
  Push one decoded scanline of channel samples into the image at `row`.
  Samples are 1, 2 or 4 bytes wide (per GetPSDPacketSize); 4-byte samples
  are big-endian floats scaled to the quantum range.  For 1-bit images
  each source byte expands to up to 8 bilevel pixels (set bit -> black).
*/
static MagickBooleanType ReadPSDChannelPixels(Image *image,
  const size_t channels,const ssize_t row,const ssize_t type,
  const unsigned char *pixels,ExceptionInfo *exception)
{
  Quantum
    pixel;
  const unsigned char
    *p;
  IndexPacket
    *indexes;
  PixelPacket
    *q;
  ssize_t
    x;
  size_t
    packet_size;
  unsigned short
    nibble;
  p=pixels;
  q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
  if (q == (PixelPacket *) NULL)
    return MagickFalse;
  indexes=GetAuthenticIndexQueue(image);
  packet_size=GetPSDPacketSize(image);
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    if (packet_size == 1)
      pixel=ScaleCharToQuantum(*p++);
    else
      if (packet_size == 2)
        {
          p=PushShortPixel(MSBEndian,p,&nibble);
          pixel=ScaleShortToQuantum(nibble);
        }
      else
        {
          MagickFloatType
            nibble;
          /* 32-bit channels are stored as big-endian normalized floats */
          p=PushFloatPixel(MSBEndian,p,&nibble);
          pixel=ClampToQuantum((MagickRealType)QuantumRange*nibble);
        }
    if (image->depth > 1)
      {
        SetPSDPixel(image,channels,type,packet_size,pixel,q,indexes,x);
        q++;
      }
    else
      {
        ssize_t
          bit,
          number_bits;
        /* expand one packed byte into up to 8 bilevel pixels */
        number_bits=(ssize_t) image->columns-x;
        if (number_bits > 8)
          number_bits=8;
        for (bit=0; bit < number_bits; bit++)
        {
          SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel)
            & (0x01 << (7-bit))) != 0 ? 0 : QuantumRange,q++,indexes,x++);
        }
        /* compensate the outer loop's x++ unless the row is complete */
        if (x != (ssize_t) image->columns)
          x--;
        continue;
      }
  }
  return(SyncAuthenticPixels(image,exception));
}
/*
  Read an uncompressed channel: one raw row per scanline, pushed straight
  into the target image.  Returns MagickFalse on a short read or a
  pixel-cache failure.
*/
static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels,
  const ssize_t type,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    row_size;

  ssize_t
    y;

  unsigned char
    *row;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RAW");
  row_size=GetPSDRowSize(image);
  row=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*row));
  if (row == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) memset(row,0,row_size*sizeof(*row));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    if (ReadBlob(image,row_size,row) != (ssize_t) row_size)
      {
        status=MagickFalse;
        break;
      }
    status=ReadPSDChannelPixels(image,channels,y,type,row,exception);
    if (status == MagickFalse)
      break;
  }
  row=(unsigned char *) RelinquishMagickMemory(row);
  return(status);
}
/*
  Read the per-row compressed byte counts that precede RLE channel data:
  16-bit counts for PSD (version 1), 32-bit counts for PSB.  Returns NULL
  when the table cannot be allocated.
*/
static inline MagickOffsetType *ReadPSDRLESizes(Image *image,
  const PSDInfo *psd_info,const size_t size)
{
  MagickOffsetType
    *sizes;

  ssize_t
    i;

  sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes));
  if (sizes == (MagickOffsetType *) NULL)
    return(sizes);
  for (i=0; i < (ssize_t) size; i++)
    sizes[i]=(MagickOffsetType) ((psd_info->version == 1) ?
      ReadBlobShort(image) : ReadBlobLong(image));
  return sizes;
}
/*
  Read an RLE (PackBits) compressed channel.  `sizes` holds the compressed
  byte count of each row (from ReadPSDRLESizes); the scratch buffer is
  sized to the largest row and sanity-capped at row_size+2048 to reject
  corrupt counts.  Each row is read, decoded, then pushed into the image.
*/
static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info,
  const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception)
{
  MagickBooleanType
    status;
  size_t
    length,
    row_size;
  ssize_t
    count,
    y;
  unsigned char
    *compact_pixels,
    *pixels;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RLE compressed");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* size the compressed-row buffer to the largest declared row */
  length=0;
  for (y=0; y < (ssize_t) image->rows; y++)
    if ((MagickOffsetType) length < sizes[y])
      length=(size_t) sizes[y];
  if (length > (row_size+2048))
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename);
    }
  compact_pixels=(unsigned char *) AcquireQuantumMemory(length,sizeof(*pixels));
  if (compact_pixels == (unsigned char *) NULL)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(compact_pixels,0,length*sizeof(*compact_pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=MagickFalse;
    count=ReadBlob(image,(size_t) sizes[y],compact_pixels);
    if (count != (ssize_t) sizes[y])
      break;
    /*
      For 1-bit images row_size already accounts for bit packing, so a
      sentinel depth (123456) routes DecodePSDPixels to its byte-copy
      default case instead of re-expanding each byte to 8 pixels.
    */
    count=DecodePSDPixels((size_t) sizes[y],compact_pixels,
      (ssize_t) (image->depth == 1 ? 123456 : image->depth),row_size,pixels);
    if (count != (ssize_t) row_size)
      break;
    status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels,
      exception);
    if (status == MagickFalse)
      break;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
/*
  Read a zlib-deflated channel of `compact_size` bytes, inflate it to
  rows*row_size bytes, optionally undo per-row horizontal delta prediction,
  and push each row into the image.
*/
static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels,
  const ssize_t type,const PSDCompressionType compression,
  const size_t compact_size,ExceptionInfo *exception)
{
  MagickBooleanType
    status;
  unsigned char
    *p;
  size_t
    count,
    length,
    packet_size,
    row_size;
  ssize_t
    y;
  unsigned char
    *compact_pixels,
    *pixels;
  z_stream
    stream;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is ZIP compressed");
  /* a compressed size beyond the blob is necessarily corrupt */
  if ((MagickSizeType) compact_size > GetBlobSize(image))
    ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
      image->filename);
  compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size,
    sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  packet_size=GetPSDPacketSize(image);
  row_size=image->columns*packet_size;
  count=image->rows*row_size;
  pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    {
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
    }
  memset(&stream,0,sizeof(stream));
  stream.data_type=Z_BINARY;
  stream.next_in=(Bytef *)compact_pixels;
  stream.avail_in=(uInt) compact_size;
  stream.next_out=(Bytef *)pixels;
  stream.avail_out=(uInt) count;
  if (inflateInit(&stream) == Z_OK)
    {
      int
        ret;
      /* inflate until the output buffer is full or the stream ends */
      while (stream.avail_out > 0)
      {
        ret=inflate(&stream,Z_SYNC_FLUSH);
        if ((ret != Z_OK) && (ret != Z_STREAM_END))
          {
            (void) inflateEnd(&stream);
            compact_pixels=(unsigned char *) RelinquishMagickMemory(
              compact_pixels);
            pixels=(unsigned char *) RelinquishMagickMemory(pixels);
            return(MagickFalse);
          }
        if (ret == Z_STREAM_END)
          break;
      }
      (void) inflateEnd(&stream);
    }
  if (compression == ZipWithPrediction)
    {
      /*
        Undo horizontal delta prediction: each sample is stored as the
        difference from its left neighbor; 16-bit samples are handled as
        byte pairs with a carry from the low byte into the high byte.
      */
      p=pixels;
      while (count > 0)
      {
        length=image->columns;
        while (--length)
        {
          if (packet_size == 2)
            {
              p[2]+=p[0]+((p[1]+p[3]) >> 8);
              p[3]+=p[1];
            }
          else
            *(p+1)+=*p;
          p+=packet_size;
        }
        p+=packet_size;
        count-=row_size;
      }
    }
  status=MagickTrue;
  p=pixels;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=ReadPSDChannelPixels(image,channels,y,type,p,exception);
    if (status == MagickFalse)
      break;
    p+=row_size;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#endif
/*
  Read one channel of a layer, dispatching on the channel's compression
  scheme.  Channels with type < -1 are layer-mask data and are decoded
  into a separate mask image (or skipped entirely when the mask is not a
  supported user-supplied mask).  On return the blob is always positioned
  just past this channel's data, whatever the decoder consumed.
*/
static MagickBooleanType ReadPSDChannel(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info,
  const size_t channel,const PSDCompressionType compression,
  ExceptionInfo *exception)
{
  Image
    *channel_image,
    *mask;
  MagickOffsetType
    offset;
  MagickBooleanType
    status;
  channel_image=image;
  mask=(Image *) NULL;
  if ((layer_info->channel_info[channel].type < -1) &&
      (layer_info->mask.page.width > 0) && (layer_info->mask.page.height > 0))
    {
      const char
        *option;
      /*
        Ignore mask that is not a user supplied layer mask, if the mask is
        disabled or if the flags have unsupported values.
      */
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if ((layer_info->channel_info[channel].type != -2) ||
          (layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) &&
           (IsStringTrue(option) == MagickFalse)))
        {
          /* skip the mask data; size includes the 2-byte compression tag */
          (void) SeekBlob(image,(MagickOffsetType)
            layer_info->channel_info[channel].size-2,SEEK_CUR);
          return(MagickTrue);
        }
      /* decode the mask into its own image at the mask's dimensions */
      mask=CloneImage(image,layer_info->mask.page.width,
        layer_info->mask.page.height,MagickFalse,exception);
      if (mask != (Image *) NULL)
        {
          (void) ResetImagePixels(mask,exception);
          mask->matte=MagickFalse;
          channel_image=mask;
        }
    }
  offset=TellBlob(image);
  status=MagickFalse;
  switch(compression)
  {
    case Raw:
      status=ReadPSDChannelRaw(channel_image,psd_info->channels,
        (ssize_t) layer_info->channel_info[channel].type,exception);
      break;
    case RLE:
    {
      MagickOffsetType
        *sizes;
      sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      status=ReadPSDChannelRLE(channel_image,psd_info,
        (ssize_t) layer_info->channel_info[channel].type,sizes,exception);
      sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
    }
      break;
    case ZipWithPrediction:
    case ZipWithoutPrediction:
#ifdef MAGICKCORE_ZLIB_DELEGATE
      status=ReadPSDChannelZip(channel_image,layer_info->channels,
        (ssize_t) layer_info->channel_info[channel].type,compression,
        layer_info->channel_info[channel].size-2,exception);
#else
      (void) ThrowMagickException(exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
        "'%s' (ZLIB)",image->filename);
#endif
      break;
    default:
      (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning,
        "CompressionNotSupported","'%.20g'",(double) compression);
      break;
  }
  /* re-anchor past the channel data regardless of what was consumed */
  (void) SeekBlob(image,offset+layer_info->channel_info[channel].size-2,
    SEEK_SET);
  if (status == MagickFalse)
    {
      if (mask != (Image *) NULL)
        (void) DestroyImage(mask);
      ThrowBinaryException(CoderError,"UnableToDecompressImage",
        image->filename);
    }
  if (mask != (Image *) NULL)
    {
      if (layer_info->mask.image != (Image *) NULL)
        layer_info->mask.image=DestroyImage(layer_info->mask.image);
      layer_info->mask.image=mask;
    }
  return(status);
}
/*
  Decode the pixel data of a single PSD layer into layer_info->image.
  Sets up the layer image (background, compose operator, colorspace,
  psd:layer.* artifacts), then reads every channel, applies the layer
  opacity and, when present, the layer's opacity mask.
  Returns MagickTrue on success; MagickFalse aborts the caller's layer loop.
*/
static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info,
  const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception)
{
  char
    message[MaxTextExtent];

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  ssize_t
    j;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " setting up new layer image");
  /* Indexed images keep their raw palette indices; do not paint over them. */
  if (psd_info->mode != IndexedMode)
    (void) SetImageBackgroundColor(layer_info->image);
  layer_info->image->compose=PSDBlendModeToCompositeOperator(
    layer_info->blendkey);
  /* A hidden layer is kept but excluded from compositing. */
  if (layer_info->visible == MagickFalse)
    {
      layer_info->image->compose=NoCompositeOp;
      (void) SetImageArtifact(layer_info->image,"psd:layer.invisible","true");
    }
  if (psd_info->mode == CMYKMode)
    (void) SetImageColorspace(layer_info->image,CMYKColorspace);
  else if ((psd_info->mode == BitmapMode) || (psd_info->mode == DuotoneMode) ||
    (psd_info->mode == GrayscaleMode))
    (void) SetImageColorspace(layer_info->image,GRAYColorspace);
  /*
    Set up some hidden attributes for folks that need them.
  */
  (void) FormatLocaleString(message,MaxTextExtent,"%.20g",
    (double) layer_info->page.x);
  (void) SetImageArtifact(layer_info->image,"psd:layer.x",message);
  (void) FormatLocaleString(message,MaxTextExtent,"%.20g",
    (double) layer_info->page.y);
  (void) SetImageArtifact(layer_info->image,"psd:layer.y",message);
  (void) FormatLocaleString(message,MaxTextExtent,"%.20g",(double)
    layer_info->opacity);
  (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message);
  (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name);
  status=MagickTrue;
  /*
    Each channel is preceded by its own 16-bit compression tag.
  */
  for (j=0; j < (ssize_t) layer_info->channels; j++)
  {
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading data for channel %.20g",(double) j);
    compression=(PSDCompressionType) ReadBlobShort(layer_info->image);
    /* 32-bit zip-with-prediction decoding is not implemented here. */
    if ((compression == ZipWithPrediction) && (image->depth == 32))
      {
        (void) ThrowMagickException(exception,GetMagickModule(),
          TypeError,"CompressionNotSupported","ZipWithPrediction(32 bit)");
        return(MagickFalse);
      }
    layer_info->image->compression=ConvertPSDCompression(compression);
    /* Channel type -1 is the layer's alpha channel. */
    if (layer_info->channel_info[j].type == -1)
      layer_info->image->matte=MagickTrue;
    status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,
      (size_t) j,compression,exception);
    InheritException(exception,&layer_info->image->exception);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity,
      MagickFalse,exception);
  /* PSD stores CMYK inverted relative to MagickCore's convention. */
  if ((status != MagickFalse) &&
      (layer_info->image->colorspace == CMYKColorspace))
    status=NegateImage(layer_info->image,MagickFalse);
  if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL))
    {
      const char
        *option;

      layer_info->mask.image->page.x=layer_info->mask.page.x;
      layer_info->mask.image->page.y=layer_info->mask.page.y;
      /* Do not composite the mask when it is disabled */
      if ((layer_info->mask.flags & 0x02) == 0x02)
        layer_info->mask.image->compose=NoCompositeOp;
      else
        status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image,
          layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse,
          exception);
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if (IsStringTrue(option) != MagickFalse)
        PreservePSDOpacityMask(image,layer_info,exception);
      layer_info->mask.image=DestroyImage(layer_info->mask.image);
    }
  return(status);
}
/*
  Validate that a layer supplies every channel its color mode requires.
  Builds the set of required channels from psd_info->min_channels, then
  clears each requirement as the matching channel type is found.  Returns
  MagickTrue when all requirements are met (an extra alpha channel is
  acceptable when the layer carries at least one channel beyond the
  minimum), MagickFalse otherwise.
*/
static MagickBooleanType CheckPSDChannels(const PSDInfo *psd_info,
  LayerInfo *layer_info)
{
  int
    required;

  ssize_t
    n;

  if (layer_info->channels < psd_info->min_channels)
    return(MagickFalse);
  required=RedChannel;
  if (psd_info->min_channels >= 3)
    required|=(GreenChannel | BlueChannel);
  if (psd_info->min_channels >= 4)
    required|=BlackChannel;
  for (n=0; n < (ssize_t) layer_info->channels; n++)
  {
    short
      channel;

    channel=layer_info->channel_info[n].type;
    /* Indexed images must store the index data in the first channel. */
    if ((n == 0) && (psd_info->mode == IndexedMode) && (channel != 0))
      return(MagickFalse);
    switch (channel)
    {
      case -1:
      {
        /* Layer alpha channel. */
        required|=AlphaChannel;
        break;
      }
      case 0:
      {
        required&=~RedChannel;
        break;
      }
      case 1:
      {
        required&=~GreenChannel;
        break;
      }
      case 2:
      {
        required&=~BlueChannel;
        break;
      }
      case 3:
      {
        required&=~BlackChannel;
        break;
      }
      default:
      {
        /* Mask channels (< -1) and unknown types are ignored. */
        break;
      }
    }
  }
  if (required == 0)
    return(MagickTrue);
  if ((required == AlphaChannel) &&
      (layer_info->channels >= psd_info->min_channels + 1))
    return(MagickTrue);
  return(MagickFalse);
}
static void CheckMergedImageAlpha(const PSDInfo *psd_info,Image *image)
{
  size_t
    base_channels;

  /*
    The number of layers cannot be used to determine if the merged image
    contains an alpha channel, so assume one whenever the file declares
    more channels than the color mode itself needs.
  */
  switch (psd_info->mode)
  {
    case GrayscaleMode:
    {
      base_channels=1;
      break;
    }
    case RGBMode:
    {
      base_channels=3;
      break;
    }
    case CMYKMode:
    {
      base_channels=4;
      break;
    }
    default:
    {
      /* Other modes carry no implied alpha. */
      base_channels=0;
      break;
    }
  }
  if ((base_channels != 0) && ((size_t) psd_info->channels > base_channels))
    image->matte=MagickTrue;
}
/*
  Return the byte length of the layer-info section.  A non-zero size read
  directly from the stream is returned as-is; otherwise the "8BIM"-tagged
  blocks are walked: a Mt16/Mt32/Mtrn block marks merged transparency (the
  image gains an alpha channel), and a following Lr16/Lr32 block supplies
  the layer-info size.  Returns 0 when no usable size is found.
  NOTE: the read order below is the wire format — do not reorder.
*/
static MagickSizeType GetLayerInfoSize(const PSDInfo *psd_info,Image *image)
{
  char
    type[4];

  MagickSizeType
    size;

  ssize_t
    count;

  size=GetPSDSize(psd_info,image);
  if (size != 0)
    return(size);
  /* Skip a 32-bit field preceding the first tag. */
  (void) ReadBlobLong(image);
  count=ReadPSDString(image,type,4);
  if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
    return(0);
  count=ReadPSDString(image,type,4);
  if ((count == 4) && ((LocaleNCompare(type,"Mt16",4) == 0) ||
      (LocaleNCompare(type,"Mt32",4) == 0) ||
      (LocaleNCompare(type,"Mtrn",4) == 0)))
    {
      /* Merged-transparency block: must itself have zero size here. */
      size=GetPSDSize(psd_info,image);
      if (size != 0)
        return(0);
      image->matte=MagickTrue;
      count=ReadPSDString(image,type,4);
      if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
        return(0);
      count=ReadPSDString(image,type,4);
    }
  if ((count == 4) && ((LocaleNCompare(type,"Lr16",4) == 0) ||
      (LocaleNCompare(type,"Lr32",4) == 0)))
    size=GetPSDSize(psd_info,image);
  return(size);
}
/*
  Parse the PSD layer-info section: layer records (geometry, channels,
  blend data, mask, blending ranges, name, additional info), then each
  layer's pixel data, and finally link the decoded layer images into the
  image list after the base image.  When skip_layers is set only the
  alpha-presence information is consumed.  Returns MagickFalse (with an
  exception set) on any structural error; all partially built LayerInfo
  state is released before returning.
  NOTE: every ReadBlob* call below consumes stream bytes in wire order —
  statement order is load-bearing.
*/
static MagickBooleanType ReadPSDLayersInternal(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,
  const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
  char
    type[4];

  LayerInfo
    *layer_info;

  MagickSizeType
    size;

  MagickBooleanType
    status;

  ssize_t
    i;

  ssize_t
    count,
    j,
    number_layers;

  size=GetLayerInfoSize(psd_info,image);
  status=MagickTrue;
  if (size != 0)
    {
      layer_info=(LayerInfo *) NULL;
      number_layers=(ssize_t) ReadBlobSignedShort(image);
      if (number_layers < 0)
        {
          /*
            The first alpha channel in the merged result contains the
            transparency data for the merged result.
          */
          number_layers=MagickAbsoluteValue(number_layers);
          if (image->debug != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              " negative layer count corrected for");
          image->matte=MagickTrue;
        }
      /*
        We only need to know if the image has an alpha channel
      */
      if (skip_layers != MagickFalse)
        return(MagickTrue);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " image contains %.20g layers",(double) number_layers);
      if (number_layers == 0)
        ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers",
          image->filename);
      layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers,
        sizeof(*layer_info));
      if (layer_info == (LayerInfo *) NULL)
        {
          if (image->debug != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              " allocation of LayerInfo failed");
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        }
      (void) memset(layer_info,0,(size_t) number_layers*sizeof(*layer_info));
      /*
        Pass 1: read every layer record (no pixel data yet).
      */
      for (i=0; i < number_layers; i++)
      {
        ssize_t
          top,
          left,
          bottom,
          right;

        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " reading layer #%.20g",(double) i+1);
        /* Layer bounding box, stored as top/left/bottom/right. */
        top=(ssize_t) ReadBlobSignedLong(image);
        left=(ssize_t) ReadBlobSignedLong(image);
        bottom=(ssize_t) ReadBlobSignedLong(image);
        right=(ssize_t) ReadBlobSignedLong(image);
        if ((right < left) || (bottom < top))
          {
            layer_info=DestroyLayerInfo(layer_info,number_layers);
            ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
              image->filename);
          }
        layer_info[i].page.y=top;
        layer_info[i].page.x=left;
        layer_info[i].page.width=(size_t) (right-left);
        layer_info[i].page.height=(size_t) (bottom-top);
        layer_info[i].channels=ReadBlobShort(image);
        if (layer_info[i].channels > MaxPSDChannels)
          {
            layer_info=DestroyLayerInfo(layer_info,number_layers);
            ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded",
              image->filename);
          }
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g",
            (double) layer_info[i].page.x,(double) layer_info[i].page.y,
            (double) layer_info[i].page.height,(double)
            layer_info[i].page.width,(double) layer_info[i].channels);
        /* Per-channel type and compressed-data length. */
        for (j=0; j < (ssize_t) layer_info[i].channels; j++)
        {
          layer_info[i].channel_info[j].type=(short) ReadBlobShort(image);
          if ((layer_info[i].channel_info[j].type < -4) ||
              (layer_info[i].channel_info[j].type > 4))
            {
              layer_info=DestroyLayerInfo(layer_info,number_layers);
              ThrowBinaryException(CorruptImageError,"NoSuchImageChannel",
                image->filename);
            }
          layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info,
            image);
          if (image->debug != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              " channel[%.20g]: type=%.20g, size=%.20g",(double) j,
              (double) layer_info[i].channel_info[j].type,
              (double) layer_info[i].channel_info[j].size);
        }
        if (CheckPSDChannels(psd_info,&layer_info[i]) == MagickFalse)
          {
            layer_info=DestroyLayerInfo(layer_info,number_layers);
            ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
              image->filename);
          }
        /* Blend record: "8BIM" signature, blend key, opacity, flags. */
        count=ReadPSDString(image,type,4);
        if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
          {
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer type was %.4s instead of 8BIM", type);
            layer_info=DestroyLayerInfo(layer_info,number_layers);
            ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
              image->filename);
          }
        count=ReadPSDString(image,layer_info[i].blendkey,4);
        if (count != 4)
          {
            layer_info=DestroyLayerInfo(layer_info,number_layers);
            ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
              image->filename);
          }
        layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char)
          ReadBlobByte(image));
        layer_info[i].clipping=(unsigned char) ReadBlobByte(image);
        layer_info[i].flags=(unsigned char) ReadBlobByte(image);
        /* Flag bit 0x02 means the layer is hidden. */
        layer_info[i].visible=!(layer_info[i].flags & 0x02);
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s",
            layer_info[i].blendkey,(double) layer_info[i].opacity,
            layer_info[i].clipping ? "true" : "false",layer_info[i].flags,
            layer_info[i].visible ? "true" : "false");
        (void) ReadBlobByte(image); /* filler */
        size=ReadBlobLong(image);
        if (size != 0)
          {
            MagickSizeType
              combined_length,
              length;

            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer contains additional info");
            length=ReadBlobLong(image);
            combined_length=length+4;
            if (length != 0)
              {
                /*
                  Layer mask info.
                */
                layer_info[i].mask.page.y=(ssize_t) ReadBlobSignedLong(image);
                layer_info[i].mask.page.x=(ssize_t) ReadBlobSignedLong(image);
                layer_info[i].mask.page.height=(size_t) (
                  ReadBlobSignedLong(image)-layer_info[i].mask.page.y);
                layer_info[i].mask.page.width=(size_t) (
                  ReadBlobSignedLong(image)-layer_info[i].mask.page.x);
                layer_info[i].mask.background=(unsigned char) ReadBlobByte(
                  image);
                layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image);
                /* Unless flagged position-relative, rebase onto the layer. */
                if (!(layer_info[i].mask.flags & 0x01))
                  {
                    layer_info[i].mask.page.y=layer_info[i].mask.page.y-
                      layer_info[i].page.y;
                    layer_info[i].mask.page.x=layer_info[i].mask.page.x-
                      layer_info[i].page.x;
                  }
                if (image->debug != MagickFalse)
                  (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                    " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g",
                    (double) layer_info[i].mask.page.x,(double)
                    layer_info[i].mask.page.y,(double) layer_info[i].mask.page.width,
                    (double) layer_info[i].mask.page.height,(double)
                    ((MagickOffsetType) length)-18);
                /*
                  Skip over the rest of the layer mask information.
                  NOTE(review): if a corrupt file declares length < 18 the
                  unsigned subtraction below wraps to a huge skip; the
                  DiscardBlobBytes failure path catches it, but an explicit
                  length check would be cleaner — confirm against upstream.
                */
                if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse)
                  {
                    layer_info=DestroyLayerInfo(layer_info,number_layers);
                    ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
                      image->filename);
                  }
              }
            length=ReadBlobLong(image);
            combined_length+=length+4;
            if (length != 0)
              {
                /*
                  Layer blending ranges info.
                */
                if (image->debug != MagickFalse)
                  (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                    " layer blending ranges: length=%.20g",(double)
                    ((MagickOffsetType) length));
                /* Blending ranges are not interpreted; skip them. */
                if (DiscardBlobBytes(image,length) == MagickFalse)
                  {
                    layer_info=DestroyLayerInfo(layer_info,number_layers);
                    ThrowBinaryException(CorruptImageError,
                      "UnexpectedEndOfFile",image->filename);
                  }
              }
            /*
              Layer name.
            */
            length=(MagickSizeType) (unsigned char) ReadBlobByte(image);
            combined_length+=length+1;
            if (length > 0)
              (void) ReadBlob(image,(size_t) length++,layer_info[i].name);
            layer_info[i].name[length]='\0';
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer name: %s",layer_info[i].name);
            /* The Pascal-style name is padded to a 4-byte boundary. */
            if ((length % 4) != 0)
              {
                length=4-(length % 4);
                combined_length+=length;
                /* Skip over the padding of the layer name */
                if (DiscardBlobBytes(image,length) == MagickFalse)
                  {
                    layer_info=DestroyLayerInfo(layer_info,number_layers);
                    ThrowBinaryException(CorruptImageError,
                      "UnexpectedEndOfFile",image->filename);
                  }
              }
            /* Whatever remains of the record is opaque additional info. */
            length=(MagickSizeType) size-combined_length;
            if (length > 0)
              {
                unsigned char
                  *info;

                if (length > GetBlobSize(image))
                  {
                    layer_info=DestroyLayerInfo(layer_info,number_layers);
                    ThrowBinaryException(CorruptImageError,
                      "InsufficientImageDataInFile",image->filename);
                  }
                layer_info[i].info=AcquireStringInfo((const size_t) length);
                info=GetStringInfoDatum(layer_info[i].info);
                (void) ReadBlob(image,(const size_t) length,info);
              }
          }
      }
      /*
        Pass 2: allocate an image for every non-empty layer.
      */
      for (i=0; i < number_layers; i++)
      {
        if ((layer_info[i].page.width == 0) ||
            (layer_info[i].page.height == 0))
          {
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer data is empty");
            if (layer_info[i].info != (StringInfo *) NULL)
              layer_info[i].info=DestroyStringInfo(layer_info[i].info);
            continue;
          }
        /*
          Allocate layered image.
        */
        layer_info[i].image=CloneImage(image,layer_info[i].page.width,
          layer_info[i].page.height,MagickFalse,exception);
        if (layer_info[i].image == (Image *) NULL)
          {
            layer_info=DestroyLayerInfo(layer_info,number_layers);
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " allocation of image for layer %.20g failed",(double) i);
            ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
              image->filename);
          }
        if (layer_info[i].info != (StringInfo *) NULL)
          {
            (void) SetImageProfile(layer_info[i].image,"psd:additional-info",
              layer_info[i].info);
            layer_info[i].info=DestroyStringInfo(layer_info[i].info);
          }
      }
      /*
        Pass 3: decode pixel data (skipped entirely when pinging).
      */
      if (image_info->ping == MagickFalse)
        {
          for (i=0; i < number_layers; i++)
          {
            if (layer_info[i].image == (Image *) NULL)
              {
                /* Empty layer: skip its channel data to stay in sync. */
                for (j=0; j < (ssize_t) layer_info[i].channels; j++)
                {
                  if (DiscardBlobBytes(image,(MagickSizeType)
                      layer_info[i].channel_info[j].size) == MagickFalse)
                    {
                      layer_info=DestroyLayerInfo(layer_info,number_layers);
                      ThrowBinaryException(CorruptImageError,
                        "UnexpectedEndOfFile",image->filename);
                    }
                }
                continue;
              }
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " reading data for layer %.20g",(double) i);
            status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i],
              exception);
            if (status == MagickFalse)
              break;
            status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
              (MagickSizeType) number_layers);
            if (status == MagickFalse)
              break;
          }
        }
      if (status != MagickFalse)
        {
          /* Compact away empty layers, preserving order. */
          for (i=0; i < number_layers; i++)
          {
            if (layer_info[i].image == (Image *) NULL)
              {
                for (j=i; j < number_layers - 1; j++)
                  layer_info[j] = layer_info[j+1];
                number_layers--;
                i--;
              }
          }
          /* Chain the layer images onto the base image's list. */
          if (number_layers > 0)
            {
              for (i=0; i < number_layers; i++)
              {
                if (i > 0)
                  layer_info[i].image->previous=layer_info[i-1].image;
                if (i < (number_layers-1))
                  layer_info[i].image->next=layer_info[i+1].image;
                layer_info[i].image->page=layer_info[i].page;
              }
              image->next=layer_info[0].image;
              layer_info[0].image->previous=image;
            }
          /* Images are now owned by the list; free only the array. */
          layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
        }
      else
        layer_info=DestroyLayerInfo(layer_info,number_layers);
    }
  return(status);
}
/*
  Public wrapper around ReadPSDLayersInternal() that first checks the
  coder security policy; refuses (MagickFalse) when reading "PSD" content
  is not authorized.
*/
ModuleExport MagickBooleanType ReadPSDLayers(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,
  const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
  if (IsRightsAuthorized(CoderPolicyDomain,ReadPolicyRights,"PSD") ==
      MagickFalse)
    return(MagickFalse);
  return(ReadPSDLayersInternal(image,image_info,psd_info,skip_layers,
    exception));
}
/*
  Decode the precombined (merged) image that follows the layer section.
  Only Raw and RLE compression are supported here; other schemes produce
  a warning and MagickFalse.  Channels are read planar, one after another;
  in a 2-channel file the second channel is treated as alpha (type -1).
  CMYK data is negated and the alpha blend corrected before returning.
*/
static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info,
  Image* image,const PSDInfo* psd_info,ExceptionInfo *exception)
{
  MagickOffsetType
    *sizes;

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  ssize_t
    i;

  compression=(PSDCompressionType) ReadBlobMSBShort(image);
  image->compression=ConvertPSDCompression(compression);
  if (compression != Raw && compression != RLE)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression);
      return(MagickFalse);
    }
  sizes=(MagickOffsetType *) NULL;
  if (compression == RLE)
    {
      /* RLE stores one byte-count per scanline per channel up front. */
      sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
    }
  status=MagickTrue;
  for (i=0; i < (ssize_t) psd_info->channels; i++)
  {
    ssize_t
      type;

    type=i;
    /* Grayscale+alpha: channel 1 is the alpha channel. */
    if ((type == 1) && (psd_info->channels == 2))
      type=-1;
    if (compression == RLE)
      status=ReadPSDChannelRLE(image,psd_info,type,sizes+(i*image->rows),
        exception);
    else
      status=ReadPSDChannelRaw(image,psd_info->channels,type,exception);
    if (status != MagickFalse)
      status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
        psd_info->channels);
    if (status == MagickFalse)
      break;
  }
  /* PSD stores CMYK inverted relative to MagickCore's convention. */
  if ((status != MagickFalse) && (image->colorspace == CMYKColorspace))
    status=NegateImage(image,MagickFalse);
  if (status != MagickFalse)
    status=CorrectPSDAlphaBlend(image_info,image,exception);
  /* Relinquish is NULL-safe, so this covers the Raw path too. */
  sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
  return(status);
}
/*
  Read a PSD/PSB image: validate the header, read the colormap and image
  resource blocks, decode the layer section, then the precombined merged
  image.  When no usable merged image exists the layers are flattened to
  synthesize one.  Returns the first image in the decoded list, or NULL
  (with an exception set) on failure.

  Fix: the MergeImageLayers() failure path now destroys the pending
  `profile` StringInfo before returning, matching every other error path
  in this function (it previously leaked).
*/
static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  Image
    *image;

  MagickBooleanType
    has_merged_image,
    skip_layers;

  MagickOffsetType
    offset;

  MagickSizeType
    length;

  MagickBooleanType
    status;

  PSDInfo
    psd_info;

  ssize_t
    i;

  size_t
    image_list_length;

  ssize_t
    count;

  StringInfo
    *profile;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImage(image_info);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Read image header.
  */
  image->endian=MSBEndian;
  count=ReadBlob(image,4,(unsigned char *) psd_info.signature);
  psd_info.version=ReadBlobMSBShort(image);
  /* Version 1 is PSD, version 2 is PSB (large document). */
  if ((count != 4) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) ||
      ((psd_info.version != 1) && (psd_info.version != 2)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  (void) ReadBlob(image,6,psd_info.reserved);
  psd_info.channels=ReadBlobMSBShort(image);
  if (psd_info.channels < 1)
    ThrowReaderException(CorruptImageError,"MissingImageChannel");
  if (psd_info.channels > MaxPSDChannels)
    ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded");
  psd_info.rows=ReadBlobMSBLong(image);
  psd_info.columns=ReadBlobMSBLong(image);
  /* PSD (version 1) caps dimensions at 30000 pixels. */
  if ((psd_info.version == 1) && ((psd_info.rows > 30000) ||
      (psd_info.columns > 30000)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  psd_info.depth=ReadBlobMSBShort(image);
  if ((psd_info.depth != 1) && (psd_info.depth != 8) &&
      (psd_info.depth != 16) && (psd_info.depth != 32))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  psd_info.mode=ReadBlobMSBShort(image);
  if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s",
      (double) psd_info.columns,(double) psd_info.rows,(double)
      psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType)
      psd_info.mode));
  if (EOFBlob(image) != MagickFalse)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  /*
    Initialize image.
  */
  image->depth=psd_info.depth;
  image->columns=psd_info.columns;
  image->rows=psd_info.rows;
  status=SetImageExtent(image,image->columns,image->rows);
  if (status == MagickFalse)
    {
      InheritException(exception,&image->exception);
      return(DestroyImageList(image));
    }
  status=ResetImagePixels(image,exception);
  if (status == MagickFalse)
    {
      InheritException(exception,&image->exception);
      return(DestroyImageList(image));
    }
  /* min_channels is the channel count the color mode demands. */
  psd_info.min_channels=3;
  if (psd_info.mode == LabMode)
    (void) SetImageColorspace(image,LabColorspace);
  if (psd_info.mode == CMYKMode)
    {
      psd_info.min_channels=4;
      (void) SetImageColorspace(image,CMYKColorspace);
    }
  else
    if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) ||
        (psd_info.mode == DuotoneMode))
      {
        if (psd_info.depth != 32)
          {
            status=AcquireImageColormap(image,MagickMin((size_t)
              (psd_info.depth < 16 ? 256 : 65536), MaxColormapSize));
            if (status == MagickFalse)
              ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " Image colormap allocated");
          }
        psd_info.min_channels=1;
        (void) SetImageColorspace(image,GRAYColorspace);
      }
  if (psd_info.channels < psd_info.min_channels)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  /*
    Read PSD raster colormap only present for indexed and duotone images.
  */
  length=ReadBlobMSBLong(image);
  if ((psd_info.mode == IndexedMode) && (length < 3))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (length != 0)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " reading colormap");
      if ((psd_info.mode == DuotoneMode) || (psd_info.depth == 32))
        {
          /*
            Duotone image data; the format of this data is undocumented.
          */
          (void) SeekBlob(image,(const MagickOffsetType) length,SEEK_CUR);
        }
      else
        {
          size_t
            number_colors;

          /*
            Read PSD raster colormap: planar red, green, then blue bytes.
          */
          number_colors=(size_t) length/3;
          if (number_colors > 65536)
            ThrowReaderException(CorruptImageError,"ImproperImageHeader");
          if (AcquireImageColormap(image,number_colors) == MagickFalse)
            ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].red=ScaleCharToQuantum((unsigned char)
              ReadBlobByte(image));
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].green=ScaleCharToQuantum((unsigned char)
              ReadBlobByte(image));
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].blue=ScaleCharToQuantum((unsigned char)
              ReadBlobByte(image));
          image->matte=MagickFalse;
        }
    }
  if ((image->depth == 1) && (image->storage_class != PseudoClass))
    ThrowReaderException(CorruptImageError, "ImproperImageHeader");
  has_merged_image=MagickTrue;
  profile=(StringInfo *) NULL;
  length=ReadBlobMSBLong(image);
  if (length != 0)
    {
      unsigned char
        *blocks;

      /*
        Image resources block.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " reading image resource blocks - %.20g bytes",(double)
          ((MagickOffsetType) length));
      if (length > GetBlobSize(image))
        ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
      blocks=(unsigned char *) AcquireQuantumMemory((size_t) length,
        sizeof(*blocks));
      if (blocks == (unsigned char *) NULL)
        ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
      count=ReadBlob(image,(size_t) length,blocks);
      if ((count != (ssize_t) length) || (length < 4) ||
          (LocaleNCompare((char *) blocks,"8BIM",4) != 0))
        {
          blocks=(unsigned char *) RelinquishMagickMemory(blocks);
          ThrowReaderException(CorruptImageError,"ImproperImageHeader");
        }
      profile=ParseImageResourceBlocks(image,blocks,(size_t) length,
        &has_merged_image);
      blocks=(unsigned char *) RelinquishMagickMemory(blocks);
    }
  /*
    Layer and mask block.
  */
  length=GetPSDSize(&psd_info,image);
  if (length == 8)
    {
      /* An 8-byte section holds only a nested length; take the second. */
      length=ReadBlobMSBLong(image);
      length=ReadBlobMSBLong(image);
    }
  offset=TellBlob(image);
  skip_layers=MagickFalse;
  if ((image_info->number_scenes == 1) && (image_info->scene == 0) &&
      (has_merged_image != MagickFalse))
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " read composite only");
      skip_layers=MagickTrue;
    }
  if (length == 0)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " image has no layers");
    }
  else
    {
      if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers,
          exception) != MagickTrue)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
      /*
        Skip the rest of the layer and mask information.
      */
      (void) SeekBlob(image,offset+length,SEEK_SET);
    }
  /*
    If we are only "pinging" the image, then we're done - so return.
  */
  if (EOFBlob(image) != MagickFalse)
    {
      if (profile != (StringInfo *) NULL)
        profile=DestroyStringInfo(profile);
      ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
    }
  if (image_info->ping != MagickFalse)
    {
      if (profile != (StringInfo *) NULL)
        profile=DestroyStringInfo(profile);
      (void) CloseBlob(image);
      return(GetFirstImageInList(image));
    }
  /*
    Read the precombined layer, present for PSD < 4 compatibility.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " reading the precombined layer");
  image_list_length=GetImageListLength(image);
  if (has_merged_image != MagickFalse || image_list_length == 1)
    has_merged_image=(MagickBooleanType) ReadPSDMergedImage(image_info,image,
      &psd_info,exception);
  /* No merged image and no layers decoded: retry the layer section. */
  if ((has_merged_image == MagickFalse) && (image_list_length == 1) &&
      (length != 0))
    {
      (void) SeekBlob(image,offset,SEEK_SET);
      status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse,
        exception);
      if (status != MagickTrue)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
      image_list_length=GetImageListLength(image);
    }
  if (has_merged_image == MagickFalse)
    {
      Image
        *merged;

      if (image_list_length == 1)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
        }
      /* Synthesize the merged image by flattening the decoded layers. */
      image->background_color.opacity=TransparentOpacity;
      (void) SetImageBackgroundColor(image);
      merged=MergeImageLayers(image,FlattenLayer,exception);
      if (merged == (Image *) NULL)
        {
          /* Fix: release the pending profile, as all other error paths do. */
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
      ReplaceImageInList(&image,merged);
    }
  if (profile != (StringInfo *) NULL)
    {
      Image
        *next;

      /* Attach the resource-block profile to every image in the list. */
      next=image;
      while (next != (Image *) NULL)
      {
        (void) SetImageProfile(next,GetStringInfoName(profile),profile);
        next=next->next;
      }
      profile=DestroyStringInfo(profile);
    }
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterPSDImage() adds properties for the PSD image format to
% the list of supported formats. The properties include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterPSDImage method is:
%
% size_t RegisterPSDImage(void)
%
*/
ModuleExport size_t RegisterPSDImage(void)
{
  /*
    Register the large-document (PSB) and classic (PSD) variants, in that
    order; both share the same coder entry points and module name.
  */
  static const char
    *aliases[2] = { "PSB", "PSD" },
    *descriptions[2] =
    {
      "Adobe Large Document Format",
      "Adobe Photoshop bitmap"
    };

  ssize_t
    n;

  for (n=0; n < 2; n++)
  {
    MagickInfo
      *entry;

    entry=SetMagickInfo(aliases[n]);
    entry->decoder=(DecodeImageHandler *) ReadPSDImage;
    entry->encoder=(EncodeImageHandler *) WritePSDImage;
    entry->magick=(IsImageFormatHandler *) IsPSD;
    entry->seekable_stream=MagickTrue;
    entry->description=ConstantString(descriptions[n]);
    entry->magick_module=ConstantString("PSD");
    (void) RegisterMagickInfo(entry);
  }
  return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterPSDImage() removes format registrations made by the
% PSD module from the list of supported formats.
%
% The format of the UnregisterPSDImage method is:
%
% UnregisterPSDImage(void)
%
*/
ModuleExport void UnregisterPSDImage(void)
{
  /* Remove both registered aliases; order mirrors registration. */
  (void) UnregisterMagickInfo("PSB");
  (void) UnregisterMagickInfo("PSD");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePSDImage() writes an image in the Adobe Photoshop encoded image format.
%
% The format of the WritePSDImage method is:
%
% MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
*/
/*
  Write a PSD offset field: 16 bits wide in version 1 (PSD) files,
  32 bits wide in version 2 (PSB) files.  Returns the blob writer's
  byte count.
*/
static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image,
  const size_t offset)
{
  if (psd_info->version != 1)
    return(WriteBlobMSBLong(image,(unsigned int) offset));
  return(WriteBlobMSBShort(image,(unsigned short) offset));
}
/*
  Patch a previously reserved offset field: seek to `offset`, write `size`
  in the version-appropriate width (16-bit for PSD, 32-bit for PSB), then
  restore the original stream position.  Returns the writer's byte count.
*/
static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    restore_offset;

  ssize_t
    count;

  restore_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  count=(psd_info->version == 1) ?
    WriteBlobMSBShort(image,(unsigned short) size) :
    WriteBlobMSBLong(image,(unsigned int) size);
  (void) SeekBlob(image,restore_offset,SEEK_SET);
  return(count);
}
/*
  Write a PSD size field: 32 bits wide in version 1 (PSD) files, 64 bits
  wide in version 2 (PSB) files.  Returns the blob writer's byte count.
*/
static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size)
{
  if (psd_info->version != 1)
    return(WriteBlobMSBLongLong(image,size));
  return(WriteBlobMSBLong(image,(unsigned int) size));
}
/*
  Patch a previously reserved size field: seek to `offset`, write `size`
  in the version-appropriate width (32-bit for PSD, 64-bit for PSB), then
  restore the original stream position.  Returns the writer's byte count.
*/
static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    restore_offset;

  ssize_t
    count;

  restore_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  count=(psd_info->version == 1) ?
    WriteBlobMSBLong(image,(unsigned int) size) :
    WriteBlobMSBLongLong(image,size);
  (void) SeekBlob(image,restore_offset,SEEK_SET);
  return(count);
}
/*
  Compress `length` bytes of `pixels` into `compact_pixels` using the
  PackBits run-length scheme (runs encoded as (257-count, byte); literal
  sequences as (count-1) followed by the bytes; 128 marks end-of-data).
  Returns the number of bytes written to compact_pixels; the caller must
  size compact_pixels for the worst case.
*/
static size_t PSDPackbitsEncodeImage(Image *image,const size_t length,
  const unsigned char *pixels,unsigned char *compact_pixels)
{
  int
    count;

  ssize_t
    i,
    j;

  unsigned char
    *q;

  unsigned char
    *packbits;

  /*
    Compress pixels with Packbits encoding.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pixels != (unsigned char *) NULL);
  assert(compact_pixels != (unsigned char *) NULL);
  /* Scratch buffer for one literal run (max 127 bytes + count byte). */
  packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits));
  if (packbits == (unsigned char *) NULL)
    ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  q=compact_pixels;
  /* i counts the input bytes still to encode. */
  for (i=(ssize_t) length; i != 0; )
  {
    switch (i)
    {
      case 1:
      {
        /* Single trailing byte: literal of length 1. */
        i--;
        *q++=(unsigned char) 0;
        *q++=(*pixels);
        break;
      }
      case 2:
      {
        /* Two trailing bytes: literal of length 2. */
        i-=2;
        *q++=(unsigned char) 1;
        *q++=(*pixels);
        *q++=pixels[1];
        break;
      }
      case 3:
      {
        /* Three trailing bytes: run if identical, else literal. */
        i-=3;
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            *q++=(unsigned char) ((256-3)+1);
            *q++=(*pixels);
            break;
          }
        *q++=(unsigned char) 2;
        *q++=(*pixels);
        *q++=pixels[1];
        *q++=pixels[2];
        break;
      }
      default:
      {
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /*
              Packed run.
            */
            count=3;
            /* Extend the run, capped at 127 repeats. */
            while (((ssize_t) count < i) && (*pixels == *(pixels+count)))
            {
              count++;
              if (count >= 127)
                break;
            }
            i-=count;
            *q++=(unsigned char) ((256-count)+1);
            *q++=(*pixels);
            pixels+=count;
            break;
          }
        /*
          Literal run.
        */
        count=0;
        /* Collect bytes until a 3-byte run begins, up to 127 bytes. */
        while ((*(pixels+count) != *(pixels+count+1)) ||
               (*(pixels+count+1) != *(pixels+count+2)))
        {
          packbits[count+1]=pixels[count];
          count++;
          if (((ssize_t) count >= (i-3)) || (count >= 127))
            break;
        }
        i-=count;
        /* packbits[0] holds count-1; emit it plus the literal bytes. */
        *packbits=(unsigned char) (count-1);
        for (j=0; j <= (ssize_t) count; j++)
          *q++=packbits[j];
        pixels+=count;
        break;
      }
    }
  }
  *q++=(unsigned char) 128; /* EOD marker */
  packbits=(unsigned char *) RelinquishMagickMemory(packbits);
  return((size_t) (q-compact_pixels));
}
/*
  WriteCompressionStart() emits the 2-byte compression marker for the
  next channel data block.  For RLE it also reserves one zeroed per-row
  byte-count slot per channel, which WritePSDOffset() patches later.
  Returns the number of bytes written.
*/
static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image,
  const Image *next_image,const ssize_t channels)
{
  ssize_t
    channel,
    count,
    row;

  if (next_image->compression == RLECompression)
    {
      count=WriteBlobMSBShort(image,RLE);
      /* Placeholder row-length table: channels x rows offset slots. */
      for (channel=0; channel < channels; channel++)
        for (row=0; row < (ssize_t) next_image->rows; row++)
          count+=SetPSDOffset(psd_info,image,0);
    }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  else if (next_image->compression == ZipCompression)
    count=WriteBlobMSBShort(image,ZipWithoutPrediction);
#endif
  else
    count=WriteBlobMSBShort(image,Raw);
  return((size_t) count);
}
/*
  WritePSDChannel() writes one channel (selected by 'quantum_type') of
  'next_image' into the blob of 'image', honoring
  next_image->compression: raw, RLE (PackBits), or — when zlib support is
  compiled in — zip.  When 'separate' is set the channel gets its own
  compression marker and 'size_offset' is repositioned to just past it;
  for RLE, per-row byte counts are patched back at 'size_offset' as rows
  are emitted.  Returns the number of bytes written, or 0 on failure.
*/
static size_t WritePSDChannel(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  const QuantumType quantum_type, unsigned char *compact_pixels,
  MagickOffsetType size_offset,const MagickBooleanType separate)
{
  MagickBooleanType
    monochrome;
  QuantumInfo
    *quantum_info;
  const PixelPacket
    *p;
  ssize_t
    i;
  size_t
    count,
    length;
  ssize_t
    y;
  unsigned char
    *pixels;
#ifdef MAGICKCORE_ZLIB_DELEGATE
  int
    flush,
    level;
  unsigned char
    *compressed_pixels;
  z_stream
    stream;
  compressed_pixels=(unsigned char *) NULL;
  flush=Z_NO_FLUSH;
#endif
  count=0;
  if (separate != MagickFalse)
    {
      /* +2 skips the compression marker written next. */
      size_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,1);
    }
  /* PSD supports 1, 8, or 16 bits; clamp deeper images to 16. */
  if (next_image->depth > 8)
    next_image->depth=16;
  monochrome=IsMonochromeImage(image,&image->exception) && (image->depth == 1)
    ? MagickTrue : MagickFalse;
  quantum_info=AcquireQuantumInfo(image_info,next_image);
  if (quantum_info == (QuantumInfo *) NULL)
    return(0);
  pixels=GetQuantumPixels(quantum_info);
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (next_image->compression == ZipCompression)
    {
      compressed_pixels=(unsigned char *) AcquireQuantumMemory(
        MagickMinBufferExtent,sizeof(*compressed_pixels));
      if (compressed_pixels == (unsigned char *) NULL)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          return(0);
        }
      memset(&stream,0,sizeof(stream));
      stream.data_type=Z_BINARY;
      level=Z_DEFAULT_COMPRESSION;
      /* -quality 1..9 selects the zlib compression level. */
      if ((image_info->quality > 0 && image_info->quality < 10))
        level=(int) image_info->quality;
      if (deflateInit(&stream,level) != Z_OK)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          compressed_pixels=(unsigned char *) RelinquishMagickMemory(
            compressed_pixels);
          return(0);
        }
    }
#endif
  for (y=0; y < (ssize_t) next_image->rows; y++)
  {
    p=GetVirtualPixels(next_image,0,y,next_image->columns,1,&image->exception);
    if (p == (const PixelPacket *) NULL)
      break;
    length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info,
      quantum_type,pixels,&image->exception);
    /* PSD bitmap mode stores inverted bits, so flip 1-bit mono rows. */
    if (monochrome != MagickFalse)
      for (i=0; i < (ssize_t) length; i++)
        pixels[i]=(~pixels[i]);
    if (next_image->compression == RLECompression)
      {
        length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels);
        count+=WriteBlob(image,length,compact_pixels);
        /* Patch this row's compressed byte count into the offset table. */
        size_offset+=WritePSDOffset(psd_info,image,length,size_offset);
      }
#ifdef MAGICKCORE_ZLIB_DELEGATE
    else if (next_image->compression == ZipCompression)
      {
        stream.avail_in=(uInt) length;
        stream.next_in=(Bytef *) pixels;
        /* Last row: tell deflate to flush everything out. */
        if (y == (ssize_t) next_image->rows-1)
          flush=Z_FINISH;
        do {
            stream.avail_out=(uInt) MagickMinBufferExtent;
            stream.next_out=(Bytef *) compressed_pixels;
            if (deflate(&stream,flush) == Z_STREAM_ERROR)
              break;
            length=(size_t) MagickMinBufferExtent-stream.avail_out;
            if (length > 0)
              count+=WriteBlob(image,length,compressed_pixels);
        } while (stream.avail_out == 0);
      }
#endif
    else
      count+=WriteBlob(image,length,pixels);
  }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (next_image->compression == ZipCompression)
    {
      (void) deflateEnd(&stream);
      compressed_pixels=(unsigned char *) RelinquishMagickMemory(
        compressed_pixels);
    }
#endif
  quantum_info=DestroyQuantumInfo(quantum_info);
  return(count);
}
/*
  AcquireCompactPixels() allocates a buffer large enough to hold one
  PackBits-compressed row of 'image' (worst case).  Reports a
  ResourceLimitError and returns NULL on allocation failure.
*/
static unsigned char *AcquireCompactPixels(Image *image)
{
  size_t
    packet_size;

  unsigned char
    *pixels;

  packet_size=(image->depth > 8UL) ? 2UL : 1UL;
  pixels=(unsigned char *) AcquireQuantumMemory((9*image->columns)+1,
    packet_size*sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    (void) ThrowMagickException(&image->exception,GetMagickModule(),
      ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
  return(pixels);
}
/*
  WritePSDChannels() writes every channel of 'next_image' (indexed, gray,
  or RGB/CMYK plus optional alpha and opacity mask) into the blob of
  'image'.  When 'separate' is set each channel becomes a layer channel
  with its own compression marker, and each channel's byte count is
  patched back at 'size_offset'; otherwise a single compression marker
  covers the merged image data and 'rows_offset' walks the shared RLE
  row-length table.  Returns total bytes written, or 0 on failure.
*/
static ssize_t WritePSDChannels(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  MagickOffsetType size_offset,const MagickBooleanType separate)
{
  Image
    *mask;
  MagickOffsetType
    rows_offset;
  size_t
    channels,
    length,
    offset_length;
  ssize_t
    count;
  unsigned char
    *compact_pixels;
  count=0;
  offset_length=0;
  rows_offset=0;
  compact_pixels=(unsigned char *) NULL;
  if (next_image->compression == RLECompression)
    {
      compact_pixels=AcquireCompactPixels(next_image);
      if (compact_pixels == (unsigned char *) NULL)
        return(0);
    }
  channels=1;
  if (separate == MagickFalse)
    {
      /* Merged image data: count channels and write one compression
         marker (plus RLE row-length placeholders) covering all of them. */
      if ((next_image->storage_class != PseudoClass) ||
          (IsGrayImage(next_image,&next_image->exception) != MagickFalse))
        {
          if (IsGrayImage(next_image,&next_image->exception) == MagickFalse)
            channels=(size_t) (next_image->colorspace == CMYKColorspace ?
              4 : 3);
          if (next_image->matte != MagickFalse)
            channels++;
        }
      rows_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,(ssize_t)
        channels);
      /* Bytes per channel in the RLE row-length table. */
      offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4));
    }
  size_offset+=2;
  if ((next_image->storage_class == PseudoClass) &&
      (IsGrayImage(next_image,&next_image->exception) == MagickFalse))
    {
      /* Palette image: a single index channel. */
      length=WritePSDChannel(psd_info,image_info,image,next_image,
        IndexQuantum,compact_pixels,rows_offset,separate);
      if (separate != MagickFalse)
        size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
      else
        rows_offset+=offset_length;
      count+=length;
    }
  else
    {
      if (IsGrayImage(next_image,&next_image->exception) != MagickFalse)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GrayQuantum,compact_pixels,rows_offset,separate);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
      else
        {
          /* PSD stores CMYK inverted; negate before writing and restore
             (second NegateImage below) afterwards. */
          if (next_image->colorspace == CMYKColorspace)
            (void) NegateImage(next_image,MagickFalse);
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            RedQuantum,compact_pixels,rows_offset,separate);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GreenQuantum,compact_pixels,rows_offset,separate);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            BlueQuantum,compact_pixels,rows_offset,separate);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          if (next_image->colorspace == CMYKColorspace)
            {
              length=WritePSDChannel(psd_info,image_info,image,next_image,
                BlackQuantum,compact_pixels,rows_offset,separate);
              if (separate != MagickFalse)
                size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
              else
                rows_offset+=offset_length;
              count+=length;
            }
        }
      if (next_image->matte != MagickFalse)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            AlphaQuantum,compact_pixels,rows_offset,separate);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
    }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  /* Undo the CMYK negation applied above. */
  if (next_image->colorspace == CMYKColorspace)
    (void) NegateImage(next_image,MagickFalse);
  if (separate != MagickFalse)
    {
      const char
        *property;
      /* Optional layer opacity mask, stored in the image registry under
         the key kept in the "psd:opacity-mask" artifact. */
      property=GetImageArtifact(next_image,"psd:opacity-mask");
      if (property != (const char *) NULL)
        {
          mask=(Image *) GetImageRegistry(ImageRegistryType,property,
            &image->exception);
          if (mask != (Image *) NULL)
            {
              if (mask->compression == RLECompression)
                {
                  compact_pixels=AcquireCompactPixels(mask);
                  if (compact_pixels == (unsigned char *) NULL)
                    return(0);
                }
              length=WritePSDChannel(psd_info,image_info,image,mask,
                RedQuantum,compact_pixels,rows_offset,MagickTrue);
              (void) WritePSDSize(psd_info,image,length,size_offset);
              count+=length;
              compact_pixels=(unsigned char *) RelinquishMagickMemory(
                compact_pixels);
            }
        }
    }
  return(count);
}
/*
  WritePascalString() writes 'value' as a Pascal string: one length byte
  followed by at most 255 characters, then zero bytes so the total
  length (including the length byte) is a multiple of 'padding'.
  Returns the number of bytes written.
*/
static size_t WritePascalString(Image *image,const char *value,size_t padding)
{
  size_t
    bytes_written,
    text_length;

  ssize_t
    pad;

  /*
    Max length is 255.
  */
  bytes_written=0;
  text_length=strlen(value);
  if (text_length > 255UL)
    text_length=255UL;
  if (text_length == 0)
    bytes_written+=WriteBlobByte(image,0);
  else
    {
      bytes_written+=WriteBlobByte(image,(unsigned char) text_length);
      bytes_written+=WriteBlob(image,text_length,
        (const unsigned char *) value);
    }
  text_length++;  /* account for the length byte itself */
  if ((text_length % padding) != 0)
    for (pad=0; pad < (ssize_t) (padding-(text_length % padding)); pad++)
      bytes_written+=WriteBlobByte(image,0);
  return(bytes_written);
}
/*
  WriteResolutionResourceBlock() writes the 8BIM resolution resource
  (id 0x03ED): horizontal and vertical resolution as 16.16 fixed point
  pixels-per-inch plus their display units.  Centimeter-based resolutions
  are converted to inches first.
*/
static void WriteResolutionResourceBlock(Image *image)
{
  double
    horizontal,
    vertical;

  unsigned short
    unit;

  if (image->units == PixelsPerCentimeterResolution)
    {
      horizontal=2.54*65536.0*image->x_resolution+0.5;
      vertical=2.54*65536.0*image->y_resolution+0.5;
      unit=2;
    }
  else
    {
      horizontal=65536.0*image->x_resolution+0.5;
      vertical=65536.0*image->y_resolution+0.5;
      unit=1;
    }
  (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
  (void) WriteBlobMSBShort(image,0x03ED);
  (void) WriteBlobMSBShort(image,0);
  (void) WriteBlobMSBLong(image,16);  /* resource size */
  /* NOTE(review): 0.5 is added both above and here, double-rounding the
     fixed-point value; kept as-is to preserve the emitted bytes. */
  (void) WriteBlobMSBLong(image,(unsigned int) (horizontal+0.5));
  (void) WriteBlobMSBShort(image,unit);  /* horizontal resolution unit */
  (void) WriteBlobMSBShort(image,unit);  /* width unit */
  (void) WriteBlobMSBLong(image,(unsigned int) (vertical+0.5));
  (void) WriteBlobMSBShort(image,unit);  /* vertical resolution unit */
  (void) WriteBlobMSBShort(image,unit);  /* height unit */
}
/*
  WriteChannelSize() writes a layer channel record: the signed channel id
  followed by a zero size placeholder that is patched later via
  WritePSDSize().  Returns the number of bytes written.
*/
static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image,
  const signed short channel)
{
  ssize_t
    bytes;

  bytes=WriteBlobMSBSignedShort(image,channel);
  bytes+=SetPSDSize(psd_info,image,0);
  return((size_t) bytes);
}
/*
  RemoveICCProfileFromResourceBlock() scans the 8BIM resources in
  'bim_profile' and removes the ICC profile resource (id 0x040F) in
  place, shrinking the profile's length.  The writer emits the ICC
  profile as its own resource, so a copy inside the 8BIM block would be
  a duplicate.
*/
static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile)
{
  const unsigned char
    *p;
  size_t
    length;
  unsigned char
    *datum;
  unsigned int
    count,
    long_sans;
  unsigned short
    id,
    short_sans;
  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  /* Walk the resource records; each starts with an "8BIM" signature. */
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    unsigned char
      *q;
    q=(unsigned char *) p;
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    /* Header: 4-byte signature, 2-byte id, 2-byte name, 4-byte size. */
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    if (id == 0x0000040f)
      {
        ssize_t
          quantum;
        /* Record length: padded data plus the 12-byte header. */
        quantum=PSDQuantum(count)+12;
        if ((quantum >= 12) && (quantum < (ssize_t) length))
          {
            /* Slide the remaining records over the ICC record. */
            if ((q+quantum < (datum+length-16)))
              (void) memmove(q,q+quantum,length-quantum-(q-datum));
            SetStringInfoLength(bim_profile,length-quantum);
          }
        break;
      }
    p+=count;
    /* Resource data is padded to an even byte count. */
    if ((count & 0x01) != 0)
      p++;
  }
}
/*
  RemoveResolutionFromResourceBlock() scans the 8BIM resources in
  'bim_profile' and removes the resolution resource (id 0x03ED) in
  place, shrinking the profile's length.  The writer emits its own
  resolution resource (see WriteResolutionResourceBlock()), so a stale
  copy in the 8BIM block would conflict.
*/
static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile)
{
  const unsigned char
    *p;
  size_t
    length;
  unsigned char
    *datum;
  unsigned int
    count,
    long_sans;
  unsigned short
    id,
    short_sans;
  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  /* Walk the resource records; each starts with an "8BIM" signature. */
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    unsigned char
      *q;
    ssize_t
      cnt;
    q=(unsigned char *) p;
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      return;
    /* Header: 4-byte signature, 2-byte id, 2-byte name, 4-byte size. */
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    cnt=PSDQuantum(count);
    if (cnt < 0)
      return;
    if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)) &&
        ((ssize_t) length-(cnt+12)-(q-datum)) > 0)
      {
        /* Slide the remaining records over the resolution record
           (cnt data bytes + 12-byte header). */
        (void) memmove(q,q+cnt+12,length-(cnt+12)-(q-datum));
        SetStringInfoLength(bim_profile,length-(cnt+12));
        break;
      }
    p+=count;
    /* Resource data is padded to an even byte count. */
    if ((count & 0x01) != 0)
      p++;
  }
}
/*
  GetAdditionalInformation() decides what to do with the image's
  "psd:additional-info" profile (extra layer records read from the
  original PSD) based on the image option of the same name:
    - "all": keep the profile untouched;
    - "selective" (or unset): filter it in place, keeping only records
      whose 4-character key is in the whitelist below;
    - anything else: drop the profile entirely.
  Returns the (possibly filtered) profile to embed in the layer record,
  or NULL when nothing should be written.
*/
static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info,
  Image *image)
{
#define PSDKeySize 5
#define PSDAllowedLength 36
  char
    key[PSDKeySize];
  /* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */
  const char
    allowed[PSDAllowedLength][PSDKeySize] = {
      "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk",
      "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr",
      "lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl",
      "post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA"
    },
    *option;
  const StringInfo
    *info;
  MagickBooleanType
    found;
  size_t
    i;
  size_t
    remaining_length,
    length;
  StringInfo
    *profile;
  unsigned char
    *p;
  unsigned int
    size;
  info=GetImageProfile(image,"psd:additional-info");
  if (info == (const StringInfo *) NULL)
    return((const StringInfo *) NULL);
  option=GetImageOption(image_info,"psd:additional-info");
  if (LocaleCompare(option,"all") == 0)
    return(info);
  if (LocaleCompare(option,"selective") != 0)
    {
      /* Unknown option value: discard the profile. */
      profile=RemoveImageProfile(image,"psd:additional-info");
      return(DestroyStringInfo(profile));
    }
  length=GetStringInfoLength(info);
  p=GetStringInfoDatum(info);
  remaining_length=length;
  length=0;  /* re-used below as the filtered (kept) byte count */
  /* Each record is a 12-byte header (4-byte signature, 4-byte key,
     4-byte big-endian size) followed by 'size' data bytes. */
  while (remaining_length >= 12)
  {
    /* skip over signature */
    p+=4;
    key[0]=(char) (*p++);
    key[1]=(char) (*p++);
    key[2]=(char) (*p++);
    key[3]=(char) (*p++);
    key[4]='\0';
    size=(unsigned int) (*p++) << 24;
    size|=(unsigned int) (*p++) << 16;
    size|=(unsigned int) (*p++) << 8;
    size|=(unsigned int) (*p++);
    size=size & 0xffffffff;
    remaining_length-=12;
    /* Malformed record (size overruns the profile): give up. */
    if ((size_t) size > remaining_length)
      return((const StringInfo *) NULL);
    found=MagickFalse;
    for (i=0; i < PSDAllowedLength; i++)
    {
      if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0)
        continue;
      found=MagickTrue;
      break;
    }
    remaining_length-=(size_t) size;
    if (found == MagickFalse)
      {
        /* Not whitelisted: compact the buffer over this record. */
        if (remaining_length > 0)
          p=(unsigned char *) memmove(p-12,p+size,remaining_length);
        continue;
      }
    length+=(size_t) size+12;
    p+=size;
  }
  profile=RemoveImageProfile(image,"psd:additional-info");
  if (length == 0)
    return(DestroyStringInfo(profile));
  /* Shrink to the kept records and re-attach the filtered profile. */
  SetStringInfoLength(profile,(const size_t) length);
  (void) SetImageProfile(image,"psd:additional-info",info);
  return(profile);
}
/*
  WritePSDImage() writes 'image' in Adobe Photoshop format.  Every image
  in the list becomes a PSD layer, and the first image is also written as
  the merged composite at the end.  The PSB (version 2) variant is used
  when the magick is "PSB" or either dimension exceeds 30000 pixels.
  Returns MagickTrue on success.
*/
static MagickBooleanType WritePSDImage(const ImageInfo *image_info,
  Image *image)
{
  char
    layer_name[MaxTextExtent];
  const char
    *property;
  const StringInfo
    *icc_profile,
    *info;
  Image
    *base_image,
    *next_image;
  MagickBooleanType
    status;
  MagickOffsetType
    *layer_size_offsets,
    size_offset;
  PSDInfo
    psd_info;
  ssize_t
    i;
  size_t
    layer_count,
    layer_index,
    length,
    name_length,
    num_channels,
    packet_size,
    rounded_size,
    size;
  StringInfo
    *bim_profile;
  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception);
  if (status == MagickFalse)
    return(status);
  packet_size=(size_t) (image->depth > 8 ? 6 : 3);
  if (image->matte != MagickFalse)
    packet_size+=image->depth > 8 ? 2 : 1;
  /* File header: version 2 (PSB) for big images or an explicit PSB. */
  psd_info.version=1;
  if ((LocaleCompare(image_info->magick,"PSB") == 0) ||
      (image->columns > 30000) || (image->rows > 30000))
    psd_info.version=2;
  (void) WriteBlob(image,4,(const unsigned char *) "8BPS");
  (void) WriteBlobMSBShort(image,psd_info.version);  /* version */
  for (i=1; i <= 6; i++)
    (void) WriteBlobByte(image, 0);  /* 6 bytes of reserved */
  /* When the image has a color profile it won't be converted to gray scale */
  if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) &&
      (SetImageGray(image,&image->exception) != MagickFalse))
    num_channels=(image->matte != MagickFalse ? 2UL : 1UL);
  else
    if ((image_info->type != TrueColorType) && (image_info->type !=
         TrueColorMatteType) && (image->storage_class == PseudoClass))
      num_channels=(image->matte != MagickFalse ? 2UL : 1UL);
    else
      {
        if (image->storage_class == PseudoClass)
          (void) SetImageStorageClass(image,DirectClass);
        if (image->colorspace != CMYKColorspace)
          num_channels=(image->matte != MagickFalse ? 4UL : 3UL);
        else
          num_channels=(image->matte != MagickFalse ? 5UL : 4UL);
      }
  (void) WriteBlobMSBShort(image,(unsigned short) num_channels);
  (void) WriteBlobMSBLong(image,(unsigned int) image->rows);
  (void) WriteBlobMSBLong(image,(unsigned int) image->columns);
  if (IsGrayImage(image,&image->exception) != MagickFalse)
    {
      MagickBooleanType
        monochrome;
      /*
        Write depth & mode.
      */
      monochrome=IsMonochromeImage(image,&image->exception) &&
        (image->depth == 1) ? MagickTrue : MagickFalse;
      (void) WriteBlobMSBShort(image,(unsigned short)
        (monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8));
      (void) WriteBlobMSBShort(image,(unsigned short)
        (monochrome != MagickFalse ? BitmapMode : GrayscaleMode));
    }
  else
    {
      (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class ==
        PseudoClass ? 8 : image->depth > 8 ? 16 : 8));
      if (((image_info->colorspace != UndefinedColorspace) ||
           (image->colorspace != CMYKColorspace)) &&
          (image_info->colorspace != CMYKColorspace))
        {
          (void) TransformImageColorspace(image,sRGBColorspace);
          (void) WriteBlobMSBShort(image,(unsigned short)
            (image->storage_class == PseudoClass ? IndexedMode : RGBMode));
        }
      else
        {
          if (image->colorspace != CMYKColorspace)
            (void) TransformImageColorspace(image,CMYKColorspace);
          (void) WriteBlobMSBShort(image,CMYKMode);
        }
    }
  /* Color mode data section: empty unless an indexed colormap applies. */
  if ((IsGrayImage(image,&image->exception) != MagickFalse) ||
      (image->storage_class == DirectClass) || (image->colors > 256))
    (void) WriteBlobMSBLong(image,0);
  else
    {
      /*
        Write PSD raster colormap.
      */
      (void) WriteBlobMSBLong(image,768);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(image->colormap[i].red));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(
          image->colormap[i].green));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(image->colormap[i].blue));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
    }
  /*
    Image resource block.
  */
  length=28; /* 0x03EB */
  bim_profile=(StringInfo *) GetImageProfile(image,"8bim");
  icc_profile=GetImageProfile(image,"icc");
  if (bim_profile != (StringInfo *) NULL)
    {
      /* Work on a clone; strip resources the writer re-emits itself. */
      bim_profile=CloneStringInfo(bim_profile);
      if (icc_profile != (StringInfo *) NULL)
        RemoveICCProfileFromResourceBlock(bim_profile);
      RemoveResolutionFromResourceBlock(bim_profile);
      length+=PSDQuantum(GetStringInfoLength(bim_profile));
    }
  if (icc_profile != (const StringInfo *) NULL)
    length+=PSDQuantum(GetStringInfoLength(icc_profile))+12;
  (void) WriteBlobMSBLong(image,(unsigned int) length);
  WriteResolutionResourceBlock(image);
  if (bim_profile != (StringInfo *) NULL)
    {
      (void) WriteBlob(image,GetStringInfoLength(bim_profile),
        GetStringInfoDatum(bim_profile));
      bim_profile=DestroyStringInfo(bim_profile);
    }
  if (icc_profile != (StringInfo *) NULL)
    {
      /* ICC profile resource (id 0x040F), padded to an even length. */
      (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
      (void) WriteBlobMSBShort(image,0x0000040F);
      (void) WriteBlobMSBShort(image,0);
      (void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength(
        icc_profile));
      (void) WriteBlob(image,GetStringInfoLength(icc_profile),
        GetStringInfoDatum(icc_profile));
      if ((ssize_t) GetStringInfoLength(icc_profile) !=
          PSDQuantum(GetStringInfoLength(icc_profile)))
        (void) WriteBlobByte(image,0);
    }
  /*
    Layer and mask information section.
  */
  base_image=GetNextImageInList(image);
  if (base_image == (Image *)NULL)
    base_image=image;
  size=0;
  size_offset=TellBlob(image);
  /* Two size placeholders (section and layer-info), patched at the end. */
  (void) SetPSDSize(&psd_info,image,0);
  (void) SetPSDSize(&psd_info,image,0);
  layer_count=0;
  for (next_image=base_image; next_image != NULL; )
  {
    layer_count++;
    next_image=GetNextImageInList(next_image);
  }
  /* A negative layer count signals that the first alpha channel holds
     the merged-image transparency. */
  if (image->matte != MagickFalse)
    size+=WriteBlobMSBShort(image,-(unsigned short) layer_count);
  else
    size+=WriteBlobMSBShort(image,(unsigned short) layer_count);
  layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory(
    (size_t) layer_count,sizeof(MagickOffsetType));
  if (layer_size_offsets == (MagickOffsetType *) NULL)
    ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
  layer_index=0;
  /* First pass: one layer record per image in the list. */
  for (next_image=base_image; next_image != NULL; )
  {
    Image
      *mask;
    unsigned char
      default_color;
    unsigned short
      channels,
      total_channels;
    mask=(Image *) NULL;
    property=GetImageArtifact(next_image,"psd:opacity-mask");
    default_color=0;
    if (property != (const char *) NULL)
      {
        mask=(Image *) GetImageRegistry(ImageRegistryType,property,
          &image->exception);
        default_color=(unsigned char) (strlen(property) == 9 ? 255 : 0);
      }
    /* Layer rectangle: top, left, bottom, right. */
    size+=WriteBlobMSBLong(image,(unsigned int) next_image->page.y);
    size+=WriteBlobMSBLong(image,(unsigned int) next_image->page.x);
    size+=WriteBlobMSBLong(image,(unsigned int) (next_image->page.y+
      next_image->rows));
    size+=WriteBlobMSBLong(image,(unsigned int) (next_image->page.x+
      next_image->columns));
    channels=1;
    if ((next_image->storage_class != PseudoClass) &&
        (IsGrayImage(next_image,&next_image->exception) == MagickFalse))
      channels=(unsigned short) (next_image->colorspace == CMYKColorspace ?
        4 : 3);
    total_channels=channels;
    if (next_image->matte != MagickFalse)
      total_channels++;
    if (mask != (Image *) NULL)
      total_channels++;
    size+=WriteBlobMSBShort(image,total_channels);
    /* Remember where this layer's channel size fields live, so the
       second pass can patch them via WritePSDSize(). */
    layer_size_offsets[layer_index++]=TellBlob(image);
    for (i=0; i < (ssize_t) channels; i++)
      size+=WriteChannelSize(&psd_info,image,(signed short) i);
    if (next_image->matte != MagickFalse)
      size+=WriteChannelSize(&psd_info,image,-1);  /* alpha channel id */
    if (mask != (Image *) NULL)
      size+=WriteChannelSize(&psd_info,image,-2);  /* user mask channel id */
    size+=WriteBlob(image,4,(const unsigned char *) "8BIM");
    size+=WriteBlob(image,4,(const unsigned char *)
      CompositeOperatorToPSDBlendMode(next_image));
    property=GetImageArtifact(next_image,"psd:layer.opacity");
    if (property != (const char *) NULL)
      {
        Quantum
          opacity;
        opacity=(Quantum) StringToInteger(property);
        size+=WriteBlobByte(image,ScaleQuantumToChar(opacity));
        (void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,
          &image->exception);
      }
    else
      size+=WriteBlobByte(image,255);
    size+=WriteBlobByte(image,0);
    size+=WriteBlobByte(image,(unsigned char)
      (next_image->compose == NoCompositeOp ? 1 << 0x02 : 1)); /* layer properties - visible, etc. */
    size+=WriteBlobByte(image,0);
    info=GetAdditionalInformation(image_info,next_image);
    property=(const char *) GetImageProperty(next_image,"label");
    if (property == (const char *) NULL)
      {
        /* Unnamed layers get a synthetic "L<index>" label. */
        (void) FormatLocaleString(layer_name,MaxTextExtent,"L%.20g",
          (double) layer_index);
        property=layer_name;
      }
    /* Extra-data field length: padded name + mask data + blending
       ranges + any additional-info records. */
    name_length=strlen(property)+1;
    if ((name_length % 4) != 0)
      name_length+=(4-(name_length % 4));
    if (info != (const StringInfo *) NULL)
      name_length+=GetStringInfoLength(info);
    name_length+=8;
    if (mask != (Image *) NULL)
      name_length+=20;
    size+=WriteBlobMSBLong(image,(unsigned int) name_length);
    if (mask == (Image *) NULL)
      size+=WriteBlobMSBLong(image,0);
    else
      {
        if (mask->compose != NoCompositeOp)
          (void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum(
            default_color),MagickTrue,&image->exception);
        mask->page.y+=image->page.y;
        mask->page.x+=image->page.x;
        /* 20-byte layer mask data: rectangle, default color, flags. */
        size+=WriteBlobMSBLong(image,20);
        size+=WriteBlobMSBSignedLong(image,(const signed int) mask->page.y);
        size+=WriteBlobMSBSignedLong(image,(const signed int) mask->page.x);
        size+=WriteBlobMSBSignedLong(image,(const signed int) (mask->rows+
          mask->page.y));
        size+=WriteBlobMSBSignedLong(image,(const signed int) (mask->columns+
          mask->page.x));
        size+=WriteBlobByte(image,default_color);
        size+=WriteBlobByte(image,(unsigned char) (
          mask->compose == NoCompositeOp ? 2 : 0));
        size+=WriteBlobMSBShort(image,0);
      }
    size+=WriteBlobMSBLong(image,0);  /* empty blending ranges */
    size+=WritePascalString(image,property,4);
    if (info != (const StringInfo *) NULL)
      size+=WriteBlob(image,GetStringInfoLength(info),GetStringInfoDatum(info));
    next_image=GetNextImageInList(next_image);
  }
  /*
    Now the image data!
  */
  /* Second pass: per-layer channel data, patching each layer's channel
     sizes at the offsets recorded above. */
  next_image=base_image;
  layer_index=0;
  while (next_image != NULL)
  {
    length=(size_t) WritePSDChannels(&psd_info,image_info,image,next_image,
      layer_size_offsets[layer_index++],MagickTrue);
    if (length == 0)
      {
        status=MagickFalse;
        break;
      }
    size+=length;
    next_image=GetNextImageInList(next_image);
  }
  (void) WriteBlobMSBLong(image,0);  /* user mask data */
  /*
    Remove the opacity mask from the registry
  */
  next_image=base_image;
  while (next_image != (Image *) NULL)
  {
    property=GetImageArtifact(next_image,"psd:opacity-mask");
    if (property != (const char *) NULL)
      (void) DeleteImageRegistry(property);
    next_image=GetNextImageInList(next_image);
  }
  /*
    Write the total size
  */
  /* Patch the section size, then the layer-info size (rounded to even). */
  size_offset+=WritePSDSize(&psd_info,image,size+
    (psd_info.version == 1 ? 8 : 12),size_offset);
  if ((size/2) != ((size+1)/2))
    rounded_size=size+1;
  else
    rounded_size=size;
  (void) WritePSDSize(&psd_info,image,rounded_size,size_offset);
  layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory(
    layer_size_offsets);
  /*
    Write composite image.
  */
  if (status != MagickFalse)
    {
      CompressionType
        compression;
      compression=image->compression;
      if (image_info->compression != UndefinedCompression)
        image->compression=image_info->compression;
      /* The composite section cannot be zip compressed; fall back to RLE. */
      if (image->compression == ZipCompression)
        image->compression=RLECompression;
      if (WritePSDChannels(&psd_info,image_info,image,image,0,
          MagickFalse) == 0)
        status=MagickFalse;
      image->compression=compression;
    }
  (void) CloseBlob(image);
  return(status);
}
|
stresslet_real_rc.c | #include "stresslet_real_rc.h"
#include "cell_list.h"
#ifdef BEENAKKER
#include "beenakker_op_fd.h"
#else
#error "Must provide -D<method> to compiler"
#endif
#ifdef _OPENMP
#define CRITICAL _Pragma("omp critical")
#else
#define CRITICAL
#endif
#define SWAP(x,y) { tmp=x;x=y;y=tmp; }
static void quicksort(int* restrict list, int* restrict slave, int m, int n);
static void barrier(int bar_num, int *barrier_in, int *barrier_out, int *num_procs);
static void transpose(const double* restrict in, double* restrict out, const int N);
// ==== GENERATE TRIPLETS FOR MATRIX ASSEMBLY
void get_rs_triplets (
const double* restrict x_in,
const double* restrict nvec_in,
const int N,
const double* restrict box,
const double xi,
const double rc,
const int nlhs,
int* restrict *row_p,
int* restrict *col_p,
double* restrict val[3][3],
int* restrict *buck_size_p,
int* restrict *idx_in_array_p,
int* numel_p
)
{
// Fix input (legacy format gives bad memory access)
double* restrict x = __MALLOC(3*N*sizeof(double));
double* restrict nvec = __MALLOC(3*N*sizeof(double));
transpose(x_in, x, N);
transpose(nvec_in, nvec, N);
// Setup output variables
int* restrict row;
int* restrict col;
int* restrict idx_in_array;
int* restrict buck_size;
// Setup variables
int i,j;
int ncell[3];
int* restrict ll;
int* restrict head;
double rn;
int px[27] = {-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1};
int py[27] = {-1,-1,-1, 0, 0, 0, 1, 1, 1,-1,-1,-1, 0, 0, 0, 1, 1, 1,-1,-1,-1, 0, 0, 0, 1, 1, 1};
int pz[27] = {-1,-1,-1,-1,-1,-1,-1,-1,-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1};
struct timeval tic, toc;
gettimeofday(&tic, NULL);
double time_spent;
// Build cell list
build_linked_cell_list(x, N, box, rc, &rn, ncell, &ll, &head);
if(VERBOSE)
{
__PRINTF("[RSRC] SPARSE MATRIX\n");
__PRINTF("[RSRC] %s, xi=%g\n", OP_TAG, xi);
__PRINTF("[RSRC] rc=%.3f, rn=%.3f\n", rc, rn);
__PRINTF("[RSRC] box=(%g,%g,%g), ncell=(%d,%d,%d)\n",
box[0],box[1],box[2],
ncell[0],ncell[1],ncell[2]);
}
//============================================================
// CALCULATE INTERACTIONS
//
// For all vectors, go through neighbors and save interactions
// in vectors that are used to create a sparse matrix
// Allocate a guess based on average density +50%
int maxel = round( 1.5 * N*N*4*PI*rc*rc*rc/3/(box[0]*box[1]*box[2]) );
int numel = 0;
size_t malloc_tot = maxel * (2*sizeof(int) + 6*sizeof(double));
ASSERT(malloc_tot < MALLOC_MAX, "MALLOC_MAX exceeded");
row = __MALLOC(maxel*sizeof(int));
col = __MALLOC(maxel*sizeof(int));
for(i=0;i<=2;i++)
for(j=i;j<=2;j++)
{
val[i][j] = __MALLOC(maxel*sizeof(double));
}
#ifdef _OPENMP
int barrier_in[2] = {0,0};
int barrier_out[2] = {0,0};
int realloc_done=0;
int num_procs;
#pragma omp parallel private(i,j) \
shared(numel,maxel,row,col,val,box,x,nvec,head,ll,px,py,pz,ncell,rn,barrier_in,barrier_out,realloc_done,num_procs) \
default(none)
#endif
{ // Begin parallel section
int head_idx;
int icell[3], home_cell[3];
int idx_s,idx_t,ip;
double rsq;
double pshift[3], xs[3], ns[3], nt[3], xr[3];
double A1[3][3], A2[3][3];
const double rcsq = rc*rc;
// Allocate a bufffer of interactions to be written
// into triplet list
const int buf_size = 256;
int buf_cnt = 0;
int idx_buf, next_idx_t;
int* restrict buf_idx_t;
double* restrict buf_xr;
double* restrict buf_rsq;
double* restrict C;
double* restrict D;
int tnum = 0;
#ifdef _OPENMP
tnum = omp_get_thread_num();
#pragma omp single
num_procs = omp_get_num_threads();
if(VERBOSE)
{
#pragma omp master
__PRINTF("[RSRC] Running on %d threads.\n",num_procs);
}
// Seems mxMalloc/mxFree are not thread safe
#pragma omp critical
{
#endif
buf_idx_t = __MALLOC(buf_size*sizeof(int));
buf_xr = __MALLOC(3*buf_size*sizeof(double));
buf_rsq = __MALLOC(buf_size*sizeof(double));
C = __MALLOC(buf_size*sizeof(double));
D = __MALLOC(buf_size*sizeof(double));
#ifdef _OPENMP
}
#pragma omp for schedule(dynamic) nowait
#endif
// Loop over all points
for(idx_s=0;idx_s<N;idx_s++)
{
for(j=0; j<3; j++)
{
// Source point
xs[j] = x[idx_s*3+j];
// Determine home cell
home_cell[j] = xs[j]/rn;
// Source point normal vector
ns[j] = nvec[idx_s*3+j];
}
// Iterate through near cells (including home cell)
for(ip=0; ip<27; ip++)
{
// Get neigh cell
icell[0] = home_cell[0] + px[ip];
icell[1] = home_cell[1] + py[ip];
icell[2] = home_cell[2] + pz[ip];
// Periodic wrap
for(j=0; j<3; j++)
{
// (Could do this with mod)
pshift[j] = 0;
if(icell[j] >= ncell[j])
{
icell[j] = 0;
pshift[j] = box[j];
}
else if(icell[j]<0)
{
icell[j] = ncell[j]-1;
pshift[j] = -box[j];
}
}
head_idx =
icell[0] +
icell[1]*ncell[0] +
icell[2]*ncell[1]*ncell[0];
// Go through cell list
idx_t = head[head_idx];
while(1)
{
if(idx_t > idx_s)
{
// r points from s to t
for(j=0; j<3; j++)
xr[j] = x[idx_t*3+j] + pshift[j] - xs[j];
// Check if we are within truncation radius
rsq = xr[0]*xr[0] + xr[1]*xr[1] + xr[2]*xr[2];
if(rsq <= rcsq)
{
// Yes, so put interaction in buffer
buf_idx_t[buf_cnt] = idx_t;
buf_rsq[buf_cnt] = rsq;
for(i=0;i<3;i++)
buf_xr[3*buf_cnt+i] = xr[i];
buf_cnt++;
}
}
// Save location of next point in cell chain
if(idx_t == -1)
next_idx_t = -1;
else
next_idx_t = ll[idx_t];
// Empty buffer if last point of last neighbour,
// or buffer full
if ( (ip==26 && next_idx_t==-1) || buf_cnt==buf_size)
{
// Check if we have enough space to hold buffer contents
int idx_write, can_write;
#ifdef _OPENMP
#pragma omp critical
#endif
{ /* begin critical section */
// Check if buffer holds writing space for me
if(maxel-numel <= 2*buf_cnt) {
can_write = 0;
//__PRINTF("[%d] Can't write, reallocation needed! \n",tnum);
}
else
can_write = 1;
// Reserve writing in either case
idx_write = numel;
numel += 2*buf_cnt;
} /* end critical section */
/* Begin can_write==0 */
if(can_write==0)
{
int alloc_add = buf_size; // How much to add to allocation (single thread)
#ifdef _OPENMP
// Everybody has to wait here before reallocation
// Allocate more than a fuller buffer for every thread
alloc_add = num_procs*buf_size;
#pragma omp critical
realloc_done = 0; // Everybody agrees reallocation has not been done
barrier(0, barrier_in, barrier_out, &num_procs);
#pragma omp critical
{ // Critical section
if(realloc_done==0)
{
realloc_done=1;
#endif
// Allocate for full buffer(s) + 20% more
int new_maxel = ceil(1.2*(maxel+alloc_add));
if (VERBOSE)
__PRINTF("[RSRC][%d] Reallocating triplet vectors %d -> %d\n",tnum,maxel,new_maxel);
maxel = new_maxel;
row = __REALLOC(row, maxel*sizeof(int));
col = __REALLOC(col, maxel*sizeof(int));
for(i=0;i<=2;i++)
for(j=i;j<=2;j++)
val[i][j] = __REALLOC(val[i][j], maxel*sizeof(double));
#ifdef _OPENMP
//__PRINTF("[%d] Done \n",tnum);
}
else
{
//__PRINTF("[%d] Someone else reallocated \n",tnum);
}
}
barrier(1, barrier_in, barrier_out, &num_procs);
#endif
}
/* End can_write==0 */
// Do delayed calculations
op_A_CD(C,D,buf_rsq,buf_cnt,xi);
//#pragma omp critical
//__PRINTF("[%d] Begin write \n",tnum);
// Write triplets
for(idx_buf=0;idx_buf<buf_cnt;idx_buf++)
{
idx_t = buf_idx_t[idx_buf];
for(i=0;i<3;i++)
{
xr[i] = buf_xr[3*idx_buf+i];
// Source point normal vector
nt[i] = nvec[idx_t*3+i];
}
// Calculate interactions t->s and s<-t
op_A_symm_CD(A1,A2,xr,ns,nt,xi,C[idx_buf],D[idx_buf]);
// Append results to row,col,val vectors
row[idx_write] = idx_t;
col[idx_write] = idx_s;
for(i=0; i<=2; i++)
for(j=i; j<=2; j++)
{
val[i][j][idx_write] = A1[i][j];
}
idx_write++;
row[idx_write] = idx_s;
col[idx_write] = idx_t;
for(i=0; i<=2; i++)
for(j=i; j<=2; j++)
{
val[i][j][idx_write] = A2[i][j];
}
idx_write++;
} // endfor buffer
//#pragma omp critical
//__PRINTF("[%d] End write \n",tnum);
buf_cnt = 0;
} // endif chainend or buffull
idx_t = next_idx_t;
if(idx_t == -1)
break; // Chain ended
} // End of neighbours in this cell
} // End of cells
} // End of particles
#ifdef _OPENMP
#pragma omp critical
{
//__PRINTF("[%d] Exit loop , barrier_in={%d,%d}\n",tnum, barrier_in[0], barrier_in[1]);
#pragma omp atomic
// One less thread going around in loop
num_procs--;
}
#pragma omp critical
#endif
{
__FREE(buf_idx_t);
__FREE(buf_xr);
__FREE(buf_rsq);
__FREE(C);
__FREE(D);
}
} // End parallel section
// Free allocations
__FREE(head);
__FREE(ll);
__FREE(x);
__FREE(nvec);
if(VERBOSE) {
gettimeofday(&toc, NULL);
time_spent = DELTA(tic,toc);
__PRINTF("[RSRC] Triplets generated in %.3f seconds.\n", time_spent);
}
// Reallocate (shrink) values to actual size used
gettimeofday(&tic, NULL);
for(i=0;i<=2;i++)
for(j=i;j<=2;j++)
{
double* tmp = val[i][j];
val[i][j] = __REALLOC(val[i][j], numel*sizeof(double));
if (tmp != val[i][j] && VERBOSE)
__PRINTF("[RSRC] Realloc moved val[%d][%d].\n", i, j);
}
if(VERBOSE) {
gettimeofday(&toc, NULL);
time_spent = DELTA(tic,toc);
__PRINTF("[RSRC] Realloc %d->%d took %.3f seconds.\n", maxel, numel, time_spent);
}
//============================================
// SORT RESULTS WITH COUNTING + QUICK SORT
// Counting sort on columns, then quicksort on rows
// in each column
// (Turns out this is counting sort rather than bucket sort,
// which I initially thought, hence the buck_* naming.)
gettimeofday(&tic, NULL);
buck_size = __MALLOC(N*sizeof(int));
idx_in_array = __MALLOC(numel*sizeof(int));
int* restrict buck_count = __MALLOC(N*sizeof(int));
int* restrict buck_pos = __MALLOC(N*sizeof(int));
int buck_idx,new_idx;
// Init lists
for(i=0;i<N;i++)
{
buck_size[i]=0;
buck_count[i]=0;
}
// Count number of elements in each bucket (column)
for(i=0;i<numel;i++)
{
buck_idx = col[i];
buck_size[buck_idx]++;
}
// Cumulative addition to get locations of each bucket after sort,
// + save largest bucket size for later.
buck_pos[0] = 0;
for(i=1;i<N;i++)
{
buck_pos[i] = buck_pos[i-1]+buck_size[i-1];
}
// Assign each element to a bucket, store permutations in idx_in_array
int* restrict rowtmp = __MALLOC(numel*sizeof(int));
for(i=0;i<numel;i++)
{
buck_idx = col[i];
new_idx = buck_pos[buck_idx] + buck_count[buck_idx];
idx_in_array[ new_idx ] = i;
buck_count[buck_idx]++;
}
__FREE(buck_count); // Free counter
// Sort rows using permutations
// (work-shared)
#ifdef _OPENMP
#pragma omp parallel for default(shared)
#endif
for(i=0;i<numel;i++)
rowtmp[i] = row[ idx_in_array[i] ];
__FREE(row);
row = rowtmp;
if(nlhs==1)
{
__FREE(col); // Free column list if only returning matrix,
}
else
{
// else sort columns too.
// Could be done faster with bucket info, but sorted columns are
// not needed for real application.
int* restrict coltmp = __MALLOC(numel*sizeof(int));
for(i=0;i<numel;i++)
coltmp[i] = col[ idx_in_array[i] ];
__FREE(col);
col = coltmp;
}
gettimeofday(&toc,NULL);
time_spent = DELTA(tic,toc);
if(VERBOSE)
__PRINTF("[RSRC] Counting sort of cols finished in %.3f seconds.\n", time_spent);
gettimeofday(&tic,NULL);
// Quicksort on buckets
// Each bucket contains a compressed column.
#ifdef _OPENMP
#pragma omp parallel for schedule(dynamic) default(none) shared(buck_pos,buck_size,idx_in_array,row)
#endif
for(buck_idx=0;buck_idx<N;buck_idx++)
{
int begin = buck_pos[buck_idx];
int size = buck_size[buck_idx];
quicksort(row, idx_in_array, begin, begin+size-1) ;
}
__FREE(buck_pos); // Free bucket list
gettimeofday(&toc,NULL);
time_spent = DELTA(tic,toc);
if(VERBOSE)
__PRINTF("[RSRC] Quicksort of rows finished in %.3f seconds.\n", time_spent);
// Set return pointers
*row_p = row;
*col_p = col;
*buck_size_p = buck_size;
*idx_in_array_p = idx_in_array;
*numel_p = numel;
}
//============ QUICKSORT ROUTINE
// Applies quicksort on an interval (m,n) of *list,
// performs the same permutations on *slave.
// Uses a private stack instead of making recursive calls.
// Iterative in-place quicksort of list[m..n] (inclusive bounds).
// Every swap applied to list is mirrored on slave, so slave carries the
// same permutation (here: the triplet permutation indices).
// Recursion is replaced by an explicit stack of [beg,end] intervals;
// the shorter sub-interval is always processed first, which bounds the
// stack depth by log2 of the range length.
static void quicksort(int* restrict list, int* restrict slave, int m, int n) {
#define MAX_LEVELS 64
  int beg[MAX_LEVELS], end[MAX_LEVELS]; // Stack of pending [beg,end] intervals
  // tmp is presumably consumed by the SWAP macro — confirm against its definition
  int key,i,j,k,s,tmp;
  s=0;
  beg[0]=m;
  end[0]=n;
  while (s>=0)
  { // While work in stack, pop
    m=beg[s];
    n=end[s];
    if (m<n)
    {
      k = m+(n-m)/2; // Choose middle for pivot (overflow-safe midpoint)
      SWAP(list[m],list[k]); // Swap out pivot to the front
      SWAP(slave[m],slave[k]);
      // Partition: elements <= key to the left, > key to the right
      key = list[m];
      i = m+1;
      j = n;
      while(i <= j)
      {
        while((i <= n) && (list[i] <= key))
          i++;
        while((j >= m) && (list[j] > key))
          j--;
        if( i < j)
        {
          SWAP(list[i],list[j]);
          SWAP(slave[i],slave[j]);
        }
      }
      // Swap in pivot at right place (j is the last element <= key)
      SWAP(list[m],list[j]);
      SWAP(slave[m],slave[j]);
      if(s == MAX_LEVELS-1) // Stack full: bail out rather than overflow
      {
        __PRINTF("ERROR. Quicksort reached MAX_LEVELS\n");
        return;
      }
      // Push both sub-intervals: [m, j-1] and [j+1, n]
      beg[s] = m;
      end[s] = j-1;
      beg[s+1]=j+1;
      end[s+1]=n;
      s += 1;
      // Do shortest interval first to limit stack use
      if (end[s]-beg[s]>end[s-1]-beg[s-1])
      {
        SWAP(beg[s],beg[s-1]);
        SWAP(end[s],end[s-1]);
      }
    }
    else
    {
      s--; // Interval of size <= 1: nothing to sort, pop
    }
  }
}
//============ Home-brewed barrier
// Hand-rolled reusable counting barrier, identified by bar_num, for use
// inside an OpenMP parallel region where #pragma omp barrier cannot be
// used (threads may leave the enclosing loop at different times; the
// caller decrements *num_procs for threads that exit early so the
// remaining threads are not blocked forever).
// Three phases: arrive (count up), spin (busy-wait until everyone
// arrived), depart+reset (last thread through zeroes both counters).
// All counter updates are serialized with omp critical; the spin loop
// flushes so the thread observes other threads' increments.
// NOTE(review): busy-wait burns CPU while spinning, and *num_procs is
// read in the spin condition without an atomic — relies on the flush and
// on int loads being indivisible in practice; confirm acceptable.
static void barrier(int bar_num, int *barrier_in, int *barrier_out, int *num_procs)
{
#ifdef _OPENMP
  //int tnum = omp_get_thread_num();
  // Barrrier arrive
#pragma omp critical
  {
    barrier_in[bar_num]++; // Announce you arrived at barrier
    //__PRINTF("[%d] Reached barrier %d (%d,%d) \n", tnum, bar_num, barrier_in[bar_num], *num_procs);
  }
  // Barrier spin
  while(barrier_in[bar_num] < *num_procs) {
#pragma omp flush
  };
  // Barrier depart
#pragma omp critical
  {
    barrier_out[bar_num]++; // Anounce you passed barrier
    //__PRINTF("[%d] Passed barrier %d (%d,%d) \n", tnum, bar_num, barrier_out[bar_num], *num_procs);
  }
  // Barrier reset: whichever thread sees out==in last resets both
  // counters so the same bar_num slot can be reused next round.
#pragma omp critical
  {
    if (barrier_out[bar_num] == barrier_in[bar_num])
    {
      //__PRINTF("[%d] Everybody passed barrier %d. \n",tnum, bar_num);
      barrier_in[bar_num] = 0;
      barrier_out[bar_num] = 0;
    }
  }
#endif
}
// ******************************** compute_rsrc_direct ******************
// ***********************************************************************
// Transpose vector
// Convert an N-by-3 column-major array (legacy/MATLAB layout, in[i + j*N])
// into a row-major 3*N array (out[i*3 + j]). in and out must not alias
// (guaranteed by restrict).
void transpose(const double* restrict in, double* restrict out, const int N)
{
    for(int comp = 0; comp < 3; comp++)
    {
        // Walk one source column contiguously, scattering into rows.
        const double* col = in + comp * N;
        for(int row = 0; row < N; row++)
            out[row * 3 + comp] = col[row];
    }
}
// Empty buffer used in direct computation
// Flush a buffer of pending pair interactions (matrix-free path):
// first evaluates the delayed C/D kernel factors for all buffered squared
// distances, then applies each symmetric interaction, accumulating the
// source-side contribution into phi_idx_s and scattering the target-side
// contributions directly into phi.
static void compute_buffer_direct(
				  double* restrict C,
				  double* restrict D,
				  double* restrict buf_rsq,
				  const int buf_cnt,
				  const double xi,
				  const int* restrict buf_idx_t,
				  const double* restrict buf_xr,
				  const double* restrict nvec,
				  const double* restrict fvec,
				  double* restrict ns,
				  double* restrict fs,
				  double* restrict phi,
				  double* restrict phi_idx_s)
{
    // Delayed evaluation of kernel factors for every buffered distance
    op_A_CD(C, D, buf_rsq, buf_cnt, xi);
    // Apply each buffered interaction
    for(int b = 0; b < buf_cnt; b++)
    {
        const int t = buf_idx_t[b];
        double rvec[3], nt[3], ft[3];
        for(int k = 0; k < 3; k++)
        {
            rvec[k] = buf_xr[3*b + k];   // separation vector s->t
            nt[k]   = nvec[t*3 + k];     // target point normal vector
            ft[k]   = fvec[t*3 + k];     // target point distribution density
        }
        // Symmetric operator adds the s-side to phi_idx_s and the t-side to phi_t
        double phi_t[3] = {0.0, 0.0, 0.0};
        op_A_comp_symm_CD(rvec, phi_idx_s, phi_t, ns, nt, fs, ft, xi, C[b], D[b]);
        for(int k = 0; k < 3; k++)
            phi[t*3 + k] += phi_t[k];
    }
}
// ==== Compute result directly
// Do not build sparse matrix
// ==== Compute result directly
// Do not build sparse matrix
// Matrix-free evaluation: for each point, accumulate the real-space
// kernel contributions of all neighbours within rc (found via a cell
// list) straight into the output vector phi, instead of assembling
// sparse-matrix triplets. Kernel semantics live in op_A_CD /
// op_A_comp_symm_CD (defined elsewhere in this file).
// Inputs are N-by-3 column-major arrays (legacy layout); they are
// transposed to row-major on entry and the result is transposed back
// before being returned through *phi_p (caller owns the allocation).
void compute_rsrc_direct (const double* restrict x_in,
			  const double* restrict nvec_in,
			  const double* restrict fvec_in,
			  const int N,
			  const double* restrict box,
			  const double xi,
			  const double rc,
			  double* restrict *phi_p
			  )
{
  struct timeval tic, toc;
  gettimeofday(&tic, NULL);
  double time_spent;
  // Fix input (legacy format gives bad memory access)
  // Transpose column-major inputs to row-major for cache-friendly access
  double* restrict x = __MALLOC(3*N*sizeof(double));
  double* restrict nvec = __MALLOC(3*N*sizeof(double));
  double* restrict fvec = __MALLOC(3*N*sizeof(double));
  transpose(x_in, x, N);
  transpose(fvec_in, fvec, N);
  transpose(nvec_in, nvec, N);
  gettimeofday(&toc, NULL);
  double time_tr = DELTA(tic,toc);
  // Setup output (zero-initialized global accumulator)
  double* restrict phi_out = __MALLOC(3*N*sizeof(double));
  for(int i=0;i<3*N;i++)
    phi_out[i] = 0.0;
  // Setup variables
  int ncell[3];
  int* restrict cell_list;
  int* restrict cell_idx;
  double rn;
  // Offsets of the 27 neighbour cells (3x3x3 stencil incl. home cell)
  int px[27] = {-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1};
  int py[27] = {-1,-1,-1, 0, 0, 0, 1, 1, 1,-1,-1,-1, 0, 0, 0, 1, 1, 1,-1,-1,-1, 0, 0, 0, 1, 1, 1};
  int pz[27] = {-1,-1,-1,-1,-1,-1,-1,-1,-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1};
  // Build cell list (rn >= rc, so neighbours within rc lie in the stencil)
  gettimeofday(&tic, NULL);
  build_cell_list(x, N, box, rc, &rn, ncell, &cell_list, &cell_idx);
  gettimeofday(&toc, NULL);
  time_spent = DELTA(tic,toc);
  if(VERBOSE)
  {
    __PRINTF("[RSRC] MATRIX-FREE\n");
    __PRINTF("[RSRC] %s, xi=%g\n", OP_TAG, xi);
    __PRINTF("[RSRC] rc=%.3f, rn=%.3f\n", rc, rn);
    __PRINTF("[RSRC] box=(%g,%g,%g), ncell=(%d,%d,%d)\n",
	     box[0],box[1],box[2],
	     ncell[0],ncell[1],ncell[2]);
    __PRINTF("[RSRC] Cell list built in %.3f seconds.\n", time_spent);
  }
  gettimeofday(&tic, NULL);
#ifdef _OPENMP
#pragma omp parallel \
  shared(phi_out,box,x,nvec,fvec,cell_list,cell_idx, \
	 px,py,pz,ncell,rn) \
  default(none)
#endif
  { // Begin parallel section
    // Setup local output: each thread accumulates into a private phi
    // and reduces into phi_out at the end (avoids contention in the loop).
    double* restrict phi;
    // CRITICAL: free/malloc are not thread safe under MEX (see below)
    CRITICAL {
      phi = __MALLOC(3*N*sizeof(double));
    }
    for(int i=0;i<3*N;i++)
      phi[i] = 0.0;
    int i,j;
    int icell_idx;
    int icell[3], home_cell[3];
    int idx_s,idx_t,ip;
    double rsq;
    double pshift[3], xs[3], ns[3], fs[3], xr[3];
    const double rcsq = rc*rc;
    // Allocate a bufffer of interactions to be written
    // into triplet list (here: batched for the delayed kernel evaluation)
    const int buf_size = 256;
    int buf_cnt = 0;
    int buf_idx_t[buf_size];
    double buf_xr[3*buf_size];
    double buf_rsq[buf_size];
    double C[buf_size];
    double D[buf_size];
    int num_procs = 1;
#ifdef _OPENMP
    num_procs = omp_get_num_threads();
    if(VERBOSE)
    {
#pragma omp master
      __PRINTF("[RSRC] Running on %d threads.\n",num_procs);
    }
#pragma omp for schedule(dynamic) nowait
#endif
    // Loop over all points (work-shared)
    for(idx_s=0;idx_s<N;idx_s++)
    {
      // Per-source accumulator; added to phi once per source point
      double phi_idx_s[3] = {0.0, 0.0, 0.0};
      for(i=0; i<3; i++)
      {
	// Source point
	xs[i] = x[idx_s*3+i];
	// Source point normal vector
	ns[i] = nvec[idx_s*3+i];
	// Source point distribution density
	fs[i] = fvec[idx_s*3+i];
	// Determine home cell
	home_cell[i] = xs[i]/rn;
      }
      // Iterate through near cells (including home cell)
      for(ip=0; ip<27; ip++)
      {
	// Get neigh cell
	icell[0] = home_cell[0] + px[ip];
	icell[1] = home_cell[1] + py[ip];
	icell[2] = home_cell[2] + pz[ip];
	// Periodic wrap
	for(j=0; j<3; j++)
	{
	  // (Could do this with mod)
	  // pshift is the image shift applied to the target coordinate
	  pshift[j] = 0;
	  if(icell[j] >= ncell[j])
	  {
	    icell[j] = 0;
	    pshift[j] = box[j];
	  }
	  else if(icell[j]<0)
	  {
	    icell[j] = ncell[j]-1;
	    pshift[j] = -box[j];
	  }
	}
	// Linear index of the neighbour cell
	icell_idx =
	  icell[0] +
	  icell[1]*ncell[0] +
	  icell[2]*ncell[1]*ncell[0];
	// Go through cell list (points of this cell are
	// cell_list[cell_idx[c] .. cell_idx[c+1]-1])
	int cell_a = cell_idx[icell_idx];
	int cell_b = cell_idx[icell_idx+1];
	for(int point_idx=cell_a; point_idx<cell_b; point_idx++)
	{
	  idx_t = cell_list[point_idx];
	  // Only process each unordered pair once; the symmetric
	  // operator writes both s- and t-contributions.
	  if(idx_t > idx_s)
	  {
	    // r points from s to t
	    for(j=0; j<3; j++)
	      xr[j] = x[idx_t*3+j] + pshift[j] - xs[j];
	    // Check if we are within truncation radius
	    rsq = xr[0]*xr[0] + xr[1]*xr[1] + xr[2]*xr[2];
	    if(rsq <= rcsq)
	    {
	      // Yes, so put interaction in buffer
	      buf_idx_t[buf_cnt] = idx_t;
	      buf_rsq[buf_cnt] = rsq;
	      for(i=0;i<3;i++)
		buf_xr[3*buf_cnt+i] = xr[i];
	      buf_cnt++;
	    }
	  }
	  // Empty buffer if full
	  if (buf_cnt==buf_size)
	  {
	    compute_buffer_direct(C,D,buf_rsq,buf_cnt,xi,buf_idx_t,buf_xr,nvec,fvec,ns,fs,phi,phi_idx_s);
	    buf_cnt = 0;
	  }
	} // End of neighbours in this cell
      } // End of cells
      // Empty buffer before writing phi_s
      compute_buffer_direct(C,D,buf_rsq,buf_cnt,xi,buf_idx_t,buf_xr,nvec,fvec,ns,fs,phi,phi_idx_s);
      buf_cnt = 0;
      // Save additions to point s
      for(int i=0; i<3; i++)
	phi[idx_s*3+i] += phi_idx_s[i];
    } // End of particles
#ifdef _OPENMP
    // Yes, this reduction is probably crap HPC-wise,
    // but it works well on my quad core right now.
    // (Every thread atomically adds its private phi into phi_out.)
    struct timeval tic_red, toc_red;
#pragma omp master
    gettimeofday(&tic_red, NULL);
    for(i=0; i<3*N; i++)
    {
#pragma omp atomic
      phi_out[i] += phi[i];
    }
#pragma omp master
    {
      gettimeofday(&toc_red, NULL);
      double time_spent = DELTA(tic_red,toc_red);
      if(VERBOSE)
	__PRINTF("[RSRC] Reduction took %.3f seconds.\n", time_spent);
    }
    // free/malloc not thread safe under MEX
    CRITICAL {
      __FREE(phi);
    }
#else
    // Serial build: the thread-local phi IS the result; swap it in.
    __FREE(phi_out);
    phi_out = phi;
#endif
  } // End parallel section
  gettimeofday(&toc, NULL);
  time_spent = DELTA(tic,toc);
  gettimeofday(&tic, NULL);
  __FREE(cell_list);
  __FREE(cell_idx);
  __FREE(x);
  __FREE(nvec);
  __FREE(fvec);
  // Transpose result back to the caller's column-major layout
  double* restrict phi_tr = __MALLOC(3*N*sizeof(double));
  for(int i=0; i<N; i++)
  {
    for(int j=0; j<3; j++)
    {
      phi_tr[i+j*N] = phi_out[i*3+j];
    }
  }
  __FREE(phi_out);
  gettimeofday(&toc, NULL);
  time_tr += DELTA(tic,toc);
  if(VERBOSE)
  {
    __PRINTF("[RSRC] Transpose time: %.3f seconds.\n", time_tr);
    __PRINTF("[RSRC] phi computed in %.3f seconds.\n", time_spent);
  }
  *phi_p = phi_tr;
}
|
14_vector_cross_product_size_n.c | /*
Program : 14
Author : Debottam
Topic : Write a C program using OpenMP features to find the cross product
of two vectors of size n each in constant time complexity.
           [Hint: Cross product C[i] = A[(i+1)%n]*B[(i+2)%n] - A[(i+2)%n]*B[(i+1)%n]]
*/
#include <stdio.h>
#include <omp.h>
#define N 3
/* Computes the 3-D vector cross product C = A x B, with each component
 * evaluated by an independent loop iteration so the iterations can run
 * in parallel (one thread per component when enough threads exist).
 * Formula: C[i] = A[(i+1)%N]*B[(i+2)%N] - A[(i+2)%N]*B[(i+1)%N].
 * Fix: removed the unused local variable D. */
int main()
{
    int A[] = {3, -5, 4}, i;
    int B[] = {2, 6, 5}, C[N];
    /* Use as many threads as there are processors */
    int m = omp_get_num_procs();
    omp_set_num_threads(m);
    /* Each component of C depends only on A and B, so iterations are
     * independent and safe to parallelize. */
    #pragma omp parallel for shared(C) private(i)
    for (i = 0; i < N; i++)
    {
        C[i] = A[(i+1)%N]*B[(i+2)%N] - A[(i+2)%N]*B[(i+1)%N];
    }
    printf("Cross product, C = ");
    for (i = 0; i < N; i++)
        printf("%d\t", C[i]);
    printf("\n");
    return 0;
}
irbuilder_unroll_partial_heuristic.c | // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-enable-irbuilder -verify -fopenmp -fopenmp-version=51 -x c -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
// CHECK-LABEL: define {{.*}}@unroll_partial_heuristic(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[A_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[B_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[C_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[D_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[I:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[AGG_CAPTURED:.+]] = alloca %struct.anon, align 8
// CHECK-NEXT: %[[AGG_CAPTURED1:.+]] = alloca %struct.anon.0, align 4
// CHECK-NEXT: %[[DOTCOUNT_ADDR:.+]] = alloca i32, align 4
// CHECK-NEXT: store float* %[[A:.+]], float** %[[A_ADDR]], align 8
// CHECK-NEXT: store float* %[[B:.+]], float** %[[B_ADDR]], align 8
// CHECK-NEXT: store float* %[[C:.+]], float** %[[C_ADDR]], align 8
// CHECK-NEXT: store float* %[[D:.+]], float** %[[D_ADDR]], align 8
// CHECK-NEXT: store i32 0, i32* %[[I]], align 4
// CHECK-NEXT: %[[TMP0:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[AGG_CAPTURED]], i32 0, i32 0
// CHECK-NEXT: store i32* %[[I]], i32** %[[TMP0]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[AGG_CAPTURED1]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: store i32 %[[TMP2]], i32* %[[TMP1]], align 4
// CHECK-NEXT: call void @__captured_stmt(i32* %[[DOTCOUNT_ADDR]], %struct.anon* %[[AGG_CAPTURED]])
// CHECK-NEXT: %[[DOTCOUNT:.+]] = load i32, i32* %[[DOTCOUNT_ADDR]], align 4
// CHECK-NEXT: br label %[[OMP_LOOP_PREHEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_PREHEADER]]:
// CHECK-NEXT: br label %[[OMP_LOOP_HEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_HEADER]]:
// CHECK-NEXT: %[[OMP_LOOP_IV:.+]] = phi i32 [ 0, %[[OMP_LOOP_PREHEADER]] ], [ %[[OMP_LOOP_NEXT:.+]], %[[OMP_LOOP_INC:.+]] ]
// CHECK-NEXT: br label %[[OMP_LOOP_COND:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_COND]]:
// CHECK-NEXT: %[[OMP_LOOP_CMP:.+]] = icmp ult i32 %[[OMP_LOOP_IV]], %[[DOTCOUNT]]
// CHECK-NEXT: br i1 %[[OMP_LOOP_CMP]], label %[[OMP_LOOP_BODY:.+]], label %[[OMP_LOOP_EXIT:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_BODY]]:
// CHECK-NEXT: call void @__captured_stmt.1(i32* %[[I]], i32 %[[OMP_LOOP_IV]], %struct.anon.0* %[[AGG_CAPTURED1]])
// CHECK-NEXT: %[[TMP3:.+]] = load float*, float** %[[B_ADDR]], align 8
// CHECK-NEXT: %[[TMP4:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM:.+]] = sext i32 %[[TMP4]] to i64
// CHECK-NEXT: %[[ARRAYIDX:.+]] = getelementptr inbounds float, float* %[[TMP3]], i64 %[[IDXPROM]]
// CHECK-NEXT: %[[TMP5:.+]] = load float, float* %[[ARRAYIDX]], align 4
// CHECK-NEXT: %[[TMP6:.+]] = load float*, float** %[[C_ADDR]], align 8
// CHECK-NEXT: %[[TMP7:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM2:.+]] = sext i32 %[[TMP7]] to i64
// CHECK-NEXT: %[[ARRAYIDX3:.+]] = getelementptr inbounds float, float* %[[TMP6]], i64 %[[IDXPROM2]]
// CHECK-NEXT: %[[TMP8:.+]] = load float, float* %[[ARRAYIDX3]], align 4
// CHECK-NEXT: %[[MUL:.+]] = fmul float %[[TMP5]], %[[TMP8]]
// CHECK-NEXT: %[[TMP9:.+]] = load float*, float** %[[D_ADDR]], align 8
// CHECK-NEXT: %[[TMP10:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM4:.+]] = sext i32 %[[TMP10]] to i64
// CHECK-NEXT: %[[ARRAYIDX5:.+]] = getelementptr inbounds float, float* %[[TMP9]], i64 %[[IDXPROM4]]
// CHECK-NEXT: %[[TMP11:.+]] = load float, float* %[[ARRAYIDX5]], align 4
// CHECK-NEXT: %[[MUL6:.+]] = fmul float %[[MUL]], %[[TMP11]]
// CHECK-NEXT: %[[TMP12:.+]] = load float*, float** %[[A_ADDR]], align 8
// CHECK-NEXT: %[[TMP13:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM7:.+]] = sext i32 %[[TMP13]] to i64
// CHECK-NEXT: %[[ARRAYIDX8:.+]] = getelementptr inbounds float, float* %[[TMP12]], i64 %[[IDXPROM7]]
// CHECK-NEXT: store float %[[MUL6]], float* %[[ARRAYIDX8]], align 4
// CHECK-NEXT: br label %[[OMP_LOOP_INC]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_INC]]:
// CHECK-NEXT: %[[OMP_LOOP_NEXT]] = add nuw i32 %[[OMP_LOOP_IV]], 1
// CHECK-NEXT: br label %[[OMP_LOOP_HEADER]], !llvm.loop ![[LOOP3:[0-9]+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_EXIT]]:
// CHECK-NEXT: br label %[[OMP_LOOP_AFTER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_AFTER]]:
// CHECK-NEXT: ret void
// CHECK-NEXT: }
void unroll_partial_heuristic(float *a, float *b, float *c, float *d) {
  // No unroll factor given: the OpenMPIRBuilder attaches
  // llvm.loop.unroll.enable so LLVM picks the factor heuristically
  // (see the CHECK lines for LOOP3/LOOPPROP4 above).
#pragma omp unroll partial
  for (int i = 0; i < 2; i++) {
    a[i] = b[i] * c[i] * d[i];
  }
}
#endif // HEADER
// CHECK-LABEL: define {{.*}}@__captured_stmt(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[DISTANCE_ADDR:.+]] = alloca i32*, align 8
// CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon*, align 8
// CHECK-NEXT: %[[DOTSTART:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[DOTSTOP:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[DOTSTEP:.+]] = alloca i32, align 4
// CHECK-NEXT: store i32* %[[DISTANCE:.+]], i32** %[[DISTANCE_ADDR]], align 8
// CHECK-NEXT: store %struct.anon* %[[__CONTEXT:.+]], %struct.anon** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon*, %struct.anon** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[TMP0]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32*, i32** %[[TMP1]], align 8
// CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[TMP2]], align 4
// CHECK-NEXT: store i32 %[[TMP3]], i32* %[[DOTSTART]], align 4
// CHECK-NEXT: store i32 2, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: store i32 1, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[TMP4:.+]] = load i32, i32* %[[DOTSTART]], align 4
// CHECK-NEXT: %[[TMP5:.+]] = load i32, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: %[[CMP:.+]] = icmp slt i32 %[[TMP4]], %[[TMP5]]
// CHECK-NEXT: br i1 %[[CMP]], label %[[COND_TRUE:.+]], label %[[COND_FALSE:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_TRUE]]:
// CHECK-NEXT: %[[TMP6:.+]] = load i32, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: %[[TMP7:.+]] = load i32, i32* %[[DOTSTART]], align 4
// CHECK-NEXT: %[[SUB:.+]] = sub nsw i32 %[[TMP6]], %[[TMP7]]
// CHECK-NEXT: %[[TMP8:.+]] = load i32, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[SUB1:.+]] = sub i32 %[[TMP8]], 1
// CHECK-NEXT: %[[ADD:.+]] = add i32 %[[SUB]], %[[SUB1]]
// CHECK-NEXT: %[[TMP9:.+]] = load i32, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[DIV:.+]] = udiv i32 %[[ADD]], %[[TMP9]]
// CHECK-NEXT: br label %[[COND_END:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_FALSE]]:
// CHECK-NEXT: br label %[[COND_END]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_END]]:
// CHECK-NEXT: %[[COND:.+]] = phi i32 [ %[[DIV]], %[[COND_TRUE]] ], [ 0, %[[COND_FALSE]] ]
// CHECK-NEXT: %[[TMP10:.+]] = load i32*, i32** %[[DISTANCE_ADDR]], align 8
// CHECK-NEXT: store i32 %[[COND]], i32* %[[TMP10]], align 4
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK-LABEL: define {{.*}}@__captured_stmt.1(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[LOOPVAR_ADDR:.+]] = alloca i32*, align 8
// CHECK-NEXT: %[[LOGICAL_ADDR:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon.0*, align 8
// CHECK-NEXT: store i32* %[[LOOPVAR:.+]], i32** %[[LOOPVAR_ADDR]], align 8
// CHECK-NEXT: store i32 %[[LOGICAL:.+]], i32* %[[LOGICAL_ADDR]], align 4
// CHECK-NEXT: store %struct.anon.0* %[[__CONTEXT:.+]], %struct.anon.0** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon.0*, %struct.anon.0** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[TMP0]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[TMP1]], align 4
// CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[LOGICAL_ADDR]], align 4
// CHECK-NEXT: %[[MUL:.+]] = mul i32 1, %[[TMP3]]
// CHECK-NEXT: %[[ADD:.+]] = add i32 %[[TMP2]], %[[MUL]]
// CHECK-NEXT: %[[TMP4:.+]] = load i32*, i32** %[[LOOPVAR_ADDR]], align 8
// CHECK-NEXT: store i32 %[[ADD]], i32* %[[TMP4]], align 4
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK: ![[META0:[0-9]+]] = !{i32 1, !"wchar_size", i32 4}
// CHECK: ![[META1:[0-9]+]] = !{i32 7, !"openmp", i32 51}
// CHECK: ![[META2:[0-9]+]] =
// CHECK: ![[LOOP3]] = distinct !{![[LOOP3]], ![[LOOPPROP4:[0-9]+]]}
// CHECK: ![[LOOPPROP4]] = !{!"llvm.loop.unroll.enable"}
|
oyranos_cmm_oyra_image_expose.c | /** @file oyranos_cmm_oyra_image_expose.c
*
* Oyranos is an open source Color Management System
*
* @par Copyright:
* 2016 (C) Kai-Uwe Behrmann
*
* @brief expose module for Oyranos
* @internal
* @author Kai-Uwe Behrmann <ku.b@gmx.de>
* @par License:
* new BSD <http://www.opensource.org/licenses/BSD-3-Clause>
* @since 2016/04/11
*/
#include "oyCMMapi4_s.h"
#include "oyCMMapi7_s.h"
#include "oyCMMui_s.h"
#include "oyConnectorImaging_s.h"
#include "oyRectangle_s.h"
#include "oyRectangle_s_.h"
#include "oyranos_cmm.h"
#include "oyranos_cmm_oyra.h"
#include "oyranos_helper.h"
#include "oyranos_i18n.h"
#include "oyranos_string.h"
#include <math.h>
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#ifdef HAVE_POSIX
#include <stdint.h> /* UINT32_MAX */
#endif
/* OY_IMAGE_EXPOSE_REGISTRATION */
/* OY_IMAGE_EXPOSE_REGISTRATION ----------------------------------------------*/
/** Scale the color channels in @em c by @em expose, clipping against
 *  range_max while trying to preserve hue: if only the largest channel
 *  would overflow, the middle channel is rescaled proportionally between
 *  the exposed minimum and range_max instead of being clipped flat.
 *  For Lab and YCbCr only the first (lightness/luma) channel is touched.
 *  Fix: max_pos/min_pos are now initialized — previously they could be
 *  read uninitialized (undefined behavior) when no channel exceeded the
 *  initial extrema, e.g. when all channels equal 0 or range_max.
 *  NOTE(review): the mid_pos selection assumes exactly 3 channels
 *  (RGB/XYZ); confirm callers never reach the n>1 branch with n!=3. */
void oySensibleClip ( double * c, icColorSpaceSignature sig, int range_max, double expose )
{
  int max = 0, max_pos = 0,
      mid,     mid_pos,
      min = range_max, min_pos = 0,
      i,
      n = oyICCColorSpaceGetChannelCount(sig);

  if(sig == icSigLabData ||
     sig == icSigYCbCrData)
    n = 1;

  /* locate extrema (values truncated to int for the comparison) */
  for(i = 0; i < n; ++i)
  {
    if(max < c[i]) { max = c[i]; max_pos = i; }
    if(min > c[i]) { min = c[i]; min_pos = i; }
  }

  if( min * expose > range_max)
    /* even the smallest channel overflows: clip everything */
    for(i = 0; i < n; ++i)
      c[i] = range_max;
  else if(max * expose <= range_max)
    /* nothing overflows: plain scaling */
    for(i = 0; i < n; ++i)
      c[i] *= expose;
  else if(n > 1)
  {
    /* only the largest channel overflows: clip it to range_max and
     * place the middle channel proportionally between the exposed
     * minimum and range_max to keep the relative channel ordering */
    double exposed_min = min * expose;
    double mid_part;
    double exposed_mid;
    mid_pos = min_pos != 0 && max_pos != 0 ? 0 : min_pos != 1 && max_pos != 1 ? 1 : 2;
    mid = c[mid_pos];
    mid_part = (double)( mid - min )/(double)( max - min );
    c[min_pos] = exposed_min + 0.5; /* +0.5 rounds on later int conversion */
    exposed_mid = exposed_min + mid_part * (range_max - exposed_min);
    c[mid_pos] = exposed_mid + 0.5;
    c[max_pos] = range_max;
  }
}
/** @func oyraFilter_ImageExposeRun
* @brief implement oyCMMFilter_GetNext_f()
*
* @version Oyranos: 0.9.6
* @date 2016/04/04
* @since 2013/06/10 (Oyranos: 0.9.5)
*/
/* Pull pixels from the upstream node, then multiply the samples of the
 * ticket's output array by the node's "expose" option. For 3+-channel
 * RGB/XYZ/Lab/YCbCr data the scaling is hue-preserving via
 * oySensibleClip(); other layouts scale each channel independently with
 * saturation for the integer types.
 * Fixes: (1) a 32-bit sample was byte-swapped with oyByteSwapUInt16
 * instead of oyByteSwapUInt32, corrupting byte-swapped uint32 images;
 * (2) removed an inner re-declaration of ticket_roi that shadowed the
 * outer variable and leaked its reference (it was never released). */
int oyraFilter_ImageExposeRun ( oyFilterPlug_s * requestor_plug,
                                oyPixelAccess_s * ticket )
{
  int result = 0, error = 0;
  oyFilterSocket_s * socket = 0;
  oyFilterNode_s * input_node = 0,
                 * node = 0;
  oyFilterPlug_s * plug = 0;
  oyImage_s * image = 0;
  int dirty = 0;

  socket = oyFilterPlug_GetSocket( requestor_plug );
  node = oyFilterSocket_GetNode( socket );
  image = (oyImage_s*)oyFilterSocket_GetData( socket );

  if(!image)
  {
    result = 1;
    goto clean_expose1;
  }

  if(oy_debug)
    oyra_msg( oyMSG_WARN, (oyStruct_s*)ticket, OY_DBG_FORMAT_
              "image [%d](%d)\n",OY_DBG_ARGS_,oyStruct_GetId((oyStruct_s*)image),oyImage_GetWidth(image) );

  {
    oyRectangle_s * ticket_roi = oyPixelAccess_GetArrayROI( ticket );
    double expose = 1.0;
    oyOptions_s * node_opts = oyFilterNode_GetOptions( node, 0 );

    if(!node_opts)
      dirty = 1;

    if(dirty)
    {
      result = dirty;
      goto clean_expose2;
    }

    plug = oyFilterNode_GetPlug( node, 0 );

    /* select node */
    input_node = oyFilterNode_GetPlugNode( node, 0 );

    /* find filters own expose factor */
    error = oyOptions_FindDouble( node_opts,
                                  "//" OY_TYPE_STD "/expose/expose",
                                  0, &expose );
    if(error) WARNc2_S("%s %d", _("found issues"),error);

    if(oy_debug > 2)
      oyra_msg( oyMSG_WARN, (oyStruct_s*)ticket, OY_DBG_FORMAT_
                "%s expose: %f",OY_DBG_ARGS_, oyPixelAccess_Show(ticket), expose);

    if(expose != 1.0)
    {
      oyImage_s * output_image = oyPixelAccess_GetOutputImage( ticket );
      oyArray2d_s * array_out = oyPixelAccess_GetArray( ticket );
      oyProfile_s * p = oyImage_GetProfile( output_image );
      icColorSpaceSignature sig = oyProfile_GetSignature( p, oySIGNATURE_COLOR_SPACE );
      int layout_dst = oyImage_GetPixelLayout( output_image, oyLAYOUT );
      int channels_dst = oyToChannels_m( layout_dst );
      int byte_swap = oyToByteswap_m( layout_dst );
      int ticket_array_pix_width;

      /* avoid division by zero */
      if(!channels_dst) channels_dst = 1;

      ticket_array_pix_width = oyArray2d_GetWidth( array_out ) / channels_dst;

      {
        int w,h,x,y, i, start_x,start_y;
        unsigned int max = 1;
        oyRectangle_s_ roi_= {oyOBJECT_RECTANGLE_S,0,0,0, 0,0,0,0};
        oyRectangle_s * roi = (oyRectangle_s*)&roi_;
        uint8_t ** array_out_data;
        /* get pixel layout infos for copying */
        oyDATATYPE_e data_type_out = oyToDataType_m( layout_dst );
        int bps_out = oyDataTypeGetSize( data_type_out );

        /* get the source pixels */
        result = oyFilterNode_Run( input_node, plug, ticket );

        /* get the channel buffers */
        array_out_data = oyArray2d_GetData( array_out );
        w = oyArray2d_GetWidth( array_out ) / channels_dst;
        h = oyArray2d_GetHeight( array_out );

        /* map the ticket ROI (relative coordinates) to array pixels */
        oyRectangle_SetByRectangle( roi, ticket_roi );
        oyRectangle_Scale( roi, ticket_array_pix_width );
        start_x = OY_ROUND(roi_.x);
        start_y = OY_ROUND(roi_.y);

        /* clipping ceiling per sample type; float types clip at 1.0 */
        switch(data_type_out)
        {
          case oyUINT8:  max = 255; break;
          case oyUINT16: max = 65535; break;
          case oyUINT32: max = UINT32_MAX; break;
          default: break;
        }

        /* expose the samples; rows are independent */
#if defined(USE_OPENMP)
#pragma omp parallel for private(x,y,i)
#endif
        for(y = start_y; y < h; ++y)
        {
          for(x = start_x; x < w; ++x)
          {
            if( (sig == icSigRgbData ||
                 sig == icSigXYZData ||
                 sig == icSigLabData ||
                 sig == icSigYCbCrData)
                && channels_dst >= 3)
            {
              /* hue-preserving path: read 3 channels, clip sensibly, write back */
              double rgb[3], v;
              for(i = 0; i < 3; ++i)
              {
                switch(data_type_out)
                {
                  case oyUINT8:
                       rgb[i] = array_out_data[y][x*channels_dst*bps_out + i*bps_out];
                       break;
                  case oyUINT16:
                       {
                         uint16_t v = *((uint16_t*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]);
                         if(byte_swap) v = oyByteSwapUInt16(v);
                         rgb[i] = v;
                       }
                       break;
                  case oyUINT32:
                       {
                         uint32_t v = *((uint32_t*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]);
                         if(byte_swap) v = oyByteSwapUInt32(v);
                         rgb[i] = v;
                       }
                       break;
                  case oyHALF:
                       v = *((uint16_t*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]);
                       rgb[i] = v;
                       break;
                  case oyFLOAT:
                       v = *((float*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]);
                       rgb[i] = v;
                       break;
                  case oyDOUBLE:
                       v = *((double*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]);
                       rgb[i] = v;
                       break;
                }
              }

              oySensibleClip ( rgb, sig, max, expose );

              for(i = 0; i < 3; ++i)
              {
                v = rgb[i];
                switch(data_type_out)
                {
                  case oyUINT8:
                       array_out_data[y][x*channels_dst*bps_out + i*bps_out] = v;
                       break;
                  case oyUINT16:
                       { uint16_t u16 = v;
                         *((uint16_t*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]) = byte_swap ? oyByteSwapUInt16(u16) : u16;
                       }
                       break;
                  case oyUINT32:
                       { uint32_t u32 = v;
                         /* was oyByteSwapUInt16(u32): wrong swap width for 32-bit */
                         *((uint32_t*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]) = byte_swap ? oyByteSwapUInt32(u32) : u32;
                       }
                       break;
                  case oyHALF:
                       *((uint16_t*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]) = v;
                       break;
                  case oyFLOAT:
                       *((float*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]) = v;
                       break;
                  case oyDOUBLE:
                       *((double*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]) = v;
                       break;
                }
              }
            }
            else
              /* per-channel scaling with saturation for integer types */
              for(i = 0; i < channels_dst; ++i)
              {
                int v;
                switch(data_type_out)
                {
                  case oyUINT8:
                       v = array_out_data[y][x*channels_dst*bps_out + i*bps_out] * expose;
                       if(v > 255) v = 255;
                       array_out_data[y][x*channels_dst*bps_out + i*bps_out] = v;
                       break;
                  case oyUINT16:
                       v = *((uint16_t*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]);
                       if(byte_swap) v = oyByteSwapUInt16(v);
                       v *= expose;
                       if(v > 65535) v = 65535;
                       *((uint16_t*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]) = byte_swap ? oyByteSwapUInt16(v) : v;
                       break;
                  case oyUINT32:
                       *((uint32_t*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]) *= expose;
                       break;
                  case oyHALF:
                       *((uint16_t*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]) *= expose;
                       break;
                  case oyFLOAT:
                       *((float*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]) *= expose;
                       break;
                  case oyDOUBLE:
                       *((double*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]) *= expose;
                       break;
                }
              }
          }
        }
      }
      oyArray2d_Release( &array_out );
      oyImage_Release( &output_image );
      oyProfile_Release( &p );
    } else /* expose == 1.0 */
    {
      result = oyFilterNode_Run( input_node, plug, ticket );
    }

    clean_expose2:
    oyOptions_Release( &node_opts );
    oyFilterPlug_Release( &plug );
    oyRectangle_Release( &ticket_roi );
    oyFilterNode_Release( &input_node );
  }

  clean_expose1:
  oyImage_Release( &image );
  oyFilterSocket_Release( &socket );
  oyFilterNode_Release( &node );

  return result;
}
#define OY_IMAGE_EXPOSE_REGISTRATION OY_TOP_SHARED OY_SLASH OY_DOMAIN_INTERNAL OY_SLASH OY_TYPE_STD OY_SLASH "expose"
/** @instance oyra_api7
* @brief oyra oyCMMapi7_s implementation
*
* a filter providing a expose image filter
*
* @version Oyranos: 0.9.5
* @since 2013/06/14 (Oyranos: 0.9.5)
* @date 2013/06/14
*/
oyCMMapi_s * oyraApi7ImageExposeCreate(void)
{
oyCMMapi7_s * expose7;
int32_t cmm_version[3] = {OYRANOS_VERSION_A,OYRANOS_VERSION_B,OYRANOS_VERSION_C},
module_api[3] = {OYRANOS_VERSION_A,OYRANOS_VERSION_B,OYRANOS_VERSION_C};
static oyDATATYPE_e data_types[7] = {oyUINT8, oyUINT16, oyUINT32,
oyHALF, oyFLOAT, oyDOUBLE, 0};
oyConnectorImaging_s * plug = oyConnectorImaging_New(0),
* socket = oyConnectorImaging_New(0);
static oyConnectorImaging_s * plugs[2] = {0,0},
* sockets[2] = {0,0};
plugs[0] = plug;
sockets[0] = socket;
oyConnectorImaging_SetDataTypes( plug, data_types, 6 );
oyConnectorImaging_SetReg( plug, "//" OY_TYPE_STD "/manipulator.data" );
oyConnectorImaging_SetMatch( plug, oyFilterSocket_MatchImagingPlug );
oyConnectorImaging_SetTexts( plug, oyCMMgetImageConnectorPlugText,
oy_image_connector_texts );
oyConnectorImaging_SetIsPlug( plug, 1 );
oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_MAX_COLOR_OFFSET, -1 );
oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_MIN_CHANNELS_COUNT, 1 );
oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_MAX_CHANNELS_COUNT, 255 );
oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_MIN_COLOR_COUNT, 1 );
oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_MAX_COLOR_COUNT, 255 );
oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_CAN_INTERWOVEN, 1 );
oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_CAN_PREMULTIPLIED_ALPHA, 1 );
oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_CAN_NONPREMULTIPLIED_ALPHA, 1 );
oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_ID, 1 );
oyConnectorImaging_SetDataTypes( socket, data_types, 6 );
oyConnectorImaging_SetReg( socket, "//" OY_TYPE_STD "/manipulator.data" );
oyConnectorImaging_SetMatch( socket, oyFilterSocket_MatchImagingPlug );
oyConnectorImaging_SetTexts( socket, oyCMMgetImageConnectorSocketText,
oy_image_connector_texts );
oyConnectorImaging_SetIsPlug( socket, 0 );
oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_MAX_COLOR_OFFSET, -1 );
oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_MIN_CHANNELS_COUNT, 1 );
oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_MAX_CHANNELS_COUNT, 255 );
oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_MIN_COLOR_COUNT, 1 );
oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_MAX_COLOR_COUNT, 255 );
oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_CAN_INTERWOVEN, 1 );
oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_CAN_PREMULTIPLIED_ALPHA, 1 );
oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_CAN_NONPREMULTIPLIED_ALPHA, 1 );
oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_ID, 1 );
expose7 = oyCMMapi7_Create ( oyraCMMInit, oyraCMMMessageFuncSet,
OY_IMAGE_EXPOSE_REGISTRATION,
cmm_version, module_api,
NULL,
oyraFilter_ImageExposeRun,
(oyConnector_s**)plugs, 1, 0,
(oyConnector_s**)sockets, 1, 0,
0, 0 );
return (oyCMMapi_s*) expose7;
}
/** Translated UI texts for the "image_expose" filter.
 *
 *  Answers the module framework's text queries for the filter's name,
 *  help and category strings at the requested level of detail.
 *
 *  @param[in]  select   text category: "name", "help" or "category"
 *  @param[in]  type     oyNAME_NICK, oyNAME_NAME or oyNAME_DESCRIPTION
 *  @param[in]  context  unused
 *  @return              translated text, or 0 for an unhandled query
 */
const char * oyraApi4UiImageExposeGetText (
                                       const char        * select,
                                       oyNAME_e            type,
                                       oyStruct_s        * context OY_UNUSED )
{
  if(strcmp(select,"name") == 0)
  {
    if(type == oyNAME_NICK)
      return "image_expose";
    else if(type == oyNAME_NAME)
      return _("Image[expose]");
    else if(type == oyNAME_DESCRIPTION)
      return _("Expose Image Filter Object");
  } else if(strcmp(select,"help") == 0)
  {
    if(type == oyNAME_NICK)
      return "help";
    else if(type == oyNAME_NAME)
      return _("The filter adapts pixel brightness.");
    else if(type == oyNAME_DESCRIPTION)
    {
      /* build the long help text once and keep it cached for later calls */
      static char * help_desc = NULL;
      if(!help_desc)
        oyStringAddPrintf( &help_desc, 0,0, "%s",
      _("The filter expects a \"expose\" double option and will process the data accordingly.")
        );
      return help_desc;
    }
  } else if(strcmp(select,"category") == 0)
  {
    if(type == oyNAME_NICK)
      return "category";
    else if(type == oyNAME_NAME)
      return _("Image/Simple Image[expose]");
    else if(type == oyNAME_DESCRIPTION)
      /* fixed: previous text ("used to reduce pixels") was a copy&paste
       * leftover from the scale filter; describe what expose does */
      return _("The filter is used to adapt pixel brightness.");
  }
  return 0;
}
/** @instance oyra_api4
* @brief oyra oyCMMapi4_s implementation
*
* a filter providing a expose image filter
*
* @version Oyranos: 0.9.5
* @since 2013/06/14 (Oyranos: 0.9.5)
* @date 2013/06/14
*/
oyCMMapi_s * oyraApi4ImageExposeCreate(void)
{
  /* text categories answered by the UI getter above */
  static const char * ui_texts[] = {"name", "help", "category", 0};
  int32_t cmm_version[3] = {OYRANOS_VERSION_A,OYRANOS_VERSION_B,OYRANOS_VERSION_C},
          module_api[3] = {OYRANOS_VERSION_A,OYRANOS_VERSION_B,OYRANOS_VERSION_C};
  oyCMMapi4_s * api4 = NULL;
  /* UI object: category string plus the translated text callback */
  oyCMMui_s * ui = oyCMMui_Create( "Image/Simple Image[expose]",
                                   oyraApi4UiImageExposeGetText,
                                   ui_texts, 0 );

  /* api4 carries only meta data and the UI; processing lives in api7 */
  api4 = oyCMMapi4_Create( oyraCMMInit, oyraCMMMessageFuncSet,
                           OY_IMAGE_EXPOSE_REGISTRATION,
                           cmm_version, module_api,
                           NULL,
                           NULL,
                           NULL,
                           ui,
                           NULL );
  return (oyCMMapi_s*)api4;
}
/* OY_IMAGE_EXPOSE_REGISTRATION ----------------------------------------------*/
/* ---------------------------------------------------------------------------*/
|
GB_binop__le_int32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__le_int32)
// A.*B function (eWiseMult): GB (_AemultB_08__le_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__le_int32)
// A.*B function (eWiseMult): GB (_AemultB_04__le_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__le_int32)
// A*D function (colscale): GB (_AxD__le_int32)
// D*A function (rowscale): GB (_DxB__le_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__le_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__le_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__le_int32)
// C=scalar+B GB (_bind1st__le_int32)
// C=scalar+B' GB (_bind1st_tran__le_int32)
// C=A+scalar GB (_bind2nd__le_int32)
// C=A'+scalar GB (_bind2nd_tran__le_int32)
// C type: bool
// A type: int32_t
// A pattern? 0
// B type: int32_t
// B pattern? 0
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x <= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LE || GxB_NO_INT32 || GxB_NO_LE_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C += A+B, all three matrices dense.  Compiled out (#if 0) for this
// operator: the accumulating template only applies to the ops listed
// below, and LE is not one of them, so no function is generated.
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; no accumulator.
// The loop body comes from the shared template, specialized for
// z = (x <= y) on int32 by the GB_* macros defined above.
void GB (_Cdense_ewise3_noaccum__le_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads          // # of threads for the dense loop
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix.
// The subassign template is compiled out (#if 0) for this operator,
// so the function is a no-op that reports success (or GrB_NO_VALUE
// when the operator is disabled at compile time).
GrB_Info GB (_Cdense_accumB__le_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // accumulation template not generated for LE_INT32
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.
// As with the matrix-accumulate variant above, the template is compiled
// out (#if 0) for this operator; the function only reports success.
GrB_Info GB (_Cdense_accumb__le_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,     // untyped pointer to the scalar b
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // scalar accumulation template not generated for LE_INT32
    #if 0
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column scale of A by the diagonal matrix D.
// Only the typed view of C->x is set up here; the numeric loop comes
// from the colscale template, specialized by the GB_* macros above.
GrB_Info GB (_AxD__le_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C holds the operator's output type (bool for LE)
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row scale of B by the diagonal matrix D.
// Mirror image of the colscale case above; the loop comes from the
// rowscale template, specialized by the GB_* macros above.
GrB_Info GB (_DxB__le_int32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C holds the operator's output type (bool for LE)
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B.
// All of the real work is in GB_add_template.c, driven by the macros
// above.  For eWiseUnion the alpha/beta scalars stand in for entries
// missing from A or B respectively.
GrB_Info GB (_AaddB__le_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,     // if true, use only the structure of M
    const bool Mask_comp,       // if true, the mask is complemented
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // ek-slice workspace; released by GB_FREE_WORKSPACE below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    int32_t alpha_scalar ;
    int32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // the scalars are only read in the eWiseUnion case
        alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is
// sparse or hypersparse.  The numeric phase lives in the included meta
// template, specialized by the GB_* macros above.
GrB_Info GB (_AemultB_08__le_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,     // if true, use only the structure of M
    const bool Mask_comp,       // if true, the mask is complemented
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B where A is sparse/hyper and B is
// bitmap/full.  The GB_BINOP_FLIP branch below is resolved at compile
// time; for LE (GB_BINOP_FLIP == 0) only the unflipped template is used.
GrB_Info GB (_AemultB_02__le_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,     // if true, use only the structure of M
    const bool Mask_comp,       // if true, the mask is complemented
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,          // if true, compute z = fmult(y,x)
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B where M is sparse/hyper and both
// A and B are bitmap/full.  The mask drives the iteration; the loop
// comes from the included template, specialized by the macros above.
GrB_Info GB (_AemultB_04__le_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,     // if true, use only the structure of M
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap.
// The bitmap variant of the emult kernel; all work is in the included
// template, specialized by the GB_* macros above.
GrB_Info GB (_AemultB_bitmap__le_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,     // if true, use only the structure of M
    const bool Mask_comp,       // if true, the mask is complemented
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the LE operator with the scalar bound as the
// first argument, i.e. Cx [p] = (x <= Bx [p]) for every entry present
// in B.  Cx and Bx may be aliased; the inputs arrive untyped.
GrB_Info GB (_bind1st__le_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B, or NULL if B is full
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped arguments
    bool *Cx = (bool *) Cx_output ;
    int32_t x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // only entries present in the bitmap of B are computed
        if (GBB (Bb, k))
        {
            int32_t bij = GBX (Bx, k, false) ;
            Cx [k] = (x <= bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the LE operator with the scalar bound as the
// second argument, i.e. Cx [p] = (Ax [p] <= y) for every entry present
// in A.  Cx and Ax may be aliased; the inputs arrive untyped.
GrB_Info GB (_bind2nd__le_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A, or NULL if A is full
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped arguments
    bool *Cx = (bool *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t y = (*((int32_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // only entries present in the bitmap of A are computed
        if (GBB (Ab, k))
        {
            int32_t aij = GBX (Ax, k, false) ;
            Cx [k] = (aij <= y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x <= aij) ; \
}
// C = op (x, A'): transpose A and apply the LE operator with the scalar
// bound as the first argument, via the GB_CAST_OP macro defined above.
GrB_Info GB (_bind1st_tran__le_int32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for the remainder of this file (same type here,
    // since A and B share the int32 type for LE_INT32)
    #undef GB_ATYPE
    #define GB_ATYPE \
    int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij <= y) ; \
}
// C = op (A', y): transpose A and apply the LE operator with the scalar
// bound as the second argument, via the GB_CAST_OP macro defined above.
// No GB_ATYPE redefinition is needed here: A is already the first input.
GrB_Info GB (_bind2nd_tran__le_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_transpose.c | //------------------------------------------------------------------------------
// GB_transpose: C=A' or C=op(A'), with typecasting
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// CALLS: GB_builder
// Transpose a matrix, C=A', and optionally apply a unary operator and/or
// typecast the values. The transpose may be done in-place, in which case C or
// A are modified in-place.
// There are two ways to use this method:
// C = A' C and A are different
// C = C' C is transposed in-place, (C==A aliased)
// In both cases, the header for C and A must already be allocated (either
// static or dynamic). A is never modified, unless C==A. C and A cannot be
// NULL on input. If in place (C == A) then C and A is a valid matrix on input
// (the input matrix A). If C != A, the contents of C are not defined on input,
// and any prior content is freed. Either header may be static or dynamic.
// The input matrix A may have shallow components (even if in-place), and the
// output C may also have shallow components (even if the input matrix is not
// shallow).
// This function is CSR/CSC agnostic; it sets the output matrix format from
// C_is_csc but otherwise ignores the CSR/CSC type of A and C.
// The bucket sort is parallel, but not highly scalable. If e=nnz(A) and A is
// m-by-n, then at most O(e/n) threads are used. The GB_builder method is more
// scalable, but not as fast with a modest number of threads.
#include "GB_transpose.h"
#include "GB_build.h"
#include "GB_apply.h"
#define GB_FREE_WORKSPACE \
{ \
GB_FREE (&iwork, iwork_size) ; \
GB_FREE (&jwork, jwork_size) ; \
GB_FREE (&Swork, Swork_size) ; \
GB_WERK_POP (Count, int64_t) ; \
}
#define GB_FREE_ALL \
{ \
GB_FREE_WORKSPACE ; \
GB_phbix_free (T) ; \
/* freeing C also frees A if transpose is done in-place */ \
GB_phbix_free (C) ; \
}
//------------------------------------------------------------------------------
// GB_transpose
//------------------------------------------------------------------------------
GrB_Info GB_transpose // C=A', C=(ctype)A' or C=op(A')
(
GrB_Matrix C, // output matrix C, possibly modified in-place
GrB_Type ctype, // desired type of C; if NULL use A->type.
// ignored if op is present (cast to op->ztype)
const bool C_is_csc, // desired CSR/CSC format of C
const GrB_Matrix A, // input matrix; C == A if done in place
// no operator is applied if op is NULL
const GB_Operator op_in, // unary/idxunop/binop to apply
const GrB_Scalar scalar, // scalar to bind to binary operator
bool binop_bind1st, // if true, binop(x,A) else binop(A,y)
bool flipij, // if true, flip i,j for user idxunop
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs and determine if transpose is done in-place
//--------------------------------------------------------------------------
GrB_Info info ;
ASSERT (C != NULL) ;
ASSERT (A != NULL) ;
bool in_place = (A == C) ;
struct GB_Matrix_opaque T_header ;
GrB_Matrix T = GB_clear_static_header (&T_header) ;
GB_WERK_DECLARE (Count, int64_t) ;
int64_t *iwork = NULL ; size_t iwork_size = 0 ;
int64_t *jwork = NULL ; size_t jwork_size = 0 ;
GB_void *Swork = NULL ; size_t Swork_size = 0 ;
ASSERT_MATRIX_OK (A, "A input for GB_transpose", GB0) ;
ASSERT_TYPE_OK_OR_NULL (ctype, "ctype for GB_transpose", GB0) ;
ASSERT_OP_OK_OR_NULL (op_in, "unop/binop for GB_transpose", GB0) ;
ASSERT_SCALAR_OK_OR_NULL (scalar, "scalar for GB_transpose", GB0) ;
if (in_place)
{
GBURBLE ("(in-place transpose) ") ;
}
// get the current sparsity control of A
float A_hyper_switch = A->hyper_switch ;
float A_bitmap_switch = A->bitmap_switch ;
int A_sparsity_control = A->sparsity_control ;
int64_t avlen = A->vlen ;
int64_t avdim = A->vdim ;
// wait if A has pending tuples or zombies; leave jumbled unless avdim == 1
if (GB_PENDING (A) || GB_ZOMBIES (A) || (avdim == 1 && GB_JUMBLED (A)))
{
GB_OK (GB_wait (A, "A", Context)) ;
}
ASSERT (!GB_PENDING (A)) ;
ASSERT (!GB_ZOMBIES (A)) ;
ASSERT (GB_JUMBLED_OK (A)) ;
ASSERT (GB_IMPLIES (avdim == 1, !GB_JUMBLED (A))) ;
//--------------------------------------------------------------------------
// get A
//--------------------------------------------------------------------------
GrB_Type atype = A->type ;
size_t asize = atype->size ;
GB_Type_code acode = atype->code ;
bool A_is_bitmap = GB_IS_BITMAP (A) ;
bool A_is_hyper = GB_IS_HYPERSPARSE (A) ;
int64_t anz = GB_nnz (A) ;
int64_t anz_held = GB_nnz_held (A) ;
int64_t anvec = A->nvec ;
int64_t anvals = A->nvals ;
//--------------------------------------------------------------------------
// determine the max number of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
//--------------------------------------------------------------------------
// determine the type of C and get the unary, idxunop, binary operator
//--------------------------------------------------------------------------
// If a unary, idxunop, or binary operator is present, C is always returned
// as the ztype of the operator. The input ctype is ignored.
GB_Operator op = NULL ;
GB_Opcode opcode = GB_NOP_code ;
if (op_in == NULL)
{
// no operator
if (ctype == NULL)
{
// no typecasting if ctype is NULL
ctype = atype ;
}
}
else
{
opcode = op_in->opcode ;
if (GB_IS_UNARYOP_CODE (opcode))
{
// get the unary operator
if (atype == op_in->xtype && opcode == GB_IDENTITY_unop_code)
{
// op is a built-in unary identity operator, with the same type
// as A, so do not apply the operator and do not typecast. op
// is NULL.
ctype = atype ;
}
else
{
// apply the operator, z=unop(x)
op = op_in ;
ctype = op->ztype ;
}
}
else // binary or idxunop
{
// get the binary or idxunop operator: only GB_apply calls
// GB_transpose with op_in, and it ensures this condition holds:
// first(A,y), second(x,A) have been renamed to identity(A), and
// PAIR has been renamed one(A), so these cases do not occur here.
ASSERT (!((opcode == GB_PAIR_binop_code) ||
(opcode == GB_FIRST_binop_code && !binop_bind1st) ||
(opcode == GB_SECOND_binop_code && binop_bind1st))) ;
// apply the operator, z=binop(A,y), binop(x,A), or idxunop(A,y)
op = op_in ;
ctype = op->ztype ;
}
}
bool user_idxunop = (opcode == GB_USER_idxunop_code) ;
//--------------------------------------------------------------------------
// check for positional operators
//--------------------------------------------------------------------------
bool op_is_positional = GB_OPCODE_IS_POSITIONAL (opcode) ;
GB_Operator save_op = op ;
if (op_is_positional)
{
// do not apply the positional op until after the transpose;
// replace op with the ONE operator, as a placeholder. C will be
// constructed as iso, and needs to be expanded to non-iso when done.
ASSERT (ctype == GrB_INT64 || ctype == GrB_INT32 || ctype == GrB_BOOL) ;
op = (GB_Operator) GB_unop_one (ctype->code) ;
}
else if (user_idxunop)
{
// do not apply the user op until after the transpose; replace with
// no operator at all, with no typecast
op = NULL ;
ctype = atype ;
}
//--------------------------------------------------------------------------
// determine the iso status of C
//--------------------------------------------------------------------------
size_t csize = ctype->size ;
ASSERT (GB_IMPLIES (avlen == 0 || avdim == 0, anz == 0)) ;
GB_iso_code C_code_iso = GB_iso_unop_code (A, op, binop_bind1st) ;
bool C_iso = (C_code_iso != GB_NON_ISO) ;
ASSERT (GB_IMPLIES (A->iso, C_iso)) ;
if (C_iso && !op_is_positional)
{
GBURBLE ("(iso transpose) ") ;
}
else
{
GBURBLE ("(transpose) ") ;
}
//==========================================================================
// T = A', T = (ctype) A', or T = op (A')
//==========================================================================
if (anz == 0)
{
//----------------------------------------------------------------------
// A is empty
//----------------------------------------------------------------------
// create a new empty matrix T, with the new type and dimensions.
// set T->iso = false OK
GB_OK (GB_new_bix (&T, true, // hyper, static header
ctype, avdim, avlen, GB_Ap_calloc, C_is_csc, GxB_HYPERSPARSE,
true, A_hyper_switch, 1, 1, true, false, Context)) ;
}
else if (A_is_bitmap || GB_as_if_full (A))
{
//----------------------------------------------------------------------
// transpose a bitmap/as-if-full matrix or vector
//----------------------------------------------------------------------
// A is either bitmap or as-is-full (full, or sparse or hypersparse
// with all entries present, no zombies, no pending tuples, and not
// jumbled). T = A' is either bitmap or full.
int T_sparsity = (A_is_bitmap) ? GxB_BITMAP : GxB_FULL ;
bool T_cheap = // T can be done quickly if:
(avlen == 1 || avdim == 1) // A is a row or column vector,
&& op == NULL // no operator to apply,
&& atype == ctype ; // and no typecasting
// allocate T
if (T_cheap)
{
// just initialize the static header of T, not T->b or T->x
GBURBLE ("(cheap transpose) ") ;
info = GB_new (&T, true, // bitmap or full, static header
ctype, avdim, avlen, GB_Ap_null, C_is_csc,
T_sparsity, A_hyper_switch, 1, Context) ;
ASSERT (info == GrB_SUCCESS) ;
}
else
{
// allocate all of T, including T->b and T->x
// set T->iso = C_iso OK
GB_OK (GB_new_bix (&T, true, // bitmap or full, static header
ctype, avdim, avlen, GB_Ap_null, C_is_csc, T_sparsity, true,
A_hyper_switch, 1, anz_held, true, C_iso, Context)) ;
}
T->magic = GB_MAGIC ;
if (T_sparsity == GxB_BITMAP)
{
T->nvals = anvals ; // for bitmap case only
}
//----------------------------------------------------------------------
// T = A'
//----------------------------------------------------------------------
int nthreads = GB_nthreads (anz_held + anvec, chunk, nthreads_max) ;
if (T_cheap)
{
// no work to do. Transposing does not change A->b or A->x
T->b = A->b ; T->b_size = A->b_size ;
T->x = A->x ; T->x_size = A->x_size ;
if (in_place)
{
// transplant A->b and A->x into T
T->b_shallow = A->b_shallow ;
T->x_shallow = A->x_shallow ;
A->b = NULL ;
A->x = NULL ;
}
else
{
// T is a purely shallow copy of A
T->b_shallow = (A->b != NULL) ;
T->x_shallow = true ;
}
T->iso = A->iso ; // OK
}
else if (op == NULL)
{
// do not apply an operator; optional typecast to T->type
GB_transpose_ix (T, A, NULL, NULL, 0, nthreads) ;
}
else
{
// apply an operator, T has type op->ztype
GB_transpose_op (T, C_code_iso, op, scalar, binop_bind1st, A,
NULL, NULL, 0, nthreads) ;
}
ASSERT_MATRIX_OK (T, "T dense/bitmap", GB0) ;
ASSERT (!GB_JUMBLED (T)) ;
}
else if (avdim == 1)
{
//----------------------------------------------------------------------
// transpose a "column" vector into a "row"
//----------------------------------------------------------------------
// transpose a vector (avlen-by-1) into a "row" matrix (1-by-avlen).
// A must be sorted first.
ASSERT_MATRIX_OK (A, "the vector A must already be sorted", GB0) ;
ASSERT (!GB_JUMBLED (A)) ;
//----------------------------------------------------------------------
// allocate T
//----------------------------------------------------------------------
// Initialized the header of T, with no content, and initialize the
// type and dimension of T. T is hypersparse.
info = GB_new (&T, true, // hyper; static header
ctype, 1, avlen, GB_Ap_null, C_is_csc,
GxB_HYPERSPARSE, A_hyper_switch, 0, Context) ;
ASSERT (info == GrB_SUCCESS) ;
// allocate T->p, T->i, and optionally T->x, but not T->h
T->p = GB_MALLOC (anz+1, int64_t, &(T->p_size)) ;
T->i = GB_MALLOC (anz , int64_t, &(T->i_size)) ;
bool allocate_Tx = (op != NULL || C_iso) || (ctype != atype) ;
if (allocate_Tx)
{
// allocate new space for the new typecasted numerical values of T
T->x = GB_XALLOC (false, C_iso, anz, csize, &(T->x_size)) ; // x:OK
}
if (T->p == NULL || T->i == NULL || (allocate_Tx && T->x == NULL))
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
//----------------------------------------------------------------------
// numerical values of T: apply the op, typecast, or make shallow copy
//----------------------------------------------------------------------
// numerical values: apply the operator, typecast, or make shallow copy
if (op != NULL || C_iso)
{
// T->x = unop (A), binop (A,scalar), or binop (scalar,A), or
// compute the iso value of T = 1, A, or scalar, without any op
info = GB_apply_op ((GB_void *) T->x, ctype, C_code_iso, op,
scalar, binop_bind1st, flipij, A, Context) ;
ASSERT (info == GrB_SUCCESS) ;
}
else if (ctype != atype)
{
// copy the values from A into T and cast from atype to ctype
GB_cast_matrix (T, A, Context) ;
}
else
{
// no type change; numerical values of T are a shallow copy of A.
ASSERT (!allocate_Tx) ;
T->x = A->x ; T->x_size = A->x_size ;
if (in_place)
{
// transplant A->x as T->x
T->x_shallow = A->x_shallow ;
A->x = NULL ;
}
else
{
// T->x is a shallow copy of A->x
T->x_shallow = true ;
}
}
// each entry in A becomes a non-empty vector in T;
// T is a hypersparse 1-by-avlen matrix
// transplant or shallow-copy A->i as the new T->h
T->h = A->i ; T->h_size = A->i_size ;
if (in_place)
{
// transplant A->i as T->h
T->h_shallow = A->i_shallow ;
A->i = NULL ;
}
else
{
// T->h is a shallow copy of A->i
T->h_shallow = true ;
}
// T->p = 0:anz and T->i = zeros (1,anz), newly allocated
T->plen = anz ;
T->nvec = anz ;
T->nvec_nonempty = anz ;
// fill the vector pointers T->p
int nthreads = GB_nthreads (anz, chunk, nthreads_max) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
T->i [k] = 0 ;
T->p [k] = k ;
}
T->p [anz] = anz ;
T->iso = C_iso ;
T->magic = GB_MAGIC ;
}
else if (avlen == 1)
{
//----------------------------------------------------------------------
// transpose a "row" into a "column" vector
//----------------------------------------------------------------------
// transpose a "row" matrix (1-by-avdim) into a vector (avdim-by-1).
// if A->vlen is 1, all vectors of A are implicitly sorted
ASSERT_MATRIX_OK (A, "1-by-n input A already sorted", GB0) ;
//----------------------------------------------------------------------
// allocate workspace, if needed
//----------------------------------------------------------------------
int ntasks = 0 ;
int nth = GB_nthreads (avdim, chunk, nthreads_max) ;
if (nth > 1 && !A_is_hyper)
{
// ntasks and Count are not needed if nth == 1
ntasks = 8 * nth ;
ntasks = GB_IMIN (ntasks, avdim) ;
ntasks = GB_IMAX (ntasks, 1) ;
GB_WERK_PUSH (Count, ntasks+1, int64_t) ;
if (Count == NULL)
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
}
// Allocate the header of T, with no content
// and initialize the type and dimension of T.
info = GB_new (&T, true, // sparse; static header
ctype, avdim, 1, GB_Ap_null, C_is_csc,
GxB_SPARSE, A_hyper_switch, 0, Context) ;
ASSERT (info == GrB_SUCCESS) ;
T->iso = C_iso ; // OK
// allocate new space for the values and pattern
T->p = GB_CALLOC (2, int64_t, &(T->p_size)) ;
if (!A_is_hyper)
{
// A is sparse, so new space is needed for T->i
T->i = GB_MALLOC (anz, int64_t, &(T->i_size)) ;
}
bool allocate_Tx = (op != NULL || C_iso) || (ctype != atype) ;
if (allocate_Tx)
{
// allocate new space for the new typecasted numerical values of T
T->x = GB_XALLOC (false, C_iso, anz, csize, &(T->x_size)) ; // x:OK
}
if (T->p == NULL || (T->i == NULL && !A_is_hyper) ||
(T->x == NULL && allocate_Tx))
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
//----------------------------------------------------------------------
// numerical values of T: apply the op, typecast, or make shallow copy
//----------------------------------------------------------------------
// numerical values: apply the operator, typecast, or make shallow copy
if (op != NULL || C_iso)
{
// T->x = unop (A), binop (A,scalar), or binop (scalar,A), or
// compute the iso value of T = 1, A, or scalar, without any op
info = GB_apply_op ((GB_void *) T->x, ctype, C_code_iso, op,
scalar, binop_bind1st, flipij, A, Context) ;
ASSERT (info == GrB_SUCCESS) ;
}
else if (ctype != atype)
{
// copy the values from A into T and cast from atype to ctype
GB_cast_matrix (T, A, Context) ;
}
else
{
// no type change; numerical values of T are a shallow copy of A.
ASSERT (!allocate_Tx) ;
T->x = A->x ; T->x_size = A->x_size ;
if (in_place)
{
// transplant A->x as T->x
T->x_shallow = A->x_shallow ;
A->x = NULL ;
}
else
{
// T->x is a shallow copy of A->x
T->x_shallow = true ;
}
}
//----------------------------------------------------------------------
// compute T->i
//----------------------------------------------------------------------
if (A_is_hyper)
{
//------------------------------------------------------------------
// each non-empty vector in A becomes an entry in T
//------------------------------------------------------------------
T->i = A->h ; T->i_size = A->h_size ;
if (in_place)
{
// transplant A->h as T->i
T->i_shallow = A->h_shallow ;
A->h = NULL ;
}
else
{
// T->i is a shallow copy of A->h
T->i_shallow = true ;
}
}
else
{
//------------------------------------------------------------------
// find the non-empty vectors of A, which become entries in T
//------------------------------------------------------------------
if (nth == 1)
{
//--------------------------------------------------------------
// construct T->i with a single thread
//--------------------------------------------------------------
int64_t k = 0 ;
for (int64_t j = 0 ; j < avdim ; j++)
{
if (A->p [j] < A->p [j+1])
{
T->i [k++] = j ;
}
}
ASSERT (k == anz) ;
}
else
{
//--------------------------------------------------------------
// construct T->i in parallel
//--------------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(nth) schedule(dynamic,1)
for (tid = 0 ; tid < ntasks ; tid++)
{
int64_t jstart, jend, k = 0 ;
GB_PARTITION (jstart, jend, avdim, tid, ntasks) ;
for (int64_t j = jstart ; j < jend ; j++)
{
if (A->p [j] < A->p [j+1])
{
k++ ;
}
}
Count [tid] = k ;
}
GB_cumsum (Count, ntasks, NULL, 1, NULL) ;
ASSERT (Count [ntasks] == anz) ;
#pragma omp parallel for num_threads(nth) schedule(dynamic,1)
for (tid = 0 ; tid < ntasks ; tid++)
{
int64_t jstart, jend, k = Count [tid] ;
GB_PARTITION (jstart, jend, avdim, tid, ntasks) ;
for (int64_t j = jstart ; j < jend ; j++)
{
if (A->p [j] < A->p [j+1])
{
T->i [k++] = j ;
}
}
}
}
#ifdef GB_DEBUG
int64_t k = 0 ;
for (int64_t j = 0 ; j < avdim ; j++)
{
if (A->p [j] < A->p [j+1])
{
ASSERT (T->i [k] == j) ;
k++ ;
}
}
ASSERT (k == anz) ;
#endif
}
//---------------------------------------------------------------------
// vector pointers of T
//---------------------------------------------------------------------
// T->p = [0 anz]
ASSERT (T->plen == 1) ;
ASSERT (T->nvec == 1) ;
T->nvec_nonempty = (anz == 0) ? 0 : 1 ;
T->p [1] = anz ;
T->magic = GB_MAGIC ;
ASSERT (!GB_JUMBLED (T)) ;
}
else
{
//----------------------------------------------------------------------
// transpose a general sparse or hypersparse matrix
//----------------------------------------------------------------------
ASSERT_MATRIX_OK (A, "A for GB_transpose", GB0) ;
// T=A' with optional typecasting, or T=op(A')
//----------------------------------------------------------------------
// select the method
//----------------------------------------------------------------------
int nworkspaces_bucket, nthreads_bucket ;
bool use_builder = GB_transpose_method (A,
&nworkspaces_bucket, &nthreads_bucket, Context) ;
//----------------------------------------------------------------------
// transpose the matrix with the selected method
//----------------------------------------------------------------------
if (use_builder)
{
//------------------------------------------------------------------
// transpose via GB_builder
//------------------------------------------------------------------
//------------------------------------------------------------------
// allocate and create iwork
//------------------------------------------------------------------
// allocate iwork of size anz
iwork = GB_MALLOC (anz, int64_t, &iwork_size) ;
if (iwork == NULL)
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
// Construct the "row" indices of C, which are "column" indices of
// A. This array becomes the permanent T->i on output.
GB_OK (GB_extract_vector_list (iwork, A, Context)) ;
//------------------------------------------------------------------
// allocate the output matrix and additional space (jwork and Swork)
//------------------------------------------------------------------
// initialize the header of T, with no content
// content, and initialize the type and dimension of T.
info = GB_new (&T, true, // hyper, static header
ctype, avdim, avlen, GB_Ap_null, C_is_csc,
GxB_HYPERSPARSE, A_hyper_switch, 0, Context) ;
ASSERT (info == GrB_SUCCESS) ;
// if in_place, the prior A->p and A->h can now be freed
if (in_place)
{
if (!A->p_shallow) GB_FREE (&A->p, A->p_size) ;
if (!A->h_shallow) GB_FREE (&A->h, A->h_size) ;
}
GB_void *S_input = NULL ;
// for the GB_builder method, if the transpose is done in-place and
// A->i is not shallow, A->i can be used and then freed.
// Otherwise, A->i is not modified at all.
bool ok = true ;
bool recycle_Ai = (in_place && !A->i_shallow) ;
if (!recycle_Ai)
{
// allocate jwork of size anz
jwork = GB_MALLOC (anz, int64_t, &jwork_size) ;
ok = ok && (jwork != NULL) ;
}
if (op != NULL && !C_iso)
{
Swork = (GB_void *) GB_XALLOC (false, C_iso, anz, // x:OK
csize, &Swork_size) ;
ok = ok && (Swork != NULL) ;
}
if (!ok)
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
//------------------------------------------------------------------
// construct jwork and Swork
//------------------------------------------------------------------
// "row" indices of A become "column" indices of C
if (recycle_Ai)
{
// A->i is used as workspace for the "column" indices of C.
// jwork is A->i, and is freed by GB_builder.
jwork = A->i ;
jwork_size = A->i_size ;
A->i = NULL ;
ASSERT (in_place) ;
}
else
{
// copy A->i into jwork, making a deep copy. jwork is freed by
// GB_builder. A->i is not modified, even if out of memory.
GB_memcpy (jwork, A->i, anz * sizeof (int64_t), nthreads_max) ;
}
// numerical values: apply the op, typecast, or make shallow copy
GrB_Type stype ;
GB_void sscalar [GB_VLA(csize)] ;
if (C_iso)
{
// apply the op to the iso scalar
GB_iso_unop (sscalar, ctype, C_code_iso, op, A, scalar) ;
S_input = sscalar ; // S_input is used instead of Swork
Swork = NULL ;
stype = ctype ;
}
else if (op != NULL)
{
// Swork = op (A)
info = GB_apply_op (Swork, ctype, C_code_iso, op, scalar,
binop_bind1st, flipij, A, Context) ;
ASSERT (info == GrB_SUCCESS) ;
// GB_builder will not need to typecast Swork to T->x, and it
// may choose to transplant it into T->x
S_input = NULL ; // Swork is used instead of S_input
stype = ctype ;
}
else
{
// GB_builder will typecast S_input from atype to ctype if
// needed. S_input is a shallow copy of Ax, and must not be
// modified.
ASSERT (!C_iso) ;
ASSERT (!A->iso) ;
S_input = (GB_void *) A->x ; // S_input is used instead of Swork
Swork = NULL ;
stype = atype ;
}
//------------------------------------------------------------------
// build the matrix: T = (ctype) A' or op ((xtype) A')
//------------------------------------------------------------------
// internally, jwork is freed and then T->x is allocated, so the
// total memory usage is anz * max (csize, sizeof(int64_t)). T is
// always hypersparse. Either T, Swork, and S_input are all iso,
// or all non-iso, depending on C_iso.
GB_OK (GB_builder (
T, // create T using a static header
ctype, // T is of type ctype
avdim, // T->vlen = A->vdim, always > 1
avlen, // T->vdim = A->vlen, always > 1
C_is_csc, // T has the same CSR/CSC format as C
&iwork, // iwork_handle, becomes T->i on output
&iwork_size,
&jwork, // jwork_handle, freed on output
&jwork_size,
&Swork, // Swork_handle, freed on output
&Swork_size,
false, // tuples are not sorted on input
true, // tuples have no duplicates
anz, // size of iwork, jwork, and Swork
true, // is_matrix: unused
NULL, NULL, // original I,J indices: not used here
S_input, // array of values of type stype, not modified
C_iso, // iso property of T is the same as C->iso
anz, // number of tuples
NULL, // no dup operator needed (input has no duplicates)
stype, // type of S_input or Swork
Context
)) ;
// GB_builder always frees jwork, and either frees iwork or
// transplants it in to T->i and sets iwork to NULL. So iwork and
// jwork are always NULL on output. GB_builder does not modify
// S_input.
ASSERT (iwork == NULL && jwork == NULL && Swork == NULL) ;
ASSERT (!GB_JUMBLED (T)) ;
}
else
{
//------------------------------------------------------------------
// transpose via bucket sort
//------------------------------------------------------------------
// T = A' and typecast to ctype
GB_OK (GB_transpose_bucket (T, C_code_iso, ctype, C_is_csc, A,
op, scalar, binop_bind1st,
nworkspaces_bucket, nthreads_bucket, Context)) ;
ASSERT_MATRIX_OK (T, "T from bucket", GB0) ;
ASSERT (GB_JUMBLED_OK (T)) ;
}
}
//==========================================================================
// free workspace, apply positional op, and transplant/conform T into C
//==========================================================================
//--------------------------------------------------------------------------
// free workspace
//--------------------------------------------------------------------------
GB_FREE_WORKSPACE ;
if (in_place)
{
// free prior space of A, if transpose is done in-place
GB_phbix_free (A) ;
}
//--------------------------------------------------------------------------
// transplant T into the result C
//--------------------------------------------------------------------------
// transplant the control settings from A to C
C->hyper_switch = A_hyper_switch ;
C->bitmap_switch = A_bitmap_switch ;
C->sparsity_control = A_sparsity_control ;
GB_OK (GB_transplant (C, ctype, &T, Context)) ;
ASSERT_MATRIX_OK (C, "C transplanted in GB_transpose", GB0) ;
ASSERT_TYPE_OK (ctype, "C type in GB_transpose", GB0) ;
//--------------------------------------------------------------------------
// apply a positional operator or user idxunop after transposing the matrix
//--------------------------------------------------------------------------
op = save_op ;
if (op_is_positional)
{
if (C->iso)
{
// If C was constructed as iso; it needs to be expanded first,
// but do not initialize the values. These are computed by
// GB_apply_op below.
// set C->iso = false OK: no need to burble
GB_OK (GB_convert_any_to_non_iso (C, false, Context)) ;
}
// the positional unary op is applied in-place: C->x = op (C)
GB_OK (GB_apply_op ((GB_void *) C->x, ctype, GB_NON_ISO, op,
scalar, binop_bind1st, flipij, C, Context)) ;
}
else if (user_idxunop)
{
if (C->iso)
{
// If C was constructed as iso; it needs to be expanded and
// initialized first.
GB_OK (GB_convert_any_to_non_iso (C, true, Context)) ;
}
if (C->type == op->ztype)
{
// the user-defined index unary op is applied in-place: C->x = op
// (C) where the type of C does not change
GB_OK (GB_apply_op ((GB_void *) C->x, ctype, GB_NON_ISO, op,
scalar, binop_bind1st, flipij, C, Context)) ;
}
else // op is a user-defined index unary operator
{
// apply the operator to the transposed matrix:
// C = op (C), but not in-place since the type of C is changing
ctype = op->ztype ;
csize = ctype->size ;
size_t Cx_size = 0 ;
GB_void *Cx_new = NULL ;
if (GB_IS_BITMAP (C))
{
// calloc the space so the new C->x has no uninitialized space
Cx_new = GB_CALLOC (anz_held*csize, GB_void, &Cx_size) ; // x:OK
}
else
{
// malloc is fine; all C->x will be written
Cx_new = GB_MALLOC (anz_held*csize, GB_void, &Cx_size) ; // x:OK
}
if (Cx_new == NULL)
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
// Cx_new = op (C)
GB_OK (GB_apply_op (Cx_new, ctype, GB_NON_ISO, op,
scalar, false, flipij, C, Context)) ;
// transplant Cx_new as C->x and finalize the type of C
GB_FREE (&(C->x), C->x_size) ;
C->x = Cx_new ;
C->x_size = Cx_size ;
C->type = ctype ;
C->iso = false ;
}
}
//--------------------------------------------------------------------------
// conform the result to the desired sparsity structure of A
//--------------------------------------------------------------------------
ASSERT_MATRIX_OK (C, "C to conform in GB_transpose", GB0) ;
GB_OK (GB_conform (C, Context)) ;
ASSERT_MATRIX_OK (C, "C output of GB_transpose", GB0) ;
return (GrB_SUCCESS) ;
}
|
gauss_seidel_omp.c | #include<stdio.h>
#include<math.h>
#include<omp.h> //added this
# include <time.h>
#include <sys/types.h>
#include <sys/time.h>
#define ESP 0.0001
/* Return the current wall-clock time in seconds, with microsecond
   resolution, as a double. */
double gettime(void) {
  struct timeval now;
  gettimeofday(&now, NULL);
  double seconds = (double) now.tv_sec;
  double micros  = (double) now.tv_usec;
  return seconds + micros / 1e6;
}
/*
 * Solve the hard-coded 3x3 linear system a*x = c with a pipelined
 * Gauss-Seidel iteration, parallelised across rows with OpenMP.
 *
 * Each sweep assigns one row per thread.  Row i needs the freshly
 * updated x[0..i-1] of the current sweep, so a shared counter `flag`
 * is used as a pipeline: row i spins until the i rows before it have
 * published their results.  Sweeps stop after `maxiter` iterations or
 * once the largest relative error drops below ESP.
 *
 * NOTE(review): the spin loop reads `flag` with only `#pragma omp
 * flush` (no atomic read), and row i reads x[j] for j > i while the
 * thread owning row j may be writing it.  This is formally a data
 * race under the OpenMP memory model -- confirm before reusing this
 * pattern.
 */
int main(int argc, char **argv){
    double a[3][3];                 /* coefficient matrix */
    double x[3], x_old[3];          /* current / previous solution vectors */
    double c[] = {1, 28, 76};       /* right-hand side */
    int i, j, k, iter, N, M;        /* k is declared but never used */
    double sum[3];                  /* per-row partial dot products */
    int maxiter = 6;                /* hard cap on sweeps */
    double Ea[3], EaMax;            /* per-row and maximum relative error (%) */
    double wtime1, wtime2;          /* wall-clock timestamps */
    int flag =0;                    /* rows completed in the current sweep */
    N=3;
    M=3;
    //initial guess for x[]
    x[0] = 1.00;
    x[1] = 0.00;
    x[2] = 1.00;
    x_old[0] = 1.00;
    x_old[1] = 0.00;
    x_old[2] = 1.00;
    /* fill the 3x3 coefficient matrix */
    a[0][0] = 12.00;
    a[0][1] = 3.00;
    a[0][2] = -5.00;
    a[1][0] = 1.00;
    a[1][1] = 5.00;
    a[1][2] = 3.00;
    a[2][0] = 3.00;
    a[2][1] = 7.00;
    a[2][2] = 13.00;
    /* echo the matrix */
    for(i=0; i<M;i++){
        for(j=0; j<N;j++){
            printf(" %lf ", a[i][j]);
        }
        printf("\n");
    }
    wtime1 = gettime();
    /* one thread per row */
    omp_set_num_threads(N);
    for(iter = 1; iter <= maxiter; iter++){
        EaMax =0.0;
        /* reset the pipeline counter for this sweep */
        flag = 0;
        /* x, c and flag are shared by default; sum and Ea are indexed
           only by the owning thread's i, so elements do not collide */
        #pragma omp parallel for shared (a, x_old, N, M, sum, Ea ) private ( i, j )
        for(i=0; i<N; i++){
            /* upper-triangular part: uses x[j] values from the previous
               sweep (or concurrently updated ones -- see NOTE above) */
            sum[i] = 0.0;
            for(j=(i+1); j<M; j++){
                sum[i] = a[i][j] * x[j] + sum[i];
            }
            if (i == 0){
                /* row 0 depends on no earlier row: update immediately */
                x[i] = (c[i] - sum[i])/a[i][i];
                #pragma omp atomic update
                flag++;
                Ea[i] = fabs((x[i] - x_old [i])/x[i]) * 100;
                x_old[i] = x[i];
            }else if (i > 0){
                /* busy-wait until the i earlier rows have published
                   their current-sweep values of x[0..i-1] */
                #pragma omp flush(flag)
                while(flag < i)
                {
                    #pragma omp flush(flag)
                }
                /* lower-triangular part with current-sweep values */
                for(j=0; j<i; j++){
                    sum[i] = sum[i] + a[i][j]*x[j];
                }
                x[i] = (c[i] - sum[i])/a[i][i];
                #pragma omp atomic update
                flag++;
                Ea[i] = fabs((x[i] - x_old [i])/x[i]) * 100;
                x_old[i] = x[i];
            }
        }
        /* reduce the per-row errors and report the sweep; note that
           omp_get_thread_num() is called outside the parallel region
           here, so it always prints 0 */
        for(i = 0; i < N; i++){
            if(Ea[i] > EaMax){
                EaMax = Ea[i];
            }
            printf("\nIn iteration number: %d, thread_is: %d, X[%d]: %lf, X_old[%d]: %lf, Error: %lf, EaMax: %lf, Sum: %lf\n", iter, omp_get_thread_num(), i,x[i], i, x_old[i], Ea[i],EaMax, sum[i] );
        }
        printf("\nEaMax: %lf\n", EaMax);
        /* converged? */
        if(EaMax < ESP){
            break;
        }
    }
    wtime2 = gettime();
    printf ( "Elapsed wall clock time (seconds) %f\n", (wtime2 - wtime1) );
    return 0;
}
|
pt.c | /* Handle parameterized types (templates) for GNU -*- C++ -*-.
Copyright (C) 1992-2017 Free Software Foundation, Inc.
Written by Ken Raeburn (raeburn@cygnus.com) while at Watchmaker Computing.
Rewritten by Jason Merrill (jason@cygnus.com).
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
/* Known bugs or deficiencies include:
all methods must be provided in header files; can't use a source
file that contains only the method templates and "just win". */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "cp-tree.h"
#include "timevar.h"
#include "stringpool.h"
#include "varasm.h"
#include "attribs.h"
#include "stor-layout.h"
#include "intl.h"
#include "c-family/c-objc.h"
#include "cp-objcp-common.h"
#include "toplev.h"
#include "tree-iterator.h"
#include "type-utils.h"
#include "gimplify.h"
/* The type of functions taking a tree, and some additional data, and
returning an int. */
typedef int (*tree_fn_t) (tree, void*);
/* The PENDING_TEMPLATES is a TREE_LIST of templates whose
instantiations have been deferred, either because their definitions
were not yet available, or because we were putting off doing the work. */
struct GTY ((chain_next ("%h.next"))) pending_template {
  /* Next deferred instantiation in the list.  */
  struct pending_template *next;
  /* The instantiation context to process later.  */
  struct tinst_level *tinst;
};
static GTY(()) struct pending_template *pending_templates;
static GTY(()) struct pending_template *last_pending_template;
int processing_template_parmlist;
static int template_header_count;
static GTY(()) tree saved_trees;
static vec<int> inline_parm_levels;
static GTY(()) struct tinst_level *current_tinst_level;
static GTY(()) tree saved_access_scope;
/* Live only within one (recursive) call to tsubst_expr. We use
this to pass the statement expression node from the STMT_EXPR
to the EXPR_STMT that is its result. */
static tree cur_stmt_expr;
// -------------------------------------------------------------------------- //
// Local Specialization Stack
//
// Implementation of the RAII helper for creating new local
// specializations.
/* Save the current local-specializations table and install a fresh,
   empty one for the new scope.  */
local_specialization_stack::local_specialization_stack ()
  : saved (local_specializations)
{
  local_specializations = new hash_map<tree, tree>;
}
/* Discard the table installed by the constructor and restore the one
   that was active before.  */
local_specialization_stack::~local_specialization_stack ()
{
  delete local_specializations;
  local_specializations = saved;
}
/* True if we've recursed into fn_type_unification too many times. */
static bool excessive_deduction_depth;
/* An entry in the specialization hash tables: the specialization SPEC
   of template TMPL for arguments ARGS.  */
struct GTY((for_user)) spec_entry
{
  tree tmpl;  /* The template being specialized.  */
  tree args;  /* The template arguments.  */
  tree spec;  /* The resulting specialization.  */
};
/* Hash traits for spec_entry, used by the decl/type specialization
   hash tables declared below.  */
struct spec_hasher : ggc_ptr_hash<spec_entry>
{
  static hashval_t hash (spec_entry *);
  static bool equal (spec_entry *, spec_entry *);
};
static GTY (()) hash_table<spec_hasher> *decl_specializations;
static GTY (()) hash_table<spec_hasher> *type_specializations;
/* Contains canonical template parameter types. The vector is indexed by
the TEMPLATE_TYPE_IDX of the template parameter. Each element is a
TREE_LIST, whose TREE_VALUEs contain the canonical template
parameters of various types and levels. */
static GTY(()) vec<tree, va_gc> *canonical_template_parms;
#define UNIFY_ALLOW_NONE 0
#define UNIFY_ALLOW_MORE_CV_QUAL 1
#define UNIFY_ALLOW_LESS_CV_QUAL 2
#define UNIFY_ALLOW_DERIVED 4
#define UNIFY_ALLOW_INTEGER 8
#define UNIFY_ALLOW_OUTER_LEVEL 16
#define UNIFY_ALLOW_OUTER_MORE_CV_QUAL 32
#define UNIFY_ALLOW_OUTER_LESS_CV_QUAL 64
enum template_base_result {
tbr_incomplete_type,
tbr_ambiguous_baseclass,
tbr_success
};
static void push_access_scope (tree);
static void pop_access_scope (tree);
static bool resolve_overloaded_unification (tree, tree, tree, tree,
unification_kind_t, int,
bool);
static int try_one_overload (tree, tree, tree, tree, tree,
unification_kind_t, int, bool, bool);
static int unify (tree, tree, tree, tree, int, bool);
static void add_pending_template (tree);
static tree reopen_tinst_level (struct tinst_level *);
static tree tsubst_initializer_list (tree, tree);
static tree get_partial_spec_bindings (tree, tree, tree);
static tree coerce_template_parms (tree, tree, tree, tsubst_flags_t,
bool, bool);
static tree coerce_innermost_template_parms (tree, tree, tree, tsubst_flags_t,
bool, bool);
static void tsubst_enum (tree, tree, tree);
static tree add_to_template_args (tree, tree);
static tree add_outermost_template_args (tree, tree);
static bool check_instantiated_args (tree, tree, tsubst_flags_t);
static int maybe_adjust_types_for_deduction (unification_kind_t, tree*, tree*,
tree);
static int type_unification_real (tree, tree, tree, const tree *,
unsigned int, int, unification_kind_t, int,
vec<deferred_access_check, va_gc> **,
bool);
static void note_template_header (int);
static tree convert_nontype_argument_function (tree, tree, tsubst_flags_t);
static tree convert_nontype_argument (tree, tree, tsubst_flags_t);
static tree convert_template_argument (tree, tree, tree,
tsubst_flags_t, int, tree);
static tree for_each_template_parm (tree, tree_fn_t, void*,
hash_set<tree> *, bool, tree_fn_t = NULL);
static tree expand_template_argument_pack (tree);
static tree build_template_parm_index (int, int, int, tree, tree);
static bool inline_needs_template_parms (tree, bool);
static void push_inline_template_parms_recursive (tree, int);
static tree reduce_template_parm_level (tree, tree, int, tree, tsubst_flags_t);
static int mark_template_parm (tree, void *);
static int template_parm_this_level_p (tree, void *);
static tree tsubst_friend_function (tree, tree);
static tree tsubst_friend_class (tree, tree);
static int can_complete_type_without_circularity (tree);
static tree get_bindings (tree, tree, tree, bool);
static int template_decl_level (tree);
static int check_cv_quals_for_unify (int, tree, tree);
static void template_parm_level_and_index (tree, int*, int*);
static int unify_pack_expansion (tree, tree, tree,
tree, unification_kind_t, bool, bool);
static tree copy_template_args (tree);
static tree tsubst_template_arg (tree, tree, tsubst_flags_t, tree);
static tree tsubst_template_args (tree, tree, tsubst_flags_t, tree);
static tree tsubst_template_parms (tree, tree, tsubst_flags_t);
static tree most_specialized_partial_spec (tree, tsubst_flags_t);
static tree tsubst_aggr_type (tree, tree, tsubst_flags_t, tree, int);
static tree tsubst_arg_types (tree, tree, tree, tsubst_flags_t, tree);
static tree tsubst_function_type (tree, tree, tsubst_flags_t, tree);
static bool check_specialization_scope (void);
static tree process_partial_specialization (tree);
static void set_current_access_from_decl (tree);
static enum template_base_result get_template_base (tree, tree, tree, tree,
bool , tree *);
static tree try_class_unification (tree, tree, tree, tree, bool);
static int coerce_template_template_parms (tree, tree, tsubst_flags_t,
tree, tree);
static bool template_template_parm_bindings_ok_p (tree, tree);
static void tsubst_default_arguments (tree, tsubst_flags_t);
static tree for_each_template_parm_r (tree *, int *, void *);
static tree copy_default_args_to_explicit_spec_1 (tree, tree);
static void copy_default_args_to_explicit_spec (tree);
static int invalid_nontype_parm_type_p (tree, tsubst_flags_t);
static bool dependent_template_arg_p (tree);
static bool any_template_arguments_need_structural_equality_p (tree);
static bool dependent_type_p_r (tree);
static tree tsubst_copy (tree, tree, tsubst_flags_t, tree);
static tree tsubst_decl (tree, tree, tsubst_flags_t);
static void perform_typedefs_access_check (tree tmpl, tree targs);
static void append_type_to_template_for_access_check_1 (tree, tree, tree,
location_t);
static tree listify (tree);
static tree listify_autos (tree, tree);
static tree tsubst_template_parm (tree, tree, tsubst_flags_t);
static tree instantiate_alias_template (tree, tree, tsubst_flags_t);
static bool complex_alias_template_p (const_tree tmpl);
static tree tsubst_attributes (tree, tree, tsubst_flags_t, tree);
static tree canonicalize_expr_argument (tree, tsubst_flags_t);
/* Make the current scope suitable for access checking when we are
processing T. T can be FUNCTION_DECL for instantiated function
template, VAR_DECL for static member variable, or TYPE_DECL for
alias template (needed by instantiate_decl). */
static void
push_access_scope (tree t)
{
  gcc_assert (VAR_OR_FUNCTION_DECL_P (t)
	      || TREE_CODE (t) == TYPE_DECL);

  /* Friends are checked from the class granting friendship; other
     class members from their own class; everything else from the
     top level.  */
  if (DECL_FRIEND_CONTEXT (t))
    push_nested_class (DECL_FRIEND_CONTEXT (t));
  else if (DECL_CLASS_SCOPE_P (t))
    push_nested_class (DECL_CONTEXT (t));
  else
    push_to_top_level ();

  /* For a function, also make T the current function, saving the old
     one on a stack so pop_access_scope can restore it.  */
  if (TREE_CODE (t) == FUNCTION_DECL)
    {
      saved_access_scope = tree_cons
	(NULL_TREE, current_function_decl, saved_access_scope);
      current_function_decl = t;
    }
}
/* Restore the scope set up by push_access_scope. T is the node we
are processing. */
static void
pop_access_scope (tree t)
{
  /* Restore the function that was current before push_access_scope.  */
  if (TREE_CODE (t) == FUNCTION_DECL)
    {
      current_function_decl = TREE_VALUE (saved_access_scope);
      saved_access_scope = TREE_CHAIN (saved_access_scope);
    }

  /* Mirror the scope push: class scope for members and friends, top
     level otherwise.  */
  if (DECL_FRIEND_CONTEXT (t) || DECL_CLASS_SCOPE_P (t))
    pop_nested_class ();
  else
    pop_from_top_level ();
}
/* Do any processing required when DECL (a member template
declaration) is finished. Returns the TEMPLATE_DECL corresponding
to DECL, unless it is a specialization, in which case the DECL
itself is returned. */
tree
finish_member_template_decl (tree decl)
{
  if (decl == error_mark_node)
    return error_mark_node;

  gcc_assert (DECL_P (decl));

  if (TREE_CODE (decl) == TYPE_DECL)
    {
      tree type;

      type = TREE_TYPE (decl);
      if (type == error_mark_node)
	return error_mark_node;
      /* A member class template (not a specialization): check it and
	 return its TEMPLATE_DECL.  */
      if (MAYBE_CLASS_TYPE_P (type)
	  && CLASSTYPE_TEMPLATE_INFO (type)
	  && !CLASSTYPE_TEMPLATE_SPECIALIZATION (type))
	{
	  tree tmpl = CLASSTYPE_TI_TEMPLATE (type);
	  check_member_template (tmpl);
	  return tmpl;
	}
      return NULL_TREE;
    }
  else if (TREE_CODE (decl) == FIELD_DECL)
    /* Data members can never be templates.  */
    error ("data member %qD cannot be a member template", decl);
  else if (DECL_TEMPLATE_INFO (decl))
    {
      if (!DECL_TEMPLATE_SPECIALIZATION (decl))
	{
	  /* An ordinary member template: return its TEMPLATE_DECL.  */
	  check_member_template (DECL_TI_TEMPLATE (decl));
	  return DECL_TI_TEMPLATE (decl);
	}
      else
	/* A specialization is returned as-is.  */
	return decl;
    }
  else
    error ("invalid member template declaration %qD", decl);

  /* Reached only on the error paths above.  */
  return error_mark_node;
}
/* Create a template info node. */
tree
build_template_info (tree template_decl, tree template_args)
{
  /* Wrap the template and its arguments in a fresh TEMPLATE_INFO.  */
  tree info = make_node (TEMPLATE_INFO);
  TI_TEMPLATE (info) = template_decl;
  TI_ARGS (info) = template_args;
  return info;
}
/* Return the template info node corresponding to T, whatever T is. */
tree
get_template_info (const_tree t)
{
  tree tinfo = NULL_TREE;

  if (!t || t == error_mark_node)
    return NULL;

  /* Namespaces and parameters never carry template info.  */
  if (TREE_CODE (t) == NAMESPACE_DECL
      || TREE_CODE (t) == PARM_DECL)
    return NULL;

  if (DECL_P (t) && DECL_LANG_SPECIFIC (t))
    tinfo = DECL_TEMPLATE_INFO (t);

  /* For an implicit typedef, fall back to the type it names.  */
  if (!tinfo && DECL_IMPLICIT_TYPEDEF_P (t))
    t = TREE_TYPE (t);

  if (OVERLOAD_TYPE_P (t))
    tinfo = TYPE_TEMPLATE_INFO (t);
  else if (TREE_CODE (t) == BOUND_TEMPLATE_TEMPLATE_PARM)
    tinfo = TEMPLATE_TEMPLATE_PARM_TEMPLATE_INFO (t);

  return tinfo;
}
/* Returns the template nesting level of the indicated class TYPE.
For example, in:
template <class T>
struct A
{
template <class U>
struct B {};
};
A<T>::B<U> has depth two, while A<T> has depth one.
Both A<T>::B<int> and A<int>::B<U> have depth one, if
they are instantiations, not specializations.
This function is guaranteed to return 0 if passed NULL_TREE so
that, for example, `template_class_depth (current_class_type)' is
always safe. */
int
template_class_depth (tree type)
{
  int n = 0;

  while (type && TREE_CODE (type) != NAMESPACE_DECL)
    {
      tree ti = get_template_info (type);

      /* Count a level only when it is a primary template whose
	 innermost arguments still use template parameters, i.e. an
	 instantiation rather than a specialization.  */
      if (ti && PRIMARY_TEMPLATE_P (TI_TEMPLATE (ti))
	  && uses_template_parms (INNERMOST_TEMPLATE_ARGS (TI_ARGS (ti))))
	n++;

      /* Step outward to the enclosing context.  */
      type = (DECL_P (type) ? CP_DECL_CONTEXT (type)
	      : LAMBDA_TYPE_P (type) ? LAMBDA_TYPE_EXTRA_SCOPE (type)
	      : CP_TYPE_CONTEXT (type));
    }

  return n;
}
/* Subroutine of maybe_begin_member_template_processing.
Returns true if processing DECL needs us to push template parms. */
static bool
inline_needs_template_parms (tree d, bool in_nsdmi)
{
  /* Nothing to push for a null decl, or for a non-NSDMI decl that
     carries no template information.  */
  if (!d)
    return false;
  if (!in_nsdmi && !DECL_TEMPLATE_INFO (d))
    return false;

  /* Push only if the most general template has more parameter levels
     than are already in scope.  */
  return (TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (most_general_template (d)))
	  > (processing_template_decl + DECL_TEMPLATE_SPECIALIZATION (d)));
}
/* Subroutine of maybe_begin_member_template_processing.
Push the template parms in PARMS, starting from LEVELS steps into the
chain, and ending at the beginning, since template parms are listed
innermost first. */
static void
push_inline_template_parms_recursive (tree parmlist, int levels)
{
  tree parms = TREE_VALUE (parmlist);
  int i;

  /* Recurse first so the outermost level is pushed first; the parm
     chain is stored innermost-first.  */
  if (levels > 1)
    push_inline_template_parms_recursive (TREE_CHAIN (parmlist), levels - 1);

  ++processing_template_decl;
  current_template_parms
    = tree_cons (size_int (processing_template_decl),
		 parms, current_template_parms);
  TEMPLATE_PARMS_FOR_INLINE (current_template_parms) = 1;

  /* Open a binding level for the parameters; an empty TREE_VEC marks
     an explicit-specialization header (template <>).  */
  begin_scope (TREE_VEC_LENGTH (parms) ? sk_template_parms : sk_template_spec,
	       NULL);
  for (i = 0; i < TREE_VEC_LENGTH (parms); ++i)
    {
      tree parm = TREE_VALUE (TREE_VEC_ELT (parms, i));

      if (error_operand_p (parm))
	continue;

      gcc_assert (DECL_P (parm));

      switch (TREE_CODE (parm))
	{
	case TYPE_DECL:
	case TEMPLATE_DECL:
	  pushdecl (parm);
	  break;

	case PARM_DECL:
	  /* Push the CONST_DECL.  */
	  pushdecl (TEMPLATE_PARM_DECL (DECL_INITIAL (parm)));
	  break;

	default:
	  gcc_unreachable ();
	}
    }
}
/* Restore the template parameter context for a member template, a
friend template defined in a class definition, or a non-template
member of template class. */
void
maybe_begin_member_template_processing (tree decl)
{
  tree parms;
  int levels = 0;
  bool nsdmi = TREE_CODE (decl) == FIELD_DECL;

  /* For a non-static data member initializer, work from the enclosing
     class's template instead of the field itself.  */
  if (nsdmi)
    {
      tree ctx = DECL_CONTEXT (decl);
      decl = (CLASSTYPE_TEMPLATE_INFO (ctx)
	      /* Disregard full specializations (c++/60999).  */
	      && uses_template_parms (ctx)
	      ? CLASSTYPE_TI_TEMPLATE (ctx) : NULL_TREE);
    }

  if (inline_needs_template_parms (decl, nsdmi))
    {
      parms = DECL_TEMPLATE_PARMS (most_general_template (decl));
      levels = TMPL_PARMS_DEPTH (parms) - processing_template_decl;

      /* A specialization contributes one level fewer; skip the
	 innermost parameter list.  */
      if (DECL_TEMPLATE_SPECIALIZATION (decl))
	{
	  --levels;
	  parms = TREE_CHAIN (parms);
	}

      push_inline_template_parms_recursive (parms, levels);
    }

  /* Remember how many levels of template parameters we pushed so that
     we can pop them later.  */
  inline_parm_levels.safe_push (levels);
}
/* Undo the effects of maybe_begin_member_template_processing. */
void
maybe_end_member_template_processing (void)
{
  /* Nothing recorded: nothing to undo.  */
  if (inline_parm_levels.length () == 0)
    return;

  /* Pop exactly as many parameter levels as the matching
     maybe_begin_member_template_processing pushed.  */
  int levels = inline_parm_levels.pop ();
  while (levels-- > 0)
    {
      --processing_template_decl;
      current_template_parms = TREE_CHAIN (current_template_parms);
      poplevel (0, 0, 0);
    }
}
/* Return a new template argument vector which contains all of ARGS,
   but has as its innermost set of arguments the EXTRA_ARGS.  */
static tree
add_to_template_args (tree args, tree extra_args)
{
  tree new_args;
  int extra_depth;
  int i;
  int j;

  /* With no ARGS (or erroneous EXTRA_ARGS) there is nothing to merge.  */
  if (args == NULL_TREE || extra_args == error_mark_node)
    return extra_args;

  extra_depth = TMPL_ARGS_DEPTH (extra_args);
  new_args = make_tree_vec (TMPL_ARGS_DEPTH (args) + extra_depth);

  /* Copy the ARGS levels first (the outermost levels)...  */
  for (i = 1; i <= TMPL_ARGS_DEPTH (args); ++i)
    SET_TMPL_ARGS_LEVEL (new_args, i, TMPL_ARGS_LEVEL (args, i));

  /* ...then append the EXTRA_ARGS levels as the innermost ones.  */
  for (j = 1; j <= extra_depth; ++j, ++i)
    SET_TMPL_ARGS_LEVEL (new_args, i, TMPL_ARGS_LEVEL (extra_args, j));

  return new_args;
}
/* Like add_to_template_args, but only the outermost ARGS are added to
   the EXTRA_ARGS.  In particular, all but TMPL_ARGS_DEPTH
   (EXTRA_ARGS) levels are added.  This function is used to combine
   the template arguments from a partial instantiation with the
   template arguments used to attain the full instantiation from the
   partial instantiation.  */
static tree
add_outermost_template_args (tree args, tree extra_args)
{
  tree new_args;

  /* If there are more levels of EXTRA_ARGS than there are ARGS,
     something very fishy is going on.  */
  gcc_assert (TMPL_ARGS_DEPTH (args) >= TMPL_ARGS_DEPTH (extra_args));

  /* If *all* the new arguments will be the EXTRA_ARGS, just return
     them.  */
  if (TMPL_ARGS_DEPTH (args) == TMPL_ARGS_DEPTH (extra_args))
    return extra_args;

  /* For the moment, we make ARGS look like it contains fewer levels.
     NOTE(review): this temporarily mutates ARGS in place; it is restored
     immediately below, before control leaves this function.  */
  TREE_VEC_LENGTH (args) -= TMPL_ARGS_DEPTH (extra_args);
  new_args = add_to_template_args (args, extra_args);

  /* Now, we restore ARGS to its full dimensions.  */
  TREE_VEC_LENGTH (args) += TMPL_ARGS_DEPTH (extra_args);

  return new_args;
}
/* Return the N levels of innermost template arguments from the ARGS.  */
tree
get_innermost_template_args (tree args, int n)
{
  gcc_assert (n >= 0);

  /* A single level is just the innermost vector itself.  */
  if (n == 1)
    return TMPL_ARGS_LEVEL (args, TMPL_ARGS_DEPTH (args));

  /* Count how many outer levels would be dropped.  */
  int outer = TMPL_ARGS_DEPTH (args) - n;
  gcc_assert (outer >= 0);

  /* Dropping nothing: hand back ARGS unchanged.  */
  if (outer == 0)
    return args;

  /* Copy the innermost N levels into a fresh vector.  */
  tree result = make_tree_vec (n);
  for (int level = 1; level <= n; ++level)
    SET_TMPL_ARGS_LEVEL (result, level,
			 TMPL_ARGS_LEVEL (args, level + outer));

  return result;
}
/* The inverse of get_innermost_template_args: Return all but the innermost
   EXTRA_LEVELS levels of template arguments from the ARGS.  */
static tree
strip_innermost_template_args (tree args, int extra_levels)
{
  /* Number of levels that survive the stripping.  */
  int keep = TMPL_ARGS_DEPTH (args) - extra_levels;
  gcc_assert (keep >= 0);

  /* A single surviving level is just the outermost vector itself.  */
  if (keep == 1)
    return TMPL_ARGS_LEVEL (args, 1);

  gcc_assert (extra_levels >= 0);

  /* Stripping nothing: hand back ARGS unchanged.  */
  if (extra_levels == 0)
    return args;

  /* Copy the outermost KEEP levels into a fresh vector.  */
  tree result = make_tree_vec (keep);
  for (int level = 1; level <= keep; ++level)
    SET_TMPL_ARGS_LEVEL (result, level,
			 TMPL_ARGS_LEVEL (args, level));

  return result;
}
/* We've got a template header coming up; push to a new level for storing
   the parms.  */
void
begin_template_parm_list (void)
{
  /* We use a non-tag-transparent scope here, which causes pushtag to
     put tags in this scope, rather than in the enclosing class or
     namespace scope.  This is the right thing, since we want
     TEMPLATE_DECLS, and not TYPE_DECLS for template classes.  For a
     global template class, push_template_decl handles putting the
     TEMPLATE_DECL into top-level scope.  For a nested template class,
     e.g.:

       template <class T> struct S1 {
         template <class T> struct S2 {};
       };

     pushtag contains special code to call pushdecl_with_scope on the
     TEMPLATE_DECL for S2.  */
  begin_scope (sk_template_parms, NULL);
  ++processing_template_decl;
  ++processing_template_parmlist;
  note_template_header (0);

  /* Add a dummy parameter level while we process the parameter list;
     end_template_parm_list (outside this chunk) presumably replaces it
     with the real parameters -- TODO confirm.  */
  current_template_parms
    = tree_cons (size_int (processing_template_decl),
		 make_tree_vec (0),
		 current_template_parms);
}
/* This routine is called when a specialization is declared.  If it is
   invalid to declare a specialization here, an error is reported and
   false is returned, otherwise this routine will return true.  */
static bool
check_specialization_scope (void)
{
  tree scope = current_scope ();

  /* [temp.expl.spec]

     An explicit specialization shall be declared in the namespace of
     which the template is a member, or, for member templates, in the
     namespace of which the enclosing class or enclosing class
     template is a member.  An explicit specialization of a member
     function, member class or static data member of a class template
     shall be declared in the namespace of which the class template
     is a member.  */
  if (scope && TREE_CODE (scope) != NAMESPACE_DECL)
    {
      error ("explicit specialization in non-namespace scope %qD", scope);
      return false;
    }

  /* [temp.expl.spec]

     In an explicit specialization declaration for a member of a class
     template or a member template that appears in namespace scope,
     the member template and some of its enclosing class templates may
     remain unspecialized, except that the declaration shall not
     explicitly specialize a class member template if its enclosing
     class templates are not explicitly specialized as well.  */
  if (current_template_parms)
    {
      error ("enclosing class templates are not explicitly specialized");
      return false;
    }

  return true;
}
/* We've just seen template <>.  Enter the corresponding scope, record
   the header, and return whether an explicit specialization is valid
   in the current context.  */
bool
begin_specialization (void)
{
  begin_scope (sk_template_spec, NULL);
  note_template_header (1);
  return check_specialization_scope ();
}
/* Called at the end of processing a declaration preceded by
   template<>.  Leaves the specialization scope and clears the
   bookkeeping set up by begin_specialization.  */
void
end_specialization (void)
{
  finish_scope ();
  reset_specialization ();
}
/* Any template <>'s that we have seen thus far are not referring to a
   function specialization; clear the corresponding bookkeeping.  */
void
reset_specialization (void)
{
  template_header_count = 0;
  processing_specialization = 0;
}
/* We've just seen a template header.  If SPECIALIZATION is nonzero,
   it was of the form template <>.  */
static void
note_template_header (int specialization)
{
  ++template_header_count;
  processing_specialization = specialization;
}
/* We're beginning an explicit instantiation.  Explicit instantiations
   do not nest, hence the assert.  */
void
begin_explicit_instantiation (void)
{
  gcc_assert (!processing_explicit_instantiation);
  processing_explicit_instantiation = true;
}
/* We're done with the explicit instantiation started by
   begin_explicit_instantiation.  */
void
end_explicit_instantiation (void)
{
  gcc_assert (processing_explicit_instantiation);
  processing_explicit_instantiation = false;
}
/* An explicit specialization or partial specialization of TMPL is being
   declared.  Check that the namespace in which the specialization is
   occurring is permissible.  Returns false iff it is invalid to
   specialize TMPL in the current namespace.  May emit a permerror (a
   pedantic, -fpermissive-downgradable error) rather than a hard error
   for the wrong-namespace case.  */
static bool
check_specialization_namespace (tree tmpl)
{
  tree tpl_ns = decl_namespace_context (tmpl);

  /* [tmpl.expl.spec]

     An explicit specialization shall be declared in a namespace enclosing the
     specialized template.  An explicit specialization whose declarator-id is
     not qualified shall be declared in the nearest enclosing namespace of the
     template, or, if the namespace is inline (7.3.1), any namespace from its
     enclosing namespace set.  */
  if (current_scope() != DECL_CONTEXT (tmpl)
      && !at_namespace_scope_p ())
    {
      error ("specialization of %qD must appear at namespace scope", tmpl);
      return false;
    }

  /* Before C++11 the rule was "associated" namespace; C++11 relaxed it
     to any enclosing namespace.  */
  if (cxx_dialect < cxx11
      ? is_associated_namespace (current_namespace, tpl_ns)
      : is_ancestor (current_namespace, tpl_ns))
    /* Same or enclosing namespace.  */
    return true;
  else
    {
      permerror (input_location,
		 "specialization of %qD in different namespace", tmpl);
      inform (DECL_SOURCE_LOCATION (tmpl),
	      " from definition of %q#D", tmpl);
      return false;
    }
}
/* SPEC is an explicit instantiation.  Check that it is valid to
   perform this explicit instantiation in the current namespace.
   Diagnoses (via permerror) but does not reject outright.  */
static void
check_explicit_instantiation_namespace (tree spec)
{
  tree ns;

  /* DR 275: An explicit instantiation shall appear in an enclosing
     namespace of its template.  */
  ns = decl_namespace_context (spec);
  if (!is_ancestor (current_namespace, ns))
    permerror (input_location, "explicit instantiation of %qD in namespace %qD "
	       "(which does not enclose namespace %qD)",
	       spec, current_namespace, ns);
}
// Returns the type of a template specialization only if that
// specialization needs to be defined. Otherwise (e.g., if the type has
// already been defined), the function returns NULL_TREE.
static tree
maybe_new_partial_specialization (tree type)
{
  // An implicit instantiation of an incomplete type implies
  // the definition of a new class template.
  //
  //    template<typename T>
  //      struct S;
  //
  //    template<typename T>
  //      struct S<T*>;
  //
  // Here, S<T*> is an implicit instantiation of S whose type
  // is incomplete.
  if (CLASSTYPE_IMPLICIT_INSTANTIATION (type) && !COMPLETE_TYPE_P (type))
    return type;

  // It can also be the case that TYPE is a completed specialization.
  // Continuing the previous example, suppose we also declare:
  //
  //    template<typename T>
  //      requires Integral<T>
  //        struct S<T*>;
  //
  // Here, S<T*> refers to the specialization S<T*> defined
  // above. However, we need to differentiate definitions because
  // we intend to define a new partial specialization. In this case,
  // we rely on the fact that the constraints are different for
  // this declaration than that above.
  //
  // Note that we also get here for injected class names and
  // late-parsed template definitions. We must ensure that we
  // do not create new type declarations for those cases.
  if (flag_concepts && CLASSTYPE_TEMPLATE_SPECIALIZATION (type))
    {
      tree tmpl = CLASSTYPE_TI_TEMPLATE (type);
      tree args = CLASSTYPE_TI_ARGS (type);

      // If there are no template parameters, this cannot be a new
      // partial template specialization.
      if (!current_template_parms)
	return NULL_TREE;

      // The injected-class-name is not a new partial specialization.
      if (DECL_SELF_REFERENCE_P (TYPE_NAME (type)))
	return NULL_TREE;

      // If the constraints are not the same as those of the primary
      // then, we can probably create a new specialization.
      tree type_constr = current_template_constraints ();

      if (type == TREE_TYPE (tmpl))
	{
	  tree main_constr = get_constraints (tmpl);
	  if (equivalent_constraints (type_constr, main_constr))
	    return NULL_TREE;
	}

      // Also, if there's a pre-existing specialization with matching
      // constraints, then this also isn't new.
      tree specs = DECL_TEMPLATE_SPECIALIZATIONS (tmpl);
      while (specs)
	{
	  tree spec_tmpl = TREE_VALUE (specs);
	  tree spec_args = TREE_PURPOSE (specs);
	  tree spec_constr = get_constraints (spec_tmpl);
	  if (comp_template_args (args, spec_args)
	      && equivalent_constraints (type_constr, spec_constr))
	    return NULL_TREE;
	  specs = TREE_CHAIN (specs);
	}

      // Create a new type node (and corresponding type decl)
      // for the newly declared specialization.
      tree t = make_class_type (TREE_CODE (type));
      CLASSTYPE_DECLARED_CLASS (t) = CLASSTYPE_DECLARED_CLASS (type);
      SET_TYPE_TEMPLATE_INFO (t, build_template_info (tmpl, args));

      /* We only need a separate type node for storing the definition of this
	 partial specialization; uses of S<T*> are unconstrained, so all are
	 equivalent.  So keep TYPE_CANONICAL the same.  */
      TYPE_CANONICAL (t) = TYPE_CANONICAL (type);

      // Build the corresponding type decl.
      tree d = create_implicit_typedef (DECL_NAME (tmpl), t);
      DECL_CONTEXT (d) = TYPE_CONTEXT (t);
      DECL_SOURCE_LOCATION (d) = input_location;

      return t;
    }

  return NULL_TREE;
}
/* The TYPE is being declared.  If it is a template type, that means it
   is a partial specialization.  Do appropriate error-checking.
   Returns TYPE (possibly the type of a freshly pushed declaration) on
   success, or error_mark_node on failure.  */
tree
maybe_process_partial_specialization (tree type)
{
  tree context;

  if (type == error_mark_node)
    return error_mark_node;

  /* A lambda that appears in specialization context is not itself a
     specialization.  */
  if (CLASS_TYPE_P (type) && CLASSTYPE_LAMBDA_EXPR (type))
    return type;

  if (TREE_CODE (type) == BOUND_TEMPLATE_TEMPLATE_PARM)
    {
      error ("name of class shadows template template parameter %qD",
	     TYPE_NAME (type));
      return error_mark_node;
    }

  context = TYPE_CONTEXT (type);

  /* Alias templates cannot be specialized at all.  */
  if (TYPE_ALIAS_P (type))
    {
      tree tinfo = TYPE_ALIAS_TEMPLATE_INFO (type);

      if (tinfo && DECL_ALIAS_TEMPLATE_P (TI_TEMPLATE (tinfo)))
	error ("specialization of alias template %qD",
	       TI_TEMPLATE (tinfo));
      else
	error ("explicit specialization of non-template %qT", type);
      return error_mark_node;
    }
  else if (CLASS_TYPE_P (type) && CLASSTYPE_USE_TEMPLATE (type))
    {
      /* This is for ordinary explicit specialization and partial
	 specialization of a template class such as:

	   template <> class C<int>;

	 or:

	   template <class T> class C<T*>;

	 Make sure that `C<int>' and `C<T*>' are implicit instantiations.  */

      if (tree t = maybe_new_partial_specialization (type))
	{
	  if (!check_specialization_namespace (CLASSTYPE_TI_TEMPLATE (t))
	      && !at_namespace_scope_p ())
	    return error_mark_node;
	  SET_CLASSTYPE_TEMPLATE_SPECIALIZATION (t);
	  DECL_SOURCE_LOCATION (TYPE_MAIN_DECL (t)) = input_location;
	  if (processing_template_decl)
	    {
	      tree decl = push_template_decl (TYPE_MAIN_DECL (t));
	      if (decl == error_mark_node)
		return error_mark_node;
	      return TREE_TYPE (decl);
	    }
	}
      else if (CLASSTYPE_TEMPLATE_INSTANTIATION (type))
	error ("specialization of %qT after instantiation", type);
      else if (errorcount && !processing_specialization
	       && CLASSTYPE_TEMPLATE_SPECIALIZATION (type)
	       && !uses_template_parms (CLASSTYPE_TI_ARGS (type)))
	/* Trying to define a specialization either without a template<> header
	   or in an inappropriate place.  We've already given an error, so just
	   bail now so we don't actually define the specialization.  */
	return error_mark_node;
    }
  else if (CLASS_TYPE_P (type)
	   && !CLASSTYPE_USE_TEMPLATE (type)
	   && CLASSTYPE_TEMPLATE_INFO (type)
	   && context && CLASS_TYPE_P (context)
	   && CLASSTYPE_TEMPLATE_INFO (context))
    {
      /* This is for an explicit specialization of member class
	 template according to [temp.expl.spec/18]:

	   template <> template <class U> class C<int>::D;

	 The context `C<int>' must be an implicit instantiation.
	 Otherwise this is just a member class template declared
	 earlier like:

	   template <> class C<int> { template <class U> class D; };
	   template <> template <class U> class C<int>::D;

	 In the first case, `C<int>::D' is a specialization of `C<T>::D'
	 while in the second case, `C<int>::D' is a primary template
	 and `C<T>::D' may not exist.  */

      if (CLASSTYPE_IMPLICIT_INSTANTIATION (context)
	  && !COMPLETE_TYPE_P (type))
	{
	  tree t;
	  tree tmpl = CLASSTYPE_TI_TEMPLATE (type);

	  if (current_namespace
	      != decl_namespace_context (tmpl))
	    {
	      permerror (input_location,
			 "specializing %q#T in different namespace", type);
	      permerror (DECL_SOURCE_LOCATION (tmpl),
			 " from definition of %q#D", tmpl);
	    }

	  /* Check for invalid specialization after instantiation:

	       template <> template <> class C<int>::D<int>;
	       template <> template <class U> class C<int>::D;  */

	  for (t = DECL_TEMPLATE_INSTANTIATIONS (tmpl);
	       t; t = TREE_CHAIN (t))
	    {
	      tree inst = TREE_VALUE (t);
	      if (CLASSTYPE_TEMPLATE_SPECIALIZATION (inst)
		  || !COMPLETE_OR_OPEN_TYPE_P (inst))
		{
		  /* We already have a full specialization of this partial
		     instantiation, or a full specialization has been
		     looked up but not instantiated.  Reassign it to the
		     new member specialization template.  */
		  spec_entry elt;
		  spec_entry *entry;

		  elt.tmpl = most_general_template (tmpl);
		  elt.args = CLASSTYPE_TI_ARGS (inst);
		  elt.spec = inst;

		  type_specializations->remove_elt (&elt);

		  elt.tmpl = tmpl;
		  elt.args = INNERMOST_TEMPLATE_ARGS (elt.args);

		  spec_entry **slot
		    = type_specializations->find_slot (&elt, INSERT);
		  entry = ggc_alloc<spec_entry> ();
		  *entry = elt;
		  *slot = entry;
		}
	      else
		/* But if we've had an implicit instantiation, that's a
		   problem ([temp.expl.spec]/6).  */
		error ("specialization %qT after instantiation %qT",
		       type, inst);
	    }

	  /* Mark TYPE as a specialization.  And as a result, we only
	     have one level of template argument for the innermost
	     class template.  */
	  SET_CLASSTYPE_TEMPLATE_SPECIALIZATION (type);
	  DECL_SOURCE_LOCATION (TYPE_MAIN_DECL (type)) = input_location;
	  CLASSTYPE_TI_ARGS (type)
	    = INNERMOST_TEMPLATE_ARGS (CLASSTYPE_TI_ARGS (type));
	}
    }
  else if (processing_specialization)
    {
      /* Someday C++0x may allow for enum template specialization.  */
      if (cxx_dialect > cxx98 && TREE_CODE (type) == ENUMERAL_TYPE
	  && CLASS_TYPE_P (context) && CLASSTYPE_USE_TEMPLATE (context))
	pedwarn (input_location, OPT_Wpedantic, "template specialization "
		 "of %qD not allowed by ISO C++", type);
      else
	{
	  error ("explicit specialization of non-template %qT", type);
	  return error_mark_node;
	}
    }

  return type;
}
/* Returns nonzero if we can optimize the retrieval of specializations
   for TMPL, a TEMPLATE_DECL.  In particular, for such a template, we
   do not use DECL_TEMPLATE_SPECIALIZATIONS at all.  */
static inline bool
optimize_specialization_lookup_p (tree tmpl)
{
  /* Only member function templates qualify for the optimized path.  */
  if (!DECL_FUNCTION_TEMPLATE_P (tmpl))
    return false;
  if (!DECL_CLASS_SCOPE_P (tmpl))
    return false;

  /* DECL_CLASS_SCOPE_P holds of T::f even if T is a template
     parameter.  */
  if (!CLASS_TYPE_P (DECL_CONTEXT (tmpl)))
    return false;

  /* The optimized lookup depends on the fact that the template
     arguments for the member function template apply purely to the
     containing class, which is not true if the containing class is an
     explicit or partial specialization.  */
  if (CLASSTYPE_TEMPLATE_SPECIALIZATION (DECL_CONTEXT (tmpl)))
    return false;

  if (DECL_MEMBER_TEMPLATE_P (tmpl))
    return false;
  if (DECL_CONV_FN_P (tmpl))
    return false;

  /* It is possible to have a template that is not a member template
     and is not a member of a template class:

       template <typename T>
       struct S { friend A::f(); };

     Here, the friend function is a template, but the context does not
     have template information.  The optimized lookup relies on having
     ARGS be the template arguments for both the class and the
     function template.  */
  if (DECL_FRIEND_P (DECL_TEMPLATE_RESULT (tmpl)))
    return false;

  return true;
}
/* Make sure ARGS doesn't use any inappropriate typedefs; we should have
   gone through coerce_template_parms by now.  Checking-only helper
   (called under flag_checking); aborts via gcc_assert on violation.  */
static void
verify_unstripped_args (tree args)
{
  /* Temporarily treat the arguments as being in template context so
     the dependency check below behaves as during substitution.  */
  ++processing_template_decl;
  if (!any_dependent_template_arguments_p (args))
    {
      tree inner = INNERMOST_TEMPLATE_ARGS (args);
      for (int i = 0; i < TREE_VEC_LENGTH (inner); ++i)
	{
	  tree arg = TREE_VEC_ELT (inner, i);
	  if (TREE_CODE (arg) == TEMPLATE_DECL)
	    /* OK */;
	  else if (TYPE_P (arg))
	    gcc_assert (strip_typedefs (arg, NULL) == arg);
	  else if (strip_typedefs (TREE_TYPE (arg), NULL) != TREE_TYPE (arg))
	    /* Allow typedefs on the type of a non-type argument, since a
	       parameter can have them.  */;
	  else
	    gcc_assert (strip_typedefs_expr (arg, NULL) == arg);
	}
    }
  --processing_template_decl;
}
/* Retrieve the specialization (in the sense of [temp.spec] - a
   specialization is either an instantiation or an explicit
   specialization) of TMPL for the given template ARGS.  If there is
   no such specialization, return NULL_TREE.  The ARGS are a vector of
   arguments, or a vector of vectors of arguments, in the case of
   templates with more than one level of parameters.

   If TMPL is a type template and CLASS_SPECIALIZATIONS_P is true,
   then we search for a partial specialization matching ARGS.  This
   parameter is ignored if TMPL is not a class template.

   We can also look up a FIELD_DECL, if it is a lambda capture pack; the
   result is a NONTYPE_ARGUMENT_PACK.

   HASH, if nonzero, is the precomputed hash of (TMPL, ARGS); pass 0 to
   have it computed here.  */
static tree
retrieve_specialization (tree tmpl, tree args, hashval_t hash)
{
  if (tmpl == NULL_TREE)
    return NULL_TREE;

  if (args == error_mark_node)
    return NULL_TREE;

  gcc_assert (TREE_CODE (tmpl) == TEMPLATE_DECL
	      || TREE_CODE (tmpl) == FIELD_DECL);

  /* There should be as many levels of arguments as there are
     levels of parameters.  */
  gcc_assert (TMPL_ARGS_DEPTH (args)
	      == (TREE_CODE (tmpl) == TEMPLATE_DECL
		  ? TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (tmpl))
		  : template_class_depth (DECL_CONTEXT (tmpl))));

  if (flag_checking)
    verify_unstripped_args (args);

  if (optimize_specialization_lookup_p (tmpl))
    {
      /* Fast path: don't consult the hash tables at all; find the
	 instantiated member in the class specialization's method
	 vector instead.  */
      tree class_template;
      tree class_specialization;
      vec<tree, va_gc> *methods;
      tree fns;
      int idx;

      /* The template arguments actually apply to the containing
	 class.  Find the class specialization with those
	 arguments.  */
      class_template = CLASSTYPE_TI_TEMPLATE (DECL_CONTEXT (tmpl));
      class_specialization
	= retrieve_specialization (class_template, args, 0);
      if (!class_specialization)
	return NULL_TREE;

      /* Now, find the appropriate entry in the CLASSTYPE_METHOD_VEC
	 for the specialization.  */
      idx = class_method_index_for_fn (class_specialization, tmpl);
      if (idx == -1)
	return NULL_TREE;

      /* Iterate through the methods with the indicated name, looking
	 for the one that has an instance of TMPL.  */
      methods = CLASSTYPE_METHOD_VEC (class_specialization);
      for (fns = (*methods)[idx]; fns; fns = OVL_NEXT (fns))
	{
	  tree fn = OVL_CURRENT (fns);
	  if (DECL_TEMPLATE_INFO (fn) && DECL_TI_TEMPLATE (fn) == tmpl
	      /* using-declarations can add base methods to the method vec,
		 and we don't want those here.  */
	      && DECL_CONTEXT (fn) == class_specialization)
	    return fn;
	}
      return NULL_TREE;
    }
  else
    {
      /* General path: probe the type or decl specialization table.  */
      spec_entry *found;
      spec_entry elt;
      hash_table<spec_hasher> *specializations;

      elt.tmpl = tmpl;
      elt.args = args;
      elt.spec = NULL_TREE;

      if (DECL_CLASS_TEMPLATE_P (tmpl))
	specializations = type_specializations;
      else
	specializations = decl_specializations;

      if (hash == 0)
	hash = spec_hasher::hash (&elt);

      found = specializations->find_with_hash (&elt, hash);
      if (found)
	return found->spec;
    }

  return NULL_TREE;
}
/* Like retrieve_specialization, but for local declarations.  Returns
   NULL_TREE when no local specialization table exists or TMPL has no
   entry in it.  */
tree
retrieve_local_specialization (tree tmpl)
{
  if (local_specializations == NULL)
    return NULL_TREE;

  if (tree *entry = local_specializations->get (tmpl))
    return *entry;
  return NULL_TREE;
}
/* Returns nonzero iff DECL is a specialization of TMPL.  */
int
is_specialization_of (tree decl, tree tmpl)
{
  if (TREE_CODE (decl) == FUNCTION_DECL)
    {
      /* Walk up the chain of templates DECL was instantiated from,
	 looking for TMPL itself.  */
      tree t = decl;
      while (t != NULL_TREE)
	{
	  if (t == tmpl)
	    return 1;
	  t = DECL_TEMPLATE_INFO (t) ? DECL_TI_TEMPLATE (t) : NULL_TREE;
	}
    }
  else
    {
      gcc_assert (TREE_CODE (decl) == TYPE_DECL);

      /* Walk up the chain of class templates this type came from,
	 comparing types rather than decls.  */
      tree t = TREE_TYPE (decl);
      while (t != NULL_TREE)
	{
	  if (same_type_ignoring_top_level_qualifiers_p (t, TREE_TYPE (tmpl)))
	    return 1;
	  t = (CLASSTYPE_USE_TEMPLATE (t)
	       ? TREE_TYPE (CLASSTYPE_TI_TEMPLATE (t)) : NULL_TREE);
	}
    }

  return 0;
}
/* Returns nonzero iff DECL is a specialization of friend declaration
   FRIEND_DECL according to [temp.friend].  */
bool
is_specialization_of_friend (tree decl, tree friend_decl)
{
  bool need_template = true;
  int template_depth;

  gcc_assert (TREE_CODE (decl) == FUNCTION_DECL
	      || TREE_CODE (decl) == TYPE_DECL);

  /* For [temp.friend/6] when FRIEND_DECL is an ordinary member function
     of a template class, we want to check if DECL is a specialization
     of this.  */
  if (TREE_CODE (friend_decl) == FUNCTION_DECL
      && DECL_TEMPLATE_INFO (friend_decl)
      && !DECL_USE_TEMPLATE (friend_decl))
    {
      /* We want a TEMPLATE_DECL for `is_specialization_of'.  */
      friend_decl = DECL_TI_TEMPLATE (friend_decl);
      need_template = false;
    }
  else if (TREE_CODE (friend_decl) == TEMPLATE_DECL
	   && !PRIMARY_TEMPLATE_P (friend_decl))
    need_template = false;

  /* There is nothing to do if this is not a template friend.  */
  if (TREE_CODE (friend_decl) != TEMPLATE_DECL)
    return false;

  if (is_specialization_of (decl, friend_decl))
    return true;

  /* [temp.friend/6]

     A member of a class template may be declared to be a friend of a
     non-template class.  In this case, the corresponding member of
     every specialization of the class template is a friend of the
     class granting friendship.

     For example, given a template friend declaration

       template <class T> friend void A<T>::f();

     the member function below is considered a friend

       template <> struct A<int> {
	 void f();
       };

     For this type of template friend, TEMPLATE_DEPTH below will be
     nonzero.  To determine if DECL is a friend of FRIEND, we first
     check if the enclosing class is a specialization of another.  */

  template_depth = template_class_depth (CP_DECL_CONTEXT (friend_decl));
  if (template_depth
      && DECL_CLASS_SCOPE_P (decl)
      && is_specialization_of (TYPE_NAME (DECL_CONTEXT (decl)),
			       CLASSTYPE_TI_TEMPLATE (DECL_CONTEXT (friend_decl))))
    {
      /* Next, we check the members themselves.  In order to handle
	 a few tricky cases, such as when FRIEND_DECL's are

	   template <class T> friend void A<T>::g(T t);
	   template <class T> template <T t> friend void A<T>::h();

	 and DECL's are

	   void A<int>::g(int);
	   template <int> void A<int>::h();

	 we need to figure out ARGS, the template arguments from
	 the context of DECL.  This is required for template substitution
	 of `T' in the function parameter of `g' and template parameter
	 of `h' in the above examples.  Here ARGS corresponds to `int'.  */

      tree context = DECL_CONTEXT (decl);
      tree args = NULL_TREE;
      int current_depth = 0;

      /* Accumulate the template arguments of each enclosing class
	 level, outermost levels first, until we have TEMPLATE_DEPTH
	 levels.  */
      while (current_depth < template_depth)
	{
	  if (CLASSTYPE_TEMPLATE_INFO (context))
	    {
	      if (current_depth == 0)
		args = TYPE_TI_ARGS (context);
	      else
		args = add_to_template_args (TYPE_TI_ARGS (context), args);
	      current_depth++;
	    }
	  context = TYPE_CONTEXT (context);
	}

      if (TREE_CODE (decl) == FUNCTION_DECL)
	{
	  bool is_template;
	  tree friend_type;
	  tree decl_type;
	  tree friend_args_type;
	  tree decl_args_type;

	  /* Make sure that both DECL and FRIEND_DECL are templates or
	     non-templates.  */
	  is_template = DECL_TEMPLATE_INFO (decl)
			&& PRIMARY_TEMPLATE_P (DECL_TI_TEMPLATE (decl));
	  if (need_template ^ is_template)
	    return false;
	  else if (is_template)
	    {
	      /* If both are templates, check template parameter list.  */
	      tree friend_parms
		= tsubst_template_parms (DECL_TEMPLATE_PARMS (friend_decl),
					 args, tf_none);
	      if (!comp_template_parms
		     (DECL_TEMPLATE_PARMS (DECL_TI_TEMPLATE (decl)),
		      friend_parms))
		return false;

	      decl_type = TREE_TYPE (DECL_TI_TEMPLATE (decl));
	    }
	  else
	    decl_type = TREE_TYPE (decl);

	  friend_type = tsubst_function_type (TREE_TYPE (friend_decl), args,
					      tf_none, NULL_TREE);
	  if (friend_type == error_mark_node)
	    return false;

	  /* Check if return types match.  */
	  if (!same_type_p (TREE_TYPE (decl_type), TREE_TYPE (friend_type)))
	    return false;

	  /* Check if function parameter types match, ignoring the
	     `this' parameter.  */
	  friend_args_type = TYPE_ARG_TYPES (friend_type);
	  decl_args_type = TYPE_ARG_TYPES (decl_type);
	  if (DECL_NONSTATIC_MEMBER_FUNCTION_P (friend_decl))
	    friend_args_type = TREE_CHAIN (friend_args_type);
	  if (DECL_NONSTATIC_MEMBER_FUNCTION_P (decl))
	    decl_args_type = TREE_CHAIN (decl_args_type);

	  return compparms (decl_args_type, friend_args_type);
	}
      else
	{
	  /* DECL is a TYPE_DECL */
	  bool is_template;
	  tree decl_type = TREE_TYPE (decl);

	  /* Make sure that both DECL and FRIEND_DECL are templates or
	     non-templates.  */
	  is_template
	    = CLASSTYPE_TEMPLATE_INFO (decl_type)
	      && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (decl_type));

	  if (need_template ^ is_template)
	    return false;
	  else if (is_template)
	    {
	      tree friend_parms;
	      /* If both are templates, check the name of the two
		 TEMPLATE_DECL's first because is_friend didn't.  */
	      if (DECL_NAME (CLASSTYPE_TI_TEMPLATE (decl_type))
		  != DECL_NAME (friend_decl))
		return false;

	      /* Now check template parameter list.  */
	      friend_parms
		= tsubst_template_parms (DECL_TEMPLATE_PARMS (friend_decl),
					 args, tf_none);
	      return comp_template_parms
		       (DECL_TEMPLATE_PARMS (CLASSTYPE_TI_TEMPLATE (decl_type)),
			friend_parms);
	    }
	  else
	    return (DECL_NAME (decl)
		    == DECL_NAME (friend_decl));
	}
    }
  return false;
}
/* Register the specialization SPEC as a specialization of TMPL with
   the indicated ARGS.  IS_FRIEND indicates whether the specialization
   is actually just a friend declaration.  Returns SPEC, or an
   equivalent prior declaration, if available.  HASH, if nonzero, is
   the precomputed hash of (TMPL, ARGS).

   We also store instantiations of field packs in the hash table, even
   though they are not themselves templates, to make lookup easier.  */
static tree
register_specialization (tree spec, tree tmpl, tree args, bool is_friend,
			 hashval_t hash)
{
  tree fn;
  spec_entry **slot = NULL;
  spec_entry elt;

  gcc_assert ((TREE_CODE (tmpl) == TEMPLATE_DECL && DECL_P (spec))
	      || (TREE_CODE (tmpl) == FIELD_DECL
		  && TREE_CODE (spec) == NONTYPE_ARGUMENT_PACK));

  if (TREE_CODE (spec) == FUNCTION_DECL
      && uses_template_parms (DECL_TI_ARGS (spec)))
    /* This is the FUNCTION_DECL for a partial instantiation.  Don't
       register it; we want the corresponding TEMPLATE_DECL instead.
       We use `uses_template_parms (DECL_TI_ARGS (spec))' rather than
       the more obvious `uses_template_parms (spec)' to avoid problems
       with default function arguments.  In particular, given
       something like this:

	  template <class T> void f(T t1, T t = T())

       the default argument expression is not substituted for in an
       instantiation unless and until it is actually needed.  */
    return spec;

  if (optimize_specialization_lookup_p (tmpl))
    /* We don't put these specializations in the hash table, but we might
       want to give an error about a mismatch.  */
    fn = retrieve_specialization (tmpl, args, 0);
  else
    {
      elt.tmpl = tmpl;
      elt.args = args;
      elt.spec = spec;

      if (hash == 0)
	hash = spec_hasher::hash (&elt);

      /* Probe for an existing entry; SLOT is remembered so we can fill
	 it in at the end if this registration survives the checks.  */
      slot =
	decl_specializations->find_slot_with_hash (&elt, hash, INSERT);
      if (*slot)
	fn = ((spec_entry *) *slot)->spec;
      else
	fn = NULL_TREE;
    }

  /* We can sometimes try to re-register a specialization that we've
     already got.  In particular, regenerate_decl_from_template calls
     duplicate_decls which will update the specialization list.  But,
     we'll still get called again here anyhow.  It's more convenient
     to simply allow this than to try to prevent it.  */
  if (fn == spec)
    return spec;
  else if (fn && DECL_TEMPLATE_SPECIALIZATION (spec))
    {
      if (DECL_TEMPLATE_INSTANTIATION (fn))
	{
	  if (DECL_ODR_USED (fn)
	      || DECL_EXPLICIT_INSTANTIATION (fn))
	    {
	      error ("specialization of %qD after instantiation",
		     fn);
	      return error_mark_node;
	    }
	  else
	    {
	      tree clone;
	      /* This situation should occur only if the first
		 specialization is an implicit instantiation, the
		 second is an explicit specialization, and the
		 implicit instantiation has not yet been used.  That
		 situation can occur if we have implicitly
		 instantiated a member function and then specialized
		 it later.

		 We can also wind up here if a friend declaration that
		 looked like an instantiation turns out to be a
		 specialization:

		   template <class T> void foo(T);
		   class S { friend void foo<>(int) };
		   template <> void foo(int);

		 We transform the existing DECL in place so that any
		 pointers to it become pointers to the updated
		 declaration.

		 If there was a definition for the template, but not
		 for the specialization, we want this to look as if
		 there were no definition, and vice versa.  */
	      DECL_INITIAL (fn) = NULL_TREE;
	      duplicate_decls (spec, fn, is_friend);

	      /* The call to duplicate_decls will have applied
		 [temp.expl.spec]:

		   An explicit specialization of a function template
		   is inline only if it is explicitly declared to be,
		   and independently of whether its function template
		   is.

		 to the primary function; now copy the inline bits to
		 the various clones.  */
	      FOR_EACH_CLONE (clone, fn)
		{
		  DECL_DECLARED_INLINE_P (clone)
		    = DECL_DECLARED_INLINE_P (fn);
		  DECL_SOURCE_LOCATION (clone)
		    = DECL_SOURCE_LOCATION (fn);
		  DECL_DELETED_FN (clone)
		    = DECL_DELETED_FN (fn);
		}
	      check_specialization_namespace (tmpl);

	      return fn;
	    }
	}
      else if (DECL_TEMPLATE_SPECIALIZATION (fn))
	{
	  tree dd = duplicate_decls (spec, fn, is_friend);
	  if (dd == error_mark_node)
	    /* We've already complained in duplicate_decls.  */
	    return error_mark_node;

	  if (dd == NULL_TREE && DECL_INITIAL (spec))
	    /* Dup decl failed, but this is a new definition. Set the
	       line number so any errors match this new
	       definition.  */
	    DECL_SOURCE_LOCATION (fn) = DECL_SOURCE_LOCATION (spec);

	  return fn;
	}
    }
  else if (fn)
    return duplicate_decls (spec, fn, is_friend);

  /* A specialization must be declared in the same namespace as the
     template it is specializing.  */
  if (DECL_P (spec) && DECL_TEMPLATE_SPECIALIZATION (spec)
      && !check_specialization_namespace (tmpl))
    DECL_CONTEXT (spec) = DECL_CONTEXT (tmpl);

  if (slot != NULL /* !optimize_specialization_lookup_p (tmpl) */)
    {
      spec_entry *entry = ggc_alloc<spec_entry> ();
      gcc_assert (tmpl && args && spec);
      *entry = elt;
      *slot = entry;
      if ((TREE_CODE (spec) == FUNCTION_DECL && DECL_NAMESPACE_SCOPE_P (spec)
	   && PRIMARY_TEMPLATE_P (tmpl)
	   && DECL_SAVED_TREE (DECL_TEMPLATE_RESULT (tmpl)) == NULL_TREE)
	  || variable_template_p (tmpl))
	/* If TMPL is a forward declaration of a template function, keep a list
	   of all specializations in case we need to reassign them to a friend
	   template later in tsubst_friend_function.

	   Also keep a list of all variable template instantiations so that
	   process_partial_specialization can check whether a later partial
	   specialization would have used it.  */
	DECL_TEMPLATE_INSTANTIATIONS (tmpl)
	  = tree_cons (args, spec, DECL_TEMPLATE_INSTANTIATIONS (tmpl));
    }

  return spec;
}
/* Returns true iff two spec_entry nodes are equivalent.  */
/* Incremented on entry to spec_hasher::equal and decremented on exit,
   so this counter is nonzero exactly while two specializations are
   being compared.  (Presumably consulted elsewhere in the front end;
   no reader is visible in this part of the file.)  */
int comparing_specializations;
/* Hash-table equality callback: true iff E1 and E2 denote the same
   specialization, i.e. the same template instantiated with equal
   arguments (and, under concepts, equivalent constraints for variable
   template partial specializations).  */

bool
spec_hasher::equal (spec_entry *e1, spec_entry *e2)
{
  ++comparing_specializations;

  bool result = (e1->tmpl == e2->tmpl
                 && comp_template_args (e1->args, e2->args));

  if (result
      && flag_concepts
      /* tmpl could be a FIELD_DECL for a capture pack.  */
      && TREE_CODE (e1->tmpl) == TEMPLATE_DECL
      && VAR_P (DECL_TEMPLATE_RESULT (e1->tmpl))
      && uses_template_parms (e1->args))
    {
      /* Partial specializations of a variable template can be
         distinguished by constraints.  */
      tree c1 = e1->spec ? get_constraints (e1->spec) : NULL_TREE;
      tree c2 = e2->spec ? get_constraints (e2->spec) : NULL_TREE;
      result = equivalent_constraints (c1, c2);
    }

  --comparing_specializations;
  return result;
}
/* Compute a hash value for the pair (TMPL, ARGS): seed the hash with
   TMPL's DECL_UID, then fold in the template argument vector.  */
static hashval_t
hash_tmpl_and_args (tree tmpl, tree args)
{
  hashval_t seed = iterative_hash_object (DECL_UID (tmpl), 0);
  seed = iterative_hash_template_arg (args, seed);
  return seed;
}
/* Hash-table hash callback for spec_entry nodes.  The hash is derived
   only from the TMPL and ARGS members — never from SPEC — so a lookup
   can be performed before the specialization itself is built.  */
hashval_t
spec_hasher::hash (spec_entry *e)
{
  const spec_entry *entry = e;
  return hash_tmpl_and_args (entry->tmpl, entry->args);
}
/* Recursively calculate a hash value for a template argument ARG, for use
   in the hash tables of template specializations.  The hash must be
   consistent with template_args_equal: arguments that compare equal must
   hash identically (extra collisions are only a performance issue).  */
hashval_t
iterative_hash_template_arg (tree arg, hashval_t val)
{
  unsigned HOST_WIDE_INT i;
  enum tree_code code;
  char tclass;
  if (arg == NULL_TREE)
    return iterative_hash_object (arg, val);
  if (!TYPE_P (arg))
    STRIP_NOPS (arg);
  if (TREE_CODE (arg) == ARGUMENT_PACK_SELECT)
    /* ARGUMENT_PACK_SELECT should never appear in a template argument
       hashed here.  */
    gcc_unreachable ();
  code = TREE_CODE (arg);
  tclass = TREE_CODE_CLASS (code);
  /* Mix the tree code in first so different node kinds with similar
     operands hash differently.  */
  val = iterative_hash_object (code, val);
  switch (code)
    {
    case ERROR_MARK:
      return val;
    case IDENTIFIER_NODE:
      return iterative_hash_object (IDENTIFIER_HASH_VALUE (arg), val);
    case TREE_VEC:
      {
	int i, len = TREE_VEC_LENGTH (arg);
	for (i = 0; i < len; ++i)
	  val = iterative_hash_template_arg (TREE_VEC_ELT (arg, i), val);
	return val;
      }
    case TYPE_PACK_EXPANSION:
    case EXPR_PACK_EXPANSION:
      val = iterative_hash_template_arg (PACK_EXPANSION_PATTERN (arg), val);
      return iterative_hash_template_arg (PACK_EXPANSION_EXTRA_ARGS (arg), val);
    case TYPE_ARGUMENT_PACK:
    case NONTYPE_ARGUMENT_PACK:
      return iterative_hash_template_arg (ARGUMENT_PACK_ARGS (arg), val);
    case TREE_LIST:
      for (; arg; arg = TREE_CHAIN (arg))
	val = iterative_hash_template_arg (TREE_VALUE (arg), val);
      return val;
    case OVERLOAD:
      for (; arg; arg = OVL_NEXT (arg))
	val = iterative_hash_template_arg (OVL_CURRENT (arg), val);
      return val;
    case CONSTRUCTOR:
      {
	tree field, value;
	/* Fold the constructor's type into the hash.  The result of
	   this call was previously discarded, so the type contributed
	   nothing; cp_tree_equal requires same_type_p on the types of
	   two CONSTRUCTORs, so including the type keeps the hash
	   consistent with equality while reducing collisions.  */
	val = iterative_hash_template_arg (TREE_TYPE (arg), val);
	FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (arg), i, field, value)
	  {
	    val = iterative_hash_template_arg (field, val);
	    val = iterative_hash_template_arg (value, val);
	  }
	return val;
      }
    case PARM_DECL:
      if (!DECL_ARTIFICIAL (arg))
	{
	  val = iterative_hash_object (DECL_PARM_INDEX (arg), val);
	  val = iterative_hash_object (DECL_PARM_LEVEL (arg), val);
	}
      return iterative_hash_template_arg (TREE_TYPE (arg), val);
    case TARGET_EXPR:
      return iterative_hash_template_arg (TARGET_EXPR_INITIAL (arg), val);
    case PTRMEM_CST:
      val = iterative_hash_template_arg (PTRMEM_CST_CLASS (arg), val);
      return iterative_hash_template_arg (PTRMEM_CST_MEMBER (arg), val);
    case TEMPLATE_PARM_INDEX:
      val = iterative_hash_template_arg
	(TREE_TYPE (TEMPLATE_PARM_DECL (arg)), val);
      val = iterative_hash_object (TEMPLATE_PARM_LEVEL (arg), val);
      return iterative_hash_object (TEMPLATE_PARM_IDX (arg), val);
    case TRAIT_EXPR:
      val = iterative_hash_object (TRAIT_EXPR_KIND (arg), val);
      val = iterative_hash_template_arg (TRAIT_EXPR_TYPE1 (arg), val);
      return iterative_hash_template_arg (TRAIT_EXPR_TYPE2 (arg), val);
    case BASELINK:
      val = iterative_hash_template_arg (BINFO_TYPE (BASELINK_BINFO (arg)),
					 val);
      return iterative_hash_template_arg (DECL_NAME (get_first_fn (arg)),
					  val);
    case MODOP_EXPR:
      val = iterative_hash_template_arg (TREE_OPERAND (arg, 0), val);
      code = TREE_CODE (TREE_OPERAND (arg, 1));
      val = iterative_hash_object (code, val);
      return iterative_hash_template_arg (TREE_OPERAND (arg, 2), val);
    case LAMBDA_EXPR:
      /* A lambda can't appear in a template arg, but don't crash on
	 erroneous input.  */
      gcc_assert (seen_error ());
      return val;
    case CAST_EXPR:
    case IMPLICIT_CONV_EXPR:
    case STATIC_CAST_EXPR:
    case REINTERPRET_CAST_EXPR:
    case CONST_CAST_EXPR:
    case DYNAMIC_CAST_EXPR:
    case NEW_EXPR:
      val = iterative_hash_template_arg (TREE_TYPE (arg), val);
      /* Now hash operands as usual.  */
      break;
    default:
      break;
    }
  switch (tclass)
    {
    case tcc_type:
      if (alias_template_specialization_p (arg))
	{
	  // We want an alias specialization that survived strip_typedefs
	  // to hash differently from its TYPE_CANONICAL, to avoid hash
	  // collisions that compare as different in template_args_equal.
	  // These could be dependent specializations that strip_typedefs
	  // left alone, or untouched specializations because
	  // coerce_template_parms returns the unconverted template
	  // arguments if it sees incomplete argument packs.
	  tree ti = TYPE_ALIAS_TEMPLATE_INFO (arg);
	  return hash_tmpl_and_args (TI_TEMPLATE (ti), TI_ARGS (ti));
	}
      if (TYPE_CANONICAL (arg))
	return iterative_hash_object (TYPE_HASH (TYPE_CANONICAL (arg)),
				      val);
      else if (TREE_CODE (arg) == DECLTYPE_TYPE)
	return iterative_hash_template_arg (DECLTYPE_TYPE_EXPR (arg), val);
      /* Otherwise just compare the types during lookup.  */
      return val;
    case tcc_declaration:
    case tcc_constant:
      return iterative_hash_expr (arg, val);
    default:
      gcc_assert (IS_EXPR_CODE_CLASS (tclass));
      {
	unsigned n = cp_tree_operand_length (arg);
	for (i = 0; i < n; ++i)
	  val = iterative_hash_template_arg (TREE_OPERAND (arg, i), val);
	return val;
      }
    }
  gcc_unreachable ();
  return 0;
}
/* Unregister the specialization SPEC as a specialization of the
   template described by TINFO, replacing it with NEW_SPEC.  Returns
   true iff SPEC was listed as a specialization of that template.
   Note that SPEC has been ggc_freed, so we can't look inside it.  */
bool
reregister_specialization (tree spec, tree tinfo, tree new_spec)
{
  spec_entry elt;
  elt.tmpl = most_general_template (TI_TEMPLATE (tinfo));
  elt.args = TI_ARGS (tinfo);
  elt.spec = NULL_TREE;

  spec_entry *entry = decl_specializations->find (&elt);
  if (entry == NULL)
    return false;

  /* The table must currently point at the freed SPEC (or already at
     NEW_SPEC), and a replacement must be supplied.  */
  gcc_assert (entry->spec == spec || entry->spec == new_spec);
  gcc_assert (new_spec != NULL_TREE);
  entry->spec = new_spec;
  return true;
}
/* Like register_specialization, but for local declarations.  We are
   registering SPEC, an instantiation of TMPL.  */
void
register_local_specialization (tree spec, tree tmpl)
{
  /* Record the TMPL -> SPEC mapping in the current local
     specialization table; an existing entry for TMPL is replaced.  */
  local_specializations->put (tmpl, spec);
}
/* TYPE is a class type.  Returns true if TYPE is an explicitly
   specialized class: it must be a template specialization whose
   template arguments contain no remaining template parameters.  */
bool
explicit_class_specialization_p (tree type)
{
  return (CLASSTYPE_TEMPLATE_SPECIALIZATION (type)
	  && !uses_template_parms (CLASSTYPE_TI_ARGS (type)));
}
/* Print the list of functions at FNS, going through all the overloads
   for each element of the list.  Alternatively, FNS can not be a
   TREE_LIST, in which case it will be printed together with all the
   overloads.
   MORE and *STR should respectively be FALSE and NULL when the function
   is called from the outside.  They are used internally on recursive
   calls.  print_candidates manages the two parameters and leaves NULL
   in *STR when it ends.  */
static void
print_candidates_1 (tree fns, bool more, const char **str)
{
  tree fn, fn2;
  /* Indentation string, allocated lazily the first time a prefix is
     chosen, and freed only by the outermost (!MORE) invocation.  */
  char *spaces = NULL;
  for (fn = fns; fn; fn = OVL_NEXT (fn))
    if (TREE_CODE (fn) == TREE_LIST)
      {
	/* A TREE_LIST element: recurse into each value, passing MORE
	   as true while further list elements remain.  */
	for (fn2 = fn; fn2 != NULL_TREE; fn2 = TREE_CHAIN (fn2))
	  print_candidates_1 (TREE_VALUE (fn2),
			      TREE_CHAIN (fn2) || more, str);
      }
    else
      {
	tree cand = OVL_CURRENT (fn);
	if (!*str)
	  {
	    /* Pick the prefix string.  */
	    if (!more && !OVL_NEXT (fns))
	      {
		/* Exactly one candidate overall: use the singular
		   message and skip the shared-prefix machinery.  */
		inform (DECL_SOURCE_LOCATION (cand),
			"candidate is: %#D", cand);
		continue;
	      }
	    *str = _("candidates are:");
	    spaces = get_spaces (*str);
	  }
	inform (DECL_SOURCE_LOCATION (cand), "%s %#D", *str, cand);
	/* After the first line, align subsequent candidates under the
	   prefix by switching *STR to the matching run of spaces.  */
	*str = spaces ? spaces : *str;
      }
  if (!more)
    {
      /* Outermost call: release the indentation buffer and reset the
	 shared prefix so the next top-level call starts fresh.  */
      free (spaces);
      *str = NULL;
    }
}
/* Print the list of candidate FNS in an error message.  FNS can also
   be a TREE_LIST of non-functions in the case of an ambiguous lookup.  */
void
print_candidates (tree fns)
{
  const char *prefix = NULL;
  print_candidates_1 (fns, /*more=*/false, &prefix);
  /* The helper resets the shared prefix when the outermost call
     finishes.  */
  gcc_assert (prefix == NULL);
}
/* Get a (possibly) constrained template declaration for the purpose of
   ordering candidates: LIST is a TREE_LIST whose value is a decl; if
   the decl carries template info, return its template, otherwise the
   decl itself.  */
static tree
get_template_for_ordering (tree list)
{
  gcc_assert (TREE_CODE (list) == TREE_LIST);
  tree decl = TREE_VALUE (list);
  tree ti = DECL_TEMPLATE_INFO (decl);
  return ti ? TI_TEMPLATE (ti) : decl;
}
/* Among candidates having the same signature, return the most
   constrained one, or NULL_TREE if there is no unique best candidate.
   If the signatures of candidates vary (e.g., template specialization
   vs. member function), then there can be no most constrained one.
   Note that we don't compare constraints on the functions themselves,
   but rather those of their templates.  */
static tree
most_constrained_function (tree candidates)
{
  /* First pass: a linear tournament to find a provisional winner.  */
  tree best = candidates;
  for (tree iter = TREE_CHAIN (best); iter; iter = TREE_CHAIN (iter))
    {
      int cmp = more_constrained (get_template_for_ordering (best),
				  get_template_for_ordering (iter));
      if (cmp == 0)
	/* Neither candidate is more constrained.  */
	return NULL_TREE;
      if (cmp == -1)
	/* ITER beats the current leader.  */
	best = iter;
    }
  /* Second pass: the leader must also beat every candidate that
     preceded the point where it took the lead.  */
  for (tree iter = candidates; iter != best; iter = TREE_CHAIN (iter))
    if (!more_constrained (get_template_for_ordering (best),
			   get_template_for_ordering (iter)))
      return NULL_TREE;
  return best;
}
/* Returns the template (one of the functions given by TEMPLATE_ID)
   which can be specialized to match the indicated DECL with the
   explicit template args given in TEMPLATE_ID.  The DECL may be
   NULL_TREE if none is available.  In that case, the functions in
   TEMPLATE_ID are non-members.
   If NEED_MEMBER_TEMPLATE is nonzero the function is known to be a
   specialization of a member template.
   The TEMPLATE_COUNT is the number of references to qualifying
   template classes that appeared in the name of the function.  See
   check_explicit_specialization for a more accurate description.
   TSK indicates what kind of template declaration (if any) is being
   declared.  TSK_TEMPLATE indicates that the declaration given by
   DECL, though a FUNCTION_DECL, has template parameters, and is
   therefore a template function.
   The template args (those explicitly specified and those deduced)
   are output in a newly created vector *TARGS_OUT.
   If it is impossible to determine the result, an error message is
   issued.  The error_mark_node is returned to indicate failure.  */
static tree
determine_specialization (tree template_id,
			  tree decl,
			  tree* targs_out,
			  int need_member_template,
			  int template_count,
			  tmpl_spec_kind tsk)
{
  tree fns;
  tree targs;
  tree explicit_targs;
  /* TREE_LIST of ordinary member functions that match DECL (only
     possible when DECL's class context is a template instantiation).  */
  tree candidates = NULL_TREE;
  /* A TREE_LIST of templates of which DECL may be a specialization.
     The TREE_VALUE of each node is a TEMPLATE_DECL.  The
     corresponding TREE_PURPOSE is the set of template arguments that,
     when used to instantiate the template, would produce a function
     with the signature of DECL.  */
  tree templates = NULL_TREE;
  int header_count;
  cp_binding_level *b;
  *targs_out = NULL_TREE;
  if (template_id == error_mark_node || decl == error_mark_node)
    return error_mark_node;
  /* We shouldn't be specializing a member template of an
     unspecialized class template; we already gave an error in
     check_specialization_scope, now avoid crashing.  */
  if (!VAR_P (decl)
      && template_count && DECL_CLASS_SCOPE_P (decl)
      && template_class_depth (DECL_CONTEXT (decl)) > 0)
    {
      gcc_assert (errorcount);
      return error_mark_node;
    }
  /* TEMPLATE_ID is a TEMPLATE_ID_EXPR: operand 0 is the (overloaded)
     function set, operand 1 the explicit template arguments.  */
  fns = TREE_OPERAND (template_id, 0);
  explicit_targs = TREE_OPERAND (template_id, 1);
  if (fns == error_mark_node)
    return error_mark_node;
  /* Check for baselinks.  */
  if (BASELINK_P (fns))
    fns = BASELINK_FUNCTIONS (fns);
  if (TREE_CODE (decl) == FUNCTION_DECL && !is_overloaded_fn (fns))
    {
      error ("%qD is not a function template", fns);
      return error_mark_node;
    }
  else if (VAR_P (decl) && !variable_template_p (fns))
    {
      error ("%qD is not a variable template", fns);
      return error_mark_node;
    }
  /* Count the number of template headers specified for this
     specialization, by walking the enclosing template-parameter
     binding levels.  */
  header_count = 0;
  for (b = current_binding_level;
       b->kind == sk_template_parms;
       b = b->level_chain)
    ++header_count;
  /* Remember the unconsumed overload set for diagnostics below.  */
  tree orig_fns = fns;
  if (variable_template_p (fns))
    {
      /* Variable template: just coerce the explicit arguments against
	 the template's innermost parameter list.  */
      tree parms = INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (fns));
      targs = coerce_template_parms (parms, explicit_targs, fns,
				     tf_warning_or_error,
				     /*req_all*/true, /*use_defarg*/true);
      if (targs != error_mark_node)
	templates = tree_cons (targs, fns, templates);
    }
  else for (; fns; fns = OVL_NEXT (fns))
    {
      tree fn = OVL_CURRENT (fns);
      if (TREE_CODE (fn) == TEMPLATE_DECL)
	{
	  tree decl_arg_types;
	  tree fn_arg_types;
	  tree insttype;
	  /* In case of explicit specialization, we need to check if
	     the number of template headers appearing in the specialization
	     is correct.  This is usually done in check_explicit_specialization,
	     but the check done there cannot be exhaustive when specializing
	     member functions.  Consider the following code:
	       template <> void A<int>::f(int);
	       template <> template <> void A<int>::f(int);
	     Assuming that A<int> is not itself an explicit specialization
	     already, the first line specializes "f" which is a non-template
	     member function, whilst the second line specializes "f" which
	     is a template member function.  So both lines are syntactically
	     correct, and check_explicit_specialization does not reject
	     them.
	     Here, we can do better, as we are matching the specialization
	     against the declarations.  We count the number of template
	     headers, and we check if they match TEMPLATE_COUNT + 1
	     (TEMPLATE_COUNT is the number of qualifying template classes,
	     plus there must be another header for the member template
	     itself).
	     Notice that if header_count is zero, this is not a
	     specialization but rather a template instantiation, so there
	     is no check we can perform here.  */
	  if (header_count && header_count != template_count + 1)
	    continue;
	  /* Check that the number of template arguments at the
	     innermost level for DECL is the same as for FN.  */
	  if (current_binding_level->kind == sk_template_parms
	      && !current_binding_level->explicit_spec_p
	      && (TREE_VEC_LENGTH (DECL_INNERMOST_TEMPLATE_PARMS (fn))
		  != TREE_VEC_LENGTH (INNERMOST_TEMPLATE_PARMS
				      (current_template_parms))))
	    continue;
	  /* DECL might be a specialization of FN.  */
	  decl_arg_types = TYPE_ARG_TYPES (TREE_TYPE (decl));
	  fn_arg_types = TYPE_ARG_TYPES (TREE_TYPE (fn));
	  /* For a non-static member function, we need to make sure
	     that the const qualification is the same.  Since
	     get_bindings does not try to merge the "this" parameter,
	     we must do the comparison explicitly.  */
	  if (DECL_NONSTATIC_MEMBER_FUNCTION_P (fn))
	    {
	      if (!same_type_p (TREE_VALUE (fn_arg_types),
				TREE_VALUE (decl_arg_types)))
		continue;
	      /* And the ref-qualification.  */
	      if (type_memfn_rqual (TREE_TYPE (decl))
		  != type_memfn_rqual (TREE_TYPE (fn)))
		continue;
	    }
	  /* Skip the "this" parameter and, for constructors of
	     classes with virtual bases, the VTT parameter.  A
	     full specialization of a constructor will have a VTT
	     parameter, but a template never will.  */
	  decl_arg_types
	    = skip_artificial_parms_for (decl, decl_arg_types);
	  fn_arg_types
	    = skip_artificial_parms_for (fn, fn_arg_types);
	  /* Function templates cannot be specializations; there are
	     no partial specializations of functions.  Therefore, if
	     the type of DECL does not match FN, there is no
	     match.
	     Note that it should never be the case that we have both
	     candidates added here, and for regular member functions
	     below.  */
	  if (tsk == tsk_template)
	    {
	      if (compparms (fn_arg_types, decl_arg_types))
		candidates = tree_cons (NULL_TREE, fn, candidates);
	      continue;
	    }
	  /* See whether this function might be a specialization of this
	     template.  Suppress access control because we might be trying
	     to make this specialization a friend, and we have already done
	     access control for the declaration of the specialization.  */
	  push_deferring_access_checks (dk_no_check);
	  targs = get_bindings (fn, decl, explicit_targs, /*check_ret=*/true);
	  pop_deferring_access_checks ();
	  if (!targs)
	    /* We cannot deduce template arguments that when used to
	       specialize TMPL will produce DECL.  */
	    continue;
	  /* Remove, from the set of candidates, all those functions
	     whose constraints are not satisfied.  */
	  if (flag_concepts && !constraints_satisfied_p (fn, targs))
	    continue;
	  // Then, try to form the new function type.
	  insttype = tsubst (TREE_TYPE (fn), targs, tf_fndecl_type, NULL_TREE);
	  if (insttype == error_mark_node)
	    continue;
	  fn_arg_types
	    = skip_artificial_parms_for (fn, TYPE_ARG_TYPES (insttype));
	  if (!compparms (fn_arg_types, decl_arg_types))
	    continue;
	  /* Save this template, and the arguments deduced.  */
	  templates = tree_cons (targs, fn, templates);
	}
      else if (need_member_template)
	/* FN is an ordinary member function, and we need a
	   specialization of a member template.  */
	;
      else if (TREE_CODE (fn) != FUNCTION_DECL)
	/* We can get IDENTIFIER_NODEs here in certain erroneous
	   cases.  */
	;
      else if (!DECL_FUNCTION_MEMBER_P (fn))
	/* This is just an ordinary non-member function.  Nothing can
	   be a specialization of that.  */
	;
      else if (DECL_ARTIFICIAL (fn))
	/* Cannot specialize functions that are created implicitly.  */
	;
      else
	{
	  tree decl_arg_types;
	  /* This is an ordinary member function.  However, since
	     we're here, we can assume its enclosing class is a
	     template class.  For example,
	       template <typename T> struct S { void f(); };
	       template <> void S<int>::f() {}
	     Here, S<int>::f is a non-template, but S<int> is a
	     template class.  If FN has the same type as DECL, we
	     might be in business.  */
	  if (!DECL_TEMPLATE_INFO (fn))
	    /* Its enclosing class is an explicit specialization
	       of a template class.  This is not a candidate.  */
	    continue;
	  if (!same_type_p (TREE_TYPE (TREE_TYPE (decl)),
			    TREE_TYPE (TREE_TYPE (fn))))
	    /* The return types differ.  */
	    continue;
	  /* Adjust the type of DECL in case FN is a static member.  */
	  decl_arg_types = TYPE_ARG_TYPES (TREE_TYPE (decl));
	  if (DECL_STATIC_FUNCTION_P (fn)
	      && DECL_NONSTATIC_MEMBER_FUNCTION_P (decl))
	    decl_arg_types = TREE_CHAIN (decl_arg_types);
	  if (!compparms (TYPE_ARG_TYPES (TREE_TYPE (fn)),
			  decl_arg_types))
	    continue;
	  if (DECL_NONSTATIC_MEMBER_FUNCTION_P (fn)
	      && (type_memfn_rqual (TREE_TYPE (decl))
		  != type_memfn_rqual (TREE_TYPE (fn))))
	    continue;
	  // If the deduced arguments do not satisfy the constraints,
	  // this is not a candidate.
	  if (flag_concepts && !constraints_satisfied_p (fn))
	    continue;
	  // Add the candidate.
	  candidates = tree_cons (NULL_TREE, fn, candidates);
	}
    }
  if (templates && TREE_CHAIN (templates))
    {
      /* We have:
	   [temp.expl.spec]
	   It is possible for a specialization with a given function
	   signature to be instantiated from more than one function
	   template.  In such cases, explicit specification of the
	   template arguments must be used to uniquely identify the
	   function template specialization being specialized.
	 Note that here, there's no suggestion that we're supposed to
	 determine which of the candidate templates is most
	 specialized.  However, we, also have:
	   [temp.func.order]
	   Partial ordering of overloaded function template
	   declarations is used in the following contexts to select
	   the function template to which a function template
	   specialization refers:
	   -- when an explicit specialization refers to a function
	      template.
	 So, we do use the partial ordering rules, at least for now.
	 This extension can only serve to make invalid programs valid,
	 so it's safe.  And, there is strong anecdotal evidence that
	 the committee intended the partial ordering rules to apply;
	 the EDG front end has that behavior, and John Spicer claims
	 that the committee simply forgot to delete the wording in
	 [temp.expl.spec].  */
      tree tmpl = most_specialized_instantiation (templates);
      if (tmpl != error_mark_node)
	{
	  templates = tmpl;
	  TREE_CHAIN (templates) = NULL_TREE;
	}
    }
  // Concepts allows multiple declarations of member functions
  // with the same signature.  Like above, we need to rely on
  // on the partial ordering of those candidates to determine which
  // is the best.
  if (flag_concepts && candidates && TREE_CHAIN (candidates))
    {
      if (tree cand = most_constrained_function (candidates))
	{
	  candidates = cand;
	  TREE_CHAIN (cand) = NULL_TREE;
	}
    }
  if (templates == NULL_TREE && candidates == NULL_TREE)
    {
      /* Nothing matched at all.  */
      error ("template-id %qD for %q+D does not match any template "
	     "declaration", template_id, decl);
      if (header_count && header_count != template_count + 1)
	inform (input_location, "saw %d %<template<>%>, need %d for "
		"specializing a member function template",
		header_count, template_count + 1);
      else
	print_candidates (orig_fns);
      return error_mark_node;
    }
  else if ((templates && TREE_CHAIN (templates))
	   || (candidates && TREE_CHAIN (candidates))
	   || (templates && candidates))
    {
      /* More than one survivor overall: ambiguous.  */
      error ("ambiguous template specialization %qD for %q+D",
	     template_id, decl);
      candidates = chainon (candidates, templates);
      print_candidates (candidates);
      return error_mark_node;
    }
  /* We have one, and exactly one, match.  */
  if (candidates)
    {
      tree fn = TREE_VALUE (candidates);
      *targs_out = copy_node (DECL_TI_ARGS (fn));
      // Propagate the candidate's constraints to the declaration.
      set_constraints (decl, get_constraints (fn));
      /* DECL is a re-declaration or partial instantiation of a template
	 function.  */
      if (TREE_CODE (fn) == TEMPLATE_DECL)
	return fn;
      /* It was a specialization of an ordinary member function in a
	 template class.  */
      return DECL_TI_TEMPLATE (fn);
    }
  /* It was a specialization of a template.  */
  targs = DECL_TI_ARGS (DECL_TEMPLATE_RESULT (TREE_VALUE (templates)));
  if (TMPL_ARGS_HAVE_MULTIPLE_LEVELS (targs))
    {
      /* Splice the deduced innermost arguments (TREE_PURPOSE) into a
	 copy of the full multi-level argument vector.  */
      *targs_out = copy_node (targs);
      SET_TMPL_ARGS_LEVEL (*targs_out,
			   TMPL_ARGS_DEPTH (*targs_out),
			   TREE_PURPOSE (templates));
    }
  else
    *targs_out = TREE_PURPOSE (templates);
  return TREE_VALUE (templates);
}
/* Returns a chain of parameter types, exactly like the SPEC_TYPES,
   but with the default argument values filled in from those in the
   TMPL_TYPES.  */
static tree
copy_default_args_to_explicit_spec_1 (tree spec_types,
				      tree tmpl_types)
{
  /* Base cases: end of chain, or the terminating void node.  */
  if (spec_types == NULL_TREE)
    return NULL_TREE;
  if (spec_types == void_list_node)
    return void_list_node;
  /* Rebuild the tail first, then this node, taking the default
     argument (TREE_PURPOSE) from the template's parameter list.  */
  tree rest
    = copy_default_args_to_explicit_spec_1 (TREE_CHAIN (spec_types),
					    TREE_CHAIN (tmpl_types));
  return hash_tree_cons (TREE_PURPOSE (tmpl_types),
			 TREE_VALUE (spec_types),
			 rest);
}
/* DECL is an explicit specialization.  Replicate default arguments
   from the template it specializes.  (That way, code like:
     template <class T> void f(T = 3);
     template <> void f(double);
     void g () { f (); }
   works, as required.)  An alternative approach would be to look up
   the correct default arguments at the call-site, but this approach
   is consistent with how implicit instantiations are handled.  */
static void
copy_default_args_to_explicit_spec (tree decl)
{
  tree tmpl;
  tree spec_types;
  tree tmpl_types;
  tree new_spec_types;
  tree old_type;
  tree new_type;
  tree t;
  tree object_type = NULL_TREE;
  tree in_charge = NULL_TREE;
  tree vtt = NULL_TREE;
  /* See if there's anything we need to do: bail out early unless the
     template declares at least one default argument (TREE_PURPOSE).  */
  tmpl = DECL_TI_TEMPLATE (decl);
  tmpl_types = TYPE_ARG_TYPES (TREE_TYPE (DECL_TEMPLATE_RESULT (tmpl)));
  for (t = tmpl_types; t; t = TREE_CHAIN (t))
    if (TREE_PURPOSE (t))
      break;
  if (!t)
    return;
  old_type = TREE_TYPE (decl);
  spec_types = TYPE_ARG_TYPES (old_type);
  if (DECL_NONSTATIC_MEMBER_FUNCTION_P (decl))
    {
      /* Remove the this pointer, but remember the object's type for
	 CV quals.  */
      object_type = TREE_TYPE (TREE_VALUE (spec_types));
      spec_types = TREE_CHAIN (spec_types);
      tmpl_types = TREE_CHAIN (tmpl_types);
      if (DECL_HAS_IN_CHARGE_PARM_P (decl))
	{
	  /* DECL may contain more parameters than TMPL due to the extra
	     in-charge parameter in constructors and destructors.  Set it
	     aside so the two lists stay in step.  */
	  in_charge = spec_types;
	  spec_types = TREE_CHAIN (spec_types);
	}
      if (DECL_HAS_VTT_PARM_P (decl))
	{
	  /* Likewise for the VTT parameter of constructors of classes
	     with virtual bases.  */
	  vtt = spec_types;
	  spec_types = TREE_CHAIN (spec_types);
	}
    }
  /* Compute the merged default arguments.  */
  new_spec_types =
    copy_default_args_to_explicit_spec_1 (spec_types, tmpl_types);
  /* Compute the new FUNCTION_TYPE.  */
  if (object_type)
    {
      /* Re-prepend the artificial parameters that were set aside above,
	 in reverse order so they end up back in front.  */
      if (vtt)
	new_spec_types = hash_tree_cons (TREE_PURPOSE (vtt),
					 TREE_VALUE (vtt),
					 new_spec_types);
      if (in_charge)
	/* Put the in-charge parameter back.  */
	new_spec_types = hash_tree_cons (TREE_PURPOSE (in_charge),
					 TREE_VALUE (in_charge),
					 new_spec_types);
      new_type = build_method_type_directly (object_type,
					     TREE_TYPE (old_type),
					     new_spec_types);
    }
  else
    new_type = build_function_type (TREE_TYPE (old_type),
				    new_spec_types);
  /* Carry over attributes, exception specification, and the
     late-return-type flag from the old function type.  */
  new_type = cp_build_type_attribute_variant (new_type,
					      TYPE_ATTRIBUTES (old_type));
  new_type = build_exception_variant (new_type,
				      TYPE_RAISES_EXCEPTIONS (old_type));
  if (TYPE_HAS_LATE_RETURN_TYPE (old_type))
    TYPE_HAS_LATE_RETURN_TYPE (new_type) = 1;
  TREE_TYPE (decl) = new_type;
}
/* Return the number of template headers we expect to see for a
   definition or specialization of CTYPE or one of its non-template
   members: one header per enclosing class that is a primary template
   instantiation, stopping at the first non-template or explicitly
   specialized enclosing class.  For example:
     template <class T> struct S{};
     template <> struct S<int> { void f(); };
     void S<int>::f () {}
   is correct; there shouldn't be a `template <>' for the definition
   of `S<int>::f'.  */
int
num_template_headers_for_class (tree ctype)
{
  int count = 0;
  for (tree t = ctype; t && CLASS_TYPE_P (t); t = TYPE_CONTEXT (t))
    {
      if (!CLASSTYPE_TEMPLATE_INFO (t))
	/* T carries no template information of any kind: it is not a
	   template, nor nested within one.  */
	break;
      if (explicit_class_specialization_p (t))
	/* A full specialization needs no header of its own.  */
	break;
      if (PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (t)))
	++count;
    }
  return count;
}
/* Do a simple sanity check on the template headers that precede the
   variable declaration DECL, diagnosing a surplus of headers.  */
void
check_template_variable (tree decl)
{
  tree ctx = CP_DECL_CONTEXT (decl);
  int expected = num_template_headers_for_class (ctx);
  bool primary_var_tmpl
    = (DECL_LANG_SPECIFIC (decl) && DECL_TEMPLATE_INFO (decl)
       && PRIMARY_TEMPLATE_P (DECL_TI_TEMPLATE (decl)));
  if (primary_var_tmpl)
    {
      if (cxx_dialect < cxx14)
	pedwarn (DECL_SOURCE_LOCATION (decl), 0,
		 "variable templates only available with "
		 "-std=c++14 or -std=gnu++14");
      // Namespace-scope variable templates should have a template header.
      ++expected;
    }
  if (template_header_count <= expected)
    return;
  bool warned = pedwarn (DECL_SOURCE_LOCATION (decl), 0,
			 "too many template headers for %D (should be %d)",
			 decl, expected);
  if (warned
      && CLASS_TYPE_P (ctx)
      && CLASSTYPE_TEMPLATE_SPECIALIZATION (ctx))
    inform (DECL_SOURCE_LOCATION (decl),
	    "members of an explicitly specialized class are defined "
	    "without a template header");
}
/* An explicit specialization whose declarator-id or class-head-name is not
   qualified shall be declared in the nearest enclosing namespace of the
   template, or, if the namespace is inline (7.3.1), any namespace from its
   enclosing namespace set.
   If the name declared in the explicit instantiation is an unqualified name,
   the explicit instantiation shall appear in the namespace where its template
   is declared or, if that namespace is inline (7.3.1), any namespace from its
   enclosing namespace set.  */
void
check_unqualified_spec_or_inst (tree t, location_t loc)
{
  tree tmpl = most_general_template (t);
  /* Nothing to check unless the template lives at namespace scope in a
     namespace not associated with the current one.  */
  if (!DECL_NAMESPACE_SCOPE_P (tmpl)
      || is_associated_namespace (current_namespace,
				  CP_DECL_CONTEXT (tmpl)))
    return;
  if (processing_specialization)
    permerror (loc, "explicit specialization of %qD outside its "
	       "namespace must use a nested-name-specifier", tmpl);
  else if (processing_explicit_instantiation
	   && cxx_dialect >= cxx11)
    /* This was allowed in C++98, so only pedwarn.  */
    pedwarn (loc, OPT_Wpedantic, "explicit instantiation of %qD "
	     "outside its namespace must use a nested-name-"
	     "specifier", tmpl);
}
/* Check to see if the function just declared, as indicated in
DECLARATOR, and in DECL, is a specialization of a function
template. We may also discover that the declaration is an explicit
instantiation at this point.
Returns DECL, or an equivalent declaration that should be used
instead if all goes well. Issues an error message if something is
amiss. Returns error_mark_node if the error is not easily
recoverable.
FLAGS is a bitmask consisting of the following flags:
2: The function has a definition.
4: The function is a friend.
The TEMPLATE_COUNT is the number of references to qualifying
template classes that appeared in the name of the function. For
example, in
template <class T> struct S { void f(); };
void S<int>::f();
the TEMPLATE_COUNT would be 1. However, explicitly specialized
classes are not counted in the TEMPLATE_COUNT, so that in
template <class T> struct S {};
template <> struct S<int> { void f(); }
template <> void S<int>::f();
the TEMPLATE_COUNT would be 0. (Note that this declaration is
invalid; there should be no template <>.)
If the function is a specialization, it is marked as such via
DECL_TEMPLATE_SPECIALIZATION. Furthermore, its DECL_TEMPLATE_INFO
is set up correctly, and it is added to the list of specializations
for that template. */
tree
check_explicit_specialization (tree declarator,
tree decl,
int template_count,
int flags)
{
int have_def = flags & 2;
int is_friend = flags & 4;
bool is_concept = flags & 8;
int specialization = 0;
int explicit_instantiation = 0;
int member_specialization = 0;
tree ctype = DECL_CLASS_CONTEXT (decl);
tree dname = DECL_NAME (decl);
tmpl_spec_kind tsk;
if (is_friend)
{
if (!processing_specialization)
tsk = tsk_none;
else
tsk = tsk_excessive_parms;
}
else
tsk = current_tmpl_spec_kind (template_count);
switch (tsk)
{
case tsk_none:
if (processing_specialization && !VAR_P (decl))
{
specialization = 1;
SET_DECL_TEMPLATE_SPECIALIZATION (decl);
}
else if (TREE_CODE (declarator) == TEMPLATE_ID_EXPR)
{
if (is_friend)
/* This could be something like:
template <class T> void f(T);
class S { friend void f<>(int); } */
specialization = 1;
else
{
/* This case handles bogus declarations like template <>
template <class T> void f<int>(); */
error ("template-id %qD in declaration of primary template",
declarator);
return decl;
}
}
break;
case tsk_invalid_member_spec:
/* The error has already been reported in
check_specialization_scope. */
return error_mark_node;
case tsk_invalid_expl_inst:
error ("template parameter list used in explicit instantiation");
/* Fall through. */
case tsk_expl_inst:
if (have_def)
error ("definition provided for explicit instantiation");
explicit_instantiation = 1;
break;
case tsk_excessive_parms:
case tsk_insufficient_parms:
if (tsk == tsk_excessive_parms)
error ("too many template parameter lists in declaration of %qD",
decl);
else if (template_header_count)
error("too few template parameter lists in declaration of %qD", decl);
else
error("explicit specialization of %qD must be introduced by "
"%<template <>%>", decl);
/* Fall through. */
case tsk_expl_spec:
if (is_concept)
error ("explicit specialization declared %<concept%>");
if (VAR_P (decl) && TREE_CODE (declarator) != TEMPLATE_ID_EXPR)
/* In cases like template<> constexpr bool v = true;
We'll give an error in check_template_variable. */
break;
SET_DECL_TEMPLATE_SPECIALIZATION (decl);
if (ctype)
member_specialization = 1;
else
specialization = 1;
break;
case tsk_template:
if (TREE_CODE (declarator) == TEMPLATE_ID_EXPR)
{
/* This case handles bogus declarations like template <>
template <class T> void f<int>(); */
if (!uses_template_parms (declarator))
error ("template-id %qD in declaration of primary template",
declarator);
else if (variable_template_p (TREE_OPERAND (declarator, 0)))
{
/* Partial specialization of variable template. */
SET_DECL_TEMPLATE_SPECIALIZATION (decl);
specialization = 1;
goto ok;
}
else if (cxx_dialect < cxx14)
error ("non-type partial specialization %qD "
"is not allowed", declarator);
else
error ("non-class, non-variable partial specialization %qD "
"is not allowed", declarator);
return decl;
ok:;
}
if (ctype && CLASSTYPE_TEMPLATE_INSTANTIATION (ctype))
/* This is a specialization of a member template, without
specialization the containing class. Something like:
template <class T> struct S {
template <class U> void f (U);
};
template <> template <class U> void S<int>::f(U) {}
That's a specialization -- but of the entire template. */
specialization = 1;
break;
default:
gcc_unreachable ();
}
if ((specialization || member_specialization)
/* This doesn't apply to variable templates. */
&& (TREE_CODE (TREE_TYPE (decl)) == FUNCTION_TYPE
|| TREE_CODE (TREE_TYPE (decl)) == METHOD_TYPE))
{
tree t = TYPE_ARG_TYPES (TREE_TYPE (decl));
for (; t; t = TREE_CHAIN (t))
if (TREE_PURPOSE (t))
{
permerror (input_location,
"default argument specified in explicit specialization");
break;
}
}
if (specialization || member_specialization || explicit_instantiation)
{
tree tmpl = NULL_TREE;
tree targs = NULL_TREE;
bool was_template_id = (TREE_CODE (declarator) == TEMPLATE_ID_EXPR);
/* Make sure that the declarator is a TEMPLATE_ID_EXPR. */
if (!was_template_id)
{
tree fns;
gcc_assert (identifier_p (declarator));
if (ctype)
fns = dname;
else
{
/* If there is no class context, the explicit instantiation
must be at namespace scope. */
gcc_assert (DECL_NAMESPACE_SCOPE_P (decl));
/* Find the namespace binding, using the declaration
context. */
fns = lookup_qualified_name (CP_DECL_CONTEXT (decl), dname,
false, true);
if (fns == error_mark_node)
/* If lookup fails, look for a friend declaration so we can
give a better diagnostic. */
fns = lookup_qualified_name (CP_DECL_CONTEXT (decl), dname,
/*type*/false, /*complain*/true,
/*hidden*/true);
if (fns == error_mark_node || !is_overloaded_fn (fns))
{
error ("%qD is not a template function", dname);
fns = error_mark_node;
}
}
declarator = lookup_template_function (fns, NULL_TREE);
}
if (declarator == error_mark_node)
return error_mark_node;
if (ctype != NULL_TREE && TYPE_BEING_DEFINED (ctype))
{
if (!explicit_instantiation)
/* A specialization in class scope. This is invalid,
but the error will already have been flagged by
check_specialization_scope. */
return error_mark_node;
else
{
/* It's not valid to write an explicit instantiation in
class scope, e.g.:
class C { template void f(); }
This case is caught by the parser. However, on
something like:
template class C { void f(); };
(which is invalid) we can get here. The error will be
issued later. */
;
}
return decl;
}
else if (ctype != NULL_TREE
&& (identifier_p (TREE_OPERAND (declarator, 0))))
{
// We'll match variable templates in start_decl.
if (VAR_P (decl))
return decl;
/* Find the list of functions in ctype that have the same
name as the declared function. */
tree name = TREE_OPERAND (declarator, 0);
tree fns = NULL_TREE;
int idx;
if (constructor_name_p (name, ctype))
{
int is_constructor = DECL_CONSTRUCTOR_P (decl);
if (is_constructor ? !TYPE_HAS_USER_CONSTRUCTOR (ctype)
: !CLASSTYPE_DESTRUCTORS (ctype))
{
/* From [temp.expl.spec]:
If such an explicit specialization for the member
of a class template names an implicitly-declared
special member function (clause _special_), the
program is ill-formed.
Similar language is found in [temp.explicit]. */
error ("specialization of implicitly-declared special member function");
return error_mark_node;
}
name = is_constructor ? ctor_identifier : dtor_identifier;
}
if (!DECL_CONV_FN_P (decl))
{
idx = lookup_fnfields_1 (ctype, name);
if (idx >= 0)
fns = (*CLASSTYPE_METHOD_VEC (ctype))[idx];
}
else
{
vec<tree, va_gc> *methods;
tree ovl;
/* For a type-conversion operator, we cannot do a
name-based lookup. We might be looking for `operator
int' which will be a specialization of `operator T'.
So, we find *all* the conversion operators, and then
select from them. */
fns = NULL_TREE;
methods = CLASSTYPE_METHOD_VEC (ctype);
if (methods)
for (idx = CLASSTYPE_FIRST_CONVERSION_SLOT;
methods->iterate (idx, &ovl);
++idx)
{
if (!DECL_CONV_FN_P (OVL_CURRENT (ovl)))
/* There are no more conversion functions. */
break;
/* Glue all these conversion functions together
with those we already have. */
for (; ovl; ovl = OVL_NEXT (ovl))
fns = ovl_cons (OVL_CURRENT (ovl), fns);
}
}
if (fns == NULL_TREE)
{
error ("no member function %qD declared in %qT", name, ctype);
return error_mark_node;
}
else
TREE_OPERAND (declarator, 0) = fns;
}
/* Figure out what exactly is being specialized at this point.
Note that for an explicit instantiation, even one for a
member function, we cannot tell a priori whether the
instantiation is for a member template, or just a member
function of a template class. Even if a member template is
being instantiated, the member template arguments may be
elided if they can be deduced from the rest of the
declaration. */
tmpl = determine_specialization (declarator, decl,
&targs,
member_specialization,
template_count,
tsk);
if (!tmpl || tmpl == error_mark_node)
/* We couldn't figure out what this declaration was
specializing. */
return error_mark_node;
else
{
if (TREE_CODE (decl) == FUNCTION_DECL
&& DECL_HIDDEN_FRIEND_P (tmpl))
{
if (pedwarn (DECL_SOURCE_LOCATION (decl), 0,
"friend declaration %qD is not visible to "
"explicit specialization", tmpl))
inform (DECL_SOURCE_LOCATION (tmpl),
"friend declaration here");
}
else if (!ctype && !is_friend
&& CP_DECL_CONTEXT (decl) == current_namespace)
check_unqualified_spec_or_inst (tmpl, DECL_SOURCE_LOCATION (decl));
tree gen_tmpl = most_general_template (tmpl);
if (explicit_instantiation)
{
/* We don't set DECL_EXPLICIT_INSTANTIATION here; that
is done by do_decl_instantiation later. */
int arg_depth = TMPL_ARGS_DEPTH (targs);
int parm_depth = TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (tmpl));
if (arg_depth > parm_depth)
{
/* If TMPL is not the most general template (for
example, if TMPL is a friend template that is
injected into namespace scope), then there will
be too many levels of TARGS. Remove some of them
here. */
int i;
tree new_targs;
new_targs = make_tree_vec (parm_depth);
for (i = arg_depth - parm_depth; i < arg_depth; ++i)
TREE_VEC_ELT (new_targs, i - (arg_depth - parm_depth))
= TREE_VEC_ELT (targs, i);
targs = new_targs;
}
return instantiate_template (tmpl, targs, tf_error);
}
/* If we thought that the DECL was a member function, but it
turns out to be specializing a static member function,
make DECL a static member function as well. */
if (DECL_FUNCTION_TEMPLATE_P (tmpl)
&& DECL_STATIC_FUNCTION_P (tmpl)
&& DECL_NONSTATIC_MEMBER_FUNCTION_P (decl))
revert_static_member_fn (decl);
/* If this is a specialization of a member template of a
template class, we want to return the TEMPLATE_DECL, not
the specialization of it. */
if (tsk == tsk_template && !was_template_id)
{
tree result = DECL_TEMPLATE_RESULT (tmpl);
SET_DECL_TEMPLATE_SPECIALIZATION (tmpl);
DECL_INITIAL (result) = NULL_TREE;
if (have_def)
{
tree parm;
DECL_SOURCE_LOCATION (tmpl) = DECL_SOURCE_LOCATION (decl);
DECL_SOURCE_LOCATION (result)
= DECL_SOURCE_LOCATION (decl);
/* We want to use the argument list specified in the
definition, not in the original declaration. */
DECL_ARGUMENTS (result) = DECL_ARGUMENTS (decl);
for (parm = DECL_ARGUMENTS (result); parm;
parm = DECL_CHAIN (parm))
DECL_CONTEXT (parm) = result;
}
return register_specialization (tmpl, gen_tmpl, targs,
is_friend, 0);
}
/* Set up the DECL_TEMPLATE_INFO for DECL. */
DECL_TEMPLATE_INFO (decl) = build_template_info (tmpl, targs);
if (was_template_id)
TINFO_USED_TEMPLATE_ID (DECL_TEMPLATE_INFO (decl)) = true;
/* Inherit default function arguments from the template
DECL is specializing. */
if (DECL_FUNCTION_TEMPLATE_P (tmpl))
copy_default_args_to_explicit_spec (decl);
/* This specialization has the same protection as the
template it specializes. */
TREE_PRIVATE (decl) = TREE_PRIVATE (gen_tmpl);
TREE_PROTECTED (decl) = TREE_PROTECTED (gen_tmpl);
/* 7.1.1-1 [dcl.stc]
A storage-class-specifier shall not be specified in an
explicit specialization...
The parser rejects these, so unless action is taken here,
explicit function specializations will always appear with
global linkage.
The action recommended by the C++ CWG in response to C++
defect report 605 is to make the storage class and linkage
of the explicit specialization match the templated function:
http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#605
*/
if (tsk == tsk_expl_spec && DECL_FUNCTION_TEMPLATE_P (gen_tmpl))
{
tree tmpl_func = DECL_TEMPLATE_RESULT (gen_tmpl);
gcc_assert (TREE_CODE (tmpl_func) == FUNCTION_DECL);
/* A concept cannot be specialized. */
if (DECL_DECLARED_CONCEPT_P (tmpl_func))
{
error ("explicit specialization of function concept %qD",
gen_tmpl);
return error_mark_node;
}
/* This specialization has the same linkage and visibility as
the function template it specializes. */
TREE_PUBLIC (decl) = TREE_PUBLIC (tmpl_func);
if (! TREE_PUBLIC (decl))
{
DECL_INTERFACE_KNOWN (decl) = 1;
DECL_NOT_REALLY_EXTERN (decl) = 1;
}
DECL_THIS_STATIC (decl) = DECL_THIS_STATIC (tmpl_func);
if (DECL_VISIBILITY_SPECIFIED (tmpl_func))
{
DECL_VISIBILITY_SPECIFIED (decl) = 1;
DECL_VISIBILITY (decl) = DECL_VISIBILITY (tmpl_func);
}
}
/* If DECL is a friend declaration, declared using an
unqualified name, the namespace associated with DECL may
have been set incorrectly. For example, in:
template <typename T> void f(T);
namespace N {
struct S { friend void f<int>(int); }
}
we will have set the DECL_CONTEXT for the friend
declaration to N, rather than to the global namespace. */
if (DECL_NAMESPACE_SCOPE_P (decl))
DECL_CONTEXT (decl) = DECL_CONTEXT (tmpl);
if (is_friend && !have_def)
/* This is not really a declaration of a specialization.
It's just the name of an instantiation. But, it's not
a request for an instantiation, either. */
SET_DECL_IMPLICIT_INSTANTIATION (decl);
else if (TREE_CODE (decl) == FUNCTION_DECL)
/* A specialization is not necessarily COMDAT. */
DECL_COMDAT (decl) = (TREE_PUBLIC (decl)
&& DECL_DECLARED_INLINE_P (decl));
else if (VAR_P (decl))
DECL_COMDAT (decl) = false;
/* If this is a full specialization, register it so that we can find
it again. Partial specializations will be registered in
process_partial_specialization. */
if (!processing_template_decl)
decl = register_specialization (decl, gen_tmpl, targs,
is_friend, 0);
/* A 'structor should already have clones. */
gcc_assert (decl == error_mark_node
|| variable_template_p (tmpl)
|| !(DECL_CONSTRUCTOR_P (decl)
|| DECL_DESTRUCTOR_P (decl))
|| DECL_CLONED_FUNCTION_P (DECL_CHAIN (decl)));
}
}
return decl;
}
/* Returns 1 iff PARMS1 and PARMS2 are identical sets of template
   parameters.  These are represented in the same format used for
   DECL_TEMPLATE_PARMS.  */

int
comp_template_parms (const_tree parms1, const_tree parms2)
{
  if (parms1 == parms2)
    return 1;

  /* Walk both lists of parameter levels in lock-step.  */
  const_tree level1 = parms1;
  const_tree level2 = parms2;
  for (; level1 != NULL_TREE && level2 != NULL_TREE;
       level1 = TREE_CHAIN (level1), level2 = TREE_CHAIN (level2))
    {
      tree vec1 = TREE_VALUE (level1);
      tree vec2 = TREE_VALUE (level2);

      gcc_assert (TREE_CODE (vec1) == TREE_VEC);
      gcc_assert (TREE_CODE (vec2) == TREE_VEC);

      if (TREE_VEC_LENGTH (vec1) != TREE_VEC_LENGTH (vec2))
	return 0;

      for (int i = 0; i < TREE_VEC_LENGTH (vec2); ++i)
	{
	  tree parm1 = TREE_VALUE (TREE_VEC_ELT (vec1, i));
	  tree parm2 = TREE_VALUE (TREE_VEC_ELT (vec2, i));

	  /* If either of the template parameters are invalid, assume
	     they match for the sake of error recovery.  */
	  if (error_operand_p (parm1) || error_operand_p (parm2))
	    return 1;

	  if (TREE_CODE (parm1) != TREE_CODE (parm2))
	    return 0;

	  /* Type parameters match when their pack-ness agrees;
	     everything else must have the same type.  */
	  if (TREE_CODE (parm1) == TEMPLATE_TYPE_PARM
	      && (TEMPLATE_TYPE_PARAMETER_PACK (parm1)
		  == TEMPLATE_TYPE_PARAMETER_PACK (parm2)))
	    continue;
	  if (!same_type_p (TREE_TYPE (parm1), TREE_TYPE (parm2)))
	    return 0;
	}
    }

  /* One set of parameters has more parameter lists than the other.  */
  if ((level1 != NULL_TREE) != (level2 != NULL_TREE))
    return 0;

  return 1;
}
/* Determine whether PARM is a parameter pack.  */

bool
template_parameter_pack_p (const_tree parm)
{
  switch (TREE_CODE (parm))
    {
    case PARM_DECL:
      /* A non-type template parameter pack.  */
      return (DECL_TEMPLATE_PARM_P (parm)
	      && TEMPLATE_PARM_PARAMETER_PACK (DECL_INITIAL (parm)));

    case TEMPLATE_PARM_INDEX:
      return TEMPLATE_PARM_PARAMETER_PACK (parm);

    case TYPE_DECL:
    case TEMPLATE_DECL:
      /* A list of template parameters can hand us a TYPE_DECL or a
	 TEMPLATE_DECL; look through to the underlying type.  */
      parm = TREE_TYPE (parm);
      break;

    default:
      break;
    }

  /* Otherwise it must be a type template parameter.  */
  return ((TREE_CODE (parm) == TEMPLATE_TYPE_PARM
	   || TREE_CODE (parm) == TEMPLATE_TEMPLATE_PARM)
	  && TEMPLATE_TYPE_PARAMETER_PACK (parm));
}
/* Determine if T is a function parameter pack.  */

bool
function_parameter_pack_p (const_tree t)
{
  /* Only a PARM_DECL can be a function parameter pack.  */
  return (t != NULL_TREE
	  && TREE_CODE (t) == PARM_DECL
	  && DECL_PACK_P (t));
}
/* Return the function template declaration of PRIMARY_FUNC_TMPL_INST.
   PRIMARY_FUNC_TMPL_INST is a primary function template instantiation;
   returns NULL for anything else.  */

tree
get_function_template_decl (const_tree primary_func_tmpl_inst)
{
  if (primary_func_tmpl_inst
      && TREE_CODE (primary_func_tmpl_inst) == FUNCTION_DECL
      && primary_template_instantiation_p (primary_func_tmpl_inst))
    return DECL_TEMPLATE_RESULT (DECL_TI_TEMPLATE (primary_func_tmpl_inst));

  return NULL;
}
/* Return true iff the function parameter PARAM_DECL was expanded
   from the function parameter pack PACK.  */

bool
function_parameter_expanded_from_pack_p (tree param_decl, tree pack)
{
  /* Artificial parameters never come from a pack, and PACK must
     actually be a function parameter pack.  */
  if (!DECL_ARTIFICIAL (param_decl) && function_parameter_pack_p (pack))
    /* The parameter pack and its pack arguments have the same
       DECL_PARM_INDEX.  */
    return DECL_PARM_INDEX (pack) == DECL_PARM_INDEX (param_decl);

  return false;
}
/* Determine whether ARGS describes a variadic template args list,
   i.e., one that is terminated by a template argument pack.  */

static bool
template_args_variadic_p (tree args)
{
  if (args == NULL_TREE)
    return false;

  tree inner = INNERMOST_TEMPLATE_ARGS (args);
  int count = TREE_VEC_LENGTH (inner);
  if (count == 0)
    return false;

  /* Variadic iff the final argument is an argument pack.  */
  return ARGUMENT_PACK_P (TREE_VEC_ELT (inner, count - 1));
}
/* Generate a new name for the parameter pack name NAME (an
   IDENTIFIER_NODE) that incorporates its index, I.  Returns NAME
   unchanged when it is NULL_TREE (an anonymous parameter).  */

static tree
make_ith_pack_parameter_name (tree name, int i)
{
  /* Munge the name to include the parameter index.  */
#define NUMBUF_LEN 128
  char numbuf[NUMBUF_LEN];
  char* newname;
  int newname_len;

  if (name == NULL_TREE)
    return name;
  /* Render the index once just to learn how many digits it needs.  */
  snprintf (numbuf, NUMBUF_LEN, "%i", i);
  /* +2: one byte for the '#' separator, one for the trailing NUL.  */
  newname_len = IDENTIFIER_LENGTH (name)
		+ strlen (numbuf) + 2;
  newname = (char*)alloca (newname_len);
  snprintf (newname, newname_len,
	    "%s#%i", IDENTIFIER_POINTER (name), i);
  return get_identifier (newname);
/* Keep the helper macro local to this function instead of leaking it
   into the rest of the translation unit.  */
#undef NUMBUF_LEN
}
/* Return true if T is a primary function, class or alias template
   instantiation.  */

bool
primary_template_instantiation_p (const_tree t)
{
  if (!t)
    return false;

  if (TREE_CODE (t) == FUNCTION_DECL)
    /* A function counts when it carries template info and that
       template is a primary template.  */
    return (DECL_LANG_SPECIFIC (t)
	    && DECL_TEMPLATE_INSTANTIATION (t)
	    && PRIMARY_TEMPLATE_P (DECL_TI_TEMPLATE (t)));

  if (CLASS_TYPE_P (t) && !TYPE_DECL_ALIAS_P (TYPE_NAME (t)))
    return (CLASSTYPE_TEMPLATE_INSTANTIATION (t)
	    && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (t)));

  if (alias_template_specialization_p (t))
    return true;

  return false;
}
/* Return true if PARM is a template template parameter.  */

bool
template_template_parameter_p (const_tree parm)
{
  /* Guard against a null PARM, for consistency with
     template_type_parameter_p below; the accessor macro would
     otherwise dereference it.  */
  return parm && DECL_TEMPLATE_TEMPLATE_PARM_P (parm);
}
/* Return true iff PARM is a DECL representing a type template
   parameter.  */

bool
template_type_parameter_p (const_tree parm)
{
  if (parm == NULL_TREE)
    return false;
  /* Type parameters are represented by a TYPE_DECL or (for template
     template parameters) a TEMPLATE_DECL.  */
  if (TREE_CODE (parm) != TYPE_DECL && TREE_CODE (parm) != TEMPLATE_DECL)
    return false;
  return DECL_TEMPLATE_PARM_P (parm);
}
/* Return the template parameters of T if T is a
   primary template instantiation, NULL otherwise.  */

tree
get_primary_template_innermost_parameters (const_tree t)
{
  tree tinfo = get_template_info (t);
  if (!tinfo || !primary_template_instantiation_p (t))
    return NULL;

  return INNERMOST_TEMPLATE_PARMS
    (DECL_TEMPLATE_PARMS (TI_TEMPLATE (tinfo)));
}
/* Return the template parameters of the LEVELth level from the full list
   of template parameters PARMS.  Returns NULL_TREE when PARMS is not a
   parameter list or LEVEL exceeds its depth.  */

tree
get_template_parms_at_level (tree parms, int level)
{
  if (!parms
      || TREE_CODE (parms) != TREE_LIST
      || level > TMPL_PARMS_DEPTH (parms))
    return NULL_TREE;

  for (tree p = parms; p; p = TREE_CHAIN (p))
    if (TMPL_PARMS_DEPTH (p) == level)
      return p;

  return NULL_TREE;
}
/* Returns the template arguments of T if T is a template instantiation,
   NULL otherwise.  */

tree
get_template_innermost_arguments (const_tree t)
{
  tree tinfo = get_template_info (t);
  if (tinfo && TI_ARGS (tinfo))
    return INNERMOST_TEMPLATE_ARGS (TI_ARGS (tinfo));

  return NULL;
}
/* Return the argument pack elements of T if T is a template argument pack,
   NULL otherwise.  */

tree
get_template_argument_pack_elems (const_tree t)
{
  if (TREE_CODE (t) == TYPE_ARGUMENT_PACK
      || TREE_CODE (t) == NONTYPE_ARGUMENT_PACK)
    return ARGUMENT_PACK_ARGS (t);

  return NULL;
}
/* Structure used to track the progress of find_parameter_packs_r.  */
struct find_parameter_pack_data
{
  /* TREE_LIST that will contain all of the parameter packs found by
     the traversal.  Packs are prepended as they are discovered, so the
     list ends up in most-recently-seen-first order.  */
  tree* parameter_packs;

  /* Set of AST nodes that have been visited by the traversal, used to
     avoid walking the same subtree twice.  */
  hash_set<tree> *visited;

  /* True iff we're making a type pack expansion.  */
  bool type_pack_expansion_p;
};
/* Identifies all of the argument packs that occur in a template
   argument and appends them to the TREE_LIST inside DATA, which is a
   find_parameter_pack_data structure. This is a subroutine of
   make_pack_expansion and uses_parameter_packs.  Always returns
   NULL_TREE (so cp_walk_tree keeps going); *WALK_SUBTREES is cleared
   for nodes whose children are walked explicitly here.  */
static tree
find_parameter_packs_r (tree *tp, int *walk_subtrees, void* data)
{
  tree t = *tp;
  struct find_parameter_pack_data* ppd =
    (struct find_parameter_pack_data*)data;
  bool parameter_pack_p = false;

  /* Handle type aliases/typedefs.  The alias itself is never a pack,
     but its template arguments (if any) may mention packs.  */
  if (TYPE_ALIAS_P (t))
    {
      if (tree tinfo = TYPE_ALIAS_TEMPLATE_INFO (t))
	cp_walk_tree (&TI_ARGS (tinfo),
		      &find_parameter_packs_r,
		      ppd, ppd->visited);
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  /* Identify whether this is a parameter pack or not.  */
  switch (TREE_CODE (t))
    {
    case TEMPLATE_PARM_INDEX:
      if (TEMPLATE_PARM_PARAMETER_PACK (t))
	parameter_pack_p = true;
      break;

    case TEMPLATE_TYPE_PARM:
      t = TYPE_MAIN_VARIANT (t);
      /* FALLTHRU */
    case TEMPLATE_TEMPLATE_PARM:
      /* If the placeholder appears in the decl-specifier-seq of a function
	 parameter pack (14.6.3), or the type-specifier-seq of a type-id that
	 is a pack expansion, the invented template parameter is a template
	 parameter pack.  */
      if (ppd->type_pack_expansion_p && is_auto_or_concept (t))
	TEMPLATE_TYPE_PARAMETER_PACK (t) = true;
      if (TEMPLATE_TYPE_PARAMETER_PACK (t))
	parameter_pack_p = true;
      break;

    case FIELD_DECL:
    case PARM_DECL:
      if (DECL_PACK_P (t))
	{
	  /* We don't want to walk into the type of a PARM_DECL,
	     because we don't want to see the type parameter pack.  */
	  *walk_subtrees = 0;
	  parameter_pack_p = true;
	}
      break;

      /* Look through a lambda capture proxy to the field pack.  */
    case VAR_DECL:
      if (DECL_HAS_VALUE_EXPR_P (t))
	{
	  tree v = DECL_VALUE_EXPR (t);
	  cp_walk_tree (&v,
			&find_parameter_packs_r,
			ppd, ppd->visited);
	  *walk_subtrees = 0;
	}
      else if (variable_template_specialization_p (t))
	{
	  /* A variable template specialization may mention packs in
	     its template arguments.  */
	  cp_walk_tree (&DECL_TI_ARGS (t),
			find_parameter_packs_r,
			ppd, ppd->visited);
	  *walk_subtrees = 0;
	}
      break;

    case BASES:
      parameter_pack_p = true;
      break;
    default:
      /* Not a parameter pack.  */
      break;
    }

  if (parameter_pack_p)
    {
      /* Add this parameter pack to the list.  */
      *ppd->parameter_packs = tree_cons (NULL_TREE, t, *ppd->parameter_packs);
    }

  if (TYPE_P (t))
    cp_walk_tree (&TYPE_CONTEXT (t),
		  &find_parameter_packs_r, ppd, ppd->visited);

  /* This switch statement will return immediately if we don't find a
     parameter pack.  */
  switch (TREE_CODE (t))
    {
    case TEMPLATE_PARM_INDEX:
      return NULL_TREE;

    case BOUND_TEMPLATE_TEMPLATE_PARM:
      /* Check the template itself.  */
      cp_walk_tree (&TREE_TYPE (TYPE_TI_TEMPLATE (t)),
		    &find_parameter_packs_r, ppd, ppd->visited);
      /* Check the template arguments.  */
      cp_walk_tree (&TYPE_TI_ARGS (t), &find_parameter_packs_r, ppd,
		    ppd->visited);
      *walk_subtrees = 0;
      return NULL_TREE;

    case TEMPLATE_TYPE_PARM:
    case TEMPLATE_TEMPLATE_PARM:
      return NULL_TREE;

    case PARM_DECL:
      return NULL_TREE;

    case RECORD_TYPE:
      if (TYPE_PTRMEMFUNC_P (t))
	return NULL_TREE;
      /* Fall through.  */

    case UNION_TYPE:
    case ENUMERAL_TYPE:
      /* For class-like types, packs can only hide in the template
	 arguments; don't walk the members.  */
      if (TYPE_TEMPLATE_INFO (t))
	cp_walk_tree (&TYPE_TI_ARGS (t),
		      &find_parameter_packs_r, ppd, ppd->visited);

      *walk_subtrees = 0;
      return NULL_TREE;

    case TEMPLATE_DECL:
      if (!DECL_TEMPLATE_TEMPLATE_PARM_P (t))
	return NULL_TREE;
      gcc_fallthrough();

    case CONSTRUCTOR:
      cp_walk_tree (&TREE_TYPE (t),
		    &find_parameter_packs_r, ppd, ppd->visited);
      return NULL_TREE;

    case TYPENAME_TYPE:
      cp_walk_tree (&TYPENAME_TYPE_FULLNAME (t), &find_parameter_packs_r,
		    ppd, ppd->visited);
      *walk_subtrees = 0;
      return NULL_TREE;

    case TYPE_PACK_EXPANSION:
    case EXPR_PACK_EXPANSION:
      /* Don't walk into an existing pack expansion; any packs inside
	 it are already expanded there.  */
      *walk_subtrees = 0;
      return NULL_TREE;

    case INTEGER_TYPE:
      cp_walk_tree (&TYPE_MAX_VALUE (t), &find_parameter_packs_r,
		    ppd, ppd->visited);
      *walk_subtrees = 0;
      return NULL_TREE;

    case IDENTIFIER_NODE:
      cp_walk_tree (&TREE_TYPE (t), &find_parameter_packs_r, ppd,
		    ppd->visited);
      *walk_subtrees = 0;
      return NULL_TREE;

    case DECLTYPE_TYPE:
      {
	/* When traversing a DECLTYPE_TYPE_EXPR, we need to set
	   type_pack_expansion_p to false so that any placeholders
	   within the expression don't get marked as parameter packs.  */
	bool type_pack_expansion_p = ppd->type_pack_expansion_p;
	ppd->type_pack_expansion_p = false;
	cp_walk_tree (&DECLTYPE_TYPE_EXPR (t), &find_parameter_packs_r,
		      ppd, ppd->visited);
	ppd->type_pack_expansion_p = type_pack_expansion_p;
	*walk_subtrees = 0;
	return NULL_TREE;
      }

    default:
      return NULL_TREE;
    }

  return NULL_TREE;
}
/* Determines if the expression or type T uses any parameter packs.
   Returns true iff at least one pack was found.  */

bool
uses_parameter_packs (tree t)
{
  tree found_packs = NULL_TREE;
  struct find_parameter_pack_data ppd;
  ppd.parameter_packs = &found_packs;
  ppd.visited = new hash_set<tree>;
  ppd.type_pack_expansion_p = false;

  cp_walk_tree (&t, &find_parameter_packs_r, &ppd, ppd.visited);
  delete ppd.visited;

  return found_packs != NULL_TREE;
}
/* Turn ARG, which may be an expression, type, or a TREE_LIST
   representation a base-class initializer into a parameter pack
   expansion.  If all goes well, the resulting node will be an
   EXPR_PACK_EXPANSION, TYPE_PACK_EXPANSION, or TREE_LIST,
   respectively.  Emits an error and returns error_mark_node if ARG
   mentions no parameter packs.  */
tree
make_pack_expansion (tree arg)
{
  tree result;
  tree parameter_packs = NULL_TREE;
  bool for_types = false;
  struct find_parameter_pack_data ppd;

  if (!arg || arg == error_mark_node)
    return arg;

  if (TREE_CODE (arg) == TREE_LIST && TREE_PURPOSE (arg))
    {
      /* A TREE_LIST with a non-null TREE_PURPOSE is for a base
	 class initializer.  In this case, the TREE_PURPOSE will be a
	 _TYPE node (representing the base class expansion we're
	 initializing) and the TREE_VALUE will be a TREE_LIST
	 containing the initialization arguments. 

	 The resulting expansion looks somewhat different from most
	 expansions.  Rather than returning just one _EXPANSION, we
	 return a TREE_LIST whose TREE_PURPOSE is a
	 TYPE_PACK_EXPANSION containing the bases that will be
	 initialized.  The TREE_VALUE will be identical to the
	 original TREE_VALUE, which is a list of arguments that will
	 be passed to each base.  We do not introduce any new pack
	 expansion nodes into the TREE_VALUE (although it is possible
	 that some already exist), because the TREE_PURPOSE and
	 TREE_VALUE all need to be expanded together with the same
	 _EXPANSION node.  Note that the TYPE_PACK_EXPANSION in the
	 resulting TREE_PURPOSE will mention the parameter packs in
	 both the bases and the arguments to the bases.  */
      tree purpose;
      tree value;
      /* Shadows the outer variable deliberately; only this branch's
	 packs matter here.  */
      tree parameter_packs = NULL_TREE;

      /* Determine which parameter packs will be used by the base
	 class expansion.  */
      ppd.visited = new hash_set<tree>;
      ppd.parameter_packs = &parameter_packs;
      ppd.type_pack_expansion_p = true;
      gcc_assert (TYPE_P (TREE_PURPOSE (arg)));
      cp_walk_tree (&TREE_PURPOSE (arg), &find_parameter_packs_r,
		    &ppd, ppd.visited);

      if (parameter_packs == NULL_TREE)
	{
	  error ("base initializer expansion %<%T%> contains no parameter packs", arg);
	  delete ppd.visited;
	  return error_mark_node;
	}

      if (TREE_VALUE (arg) != void_type_node)
	{
	  /* Collect the sets of parameter packs used in each of the
	     initialization arguments.  */
	  for (value = TREE_VALUE (arg); value; value = TREE_CHAIN (value))
	    {
	      /* Determine which parameter packs will be expanded in this
		 argument.  */
	      cp_walk_tree (&TREE_VALUE (value), &find_parameter_packs_r,
			    &ppd, ppd.visited);
	    }
	}

      delete ppd.visited;

      /* Create the pack expansion type for the base type.  */
      purpose = cxx_make_type (TYPE_PACK_EXPANSION);
      SET_PACK_EXPANSION_PATTERN (purpose, TREE_PURPOSE (arg));
      PACK_EXPANSION_PARAMETER_PACKS (purpose) = parameter_packs;

      /* Just use structural equality for these TYPE_PACK_EXPANSIONS;
	 they will rarely be compared to anything.  */
      SET_TYPE_STRUCTURAL_EQUALITY (purpose);

      return tree_cons (purpose, TREE_VALUE (arg), NULL_TREE);
    }

  if (TYPE_P (arg) || TREE_CODE (arg) == TEMPLATE_DECL)
    for_types = true;

  /* Build the PACK_EXPANSION_* node.  */
  result = for_types
     ? cxx_make_type (TYPE_PACK_EXPANSION)
     : make_node (EXPR_PACK_EXPANSION);
  SET_PACK_EXPANSION_PATTERN (result, arg);
  if (TREE_CODE (result) == EXPR_PACK_EXPANSION)
    {
      /* Propagate type and const-expression information.  */
      TREE_TYPE (result) = TREE_TYPE (arg);
      TREE_CONSTANT (result) = TREE_CONSTANT (arg);
      /* Mark this read now, since the expansion might be length 0.  */
      mark_exp_read (arg);
    }
  else
    /* Just use structural equality for these TYPE_PACK_EXPANSIONS;
       they will rarely be compared to anything.  */
    SET_TYPE_STRUCTURAL_EQUALITY (result);

  /* Determine which parameter packs will be expanded.  */
  ppd.parameter_packs = &parameter_packs;
  ppd.visited = new hash_set<tree>;
  ppd.type_pack_expansion_p = TYPE_P (arg);
  cp_walk_tree (&arg, &find_parameter_packs_r, &ppd, ppd.visited);
  delete ppd.visited;

  /* Make sure we found some parameter packs.  */
  if (parameter_packs == NULL_TREE)
    {
      if (TYPE_P (arg))
	error ("expansion pattern %<%T%> contains no argument packs", arg);
      else
	error ("expansion pattern %<%E%> contains no argument packs", arg);
      return error_mark_node;
    }
  PACK_EXPANSION_PARAMETER_PACKS (result) = parameter_packs;

  PACK_EXPANSION_LOCAL_P (result) = at_function_scope_p ();

  return result;
}
/* Checks T for any "bare" parameter packs, which have not yet been
   expanded, and issues an error if any are found.  This operation can
   only be done on full expressions or types (e.g., an expression
   statement, "if" condition, etc.), because we could have expressions like:

     foo(f(g(h(args)))...)

   where "args" is a parameter pack.  check_for_bare_parameter_packs
   should not be called for the subexpressions args, h(args),
   g(h(args)), or f(g(h(args))), because we would produce erroneous
   error messages.

   Returns TRUE and emits an error if there were bare parameter packs,
   returns FALSE otherwise.  */

bool
check_for_bare_parameter_packs (tree t)
{
  tree parameter_packs = NULL_TREE;
  struct find_parameter_pack_data ppd;

  if (!processing_template_decl || !t || t == error_mark_node)
    return false;

  if (TREE_CODE (t) == TYPE_DECL)
    t = TREE_TYPE (t);

  /* Collect every pack mentioned anywhere in T.  */
  ppd.parameter_packs = &parameter_packs;
  ppd.visited = new hash_set<tree>;
  ppd.type_pack_expansion_p = false;
  cp_walk_tree (&t, &find_parameter_packs_r, &ppd, ppd.visited);
  delete ppd.visited;

  if (!parameter_packs)
    return false;

  /* Report each offending pack by name.  */
  location_t loc = EXPR_LOC_OR_LOC (t, input_location);
  error_at (loc, "parameter packs not expanded with %<...%>:");
  for (tree p = parameter_packs; p; p = TREE_CHAIN (p))
    {
      tree pack = TREE_VALUE (p);
      tree name;

      switch (TREE_CODE (pack))
	{
	case TEMPLATE_TYPE_PARM:
	case TEMPLATE_TEMPLATE_PARM:
	  name = TYPE_NAME (pack);
	  break;
	case TEMPLATE_PARM_INDEX:
	  name = DECL_NAME (TEMPLATE_PARM_DECL (pack));
	  break;
	default:
	  name = DECL_NAME (pack);
	  break;
	}

      if (name)
	inform (loc, " %qD", name);
      else
	inform (loc, " <anonymous>");
    }

  return true;
}
/* Expand any parameter packs that occur in the template arguments in
   ARGS.  Returns ARGS unchanged if no expansion is needed; otherwise
   returns a fresh TREE_VEC in which each argument pack is replaced by
   its elements, in order.  */
tree
expand_template_argument_pack (tree args)
{
  if (args == error_mark_node)
    return error_mark_node;

  tree result_args = NULL_TREE;
  int in_arg, out_arg = 0, nargs = args ? TREE_VEC_LENGTH (args) : 0;
  /* -1 means "no argument pack seen yet".  */
  int num_result_args = -1;
  int non_default_args_count = -1;

  /* First, determine if we need to expand anything, and the number of
     slots we'll need.  */
  for (in_arg = 0; in_arg < nargs; ++in_arg)
    {
      tree arg = TREE_VEC_ELT (args, in_arg);
      if (arg == NULL_TREE)
	return args;
      if (ARGUMENT_PACK_P (arg))
	{
	  int num_packed = TREE_VEC_LENGTH (ARGUMENT_PACK_ARGS (arg));
	  if (num_result_args < 0)
	    /* First pack: count the non-pack arguments that preceded
	       it, plus this pack's elements.  */
	    num_result_args = in_arg + num_packed;
	  else
	    num_result_args += num_packed;
	}
      else
	{
	  if (num_result_args >= 0)
	    num_result_args++;
	}
    }

  /* If no expansion is necessary, we're done.  */
  if (num_result_args < 0)
    return args;

  /* Expand arguments.  */
  result_args = make_tree_vec (num_result_args);
  if (NON_DEFAULT_TEMPLATE_ARGS_COUNT (args))
    non_default_args_count =
      GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (args);
  for (in_arg = 0; in_arg < nargs; ++in_arg)
    {
      tree arg = TREE_VEC_ELT (args, in_arg);
      if (ARGUMENT_PACK_P (arg))
	{
	  tree packed = ARGUMENT_PACK_ARGS (arg);
	  int i, num_packed = TREE_VEC_LENGTH (packed);
	  for (i = 0; i < num_packed; ++i, ++out_arg)
	    TREE_VEC_ELT (result_args, out_arg) = TREE_VEC_ELT(packed, i);
	  if (non_default_args_count > 0)
	    /* The pack counted as one argument but expands to
	       NUM_PACKED of them.  */
	    non_default_args_count += num_packed - 1;
	}
      else
	{
	  TREE_VEC_ELT (result_args, out_arg) = arg;
	  ++out_arg;
	}
    }
  if (non_default_args_count >= 0)
    SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (result_args, non_default_args_count);
  return result_args;
}
/* Checks if DECL shadows a template parameter.

   [temp.local]: A template-parameter shall not be redeclared within its
   scope (including nested scopes).

   Emits an error and returns TRUE if the DECL shadows a parameter,
   returns FALSE otherwise.  */

bool
check_template_shadow (tree decl)
{
  tree olddecl;

  /* If we're not in a template, we can't possibly shadow a template
     parameter.  */
  if (!current_template_parms)
    return true;

  /* Figure out what we're shadowing.  */
  if (TREE_CODE (decl) == OVERLOAD)
    decl = OVL_CURRENT (decl);
  olddecl = innermost_non_namespace_value (DECL_NAME (decl));

  /* No previous binding at all, or one that isn't a template
     parameter, means nothing is shadowed.  OLDDECL might be an
     OVERLOAD (or perhaps even an ERROR_MARK), so we can't blithely
     assume it to be a _DECL node.  */
  if (!olddecl
      || !DECL_P (olddecl)
      || !DECL_TEMPLATE_PARM_P (olddecl))
    return true;

  /* We check for decl != olddecl to avoid bogus errors for using a
     name inside a class.  We check TPFI to avoid duplicate errors for
     inline member templates.  */
  if (decl == olddecl
      || (DECL_TEMPLATE_PARM_P (decl)
	  && TEMPLATE_PARMS_FOR_INLINE (current_template_parms)))
    return true;

  /* Don't complain about the injected class name, as we've already
     complained about the class itself.  */
  if (DECL_SELF_REFERENCE_P (decl))
    return false;

  if (DECL_TEMPLATE_PARM_P (decl))
    error ("declaration of template parameter %q+D shadows "
	   "template parameter", decl);
  else
    error ("declaration of %q+#D shadows template parameter", decl);
  inform (DECL_SOURCE_LOCATION (olddecl),
	  "template parameter %qD declared here", olddecl);
  return false;
}
/* Return a new TEMPLATE_PARM_INDEX with the indicated INDEX, LEVEL,
   ORIG_LEVEL, DECL, and TYPE.  */

static tree
build_template_parm_index (int index,
			   int level,
			   int orig_level,
			   tree decl,
			   tree type)
{
  tree node = make_node (TEMPLATE_PARM_INDEX);
  TREE_TYPE (node) = type;
  TEMPLATE_PARM_IDX (node) = index;
  TEMPLATE_PARM_LEVEL (node) = level;
  TEMPLATE_PARM_ORIG_LEVEL (node) = orig_level;
  TEMPLATE_PARM_DECL (node) = decl;
  /* Mirror the constness/readonly-ness of the underlying decl.  */
  TREE_CONSTANT (node) = TREE_CONSTANT (decl);
  TREE_READONLY (node) = TREE_READONLY (decl);
  return node;
}
/* Find the canonical type parameter for the given template type
   parameter.  Returns the canonical type parameter, which may be TYPE
   if no such parameter existed.  */

static tree
canonical_type_parameter (tree type)
{
  int idx = TEMPLATE_TYPE_IDX (type);

  /* Make sure the per-index table exists and is large enough.  */
  if (!canonical_template_parms)
    vec_alloc (canonical_template_parms, idx + 1);
  if (canonical_template_parms->length () <= (unsigned) idx)
    vec_safe_grow_cleared (canonical_template_parms, idx + 1);

  /* Look for a structurally identical parameter already recorded at
     this index.  */
  for (tree list = (*canonical_template_parms)[idx];
       list != NULL_TREE; list = TREE_CHAIN (list))
    if (comptypes (type, TREE_VALUE (list), COMPARE_STRUCTURAL))
      return TREE_VALUE (list);

  /* None found: record TYPE as the canonical parameter.  */
  (*canonical_template_parms)[idx]
    = tree_cons (NULL_TREE, type, (*canonical_template_parms)[idx]);
  return type;
}
/* Return a TEMPLATE_PARM_INDEX, similar to INDEX, but whose
   TEMPLATE_PARM_LEVEL has been decreased by LEVELS.  If such a
   TEMPLATE_PARM_INDEX already exists, it is returned; otherwise, a
   new one is created.  TYPE is the (possibly substituted) type of the
   parameter at the reduced level; ARGS and COMPLAIN are passed on to
   tsubst_template_parms for template template parameters.  */

static tree
reduce_template_parm_level (tree index, tree type, int levels, tree args,
			    tsubst_flags_t complain)
{
  /* Reuse the cached descendant only if it sits at exactly the
     requested level and has the same type; otherwise build (and cache)
     a fresh one.  */
  if (TEMPLATE_PARM_DESCENDANTS (index) == NULL_TREE
      || (TEMPLATE_PARM_LEVEL (TEMPLATE_PARM_DESCENDANTS (index))
	  != TEMPLATE_PARM_LEVEL (index) - levels)
      || !same_type_p (type, TREE_TYPE (TEMPLATE_PARM_DESCENDANTS (index))))
    {
      tree orig_decl = TEMPLATE_PARM_DECL (index);
      tree decl, t;

      /* Clone the parameter's decl at the new type, preserving its
	 const-ness and artificiality.  */
      decl = build_decl (DECL_SOURCE_LOCATION (orig_decl),
			 TREE_CODE (orig_decl), DECL_NAME (orig_decl), type);
      TREE_CONSTANT (decl) = TREE_CONSTANT (orig_decl);
      TREE_READONLY (decl) = TREE_READONLY (orig_decl);
      DECL_ARTIFICIAL (decl) = 1;
      SET_DECL_TEMPLATE_PARM_P (decl);

      t = build_template_parm_index (TEMPLATE_PARM_IDX (index),
				     TEMPLATE_PARM_LEVEL (index) - levels,
				     TEMPLATE_PARM_ORIG_LEVEL (index),
				     decl, type);
      TEMPLATE_PARM_DESCENDANTS (index) = t;
      TEMPLATE_PARM_PARAMETER_PACK (t)
	= TEMPLATE_PARM_PARAMETER_PACK (index);

      /* Template template parameters need this: their result decl and
	 own (inner) parameter list must be rebuilt at the new level.  */
      if (TREE_CODE (decl) == TEMPLATE_DECL)
	{
	  DECL_TEMPLATE_RESULT (decl)
	    = build_decl (DECL_SOURCE_LOCATION (decl),
			  TYPE_DECL, DECL_NAME (decl), type);
	  DECL_ARTIFICIAL (DECL_TEMPLATE_RESULT (decl)) = true;
	  DECL_TEMPLATE_PARMS (decl) = tsubst_template_parms
	    (DECL_TEMPLATE_PARMS (orig_decl), args, complain);
	}
    }

  return TEMPLATE_PARM_DESCENDANTS (index);
}
/* Process information from new template parameter PARM and append it
   to the LIST being built.  This new parameter is a non-type
   parameter iff IS_NON_TYPE is true.  This new parameter is a
   parameter pack iff IS_PARAMETER_PACK is true.  The location of PARM
   is in PARM_LOC.  PARM is a TREE_LIST whose TREE_PURPOSE is the
   default argument and whose TREE_TYPE is any shorthand constraint.
   Returns LIST with the new parameter chained on the end.  */

tree
process_template_parm (tree list, location_t parm_loc, tree parm,
		       bool is_non_type, bool is_parameter_pack)
{
  tree decl = 0;
  int idx = 0;

  gcc_assert (TREE_CODE (parm) == TREE_LIST);
  tree defval = TREE_PURPOSE (parm);  /* Default argument, if any.  */
  tree constr = TREE_TYPE (parm);     /* Shorthand constraint, if any.  */

  if (list)
    {
      /* The new parameter's index is one past that of the last
	 non-erroneous parameter already on LIST.  */
      tree p = tree_last (list);

      if (p && TREE_VALUE (p) != error_mark_node)
	{
	  p = TREE_VALUE (p);
	  if (TREE_CODE (p) == TYPE_DECL || TREE_CODE (p) == TEMPLATE_DECL)
	    idx = TEMPLATE_TYPE_IDX (TREE_TYPE (p));
	  else
	    idx = TEMPLATE_PARM_IDX (DECL_INITIAL (p));
	}

      ++idx;
    }

  if (is_non_type)
    {
      parm = TREE_VALUE (parm);

      SET_DECL_TEMPLATE_PARM_P (parm);

      if (TREE_TYPE (parm) != error_mark_node)
	{
	  /* [temp.param]
	     The top-level cv-qualifiers on the template-parameter are
	     ignored when determining its type.  */
	  TREE_TYPE (parm) = TYPE_MAIN_VARIANT (TREE_TYPE (parm));
	  if (invalid_nontype_parm_type_p (TREE_TYPE (parm), 1))
	    TREE_TYPE (parm) = error_mark_node;
	  else if (uses_parameter_packs (TREE_TYPE (parm))
		   && !is_parameter_pack
		   /* If we're in a nested template parameter list, the template
		      template parameter could be a parameter pack.  */
		   && processing_template_parmlist == 1)
	    {
	      /* This template parameter is not a parameter pack, but it
		 should be.  Complain about "bare" parameter packs.  */
	      check_for_bare_parameter_packs (TREE_TYPE (parm));

	      /* Recover by calling this a parameter pack.  */
	      is_parameter_pack = true;
	    }
	}

      /* A template parameter is not modifiable.  */
      TREE_CONSTANT (parm) = 1;
      TREE_READONLY (parm) = 1;
      decl = build_decl (parm_loc,
			 CONST_DECL, DECL_NAME (parm), TREE_TYPE (parm));
      TREE_CONSTANT (decl) = 1;
      TREE_READONLY (decl) = 1;
      DECL_INITIAL (parm) = DECL_INITIAL (decl)
	= build_template_parm_index (idx, processing_template_decl,
				     processing_template_decl,
				     decl, TREE_TYPE (parm));

      TEMPLATE_PARM_PARAMETER_PACK (DECL_INITIAL (parm))
	= is_parameter_pack;
    }
  else
    {
      tree t;
      parm = TREE_VALUE (TREE_VALUE (parm));

      if (parm && TREE_CODE (parm) == TEMPLATE_DECL)
	{
	  t = cxx_make_type (TEMPLATE_TEMPLATE_PARM);
	  /* This is for distinguishing between real templates and template
	     template parameters */
	  TREE_TYPE (parm) = t;
	  TREE_TYPE (DECL_TEMPLATE_RESULT (parm)) = t;
	  decl = parm;
	}
      else
	{
	  t = cxx_make_type (TEMPLATE_TYPE_PARM);
	  /* parm is either IDENTIFIER_NODE or NULL_TREE.  */
	  decl = build_decl (parm_loc,
			     TYPE_DECL, parm, t);
	}

      TYPE_NAME (t) = decl;
      TYPE_STUB_DECL (t) = decl;
      parm = decl;
      TEMPLATE_TYPE_PARM_INDEX (t)
	= build_template_parm_index (idx, processing_template_decl,
				     processing_template_decl,
				     decl, TREE_TYPE (parm));
      TEMPLATE_TYPE_PARAMETER_PACK (t) = is_parameter_pack;
      TYPE_CANONICAL (t) = canonical_type_parameter (t);
    }

  DECL_ARTIFICIAL (decl) = 1;
  SET_DECL_TEMPLATE_PARM_P (decl);

  /* Build requirements for the type/template parameter.
     This must be done after SET_DECL_TEMPLATE_PARM_P or
     process_template_parm could fail.  */
  tree reqs = finish_shorthand_constraint (parm, constr);

  pushdecl (decl);

  /* Build the parameter node linking the parameter declaration,
     its default argument (if any), and its constraints (if any).  */
  parm = build_tree_list (defval, parm);
  TEMPLATE_PARM_CONSTRAINTS (parm) = reqs;
  return chainon (list, parm);
}
/* The end of a template parameter list has been reached.  Process the
   tree list into a parameter vector, converting each parameter into a more
   useful form.  Type parameters are saved as IDENTIFIER_NODEs, and others
   as PARM_DECLs.  */

tree
end_template_parm_list (tree parms)
{
  tree saved_parmlist = make_tree_vec (list_length (parms));

  /* Pop the dummy parameter level and add the real one.  */
  current_template_parms = TREE_CHAIN (current_template_parms);
  current_template_parms
    = tree_cons (size_int (processing_template_decl),
		 saved_parmlist, current_template_parms);

  /* Move each TREE_LIST node of PARMS into a vector slot, detaching
     it from the chain as we go.  */
  int slot = 0;
  for (tree parm = parms; parm; ++slot)
    {
      tree rest = TREE_CHAIN (parm);
      TREE_VEC_ELT (saved_parmlist, slot) = parm;
      TREE_CHAIN (parm) = NULL_TREE;
      parm = rest;
    }

  --processing_template_parmlist;

  return saved_parmlist;
}
// Explicitly indicate the end of the template parameter list, without
// building a parameter vector.  Used when the current template
// parameters have been constructed and/or managed explicitly, as when
// creating new template template parameters from a shorthand
// constraint.
void
end_template_parm_list ()
{
  processing_template_parmlist--;
}
/* Called after a template declaration has been fully seen: discard
   any pending specialization state and pop one level of template
   parameters (the one pushed by begin_template_parm_list).  */

void
end_template_decl (void)
{
  reset_specialization ();

  if (!processing_template_decl)
    return;

  /* This matches the pushlevel in begin_template_parm_list.  */
  finish_scope ();

  processing_template_decl--;
  current_template_parms = TREE_CHAIN (current_template_parms);
}
/* Takes a TREE_LIST representing a template parameter and convert it
   into an argument suitable to be passed to the type substitution
   functions.  Note that if the TREE_LIST contains an error_mark
   node, the returned argument is error_mark_node.  A parameter pack
   is converted into a one-element argument pack whose sole element is
   a pack expansion of the parameter.  */

tree
template_parm_to_arg (tree t)
{
  /* Tolerate non-TREE_LIST input by returning it untouched.  */
  if (t == NULL_TREE
      || TREE_CODE (t) != TREE_LIST)
    return t;

  if (error_operand_p (TREE_VALUE (t)))
    return error_mark_node;

  t = TREE_VALUE (t);

  if (TREE_CODE (t) == TYPE_DECL
      || TREE_CODE (t) == TEMPLATE_DECL)
    {
      /* A type or template template parameter: the argument is the
	 parameter's type.  */
      t = TREE_TYPE (t);

      if (TEMPLATE_TYPE_PARAMETER_PACK (t))
	{
	  /* Turn this argument into a TYPE_ARGUMENT_PACK
	     with a single element, which expands T.  */
	  tree vec = make_tree_vec (1);
	  if (CHECKING_P)
	    SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (vec, TREE_VEC_LENGTH (vec));

	  TREE_VEC_ELT (vec, 0) = make_pack_expansion (t);

	  t = cxx_make_type (TYPE_ARGUMENT_PACK);
	  SET_ARGUMENT_PACK_ARGS (t, vec);
	}
    }
  else
    {
      /* A non-type parameter: the argument is its TEMPLATE_PARM_INDEX,
	 stored in DECL_INITIAL.  */
      t = DECL_INITIAL (t);

      if (TEMPLATE_PARM_PARAMETER_PACK (t))
	{
	  /* Turn this argument into a NONTYPE_ARGUMENT_PACK
	     with a single element, which expands T.  */
	  tree vec = make_tree_vec (1);
	  tree type = TREE_TYPE (TEMPLATE_PARM_DECL (t));
	  if (CHECKING_P)
	    SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (vec, TREE_VEC_LENGTH (vec));

	  t = convert_from_reference (t);
	  TREE_VEC_ELT (vec, 0) = make_pack_expansion (t);

	  t  = make_node (NONTYPE_ARGUMENT_PACK);
	  SET_ARGUMENT_PACK_ARGS (t, vec);
	  TREE_TYPE (t) = type;
	}
      else
	t = convert_from_reference (t);
    }
  return t;
}
/* Given a single level of template parameters (a TREE_VEC), return a
   copy of it converted element-by-element into a set of template
   arguments.  */

static tree
template_parms_level_to_args (tree parms)
{
  tree args = copy_node (parms);
  TREE_TYPE (args) = NULL_TREE;

  /* Each slot converts independently, so order is immaterial.  */
  int len = TREE_VEC_LENGTH (args);
  for (int i = 0; i < len; ++i)
    TREE_VEC_ELT (args, i) = template_parm_to_arg (TREE_VEC_ELT (args, i));

  if (CHECKING_P)
    SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (args, TREE_VEC_LENGTH (args));

  return args;
}
/* Given a set of template parameters, return them as a set of template
   arguments.  The template parameters are represented as a TREE_VEC, in
   the form documented in cp-tree.h for template arguments.  For a
   single level, the result is that level's TREE_VEC directly; for
   multiple levels it is a TREE_VEC of per-level TREE_VECs, outermost
   level first.  */

static tree
template_parms_to_args (tree parms)
{
  int depth = TMPL_PARMS_DEPTH (parms);
  int slot = depth;
  tree args = NULL_TREE;

  /* Only allocate the outer vector when there is more than one level;
     a single level is returned as a plain TREE_VEC of arguments.  */
  if (depth > 1)
    args = make_tree_vec (depth);

  for (tree level = parms; level; level = TREE_CHAIN (level))
    {
      tree a = template_parms_level_to_args (TREE_VALUE (level));

      if (depth > 1)
	TREE_VEC_ELT (args, --slot) = a;
      else
	args = a;
    }

  return args;
}
/* Within the declaration of a template, return the currently active
   template parameters (current_template_parms) converted to an
   argument TREE_VEC via template_parms_to_args.  */

static tree
current_template_args (void)
{
  return template_parms_to_args (current_template_parms);
}
/* Update the declared TYPE by doing any lookups which were thought to be
   dependent, but are not now that we know the SCOPE of the declarator.
   ORIG_TYPE may be a type or a TYPE_DECL; the return value has the same
   form.  On substitution failure the original is returned unchanged.  */

tree
maybe_update_decl_type (tree orig_type, tree scope)
{
  tree type = orig_type;

  if (type == NULL_TREE)
    return type;

  if (TREE_CODE (orig_type) == TYPE_DECL)
    type = TREE_TYPE (type);

  if (scope && TYPE_P (scope) && dependent_type_p (scope)
      && dependent_type_p (type)
      /* Don't bother building up the args in this case.  */
      && TREE_CODE (type) != TEMPLATE_TYPE_PARM)
    {
      /* tsubst in the args corresponding to the template parameters,
	 including auto if present.  Most things will be unchanged, but
	 make_typename_type and tsubst_qualified_id will resolve
	 TYPENAME_TYPEs and SCOPE_REFs that were previously dependent.  */
      tree args = current_template_args ();
      tree auto_node = type_uses_auto (type);
      tree pushed;
      if (auto_node)
	{
	  /* Add an innermost level holding the auto node itself so the
	     substitution maps auto back to itself.  */
	  tree auto_vec = make_tree_vec (1);
	  TREE_VEC_ELT (auto_vec, 0) = auto_node;
	  args = add_to_template_args (args, auto_vec);
	}
      /* Perform the substitution in the scope of SCOPE so that member
	 lookups resolve there.  */
      pushed = push_scope (scope);
      type = tsubst (type, args, tf_warning_or_error, NULL_TREE);
      if (pushed)
	pop_scope (scope);
    }

  if (type == error_mark_node)
    return orig_type;

  if (TREE_CODE (orig_type) == TYPE_DECL)
    {
      /* Preserve the identity of the original TYPE_DECL when nothing
	 actually changed.  */
      if (same_type_p (type, TREE_TYPE (orig_type)))
	type = orig_type;
      else
	type = TYPE_NAME (type);
    }
  return type;
}
/* Return a TEMPLATE_DECL corresponding to DECL, using the indicated
   template PARMS.  If MEMBER_TEMPLATE_P is true, the new template is
   a member template.  The new decl inherits DECL's context and source
   location.  */

tree
build_template_decl (tree decl, tree parms, bool member_template_p)
{
  tree result = build_lang_decl (TEMPLATE_DECL, DECL_NAME (decl), NULL_TREE);

  DECL_CONTEXT (result) = DECL_CONTEXT (decl);
  DECL_SOURCE_LOCATION (result) = DECL_SOURCE_LOCATION (decl);
  DECL_TEMPLATE_PARMS (result) = parms;
  DECL_MEMBER_TEMPLATE_P (result) = member_template_p;

  return result;
}
/* Bookkeeping used while checking that every template parameter of a
   partial specialization is used in (deducible from) its explicit
   argument list; filled in by mark_template_parm.  */
struct template_parm_data
{
  /* The level of the template parameters we are currently
     processing.  */
  int level;

  /* The index of the specialization argument we are currently
     processing.  */
  int current_arg;

  /* An array whose size is the number of template parameters.  The
     elements are nonzero if the parameter has been used in any one
     of the arguments processed so far.  */
  int* parms;

  /* An array whose size is the number of template arguments.  The
     elements are nonzero if the argument makes use of template
     parameters of this level.  */
  int* arg_uses_template_parms;
};
/* Subroutine of push_template_decl used to see if each template
   parameter in a partial specialization is used in the explicit
   argument list.  If T is of the LEVEL given in DATA (which is
   treated as a template_parm_data*), then DATA->PARMS is marked
   appropriately.  */

static int
mark_template_parm (tree t, void* data)
{
  struct template_parm_data* info = (struct template_parm_data*) data;
  int level, idx;

  template_parm_level_and_index (t, &level, &idx);

  if (level == info->level)
    {
      /* Record both that this parameter has been seen and that the
	 current argument uses a parameter of this level.  */
      info->parms[idx] = 1;
      info->arg_uses_template_parms[info->current_arg] = 1;
    }

  /* In C++17 the type of a non-type argument is a deduced context,
     so walk it as well.  */
  if (cxx_dialect >= cxx1z && TREE_CODE (t) == TEMPLATE_PARM_INDEX)
    for_each_template_parm (TREE_TYPE (t),
			    &mark_template_parm,
			    data,
			    NULL,
			    /*include_nondeduced_p=*/false);

  /* Return zero so that for_each_template_parm will continue the
     traversal of the tree; we want to mark *every* template parm.  */
  return 0;
}
/* Process the partial specialization DECL.  Verifies that every
   template parameter of the specialization is deducible from its
   argument list, that the argument list actually specializes the
   primary template, and that pack expansions appear only at the end;
   then builds and registers the corresponding TEMPLATE_DECL and warns
   about earlier instantiations this specialization would ambiguate.
   Returns DECL, or error_mark_node on unrecoverable errors.  */

static tree
process_partial_specialization (tree decl)
{
  tree type = TREE_TYPE (decl);
  tree tinfo = get_template_info (decl);
  tree maintmpl = TI_TEMPLATE (tinfo);
  tree specargs = TI_ARGS (tinfo);
  tree inner_args = INNERMOST_TEMPLATE_ARGS (specargs);
  tree main_inner_parms = DECL_INNERMOST_TEMPLATE_PARMS (maintmpl);
  tree inner_parms;
  tree inst;
  int nargs = TREE_VEC_LENGTH (inner_args);
  int ntparms;
  int i;
  bool did_error_intro = false;
  struct template_parm_data tpd;
  struct template_parm_data tpd2;

  gcc_assert (current_template_parms);

  /* A concept cannot be specialized.  */
  if (flag_concepts && variable_concept_p (maintmpl))
    {
      error ("specialization of variable concept %q#D", maintmpl);
      return error_mark_node;
    }

  inner_parms = INNERMOST_TEMPLATE_PARMS (current_template_parms);
  ntparms = TREE_VEC_LENGTH (inner_parms);

  /* We check that each of the template parameters given in the
     partial specialization is used in the argument list to the
     specialization.  For example:

       template <class T> struct S;
       template <class T> struct S<T*>;

     The second declaration is OK because `T*' uses the template
     parameter T, whereas

       template <class T> struct S<int>;

     is no good.  Even trickier is:

       template <class T>
       struct S1
       {
	  template <class U>
	  struct S2;
	  template <class U>
	  struct S2<T>;
       };

     The S2<T> declaration is actually invalid; it is a
     full-specialization.  Of course,

	  template <class U>
	  struct S2<T (*)(U)>;

     or some such would have been OK.  */
  tpd.level = TMPL_PARMS_DEPTH (current_template_parms);
  tpd.parms = XALLOCAVEC (int, ntparms);
  memset (tpd.parms, 0, sizeof (int) * ntparms);

  tpd.arg_uses_template_parms = XALLOCAVEC (int, nargs);
  memset (tpd.arg_uses_template_parms, 0, sizeof (int) * nargs);

  /* Walk each specialization argument, recording which of our
     parameters appear in it.  */
  for (i = 0; i < nargs; ++i)
    {
      tpd.current_arg = i;
      for_each_template_parm (TREE_VEC_ELT (inner_args, i),
			      &mark_template_parm,
			      &tpd,
			      NULL,
			      /*include_nondeduced_p=*/false);
    }
  for (i = 0; i < ntparms; ++i)
    if (tpd.parms[i] == 0)
      {
	/* One of the template parms was not used in a deduced context in the
	   specialization.  */
	if (!did_error_intro)
	  {
	    error ("template parameters not deducible in "
		   "partial specialization:");
	    did_error_intro = true;
	  }

	inform (input_location, "        %qD",
		TREE_VALUE (TREE_VEC_ELT (inner_parms, i)));
      }

  if (did_error_intro)
    return error_mark_node;

  /* [temp.class.spec]

     The argument list of the specialization shall not be identical to
     the implicit argument list of the primary template.  */
  tree main_args
    = TI_ARGS (get_template_info (DECL_TEMPLATE_RESULT (maintmpl)));
  if (comp_template_args (inner_args, INNERMOST_TEMPLATE_ARGS (main_args))
      && (!flag_concepts
	  || !strictly_subsumes (current_template_constraints (),
				 get_constraints (maintmpl))))
    {
      if (!flag_concepts)
        error ("partial specialization %q+D does not specialize "
	       "any template arguments; to define the primary template, "
	       "remove the template argument list", decl);
      else
        error ("partial specialization %q+D does not specialize any "
	       "template arguments and is not more constrained than "
	       "the primary template; to define the primary template, "
	       "remove the template argument list", decl);
      inform (DECL_SOURCE_LOCATION (maintmpl), "primary template here");
    }

  /* A partial specialization that replaces multiple parameters of the
     primary template with a pack expansion is less specialized for those
     parameters.  */
  if (nargs < DECL_NTPARMS (maintmpl))
    {
      error ("partial specialization is not more specialized than the "
	     "primary template because it replaces multiple parameters "
	     "with a pack expansion");
      inform (DECL_SOURCE_LOCATION (maintmpl), "primary template here");
      /* Avoid crash in process_partial_specialization.  */
      return decl;
    }

  /* If we aren't in a dependent class, we can actually try deduction.  */
  else if (tpd.level == 1
	   /* FIXME we should be able to handle a partial specialization of a
	      partial instantiation, but currently we can't (c++/41727).  */
	   && TMPL_ARGS_DEPTH (specargs) == 1
	   && !get_partial_spec_bindings (maintmpl, maintmpl, specargs))
    {
      if (permerror (input_location, "partial specialization %qD is not "
		     "more specialized than", decl))
	inform (DECL_SOURCE_LOCATION (maintmpl), "primary template %qD",
		maintmpl);
    }

  /* [temp.class.spec]

     A partially specialized non-type argument expression shall not
     involve template parameters of the partial specialization except
     when the argument expression is a simple identifier.

     The type of a template parameter corresponding to a specialized
     non-type argument shall not be dependent on a parameter of the
     specialization.

     Also, we verify that pack expansions only occur at the
     end of the argument list.  */
  gcc_assert (nargs == DECL_NTPARMS (maintmpl));
  tpd2.parms = 0;
  for (i = 0; i < nargs; ++i)
    {
      tree parm = TREE_VALUE (TREE_VEC_ELT (main_inner_parms, i));
      tree arg = TREE_VEC_ELT (inner_args, i);
      tree packed_args = NULL_TREE;
      int j, len = 1;

      if (ARGUMENT_PACK_P (arg))
        {
          /* Extract the arguments from the argument pack.  We'll be
             iterating over these in the following loop.  */
          packed_args = ARGUMENT_PACK_ARGS (arg);
          len = TREE_VEC_LENGTH (packed_args);
        }

      for (j = 0; j < len; j++)
        {
          if (packed_args)
            /* Get the Jth argument in the parameter pack.  */
            arg = TREE_VEC_ELT (packed_args, j);

          if (PACK_EXPANSION_P (arg))
            {
              /* Pack expansions must come at the end of the
                 argument list.  */
              if ((packed_args && j < len - 1)
                  || (!packed_args && i < nargs - 1))
                {
                  if (TREE_CODE (arg) == EXPR_PACK_EXPANSION)
                    error ("parameter pack argument %qE must be at the "
			   "end of the template argument list", arg);
                  else
                    error ("parameter pack argument %qT must be at the "
			   "end of the template argument list", arg);
                }
            }

          if (TREE_CODE (arg) == EXPR_PACK_EXPANSION)
            /* We only care about the pattern.  */
            arg = PACK_EXPANSION_PATTERN (arg);

          if (/* These first two lines are the `non-type' bit.  */
              !TYPE_P (arg)
              && TREE_CODE (arg) != TEMPLATE_DECL
              /* This next two lines are the `argument expression is not just a
                 simple identifier' condition and also the `specialized
                 non-type argument' bit.  */
              && TREE_CODE (arg) != TEMPLATE_PARM_INDEX
	      && !(REFERENCE_REF_P (arg)
		   && TREE_CODE (TREE_OPERAND (arg, 0)) == TEMPLATE_PARM_INDEX))
            {
              if ((!packed_args && tpd.arg_uses_template_parms[i])
                  || (packed_args && uses_template_parms (arg)))
                error ("template argument %qE involves template parameter(s)",
                       arg);
              else
                {
                  /* Look at the corresponding template parameter,
                     marking which template parameters its type depends
                     upon.  */
                  tree type = TREE_TYPE (parm);

                  if (!tpd2.parms)
                    {
                      /* We haven't yet initialized TPD2.  Do so now.  */
                      tpd2.arg_uses_template_parms = XALLOCAVEC (int, nargs);
                      /* The number of parameters here is the number in the
                         main template, which, as checked in the assertion
                         above, is NARGS.  */
                      tpd2.parms = XALLOCAVEC (int, nargs);
                      tpd2.level =
                        TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (maintmpl));
                    }

                  /* Mark the template parameters.  But this time, we're
                     looking for the template parameters of the main
                     template, not in the specialization.  */
                  tpd2.current_arg = i;
                  tpd2.arg_uses_template_parms[i] = 0;
                  memset (tpd2.parms, 0, sizeof (int) * nargs);
                  for_each_template_parm (type,
                                          &mark_template_parm,
                                          &tpd2,
                                          NULL,
					  /*include_nondeduced_p=*/false);

                  if (tpd2.arg_uses_template_parms [i])
                    {
                      /* The type depended on some template parameters.
                         If they are fully specialized in the
                         specialization, that's OK.  */
                      int j;
                      int count = 0;
                      for (j = 0; j < nargs; ++j)
                        if (tpd2.parms[j] != 0
                            && tpd.arg_uses_template_parms [j])
                          ++count;
                      if (count != 0)
                        error_n (input_location, count,
                                 "type %qT of template argument %qE depends "
                                 "on a template parameter",
                                 "type %qT of template argument %qE depends "
                                 "on template parameters",
                                 type,
                                 arg);
                    }
                }
            }
        }
    }

  /* We should only get here once.  */
  if (TREE_CODE (decl) == TYPE_DECL)
    gcc_assert (!COMPLETE_TYPE_P (type));

  // Build the template decl.
  tree tmpl = build_template_decl (decl, current_template_parms,
				   DECL_MEMBER_TEMPLATE_P (maintmpl));
  TREE_TYPE (tmpl) = type;
  DECL_TEMPLATE_RESULT (tmpl) = decl;
  SET_DECL_TEMPLATE_SPECIALIZATION (tmpl);
  DECL_TEMPLATE_INFO (tmpl) = build_template_info (maintmpl, specargs);
  DECL_PRIMARY_TEMPLATE (tmpl) = maintmpl;

  /* Give template template parms a DECL_CONTEXT of the template
     for which they are a parameter.  */
  for (i = 0; i < ntparms; ++i)
    {
      tree parm = TREE_VALUE (TREE_VEC_ELT (inner_parms, i));
      if (TREE_CODE (parm) == TEMPLATE_DECL)
	DECL_CONTEXT (parm) = tmpl;
    }

  if (VAR_P (decl))
    /* We didn't register this in check_explicit_specialization so we could
       wait until the constraints were set.  */
    decl = register_specialization (decl, maintmpl, specargs, false, 0);
  else
    associate_classtype_constraints (type);

  /* Chain the new partial specialization onto the primary template's
     list of specializations.  */
  DECL_TEMPLATE_SPECIALIZATIONS (maintmpl)
    = tree_cons (specargs, tmpl,
		 DECL_TEMPLATE_SPECIALIZATIONS (maintmpl));
  TREE_TYPE (DECL_TEMPLATE_SPECIALIZATIONS (maintmpl)) = type;

  /* Warn about existing instantiations that this partial
     specialization would have matched (or ambiguated).  */
  for (inst = DECL_TEMPLATE_INSTANTIATIONS (maintmpl); inst;
       inst = TREE_CHAIN (inst))
    {
      tree instance = TREE_VALUE (inst);
      if (TYPE_P (instance)
	  ? (COMPLETE_TYPE_P (instance)
	     && CLASSTYPE_IMPLICIT_INSTANTIATION (instance))
	  : DECL_TEMPLATE_INSTANTIATION (instance))
	{
	  tree spec = most_specialized_partial_spec (instance, tf_none);
	  tree inst_decl = (DECL_P (instance)
			    ? instance : TYPE_NAME (instance));
	  if (!spec)
	    /* OK */;
	  else if (spec == error_mark_node)
	    permerror (input_location,
		       "declaration of %qD ambiguates earlier template "
		       "instantiation for %qD", decl, inst_decl);
	  else if (TREE_VALUE (spec) == tmpl)
	    permerror (input_location,
		       "partial specialization of %qD after instantiation "
		       "of %qD", decl, inst_decl);
	}
    }

  return decl;
}
/* PARM is a template parameter of some form (a decl, a parm type, or
   an index node); return the corresponding TEMPLATE_PARM_INDEX.  */

static tree
get_template_parm_index (tree parm)
{
  /* First step from a parameter decl to the type or index it wraps.  */
  switch (TREE_CODE (parm))
    {
    case PARM_DECL:
    case CONST_DECL:
      parm = DECL_INITIAL (parm);
      break;
    case TYPE_DECL:
    case TEMPLATE_DECL:
      parm = TREE_TYPE (parm);
      break;
    default:
      break;
    }

  /* Then step from a template parameter type to its index node.  */
  switch (TREE_CODE (parm))
    {
    case TEMPLATE_TYPE_PARM:
    case BOUND_TEMPLATE_TEMPLATE_PARM:
    case TEMPLATE_TEMPLATE_PARM:
      parm = TEMPLATE_TYPE_PARM_INDEX (parm);
      break;
    default:
      break;
    }

  gcc_assert (TREE_CODE (parm) == TEMPLATE_PARM_INDEX);
  return parm;
}
/* Subroutine of fixed_parameter_pack_p below.  Record in PPD any
   template parameter packs used by the template parameter PARM.  */

static void
fixed_parameter_pack_p_1 (tree parm, struct find_parameter_pack_data *ppd)
{
  /* A type parm can't refer to another parm.  */
  if (TREE_CODE (parm) == TYPE_DECL)
    return;

  if (TREE_CODE (parm) == PARM_DECL)
    {
      /* A non-type parameter: scan its type for parameter packs.  */
      cp_walk_tree (&TREE_TYPE (parm), &find_parameter_packs_r,
		    ppd, ppd->visited);
      return;
    }

  /* Otherwise this must be a template template parameter: recurse into
     each of its own template parameters.  */
  gcc_assert (TREE_CODE (parm) == TEMPLATE_DECL);

  tree inner = INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (parm));
  for (int ix = 0; ix < TREE_VEC_LENGTH (inner); ++ix)
    fixed_parameter_pack_p_1 (TREE_VALUE (TREE_VEC_ELT (inner, ix)), ppd);
}
/* PARM is a template parameter pack.  Return any parameter packs used in
   its type or the type of any of its template parameters.  If there are
   any such packs, it will be instantiated into a fixed template parameter
   list by partial instantiation rather than be fully deduced.  */

tree
fixed_parameter_pack_p (tree parm)
{
  /* This can only be true in a member template (orig level >= 2), only
     for a parameter pack, and a type parm can't refer to another parm.
     Short-circuiting keeps the checks in their original order.  */
  if (TEMPLATE_PARM_ORIG_LEVEL (get_template_parm_index (parm)) < 2
      || !template_parameter_pack_p (parm)
      || TREE_CODE (parm) == TYPE_DECL)
    return NULL_TREE;

  tree packs = NULL_TREE;
  struct find_parameter_pack_data ppd;
  ppd.parameter_packs = &packs;
  ppd.visited = new hash_set<tree>;
  ppd.type_pack_expansion_p = false;

  fixed_parameter_pack_p_1 (parm, &ppd);

  delete ppd.visited;
  return packs;
}
/* Check that a template declaration's use of default arguments and
   parameter packs is not invalid.  Here, PARMS are the template
   parameters.  IS_PRIMARY is true if DECL is the thing declared by
   a primary template.  IS_PARTIAL is true if DECL is a partial
   specialization.

   IS_FRIEND_DECL is nonzero if DECL is a friend function template
   declaration (but not a definition); 1 indicates a declaration, 2
   indicates a redeclaration.  When IS_FRIEND_DECL=2, no errors are
   emitted for extraneous default arguments.

   Returns TRUE if there were no errors found, FALSE otherwise.  */

bool
check_default_tmpl_args (tree decl, tree parms, bool is_primary,
			 bool is_partial, int is_friend_decl)
{
  const char *msg;
  int last_level_to_check;
  tree parm_level;
  bool no_errors = true;

  /* [temp.param]

     A default template-argument shall not be specified in a
     function template declaration or a function template definition, nor
     in the template-parameter-list of the definition of a member of a
     class template.  */

  if (TREE_CODE (CP_DECL_CONTEXT (decl)) == FUNCTION_DECL
      || (TREE_CODE (decl) == FUNCTION_DECL && DECL_LOCAL_FUNCTION_P (decl)))
    /* You can't have a function template declaration in a local
       scope, nor you can you define a member of a class template in a
       local scope.  */
    return true;

  if ((TREE_CODE (decl) == TYPE_DECL
       && TREE_TYPE (decl)
       && LAMBDA_TYPE_P (TREE_TYPE (decl)))
      || (TREE_CODE (decl) == FUNCTION_DECL
	  && LAMBDA_FUNCTION_P (decl)))
    /* A lambda doesn't have an explicit declaration; don't complain
       about the parms of the enclosing class.  */
    return true;

  if (current_class_type
      && !TYPE_BEING_DEFINED (current_class_type)
      && DECL_LANG_SPECIFIC (decl)
      && DECL_DECLARES_FUNCTION_P (decl)
      /* If this is either a friend defined in the scope of the class
	 or a member function.  */
      && (DECL_FUNCTION_MEMBER_P (decl)
	  ? same_type_p (DECL_CONTEXT (decl), current_class_type)
	  : DECL_FRIEND_CONTEXT (decl)
	  ? same_type_p (DECL_FRIEND_CONTEXT (decl), current_class_type)
	  : false)
      /* And, if it was a member function, it really was defined in
	 the scope of the class.  */
      && (!DECL_FUNCTION_MEMBER_P (decl)
	  || DECL_INITIALIZED_IN_CLASS_P (decl)))
    /* We already checked these parameters when the template was
       declared, so there's no need to do it again now.  This function
       was defined in class scope, but we're processing its body now
       that the class is complete.  */
    return true;

  /* Core issue 226 (C++0x only): the following only applies to class
     templates.  */
  if (is_primary
      && ((cxx_dialect == cxx98) || TREE_CODE (decl) != FUNCTION_DECL))
    {
      /* [temp.param]

	 If a template-parameter has a default template-argument, all
	 subsequent template-parameters shall have a default
	 template-argument supplied.  */
      for (parm_level = parms; parm_level; parm_level = TREE_CHAIN (parm_level))
	{
	  tree inner_parms = TREE_VALUE (parm_level);
	  int ntparms = TREE_VEC_LENGTH (inner_parms);
	  int seen_def_arg_p = 0;
	  int i;

	  for (i = 0; i < ntparms; ++i)
	    {
	      tree parm = TREE_VEC_ELT (inner_parms, i);

	      if (parm == error_mark_node)
		continue;

	      if (TREE_PURPOSE (parm))
		seen_def_arg_p = 1;
	      else if (seen_def_arg_p
		       && !template_parameter_pack_p (TREE_VALUE (parm)))
		{
		  error ("no default argument for %qD", TREE_VALUE (parm));
		  /* For better subsequent error-recovery, we indicate that
		     there should have been a default argument.  */
		  TREE_PURPOSE (parm) = error_mark_node;
		  no_errors = false;
		}
	      else if (!is_partial
		       && !is_friend_decl
		       /* Don't complain about an enclosing partial
			  specialization.  */
		       && parm_level == parms
		       && TREE_CODE (decl) == TYPE_DECL
		       && i < ntparms - 1
		       && template_parameter_pack_p (TREE_VALUE (parm))
		       /* A fixed parameter pack will be partially
			  instantiated into a fixed length list.  */
		       && !fixed_parameter_pack_p (TREE_VALUE (parm)))
		{
		  /* A primary class template can only have one
		     parameter pack, at the end of the template
		     parameter list.  */

		  error ("parameter pack %q+D must be at the end of the"
			 " template parameter list", TREE_VALUE (parm));

		  TREE_VALUE (TREE_VEC_ELT (inner_parms, i))
		    = error_mark_node;
		  no_errors = false;
		}
	    }
	}
    }

  if (((cxx_dialect == cxx98) && TREE_CODE (decl) != TYPE_DECL)
      || is_partial
      || !is_primary
      || is_friend_decl)
    /* For an ordinary class template, default template arguments are
       allowed at the innermost level, e.g.:
	 template <class T = int>
	 struct S {};
       but, in a partial specialization, they're not allowed even
       there, as we have in [temp.class.spec]:

	 The template parameter list of a specialization shall not
	 contain default template argument values.

       So, for a partial specialization, or for a function template
       (in C++98/C++03), we look at all of them.  */
    ;
  else
    /* But, for a primary class template that is not a partial
       specialization we look at all template parameters except the
       innermost ones.  */
    parms = TREE_CHAIN (parms);

  /* Figure out what error message to issue.  */
  if (is_friend_decl == 2)
    msg = G_("default template arguments may not be used in function template "
	     "friend re-declaration");
  else if (is_friend_decl)
    msg = G_("default template arguments may not be used in function template "
	     "friend declarations");
  else if (TREE_CODE (decl) == FUNCTION_DECL && (cxx_dialect == cxx98))
    msg = G_("default template arguments may not be used in function templates "
	     "without -std=c++11 or -std=gnu++11");
  else if (is_partial)
    msg = G_("default template arguments may not be used in "
	     "partial specializations");
  else if (current_class_type && CLASSTYPE_IS_TEMPLATE (current_class_type))
    msg = G_("default argument for template parameter for class enclosing %qD");
  else
    /* Per [temp.param]/9, "A default template-argument shall not be
       specified in the template-parameter-lists of the definition of
       a member of a class template that appears outside of the member's
       class.", thus if we aren't handling a member of a class template
       there is no need to examine the parameters.  */
    return true;

  if (current_class_type && TYPE_BEING_DEFINED (current_class_type))
    /* If we're inside a class definition, there's no need to
       examine the parameters to the class itself.  On the one
       hand, they will be checked when the class is defined, and,
       on the other, default arguments are valid in things like:
	 template <class T = double>
	 struct S { template <class U> void f(U); };
       Here the default argument for `S' has no bearing on the
       declaration of `f'.  */
    last_level_to_check = template_class_depth (current_class_type) + 1;
  else
    /* Check everything.  */
    last_level_to_check = 0;

  for (parm_level = parms;
       parm_level && TMPL_PARMS_DEPTH (parm_level) >= last_level_to_check;
       parm_level = TREE_CHAIN (parm_level))
    {
      tree inner_parms = TREE_VALUE (parm_level);
      int i;
      int ntparms;

      ntparms = TREE_VEC_LENGTH (inner_parms);
      for (i = 0; i < ntparms; ++i)
	{
	  if (TREE_VEC_ELT (inner_parms, i) == error_mark_node)
	    continue;

	  if (TREE_PURPOSE (TREE_VEC_ELT (inner_parms, i)))
	    {
	      if (msg)
		{
		  no_errors = false;
		  if (is_friend_decl == 2)
		    return no_errors;

		  error (msg, decl);
		  msg = 0;
		}

	      /* Clear out the default argument so that we are not
		 confused later.  */
	      TREE_PURPOSE (TREE_VEC_ELT (inner_parms, i)) = NULL_TREE;
	    }
	}

      /* At this point, if we're still interested in issuing messages,
	 they must apply to classes surrounding the object declared.  */
      if (msg)
	msg = G_("default argument for template parameter for class "
		 "enclosing %qD");
    }

  return no_errors;
}
/* Worker for push_template_decl_real, called via
   for_each_template_parm.  DATA is really an int, indicating the
   level of the parameters we are interested in.  If T is a template
   parameter of that level, return nonzero.  */

static int
template_parm_this_level_p (tree t, void* data)
{
  int wanted = *(int *) data;
  int level = (TREE_CODE (t) == TEMPLATE_PARM_INDEX
	       ? TEMPLATE_PARM_LEVEL (t)
	       : TEMPLATE_TYPE_LEVEL (t));
  return level == wanted;
}
/* Worker for uses_outer_template_parms, called via for_each_template_parm.
   DATA points to an int holding the innermost outer level of parameters.
   Returns nonzero iff T is a template parameter of that level or further
   out.  */
static int
template_parm_outer_level (tree t, void *data)
{
  int bound = *(int *) data;
  int found = (TREE_CODE (t) == TEMPLATE_PARM_INDEX
	       ? TEMPLATE_PARM_LEVEL (t)
	       : TEMPLATE_TYPE_LEVEL (t));
  return found <= bound;
}
/* Creates a TEMPLATE_DECL for the indicated DECL using the template
   parameters given by current_template_args, or reuses a
   previously existing one, if appropriate.  Returns the DECL, or an
   equivalent one, if it is replaced via a call to duplicate_decls.
   If IS_FRIEND is true, DECL is a friend declaration.

   On error an error is emitted and error_mark_node is returned;
   otherwise the result is DECL_TEMPLATE_RESULT of the (possibly
   newly built) TEMPLATE_DECL.  */
tree
push_template_decl_real (tree decl, bool is_friend)
{
  tree tmpl;
  tree args;
  tree info;
  tree ctx;
  bool is_primary;
  bool is_partial;
  int new_template_p = 0;
  /* True if the template is a member template, in the sense of
     [temp.mem].  */
  bool member_template_p = false;

  if (decl == error_mark_node || !current_template_parms)
    return error_mark_node;

  /* See if this is a partial specialization: either an implicit
     class-type typedef for a class specialization, or a variable
     template specialization declared with a template-id.  */
  is_partial = ((DECL_IMPLICIT_TYPEDEF_P (decl)
		 && TREE_CODE (TREE_TYPE (decl)) != ENUMERAL_TYPE
		 && CLASSTYPE_TEMPLATE_SPECIALIZATION (TREE_TYPE (decl)))
		|| (VAR_P (decl)
		    && DECL_LANG_SPECIFIC (decl)
		    && DECL_TEMPLATE_SPECIALIZATION (decl)
		    && TINFO_USED_TEMPLATE_ID (DECL_TEMPLATE_INFO (decl))));

  if (TREE_CODE (decl) == FUNCTION_DECL && DECL_FRIEND_P (decl))
    is_friend = true;

  /* Work out the context CTX in which DECL is declared.  */
  if (is_friend)
    /* For a friend, we want the context of the friend function, not
       the type of which it is a friend.  */
    ctx = CP_DECL_CONTEXT (decl);
  else if (CP_DECL_CONTEXT (decl)
	   && TREE_CODE (CP_DECL_CONTEXT (decl)) != NAMESPACE_DECL)
    /* In the case of a virtual function, we want the class in which
       it is defined.  */
    ctx = CP_DECL_CONTEXT (decl);
  else
    /* Otherwise, if we're currently defining some class, the DECL
       is assumed to be a member of the class.  */
    ctx = current_scope ();

  /* Namespace scope is treated as "no context" below.  */
  if (ctx && TREE_CODE (ctx) == NAMESPACE_DECL)
    ctx = NULL_TREE;

  if (!DECL_CONTEXT (decl))
    DECL_CONTEXT (decl) = FROB_CONTEXT (current_namespace);

  /* See if this is a primary template.  */
  if (is_friend && ctx
      && uses_template_parms_level (ctx, processing_template_decl))
    /* A friend template that specifies a class context, i.e.
         template <typename T> friend void A<T>::f();
       is not primary.  */
    is_primary = false;
  else if (TREE_CODE (decl) == TYPE_DECL
	   && LAMBDA_TYPE_P (TREE_TYPE (decl)))
    is_primary = false;
  else
    is_primary = template_parm_scope_p ();

  /* Validity checks that only apply to primary templates.  */
  if (is_primary)
    {
      warning (OPT_Wtemplates, "template %qD declared", decl);

      if (DECL_CLASS_SCOPE_P (decl))
	member_template_p = true;

      if (TREE_CODE (decl) == TYPE_DECL
	  && anon_aggrname_p (DECL_NAME (decl)))
	{
	  error ("template class without a name");
	  return error_mark_node;
	}
      else if (TREE_CODE (decl) == FUNCTION_DECL)
	{
	  if (member_template_p)
	    {
	      if (DECL_OVERRIDE_P (decl) || DECL_FINAL_P (decl))
		error ("member template %qD may not have virt-specifiers", decl);
	    }
	  if (DECL_DESTRUCTOR_P (decl))
	    {
	      /* [temp.mem]
		 A destructor shall not be a member template.  */
	      error ("destructor %qD declared as member template", decl);
	      return error_mark_node;
	    }
	  if (NEW_DELETE_OPNAME_P (DECL_NAME (decl))
	      && (!prototype_p (TREE_TYPE (decl))
		  || TYPE_ARG_TYPES (TREE_TYPE (decl)) == void_list_node
		  || !TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (decl)))
		  || (TREE_CHAIN (TYPE_ARG_TYPES ((TREE_TYPE (decl))))
		      == void_list_node)))
	    {
	      /* [basic.stc.dynamic.allocation]
		 An allocation function can be a function
		 template. ... Template allocation functions shall
		 have two or more parameters.  */
	      error ("invalid template declaration of %qD", decl);
	      return error_mark_node;
	    }
	}
      else if (DECL_IMPLICIT_TYPEDEF_P (decl)
	       && CLASS_TYPE_P (TREE_TYPE (decl)))
	{
	  /* Class template, set TEMPLATE_TYPE_PARM_FOR_CLASS.  */
	  tree parms = INNERMOST_TEMPLATE_PARMS (current_template_parms);
	  for (int i = 0; i < TREE_VEC_LENGTH (parms); ++i)
	    {
	      tree t = TREE_VALUE (TREE_VEC_ELT (parms, i));
	      if (TREE_CODE (t) == TYPE_DECL)
		t = TREE_TYPE (t);
	      if (TREE_CODE (t) == TEMPLATE_TYPE_PARM)
		TEMPLATE_TYPE_PARM_FOR_CLASS (t) = true;
	    }
	}
      else if (TREE_CODE (decl) == TYPE_DECL
	       && TYPE_DECL_ALIAS_P (decl))
	/* alias-declaration */
	gcc_assert (!DECL_ARTIFICIAL (decl));
      else if (VAR_P (decl))
	/* C++14 variable template. */;
      else
	{
	  error ("template declaration of %q#D", decl);
	  return error_mark_node;
	}
    }

  /* Check to see that the rules regarding the use of default
     arguments are not being violated.  */
  check_default_tmpl_args (decl, current_template_parms,
			   is_primary, is_partial, /*is_friend_decl=*/0);

  /* Ensure that there are no parameter packs in the type of this
     declaration that have not been expanded.  */
  if (TREE_CODE (decl) == FUNCTION_DECL)
    {
      /* Check each of the arguments individually to see if there are
	 any bare parameter packs.  */
      tree type = TREE_TYPE (decl);
      tree arg = DECL_ARGUMENTS (decl);
      tree argtype = TYPE_ARG_TYPES (type);

      while (arg && argtype)
	{
	  if (!DECL_PACK_P (arg)
	      && check_for_bare_parameter_packs (TREE_TYPE (arg)))
	    {
	      /* This is a PARM_DECL that contains unexpanded parameter
		 packs.  We have already complained about this in the
		 check_for_bare_parameter_packs call, so just replace
		 these types with ERROR_MARK_NODE.  */
	      TREE_TYPE (arg) = error_mark_node;
	      TREE_VALUE (argtype) = error_mark_node;
	    }

	  arg = DECL_CHAIN (arg);
	  argtype = TREE_CHAIN (argtype);
	}

      /* Check for bare parameter packs in the return type and the
	 exception specifiers.  */
      if (check_for_bare_parameter_packs (TREE_TYPE (type)))
	/* Errors were already issued, set return type to int
	   as the frontend doesn't expect error_mark_node as
	   the return type.  */
	TREE_TYPE (type) = integer_type_node;
      if (check_for_bare_parameter_packs (TYPE_RAISES_EXCEPTIONS (type)))
	TYPE_RAISES_EXCEPTIONS (type) = NULL_TREE;
    }
  else if (check_for_bare_parameter_packs ((TREE_CODE (decl) == TYPE_DECL
					    && TYPE_DECL_ALIAS_P (decl))
					   ? DECL_ORIGINAL_TYPE (decl)
					   : TREE_TYPE (decl)))
    {
      TREE_TYPE (decl) = error_mark_node;
      return error_mark_node;
    }

  if (is_partial)
    return process_partial_specialization (decl);

  args = current_template_args ();

  /* Decide whether to build a new TEMPLATE_DECL or reuse/validate an
     existing one.  The first branch covers declarations that may need
     a fresh TEMPLATE_DECL; the else branch handles out-of-class
     definitions of members of class templates.  */
  if (!ctx
      || TREE_CODE (ctx) == FUNCTION_DECL
      || (CLASS_TYPE_P (ctx) && TYPE_BEING_DEFINED (ctx))
      || (TREE_CODE (decl) == TYPE_DECL
	  && LAMBDA_TYPE_P (TREE_TYPE (decl)))
      || (is_friend && !DECL_TEMPLATE_INFO (decl)))
    {
      if (DECL_LANG_SPECIFIC (decl)
	  && DECL_TEMPLATE_INFO (decl)
	  && DECL_TI_TEMPLATE (decl))
	tmpl = DECL_TI_TEMPLATE (decl);
      /* If DECL is a TYPE_DECL for a class-template, then there won't
	 be DECL_LANG_SPECIFIC.  The information equivalent to
	 DECL_TEMPLATE_INFO is found in TYPE_TEMPLATE_INFO instead.  */
      else if (DECL_IMPLICIT_TYPEDEF_P (decl)
	       && TYPE_TEMPLATE_INFO (TREE_TYPE (decl))
	       && TYPE_TI_TEMPLATE (TREE_TYPE (decl)))
	{
	  /* Since a template declaration already existed for this
	     class-type, we must be redeclaring it here.  Make sure
	     that the redeclaration is valid.  */
	  redeclare_class_template (TREE_TYPE (decl),
				    current_template_parms,
				    current_template_constraints ());
	  /* We don't need to create a new TEMPLATE_DECL; just use the
	     one we already had.  */
	  tmpl = TYPE_TI_TEMPLATE (TREE_TYPE (decl));
	}
      else
	{
	  tmpl = build_template_decl (decl, current_template_parms,
				      member_template_p);
	  new_template_p = 1;

	  if (DECL_LANG_SPECIFIC (decl)
	      && DECL_TEMPLATE_SPECIALIZATION (decl))
	    {
	      /* A specialization of a member template of a template
		 class.  */
	      SET_DECL_TEMPLATE_SPECIALIZATION (tmpl);
	      DECL_TEMPLATE_INFO (tmpl) = DECL_TEMPLATE_INFO (decl);
	      DECL_TEMPLATE_INFO (decl) = NULL_TREE;
	    }
	}
    }
  else
    {
      tree a, t, current, parms;
      int i;
      tree tinfo = get_template_info (decl);

      if (!tinfo)
	{
	  error ("template definition of non-template %q#D", decl);
	  return error_mark_node;
	}

      tmpl = TI_TEMPLATE (tinfo);

      if (DECL_FUNCTION_TEMPLATE_P (tmpl)
	  && DECL_TEMPLATE_INFO (decl) && DECL_TI_ARGS (decl)
	  && DECL_TEMPLATE_SPECIALIZATION (decl)
	  && DECL_MEMBER_TEMPLATE_P (tmpl))
	{
	  tree new_tmpl;

	  /* The declaration is a specialization of a member
	     template, declared outside the class.  Therefore, the
	     innermost template arguments will be NULL, so we
	     replace them with the arguments determined by the
	     earlier call to check_explicit_specialization.  */
	  args = DECL_TI_ARGS (decl);

	  new_tmpl
	    = build_template_decl (decl, current_template_parms,
				   member_template_p);
	  DECL_TEMPLATE_RESULT (new_tmpl) = decl;
	  TREE_TYPE (new_tmpl) = TREE_TYPE (decl);
	  DECL_TI_TEMPLATE (decl) = new_tmpl;
	  SET_DECL_TEMPLATE_SPECIALIZATION (new_tmpl);
	  DECL_TEMPLATE_INFO (new_tmpl)
	    = build_template_info (tmpl, args);

	  register_specialization (new_tmpl,
				   most_general_template (tmpl),
				   args,
				   is_friend, 0);
	  return decl;
	}

      /* Make sure the template headers we got make sense: the number
	 of parameter levels supplied must match the depth recorded on
	 the existing TEMPLATE_DECL.  */
      parms = DECL_TEMPLATE_PARMS (tmpl);
      i = TMPL_PARMS_DEPTH (parms);
      if (TMPL_ARGS_DEPTH (args) != i)
	{
	  error ("expected %d levels of template parms for %q#D, got %d",
		 i, decl, TMPL_ARGS_DEPTH (args));
	  DECL_INTERFACE_KNOWN (decl) = 1;
	  return error_mark_node;
	}
      else
	for (current = decl; i > 0; --i, parms = TREE_CHAIN (parms))
	  {
	    a = TMPL_ARGS_LEVEL (args, i);
	    t = INNERMOST_TEMPLATE_PARMS (parms);

	    if (TREE_VEC_LENGTH (t) != TREE_VEC_LENGTH (a))
	      {
		if (current == decl)
		  error ("got %d template parameters for %q#D",
			 TREE_VEC_LENGTH (a), decl);
		else
		  error ("got %d template parameters for %q#T",
			 TREE_VEC_LENGTH (a), current);
		error ("  but %d required", TREE_VEC_LENGTH (t));
		/* Avoid crash in import_export_decl.  */
		DECL_INTERFACE_KNOWN (decl) = 1;
		return error_mark_node;
	      }

	    if (current == decl)
	      current = ctx;
	    else if (current == NULL_TREE)
	      /* Can happen in erroneous input.  */
	      break;
	    else
	      current = get_containing_scope (current);
	  }

      /* Check that the parms are used in the appropriate qualifying scopes
	 in the declarator.  */
      if (!comp_template_args
	  (TI_ARGS (tinfo),
	   TI_ARGS (get_template_info (DECL_TEMPLATE_RESULT (tmpl)))))
	{
	  error ("\
template arguments to %qD do not match original template %qD",
		 decl, DECL_TEMPLATE_RESULT (tmpl));
	  if (!uses_template_parms (TI_ARGS (tinfo)))
	    inform (input_location, "use template<> for an explicit specialization");
	  /* Avoid crash in import_export_decl.  */
	  DECL_INTERFACE_KNOWN (decl) = 1;
	  return error_mark_node;
	}
    }

  DECL_TEMPLATE_RESULT (tmpl) = decl;
  TREE_TYPE (tmpl) = TREE_TYPE (decl);

  /* Push template declarations for global functions and types.  Note
     that we do not try to push a global template friend declared in a
     template class; such a thing may well depend on the template
     parameters of the class.  */
  if (new_template_p && !ctx
      && !(is_friend && template_class_depth (current_class_type) > 0))
    {
      tmpl = pushdecl_namespace_level (tmpl, is_friend);
      if (tmpl == error_mark_node)
	return error_mark_node;

      /* Hide template friend classes that haven't been declared yet.  */
      if (is_friend && TREE_CODE (decl) == TYPE_DECL)
	{
	  DECL_ANTICIPATED (tmpl) = 1;
	  DECL_FRIEND_P (tmpl) = 1;
	}
    }

  /* Extra bookkeeping for primary templates: conversion-operator
     detection, parameter contexts, and complex-alias marking.  */
  if (is_primary)
    {
      tree parms = DECL_TEMPLATE_PARMS (tmpl);
      int i;

      DECL_PRIMARY_TEMPLATE (tmpl) = tmpl;
      if (DECL_CONV_FN_P (tmpl))
	{
	  int depth = TMPL_PARMS_DEPTH (parms);

	  /* It is a conversion operator. See if the type converted to
	     depends on innermost template operands.  */

	  if (uses_template_parms_level (TREE_TYPE (TREE_TYPE (tmpl)),
					 depth))
	    DECL_TEMPLATE_CONV_FN_P (tmpl) = 1;
	}

      /* Give template template parms a DECL_CONTEXT of the template
	 for which they are a parameter.  */
      parms = INNERMOST_TEMPLATE_PARMS (parms);
      for (i = TREE_VEC_LENGTH (parms) - 1; i >= 0; --i)
	{
	  tree parm = TREE_VALUE (TREE_VEC_ELT (parms, i));
	  if (TREE_CODE (parm) == TEMPLATE_DECL)
	    DECL_CONTEXT (parm) = tmpl;
	}

      if (TREE_CODE (decl) == TYPE_DECL
	  && TYPE_DECL_ALIAS_P (decl)
	  && complex_alias_template_p (tmpl))
	TEMPLATE_DECL_COMPLEX_ALIAS_P (tmpl) = true;
    }

  /* The DECL_TI_ARGS of DECL contains full set of arguments referring
     back to its most general template.  If TMPL is a specialization,
     ARGS may only have the innermost set of arguments.  Add the missing
     argument levels if necessary.  */
  if (DECL_TEMPLATE_INFO (tmpl))
    args = add_outermost_template_args (DECL_TI_ARGS (tmpl), args);

  info = build_template_info (tmpl, args);

  if (DECL_IMPLICIT_TYPEDEF_P (decl))
    SET_TYPE_TEMPLATE_INFO (TREE_TYPE (tmpl), info);
  else
    {
      if (is_primary && !DECL_LANG_SPECIFIC (decl))
	retrofit_lang_decl (decl);
      if (DECL_LANG_SPECIFIC (decl))
	DECL_TEMPLATE_INFO (decl) = info;
    }

  if (flag_implicit_templates
      && !is_friend
      && TREE_PUBLIC (decl)
      && VAR_OR_FUNCTION_DECL_P (decl))
    /* Set DECL_COMDAT on template instantiations; if we force
       them to be emitted by explicit instantiation or -frepo,
       mark_needed will tell cgraph to do the right thing.  */
    DECL_COMDAT (decl) = true;

  return DECL_TEMPLATE_RESULT (tmpl);
}
/* Convenience wrapper for push_template_decl_real covering the
   common, non-friend case.  */
tree
push_template_decl (tree decl)
{
  const bool friend_p = false;
  return push_template_decl_real (decl, friend_p);
}
/* FN is an inheriting constructor that inherits from the constructor
   template INHERITED; turn FN into a constructor template with a matching
   template header.  Returns the new TEMPLATE_DECL.  */
tree
add_inherited_template_parms (tree fn, tree inherited)
{
  /* Clone the innermost parameter level of the inherited constructor
     template so FN gets its own copy.  */
  tree cloned_parms
    = copy_node (INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (inherited)));
  /* Stack the cloned level on top of the current parameter levels.  */
  tree all_parms
    = tree_cons (size_int (processing_template_decl + 1),
		 cloned_parms, current_template_parms);
  tree new_tmpl = build_template_decl (fn, all_parms, /*member*/true);
  tree new_args = template_parms_to_args (all_parms);
  DECL_TEMPLATE_INFO (fn) = build_template_info (new_tmpl, new_args);
  TREE_TYPE (new_tmpl) = TREE_TYPE (fn);
  DECL_TEMPLATE_RESULT (new_tmpl) = fn;
  DECL_ARTIFICIAL (new_tmpl) = true;
  DECL_PRIMARY_TEMPLATE (new_tmpl) = new_tmpl;
  return new_tmpl;
}
/* Called when a class template TYPE is redeclared with the indicated
   template PARMS, e.g.:

     template <class T> struct S;
     template <class T> struct S {};

   TYPE is the class type of the earlier declaration, PARMS the
   template-parameter list of the redeclaration, and CONS its associated
   constraints.  Checks the redeclaration against the original,
   merges default template arguments between the two parameter lists,
   and diagnoses mismatches.  Returns true if the redeclaration is
   valid, false if a hard error was reported.  */
bool
redeclare_class_template (tree type, tree parms, tree cons)
{
  tree tmpl;
  tree tmpl_parms;
  int i;

  if (!TYPE_TEMPLATE_INFO (type))
    {
      error ("%qT is not a template type", type);
      return false;
    }

  tmpl = TYPE_TI_TEMPLATE (type);
  if (!PRIMARY_TEMPLATE_P (tmpl))
    /* The type is nested in some template class.  Nothing to worry
       about here; there are no new template parameters for the nested
       type.  */
    return true;

  if (!parms)
    {
      error ("template specifiers not specified in declaration of %qD",
	     tmpl);
      return false;
    }

  parms = INNERMOST_TEMPLATE_PARMS (parms);
  tmpl_parms = DECL_INNERMOST_TEMPLATE_PARMS (tmpl);

  /* The two declarations must have the same number of parameters.  */
  if (TREE_VEC_LENGTH (parms) != TREE_VEC_LENGTH (tmpl_parms))
    {
      error_n (input_location, TREE_VEC_LENGTH (parms),
               "redeclared with %d template parameter",
               "redeclared with %d template parameters",
               TREE_VEC_LENGTH (parms));
      inform_n (DECL_SOURCE_LOCATION (tmpl), TREE_VEC_LENGTH (tmpl_parms),
                "previous declaration %qD used %d template parameter",
                "previous declaration %qD used %d template parameters",
                tmpl, TREE_VEC_LENGTH (tmpl_parms));
      return false;
    }

  /* Compare the parameters pairwise and merge default arguments.  */
  for (i = 0; i < TREE_VEC_LENGTH (tmpl_parms); ++i)
    {
      tree tmpl_parm;
      tree parm;
      tree tmpl_default;
      tree parm_default;

      if (TREE_VEC_ELT (tmpl_parms, i) == error_mark_node
          || TREE_VEC_ELT (parms, i) == error_mark_node)
        continue;

      tmpl_parm = TREE_VALUE (TREE_VEC_ELT (tmpl_parms, i));
      if (error_operand_p (tmpl_parm))
	return false;

      parm = TREE_VALUE (TREE_VEC_ELT (parms, i));
      tmpl_default = TREE_PURPOSE (TREE_VEC_ELT (tmpl_parms, i));
      parm_default = TREE_PURPOSE (TREE_VEC_ELT (parms, i));

      /* TMPL_PARM and PARM can be either TYPE_DECL, PARM_DECL, or
	 TEMPLATE_DECL.  The kinds, types, and pack-ness must agree.  */
      if (TREE_CODE (tmpl_parm) != TREE_CODE (parm)
	  || (TREE_CODE (tmpl_parm) != TYPE_DECL
	      && !same_type_p (TREE_TYPE (tmpl_parm), TREE_TYPE (parm)))
	  || (TREE_CODE (tmpl_parm) != PARM_DECL
	      && (TEMPLATE_TYPE_PARAMETER_PACK (TREE_TYPE (tmpl_parm))
		  != TEMPLATE_TYPE_PARAMETER_PACK (TREE_TYPE (parm))))
	  || (TREE_CODE (tmpl_parm) == PARM_DECL
	      && (TEMPLATE_PARM_PARAMETER_PACK (DECL_INITIAL (tmpl_parm))
		  != TEMPLATE_PARM_PARAMETER_PACK (DECL_INITIAL (parm)))))
	{
	  error ("template parameter %q+#D", tmpl_parm);
	  error ("redeclared here as %q#D", parm);
	  return false;
	}

      if (tmpl_default != NULL_TREE && parm_default != NULL_TREE)
	{
	  /* We have in [temp.param]:

	     A template-parameter may not be given default arguments
	     by two different declarations in the same scope.  */
	  error_at (input_location, "redefinition of default argument for %q#D", parm);
	  inform (DECL_SOURCE_LOCATION (tmpl_parm),
		  "original definition appeared here");
	  return false;
	}

      if (parm_default != NULL_TREE)
	/* Update the previous template parameters (which are the ones
	   that will really count) with the new default value.  */
	TREE_PURPOSE (TREE_VEC_ELT (tmpl_parms, i)) = parm_default;
      else if (tmpl_default != NULL_TREE)
	/* Update the new parameters, too; they'll be used as the
	   parameters for any members.  */
	TREE_PURPOSE (TREE_VEC_ELT (parms, i)) = tmpl_default;

      /* Give each template template parm in this redeclaration a
	 DECL_CONTEXT of the template for which they are a parameter.  */
      if (TREE_CODE (parm) == TEMPLATE_DECL)
	{
	  gcc_assert (DECL_CONTEXT (parm) == NULL_TREE);
	  DECL_CONTEXT (parm) = tmpl;
	}

      if (TREE_CODE (parm) == TYPE_DECL)
	TEMPLATE_TYPE_PARM_FOR_CLASS (TREE_TYPE (parm)) = true;
    }

  // Cannot redeclare a class template with a different set of constraints.
  if (!equivalent_constraints (get_constraints (tmpl), cons))
    {
      error_at (input_location, "redeclaration %q#D with different "
                                "constraints", tmpl);
      inform (DECL_SOURCE_LOCATION (tmpl),
              "original declaration appeared here");
      /* Fix: a hard error was emitted, so report failure to the caller
	 instead of falling through to the success path (every other
	 diagnostic above also returns false).  */
      return false;
    }

  return true;
}
/* The actual substitution part of instantiate_non_dependent_expr_sfinae,
   to be used when the caller has already checked
    (processing_template_decl
     && !instantiation_dependent_expression_p (expr)
     && potential_constant_expression (expr))
   and cleared processing_template_decl.  */
tree
instantiate_non_dependent_expr_internal (tree expr, tsubst_flags_t complain)
{
  /* Substitute with no arguments and no enclosing declaration; the
     expression is treated as an integral constant expression.  */
  tree result
    = tsubst_copy_and_build (expr,
			     /*args=*/NULL_TREE,
			     complain,
			     /*in_decl=*/NULL_TREE,
			     /*function_p=*/false,
			     /*integral_constant_expression_p=*/true);
  return result;
}
/* Simplify EXPR if it is a non-dependent expression.  Returns the
   (possibly simplified) expression.  */
tree
instantiate_non_dependent_expr_sfinae (tree expr, tsubst_flags_t complain)
{
  if (expr == NULL_TREE)
    return NULL_TREE;

  /* If we're in a template, but EXPR isn't value dependent, simplify
     it.  We're supposed to treat:

       template <typename T> void f(T[1 + 1]);
       template <typename T> void f(T[2]);

     as two declarations of the same function, for example.  */
  if (!processing_template_decl
      || !potential_nondependent_constant_expression (expr))
    return expr;

  /* Temporarily leave template context for the substitution.  */
  processing_template_decl_sentinel s;
  return instantiate_non_dependent_expr_internal (expr, complain);
}
/* Like instantiate_non_dependent_expr_sfinae, diagnosing any failure.  */
tree
instantiate_non_dependent_expr (tree expr)
{
  const tsubst_flags_t complain = tf_error;
  return instantiate_non_dependent_expr_sfinae (expr, complain);
}
/* Like instantiate_non_dependent_expr, but return NULL_TREE rather than
   an uninstantiated expression.  */
tree
instantiate_non_dependent_or_null (tree expr)
{
  if (expr == NULL_TREE)
    return NULL_TREE;

  /* Outside a template there is nothing to substitute.  */
  if (!processing_template_decl)
    return expr;

  /* In a template, an expression we cannot instantiate yields NULL.  */
  if (!potential_nondependent_constant_expression (expr))
    return NULL_TREE;

  processing_template_decl_sentinel s;
  return instantiate_non_dependent_expr_internal (expr, tf_error);
}
/* True iff T is a specialization of a variable template.  */
bool
variable_template_specialization_p (tree t)
{
  if (!VAR_P (t))
    return false;
  if (!DECL_LANG_SPECIFIC (t) || !DECL_TEMPLATE_INFO (t))
    return false;
  return variable_template_p (DECL_TI_TEMPLATE (t));
}
/* Return TRUE iff T is a type alias, a TEMPLATE_DECL for an alias
   template declaration, or a TYPE_DECL for an alias declaration.  */
bool
alias_type_or_template_p (tree t)
{
  if (t == NULL_TREE)
    return false;
  /* A TYPE_DECL introduced by an alias-declaration.  */
  if (TREE_CODE (t) == TYPE_DECL && TYPE_DECL_ALIAS_P (t))
    return true;
  /* A type whose name is such a TYPE_DECL.  */
  if (TYPE_P (t) && TYPE_NAME (t) && TYPE_DECL_ALIAS_P (TYPE_NAME (t)))
    return true;
  /* Otherwise, an alias template declaration.  */
  return DECL_ALIAS_TEMPLATE_P (t);
}
/* Return TRUE iff T is a specialization of an alias template.  */
bool
alias_template_specialization_p (const_tree t)
{
  /* It's an alias template specialization if it's an alias and its
     TYPE_NAME is a specialization of a primary template.  */
  if (!TYPE_ALIAS_P (t))
    return false;
  tree tinfo = TYPE_ALIAS_TEMPLATE_INFO (t);
  if (!tinfo)
    return false;
  return PRIMARY_TEMPLATE_P (TI_TEMPLATE (tinfo));
}
/* An alias template is complex from a SFINAE perspective if a template-id
   using that alias can be ill-formed when the expansion is not, as with
   the void_t template.  We determine this by checking whether the
   expansion for the alias template uses all its template parameters.  */

struct uses_all_template_parms_data
{
  /* The parameter level being examined.  */
  int level;
  /* Per-parameter flags, indexed by TEMPLATE_PARM_IDX; an entry is set
     when the corresponding parameter is seen during the walk.  */
  bool *seen;
};
/* Walk callback for complex_alias_template_p: mark the parameter T as
   seen when it belongs to the level recorded in DATA_.  Always returns
   0 so the walk continues.  */
static int
uses_all_template_parms_r (tree t, void *data_)
{
  uses_all_template_parms_data *d
    = (uses_all_template_parms_data *) data_;
  tree idx = get_template_parm_index (t);

  if (TEMPLATE_PARM_LEVEL (idx) == d->level)
    d->seen[TEMPLATE_PARM_IDX (idx)] = true;
  return 0;
}
/* Returns true iff TMPL is a "complex" alias template: its expansion
   does not use every one of its innermost template parameters.  */
static bool
complex_alias_template_p (const_tree tmpl)
{
  tree pattern = DECL_ORIGINAL_TYPE (DECL_TEMPLATE_RESULT (tmpl));
  tree parm_list = DECL_TEMPLATE_PARMS (tmpl);
  int nparms = TREE_VEC_LENGTH (INNERMOST_TEMPLATE_PARMS (parm_list));

  struct uses_all_template_parms_data data;
  data.level = TMPL_PARMS_DEPTH (parm_list);
  data.seen = XALLOCAVEC (bool, nparms);
  for (int i = nparms; i-- > 0; )
    data.seen[i] = false;

  for_each_template_parm (pattern, uses_all_template_parms_r, &data, NULL, true);

  /* Complex iff some parameter was never encountered in the pattern.  */
  for (int i = 0; i < nparms; ++i)
    if (!data.seen[i])
      return true;
  return false;
}
/* Return TRUE iff T is a specialization of a complex alias template with
   dependent template-arguments.  */
bool
dependent_alias_template_spec_p (const_tree t)
{
  if (!alias_template_specialization_p (t))
    return false;

  tree tinfo = TYPE_ALIAS_TEMPLATE_INFO (t);
  if (!TEMPLATE_DECL_COMPLEX_ALIAS_P (TI_TEMPLATE (tinfo)))
    return false;

  return any_dependent_template_arguments_p
    (INNERMOST_TEMPLATE_ARGS (TI_ARGS (tinfo)));
}
/* Return the number of innermost template parameters in TMPL.  */
static int
num_innermost_template_parms (tree tmpl)
{
  return TREE_VEC_LENGTH (INNERMOST_TEMPLATE_PARMS
			  (DECL_TEMPLATE_PARMS (tmpl)));
}
/* Return either TMPL or another template that it is equivalent to under DR
   1286: An alias that just changes the name of a template is equivalent to
   the other template.  */
static tree
get_underlying_template (tree tmpl)
{
  gcc_assert (TREE_CODE (tmpl) == TEMPLATE_DECL);

  /* Strip equivalent alias templates one layer at a time.  */
  while (DECL_ALIAS_TEMPLATE_P (tmpl))
    {
      tree pattern = DECL_ORIGINAL_TYPE (DECL_TEMPLATE_RESULT (tmpl));
      tree tinfo = TYPE_TEMPLATE_INFO_MAYBE_ALIAS (pattern);
      if (!tinfo)
	break;

      /* The target must be a primary template with the same number of
	 innermost parameters.  */
      tree target = TI_TEMPLATE (tinfo);
      if (!PRIMARY_TEMPLATE_P (target))
	break;
      if (num_innermost_template_parms (tmpl)
	  != num_innermost_template_parms (target))
	break;

      /* And the alias must forward its own parameters unchanged.  */
      tree own_args = INNERMOST_TEMPLATE_ARGS
	(template_parms_to_args (DECL_TEMPLATE_PARMS (tmpl)));
      if (!comp_template_args (TI_ARGS (tinfo), own_args))
	break;

      /* Alias is equivalent.  Strip it and repeat.  */
      tmpl = target;
    }
  return tmpl;
}
/* Subroutine of convert_nontype_argument.  Converts EXPR to TYPE, which
   must be a reference-to-function or a pointer-to-function type, as specified
   in [temp.arg.nontype]: disambiguate EXPR if it is an overload set,
   and check that the resulting function has external linkage.  */
static tree
convert_nontype_argument_function (tree type, tree expr,
				   tsubst_flags_t complain)
{
  tree fn = instantiate_type (type, expr, tf_none);
  if (fn == error_mark_node)
    return error_mark_node;

  /* A value-dependent result skips the linkage checks below and is
     accepted as-is.  */
  if (!value_dependent_expression_p (fn))
    {
      /* Peel conversions, address-of, and BASELINK wrappers to get at
	 the underlying function declaration.  */
      tree fn_no_ptr = strip_fnptr_conv (fn);
      if (TREE_CODE (fn_no_ptr) == ADDR_EXPR)
	fn_no_ptr = TREE_OPERAND (fn_no_ptr, 0);
      if (BASELINK_P (fn_no_ptr))
	fn_no_ptr = BASELINK_FUNCTIONS (fn_no_ptr);

      /* [temp.arg.nontype]/1

	 A template-argument for a non-type, non-template template-parameter
	 shall be one of:
	 [...]
	 -- the address of an object or function with external [C++11: or
	    internal] linkage.  */

      if (TREE_CODE (fn_no_ptr) != FUNCTION_DECL)
	{
	  if (complain & tf_error)
	    {
	      error ("%qE is not a valid template argument for type %qT",
		     expr, type);
	      if (TYPE_PTR_P (type))
		inform (input_location, "it must be the address of a function "
			"with external linkage");
	      else
		inform (input_location, "it must be the name of a function with "
			"external linkage");
	    }
	  return NULL_TREE;
	}

      linkage_kind linkage = decl_linkage (fn_no_ptr);
      if (cxx_dialect >= cxx11 ? linkage == lk_none : linkage != lk_external)
	{
	  if (complain & tf_error)
	    {
	      if (cxx_dialect >= cxx11)
		error ("%qE is not a valid template argument for type %qT "
		       "because %qD has no linkage",
		       expr, type, fn_no_ptr);
	      else
		error ("%qE is not a valid template argument for type %qT "
		       "because %qD does not have external linkage",
		       expr, type, fn_no_ptr);
	    }
	  return NULL_TREE;
	}
    }

  /* Accepted: adjust the representation to match TYPE.  */
  if (TREE_CODE (type) == REFERENCE_TYPE)
    {
      if (REFERENCE_REF_P (fn))
	fn = TREE_OPERAND (fn, 0);
      else
	fn = build_address (fn);
    }
  if (!same_type_ignoring_top_level_qualifiers_p (type, TREE_TYPE (fn)))
    fn = build_nop (type, fn);

  return fn;
}
/* Subroutine of convert_nontype_argument.
   Check if EXPR of type TYPE is a valid pointer-to-member constant.
   Emit an error otherwise.  */
static bool
check_valid_ptrmem_cst_expr (tree type, tree expr,
			     tsubst_flags_t complain)
{
  STRIP_NOPS (expr);

  /* A null pointer constant or an actual PTRMEM_CST is fine.  */
  if (expr != NULL_TREE
      && (null_ptr_cst_p (expr) || TREE_CODE (expr) == PTRMEM_CST))
    return true;
  /* So is a C++11 null member pointer value.  */
  if (cxx_dialect >= cxx11 && null_member_pointer_value_p (expr))
    return true;
  /* In a template, &X::Y appears as ADDR_EXPR of an OFFSET_REF.  */
  if (processing_template_decl
      && TREE_CODE (expr) == ADDR_EXPR
      && TREE_CODE (TREE_OPERAND (expr, 0)) == OFFSET_REF)
    return true;

  if (complain & tf_error)
    {
      error ("%qE is not a valid template argument for type %qT",
	     expr, type);
      error ("it must be a pointer-to-member of the form %<&X::Y%>");
    }
  return false;
}
/* Returns TRUE iff the address of OP is value-dependent.

   14.6.2.4 [temp.dep.temp]:
   A non-integral non-type template-argument is dependent if its type is
   dependent or it has either of the following forms
     qualified-id
     & qualified-id
   and contains a nested-name-specifier which specifies a class-name that
   names a dependent type.

   We generalize this to just say that the address of a member of a
   dependent class is value-dependent; the above doesn't cover the
   address of a static data member named with an unqualified-id.  */
static bool
has_value_dependent_address (tree op)
{
  /* We could use get_inner_reference here, but there's no need;
     this is only relevant for template non-type arguments, which
     can only be expressed as &id-expression.  */
  if (!DECL_P (op))
    return false;

  tree ctx = CP_DECL_CONTEXT (op);
  return TYPE_P (ctx) && dependent_type_p (ctx);
}
/* The next set of functions are used for providing helpful explanatory
   diagnostics for failed overload resolution.  Their messages should be
   indented by two spaces for consistency with the messages in
   call.c  */

/* Unification succeeded; 0 is the success code.  */
static int
unify_success (bool /*explain_p*/)
{
  return 0;
}
/* Report (when EXPLAIN_P) that PARM could not be deduced.  */
static int
unify_parameter_deduction_failure (bool explain_p, tree parm)
{
  if (!explain_p)
    return 1;
  inform (input_location,
	  " couldn't deduce template parameter %qD", parm);
  return 1;
}
/* Generic unification failure with no further explanation.  */
static int
unify_invalid (bool /*explain_p*/)
{
  return 1;
}
/* Report (when EXPLAIN_P) incompatible cv-qualifiers between PARM and ARG.  */
static int
unify_cv_qual_mismatch (bool explain_p, tree parm, tree arg)
{
  if (!explain_p)
    return 1;
  inform (input_location,
	  " types %qT and %qT have incompatible cv-qualifiers",
	  parm, arg);
  return 1;
}
/* Report (when EXPLAIN_P) a plain type mismatch between PARM and ARG.  */
static int
unify_type_mismatch (bool explain_p, tree parm, tree arg)
{
  if (!explain_p)
    return 1;
  inform (input_location, " mismatched types %qT and %qT", parm, arg);
  return 1;
}
/* Report (when EXPLAIN_P) that ARG is a pack but PARM is not.  */
static int
unify_parameter_pack_mismatch (bool explain_p, tree parm, tree arg)
{
  if (!explain_p)
    return 1;
  inform (input_location,
	  " template parameter %qD is not a parameter pack, but "
	  "argument %qD is",
	  parm, arg);
  return 1;
}
/* Report (when EXPLAIN_P) that ARG doesn't match the ptrmem constant PARM.  */
static int
unify_ptrmem_cst_mismatch (bool explain_p, tree parm, tree arg)
{
  if (!explain_p)
    return 1;
  inform (input_location,
	  " template argument %qE does not match "
	  "pointer-to-member constant %qE",
	  arg, parm);
  return 1;
}
/* Report (when EXPLAIN_P) that expressions PARM and ARG differ.  */
static int
unify_expression_unequal (bool explain_p, tree parm, tree arg)
{
  if (!explain_p)
    return 1;
  inform (input_location, " %qE is not equivalent to %qE", parm, arg);
  return 1;
}
/* Report (when EXPLAIN_P) an inconsistent pack deduction OLD_ARG vs NEW_ARG.  */
static int
unify_parameter_pack_inconsistent (bool explain_p, tree old_arg, tree new_arg)
{
  if (!explain_p)
    return 1;
  inform (input_location,
	  " inconsistent parameter pack deduction with %qT and %qT",
	  old_arg, new_arg);
  return 1;
}
/* Report (when EXPLAIN_P) conflicting deductions FIRST and SECOND for PARM;
   the wording depends on whether PARM is a type or a non-type parameter.  */
static int
unify_inconsistency (bool explain_p, tree parm, tree first, tree second)
{
  if (!explain_p)
    return 1;
  if (TYPE_P (parm))
    inform (input_location,
	    " deduced conflicting types for parameter %qT (%qT and %qT)",
	    parm, first, second);
  else
    inform (input_location,
	    " deduced conflicting values for non-type parameter "
	    "%qE (%qE and %qE)", parm, first, second);
  return 1;
}
/* Report (when EXPLAIN_P) that a VLA type ARG can't be a template argument.  */
static int
unify_vla_arg (bool explain_p, tree arg)
{
  if (!explain_p)
    return 1;
  inform (input_location,
	  " variable-sized array type %qT is not "
	  "a valid template argument",
	  arg);
  return 1;
}
/* Report (when EXPLAIN_P) that a member function type ARG is invalid here.  */
static int
unify_method_type_error (bool explain_p, tree arg)
{
  if (!explain_p)
    return 1;
  inform (input_location,
	  " member function type %qT is not a valid template argument",
	  arg);
  return 1;
}
/* Report (when EXPLAIN_P) an arity mismatch: HAVE arguments were supplied
   where WANTED were expected; LEAST_P selects the "at least" wording.  */
static int
unify_arity (bool explain_p, int have, int wanted, bool least_p = false)
{
  if (!explain_p)
    return 1;
  if (least_p)
    inform_n (input_location, wanted,
	      " candidate expects at least %d argument, %d provided",
	      " candidate expects at least %d arguments, %d provided",
	      wanted, have);
  else
    inform_n (input_location, wanted,
	      " candidate expects %d argument, %d provided",
	      " candidate expects %d arguments, %d provided",
	      wanted, have);
  return 1;
}
/* Too many arguments: delegate to the common arity diagnostic.  */
static int
unify_too_many_arguments (bool explain_p, int have, int wanted)
{
  return unify_arity (explain_p, have, wanted);
}
/* Too few arguments: delegate to the common arity diagnostic,
   optionally with the "at least" wording.  */
static int
unify_too_few_arguments (bool explain_p, int have, int wanted,
			 bool least_p = false)
{
  return unify_arity (explain_p, have, wanted, least_p);
}
/* Report (when EXPLAIN_P) that ARG (of FROM_TYPE) can't convert to TO_TYPE,
   pointing at ARG's location when it has one.  */
static int
unify_arg_conversion (bool explain_p, tree to_type,
		      tree from_type, tree arg)
{
  if (!explain_p)
    return 1;
  inform (EXPR_LOC_OR_LOC (arg, input_location),
	  " cannot convert %qE (type %qT) to type %qT",
	  arg, from_type, to_type);
  return 1;
}
/* Report (when EXPLAIN_P) a base-class deduction failure; R distinguishes
   an ambiguous base from a missing derivation.  */
static int
unify_no_common_base (bool explain_p, enum template_base_result r,
		      tree parm, tree arg)
{
  if (!explain_p)
    return 1;
  if (r == tbr_ambiguous_baseclass)
    inform (input_location, " %qT is an ambiguous base class of %qT",
	    parm, arg);
  else
    inform (input_location, " %qT is not derived from %qT", arg, parm);
  return 1;
}
/* Report (when EXPLAIN_P) inconsistent template template parameters.  */
static int
unify_inconsistent_template_template_parameters (bool explain_p)
{
  if (!explain_p)
    return 1;
  inform (input_location,
	  " template parameters of a template template argument are "
	  "inconsistent with other deduced template arguments");
  return 1;
}
/* Report (when EXPLAIN_P) failure to deduce a template PARM from the
   non-template type ARG.  */
static int
unify_template_deduction_failure (bool explain_p, tree parm, tree arg)
{
  if (!explain_p)
    return 1;
  inform (input_location,
	  " can't deduce a template for %qT from non-template type %qT",
	  parm, arg);
  return 1;
}
/* Report (when EXPLAIN_P) that template argument ARG doesn't match PARM.  */
static int
unify_template_argument_mismatch (bool explain_p, tree parm, tree arg)
{
  if (!explain_p)
    return 1;
  inform (input_location,
	  " template argument %qE does not match %qE", arg, parm);
  return 1;
}
/* Report (when EXPLAIN_P) failure to resolve the overload set ARG to a
   single address.  */
static int
unify_overload_resolution_failure (bool explain_p, tree arg)
{
  if (!explain_p)
    return 1;
  inform (input_location,
	  " could not resolve address from overloaded function %qE",
	  arg);
  return 1;
}
/* Attempt to convert the non-type template parameter EXPR to the
   indicated TYPE.  If the conversion is successful, return the
   converted value.  If the conversion is unsuccessful, return
   NULL_TREE if we issued an error message, or error_mark_node if we
   did not.  We issue error messages for out-and-out bad template
   parameters, but not simply because the conversion failed, since we
   might be just trying to do argument deduction.  Both TYPE and EXPR
   must be non-dependent.

   The conversion follows the special rules described in
   [temp.arg.nontype], and it is much more strict than an implicit
   conversion.

   This function is called twice for each template argument (see
   lookup_template_class for a more accurate description of this
   problem). This means that we need to handle expressions which
   are not valid in a C++ source, but can be created from the
   first call (for instance, casts to perform conversions). These
   hacks can go away after we fix the double coercion problem.  */

static tree
convert_nontype_argument (tree type, tree expr, tsubst_flags_t complain)
{
  tree expr_type;

  /* Detect immediately string literals as invalid non-type argument.
     This special-case is not needed for correctness (we would easily
     catch this later), but only to provide better diagnostic for this
     common user mistake. As suggested by DR 100, we do not mention
     linkage issues in the diagnostic as this is not the point. */
  if (TREE_CODE (expr) == STRING_CST)
    {
      if (complain & tf_error)
	error ("%qE is not a valid template argument for type %qT "
	       "because string literals can never be used in this context",
	       expr, type);
      return NULL_TREE;
    }

  /* Add the ADDR_EXPR now for the benefit of
     value_dependent_expression_p. */
  if (TYPE_PTROBV_P (type)
      && TREE_CODE (TREE_TYPE (expr)) == ARRAY_TYPE)
    {
      expr = decay_conversion (expr, complain);
      if (expr == error_mark_node)
	return error_mark_node;
    }

  /* If we are in a template, EXPR may be non-dependent, but still
     have a syntactic, rather than semantic, form.  For example, EXPR
     might be a SCOPE_REF, rather than the VAR_DECL to which the
     SCOPE_REF refers.  Preserving the qualifying scope is necessary
     so that access checking can be performed when the template is
     instantiated -- but here we need the resolved form so that we can
     convert the argument.  */
  bool non_dep = false;
  if (TYPE_REF_OBJ_P (type)
      && has_value_dependent_address (expr))
    /* If we want the address and it's value-dependent, don't fold.  */;
  else if (processing_template_decl
	   && potential_nondependent_constant_expression (expr))
    non_dep = true;
  if (error_operand_p (expr))
    return error_mark_node;
  expr_type = TREE_TYPE (expr);

  /* Reference parameters bind to an lvalue; everything else is used as
     an rvalue.  */
  if (TREE_CODE (type) == REFERENCE_TYPE)
    expr = mark_lvalue_use (expr);
  else
    expr = mark_rvalue_use (expr);

  /* If the argument is non-dependent, perform any conversions in
     non-dependent context as well.  The sentinel temporarily clears
     processing_template_decl when NON_DEP is true.  */
  processing_template_decl_sentinel s (non_dep);
  if (non_dep)
    expr = instantiate_non_dependent_expr_internal (expr, complain);

  if (value_dependent_expression_p (expr))
    expr = canonicalize_expr_argument (expr, complain);

  /* 14.3.2/5: The null pointer{,-to-member} conversion is applied
     to a non-type argument of "nullptr".  */
  if (expr == nullptr_node && TYPE_PTR_OR_PTRMEM_P (type))
    expr = fold_simple (convert (type, expr));

  /* In C++11, integral or enumeration non-type template arguments can be
     arbitrary constant expressions.  Pointer and pointer to
     member arguments can be general constant expressions that evaluate
     to a null value, but otherwise still need to be of a specific form.  */
  if (cxx_dialect >= cxx11)
    {
      if (TREE_CODE (expr) == PTRMEM_CST)
	/* A PTRMEM_CST is already constant, and a valid template
	   argument for a parameter of pointer to member type, we just want
	   to leave it in that form rather than lower it to a
	   CONSTRUCTOR.  */;
      else if (INTEGRAL_OR_ENUMERATION_TYPE_P (type))
	/* Constant value checking is done later with type conversion.  */;
      else if (cxx_dialect >= cxx1z)
	{
	  if (TREE_CODE (type) != REFERENCE_TYPE)
	    expr = maybe_constant_value (expr);
	  else if (REFERENCE_REF_P (expr))
	    {
	      expr = TREE_OPERAND (expr, 0);
	      expr = maybe_constant_value (expr);
	      expr = convert_from_reference (expr);
	    }
	}
      else if (TYPE_PTR_OR_PTRMEM_P (type))
	{
	  tree folded = maybe_constant_value (expr);
	  if (TYPE_PTR_P (type) ? integer_zerop (folded)
	      : null_member_pointer_value_p (folded))
	    expr = folded;
	}
    }

  /* HACK: Due to double coercion, we can get a
     NOP_EXPR<REFERENCE_TYPE>(ADDR_EXPR<POINTER_TYPE> (arg)) here,
     which is the tree that we built on the first call (see
     below when coercing to reference to object or to reference to
     function). We just strip everything and get to the arg.
     See g++.old-deja/g++.oliva/template4.C and g++.dg/template/nontype9.C
     for examples.  */
  if (TYPE_REF_OBJ_P (type) || TYPE_REFFN_P (type))
    {
      tree probe_type, probe = expr;
      if (REFERENCE_REF_P (probe))
	probe = TREE_OPERAND (probe, 0);
      probe_type = TREE_TYPE (probe);
      if (TREE_CODE (probe) == NOP_EXPR)
	{
	  /* ??? Maybe we could use convert_from_reference here, but we
	     would need to relax its constraints because the NOP_EXPR
	     could actually change the type to something more cv-qualified,
	     and this is not folded by convert_from_reference.  */
	  tree addr = TREE_OPERAND (probe, 0);
	  if (TREE_CODE (probe_type) == REFERENCE_TYPE
	      && TREE_CODE (addr) == ADDR_EXPR
	      && TYPE_PTR_P (TREE_TYPE (addr))
	      && (same_type_ignoring_top_level_qualifiers_p
		  (TREE_TYPE (probe_type),
		   TREE_TYPE (TREE_TYPE (addr)))))
	    {
	      expr = TREE_OPERAND (addr, 0);
	      expr_type = TREE_TYPE (probe_type);
	    }
	}
    }

  /* We could also generate a NOP_EXPR(ADDR_EXPR()) when the
     parameter is a pointer to object, through decay and
     qualification conversion. Let's strip everything.  */
  else if (TREE_CODE (expr) == NOP_EXPR && TYPE_PTROBV_P (type))
    {
      tree probe = expr;
      STRIP_NOPS (probe);
      if (TREE_CODE (probe) == ADDR_EXPR
	  && TYPE_PTR_P (TREE_TYPE (probe)))
	{
	  /* Skip the ADDR_EXPR only if it is part of the decay for
	     an array. Otherwise, it is part of the original argument
	     in the source code.  */
	  if (TREE_CODE (TREE_TYPE (TREE_OPERAND (probe, 0))) == ARRAY_TYPE)
	    probe = TREE_OPERAND (probe, 0);
	  expr = probe;
	  expr_type = TREE_TYPE (expr);
	}
    }

  /* [temp.arg.nontype]/5, bullet 1

     For a non-type template-parameter of integral or enumeration type,
     integral promotions (_conv.prom_) and integral conversions
     (_conv.integral_) are applied.  */
  if (INTEGRAL_OR_ENUMERATION_TYPE_P (type))
    {
      tree t = build_integral_nontype_arg_conv (type, expr, complain);
      t = maybe_constant_value (t);
      if (t != error_mark_node)
	expr = t;

      if (!same_type_ignoring_top_level_qualifiers_p (type, TREE_TYPE (expr)))
	return error_mark_node;

      /* Notice that there are constant expressions like '4 % 0' which
	 do not fold into integer constants.  */
      if (TREE_CODE (expr) != INTEGER_CST
	  && !value_dependent_expression_p (expr))
	{
	  if (complain & tf_error)
	    {
	      int errs = errorcount, warns = warningcount + werrorcount;
	      if (processing_template_decl
		  && !require_potential_constant_expression (expr))
		return NULL_TREE;
	      expr = cxx_constant_value (expr);
	      if (errorcount > errs || warningcount + werrorcount > warns)
		inform (EXPR_LOC_OR_LOC (expr, input_location),
			"in template argument for type %qT ", type);
	      if (expr == error_mark_node)
		return NULL_TREE;
	      /* else cxx_constant_value complained but gave us
		 a real constant, so go ahead.  */
	      if (TREE_CODE (expr) != INTEGER_CST)
		{
		  /* Some assemble time constant expressions like
		     (intptr_t)&&lab1 - (intptr_t)&&lab2 or
		     4 + (intptr_t)&&var satisfy reduced_constant_expression_p
		     as we can emit them into .rodata initializers of
		     variables, yet they can't fold into an INTEGER_CST at
		     compile time.  Refuse them here.  */
		  gcc_checking_assert (reduced_constant_expression_p (expr));
		  location_t loc = EXPR_LOC_OR_LOC (expr, input_location);
		  error_at (loc, "template argument %qE for type %qT not "
			    "a constant integer", expr, type);
		  return NULL_TREE;
		}
	    }
	  else
	    return NULL_TREE;
	}

      /* Avoid typedef problems.  */
      if (TREE_TYPE (expr) != type)
	expr = fold_convert (type, expr);
    }
  /* [temp.arg.nontype]/5, bullet 2

     For a non-type template-parameter of type pointer to object,
     qualification conversions (_conv.qual_) and the array-to-pointer
     conversion (_conv.array_) are applied.  */
  else if (TYPE_PTROBV_P (type))
    {
      /* [temp.arg.nontype]/1  (TC1 version, DR 49):

	 A template-argument for a non-type, non-template template-parameter
	 shall be one of: [...]

	 -- the name of a non-type template-parameter;
	 -- the address of an object or function with external linkage, [...]
	    expressed as "& id-expression" where the & is optional if the name
	    refers to a function or array, or if the corresponding
	    template-parameter is a reference.

	Here, we do not care about functions, as they are invalid anyway
	for a parameter of type pointer-to-object.  */

      if (value_dependent_expression_p (expr))
	/* Non-type template parameters are OK.  */
	;
      else if (cxx_dialect >= cxx11 && integer_zerop (expr))
	/* Null pointer values are OK in C++11.  */;
      else if (TREE_CODE (expr) != ADDR_EXPR
	       && TREE_CODE (expr_type) != ARRAY_TYPE)
	{
	  if (VAR_P (expr))
	    {
	      if (complain & tf_error)
		error ("%qD is not a valid template argument "
		       "because %qD is a variable, not the address of "
		       "a variable", expr, expr);
	      return NULL_TREE;
	    }
	  if (POINTER_TYPE_P (expr_type))
	    {
	      if (complain & tf_error)
		error ("%qE is not a valid template argument for %qT "
		       "because it is not the address of a variable",
		       expr, type);
	      return NULL_TREE;
	    }
	  /* Other values, like integer constants, might be valid
	     non-type arguments of some other type.  */
	  return error_mark_node;
	}
      else
	{
	  tree decl;

	  decl = ((TREE_CODE (expr) == ADDR_EXPR)
		  ? TREE_OPERAND (expr, 0) : expr);
	  if (!VAR_P (decl))
	    {
	      if (complain & tf_error)
		error ("%qE is not a valid template argument of type %qT "
		       "because %qE is not a variable", expr, type, decl);
	      return NULL_TREE;
	    }
	  else if (cxx_dialect < cxx11 && !DECL_EXTERNAL_LINKAGE_P (decl))
	    {
	      if (complain & tf_error)
		error ("%qE is not a valid template argument of type %qT "
		       "because %qD does not have external linkage",
		       expr, type, decl);
	      return NULL_TREE;
	    }
	  else if ((cxx_dialect >= cxx11 && cxx_dialect < cxx1z)
		   && decl_linkage (decl) == lk_none)
	    {
	      if (complain & tf_error)
		error ("%qE is not a valid template argument of type %qT "
		       "because %qD has no linkage", expr, type, decl);
	      return NULL_TREE;
	    }
	  /* C++17: For a non-type template-parameter of reference or pointer
	     type, the value of the constant expression shall not refer to (or
	     for a pointer type, shall not be the address of):
	       * a subobject (4.5),
	       * a temporary object (15.2),
	       * a string literal (5.13.5),
	       * the result of a typeid expression (8.2.8), or
	       * a predefined __func__ variable (11.4.1).  */
	  else if (DECL_ARTIFICIAL (decl))
	    {
	      if (complain & tf_error)
		error ("the address of %qD is not a valid template argument",
		       decl);
	      return NULL_TREE;
	    }
	  else if (!same_type_ignoring_top_level_qualifiers_p
		   (strip_array_types (TREE_TYPE (type)),
		    strip_array_types (TREE_TYPE (decl))))
	    {
	      if (complain & tf_error)
		error ("the address of the %qT subobject of %qD is not a "
		       "valid template argument", TREE_TYPE (type), decl);
	      return NULL_TREE;
	    }
	  else if (!TREE_STATIC (decl) && !DECL_EXTERNAL (decl))
	    {
	      if (complain & tf_error)
		error ("the address of %qD is not a valid template argument "
		       "because it does not have static storage duration",
		       decl);
	      return NULL_TREE;
	    }
	}

      expr = decay_conversion (expr, complain);
      if (expr == error_mark_node)
	return error_mark_node;

      expr = perform_qualification_conversions (type, expr);
      if (expr == error_mark_node)
	return error_mark_node;
    }
  /* [temp.arg.nontype]/5, bullet 3

     For a non-type template-parameter of type reference to object, no
     conversions apply. The type referred to by the reference may be more
     cv-qualified than the (otherwise identical) type of the
     template-argument. The template-parameter is bound directly to the
     template-argument, which must be an lvalue.  */
  else if (TYPE_REF_OBJ_P (type))
    {
      if (!same_type_ignoring_top_level_qualifiers_p (TREE_TYPE (type),
						      expr_type))
	return error_mark_node;

      if (!at_least_as_qualified_p (TREE_TYPE (type), expr_type))
	{
	  if (complain & tf_error)
	    error ("%qE is not a valid template argument for type %qT "
		   "because of conflicts in cv-qualification", expr, type);
	  return NULL_TREE;
	}

      if (!lvalue_p (expr))
	{
	  if (complain & tf_error)
	    error ("%qE is not a valid template argument for type %qT "
		   "because it is not an lvalue", expr, type);
	  return NULL_TREE;
	}

      /* [temp.arg.nontype]/1

	 A template-argument for a non-type, non-template template-parameter
	 shall be one of: [...]

	 -- the address of an object or function with external linkage.  */
      if (INDIRECT_REF_P (expr)
	  && TYPE_REF_OBJ_P (TREE_TYPE (TREE_OPERAND (expr, 0))))
	{
	  expr = TREE_OPERAND (expr, 0);
	  if (DECL_P (expr))
	    {
	      if (complain & tf_error)
		error ("%q#D is not a valid template argument for type %qT "
		       "because a reference variable does not have a constant "
		       "address", expr, type);
	      return NULL_TREE;
	    }
	}

      if (TYPE_REF_OBJ_P (TREE_TYPE (expr))
	  && value_dependent_expression_p (expr))
	/* OK, dependent reference.  We don't want to ask whether a DECL is
	   itself value-dependent, since what we want here is its address.  */;
      else
	{
	  if (!DECL_P (expr))
	    {
	      if (complain & tf_error)
		error ("%qE is not a valid template argument for type %qT "
		       "because it is not an object with linkage",
		       expr, type);
	      return NULL_TREE;
	    }

	  /* DR 1155 allows internal linkage in C++11 and up.  */
	  linkage_kind linkage = decl_linkage (expr);
	  if (linkage < (cxx_dialect >= cxx11 ? lk_internal : lk_external))
	    {
	      if (complain & tf_error)
		error ("%qE is not a valid template argument for type %qT "
		       "because object %qD does not have linkage",
		       expr, type, expr);
	      return NULL_TREE;
	    }

	  expr = build_address (expr);
	}

      if (!same_type_p (type, TREE_TYPE (expr)))
	expr = build_nop (type, expr);
    }
  /* [temp.arg.nontype]/5, bullet 4

     For a non-type template-parameter of type pointer to function, only
     the function-to-pointer conversion (_conv.func_) is applied. If the
     template-argument represents a set of overloaded functions (or a
     pointer to such), the matching function is selected from the set
     (_over.over_).  */
  else if (TYPE_PTRFN_P (type))
    {
      /* If the argument is a template-id, we might not have enough
	 context information to decay the pointer.  */
      if (!type_unknown_p (expr_type))
	{
	  expr = decay_conversion (expr, complain);
	  if (expr == error_mark_node)
	    return error_mark_node;
	}

      if (cxx_dialect >= cxx11 && integer_zerop (expr))
	/* Null pointer values are OK in C++11.  */
	return perform_qualification_conversions (type, expr);

      expr = convert_nontype_argument_function (type, expr, complain);
      if (!expr || expr == error_mark_node)
	return expr;
    }
  /* [temp.arg.nontype]/5, bullet 5

     For a non-type template-parameter of type reference to function, no
     conversions apply. If the template-argument represents a set of
     overloaded functions, the matching function is selected from the set
     (_over.over_).  */
  else if (TYPE_REFFN_P (type))
    {
      if (TREE_CODE (expr) == ADDR_EXPR)
	{
	  if (complain & tf_error)
	    {
	      error ("%qE is not a valid template argument for type %qT "
		     "because it is a pointer", expr, type);
	      inform (input_location, "try using %qE instead",
		      TREE_OPERAND (expr, 0));
	    }
	  return NULL_TREE;
	}

      expr = convert_nontype_argument_function (type, expr, complain);
      if (!expr || expr == error_mark_node)
	return expr;
    }
  /* [temp.arg.nontype]/5, bullet 6

     For a non-type template-parameter of type pointer to member function,
     no conversions apply. If the template-argument represents a set of
     overloaded member functions, the matching member function is selected
     from the set (_over.over_).  */
  else if (TYPE_PTRMEMFUNC_P (type))
    {
      expr = instantiate_type (type, expr, tf_none);
      if (expr == error_mark_node)
	return error_mark_node;

      /* [temp.arg.nontype] bullet 1 says the pointer to member
	 expression must be a pointer-to-member constant.  */
      if (!value_dependent_expression_p (expr)
	  && !check_valid_ptrmem_cst_expr (type, expr, complain))
	return error_mark_node;

      /* Repeated conversion can't deal with a conversion that turns PTRMEM_CST
	 into a CONSTRUCTOR, so build up a new PTRMEM_CST instead.  */
      if (fnptr_conv_p (type, TREE_TYPE (expr)))
	expr = make_ptrmem_cst (type, PTRMEM_CST_MEMBER (expr));

      /* There is no way to disable standard conversions in
	 resolve_address_of_overloaded_function (called by
	 instantiate_type). It is possible that the call succeeded by
	 converting &B::I to &D::I (where B is a base of D), so we need
	 to reject this conversion here.

	 Actually, even if there was a way to disable standard conversions,
	 it would still be better to reject them here so that we can
	 provide a superior diagnostic.  */
      if (!same_type_p (TREE_TYPE (expr), type))
	{
	  if (complain & tf_error)
	    {
	      error ("%qE is not a valid template argument for type %qT "
		     "because it is of type %qT", expr, type,
		     TREE_TYPE (expr));
	      /* If we are just one standard conversion off, explain.  */
	      if (can_convert_standard (type, TREE_TYPE (expr), complain))
		inform (input_location,
			"standard conversions are not allowed in this context");
	    }
	  return NULL_TREE;
	}
    }
  /* [temp.arg.nontype]/5, bullet 7

     For a non-type template-parameter of type pointer to data member,
     qualification conversions (_conv.qual_) are applied.  */
  else if (TYPE_PTRDATAMEM_P (type))
    {
      /* [temp.arg.nontype] bullet 1 says the pointer to member
	 expression must be a pointer-to-member constant.  */
      if (!value_dependent_expression_p (expr)
	  && !check_valid_ptrmem_cst_expr (type, expr, complain))
	return error_mark_node;

      expr = perform_qualification_conversions (type, expr);
      if (expr == error_mark_node)
	return expr;
    }
  else if (NULLPTR_TYPE_P (type))
    {
      if (!NULLPTR_TYPE_P (TREE_TYPE (expr)))
	{
	  if (complain & tf_error)
	    error ("%qE is not a valid template argument for type %qT "
		   "because it is of type %qT", expr, type, TREE_TYPE (expr));
	  return NULL_TREE;
	}
      return expr;
    }
  /* A template non-type parameter must be one of the above.  */
  else
    gcc_unreachable ();

  /* Sanity check: did we actually convert the argument to the
     right type?  */
  gcc_assert (same_type_ignoring_top_level_qualifiers_p
	      (type, TREE_TYPE (expr)));
  return convert_from_reference (expr);
}
/* Subroutine of coerce_template_template_parms, which returns 1 if
   PARM_PARM and ARG_PARM match using the rule for the template
   parameters of template template parameters.  Both PARM and ARG are
   template parameters; the rest of the arguments are the same as for
   coerce_template_template_parms.
 */
static int
coerce_template_template_parm (tree parm,
			       tree arg,
			       tsubst_flags_t complain,
			       tree in_decl,
			       tree outer_args)
{
  /* Missing or erroneous parameters can never match.  */
  if (arg == NULL_TREE || error_operand_p (arg)
      || parm == NULL_TREE || error_operand_p (parm))
    return 0;

  /* A type parameter only matches a type parameter, a non-type a
     non-type, etc.  */
  if (TREE_CODE (arg) != TREE_CODE (parm))
    return 0;

  switch (TREE_CODE (parm))
    {
    case TEMPLATE_DECL:
      /* We encounter instantiations of templates like
	 template <template <template <class> class> class TT>
	 class C;  */
      {
	tree parmparm = DECL_INNERMOST_TEMPLATE_PARMS (parm);
	tree argparm = DECL_INNERMOST_TEMPLATE_PARMS (arg);

	/* Recursively compare the parameter lists of the nested
	   template template parameters.  */
	if (!coerce_template_template_parms
	    (parmparm, argparm, complain, in_decl, outer_args))
	  return 0;
      }
      /* Fall through.  */

    case TYPE_DECL:
      if (TEMPLATE_TYPE_PARAMETER_PACK (TREE_TYPE (arg))
	  && !TEMPLATE_TYPE_PARAMETER_PACK (TREE_TYPE (parm)))
	/* Argument is a parameter pack but parameter is not.  */
	return 0;
      break;

    case PARM_DECL:
      /* The tsubst call is used to handle cases such as

	   template <int> class C {};
	   template <class T, template <T> class TT> class D {};
	   D<int, C> d;

	 i.e. the parameter list of TT depends on earlier parameters.  */
      if (!uses_template_parms (TREE_TYPE (arg)))
	{
	  /* Substitute the outer arguments into PARM's type so the two
	     non-dependent types can be compared directly.  */
	  tree t = tsubst (TREE_TYPE (parm), outer_args, complain, in_decl);
	  if (!uses_template_parms (t)
	      && !same_type_p (t, TREE_TYPE (arg)))
	    return 0;
	}

      if (TEMPLATE_PARM_PARAMETER_PACK (DECL_INITIAL (arg))
	  && !TEMPLATE_PARM_PARAMETER_PACK (DECL_INITIAL (parm)))
	/* Argument is a parameter pack but parameter is not.  */
	return 0;

      break;

    default:
      gcc_unreachable ();
    }

  return 1;
}
/* Coerce template argument list ARGLIST for use with template
   template-parameter TEMPL.  Returns the coerced argument list (with
   any relevant outer-level arguments prepended), or error_mark_node
   on failure.  */

static tree
coerce_template_args_for_ttp (tree templ, tree arglist,
			      tsubst_flags_t complain)
{
  /* Consider an example where a template template parameter declared as

     template <class T, class U = std::allocator<T> > class TT

     The template parameter level of T and U are one level larger than
     of TT.  To proper process the default argument of U, say when an
     instantiation `TT<int>' is seen, we need to build the full
     arguments containing {int} as the innermost level.  Outer levels,
     available when not appearing as default template argument, can be
     obtained from the arguments of the enclosing template.

     Suppose that TT is later substituted with std::vector.  The above
     instantiation is `TT<int, std::allocator<T> >' with TT at
     level 1, and T at level 2, while the template arguments at level 1
     becomes {std::vector} and the inner level 2 is {int}.  */

  tree outer = DECL_CONTEXT (templ);
  if (outer)
    {
      if (DECL_TEMPLATE_SPECIALIZATION (outer))
	/* We want arguments for the partial specialization, not arguments for
	   the primary template.  */
	outer = template_parms_to_args (DECL_TEMPLATE_PARMS (outer));
      else
	outer = TI_ARGS (get_template_info (DECL_TEMPLATE_RESULT (outer)));
    }
  else if (current_template_parms)
    {
      /* This is an argument of the current template, so we haven't set
	 DECL_CONTEXT yet.  */
      tree relevant_template_parms;

      /* Parameter levels that are greater than the level of the given
	 template template parm are irrelevant.  */
      relevant_template_parms = current_template_parms;
      while (TMPL_PARMS_DEPTH (relevant_template_parms)
	     != TEMPLATE_TYPE_LEVEL (TREE_TYPE (templ)))
	relevant_template_parms = TREE_CHAIN (relevant_template_parms);

      outer = template_parms_to_args (relevant_template_parms);
    }

  if (outer)
    arglist = add_to_template_args (outer, arglist);

  tree parmlist = DECL_INNERMOST_TEMPLATE_PARMS (templ);
  return coerce_template_parms (parmlist, arglist, templ,
				complain,
				/*require_all_args=*/true,
				/*use_default_args=*/true);
}
/* A cache of template template parameters with match-all default
   arguments, mapping an original template template parameter to the
   defaulted copy built for it (see add_defaults_to_ttp below).
   GTY((deletable)) lets the garbage collector discard the whole
   cache; it is lazily re-created on next use.  */
static GTY((deletable)) hash_map<tree,tree> *defaulted_ttp_cache;
/* Remember T as the cached defaulted variant of the template template
   parameter V, lazily allocating the GC-managed cache on first use.  */

static void
store_defaulted_ttp (tree v, tree t)
{
  if (defaulted_ttp_cache == NULL)
    defaulted_ttp_cache = hash_map<tree,tree>::create_ggc (13);
  defaulted_ttp_cache->put (v, t);
}
/* Return the cached defaulted variant of the template template
   parameter V, or NULL_TREE if none has been stored yet.  */

static tree
lookup_defaulted_ttp (tree v)
{
  if (!defaulted_ttp_cache)
    return NULL_TREE;
  tree *slot = defaulted_ttp_cache->get (v);
  return slot ? *slot : NULL_TREE;
}
/* T is a bound template template-parameter.  Copy its arguments into default
   arguments of the template template-parameter's template parameters.
   OTMPL is the parameter's TEMPLATE_DECL; the result is a fresh
   TEMPLATE_DECL (with a fresh type and parameter index) whose
   non-pack parameters all carry the match-all default any_targ_node.
   Results are memoized in defaulted_ttp_cache.  */
static tree
add_defaults_to_ttp (tree otmpl)
{
  /* Reuse a previously built copy if we have one.  */
  if (tree c = lookup_defaulted_ttp (otmpl))
    return c;

  tree ntmpl = copy_node (otmpl);

  /* Build a distinct type for the copy and wire it up to the new
     TEMPLATE_DECL, so the copy is independent of OTMPL.  */
  tree ntype = copy_node (TREE_TYPE (otmpl));
  TYPE_STUB_DECL (ntype) = TYPE_NAME (ntype) = ntmpl;
  TYPE_MAIN_VARIANT (ntype) = ntype;
  TYPE_POINTER_TO (ntype) = TYPE_REFERENCE_TO (ntype) = NULL_TREE;
  TYPE_NAME (ntype) = ntmpl;
  SET_TYPE_STRUCTURAL_EQUALITY (ntype);

  tree idx = TEMPLATE_TYPE_PARM_INDEX (ntype)
    = copy_node (TEMPLATE_TYPE_PARM_INDEX (ntype));
  TEMPLATE_PARM_DECL (idx) = ntmpl;
  TREE_TYPE (ntmpl) = TREE_TYPE (idx) = ntype;

  /* Copy the innermost parameter vector so we can attach defaults
     without touching OTMPL's parameters.  */
  tree oparms = DECL_TEMPLATE_PARMS (otmpl);
  tree parms = DECL_TEMPLATE_PARMS (ntmpl) = copy_node (oparms);
  TREE_CHAIN (parms) = TREE_CHAIN (oparms);
  tree vec = TREE_VALUE (parms) = copy_node (TREE_VALUE (parms));
  for (int i = 0; i < TREE_VEC_LENGTH (vec); ++i)
    {
      tree o = TREE_VEC_ELT (vec, i);
      if (!template_parameter_pack_p (TREE_VALUE (o)))
	{
	  /* TREE_PURPOSE holds the default argument; any_targ_node
	     matches any template argument.  */
	  tree n = TREE_VEC_ELT (vec, i) = copy_node (o);
	  TREE_PURPOSE (n) = any_targ_node;
	}
    }

  store_defaulted_ttp (otmpl, ntmpl);
  return ntmpl;
}
/* ARG is a bound potential template template-argument, and PARGS is a list
   of arguments for the corresponding template template-parameter.  Adjust
   PARGS as appropriate for application to ARG's template, and if ARG is a
   BOUND_TEMPLATE_TEMPLATE_PARM, possibly adjust it to add default template
   arguments to the template template parameter.
   Returns the coerced argument list (or error_mark_node); may rewrite
   ARG through the reference parameter.  */
static tree
coerce_ttp_args_for_tta (tree& arg, tree pargs, tsubst_flags_t complain)
{
  /* Coercion must happen in a template context; restore the previous
     state afterwards.  */
  ++processing_template_decl;
  tree arg_tmpl = TYPE_TI_TEMPLATE (arg);
  if (DECL_TEMPLATE_TEMPLATE_PARM_P (arg_tmpl))
    {
      /* When comparing two template template-parameters in partial ordering,
	 rewrite the one currently being used as an argument to have default
	 arguments for all parameters.  */
      arg_tmpl = add_defaults_to_ttp (arg_tmpl);
      pargs = coerce_template_args_for_ttp (arg_tmpl, pargs, complain);
      if (pargs != error_mark_node)
	/* Rebind ARG against the defaulted copy of its template.  */
	arg = bind_template_template_parm (TREE_TYPE (arg_tmpl),
					   TYPE_TI_ARGS (arg));
    }
  else
    {
      /* ARG names a real template; coerce PARGS directly against its
	 innermost parameter list.  */
      tree aparms
	= INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (arg_tmpl));
      pargs = coerce_template_parms (aparms, pargs, arg_tmpl, complain,
				     /*require_all*/true,
				     /*use_default*/true);
    }
  --processing_template_decl;
  return pargs;
}
/* Subroutine of unify for the case when PARM is a
   BOUND_TEMPLATE_TEMPLATE_PARM.  Deduce template arguments (into
   TARGS, constrained by TPARMS) from the argument list of ARG;
   may rewrite ARG through the reference parameter.  Returns 0 on
   success, 1 on failure (explaining the failure when EXPLAIN_P).  */

static int
unify_bound_ttp_args (tree tparms, tree targs, tree parm, tree& arg,
		      bool explain_p)
{
  tree parmvec = TYPE_TI_ARGS (parm);
  tree argvec = INNERMOST_TEMPLATE_ARGS (TYPE_TI_ARGS (arg));

  /* The template template parm might be variadic and the argument
     not, so flatten both argument lists.  */
  parmvec = expand_template_argument_pack (parmvec);
  argvec = expand_template_argument_pack (argvec);

  if (flag_new_ttp)
    {
      /* In keeping with P0522R0, adjust P's template arguments
	 to apply to A's template; then flatten it again.  */
      tree nparmvec = parmvec;
      nparmvec = coerce_ttp_args_for_tta (arg, parmvec, tf_none);
      nparmvec = expand_template_argument_pack (nparmvec);

      if (unify (tparms, targs, nparmvec, argvec,
		 UNIFY_ALLOW_NONE, explain_p))
	return 1;

      /* If the P0522 adjustment eliminated a pack expansion, deduce
	 empty packs.  (This branch is already guarded by flag_new_ttp,
	 so there is no need to test it a second time.)  */
      if (TREE_VEC_LENGTH (nparmvec) < TREE_VEC_LENGTH (parmvec)
	  && unify_pack_expansion (tparms, targs, parmvec, argvec,
				   DEDUCE_EXACT, /*sub*/true, explain_p))
	return 1;
    }
  else
    {
      /* Deduce arguments T, i from TT<T> or TT<i>.
	 We check each element of PARMVEC and ARGVEC individually
	 rather than the whole TREE_VEC since they can have
	 different number of elements, which is allowed under N2555.  */

      int len = TREE_VEC_LENGTH (parmvec);

      /* Check if the parameters end in a pack, making them
	 variadic.  */
      int parm_variadic_p = 0;
      if (len > 0
	  && PACK_EXPANSION_P (TREE_VEC_ELT (parmvec, len - 1)))
	parm_variadic_p = 1;

      for (int i = 0; i < len - parm_variadic_p; ++i)
	/* If the template argument list of P contains a pack
	   expansion that is not the last template argument, the
	   entire template argument list is a non-deduced
	   context.  */
	if (PACK_EXPANSION_P (TREE_VEC_ELT (parmvec, i)))
	  return unify_success (explain_p);

      if (TREE_VEC_LENGTH (argvec) < len - parm_variadic_p)
	return unify_too_few_arguments (explain_p,
					TREE_VEC_LENGTH (argvec), len);

      for (int i = 0; i < len - parm_variadic_p; ++i)
	if (unify (tparms, targs,
		   TREE_VEC_ELT (parmvec, i),
		   TREE_VEC_ELT (argvec, i),
		   UNIFY_ALLOW_NONE, explain_p))
	  return 1;

      if (parm_variadic_p
	  && unify_pack_expansion (tparms, targs,
				   parmvec, argvec,
				   DEDUCE_EXACT,
				   /*subr=*/true, explain_p))
	return 1;
    }

  return 0;
}
/* Return 1 if PARM_PARMS and ARG_PARMS matches using rule for
   template template parameters.  Both PARM_PARMS and ARG_PARMS are
   vectors of TREE_LIST nodes containing TYPE_DECL, TEMPLATE_DECL
   or PARM_DECL.

   Consider the example:
     template <class T> class A;
     template<template <class U> class TT> class B;

   For B<A>, PARM_PARMS are the parameters to TT, while ARG_PARMS are
   the parameters to A, and OUTER_ARGS contains A.  */

static int
coerce_template_template_parms (tree parm_parms,
				tree arg_parms,
				tsubst_flags_t complain,
				tree in_decl,
				tree outer_args)
{
  int nparms, nargs, i;
  tree parm, arg;
  int variadic_p = 0;

  gcc_assert (TREE_CODE (parm_parms) == TREE_VEC);
  gcc_assert (TREE_CODE (arg_parms) == TREE_VEC);

  nparms = TREE_VEC_LENGTH (parm_parms);
  nargs = TREE_VEC_LENGTH (arg_parms);

  if (flag_new_ttp)
    {
      /* P0522R0: A template template-parameter P is at least as specialized as
	 a template template-argument A if, given the following rewrite to two
	 function templates, the function template corresponding to P is at
	 least as specialized as the function template corresponding to A
	 according to the partial ordering rules for function templates
	 ([temp.func.order]). Given an invented class template X with the
	 template parameter list of A (including default arguments):

	 * Each of the two function templates has the same template parameters,
	 respectively, as P or A.

	 * Each function template has a single function parameter whose type is
	 a specialization of X with template arguments corresponding to the
	 template parameters from the respective function template where, for
	 each template parameter PP in the template parameter list of the
	 function template, a corresponding template argument AA is formed. If
	 PP declares a parameter pack, then AA is the pack expansion
	 PP... ([temp.variadic]); otherwise, AA is the id-expression PP.

	 If the rewrite produces an invalid type, then P is not at least as
	 specialized as A.  */

      /* So coerce P's args to apply to A's parms, and then deduce between A's
	 args and the converted args.  If that succeeds, A is at least as
	 specialized as P, so they match.*/
      tree pargs = template_parms_level_to_args (parm_parms);
      ++processing_template_decl;
      pargs = coerce_template_parms (arg_parms, pargs, NULL_TREE, tf_none,
				     /*require_all*/true, /*use_default*/true);
      --processing_template_decl;
      if (pargs != error_mark_node)
	{
	  tree targs = make_tree_vec (nargs);
	  tree aargs = template_parms_level_to_args (arg_parms);
	  if (!unify (arg_parms, targs, aargs, pargs, UNIFY_ALLOW_NONE,
		      /*explain*/false))
	    return 1;
	}
      /* Otherwise fall back to the pre-P0522 parameter-wise comparison
	 below.  */
    }

  /* Determine whether we have a parameter pack at the end of the
     template template parameter's template parameter list.  */
  if (TREE_VEC_ELT (parm_parms, nparms - 1) != error_mark_node)
    {
      parm = TREE_VALUE (TREE_VEC_ELT (parm_parms, nparms - 1));

      if (error_operand_p (parm))
	return 0;

      switch (TREE_CODE (parm))
	{
	case TEMPLATE_DECL:
	case TYPE_DECL:
	  if (TEMPLATE_TYPE_PARAMETER_PACK (TREE_TYPE (parm)))
	    variadic_p = 1;
	  break;

	case PARM_DECL:
	  if (TEMPLATE_PARM_PARAMETER_PACK (DECL_INITIAL (parm)))
	    variadic_p = 1;
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* Without a trailing pack the counts must agree exactly; with one,
     the argument list may supply anything from nparms-1 on up.  */
  if (nargs != nparms
      && !(variadic_p && nargs >= nparms - 1))
    return 0;

  /* Check all of the template parameters except the parameter pack at
     the end (if any).  */
  for (i = 0; i < nparms - variadic_p; ++i)
    {
      if (TREE_VEC_ELT (parm_parms, i) == error_mark_node
	  || TREE_VEC_ELT (arg_parms, i) == error_mark_node)
	continue;

      parm = TREE_VALUE (TREE_VEC_ELT (parm_parms, i));
      arg = TREE_VALUE (TREE_VEC_ELT (arg_parms, i));

      if (!coerce_template_template_parm (parm, arg, complain, in_decl,
					  outer_args))
	return 0;

    }

  if (variadic_p)
    {
      /* Check each of the template parameters in the template
	 argument against the template parameter pack at the end of
	 the template template parameter.  */
      if (TREE_VEC_ELT (parm_parms, i) == error_mark_node)
	return 0;

      parm = TREE_VALUE (TREE_VEC_ELT (parm_parms, i));

      for (; i < nargs; ++i)
	{
	  if (TREE_VEC_ELT (arg_parms, i) == error_mark_node)
	    continue;

	  arg = TREE_VALUE (TREE_VEC_ELT (arg_parms, i));

	  if (!coerce_template_template_parm (parm, arg, complain, in_decl,
					      outer_args))
	    return 0;
	}
    }

  return 1;
}
/* Verifies that the deduced template arguments (in TARGS) for the
   template template parameters (in TPARMS) represent valid bindings,
   by comparing the template parameter list of each template argument
   to the template parameter list of its corresponding template
   template parameter, in accordance with DR150. This
   routine can only be called after all template arguments have been
   deduced. It will return TRUE if all of the template template
   parameter bindings are okay, FALSE otherwise.  */
bool
template_template_parm_bindings_ok_p (tree tparms, tree targs)
{
  int i, ntparms = TREE_VEC_LENGTH (tparms);
  bool ret = true;

  /* We're dealing with template parms in this process.  */
  ++processing_template_decl;

  targs = INNERMOST_TEMPLATE_ARGS (targs);

  for (i = 0; i < ntparms; ++i)
    {
      tree tparm = TREE_VALUE (TREE_VEC_ELT (tparms, i));
      tree targ = TREE_VEC_ELT (targs, i);

      /* Only template template parameters (TEMPLATE_DECLs) need
	 their bindings checked.  */
      if (TREE_CODE (tparm) == TEMPLATE_DECL && targ)
	{
	  tree packed_args = NULL_TREE;
	  int idx, len = 1;

	  if (ARGUMENT_PACK_P (targ))
	    {
	      /* Look inside the argument pack.  */
	      packed_args = ARGUMENT_PACK_ARGS (targ);
	      len = TREE_VEC_LENGTH (packed_args);
	    }

	  /* Check every element: either the single argument, or each
	     member of the argument pack.  */
	  for (idx = 0; idx < len; ++idx)
	    {
	      tree targ_parms = NULL_TREE;

	      if (packed_args)
		/* Extract the next argument from the argument
		   pack.  */
		targ = TREE_VEC_ELT (packed_args, idx);

	      if (PACK_EXPANSION_P (targ))
		/* Look at the pattern of the pack expansion.  */
		targ = PACK_EXPANSION_PATTERN (targ);

	      /* Extract the template parameters from the template
		 argument.  */
	      if (TREE_CODE (targ) == TEMPLATE_DECL)
		targ_parms = DECL_INNERMOST_TEMPLATE_PARMS (targ);
	      else if (TREE_CODE (targ) == TEMPLATE_TEMPLATE_PARM)
		targ_parms = DECL_INNERMOST_TEMPLATE_PARMS (TYPE_NAME (targ));

	      /* Verify that we can coerce the template template
		 parameters from the template argument to the template
		 parameter.  This requires an exact match.  */
	      if (targ_parms
		  && !coerce_template_template_parms
		       (DECL_INNERMOST_TEMPLATE_PARMS (tparm),
			targ_parms,
			tf_none,
			tparm,
			targs))
		{
		  ret = false;
		  goto out;
		}
	    }
	}
    }

 out:

  --processing_template_decl;
  return ret;
}
/* Since type attributes aren't mangled, we need to strip them from
template type arguments. */
static tree
canonicalize_type_argument (tree arg, tsubst_flags_t complain)
{
  /* A missing, erroneous, or already-canonical argument needs no work.  */
  if (arg == NULL_TREE
      || arg == error_mark_node
      || arg == TYPE_CANONICAL (arg))
    return arg;

  /* Strip typedefs; remember whether any attributes were dropped so we
     can warn about them.  */
  bool stripped_attrs = false;
  tree result = strip_typedefs (arg, &stripped_attrs);

  if (stripped_attrs && (complain & tf_warning))
    warning (OPT_Wignored_attributes,
	     "ignoring attributes on template argument %qT", arg);

  return result;
}
/* And from inside dependent non-type arguments like sizeof(Type). */
static tree
canonicalize_expr_argument (tree arg, tsubst_flags_t complain)
{
  /* Nothing to canonicalize for a missing or erroneous argument.  */
  if (arg == NULL_TREE || arg == error_mark_node)
    return arg;

  /* Strip typedefs inside the expression; warn if that discarded any
     attributes, since they are not mangled.  */
  bool stripped_attrs = false;
  tree result = strip_typedefs_expr (arg, &stripped_attrs);

  if (stripped_attrs && (complain & tf_warning))
    warning (OPT_Wignored_attributes,
	     "ignoring attributes in template argument %qE", arg);

  return result;
}
// A template declaration can be substituted for a constrained
// template template parameter only when the argument is more
// constrained than the parameter.
// A template declaration can be substituted for a constrained
// template template parameter only when the argument is more
// constrained than the parameter.
static bool
is_compatible_template_arg (tree parm, tree arg)
{
  tree parm_cons = get_constraints (parm);

  /* For now, allow constrained template template arguments
     and unconstrained template template parameters.  */
  if (parm_cons == NULL_TREE)
    return true;

  tree arg_cons = get_constraints (arg);

  // The parameter is constrained, so rewrite its constraints in terms
  // of ARG's template parameters.  This ensures that all of the
  // template parameter types will have the same depth.
  //
  // Note that this is only valid when coerce_template_template_parm is
  // true for the innermost template parameters of PARM and ARG.  In
  // other words, because coercion is successful, this conversion will
  // be valid.
  tree subst_args = template_parms_to_args (DECL_TEMPLATE_PARMS (arg));
  parm_cons = tsubst_constraint_info (parm_cons,
				      INNERMOST_TEMPLATE_ARGS (subst_args),
				      tf_none, NULL_TREE);
  if (parm_cons == error_mark_node)
    return false;

  return subsumes (parm_cons, arg_cons);
}
// Convert a placeholder argument into a binding to the original
// parameter. The original parameter is saved as the TREE_TYPE of
// ARG.
/* Convert a placeholder argument into a binding to the original
   parameter: the parameter is stashed in the TREE_TYPE of ARG so the
   binding can be recovered later.  Returns ARG itself.  */
static inline tree
convert_wildcard_argument (tree parm, tree arg)
{
  TREE_TYPE (arg) = parm;
  return arg;
}
/* Convert the indicated template ARG as necessary to match the
indicated template PARM. Returns the converted ARG, or
error_mark_node if the conversion was unsuccessful. Error and
warning messages are issued under control of COMPLAIN. This
conversion is for the Ith parameter in the parameter list. ARGS is
the full set of template arguments deduced so far. */
static tree
convert_template_argument (tree parm,
			   tree arg,
			   tree args,
			   tsubst_flags_t complain,
			   int i,
			   tree in_decl)
{
  tree orig_arg;
  tree val;
  /* Classification flags: what the parameter requires vs. what the
     argument actually is.  */
  int is_type, requires_type, is_tmpl_type, requires_tmpl_type;

  if (parm == error_mark_node)
    return error_mark_node;

  /* Trivially convert placeholders. */
  if (TREE_CODE (arg) == WILDCARD_DECL)
    return convert_wildcard_argument (parm, arg);

  if (arg == any_targ_node)
    return arg;

  if (TREE_CODE (arg) == TREE_LIST
      && TREE_CODE (TREE_VALUE (arg)) == OFFSET_REF)
    {
      /* The template argument was the name of some
	 member function. That's usually
	 invalid, but static members are OK. In any
	 case, grab the underlying fields/functions
	 and issue an error later if required. */
      orig_arg = TREE_VALUE (arg);
      TREE_TYPE (arg) = unknown_type_node;
    }

  /* NOTE(review): this unconditionally overwrites the ORIG_ARG set in
     the OFFSET_REF branch just above, so that earlier store is dead --
     confirm whether that branch was meant to keep its value.  */
  orig_arg = arg;

  /* Does the parameter require a template, a type, or a value?  */
  requires_tmpl_type = TREE_CODE (parm) == TEMPLATE_DECL;
  requires_type = (TREE_CODE (parm) == TYPE_DECL
		   || requires_tmpl_type);

  /* When determining whether an argument pack expansion is a template,
     look at the pattern. */
  if (TREE_CODE (arg) == TYPE_PACK_EXPANSION)
    arg = PACK_EXPANSION_PATTERN (arg);

  /* Deal with an injected-class-name used as a template template arg. */
  if (requires_tmpl_type && CLASS_TYPE_P (arg))
    {
      tree t = maybe_get_template_decl_from_type_decl (TYPE_NAME (arg));
      if (TREE_CODE (t) == TEMPLATE_DECL)
	{
	  if (cxx_dialect >= cxx11)
	    /* OK under DR 1004. */;
	  else if (complain & tf_warning_or_error)
	    pedwarn (input_location, OPT_Wpedantic, "injected-class-name %qD"
		     " used as template template argument", TYPE_NAME (arg));
	  else if (flag_pedantic_errors)
	    t = arg;

	  arg = t;
	}
    }

  /* Classify the argument: is it a (class) template, a type, or a
     value?  */
  is_tmpl_type =
    ((TREE_CODE (arg) == TEMPLATE_DECL
      && TREE_CODE (DECL_TEMPLATE_RESULT (arg)) == TYPE_DECL)
     || (requires_tmpl_type && TREE_CODE (arg) == TYPE_ARGUMENT_PACK)
     || TREE_CODE (arg) == TEMPLATE_TEMPLATE_PARM
     || TREE_CODE (arg) == UNBOUND_CLASS_TEMPLATE);

  if (is_tmpl_type
      && (TREE_CODE (arg) == TEMPLATE_TEMPLATE_PARM
	  || TREE_CODE (arg) == UNBOUND_CLASS_TEMPLATE))
    arg = TYPE_STUB_DECL (arg);

  is_type = TYPE_P (arg) || is_tmpl_type;

  /* T::x where T is a template type parameter: diagnose the missing
     "typename" and recover by building the typename type.  */
  if (requires_type && ! is_type && TREE_CODE (arg) == SCOPE_REF
      && TREE_CODE (TREE_OPERAND (arg, 0)) == TEMPLATE_TYPE_PARM)
    {
      if (TREE_CODE (TREE_OPERAND (arg, 1)) == BIT_NOT_EXPR)
	{
	  if (complain & tf_error)
	    error ("invalid use of destructor %qE as a type", orig_arg);
	  return error_mark_node;
	}

      permerror (input_location,
		 "to refer to a type member of a template parameter, "
		 "use %<typename %E%>", orig_arg);

      orig_arg = make_typename_type (TREE_OPERAND (arg, 0),
				     TREE_OPERAND (arg, 1),
				     typename_type,
				     complain);
      arg = orig_arg;
      is_type = 1;
    }

  /* Diagnose a type argument given for a value parameter, or vice
     versa.  */
  if (is_type != requires_type)
    {
      if (in_decl)
	{
	  if (complain & tf_error)
	    {
	      error ("type/value mismatch at argument %d in template "
		     "parameter list for %qD",
		     i + 1, in_decl);
	      if (is_type)
		inform (input_location,
			" expected a constant of type %qT, got %qT",
			TREE_TYPE (parm),
			(DECL_P (arg) ? DECL_NAME (arg) : orig_arg));
	      else if (requires_tmpl_type)
		inform (input_location,
			" expected a class template, got %qE", orig_arg);
	      else
		inform (input_location,
			" expected a type, got %qE", orig_arg);
	    }
	}
      return error_mark_node;
    }

  /* Diagnose a plain type given for a template template parameter, or
     a template given for a type parameter.  */
  if (is_tmpl_type ^ requires_tmpl_type)
    {
      if (in_decl && (complain & tf_error))
	{
	  error ("type/value mismatch at argument %d in template "
		 "parameter list for %qD",
		 i + 1, in_decl);
	  if (is_tmpl_type)
	    inform (input_location,
		    " expected a type, got %qT", DECL_NAME (arg));
	  else
	    inform (input_location,
		    " expected a class template, got %qT", orig_arg);
	}
      return error_mark_node;
    }

  if (is_type)
    {
      if (requires_tmpl_type)
	{
	  if (template_parameter_pack_p (parm) && ARGUMENT_PACK_P (orig_arg))
	    val = orig_arg;
	  else if (TREE_CODE (TREE_TYPE (arg)) == UNBOUND_CLASS_TEMPLATE)
	    /* The number of argument required is not known yet.
	       Just accept it for now. */
	    val = orig_arg;
	  else
	    {
	      tree parmparm = DECL_INNERMOST_TEMPLATE_PARMS (parm);
	      tree argparm;

	      /* Strip alias templates that are equivalent to another
		 template. */
	      arg = get_underlying_template (arg);
	      argparm = DECL_INNERMOST_TEMPLATE_PARMS (arg);

	      if (coerce_template_template_parms (parmparm, argparm,
						  complain, in_decl,
						  args))
		{
		  val = arg;

		  /* TEMPLATE_TEMPLATE_PARM node is preferred over
		     TEMPLATE_DECL. */
		  if (val != error_mark_node)
		    {
		      if (DECL_TEMPLATE_TEMPLATE_PARM_P (val))
			val = TREE_TYPE (val);
		      if (TREE_CODE (orig_arg) == TYPE_PACK_EXPANSION)
			val = make_pack_expansion (val);
		    }
		}
	      else
		{
		  if (in_decl && (complain & tf_error))
		    {
		      error ("type/value mismatch at argument %d in "
			     "template parameter list for %qD",
			     i + 1, in_decl);
		      inform (input_location,
			      " expected a template of type %qD, got %qT",
			      parm, orig_arg);
		    }

		  val = error_mark_node;
		}

	      // Check that the constraints are compatible before allowing the
	      // substitution.
	      if (val != error_mark_node)
		if (!is_compatible_template_arg (parm, arg))
		  {
		    if (in_decl && (complain & tf_error))
		      {
			error ("constraint mismatch at argument %d in "
			       "template parameter list for %qD",
			       i + 1, in_decl);
			inform (input_location, " expected %qD but got %qD",
				parm, arg);
		      }
		    val = error_mark_node;
		  }
	    }
	}
      else
	val = orig_arg;

      /* We only form one instance of each template specialization.
	 Therefore, if we use a non-canonical variant (i.e., a
	 typedef), any future messages referring to the type will use
	 the typedef, which is confusing if those future uses do not
	 themselves also use the typedef. */
      if (TYPE_P (val))
	val = canonicalize_type_argument (val, complain);
    }
  else
    {
      /* Non-type parameter: substitute/deduce the parameter's type,
	 then convert the value argument to it.  */
      tree t = TREE_TYPE (parm);

      if (tree a = type_uses_auto (t))
	{
	  if (ARGUMENT_PACK_P (orig_arg))
	    /* There's nothing to check for an auto argument pack. */
	    return orig_arg;

	  t = do_auto_deduction (t, arg, a, complain, adc_unify, args);
	  if (t == error_mark_node)
	    return error_mark_node;
	}
      else
	t = tsubst (t, args, complain, in_decl);

      if (invalid_nontype_parm_type_p (t, complain))
	return error_mark_node;

      if (template_parameter_pack_p (parm) && ARGUMENT_PACK_P (orig_arg))
	{
	  if (same_type_p (t, TREE_TYPE (orig_arg)))
	    val = orig_arg;
	  else
	    {
	      /* Not sure if this is reachable, but it doesn't hurt
		 to be robust. */
	      error ("type mismatch in nontype parameter pack");
	      val = error_mark_node;
	    }
	}
      else if (!type_dependent_expression_p (orig_arg)
	       && !uses_template_parms (t))
	/* We used to call digest_init here. However, digest_init
	   will report errors, which we don't want when complain
	   is zero. More importantly, digest_init will try too
	   hard to convert things: for example, `0' should not be
	   converted to pointer type at this point according to
	   the standard. Accepting this is not merely an
	   extension, since deciding whether or not these
	   conversions can occur is part of determining which
	   function template to call, or whether a given explicit
	   argument specification is valid. */
	val = convert_nontype_argument (t, orig_arg, complain);
      else
	val = canonicalize_expr_argument (orig_arg, complain);

      if (val == NULL_TREE)
	val = error_mark_node;
      else if (val == error_mark_node && (complain & tf_error))
	error ("could not convert template argument %qE from %qT to %qT",
	       orig_arg, TREE_TYPE (orig_arg), t);

      if (INDIRECT_REF_P (val))
	{
	  /* Reject template arguments that are references to built-in
	     functions with no library fallbacks. */
	  const_tree inner = TREE_OPERAND (val, 0);
	  const_tree innertype = TREE_TYPE (inner);
	  if (innertype
	      && TREE_CODE (innertype) == REFERENCE_TYPE
	      && TREE_CODE (TREE_TYPE (innertype)) == FUNCTION_TYPE
	      && 0 < TREE_OPERAND_LENGTH (inner)
	      && reject_gcc_builtin (TREE_OPERAND (inner, 0)))
	    return error_mark_node;
	}

      if (TREE_CODE (val) == SCOPE_REF)
	{
	  /* Strip typedefs from the SCOPE_REF. */
	  tree type = canonicalize_type_argument (TREE_TYPE (val), complain);
	  tree scope = canonicalize_type_argument (TREE_OPERAND (val, 0),
						   complain);
	  val = build_qualified_name (type, scope, TREE_OPERAND (val, 1),
				      QUALIFIED_NAME_IS_TEMPLATE (val));
	}
    }

  return val;
}
/* Coerces the remaining template arguments in INNER_ARGS (from
ARG_IDX to the end) into the parameter pack at PARM_IDX in PARMS.
Returns the coerced argument pack. PARM_IDX is the position of this
parameter in the template parameter list. ARGS is the original
template argument list. */
static tree
coerce_template_parameter_pack (tree parms,
				int parm_idx,
				tree args,
				tree inner_args,
				int arg_idx,
				tree new_args,
				int* lost,
				tree in_decl,
				tsubst_flags_t complain)
{
  tree parm = TREE_VEC_ELT (parms, parm_idx);
  int nargs = inner_args ? NUM_TMPL_ARGS (inner_args) : 0;
  tree packed_args;
  tree argument_pack;
  tree packed_parms = NULL_TREE;

  /* Clamp the start index so an over-large ARG_IDX consumes nothing.  */
  if (arg_idx > nargs)
    arg_idx = nargs;

  if (tree packs = fixed_parameter_pack_p (TREE_VALUE (parm)))
    {
      /* When the template parameter is a non-type template parameter pack
         or template template parameter pack whose type or template
         parameters use parameter packs, we know exactly how many arguments
         we are looking for. Build a vector of the instantiated decls for
         these template parameters in PACKED_PARMS. */
      /* We can't use make_pack_expansion here because it would interpret a
         _DECL as a use rather than a declaration. */
      tree decl = TREE_VALUE (parm);
      tree exp = cxx_make_type (TYPE_PACK_EXPANSION);
      SET_PACK_EXPANSION_PATTERN (exp, decl);
      PACK_EXPANSION_PARAMETER_PACKS (exp) = packs;
      SET_TYPE_STRUCTURAL_EQUALITY (exp);

      /* Temporarily hide the innermost args level during substitution;
	 the pack's own level is not yet complete.  */
      TREE_VEC_LENGTH (args)--;
      packed_parms = tsubst_pack_expansion (exp, args, complain, decl);
      TREE_VEC_LENGTH (args)++;

      if (packed_parms == error_mark_node)
	return error_mark_node;

      /* If we're doing a partial instantiation of a member template,
         verify that all of the types used for the non-type
         template parameter pack are, in fact, valid for non-type
         template parameters. */
      if (arg_idx < nargs
	  && PACK_EXPANSION_P (TREE_VEC_ELT (inner_args, arg_idx)))
	{
	  int j, len = TREE_VEC_LENGTH (packed_parms);
	  for (j = 0; j < len; ++j)
	    {
	      tree t = TREE_TYPE (TREE_VEC_ELT (packed_parms, j));
	      if (invalid_nontype_parm_type_p (t, complain))
		return error_mark_node;
	    }
	  /* We don't know how many args we have yet, just
	     use the unconverted ones for now. */
	  return NULL_TREE;
	}

      packed_args = make_tree_vec (TREE_VEC_LENGTH (packed_parms));
    }
  /* Check if we have a placeholder pack, which indicates we're
     in the context of a introduction list. In that case we want
     to match this pack to the single placeholder. */
  else if (arg_idx < nargs
	   && TREE_CODE (TREE_VEC_ELT (inner_args, arg_idx)) == WILDCARD_DECL
	   && WILDCARD_PACK_P (TREE_VEC_ELT (inner_args, arg_idx)))
    {
      nargs = arg_idx + 1;
      packed_args = make_tree_vec (1);
    }
  else
    packed_args = make_tree_vec (nargs - arg_idx);

  /* Convert the remaining arguments, which will be a part of the
     parameter pack "parm". */
  int first_pack_arg = arg_idx;
  for (; arg_idx < nargs; ++arg_idx)
    {
      tree arg = TREE_VEC_ELT (inner_args, arg_idx);
      tree actual_parm = TREE_VALUE (parm);
      int pack_idx = arg_idx - first_pack_arg;

      if (packed_parms)
	{
	  /* Once we've packed as many args as we have types, stop. */
	  if (pack_idx >= TREE_VEC_LENGTH (packed_parms))
	    break;
	  else if (PACK_EXPANSION_P (arg))
	    /* We don't know how many args we have yet, just
	       use the unconverted ones for now. */
	    return NULL_TREE;
	  else
	    actual_parm = TREE_VEC_ELT (packed_parms, pack_idx);
	}

      if (arg == error_mark_node)
	{
	  if (complain & tf_error)
	    error ("template argument %d is invalid", arg_idx + 1);
	}
      else
	arg = convert_template_argument (actual_parm,
					 arg, new_args, complain, parm_idx,
					 in_decl);
      if (arg == error_mark_node)
	(*lost)++;
      TREE_VEC_ELT (packed_args, pack_idx) = arg;
    }

  /* Diagnose fewer arguments than a fixed-size pack requires.  */
  if (arg_idx - first_pack_arg < TREE_VEC_LENGTH (packed_args)
      && TREE_VEC_LENGTH (packed_args) > 0)
    {
      if (complain & tf_error)
	error ("wrong number of template arguments (%d, should be %d)",
	       arg_idx - first_pack_arg, TREE_VEC_LENGTH (packed_args));
      return error_mark_node;
    }

  /* Wrap the converted arguments in an argument-pack node of the
     flavor matching the parameter kind.  */
  if (TREE_CODE (TREE_VALUE (parm)) == TYPE_DECL
      || TREE_CODE (TREE_VALUE (parm)) == TEMPLATE_DECL)
    argument_pack = cxx_make_type (TYPE_ARGUMENT_PACK);
  else
    {
      argument_pack = make_node (NONTYPE_ARGUMENT_PACK);
      TREE_TYPE (argument_pack)
	= tsubst (TREE_TYPE (TREE_VALUE (parm)), new_args, complain, in_decl);
      TREE_CONSTANT (argument_pack) = 1;
    }

  SET_ARGUMENT_PACK_ARGS (argument_pack, packed_args);
  if (CHECKING_P)
    SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (packed_args,
					 TREE_VEC_LENGTH (packed_args));

  return argument_pack;
}
/* Returns the number of pack expansions in the template argument vector
ARGS. */
/* Returns the number of pack expansions in the template argument
   vector ARGS (0 when ARGS is NULL).  */
static int
pack_expansion_args_count (tree args)
{
  int n = 0;

  if (args)
    {
      const int len = TREE_VEC_LENGTH (args);
      for (int idx = 0; idx < len; ++idx)
	{
	  tree elt = TREE_VEC_ELT (args, idx);
	  if (elt && PACK_EXPANSION_P (elt))
	    ++n;
	}
    }

  return n;
}
/* Convert all template arguments to their appropriate types, and
return a vector containing the innermost resulting template
arguments. If any error occurs, return error_mark_node. Error and
warning messages are issued under control of COMPLAIN.
If REQUIRE_ALL_ARGS is false, argument deduction will be performed
for arguments not specified in ARGS. Otherwise, if
USE_DEFAULT_ARGS is true, default arguments will be used to fill in
unspecified arguments. If REQUIRE_ALL_ARGS is true, but
USE_DEFAULT_ARGS is false, then all arguments must be specified in
ARGS. */
static tree
coerce_template_parms (tree parms,
		       tree args,
		       tree in_decl,
		       tsubst_flags_t complain,
		       bool require_all_args,
		       bool use_default_args)
{
  int nparms, nargs, parm_idx, arg_idx, lost = 0;
  tree orig_inner_args;
  tree inner_args;
  tree new_args;
  tree new_inner_args;
  int saved_unevaluated_operand;
  int saved_inhibit_evaluation_warnings;

  /* When used as a boolean value, indicates whether this is a
     variadic template parameter list. Since it's an int, we can also
     subtract it from nparms to get the number of non-variadic
     parameters. */
  int variadic_p = 0;
  int variadic_args_p = 0;
  int post_variadic_parms = 0;

  /* Likewise for parameters with default arguments. */
  int default_p = 0;

  if (args == error_mark_node)
    return error_mark_node;

  nparms = TREE_VEC_LENGTH (parms);

  /* Determine if there are any parameter packs or default arguments. */
  for (parm_idx = 0; parm_idx < nparms; ++parm_idx)
    {
      tree parm = TREE_VEC_ELT (parms, parm_idx);
      if (variadic_p)
	++post_variadic_parms;
      if (template_parameter_pack_p (TREE_VALUE (parm)))
	++variadic_p;
      if (TREE_PURPOSE (parm))
	++default_p;
    }

  inner_args = orig_inner_args = INNERMOST_TEMPLATE_ARGS (args);
  /* If there are no parameters that follow a parameter pack, we need to
     expand any argument packs so that we can deduce a parameter pack from
     some non-packed args followed by an argument pack, as in variadic85.C.
     If there are such parameters, we need to leave argument packs intact
     so the arguments are assigned properly. This can happen when dealing
     with a nested class inside a partial specialization of a class
     template, as in variadic92.C, or when deducing a template parameter pack
     from a sub-declarator, as in variadic114.C. */
  if (!post_variadic_parms)
    inner_args = expand_template_argument_pack (inner_args);

  /* Count any pack expansion args. */
  variadic_args_p = pack_expansion_args_count (inner_args);

  nargs = inner_args ? NUM_TMPL_ARGS (inner_args) : 0;
  /* Check the argument count: too many, or too few without defaults
     or packs available to make up the difference, is an error.  */
  if ((nargs - variadic_args_p > nparms && !variadic_p)
      || (nargs < nparms - variadic_p
	  && require_all_args
	  && !variadic_args_p
	  && (!use_default_args
	      || (TREE_VEC_ELT (parms, nargs) != error_mark_node
		  && !TREE_PURPOSE (TREE_VEC_ELT (parms, nargs))))))
    {
      if (complain & tf_error)
	{
	  if (variadic_p || default_p)
	    {
	      nparms -= variadic_p + default_p;
	      error ("wrong number of template arguments "
		     "(%d, should be at least %d)", nargs, nparms);
	    }
	  else
	    error ("wrong number of template arguments "
		   "(%d, should be %d)", nargs, nparms);

	  if (in_decl)
	    inform (DECL_SOURCE_LOCATION (in_decl),
		    "provided for %qD", in_decl);
	}

      return error_mark_node;
    }
  /* We can't pass a pack expansion to a non-pack parameter of an alias
     template (DR 1430). */
  else if (in_decl
	   && (DECL_ALIAS_TEMPLATE_P (in_decl)
	       || concept_template_p (in_decl))
	   && variadic_args_p
	   && nargs - variadic_args_p < nparms - variadic_p)
    {
      if (complain & tf_error)
	{
	  for (int i = 0; i < TREE_VEC_LENGTH (inner_args); ++i)
	    {
	      tree arg = TREE_VEC_ELT (inner_args, i);
	      tree parm = TREE_VALUE (TREE_VEC_ELT (parms, i));

	      if (PACK_EXPANSION_P (arg)
		  && !template_parameter_pack_p (parm))
		{
		  if (DECL_ALIAS_TEMPLATE_P (in_decl))
		    error_at (location_of (arg),
			      "pack expansion argument for non-pack parameter "
			      "%qD of alias template %qD", parm, in_decl);
		  else
		    error_at (location_of (arg),
			      "pack expansion argument for non-pack parameter "
			      "%qD of concept %qD", parm, in_decl);
		  inform (DECL_SOURCE_LOCATION (parm), "declared here");
		  goto found;
		}
	    }
	  gcc_unreachable ();
	found:;
	}
      return error_mark_node;
    }

  /* We need to evaluate the template arguments, even though this
     template-id may be nested within a "sizeof". */
  saved_unevaluated_operand = cp_unevaluated_operand;
  cp_unevaluated_operand = 0;
  saved_inhibit_evaluation_warnings = c_inhibit_evaluation_warnings;
  c_inhibit_evaluation_warnings = 0;
  new_inner_args = make_tree_vec (nparms);
  new_args = add_outermost_template_args (args, new_inner_args);
  /* PACK_ADJUST tracks how far ARG_IDX has run ahead of PARM_IDX
     because a pack consumed several arguments.  */
  int pack_adjust = 0;
  for (parm_idx = 0, arg_idx = 0; parm_idx < nparms; parm_idx++, arg_idx++)
    {
      tree arg;
      tree parm;

      /* Get the Ith template parameter. */
      parm = TREE_VEC_ELT (parms, parm_idx);

      if (parm == error_mark_node)
	{
	  TREE_VEC_ELT (new_inner_args, arg_idx) = error_mark_node;
	  continue;
	}

      /* Calculate the next argument. */
      if (arg_idx < nargs)
	arg = TREE_VEC_ELT (inner_args, arg_idx);
      else
	arg = NULL_TREE;

      if (template_parameter_pack_p (TREE_VALUE (parm))
	  && !(arg && ARGUMENT_PACK_P (arg)))
	{
	  /* Some arguments will be placed in the
	     template parameter pack PARM. */
	  arg = coerce_template_parameter_pack (parms, parm_idx, args,
						inner_args, arg_idx,
						new_args, &lost,
						in_decl, complain);

	  if (arg == NULL_TREE)
	    {
	      /* We don't know how many args we have yet, just use the
		 unconverted (and still packed) ones for now. */
	      new_inner_args = orig_inner_args;
	      arg_idx = nargs;
	      break;
	    }

	  TREE_VEC_ELT (new_inner_args, parm_idx) = arg;

	  /* Store this argument. */
	  if (arg == error_mark_node)
	    {
	      lost++;
	      /* We are done with all of the arguments. */
	      arg_idx = nargs;
	    }
	  else
	    {
	      pack_adjust = TREE_VEC_LENGTH (ARGUMENT_PACK_ARGS (arg)) - 1;
	      arg_idx += pack_adjust;
	    }

	  continue;
	}
      else if (arg)
	{
	  if (PACK_EXPANSION_P (arg))
	    {
	      /* "If every valid specialization of a variadic template
		 requires an empty template parameter pack, the template is
		 ill-formed, no diagnostic required." So check that the
		 pattern works with this parameter. */
	      tree pattern = PACK_EXPANSION_PATTERN (arg);
	      tree conv = convert_template_argument (TREE_VALUE (parm),
						     pattern, new_args,
						     complain, parm_idx,
						     in_decl);
	      if (conv == error_mark_node)
		{
		  if (complain & tf_error)
		    inform (input_location, "so any instantiation with a "
			    "non-empty parameter pack would be ill-formed");
		  ++lost;
		}
	      else if (TYPE_P (conv) && !TYPE_P (pattern))
		/* Recover from missing typename. */
		TREE_VEC_ELT (inner_args, arg_idx)
		  = make_pack_expansion (conv);

	      /* We don't know how many args we have yet, just
		 use the unconverted ones for now. */
	      new_inner_args = inner_args;
	      arg_idx = nargs;
	      break;
	    }
	}
      else if (require_all_args)
	{
	  /* There must be a default arg in this case. */
	  arg = tsubst_template_arg (TREE_PURPOSE (parm), new_args,
				     complain, in_decl);
	  /* The position of the first default template argument,
	     is also the number of non-defaulted arguments in NEW_INNER_ARGS.
	     Record that. */
	  if (!NON_DEFAULT_TEMPLATE_ARGS_COUNT (new_inner_args))
	    SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (new_inner_args,
						 arg_idx - pack_adjust);
	}
      else
	break;

      if (arg == error_mark_node)
	{
	  if (complain & tf_error)
	    error ("template argument %d is invalid", arg_idx + 1);
	}
      else if (!arg)
	/* This only occurs if there was an error in the template
	   parameter list itself (which we would already have
	   reported) that we are trying to recover from, e.g., a class
	   template with a parameter list such as
	   template<typename..., typename>. */
	++lost;
      else
	arg = convert_template_argument (TREE_VALUE (parm),
					 arg, new_args, complain,
					 parm_idx, in_decl);

      if (arg == error_mark_node)
	lost++;
      TREE_VEC_ELT (new_inner_args, arg_idx - pack_adjust) = arg;
    }
  /* Restore the evaluation context saved above.  */
  cp_unevaluated_operand = saved_unevaluated_operand;
  c_inhibit_evaluation_warnings = saved_inhibit_evaluation_warnings;

  if (variadic_p && arg_idx < nargs)
    {
      if (complain & tf_error)
	{
	  error ("wrong number of template arguments "
		 "(%d, should be %d)", nargs, arg_idx);
	  if (in_decl)
	    error ("provided for %q+D", in_decl);
	}

      return error_mark_node;
    }

  if (lost)
    return error_mark_node;

  if (CHECKING_P && !NON_DEFAULT_TEMPLATE_ARGS_COUNT (new_inner_args))
    SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (new_inner_args,
					 TREE_VEC_LENGTH (new_inner_args));

  return new_inner_args;
}
/* Convert all template arguments to their appropriate types, and
return a vector containing the innermost resulting template
arguments. If any error occurs, return error_mark_node. Error and
warning messages are not issued.
Note that no function argument deduction is performed, and default
arguments are used to fill in unspecified arguments. */
tree
coerce_template_parms (tree parms, tree args, tree in_decl)
{
  /* tf_none: no diagnostics; all args required, defaults filled in.  */
  return coerce_template_parms (parms, args, in_decl, tf_none, true, true);
}
/* Convert all template arguments to their appropriate type, and
instantiate default arguments as needed. This returns a vector
containing the innermost resulting template arguments, or
error_mark_node if unsuccessful. */
tree
coerce_template_parms (tree parms, tree args, tree in_decl,
                       tsubst_flags_t complain)
{
  /* All args required; default arguments instantiated as needed.  */
  return coerce_template_parms (parms, args, in_decl, complain, true, true);
}
/* Like coerce_template_parms. If PARMS represents all template
parameters levels, this function returns a vector of vectors
representing all the resulting argument levels. Note that in this
case, only the innermost arguments are coerced because the
outermost ones are supposed to have been coerced already.
Otherwise, if PARMS represents only (the innermost) vector of
parameters, this function returns a vector containing just the
innermost resulting arguments. */
static tree
coerce_innermost_template_parms (tree parms,
				 tree args,
				 tree in_decl,
				 tsubst_flags_t complain,
				 bool require_all_args,
				 bool use_default_args)
{
  const int parms_depth = TMPL_PARMS_DEPTH (parms);
  const int args_depth = TMPL_ARGS_DEPTH (args);

  /* A single level of parameters: coerce it directly.  */
  if (parms_depth <= 1)
    return coerce_template_parms (INNERMOST_TEMPLATE_PARMS (parms),
				  args, in_decl, complain,
				  require_all_args,
				  use_default_args);

  /* Multiple levels: coerce only the level matching ARGS_DEPTH (the
     innermost uncoerced one); the other levels are copied through,
     as they are supposed to have been coerced already.  */
  tree coerced = make_tree_vec (parms_depth);
  int depth = parms_depth;
  for (tree level = parms; level != NULL_TREE;
       level = TREE_CHAIN (level), --depth)
    {
      tree lvl_args;
      if (depth == args_depth)
	lvl_args = coerce_template_parms (TREE_VALUE (level),
					  args, in_decl, complain,
					  require_all_args,
					  use_default_args);
      else
	lvl_args = TMPL_ARGS_LEVEL (args, depth);

      if (lvl_args == error_mark_node)
	return error_mark_node;

      SET_TMPL_ARGS_LEVEL (coerced, depth, lvl_args);
    }

  return coerced;
}
/* Returns 1 if template args OT and NT are equivalent. */
/* NOTE(review): mixes `return 1/0` with `return true/false`; the return
   type is int, so behavior is identical, but normalizing would aid
   readability.  */
int
template_args_equal (tree ot, tree nt, bool partial_order /* = false */)
{
  if (nt == ot)
    return 1;
  if (nt == NULL_TREE || ot == NULL_TREE)
    return false;
  /* The wildcard "any" argument matches everything.  */
  if (nt == any_targ_node || ot == any_targ_node)
    return true;

  if (TREE_CODE (nt) == TREE_VEC)
    /* For member templates */
    return TREE_CODE (ot) == TREE_VEC && comp_template_args (ot, nt);
  else if (PACK_EXPANSION_P (ot))
    /* Pack expansions are equal when both pattern and extra args agree.  */
    return (PACK_EXPANSION_P (nt)
	    && template_args_equal (PACK_EXPANSION_PATTERN (ot),
				    PACK_EXPANSION_PATTERN (nt))
	    && template_args_equal (PACK_EXPANSION_EXTRA_ARGS (ot),
				    PACK_EXPANSION_EXTRA_ARGS (nt)));
  else if (ARGUMENT_PACK_P (ot))
    {
      /* Argument packs: same length and element-wise equality.  */
      int i, len;
      tree opack, npack;

      if (!ARGUMENT_PACK_P (nt))
	return 0;

      opack = ARGUMENT_PACK_ARGS (ot);
      npack = ARGUMENT_PACK_ARGS (nt);
      len = TREE_VEC_LENGTH (opack);
      if (TREE_VEC_LENGTH (npack) != len)
	return 0;
      for (i = 0; i < len; ++i)
	if (!template_args_equal (TREE_VEC_ELT (opack, i),
				  TREE_VEC_ELT (npack, i)))
	  return 0;
      return 1;
    }
  else if (ot && TREE_CODE (ot) == ARGUMENT_PACK_SELECT)
    /* ARGUMENT_PACK_SELECT should never survive to this point.  */
    gcc_unreachable ();
  else if (TYPE_P (nt))
    {
      if (!TYPE_P (ot))
	return false;
      /* Don't treat an alias template specialization with dependent
	 arguments as equivalent to its underlying type when used as a
	 template argument; we need them to be distinct so that we
	 substitute into the specialization arguments at instantiation
	 time. And aliases can't be equivalent without being ==, so
	 we don't need to look any deeper.
	 During partial ordering, however, we need to treat them normally so
	 that we can order uses of the same alias with different
	 cv-qualification (79960). */
      if (!partial_order
	  && (TYPE_ALIAS_P (nt) || TYPE_ALIAS_P (ot)))
	return false;
      else
	return same_type_p (ot, nt);
    }
  else if (TREE_CODE (ot) == TREE_VEC || TYPE_P (ot))
    /* NT is an expression here, so a vec/type OT cannot match.  */
    return 0;
  else
    {
      /* Try to treat a template non-type argument that has been converted
	 to the parameter type as equivalent to one that hasn't yet.  */
      for (enum tree_code code1 = TREE_CODE (ot);
	   CONVERT_EXPR_CODE_P (code1)
	     || code1 == NON_LVALUE_EXPR;
	   code1 = TREE_CODE (ot))
	ot = TREE_OPERAND (ot, 0);

      for (enum tree_code code2 = TREE_CODE (nt);
	   CONVERT_EXPR_CODE_P (code2)
	     || code2 == NON_LVALUE_EXPR;
	   code2 = TREE_CODE (nt))
	nt = TREE_OPERAND (nt, 0);

      return cp_tree_equal (ot, nt);
    }
}
/* Returns 1 iff the OLDARGS and NEWARGS are in fact identical sets of
template arguments. Returns 0 otherwise, and updates OLDARG_PTR and
NEWARG_PTR with the offending arguments if they are non-NULL. */
int
comp_template_args (tree oldargs, tree newargs,
		    tree *oldarg_ptr, tree *newarg_ptr,
		    bool partial_order)
{
  /* Identical vectors (including both NULL) trivially agree.  */
  if (oldargs == newargs)
    return 1;

  /* Exactly one NULL vector means a mismatch.  */
  if (oldargs == NULL_TREE || newargs == NULL_TREE)
    return 0;

  const int len = TREE_VEC_LENGTH (oldargs);
  if (len != TREE_VEC_LENGTH (newargs))
    return 0;

  for (int idx = 0; idx < len; ++idx)
    {
      tree ot = TREE_VEC_ELT (oldargs, idx);
      tree nt = TREE_VEC_ELT (newargs, idx);

      if (!template_args_equal (ot, nt, partial_order))
	{
	  /* Report the first offending pair to the caller.  */
	  if (oldarg_ptr != NULL)
	    *oldarg_ptr = ot;
	  if (newarg_ptr != NULL)
	    *newarg_ptr = nt;
	  return 0;
	}
    }

  return 1;
}
/* Like comp_template_args, but for partial ordering: alias template
   specializations compare like their underlying types (see
   template_args_equal's PARTIAL_ORDER handling).  */
inline bool
comp_template_args_porder (tree oargs, tree nargs)
{
  return comp_template_args (oargs, nargs, NULL, NULL, true);
}
/* Queue the instantiation D on the global pending-templates list,
   recording the current instantiation context, unless it is already
   queued.  */
static void
add_pending_template (tree d)
{
  tree ti = (TYPE_P (d)
	     ? CLASSTYPE_TEMPLATE_INFO (d)
	     : DECL_TEMPLATE_INFO (d));

  /* Already on the list -- nothing to do.  */
  if (TI_PENDING_TEMPLATE_FLAG (ti))
    return;

  /* We are called both from instantiate_decl, where we've already had a
     tinst_level pushed, and instantiate_template, where we haven't.
     Compensate.  */
  const bool need_push
    = (!current_tinst_level || current_tinst_level->decl != d);
  if (need_push)
    push_tinst_level (d);

  struct pending_template *entry = ggc_alloc<pending_template> ();
  entry->next = NULL;
  entry->tinst = current_tinst_level;

  /* Append ENTRY at the tail of the pending list.  */
  if (last_pending_template)
    last_pending_template->next = entry;
  else
    pending_templates = entry;
  last_pending_template = entry;

  TI_PENDING_TEMPLATE_FLAG (ti) = 1;

  if (need_push)
    pop_tinst_level ();
}
/* Return a TEMPLATE_ID_EXPR corresponding to the indicated FNS and
ARGLIST. Valid choices for FNS are given in the cp-tree.def
documentation for TEMPLATE_ID_EXPR. */
tree
lookup_template_function (tree fns, tree arglist)
{
  if (fns == error_mark_node || arglist == error_mark_node)
    return error_mark_node;

  gcc_assert (!arglist || TREE_CODE (arglist) == TREE_VEC);

  /* FNS must name a function template (overload set or identifier).  */
  if (!is_overloaded_fn (fns) && !identifier_p (fns))
    {
      error ("%q#D is not a function template", fns);
      return error_mark_node;
    }

  if (BASELINK_P (fns))
    {
      /* Wrap the functions inside the baselink and hand back the
	 baselink itself.  */
      BASELINK_FUNCTIONS (fns) = build2 (TEMPLATE_ID_EXPR,
					 unknown_type_node,
					 BASELINK_FUNCTIONS (fns),
					 arglist);
      return fns;
    }

  tree result_type = TREE_TYPE (fns);
  if (TREE_CODE (fns) == OVERLOAD || !result_type)
    result_type = unknown_type_node;

  return build2 (TEMPLATE_ID_EXPR, result_type, fns, arglist);
}
/* Within the scope of a template class S<T>, the name S gets bound
(in build_self_reference) to a TYPE_DECL for the class, not a
TEMPLATE_DECL. If DECL is a TYPE_DECL for current_class_type,
or one of its enclosing classes, and that type is a template,
return the associated TEMPLATE_DECL. Otherwise, the original
DECL is returned.
Also handle the case when DECL is a TREE_LIST of ambiguous
injected-class-names from different bases. */
tree
maybe_get_template_decl_from_type_decl (tree decl)
{
  if (decl == NULL_TREE)
    return decl;

  /* DR 176: A lookup that finds an injected-class-name (10.2
     [class.member.lookup]) can result in an ambiguity in certain cases
     (for example, if it is found in more than one base class). If all of
     the injected-class-names that are found refer to specializations of
     the same class template, and if the name is followed by a
     template-argument-list, the reference refers to the class template
     itself and not a specialization thereof, and is not ambiguous.  */
  if (TREE_CODE (decl) == TREE_LIST)
    {
      tree tmpl = NULL_TREE;
      tree node = decl;
      for (; node; node = TREE_CHAIN (node))
	{
	  tree elt
	    = maybe_get_template_decl_from_type_decl (TREE_VALUE (node));
	  if (!tmpl)
	    tmpl = elt;
	  else if (tmpl != elt)
	    break;
	}
      /* We walked off the end iff every entry agreed on one template.  */
      if (tmpl && node == NULL_TREE)
	return tmpl;
      return decl;
    }

  /* A self-reference TYPE_DECL for a class template maps to the
     template itself.  */
  if (DECL_SELF_REFERENCE_P (decl)
      && CLASSTYPE_TEMPLATE_INFO (TREE_TYPE (decl)))
    return CLASSTYPE_TI_TEMPLATE (TREE_TYPE (decl));

  return decl;
}
/* Given an IDENTIFIER_NODE (or type TEMPLATE_DECL) and a chain of
   parameters, find the desired type.

   D1 is the PTYPENAME terminal, and ARGLIST is the list of arguments.

   IN_DECL, if non-NULL, is the template declaration we are trying to
   instantiate.

   If ENTERING_SCOPE is nonzero, we are about to enter the scope of
   the class we are looking up.

   Issue error and warning messages under control of COMPLAIN.

   If the template class is really a local class in a template
   function, then the FUNCTION_CONTEXT is the function in which it is
   being instantiated.

   ??? Note that this function is currently called *twice* for each
   template-id: the first time from the parser, while creating the
   incomplete type (finish_template_type), and the second time during the
   real instantiation (instantiate_template_class).  This is surely something
   that we want to avoid.  It also causes some problems with argument
   coercion (see convert_nontype_argument for more information on this).  */

static tree
lookup_template_class_1 (tree d1, tree arglist, tree in_decl, tree context,
			 int entering_scope, tsubst_flags_t complain)
{
  tree templ = NULL_TREE, parmlist;
  tree t;
  spec_entry **slot;
  spec_entry *entry;
  spec_entry elt;
  hashval_t hash;

  /* Step 1: resolve D1 (which may be an identifier, a TYPE_DECL, a
     type, or a template) to the TEMPLATE_DECL in TEMPL, updating
     CONTEXT where the decl carries one.  */
  if (identifier_p (d1))
    {
      tree value = innermost_non_namespace_value (d1);
      if (value && DECL_TEMPLATE_TEMPLATE_PARM_P (value))
	templ = value;
      else
	{
	  if (context)
	    push_decl_namespace (context);
	  templ = lookup_name (d1);
	  templ = maybe_get_template_decl_from_type_decl (templ);
	  if (context)
	    pop_decl_namespace ();
	}
      if (templ)
	context = DECL_CONTEXT (templ);
    }
  else if (TREE_CODE (d1) == TYPE_DECL && MAYBE_CLASS_TYPE_P (TREE_TYPE (d1)))
    {
      tree type = TREE_TYPE (d1);

      /* If we are declaring a constructor, say A<T>::A<T>, we will get
	 an implicit typename for the second A.  Deal with it.  */
      if (TREE_CODE (type) == TYPENAME_TYPE && TREE_TYPE (type))
	type = TREE_TYPE (type);

      if (CLASSTYPE_TEMPLATE_INFO (type))
	{
	  templ = CLASSTYPE_TI_TEMPLATE (type);
	  d1 = DECL_NAME (templ);
	}
    }
  else if (TREE_CODE (d1) == ENUMERAL_TYPE
	   || (TYPE_P (d1) && MAYBE_CLASS_TYPE_P (d1)))
    {
      templ = TYPE_TI_TEMPLATE (d1);
      d1 = DECL_NAME (templ);
    }
  else if (DECL_TYPE_TEMPLATE_P (d1))
    {
      templ = d1;
      d1 = DECL_NAME (templ);
      context = DECL_CONTEXT (templ);
    }
  else if (DECL_TEMPLATE_TEMPLATE_PARM_P (d1))
    {
      templ = d1;
      d1 = DECL_NAME (templ);
    }

  /* Issue an error message if we didn't find a template.  */
  if (! templ)
    {
      if (complain & tf_error)
	error ("%qT is not a template", d1);
      return error_mark_node;
    }

  if (TREE_CODE (templ) != TEMPLATE_DECL
	 /* Make sure it's a user visible template, if it was named by
	    the user.  */
      || ((complain & tf_user) && !DECL_TEMPLATE_PARM_P (templ)
	  && !PRIMARY_TEMPLATE_P (templ)))
    {
      if (complain & tf_error)
	{
	  error ("non-template type %qT used as a template", d1);
	  if (in_decl)
	    error ("for template declaration %q+D", in_decl);
	}
      return error_mark_node;
    }

  complain &= ~tf_user;

  /* An alias that just changes the name of a template is equivalent to the
     other template, so if any of the arguments are pack expansions, strip
     the alias to avoid problems with a pack expansion passed to a non-pack
     alias template parameter (DR 1430).  */
  if (pack_expansion_args_count (INNERMOST_TEMPLATE_ARGS (arglist)))
    templ = get_underlying_template (templ);

  if (DECL_TEMPLATE_TEMPLATE_PARM_P (templ))
    {
      /* Binding a template template parameter: coerce the arguments
	 against the parameter's own parameter list and build a bound
	 TTP node rather than a real instantiation.  */
      tree parm;
      tree arglist2 = coerce_template_args_for_ttp (templ, arglist, complain);
      if (arglist2 == error_mark_node
	  || (!uses_template_parms (arglist2)
	      && check_instantiated_args (templ, arglist2, complain)))
	return error_mark_node;

      parm = bind_template_template_parm (TREE_TYPE (templ), arglist2);
      return parm;
    }
  else
    {
      tree template_type = TREE_TYPE (templ);
      tree gen_tmpl;
      tree type_decl;
      tree found = NULL_TREE;
      int arg_depth;
      int parm_depth;
      int is_dependent_type;
      int use_partial_inst_tmpl = false;

      if (template_type == error_mark_node)
	/* An error occurred while building the template TEMPL, and a
	   diagnostic has most certainly been emitted for that
	   already.  Let's propagate that error.  */
	return error_mark_node;

      gen_tmpl = most_general_template (templ);
      parmlist = DECL_TEMPLATE_PARMS (gen_tmpl);
      parm_depth = TMPL_PARMS_DEPTH (parmlist);
      arg_depth = TMPL_ARGS_DEPTH (arglist);

      if (arg_depth == 1 && parm_depth > 1)
	{
	  /* We've been given an incomplete set of template arguments.
	     For example, given:

	       template <class T> struct S1 {
		 template <class U> struct S2 {};
		 template <class U> struct S2<U*> {};
		};

	     we will be called with an ARGLIST of `U*', but the
	     TEMPLATE will be `template <class T> template
	     <class U> struct S1<T>::S2'.  We must fill in the missing
	     arguments.  */
	  tree ti = TYPE_TEMPLATE_INFO_MAYBE_ALIAS (TREE_TYPE (templ));
	  arglist = add_outermost_template_args (TI_ARGS (ti), arglist);
	  arg_depth = TMPL_ARGS_DEPTH (arglist);
	}

      /* Now we should have enough arguments.  */
      gcc_assert (parm_depth == arg_depth);

      /* From here on, we're only interested in the most general
	 template.  */

      /* Calculate the BOUND_ARGS.  These will be the args that are
	 actually tsubst'd into the definition to create the
	 instantiation.  */
      arglist = coerce_innermost_template_parms (parmlist, arglist, gen_tmpl,
						 complain,
						 /*require_all_args=*/true,
						 /*use_default_args=*/true);

      if (arglist == error_mark_node)
	/* We were unable to bind the arguments.  */
	return error_mark_node;

      /* In the scope of a template class, explicit references to the
	 template class refer to the type of the template, not any
	 instantiation of it.  For example, in:

	   template <class T> class C { void f(C<T>); }

	 the `C<T>' is just the same as `C'.  Outside of the
	 class, however, such a reference is an instantiation.  */
      if (entering_scope
	  || !PRIMARY_TEMPLATE_P (gen_tmpl)
	  || currently_open_class (template_type))
	{
	  tree tinfo = TYPE_TEMPLATE_INFO (template_type);

	  if (tinfo && comp_template_args (TI_ARGS (tinfo), arglist))
	    return template_type;
	}

      /* If we already have this specialization, return it.  */
      elt.tmpl = gen_tmpl;
      elt.args = arglist;
      elt.spec = NULL_TREE;
      hash = spec_hasher::hash (&elt);
      entry = type_specializations->find_with_hash (&elt, hash);

      if (entry)
	return entry->spec;

      /* If the template's constraints are not satisfied,
	 then we cannot form a valid type.

	 Note that the check is deferred until after the hash
	 lookup.  This prevents redundant checks on previously
	 instantiated specializations.  */
      if (flag_concepts && !constraints_satisfied_p (gen_tmpl, arglist))
	{
	  if (complain & tf_error)
	    {
	      error ("template constraint failure");
	      diagnose_constraints (input_location, gen_tmpl, arglist);
	    }
	  return error_mark_node;
	}

      is_dependent_type = uses_template_parms (arglist);

      /* If the deduced arguments are invalid, then the binding
	 failed.  */
      if (!is_dependent_type
	  && check_instantiated_args (gen_tmpl,
				      INNERMOST_TEMPLATE_ARGS (arglist),
				      complain))
	return error_mark_node;

      if (!is_dependent_type
	  && !PRIMARY_TEMPLATE_P (gen_tmpl)
	  && !LAMBDA_TYPE_P (TREE_TYPE (gen_tmpl))
	  && TREE_CODE (CP_DECL_CONTEXT (gen_tmpl)) == NAMESPACE_DECL)
	{
	  /* A non-dependent full specialization at namespace scope is
	     just its underlying type; look it up by tag.  */
	  found = xref_tag_from_type (TREE_TYPE (gen_tmpl),
				      DECL_NAME (gen_tmpl),
				      /*tag_scope=*/ts_global);
	  return found;
	}

      context = tsubst (DECL_CONTEXT (gen_tmpl), arglist,
			complain, in_decl);
      if (context == error_mark_node)
	return error_mark_node;

      if (!context)
	context = global_namespace;

      /* Create the type.  */
      if (DECL_ALIAS_TEMPLATE_P (gen_tmpl))
	{
	  /* The user referred to a specialization of an alias
	    template represented by GEN_TMPL.

	    [temp.alias]/2 says:

	        When a template-id refers to the specialization of an
		alias template, it is equivalent to the associated
		type obtained by substitution of its
		template-arguments for the template-parameters in the
		type-id of the alias template.  */

	  t = tsubst (TREE_TYPE (gen_tmpl), arglist, complain, in_decl);
	  /* Note that the call above (by indirectly calling
	     register_specialization in tsubst_decl) registers the
	     TYPE_DECL representing the specialization of the alias
	     template.  So next time someone substitutes ARGLIST for
	     the template parms into the alias template (GEN_TMPL),
	     she'll get that TYPE_DECL back.  */

	  if (t == error_mark_node)
	    return t;
	}
      else if (TREE_CODE (template_type) == ENUMERAL_TYPE)
	{
	  if (!is_dependent_type)
	    {
	      set_current_access_from_decl (TYPE_NAME (template_type));
	      t = start_enum (TYPE_IDENTIFIER (template_type), NULL_TREE,
			      tsubst (ENUM_UNDERLYING_TYPE (template_type),
				      arglist, complain, in_decl),
			      tsubst_attributes (TYPE_ATTRIBUTES (template_type),
						 arglist, complain, in_decl),
			      SCOPED_ENUM_P (template_type), NULL);

	      if (t == error_mark_node)
		return t;
	    }
	  else
            {
              /* We don't want to call start_enum for this type, since
                 the values for the enumeration constants may involve
                 template parameters.  And, no one should be interested
                 in the enumeration constants for such a type.  */
              t = cxx_make_type (ENUMERAL_TYPE);
              SET_SCOPED_ENUM_P (t, SCOPED_ENUM_P (template_type));
            }
          SET_OPAQUE_ENUM_P (t, OPAQUE_ENUM_P (template_type));
	  ENUM_FIXED_UNDERLYING_TYPE_P (t)
	    = ENUM_FIXED_UNDERLYING_TYPE_P (template_type);
	}
      else if (CLASS_TYPE_P (template_type))
	{
	  t = make_class_type (TREE_CODE (template_type));
	  CLASSTYPE_DECLARED_CLASS (t)
	    = CLASSTYPE_DECLARED_CLASS (template_type);
	  SET_CLASSTYPE_IMPLICIT_INSTANTIATION (t);

	  /* A local class.  Make sure the decl gets registered properly.  */
	  if (context == current_function_decl)
	    pushtag (DECL_NAME (gen_tmpl), t, /*tag_scope=*/ts_current);

	  if (comp_template_args (CLASSTYPE_TI_ARGS (template_type), arglist))
	    /* This instantiation is another name for the primary
	       template type. Set the TYPE_CANONICAL field
	       appropriately. */
	    TYPE_CANONICAL (t) = template_type;
	  else if (any_template_arguments_need_structural_equality_p (arglist))
	    /* Some of the template arguments require structural
	       equality testing, so this template class requires
	       structural equality testing. */
	    SET_TYPE_STRUCTURAL_EQUALITY (t);
	}
      else
	gcc_unreachable ();

      /* If we called start_enum or pushtag above, this information
	 will already be set up.  */
      if (!TYPE_NAME (t))
	{
	  TYPE_CONTEXT (t) = FROB_CONTEXT (context);

	  type_decl = create_implicit_typedef (DECL_NAME (gen_tmpl), t);
	  DECL_CONTEXT (type_decl) = TYPE_CONTEXT (t);
	  DECL_SOURCE_LOCATION (type_decl)
	    = DECL_SOURCE_LOCATION (TYPE_STUB_DECL (template_type));
	}
      else
	type_decl = TYPE_NAME (t);

      if (CLASS_TYPE_P (template_type))
	{
	  /* Propagate access and visibility from the primary template
	     to the new specialization's TYPE_DECL.  */
	  TREE_PRIVATE (type_decl)
	    = TREE_PRIVATE (TYPE_MAIN_DECL (template_type));
	  TREE_PROTECTED (type_decl)
	    = TREE_PROTECTED (TYPE_MAIN_DECL (template_type));
	  if (CLASSTYPE_VISIBILITY_SPECIFIED (template_type))
	    {
	      DECL_VISIBILITY_SPECIFIED (type_decl) = 1;
	      DECL_VISIBILITY (type_decl) = CLASSTYPE_VISIBILITY (template_type);
	    }
	}

      if (OVERLOAD_TYPE_P (t)
	  && !DECL_ALIAS_TEMPLATE_P (gen_tmpl))
	{
	  /* Copy the abi_tag and may_alias attributes, if present, from
	     the primary template to the specialization.  */
	  static const char *tags[] = {"abi_tag", "may_alias"};
	  for (unsigned ix = 0; ix != 2; ix++)
	    {
	      tree attributes
		= lookup_attribute (tags[ix], TYPE_ATTRIBUTES (template_type));
	      if (attributes)
		TYPE_ATTRIBUTES (t)
		  = tree_cons (TREE_PURPOSE (attributes),
			       TREE_VALUE (attributes),
			       TYPE_ATTRIBUTES (t));
	    }
	}

      /* Let's consider the explicit specialization of a member
         of a class template specialization that is implicitly instantiated,
	 e.g.:
	     template<class T>
	     struct S
	     {
	       template<class U> struct M {}; //#0
	     };

	     template<>
	     template<>
	     struct S<int>::M<char> //#1
	     {
	       int i;
	     };
	[temp.expl.spec]/4 says this is valid.

	In this case, when we write:
	S<int>::M<char> m;

	M is instantiated from the CLASSTYPE_TI_TEMPLATE of #1, not from
	the one of #0.

	When we encounter #1, we want to store the partial instantiation
	of M (template<class T> S<int>::M<T>) in its CLASSTYPE_TI_TEMPLATE.

	For all cases other than this "explicit specialization of member of a
	class template", we just want to store the most general template into
	the CLASSTYPE_TI_TEMPLATE of M.

	This case of "explicit specialization of member of a class template"
	only happens when:
	1/ the enclosing class is an instantiation of, and therefore not
	the same as, the context of the most general template, and
	2/ we aren't looking at the partial instantiation itself, i.e.
	the innermost arguments are not the same as the innermost parms of
	the most general template.

	So it's only when 1/ and 2/ happens that we want to use the partial
	instantiation of the member template in lieu of its most general
	template.  */

      if (PRIMARY_TEMPLATE_P (gen_tmpl)
	  && TMPL_ARGS_HAVE_MULTIPLE_LEVELS (arglist)
	  /* the enclosing class must be an instantiation...  */
	  && CLASS_TYPE_P (context)
	  && !same_type_p (context, DECL_CONTEXT (gen_tmpl)))
	{
	  /* Temporarily drop the innermost argument level so the
	     partial instantiation's args can be substituted; the
	     length is restored immediately afterwards.  */
	  TREE_VEC_LENGTH (arglist)--;
	  ++processing_template_decl;
	  tree tinfo = TYPE_TEMPLATE_INFO_MAYBE_ALIAS (TREE_TYPE (gen_tmpl));
	  tree partial_inst_args =
	    tsubst (INNERMOST_TEMPLATE_ARGS (TI_ARGS (tinfo)),
		    arglist, complain, NULL_TREE);
	  --processing_template_decl;
	  TREE_VEC_LENGTH (arglist)++;
	  if (partial_inst_args == error_mark_node)
	    return error_mark_node;
	  use_partial_inst_tmpl =
	    /*...and we must not be looking at the partial instantiation
	     itself. */
	    !comp_template_args (INNERMOST_TEMPLATE_ARGS (arglist),
				 partial_inst_args);
	}

      if (!use_partial_inst_tmpl)
	/* This case is easy; there are no member templates involved.  */
	found = gen_tmpl;
      else
	{
	  /* This is a full instantiation of a member template.  Find
	     the partial instantiation of which this is an instance.  */

	  /* Temporarily reduce by one the number of levels in the ARGLIST
	     so as to avoid comparing the last set of arguments.  */
	  TREE_VEC_LENGTH (arglist)--;
	  found = tsubst (gen_tmpl, arglist, complain, NULL_TREE);
	  TREE_VEC_LENGTH (arglist)++;
	  /* FOUND is either a proper class type, or an alias
	     template specialization.  In the later case, it's a
	     TYPE_DECL, resulting from the substituting of arguments
	     for parameters in the TYPE_DECL of the alias template
	     done earlier.  So be careful while getting the template
	     of FOUND.  */
	  found = (TREE_CODE (found) == TEMPLATE_DECL
		   ? found
		   : (TREE_CODE (found) == TYPE_DECL
		      ? DECL_TI_TEMPLATE (found)
		      : CLASSTYPE_TI_TEMPLATE (found)));
	}

      // Build template info for the new specialization.
      SET_TYPE_TEMPLATE_INFO (t, build_template_info (found, arglist));

      elt.spec = t;
      slot = type_specializations->find_slot_with_hash (&elt, hash, INSERT);
      entry = ggc_alloc<spec_entry> ();
      *entry = elt;
      *slot = entry;

      /* Note this use of the partial instantiation so we can check it
	 later in maybe_process_partial_specialization.  */
      DECL_TEMPLATE_INSTANTIATIONS (found)
	= tree_cons (arglist, t,
		     DECL_TEMPLATE_INSTANTIATIONS (found));

      if (TREE_CODE (template_type) == ENUMERAL_TYPE && !is_dependent_type
	  && !DECL_ALIAS_TEMPLATE_P (gen_tmpl))
	/* Now that the type has been registered on the instantiations
	   list, we set up the enumerators.  Because the enumeration
	   constants may involve the enumeration type itself, we make
	   sure to register the type first, and then create the
	   constants.  That way, doing tsubst_expr for the enumeration
	   constants won't result in recursive calls here; we'll find
	   the instantiation and exit above.  */
	tsubst_enum (template_type, t, arglist);

      if (CLASS_TYPE_P (template_type) && is_dependent_type)
	/* If the type makes use of template parameters, the
	   code that generates debugging information will crash.  */
	DECL_IGNORED_P (TYPE_MAIN_DECL (t)) = 1;

      /* Possibly limit visibility based on template args.  */
      TREE_PUBLIC (type_decl) = 1;
      determine_visibility (type_decl);

      inherit_targ_abi_tags (t);

      return t;
    }
}
/* Public entry point for lookup_template_class_1; accounts the work
   under the template-instantiation timer.  */

tree
lookup_template_class (tree d1, tree arglist, tree in_decl, tree context,
		       int entering_scope, tsubst_flags_t complain)
{
  timevar_push (TV_TEMPLATE_INST);
  tree result = lookup_template_class_1 (d1, arglist, in_decl, context,
					 entering_scope, complain);
  timevar_pop (TV_TEMPLATE_INST);
  return result;
}
/* Build a TEMPLATE_ID_EXPR naming the variable template TEMPL with
   arguments ARGLIST.  */

tree
lookup_template_variable (tree templ, tree arglist)
{
  /* A variable concept is always of type bool; any other template-id
     is left untyped (NULL_TREE), since it could still resolve to an
     explicit or partial specialization.  */
  tree type;
  if (flag_concepts && variable_concept_p (templ))
    type = boolean_type_node;
  else
    type = NULL_TREE;
  return build2 (TEMPLATE_ID_EXPR, type, templ, arglist);
}
/* Instantiate a variable declaration from a TEMPLATE_ID_EXPR for use.
   VAR is the TEMPLATE_ID_EXPR; returns the instantiated VAR_DECL, the
   evaluated concept value, or error_mark_node on constraint failure.  */

tree
finish_template_variable (tree var, tsubst_flags_t complain)
{
  tree tmpl = TREE_OPERAND (var, 0);
  tree args = TREE_OPERAND (var, 1);

  /* We never want to return a VAR_DECL for a variable concept, since they
     aren't instantiated.  In a template, leave the TEMPLATE_ID_EXPR alone.  */
  bool is_concept = flag_concepts && variable_concept_p (tmpl);
  if (is_concept && processing_template_decl)
    return var;

  /* Complete the argument list with the enclosing levels, then coerce
     against the most general template's parameters.  */
  tree outer_args = DECL_TI_ARGS (DECL_TEMPLATE_RESULT (tmpl));
  args = add_outermost_template_args (outer_args, args);

  tmpl = most_general_template (tmpl);
  tree parms = DECL_TEMPLATE_PARMS (tmpl);
  args = coerce_innermost_template_parms (parms, args, tmpl, complain,
					  /*req_all*/true,
					  /*use_default*/true);

  if (flag_concepts && !constraints_satisfied_p (tmpl, args))
    {
      if (complain & tf_error)
	{
	  error ("use of invalid variable template %qE", var);
	  diagnose_constraints (location_of (var), tmpl, args);
	}
      return error_mark_node;
    }

  /* If a template-id refers to a specialization of a variable
     concept, then the expression is true if and only if the
     concept's constraints are satisfied by the given template
     arguments.

     NOTE: This is an extension of Concepts Lite TS that
     allows constraints to be used in expressions. */
  if (is_concept)
    return evaluate_variable_concept (DECL_TEMPLATE_RESULT (tmpl), args);

  return instantiate_template (tmpl, args, complain);
}
/* Construct a TEMPLATE_ID_EXPR for the given variable template TEMPL having
   TARGS template args, and instantiate it if it's not dependent.  */

tree
lookup_and_finish_template_variable (tree templ, tree targs,
				     tsubst_flags_t complain)
{
  tree var = lookup_template_variable (templ, targs);

  /* Dependent arguments: leave the TEMPLATE_ID_EXPR for later.  */
  if (any_dependent_template_arguments_p (targs))
    return convert_from_reference (var);

  /* Otherwise instantiate now and mark the result as used.  */
  var = finish_template_variable (var, complain);
  mark_used (var);
  return convert_from_reference (var);
}
/* Client data threaded through for_each_template_parm_r by walk_tree.  */
struct pair_fn_data
{
  tree_fn_t fn;       /* Callback invoked on each template parameter found.  */
  tree_fn_t any_fn;   /* Optional callback tried on every visited node first.  */
  void *data;         /* Opaque client data forwarded to FN/ANY_FN.  */
  /* True when we should also visit template parameters that occur in
     non-deduced contexts.  */
  bool include_nondeduced_p;
  hash_set<tree> *visited;  /* Nodes already walked, to avoid revisiting.  */
};
/* Called from for_each_template_parm via walk_tree.  TP points at the
   node being visited, WALK_SUBTREES may be cleared to prune the walk,
   and D is the pair_fn_data packet.  Returns a non-NULL tree to stop
   the walk (the found parameter, or error_mark_node).  */

static tree
for_each_template_parm_r (tree *tp, int *walk_subtrees, void *d)
{
  tree t = *tp;
  struct pair_fn_data *pfd = (struct pair_fn_data *) d;
  tree_fn_t fn = pfd->fn;
  void *data = pfd->data;
  tree result = NULL_TREE;

  /* Recurse into NODE with the same callbacks/visited set; jump to OUT
     as soon as any recursive walk produces a result.  */
#define WALK_SUBTREE(NODE)						\
  do									\
    {									\
      result = for_each_template_parm (NODE, fn, data, pfd->visited,	\
				       pfd->include_nondeduced_p,	\
				       pfd->any_fn);			\
      if (result) goto out;						\
    }									\
  while (0)

  if (pfd->any_fn && (*pfd->any_fn)(t, data))
    return t;

  if (TYPE_P (t)
      && (pfd->include_nondeduced_p || TREE_CODE (t) != TYPENAME_TYPE))
    WALK_SUBTREE (TYPE_CONTEXT (t));

  switch (TREE_CODE (t))
    {
    case RECORD_TYPE:
      if (TYPE_PTRMEMFUNC_P (t))
	break;
      /* Fall through.  */

    case UNION_TYPE:
    case ENUMERAL_TYPE:
      if (!TYPE_TEMPLATE_INFO (t))
	*walk_subtrees = 0;
      else
	WALK_SUBTREE (TYPE_TI_ARGS (t));
      break;

    case INTEGER_TYPE:
      /* Array bounds and the like may hide template parameters.  */
      WALK_SUBTREE (TYPE_MIN_VALUE (t));
      WALK_SUBTREE (TYPE_MAX_VALUE (t));
      break;

    case METHOD_TYPE:
      /* Since we're not going to walk subtrees, we have to do this
	 explicitly here.  */
      WALK_SUBTREE (TYPE_METHOD_BASETYPE (t));
      /* Fall through.  */

    case FUNCTION_TYPE:
      /* Check the return type.  */
      WALK_SUBTREE (TREE_TYPE (t));

      /* Check the parameter types.  Since default arguments are not
	 instantiated until they are needed, the TYPE_ARG_TYPES may
	 contain expressions that involve template parameters.  But,
	 no-one should be looking at them yet.  And, once they're
	 instantiated, they don't contain template parameters, so
	 there's no point in looking at them then, either.  */
      {
	tree parm;

	for (parm = TYPE_ARG_TYPES (t); parm; parm = TREE_CHAIN (parm))
	  WALK_SUBTREE (TREE_VALUE (parm));

	/* Since we've already handled the TYPE_ARG_TYPES, we don't
	   want walk_tree walking into them itself.  */
	*walk_subtrees = 0;
      }

      if (flag_noexcept_type)
	{
	  /* A noexcept-specifier is part of the function type (C++17)
	     and may itself be dependent.  */
	  tree spec = TYPE_RAISES_EXCEPTIONS (t);
	  if (spec)
	    WALK_SUBTREE (TREE_PURPOSE (spec));
	}
      break;

    case TYPEOF_TYPE:
    case UNDERLYING_TYPE:
      if (pfd->include_nondeduced_p
	  && for_each_template_parm (TYPE_VALUES_RAW (t), fn, data,
				     pfd->visited,
				     pfd->include_nondeduced_p,
				     pfd->any_fn))
	return error_mark_node;
      break;

    case FUNCTION_DECL:
    case VAR_DECL:
      if (DECL_LANG_SPECIFIC (t) && DECL_TEMPLATE_INFO (t))
	WALK_SUBTREE (DECL_TI_ARGS (t));
      /* Fall through.  */

    case PARM_DECL:
    case CONST_DECL:
      if (TREE_CODE (t) == CONST_DECL && DECL_TEMPLATE_PARM_P (t))
	WALK_SUBTREE (DECL_INITIAL (t));
      if (DECL_CONTEXT (t)
	  && pfd->include_nondeduced_p)
	WALK_SUBTREE (DECL_CONTEXT (t));
      break;

    case BOUND_TEMPLATE_TEMPLATE_PARM:
      /* Record template parameters such as `T' inside `TT<T>'.  */
      WALK_SUBTREE (TYPE_TI_ARGS (t));
      /* Fall through.  */

    case TEMPLATE_TEMPLATE_PARM:
    case TEMPLATE_TYPE_PARM:
    case TEMPLATE_PARM_INDEX:
      /* An actual template parameter: report it through FN, or (with
	 no FN) treat any parameter as a hit.  */
      if (fn && (*fn)(t, data))
	return t;
      else if (!fn)
	return t;
      break;

    case TEMPLATE_DECL:
      /* A template template parameter is encountered.  */
      if (DECL_TEMPLATE_TEMPLATE_PARM_P (t))
	WALK_SUBTREE (TREE_TYPE (t));

      /* Already substituted template template parameter */
      *walk_subtrees = 0;
      break;

    case TYPENAME_TYPE:
      /* A template-id in a TYPENAME_TYPE might be a deduced context after
	 partial instantiation.  */
      WALK_SUBTREE (TYPENAME_TYPE_FULLNAME (t));
      break;

    case CONSTRUCTOR:
      if (TREE_TYPE (t) && TYPE_PTRMEMFUNC_P (TREE_TYPE (t))
	  && pfd->include_nondeduced_p)
	WALK_SUBTREE (TYPE_PTRMEMFUNC_FN_TYPE (TREE_TYPE (t)));
      break;

    case INDIRECT_REF:
    case COMPONENT_REF:
      /* If there's no type, then this thing must be some expression
	 involving template parameters.  */
      if (!fn && !TREE_TYPE (t))
	return error_mark_node;
      break;

    case MODOP_EXPR:
    case CAST_EXPR:
    case IMPLICIT_CONV_EXPR:
    case REINTERPRET_CAST_EXPR:
    case CONST_CAST_EXPR:
    case STATIC_CAST_EXPR:
    case DYNAMIC_CAST_EXPR:
    case ARROW_EXPR:
    case DOTSTAR_EXPR:
    case TYPEID_EXPR:
    case PSEUDO_DTOR_EXPR:
      if (!fn)
	return error_mark_node;
      break;

    default:
      break;
    }

  #undef WALK_SUBTREE

  /* We didn't find any template parameters we liked.  */
 out:
  return result;
}
/* Walk T and invoke FN (with DATA) on every TEMPLATE_TYPE_PARM,
   TEMPLATE_TEMPLATE_PARM, BOUND_TEMPLATE_TEMPLATE_PARM and
   TEMPLATE_PARM_INDEX encountered.

   The walk stops, and a nonzero value is returned, as soon as FN
   returns nonzero for some parameter; if FN never does, the result is
   0.  A NULL FN behaves as a function that always returns 1.

   VISITED, if supplied, is the set of already-walked nodes; otherwise a
   temporary set is allocated for this call and freed before returning.

   When INCLUDE_NONDEDUCED_P is true, template parameters appearing in
   non-deduced contexts are visited as well; otherwise only deducible
   occurrences are considered.  */

static tree
for_each_template_parm (tree t, tree_fn_t fn, void* data,
			hash_set<tree> *visited,
			bool include_nondeduced_p,
			tree_fn_t any_fn)
{
  struct pair_fn_data pfd;

  /* Package the callbacks for for_each_template_parm_r.  */
  pfd.fn = fn;
  pfd.any_fn = any_fn;
  pfd.data = data;
  pfd.include_nondeduced_p = include_nondeduced_p;

  /* Use the caller's visited set when given; otherwise own a fresh one
     for the duration of this walk.  (walk_tree_without_duplicates can't
     be used here because for_each_template_parm_r recurses back into
     this function.)  */
  bool own_visited = (visited == NULL);
  pfd.visited = own_visited ? new hash_set<tree> : visited;

  tree result = cp_walk_tree (&t,
			      for_each_template_parm_r,
			      &pfd,
			      pfd.visited);

  if (own_visited)
    {
      delete pfd.visited;
      pfd.visited = 0;
    }

  return result;
}
/* Returns true if T depends on any template parameter.  T may be a
   type, a TREE_VEC/TREE_LIST of arguments, a declaration, or an
   expression; NULL_TREE is trivially non-dependent.  */

int
uses_template_parms (tree t)
{
  if (t == NULL_TREE)
    return false;

  bool dependent_p;
  int saved_processing_template_decl;

  /* The dependency predicates below only do real work inside a
     template; temporarily pretend we are in one if we aren't.  */
  saved_processing_template_decl = processing_template_decl;
  if (!saved_processing_template_decl)
    processing_template_decl = 1;

  /* Dispatch on the kind of node.  The order of tests matters: e.g.
     TYPE_DECL must be recognized before the generic DECL_P case.  */
  if (TYPE_P (t))
    dependent_p = dependent_type_p (t);
  else if (TREE_CODE (t) == TREE_VEC)
    dependent_p = any_dependent_template_arguments_p (t);
  else if (TREE_CODE (t) == TREE_LIST)
    /* A list is dependent if either its value or the rest of the
       chain is.  */
    dependent_p = (uses_template_parms (TREE_VALUE (t))
		   || uses_template_parms (TREE_CHAIN (t)));
  else if (TREE_CODE (t) == TYPE_DECL)
    dependent_p = dependent_type_p (TREE_TYPE (t));
  else if (DECL_P (t)
	   || EXPR_P (t)
	   || TREE_CODE (t) == TEMPLATE_PARM_INDEX
	   || TREE_CODE (t) == OVERLOAD
	   || BASELINK_P (t)
	   || identifier_p (t)
	   || TREE_CODE (t) == TRAIT_EXPR
	   || TREE_CODE (t) == CONSTRUCTOR
	   || CONSTANT_CLASS_P (t))
    dependent_p = (type_dependent_expression_p (t)
		   || value_dependent_expression_p (t));
  else
    {
      /* Nothing else should reach here except an error node.  */
      gcc_assert (t == error_mark_node);
      dependent_p = false;
    }

  processing_template_decl = saved_processing_template_decl;

  return dependent_p;
}
/* Returns true iff current_function_decl is an incompletely instantiated
template. Useful instead of processing_template_decl because the latter
is set to 0 during instantiate_non_dependent_expr. */
bool
in_template_function (void)
{
tree fn = current_function_decl;
bool ret;
++processing_template_decl;
ret = (fn && DECL_LANG_SPECIFIC (fn)
&& DECL_TEMPLATE_INFO (fn)
&& any_dependent_template_arguments_p (DECL_TI_ARGS (fn)));
--processing_template_decl;
return ret;
}
/* Returns true if T depends on any template parameter with level LEVEL.  */

bool
uses_template_parms_level (tree t, int level)
{
  tree found = for_each_template_parm (t, template_parm_this_level_p, &level,
				       NULL, /*include_nondeduced_p=*/true);
  return found != NULL_TREE;
}
/* Returns true if the signature of DECL depends on any template parameter from
   its enclosing class.  */

bool
uses_outer_template_parms (tree decl)
{
  int depth = template_class_depth (CP_DECL_CONTEXT (decl));

  /* Not nested in a template class at all: nothing to depend on.  */
  if (depth == 0)
    return false;

  /* The declared type itself.  */
  if (for_each_template_parm (TREE_TYPE (decl), template_parm_outer_level,
			      &depth, NULL, /*include_nondeduced_p=*/true))
    return true;

  /* The template's own parameter list, for a primary template.  */
  if (PRIMARY_TEMPLATE_P (decl)
      && for_each_template_parm (INNERMOST_TEMPLATE_PARMS
				 (DECL_TEMPLATE_PARMS (decl)),
				 template_parm_outer_level,
				 &depth, NULL, /*include_nondeduced_p=*/true))
    return true;

  /* Any associated constraints.  */
  if (tree ci = get_constraints (decl))
    {
      ci = CI_ASSOCIATED_CONSTRAINTS (ci);
      if (ci && for_each_template_parm (ci, template_parm_outer_level,
					&depth, NULL, /*nondeduced*/true))
	return true;
    }

  return false;
}
/* Returns TRUE iff D is an instantiation we don't need to do in an
   ill-formed translation unit, i.e. a variable or function that isn't
   usable in a constant expression.  */

static inline bool
neglectable_inst_p (tree d)
{
  if (!DECL_P (d))
    return false;
  /* Functions usable in constant expressions are the constexpr ones;
     for variables, defer to decl_maybe_constant_var_p.  */
  if (TREE_CODE (d) == FUNCTION_DECL)
    return !DECL_DECLARED_CONSTEXPR_P (d);
  return !decl_maybe_constant_var_p (d);
}
/* Returns TRUE iff we should refuse to instantiate DECL because it's
   neglectable and instantiated from within an erroneous instantiation.  */

static bool
limit_bad_template_recursion (tree decl)
{
  int errs = errorcount + sorrycount;
  struct tinst_level *lev = current_tinst_level;

  /* Cheap bail-outs: no instantiation context, no errors so far, or a
     DECL we must instantiate regardless.  */
  if (errs == 0 || lev == NULL || !neglectable_inst_p (decl))
    return false;

  /* Find the innermost neglectable instantiation on the stack.  */
  while (lev && !neglectable_inst_p (lev->decl))
    lev = lev->next;

  /* Refuse only if errors were added since that level was pushed.  */
  return lev != NULL && errs > lev->errors;
}
/* Current depth of the template-instantiation context stack.  */
static int tinst_depth;
/* Maximum allowed depth (-ftemplate-depth=); defined elsewhere.  */
extern int max_tinst_depth;
/* Deepest TINST_DEPTH reached; only updated when GATHER_STATISTICS.  */
int depth_reached;
/* NOTE(review): presumably the last instantiation context for which an
   error was reported (GC-rooted); not referenced in this chunk.  */
static GTY(()) struct tinst_level *last_error_tinst_level;
/* We're starting to instantiate D; record the template instantiation context
   for diagnostics and to restore it later.  Uses the current input
   location; see push_tinst_level_loc for the general form.  */

bool
push_tinst_level (tree d)
{
  location_t loc = input_location;
  return push_tinst_level_loc (d, loc);
}
/* We're starting to instantiate D; record the template instantiation context
   at LOC for diagnostics and to restore it later.  Returns false when the
   instantiation is refused (depth limit reached, or the current
   instantiation already went bad).  */

bool
push_tinst_level_loc (tree d, location_t loc)
{
  /* Enforce -ftemplate-depth=.  */
  if (tinst_depth >= max_tinst_depth)
    {
      /* Tell error.c not to try to instantiate any templates.  */
      at_eof = 2;
      fatal_error (input_location,
		   "template instantiation depth exceeds maximum of %d"
                   " (use -ftemplate-depth= to increase the maximum)",
                   max_tinst_depth);
      return false;
    }

  /* If the current instantiation caused problems, don't let it instantiate
     anything else.  Do allow deduction substitution and decls usable in
     constant expressions.  */
  if (limit_bad_template_recursion (d))
    return false;

  /* When not -quiet, dump template instantiations other than functions, since
     announce_function will take care of those.  */
  if (!quiet_flag
      && TREE_CODE (d) != TREE_LIST
      && TREE_CODE (d) != FUNCTION_DECL)
    fprintf (stderr, " %s", decl_as_string (d, TFF_DECL_SPECIFIERS));

  /* Push a new level onto the instantiation-context stack.  */
  struct tinst_level *level = ggc_alloc<tinst_level> ();
  level->decl = d;
  level->locus = loc;
  level->errors = errorcount + sorrycount;
  level->in_system_header_p = in_system_header_at (input_location);
  level->next = current_tinst_level;
  current_tinst_level = level;

  ++tinst_depth;
  if (GATHER_STATISTICS && tinst_depth > depth_reached)
    depth_reached = tinst_depth;

  return true;
}
/* We're done instantiating this template; return to the instantiation
context. */
void
pop_tinst_level (void)
{
/* Restore the filename and line number stashed away when we started
this instantiation. */
input_location = current_tinst_level->locus;
current_tinst_level = current_tinst_level->next;
--tinst_depth;
}
/* We're instantiating a deferred template; restore the template
   instantiation context in which the instantiation was requested, which
   is one step out from LEVEL.  Return the corresponding DECL or TYPE.  */

static tree
reopen_tinst_level (struct tinst_level *level)
{
  struct tinst_level *t;

  /* Recompute the stack depth by counting links from LEVEL outward.  */
  tinst_depth = 0;
  for (t = level; t; t = t->next)
    ++tinst_depth;

  /* Make LEVEL current, then pop once so we end up in the context that
     requested it.  */
  current_tinst_level = level;
  pop_tinst_level ();
  if (current_tinst_level)
    /* Reset the error baseline so limit_bad_template_recursion only
       counts errors added after this point.  */
    current_tinst_level->errors = errorcount+sorrycount;
  return level->decl;
}
/* Returns the TINST_LEVEL which gives the original instantiation
   context, i.e. the bottom of the instantiation stack (NULL when the
   stack is empty).  */

struct tinst_level *
outermost_tinst_level (void)
{
  struct tinst_level *level = current_tinst_level;
  while (level && level->next)
    level = level->next;
  return level;
}
/* DECL is a friend FUNCTION_DECL or TEMPLATE_DECL. ARGS is the
vector of template arguments, as for tsubst.
Returns an appropriate tsubst'd friend declaration. */
static tree
tsubst_friend_function (tree decl, tree args)
{
tree new_friend;
if (TREE_CODE (decl) == FUNCTION_DECL
&& DECL_TEMPLATE_INSTANTIATION (decl)
&& TREE_CODE (DECL_TI_TEMPLATE (decl)) != TEMPLATE_DECL)
/* This was a friend declared with an explicit template
argument list, e.g.:
friend void f<>(T);
to indicate that f was a template instantiation, not a new
function declaration. Now, we have to figure out what
instantiation of what template. */
{
tree template_id, arglist, fns;
tree new_args;
tree tmpl;
tree ns = decl_namespace_context (TYPE_MAIN_DECL (current_class_type));
/* Friend functions are looked up in the containing namespace scope.
We must enter that scope, to avoid finding member functions of the
current class with same name. */
push_nested_namespace (ns);
fns = tsubst_expr (DECL_TI_TEMPLATE (decl), args,
tf_warning_or_error, NULL_TREE,
/*integral_constant_expression_p=*/false);
pop_nested_namespace (ns);
arglist = tsubst (DECL_TI_ARGS (decl), args,
tf_warning_or_error, NULL_TREE);
template_id = lookup_template_function (fns, arglist);
new_friend = tsubst (decl, args, tf_warning_or_error, NULL_TREE);
tmpl = determine_specialization (template_id, new_friend,
&new_args,
/*need_member_template=*/0,
TREE_VEC_LENGTH (args),
tsk_none);
return instantiate_template (tmpl, new_args, tf_error);
}
new_friend = tsubst (decl, args, tf_warning_or_error, NULL_TREE);
/* The NEW_FRIEND will look like an instantiation, to the
compiler, but is not an instantiation from the point of view of
the language. For example, we might have had:
template <class T> struct S {
template <class U> friend void f(T, U);
};
Then, in S<int>, template <class U> void f(int, U) is not an
instantiation of anything. */
if (new_friend == error_mark_node)
return error_mark_node;
DECL_USE_TEMPLATE (new_friend) = 0;
if (TREE_CODE (decl) == TEMPLATE_DECL)
{
DECL_USE_TEMPLATE (DECL_TEMPLATE_RESULT (new_friend)) = 0;
DECL_SAVED_TREE (DECL_TEMPLATE_RESULT (new_friend))
= DECL_SAVED_TREE (DECL_TEMPLATE_RESULT (decl));
}
/* The mangled name for the NEW_FRIEND is incorrect. The function
is not a template instantiation and should not be mangled like
one. Therefore, we forget the mangling here; we'll recompute it
later if we need it. */
if (TREE_CODE (new_friend) != TEMPLATE_DECL)
{
SET_DECL_RTL (new_friend, NULL);
SET_DECL_ASSEMBLER_NAME (new_friend, NULL_TREE);
}
if (DECL_NAMESPACE_SCOPE_P (new_friend))
{
tree old_decl;
tree new_friend_template_info;
tree new_friend_result_template_info;
tree ns;
int new_friend_is_defn;
/* We must save some information from NEW_FRIEND before calling
duplicate decls since that function will free NEW_FRIEND if
possible. */
new_friend_template_info = DECL_TEMPLATE_INFO (new_friend);
new_friend_is_defn =
(DECL_INITIAL (DECL_TEMPLATE_RESULT
(template_for_substitution (new_friend)))
!= NULL_TREE);
if (TREE_CODE (new_friend) == TEMPLATE_DECL)
{
/* This declaration is a `primary' template. */
DECL_PRIMARY_TEMPLATE (new_friend) = new_friend;
new_friend_result_template_info
= DECL_TEMPLATE_INFO (DECL_TEMPLATE_RESULT (new_friend));
}
else
new_friend_result_template_info = NULL_TREE;
/* Inside pushdecl_namespace_level, we will push into the
current namespace. However, the friend function should go
into the namespace of the template. */
ns = decl_namespace_context (new_friend);
push_nested_namespace (ns);
old_decl = pushdecl_namespace_level (new_friend, /*is_friend=*/true);
pop_nested_namespace (ns);
if (old_decl == error_mark_node)
return error_mark_node;
if (old_decl != new_friend)
{
/* This new friend declaration matched an existing
declaration. For example, given:
template <class T> void f(T);
template <class U> class C {
template <class T> friend void f(T) {}
};
the friend declaration actually provides the definition
of `f', once C has been instantiated for some type. So,
old_decl will be the out-of-class template declaration,
while new_friend is the in-class definition.
But, if `f' was called before this point, the
instantiation of `f' will have DECL_TI_ARGS corresponding
to `T' but not to `U', references to which might appear
in the definition of `f'. Previously, the most general
template for an instantiation of `f' was the out-of-class
version; now it is the in-class version. Therefore, we
run through all specialization of `f', adding to their
DECL_TI_ARGS appropriately. In particular, they need a
new set of outer arguments, corresponding to the
arguments for this class instantiation.
The same situation can arise with something like this:
friend void f(int);
template <class T> class C {
friend void f(T) {}
};
when `C<int>' is instantiated. Now, `f(int)' is defined
in the class. */
if (!new_friend_is_defn)
/* On the other hand, if the in-class declaration does
*not* provide a definition, then we don't want to alter
existing definitions. We can just leave everything
alone. */
;
else
{
tree new_template = TI_TEMPLATE (new_friend_template_info);
tree new_args = TI_ARGS (new_friend_template_info);
/* Overwrite whatever template info was there before, if
any, with the new template information pertaining to
the declaration. */
DECL_TEMPLATE_INFO (old_decl) = new_friend_template_info;
if (TREE_CODE (old_decl) != TEMPLATE_DECL)
{
/* We should have called reregister_specialization in
duplicate_decls. */
gcc_assert (retrieve_specialization (new_template,
new_args, 0)
== old_decl);
/* Instantiate it if the global has already been used. */
if (DECL_ODR_USED (old_decl))
instantiate_decl (old_decl, /*defer_ok=*/true,
/*expl_inst_class_mem_p=*/false);
}
else
{
tree t;
/* Indicate that the old function template is a partial
instantiation. */
DECL_TEMPLATE_INFO (DECL_TEMPLATE_RESULT (old_decl))
= new_friend_result_template_info;
gcc_assert (new_template
== most_general_template (new_template));
gcc_assert (new_template != old_decl);
/* Reassign any specializations already in the hash table
to the new more general template, and add the
additional template args. */
for (t = DECL_TEMPLATE_INSTANTIATIONS (old_decl);
t != NULL_TREE;
t = TREE_CHAIN (t))
{
tree spec = TREE_VALUE (t);
spec_entry elt;
elt.tmpl = old_decl;
elt.args = DECL_TI_ARGS (spec);
elt.spec = NULL_TREE;
decl_specializations->remove_elt (&elt);
DECL_TI_ARGS (spec)
= add_outermost_template_args (new_args,
DECL_TI_ARGS (spec));
register_specialization
(spec, new_template, DECL_TI_ARGS (spec), true, 0);
}
DECL_TEMPLATE_INSTANTIATIONS (old_decl) = NULL_TREE;
}
}
/* The information from NEW_FRIEND has been merged into OLD_DECL
by duplicate_decls. */
new_friend = old_decl;
}
}
else
{
tree context = DECL_CONTEXT (new_friend);
bool dependent_p;
/* In the code
template <class T> class C {
template <class U> friend void C1<U>::f (); // case 1
friend void C2<T>::f (); // case 2
};
we only need to make sure CONTEXT is a complete type for
case 2. To distinguish between the two cases, we note that
CONTEXT of case 1 remains dependent type after tsubst while
this isn't true for case 2. */
++processing_template_decl;
dependent_p = dependent_type_p (context);
--processing_template_decl;
if (!dependent_p
&& !complete_type_or_else (context, NULL_TREE))
return error_mark_node;
if (COMPLETE_TYPE_P (context))
{
tree fn = new_friend;
/* do_friend adds the TEMPLATE_DECL for any member friend
template even if it isn't a member template, i.e.
template <class T> friend A<T>::f();
Look through it in that case. */
if (TREE_CODE (fn) == TEMPLATE_DECL
&& !PRIMARY_TEMPLATE_P (fn))
fn = DECL_TEMPLATE_RESULT (fn);
/* Check to see that the declaration is really present, and,
possibly obtain an improved declaration. */
fn = check_classfn (context, fn, NULL_TREE);
if (fn)
new_friend = fn;
}
}
return new_friend;
}
/* FRIEND_TMPL is a friend TEMPLATE_DECL.  ARGS is the vector of
   template arguments, as for tsubst.

   Returns an appropriate tsubst'd friend type or error_mark_node on
   failure.  */

static tree
tsubst_friend_class (tree friend_tmpl, tree args)
{
  tree friend_type;
  tree tmpl;
  tree context;

  /* A template template parameter used as a friend: substituting ARGS
     into its type yields the class bound to the parameter.  */
  if (DECL_TEMPLATE_TEMPLATE_PARM_P (friend_tmpl))
    {
      tree t = tsubst (TREE_TYPE (friend_tmpl), args, tf_none, NULL_TREE);
      return TREE_TYPE (t);
    }

  context = CP_DECL_CONTEXT (friend_tmpl);

  /* Enter the scope the friend template was declared in, so the name
     lookup below happens there rather than in the current scope.  A
     class context must itself be tsubst'd first.  */
  if (context != global_namespace)
    {
      if (TREE_CODE (context) == NAMESPACE_DECL)
	push_nested_namespace (context);
      else
	push_nested_class (tsubst (context, args, tf_none, NULL_TREE));
    }

  /* Look for a class template declaration.  We look for hidden names
     because two friend declarations of the same template are the
     same.  For example, in:

       struct A {
         template <typename> friend class F;
       };
       template <typename> struct B {
         template <typename> friend class F;
       };

     both F templates are the same.  */
  tmpl = lookup_name_real (DECL_NAME (friend_tmpl), 0, 0,
			   /*block_p=*/true, 0, LOOKUP_HIDDEN);

  /* But, if we don't find one, it might be because we're in a
     situation like this:

       template <class T>
       struct S {
	 template <class U>
	 friend struct S;
       };

     Here, in the scope of (say) S<int>, `S' is bound to a TYPE_DECL
     for `S<int>', not the TEMPLATE_DECL.  */
  if (!tmpl || !DECL_CLASS_TEMPLATE_P (tmpl))
    {
      tmpl = lookup_name_prefer_type (DECL_NAME (friend_tmpl), 1);
      tmpl = maybe_get_template_decl_from_type_decl (tmpl);
    }

  if (tmpl && DECL_CLASS_TEMPLATE_P (tmpl))
    {
      /* The friend template has already been declared.  Just
	 check to see that the declarations match, and install any new
	 default parameters.  We must tsubst the default parameters,
	 of course.  We only need the innermost template parameters
	 because that is all that redeclare_class_template will look
	 at.  */
      if (TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (friend_tmpl))
	  > TMPL_ARGS_DEPTH (args))
	{
	  tree parms;
	  location_t saved_input_location;
	  parms = tsubst_template_parms (DECL_TEMPLATE_PARMS (friend_tmpl),
					 args, tf_warning_or_error);

	  saved_input_location = input_location;
	  input_location = DECL_SOURCE_LOCATION (friend_tmpl);
	  tree cons = get_constraints (tmpl);
	  redeclare_class_template (TREE_TYPE (tmpl), parms, cons);
	  input_location = saved_input_location;
	}

      friend_type = TREE_TYPE (tmpl);
    }
  else
    {
      /* The friend template has not already been declared.  In this
	 case, the instantiation of the template class will cause the
	 injection of this template into the global scope.  */
      tmpl = tsubst (friend_tmpl, args, tf_warning_or_error, NULL_TREE);
      if (tmpl == error_mark_node)
	return error_mark_node;

      /* The new TMPL is not an instantiation of anything, so we
	 forget its origins.  We don't reset CLASSTYPE_TI_TEMPLATE for
	 the new type because that is supposed to be the corresponding
	 template decl, i.e., TMPL.  */
      DECL_USE_TEMPLATE (tmpl) = 0;
      DECL_TEMPLATE_INFO (tmpl) = NULL_TREE;
      CLASSTYPE_USE_TEMPLATE (TREE_TYPE (tmpl)) = 0;
      CLASSTYPE_TI_ARGS (TREE_TYPE (tmpl))
	= INNERMOST_TEMPLATE_ARGS (CLASSTYPE_TI_ARGS (TREE_TYPE (tmpl)));

      /* Inject this template into the global scope.  */
      friend_type = TREE_TYPE (pushdecl_top_level_maybe_friend (tmpl, true));
    }

  /* Leave whatever scope was entered above.  */
  if (context != global_namespace)
    {
      if (TREE_CODE (context) == NAMESPACE_DECL)
	pop_nested_namespace (context);
      else
	pop_nested_class ();
    }

  return friend_type;
}
/* Returns zero if TYPE cannot be completed later due to circularity.
   Otherwise returns one.  */

static int
can_complete_type_without_circularity (tree type)
{
  /* Peel off incomplete array layers: an array type can be completed
     exactly when its element type can.  */
  while (type != NULL_TREE
	 && type != error_mark_node
	 && !COMPLETE_TYPE_P (type)
	 && TREE_CODE (type) == ARRAY_TYPE)
    type = TREE_TYPE (type);

  if (type == NULL_TREE || type == error_mark_node)
    return 0;
  if (COMPLETE_TYPE_P (type))
    return 1;
  /* A class currently being defined cannot be completed without
     circularity.  */
  if (CLASS_TYPE_P (type) && TYPE_BEING_DEFINED (TYPE_MAIN_VARIANT (type)))
    return 0;
  return 1;
}
static tree tsubst_omp_clauses (tree, enum c_omp_region_type, tree,
tsubst_flags_t, tree);
/* Instantiate a single dependent attribute T (a TREE_LIST), and return either
   T or a new TREE_LIST, possibly a chain in the case of a pack expansion.  */

static tree
tsubst_attribute (tree t, tree *decl_p, tree args,
		  tsubst_flags_t complain, tree in_decl)
{
  gcc_assert (ATTR_IS_DEPENDENT (t));

  tree val = TREE_VALUE (t);
  if (val == NULL_TREE)
    /* Nothing to do.  */;
  else if ((flag_openmp || flag_openmp_simd || flag_cilkplus)
	   && is_attribute_p ("omp declare simd",
			      get_attribute_name (t)))
    {
      /* "omp declare simd": substitute into the clause list, map the
	 clauses onto the instantiated DECL's parameters, then re-encode
	 them as parameter numbers.  */
      tree clauses = TREE_VALUE (val);
      clauses = tsubst_omp_clauses (clauses, C_ORT_OMP_DECLARE_SIMD, args,
				    complain, in_decl);
      c_omp_declare_simd_clauses_to_decls (*decl_p, clauses);
      clauses = finish_omp_clauses (clauses, C_ORT_OMP_DECLARE_SIMD);
      tree parms = DECL_ARGUMENTS (*decl_p);
      clauses
	= c_omp_declare_simd_clauses_to_numbers (parms, clauses);
      if (clauses)
	val = build_tree_list (NULL_TREE, clauses);
      else
	val = NULL_TREE;
    }
  /* If the first attribute argument is an identifier, don't
     pass it through tsubst.  Attributes like mode, format,
     cleanup and several target specific attributes expect it
     unmodified.  */
  else if (attribute_takes_identifier_p (get_attribute_name (t)))
    {
      /* Substitute only into the arguments after the identifier; keep
	 VAL unchanged if nothing in the tail changed.  */
      tree chain
	= tsubst_expr (TREE_CHAIN (val), args, complain, in_decl,
		       /*integral_constant_expression_p=*/false);
      if (chain != TREE_CHAIN (val))
	val = tree_cons (NULL_TREE, TREE_VALUE (val), chain);
    }
  else if (PACK_EXPANSION_P (val))
    {
      /* An attribute pack expansion.  Expand it and return a TREE_LIST
	 chain with one node per expanded element, all sharing T's
	 purpose (the attribute name).  */
      tree purp = TREE_PURPOSE (t);
      tree pack = tsubst_pack_expansion (val, args, complain, in_decl);
      if (pack == error_mark_node)
	return error_mark_node;
      int len = TREE_VEC_LENGTH (pack);
      tree list = NULL_TREE;
      tree *q = &list;
      for (int i = 0; i < len; ++i)
	{
	  tree elt = TREE_VEC_ELT (pack, i);
	  *q = build_tree_list (purp, elt);
	  q = &TREE_CHAIN (*q);
	}
      return list;
    }
  else
    val = tsubst_expr (val, args, complain, in_decl,
		       /*integral_constant_expression_p=*/false);

  /* Only build a fresh node if substitution actually changed the
     attribute's value.  */
  if (val != TREE_VALUE (t))
    return build_tree_list (TREE_PURPOSE (t), val);
  return t;
}
/* Instantiate any dependent attributes in ATTRIBUTES, returning either it
   unchanged or a new TREE_LIST chain.  */

static tree
tsubst_attributes (tree attributes, tree args,
		   tsubst_flags_t complain, tree in_decl)
{
  tree last_dep = NULL_TREE;

  /* Copy-on-write: only duplicate the list if some attribute is
     actually dependent, so the common non-dependent case returns the
     input list unchanged.  */
  for (tree t = attributes; t; t = TREE_CHAIN (t))
    if (ATTR_IS_DEPENDENT (t))
      {
	last_dep = t;
	attributes = copy_list (attributes);
	break;
      }

  if (last_dep)
    for (tree *p = &attributes; *p; )
      {
	tree t = *p;
	if (ATTR_IS_DEPENDENT (t))
	  {
	    tree subst = tsubst_attribute (t, NULL, args, complain, in_decl);
	    if (subst != t)
	      {
		/* SUBST may be a chain (from a pack expansion): splice
		   it in where T was, walk to its end, then reattach
		   T's original tail.  */
		*p = subst;
		do
		  p = &TREE_CHAIN (*p);
		while (*p);
		*p = TREE_CHAIN (t);
		continue;
	      }
	  }
	p = &TREE_CHAIN (*p);
      }

  return attributes;
}
/* Apply any attributes which had to be deferred until instantiation
   time.  DECL_P, ATTRIBUTES and ATTR_FLAGS are as cplus_decl_attributes;
   ARGS, COMPLAIN, IN_DECL are as tsubst.  */

static void
apply_late_template_attributes (tree *decl_p, tree attributes, int attr_flags,
				tree args, tsubst_flags_t complain, tree in_decl)
{
  tree last_dep = NULL_TREE;
  tree t;
  tree *p;

  if (attributes == NULL_TREE)
    return;

  /* Find where to attach the (possibly copied) attribute list.  */
  if (DECL_P (*decl_p))
    {
      if (TREE_TYPE (*decl_p) == error_mark_node)
	return;
      p = &DECL_ATTRIBUTES (*decl_p);
      /* DECL_ATTRIBUTES comes from copy_node in tsubst_decl, and is identical
         to our attributes parameter.  */
      gcc_assert (*p == attributes);
    }
  else
    {
      p = &TYPE_ATTRIBUTES (*decl_p);
      /* TYPE_ATTRIBUTES was set up (with abi_tag and may_alias) in
	 lookup_template_class_1, and should be preserved.  Append after
	 the existing entries instead.  */
      gcc_assert (*p != attributes);
      while (*p)
	p = &TREE_CHAIN (*p);
    }

  /* Copy-on-write: duplicate the list only if it contains a dependent
     attribute, so the pattern's original list is never mutated.  */
  for (t = attributes; t; t = TREE_CHAIN (t))
    if (ATTR_IS_DEPENDENT (t))
      {
	last_dep = t;
	attributes = copy_list (attributes);
	break;
      }

  *p = attributes;
  if (last_dep)
    {
      tree late_attrs = NULL_TREE;
      tree *q = &late_attrs;

      for (; *p; )
	{
	  t = *p;
	  if (ATTR_IS_DEPENDENT (t))
	    {
	      /* Unlink T from the attached list, substitute into it,
		 and append the result (possibly a chain) to
		 LATE_ATTRS.  */
	      *p = TREE_CHAIN (t);
	      TREE_CHAIN (t) = NULL_TREE;
	      *q = tsubst_attribute (t, decl_p, args, complain, in_decl);
	      do
		q = &TREE_CHAIN (*q);
	      while (*q);
	    }
	  else
	    p = &TREE_CHAIN (t);
	}

      /* Re-attach the instantiated attributes through the normal path
	 so their handlers are run.  */
      cplus_decl_attributes (decl_p, late_attrs, attr_flags);
    }
}
/* Perform (or defer) access check for typedefs that were referenced
   from within the template TMPL code.
   This is a subroutine of instantiate_decl and instantiate_class_template.
   TMPL is the template to consider and TARGS is the list of arguments of
   that template.  */

static void
perform_typedefs_access_check (tree tmpl, tree targs)
{
  unsigned ix;
  qualified_typedef_usage_t *usage;

  /* Only class types and function declarations record typedef uses.  */
  if (tmpl == NULL_TREE
      || (!CLASS_TYPE_P (tmpl) && TREE_CODE (tmpl) != FUNCTION_DECL))
    return;

  location_t saved_location = input_location;
  FOR_EACH_VEC_SAFE_ELT (get_types_needing_access_check (tmpl), ix, usage)
    {
      tree tdef = usage->typedef_decl;
      tree scope = usage->context;

      if (tdef == NULL_TREE || scope == NULL_TREE || !CLASS_TYPE_P (scope))
	continue;

      /* Substitute TARGS into any still-dependent piece before
	 checking.  */
      if (uses_template_parms (tdef))
	tdef = tsubst (tdef, targs, tf_error, NULL_TREE);
      if (uses_template_parms (scope))
	scope = tsubst (scope, targs, tf_error, NULL_TREE);

      /* Make access check error messages point to the location
	 of the use of the typedef.  */
      input_location = usage->locus;
      perform_or_defer_access_check (TYPE_BINFO (scope),
				     tdef, tdef,
				     tf_warning_or_error);
    }
  input_location = saved_location;
}
/* Worker for instantiate_class_template: substitute the arguments of
   TYPE into its most specialized matching pattern, creating bases,
   members and friends, and lay the class out.  Returns TYPE (possibly
   still incomplete on failure) or error_mark_node.  */

static tree
instantiate_class_template_1 (tree type)
{
  tree templ, args, pattern, t, member;
  tree typedecl;
  tree pbinfo;
  tree base_list;
  unsigned int saved_maximum_field_alignment;
  tree fn_context;

  if (type == error_mark_node)
    return error_mark_node;

  /* Nothing to do for an already complete (or in-progress) type, or a
     still-dependent one.  */
  if (COMPLETE_OR_OPEN_TYPE_P (type)
      || uses_template_parms (type))
    return type;

  /* Figure out which template is being instantiated.  */
  templ = most_general_template (CLASSTYPE_TI_TEMPLATE (type));
  gcc_assert (TREE_CODE (templ) == TEMPLATE_DECL);

  /* Determine what specialization of the original template to
     instantiate.  */
  t = most_specialized_partial_spec (type, tf_warning_or_error);
  if (t == error_mark_node)
    {
      TYPE_BEING_DEFINED (type) = 1;
      return error_mark_node;
    }
  else if (t)
    {
      /* This TYPE is actually an instantiation of a partial
	 specialization.  We replace the innermost set of ARGS with
	 the arguments appropriate for substitution.  For example,
	 given:

	   template <class T> struct S {};
	   template <class T> struct S<T*> {};

	 and supposing that we are instantiating S<int*>, ARGS will
	 presently be {int*} -- but we need {int}.  */
      pattern = TREE_TYPE (t);
      args = TREE_PURPOSE (t);
    }
  else
    {
      pattern = TREE_TYPE (templ);
      args = CLASSTYPE_TI_ARGS (type);
    }

  /* If the template we're instantiating is incomplete, then clearly
     there's nothing we can do.  */
  if (!COMPLETE_TYPE_P (pattern))
    return type;

  /* If we've recursively instantiated too many templates, stop.  */
  if (! push_tinst_level (type))
    return type;

  /* Now we're really doing the instantiation.  Mark the type as in
     the process of being defined.  */
  TYPE_BEING_DEFINED (type) = 1;

  /* We may be in the middle of deferred access check.  Disable
     it now.  */
  push_deferring_access_checks (dk_no_deferred);

  int saved_unevaluated_operand = cp_unevaluated_operand;
  int saved_inhibit_evaluation_warnings = c_inhibit_evaluation_warnings;

  fn_context = decl_function_context (TYPE_MAIN_DECL (type));
  /* Also avoid push_to_top_level for a lambda in an NSDMI.  */
  if (!fn_context && LAMBDA_TYPE_P (type) && TYPE_CLASS_SCOPE_P (type))
    fn_context = error_mark_node;
  if (!fn_context)
    push_to_top_level ();
  else
    {
      cp_unevaluated_operand = 0;
      c_inhibit_evaluation_warnings = 0;
    }
  /* Use #pragma pack from the template context.  */
  saved_maximum_field_alignment = maximum_field_alignment;
  maximum_field_alignment = TYPE_PRECISION (pattern);

  SET_CLASSTYPE_INTERFACE_UNKNOWN (type);

  /* Set the input location to the most specialized template definition.
     This is needed if tsubsting causes an error.  */
  typedecl = TYPE_MAIN_DECL (pattern);
  input_location = DECL_SOURCE_LOCATION (TYPE_NAME (type)) =
    DECL_SOURCE_LOCATION (typedecl);

  /* Copy layout- and semantics-affecting flags from the pattern.  */
  TYPE_PACKED (type) = TYPE_PACKED (pattern);
  SET_TYPE_ALIGN (type, TYPE_ALIGN (pattern));
  TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (pattern);
  CLASSTYPE_NON_AGGREGATE (type) = CLASSTYPE_NON_AGGREGATE (pattern);
  if (ANON_AGGR_TYPE_P (pattern))
    SET_ANON_AGGR_TYPE_P (type);
  if (CLASSTYPE_VISIBILITY_SPECIFIED (pattern))
    {
      CLASSTYPE_VISIBILITY_SPECIFIED (type) = 1;
      CLASSTYPE_VISIBILITY (type) = CLASSTYPE_VISIBILITY (pattern);
      /* Adjust visibility for template arguments.  */
      determine_visibility (TYPE_MAIN_DECL (type));
    }
  if (CLASS_TYPE_P (type))
    CLASSTYPE_FINAL (type) = CLASSTYPE_FINAL (pattern);

  pbinfo = TYPE_BINFO (pattern);

  /* We should never instantiate a nested class before its enclosing
     class; we need to look up the nested class by name before we can
     instantiate it, and that lookup should instantiate the enclosing
     class.  */
  gcc_assert (!DECL_CLASS_SCOPE_P (TYPE_MAIN_DECL (pattern))
	      || COMPLETE_OR_OPEN_TYPE_P (TYPE_CONTEXT (type)));

  base_list = NULL_TREE;
  if (BINFO_N_BASE_BINFOS (pbinfo))
    {
      tree pbase_binfo;
      tree pushed_scope;
      int i;

      /* We must enter the scope containing the type, as that is where
	 the accessibility of types named in dependent bases are
	 looked up from.  */
      pushed_scope = push_scope (CP_TYPE_CONTEXT (type));

      /* Substitute into each of the bases to determine the actual
	 basetypes.  */
      for (i = 0; BINFO_BASE_ITERATE (pbinfo, i, pbase_binfo); i++)
	{
	  tree base;
	  tree access = BINFO_BASE_ACCESS (pbinfo, i);
	  tree expanded_bases = NULL_TREE;
	  int idx, len = 1;

	  /* A base that is a pack expansion may produce several bases
	     at once.  */
	  if (PACK_EXPANSION_P (BINFO_TYPE (pbase_binfo)))
	    {
	      expanded_bases =
		tsubst_pack_expansion (BINFO_TYPE (pbase_binfo),
				       args, tf_error, NULL_TREE);
	      if (expanded_bases == error_mark_node)
		continue;

	      len = TREE_VEC_LENGTH (expanded_bases);
	    }

	  for (idx = 0; idx < len; idx++)
	    {
	      if (expanded_bases)
		/* Extract the already-expanded base class.  */
		base = TREE_VEC_ELT (expanded_bases, idx);
	      else
		/* Substitute to figure out the base class.  */
		base = tsubst (BINFO_TYPE (pbase_binfo), args, tf_error,
			       NULL_TREE);

	      if (base == error_mark_node)
		continue;

	      base_list = tree_cons (access, base, base_list);
	      /* A non-NULL TREE_TYPE on the cons marks the base
		 virtual, mirroring the pattern's binfo.  */
	      if (BINFO_VIRTUAL_P (pbase_binfo))
		TREE_TYPE (base_list) = integer_type_node;
	    }
	}

      /* The list is now in reverse order; correct that.  */
      base_list = nreverse (base_list);

      if (pushed_scope)
	pop_scope (pushed_scope);
    }
  /* Now call xref_basetypes to set up all the base-class
     information.  */
  xref_basetypes (type, base_list);

  apply_late_template_attributes (&type, TYPE_ATTRIBUTES (pattern),
				  (int) ATTR_FLAG_TYPE_IN_PLACE,
				  args, tf_error, NULL_TREE);
  fixup_attribute_variants (type);

  /* Now that our base classes are set up, enter the scope of the
     class, so that name lookups into base classes, etc. will work
     correctly.  This is precisely analogous to what we do in
     begin_class_definition when defining an ordinary non-template
     class, except we also need to push the enclosing classes.  */
  push_nested_class (type);

  /* Now members are processed in the order of declaration.  Entries
     with a non-null TREE_PURPOSE are member declarations; the rest are
     friends (handled in the else branch below).  */
  for (member = CLASSTYPE_DECL_LIST (pattern);
       member; member = TREE_CHAIN (member))
    {
      tree t = TREE_VALUE (member);

      if (TREE_PURPOSE (member))
	{
	  if (TYPE_P (t))
	    {
	      /* Build new CLASSTYPE_NESTED_UTDS.  */

	      tree newtag;
	      bool class_template_p;

	      class_template_p = (TREE_CODE (t) != ENUMERAL_TYPE
				  && TYPE_LANG_SPECIFIC (t)
				  && CLASSTYPE_IS_TEMPLATE (t));
	      /* If the member is a class template, then -- even after
		 substitution -- there may be dependent types in the
		 template argument list for the class.  We increment
		 PROCESSING_TEMPLATE_DECL for the sake of
		 dependent_type_p, as that function will assume that no
		 types are dependent when outside of a template.  */
	      if (class_template_p)
		++processing_template_decl;
	      newtag = tsubst (t, args, tf_error, NULL_TREE);
	      if (class_template_p)
		--processing_template_decl;
	      if (newtag == error_mark_node)
		continue;

	      if (TREE_CODE (newtag) != ENUMERAL_TYPE)
		{
		  tree name = TYPE_IDENTIFIER (t);

		  if (class_template_p)
		    /* Unfortunately, lookup_template_class sets
		       CLASSTYPE_IMPLICIT_INSTANTIATION for a partial
		       instantiation (i.e., for the type of a member
		       template class nested within a template class.)
		       This behavior is required for
		       maybe_process_partial_specialization to work
		       correctly, but is not accurate in this case;
		       the TAG is not an instantiation of anything.
		       (The corresponding TEMPLATE_DECL is an
		       instantiation, but the TYPE is not.)  */
		    CLASSTYPE_USE_TEMPLATE (newtag) = 0;

		  /* Now, we call pushtag to put this NEWTAG into the scope of
		     TYPE.  We first set up the IDENTIFIER_TYPE_VALUE to avoid
		     pushtag calling push_template_decl.  We don't have to do
		     this for enums because it will already have been done in
		     tsubst_enum.  */
		  if (name)
		    SET_IDENTIFIER_TYPE_VALUE (name, newtag);
		  pushtag (name, newtag, /*tag_scope=*/ts_current);
		}
	    }
	  else if (DECL_DECLARES_FUNCTION_P (t))
	    {
	      /* Build new TYPE_METHODS.  */
	      tree r;

	      if (TREE_CODE (t) == TEMPLATE_DECL)
		++processing_template_decl;
	      r = tsubst (t, args, tf_error, NULL_TREE);
	      if (TREE_CODE (t) == TEMPLATE_DECL)
		--processing_template_decl;
	      set_current_access_from_decl (r);
	      finish_member_declaration (r);
	      /* Instantiate members marked with attribute used.  */
	      if (r != error_mark_node && DECL_PRESERVE_P (r))
		mark_used (r);
	      if (TREE_CODE (r) == FUNCTION_DECL
		  && DECL_OMP_DECLARE_REDUCTION_P (r))
		cp_check_omp_declare_reduction (r);
	    }
	  else if (DECL_CLASS_TEMPLATE_P (t)
		   && LAMBDA_TYPE_P (TREE_TYPE (t)))
	    /* A closure type for a lambda in a default argument for a
	       member template.  Ignore it; it will be instantiated with
	       the default argument.  */;
	  else
	    {
	      /* Build new TYPE_FIELDS.  */
	      if (TREE_CODE (t) == STATIC_ASSERT)
		{
		  tree condition;

		  ++c_inhibit_evaluation_warnings;
		  condition =
		    tsubst_expr (STATIC_ASSERT_CONDITION (t), args,
				 tf_warning_or_error, NULL_TREE,
				 /*integral_constant_expression_p=*/true);
		  --c_inhibit_evaluation_warnings;

		  finish_static_assert (condition,
					STATIC_ASSERT_MESSAGE (t),
					STATIC_ASSERT_SOURCE_LOCATION (t),
					/*member_p=*/true);
		}
	      else if (TREE_CODE (t) != CONST_DECL)
		{
		  tree r;
		  tree vec = NULL_TREE;
		  int len = 1;

		  /* The file and line for this declaration, to
		     assist in error message reporting.  Since we
		     called push_tinst_level above, we don't need to
		     restore these.  */
		  input_location = DECL_SOURCE_LOCATION (t);

		  if (TREE_CODE (t) == TEMPLATE_DECL)
		    ++processing_template_decl;
		  r = tsubst (t, args, tf_warning_or_error, NULL_TREE);
		  if (TREE_CODE (t) == TEMPLATE_DECL)
		    --processing_template_decl;

		  if (TREE_CODE (r) == TREE_VEC)
		    {
		      /* A capture pack became multiple fields.  */
		      vec = r;
		      len = TREE_VEC_LENGTH (vec);
		    }

		  for (int i = 0; i < len; ++i)
		    {
		      if (vec)
			r = TREE_VEC_ELT (vec, i);
		      if (VAR_P (r))
			{
			  /* In [temp.inst]:

			     [t]he initialization (and any associated
			     side-effects) of a static data member does
			     not occur unless the static data member is
			     itself used in a way that requires the
			     definition of the static data member to
			     exist.

			     Therefore, we do not substitute into the
			     initialized for the static data member here.  */
			  finish_static_data_member_decl
			    (r,
			     /*init=*/NULL_TREE,
			     /*init_const_expr_p=*/false,
			     /*asmspec_tree=*/NULL_TREE,
			     /*flags=*/0);
			  /* Instantiate members marked with attribute used.  */
			  if (r != error_mark_node && DECL_PRESERVE_P (r))
			    mark_used (r);
			}
		      else if (TREE_CODE (r) == FIELD_DECL)
			{
			  /* Determine whether R has a valid type and can be
			     completed later.  If R is invalid, then its type
			     is replaced by error_mark_node.  */
			  tree rtype = TREE_TYPE (r);
			  if (can_complete_type_without_circularity (rtype))
			    complete_type (rtype);

			  if (!complete_or_array_type_p (rtype))
			    {
			      /* If R's type couldn't be completed and
				 it isn't a flexible array member (whose
				 type is incomplete by definition) give
				 an error.  */
			      cxx_incomplete_type_error (r, rtype);
			      TREE_TYPE (r) = error_mark_node;
			    }
			}

		      /* If it is a TYPE_DECL for a class-scoped ENUMERAL_TYPE,
			 such a thing will already have been added to the field
			 list by tsubst_enum in finish_member_declaration in the
			 CLASSTYPE_NESTED_UTDS case above.  */
		      if (!(TREE_CODE (r) == TYPE_DECL
			    && TREE_CODE (TREE_TYPE (r)) == ENUMERAL_TYPE
			    && DECL_ARTIFICIAL (r)))
			{
			  set_current_access_from_decl (r);
			  finish_member_declaration (r);
			}
		    }
		}
	    }
	}
      else
	{
	  if (TYPE_P (t) || DECL_CLASS_TEMPLATE_P (t)
	      || DECL_TEMPLATE_TEMPLATE_PARM_P (t))
	    {
	      /* Build new CLASSTYPE_FRIEND_CLASSES.  */

	      tree friend_type = t;
	      bool adjust_processing_template_decl = false;

	      if (TREE_CODE (friend_type) == TEMPLATE_DECL)
		{
		  /* template <class T> friend class C;  */
		  friend_type = tsubst_friend_class (friend_type, args);
		  adjust_processing_template_decl = true;
		}
	      else if (TREE_CODE (friend_type) == UNBOUND_CLASS_TEMPLATE)
		{
		  /* template <class T> friend class C::D;  */
		  friend_type = tsubst (friend_type, args,
					tf_warning_or_error, NULL_TREE);
		  if (TREE_CODE (friend_type) == TEMPLATE_DECL)
		    friend_type = TREE_TYPE (friend_type);
		  adjust_processing_template_decl = true;
		}
	      else if (TREE_CODE (friend_type) == TYPENAME_TYPE
		       || TREE_CODE (friend_type) == TEMPLATE_TYPE_PARM)
		{
		  /* This could be either

		       friend class T::C;

		     when dependent_type_p is false or

		       template <class U> friend class T::C;

		     otherwise.  */
		  /* Bump processing_template_decl in case this is something like
		     template <class T> friend struct A<T>::B.  */
		  ++processing_template_decl;
		  friend_type = tsubst (friend_type, args,
					tf_warning_or_error, NULL_TREE);
		  if (dependent_type_p (friend_type))
		    adjust_processing_template_decl = true;
		  --processing_template_decl;
		}
	      else if (!CLASSTYPE_USE_TEMPLATE (friend_type)
		       && hidden_name_p (TYPE_NAME (friend_type)))
		{
		  /* friend class C;

		     where C hasn't been declared yet.  Let's lookup name
		     from namespace scope directly, bypassing any name that
		     come from dependent base class.  */
		  tree ns = decl_namespace_context (TYPE_MAIN_DECL (friend_type));

		  /* The call to xref_tag_from_type does injection for friend
		     classes.  */
		  push_nested_namespace (ns);
		  friend_type =
		    xref_tag_from_type (friend_type, NULL_TREE,
					/*tag_scope=*/ts_current);
		  pop_nested_namespace (ns);
		}
	      else if (uses_template_parms (friend_type))
		/* friend class C<T>;  */
		friend_type = tsubst (friend_type, args,
				      tf_warning_or_error, NULL_TREE);
	      /* Otherwise it's

		   friend class C;

		 where C is already declared or

		   friend class C<int>;

		 We don't have to do anything in these cases.  */

	      if (adjust_processing_template_decl)
		/* Trick make_friend_class into realizing that the friend
		   we're adding is a template, not an ordinary class.  It's
		   important that we use make_friend_class since it will
		   perform some error-checking and output cross-reference
		   information.  */
		++processing_template_decl;

	      if (friend_type != error_mark_node)
		make_friend_class (type, friend_type, /*complain=*/false);

	      if (adjust_processing_template_decl)
		--processing_template_decl;
	    }
	  else
	    {
	      /* Build new DECL_FRIENDLIST.  */
	      tree r;

	      /* The file and line for this declaration, to
		 assist in error message reporting.  Since we
		 called push_tinst_level above, we don't need to
		 restore these.  */
	      input_location = DECL_SOURCE_LOCATION (t);

	      if (TREE_CODE (t) == TEMPLATE_DECL)
		{
		  ++processing_template_decl;
		  push_deferring_access_checks (dk_no_check);
		}

	      r = tsubst_friend_function (t, args);
	      add_friend (type, r, /*complain=*/false);
	      if (TREE_CODE (t) == TEMPLATE_DECL)
		{
		  pop_deferring_access_checks ();
		  --processing_template_decl;
		}
	    }
	}
    }

  if (fn_context)
    {
      /* Restore these before substituting into the lambda capture
	 initializers.  */
      cp_unevaluated_operand = saved_unevaluated_operand;
      c_inhibit_evaluation_warnings = saved_inhibit_evaluation_warnings;
    }

  if (tree expr = CLASSTYPE_LAMBDA_EXPR (type))
    {
      tree decl = lambda_function (type);
      if (decl)
	{
	  if (cxx_dialect >= cxx1z)
	    CLASSTYPE_LITERAL_P (type) = true;

	  if (!DECL_TEMPLATE_INFO (decl)
	      || DECL_TEMPLATE_RESULT (DECL_TI_TEMPLATE (decl)) != decl)
	    {
	      /* Set function_depth to avoid garbage collection.  */
	      ++function_depth;
	      instantiate_decl (decl, /*defer_ok=*/false, false);
	      --function_depth;
	    }

	  /* We need to instantiate the capture list from the template
	     after we've instantiated the closure members, but before we
	     consider adding the conversion op.  Also keep any captures
	     that may have been added during instantiation of the op().  */
	  tree tmpl_expr = CLASSTYPE_LAMBDA_EXPR (pattern);
	  tree tmpl_cap
	    = tsubst_copy_and_build (LAMBDA_EXPR_CAPTURE_LIST (tmpl_expr),
				     args, tf_warning_or_error, NULL_TREE,
				     false, false);

	  LAMBDA_EXPR_CAPTURE_LIST (expr)
	    = chainon (tmpl_cap, nreverse (LAMBDA_EXPR_CAPTURE_LIST (expr)));

	  maybe_add_lambda_conv_op (type);
	}
      else
	gcc_assert (errorcount);
    }

  /* Set the file and line number information to whatever is given for
     the class itself.  This puts error messages involving generated
     implicit functions at a predictable point, and the same point
     that would be used for non-template classes.  */
  input_location = DECL_SOURCE_LOCATION (typedecl);

  unreverse_member_declarations (type);
  finish_struct_1 (type);
  TYPE_BEING_DEFINED (type) = 0;

  /* We don't instantiate default arguments for member functions.  14.7.1:

     The implicit instantiation of a class template specialization causes
     the implicit instantiation of the declarations, but not of the
     definitions or default arguments, of the class member functions,
     member classes, static data members and member templates....  */

  /* Some typedefs referenced from within the template code need to be
     access checked at template instantiation time, i.e now.  These
     types were added to the template at parsing time.  Let's get those
     and perform the access checks then.  */
  perform_typedefs_access_check (pattern, args);
  perform_deferred_access_checks (tf_warning_or_error);
  pop_nested_class ();
  maximum_field_alignment = saved_maximum_field_alignment;
  if (!fn_context)
    pop_from_top_level ();
  pop_deferring_access_checks ();
  pop_tinst_level ();

  /* The vtable for a template class can be emitted in any translation
     unit in which the class is instantiated.  When there is no key
     method, however, finish_struct_1 will already have added TYPE to
     the keyed_classes list.  */
  if (TYPE_CONTAINS_VPTR_P (type) && CLASSTYPE_KEY_METHOD (type))
    keyed_classes = tree_cons (NULL_TREE, type, keyed_classes);

  return type;
}
/* Public entry point: instantiate the class template specialization
   TYPE, accounting the work to the template-instantiation timer.  */

tree
instantiate_class_template (tree type)
{
  timevar_push (TV_TEMPLATE_INST);
  tree result = instantiate_class_template_1 (type);
  timevar_pop (TV_TEMPLATE_INST);
  return result;
}
/* Substitute ARGS into the template argument T.  Types go through
   tsubst; anything else is treated as an integral constant
   expression.  */

static tree
tsubst_template_arg (tree t, tree args, tsubst_flags_t complain, tree in_decl)
{
  if (!t)
    return t;

  if (TYPE_P (t))
    return tsubst (t, args, complain, in_decl);

  /* A non-type argument: substitute as an expression.  Evaluation
     warnings are suppressed unless the caller asked for warnings.  */
  bool suppress = !(complain & tf_warning);
  if (suppress)
    ++c_inhibit_evaluation_warnings;
  tree result = tsubst_expr (t, args, complain, in_decl,
			     /*integral_constant_expression_p=*/true);
  if (suppress)
    --c_inhibit_evaluation_warnings;
  return result;
}
/* Given a function parameter pack TMPL_PARM and some function parameters
   instantiated from it at *SPEC_P, return a NONTYPE_ARGUMENT_PACK of them
   and set *SPEC_P to point at the next point in the list.  */

tree
extract_fnparm_pack (tree tmpl_parm, tree *spec_p)
{
  /* Collect all of the extra "packed" parameters into an
     argument pack.  */
  tree parmvec;
  tree parmtypevec;
  tree argpack = make_node (NONTYPE_ARGUMENT_PACK);
  tree argtypepack = cxx_make_type (TYPE_ARGUMENT_PACK);
  tree spec_parm = *spec_p;
  int i, len;

  /* Count the parameters that were expanded from TMPL_PARM (all of
     them, when TMPL_PARM is NULL_TREE).  Walk the chain with
     DECL_CHAIN, matching the copying loop below, since these nodes are
     all PARM_DECLs.  */
  for (len = 0; spec_parm; ++len, spec_parm = DECL_CHAIN (spec_parm))
    if (tmpl_parm
	&& !function_parameter_expanded_from_pack_p (spec_parm, tmpl_parm))
      break;

  /* Fill in PARMVEC and PARMTYPEVEC with all of the parameters.  */
  parmvec = make_tree_vec (len);
  parmtypevec = make_tree_vec (len);
  spec_parm = *spec_p;
  for (i = 0; i < len; i++, spec_parm = DECL_CHAIN (spec_parm))
    {
      TREE_VEC_ELT (parmvec, i) = spec_parm;
      TREE_VEC_ELT (parmtypevec, i) = TREE_TYPE (spec_parm);
    }

  /* Build the argument packs.  */
  SET_ARGUMENT_PACK_ARGS (argpack, parmvec);
  SET_ARGUMENT_PACK_ARGS (argtypepack, parmtypevec);
  TREE_TYPE (argpack) = argtypepack;

  *spec_p = spec_parm;

  return argpack;
}
/* Given a chain SPEC_PARM of PARM_DECLs, pack the entire chain into a
   NONTYPE_ARGUMENT_PACK.  */

static tree
make_fnparm_pack (tree spec_parm)
{
  /* A null template parm makes extract_fnparm_pack consume the whole
     chain.  */
  tree cursor = spec_parm;
  return extract_fnparm_pack (NULL_TREE, &cursor);
}
/* Return 1 if the Ith element of the argument pack ARG_PACK is a
   pack expansion with no extra args, 2 if it has extra args, or 0
   if it is not a pack expansion.  */

static int
argument_pack_element_is_expansion_p (tree arg_pack, int i)
{
  tree elements = ARGUMENT_PACK_ARGS (arg_pack);
  if (i >= TREE_VEC_LENGTH (elements))
    return 0;

  tree elem = TREE_VEC_ELT (elements, i);
  /* A decl pack is itself an expansion; examine its type instead.  */
  if (DECL_P (elem))
    elem = TREE_TYPE (elem);

  if (!PACK_EXPANSION_P (elem))
    return 0;
  return PACK_EXPANSION_EXTRA_ARGS (elem) ? 2 : 1;
}
/* Create and return an ARGUMENT_PACK_SELECT node that picks element
   INDEX out of ARG_PACK.  */

static tree
make_argument_pack_select (tree arg_pack, unsigned index)
{
  tree sel = make_node (ARGUMENT_PACK_SELECT);

  ARGUMENT_PACK_SELECT_FROM_PACK (sel) = arg_pack;
  ARGUMENT_PACK_SELECT_INDEX (sel) = index;

  return sel;
}
/* This is a subroutine of tsubst_pack_expansion.

   It returns TRUE if we need to use the PACK_EXPANSION_EXTRA_ARGS
   mechanism to store the (non complete list of) arguments of the
   substitution and return a non substituted pack expansion, in order
   to wait for when we have enough arguments to really perform the
   substitution.  */

static bool
use_pack_expansion_extra_args_p (tree parm_packs,
				 int arg_pack_len,
				 bool has_empty_arg)
{
  /* No parameter packs at all: nothing to defer.  */
  if (parm_packs == NULL_TREE)
    return false;

  /* If one pack has an empty argument and another one doesn't, we
     cannot substitute yet.  */
  if (has_empty_arg)
    return true;

  /* Scan element-by-element: mixing an expansion argument with a
     normal argument (at any position) forces deferral, as does any
     expansion that carries extra args.  Note that SEEN_EXPANSION is
     deliberately accumulated across all positions.  */
  bool seen_expansion = false;
  for (int i = 0; i < arg_pack_len; ++i)
    {
      bool seen_plain = false;
      for (tree pp = parm_packs; pp; pp = TREE_CHAIN (pp))
	{
	  tree arg = TREE_VALUE (pp);
	  switch (argument_pack_element_is_expansion_p (arg, i))
	    {
	    case 2:
	      /* We can't substitute a pack expansion with extra args
		 into our pattern.  */
	      return true;
	    case 1:
	      seen_expansion = true;
	      break;
	    default:
	      seen_plain = true;
	      break;
	    }
	}
      if (seen_expansion && seen_plain)
	return true;
    }

  return false;
}
/* [temp.variadic]/6 says that:

       The instantiation of a pack expansion [...]
       produces a list E1,E2, ..., En, where N is the number of elements
       in the pack expansion parameters.

   This subroutine of tsubst_pack_expansion produces one of these Ei.

   PATTERN is the pattern of the pack expansion.  PARM_PACKS is a
   TREE_LIST in which each TREE_PURPOSE is a parameter pack of
   PATTERN, and each TREE_VALUE is its corresponding argument pack.
   INDEX is the index 'i' of the element Ei to produce.  ARGS,
   COMPLAIN, and IN_DECL are the same parameters as for the
   tsubst_pack_expansion function.

   The function returns the resulting Ei upon successful completion,
   or error_mark_node.

   Note that this function possibly modifies the ARGS parameter, so
   it's the responsibility of the caller to restore it.  */

static tree
gen_elem_of_pack_expansion_instantiation (tree pattern,
					  tree parm_packs,
					  unsigned index,
					  tree args /* This parm gets
						       modified.  */,
					  tsubst_flags_t complain,
					  tree in_decl)
{
  tree t;
  bool ith_elem_is_expansion = false;

  /* For each parameter pack, change the substitution of the parameter
     pack to the ith argument in its argument pack, then expand the
     pattern.  The ARGUMENT_PACK_SELECT node installed for index 0 is
     re-used (and re-indexed) on every subsequent call for this pack.  */
  for (tree pack = parm_packs; pack; pack = TREE_CHAIN (pack))
    {
      tree parm = TREE_PURPOSE (pack);
      tree arg_pack = TREE_VALUE (pack);
      tree aps; /* instance of ARGUMENT_PACK_SELECT.  */

      ith_elem_is_expansion |=
	argument_pack_element_is_expansion_p (arg_pack, index);

      /* Select the Ith argument from the pack.  */
      if (TREE_CODE (parm) == PARM_DECL
	  || TREE_CODE (parm) == FIELD_DECL)
	{
	  /* Function parameter / field packs are resolved through the
	     local-specialization table rather than TMPL_ARG.  */
	  if (index == 0)
	    {
	      aps = make_argument_pack_select (arg_pack, index);
	      if (!mark_used (parm, complain) && !(complain & tf_error))
		return error_mark_node;
	      register_local_specialization (aps, parm);
	    }
	  else
	    aps = retrieve_local_specialization (parm);
	}
      else
	{
	  int idx, level;
	  template_parm_level_and_index (parm, &level, &idx);

	  if (index == 0)
	    {
	      aps = make_argument_pack_select (arg_pack, index);
	      /* Update the corresponding argument.  This is the
		 in-place modification of ARGS the header comment
		 warns about.  */
	      TMPL_ARG (args, level, idx) = aps;
	    }
	  else
	    /* Re-use the ARGUMENT_PACK_SELECT.  */
	    aps = TMPL_ARG (args, level, idx);
	}
      /* Point the (new or re-used) selector at element INDEX.  */
      ARGUMENT_PACK_SELECT_INDEX (aps) = index;
    }

  /* Substitute into the PATTERN with the (possibly altered)
     arguments.  */
  if (pattern == in_decl)
    /* Expanding a fixed parameter pack from
       coerce_template_parameter_pack.  */
    t = tsubst_decl (pattern, args, complain);
  else if (pattern == error_mark_node)
    t = error_mark_node;
  else if (constraint_p (pattern))
    {
      if (processing_template_decl)
	t = tsubst_constraint (pattern, args, complain, in_decl);
      else
	t = (constraints_satisfied_p (pattern, args)
	     ? boolean_true_node : boolean_false_node);
    }
  else if (!TYPE_P (pattern))
    t = tsubst_expr (pattern, args, complain, in_decl,
		     /*integral_constant_expression_p=*/false);
  else
    t = tsubst (pattern, args, complain, in_decl);

  /* If the Ith argument pack element is a pack expansion, then
     the Ith element resulting from the substituting is going to
     be a pack expansion as well.  */
  if (ith_elem_is_expansion)
    t = make_pack_expansion (t);

  return t;
}
/* When the unexpanded parameter pack in a fold expression expands to an empty
   sequence, the value of the expression is as follows; the program is
   ill-formed if the operator is not listed in this table.

	&&	true
	||	false
	,	void()  */

tree
expand_empty_fold (tree t, tsubst_flags_t complain)
{
  tree_code code = (tree_code) TREE_INT_CST_LOW (TREE_OPERAND (t, 0));

  /* Only the plain (non op=) forms of &&, || and , have an identity.  */
  if (!FOLD_EXPR_MODIFY_P (t))
    {
      if (code == TRUTH_ANDIF_EXPR)
	return boolean_true_node;
      if (code == TRUTH_ORIF_EXPR)
	return boolean_false_node;
      if (code == COMPOUND_EXPR)
	return void_node;
    }

  if (complain & tf_error)
    error_at (location_of (t),
	      "fold of empty expansion over %O", code);
  return error_mark_node;
}
/* Given a fold-expression T and a current LEFT and RIGHT operand,
   form an expression that combines the two terms using the
   operator of T.  */

static tree
fold_expression (tree t, tree left, tree right, tsubst_flags_t complain)
{
  tree op = FOLD_EXPR_OP (t);
  tree_code code = (tree_code) TREE_INT_CST_LOW (op);

  /* Compound assignment operators fold via the op= form.  */
  if (FOLD_EXPR_MODIFY_P (t))
    return build_x_modify_expr (input_location, left, code, right, complain);

  /* Comma and .* need their dedicated builders; everything else is an
     ordinary binary operator.  */
  if (code == COMPOUND_EXPR)
    return build_x_compound_expr (input_location, left, right, complain);
  if (code == DOTSTAR_EXPR)
    return build_m_component_ref (left, right, complain);

  return build_x_binary_op (input_location, code,
			    left, TREE_CODE (left),
			    right, TREE_CODE (right),
			    /*overload=*/NULL,
			    complain);
}
/* Substitute ARGS into the pack of a fold expression T.  */

static inline tree
tsubst_fold_expr_pack (tree t, tree args, tsubst_flags_t complain, tree in_decl)
{
  tree pack = FOLD_EXPR_PACK (t);
  return tsubst_pack_expansion (pack, args, complain, in_decl);
}
/* Substitute ARGS into the initializer of a fold expression T.
   (The comment previously said "pack", copy-pasted from
   tsubst_fold_expr_pack above; this function substitutes into
   FOLD_EXPR_INIT, the non-pack operand of a binary fold.)  */

static inline tree
tsubst_fold_expr_init (tree t, tree args, tsubst_flags_t complain, tree in_decl)
{
  return tsubst_expr (FOLD_EXPR_INIT (t), args, complain, in_decl,
		      /*integral_constant_expression_p=*/false);
}
/* Expand a PACK of arguments into a grouped as left fold.
   Given a pack containing elements A0, A1, ..., An and an
   operator @, this builds the expression:

      ((A0 @ A1) @ A2) ... @ An

   Note that PACK must not be empty.

   The operator is defined by the original fold expression T.  */

static tree
expand_left_fold (tree t, tree pack, tsubst_flags_t complain)
{
  int n = TREE_VEC_LENGTH (pack);
  tree accum = TREE_VEC_ELT (pack, 0);
  for (int i = 1; i < n; ++i)
    accum = fold_expression (t, accum, TREE_VEC_ELT (pack, i), complain);
  return accum;
}
/* Substitute into a unary left fold expression.  */

static tree
tsubst_unary_left_fold (tree t, tree args, tsubst_flags_t complain,
			tree in_decl)
{
  tree pack = tsubst_fold_expr_pack (t, args, complain, in_decl);
  if (pack == error_mark_node)
    return error_mark_node;

  /* Partial substitution: rebuild the fold around the new pack.  */
  if (PACK_EXPANSION_P (pack))
    {
      tree partial = copy_node (t);
      FOLD_EXPR_PACK (partial) = pack;
      return partial;
    }

  return (TREE_VEC_LENGTH (pack) == 0
	  ? expand_empty_fold (t, complain)
	  : expand_left_fold (t, pack, complain));
}
/* Substitute into a binary left fold expression.

   Do this by building a single (non-empty) vector of arguments and
   building the expression from those elements.  */

static tree
tsubst_binary_left_fold (tree t, tree args, tsubst_flags_t complain,
			 tree in_decl)
{
  tree pack = tsubst_fold_expr_pack (t, args, complain, in_decl);
  if (pack == error_mark_node)
    return error_mark_node;
  tree init = tsubst_fold_expr_init (t, args, complain, in_decl);
  if (init == error_mark_node)
    return error_mark_node;

  /* Partial substitution: rebuild the fold with the new operands.  */
  if (PACK_EXPANSION_P (pack))
    {
      tree partial = copy_node (t);
      FOLD_EXPR_PACK (partial) = pack;
      FOLD_EXPR_INIT (partial) = init;
      return partial;
    }

  /* Prepend INIT so the whole thing reduces to a unary left fold.  */
  int n = TREE_VEC_LENGTH (pack);
  tree operands = make_tree_vec (n + 1);
  TREE_VEC_ELT (operands, 0) = init;
  for (int i = 0; i < n; ++i)
    TREE_VEC_ELT (operands, i + 1) = TREE_VEC_ELT (pack, i);
  return expand_left_fold (t, operands, complain);
}
/* Expand a PACK of arguments into a grouped as right fold.
   Given a pack containing elements A0, A1, ..., An and an
   operator @, this builds the expression:

      A0 @ ... (An-2 @ (An-1 @ An))

   Note that PACK must not be empty.

   The operator is defined by the original fold expression T.  */

tree
expand_right_fold (tree t, tree pack, tsubst_flags_t complain)
{
  /* Start from the last element and fold leftwards.  */
  int i = TREE_VEC_LENGTH (pack) - 1;
  tree accum = TREE_VEC_ELT (pack, i);
  while (i > 0)
    {
      --i;
      accum = fold_expression (t, TREE_VEC_ELT (pack, i), accum, complain);
    }
  return accum;
}
/* Substitute into a unary right fold expression.  */

static tree
tsubst_unary_right_fold (tree t, tree args, tsubst_flags_t complain,
			 tree in_decl)
{
  tree pack = tsubst_fold_expr_pack (t, args, complain, in_decl);
  if (pack == error_mark_node)
    return error_mark_node;

  /* Partial substitution: rebuild the fold around the new pack.  */
  if (PACK_EXPANSION_P (pack))
    {
      tree partial = copy_node (t);
      FOLD_EXPR_PACK (partial) = pack;
      return partial;
    }

  return (TREE_VEC_LENGTH (pack) == 0
	  ? expand_empty_fold (t, complain)
	  : expand_right_fold (t, pack, complain));
}
/* Substitute into a binary right fold expression.

   Do this by building a single (non-empty) vector of arguments and
   building the expression from those elements.  */

static tree
tsubst_binary_right_fold (tree t, tree args, tsubst_flags_t complain,
			  tree in_decl)
{
  tree pack = tsubst_fold_expr_pack (t, args, complain, in_decl);
  if (pack == error_mark_node)
    return error_mark_node;
  tree init = tsubst_fold_expr_init (t, args, complain, in_decl);
  if (init == error_mark_node)
    return error_mark_node;

  /* Partial substitution: rebuild the fold with the new operands.  */
  if (PACK_EXPANSION_P (pack))
    {
      tree partial = copy_node (t);
      FOLD_EXPR_PACK (partial) = pack;
      FOLD_EXPR_INIT (partial) = init;
      return partial;
    }

  /* Append INIT so the whole thing reduces to a unary right fold.  */
  int n = TREE_VEC_LENGTH (pack);
  tree operands = make_tree_vec (n + 1);
  for (int i = 0; i < n; ++i)
    TREE_VEC_ELT (operands, i) = TREE_VEC_ELT (pack, i);
  TREE_VEC_ELT (operands, n) = init;
  return expand_right_fold (t, operands, complain);
}
/* Substitute ARGS into T, which is an pack expansion
   (i.e. TYPE_PACK_EXPANSION or EXPR_PACK_EXPANSION).  Returns a
   TREE_VEC with the substituted arguments, a PACK_EXPANSION_* node
   (if only a partial substitution could be performed) or
   ERROR_MARK_NODE if there was an error.  */

tree
tsubst_pack_expansion (tree t, tree args, tsubst_flags_t complain,
		       tree in_decl)
{
  tree pattern;
  tree pack, packs = NULL_TREE;
  bool unsubstituted_packs = false;
  int i, len = -1;
  tree result;
  hash_map<tree, tree> *saved_local_specializations = NULL;
  bool need_local_specializations = false;
  int levels;

  gcc_assert (PACK_EXPANSION_P (t));
  pattern = PACK_EXPANSION_PATTERN (t);

  /* Add in any args remembered from an earlier partial instantiation.  */
  args = add_to_template_args (PACK_EXPANSION_EXTRA_ARGS (t), args);

  levels = TMPL_ARGS_DEPTH (args);

  /* Determine the argument packs that will instantiate the parameter
     packs used in the expansion expression.  While we're at it,
     compute the number of arguments to be expanded and make sure it
     is consistent.  */
  for (pack = PACK_EXPANSION_PARAMETER_PACKS (t); pack;
       pack = TREE_CHAIN (pack))
    {
      tree parm_pack = TREE_VALUE (pack);
      tree arg_pack = NULL_TREE;
      tree orig_arg = NULL_TREE;
      int level = 0;

      /* sizeof...(bases)/__bases expansions are computed directly.  */
      if (TREE_CODE (parm_pack) == BASES)
	{
	  if (BASES_DIRECT (parm_pack))
	    return calculate_direct_bases (tsubst_expr (BASES_TYPE (parm_pack),
							args, complain, in_decl, false));
	  else
	    return calculate_bases (tsubst_expr (BASES_TYPE (parm_pack),
						 args, complain, in_decl, false));
	}
      if (TREE_CODE (parm_pack) == PARM_DECL)
	{
	  /* We know we have correct local_specializations if this
	     expansion is at function scope, or if we're dealing with a
	     local parameter in a requires expression; for the latter,
	     tsubst_requires_expr set it up appropriately.  */
	  if (PACK_EXPANSION_LOCAL_P (t) || CONSTRAINT_VAR_P (parm_pack))
	    arg_pack = retrieve_local_specialization (parm_pack);
	  else
	    /* We can't rely on local_specializations for a parameter
	       name used later in a function declaration (such as in a
	       late-specified return type).  Even if it exists, it might
	       have the wrong value for a recursive call.  */
	    need_local_specializations = true;

	  if (!arg_pack)
	    {
	      /* This parameter pack was used in an unevaluated context.  Just
		 make a dummy decl, since it's only used for its type.  */
	      arg_pack = tsubst_decl (parm_pack, args, complain);
	      if (arg_pack && DECL_PACK_P (arg_pack))
		/* Partial instantiation of the parm_pack, we can't build
		   up an argument pack yet.  */
		arg_pack = NULL_TREE;
	      else
		arg_pack = make_fnparm_pack (arg_pack);
	    }
	}
      else if (TREE_CODE (parm_pack) == FIELD_DECL)
	arg_pack = tsubst_copy (parm_pack, args, complain, in_decl);
      else
	{
	  /* A template parameter pack: look its argument up in ARGS,
	     if ARGS is deep enough to contain its level.  */
	  int idx;
	  template_parm_level_and_index (parm_pack, &level, &idx);

	  if (level <= levels)
	    arg_pack = TMPL_ARG (args, level, idx);
	}

      orig_arg = arg_pack;
      if (arg_pack && TREE_CODE (arg_pack) == ARGUMENT_PACK_SELECT)
	arg_pack = ARGUMENT_PACK_SELECT_FROM_PACK (arg_pack);

      if (arg_pack && !ARGUMENT_PACK_P (arg_pack))
	/* This can only happen if we forget to expand an argument
	   pack somewhere else.  Just return an error, silently.  */
	{
	  result = make_tree_vec (1);
	  TREE_VEC_ELT (result, 0) = error_mark_node;
	  return result;
	}

      if (arg_pack)
	{
	  int my_len =
	    TREE_VEC_LENGTH (ARGUMENT_PACK_ARGS (arg_pack));

	  /* Don't bother trying to do a partial substitution with
	     incomplete packs; we'll try again after deduction.  */
	  if (ARGUMENT_PACK_INCOMPLETE_P (arg_pack))
	    return t;

	  /* All argument packs in one expansion must have the same
	     length.  */
	  if (len < 0)
	    len = my_len;
	  else if (len != my_len)
	    {
	      if (!(complain & tf_error))
		/* Fail quietly.  */;
	      else if (TREE_CODE (t) == TYPE_PACK_EXPANSION)
		error ("mismatched argument pack lengths while expanding "
		       "%<%T%>",
		       pattern);
	      else
		error ("mismatched argument pack lengths while expanding "
		       "%<%E%>",
		       pattern);
	      return error_mark_node;
	    }

	  /* Keep track of the parameter packs and their corresponding
	     argument packs.  The original (possibly ARGUMENT_PACK_SELECT)
	     argument is stashed in TREE_TYPE for the restore loop below.  */
	  packs = tree_cons (parm_pack, arg_pack, packs);
	  TREE_TYPE (packs) = orig_arg;
	}
      else
	{
	  /* We can't substitute for this parameter pack.  We use a flag as
	     well as the missing_level counter because function parameter
	     packs don't have a level.  */
	  gcc_assert (processing_template_decl);
	  unsubstituted_packs = true;
	}
    }

  /* If the expansion is just T..., return the matching argument pack, unless
     we need to call convert_from_reference on all the elements.  This is an
     important optimization; see c++/68422.  */
  if (!unsubstituted_packs
      && TREE_PURPOSE (packs) == pattern)
    {
      tree args = ARGUMENT_PACK_ARGS (TREE_VALUE (packs));
      /* Types need no adjustment, nor does sizeof..., and if we still have
	 some pack expansion args we won't do anything yet.  */
      if (TREE_CODE (t) == TYPE_PACK_EXPANSION
	  || PACK_EXPANSION_SIZEOF_P (t)
	  || pack_expansion_args_count (args))
	return args;
      /* Also optimize expression pack expansions if we can tell that the
	 elements won't have reference type.  */
      tree type = TREE_TYPE (pattern);
      if (type && TREE_CODE (type) != REFERENCE_TYPE
	  && !PACK_EXPANSION_P (type)
	  && !WILDCARD_TYPE_P (type))
	return args;
      /* Otherwise use the normal path so we get convert_from_reference.  */
    }

  /* We cannot expand this expansion expression, because we don't have
     all of the argument packs we need.  */
  if (use_pack_expansion_extra_args_p (packs, len, unsubstituted_packs))
    {
      /* We got some full packs, but we can't substitute them in until we
	 have values for all the packs.  So remember these until then.  */

      t = make_pack_expansion (pattern);
      PACK_EXPANSION_EXTRA_ARGS (t) = args;
      return t;
    }
  else if (unsubstituted_packs)
    {
      /* There were no real arguments, we're just replacing a parameter
	 pack with another version of itself.  Substitute into the
	 pattern and return a PACK_EXPANSION_*.  The caller will need to
	 deal with that.  */
      if (TREE_CODE (t) == EXPR_PACK_EXPANSION)
	t = tsubst_expr (pattern, args, complain, in_decl,
			 /*integral_constant_expression_p=*/false);
      else
	t = tsubst (pattern, args, complain, in_decl);
      t = make_pack_expansion (t);
      return t;
    }

  gcc_assert (len >= 0);

  if (need_local_specializations)
    {
      /* We're in a late-specified return type, so create our own local
	 specializations map; the current map is either NULL or (in the
	 case of recursive unification) might have bindings that we don't
	 want to use or alter.  */
      saved_local_specializations = local_specializations;
      local_specializations = new hash_map<tree, tree>;
    }

  /* For each argument in each argument pack, substitute into the
     pattern.  ELEM_ARGS is a modifiable copy because
     gen_elem_of_pack_expansion_instantiation overwrites entries
     with ARGUMENT_PACK_SELECT nodes.  */
  result = make_tree_vec (len);
  tree elem_args = copy_template_args (args);
  for (i = 0; i < len; ++i)
    {
      t = gen_elem_of_pack_expansion_instantiation (pattern, packs,
						    i,
						    elem_args, complain,
						    in_decl);
      TREE_VEC_ELT (result, i) = t;
      if (t == error_mark_node)
	{
	  result = error_mark_node;
	  break;
	}
    }

  /* Update ARGS to restore the substitution from parameter packs to
     their argument packs (undoing the ARGUMENT_PACK_SELECT bindings
     made during the loop above).  */
  for (pack = packs; pack; pack = TREE_CHAIN (pack))
    {
      tree parm = TREE_PURPOSE (pack);

      if (TREE_CODE (parm) == PARM_DECL
	  || TREE_CODE (parm) == FIELD_DECL)
	register_local_specialization (TREE_TYPE (pack), parm);
      else
	{
	  int idx, level;

	  if (TREE_VALUE (pack) == NULL_TREE)
	    continue;

	  template_parm_level_and_index (parm, &level, &idx);

	  /* Update the corresponding argument.  */
	  if (TMPL_ARGS_HAVE_MULTIPLE_LEVELS (args))
	    TREE_VEC_ELT (TREE_VEC_ELT (args, level -1 ), idx) =
	      TREE_TYPE (pack);
	  else
	    TREE_VEC_ELT (args, idx) = TREE_TYPE (pack);
	}
    }

  if (need_local_specializations)
    {
      delete local_specializations;
      local_specializations = saved_local_specializations;
    }

  /* If the dependent pack arguments were such that we end up with only a
     single pack expansion again, there's no need to keep it in a TREE_VEC.  */
  if (len == 1 && TREE_CODE (result) == TREE_VEC
      && PACK_EXPANSION_P (TREE_VEC_ELT (result, 0)))
    return TREE_VEC_ELT (result, 0);

  return result;
}
/* Given PARM_DECL PARM, find the corresponding PARM_DECL in the template
   TMPL.  We do this using DECL_PARM_INDEX, which should work even with
   parameter packs; all parms generated from a function parameter pack will
   have the same DECL_PARM_INDEX.  */

tree
get_pattern_parm (tree parm, tree tmpl)
{
  tree pattern = DECL_TEMPLATE_RESULT (tmpl);
  tree patparm = NULL_TREE;

  if (DECL_ARTIFICIAL (parm))
    {
      /* Artificial parms are matched by name among the pattern's
	 artificial parms.  */
      for (tree p = DECL_ARGUMENTS (pattern); p; p = DECL_CHAIN (p))
	if (DECL_ARTIFICIAL (p) && DECL_NAME (parm) == DECL_NAME (p))
	  {
	    patparm = p;
	    break;
	  }
    }
  else
    {
      /* User parms are matched positionally via DECL_PARM_INDEX.  */
      patparm = FUNCTION_FIRST_USER_PARM (DECL_TEMPLATE_RESULT (tmpl));
      patparm = chain_index (DECL_PARM_INDEX (parm) - 1, patparm);
      gcc_assert (DECL_PARM_INDEX (patparm)
		  == DECL_PARM_INDEX (parm));
    }

  return patparm;
}
/* Make an argument pack out of the TREE_VEC VEC.  The kind of pack
   (type vs non-type) is chosen from the first element.  */

static tree
make_argument_pack (tree vec)
{
  tree first = TREE_VEC_ELT (vec, 0);
  tree pack;

  if (TYPE_P (first))
    pack = cxx_make_type (TYPE_ARGUMENT_PACK);
  else
    {
      pack = make_node (NONTYPE_ARGUMENT_PACK);
      TREE_TYPE (pack) = TREE_TYPE (first);
      TREE_CONSTANT (pack) = 1;
    }
  SET_ARGUMENT_PACK_ARGS (pack, vec);
  return pack;
}
/* Return an exact copy of template args T that can be modified
   independently.  */

static tree
copy_template_args (tree t)
{
  if (t == error_mark_node)
    return t;

  int len = TREE_VEC_LENGTH (t);
  tree dup = make_tree_vec (len);
  for (int i = 0; i < len; ++i)
    {
      tree elt = TREE_VEC_ELT (t, i);
      /* Nested TREE_VECs represent levels of args; copy them deeply.  */
      TREE_VEC_ELT (dup, i)
	= (elt && TREE_CODE (elt) == TREE_VEC)
	  ? copy_template_args (elt) : elt;
    }

  NON_DEFAULT_TEMPLATE_ARGS_COUNT (dup)
    = NON_DEFAULT_TEMPLATE_ARGS_COUNT (t);

  return dup;
}
/* Substitute ARGS into the vector or list of template arguments T.  */

static tree
tsubst_template_args (tree t, tree args, tsubst_flags_t complain, tree in_decl)
{
  tree orig_t = t;
  int len, need_new = 0, i, expanded_len_adjust = 0, out;
  tree *elts;

  if (t == error_mark_node)
    return error_mark_node;

  len = TREE_VEC_LENGTH (t);
  elts = XALLOCAVEC (tree, len);

  /* First pass: substitute each argument, recording whether anything
     changed and by how much pack expansions grow/shrink the vector.  */
  for (i = 0; i < len; i++)
    {
      tree orig_arg = TREE_VEC_ELT (t, i);
      tree new_arg;

      if (TREE_CODE (orig_arg) == TREE_VEC)
	/* A nested vector is a full level of args; recurse.  */
	new_arg = tsubst_template_args (orig_arg, args, complain, in_decl);
      else if (PACK_EXPANSION_P (orig_arg))
	{
	  /* Substitute into an expansion expression.  */
	  new_arg = tsubst_pack_expansion (orig_arg, args, complain, in_decl);

	  if (TREE_CODE (new_arg) == TREE_VEC)
	    /* Add to the expanded length adjustment the number of
	       expanded arguments.  We subtract one from this
	       measurement, because the argument pack expression
	       itself is already counted as 1 in
	       LEN.  EXPANDED_LEN_ADJUST can actually be negative, if
	       the argument pack is empty.  */
	    expanded_len_adjust += TREE_VEC_LENGTH (new_arg) - 1;
	}
      else if (ARGUMENT_PACK_P (orig_arg))
	{
	  /* Substitute into each of the arguments.  */
	  new_arg = TYPE_P (orig_arg)
	    ? cxx_make_type (TREE_CODE (orig_arg))
	    : make_node (TREE_CODE (orig_arg));

	  SET_ARGUMENT_PACK_ARGS (
	    new_arg,
	    tsubst_template_args (ARGUMENT_PACK_ARGS (orig_arg),
				  args, complain, in_decl));

	  if (ARGUMENT_PACK_ARGS (new_arg) == error_mark_node)
	    new_arg = error_mark_node;

	  if (TREE_CODE (new_arg) == NONTYPE_ARGUMENT_PACK) {
	    /* A pack of `auto' non-type args keeps its original type;
	       otherwise substitute into the pack's element type too.  */
	    if (type_uses_auto (TREE_TYPE (orig_arg)))
	      TREE_TYPE (new_arg) = TREE_TYPE (orig_arg);
	    else
	      TREE_TYPE (new_arg) = tsubst (TREE_TYPE (orig_arg), args,
					    complain, in_decl);

	    TREE_CONSTANT (new_arg) = TREE_CONSTANT (orig_arg);

	    if (TREE_TYPE (new_arg) == error_mark_node)
	      new_arg = error_mark_node;
	  }
	}
      else
	new_arg = tsubst_template_arg (orig_arg, args, complain, in_decl);

      if (new_arg == error_mark_node)
	return error_mark_node;

      elts[i] = new_arg;
      if (new_arg != orig_arg)
	need_new = 1;
    }

  if (!need_new)
    return t;

  /* Make space for the expanded arguments coming from template
     argument packs.  */
  t = make_tree_vec (len + expanded_len_adjust);

  /* ORIG_T can contain TREE_VECs.  That happens if ORIG_T contains the
     arguments for a member template.
     In that case each TREE_VEC in ORIG_T represents a level of template
     arguments, and ORIG_T won't carry any non defaulted argument count.
     It will rather be the nested TREE_VECs that will carry one.
     In other words, ORIG_T carries a non defaulted argument count only
     if it doesn't contain any nested TREE_VEC.  */
  if (NON_DEFAULT_TEMPLATE_ARGS_COUNT (orig_t))
    {
      int count = GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (orig_t);
      count += expanded_len_adjust;
      SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (t, count);
    }

  /* Second pass: copy the substituted arguments into the new vector,
     splicing expanded packs in flat.  */
  for (i = 0, out = 0; i < len; i++)
    {
      if ((PACK_EXPANSION_P (TREE_VEC_ELT (orig_t, i))
	   || ARGUMENT_PACK_P (TREE_VEC_ELT (orig_t, i)))
	  && TREE_CODE (elts[i]) == TREE_VEC)
	{
	  int idx;

	  /* Now expand the template argument pack "in place".  */
	  for (idx = 0; idx < TREE_VEC_LENGTH (elts[i]); idx++, out++)
	    TREE_VEC_ELT (t, out) = TREE_VEC_ELT (elts[i], idx);
	}
      else
	{
	  TREE_VEC_ELT (t, out) = elts[i];
	  out++;
	}
    }

  return t;
}
/* Substitute ARGS into one level PARMS of template parameters.  */

static tree
tsubst_template_parms_level (tree parms, tree args, tsubst_flags_t complain)
{
  if (parms == error_mark_node)
    return error_mark_node;

  int len = TREE_VEC_LENGTH (parms);
  tree level = make_tree_vec (len);

  for (int i = 0; i < len; ++i)
    {
      tree tuple = TREE_VEC_ELT (parms, i);
      /* Erroneous parms leave a hole in the new vector.  */
      if (tuple == error_mark_node)
	continue;
      TREE_VEC_ELT (level, i) = tsubst_template_parm (tuple, args, complain);
    }

  return level;
}
/* Return the result of substituting ARGS into the template parameters
   given by PARMS.  If there are m levels of ARGS and m + n levels of
   PARMS, then the result will contain n levels of PARMS.  For
   example, if PARMS is `template <class T> template <class U>
   template <T*, U, class V>' and ARGS is {{int}, {double}} then the
   result will be `template <int*, double, class V>'.  */

static tree
tsubst_template_parms (tree parms, tree args, tsubst_flags_t complain)
{
  tree r = NULL_TREE;
  tree* new_parms;

  /* When substituting into a template, we must set
     PROCESSING_TEMPLATE_DECL as the template parameters may be
     dependent if they are based on one-another, and the dependency
     predicates are short-circuit outside of templates.  */
  ++processing_template_decl;

  /* Walk the outermost PARMS levels that are NOT consumed by ARGS,
     appending each substituted level to R via the NEW_PARMS tail
     pointer (pointer-to-pointer chain building).  */
  for (new_parms = &r;
       parms && TMPL_PARMS_DEPTH (parms) > TMPL_ARGS_DEPTH (args);
       new_parms = &(TREE_CHAIN (*new_parms)),
	 parms = TREE_CHAIN (parms))
    {
      tree new_vec = tsubst_template_parms_level (TREE_VALUE (parms),
						  args, complain);
      /* The TREE_PURPOSE records the level number remaining after
	 the ARGS levels are stripped off.  */
      *new_parms =
	tree_cons (size_int (TMPL_PARMS_DEPTH (parms)
			     - TMPL_ARGS_DEPTH (args)),
		   new_vec, NULL_TREE);
    }

  --processing_template_decl;

  return r;
}
/* Return the result of substituting ARGS into one template parameter
   given by T.  T must be a TREE_LIST whose TREE_VALUE is the template
   parameter and whose TREE_PURPOSE is the default argument of the
   template parameter.  */

static tree
tsubst_template_parm (tree t, tree args, tsubst_flags_t complain)
{
  if (args == NULL_TREE || t == NULL_TREE || t == error_mark_node)
    return t;

  gcc_assert (TREE_CODE (t) == TREE_LIST);

  /* Substitute the parameter decl first, then validate non-type parms.  */
  tree parm_decl = tsubst (TREE_VALUE (t), args, complain, NULL_TREE);
  if (TREE_CODE (parm_decl) == PARM_DECL
      && invalid_nontype_parm_type_p (TREE_TYPE (parm_decl), complain))
    parm_decl = error_mark_node;

  tree default_value = tsubst_template_arg (TREE_PURPOSE (t), args,
					    complain, NULL_TREE);

  return build_tree_list (default_value, parm_decl);
}
/* Substitute the ARGS into the indicated aggregate (or enumeration)
   type T.  If T is not an aggregate or enumeration type, it is
   handled as if by tsubst.  IN_DECL is as for tsubst.  If
   ENTERING_SCOPE is nonzero, T is the context for a template which
   we are presently tsubst'ing.  Return the substituted value.  */

static tree
tsubst_aggr_type (tree t,
		  tree args,
		  tsubst_flags_t complain,
		  tree in_decl,
		  int entering_scope)
{
  if (t == NULL_TREE)
    return NULL_TREE;

  switch (TREE_CODE (t))
    {
    case RECORD_TYPE:
      /* Pointer-to-member-function types are really substituted via
	 their underlying function type.  */
      if (TYPE_PTRMEMFUNC_P (t))
	return tsubst (TYPE_PTRMEMFUNC_FN_TYPE (t), args, complain, in_decl);

      /* Fall through.  */
    case ENUMERAL_TYPE:
    case UNION_TYPE:
      if (TYPE_TEMPLATE_INFO (t) && uses_template_parms (t))
	{
	  tree argvec;
	  tree context;
	  tree r;
	  int saved_unevaluated_operand;
	  int saved_inhibit_evaluation_warnings;

	  /* In "sizeof(X<I>)" we need to evaluate "I".  Save and
	     clear the globals that would suppress that evaluation,
	     restoring them before returning.  */
	  saved_unevaluated_operand = cp_unevaluated_operand;
	  cp_unevaluated_operand = 0;
	  saved_inhibit_evaluation_warnings = c_inhibit_evaluation_warnings;
	  c_inhibit_evaluation_warnings = 0;

	  /* First, determine the context for the type we are looking
	     up.  */
	  context = TYPE_CONTEXT (t);
	  if (context && TYPE_P (context))
	    {
	      context = tsubst_aggr_type (context, args, complain,
					  in_decl, /*entering_scope=*/1);
	      /* If context is a nested class inside a class template,
		 it may still need to be instantiated (c++/33959).  */
	      context = complete_type (context);
	    }

	  /* Then, figure out what arguments are appropriate for the
	     type we are trying to find.  For example, given:

	       template <class T> struct S;
	       template <class T, class U> void f(T, U) { S<U> su; }

	     and supposing that we are instantiating f<int, double>,
	     then our ARGS will be {int, double}, but, when looking up
	     S we only want {double}.  */
	  argvec = tsubst_template_args (TYPE_TI_ARGS (t), args,
					 complain, in_decl);
	  if (argvec == error_mark_node)
	    r = error_mark_node;
	  else
	    {
	      r = lookup_template_class (t, argvec, in_decl, context,
					 entering_scope, complain);
	      /* Reapply T's cv-qualifiers to the looked-up type.  */
	      r = cp_build_qualified_type_real (r, cp_type_quals (t), complain);
	    }

	  cp_unevaluated_operand = saved_unevaluated_operand;
	  c_inhibit_evaluation_warnings = saved_inhibit_evaluation_warnings;

	  return r;
	}
      else
	/* This is not a template type, so there's nothing to do.  */
	return t;

    default:
      return tsubst (t, args, complain, in_decl);
    }
}
/* Substitute into the default argument ARG (a default argument for
   FN), which has the indicated TYPE.  */

tree
tsubst_default_argument (tree fn, tree type, tree arg, tsubst_flags_t complain)
{
  tree saved_class_ptr = NULL_TREE;
  tree saved_class_ref = NULL_TREE;
  /* Remember the error counts so we can tell below whether this
     substitution produced new diagnostics.  */
  int errs = errorcount + sorrycount;

  /* This can happen in invalid code.  */
  if (TREE_CODE (arg) == DEFAULT_ARG)
    return arg;

  /* This default argument came from a template.  Instantiate the
     default argument here, not in tsubst.  In the case of
     something like:

       template <class T>
       struct S {
	 static T t();
	 void f(T = t());
       };

     we must be careful to do name lookup in the scope of S<T>,
     rather than in the current class.  */
  push_access_scope (fn);

  /* The "this" pointer is not valid in a default argument.  */
  if (cfun)
    {
      saved_class_ptr = current_class_ptr;
      cp_function_chain->x_current_class_ptr = NULL_TREE;
      saved_class_ref = current_class_ref;
      cp_function_chain->x_current_class_ref = NULL_TREE;
    }

  push_deferring_access_checks(dk_no_deferred);
  /* The default argument expression may cause implicitly defined
     member functions to be synthesized, which will result in garbage
     collection.  We must treat this situation as if we were within
     the body of function so as to avoid collecting live data on the
     stack.  */
  ++function_depth;
  arg = tsubst_expr (arg, DECL_TI_ARGS (fn),
		     complain, NULL_TREE,
		     /*integral_constant_expression_p=*/false);
  --function_depth;
  pop_deferring_access_checks();

  /* Restore the "this" pointer.  */
  if (cfun)
    {
      cp_function_chain->x_current_class_ptr = saved_class_ptr;
      cp_function_chain->x_current_class_ref = saved_class_ref;
    }

  /* If the substitution emitted any diagnostics, add a note pointing
     at the default argument that triggered them.  */
  if (errorcount+sorrycount > errs
      && (complain & tf_warning_or_error))
    inform (input_location,
	    "  when instantiating default argument for call to %D", fn);

  /* Make sure the default argument is reasonable.  */
  arg = check_default_argument (type, arg, complain);

  pop_access_scope (fn);

  return arg;
}
/* Substitute into all the default arguments for FN.  */

static void
tsubst_default_arguments (tree fn, tsubst_flags_t complain)
{
  tree tmpl_args = DECL_TI_ARGS (fn);

  /* If this function is not yet instantiated (its args are still
     dependent), we certainly don't need its default arguments.  */
  if (uses_template_parms (tmpl_args))
    return;
  /* Don't do this again for clones.  */
  if (DECL_CLONED_FUNCTION_P (fn))
    return;

  for (tree arg = TYPE_ARG_TYPES (TREE_TYPE (fn));
       arg;
       arg = TREE_CHAIN (arg))
    {
      tree dflt = TREE_PURPOSE (arg);
      if (dflt)
	TREE_PURPOSE (arg)
	  = tsubst_default_argument (fn, TREE_VALUE (arg), dflt, complain);
    }
}
/* Substitute the ARGS into the T, which is a _DECL. Return the
result of the substitution. Issue error and warning messages under
control of COMPLAIN. */
static tree
tsubst_decl (tree t, tree args, tsubst_flags_t complain)
{
  /* RETURN jumps to "out" below so that INPUT_LOCATION is restored on
     every exit path, not just the fall-through one.  */
#define RETURN(EXP) do { r = (EXP); goto out; } while(0)
  location_t saved_loc;
  tree r = NULL_TREE;
  tree in_decl = t;
  hashval_t hash = 0;

  /* Set the filename and linenumber to improve error-reporting.  */
  saved_loc = input_location;
  input_location = DECL_SOURCE_LOCATION (t);

  switch (TREE_CODE (t))
    {
    case TEMPLATE_DECL:
      {
	/* We can get here when processing a member function template,
	   member class template, or template template parameter.  */
	tree decl = DECL_TEMPLATE_RESULT (t);
	tree spec;
	tree tmpl_args;
	tree full_args;

	if (DECL_TEMPLATE_TEMPLATE_PARM_P (t))
	  {
	    /* Template template parameter is treated here.  */
	    tree new_type = tsubst (TREE_TYPE (t), args, complain, in_decl);
	    if (new_type == error_mark_node)
	      r = error_mark_node;
	    /* If we get a real template back, return it.  This can happen in
	       the context of most_specialized_partial_spec.  */
	    else if (TREE_CODE (new_type) == TEMPLATE_DECL)
	      r = new_type;
	    else
	      /* The new TEMPLATE_DECL was built in
		 reduce_template_parm_level.  */
	      r = TEMPLATE_TEMPLATE_PARM_TEMPLATE_DECL (new_type);
	    break;
	  }

	/* We might already have an instance of this template.
	   The ARGS are for the surrounding class type, so the
	   full args contain the tsubst'd args for the context,
	   plus the innermost args from the template decl.  */
	tmpl_args = DECL_CLASS_TEMPLATE_P (t)
	  ? CLASSTYPE_TI_ARGS (TREE_TYPE (t))
	  : DECL_TI_ARGS (DECL_TEMPLATE_RESULT (t));

	/* Because this is a template, the arguments will still be
	   dependent, even after substitution.  If
	   PROCESSING_TEMPLATE_DECL is not set, the dependency
	   predicates will short-circuit.  */
	++processing_template_decl;
	full_args = tsubst_template_args (tmpl_args, args,
					  complain, in_decl);
	--processing_template_decl;
	if (full_args == error_mark_node)
	  RETURN (error_mark_node);

	/* If this is a default template template argument,
	   tsubst might not have changed anything.  */
	if (full_args == tmpl_args)
	  RETURN (t);

	hash = hash_tmpl_and_args (t, full_args);
	spec = retrieve_specialization (t, full_args, hash);
	if (spec != NULL_TREE)
	  {
	    r = spec;
	    break;
	  }

	/* Make a new template decl.  It will be similar to the
	   original, but will record the current template arguments.
	   We also create a new function declaration, which is just
	   like the old one, but points to this new template, rather
	   than the old one.  */
	r = copy_decl (t);
	gcc_assert (DECL_LANG_SPECIFIC (r) != 0);
	DECL_CHAIN (r) = NULL_TREE;

	// Build new template info linking to the original template decl.
	DECL_TEMPLATE_INFO (r) = build_template_info (t, args);

	if (TREE_CODE (decl) == TYPE_DECL
	    && !TYPE_DECL_ALIAS_P (decl))
	  {
	    /* A class template (but not an alias template): substitute
	       into the underlying type under PROCESSING_TEMPLATE_DECL
	       so dependency is preserved.  */
	    tree new_type;
	    ++processing_template_decl;
	    new_type = tsubst (TREE_TYPE (t), args, complain, in_decl);
	    --processing_template_decl;
	    if (new_type == error_mark_node)
	      RETURN (error_mark_node);

	    TREE_TYPE (r) = new_type;
	    /* For a partial specialization, we need to keep pointing to
	       the primary template.  */
	    if (!DECL_TEMPLATE_SPECIALIZATION (t))
	      CLASSTYPE_TI_TEMPLATE (new_type) = r;
	    DECL_TEMPLATE_RESULT (r) = TYPE_MAIN_DECL (new_type);
	    DECL_TI_ARGS (r) = CLASSTYPE_TI_ARGS (new_type);
	    DECL_CONTEXT (r) = TYPE_CONTEXT (new_type);
	  }
	else
	  {
	    /* A function template, variable template, or alias
	       template: substitute into the pattern decl itself.  */
	    tree new_decl;
	    ++processing_template_decl;
	    new_decl = tsubst (decl, args, complain, in_decl);
	    --processing_template_decl;
	    if (new_decl == error_mark_node)
	      RETURN (error_mark_node);

	    DECL_TEMPLATE_RESULT (r) = new_decl;
	    DECL_TI_TEMPLATE (new_decl) = r;
	    TREE_TYPE (r) = TREE_TYPE (new_decl);
	    DECL_TI_ARGS (r) = DECL_TI_ARGS (new_decl);
	    DECL_CONTEXT (r) = DECL_CONTEXT (new_decl);
	  }

	SET_DECL_IMPLICIT_INSTANTIATION (r);
	DECL_TEMPLATE_INSTANTIATIONS (r) = NULL_TREE;
	DECL_TEMPLATE_SPECIALIZATIONS (r) = NULL_TREE;

	/* The template parameters for this new template are all the
	   template parameters for the old template, except the
	   outermost level of parameters.  */
	DECL_TEMPLATE_PARMS (r)
	  = tsubst_template_parms (DECL_TEMPLATE_PARMS (t), args,
				   complain);

	if (PRIMARY_TEMPLATE_P (t))
	  DECL_PRIMARY_TEMPLATE (r) = r;

	if (TREE_CODE (decl) != TYPE_DECL && !VAR_P (decl))
	  /* Record this non-type partial instantiation.  */
	  register_specialization (r, t,
				   DECL_TI_ARGS (DECL_TEMPLATE_RESULT (r)),
				   false, hash);
      }
      break;

    case FUNCTION_DECL:
      {
	tree ctx;
	tree argvec = NULL_TREE;
	tree *friends;
	tree gen_tmpl;
	tree type;
	int member;
	int args_depth;
	int parms_depth;

	/* Nobody should be tsubst'ing into non-template functions.  */
	gcc_assert (DECL_TEMPLATE_INFO (t) != NULL_TREE);

	if (TREE_CODE (DECL_TI_TEMPLATE (t)) == TEMPLATE_DECL)
	  {
	    tree spec;

	    /* If T is not dependent, just return it.  */
	    if (!uses_template_parms (DECL_TI_ARGS (t)))
	      RETURN (t);

	    /* Calculate the most general template of which R is a
	       specialization, and the complete set of arguments used to
	       specialize R.  */
	    gen_tmpl = most_general_template (DECL_TI_TEMPLATE (t));
	    argvec = tsubst_template_args (DECL_TI_ARGS
					   (DECL_TEMPLATE_RESULT
					    (DECL_TI_TEMPLATE (t))),
					   args, complain, in_decl);
	    if (argvec == error_mark_node)
	      RETURN (error_mark_node);

	    /* Check to see if we already have this specialization.  */
	    hash = hash_tmpl_and_args (gen_tmpl, argvec);
	    spec = retrieve_specialization (gen_tmpl, argvec, hash);
	    if (spec)
	      {
		r = spec;
		break;
	      }

	    /* We can see more levels of arguments than parameters if
	       there was a specialization of a member template, like
	       this:

		 template <class T> struct S { template <class U> void f(); }
		 template <> template <class U> void S<int>::f(U);

	       Here, we'll be substituting into the specialization,
	       because that's where we can find the code we actually
	       want to generate, but we'll have enough arguments for
	       the most general template.

	       We also deal with the peculiar case:

		 template <class T> struct S {
		   template <class U> friend void f();
		 };
		 template <class U> void f() {}
		 template S<int>;
		 template void f<double>();

	       Here, the ARGS for the instantiation of will be {int,
	       double}.  But, we only need as many ARGS as there are
	       levels of template parameters in CODE_PATTERN.  We are
	       careful not to get fooled into reducing the ARGS in
	       situations like:

		 template <class T> struct S { template <class U> void f(U); }
		 template <class T> template <> void S<T>::f(int) {}

	       which we can spot because the pattern will be a
	       specialization in this case.  */
	    args_depth = TMPL_ARGS_DEPTH (args);
	    parms_depth =
	      TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (DECL_TI_TEMPLATE (t)));
	    if (args_depth > parms_depth
		&& !DECL_TEMPLATE_SPECIALIZATION (t))
	      args = get_innermost_template_args (args, parms_depth);
	  }
	else
	  {
	    /* This special case arises when we have something like this:

		 template <class T> struct S {
		   friend void f<int>(int, double);
		 };

	       Here, the DECL_TI_TEMPLATE for the friend declaration
	       will be an IDENTIFIER_NODE.  We are being called from
	       tsubst_friend_function, and we want only to create a
	       new decl (R) with appropriate types so that we can call
	       determine_specialization.  */
	    gen_tmpl = NULL_TREE;
	  }

	/* MEMBER is 2 for a constructor/destructor, 1 for any other
	   class member, 0 for a namespace-scope function.  */
	if (DECL_CLASS_SCOPE_P (t))
	  {
	    if (DECL_NAME (t) == constructor_name (DECL_CONTEXT (t)))
	      member = 2;
	    else
	      member = 1;
	    ctx = tsubst_aggr_type (DECL_CONTEXT (t), args,
				    complain, t, /*entering_scope=*/1);
	  }
	else
	  {
	    member = 0;
	    ctx = DECL_CONTEXT (t);
	  }
	type = tsubst (TREE_TYPE (t), args, complain|tf_fndecl_type, in_decl);
	if (type == error_mark_node)
	  RETURN (error_mark_node);

	/* If we hit excessive deduction depth, the type is bogus even if
	   it isn't error_mark_node, so don't build a decl.  */
	if (excessive_deduction_depth)
	  RETURN (error_mark_node);

	/* We do NOT check for matching decls pushed separately at this
	   point, as they may not represent instantiations of this
	   template, and in any case are considered separate under the
	   discrete model.  */
	r = copy_decl (t);
	DECL_USE_TEMPLATE (r) = 0;
	TREE_TYPE (r) = type;
	/* Clear out the mangled name and RTL for the instantiation.  */
	SET_DECL_ASSEMBLER_NAME (r, NULL_TREE);
	SET_DECL_RTL (r, NULL);
	/* Leave DECL_INITIAL set on deleted instantiations.  */
	if (!DECL_DELETED_FN (r))
	  DECL_INITIAL (r) = NULL_TREE;
	DECL_CONTEXT (r) = ctx;

	/* OpenMP UDRs have the only argument a reference to the declared
	   type.  We want to diagnose if the declared type is a reference,
	   which is invalid, but as references to references are usually
	   quietly merged, diagnose it here.  */
	if (DECL_OMP_DECLARE_REDUCTION_P (t))
	  {
	    tree argtype
	      = TREE_TYPE (TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (t))));
	    argtype = tsubst (argtype, args, complain, in_decl);
	    if (TREE_CODE (argtype) == REFERENCE_TYPE)
	      error_at (DECL_SOURCE_LOCATION (t),
			"reference type %qT in "
			"%<#pragma omp declare reduction%>", argtype);
	    if (strchr (IDENTIFIER_POINTER (DECL_NAME (t)), '~') == NULL)
	      DECL_NAME (r) = omp_reduction_id (ERROR_MARK, DECL_NAME (t),
						argtype);
	  }

	if (member && DECL_CONV_FN_P (r))
	  /* Type-conversion operator.  Reconstruct the name, in
	     case it's the name of one of the template's parameters.  */
	  DECL_NAME (r) = mangle_conv_op_name_for_type (TREE_TYPE (type));

	DECL_ARGUMENTS (r) = tsubst (DECL_ARGUMENTS (t), args,
				     complain, t);
	DECL_RESULT (r) = NULL_TREE;

	TREE_STATIC (r) = 0;
	TREE_PUBLIC (r) = TREE_PUBLIC (t);
	DECL_EXTERNAL (r) = 1;
	/* If this is an instantiation of a function with internal
	   linkage, we already know what object file linkage will be
	   assigned to the instantiation.  */
	DECL_INTERFACE_KNOWN (r) = !TREE_PUBLIC (r);
	DECL_DEFER_OUTPUT (r) = 0;
	DECL_CHAIN (r) = NULL_TREE;
	DECL_PENDING_INLINE_INFO (r) = 0;
	DECL_PENDING_INLINE_P (r) = 0;
	DECL_SAVED_TREE (r) = NULL_TREE;
	DECL_STRUCT_FUNCTION (r) = NULL;
	TREE_USED (r) = 0;
	/* We'll re-clone as appropriate in instantiate_template.  */
	DECL_CLONED_FUNCTION (r) = NULL_TREE;

	/* If we aren't complaining now, return on error before we register
	   the specialization so that we'll complain eventually.  */
	if ((complain & tf_error) == 0
	    && IDENTIFIER_OPNAME_P (DECL_NAME (r))
	    && !grok_op_properties (r, /*complain=*/false))
	  RETURN (error_mark_node);

	/* When instantiating a constrained member, substitute
	   into the constraints to create a new constraint.  */
	if (tree ci = get_constraints (t))
	  if (member)
	    {
	      ci = tsubst_constraint_info (ci, argvec, complain, NULL_TREE);
	      set_constraints (r, ci);
	    }

	/* Set up the DECL_TEMPLATE_INFO for R.  There's no need to do
	   this in the special friend case mentioned above where
	   GEN_TMPL is NULL.  */
	if (gen_tmpl)
	  {
	    DECL_TEMPLATE_INFO (r)
	      = build_template_info (gen_tmpl, argvec);
	    SET_DECL_IMPLICIT_INSTANTIATION (r);

	    tree new_r
	      = register_specialization (r, gen_tmpl, argvec, false, hash);
	    if (new_r != r)
	      /* We instantiated this while substituting into
		 the type earlier (template/friend54.C).  */
	      RETURN (new_r);

	    /* We're not supposed to instantiate default arguments
	       until they are called, for a template.  But, for a
	       declaration like:

		 template <class T> void f ()
		 { extern void g(int i = T()); }

	       we should do the substitution when the template is
	       instantiated.  We handle the member function case in
	       instantiate_class_template since the default arguments
	       might refer to other members of the class.  */
	    if (!member
		&& !PRIMARY_TEMPLATE_P (gen_tmpl)
		&& !uses_template_parms (argvec))
	      tsubst_default_arguments (r, complain);
	  }
	else
	  DECL_TEMPLATE_INFO (r) = NULL_TREE;

	/* Copy the list of befriending classes.  */
	for (friends = &DECL_BEFRIENDING_CLASSES (r);
	     *friends;
	     friends = &TREE_CHAIN (*friends))
	  {
	    *friends = copy_node (*friends);
	    TREE_VALUE (*friends) = tsubst (TREE_VALUE (*friends),
					    args, complain,
					    in_decl);
	  }

	if (DECL_CONSTRUCTOR_P (r) || DECL_DESTRUCTOR_P (r))
	  {
	    maybe_retrofit_in_chrg (r);
	    if (DECL_CONSTRUCTOR_P (r))
	      grok_ctor_properties (ctx, r);
	    /* If this is an instantiation of a member template, clone it.
	       If it isn't, that'll be handled by
	       clone_constructors_and_destructors.  */
	    if (PRIMARY_TEMPLATE_P (gen_tmpl))
	      clone_function_decl (r, /*update_method_vec_p=*/0);
	  }
	else if ((complain & tf_error) != 0
		 && IDENTIFIER_OPNAME_P (DECL_NAME (r))
		 && !grok_op_properties (r, /*complain=*/true))
	  RETURN (error_mark_node);

	if (DECL_FRIEND_P (t) && DECL_FRIEND_CONTEXT (t))
	  SET_DECL_FRIEND_CONTEXT (r,
				   tsubst (DECL_FRIEND_CONTEXT (t),
					   args, complain, in_decl));

	/* Possibly limit visibility based on template args.  */
	DECL_VISIBILITY (r) = VISIBILITY_DEFAULT;
	if (DECL_VISIBILITY_SPECIFIED (t))
	  {
	    DECL_VISIBILITY_SPECIFIED (r) = 0;
	    DECL_ATTRIBUTES (r)
	      = remove_attribute ("visibility", DECL_ATTRIBUTES (r));
	  }
	determine_visibility (r);
	if (DECL_DEFAULTED_OUTSIDE_CLASS_P (r)
	    && !processing_template_decl)
	  defaulted_late_check (r);

	apply_late_template_attributes (&r, DECL_ATTRIBUTES (r), 0,
					args, complain, in_decl);
      }
      break;

    case PARM_DECL:
      {
	tree type = NULL_TREE;
	int i, len = 1;
	tree expanded_types = NULL_TREE;
	tree prev_r = NULL_TREE;
	tree first_r = NULL_TREE;

	if (DECL_PACK_P (t))
	  {
	    /* If there is a local specialization that isn't a
	       parameter pack, it means that we're doing a "simple"
	       substitution from inside tsubst_pack_expansion.  Just
	       return the local specialization (which will be a single
	       parm).  */
	    tree spec = retrieve_local_specialization (t);
	    if (spec
		&& TREE_CODE (spec) == PARM_DECL
		&& TREE_CODE (TREE_TYPE (spec)) != TYPE_PACK_EXPANSION)
	      RETURN (spec);

	    /* Expand the TYPE_PACK_EXPANSION that provides the types for
	       the parameters in this function parameter pack.  */
	    expanded_types = tsubst_pack_expansion (TREE_TYPE (t), args,
						    complain, in_decl);
	    if (TREE_CODE (expanded_types) == TREE_VEC)
	      {
		len = TREE_VEC_LENGTH (expanded_types);

		/* Zero-length parameter packs are boring.  Just substitute
		   into the chain.  */
		if (len == 0)
		  RETURN (tsubst (TREE_CHAIN (t), args, complain,
				  TREE_CHAIN (t)));
	      }
	    else
	      {
		/* All we did was update the type.  Make a note of that.  */
		type = expanded_types;
		expanded_types = NULL_TREE;
	      }
	  }

	/* Loop through all of the parameters we'll build.  When T is
	   a function parameter pack, LEN is the number of expanded
	   types in EXPANDED_TYPES; otherwise, LEN is 1.  */
	r = NULL_TREE;
	for (i = 0; i < len; ++i)
	  {
	    prev_r = r;
	    r = copy_node (t);
	    if (DECL_TEMPLATE_PARM_P (t))
	      SET_DECL_TEMPLATE_PARM_P (r);

	    if (expanded_types)
	      /* We're on the Ith parameter of the function parameter
		 pack.  */
	      {
		/* Get the Ith type.  */
		type = TREE_VEC_ELT (expanded_types, i);

		/* Rename the parameter to include the index.  */
		DECL_NAME (r)
		  = make_ith_pack_parameter_name (DECL_NAME (r), i);
	      }
	    else if (!type)
	      /* We're dealing with a normal parameter.  */
	      type = tsubst (TREE_TYPE (t), args, complain, in_decl);

	    type = type_decays_to (type);
	    TREE_TYPE (r) = type;
	    cp_apply_type_quals_to_decl (cp_type_quals (type), r);

	    if (DECL_INITIAL (r))
	      {
		if (TREE_CODE (DECL_INITIAL (r)) != TEMPLATE_PARM_INDEX)
		  DECL_INITIAL (r) = TREE_TYPE (r);
		else
		  DECL_INITIAL (r) = tsubst (DECL_INITIAL (r), args,
					     complain, in_decl);
	      }

	    DECL_CONTEXT (r) = NULL_TREE;

	    if (!DECL_TEMPLATE_PARM_P (r))
	      DECL_ARG_TYPE (r) = type_passed_as (type);

	    apply_late_template_attributes (&r, DECL_ATTRIBUTES (r), 0,
					    args, complain, in_decl);

	    /* Keep track of the first new parameter we
	       generate.  That's what will be returned to the
	       caller.  */
	    if (!first_r)
	      first_r = r;

	    /* Build a proper chain of parameters when substituting
	       into a function parameter pack.  */
	    if (prev_r)
	      DECL_CHAIN (prev_r) = r;
	  }

	/* If cp_unevaluated_operand is set, we're just looking for a
	   single dummy parameter, so don't keep going.  */
	if (DECL_CHAIN (t) && !cp_unevaluated_operand)
	  DECL_CHAIN (r) = tsubst (DECL_CHAIN (t), args,
				   complain, DECL_CHAIN (t));

	/* FIRST_R contains the start of the chain we've built.  */
	r = first_r;
      }
      break;

    case FIELD_DECL:
      {
	tree type = NULL_TREE;
	tree vec = NULL_TREE;
	tree expanded_types = NULL_TREE;
	int len = 1;

	if (PACK_EXPANSION_P (TREE_TYPE (t)))
	  {
	    /* This field is a lambda capture pack.  Return a TREE_VEC of
	       the expanded fields to instantiate_class_template_1 and
	       store them in the specializations hash table as a
	       NONTYPE_ARGUMENT_PACK so that tsubst_copy can find them.  */
	    expanded_types = tsubst_pack_expansion (TREE_TYPE (t), args,
						    complain, in_decl);
	    if (TREE_CODE (expanded_types) == TREE_VEC)
	      {
		len = TREE_VEC_LENGTH (expanded_types);
		vec = make_tree_vec (len);
	      }
	    else
	      {
		/* All we did was update the type.  Make a note of that.  */
		type = expanded_types;
		expanded_types = NULL_TREE;
	      }
	  }

	for (int i = 0; i < len; ++i)
	  {
	    r = copy_decl (t);
	    if (expanded_types)
	      {
		type = TREE_VEC_ELT (expanded_types, i);
		DECL_NAME (r)
		  = make_ith_pack_parameter_name (DECL_NAME (r), i);
	      }
	    else if (!type)
	      type = tsubst (TREE_TYPE (t), args, complain, in_decl);

	    if (type == error_mark_node)
	      RETURN (error_mark_node);
	    TREE_TYPE (r) = type;
	    cp_apply_type_quals_to_decl (cp_type_quals (type), r);

	    if (DECL_C_BIT_FIELD (r))
	      /* For bit-fields, DECL_INITIAL gives the number of bits.  For
		 non-bit-fields DECL_INITIAL is a non-static data member
		 initializer, which gets deferred instantiation.  */
	      DECL_INITIAL (r)
		= tsubst_expr (DECL_INITIAL (t), args,
			       complain, in_decl,
			       /*integral_constant_expression_p=*/true);
	    else if (DECL_INITIAL (t))
	      {
		/* Set up DECL_TEMPLATE_INFO so that we can get at the
		   NSDMI in perform_member_init.  Still set DECL_INITIAL
		   so that we know there is one.  */
		DECL_INITIAL (r) = void_node;
		gcc_assert (DECL_LANG_SPECIFIC (r) == NULL);
		retrofit_lang_decl (r);
		DECL_TEMPLATE_INFO (r) = build_template_info (t, args);
	      }
	    /* We don't have to set DECL_CONTEXT here; it is set by
	       finish_member_declaration.  */
	    DECL_CHAIN (r) = NULL_TREE;

	    apply_late_template_attributes (&r, DECL_ATTRIBUTES (r), 0,
					    args, complain, in_decl);

	    if (vec)
	      TREE_VEC_ELT (vec, i) = r;
	  }

	if (vec)
	  {
	    /* Package the expanded capture fields so tsubst_copy can
	       retrieve them later via retrieve_specialization.  */
	    r = vec;
	    tree pack = make_node (NONTYPE_ARGUMENT_PACK);
	    tree tpack = cxx_make_type (TYPE_ARGUMENT_PACK);
	    SET_ARGUMENT_PACK_ARGS (pack, vec);
	    SET_ARGUMENT_PACK_ARGS (tpack, expanded_types);
	    TREE_TYPE (pack) = tpack;
	    register_specialization (pack, t, args, false, 0);
	  }
      }
      break;

    case USING_DECL:
      /* We reach here only for member using decls.  We also need to check
	 uses_template_parms because DECL_DEPENDENT_P is not set for a
	 using-declaration that designates a member of the current
	 instantiation (c++/53549).  */
      if (DECL_DEPENDENT_P (t)
	  || uses_template_parms (USING_DECL_SCOPE (t)))
	{
	  tree scope = USING_DECL_SCOPE (t);
	  tree name = tsubst_copy (DECL_NAME (t), args, complain, in_decl);
	  if (PACK_EXPANSION_P (scope))
	    {
	      /* A pack of base classes: build one using-decl per
		 expanded scope.  */
	      tree vec = tsubst_pack_expansion (scope, args, complain, in_decl);
	      int len = TREE_VEC_LENGTH (vec);
	      r = make_tree_vec (len);
	      for (int i = 0; i < len; ++i)
		{
		  tree escope = TREE_VEC_ELT (vec, i);
		  tree elt = do_class_using_decl (escope, name);
		  if (!elt)
		    {
		      r = error_mark_node;
		      break;
		    }
		  else
		    {
		      TREE_PROTECTED (elt) = TREE_PROTECTED (t);
		      TREE_PRIVATE (elt) = TREE_PRIVATE (t);
		    }

		  TREE_VEC_ELT (r, i) = elt;
		}
	    }
	  else
	    {
	      tree inst_scope = tsubst_copy (USING_DECL_SCOPE (t), args,
					     complain, in_decl);
	      r = do_class_using_decl (inst_scope, name);
	      if (!r)
		r = error_mark_node;
	      else
		{
		  TREE_PROTECTED (r) = TREE_PROTECTED (t);
		  TREE_PRIVATE (r) = TREE_PRIVATE (t);
		}
	    }
	}
      else
	{
	  r = copy_node (t);
	  DECL_CHAIN (r) = NULL_TREE;
	}
      break;

    case TYPE_DECL:
    case VAR_DECL:
      {
	tree argvec = NULL_TREE;
	tree gen_tmpl = NULL_TREE;
	tree spec;
	tree tmpl = NULL_TREE;
	tree ctx;
	tree type = NULL_TREE;
	bool local_p;

	if (TREE_TYPE (t) == error_mark_node)
	  RETURN (error_mark_node);

	if (TREE_CODE (t) == TYPE_DECL
	    && t == TYPE_MAIN_DECL (TREE_TYPE (t)))
	  {
	    /* If this is the canonical decl, we don't have to
	       mess with instantiations, and often we can't (for
	       typename, template type parms and such).  Note that
	       TYPE_NAME is not correct for the above test if
	       we've copied the type for a typedef.  */
	    type = tsubst (TREE_TYPE (t), args, complain, in_decl);
	    if (type == error_mark_node)
	      RETURN (error_mark_node);
	    r = TYPE_NAME (type);
	    break;
	  }

	/* Check to see if we already have the specialization we
	   need.  */
	spec = NULL_TREE;
	if (DECL_CLASS_SCOPE_P (t) || DECL_NAMESPACE_SCOPE_P (t))
	  {
	    /* T is a static data member or namespace-scope entity.
	       We have to substitute into namespace-scope variables
	       (not just variable templates) because of cases like:

		 template <class T> void f() { extern T t; }

	       where the entity referenced is not known until
	       instantiation time.  */
	    local_p = false;
	    ctx = DECL_CONTEXT (t);
	    if (DECL_CLASS_SCOPE_P (t))
	      {
		ctx = tsubst_aggr_type (ctx, args,
					complain,
					in_decl, /*entering_scope=*/1);
		/* If CTX is unchanged, then T is in fact the
		   specialization we want.  That situation occurs when
		   referencing a static data member within in its own
		   class.  We can use pointer equality, rather than
		   same_type_p, because DECL_CONTEXT is always
		   canonical...  */
		if (ctx == DECL_CONTEXT (t)
		    /* ... unless T is a member template; in which
		       case our caller can be willing to create a
		       specialization of that template represented
		       by T.  */
		    && !(DECL_TI_TEMPLATE (t)
			 && DECL_MEMBER_TEMPLATE_P (DECL_TI_TEMPLATE (t))))
		  spec = t;
	      }

	    if (!spec)
	      {
		tmpl = DECL_TI_TEMPLATE (t);
		gen_tmpl = most_general_template (tmpl);
		argvec = tsubst (DECL_TI_ARGS (t), args, complain, in_decl);
		if (argvec != error_mark_node)
		  argvec = (coerce_innermost_template_parms
			    (DECL_TEMPLATE_PARMS (gen_tmpl),
			     argvec, t, complain,
			     /*all*/true, /*defarg*/true));
		if (argvec == error_mark_node)
		  RETURN (error_mark_node);
		hash = hash_tmpl_and_args (gen_tmpl, argvec);
		spec = retrieve_specialization (gen_tmpl, argvec, hash);
	      }
	  }
	else
	  {
	    /* A local variable.  */
	    local_p = true;
	    /* Subsequent calls to pushdecl will fill this in.  */
	    ctx = NULL_TREE;
	    /* Unless this is a reference to a static variable from an
	       enclosing function, in which case we need to fill it in now.  */
	    if (TREE_STATIC (t))
	      {
		tree fn = tsubst (DECL_CONTEXT (t), args, complain, in_decl);
		if (fn != current_function_decl)
		  ctx = fn;
	      }
	    spec = retrieve_local_specialization (t);
	  }
	/* If we already have the specialization we need, there is
	   nothing more to do.  */
	if (spec)
	  {
	    r = spec;
	    break;
	  }

	/* Create a new node for the specialization we need.  */
	r = copy_decl (t);
	if (type == NULL_TREE)
	  {
	    if (is_typedef_decl (t))
	      type = DECL_ORIGINAL_TYPE (t);
	    else
	      type = TREE_TYPE (t);
	    if (VAR_P (t)
		&& VAR_HAD_UNKNOWN_BOUND (t)
		&& type != error_mark_node)
	      type = strip_array_domain (type);
	    tree sub_args = args;
	    if (tree auto_node = type_uses_auto (type))
	      {
		/* Mask off any template args past the variable's context so we
		   don't replace the auto with an unrelated argument.  */
		int nouter = TEMPLATE_TYPE_LEVEL (auto_node) - 1;
		int extra = TMPL_ARGS_DEPTH (args) - nouter;
		if (extra > 0)
		  sub_args = strip_innermost_template_args (args, extra);
	      }
	    type = tsubst (type, sub_args, complain, in_decl);
	  }
	if (VAR_P (r))
	  {
	    /* Even if the original location is out of scope, the
	       newly substituted one is not.  */
	    DECL_DEAD_FOR_LOCAL (r) = 0;
	    DECL_INITIALIZED_P (r) = 0;
	    DECL_TEMPLATE_INSTANTIATED (r) = 0;
	    if (type == error_mark_node)
	      RETURN (error_mark_node);
	    if (TREE_CODE (type) == FUNCTION_TYPE)
	      {
		/* It may seem that this case cannot occur, since:

		     typedef void f();
		     void g() { f x; }

		   declares a function, not a variable.  However:

		     typedef void f();
		     template <typename T> void g() { T t; }
		     template void g<f>();

		   is an attempt to declare a variable with function
		   type.  */
		error ("variable %qD has function type",
		       /* R is not yet sufficiently initialized, so we
			  just use its name.  */
		       DECL_NAME (r));
		RETURN (error_mark_node);
	      }
	    type = complete_type (type);
	    /* Wait until cp_finish_decl to set this again, to handle
	       circular dependency (template/instantiate6.C).  */
	    DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (r) = 0;
	    type = check_var_type (DECL_NAME (r), type);

	    if (DECL_HAS_VALUE_EXPR_P (t))
	      {
		tree ve = DECL_VALUE_EXPR (t);
		ve = tsubst_expr (ve, args, complain, in_decl,
				  /*constant_expression_p=*/false);
		if (REFERENCE_REF_P (ve))
		  {
		    gcc_assert (TREE_CODE (type) == REFERENCE_TYPE);
		    ve = TREE_OPERAND (ve, 0);
		  }
		SET_DECL_VALUE_EXPR (r, ve);
	      }

	    if (CP_DECL_THREAD_LOCAL_P (r)
		&& !processing_template_decl)
	      set_decl_tls_model (r, decl_default_tls_model (r));
	  }
	else if (DECL_SELF_REFERENCE_P (t))
	  SET_DECL_SELF_REFERENCE_P (r);
	TREE_TYPE (r) = type;
	cp_apply_type_quals_to_decl (cp_type_quals (type), r);
	DECL_CONTEXT (r) = ctx;
	/* Clear out the mangled name and RTL for the instantiation.  */
	SET_DECL_ASSEMBLER_NAME (r, NULL_TREE);
	if (CODE_CONTAINS_STRUCT (TREE_CODE (t), TS_DECL_WRTL))
	  SET_DECL_RTL (r, NULL);
	/* The initializer must not be expanded until it is required;
	   see [temp.inst].  */
	DECL_INITIAL (r) = NULL_TREE;
	if (VAR_P (r))
	  SET_DECL_MODE (r, VOIDmode);
	if (CODE_CONTAINS_STRUCT (TREE_CODE (t), TS_DECL_WRTL))
	  SET_DECL_RTL (r, NULL);
	DECL_SIZE (r) = DECL_SIZE_UNIT (r) = 0;

	if (VAR_P (r))
	  {
	    /* Possibly limit visibility based on template args.  */
	    DECL_VISIBILITY (r) = VISIBILITY_DEFAULT;
	    if (DECL_VISIBILITY_SPECIFIED (t))
	      {
		DECL_VISIBILITY_SPECIFIED (r) = 0;
		DECL_ATTRIBUTES (r)
		  = remove_attribute ("visibility", DECL_ATTRIBUTES (r));
	      }
	    determine_visibility (r);
	  }

	if (!local_p)
	  {
	    /* A static data member declaration is always marked
	       external when it is declared in-class, even if an
	       initializer is present.  We mimic the non-template
	       processing here.  */
	    DECL_EXTERNAL (r) = 1;
	    if (DECL_NAMESPACE_SCOPE_P (t))
	      DECL_NOT_REALLY_EXTERN (r) = 1;

	    DECL_TEMPLATE_INFO (r) = build_template_info (tmpl, argvec);
	    SET_DECL_IMPLICIT_INSTANTIATION (r);
	    register_specialization (r, gen_tmpl, argvec, false, hash);
	  }
	else
	  {
	    if (DECL_LANG_SPECIFIC (r))
	      DECL_TEMPLATE_INFO (r) = NULL_TREE;
	    if (!cp_unevaluated_operand)
	      register_local_specialization (r, t);
	  }

	DECL_CHAIN (r) = NULL_TREE;

	apply_late_template_attributes (&r, DECL_ATTRIBUTES (r),
					/*flags=*/0,
					args, complain, in_decl);

	/* Preserve a typedef that names a type.  */
	if (is_typedef_decl (r) && type != error_mark_node)
	  {
	    DECL_ORIGINAL_TYPE (r) = NULL_TREE;
	    set_underlying_type (r);
	    if (TYPE_DECL_ALIAS_P (r))
	      /* An alias template specialization can be dependent
		 even if its underlying type is not.  */
	      TYPE_DEPENDENT_P_VALID (TREE_TYPE (r)) = false;
	  }

	layout_decl (r, 0);
      }
      break;

    default:
      gcc_unreachable ();
    }
#undef RETURN

 out:
  /* Restore the file and line information.  */
  input_location = saved_loc;

  return r;
}
/* Substitute into the ARG_TYPES of a function type.
If END is a TREE_CHAIN, leave it and any following types
un-substituted. */
static tree
tsubst_arg_types (tree arg_types,
		  tree args,
		  tree end,
		  tsubst_flags_t complain,
		  tree in_decl)
{
  tree remaining_arg_types;
  tree type = NULL_TREE;
  int i = 1;
  tree expanded_args = NULL_TREE;
  tree default_arg;

  /* Stop at the end of the list, at the terminating "void" node, or at
     the caller-supplied END sentinel (which marks types to leave
     un-substituted).  */
  if (!arg_types || arg_types == void_list_node || arg_types == end)
    return arg_types;

  /* Recurse on the tail first: the result list is rebuilt back to
     front, so each node below is consed onto an already-substituted
     tail.  */
  remaining_arg_types = tsubst_arg_types (TREE_CHAIN (arg_types),
					  args, end, complain, in_decl);
  if (remaining_arg_types == error_mark_node)
    return error_mark_node;

  if (PACK_EXPANSION_P (TREE_VALUE (arg_types)))
    {
      /* For a pack expansion, perform substitution on the
	 entire expression.  Later on, we'll handle the arguments
	 one-by-one.  */
      expanded_args = tsubst_pack_expansion (TREE_VALUE (arg_types),
					     args, complain, in_decl);

      if (TREE_CODE (expanded_args) == TREE_VEC)
	/* So that we'll spin through the parameters, one by one.  */
	i = TREE_VEC_LENGTH (expanded_args);
      else
	{
	  /* We only partially substituted into the parameter
	     pack.  Our type is TYPE_PACK_EXPANSION.  */
	  type = expanded_args;
	  expanded_args = NULL_TREE;
	}
    }

  /* Walk the (possibly expanded) types from last to first; consing
     each onto REMAINING_ARG_TYPES restores the original order.  When
     there is no pack expansion, I is 1 and this runs exactly once.  */
  while (i > 0) {
    --i;

    if (expanded_args)
      type = TREE_VEC_ELT (expanded_args, i);
    else if (!type)
      type = tsubst (TREE_VALUE (arg_types), args, complain, in_decl);

    if (type == error_mark_node)
      return error_mark_node;
    if (VOID_TYPE_P (type))
      {
	/* A parameter cannot have void type; report under SFINAE
	   control of COMPLAIN.  */
	if (complain & tf_error)
	  {
	    error ("invalid parameter type %qT", type);
	    if (in_decl)
	      error ("in declaration %q+D", in_decl);
	  }
	return error_mark_node;
      }
    /* DR 657. */
    if (abstract_virtuals_error_sfinae (ACU_PARM, type, complain))
      return error_mark_node;

    /* Do array-to-pointer, function-to-pointer conversion, and ignore
       top-level qualifiers as required.  */
    type = cv_unqualified (type_decays_to (type));

    /* We do not substitute into default arguments here.  The standard
       mandates that they be instantiated only when needed, which is
       done in build_over_call.  */
    default_arg = TREE_PURPOSE (arg_types);

    if (default_arg && TREE_CODE (default_arg) == DEFAULT_ARG)
      {
	/* We've instantiated a template before its default arguments
	   have been parsed.  This can happen for a nested template
	   class, and is not an error unless we require the default
	   argument in a call of this function.  */
	remaining_arg_types =
	  tree_cons (default_arg, type, remaining_arg_types);
	/* Remember this list node so the default argument can be
	   patched in once it is finally parsed.  */
	vec_safe_push (DEFARG_INSTANTIATIONS(default_arg), remaining_arg_types);
      }
    else
      remaining_arg_types =
	hash_tree_cons (default_arg, type, remaining_arg_types);
  }

  return remaining_arg_types;
}
/* Substitute into a FUNCTION_TYPE or METHOD_TYPE. This routine does
*not* handle the exception-specification for FNTYPE, because the
initial substitution of explicitly provided template parameters
during argument deduction forbids substitution into the
exception-specification:
[temp.deduct]
All references in the function type of the function template to the
corresponding template parameters are replaced by the specified tem-
plate argument values. If a substitution in a template parameter or
in the function type of the function template results in an invalid
type, type deduction fails. [Note: The equivalent substitution in
exception specifications is done only when the function is instanti-
ated, at which point a program is ill-formed if the substitution
results in an invalid type.] */
static tree
tsubst_function_type (tree t,
		      tree args,
		      tsubst_flags_t complain,
		      tree in_decl)
{
  tree return_type;
  tree arg_types = NULL_TREE;
  tree fntype;

  /* The TYPE_CONTEXT is not used for function/method types.  */
  gcc_assert (TYPE_CONTEXT (t) == NULL_TREE);

  /* DR 1227: Mixing immediate and non-immediate contexts in deduction
     failure.  */
  bool late_return_type_p = TYPE_HAS_LATE_RETURN_TYPE (t);

  if (late_return_type_p)
    {
      /* For a trailing return type, the parameter types must be
	 substituted BEFORE the return type, because the return type
	 may refer to the parameters and (for a METHOD_TYPE) to
	 'this'.  */
      /* Substitute the argument types.  */
      arg_types = tsubst_arg_types (TYPE_ARG_TYPES (t), args, NULL_TREE,
				    complain, in_decl);
      if (arg_types == error_mark_node)
	return error_mark_node;

      tree save_ccp = current_class_ptr;
      tree save_ccr = current_class_ref;
      /* For a METHOD_TYPE the first parameter type is the implicit
	 object parameter's type.  */
      tree this_type = (TREE_CODE (t) == METHOD_TYPE
			? TREE_TYPE (TREE_VALUE (arg_types)) : NULL_TREE);
      bool do_inject = this_type && CLASS_TYPE_P (this_type);
      if (do_inject)
	{
	  /* DR 1207: 'this' is in scope in the trailing return type.  */
	  inject_this_parameter (this_type, cp_type_quals (this_type));
	}
      /* Substitute the return type.  */
      return_type = tsubst (TREE_TYPE (t), args, complain, in_decl);
      if (do_inject)
	{
	  /* Restore the enclosing 'this' context.  */
	  current_class_ptr = save_ccp;
	  current_class_ref = save_ccr;
	}
    }
  else
    /* Substitute the return type.  */
    return_type = tsubst (TREE_TYPE (t), args, complain, in_decl);

  if (return_type == error_mark_node)
    return error_mark_node;
  /* DR 486 clarifies that creation of a function type with an
     invalid return type is a deduction failure.  */
  if (TREE_CODE (return_type) == ARRAY_TYPE
      || TREE_CODE (return_type) == FUNCTION_TYPE)
    {
      if (complain & tf_error)
	{
	  if (TREE_CODE (return_type) == ARRAY_TYPE)
	    error ("function returning an array");
	  else
	    error ("function returning a function");
	}
      return error_mark_node;
    }
  /* And DR 657. */
  if (abstract_virtuals_error_sfinae (ACU_RETURN, return_type, complain))
    return error_mark_node;

  if (!late_return_type_p)
    {
      /* Substitute the argument types.  */
      arg_types = tsubst_arg_types (TYPE_ARG_TYPES (t), args, NULL_TREE,
				    complain, in_decl);
      if (arg_types == error_mark_node)
	return error_mark_node;
    }

  /* Construct a new type node and return it.  */
  if (TREE_CODE (t) == FUNCTION_TYPE)
    {
      fntype = build_function_type (return_type, arg_types);
      fntype = apply_memfn_quals (fntype,
				  type_memfn_quals (t),
				  type_memfn_rqual (t));
    }
  else
    {
      /* METHOD_TYPE: the basetype is the first (implicit object)
	 parameter's type.  */
      tree r = TREE_TYPE (TREE_VALUE (arg_types));
      /* Don't pick up extra function qualifiers from the basetype.  */
      r = cp_build_qualified_type_real (r, type_memfn_quals (t), complain);
      if (! MAYBE_CLASS_TYPE_P (r))
	{
	  /* [temp.deduct]

	     Type deduction may fail for any of the following
	     reasons:

	     -- Attempting to create "pointer to member of T" when T
	     is not a class type.  */
	  if (complain & tf_error)
	    error ("creating pointer to member function of non-class type %qT",
		   r);
	  return error_mark_node;
	}

      fntype = build_method_type_directly (r, return_type,
					   TREE_CHAIN (arg_types));
      fntype = build_ref_qualified_type (fntype, type_memfn_rqual (t));
    }
  fntype = cp_build_type_attribute_variant (fntype, TYPE_ATTRIBUTES (t));

  if (late_return_type_p)
    TYPE_HAS_LATE_RETURN_TYPE (fntype) = 1;

  return fntype;
}
/* FNTYPE is a FUNCTION_TYPE or METHOD_TYPE.  Substitute the template
   ARGS into that specification, and return the substituted
   specification.  If there is no specification, return NULL_TREE.
   COMPLAIN and IN_DECL are as for tsubst.  If DEFER_OK is true, a
   dependent noexcept-specifier is wrapped in a DEFERRED_NOEXCEPT node
   (recording pattern + args for later instantiation) rather than being
   instantiated immediately; see c++/49107.  */
static tree
tsubst_exception_specification (tree fntype,
				tree args,
				tsubst_flags_t complain,
				tree in_decl,
				bool defer_ok)
{
  tree specs;
  tree new_specs;
  specs = TYPE_RAISES_EXCEPTIONS (fntype);
  new_specs = NULL_TREE;
  if (specs && TREE_PURPOSE (specs))
    {
      /* A noexcept-specifier.  */
      tree expr = TREE_PURPOSE (specs);
      if (TREE_CODE (expr) == INTEGER_CST)
	/* Already a constant (noexcept(true)/noexcept(false));
	   nothing to substitute.  */
	new_specs = expr;
      else if (defer_ok)
	{
	  /* Defer instantiation of noexcept-specifiers to avoid
	     excessive instantiations (c++/49107).  */
	  new_specs = make_node (DEFERRED_NOEXCEPT);
	  if (DEFERRED_NOEXCEPT_SPEC_P (specs))
	    {
	      /* We already partially instantiated this member template,
		 so combine the new args with the old.  */
	      DEFERRED_NOEXCEPT_PATTERN (new_specs)
		= DEFERRED_NOEXCEPT_PATTERN (expr);
	      DEFERRED_NOEXCEPT_ARGS (new_specs)
		= add_to_template_args (DEFERRED_NOEXCEPT_ARGS (expr), args);
	    }
	  else
	    {
	      DEFERRED_NOEXCEPT_PATTERN (new_specs) = expr;
	      DEFERRED_NOEXCEPT_ARGS (new_specs) = args;
	    }
	}
      else
	/* Substitute and evaluate the operand of noexcept(...) now.  */
	new_specs = tsubst_copy_and_build
	  (expr, args, complain, in_decl, /*function_p=*/false,
	   /*integral_constant_expression_p=*/true);
      new_specs = build_noexcept_spec (new_specs, complain);
    }
  else if (specs)
    {
      if (! TREE_VALUE (specs))
	/* An empty dynamic exception specification, throw(); reuse it
	   unchanged.  */
	new_specs = specs;
      else
	/* A dynamic exception specification with types: substitute
	   each type, expanding pack expansions as we go.  */
	while (specs)
	  {
	    tree spec;
            int i, len = 1;
	    tree expanded_specs = NULL_TREE;
	    if (PACK_EXPANSION_P (TREE_VALUE (specs)))
	      {
		/* Expand the pack expansion type.  */
		expanded_specs = tsubst_pack_expansion (TREE_VALUE (specs),
							args, complain,
							in_decl);
		if (expanded_specs == error_mark_node)
		  return error_mark_node;
		else if (TREE_CODE (expanded_specs) == TREE_VEC)
		  len = TREE_VEC_LENGTH (expanded_specs);
		else
		  {
		    /* We're substituting into a member template, so
		       we got a TYPE_PACK_EXPANSION back.  Add that
		       expansion and move on.  */
		    gcc_assert (TREE_CODE (expanded_specs)
				== TYPE_PACK_EXPANSION);
		    new_specs = add_exception_specifier (new_specs,
							 expanded_specs,
							 complain);
		    specs = TREE_CHAIN (specs);
		    continue;
		  }
	      }
	    /* Append each (possibly expanded) specifier to the new list;
	       add_exception_specifier also diagnoses invalid types.  */
	    for (i = 0; i < len; ++i)
	      {
		if (expanded_specs)
		  spec = TREE_VEC_ELT (expanded_specs, i);
		else
		  spec = tsubst (TREE_VALUE (specs), args, complain, in_decl);
		if (spec == error_mark_node)
		  return spec;
		new_specs = add_exception_specifier (new_specs, spec,
						     complain);
	      }
	    specs = TREE_CHAIN (specs);
	  }
    }
  return new_specs;
}
/* Take the tree structure T and replace template parameters used
therein with the argument vector ARGS. IN_DECL is an associated
decl for diagnostics. If an error occurs, returns ERROR_MARK_NODE.
Issue error and warning messages under control of COMPLAIN. Note
that we must be relatively non-tolerant of extensions here, in
order to preserve conformance; if we allow substitutions that
should not be allowed, we may allow argument deductions that should
not succeed, and therefore report ambiguous overload situations
where there are none. In theory, we could allow the substitution,
but indicate that it should have failed, and allow our caller to
make sure that the right thing happens, but we don't try to do this
yet.
This function is used for dealing with types, decls and the like;
for expressions, use tsubst_expr or tsubst_copy. */
tree
tsubst (tree t, tree args, tsubst_flags_t complain, tree in_decl)
{
  enum tree_code code;
  tree type, r = NULL_TREE;
  /* Trivial cases: nothing to substitute, or nodes that are never
     dependent on template parameters.  */
  if (t == NULL_TREE || t == error_mark_node
      || t == integer_type_node
      || t == void_type_node
      || t == char_type_node
      || t == unknown_type_node
      || TREE_CODE (t) == NAMESPACE_DECL
      || TREE_CODE (t) == TRANSLATION_UNIT_DECL)
    return t;
  if (DECL_P (t))
    return tsubst_decl (t, args, complain);
  if (args == NULL_TREE)
    return t;
  code = TREE_CODE (t);
  if (code == IDENTIFIER_NODE)
    type = IDENTIFIER_TYPE_VALUE (t);
  else
    type = TREE_TYPE (t);
  gcc_assert (type != unknown_type_node);
  /* Reuse typedefs.  We need to do this to handle dependent attributes,
     such as attribute aligned.  */
  if (TYPE_P (t)
      && typedef_variant_p (t))
    {
      tree decl = TYPE_NAME (t);
      if (alias_template_specialization_p (t))
	{
	  /* DECL represents an alias template and we want to
	     instantiate it.  */
	  tree tmpl = most_general_template (DECL_TI_TEMPLATE (decl));
	  tree gen_args = tsubst (DECL_TI_ARGS (decl), args, complain, in_decl);
	  r = instantiate_alias_template (tmpl, gen_args, complain);
	}
      else if (DECL_CLASS_SCOPE_P (decl)
	       && CLASSTYPE_TEMPLATE_INFO (DECL_CONTEXT (decl))
	       && uses_template_parms (DECL_CONTEXT (decl)))
	{
	  tree tmpl = most_general_template (DECL_TI_TEMPLATE (decl));
	  tree gen_args = tsubst (DECL_TI_ARGS (decl), args, complain, in_decl);
	  r = retrieve_specialization (tmpl, gen_args, 0);
	}
      else if (DECL_FUNCTION_SCOPE_P (decl)
	       && DECL_TEMPLATE_INFO (DECL_CONTEXT (decl))
	       && uses_template_parms (DECL_TI_ARGS (DECL_CONTEXT (decl))))
	r = retrieve_local_specialization (decl);
      else
	/* The typedef is from a non-template context.  */
	return t;
      if (r)
	{
	  r = TREE_TYPE (r);
	  r = cp_build_qualified_type_real
	    (r, cp_type_quals (t) | cp_type_quals (r),
	     complain | tf_ignore_bad_quals);
	  return r;
	}
      else
	{
	  /* We don't have an instantiation yet, so drop the typedef.  */
	  int quals = cp_type_quals (t);
	  t = DECL_ORIGINAL_TYPE (decl);
	  t = cp_build_qualified_type_real (t, quals,
					    complain | tf_ignore_bad_quals);
	}
    }
  /* tf_fndecl_type is consumed here: it controls deferral of the
     noexcept-specifier in the FUNCTION_TYPE/METHOD_TYPE case below,
     and must not leak into recursive substitutions.  */
  bool fndecl_type = (complain & tf_fndecl_type);
  complain &= ~tf_fndecl_type;
  /* Substitute the underlying type first, except for codes whose
     handlers do their own (or need no) type substitution.  */
  if (type
      && code != TYPENAME_TYPE
      && code != TEMPLATE_TYPE_PARM
      && code != TEMPLATE_PARM_INDEX
      && code != IDENTIFIER_NODE
      && code != FUNCTION_TYPE
      && code != METHOD_TYPE)
    type = tsubst (type, args, complain, in_decl);
  if (type == error_mark_node)
    return error_mark_node;
  switch (code)
    {
    case RECORD_TYPE:
    case UNION_TYPE:
    case ENUMERAL_TYPE:
      return tsubst_aggr_type (t, args, complain, in_decl,
			       /*entering_scope=*/0);
    case ERROR_MARK:
    case IDENTIFIER_NODE:
    case VOID_TYPE:
    case REAL_TYPE:
    case COMPLEX_TYPE:
    case VECTOR_TYPE:
    case BOOLEAN_TYPE:
    case NULLPTR_TYPE:
    case LANG_TYPE:
      return t;
    case INTEGER_TYPE:
      if (t == integer_type_node)
	return t;
      if (TREE_CODE (TYPE_MIN_VALUE (t)) == INTEGER_CST
	  && TREE_CODE (TYPE_MAX_VALUE (t)) == INTEGER_CST)
	return t;
      {
	tree max, omax = TREE_OPERAND (TYPE_MAX_VALUE (t), 0);
	max = tsubst_expr (omax, args, complain, in_decl,
			   /*integral_constant_expression_p=*/false);
	/* Fix up type of the magic NOP_EXPR with TREE_SIDE_EFFECTS if
	   needed.  */
	if (TREE_CODE (max) == NOP_EXPR
	    && TREE_SIDE_EFFECTS (omax)
	    && !TREE_TYPE (max))
	  TREE_TYPE (max) = TREE_TYPE (TREE_OPERAND (max, 0));
	/* If we're in a partial instantiation, preserve the magic NOP_EXPR
	   with TREE_SIDE_EFFECTS that indicates this is not an integral
	   constant expression.  */
	if (processing_template_decl
	    && TREE_SIDE_EFFECTS (omax) && TREE_CODE (omax) == NOP_EXPR)
	  {
	    gcc_assert (TREE_CODE (max) == NOP_EXPR);
	    TREE_SIDE_EFFECTS (max) = 1;
	  }
	return compute_array_index_type (NULL_TREE, max, complain);
      }
    case TEMPLATE_TYPE_PARM:
    case TEMPLATE_TEMPLATE_PARM:
    case BOUND_TEMPLATE_TEMPLATE_PARM:
    case TEMPLATE_PARM_INDEX:
      {
	int idx;
	int level;
	int levels;
	tree arg = NULL_TREE;
	/* Early in template argument deduction substitution, we don't
	   want to reduce the level of 'auto', or it will be confused
	   with a normal template parm in subsequent deduction.  */
	if (is_auto (t) && (complain & tf_partial))
	  return t;
	r = NULL_TREE;
	gcc_assert (TREE_VEC_LENGTH (args) > 0);
	template_parm_level_and_index (t, &level, &idx);
	levels = TMPL_ARGS_DEPTH (args);
	if (level <= levels
	    && TREE_VEC_LENGTH (TMPL_ARGS_LEVEL (args, level)) > 0)
	  {
	    arg = TMPL_ARG (args, level, idx);
	    if (arg && TREE_CODE (arg) == ARGUMENT_PACK_SELECT)
	      {
		/* See through ARGUMENT_PACK_SELECT arguments. */
		arg = ARGUMENT_PACK_SELECT_ARG (arg);
		/* If the selected argument is an expansion E, that most
		   likely means we were called from
		   gen_elem_of_pack_expansion_instantiation during the
		   substituting of pack an argument pack (which Ith
		   element is a pack expansion, where I is
		   ARGUMENT_PACK_SELECT_INDEX) into a pack expansion.
		   In this case, the Ith element resulting from this
		   substituting is going to be a pack expansion, which
		   pattern is the pattern of E.  Let's return the
		   pattern of E, and
		   gen_elem_of_pack_expansion_instantiation will
		   build the resulting pack expansion from it.  */
		if (PACK_EXPANSION_P (arg))
		  {
		    /* Make sure we aren't throwing away arg info.  */
		    gcc_assert (!PACK_EXPANSION_EXTRA_ARGS (arg));
		    arg = PACK_EXPANSION_PATTERN (arg);
		  }
	      }
	  }
	if (arg == error_mark_node)
	  return error_mark_node;
	else if (arg != NULL_TREE)
	  {
	    if (ARGUMENT_PACK_P (arg))
	      /* If ARG is an argument pack, we don't actually want to
		 perform a substitution here, because substitutions
		 for argument packs are only done
		 element-by-element. We can get to this point when
		 substituting the type of a non-type template
		 parameter pack, when that type actually contains
		 template parameter packs from an outer template, e.g.,
		 template<typename... Types> struct A {
		 template<Types... Values> struct B { };
		 };  */
	      return t;
	    if (code == TEMPLATE_TYPE_PARM)
	      {
		int quals;
		gcc_assert (TYPE_P (arg));
		quals = cp_type_quals (arg) | cp_type_quals (t);
		return cp_build_qualified_type_real
		  (arg, quals, complain | tf_ignore_bad_quals);
	      }
	    else if (code == BOUND_TEMPLATE_TEMPLATE_PARM)
	      {
		/* We are processing a type constructed from a
		   template template parameter.  */
		tree argvec = tsubst (TYPE_TI_ARGS (t),
				      args, complain, in_decl);
		if (argvec == error_mark_node)
		  return error_mark_node;
		gcc_assert (TREE_CODE (arg) == TEMPLATE_TEMPLATE_PARM
			    || TREE_CODE (arg) == TEMPLATE_DECL
			    || TREE_CODE (arg) == UNBOUND_CLASS_TEMPLATE);
		if (TREE_CODE (arg) == UNBOUND_CLASS_TEMPLATE)
		  /* Consider this code:
		     template <template <class> class Template>
		     struct Internal {
		     template <class Arg> using Bind = Template<Arg>;
		     };
		     template <template <class> class Template, class Arg>
		     using Instantiate = Template<Arg>; //#0
		     template <template <class> class Template,
		     class Argument>
		     using Bind =
		     Instantiate<Internal<Template>::template Bind,
		     Argument>; //#1
		     When #1 is parsed, the
		     BOUND_TEMPLATE_TEMPLATE_PARM representing the
		     parameter `Template' in #0 matches the
		     UNBOUND_CLASS_TEMPLATE representing the argument
		     `Internal<Template>::template Bind'; We then want
		     to assemble the type `Bind<Argument>' that can't
		     be fully created right now, because
		     `Internal<Template>' not being complete, the Bind
		     template cannot be looked up in that context.  So
		     we need to "store" `Bind<Argument>' for later
		     when the context of Bind becomes complete.  Let's
		     store that in a TYPENAME_TYPE.  */
		  return make_typename_type (TYPE_CONTEXT (arg),
					     build_nt (TEMPLATE_ID_EXPR,
						       TYPE_IDENTIFIER (arg),
						       argvec),
					     typename_type,
					     complain);
		/* We can get a TEMPLATE_TEMPLATE_PARM here when we
		   are resolving nested-types in the signature of a
		   member function templates.  Otherwise ARG is a
		   TEMPLATE_DECL and is the real template to be
		   instantiated.  */
		if (TREE_CODE (arg) == TEMPLATE_TEMPLATE_PARM)
		  arg = TYPE_NAME (arg);
		r = lookup_template_class (arg,
					   argvec, in_decl,
					   DECL_CONTEXT (arg),
					   /*entering_scope=*/0,
					   complain);
		return cp_build_qualified_type_real
		  (r, cp_type_quals (t) | cp_type_quals (r), complain);
	      }
	    else if (code == TEMPLATE_TEMPLATE_PARM)
	      return arg;
	    else
	      /* TEMPLATE_PARM_INDEX.  */
	      return convert_from_reference (unshare_expr (arg));
	  }
	if (level == 1)
	  /* This can happen during the attempted tsubst'ing in
	     unify.  This means that we don't yet have any information
	     about the template parameter in question.  */
	  return t;
	/* If we get here, we must have been looking at a parm for a
	   more deeply nested template.  Make a new version of this
	   template parameter, but with a lower level.  */
	switch (code)
	  {
	  case TEMPLATE_TYPE_PARM:
	  case TEMPLATE_TEMPLATE_PARM:
	  case BOUND_TEMPLATE_TEMPLATE_PARM:
	    if (cp_type_quals (t))
	      {
		r = tsubst (TYPE_MAIN_VARIANT (t), args, complain, in_decl);
		r = cp_build_qualified_type_real
		  (r, cp_type_quals (t),
		   complain | (code == TEMPLATE_TYPE_PARM
			       ? tf_ignore_bad_quals : 0));
	      }
	    else if (TREE_CODE (t) == TEMPLATE_TYPE_PARM
		     && PLACEHOLDER_TYPE_CONSTRAINTS (t)
		     && (r = (TEMPLATE_PARM_DESCENDANTS
			      (TEMPLATE_TYPE_PARM_INDEX (t))))
		     && (r = TREE_TYPE (r))
		     && !PLACEHOLDER_TYPE_CONSTRAINTS (r))
	      /* Break infinite recursion when substituting the constraints
		 of a constrained placeholder.  */;
	    else
	      {
		r = copy_type (t);
		TEMPLATE_TYPE_PARM_INDEX (r)
		  = reduce_template_parm_level (TEMPLATE_TYPE_PARM_INDEX (t),
						r, levels, args, complain);
		TYPE_STUB_DECL (r) = TYPE_NAME (r) = TEMPLATE_TYPE_DECL (r);
		TYPE_MAIN_VARIANT (r) = r;
		TYPE_POINTER_TO (r) = NULL_TREE;
		TYPE_REFERENCE_TO (r) = NULL_TREE;
		if (TREE_CODE (t) == TEMPLATE_TYPE_PARM)
		  {
		    /* Propagate constraints on placeholders.  */
		    if (tree constr = PLACEHOLDER_TYPE_CONSTRAINTS (t))
		      PLACEHOLDER_TYPE_CONSTRAINTS (r)
			= tsubst_constraint (constr, args, complain, in_decl);
		    else if (tree pl = CLASS_PLACEHOLDER_TEMPLATE (t))
		      {
			if (DECL_TEMPLATE_TEMPLATE_PARM_P (pl))
			  pl = tsubst (pl, args, complain, in_decl);
			CLASS_PLACEHOLDER_TEMPLATE (r) = pl;
		      }
		  }
		if (TREE_CODE (r) == TEMPLATE_TEMPLATE_PARM)
		  /* We have reduced the level of the template
		     template parameter, but not the levels of its
		     template parameters, so canonical_type_parameter
		     will not be able to find the canonical template
		     template parameter for this level. Thus, we
		     require structural equality checking to compare
		     TEMPLATE_TEMPLATE_PARMs.  */
		  SET_TYPE_STRUCTURAL_EQUALITY (r);
		else if (TYPE_STRUCTURAL_EQUALITY_P (t))
		  SET_TYPE_STRUCTURAL_EQUALITY (r);
		else
		  TYPE_CANONICAL (r) = canonical_type_parameter (r);
		if (code == BOUND_TEMPLATE_TEMPLATE_PARM)
		  {
		    tree tinfo = TYPE_TEMPLATE_INFO (t);
		    /* We might need to substitute into the types of non-type
		       template parameters.  */
		    tree tmpl = tsubst (TI_TEMPLATE (tinfo), args,
					complain, in_decl);
		    if (tmpl == error_mark_node)
		      return error_mark_node;
		    tree argvec = tsubst (TI_ARGS (tinfo), args,
					  complain, in_decl);
		    if (argvec == error_mark_node)
		      return error_mark_node;
		    TEMPLATE_TEMPLATE_PARM_TEMPLATE_INFO (r)
		      = build_template_info (tmpl, argvec);
		  }
	      }
	    break;
	  case TEMPLATE_PARM_INDEX:
	    /* OK, now substitute the type of the non-type parameter.  We
	       couldn't do it earlier because it might be an auto parameter,
	       and we wouldn't need to if we had an argument.  */
	    type = tsubst (type, args, complain, in_decl);
	    r = reduce_template_parm_level (t, type, levels, args, complain);
	    break;
	  default:
	    gcc_unreachable ();
	  }
	return r;
      }
    case TREE_LIST:
      {
	tree purpose, value, chain;
	if (t == void_list_node)
	  return t;
	purpose = TREE_PURPOSE (t);
	if (purpose)
	  {
	    purpose = tsubst (purpose, args, complain, in_decl);
	    if (purpose == error_mark_node)
	      return error_mark_node;
	  }
	value = TREE_VALUE (t);
	if (value)
	  {
	    value = tsubst (value, args, complain, in_decl);
	    if (value == error_mark_node)
	      return error_mark_node;
	  }
	chain = TREE_CHAIN (t);
	if (chain && chain != void_type_node)
	  {
	    chain = tsubst (chain, args, complain, in_decl);
	    if (chain == error_mark_node)
	      return error_mark_node;
	  }
	/* If nothing changed, share the original node.  */
	if (purpose == TREE_PURPOSE (t)
	    && value == TREE_VALUE (t)
	    && chain == TREE_CHAIN (t))
	  return t;
	return hash_tree_cons (purpose, value, chain);
      }
    case TREE_BINFO:
      /* We should never be tsubsting a binfo.  */
      gcc_unreachable ();
    case TREE_VEC:
      /* A vector of template arguments.  */
      gcc_assert (!type);
      return tsubst_template_args (t, args, complain, in_decl);
    case POINTER_TYPE:
    case REFERENCE_TYPE:
      {
	if (type == TREE_TYPE (t) && TREE_CODE (type) != METHOD_TYPE)
	  return t;
	/* [temp.deduct]
	   Type deduction may fail for any of the following
	   reasons:
	   -- Attempting to create a pointer to reference type.
	   -- Attempting to create a reference to a reference type or
	   a reference to void.
	   Core issue 106 says that creating a reference to a reference
	   during instantiation is no longer a cause for failure. We
	   only enforce this check in strict C++98 mode.  */
	if ((TREE_CODE (type) == REFERENCE_TYPE
	     && (((cxx_dialect == cxx98) && flag_iso) || code != REFERENCE_TYPE))
	    || (code == REFERENCE_TYPE && VOID_TYPE_P (type)))
	  {
	    static location_t last_loc;
	    /* We keep track of the last time we issued this error
	       message to avoid spewing a ton of messages during a
	       single bad template instantiation.  */
	    if (complain & tf_error
		&& last_loc != input_location)
	      {
		if (VOID_TYPE_P (type))
		  error ("forming reference to void");
		else if (code == POINTER_TYPE)
		  error ("forming pointer to reference type %qT", type);
		else
		  error ("forming reference to reference type %qT", type);
		last_loc = input_location;
	      }
	    return error_mark_node;
	  }
	else if (TREE_CODE (type) == FUNCTION_TYPE
		 && (type_memfn_quals (type) != TYPE_UNQUALIFIED
		     || type_memfn_rqual (type) != REF_QUAL_NONE))
	  {
	    if (complain & tf_error)
	      {
		if (code == POINTER_TYPE)
		  error ("forming pointer to qualified function type %qT",
			 type);
		else
		  error ("forming reference to qualified function type %qT",
			 type);
	      }
	    return error_mark_node;
	  }
	else if (code == POINTER_TYPE)
	  {
	    r = build_pointer_type (type);
	    if (TREE_CODE (type) == METHOD_TYPE)
	      r = build_ptrmemfunc_type (r);
	  }
	else if (TREE_CODE (type) == REFERENCE_TYPE)
	  /* In C++0x, during template argument substitution, when there is an
	     attempt to create a reference to a reference type, reference
	     collapsing is applied as described in [14.3.1/4 temp.arg.type]:
	     "If a template-argument for a template-parameter T names a type
	     that is a reference to a type A, an attempt to create the type
	     'lvalue reference to cv T' creates the type 'lvalue reference to
	     A,' while an attempt to create the type type rvalue reference to
	     cv T' creates the type T"
	  */
	  r = cp_build_reference_type
	      (TREE_TYPE (type),
	       TYPE_REF_IS_RVALUE (t) && TYPE_REF_IS_RVALUE (type));
	else
	  r = cp_build_reference_type (type, TYPE_REF_IS_RVALUE (t));
	r = cp_build_qualified_type_real (r, cp_type_quals (t), complain);
	if (r != error_mark_node)
	  /* Will this ever be needed for TYPE_..._TO values?  */
	  layout_type (r);
	return r;
      }
    case OFFSET_TYPE:
      {
	r = tsubst (TYPE_OFFSET_BASETYPE (t), args, complain, in_decl);
	if (r == error_mark_node || !MAYBE_CLASS_TYPE_P (r))
	  {
	    /* [temp.deduct]
	       Type deduction may fail for any of the following
	       reasons:
	       -- Attempting to create "pointer to member of T" when T
	       is not a class type.  */
	    if (complain & tf_error)
	      error ("creating pointer to member of non-class type %qT", r);
	    return error_mark_node;
	  }
	if (TREE_CODE (type) == REFERENCE_TYPE)
	  {
	    if (complain & tf_error)
	      error ("creating pointer to member reference type %qT", type);
	    return error_mark_node;
	  }
	if (VOID_TYPE_P (type))
	  {
	    if (complain & tf_error)
	      error ("creating pointer to member of type void");
	    return error_mark_node;
	  }
	gcc_assert (TREE_CODE (type) != METHOD_TYPE);
	if (TREE_CODE (type) == FUNCTION_TYPE)
	  {
	    /* The type of the implicit object parameter gets its
	       cv-qualifiers from the FUNCTION_TYPE. */
	    tree memptr;
	    tree method_type
	      = build_memfn_type (type, r, type_memfn_quals (type),
				  type_memfn_rqual (type));
	    memptr = build_ptrmemfunc_type (build_pointer_type (method_type));
	    return cp_build_qualified_type_real (memptr, cp_type_quals (t),
						 complain);
	  }
	else
	  return cp_build_qualified_type_real (build_ptrmem_type (r, type),
					       cp_type_quals (t),
					       complain);
      }
    case FUNCTION_TYPE:
    case METHOD_TYPE:
      {
	tree fntype;
	tree specs;
	fntype = tsubst_function_type (t, args, complain, in_decl);
	if (fntype == error_mark_node)
	  return error_mark_node;
	/* Substitute the exception specification.  FNDECL_TYPE (from
	   tf_fndecl_type, stripped above) allows deferral of dependent
	   noexcept-specifiers here.  */
	specs = tsubst_exception_specification (t, args, complain, in_decl,
						/*defer_ok*/fndecl_type);
	if (specs == error_mark_node)
	  return error_mark_node;
	if (specs)
	  fntype = build_exception_variant (fntype, specs);
	return fntype;
      }
    case ARRAY_TYPE:
      {
	tree domain = tsubst (TYPE_DOMAIN (t), args, complain, in_decl);
	if (domain == error_mark_node)
	  return error_mark_node;
	/* As an optimization, we avoid regenerating the array type if
	   it will obviously be the same as T.  */
	if (type == TREE_TYPE (t) && domain == TYPE_DOMAIN (t))
	  return t;
	/* These checks should match the ones in create_array_type_for_decl.
	   [temp.deduct]
	   The deduction may fail for any of the following reasons:
	   -- Attempting to create an array with an element type that
	   is void, a function type, or a reference type, or [DR337]
	   an abstract class type.  */
	if (VOID_TYPE_P (type)
	    || TREE_CODE (type) == FUNCTION_TYPE
	    || (TREE_CODE (type) == ARRAY_TYPE
		&& TYPE_DOMAIN (type) == NULL_TREE)
	    || TREE_CODE (type) == REFERENCE_TYPE)
	  {
	    if (complain & tf_error)
	      error ("creating array of %qT", type);
	    return error_mark_node;
	  }
	if (abstract_virtuals_error_sfinae (ACU_ARRAY, type, complain))
	  return error_mark_node;
	r = build_cplus_array_type (type, domain);
	if (TYPE_USER_ALIGN (t))
	  {
	    SET_TYPE_ALIGN (r, TYPE_ALIGN (t));
	    TYPE_USER_ALIGN (r) = 1;
	  }
	return r;
      }
    case TYPENAME_TYPE:
      {
	tree ctx = tsubst_aggr_type (TYPE_CONTEXT (t), args, complain,
				     in_decl, /*entering_scope=*/1);
	if (ctx == error_mark_node)
	  return error_mark_node;
	tree f = tsubst_copy (TYPENAME_TYPE_FULLNAME (t), args,
			      complain, in_decl);
	if (f == error_mark_node)
	  return error_mark_node;
	if (!MAYBE_CLASS_TYPE_P (ctx))
	  {
	    if (complain & tf_error)
	      error ("%qT is not a class, struct, or union type", ctx);
	    return error_mark_node;
	  }
	else if (!uses_template_parms (ctx) && !TYPE_BEING_DEFINED (ctx))
	  {
	    /* Normally, make_typename_type does not require that the CTX
	       have complete type in order to allow things like:
	       template <class T> struct S { typename S<T>::X Y; };
	       But, such constructs have already been resolved by this
	       point, so here CTX really should have complete type, unless
	       it's a partial instantiation.  */
	    ctx = complete_type (ctx);
	    if (!COMPLETE_TYPE_P (ctx))
	      {
		if (complain & tf_error)
		  cxx_incomplete_type_error (NULL_TREE, ctx);
		return error_mark_node;
	      }
	  }
	f = make_typename_type (ctx, f, typename_type,
				complain | tf_keep_type_decl);
	if (f == error_mark_node)
	  return f;
	if (TREE_CODE (f) == TYPE_DECL)
	  {
	    complain |= tf_ignore_bad_quals;
	    f = TREE_TYPE (f);
	  }
	if (TREE_CODE (f) != TYPENAME_TYPE)
	  {
	    if (TYPENAME_IS_ENUM_P (t) && TREE_CODE (f) != ENUMERAL_TYPE)
	      {
		if (complain & tf_error)
		  error ("%qT resolves to %qT, which is not an enumeration type",
			 t, f);
		else
		  return error_mark_node;
	      }
	    else if (TYPENAME_IS_CLASS_P (t) && !CLASS_TYPE_P (f))
	      {
		/* Fixed diagnostic: dropped the duplicated "is".  */
		if (complain & tf_error)
		  error ("%qT resolves to %qT, which is not a class type",
			 t, f);
		else
		  return error_mark_node;
	      }
	  }
	return cp_build_qualified_type_real
	  (f, cp_type_quals (f) | cp_type_quals (t), complain);
      }
    case UNBOUND_CLASS_TEMPLATE:
      {
	tree ctx = tsubst_aggr_type (TYPE_CONTEXT (t), args, complain,
				     in_decl, /*entering_scope=*/1);
	tree name = TYPE_IDENTIFIER (t);
	tree parm_list = DECL_TEMPLATE_PARMS (TYPE_NAME (t));
	if (ctx == error_mark_node || name == error_mark_node)
	  return error_mark_node;
	if (parm_list)
	  parm_list = tsubst_template_parms (parm_list, args, complain);
	return make_unbound_class_template (ctx, name, parm_list, complain);
      }
    case TYPEOF_TYPE:
      {
	tree type;
	++cp_unevaluated_operand;
	++c_inhibit_evaluation_warnings;
	type = tsubst_expr (TYPEOF_TYPE_EXPR (t), args,
			    complain, in_decl,
			    /*integral_constant_expression_p=*/false);
	--cp_unevaluated_operand;
	--c_inhibit_evaluation_warnings;
	type = finish_typeof (type);
	return cp_build_qualified_type_real (type,
					     cp_type_quals (t)
					     | cp_type_quals (type),
					     complain);
      }
    case DECLTYPE_TYPE:
      {
	tree type;
	++cp_unevaluated_operand;
	++c_inhibit_evaluation_warnings;
	type = tsubst_copy_and_build (DECLTYPE_TYPE_EXPR (t), args,
				      complain|tf_decltype, in_decl,
				      /*function_p*/false,
				      /*integral_constant_expression*/false);
	if (DECLTYPE_FOR_INIT_CAPTURE (t))
	  {
	    if (type == NULL_TREE)
	      {
		if (complain & tf_error)
		  error ("empty initializer in lambda init-capture");
		type = error_mark_node;
	      }
	    else if (TREE_CODE (type) == TREE_LIST)
	      type = build_x_compound_expr_from_list (type, ELK_INIT, complain);
	  }
	--cp_unevaluated_operand;
	--c_inhibit_evaluation_warnings;
	if (DECLTYPE_FOR_LAMBDA_CAPTURE (t))
	  type = lambda_capture_field_type (type,
					    DECLTYPE_FOR_INIT_CAPTURE (t),
					    DECLTYPE_FOR_REF_CAPTURE (t));
	else if (DECLTYPE_FOR_LAMBDA_PROXY (t))
	  type = lambda_proxy_type (type);
	else
	  {
	    bool id = DECLTYPE_TYPE_ID_EXPR_OR_MEMBER_ACCESS_P (t);
	    if (id && TREE_CODE (DECLTYPE_TYPE_EXPR (t)) == BIT_NOT_EXPR
		&& EXPR_P (type))
	      /* In a template ~id could be either a complement expression
		 or an unqualified-id naming a destructor; if instantiating
		 it produces an expression, it's not an id-expression or
		 member access.  */
	      id = false;
	    type = finish_decltype_type (type, id, complain);
	  }
	return cp_build_qualified_type_real (type,
					     cp_type_quals (t)
					     | cp_type_quals (type),
					     complain | tf_ignore_bad_quals);
      }
    case UNDERLYING_TYPE:
      {
	tree type = tsubst (UNDERLYING_TYPE_TYPE (t), args,
			    complain, in_decl);
	return finish_underlying_type (type);
      }
    case TYPE_ARGUMENT_PACK:
    case NONTYPE_ARGUMENT_PACK:
      {
	tree r;
	if (code == NONTYPE_ARGUMENT_PACK)
	  {
	    r = make_node (code);
	    /* Set the already-substituted type.  */
	    TREE_TYPE (r) = type;
	  }
	else
	  r = cxx_make_type (code);
	tree pack_args = ARGUMENT_PACK_ARGS (t);
	pack_args = tsubst_template_args (pack_args, args, complain, in_decl);
	SET_ARGUMENT_PACK_ARGS (r, pack_args);
	return r;
      }
    case VOID_CST:
    case INTEGER_CST:
    case REAL_CST:
    case STRING_CST:
    case PLUS_EXPR:
    case MINUS_EXPR:
    case NEGATE_EXPR:
    case NOP_EXPR:
    case INDIRECT_REF:
    case ADDR_EXPR:
    case CALL_EXPR:
    case ARRAY_REF:
    case SCOPE_REF:
      /* We should use one of the expression tsubsts for these codes.  */
      gcc_unreachable ();
    default:
      sorry ("use of %qs in template", get_tree_code_name (code));
      return error_mark_node;
    }
}
/* Like tsubst_expr for a BASELINK.  OBJECT_TYPE, if non-NULL, is the
   type of the expression on the left-hand side of the "." or "->"
   operator.  ARGS, COMPLAIN and IN_DECL are as for tsubst.  Returns
   the new BASELINK (from a fresh lookup in the substituted scope) or
   error_mark_node.  */
static tree
tsubst_baselink (tree baselink, tree object_type,
		 tree args, tsubst_flags_t complain, tree in_decl)
{
  tree name;
  tree qualifying_scope;
  tree fns;
  tree optype;
  tree template_args = 0;
  bool template_id_p = false;
  bool qualified = BASELINK_QUALIFIED_P (baselink);
  /* A baselink indicates a function from a base class.  Both the
     BASELINK_ACCESS_BINFO and the base class referenced may
     indicate bases of the template class, rather than the
     instantiated class.  In addition, lookups that were not
     ambiguous before may be ambiguous now.  Therefore, we perform
     the lookup again.  */
  qualifying_scope = BINFO_TYPE (BASELINK_ACCESS_BINFO (baselink));
  qualifying_scope = tsubst (qualifying_scope, args,
			     complain, in_decl);
  fns = BASELINK_FUNCTIONS (baselink);
  optype = tsubst (BASELINK_OPTYPE (baselink), args, complain, in_decl);
  if (TREE_CODE (fns) == TEMPLATE_ID_EXPR)
    {
      /* Peel off explicit template arguments (f<T>); they are
	 substituted separately and re-attached after the lookup.  */
      template_id_p = true;
      template_args = TREE_OPERAND (fns, 1);
      fns = TREE_OPERAND (fns, 0);
      if (template_args)
	template_args = tsubst_template_args (template_args, args,
					      complain, in_decl);
    }
  name = DECL_NAME (get_first_fn (fns));
  /* For a conversion operator, the name encodes the (now substituted)
     conversion type.  */
  if (IDENTIFIER_TYPENAME_P (name))
    name = mangle_conv_op_name_for_type (optype);
  baselink = lookup_fnfields (qualifying_scope, name, /*protect=*/1);
  if (!baselink)
    {
      if (constructor_name_p (name, qualifying_scope))
	{
	  if (complain & tf_error)
	    error ("cannot call constructor %<%T::%D%> directly",
		   qualifying_scope, name);
	}
      return error_mark_node;
    }
  /* If lookup found a single function, mark it as used at this
     point.  (If it lookup found multiple functions the one selected
     later by overload resolution will be marked as used at that
     point.)  */
  if (BASELINK_P (baselink))
    fns = BASELINK_FUNCTIONS (baselink);
  if (!template_id_p && !really_overloaded_fn (fns)
      && !mark_used (OVL_CURRENT (fns), complain) && !(complain & tf_error))
    return error_mark_node;
  /* Add back the template arguments, if present.  */
  if (BASELINK_P (baselink) && template_id_p)
    BASELINK_FUNCTIONS (baselink)
      = build2 (TEMPLATE_ID_EXPR,
		unknown_type_node,
		BASELINK_FUNCTIONS (baselink),
		template_args);
  /* Update the conversion operator type.  */
  if (BASELINK_P (baselink))
    BASELINK_OPTYPE (baselink) = optype;
  if (!object_type)
    object_type = current_class_type;
  if (qualified || name == complete_dtor_identifier)
    {
      baselink = adjust_result_of_qualified_name_lookup (baselink,
							 qualifying_scope,
							 object_type);
      if (!qualified)
	/* We need to call adjust_result_of_qualified_name_lookup in case the
	   destructor names a base class, but we unset BASELINK_QUALIFIED_P
	   so that we still get virtual function binding.  */
	BASELINK_QUALIFIED_P (baselink) = false;
    }
  return baselink;
}
/* Like tsubst_expr for a SCOPE_REF, given by QUALIFIED_ID.  DONE is
   true if the qualified-id will be a postfix-expression in-and-of
   itself; false if more of the postfix-expression follows the
   QUALIFIED_ID.  ADDRESS_P is true if the qualified-id is the operand
   of "&".  ARGS, COMPLAIN and IN_DECL are as for tsubst.  Returns the
   substituted (and looked-up) expression, or error_mark_node.  */
static tree
tsubst_qualified_id (tree qualified_id, tree args,
		     tsubst_flags_t complain, tree in_decl,
		     bool done, bool address_p)
{
  tree expr;
  tree scope;
  tree name;
  bool is_template;
  tree template_args;
  location_t loc = UNKNOWN_LOCATION;
  gcc_assert (TREE_CODE (qualified_id) == SCOPE_REF);
  /* Figure out what name to look up.  */
  name = TREE_OPERAND (qualified_id, 1);
  if (TREE_CODE (name) == TEMPLATE_ID_EXPR)
    {
      /* Peel off and substitute explicit template arguments; the bare
	 name is looked up and the arguments re-attached below.  */
      is_template = true;
      loc = EXPR_LOCATION (name);
      template_args = TREE_OPERAND (name, 1);
      if (template_args)
	template_args = tsubst_template_args (template_args, args,
					      complain, in_decl);
      if (template_args == error_mark_node)
	return error_mark_node;
      name = TREE_OPERAND (name, 0);
    }
  else
    {
      is_template = false;
      template_args = NULL_TREE;
    }
  /* Substitute into the qualifying scope.  When there are no ARGS, we
     are just trying to simplify a non-dependent expression.  In that
     case the qualifying scope may be dependent, and, in any case,
     substituting will not help.  */
  scope = TREE_OPERAND (qualified_id, 0);
  if (args)
    {
      scope = tsubst (scope, args, complain, in_decl);
      expr = tsubst_copy (name, args, complain, in_decl);
    }
  else
    expr = name;
  if (dependent_scope_p (scope))
    {
      /* The scope is still dependent: rebuild an unresolved
	 qualified-id for a later substitution pass.  */
      if (is_template)
	expr = build_min_nt_loc (loc, TEMPLATE_ID_EXPR, expr, template_args);
      tree r = build_qualified_name (NULL_TREE, scope, expr,
				     QUALIFIED_NAME_IS_TEMPLATE (qualified_id));
      REF_PARENTHESIZED_P (r) = REF_PARENTHESIZED_P (qualified_id);
      return r;
    }
  if (!BASELINK_P (name) && !DECL_P (expr))
    {
      if (TREE_CODE (expr) == BIT_NOT_EXPR)
	{
	  /* A BIT_NOT_EXPR is used to represent a destructor.  */
	  if (!check_dtor_name (scope, TREE_OPERAND (expr, 0)))
	    {
	      error ("qualifying type %qT does not match destructor name ~%qT",
		     scope, TREE_OPERAND (expr, 0));
	      expr = error_mark_node;
	    }
	  else
	    expr = lookup_qualified_name (scope, complete_dtor_identifier,
					  /*is_type_p=*/0, false);
	}
      else
	expr = lookup_qualified_name (scope, expr, /*is_type_p=*/0, false);
      if (TREE_CODE (TREE_CODE (expr) == TEMPLATE_DECL
		     ? DECL_TEMPLATE_RESULT (expr) : expr) == TYPE_DECL)
	{
	  /* The name was parsed as a non-type but instantiation turned
	     it into a type; the user needs 'typename'.  */
	  if (complain & tf_error)
	    {
	      error ("dependent-name %qE is parsed as a non-type, but "
		     "instantiation yields a type", qualified_id);
	      inform (input_location, "say %<typename %E%> if a type is meant", qualified_id);
	    }
	  return error_mark_node;
	}
    }
  if (DECL_P (expr))
    {
      check_accessibility_of_qualified_id (expr, /*object_type=*/NULL_TREE,
					   scope);
      /* Remember that there was a reference to this entity.  */
      if (!mark_used (expr, complain) && !(complain & tf_error))
	return error_mark_node;
    }
  if (expr == error_mark_node || TREE_CODE (expr) == TREE_LIST)
    {
      if (complain & tf_error)
	qualified_name_lookup_error (scope,
				     TREE_OPERAND (qualified_id, 1),
				     expr, input_location);
      return error_mark_node;
    }
  if (is_template)
    {
      /* Re-attach the substituted explicit template arguments.  */
      if (variable_template_p (expr))
	expr = lookup_and_finish_template_variable (expr, template_args,
						    complain);
      else
	expr = lookup_template_function (expr, template_args);
    }
  if (expr == error_mark_node && complain & tf_error)
    qualified_name_lookup_error (scope, TREE_OPERAND (qualified_id, 1),
				 expr, input_location);
  else if (TYPE_P (scope))
    {
      expr = (adjust_result_of_qualified_name_lookup
	      (expr, scope, current_nonlambda_class_type ()));
      expr = (finish_qualified_id_expr
	      (scope, expr, done, address_p && PTRMEM_OK_P (qualified_id),
	       QUALIFIED_NAME_IS_TEMPLATE (qualified_id),
	       /*template_arg_p=*/false, complain));
    }
  /* Expressions do not generally have reference type.  */
  if (TREE_CODE (expr) != SCOPE_REF
      /* However, if we're about to form a pointer-to-member, we just
	 want the referenced member referenced.  */
      && TREE_CODE (expr) != OFFSET_REF)
    expr = convert_from_reference (expr);
  if (REF_PARENTHESIZED_P (qualified_id))
    expr = force_paren_expr (expr);
  return expr;
}
/* Substitute template ARGS into INIT, the unsubstituted initializer
   for the (already substituted) VAR_DECL given by DECL.  COMPLAIN and
   IN_DECL are as for tsubst.  Returns the substituted initializer, or
   NULL_TREE if INIT was NULL_TREE.  */
static tree
tsubst_init (tree init, tree decl, tree args,
	     tsubst_flags_t complain, tree in_decl)
{
  if (init == NULL_TREE)
    return NULL_TREE;
  tree result = tsubst_expr (init, args, complain, in_decl, false);
  if (result == NULL_TREE && TREE_TYPE (decl) != error_mark_node)
    {
      /* The initializer was present but substituted away entirely;
	 this happens when it was a pack expansion whose parameter
	 packs all had length zero.  Fall back to value-initializing
	 the object.  */
      result = build_value_init (TREE_TYPE (decl), complain);
      if (TREE_CODE (result) == AGGR_INIT_EXPR)
	result = get_target_expr_sfinae (result, complain);
      if (TREE_CODE (result) == TARGET_EXPR)
	TARGET_EXPR_DIRECT_INIT_P (result) = true;
    }
  return result;
}
/* Like tsubst, but deals with expressions.  This function just replaces
   template parms; to finish processing the resultant expression, use
   tsubst_copy_and_build or tsubst_expr.  T is the expression to
   substitute into, ARGS the template arguments, COMPLAIN the diagnostic
   mode, and IN_DECL the declaration being instantiated (for
   diagnostics).  Returns the substituted tree, T itself when nothing
   needed substituting, or error_mark_node on failure.  */

static tree
tsubst_copy (tree t, tree args, tsubst_flags_t complain, tree in_decl)
{
  enum tree_code code;
  tree r;

  if (t == NULL_TREE || t == error_mark_node || args == NULL_TREE)
    return t;

  code = TREE_CODE (t);

  switch (code)
    {
    case PARM_DECL:
      r = retrieve_local_specialization (t);

      if (r == NULL_TREE)
	{
	  /* We get here for a use of 'this' in an NSDMI as part of a
	     constructor call or as part of an aggregate initialization.  */
	  if (DECL_NAME (t) == this_identifier
	      && ((current_function_decl
		   && DECL_CONSTRUCTOR_P (current_function_decl))
		  || (current_class_ref
		      && TREE_CODE (current_class_ref) == PLACEHOLDER_EXPR)))
	    return current_class_ptr;

	  /* This can happen for a parameter name used later in a function
	     declaration (such as in a late-specified return type).  Just
	     make a dummy decl, since it's only used for its type.  */
	  gcc_assert (cp_unevaluated_operand != 0);
	  r = tsubst_decl (t, args, complain);
	  /* Give it the template pattern as its context; its true context
	     hasn't been instantiated yet and this is good enough for
	     mangling.  */
	  DECL_CONTEXT (r) = DECL_CONTEXT (t);
	}

      if (TREE_CODE (r) == ARGUMENT_PACK_SELECT)
	r = ARGUMENT_PACK_SELECT_ARG (r);
      if (!mark_used (r, complain) && !(complain & tf_error))
	return error_mark_node;
      return r;

    case CONST_DECL:
      {
	tree enum_type;
	tree v;

	if (DECL_TEMPLATE_PARM_P (t))
	  return tsubst_copy (DECL_INITIAL (t), args, complain, in_decl);
	/* There is no need to substitute into namespace-scope
	   enumerators.  */
	if (DECL_NAMESPACE_SCOPE_P (t))
	  return t;
	/* If ARGS is NULL, then T is known to be non-dependent.  */
	if (args == NULL_TREE)
	  return scalar_constant_value (t);

	/* Unfortunately, we cannot just call lookup_name here.
	   Consider:

	     template <int I> int f() {
	       enum E { a = I };
	       struct S { void g() { E e = a; } };
	     };

	   When we instantiate f<7>::S::g(), say, lookup_name is not
	   clever enough to find f<7>::a.  */
	enum_type
	  = tsubst_aggr_type (DECL_CONTEXT (t), args, complain, in_decl,
			      /*entering_scope=*/0);

	for (v = TYPE_VALUES (enum_type);
	     v != NULL_TREE;
	     v = TREE_CHAIN (v))
	  if (TREE_PURPOSE (v) == DECL_NAME (t))
	    return TREE_VALUE (v);

	/* We didn't find the name.  That should never happen; if
	   name-lookup found it during preliminary parsing, we
	   should find it again here during instantiation.  */
	gcc_unreachable ();
      }
      return t;

    case FIELD_DECL:
      if (PACK_EXPANSION_P (TREE_TYPE (t)))
	{
	  /* Check for a local specialization set up by
	     tsubst_pack_expansion.  */
	  if (tree r = retrieve_local_specialization (t))
	    {
	      if (TREE_CODE (r) == ARGUMENT_PACK_SELECT)
		r = ARGUMENT_PACK_SELECT_ARG (r);
	      return r;
	    }

	  /* When retrieving a capture pack from a generic lambda, remove the
	     lambda call op's own template argument list from ARGS.  Only the
	     template arguments active for the closure type should be used to
	     retrieve the pack specialization.  */
	  if (LAMBDA_FUNCTION_P (current_function_decl)
	      && (template_class_depth (DECL_CONTEXT (t))
		  != TMPL_ARGS_DEPTH (args)))
	    args = strip_innermost_template_args (args, 1);

	  /* Otherwise return the full NONTYPE_ARGUMENT_PACK that
	     tsubst_decl put in the hash table.  */
	  return retrieve_specialization (t, args, 0);
	}

      if (DECL_CONTEXT (t))
	{
	  tree ctx;

	  ctx = tsubst_aggr_type (DECL_CONTEXT (t), args, complain, in_decl,
				  /*entering_scope=*/1);
	  if (ctx != DECL_CONTEXT (t))
	    {
	      tree r = lookup_field (ctx, DECL_NAME (t), 0, false);
	      if (!r)
		{
		  if (complain & tf_error)
		    error ("using invalid field %qD", t);
		  return error_mark_node;
		}
	      return r;
	    }
	}

      return t;

    case VAR_DECL:
    case FUNCTION_DECL:
      if (DECL_LANG_SPECIFIC (t) && DECL_TEMPLATE_INFO (t))
	r = tsubst (t, args, complain, in_decl);
      else if (local_variable_p (t)
	       && uses_template_parms (DECL_CONTEXT (t)))
	{
	  r = retrieve_local_specialization (t);
	  if (r == NULL_TREE)
	    {
	      /* First try name lookup to find the instantiation.  */
	      r = lookup_name (DECL_NAME (t));
	      if (r && !is_capture_proxy (r))
		{
		  /* Make sure that the one we found is the one we want.  */
		  tree ctx = DECL_CONTEXT (t);
		  if (DECL_LANG_SPECIFIC (ctx) && DECL_TEMPLATE_INFO (ctx))
		    ctx = tsubst (ctx, args, complain, in_decl);
		  if (ctx != DECL_CONTEXT (r))
		    r = NULL_TREE;
		}

	      if (r)
		/* OK */;
	      else
		{
		  /* This can happen for a variable used in a
		     late-specified return type of a local lambda, or for a
		     local static or constant.  Building a new VAR_DECL
		     should be OK in all those cases.  */
		  r = tsubst_decl (t, args, complain);
		  if (local_specializations)
		    /* Avoid infinite recursion (79640).  */
		    register_local_specialization (r, t);
		  if (decl_maybe_constant_var_p (r))
		    {
		      /* We can't call cp_finish_decl, so handle the
			 initializer by hand.  */
		      tree init = tsubst_init (DECL_INITIAL (t), r, args,
					       complain, in_decl);
		      if (!processing_template_decl)
			init = maybe_constant_init (init);
		      if (processing_template_decl
			  ? potential_constant_expression (init)
			  : reduced_constant_expression_p (init))
			DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (r)
			  = TREE_CONSTANT (r) = true;
		      DECL_INITIAL (r) = init;
		      if (tree auto_node = type_uses_auto (TREE_TYPE (r)))
			TREE_TYPE (r)
			  = do_auto_deduction (TREE_TYPE (r), init, auto_node,
					       complain, adc_variable_type);
		    }
		  gcc_assert (cp_unevaluated_operand || TREE_STATIC (r)
			      || decl_constant_var_p (r)
			      || errorcount || sorrycount);
		  if (!processing_template_decl
		      && !TREE_STATIC (r))
		    r = process_outer_var_ref (r, complain);
		}
	      /* Remember this for subsequent uses.  */
	      if (local_specializations)
		register_local_specialization (r, t);
	    }
	}
      else
	r = t;
      if (!mark_used (r, complain))
	return error_mark_node;
      return r;

    case NAMESPACE_DECL:
      return t;

    case OVERLOAD:
      /* An OVERLOAD will always be a non-dependent overload set; an
	 overload set from function scope will just be represented with an
	 IDENTIFIER_NODE, and from class scope with a BASELINK.  */
      gcc_assert (!uses_template_parms (t));
      return t;

    case BASELINK:
      return tsubst_baselink (t, current_nonlambda_class_type (),
			      args, complain, in_decl);

    case TEMPLATE_DECL:
      if (DECL_TEMPLATE_TEMPLATE_PARM_P (t))
	return tsubst (TREE_TYPE (DECL_TEMPLATE_RESULT (t)),
		       args, complain, in_decl);
      else if (DECL_FUNCTION_TEMPLATE_P (t) && DECL_MEMBER_TEMPLATE_P (t))
	return tsubst (t, args, complain, in_decl);
      else if (DECL_CLASS_SCOPE_P (t)
	       && uses_template_parms (DECL_CONTEXT (t)))
	{
	  /* Template template argument like the following example need
	     special treatment:

	       template <template <class> class TT> struct C {};
	       template <class T> struct D {
		 template <class U> struct E {};
		 C<E> c;				// #1
	       };
	       D<int> d;				// #2

	     We are processing the template argument `E' in #1 for
	     the template instantiation #2.  Originally, `E' is a
	     TEMPLATE_DECL with `D<T>' as its DECL_CONTEXT.  Now we
	     have to substitute this with one having context `D<int>'.  */

	  tree context = tsubst (DECL_CONTEXT (t), args, complain, in_decl);
	  if (dependent_scope_p (context))
	    {
	      /* When rewriting a constructor into a deduction guide, a
		 non-dependent name can become dependent, so memtmpl<args>
		 becomes context::template memtmpl<args>.  */
	      tree type = tsubst (TREE_TYPE (t), args, complain, in_decl);
	      return build_qualified_name (type, context, DECL_NAME (t),
					   /*template*/true);
	    }
	  return lookup_field (context, DECL_NAME(t), 0, false);
	}
      else
	/* Ordinary template template argument.  */
	return t;

    case CAST_EXPR:
    case REINTERPRET_CAST_EXPR:
    case CONST_CAST_EXPR:
    case STATIC_CAST_EXPR:
    case DYNAMIC_CAST_EXPR:
    case IMPLICIT_CONV_EXPR:
    case CONVERT_EXPR:
    case NOP_EXPR:
      {
	tree type = tsubst (TREE_TYPE (t), args, complain, in_decl);
	tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl);
	return build1 (code, type, op0);
      }

    case SIZEOF_EXPR:
      if (PACK_EXPANSION_P (TREE_OPERAND (t, 0))
	  || ARGUMENT_PACK_P (TREE_OPERAND (t, 0)))
	{
	  tree expanded, op = TREE_OPERAND (t, 0);
	  int len = 0;

	  if (SIZEOF_EXPR_TYPE_P (t))
	    op = TREE_TYPE (op);

	  ++cp_unevaluated_operand;
	  ++c_inhibit_evaluation_warnings;
	  /* We only want to compute the number of arguments.  */
	  if (PACK_EXPANSION_P (op))
	    expanded = tsubst_pack_expansion (op, args, complain, in_decl);
	  else
	    expanded = tsubst_template_args (ARGUMENT_PACK_ARGS (op),
					     args, complain, in_decl);
	  --cp_unevaluated_operand;
	  --c_inhibit_evaluation_warnings;

	  if (TREE_CODE (expanded) == TREE_VEC)
	    {
	      len = TREE_VEC_LENGTH (expanded);
	      /* Set TREE_USED for the benefit of -Wunused.  */
	      for (int i = 0; i < len; i++)
		if (DECL_P (TREE_VEC_ELT (expanded, i)))
		  TREE_USED (TREE_VEC_ELT (expanded, i)) = true;
	    }

	  if (expanded == error_mark_node)
	    return error_mark_node;
	  else if (PACK_EXPANSION_P (expanded)
		   || (TREE_CODE (expanded) == TREE_VEC
		       && pack_expansion_args_count (expanded)))
	    {
	      if (PACK_EXPANSION_P (expanded))
		/* OK.  */;
	      else if (TREE_VEC_LENGTH (expanded) == 1)
		expanded = TREE_VEC_ELT (expanded, 0);
	      else
		expanded = make_argument_pack (expanded);

	      if (TYPE_P (expanded))
		return cxx_sizeof_or_alignof_type (expanded, SIZEOF_EXPR,
						   complain & tf_error);
	      else
		return cxx_sizeof_or_alignof_expr (expanded, SIZEOF_EXPR,
						   complain & tf_error);
	    }
	  else
	    return build_int_cst (size_type_node, len);
	}
      if (SIZEOF_EXPR_TYPE_P (t))
	{
	  r = tsubst (TREE_TYPE (TREE_OPERAND (t, 0)),
		      args, complain, in_decl);
	  r = build1 (NOP_EXPR, r, error_mark_node);
	  r = build1 (SIZEOF_EXPR,
		      tsubst (TREE_TYPE (t), args, complain, in_decl), r);
	  SIZEOF_EXPR_TYPE_P (r) = 1;
	  return r;
	}
      /* Fall through */

    case INDIRECT_REF:
    case NEGATE_EXPR:
    case TRUTH_NOT_EXPR:
    case BIT_NOT_EXPR:
    case ADDR_EXPR:
    case UNARY_PLUS_EXPR:      /* Unary + */
    case ALIGNOF_EXPR:
    case AT_ENCODE_EXPR:
    case ARROW_EXPR:
    case THROW_EXPR:
    case TYPEID_EXPR:
    case REALPART_EXPR:
    case IMAGPART_EXPR:
    case PAREN_EXPR:
      {
	tree type = tsubst (TREE_TYPE (t), args, complain, in_decl);
	tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl);
	return build1 (code, type, op0);
      }

    case COMPONENT_REF:
      {
	tree object;
	tree name;

	object = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl);
	name = TREE_OPERAND (t, 1);
	if (TREE_CODE (name) == BIT_NOT_EXPR)
	  {
	    name = tsubst_copy (TREE_OPERAND (name, 0), args,
				complain, in_decl);
	    name = build1 (BIT_NOT_EXPR, NULL_TREE, name);
	  }
	else if (TREE_CODE (name) == SCOPE_REF
		 && TREE_CODE (TREE_OPERAND (name, 1)) == BIT_NOT_EXPR)
	  {
	    tree base = tsubst_copy (TREE_OPERAND (name, 0), args,
				     complain, in_decl);
	    name = TREE_OPERAND (name, 1);
	    name = tsubst_copy (TREE_OPERAND (name, 0), args,
				complain, in_decl);
	    name = build1 (BIT_NOT_EXPR, NULL_TREE, name);
	    name = build_qualified_name (/*type=*/NULL_TREE,
					 base, name,
					 /*template_p=*/false);
	  }
	else if (BASELINK_P (name))
	  name = tsubst_baselink (name,
				  non_reference (TREE_TYPE (object)),
				  args, complain,
				  in_decl);
	else
	  name = tsubst_copy (name, args, complain, in_decl);
	return build_nt (COMPONENT_REF, object, name, NULL_TREE);
      }

    case PLUS_EXPR:
    case MINUS_EXPR:
    case MULT_EXPR:
    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
    case BIT_AND_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case TRUNC_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case TRUTH_ANDIF_EXPR:
    case TRUTH_ORIF_EXPR:
    case TRUTH_AND_EXPR:
    case TRUTH_OR_EXPR:
    case RSHIFT_EXPR:
    case LSHIFT_EXPR:
    case RROTATE_EXPR:
    case LROTATE_EXPR:
    case EQ_EXPR:
    case NE_EXPR:
    case MAX_EXPR:
    case MIN_EXPR:
    case LE_EXPR:
    case GE_EXPR:
    case LT_EXPR:
    case GT_EXPR:
    case COMPOUND_EXPR:
    case DOTSTAR_EXPR:
    case MEMBER_REF:
    case PREDECREMENT_EXPR:
    case PREINCREMENT_EXPR:
    case POSTDECREMENT_EXPR:
    case POSTINCREMENT_EXPR:
      {
	tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl);
	tree op1 = tsubst_copy (TREE_OPERAND (t, 1), args, complain, in_decl);
	return build_nt (code, op0, op1);
      }

    case SCOPE_REF:
      {
	tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl);
	tree op1 = tsubst_copy (TREE_OPERAND (t, 1), args, complain, in_decl);
	return build_qualified_name (/*type=*/NULL_TREE, op0, op1,
				     QUALIFIED_NAME_IS_TEMPLATE (t));
      }

    case ARRAY_REF:
      {
	tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl);
	tree op1 = tsubst_copy (TREE_OPERAND (t, 1), args, complain, in_decl);
	return build_nt (ARRAY_REF, op0, op1, NULL_TREE, NULL_TREE);
      }

    case CALL_EXPR:
      {
	int n = VL_EXP_OPERAND_LENGTH (t);
	tree result = build_vl_exp (CALL_EXPR, n);
	int i;
	/* Fill in the operands of the copy RESULT; the original node T
	   must be left untouched, since it is the template pattern and
	   may be substituted into again.  */
	for (i = 0; i < n; i++)
	  TREE_OPERAND (result, i) = tsubst_copy (TREE_OPERAND (t, i), args,
						  complain, in_decl);
	return result;
      }

    case COND_EXPR:
    case MODOP_EXPR:
    case PSEUDO_DTOR_EXPR:
    case VEC_PERM_EXPR:
      {
	tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl);
	tree op1 = tsubst_copy (TREE_OPERAND (t, 1), args, complain, in_decl);
	tree op2 = tsubst_copy (TREE_OPERAND (t, 2), args, complain, in_decl);
	r = build_nt (code, op0, op1, op2);
	TREE_NO_WARNING (r) = TREE_NO_WARNING (t);
	return r;
      }

    case NEW_EXPR:
      {
	tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl);
	tree op1 = tsubst_copy (TREE_OPERAND (t, 1), args, complain, in_decl);
	tree op2 = tsubst_copy (TREE_OPERAND (t, 2), args, complain, in_decl);
	r = build_nt (code, op0, op1, op2);
	NEW_EXPR_USE_GLOBAL (r) = NEW_EXPR_USE_GLOBAL (t);
	return r;
      }

    case DELETE_EXPR:
      {
	tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl);
	tree op1 = tsubst_copy (TREE_OPERAND (t, 1), args, complain, in_decl);
	r = build_nt (code, op0, op1);
	DELETE_EXPR_USE_GLOBAL (r) = DELETE_EXPR_USE_GLOBAL (t);
	DELETE_EXPR_USE_VEC (r) = DELETE_EXPR_USE_VEC (t);
	return r;
      }

    case TEMPLATE_ID_EXPR:
      {
	/* Substituted template arguments */
	tree fn = TREE_OPERAND (t, 0);
	tree targs = TREE_OPERAND (t, 1);

	fn = tsubst_copy (fn, args, complain, in_decl);
	if (targs)
	  targs = tsubst_template_args (targs, args, complain, in_decl);

	return lookup_template_function (fn, targs);
      }

    case TREE_LIST:
      {
	tree purpose, value, chain;

	if (t == void_list_node)
	  return t;

	purpose = TREE_PURPOSE (t);
	if (purpose)
	  purpose = tsubst_copy (purpose, args, complain, in_decl);
	value = TREE_VALUE (t);
	if (value)
	  value = tsubst_copy (value, args, complain, in_decl);
	chain = TREE_CHAIN (t);
	if (chain && chain != void_type_node)
	  chain = tsubst_copy (chain, args, complain, in_decl);
	if (purpose == TREE_PURPOSE (t)
	    && value == TREE_VALUE (t)
	    && chain == TREE_CHAIN (t))
	  return t;
	return tree_cons (purpose, value, chain);
      }

    case RECORD_TYPE:
    case UNION_TYPE:
    case ENUMERAL_TYPE:
    case INTEGER_TYPE:
    case TEMPLATE_TYPE_PARM:
    case TEMPLATE_TEMPLATE_PARM:
    case BOUND_TEMPLATE_TEMPLATE_PARM:
    case TEMPLATE_PARM_INDEX:
    case POINTER_TYPE:
    case REFERENCE_TYPE:
    case OFFSET_TYPE:
    case FUNCTION_TYPE:
    case METHOD_TYPE:
    case ARRAY_TYPE:
    case TYPENAME_TYPE:
    case UNBOUND_CLASS_TEMPLATE:
    case TYPEOF_TYPE:
    case DECLTYPE_TYPE:
    case TYPE_DECL:
      return tsubst (t, args, complain, in_decl);

    case USING_DECL:
      t = DECL_NAME (t);
      /* Fall through.  */
    case IDENTIFIER_NODE:
      if (IDENTIFIER_TYPENAME_P (t))
	{
	  tree new_type = tsubst (TREE_TYPE (t), args, complain, in_decl);
	  return mangle_conv_op_name_for_type (new_type);
	}
      else
	return t;

    case CONSTRUCTOR:
      /* This is handled by tsubst_copy_and_build.  */
      gcc_unreachable ();

    case VA_ARG_EXPR:
      {
	tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl);
	tree type = tsubst (TREE_TYPE (t), args, complain, in_decl);
	return build_x_va_arg (EXPR_LOCATION (t), op0, type);
      }

    case CLEANUP_POINT_EXPR:
      /* We shouldn't have built any of these during initial template
	 generation.  Instead, they should be built during instantiation
	 in response to the saved STMT_IS_FULL_EXPR_P setting.  */
      gcc_unreachable ();

    case OFFSET_REF:
      {
	tree type = tsubst (TREE_TYPE (t), args, complain, in_decl);
	tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl);
	tree op1 = tsubst_copy (TREE_OPERAND (t, 1), args, complain, in_decl);
	r = build2 (code, type, op0, op1);
	PTRMEM_OK_P (r) = PTRMEM_OK_P (t);
	if (!mark_used (TREE_OPERAND (r, 1), complain)
	    && !(complain & tf_error))
	  return error_mark_node;
	return r;
      }

    case EXPR_PACK_EXPANSION:
      error ("invalid use of pack expansion expression");
      return error_mark_node;

    case NONTYPE_ARGUMENT_PACK:
      error ("use %<...%> to expand argument pack");
      return error_mark_node;

    case VOID_CST:
      gcc_checking_assert (t == void_node && VOID_TYPE_P (TREE_TYPE (t)));
      return t;

    case INTEGER_CST:
    case REAL_CST:
    case STRING_CST:
    case COMPLEX_CST:
      {
	/* Instantiate any typedefs in the type.  */
	tree type = tsubst (TREE_TYPE (t), args, complain, in_decl);
	r = fold_convert (type, t);
	gcc_assert (TREE_CODE (r) == code);
	return r;
      }

    case PTRMEM_CST:
      /* These can sometimes show up in a partial instantiation, but never
	 involve template parms.  */
      gcc_assert (!uses_template_parms (t));
      return t;

    case UNARY_LEFT_FOLD_EXPR:
      return tsubst_unary_left_fold (t, args, complain, in_decl);
    case UNARY_RIGHT_FOLD_EXPR:
      return tsubst_unary_right_fold (t, args, complain, in_decl);
    case BINARY_LEFT_FOLD_EXPR:
      return tsubst_binary_left_fold (t, args, complain, in_decl);
    case BINARY_RIGHT_FOLD_EXPR:
      return tsubst_binary_right_fold (t, args, complain, in_decl);

    default:
      /* We shouldn't get here, but keep going if !flag_checking.  */
      if (flag_checking)
	gcc_unreachable ();
      return t;
    }
}
/* Helper function for tsubst_omp_clauses: substitute ARGS into DECL,
   the OMP_CLAUSE_DECL of a clause.  COMPLAIN and IN_DECL are as for
   tsubst.  */

static tree
tsubst_omp_clause_decl (tree decl, tree args, tsubst_flags_t complain,
			tree in_decl)
{
  if (decl == NULL_TREE)
    return NULL_TREE;

  /* An OpenMP array section is represented as a TREE_LIST (or
     OMP_CLAUSE_DEPEND_KIND).  An OMP_CLAUSE_DEPEND with a depend kind
     of OMP_CLAUSE_DEPEND_SINK can also be represented as a TREE_LIST,
     and although its slots mean something different (not low_bound,
     length, etc.), it can be substituted exactly like an array section:
     purpose, value and a chain.  */
  if (TREE_CODE (decl) == TREE_LIST)
    {
      tree low_bound
	= tsubst_expr (TREE_PURPOSE (decl), args, complain, in_decl,
		       /*integral_constant_expression_p=*/false);
      tree length = tsubst_expr (TREE_VALUE (decl), args, complain, in_decl,
				 /*integral_constant_expression_p=*/false);
      tree chain = tsubst_omp_clause_decl (TREE_CHAIN (decl), args, complain,
					   in_decl);
      /* Share the original node when substitution changed nothing.  */
      if (low_bound == TREE_PURPOSE (decl)
	  && length == TREE_VALUE (decl)
	  && chain == TREE_CHAIN (decl))
	return decl;
      tree ret = tree_cons (low_bound, length, chain);
      OMP_CLAUSE_DEPEND_SINK_NEGATIVE (ret)
	= OMP_CLAUSE_DEPEND_SINK_NEGATIVE (decl);
      return ret;
    }

  tree ret = tsubst_expr (decl, args, complain, in_decl,
			  /*integral_constant_expression_p=*/false);
  /* tsubst_expr may have called convert_from_reference; strip the
     INDIRECT_REF it wrapped around the decl so the clause again refers
     to the declaration itself.  */
  if (decl
      && REFERENCE_REF_P (ret)
      && !REFERENCE_REF_P (decl))
    ret = TREE_OPERAND (ret, 0);
  return ret;
}
/* Like tsubst_copy, but specifically for OpenMP clauses.  CLAUSES is the
   clause chain from the pattern, ORT identifies the kind of OpenMP/OpenACC
   region the clauses belong to, and ARGS/COMPLAIN/IN_DECL are as for
   tsubst.  Returns the substituted clause chain; except for declare simd
   the chain is also passed through finish_omp_clauses.  */
static tree
tsubst_omp_clauses (tree clauses, enum c_omp_region_type ort,
		    tree args, tsubst_flags_t complain, tree in_decl)
{
  tree new_clauses = NULL_TREE, nc, oc;
  /* Tracks a linear clause whose step must be reset to NULL after
     finish_omp_clauses (see below).  */
  tree linear_no_step = NULL_TREE;

  for (oc = clauses; oc ; oc = OMP_CLAUSE_CHAIN (oc))
    {
      /* Substitute into a copy so the pattern clause stays usable for
	 later instantiations; copies are chained in reverse and the
	 order is restored by nreverse at the end.  */
      nc = copy_node (oc);
      OMP_CLAUSE_CHAIN (nc) = new_clauses;
      new_clauses = nc;

      switch (OMP_CLAUSE_CODE (nc))
	{
	case OMP_CLAUSE_LASTPRIVATE:
	  /* Substitute the conditional-lastprivate helper statement, if
	     any, into a fresh statement list.  */
	  if (OMP_CLAUSE_LASTPRIVATE_STMT (oc))
	    {
	      OMP_CLAUSE_LASTPRIVATE_STMT (nc) = push_stmt_list ();
	      tsubst_expr (OMP_CLAUSE_LASTPRIVATE_STMT (oc), args, complain,
			   in_decl, /*integral_constant_expression_p=*/false);
	      OMP_CLAUSE_LASTPRIVATE_STMT (nc)
		= pop_stmt_list (OMP_CLAUSE_LASTPRIVATE_STMT (nc));
	    }
	  /* FALLTHRU */
	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_SHARED:
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_UNIFORM:
	case OMP_CLAUSE_DEPEND:
	case OMP_CLAUSE_FROM:
	case OMP_CLAUSE_TO:
	case OMP_CLAUSE_MAP:
	case OMP_CLAUSE_USE_DEVICE_PTR:
	case OMP_CLAUSE_IS_DEVICE_PTR:
	  /* Clauses whose only operand is a decl (or array section).  */
	  OMP_CLAUSE_DECL (nc)
	    = tsubst_omp_clause_decl (OMP_CLAUSE_DECL (oc), args, complain,
				      in_decl);
	  break;
	case OMP_CLAUSE_TILE:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_DEVICE:
	case OMP_CLAUSE_DIST_SCHEDULE:
	case OMP_CLAUSE_NUM_TEAMS:
	case OMP_CLAUSE_THREAD_LIMIT:
	case OMP_CLAUSE_SAFELEN:
	case OMP_CLAUSE_SIMDLEN:
	case OMP_CLAUSE_NUM_TASKS:
	case OMP_CLAUSE_GRAINSIZE:
	case OMP_CLAUSE_PRIORITY:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_HINT:
	case OMP_CLAUSE_NUM_GANGS:
	case OMP_CLAUSE_NUM_WORKERS:
	case OMP_CLAUSE_VECTOR_LENGTH:
	case OMP_CLAUSE_WORKER:
	case OMP_CLAUSE_VECTOR:
	case OMP_CLAUSE_ASYNC:
	case OMP_CLAUSE_WAIT:
	  /* Clauses whose only operand is a general expression.  */
	  OMP_CLAUSE_OPERAND (nc, 0)
	    = tsubst_expr (OMP_CLAUSE_OPERAND (oc, 0), args, complain,
			   in_decl, /*integral_constant_expression_p=*/false);
	  break;
	case OMP_CLAUSE_REDUCTION:
	  if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (oc))
	    {
	      tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (oc);
	      if (TREE_CODE (placeholder) == SCOPE_REF)
		{
		  /* A qualified user-defined-reduction name: substitute
		     only its scope; the unqualified-id stays as-is.  */
		  tree scope = tsubst (TREE_OPERAND (placeholder, 0), args,
				       complain, in_decl);
		  OMP_CLAUSE_REDUCTION_PLACEHOLDER (nc)
		    = build_qualified_name (NULL_TREE, scope,
					    TREE_OPERAND (placeholder, 1),
					    false);
		}
	      else
		gcc_assert (identifier_p (placeholder));
	    }
	  OMP_CLAUSE_DECL (nc)
	    = tsubst_omp_clause_decl (OMP_CLAUSE_DECL (oc), args, complain,
				      in_decl);
	  break;
	case OMP_CLAUSE_GANG:
	case OMP_CLAUSE_ALIGNED:
	  /* Clauses with both a decl and an expression operand.  */
	  OMP_CLAUSE_DECL (nc)
	    = tsubst_omp_clause_decl (OMP_CLAUSE_DECL (oc), args, complain,
				      in_decl);
	  OMP_CLAUSE_OPERAND (nc, 1)
	    = tsubst_expr (OMP_CLAUSE_OPERAND (oc, 1), args, complain,
			   in_decl, /*integral_constant_expression_p=*/false);
	  break;
	case OMP_CLAUSE_LINEAR:
	  OMP_CLAUSE_DECL (nc)
	    = tsubst_omp_clause_decl (OMP_CLAUSE_DECL (oc), args, complain,
				      in_decl);
	  if (OMP_CLAUSE_LINEAR_STEP (oc) == NULL_TREE)
	    {
	      /* Step omitted in the source: remember this clause so the
		 default step finish_omp_clauses fills in can be removed
		 again afterwards.  */
	      gcc_assert (!linear_no_step);
	      linear_no_step = nc;
	    }
	  else if (OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (oc))
	    OMP_CLAUSE_LINEAR_STEP (nc)
	      = tsubst_omp_clause_decl (OMP_CLAUSE_LINEAR_STEP (oc), args,
					complain, in_decl);
	  else
	    OMP_CLAUSE_LINEAR_STEP (nc)
	      = tsubst_expr (OMP_CLAUSE_LINEAR_STEP (oc), args, complain,
			     in_decl,
			     /*integral_constant_expression_p=*/false);
	  break;
	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_DEFAULT:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_MERGEABLE:
	case OMP_CLAUSE_INBRANCH:
	case OMP_CLAUSE_NOTINBRANCH:
	case OMP_CLAUSE_PROC_BIND:
	case OMP_CLAUSE_FOR:
	case OMP_CLAUSE_PARALLEL:
	case OMP_CLAUSE_SECTIONS:
	case OMP_CLAUSE_TASKGROUP:
	case OMP_CLAUSE_NOGROUP:
	case OMP_CLAUSE_THREADS:
	case OMP_CLAUSE_SIMD:
	case OMP_CLAUSE_DEFAULTMAP:
	case OMP_CLAUSE_INDEPENDENT:
	case OMP_CLAUSE_AUTO:
	case OMP_CLAUSE_SEQ:
	  /* Clauses with no operands: nothing to substitute.  */
	  break;
	default:
	  gcc_unreachable ();
	}
      /* For plain OpenMP regions (not declare simd, not OpenACC), undo
	 transformations tsubst_expr may have applied to member decls.  */
      if ((ort & C_ORT_OMP_DECLARE_SIMD) == C_ORT_OMP)
	switch (OMP_CLAUSE_CODE (nc))
	  {
	  case OMP_CLAUSE_SHARED:
	  case OMP_CLAUSE_PRIVATE:
	  case OMP_CLAUSE_FIRSTPRIVATE:
	  case OMP_CLAUSE_LASTPRIVATE:
	  case OMP_CLAUSE_COPYPRIVATE:
	  case OMP_CLAUSE_LINEAR:
	  case OMP_CLAUSE_REDUCTION:
	  case OMP_CLAUSE_USE_DEVICE_PTR:
	  case OMP_CLAUSE_IS_DEVICE_PTR:
	    /* tsubst_expr on SCOPE_REF results in returning
	       finish_non_static_data_member result.  Undo that here.  */
	    if (TREE_CODE (OMP_CLAUSE_DECL (oc)) == SCOPE_REF
		&& (TREE_CODE (TREE_OPERAND (OMP_CLAUSE_DECL (oc), 1))
		    == IDENTIFIER_NODE))
	      {
		tree t = OMP_CLAUSE_DECL (nc);
		tree v = t;
		/* Walk down to the innermost base object; if it is the
		   implicit 'this' parameter, the clause really named a
		   member, so put back the bare FIELD_DECL.  */
		while (v)
		  switch (TREE_CODE (v))
		    {
		    case COMPONENT_REF:
		    case MEM_REF:
		    case INDIRECT_REF:
		    CASE_CONVERT:
		    case POINTER_PLUS_EXPR:
		      v = TREE_OPERAND (v, 0);
		      continue;
		    case PARM_DECL:
		      if (DECL_CONTEXT (v) == current_function_decl
			  && DECL_ARTIFICIAL (v)
			  && DECL_NAME (v) == this_identifier)
			OMP_CLAUSE_DECL (nc) = TREE_OPERAND (t, 1);
		      /* FALLTHRU */
		    default:
		      v = NULL_TREE;
		      break;
		    }
	      }
	    else if (VAR_P (OMP_CLAUSE_DECL (oc))
		     && DECL_HAS_VALUE_EXPR_P (OMP_CLAUSE_DECL (oc))
		     && DECL_ARTIFICIAL (OMP_CLAUSE_DECL (oc))
		     && DECL_LANG_SPECIFIC (OMP_CLAUSE_DECL (oc))
		     && DECL_OMP_PRIVATIZED_MEMBER (OMP_CLAUSE_DECL (oc)))
	      {
		/* Propagate the privatized-member marking from the
		   pattern's artificial VAR_DECL to the instantiation.  */
		tree decl = OMP_CLAUSE_DECL (nc);
		if (VAR_P (decl))
		  {
		    if (!DECL_LANG_SPECIFIC (decl))
		      retrofit_lang_decl (decl);
		    DECL_OMP_PRIVATIZED_MEMBER (decl) = 1;
		  }
	      }
	    break;
	  default:
	    break;
	  }
    }

  new_clauses = nreverse (new_clauses);
  if (ort != C_ORT_OMP_DECLARE_SIMD)
    {
      new_clauses = finish_omp_clauses (new_clauses, ort);
      /* finish_omp_clauses may have installed a default step on the
	 linear clause that had none; clear it again so gimplification
	 sees the clause as step-less.  */
      if (linear_no_step)
	for (nc = new_clauses; nc; nc = OMP_CLAUSE_CHAIN (nc))
	  if (nc == linear_no_step)
	    {
	      OMP_CLAUSE_LINEAR_STEP (nc) = NULL_TREE;
	      break;
	    }
    }
  return new_clauses;
}
/* Like tsubst_copy_and_build, but unshares TREE_LIST nodes, and maps
   labels appearing in asm operand lists to their counterparts in the
   function being instantiated.  */

static tree
tsubst_copy_asm_operands (tree t, tree args, tsubst_flags_t complain,
			  tree in_decl)
{
  if (t == NULL)
    return t;

  /* A non-list operand is an ordinary expression.  */
  if (TREE_CODE (t) != TREE_LIST)
    return tsubst_copy_and_build (t, args, complain, in_decl,
				  /*function_p=*/false,
				  /*integral_constant_expression_p=*/false);

  if (t == void_list_node)
    return t;

  tree purpose = TREE_PURPOSE (t);
  if (purpose)
    purpose = tsubst_copy_asm_operands (purpose, args, complain, in_decl);

  tree value = TREE_VALUE (t);
  if (value)
    {
      if (TREE_CODE (value) == LABEL_DECL)
	{
	  /* An "asm goto" label: find the label of the same name in the
	     current function instead of substituting into the pattern's
	     LABEL_DECL.  */
	  value = lookup_label (DECL_NAME (value));
	  gcc_assert (TREE_CODE (value) == LABEL_DECL);
	  TREE_USED (value) = 1;
	}
      else
	value = tsubst_copy_asm_operands (value, args, complain, in_decl);
    }

  tree chain = TREE_CHAIN (t);
  if (chain && chain != void_type_node)
    chain = tsubst_copy_asm_operands (chain, args, complain, in_decl);

  return tree_cons (purpose, value, chain);
}
/* Used to temporarily communicate the list of #pragma omp parallel
clauses to #pragma omp for instantiation if they are combined
together. */
static tree *omp_parallel_combined_clauses;
/* Substitute one OMP_FOR iterator.  T is the OMP_FOR statement, I the
   index of the collapsed loop being processed.  DECLV/INITV/CONDV/INCRV
   are the TREE_VECs to fill in with the substituted decl, init, cond and
   increment; ORIG_DECLV (if non-NULL) receives the substituted original
   declarations.  CLAUSES points to the OMP_FOR's clause chain, which may
   gain a private/lastprivate clause for the iteration variable.  The
   remaining arguments are as for tsubst_expr.  */

static void
tsubst_omp_for_iterator (tree t, int i, tree declv, tree orig_declv,
			 tree initv, tree condv, tree incrv, tree *clauses,
			 tree args, tsubst_flags_t complain, tree in_decl,
			 bool integral_constant_expression_p)
{
#define RECUR(NODE)				\
  tsubst_expr ((NODE), args, complain, in_decl,	\
	       integral_constant_expression_p)
  tree decl, init, cond, incr;

  init = TREE_VEC_ELT (OMP_FOR_INIT (t), i);
  gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
  if (orig_declv && OMP_FOR_ORIG_DECLS (t))
    {
      tree o = TREE_VEC_ELT (OMP_FOR_ORIG_DECLS (t), i);
      TREE_VEC_ELT (orig_declv, i) = RECUR (o);
    }
  decl = TREE_OPERAND (init, 0);
  init = TREE_OPERAND (init, 1);
  tree decl_expr = NULL_TREE;
  if (init && TREE_CODE (init) == DECL_EXPR)
    {
      /* We need to jump through some hoops to handle declarations in the
	 init-statement, since we might need to handle auto deduction,
	 but we need to keep control of initialization.  */
      decl_expr = init;
      init = DECL_INITIAL (DECL_EXPR_DECL (init));
      decl = tsubst_decl (decl, args, complain);
    }
  else
    {
      if (TREE_CODE (decl) == SCOPE_REF)
	{
	  decl = RECUR (decl);
	  if (TREE_CODE (decl) == COMPONENT_REF)
	    {
	      /* The iterator named a member; walk down to the base object
		 and, when it is the implicit 'this', replace the
		 COMPONENT_REF with a privatized copy of the field.  */
	      tree v = decl;
	      while (v)
		switch (TREE_CODE (v))
		  {
		  case COMPONENT_REF:
		  case MEM_REF:
		  case INDIRECT_REF:
		  CASE_CONVERT:
		  case POINTER_PLUS_EXPR:
		    v = TREE_OPERAND (v, 0);
		    continue;
		  case PARM_DECL:
		    if (DECL_CONTEXT (v) == current_function_decl
			&& DECL_ARTIFICIAL (v)
			&& DECL_NAME (v) == this_identifier)
		      {
			decl = TREE_OPERAND (decl, 1);
			decl = omp_privatize_field (decl, false);
		      }
		    /* FALLTHRU */
		  default:
		    v = NULL_TREE;
		    break;
		  }
	    }
	}
      else
	decl = RECUR (decl);
    }
  init = RECUR (init);

  /* The iterator may have been declared with 'auto'; deduce its type
     from the substituted initializer.  */
  tree auto_node = type_uses_auto (TREE_TYPE (decl));
  if (auto_node && init)
    TREE_TYPE (decl)
      = do_auto_deduction (TREE_TYPE (decl), init, auto_node);

  gcc_assert (!type_dependent_expression_p (decl));

  if (!CLASS_TYPE_P (TREE_TYPE (decl)))
    {
      /* Scalar iterator: build the vec entries directly, with no
	 clause bookkeeping needed.  */
      if (decl_expr)
	{
	  /* Declare the variable, but don't let that initialize it.  */
	  tree init_sav = DECL_INITIAL (DECL_EXPR_DECL (decl_expr));
	  DECL_INITIAL (DECL_EXPR_DECL (decl_expr)) = NULL_TREE;
	  RECUR (decl_expr);
	  DECL_INITIAL (DECL_EXPR_DECL (decl_expr)) = init_sav;
	}

      cond = RECUR (TREE_VEC_ELT (OMP_FOR_COND (t), i));
      incr = TREE_VEC_ELT (OMP_FOR_INCR (t), i);
      if (TREE_CODE (incr) == MODIFY_EXPR)
	{
	  tree lhs = RECUR (TREE_OPERAND (incr, 0));
	  tree rhs = RECUR (TREE_OPERAND (incr, 1));
	  incr = build_x_modify_expr (EXPR_LOCATION (incr), lhs,
				      NOP_EXPR, rhs, complain);
	}
      else
	incr = RECUR (incr);
      TREE_VEC_ELT (declv, i) = decl;
      TREE_VEC_ELT (initv, i) = init;
      TREE_VEC_ELT (condv, i) = cond;
      TREE_VEC_ELT (incrv, i) = incr;
      return;
    }

  /* Class-type iterator (random access iterators etc.).  */
  if (decl_expr)
    {
      /* Declare and initialize the variable.  */
      RECUR (decl_expr);
      init = NULL_TREE;
    }
  else if (init)
    {
      tree *pc;
      int j;
      /* Look for a clause naming the iterator, first among the combined
	 parallel's clauses (if any), then in the OMP_FOR's own.  */
      for (j = (omp_parallel_combined_clauses == NULL ? 1 : 0); j < 2; j++)
	{
	  for (pc = j ? clauses : omp_parallel_combined_clauses; *pc; )
	    {
	      if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_PRIVATE
		  && OMP_CLAUSE_DECL (*pc) == decl)
		break;
	      else if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_LASTPRIVATE
		       && OMP_CLAUSE_DECL (*pc) == decl)
		{
		  if (j)
		    break;
		  /* Move lastprivate (decl) clause to OMP_FOR_CLAUSES.  */
		  tree c = *pc;
		  *pc = OMP_CLAUSE_CHAIN (c);
		  OMP_CLAUSE_CHAIN (c) = *clauses;
		  *clauses = c;
		}
	      else if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_FIRSTPRIVATE
		       && OMP_CLAUSE_DECL (*pc) == decl)
		{
		  error ("iteration variable %qD should not be firstprivate",
			 decl);
		  *pc = OMP_CLAUSE_CHAIN (*pc);
		}
	      else if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_REDUCTION
		       && OMP_CLAUSE_DECL (*pc) == decl)
		{
		  error ("iteration variable %qD should not be reduction",
			 decl);
		  *pc = OMP_CLAUSE_CHAIN (*pc);
		}
	      else
		pc = &OMP_CLAUSE_CHAIN (*pc);
	    }
	  if (*pc)
	    break;
	}
      if (*pc == NULL_TREE)
	{
	  /* No explicit data-sharing clause for the iterator: it is
	     private by default; add the clause.  */
	  tree c = build_omp_clause (input_location, OMP_CLAUSE_PRIVATE);
	  OMP_CLAUSE_DECL (c) = decl;
	  c = finish_omp_clauses (c, C_ORT_OMP);
	  if (c)
	    {
	      OMP_CLAUSE_CHAIN (c) = *clauses;
	      *clauses = c;
	    }
	}
    }
  cond = TREE_VEC_ELT (OMP_FOR_COND (t), i);
  if (COMPARISON_CLASS_P (cond))
    {
      tree op0 = RECUR (TREE_OPERAND (cond, 0));
      tree op1 = RECUR (TREE_OPERAND (cond, 1));
      cond = build2 (TREE_CODE (cond), boolean_type_node, op0, op1);
    }
  else
    cond = RECUR (cond);
  incr = TREE_VEC_ELT (OMP_FOR_INCR (t), i);
  /* Preserve the exact increment shape finish_omp_for expects; operands
     are substituted individually so the outer node code survives.  */
  switch (TREE_CODE (incr))
    {
    case PREINCREMENT_EXPR:
    case PREDECREMENT_EXPR:
    case POSTINCREMENT_EXPR:
    case POSTDECREMENT_EXPR:
      incr = build2 (TREE_CODE (incr), TREE_TYPE (decl),
		     RECUR (TREE_OPERAND (incr, 0)), NULL_TREE);
      break;
    case MODIFY_EXPR:
      if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
	  || TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR)
	{
	  tree rhs = TREE_OPERAND (incr, 1);
	  tree lhs = RECUR (TREE_OPERAND (incr, 0));
	  tree rhs0 = RECUR (TREE_OPERAND (rhs, 0));
	  tree rhs1 = RECUR (TREE_OPERAND (rhs, 1));
	  incr = build2 (MODIFY_EXPR, TREE_TYPE (decl), lhs,
			 build2 (TREE_CODE (rhs), TREE_TYPE (decl),
				 rhs0, rhs1));
	}
      else
	incr = RECUR (incr);
      break;
    case MODOP_EXPR:
      if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
	  || TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR)
	{
	  tree lhs = RECUR (TREE_OPERAND (incr, 0));
	  incr = build2 (MODIFY_EXPR, TREE_TYPE (decl), lhs,
			 build2 (TREE_CODE (TREE_OPERAND (incr, 1)),
				 TREE_TYPE (decl), lhs,
				 RECUR (TREE_OPERAND (incr, 2))));
	}
      else if (TREE_CODE (TREE_OPERAND (incr, 1)) == NOP_EXPR
	       && (TREE_CODE (TREE_OPERAND (incr, 2)) == PLUS_EXPR
		   || (TREE_CODE (TREE_OPERAND (incr, 2)) == MINUS_EXPR)))
	{
	  tree rhs = TREE_OPERAND (incr, 2);
	  tree lhs = RECUR (TREE_OPERAND (incr, 0));
	  tree rhs0 = RECUR (TREE_OPERAND (rhs, 0));
	  tree rhs1 = RECUR (TREE_OPERAND (rhs, 1));
	  incr = build2 (MODIFY_EXPR, TREE_TYPE (decl), lhs,
			 build2 (TREE_CODE (rhs), TREE_TYPE (decl),
				 rhs0, rhs1));
	}
      else
	incr = RECUR (incr);
      break;
    default:
      incr = RECUR (incr);
      break;
    }

  TREE_VEC_ELT (declv, i) = decl;
  TREE_VEC_ELT (initv, i) = init;
  TREE_VEC_ELT (condv, i) = cond;
  TREE_VEC_ELT (incrv, i) = incr;
#undef RECUR
}
/* Helper function of tsubst_expr (a cp_walk_tree callback): locate an
   OMP_TEAMS construct inside an OMP_TARGET's body, looking only through
   BIND_EXPR and STATEMENT_LIST wrappers.  */

static tree
tsubst_find_omp_teams (tree *tp, int *walk_subtrees, void *)
{
  enum tree_code code = TREE_CODE (*tp);

  if (code == OMP_TEAMS)
    {
      *walk_subtrees = 0;
      return *tp;
    }

  /* Descend only through wrappers that may directly contain the teams
     construct; any other node terminates the search on this branch.  */
  *walk_subtrees = (code == BIND_EXPR || code == STATEMENT_LIST);
  return NULL_TREE;
}
/* Helper function for tsubst_expr.  For decomposition declaration
   artificial base DECL, which is tsubsted PATTERN_DECL, tsubst
   also the corresponding decls representing the identifiers
   of the decomposition declaration.  Return DECL if successful
   or error_mark_node otherwise, set *FIRST to the first decl
   in the list chained through DECL_CHAIN and *CNT to the number
   of such decls.  */

static tree
tsubst_decomp_names (tree decl, tree pattern_decl, tree args,
		     tsubst_flags_t complain, tree in_decl, tree *first,
		     unsigned int *cnt)
{
  tree decl2, decl3, prev = decl;
  *cnt = 0;
  /* The artificial base of a decomposition has no name; the named decls
     follow it on the DECL_CHAIN of the pattern.  */
  gcc_assert (DECL_NAME (decl) == NULL_TREE);
  for (decl2 = DECL_CHAIN (pattern_decl);
       decl2
       && VAR_P (decl2)
       && DECL_DECOMPOSITION_P (decl2)
       && DECL_NAME (decl2);
       decl2 = DECL_CHAIN (decl2))
    {
      if (TREE_TYPE (decl2) == error_mark_node && *cnt == 0)
	{
	  /* Substitution already failed for the whole decomposition;
	     an error must have been reported.  */
	  gcc_assert (errorcount);
	  return error_mark_node;
	}
      (*cnt)++;
      gcc_assert (DECL_HAS_VALUE_EXPR_P (decl2));
      /* Temporarily clear the value-expr so tsubst builds a plain
	 VAR_DECL; the value-expr is restored on the pattern afterwards.  */
      tree v = DECL_VALUE_EXPR (decl2);
      DECL_HAS_VALUE_EXPR_P (decl2) = 0;
      SET_DECL_VALUE_EXPR (decl2, NULL_TREE);
      decl3 = tsubst (decl2, args, complain, in_decl);
      SET_DECL_VALUE_EXPR (decl2, v);
      DECL_HAS_VALUE_EXPR_P (decl2) = 1;
      if (VAR_P (decl3))
	DECL_TEMPLATE_INSTANTIATED (decl3) = 1;
      maybe_push_decl (decl3);
      if (error_operand_p (decl3))
	decl = error_mark_node;
      else if (decl != error_mark_node
	       && DECL_CHAIN (decl3) != prev)
	{
	  /* maybe_push_decl should have chained DECL3 in front of the
	     previously pushed decl; if not, an error occurred.  */
	  gcc_assert (errorcount);
	  decl = error_mark_node;
	}
      else
	prev = decl3;
    }
  *first = prev;
  return decl;
}
/* Like tsubst_copy for expressions, etc. but also does semantic
   processing: statements are added to the current statement tree,
   declarations are pushed into the current scope, and the various
   finish_* semantic routines are invoked.  T is the pattern tree,
   ARGS the template arguments; COMPLAIN controls diagnostics and
   IN_DECL is the declaration being instantiated (for diagnostics).
   INTEGRAL_CONSTANT_EXPRESSION_P is true when T appears in a context
   requiring an integral constant expression.  */

tree
tsubst_expr (tree t, tree args, tsubst_flags_t complain, tree in_decl,
	     bool integral_constant_expression_p)
{
#define RETURN(EXP) do { r = (EXP); goto out; } while(0)
#define RECUR(NODE)				\
  tsubst_expr ((NODE), args, complain, in_decl,	\
	       integral_constant_expression_p)

  tree stmt, tmp;
  tree r;
  location_t loc;

  if (t == NULL_TREE || t == error_mark_node)
    return t;

  /* Save input_location; it is restored at "out" below.  */
  loc = input_location;
  if (EXPR_HAS_LOCATION (t))
    input_location = EXPR_LOCATION (t);
  if (STATEMENT_CODE_P (TREE_CODE (t)))
    current_stmt_tree ()->stmts_are_full_exprs_p = STMT_IS_FULL_EXPR_P (t);

  switch (TREE_CODE (t))
    {
    case STATEMENT_LIST:
      {
	tree_stmt_iterator i;
	for (i = tsi_start (t); !tsi_end_p (i); tsi_next (&i))
	  RECUR (tsi_stmt (i));
	break;
      }

    case CTOR_INITIALIZER:
      finish_mem_initializers (tsubst_initializer_list
			       (TREE_OPERAND (t, 0), args));
      break;

    case RETURN_EXPR:
      finish_return_stmt (RECUR (TREE_OPERAND (t, 0)));
      break;

    case EXPR_STMT:
      tmp = RECUR (EXPR_STMT_EXPR (t));
      if (EXPR_STMT_STMT_EXPR_RESULT (t))
	finish_stmt_expr_expr (tmp, cur_stmt_expr);
      else
	finish_expr_stmt (tmp);
      break;

    case USING_STMT:
      do_using_directive (USING_STMT_NAMESPACE (t));
      break;

    case DECL_EXPR:
      {
	tree decl, pattern_decl;
	tree init;

	pattern_decl = decl = DECL_EXPR_DECL (t);
	if (TREE_CODE (decl) == LABEL_DECL)
	  finish_label_decl (DECL_NAME (decl));
	else if (TREE_CODE (decl) == USING_DECL)
	  {
	    /* Block-scope using-declaration: redo the qualified lookup
	       with the substituted scope.  */
	    tree scope = USING_DECL_SCOPE (decl);
	    tree name = DECL_NAME (decl);

	    scope = tsubst (scope, args, complain, in_decl);
	    decl = lookup_qualified_name (scope, name,
					  /*is_type_p=*/false,
					  /*complain=*/false);
	    if (decl == error_mark_node || TREE_CODE (decl) == TREE_LIST)
	      qualified_name_lookup_error (scope, name, decl, input_location);
	    else
	      do_local_using_decl (decl, scope, name);
	  }
	else if (DECL_PACK_P (decl))
	  {
	    /* Don't build up decls for a variadic capture proxy, we'll
	       instantiate the elements directly as needed.  */
	    break;
	  }
	else
	  {
	    init = DECL_INITIAL (decl);
	    decl = tsubst (decl, args, complain, in_decl);
	    if (decl != error_mark_node)
	      {
		/* By marking the declaration as instantiated, we avoid
		   trying to instantiate it.  Since instantiate_decl can't
		   handle local variables, and since we've already done
		   all that needs to be done, that's the right thing to
		   do.  */
		if (VAR_P (decl))
		  DECL_TEMPLATE_INSTANTIATED (decl) = 1;
		if (VAR_P (decl)
		    && ANON_AGGR_TYPE_P (TREE_TYPE (decl)))
		  /* Anonymous aggregates are a special case.  */
		  finish_anon_union (decl);
		else if (is_capture_proxy (DECL_EXPR_DECL (t)))
		  {
		    DECL_CONTEXT (decl) = current_function_decl;
		    if (DECL_NAME (decl) == this_identifier)
		      {
			/* Record the 'this' capture on the enclosing
			   lambda's LAMBDA_EXPR.  */
			tree lam = DECL_CONTEXT (current_function_decl);
			lam = CLASSTYPE_LAMBDA_EXPR (lam);
			LAMBDA_EXPR_THIS_CAPTURE (lam) = decl;
		      }
		    insert_capture_proxy (decl);
		  }
		else if (DECL_IMPLICIT_TYPEDEF_P (t))
		  /* We already did a pushtag.  */;
		else if (TREE_CODE (decl) == FUNCTION_DECL
			 && DECL_OMP_DECLARE_REDUCTION_P (decl)
			 && DECL_FUNCTION_SCOPE_P (pattern_decl))
		  {
		    /* Push the declare-reduction helper with a null
		       context first, then restore the function context
		       before checking it.  */
		    DECL_CONTEXT (decl) = NULL_TREE;
		    pushdecl (decl);
		    DECL_CONTEXT (decl) = current_function_decl;
		    cp_check_omp_declare_reduction (decl);
		  }
		else
		  {
		    int const_init = false;
		    maybe_push_decl (decl);
		    if (VAR_P (decl)
			&& DECL_PRETTY_FUNCTION_P (decl))
		      {
			/* For __PRETTY_FUNCTION__ we have to adjust the
			   initializer.  */
			const char *const name
			  = cxx_printable_name (current_function_decl, 2);
			init = cp_fname_init (name, &TREE_TYPE (decl));
		      }
		    else
		      init = tsubst_init (init, decl, args, complain, in_decl);

		    if (VAR_P (decl))
		      const_init = (DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P
				    (pattern_decl));
		    if (VAR_P (decl)
			&& DECL_DECOMPOSITION_P (decl)
			&& TREE_TYPE (pattern_decl) != error_mark_node)
		      {
			/* Structured binding: also substitute the decls
			   for the individual identifiers.  */
			unsigned int cnt;
			tree first;
			tree ndecl
			  = tsubst_decomp_names (decl, pattern_decl, args,
						 complain, in_decl, &first, &cnt);
			if (ndecl != error_mark_node)
			  cp_maybe_mangle_decomp (ndecl, first, cnt);
			cp_finish_decl (decl, init, const_init, NULL_TREE, 0);
			if (ndecl != error_mark_node)
			  cp_finish_decomp (ndecl, first, cnt);
		      }
		    else
		      cp_finish_decl (decl, init, const_init, NULL_TREE, 0);
		  }
	      }
	  }

	break;
      }

    case FOR_STMT:
      stmt = begin_for_stmt (NULL_TREE, NULL_TREE);
      RECUR (FOR_INIT_STMT (t));
      finish_init_stmt (stmt);
      tmp = RECUR (FOR_COND (t));
      finish_for_cond (tmp, stmt, false);
      tmp = RECUR (FOR_EXPR (t));
      finish_for_expr (tmp, stmt);
      RECUR (FOR_BODY (t));
      finish_for_stmt (stmt);
      break;

    case RANGE_FOR_STMT:
      {
	/* Range-based for is lowered via cp_convert_range_for; a
	   structured-binding loop variable needs its identifier decls
	   substituted first.  */
	tree decl, expr;
	stmt = begin_for_stmt (NULL_TREE, NULL_TREE);
	decl = RANGE_FOR_DECL (t);
	decl = tsubst (decl, args, complain, in_decl);
	maybe_push_decl (decl);
	expr = RECUR (RANGE_FOR_EXPR (t));
	if (VAR_P (decl) && DECL_DECOMPOSITION_P (decl))
	  {
	    unsigned int cnt;
	    tree first;
	    decl = tsubst_decomp_names (decl, RANGE_FOR_DECL (t), args,
					complain, in_decl, &first, &cnt);
	    stmt = cp_convert_range_for (stmt, decl, expr, first, cnt,
					 RANGE_FOR_IVDEP (t));
	  }
	else
	  stmt = cp_convert_range_for (stmt, decl, expr, NULL_TREE, 0,
				       RANGE_FOR_IVDEP (t));
	RECUR (RANGE_FOR_BODY (t));
	finish_for_stmt (stmt);
      }
      break;

    case WHILE_STMT:
      stmt = begin_while_stmt ();
      tmp = RECUR (WHILE_COND (t));
      finish_while_stmt_cond (tmp, stmt, false);
      RECUR (WHILE_BODY (t));
      finish_while_stmt (stmt);
      break;

    case DO_STMT:
      stmt = begin_do_stmt ();
      RECUR (DO_BODY (t));
      finish_do_body (stmt);
      tmp = RECUR (DO_COND (t));
      finish_do_stmt (tmp, stmt, false);
      break;

    case IF_STMT:
      stmt = begin_if_stmt ();
      IF_STMT_CONSTEXPR_P (stmt) = IF_STMT_CONSTEXPR_P (t);
      tmp = RECUR (IF_COND (t));
      tmp = finish_if_stmt_cond (tmp, stmt);
      /* For 'if constexpr', the discarded branch is not instantiated
	 at all; for ordinary if with a known-constant condition, the
	 dead branch is still instantiated but with evaluation warnings
	 inhibited.  */
      if (IF_STMT_CONSTEXPR_P (t) && integer_zerop (tmp))
	/* Don't instantiate the THEN_CLAUSE. */;
      else
	{
	  bool inhibit = integer_zerop (fold_non_dependent_expr (tmp));
	  if (inhibit)
	    ++c_inhibit_evaluation_warnings;
	  RECUR (THEN_CLAUSE (t));
	  if (inhibit)
	    --c_inhibit_evaluation_warnings;
	}
      finish_then_clause (stmt);

      if (IF_STMT_CONSTEXPR_P (t) && integer_nonzerop (tmp))
	/* Don't instantiate the ELSE_CLAUSE. */;
      else if (ELSE_CLAUSE (t))
	{
	  bool inhibit = integer_nonzerop (fold_non_dependent_expr (tmp));
	  begin_else_clause (stmt);
	  if (inhibit)
	    ++c_inhibit_evaluation_warnings;
	  RECUR (ELSE_CLAUSE (t));
	  if (inhibit)
	    --c_inhibit_evaluation_warnings;
	  finish_else_clause (stmt);
	}

      finish_if_stmt (stmt);
      break;

    case BIND_EXPR:
      if (BIND_EXPR_BODY_BLOCK (t))
	stmt = begin_function_body ();
      else
	stmt = begin_compound_stmt (BIND_EXPR_TRY_BLOCK (t)
				    ? BCS_TRY_BLOCK : 0);

      RECUR (BIND_EXPR_BODY (t));

      if (BIND_EXPR_BODY_BLOCK (t))
	finish_function_body (stmt);
      else
	finish_compound_stmt (stmt);
      break;

    case BREAK_STMT:
      finish_break_stmt ();
      break;

    case CONTINUE_STMT:
      finish_continue_stmt ();
      break;

    case SWITCH_STMT:
      stmt = begin_switch_stmt ();
      tmp = RECUR (SWITCH_STMT_COND (t));
      finish_switch_cond (tmp, stmt);
      RECUR (SWITCH_STMT_BODY (t));
      finish_switch_stmt (stmt);
      break;

    case CASE_LABEL_EXPR:
      {
	tree low = RECUR (CASE_LOW (t));
	tree high = RECUR (CASE_HIGH (t));
	tree l = finish_case_label (EXPR_LOCATION (t), low, high);
	if (l && TREE_CODE (l) == CASE_LABEL_EXPR)
	  /* Preserve [[fallthrough]] marking across instantiation.  */
	  FALLTHROUGH_LABEL_P (CASE_LABEL (l))
	    = FALLTHROUGH_LABEL_P (CASE_LABEL (t));
      }
      break;

    case LABEL_EXPR:
      {
	tree decl = LABEL_EXPR_LABEL (t);
	tree label;

	label = finish_label_stmt (DECL_NAME (decl));
	if (TREE_CODE (label) == LABEL_DECL)
	  FALLTHROUGH_LABEL_P (label) = FALLTHROUGH_LABEL_P (decl);
	if (DECL_ATTRIBUTES (decl) != NULL_TREE)
	  cplus_decl_attributes (&label, DECL_ATTRIBUTES (decl), 0);
      }
      break;

    case GOTO_EXPR:
      tmp = GOTO_DESTINATION (t);
      if (TREE_CODE (tmp) != LABEL_DECL)
	/* Computed goto's must be tsubst'd into.  On the other hand,
	   non-computed gotos must not be; the identifier in question
	   will have no binding.  */
	tmp = RECUR (tmp);
      else
	tmp = DECL_NAME (tmp);
      finish_goto_stmt (tmp);
      break;

    case ASM_EXPR:
      {
	tree string = RECUR (ASM_STRING (t));
	tree outputs = tsubst_copy_asm_operands (ASM_OUTPUTS (t), args,
						 complain, in_decl);
	tree inputs = tsubst_copy_asm_operands (ASM_INPUTS (t), args,
						complain, in_decl);
	tree clobbers = tsubst_copy_asm_operands (ASM_CLOBBERS (t), args,
						  complain, in_decl);
	tree labels = tsubst_copy_asm_operands (ASM_LABELS (t), args,
						complain, in_decl);
	tmp = finish_asm_stmt (ASM_VOLATILE_P (t), string, outputs, inputs,
			       clobbers, labels);
	/* finish_asm_stmt may wrap the ASM_EXPR in a CLEANUP_POINT_EXPR;
	   look through it to copy the basic-asm flag.  */
	tree asm_expr = tmp;
	if (TREE_CODE (asm_expr) == CLEANUP_POINT_EXPR)
	  asm_expr = TREE_OPERAND (asm_expr, 0);
	ASM_INPUT_P (asm_expr) = ASM_INPUT_P (t);
      }
      break;

    case TRY_BLOCK:
      if (CLEANUP_P (t))
	{
	  stmt = begin_try_block ();
	  RECUR (TRY_STMTS (t));
	  finish_cleanup_try_block (stmt);
	  finish_cleanup (RECUR (TRY_HANDLERS (t)), stmt);
	}
      else
	{
	  tree compound_stmt = NULL_TREE;

	  if (FN_TRY_BLOCK_P (t))
	    stmt = begin_function_try_block (&compound_stmt);
	  else
	    stmt = begin_try_block ();

	  RECUR (TRY_STMTS (t));

	  if (FN_TRY_BLOCK_P (t))
	    finish_function_try_block (stmt);
	  else
	    finish_try_block (stmt);

	  RECUR (TRY_HANDLERS (t));
	  if (FN_TRY_BLOCK_P (t))
	    finish_function_handler_sequence (stmt, compound_stmt);
	  else
	    finish_handler_sequence (stmt);
	}
      break;

    case HANDLER:
      {
	tree decl = HANDLER_PARMS (t);

	if (decl)
	  {
	    decl = tsubst (decl, args, complain, in_decl);
	    /* Prevent instantiate_decl from trying to instantiate
	       this variable.  We've already done all that needs to be
	       done.  */
	    if (decl != error_mark_node)
	      DECL_TEMPLATE_INSTANTIATED (decl) = 1;
	  }
	stmt = begin_handler ();
	finish_handler_parms (decl, stmt);
	RECUR (HANDLER_BODY (t));
	finish_handler (stmt);
      }
      break;

    case TAG_DEFN:
      tmp = tsubst (TREE_TYPE (t), args, complain, NULL_TREE);
      if (CLASS_TYPE_P (tmp))
	{
	  /* Local classes are not independent templates; they are
	     instantiated along with their containing function.  And this
	     way we don't have to deal with pushing out of one local class
	     to instantiate a member of another local class.  */
	  tree fn;
	  /* Closures are handled by the LAMBDA_EXPR.  */
	  gcc_assert (!LAMBDA_TYPE_P (TREE_TYPE (t)));
	  complete_type (tmp);
	  for (fn = TYPE_METHODS (tmp); fn; fn = DECL_CHAIN (fn))
	    if (!DECL_ARTIFICIAL (fn))
	      instantiate_decl (fn, /*defer_ok=*/false,
				/*expl_inst_class=*/false);
	}
      break;

    case STATIC_ASSERT:
      {
	tree condition;

	/* Evaluating the condition must not emit evaluation warnings.  */
	++c_inhibit_evaluation_warnings;
	condition =
	  tsubst_expr (STATIC_ASSERT_CONDITION (t),
		       args,
		       complain, in_decl,
		       /*integral_constant_expression_p=*/true);
	--c_inhibit_evaluation_warnings;

	finish_static_assert (condition,
			      STATIC_ASSERT_MESSAGE (t),
			      STATIC_ASSERT_SOURCE_LOCATION (t),
			      /*member_p=*/false);
      }
      break;

    case OACC_KERNELS:
    case OACC_PARALLEL:
      tmp = tsubst_omp_clauses (OMP_CLAUSES (t), C_ORT_ACC, args, complain,
				in_decl);
      stmt = begin_omp_parallel ();
      RECUR (OMP_BODY (t));
      finish_omp_construct (TREE_CODE (t), stmt, tmp);
      break;

    case OMP_PARALLEL:
      r = push_omp_privatization_clauses (OMP_PARALLEL_COMBINED (t));
      tmp = tsubst_omp_clauses (OMP_PARALLEL_CLAUSES (t), C_ORT_OMP, args,
				complain, in_decl);
      if (OMP_PARALLEL_COMBINED (t))
	omp_parallel_combined_clauses = &tmp;
      stmt = begin_omp_parallel ();
      RECUR (OMP_PARALLEL_BODY (t));
      /* A combined construct must have consumed the shared clause
	 pointer while substituting its body.  */
      gcc_assert (omp_parallel_combined_clauses == NULL);
      OMP_PARALLEL_COMBINED (finish_omp_parallel (tmp, stmt))
	= OMP_PARALLEL_COMBINED (t);
      pop_omp_privatization_clauses (r);
      break;

    case OMP_TASK:
      r = push_omp_privatization_clauses (false);
      tmp = tsubst_omp_clauses (OMP_TASK_CLAUSES (t), C_ORT_OMP, args,
				complain, in_decl);
      stmt = begin_omp_task ();
      RECUR (OMP_TASK_BODY (t));
      finish_omp_task (tmp, stmt);
      pop_omp_privatization_clauses (r);
      break;

    case OMP_FOR:
    case OMP_SIMD:
    case CILK_SIMD:
    case CILK_FOR:
    case OMP_DISTRIBUTE:
    case OMP_TASKLOOP:
    case OACC_LOOP:
      {
	tree clauses, body, pre_body;
	tree declv = NULL_TREE, initv = NULL_TREE, condv = NULL_TREE;
	tree orig_declv = NULL_TREE;
	tree incrv = NULL_TREE;
	enum c_omp_region_type ort = C_ORT_OMP;
	int i;

	if (TREE_CODE (t) == CILK_SIMD || TREE_CODE (t) == CILK_FOR)
	  ort = C_ORT_CILK;
	else if (TREE_CODE (t) == OACC_LOOP)
	  ort = C_ORT_ACC;

	r = push_omp_privatization_clauses (OMP_FOR_INIT (t) == NULL_TREE);
	clauses = tsubst_omp_clauses (OMP_FOR_CLAUSES (t), ort, args, complain,
				      in_decl);
	if (OMP_FOR_INIT (t) != NULL_TREE)
	  {
	    /* One vector slot per collapsed loop level.  */
	    declv = make_tree_vec (TREE_VEC_LENGTH (OMP_FOR_INIT (t)));
	    if (OMP_FOR_ORIG_DECLS (t))
	      orig_declv = make_tree_vec (TREE_VEC_LENGTH (OMP_FOR_INIT (t)));
	    initv = make_tree_vec (TREE_VEC_LENGTH (OMP_FOR_INIT (t)));
	    condv = make_tree_vec (TREE_VEC_LENGTH (OMP_FOR_INIT (t)));
	    incrv = make_tree_vec (TREE_VEC_LENGTH (OMP_FOR_INIT (t)));
	  }

	stmt = begin_omp_structured_block ();

	pre_body = push_stmt_list ();
	RECUR (OMP_FOR_PRE_BODY (t));
	pre_body = pop_stmt_list (pre_body);

	if (OMP_FOR_INIT (t) != NULL_TREE)
	  for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (t)); i++)
	    tsubst_omp_for_iterator (t, i, declv, orig_declv, initv, condv,
				     incrv, &clauses, args, complain, in_decl,
				     integral_constant_expression_p);
	omp_parallel_combined_clauses = NULL;

	body = push_stmt_list ();
	RECUR (OMP_FOR_BODY (t));
	body = pop_stmt_list (body);

	if (OMP_FOR_INIT (t) != NULL_TREE)
	  t = finish_omp_for (EXPR_LOCATION (t), TREE_CODE (t), declv,
			      orig_declv, initv, condv, incrv, body, pre_body,
			      NULL, clauses);
	else
	  {
	    /* No iteration statement (e.g. orphaned loop directive);
	       build a bare node by hand.  */
	    t = make_node (TREE_CODE (t));
	    TREE_TYPE (t) = void_type_node;
	    OMP_FOR_BODY (t) = body;
	    OMP_FOR_PRE_BODY (t) = pre_body;
	    OMP_FOR_CLAUSES (t) = clauses;
	    SET_EXPR_LOCATION (t, EXPR_LOCATION (t));
	    add_stmt (t);
	  }

	add_stmt (finish_omp_structured_block (stmt));
	pop_omp_privatization_clauses (r);
      }
      break;

    case OMP_SECTIONS:
      omp_parallel_combined_clauses = NULL;
      /* FALLTHRU */
    case OMP_SINGLE:
    case OMP_TEAMS:
    case OMP_CRITICAL:
      r = push_omp_privatization_clauses (TREE_CODE (t) == OMP_TEAMS
					  && OMP_TEAMS_COMBINED (t));
      tmp = tsubst_omp_clauses (OMP_CLAUSES (t), C_ORT_OMP, args, complain,
				in_decl);
      stmt = push_stmt_list ();
      RECUR (OMP_BODY (t));
      stmt = pop_stmt_list (stmt);

      t = copy_node (t);
      OMP_BODY (t) = stmt;
      OMP_CLAUSES (t) = tmp;
      add_stmt (t);
      pop_omp_privatization_clauses (r);
      break;

    case OACC_DATA:
    case OMP_TARGET_DATA:
    case OMP_TARGET:
      tmp = tsubst_omp_clauses (OMP_CLAUSES (t), (TREE_CODE (t) == OACC_DATA)
				? C_ORT_ACC : C_ORT_OMP, args, complain,
				in_decl);
      keep_next_level (true);
      stmt = begin_omp_structured_block ();

      RECUR (OMP_BODY (t));
      stmt = finish_omp_structured_block (stmt);

      t = copy_node (t);
      OMP_BODY (t) = stmt;
      OMP_CLAUSES (t) = tmp;
      if (TREE_CODE (t) == OMP_TARGET && OMP_TARGET_COMBINED (t))
	{
	  tree teams = cp_walk_tree (&stmt, tsubst_find_omp_teams, NULL, NULL);
	  if (teams)
	    {
	      /* For combined target teams, ensure the num_teams and
		 thread_limit clause expressions are evaluated on the host,
		 before entering the target construct.  */
	      tree c;
	      for (c = OMP_TEAMS_CLAUSES (teams);
		   c; c = OMP_CLAUSE_CHAIN (c))
		if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_NUM_TEAMS
		     || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_THREAD_LIMIT)
		    && TREE_CODE (OMP_CLAUSE_OPERAND (c, 0)) != INTEGER_CST)
		  {
		    tree expr = OMP_CLAUSE_OPERAND (c, 0);
		    expr = force_target_expr (TREE_TYPE (expr), expr, tf_none);
		    if (expr == error_mark_node)
		      continue;
		    /* Evaluate into a temporary on the host and map that
		       temporary firstprivate into the target region.  */
		    tmp = TARGET_EXPR_SLOT (expr);
		    add_stmt (expr);
		    OMP_CLAUSE_OPERAND (c, 0) = expr;
		    tree tc = build_omp_clause (OMP_CLAUSE_LOCATION (c),
						OMP_CLAUSE_FIRSTPRIVATE);
		    OMP_CLAUSE_DECL (tc) = tmp;
		    OMP_CLAUSE_CHAIN (tc) = OMP_TARGET_CLAUSES (t);
		    OMP_TARGET_CLAUSES (t) = tc;
		  }
	    }
	}
      add_stmt (t);
      break;

    case OACC_DECLARE:
      t = copy_node (t);
      tmp = tsubst_omp_clauses (OACC_DECLARE_CLAUSES (t), C_ORT_ACC, args,
				complain, in_decl);
      OACC_DECLARE_CLAUSES (t) = tmp;
      add_stmt (t);
      break;

    case OMP_TARGET_UPDATE:
    case OMP_TARGET_ENTER_DATA:
    case OMP_TARGET_EXIT_DATA:
      tmp = tsubst_omp_clauses (OMP_STANDALONE_CLAUSES (t), C_ORT_OMP, args,
				complain, in_decl);
      t = copy_node (t);
      OMP_STANDALONE_CLAUSES (t) = tmp;
      add_stmt (t);
      break;

    case OACC_ENTER_DATA:
    case OACC_EXIT_DATA:
    case OACC_UPDATE:
      tmp = tsubst_omp_clauses (OMP_STANDALONE_CLAUSES (t), C_ORT_ACC, args,
				complain, in_decl);
      t = copy_node (t);
      OMP_STANDALONE_CLAUSES (t) = tmp;
      add_stmt (t);
      break;

    case OMP_ORDERED:
      tmp = tsubst_omp_clauses (OMP_ORDERED_CLAUSES (t), C_ORT_OMP, args,
				complain, in_decl);
      stmt = push_stmt_list ();
      RECUR (OMP_BODY (t));
      stmt = pop_stmt_list (stmt);

      t = copy_node (t);
      OMP_BODY (t) = stmt;
      OMP_ORDERED_CLAUSES (t) = tmp;
      add_stmt (t);
      break;

    case OMP_SECTION:
    case OMP_MASTER:
    case OMP_TASKGROUP:
      stmt = push_stmt_list ();
      RECUR (OMP_BODY (t));
      stmt = pop_stmt_list (stmt);

      t = copy_node (t);
      OMP_BODY (t) = stmt;
      add_stmt (t);
      break;

    case OMP_ATOMIC:
      gcc_assert (OMP_ATOMIC_DEPENDENT_P (t));
      if (TREE_CODE (TREE_OPERAND (t, 1)) != MODIFY_EXPR)
	{
	  /* Plain atomic update of the form "lhs OP= rhs" (possibly
	     preceded by a COMPOUND_EXPR for the swapped-operand form).  */
	  tree op1 = TREE_OPERAND (t, 1);
	  tree rhs1 = NULL_TREE;
	  tree lhs, rhs;
	  if (TREE_CODE (op1) == COMPOUND_EXPR)
	    {
	      rhs1 = RECUR (TREE_OPERAND (op1, 0));
	      op1 = TREE_OPERAND (op1, 1);
	    }
	  lhs = RECUR (TREE_OPERAND (op1, 0));
	  rhs = RECUR (TREE_OPERAND (op1, 1));
	  finish_omp_atomic (OMP_ATOMIC, TREE_CODE (op1), lhs, rhs,
			     NULL_TREE, NULL_TREE, rhs1,
			     OMP_ATOMIC_SEQ_CST (t));
	}
      else
	{
	  /* Atomic read / capture / write forms, distinguished by the
	     tree code of the nested expression.  */
	  tree op1 = TREE_OPERAND (t, 1);
	  tree v = NULL_TREE, lhs, rhs = NULL_TREE, lhs1 = NULL_TREE;
	  tree rhs1 = NULL_TREE;
	  enum tree_code code = TREE_CODE (TREE_OPERAND (op1, 1));
	  enum tree_code opcode = NOP_EXPR;
	  if (code == OMP_ATOMIC_READ)
	    {
	      v = RECUR (TREE_OPERAND (op1, 0));
	      lhs = RECUR (TREE_OPERAND (TREE_OPERAND (op1, 1), 0));
	    }
	  else if (code == OMP_ATOMIC_CAPTURE_OLD
		   || code == OMP_ATOMIC_CAPTURE_NEW)
	    {
	      tree op11 = TREE_OPERAND (TREE_OPERAND (op1, 1), 1);
	      v = RECUR (TREE_OPERAND (op1, 0));
	      lhs1 = RECUR (TREE_OPERAND (TREE_OPERAND (op1, 1), 0));
	      if (TREE_CODE (op11) == COMPOUND_EXPR)
		{
		  rhs1 = RECUR (TREE_OPERAND (op11, 0));
		  op11 = TREE_OPERAND (op11, 1);
		}
	      lhs = RECUR (TREE_OPERAND (op11, 0));
	      rhs = RECUR (TREE_OPERAND (op11, 1));
	      opcode = TREE_CODE (op11);
	      if (opcode == MODIFY_EXPR)
		opcode = NOP_EXPR;
	    }
	  else
	    {
	      code = OMP_ATOMIC;
	      lhs = RECUR (TREE_OPERAND (op1, 0));
	      rhs = RECUR (TREE_OPERAND (op1, 1));
	    }
	  finish_omp_atomic (code, opcode, lhs, rhs, v, lhs1, rhs1,
			     OMP_ATOMIC_SEQ_CST (t));
	}
      break;

    case TRANSACTION_EXPR:
      {
	int flags = 0;
	flags |= (TRANSACTION_EXPR_OUTER (t) ? TM_STMT_ATTR_OUTER : 0);
	flags |= (TRANSACTION_EXPR_RELAXED (t) ? TM_STMT_ATTR_RELAXED : 0);

	if (TRANSACTION_EXPR_IS_STMT (t))
	  {
	    tree body = TRANSACTION_EXPR_BODY (t);
	    tree noex = NULL_TREE;
	    if (TREE_CODE (body) == MUST_NOT_THROW_EXPR)
	      {
		noex = MUST_NOT_THROW_COND (body);
		if (noex == NULL_TREE)
		  noex = boolean_true_node;
		body = TREE_OPERAND (body, 0);
	      }
	    stmt = begin_transaction_stmt (input_location, NULL, flags);
	    RECUR (body);
	    finish_transaction_stmt (stmt, NULL, flags, RECUR (noex));
	  }
	else
	  {
	    stmt = build_transaction_expr (EXPR_LOCATION (t),
					   RECUR (TRANSACTION_EXPR_BODY (t)),
					   flags, NULL_TREE);
	    RETURN (stmt);
	  }
      }
      break;

    case MUST_NOT_THROW_EXPR:
      {
	tree op0 = RECUR (TREE_OPERAND (t, 0));
	tree cond = RECUR (MUST_NOT_THROW_COND (t));
	RETURN (build_must_not_throw_expr (op0, cond));
      }

    case EXPR_PACK_EXPANSION:
      error ("invalid use of pack expansion expression");
      RETURN (error_mark_node);

    case NONTYPE_ARGUMENT_PACK:
      error ("use %<...%> to expand argument pack");
      RETURN (error_mark_node);

    case CILK_SPAWN_STMT:
      cfun->calls_cilk_spawn = 1;
      RETURN (build_cilk_spawn (EXPR_LOCATION (t), RECUR (CILK_SPAWN_FN (t))));

    case CILK_SYNC_STMT:
      RETURN (build_cilk_sync ());

    case COMPOUND_EXPR:
      tmp = RECUR (TREE_OPERAND (t, 0));
      if (tmp == NULL_TREE)
	/* If the first operand was a statement, we're done with it.  */
	RETURN (RECUR (TREE_OPERAND (t, 1)));
      RETURN (build_x_compound_expr (EXPR_LOCATION (t), tmp,
				     RECUR (TREE_OPERAND (t, 1)),
				     complain));

    case ANNOTATE_EXPR:
      tmp = RECUR (TREE_OPERAND (t, 0));
      RETURN (build2_loc (EXPR_LOCATION (t), ANNOTATE_EXPR,
			  TREE_TYPE (tmp), tmp, RECUR (TREE_OPERAND (t, 1))));

    default:
      /* Everything else is a plain expression; hand it off to the
	 expression substituter.  */
      gcc_assert (!STATEMENT_CODE_P (TREE_CODE (t)));

      RETURN (tsubst_copy_and_build (t, args, complain, in_decl,
				     /*function_p=*/false,
				     integral_constant_expression_p));
    }

  RETURN (NULL_TREE);

 out:
  input_location = loc;
  return r;
#undef RECUR
#undef RETURN
}
/* Instantiate the special body of the artificial DECL_OMP_DECLARE_REDUCTION
   function.  For description of the body see comment above
   cp_parser_omp_declare_reduction_exprs.  The body is a STATEMENT_LIST of
   up to 7 statements: DECL_EXPRs for omp_out and omp_in, the combiner,
   then optionally DECL_EXPRs for omp_priv and omp_orig followed by the
   initializer (and possibly one trailing statement).  */

static void
tsubst_omp_udr (tree t, tree args, tsubst_flags_t complain, tree in_decl)
{
  if (t == NULL_TREE || t == error_mark_node)
    return;

  gcc_assert (TREE_CODE (t) == STATEMENT_LIST);

  /* Collect the (at most 7) statements of the pattern body.  */
  tree_stmt_iterator tsi;
  int i;
  tree stmts[7];
  memset (stmts, 0, sizeof stmts);
  for (i = 0, tsi = tsi_start (t);
       i < 7 && !tsi_end_p (tsi);
       i++, tsi_next (&tsi))
    stmts[i] = tsi_stmt (tsi);
  gcc_assert (tsi_end_p (tsi));

  if (i >= 3)
    {
      /* stmts[0]/stmts[1] declare omp_out/omp_in; stmts[2] is the
	 combiner expression.  */
      gcc_assert (TREE_CODE (stmts[0]) == DECL_EXPR
		  && TREE_CODE (stmts[1]) == DECL_EXPR);
      tree omp_out = tsubst (DECL_EXPR_DECL (stmts[0]),
			     args, complain, in_decl);
      tree omp_in = tsubst (DECL_EXPR_DECL (stmts[1]),
			    args, complain, in_decl);
      DECL_CONTEXT (omp_out) = current_function_decl;
      DECL_CONTEXT (omp_in) = current_function_decl;
      keep_next_level (true);
      tree block = begin_omp_structured_block ();
      tsubst_expr (stmts[2], args, complain, in_decl, false);
      block = finish_omp_structured_block (block);
      block = maybe_cleanup_point_expr_void (block);
      add_decl_expr (omp_out);
      if (TREE_NO_WARNING (DECL_EXPR_DECL (stmts[0])))
	/* Propagate warning suppression from the pattern decl.  */
	TREE_NO_WARNING (omp_out) = 1;
      add_decl_expr (omp_in);
      finish_expr_stmt (block);
    }
  if (i >= 6)
    {
      /* stmts[3]/stmts[4] declare omp_priv/omp_orig; stmts[5] is the
	 initializer expression.  */
      gcc_assert (TREE_CODE (stmts[3]) == DECL_EXPR
		  && TREE_CODE (stmts[4]) == DECL_EXPR);
      tree omp_priv = tsubst (DECL_EXPR_DECL (stmts[3]),
			      args, complain, in_decl);
      tree omp_orig = tsubst (DECL_EXPR_DECL (stmts[4]),
			      args, complain, in_decl);
      DECL_CONTEXT (omp_priv) = current_function_decl;
      DECL_CONTEXT (omp_orig) = current_function_decl;
      keep_next_level (true);
      tree block = begin_omp_structured_block ();
      tsubst_expr (stmts[5], args, complain, in_decl, false);
      block = finish_omp_structured_block (block);
      block = maybe_cleanup_point_expr_void (block);
      /* Drop the cleanup for omp_priv; its destruction is managed by
	 the OMP lowering, not by this block.  */
      cp_walk_tree (&block, cp_remove_omp_priv_cleanup_stmt, omp_priv, NULL);
      add_decl_expr (omp_priv);
      add_decl_expr (omp_orig);
      finish_expr_stmt (block);
      if (i == 7)
	/* A 7th statement means omp_orig is referenced again.  */
	add_decl_expr (omp_orig);
    }
}
/* T is a postfix-expression that is not being used in a function
   call.  Return the substituted version of T.  Qualified-ids take the
   dedicated qualified-id substitution path; all other postfix
   expressions go through the generic expression substituter.  */

static tree
tsubst_non_call_postfix_expression (tree t, tree args,
				    tsubst_flags_t complain,
				    tree in_decl)
{
  if (TREE_CODE (t) != SCOPE_REF)
    return tsubst_copy_and_build (t, args, complain, in_decl,
				  /*function_p=*/false,
				  /*integral_constant_expression_p=*/false);

  return tsubst_qualified_id (t, args, complain, in_decl,
			      /*done=*/false, /*address_p=*/false);
}
/* Like tsubst but deals with expressions and performs semantic
analysis. FUNCTION_P is true if T is the "F" in "F (ARGS)". */
tree
tsubst_copy_and_build (tree t,
tree args,
tsubst_flags_t complain,
tree in_decl,
bool function_p,
bool integral_constant_expression_p)
{
#define RETURN(EXP) do { retval = (EXP); goto out; } while(0)
#define RECUR(NODE) \
tsubst_copy_and_build (NODE, args, complain, in_decl, \
/*function_p=*/false, \
integral_constant_expression_p)
tree retval, op1;
location_t loc;
if (t == NULL_TREE || t == error_mark_node)
return t;
loc = input_location;
if (EXPR_HAS_LOCATION (t))
input_location = EXPR_LOCATION (t);
/* N3276 decltype magic only applies to calls at the top level or on the
right side of a comma. */
tsubst_flags_t decltype_flag = (complain & tf_decltype);
complain &= ~tf_decltype;
switch (TREE_CODE (t))
{
case USING_DECL:
t = DECL_NAME (t);
/* Fall through. */
case IDENTIFIER_NODE:
{
tree decl;
cp_id_kind idk;
bool non_integral_constant_expression_p;
const char *error_msg;
if (IDENTIFIER_TYPENAME_P (t))
{
tree new_type = tsubst (TREE_TYPE (t), args, complain, in_decl);
t = mangle_conv_op_name_for_type (new_type);
}
/* Look up the name. */
decl = lookup_name (t);
/* By convention, expressions use ERROR_MARK_NODE to indicate
failure, not NULL_TREE. */
if (decl == NULL_TREE)
decl = error_mark_node;
decl = finish_id_expression (t, decl, NULL_TREE,
&idk,
integral_constant_expression_p,
/*allow_non_integral_constant_expression_p=*/(cxx_dialect >= cxx11),
&non_integral_constant_expression_p,
/*template_p=*/false,
/*done=*/true,
/*address_p=*/false,
/*template_arg_p=*/false,
&error_msg,
input_location);
if (error_msg)
error (error_msg);
if (!function_p && identifier_p (decl))
{
if (complain & tf_error)
unqualified_name_lookup_error (decl);
decl = error_mark_node;
}
RETURN (decl);
}
case TEMPLATE_ID_EXPR:
{
tree object;
tree templ = RECUR (TREE_OPERAND (t, 0));
tree targs = TREE_OPERAND (t, 1);
if (targs)
targs = tsubst_template_args (targs, args, complain, in_decl);
if (targs == error_mark_node)
return error_mark_node;
if (TREE_CODE (templ) == SCOPE_REF)
{
tree name = TREE_OPERAND (templ, 1);
tree tid = lookup_template_function (name, targs);
TREE_OPERAND (templ, 1) = tid;
return templ;
}
if (variable_template_p (templ))
RETURN (lookup_and_finish_template_variable (templ, targs, complain));
if (TREE_CODE (templ) == COMPONENT_REF)
{
object = TREE_OPERAND (templ, 0);
templ = TREE_OPERAND (templ, 1);
}
else
object = NULL_TREE;
templ = lookup_template_function (templ, targs);
if (object)
RETURN (build3 (COMPONENT_REF, TREE_TYPE (templ),
object, templ, NULL_TREE));
else
RETURN (baselink_for_fns (templ));
}
case INDIRECT_REF:
{
tree r = RECUR (TREE_OPERAND (t, 0));
if (REFERENCE_REF_P (t))
{
/* A type conversion to reference type will be enclosed in
such an indirect ref, but the substitution of the cast
will have also added such an indirect ref. */
r = convert_from_reference (r);
}
else
r = build_x_indirect_ref (input_location, r, RO_UNARY_STAR,
complain|decltype_flag);
if (TREE_CODE (r) == INDIRECT_REF)
REF_PARENTHESIZED_P (r) = REF_PARENTHESIZED_P (t);
RETURN (r);
}
case NOP_EXPR:
{
tree type = tsubst (TREE_TYPE (t), args, complain, in_decl);
tree op0 = RECUR (TREE_OPERAND (t, 0));
RETURN (build_nop (type, op0));
}
case IMPLICIT_CONV_EXPR:
{
tree type = tsubst (TREE_TYPE (t), args, complain, in_decl);
tree expr = RECUR (TREE_OPERAND (t, 0));
int flags = LOOKUP_IMPLICIT;
if (IMPLICIT_CONV_EXPR_DIRECT_INIT (t))
flags = LOOKUP_NORMAL;
RETURN (perform_implicit_conversion_flags (type, expr, complain,
flags));
}
case CONVERT_EXPR:
{
tree type = tsubst (TREE_TYPE (t), args, complain, in_decl);
tree op0 = RECUR (TREE_OPERAND (t, 0));
RETURN (build1 (CONVERT_EXPR, type, op0));
}
case CAST_EXPR:
case REINTERPRET_CAST_EXPR:
case CONST_CAST_EXPR:
case DYNAMIC_CAST_EXPR:
case STATIC_CAST_EXPR:
{
tree type;
tree op, r = NULL_TREE;
type = tsubst (TREE_TYPE (t), args, complain, in_decl);
if (integral_constant_expression_p
&& !cast_valid_in_integral_constant_expression_p (type))
{
if (complain & tf_error)
error ("a cast to a type other than an integral or "
"enumeration type cannot appear in a constant-expression");
RETURN (error_mark_node);
}
op = RECUR (TREE_OPERAND (t, 0));
warning_sentinel s(warn_useless_cast);
switch (TREE_CODE (t))
{
case CAST_EXPR:
r = build_functional_cast (type, op, complain);
break;
case REINTERPRET_CAST_EXPR:
r = build_reinterpret_cast (type, op, complain);
break;
case CONST_CAST_EXPR:
r = build_const_cast (type, op, complain);
break;
case DYNAMIC_CAST_EXPR:
r = build_dynamic_cast (type, op, complain);
break;
case STATIC_CAST_EXPR:
r = build_static_cast (type, op, complain);
break;
default:
gcc_unreachable ();
}
RETURN (r);
}
case POSTDECREMENT_EXPR:
case POSTINCREMENT_EXPR:
op1 = tsubst_non_call_postfix_expression (TREE_OPERAND (t, 0),
args, complain, in_decl);
RETURN (build_x_unary_op (input_location, TREE_CODE (t), op1,
complain|decltype_flag));
case PREDECREMENT_EXPR:
case PREINCREMENT_EXPR:
case NEGATE_EXPR:
case BIT_NOT_EXPR:
case ABS_EXPR:
case TRUTH_NOT_EXPR:
case UNARY_PLUS_EXPR: /* Unary + */
case REALPART_EXPR:
case IMAGPART_EXPR:
RETURN (build_x_unary_op (input_location, TREE_CODE (t),
RECUR (TREE_OPERAND (t, 0)),
complain|decltype_flag));
case FIX_TRUNC_EXPR:
RETURN (cp_build_unary_op (FIX_TRUNC_EXPR, RECUR (TREE_OPERAND (t, 0)),
false, complain));
case ADDR_EXPR:
op1 = TREE_OPERAND (t, 0);
if (TREE_CODE (op1) == LABEL_DECL)
RETURN (finish_label_address_expr (DECL_NAME (op1),
EXPR_LOCATION (op1)));
if (TREE_CODE (op1) == SCOPE_REF)
op1 = tsubst_qualified_id (op1, args, complain, in_decl,
/*done=*/true, /*address_p=*/true);
else
op1 = tsubst_non_call_postfix_expression (op1, args, complain,
in_decl);
RETURN (build_x_unary_op (input_location, ADDR_EXPR, op1,
complain|decltype_flag));
case PLUS_EXPR:
case MINUS_EXPR:
case MULT_EXPR:
case TRUNC_DIV_EXPR:
case CEIL_DIV_EXPR:
case FLOOR_DIV_EXPR:
case ROUND_DIV_EXPR:
case EXACT_DIV_EXPR:
case BIT_AND_EXPR:
case BIT_IOR_EXPR:
case BIT_XOR_EXPR:
case TRUNC_MOD_EXPR:
case FLOOR_MOD_EXPR:
case TRUTH_ANDIF_EXPR:
case TRUTH_ORIF_EXPR:
case TRUTH_AND_EXPR:
case TRUTH_OR_EXPR:
case RSHIFT_EXPR:
case LSHIFT_EXPR:
case RROTATE_EXPR:
case LROTATE_EXPR:
case EQ_EXPR:
case NE_EXPR:
case MAX_EXPR:
case MIN_EXPR:
case LE_EXPR:
case GE_EXPR:
case LT_EXPR:
case GT_EXPR:
case MEMBER_REF:
case DOTSTAR_EXPR:
{
warning_sentinel s1(warn_type_limits);
warning_sentinel s2(warn_div_by_zero);
warning_sentinel s3(warn_logical_op);
warning_sentinel s4(warn_tautological_compare);
tree op0 = RECUR (TREE_OPERAND (t, 0));
tree op1 = RECUR (TREE_OPERAND (t, 1));
tree r = build_x_binary_op
(input_location, TREE_CODE (t),
op0,
(TREE_NO_WARNING (TREE_OPERAND (t, 0))
? ERROR_MARK
: TREE_CODE (TREE_OPERAND (t, 0))),
op1,
(TREE_NO_WARNING (TREE_OPERAND (t, 1))
? ERROR_MARK
: TREE_CODE (TREE_OPERAND (t, 1))),
/*overload=*/NULL,
complain|decltype_flag);
if (EXPR_P (r) && TREE_NO_WARNING (t))
TREE_NO_WARNING (r) = TREE_NO_WARNING (t);
RETURN (r);
}
case POINTER_PLUS_EXPR:
{
tree op0 = RECUR (TREE_OPERAND (t, 0));
tree op1 = RECUR (TREE_OPERAND (t, 1));
return fold_build_pointer_plus (op0, op1);
}
case SCOPE_REF:
RETURN (tsubst_qualified_id (t, args, complain, in_decl, /*done=*/true,
/*address_p=*/false));
case ARRAY_REF:
op1 = tsubst_non_call_postfix_expression (TREE_OPERAND (t, 0),
args, complain, in_decl);
RETURN (build_x_array_ref (EXPR_LOCATION (t), op1,
RECUR (TREE_OPERAND (t, 1)),
complain|decltype_flag));
case ARRAY_NOTATION_REF:
{
tree start_index, length, stride;
op1 = tsubst_non_call_postfix_expression (ARRAY_NOTATION_ARRAY (t),
args, complain, in_decl);
start_index = RECUR (ARRAY_NOTATION_START (t));
length = RECUR (ARRAY_NOTATION_LENGTH (t));
stride = RECUR (ARRAY_NOTATION_STRIDE (t));
RETURN (build_array_notation_ref (EXPR_LOCATION (t), op1, start_index,
length, stride, TREE_TYPE (op1)));
}
case SIZEOF_EXPR:
if (PACK_EXPANSION_P (TREE_OPERAND (t, 0))
|| ARGUMENT_PACK_P (TREE_OPERAND (t, 0)))
RETURN (tsubst_copy (t, args, complain, in_decl));
/* Fall through */
case ALIGNOF_EXPR:
{
tree r;
op1 = TREE_OPERAND (t, 0);
if (TREE_CODE (t) == SIZEOF_EXPR && SIZEOF_EXPR_TYPE_P (t))
op1 = TREE_TYPE (op1);
if (!args)
{
/* When there are no ARGS, we are trying to evaluate a
non-dependent expression from the parser. Trying to do
the substitutions may not work. */
if (!TYPE_P (op1))
op1 = TREE_TYPE (op1);
}
else
{
++cp_unevaluated_operand;
++c_inhibit_evaluation_warnings;
if (TYPE_P (op1))
op1 = tsubst (op1, args, complain, in_decl);
else
op1 = tsubst_copy_and_build (op1, args, complain, in_decl,
/*function_p=*/false,
/*integral_constant_expression_p=*/
false);
--cp_unevaluated_operand;
--c_inhibit_evaluation_warnings;
}
if (TYPE_P (op1))
r = cxx_sizeof_or_alignof_type (op1, TREE_CODE (t),
complain & tf_error);
else
r = cxx_sizeof_or_alignof_expr (op1, TREE_CODE (t),
complain & tf_error);
if (TREE_CODE (t) == SIZEOF_EXPR && r != error_mark_node)
{
if (TREE_CODE (r) != SIZEOF_EXPR || TYPE_P (op1))
{
if (!processing_template_decl && TYPE_P (op1))
{
r = build_min (SIZEOF_EXPR, size_type_node,
build1 (NOP_EXPR, op1, error_mark_node));
SIZEOF_EXPR_TYPE_P (r) = 1;
}
else
r = build_min (SIZEOF_EXPR, size_type_node, op1);
TREE_SIDE_EFFECTS (r) = 0;
TREE_READONLY (r) = 1;
}
SET_EXPR_LOCATION (r, EXPR_LOCATION (t));
}
RETURN (r);
}
case AT_ENCODE_EXPR:
{
op1 = TREE_OPERAND (t, 0);
++cp_unevaluated_operand;
++c_inhibit_evaluation_warnings;
op1 = tsubst_copy_and_build (op1, args, complain, in_decl,
/*function_p=*/false,
/*integral_constant_expression_p=*/false);
--cp_unevaluated_operand;
--c_inhibit_evaluation_warnings;
RETURN (objc_build_encode_expr (op1));
}
case NOEXCEPT_EXPR:
op1 = TREE_OPERAND (t, 0);
++cp_unevaluated_operand;
++c_inhibit_evaluation_warnings;
++cp_noexcept_operand;
op1 = tsubst_copy_and_build (op1, args, complain, in_decl,
/*function_p=*/false,
/*integral_constant_expression_p=*/false);
--cp_unevaluated_operand;
--c_inhibit_evaluation_warnings;
--cp_noexcept_operand;
RETURN (finish_noexcept_expr (op1, complain));
case MODOP_EXPR:
{
warning_sentinel s(warn_div_by_zero);
tree lhs = RECUR (TREE_OPERAND (t, 0));
tree rhs = RECUR (TREE_OPERAND (t, 2));
tree r = build_x_modify_expr
(EXPR_LOCATION (t), lhs, TREE_CODE (TREE_OPERAND (t, 1)), rhs,
complain|decltype_flag);
/* TREE_NO_WARNING must be set if either the expression was
parenthesized or it uses an operator such as >>= rather
than plain assignment. In the former case, it was already
set and must be copied. In the latter case,
build_x_modify_expr sets it and it must not be reset
here. */
if (TREE_NO_WARNING (t))
TREE_NO_WARNING (r) = TREE_NO_WARNING (t);
RETURN (r);
}
case ARROW_EXPR:
op1 = tsubst_non_call_postfix_expression (TREE_OPERAND (t, 0),
args, complain, in_decl);
/* Remember that there was a reference to this entity. */
if (DECL_P (op1)
&& !mark_used (op1, complain) && !(complain & tf_error))
RETURN (error_mark_node);
RETURN (build_x_arrow (input_location, op1, complain));
case NEW_EXPR:
{
tree placement = RECUR (TREE_OPERAND (t, 0));
tree init = RECUR (TREE_OPERAND (t, 3));
vec<tree, va_gc> *placement_vec;
vec<tree, va_gc> *init_vec;
tree ret;
if (placement == NULL_TREE)
placement_vec = NULL;
else
{
placement_vec = make_tree_vector ();
for (; placement != NULL_TREE; placement = TREE_CHAIN (placement))
vec_safe_push (placement_vec, TREE_VALUE (placement));
}
/* If there was an initializer in the original tree, but it
instantiated to an empty list, then we should pass a
non-NULL empty vector to tell build_new that it was an
empty initializer() rather than no initializer. This can
only happen when the initializer is a pack expansion whose
parameter packs are of length zero. */
if (init == NULL_TREE && TREE_OPERAND (t, 3) == NULL_TREE)
init_vec = NULL;
else
{
init_vec = make_tree_vector ();
if (init == void_node)
gcc_assert (init_vec != NULL);
else
{
for (; init != NULL_TREE; init = TREE_CHAIN (init))
vec_safe_push (init_vec, TREE_VALUE (init));
}
}
tree op1 = tsubst (TREE_OPERAND (t, 1), args, complain, in_decl);
tree op2 = RECUR (TREE_OPERAND (t, 2));
ret = build_new (&placement_vec, op1, op2, &init_vec,
NEW_EXPR_USE_GLOBAL (t),
complain);
if (placement_vec != NULL)
release_tree_vector (placement_vec);
if (init_vec != NULL)
release_tree_vector (init_vec);
RETURN (ret);
}
case DELETE_EXPR:
{
tree op0 = RECUR (TREE_OPERAND (t, 0));
tree op1 = RECUR (TREE_OPERAND (t, 1));
RETURN (delete_sanity (op0, op1,
DELETE_EXPR_USE_VEC (t),
DELETE_EXPR_USE_GLOBAL (t),
complain));
}
case COMPOUND_EXPR:
{
tree op0 = tsubst_copy_and_build (TREE_OPERAND (t, 0), args,
complain & ~tf_decltype, in_decl,
/*function_p=*/false,
integral_constant_expression_p);
RETURN (build_x_compound_expr (EXPR_LOCATION (t),
op0,
RECUR (TREE_OPERAND (t, 1)),
complain|decltype_flag));
}
case CALL_EXPR:
{
tree function;
vec<tree, va_gc> *call_args;
unsigned int nargs, i;
bool qualified_p;
bool koenig_p;
tree ret;
function = CALL_EXPR_FN (t);
/* Internal function with no arguments. */
if (function == NULL_TREE && call_expr_nargs (t) == 0)
RETURN (t);
/* When we parsed the expression, we determined whether or
not Koenig lookup should be performed. */
koenig_p = KOENIG_LOOKUP_P (t);
if (function == NULL_TREE)
{
koenig_p = false;
qualified_p = false;
}
else if (TREE_CODE (function) == SCOPE_REF)
{
qualified_p = true;
function = tsubst_qualified_id (function, args, complain, in_decl,
/*done=*/false,
/*address_p=*/false);
}
else if (koenig_p && identifier_p (function))
{
/* Do nothing; calling tsubst_copy_and_build on an identifier
would incorrectly perform unqualified lookup again.
Note that we can also have an IDENTIFIER_NODE if the earlier
unqualified lookup found a member function; in that case
koenig_p will be false and we do want to do the lookup
again to find the instantiated member function.
FIXME but doing that causes c++/15272, so we need to stop
using IDENTIFIER_NODE in that situation. */
qualified_p = false;
}
else
{
if (TREE_CODE (function) == COMPONENT_REF)
{
tree op = TREE_OPERAND (function, 1);
qualified_p = (TREE_CODE (op) == SCOPE_REF
|| (BASELINK_P (op)
&& BASELINK_QUALIFIED_P (op)));
}
else
qualified_p = false;
if (TREE_CODE (function) == ADDR_EXPR
&& TREE_CODE (TREE_OPERAND (function, 0)) == FUNCTION_DECL)
/* Avoid error about taking the address of a constructor. */
function = TREE_OPERAND (function, 0);
function = tsubst_copy_and_build (function, args, complain,
in_decl,
!qualified_p,
integral_constant_expression_p);
if (BASELINK_P (function))
qualified_p = true;
}
nargs = call_expr_nargs (t);
call_args = make_tree_vector ();
for (i = 0; i < nargs; ++i)
{
tree arg = CALL_EXPR_ARG (t, i);
if (!PACK_EXPANSION_P (arg))
vec_safe_push (call_args, RECUR (CALL_EXPR_ARG (t, i)));
else
{
/* Expand the pack expansion and push each entry onto
CALL_ARGS. */
arg = tsubst_pack_expansion (arg, args, complain, in_decl);
if (TREE_CODE (arg) == TREE_VEC)
{
unsigned int len, j;
len = TREE_VEC_LENGTH (arg);
for (j = 0; j < len; ++j)
{
tree value = TREE_VEC_ELT (arg, j);
if (value != NULL_TREE)
value = convert_from_reference (value);
vec_safe_push (call_args, value);
}
}
else
{
/* A partial substitution. Add one entry. */
vec_safe_push (call_args, arg);
}
}
}
/* We do not perform argument-dependent lookup if normal
lookup finds a non-function, in accordance with the
expected resolution of DR 218. */
if (koenig_p
&& ((is_overloaded_fn (function)
/* If lookup found a member function, the Koenig lookup is
not appropriate, even if an unqualified-name was used
to denote the function. */
&& !DECL_FUNCTION_MEMBER_P (get_first_fn (function)))
|| identifier_p (function))
/* Only do this when substitution turns a dependent call
into a non-dependent call. */
&& type_dependent_expression_p_push (t)
&& !any_type_dependent_arguments_p (call_args))
function = perform_koenig_lookup (function, call_args, tf_none);
if (function != NULL_TREE
&& identifier_p (function)
&& !any_type_dependent_arguments_p (call_args))
{
if (koenig_p && (complain & tf_warning_or_error))
{
/* For backwards compatibility and good diagnostics, try
the unqualified lookup again if we aren't in SFINAE
context. */
tree unq = (tsubst_copy_and_build
(function, args, complain, in_decl, true,
integral_constant_expression_p));
if (unq == error_mark_node)
{
release_tree_vector (call_args);
RETURN (error_mark_node);
}
if (unq != function)
{
/* In a lambda fn, we have to be careful to not
introduce new this captures. Legacy code can't
be using lambdas anyway, so it's ok to be
stricter. */
bool in_lambda = (current_class_type
&& LAMBDA_TYPE_P (current_class_type));
char const *const msg
= G_("%qD was not declared in this scope, "
"and no declarations were found by "
"argument-dependent lookup at the point "
"of instantiation");
bool diag = true;
if (in_lambda)
error_at (EXPR_LOC_OR_LOC (t, input_location),
msg, function);
else
diag = permerror (EXPR_LOC_OR_LOC (t, input_location),
msg, function);
if (diag)
{
tree fn = unq;
if (INDIRECT_REF_P (fn))
fn = TREE_OPERAND (fn, 0);
if (TREE_CODE (fn) == COMPONENT_REF)
fn = TREE_OPERAND (fn, 1);
if (is_overloaded_fn (fn))
fn = get_first_fn (fn);
if (!DECL_P (fn))
/* Can't say anything more. */;
else if (DECL_CLASS_SCOPE_P (fn))
{
location_t loc = EXPR_LOC_OR_LOC (t,
input_location);
inform (loc,
"declarations in dependent base %qT are "
"not found by unqualified lookup",
DECL_CLASS_CONTEXT (fn));
if (current_class_ptr)
inform (loc,
"use %<this->%D%> instead", function);
else
inform (loc,
"use %<%T::%D%> instead",
current_class_name, function);
}
else
inform (DECL_SOURCE_LOCATION (fn),
"%qD declared here, later in the "
"translation unit", fn);
if (in_lambda)
{
release_tree_vector (call_args);
RETURN (error_mark_node);
}
}
function = unq;
}
}
if (identifier_p (function))
{
if (complain & tf_error)
unqualified_name_lookup_error (function);
release_tree_vector (call_args);
RETURN (error_mark_node);
}
}
/* Remember that there was a reference to this entity. */
if (function != NULL_TREE
&& DECL_P (function)
&& !mark_used (function, complain) && !(complain & tf_error))
{
release_tree_vector (call_args);
RETURN (error_mark_node);
}
/* Put back tf_decltype for the actual call. */
complain |= decltype_flag;
if (function == NULL_TREE)
switch (CALL_EXPR_IFN (t))
{
case IFN_LAUNDER:
gcc_assert (nargs == 1);
if (vec_safe_length (call_args) != 1)
{
error_at (EXPR_LOC_OR_LOC (t, input_location),
"wrong number of arguments to "
"%<__builtin_launder%>");
ret = error_mark_node;
}
else
ret = finish_builtin_launder (EXPR_LOC_OR_LOC (t,
input_location),
(*call_args)[0], complain);
break;
default:
/* Unsupported internal function with arguments. */
gcc_unreachable ();
}
else if (TREE_CODE (function) == OFFSET_REF)
ret = build_offset_ref_call_from_tree (function, &call_args,
complain);
else if (TREE_CODE (function) == COMPONENT_REF)
{
tree instance = TREE_OPERAND (function, 0);
tree fn = TREE_OPERAND (function, 1);
if (processing_template_decl
&& (type_dependent_expression_p (instance)
|| (!BASELINK_P (fn)
&& TREE_CODE (fn) != FIELD_DECL)
|| type_dependent_expression_p (fn)
|| any_type_dependent_arguments_p (call_args)))
ret = build_nt_call_vec (function, call_args);
else if (!BASELINK_P (fn))
ret = finish_call_expr (function, &call_args,
/*disallow_virtual=*/false,
/*koenig_p=*/false,
complain);
else
ret = (build_new_method_call
(instance, fn,
&call_args, NULL_TREE,
qualified_p ? LOOKUP_NONVIRTUAL : LOOKUP_NORMAL,
/*fn_p=*/NULL,
complain));
}
else
ret = finish_call_expr (function, &call_args,
/*disallow_virtual=*/qualified_p,
koenig_p,
complain);
release_tree_vector (call_args);
if (ret != error_mark_node)
{
bool op = CALL_EXPR_OPERATOR_SYNTAX (t);
bool ord = CALL_EXPR_ORDERED_ARGS (t);
bool rev = CALL_EXPR_REVERSE_ARGS (t);
bool thk = CALL_FROM_THUNK_P (t);
if (op || ord || rev || thk)
{
function = extract_call_expr (ret);
CALL_EXPR_OPERATOR_SYNTAX (function) = op;
CALL_EXPR_ORDERED_ARGS (function) = ord;
CALL_EXPR_REVERSE_ARGS (function) = rev;
if (thk)
{
if (TREE_CODE (function) == CALL_EXPR)
CALL_FROM_THUNK_P (function) = true;
else
AGGR_INIT_FROM_THUNK_P (function) = true;
/* The thunk location is not interesting. */
SET_EXPR_LOCATION (function, UNKNOWN_LOCATION);
}
}
}
RETURN (ret);
}
case COND_EXPR:
{
tree cond = RECUR (TREE_OPERAND (t, 0));
tree folded_cond = fold_non_dependent_expr (cond);
tree exp1, exp2;
if (TREE_CODE (folded_cond) == INTEGER_CST)
{
if (integer_zerop (folded_cond))
{
++c_inhibit_evaluation_warnings;
exp1 = RECUR (TREE_OPERAND (t, 1));
--c_inhibit_evaluation_warnings;
exp2 = RECUR (TREE_OPERAND (t, 2));
}
else
{
exp1 = RECUR (TREE_OPERAND (t, 1));
++c_inhibit_evaluation_warnings;
exp2 = RECUR (TREE_OPERAND (t, 2));
--c_inhibit_evaluation_warnings;
}
cond = folded_cond;
}
else
{
exp1 = RECUR (TREE_OPERAND (t, 1));
exp2 = RECUR (TREE_OPERAND (t, 2));
}
RETURN (build_x_conditional_expr (EXPR_LOCATION (t),
cond, exp1, exp2, complain));
}
case PSEUDO_DTOR_EXPR:
{
tree op0 = RECUR (TREE_OPERAND (t, 0));
tree op1 = RECUR (TREE_OPERAND (t, 1));
tree op2 = tsubst (TREE_OPERAND (t, 2), args, complain, in_decl);
RETURN (finish_pseudo_destructor_expr (op0, op1, op2,
input_location));
}
case TREE_LIST:
{
tree purpose, value, chain;
if (t == void_list_node)
RETURN (t);
if ((TREE_PURPOSE (t) && PACK_EXPANSION_P (TREE_PURPOSE (t)))
|| (TREE_VALUE (t) && PACK_EXPANSION_P (TREE_VALUE (t))))
{
/* We have pack expansions, so expand those and
create a new list out of it. */
tree purposevec = NULL_TREE;
tree valuevec = NULL_TREE;
tree chain;
int i, len = -1;
/* Expand the argument expressions. */
if (TREE_PURPOSE (t))
purposevec = tsubst_pack_expansion (TREE_PURPOSE (t), args,
complain, in_decl);
if (TREE_VALUE (t))
valuevec = tsubst_pack_expansion (TREE_VALUE (t), args,
complain, in_decl);
/* Build the rest of the list. */
chain = TREE_CHAIN (t);
if (chain && chain != void_type_node)
chain = RECUR (chain);
/* Determine the number of arguments. */
if (purposevec && TREE_CODE (purposevec) == TREE_VEC)
{
len = TREE_VEC_LENGTH (purposevec);
gcc_assert (!valuevec || len == TREE_VEC_LENGTH (valuevec));
}
else if (TREE_CODE (valuevec) == TREE_VEC)
len = TREE_VEC_LENGTH (valuevec);
else
{
/* Since we only performed a partial substitution into
the argument pack, we only RETURN (a single list
node. */
if (purposevec == TREE_PURPOSE (t)
&& valuevec == TREE_VALUE (t)
&& chain == TREE_CHAIN (t))
RETURN (t);
RETURN (tree_cons (purposevec, valuevec, chain));
}
/* Convert the argument vectors into a TREE_LIST */
i = len;
while (i > 0)
{
/* Grab the Ith values. */
i--;
purpose = purposevec ? TREE_VEC_ELT (purposevec, i)
: NULL_TREE;
value
= valuevec ? convert_from_reference (TREE_VEC_ELT (valuevec, i))
: NULL_TREE;
/* Build the list (backwards). */
chain = tree_cons (purpose, value, chain);
}
RETURN (chain);
}
purpose = TREE_PURPOSE (t);
if (purpose)
purpose = RECUR (purpose);
value = TREE_VALUE (t);
if (value)
value = RECUR (value);
chain = TREE_CHAIN (t);
if (chain && chain != void_type_node)
chain = RECUR (chain);
if (purpose == TREE_PURPOSE (t)
&& value == TREE_VALUE (t)
&& chain == TREE_CHAIN (t))
RETURN (t);
RETURN (tree_cons (purpose, value, chain));
}
case COMPONENT_REF:
{
tree object;
tree object_type;
tree member;
tree r;
object = tsubst_non_call_postfix_expression (TREE_OPERAND (t, 0),
args, complain, in_decl);
/* Remember that there was a reference to this entity. */
if (DECL_P (object)
&& !mark_used (object, complain) && !(complain & tf_error))
RETURN (error_mark_node);
object_type = TREE_TYPE (object);
member = TREE_OPERAND (t, 1);
if (BASELINK_P (member))
member = tsubst_baselink (member,
non_reference (TREE_TYPE (object)),
args, complain, in_decl);
else
member = tsubst_copy (member, args, complain, in_decl);
if (member == error_mark_node)
RETURN (error_mark_node);
if (TREE_CODE (member) == FIELD_DECL)
{
r = finish_non_static_data_member (member, object, NULL_TREE);
if (TREE_CODE (r) == COMPONENT_REF)
REF_PARENTHESIZED_P (r) = REF_PARENTHESIZED_P (t);
RETURN (r);
}
else if (type_dependent_expression_p (object))
/* We can't do much here. */;
else if (!CLASS_TYPE_P (object_type))
{
if (scalarish_type_p (object_type))
{
tree s = NULL_TREE;
tree dtor = member;
if (TREE_CODE (dtor) == SCOPE_REF)
{
s = TREE_OPERAND (dtor, 0);
dtor = TREE_OPERAND (dtor, 1);
}
if (TREE_CODE (dtor) == BIT_NOT_EXPR)
{
dtor = TREE_OPERAND (dtor, 0);
if (TYPE_P (dtor))
RETURN (finish_pseudo_destructor_expr
(object, s, dtor, input_location));
}
}
}
else if (TREE_CODE (member) == SCOPE_REF
&& TREE_CODE (TREE_OPERAND (member, 1)) == TEMPLATE_ID_EXPR)
{
/* Lookup the template functions now that we know what the
scope is. */
tree scope = TREE_OPERAND (member, 0);
tree tmpl = TREE_OPERAND (TREE_OPERAND (member, 1), 0);
tree args = TREE_OPERAND (TREE_OPERAND (member, 1), 1);
member = lookup_qualified_name (scope, tmpl,
/*is_type_p=*/false,
/*complain=*/false);
if (BASELINK_P (member))
{
BASELINK_FUNCTIONS (member)
= build_nt (TEMPLATE_ID_EXPR, BASELINK_FUNCTIONS (member),
args);
member = (adjust_result_of_qualified_name_lookup
(member, BINFO_TYPE (BASELINK_BINFO (member)),
object_type));
}
else
{
qualified_name_lookup_error (scope, tmpl, member,
input_location);
RETURN (error_mark_node);
}
}
else if (TREE_CODE (member) == SCOPE_REF
&& !CLASS_TYPE_P (TREE_OPERAND (member, 0))
&& TREE_CODE (TREE_OPERAND (member, 0)) != NAMESPACE_DECL)
{
if (complain & tf_error)
{
if (TYPE_P (TREE_OPERAND (member, 0)))
error ("%qT is not a class or namespace",
TREE_OPERAND (member, 0));
else
error ("%qD is not a class or namespace",
TREE_OPERAND (member, 0));
}
RETURN (error_mark_node);
}
r = finish_class_member_access_expr (object, member,
/*template_p=*/false,
complain);
if (TREE_CODE (r) == COMPONENT_REF)
REF_PARENTHESIZED_P (r) = REF_PARENTHESIZED_P (t);
RETURN (r);
}
case THROW_EXPR:
RETURN (build_throw
(RECUR (TREE_OPERAND (t, 0))));
case CONSTRUCTOR:
{
vec<constructor_elt, va_gc> *n;
constructor_elt *ce;
unsigned HOST_WIDE_INT idx;
tree type = tsubst (TREE_TYPE (t), args, complain, in_decl);
bool process_index_p;
int newlen;
bool need_copy_p = false;
tree r;
if (type == error_mark_node)
RETURN (error_mark_node);
/* digest_init will do the wrong thing if we let it. */
if (type && TYPE_PTRMEMFUNC_P (type))
RETURN (t);
/* We do not want to process the index of aggregate
initializers as they are identifier nodes which will be
looked up by digest_init. */
process_index_p = !(type && MAYBE_CLASS_TYPE_P (type));
n = vec_safe_copy (CONSTRUCTOR_ELTS (t));
newlen = vec_safe_length (n);
FOR_EACH_VEC_SAFE_ELT (n, idx, ce)
{
if (ce->index && process_index_p
/* An identifier index is looked up in the type
being initialized, not the current scope. */
&& TREE_CODE (ce->index) != IDENTIFIER_NODE)
ce->index = RECUR (ce->index);
if (PACK_EXPANSION_P (ce->value))
{
/* Substitute into the pack expansion. */
ce->value = tsubst_pack_expansion (ce->value, args, complain,
in_decl);
if (ce->value == error_mark_node
|| PACK_EXPANSION_P (ce->value))
;
else if (TREE_VEC_LENGTH (ce->value) == 1)
/* Just move the argument into place. */
ce->value = TREE_VEC_ELT (ce->value, 0);
else
{
/* Update the length of the final CONSTRUCTOR
arguments vector, and note that we will need to
copy.*/
newlen = newlen + TREE_VEC_LENGTH (ce->value) - 1;
need_copy_p = true;
}
}
else
ce->value = RECUR (ce->value);
}
if (need_copy_p)
{
vec<constructor_elt, va_gc> *old_n = n;
vec_alloc (n, newlen);
FOR_EACH_VEC_ELT (*old_n, idx, ce)
{
if (TREE_CODE (ce->value) == TREE_VEC)
{
int i, len = TREE_VEC_LENGTH (ce->value);
for (i = 0; i < len; ++i)
CONSTRUCTOR_APPEND_ELT (n, 0,
TREE_VEC_ELT (ce->value, i));
}
else
CONSTRUCTOR_APPEND_ELT (n, 0, ce->value);
}
}
r = build_constructor (init_list_type_node, n);
CONSTRUCTOR_IS_DIRECT_INIT (r) = CONSTRUCTOR_IS_DIRECT_INIT (t);
if (TREE_HAS_CONSTRUCTOR (t))
RETURN (finish_compound_literal (type, r, complain));
TREE_TYPE (r) = type;
RETURN (r);
}
case TYPEID_EXPR:
{
tree operand_0 = TREE_OPERAND (t, 0);
if (TYPE_P (operand_0))
{
operand_0 = tsubst (operand_0, args, complain, in_decl);
RETURN (get_typeid (operand_0, complain));
}
else
{
operand_0 = RECUR (operand_0);
RETURN (build_typeid (operand_0, complain));
}
}
case VAR_DECL:
if (!args)
RETURN (t);
else if (DECL_PACK_P (t))
{
/* We don't build decls for an instantiation of a
variadic capture proxy, we instantiate the elements
when needed. */
gcc_assert (DECL_HAS_VALUE_EXPR_P (t));
return RECUR (DECL_VALUE_EXPR (t));
}
/* Fall through */
case PARM_DECL:
{
tree r = tsubst_copy (t, args, complain, in_decl);
/* ??? We're doing a subset of finish_id_expression here. */
if (VAR_P (r)
&& !processing_template_decl
&& !cp_unevaluated_operand
&& (TREE_STATIC (r) || DECL_EXTERNAL (r))
&& CP_DECL_THREAD_LOCAL_P (r))
{
if (tree wrap = get_tls_wrapper_fn (r))
/* Replace an evaluated use of the thread_local variable with
a call to its wrapper. */
r = build_cxx_call (wrap, 0, NULL, tf_warning_or_error);
}
else if (outer_automatic_var_p (r))
{
r = process_outer_var_ref (r, complain);
if (is_capture_proxy (r))
register_local_specialization (r, t);
}
if (TREE_CODE (TREE_TYPE (t)) != REFERENCE_TYPE)
/* If the original type was a reference, we'll be wrapped in
the appropriate INDIRECT_REF. */
r = convert_from_reference (r);
RETURN (r);
}
case VA_ARG_EXPR:
{
tree op0 = RECUR (TREE_OPERAND (t, 0));
tree type = tsubst (TREE_TYPE (t), args, complain, in_decl);
RETURN (build_x_va_arg (EXPR_LOCATION (t), op0, type));
}
case OFFSETOF_EXPR:
{
tree object_ptr
= tsubst_copy_and_build (TREE_OPERAND (t, 1), args, complain,
in_decl, /*function_p=*/false,
/*integral_constant_expression_p=*/false);
RETURN (finish_offsetof (object_ptr,
RECUR (TREE_OPERAND (t, 0)),
EXPR_LOCATION (t)));
}
case ADDRESSOF_EXPR:
RETURN (cp_build_addressof (EXPR_LOCATION (t),
RECUR (TREE_OPERAND (t, 0)), complain));
case TRAIT_EXPR:
{
tree type1 = tsubst (TRAIT_EXPR_TYPE1 (t), args,
complain, in_decl);
tree type2 = TRAIT_EXPR_TYPE2 (t);
if (type2 && TREE_CODE (type2) == TREE_LIST)
type2 = RECUR (type2);
else if (type2)
type2 = tsubst (type2, args, complain, in_decl);
RETURN (finish_trait_expr (TRAIT_EXPR_KIND (t), type1, type2));
}
case STMT_EXPR:
{
tree old_stmt_expr = cur_stmt_expr;
tree stmt_expr = begin_stmt_expr ();
cur_stmt_expr = stmt_expr;
tsubst_expr (STMT_EXPR_STMT (t), args, complain, in_decl,
integral_constant_expression_p);
stmt_expr = finish_stmt_expr (stmt_expr, false);
cur_stmt_expr = old_stmt_expr;
/* If the resulting list of expression statement is empty,
fold it further into void_node. */
if (empty_expr_stmt_p (stmt_expr))
stmt_expr = void_node;
RETURN (stmt_expr);
}
case LAMBDA_EXPR:
{
tree r = build_lambda_expr ();
tree type = tsubst (LAMBDA_EXPR_CLOSURE (t), args, complain, NULL_TREE);
LAMBDA_EXPR_CLOSURE (r) = type;
CLASSTYPE_LAMBDA_EXPR (type) = r;
LAMBDA_EXPR_LOCATION (r)
= LAMBDA_EXPR_LOCATION (t);
LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (r)
= LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (t);
LAMBDA_EXPR_MUTABLE_P (r) = LAMBDA_EXPR_MUTABLE_P (t);
LAMBDA_EXPR_DISCRIMINATOR (r)
= (LAMBDA_EXPR_DISCRIMINATOR (t));
tree scope = LAMBDA_EXPR_EXTRA_SCOPE (t);
if (!scope)
/* No substitution needed. */;
else if (VAR_OR_FUNCTION_DECL_P (scope))
/* For a function or variable scope, we want to use tsubst so that we
don't complain about referring to an auto before deduction. */
scope = tsubst (scope, args, complain, in_decl);
else if (TREE_CODE (scope) == PARM_DECL)
{
/* Look up the parameter we want directly, as tsubst_copy
doesn't do what we need. */
tree fn = tsubst (DECL_CONTEXT (scope), args, complain, in_decl);
tree parm = FUNCTION_FIRST_USER_PARM (fn);
while (DECL_PARM_INDEX (parm) != DECL_PARM_INDEX (scope))
parm = DECL_CHAIN (parm);
scope = parm;
/* FIXME Work around the parm not having DECL_CONTEXT set. */
if (DECL_CONTEXT (scope) == NULL_TREE)
DECL_CONTEXT (scope) = fn;
}
else if (TREE_CODE (scope) == FIELD_DECL)
/* For a field, use tsubst_copy so that we look up the existing field
rather than build a new one. */
scope = RECUR (scope);
else
gcc_unreachable ();
LAMBDA_EXPR_EXTRA_SCOPE (r) = scope;
gcc_assert (LAMBDA_EXPR_THIS_CAPTURE (t) == NULL_TREE
&& LAMBDA_EXPR_PENDING_PROXIES (t) == NULL);
/* Do this again now that LAMBDA_EXPR_EXTRA_SCOPE is set. */
determine_visibility (TYPE_NAME (type));
/* Now that we know visibility, instantiate the type so we have a
declaration of the op() for later calls to lambda_function. */
complete_type (type);
if (tree fn = lambda_function (type))
LAMBDA_EXPR_RETURN_TYPE (r) = TREE_TYPE (TREE_TYPE (fn));
LAMBDA_EXPR_THIS_CAPTURE (r) = NULL_TREE;
insert_pending_capture_proxies ();
RETURN (build_lambda_object (r));
}
case TARGET_EXPR:
/* We can get here for a constant initializer of non-dependent type.
FIXME stop folding in cp_parser_initializer_clause. */
{
tree r = get_target_expr_sfinae (RECUR (TARGET_EXPR_INITIAL (t)),
complain);
RETURN (r);
}
case TRANSACTION_EXPR:
RETURN (tsubst_expr(t, args, complain, in_decl,
integral_constant_expression_p));
case PAREN_EXPR:
RETURN (finish_parenthesized_expr (RECUR (TREE_OPERAND (t, 0))));
case VEC_PERM_EXPR:
{
tree op0 = RECUR (TREE_OPERAND (t, 0));
tree op1 = RECUR (TREE_OPERAND (t, 1));
tree op2 = RECUR (TREE_OPERAND (t, 2));
RETURN (build_x_vec_perm_expr (input_location, op0, op1, op2,
complain));
}
case REQUIRES_EXPR:
RETURN (tsubst_requires_expr (t, args, complain, in_decl));
default:
/* Handle Objective-C++ constructs, if appropriate. */
{
tree subst
= objcp_tsubst_copy_and_build (t, args, complain,
in_decl, /*function_p=*/false);
if (subst)
RETURN (subst);
}
RETURN (tsubst_copy (t, args, complain, in_decl));
}
#undef RECUR
#undef RETURN
out:
input_location = loc;
return retval;
}
/* Verify that the single instantiated template argument T of template TMPL
   is valid.  For a type argument, make sure its linkage is acceptable; for
   a non-type argument of integral or enumeration type, make sure it is a
   constant.  Argument packs are checked element-wise.  Diagnostics are
   emitted under control of COMPLAIN; returns true on error.  */
static bool
check_instantiated_arg (tree tmpl, tree t, tsubst_flags_t complain)
{
  /* A still-dependent argument cannot be validated yet.  */
  if (dependent_template_arg_p (t))
    return false;

  if (ARGUMENT_PACK_P (t))
    {
      /* Check every element of the pack; the pack is bad if any
	 element is bad.  Note we deliberately keep checking after a
	 failure so all diagnostics are emitted.  */
      tree pack_args = ARGUMENT_PACK_ARGS (t);
      bool bad = false;
      for (int ix = 0; ix < TREE_VEC_LENGTH (pack_args); ++ix)
	bad |= check_instantiated_arg (tmpl, TREE_VEC_ELT (pack_args, ix),
				       complain);
      return bad;
    }

  if (TYPE_P (t))
    {
      /* [basic.link]: A name with no linkage (notably, the name
	 of a class or enumeration declared in a local scope)
	 shall not be used to declare an entity with linkage.
	 This implies that names with no linkage cannot be used as
	 template arguments
	 DR 757 relaxes this restriction for C++0x.  */
      tree nt = (cxx_dialect > cxx98 ? NULL_TREE
		 : no_linkage_check (t, /*relaxed_p=*/false));
      if (nt)
	{
	  /* DR 488 makes use of a type with no linkage cause
	     type deduction to fail.  */
	  if (complain & tf_error)
	    {
	      if (TYPE_UNNAMED_P (nt))
		error ("%qT is/uses unnamed type", t);
	      else
		error ("template argument for %qD uses local type %qT",
		       tmpl, t);
	    }
	  return true;
	}
      /* In order to avoid all sorts of complications, we do not
	 allow variably-modified types as template arguments.  */
      if (variably_modified_type_p (t, NULL_TREE))
	{
	  if (complain & tf_error)
	    error ("%qT is a variably modified type", t);
	  return true;
	}
      return false;
    }

  /* Class template and alias template arguments should be OK.  */
  if (DECL_TYPE_TEMPLATE_P (t))
    return false;

  /* A non-type argument of integral or enumerated type must be a
     constant.  */
  if (TREE_TYPE (t)
      && INTEGRAL_OR_ENUMERATION_TYPE_P (TREE_TYPE (t))
      && !REFERENCE_REF_P (t)
      && !TREE_CONSTANT (t))
    {
      if (complain & tf_error)
	error ("integral expression %qE is not constant", t);
      return true;
    }

  return false;
}
/* Validate every instantiated template argument in the vector ARGS
   against template TMPL via check_instantiated_arg.  All arguments are
   checked (no early exit) so every diagnostic is emitted.  Under
   COMPLAIN & tf_error a trailing note names the template being
   instantiated.  Returns true if any argument was invalid.  */
static bool
check_instantiated_args (tree tmpl, tree args, tsubst_flags_t complain)
{
  const int nparms = DECL_NTPARMS (tmpl);
  bool any_bad = false;
  for (int ix = 0; ix < nparms; ++ix)
    any_bad |= check_instantiated_arg (tmpl, TREE_VEC_ELT (args, ix),
				       complain);
  if (any_bad && (complain & tf_error))
    error (" trying to instantiate %qD", tmpl);
  return any_bad;
}
/* We're out of SFINAE context now, so generate diagnostics for the access
   errors we saw earlier when instantiating D from TMPL and ARGS.
   This redoes the substitution of the template pattern's type inside D's
   access scope with access checking enabled (not deferred), so the access
   errors that were suppressed the first time are actually emitted.  */
static void
recheck_decl_substitution (tree d, tree tmpl, tree args)
{
  tree pattern = DECL_TEMPLATE_RESULT (tmpl);
  tree type = TREE_TYPE (pattern);
  location_t loc = input_location;
  /* Check access as if from within D itself.  */
  push_access_scope (d);
  /* dk_no_deferred: perform checks immediately so errors are reported.  */
  push_deferring_access_checks (dk_no_deferred);
  /* Point diagnostics at the template pattern, not wherever we happen
     to be now.  */
  input_location = DECL_SOURCE_LOCATION (pattern);
  tsubst (type, args, tf_warning_or_error, d);
  /* Restore in strict reverse order of the setup above.  */
  input_location = loc;
  pop_deferring_access_checks ();
  pop_access_scope (d);
}
/* Instantiate the indicated variable, function, or alias template TMPL with
   the template arguments in TARG_PTR (ORIG_ARGS here).  Returns the
   instantiated declaration, or error_mark_node on failure; diagnostics are
   controlled by COMPLAIN.  An existing specialization is returned directly
   when one has already been recorded.  */
static tree
instantiate_template_1 (tree tmpl, tree orig_args, tsubst_flags_t complain)
{
  tree targ_ptr = orig_args;
  tree fndecl;
  tree gen_tmpl;
  tree spec;
  bool access_ok = true;
  if (tmpl == error_mark_node)
    return error_mark_node;
  gcc_assert (TREE_CODE (tmpl) == TEMPLATE_DECL);
  /* If this function is a clone, handle it specially.  */
  if (DECL_CLONED_FUNCTION_P (tmpl))
    {
      tree spec;
      tree clone;
      /* Use DECL_ABSTRACT_ORIGIN because only FUNCTION_DECLs have
	 DECL_CLONED_FUNCTION.  */
      spec = instantiate_template (DECL_ABSTRACT_ORIGIN (tmpl),
				   targ_ptr, complain);
      if (spec == error_mark_node)
	return error_mark_node;
      /* Look for the clone.  */
      FOR_EACH_CLONE (clone, spec)
	if (DECL_NAME (clone) == DECL_NAME (tmpl))
	  return clone;
      /* We should always have found the clone by now.  */
      gcc_unreachable ();
      return NULL_TREE;
    }
  if (targ_ptr == error_mark_node)
    return error_mark_node;
  /* Check to see if we already have this specialization.  */
  gen_tmpl = most_general_template (tmpl);
  if (TMPL_ARGS_DEPTH (targ_ptr)
      < TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (gen_tmpl)))
    /* targ_ptr only has the innermost template args, so add the outer ones
       from tmpl, which could be either a partial instantiation or gen_tmpl (in
       the case of a non-dependent call within a template definition).  */
    targ_ptr = (add_outermost_template_args
		(DECL_TI_ARGS (DECL_TEMPLATE_RESULT (tmpl)),
		 targ_ptr));
  /* It would be nice to avoid hashing here and then again in tsubst_decl,
     but it doesn't seem to be on the hot path.  */
  spec = retrieve_specialization (gen_tmpl, targ_ptr, 0);
  /* Sanity check: looking up via the (possibly less general) TMPL with the
     original args must agree with the lookup via the general template.  */
  gcc_assert (tmpl == gen_tmpl
	      || ((fndecl = retrieve_specialization (tmpl, orig_args, 0))
		  == spec)
	      || fndecl == NULL_TREE);
  if (spec != NULL_TREE)
    {
      /* A previous instantiation may have failed its deferred access
	 checks while in SFINAE context; redo them now so the user sees
	 the errors (when COMPLAIN allows it).  */
      if (FNDECL_HAS_ACCESS_ERRORS (spec))
	{
	  if (complain & tf_error)
	    recheck_decl_substitution (spec, gen_tmpl, targ_ptr);
	  return error_mark_node;
	}
      return spec;
    }
  if (check_instantiated_args (gen_tmpl, INNERMOST_TEMPLATE_ARGS (targ_ptr),
			       complain))
    return error_mark_node;
  /* We are building a FUNCTION_DECL, during which the access of its
     parameters and return types have to be checked.  However this
     FUNCTION_DECL which is the desired context for access checking
     is not built yet.  We solve this chicken-and-egg problem by
     deferring all checks until we have the FUNCTION_DECL.  */
  push_deferring_access_checks (dk_deferred);
  /* Instantiation of the function happens in the context of the function
     template, not the context of the overload resolution we're doing.  */
  push_to_top_level ();
  /* If there are dependent arguments, e.g. because we're doing partial
     ordering, make sure processing_template_decl stays set.
     NOTE(review): no explicit decrement follows; presumably
     pop_from_top_level restores the saved value — confirm.  */
  if (uses_template_parms (targ_ptr))
    ++processing_template_decl;
  if (DECL_CLASS_SCOPE_P (gen_tmpl))
    {
      /* Substitute into the enclosing class and enter its scope so
	 member lookups during substitution resolve correctly.  */
      tree ctx = tsubst_aggr_type (DECL_CONTEXT (gen_tmpl), targ_ptr,
				   complain, gen_tmpl, true);
      push_nested_class (ctx);
    }
  tree pattern = DECL_TEMPLATE_RESULT (gen_tmpl);
  fndecl = NULL_TREE;
  if (VAR_P (pattern))
    {
      /* We need to determine if we're using a partial or explicit
	 specialization now, because the type of the variable could be
	 different.  */
      tree tid = lookup_template_variable (gen_tmpl, targ_ptr);
      tree elt = most_specialized_partial_spec (tid, complain);
      if (elt == error_mark_node)
	pattern = error_mark_node;
      else if (elt)
	{
	  /* Substitute using the partial specialization's own pattern
	     and deduced arguments rather than the primary template.  */
	  tree partial_tmpl = TREE_VALUE (elt);
	  tree partial_args = TREE_PURPOSE (elt);
	  tree partial_pat = DECL_TEMPLATE_RESULT (partial_tmpl);
	  fndecl = tsubst (partial_pat, partial_args, complain, gen_tmpl);
	}
    }
  /* Substitute template parameters to obtain the specialization.  */
  if (fndecl == NULL_TREE)
    fndecl = tsubst (pattern, targ_ptr, complain, gen_tmpl);
  if (DECL_CLASS_SCOPE_P (gen_tmpl))
    pop_nested_class ();
  pop_from_top_level ();
  if (fndecl == error_mark_node)
    {
      /* Balance the dk_deferred push above before bailing out.  */
      pop_deferring_access_checks ();
      return error_mark_node;
    }
  /* The DECL_TI_TEMPLATE should always be the immediate parent
     template, not the most general template.  */
  DECL_TI_TEMPLATE (fndecl) = tmpl;
  DECL_TI_ARGS (fndecl) = targ_ptr;
  /* Now we know the specialization, compute access previously
     deferred.  Do no access control for inheriting constructors,
     as we already checked access for the inherited constructor.  */
  if (!(flag_new_inheriting_ctors
	&& DECL_INHERITED_CTOR (fndecl)))
    {
      push_access_scope (fndecl);
      if (!perform_deferred_access_checks (complain))
	access_ok = false;
      pop_access_scope (fndecl);
    }
  pop_deferring_access_checks ();
  /* If we've just instantiated the main entry point for a function,
     instantiate all the alternate entry points as well.  We do this
     by cloning the instantiation of the main entry point, not by
     instantiating the template clones.  */
  if (DECL_CHAIN (gen_tmpl) && DECL_CLONED_FUNCTION_P (DECL_CHAIN (gen_tmpl)))
    clone_function_decl (fndecl, /*update_method_vec_p=*/0);
  if (!access_ok)
    {
      if (!(complain & tf_error))
	{
	  /* Remember to reinstantiate when we're out of SFINAE so the user
	     can see the errors.  */
	  FNDECL_HAS_ACCESS_ERRORS (fndecl) = true;
	}
      return error_mark_node;
    }
  return fndecl;
}
/* Wrapper for instantiate_template_1 that attributes the work to the
   TV_TEMPLATE_INST timer.  TMPL, ORIG_ARGS and COMPLAIN are passed
   through unchanged.  */
tree
instantiate_template (tree tmpl, tree orig_args, tsubst_flags_t complain)
{
  timevar_push (TV_TEMPLATE_INST);
  tree result = instantiate_template_1 (tmpl, orig_args, complain);
  timevar_pop (TV_TEMPLATE_INST);
  return result;
}
/* Instantiate the alias template TMPL with ARGS.  Also push a template
   instantiation level, which instantiate_template doesn't do because
   functions and variables have sufficient context established by the
   callers.  Returns the instantiated alias, or error_mark_node on
   failure; diagnostics are controlled by COMPLAIN.  */
static tree
instantiate_alias_template (tree tmpl, tree args, tsubst_flags_t complain)
{
  /* Snapshot the pending-template / error-tinst state so we can tell
     below whether anything still points at the TINST node we build.  */
  struct pending_template *old_last_pend = last_pending_template;
  struct tinst_level *old_error_tinst = last_error_tinst_level;
  if (tmpl == error_mark_node || args == error_mark_node)
    return error_mark_node;
  tree tinst = build_tree_list (tmpl, args);
  if (!push_tinst_level (tinst))
    {
      /* Could not enter a new instantiation level (presumably the
	 instantiation depth limit — see push_tinst_level); the node
	 was never published, so it is safe to free.  */
      ggc_free (tinst);
      return error_mark_node;
    }
  /* Convert/complete the argument list against TMPL's parameters,
     filling in default arguments and requiring all of them.  */
  args =
    coerce_innermost_template_parms (DECL_TEMPLATE_PARMS (tmpl),
				     args, tmpl, complain,
				     /*require_all_args=*/true,
				     /*use_default_args=*/true);
  tree r = instantiate_template (tmpl, args, complain);
  pop_tinst_level ();
  /* We can't free this if a pending_template entry or last_error_tinst_level
     is pointing at it.  */
  if (last_pending_template == old_last_pend
      && last_error_tinst_level == old_error_tinst)
    ggc_free (tinst);
  return r;
}
/* PARM is a template parameter pack for FN.  Returns true iff PARM is
   used in a deducible way in the argument list of FN.  */
static bool
pack_deducible_p (tree parm, tree fn)
{
  for (tree arg = FUNCTION_FIRST_USER_PARMTYPE (fn);
       arg != NULL_TREE; arg = TREE_CHAIN (arg))
    {
      tree arg_type = TREE_VALUE (arg);
      /* Only function parameter packs can mention PARM deducibly
	 in the parameter list.  */
      if (!PACK_EXPANSION_P (arg_type))
	continue;
      for (tree pack = PACK_EXPANSION_PARAMETER_PACKS (arg_type);
	   pack != NULL_TREE; pack = TREE_CHAIN (pack))
	{
	  if (!template_args_equal (TREE_VALUE (pack), parm))
	    continue;
	  /* PARM appears in this function parameter pack.  It is
	     deducible only when the pack ends the parameter list;
	     otherwise it could at best be deduced from a non-pack
	     parameter, which would end up with a deduction mismatch,
	     so don't bother.  */
	  return TREE_CHAIN (arg) == void_list_node;
	}
    }
  /* The template parameter pack isn't used in any function parameter
     packs, but it might be used deeper, e.g. tuple<Args...>.  */
  return true;
}
/* The FN is a TEMPLATE_DECL for a function. ARGS is an array with
NARGS elements of the arguments that are being used when calling
it. TARGS is a vector into which the deduced template arguments
are placed.
Returns either a FUNCTION_DECL for the matching specialization of FN or
NULL_TREE if no suitable specialization can be found. If EXPLAIN_P is
true, diagnostics will be printed to explain why it failed.
If FN is a conversion operator, or we are trying to produce a specific
specialization, RETURN_TYPE is the return type desired.
The EXPLICIT_TARGS are explicit template arguments provided via a
template-id.
The parameter STRICT is one of:
DEDUCE_CALL:
We are deducing arguments for a function call, as in
[temp.deduct.call]. If RETURN_TYPE is non-null, we are
deducing arguments for a call to the result of a conversion
function template, as in [over.call.object].
DEDUCE_CONV:
We are deducing arguments for a conversion function, as in
[temp.deduct.conv].
DEDUCE_EXACT:
We are deducing arguments when doing an explicit instantiation
as in [temp.explicit], when determining an explicit specialization
as in [temp.expl.spec], or when taking the address of a function
template, as in [temp.deduct.funcaddr]. */
tree
fn_type_unification (tree fn,
		     tree explicit_targs,
		     tree targs,
		     const tree *args,
		     unsigned int nargs,
		     tree return_type,
		     unification_kind_t strict,
		     int flags,
		     bool explain_p,
		     bool decltype_p)
{
  tree parms;
  tree fntype;
  tree decl = NULL_TREE;
  /* Only emit diagnostics when the caller asked us to explain failure.  */
  tsubst_flags_t complain = (explain_p ? tf_warning_or_error : tf_none);
  bool ok;
  /* Function-static: tracks recursive entries so we can detect runaway
     deduction; reset logic is at the `fail' label.  */
  static int deduction_depth;
  /* Snapshot these so we can tell at the end whether TINST escaped into
     the pending-template / error-tinst lists (and must not be freed).  */
  struct pending_template *old_last_pend = last_pending_template;
  struct tinst_level *old_error_tinst = last_error_tinst_level;
  tree orig_fn = fn;
  if (flag_new_inheriting_ctors)
    fn = strip_inheriting_ctors (fn);
  tree tparms = DECL_INNERMOST_TEMPLATE_PARMS (fn);
  tree tinst;
  /* R is what we return; error_mark_node until deduction fully succeeds.  */
  tree r = error_mark_node;
  /* FULL_TARGS carries outer template levels too, when FN is a member of
     a (partially instantiated) template.  */
  tree full_targs = targs;
  if (TMPL_ARGS_DEPTH (targs)
      < TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (fn)))
    full_targs = (add_outermost_template_args
		  (DECL_TI_ARGS (DECL_TEMPLATE_RESULT (fn)),
		   targs));
  if (decltype_p)
    complain |= tf_decltype;
  /* In C++0x, it's possible to have a function template whose type depends
     on itself recursively.  This is most obvious with decltype, but can also
     occur with enumeration scope (c++/48969).  So we need to catch infinite
     recursion and reject the substitution at deduction time; this function
     will return error_mark_node for any repeated substitution.
     This also catches excessive recursion such as when f<N> depends on
     f<N-1> across all integers, and returns error_mark_node for all the
     substitutions back up to the initial one.
     This is, of course, not reentrant.  */
  if (excessive_deduction_depth)
    return error_mark_node;
  tinst = build_tree_list (fn, NULL_TREE);
  ++deduction_depth;
  gcc_assert (TREE_CODE (fn) == TEMPLATE_DECL);
  fntype = TREE_TYPE (fn);
  if (explicit_targs)
    {
      /* [temp.deduct]
	 The specified template arguments must match the template
	 parameters in kind (i.e., type, nontype, template), and there
	 must not be more arguments than there are parameters;
	 otherwise type deduction fails.
	 Nontype arguments must match the types of the corresponding
	 nontype template parameters, or must be convertible to the
	 types of the corresponding nontype parameters as specified in
	 _temp.arg.nontype_, otherwise type deduction fails.
	 All references in the function type of the function template
	 to the corresponding template parameters are replaced by the
	 specified template argument values.  If a substitution in a
	 template parameter or in the function type of the function
	 template results in an invalid type, type deduction fails.  */
      int i, len = TREE_VEC_LENGTH (tparms);
      location_t loc = input_location;
      /* Set when not every template parameter got an explicit argument;
	 the remainder must be deduced or defaulted.  */
      bool incomplete = false;
      if (explicit_targs == error_mark_node)
	goto fail;
      if (TMPL_ARGS_DEPTH (explicit_targs)
	  < TMPL_ARGS_DEPTH (full_targs))
	explicit_targs = add_outermost_template_args (full_targs,
						      explicit_targs);
      /* Adjust any explicit template arguments before entering the
	 substitution context.  */
      explicit_targs
	= (coerce_template_parms (tparms, explicit_targs, NULL_TREE,
				  complain,
				  /*require_all_args=*/false,
				  /*use_default_args=*/false));
      if (explicit_targs == error_mark_node)
	goto fail;
      /* Substitute the explicit args into the function type.  This is
	 necessary so that, for instance, explicitly declared function
	 arguments can match null pointed constants.  If we were given
	 an incomplete set of explicit args, we must not do semantic
	 processing during substitution as we could create partial
	 instantiations.  */
      for (i = 0; i < len; i++)
	{
	  tree parm = TREE_VALUE (TREE_VEC_ELT (tparms, i));
	  bool parameter_pack = false;
	  tree targ = TREE_VEC_ELT (explicit_targs, i);
	  /* Dig out the actual parm.  */
	  if (TREE_CODE (parm) == TYPE_DECL
	      || TREE_CODE (parm) == TEMPLATE_DECL)
	    {
	      parm = TREE_TYPE (parm);
	      parameter_pack = TEMPLATE_TYPE_PARAMETER_PACK (parm);
	    }
	  else if (TREE_CODE (parm) == PARM_DECL)
	    {
	      parm = DECL_INITIAL (parm);
	      parameter_pack = TEMPLATE_PARM_PARAMETER_PACK (parm);
	    }
	  if (!parameter_pack && targ == NULL_TREE)
	    /* No explicit argument for this template parameter.  */
	    incomplete = true;
	  if (parameter_pack && pack_deducible_p (parm, fn))
	    {
	      /* Mark the argument pack as "incomplete".  We could
		 still deduce more arguments during unification.
		 We remove this mark in type_unification_real.  */
	      if (targ)
		{
		  ARGUMENT_PACK_INCOMPLETE_P(targ) = 1;
		  ARGUMENT_PACK_EXPLICIT_ARGS (targ)
		    = ARGUMENT_PACK_ARGS (targ);
		}
	      /* We have some incomplete argument packs.  */
	      incomplete = true;
	    }
	}
      TREE_VALUE (tinst) = explicit_targs;
      if (!push_tinst_level (tinst))
	{
	  excessive_deduction_depth = true;
	  goto fail;
	}
      /* If INCOMPLETE, substitute in template-ish mode so that we don't
	 do full semantic processing on partial instantiations.  */
      processing_template_decl += incomplete;
      input_location = DECL_SOURCE_LOCATION (fn);
      /* Ignore any access checks; we'll see them again in
	 instantiate_template and they might have the wrong
	 access path at this point.  */
      push_deferring_access_checks (dk_deferred);
      fntype = tsubst (TREE_TYPE (fn), explicit_targs,
		       complain | tf_partial | tf_fndecl_type, NULL_TREE);
      pop_deferring_access_checks ();
      input_location = loc;
      processing_template_decl -= incomplete;
      pop_tinst_level ();
      if (fntype == error_mark_node)
	goto fail;
      /* Place the explicitly specified arguments in TARGS.  */
      explicit_targs = INNERMOST_TEMPLATE_ARGS (explicit_targs);
      for (i = NUM_TMPL_ARGS (explicit_targs); i--;)
	TREE_VEC_ELT (targs, i) = TREE_VEC_ELT (explicit_targs, i);
    }
  /* Never do unification on the 'this' parameter.  */
  parms = skip_artificial_parms_for (fn, TYPE_ARG_TYPES (fntype));
  if (return_type && strict == DEDUCE_CALL)
    {
      /* We're deducing for a call to the result of a template conversion
	 function.  The parms we really want are in return_type.  */
      if (POINTER_TYPE_P (return_type))
	return_type = TREE_TYPE (return_type);
      parms = TYPE_ARG_TYPES (return_type);
    }
  else if (return_type)
    {
      /* Treat the desired return type as an extra leading argument so it
	 participates in deduction like any other P/A pair.  */
      tree *new_args;
      parms = tree_cons (NULL_TREE, TREE_TYPE (fntype), parms);
      new_args = XALLOCAVEC (tree, nargs + 1);
      new_args[0] = return_type;
      memcpy (new_args + 1, args, nargs * sizeof (tree));
      args = new_args;
      ++nargs;
    }
  /* We allow incomplete unification without an error message here
     because the standard doesn't seem to explicitly prohibit it.  Our
     callers must be ready to deal with unification failures in any
     event.  */
  TREE_VALUE (tinst) = targs;
  /* If we aren't explaining yet, push tinst context so we can see where
     any errors (e.g. from class instantiations triggered by instantiation
     of default template arguments) come from.  If we are explaining, this
     context is redundant.  */
  if (!explain_p && !push_tinst_level (tinst))
    {
      excessive_deduction_depth = true;
      goto fail;
    }
  /* type_unification_real will pass back any access checks from default
     template argument substitution.  */
  vec<deferred_access_check, va_gc> *checks;
  checks = NULL;
  ok = !type_unification_real (DECL_INNERMOST_TEMPLATE_PARMS (fn),
			       full_targs, parms, args, nargs, /*subr=*/0,
			       strict, flags, &checks, explain_p);
  if (!explain_p)
    pop_tinst_level ();
  if (!ok)
    goto fail;
  /* Now that we have bindings for all of the template arguments,
     ensure that the arguments deduced for the template template
     parameters have compatible template parameter lists.  We cannot
     check this property before we have deduced all template
     arguments, because the template parameter types of a template
     template parameter might depend on prior template parameters
     deduced after the template template parameter.  The following
     ill-formed example illustrates this issue:
       template<typename T, template<T> class C> void f(C<5>, T);
       template<int N> struct X {};
       void g() {
	 f(X<5>(), 5l); // error: template argument deduction fails
       }
     The template parameter list of 'C' depends on the template type
     parameter 'T', but 'C' is deduced to 'X' before 'T' is deduced to
     'long'.  Thus, we can't check that 'C' cannot bind to 'X' at the
     time that we deduce 'C'.  */
  if (!template_template_parm_bindings_ok_p
      (DECL_INNERMOST_TEMPLATE_PARMS (fn), targs))
    {
      unify_inconsistent_template_template_parameters (explain_p);
      goto fail;
    }
  /* All is well so far.  Now, check:
     [temp.deduct]
     When all template arguments have been deduced, all uses of
     template parameters in nondeduced contexts are replaced with
     the corresponding deduced argument values.  If the
     substitution results in an invalid type, as described above,
     type deduction fails.  */
  TREE_VALUE (tinst) = targs;
  if (!push_tinst_level (tinst))
    {
      excessive_deduction_depth = true;
      goto fail;
    }
  /* Also collect access checks from the instantiation.  */
  reopen_deferring_access_checks (checks);
  decl = instantiate_template (fn, targs, complain);
  checks = get_deferred_access_checks ();
  pop_deferring_access_checks ();
  pop_tinst_level ();
  if (decl == error_mark_node)
    goto fail;
  /* Now perform any access checks encountered during substitution.  */
  push_access_scope (decl);
  ok = perform_access_checks (checks, complain);
  pop_access_scope (decl);
  if (!ok)
    goto fail;
  /* If we're looking for an exact match, check that what we got
     is indeed an exact match.  It might not be if some template
     parameters are used in non-deduced contexts.  But don't check
     for an exact match if we have dependent template arguments;
     in that case we're doing partial ordering, and we already know
     that we have two candidates that will provide the actual type.  */
  if (strict == DEDUCE_EXACT && !any_dependent_template_arguments_p (targs))
    {
      tree substed = TREE_TYPE (decl);
      unsigned int i;
      tree sarg
	= skip_artificial_parms_for (decl, TYPE_ARG_TYPES (substed));
      if (return_type)
	sarg = tree_cons (NULL_TREE, TREE_TYPE (substed), sarg);
      for (i = 0; i < nargs && sarg; ++i, sarg = TREE_CHAIN (sarg))
	if (!same_type_p (args[i], TREE_VALUE (sarg)))
	  {
	    unify_type_mismatch (explain_p, args[i],
				 TREE_VALUE (sarg));
	    goto fail;
	  }
    }
  /* After doing deduction with the inherited constructor, actually return an
     instantiation of the inheriting constructor.  */
  if (orig_fn != fn)
    decl = instantiate_template (orig_fn, targs, complain);
  r = decl;
 fail:
  --deduction_depth;
  if (excessive_deduction_depth)
    {
      if (deduction_depth == 0)
	/* Reset once we're all the way out.  */
	excessive_deduction_depth = false;
    }
  /* We can't free this if a pending_template entry or last_error_tinst_level
     is pointing at it.  */
  if (last_pending_template == old_last_pend
      && last_error_tinst_level == old_error_tinst)
    ggc_free (tinst);
  return r;
}
/* Adjust types before performing type deduction, as described in
[temp.deduct.call] and [temp.deduct.conv]. The rules in these two
sections are symmetric. PARM is the type of a function parameter
or the return type of the conversion function. ARG is the type of
the argument passed to the call, or the type of the value
initialized with the result of the conversion function.
ARG_EXPR is the original argument expression, which may be null. */
static int
maybe_adjust_types_for_deduction (unification_kind_t strict,
				  tree* parm,
				  tree* arg,
				  tree arg_expr)
{
  /* Accumulates extra UNIFY_ALLOW_* bits the caller should OR into its
     strictness flags.  */
  int result = 0;
  switch (strict)
    {
    case DEDUCE_CALL:
      break;
    case DEDUCE_CONV:
      /* Swap PARM and ARG throughout the remainder of this
	 function; the handling is precisely symmetric since PARM
	 will initialize ARG rather than vice versa.  */
      std::swap (parm, arg);
      break;
    case DEDUCE_EXACT:
      /* Core issue #873: Do the DR606 thing (see below) for these cases,
	 too, but here handle it by stripping the reference from PARM
	 rather than by adding it to ARG.  */
      if (TREE_CODE (*parm) == REFERENCE_TYPE
	  && TYPE_REF_IS_RVALUE (*parm)
	  && TREE_CODE (TREE_TYPE (*parm)) == TEMPLATE_TYPE_PARM
	  && cp_type_quals (TREE_TYPE (*parm)) == TYPE_UNQUALIFIED
	  && TREE_CODE (*arg) == REFERENCE_TYPE
	  && !TYPE_REF_IS_RVALUE (*arg))
	*parm = TREE_TYPE (*parm);
      /* Nothing else to do in this case.  */
      return 0;
    default:
      gcc_unreachable ();
    }
  if (TREE_CODE (*parm) != REFERENCE_TYPE)
    {
      /* [temp.deduct.call]
	 If P is not a reference type:
	 --If A is an array type, the pointer type produced by the
	 array-to-pointer standard conversion (_conv.array_) is
	 used in place of A for type deduction; otherwise,
	 --If A is a function type, the pointer type produced by
	 the function-to-pointer standard conversion
	 (_conv.func_) is used in place of A for type deduction;
	 otherwise,
	 --If A is a cv-qualified type, the top level
	 cv-qualifiers of A's type are ignored for type
	 deduction.  */
      if (TREE_CODE (*arg) == ARRAY_TYPE)
	*arg = build_pointer_type (TREE_TYPE (*arg));
      else if (TREE_CODE (*arg) == FUNCTION_TYPE)
	*arg = build_pointer_type (*arg);
      else
	*arg = TYPE_MAIN_VARIANT (*arg);
    }
  /* [14.8.2.1/3 temp.deduct.call], "A forwarding reference is an rvalue
     reference to a cv-unqualified template parameter that does not represent a
     template parameter of a class template (during class template argument
     deduction (13.3.1.8)).  If P is a forwarding reference and the argument is
     an lvalue, the type "lvalue reference to A" is used in place of A for type
     deduction.  */
  if (TREE_CODE (*parm) == REFERENCE_TYPE
      && TYPE_REF_IS_RVALUE (*parm)
      && TREE_CODE (TREE_TYPE (*parm)) == TEMPLATE_TYPE_PARM
      && !TEMPLATE_TYPE_PARM_FOR_CLASS (TREE_TYPE (*parm))
      && cp_type_quals (TREE_TYPE (*parm)) == TYPE_UNQUALIFIED
      && (arg_expr ? lvalue_p (arg_expr)
	  /* try_one_overload doesn't provide an arg_expr, but
	     functions are always lvalues.  */
	  : TREE_CODE (*arg) == FUNCTION_TYPE))
    *arg = build_reference_type (*arg);
  /* [temp.deduct.call]
     If P is a cv-qualified type, the top level cv-qualifiers
     of P's type are ignored for type deduction.  If P is a
     reference type, the type referred to by P is used for
     type deduction.  */
  *parm = TYPE_MAIN_VARIANT (*parm);
  if (TREE_CODE (*parm) == REFERENCE_TYPE)
    {
      *parm = TREE_TYPE (*parm);
      result |= UNIFY_ALLOW_OUTER_MORE_CV_QUAL;
    }
  /* DR 322. For conversion deduction, remove a reference type on parm
     too (which has been swapped into ARG).  */
  if (strict == DEDUCE_CONV && TREE_CODE (*arg) == REFERENCE_TYPE)
    *arg = TREE_TYPE (*arg);
  return result;
}
/* Subroutine of unify_one_argument. PARM is a function parameter of a
template which does contain any deducible template parameters; check if
ARG is a suitable match for it. STRICT, FLAGS and EXPLAIN_P are as in
unify_one_argument. */
static int
check_non_deducible_conversion (tree parm, tree arg, int strict,
				int flags, bool explain_p)
{
  /* ARG may be an expression or already a type; work with its type.  */
  tree type = TYPE_P (arg) ? arg : TREE_TYPE (arg);
  /* An exact type match always succeeds, whatever the strictness.  */
  if (same_type_p (parm, type))
    return unify_success (explain_p);
  if (strict == DEDUCE_CONV)
    {
      /* Conversion deduction: check PARM converts to the target type.  */
      if (can_convert_arg (type, parm, NULL_TREE, flags,
			   explain_p ? tf_warning_or_error : tf_none))
	return unify_success (explain_p);
    }
  else if (strict != DEDUCE_EXACT)
    {
      /* Call deduction: check the argument converts to PARM, passing the
	 expression itself when we have one.  */
      if (can_convert_arg (parm, type, TYPE_P (arg) ? NULL_TREE : arg,
			   flags, explain_p ? tf_warning_or_error : tf_none))
	return unify_success (explain_p);
    }
  /* No conversion worked; report the flavor of failure.  */
  return (strict == DEDUCE_EXACT
	  ? unify_type_mismatch (explain_p, parm, arg)
	  : unify_arg_conversion (explain_p, parm, type, arg));
}
static bool uses_deducible_template_parms (tree type);
/* Returns true iff the expression EXPR is one from which a template
argument can be deduced. In other words, if it's an undecorated
use of a template non-type parameter. */
static bool
deducible_expression (tree expr)
{
  /* Look through any implicit conversions wrapping the expression.  */
  for (; CONVERT_EXPR_P (expr); expr = TREE_OPERAND (expr, 0))
    ;
  /* Only a bare use of a non-type template parameter is deducible.  */
  return TREE_CODE (expr) == TEMPLATE_PARM_INDEX;
}
/* Returns true iff the array domain DOMAIN uses a template parameter in a
deducible way; that is, if it has a max value of <PARM> - 1. */
static bool
deducible_array_bound (tree domain)
{
  /* No domain (e.g. an incomplete array type) means nothing to deduce.  */
  if (!domain)
    return false;
  /* A deducible bound has the shape <PARM> - 1: a MINUS_EXPR whose first
     operand is an undecorated template parameter.  */
  tree upper = TYPE_MAX_VALUE (domain);
  return (TREE_CODE (upper) == MINUS_EXPR
	  && deducible_expression (TREE_OPERAND (upper, 0)));
}
/* Returns true iff the template arguments ARGS use a template parameter
in a deducible way. */
static bool
deducible_template_args (tree args)
{
  const int len = TREE_VEC_LENGTH (args);
  for (int ix = 0; ix < len; ++ix)
    {
      tree elt = TREE_VEC_ELT (args, ix);
      /* An argument pack is deducible iff any of its members is.  */
      if (ARGUMENT_PACK_P (elt))
	{
	  if (deducible_template_args (ARGUMENT_PACK_ARGS (elt)))
	    return true;
	  continue;
	}
      /* For a pack expansion, examine its pattern.  */
      if (PACK_EXPANSION_P (elt))
	elt = PACK_EXPANSION_PATTERN (elt);
      bool found;
      if (TREE_CODE (elt) == TEMPLATE_TEMPLATE_PARM)
	found = true;
      else if (TYPE_P (elt))
	found = uses_deducible_template_parms (elt);
      else
	found = deducible_expression (elt);
      if (found)
	return true;
    }
  return false;
}
/* Returns true iff TYPE contains any deducible references to template
parameters, as per 14.8.2.5. */
static bool
uses_deducible_template_parms (tree type)
{
  /* A pack expansion is deducible iff its pattern is.  */
  if (PACK_EXPANSION_P (type))
    type = PACK_EXPANSION_PATTERN (type);
  /* T
     cv-list T
     TT<T>
     TT<i>
     TT<> */
  if (TREE_CODE (type) == TEMPLATE_TYPE_PARM
      || TREE_CODE (type) == BOUND_TEMPLATE_TEMPLATE_PARM)
    return true;
  /* T*
     T&
     T&& */
  if (POINTER_TYPE_P (type))
    return uses_deducible_template_parms (TREE_TYPE (type));
  /* T[integer-constant ]
     type [i] */
  if (TREE_CODE (type) == ARRAY_TYPE)
    return (uses_deducible_template_parms (TREE_TYPE (type))
	    || deducible_array_bound (TYPE_DOMAIN (type)));
  /* T type ::*
     type T::*
     T T::*
     T (type ::*)()
     type (T::*)()
     type (type ::*)(T)
     type (T::*)(T)
     T (type ::*)(T)
     T (T::*)()
     T (T::*)(T) */
  if (TYPE_PTRMEM_P (type))
    return (uses_deducible_template_parms (TYPE_PTRMEM_CLASS_TYPE (type))
	    || (uses_deducible_template_parms
		(TYPE_PTRMEM_POINTED_TO_TYPE (type))));
  /* template-name <T> (where template-name refers to a class template)
     template-name <i> (where template-name refers to a class template) */
  if (CLASS_TYPE_P (type)
      && CLASSTYPE_TEMPLATE_INFO (type)
      && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (type)))
    return deducible_template_args (INNERMOST_TEMPLATE_ARGS
				    (CLASSTYPE_TI_ARGS (type)));
  /* type (T)
     T()
     T(T) */
  if (TREE_CODE (type) == FUNCTION_TYPE
      || TREE_CODE (type) == METHOD_TYPE)
    {
      /* Deducible via the return type...  */
      if (uses_deducible_template_parms (TREE_TYPE (type)))
	return true;
      tree parm = TYPE_ARG_TYPES (type);
      /* ...or via any parameter type (skipping the implicit `this'
	 for a METHOD_TYPE).  */
      if (TREE_CODE (type) == METHOD_TYPE)
	parm = TREE_CHAIN (parm);
      for (; parm; parm = TREE_CHAIN (parm))
	if (uses_deducible_template_parms (TREE_VALUE (parm)))
	  return true;
    }
  /* Anything else contains no deducible reference to template parms.  */
  return false;
}
/* Subroutine of type_unification_real and unify_pack_expansion to
handle unification of a single P/A pair. Parameters are as
for those functions. */
static int
unify_one_argument (tree tparms, tree targs, tree parm, tree arg,
		    int subr, unification_kind_t strict,
		    bool explain_p)
{
  tree arg_expr = NULL_TREE;
  int arg_strict;
  if (arg == error_mark_node || parm == error_mark_node)
    return unify_invalid (explain_p);
  if (arg == unknown_type_node)
    /* We can't deduce anything from this, but we might get all the
       template args from other function args.  */
    return unify_success (explain_p);
  /* Implicit conversions (Clause 4) will be performed on a function
     argument to convert it to the type of the corresponding function
     parameter if the parameter type contains no template-parameters that
     participate in template argument deduction.  */
  if (strict != DEDUCE_EXACT
      && TYPE_P (parm) && !uses_deducible_template_parms (parm))
    /* For function parameters with no deducible template parameters,
       just return.  We'll check non-dependent conversions later.  */
    return unify_success (explain_p);
  /* Translate the deduction kind into the base UNIFY_ALLOW_* bits.  */
  switch (strict)
    {
    case DEDUCE_CALL:
      arg_strict = (UNIFY_ALLOW_OUTER_LEVEL
		    | UNIFY_ALLOW_MORE_CV_QUAL
		    | UNIFY_ALLOW_DERIVED);
      break;
    case DEDUCE_CONV:
      arg_strict = UNIFY_ALLOW_LESS_CV_QUAL;
      break;
    case DEDUCE_EXACT:
      arg_strict = UNIFY_ALLOW_NONE;
      break;
    default:
      gcc_unreachable ();
    }
  /* We only do these transformations if this is the top-level
     parameter_type_list in a call or declaration matching; in other
     situations (nested function declarators, template argument lists) we
     won't be comparing a type to an expression, and we don't do any type
     adjustments.  */
  if (!subr)
    {
      if (!TYPE_P (arg))
	{
	  gcc_assert (TREE_TYPE (arg) != NULL_TREE);
	  if (type_unknown_p (arg))
	    {
	      /* [temp.deduct.type] A template-argument can be
		 deduced from a pointer to function or pointer
		 to member function argument if the set of
		 overloaded functions does not contain function
		 templates and at most one of a set of
		 overloaded functions provides a unique
		 match.  */
	      if (resolve_overloaded_unification
		  (tparms, targs, parm, arg, strict,
		   arg_strict, explain_p))
		return unify_success (explain_p);
	      return unify_overload_resolution_failure (explain_p, arg);
	    }
	  /* Keep the expression around: the lvalue-ness and init-list
	     checks below need it, not just its type.  */
	  arg_expr = arg;
	  arg = unlowered_expr_type (arg);
	  if (arg == error_mark_node)
	    return unify_invalid (explain_p);
	}
      /* Apply [temp.deduct.call]/[temp.deduct.conv] adjustments; this may
	 rewrite PARM and ARG and add UNIFY_ALLOW_* bits.  */
      arg_strict |=
	maybe_adjust_types_for_deduction (strict, &parm, &arg, arg_expr);
    }
  else
    if ((TYPE_P (parm) || TREE_CODE (parm) == TEMPLATE_DECL)
	!= (TYPE_P (arg) || TREE_CODE (arg) == TEMPLATE_DECL))
      return unify_template_argument_mismatch (explain_p, parm, arg);
  /* For deduction from an init-list we need the actual list.  */
  if (arg_expr && BRACE_ENCLOSED_INITIALIZER_P (arg_expr))
    arg = arg_expr;
  return unify (tparms, targs, parm, arg, arg_strict, explain_p);
}
/* for_each_template_parm callback that always returns 0. */
/* for_each_template_parm callback that matches nothing: always return 0
   so the walk continues.  */
static int
zero_r (tree /*t*/, void * /*data*/)
{
  return 0;
}
/* for_each_template_parm any_fn callback to handle deduction of a template
type argument from the type of an array bound. */
static int
array_deduction_r (tree t, void *data)
{
  /* DATA packages the template parms (purpose) and deduced args (value);
     see try_array_deduction.  */
  tree_pair_p d = (tree_pair_p)data;
  tree &tparms = d->purpose;
  tree &targs = d->value;
  if (TREE_CODE (t) == ARRAY_TYPE)
    if (tree dom = TYPE_DOMAIN (t))
      if (tree max = TYPE_MAX_VALUE (dom))
	{
	  /* A bound of the form <PARM> - 1: strip the subtraction to get
	     at the parameter itself.  */
	  if (TREE_CODE (max) == MINUS_EXPR)
	    max = TREE_OPERAND (max, 0);
	  /* Deduce the parameter's type by unifying it against
	     size_type_node, the type of an array bound.  */
	  if (TREE_CODE (max) == TEMPLATE_PARM_INDEX)
	    unify (tparms, targs, TREE_TYPE (max), size_type_node,
		   UNIFY_ALLOW_NONE, /*explain*/false);
	}
  /* Keep walking.  */
  return 0;
}
/* Try to deduce any not-yet-deduced template type arguments from the type of
an array bound. This is handled separately from unify because 14.8.2.5 says
"The type of a type parameter is only deduced from an array bound if it is
not otherwise deduced." */
static void
try_array_deduction (tree tparms, tree targs, tree parm)
{
  /* Package the parameter and argument vectors for the walk callback.  */
  tree_pair_s cb_data;
  cb_data.purpose = tparms;
  cb_data.value = targs;
  hash_set<tree> seen;
  /* Walk PARM; array_deduction_r does the real work at each array type
     encountered, while zero_r keeps the walk going.  */
  for_each_template_parm (parm, zero_r, &cb_data, &seen,
			  /*nondeduced*/false, array_deduction_r);
}
/* Most parms like fn_type_unification.
If SUBR is 1, we're being called recursively (to unify the
arguments of a function or method parameter of a function
template).
CHECKS is a pointer to a vector of access checks encountered while
substituting default template arguments. */
static int
type_unification_real (tree tparms,
		       tree full_targs,
		       tree xparms,
		       const tree *xargs,
		       unsigned int xnargs,
		       int subr,
		       unification_kind_t strict,
		       int flags,
		       vec<deferred_access_check, va_gc> **checks,
		       bool explain_p)
{
  tree parm, arg;
  int i;
  int ntparms = TREE_VEC_LENGTH (tparms);
  /* 0 = first pass; 1 = a nontype parm depending on a type parm was left
     undeduced, retry; 2 = final pass.  Drives the `again' loop below.  */
  int saw_undeduced = 0;
  tree parms;
  const tree *args;
  unsigned int nargs;
  unsigned int ia;
  gcc_assert (TREE_CODE (tparms) == TREE_VEC);
  gcc_assert (xparms == NULL_TREE || TREE_CODE (xparms) == TREE_LIST);
  gcc_assert (ntparms > 0);
  tree targs = INNERMOST_TEMPLATE_ARGS (full_targs);
  /* Reset the number of non-defaulted template arguments contained
     in TARGS.  */
  NON_DEFAULT_TEMPLATE_ARGS_COUNT (targs) = NULL_TREE;
 again:
  /* Restore the P/A cursors; the X-prefixed copies are the pristine
     inputs so each pass starts from the top.  */
  parms = xparms;
  args = xargs;
  nargs = xnargs;
  ia = 0;
  while (parms && parms != void_list_node
	 && ia < nargs)
    {
      parm = TREE_VALUE (parms);
      if (TREE_CODE (parm) == TYPE_PACK_EXPANSION
	  && (!TREE_CHAIN (parms) || TREE_CHAIN (parms) == void_list_node))
	/* For a function parameter pack that occurs at the end of the
	   parameter-declaration-list, the type A of each remaining
	   argument of the call is compared with the type P of the
	   declarator-id of the function parameter pack.  */
	break;
      parms = TREE_CHAIN (parms);
      if (TREE_CODE (parm) == TYPE_PACK_EXPANSION)
	/* For a function parameter pack that does not occur at the
	   end of the parameter-declaration-list, the type of the
	   parameter pack is a non-deduced context.  */
	continue;
      arg = args[ia];
      ++ia;
      if (unify_one_argument (tparms, full_targs, parm, arg, subr, strict,
			      explain_p))
	return 1;
    }
  if (parms
      && parms != void_list_node
      && TREE_CODE (TREE_VALUE (parms)) == TYPE_PACK_EXPANSION)
    {
      /* Unify the remaining arguments with the pack expansion type.  */
      tree argvec;
      tree parmvec = make_tree_vec (1);
      /* Allocate a TREE_VEC and copy in all of the arguments */
      argvec = make_tree_vec (nargs - ia);
      for (i = 0; ia < nargs; ++ia, ++i)
	TREE_VEC_ELT (argvec, i) = args[ia];
      /* Copy the parameter into parmvec.  */
      TREE_VEC_ELT (parmvec, 0) = TREE_VALUE (parms);
      if (unify_pack_expansion (tparms, full_targs, parmvec, argvec, strict,
				/*subr=*/subr, explain_p))
	return 1;
      /* Advance to the end of the list of parameters.  */
      parms = TREE_CHAIN (parms);
    }
  /* Fail if we've reached the end of the parm list, and more args
     are present, and the parm list isn't variadic.  */
  if (ia < nargs && parms == void_list_node)
    return unify_too_many_arguments (explain_p, nargs, ia);
  /* Fail if parms are left and they don't have default values and
     they aren't all deduced as empty packs (c++/57397).  This is
     consistent with sufficient_parms_p.  */
  if (parms && parms != void_list_node
      && TREE_PURPOSE (parms) == NULL_TREE)
    {
      /* COUNT becomes the number of parameters that would be needed;
	 trailing type packs are allowed to be empty and don't count.  */
      unsigned int count = nargs;
      tree p = parms;
      bool type_pack_p;
      do
	{
	  type_pack_p = TREE_CODE (TREE_VALUE (p)) == TYPE_PACK_EXPANSION;
	  if (!type_pack_p)
	    count++;
	  p = TREE_CHAIN (p);
	}
      while (p && p != void_list_node);
      if (count != nargs)
	return unify_too_few_arguments (explain_p, ia, count,
					type_pack_p);
    }
  if (!subr)
    {
      tsubst_flags_t complain = (explain_p
				 ? tf_warning_or_error
				 : tf_none);
      /* Array-bound deduction (14.8.2.5) only applies from C++17 on;
	 pretend we already tried it for older dialects.  */
      bool tried_array_deduction = (cxx_dialect < cxx1z);
      for (i = 0; i < ntparms; i++)
	{
	  tree targ = TREE_VEC_ELT (targs, i);
	  tree tparm = TREE_VEC_ELT (tparms, i);
	  /* Clear the "incomplete" flags on all argument packs now so that
	     substituting them into later default arguments works.  */
	  if (targ && ARGUMENT_PACK_P (targ))
	    {
	      ARGUMENT_PACK_INCOMPLETE_P (targ) = 0;
	      ARGUMENT_PACK_EXPLICIT_ARGS (targ) = NULL_TREE;
	    }
	  if (targ || tparm == error_mark_node)
	    continue;
	  tparm = TREE_VALUE (tparm);
	  if (TREE_CODE (tparm) == TYPE_DECL
	      && !tried_array_deduction)
	    {
	      try_array_deduction (tparms, targs, xparms);
	      tried_array_deduction = true;
	      if (TREE_VEC_ELT (targs, i))
		continue;
	    }
	  /* If this is an undeduced nontype parameter that depends on
	     a type parameter, try another pass; its type may have been
	     deduced from a later argument than the one from which
	     this parameter can be deduced.  */
	  if (TREE_CODE (tparm) == PARM_DECL
	      && uses_template_parms (TREE_TYPE (tparm))
	      && saw_undeduced < 2)
	    {
	      saw_undeduced = 1;
	      continue;
	    }
	  /* Core issue #226 (C++0x) [temp.deduct]:
	     If a template argument has not been deduced, its
	     default template argument, if any, is used.
	     When we are in C++98 mode, TREE_PURPOSE will either
	     be NULL_TREE or ERROR_MARK_NODE, so we do not need
	     to explicitly check cxx_dialect here.  */
	  if (TREE_PURPOSE (TREE_VEC_ELT (tparms, i)))
	    /* OK, there is a default argument.  Wait until after the
	       conversion check to do substitution.  */
	    continue;
	  /* If the type parameter is a parameter pack, then it will
	     be deduced to an empty parameter pack.  */
	  if (template_parameter_pack_p (tparm))
	    {
	      tree arg;
	      if (TREE_CODE (tparm) == TEMPLATE_PARM_INDEX)
		{
		  arg = make_node (NONTYPE_ARGUMENT_PACK);
		  TREE_TYPE (arg)  = TREE_TYPE (TEMPLATE_PARM_DECL (tparm));
		  TREE_CONSTANT (arg) = 1;
		}
	      else
		arg = cxx_make_type (TYPE_ARGUMENT_PACK);
	      SET_ARGUMENT_PACK_ARGS (arg, make_tree_vec (0));
	      TREE_VEC_ELT (targs, i) = arg;
	      continue;
	    }
	  return unify_parameter_deduction_failure (explain_p, tparm);
	}
      /* DR 1391: All parameters have args, now check non-dependent parms for
	 convertibility.  */
      if (saw_undeduced < 2)
	for (ia = 0, parms = xparms, args = xargs, nargs = xnargs;
	     parms && parms != void_list_node && ia < nargs; )
	  {
	    parm = TREE_VALUE (parms);
	    if (TREE_CODE (parm) == TYPE_PACK_EXPANSION
		&& (!TREE_CHAIN (parms)
		    || TREE_CHAIN (parms) == void_list_node))
	      /* For a function parameter pack that occurs at the end of the
		 parameter-declaration-list, the type A of each remaining
		 argument of the call is compared with the type P of the
		 declarator-id of the function parameter pack.  */
	      break;
	    parms = TREE_CHAIN (parms);
	    if (TREE_CODE (parm) == TYPE_PACK_EXPANSION)
	      /* For a function parameter pack that does not occur at the
		 end of the parameter-declaration-list, the type of the
		 parameter pack is a non-deduced context.  */
	      continue;
	    arg = args[ia];
	    ++ia;
	    if (uses_template_parms (parm))
	      continue;
	    if (check_non_deducible_conversion (parm, arg, strict, flags,
						explain_p))
	      return 1;
	  }
      /* Now substitute into the default template arguments.  */
      for (i = 0; i < ntparms; i++)
	{
	  tree targ = TREE_VEC_ELT (targs, i);
	  tree tparm = TREE_VEC_ELT (tparms, i);
	  if (targ || tparm == error_mark_node)
	    continue;
	  tree parm = TREE_VALUE (tparm);
	  if (TREE_CODE (parm) == PARM_DECL
	      && uses_template_parms (TREE_TYPE (parm))
	      && saw_undeduced < 2)
	    continue;
	  tree arg = TREE_PURPOSE (tparm);
	  reopen_deferring_access_checks (*checks);
	  location_t save_loc = input_location;
	  if (DECL_P (parm))
	    input_location = DECL_SOURCE_LOCATION (parm);
	  /* On the intermediate pass, substitute in template mode since
	     some arguments may still be undeduced.  */
	  if (saw_undeduced == 1)
	    ++processing_template_decl;
	  arg = tsubst_template_arg (arg, full_targs, complain, NULL_TREE);
	  if (saw_undeduced == 1)
	    --processing_template_decl;
	  if (arg != error_mark_node && !uses_template_parms (arg))
	    arg = convert_template_argument (parm, arg, full_targs, complain,
					     i, NULL_TREE);
	  else if (saw_undeduced == 1)
	    arg = NULL_TREE;
	  else
	    arg = error_mark_node;
	  input_location = save_loc;
	  *checks = get_deferred_access_checks ();
	  pop_deferring_access_checks ();
	  if (arg == error_mark_node)
	    return 1;
	  else if (arg)
	    {
	      TREE_VEC_ELT (targs, i) = arg;
	      /* The position of the first default template argument,
		 is also the number of non-defaulted arguments in TARGS.
		 Record that.  */
	      if (!NON_DEFAULT_TEMPLATE_ARGS_COUNT (targs))
		SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (targs, i);
	    }
	}
      /* Take the second pass if something was left undeduced above.  */
      if (saw_undeduced++ == 1)
	goto again;
    }
  if (CHECKING_P && !NON_DEFAULT_TEMPLATE_ARGS_COUNT (targs))
    SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (targs, TREE_VEC_LENGTH (targs));
  return unify_success (explain_p);
}
/* Subroutine of type_unification_real. Args are like the variables
at the call site. ARG is an overloaded function (or template-id);
we try deducing template args from each of the overloads, and if
only one succeeds, we go with that. Modifies TARGS and returns
true on success. */
static bool
resolve_overloaded_unification (tree tparms,
				tree targs,
				tree parm,
				tree arg,
				unification_kind_t strict,
				int sub_strict,
				bool explain_p)
{
  /* Deduce into a scratch copy so TARGS is only updated on a unique
     match (see merge loop at the end).  */
  tree tempargs = copy_node (targs);
  int good = 0;
  tree goodfn = NULL_TREE;
  bool addr_p;
  if (TREE_CODE (arg) == ADDR_EXPR)
    {
      arg = TREE_OPERAND (arg, 0);
      addr_p = true;
    }
  else
    addr_p = false;
  if (TREE_CODE (arg) == COMPONENT_REF)
    /* Handle `&x' where `x' is some static or non-static member
       function name.  */
    arg = TREE_OPERAND (arg, 1);
  if (TREE_CODE (arg) == OFFSET_REF)
    arg = TREE_OPERAND (arg, 1);
  /* Strip baselink information.  */
  if (BASELINK_P (arg))
    arg = BASELINK_FUNCTIONS (arg);
  if (TREE_CODE (arg) == TEMPLATE_ID_EXPR)
    {
      /* If we got some explicit template args, we need to plug them into
	 the affected templates before we try to unify, in case the
	 explicit args will completely resolve the templates in question.  */
      int ok = 0;
      tree expl_subargs = TREE_OPERAND (arg, 1);
      arg = TREE_OPERAND (arg, 0);
      for (; arg; arg = OVL_NEXT (arg))
	{
	  tree fn = OVL_CURRENT (arg);
	  tree subargs, elem;
	  /* Non-template overloads can't accept template arguments.  */
	  if (TREE_CODE (fn) != TEMPLATE_DECL)
	    continue;
	  subargs = coerce_template_parms (DECL_INNERMOST_TEMPLATE_PARMS (fn),
					   expl_subargs, NULL_TREE, tf_none,
					   /*require_all_args=*/true,
					   /*use_default_args=*/true);
	  if (subargs != error_mark_node
	      && !any_dependent_template_arguments_p (subargs))
	    {
	      /* Fully resolved: try deducing against this candidate's
		 instantiated type.  */
	      elem = TREE_TYPE (instantiate_template (fn, subargs, tf_none));
	      if (try_one_overload (tparms, targs, tempargs, parm,
				    elem, strict, sub_strict, addr_p, explain_p)
		  && (!goodfn || !same_type_p (goodfn, elem)))
		{
		  goodfn = elem;
		  ++good;
		}
	    }
	  else if (subargs)
	    ++ok;
	}
      /* If no templates (or more than one) are fully resolved by the
	 explicit arguments, this template-id is a non-deduced context; it
	 could still be OK if we deduce all template arguments for the
	 enclosing call through other arguments.  */
      if (good != 1)
	good = ok;
    }
  else if (TREE_CODE (arg) != OVERLOAD
	   && TREE_CODE (arg) != FUNCTION_DECL)
    /* If ARG is, for example, "(0, &f)" then its type will be unknown
       -- but the deduction does not succeed because the expression is
       not just the function on its own.  */
    return false;
  else
    for (; arg; arg = OVL_NEXT (arg))
      if (try_one_overload (tparms, targs, tempargs, parm,
			    TREE_TYPE (OVL_CURRENT (arg)),
			    strict, sub_strict, addr_p, explain_p)
	  && (!goodfn || !decls_match (goodfn, OVL_CURRENT (arg))))
	{
	  goodfn = OVL_CURRENT (arg);
	  ++good;
	}
  /* [temp.deduct.type] A template-argument can be deduced from a pointer
     to function or pointer to member function argument if the set of
     overloaded functions does not contain function templates and at most
     one of a set of overloaded functions provides a unique match.
     So if we found multiple possibilities, we return success but don't
     deduce anything.  */
  if (good == 1)
    {
      /* Unique match: merge what was deduced into TARGS.  */
      int i = TREE_VEC_LENGTH (targs);
      for (; i--; )
	if (TREE_VEC_ELT (tempargs, i))
	  {
	    tree old = TREE_VEC_ELT (targs, i);
	    tree new_ = TREE_VEC_ELT (tempargs, i);
	    if (new_ && old && ARGUMENT_PACK_P (old)
		&& ARGUMENT_PACK_EXPLICIT_ARGS (old))
	      /* Don't forget explicit template arguments in a pack.  */
	      ARGUMENT_PACK_EXPLICIT_ARGS (new_)
		= ARGUMENT_PACK_EXPLICIT_ARGS (old);
	    TREE_VEC_ELT (targs, i) = new_;
	  }
    }
  if (good)
    return true;
  return false;
}
/* Core DR 115: In contexts where deduction is done and fails, or in
contexts where deduction is not done, if a template argument list is
specified and it, along with any default template arguments, identifies
a single function template specialization, then the template-id is an
lvalue for the function template specialization. */
tree
resolve_nondeduced_context (tree orig_expr, tsubst_flags_t complain)
{
  tree expr, offset, baselink;
  bool addr;

  /* Only expressions whose type is still unknown (overloaded) need
     resolving; anything else is returned untouched.  */
  if (!type_unknown_p (orig_expr))
    return orig_expr;

  expr = orig_expr;
  addr = false;
  offset = NULL_TREE;
  baselink = NULL_TREE;

  /* Strip the wrappers that may surround the template-id, remembering
     each so it can be rebuilt around the resolved function below.  */
  if (TREE_CODE (expr) == ADDR_EXPR)
    {
      expr = TREE_OPERAND (expr, 0);
      addr = true;
    }
  if (TREE_CODE (expr) == OFFSET_REF)
    {
      offset = expr;
      expr = TREE_OPERAND (expr, 1);
    }
  if (BASELINK_P (expr))
    {
      baselink = expr;
      expr = BASELINK_FUNCTIONS (expr);
    }

  if (TREE_CODE (expr) == TEMPLATE_ID_EXPR)
    {
      int good = 0;
      tree goodfn = NULL_TREE;

      /* If we got some explicit template args, we need to plug them into
	 the affected templates before we try to unify, in case the
	 explicit args will completely resolve the templates in question.  */
      tree expl_subargs = TREE_OPERAND (expr, 1);
      tree arg = TREE_OPERAND (expr, 0);
      tree badfn = NULL_TREE;
      tree badargs = NULL_TREE;

      /* Walk the overload set, counting distinct specializations that the
	 explicit arguments fully determine.  */
      for (; arg; arg = OVL_NEXT (arg))
	{
	  tree fn = OVL_CURRENT (arg);
	  tree subargs, elem;

	  /* Non-template overloads cannot be selected by explicit args.  */
	  if (TREE_CODE (fn) != TEMPLATE_DECL)
	    continue;

	  subargs = coerce_template_parms (DECL_INNERMOST_TEMPLATE_PARMS (fn),
					   expl_subargs, NULL_TREE, tf_none,
					   /*require_all_args=*/true,
					   /*use_default_args=*/true);
	  if (subargs != error_mark_node
	      && !any_dependent_template_arguments_p (subargs))
	    {
	      elem = instantiate_template (fn, subargs, tf_none);
	      if (elem == error_mark_node)
		{
		  /* Remember one failed candidate for diagnostics.  */
		  badfn = fn;
		  badargs = subargs;
		}
	      else if (elem && (!goodfn || !decls_match (goodfn, elem)))
		{
		  goodfn = elem;
		  ++good;
		}
	    }
	}
      if (good == 1)
	{
	  /* Exactly one specialization: commit to it and rebuild the
	     wrappers stripped above, in reverse order.  */
	  mark_used (goodfn);
	  expr = goodfn;
	  if (baselink)
	    expr = build_baselink (BASELINK_BINFO (baselink),
				   BASELINK_ACCESS_BINFO (baselink),
				   expr, BASELINK_OPTYPE (baselink));
	  if (offset)
	    {
	      tree base
		= TYPE_MAIN_VARIANT (TREE_TYPE (TREE_OPERAND (offset, 0)));
	      expr = build_offset_ref (base, expr, addr, complain);
	    }
	  if (addr)
	    expr = cp_build_addr_expr (expr, complain);
	  return expr;
	}
      else if (good == 0 && badargs && (complain & tf_error))
	/* There were no good options and at least one bad one, so let the
	   user know what the problem is.  */
	instantiate_template (badfn, badargs, complain);
    }
  return orig_expr;
}
/* Subroutine of resolve_overloaded_unification; does deduction for a single
overload. Fills TARGS with any deduced arguments, or error_mark_node if
different overloads deduce different arguments for a given parm.
ADDR_P is true if the expression for which deduction is being
performed was of the form "& fn" rather than simply "fn".
Returns 1 on success. */
static int
try_one_overload (tree tparms,
		  tree orig_targs,
		  tree targs,
		  tree parm,
		  tree arg,
		  unification_kind_t strict,
		  int sub_strict,
		  bool addr_p,
		  bool explain_p)
{
  int nargs;
  tree tempargs;
  int i;

  if (arg == error_mark_node)
    return 0;

  /* [temp.deduct.type] A template-argument can be deduced from a pointer
     to function or pointer to member function argument if the set of
     overloaded functions does not contain function templates and at most
     one of a set of overloaded functions provides a unique match.

     So if this is a template, just return success.  */
  if (uses_template_parms (arg))
    return 1;

  /* Adjust ARG's type the way the corresponding argument expression
     would be adjusted: a member function becomes pointer-to-member-
     function; "&fn" becomes pointer-to-function.  */
  if (TREE_CODE (arg) == METHOD_TYPE)
    arg = build_ptrmemfunc_type (build_pointer_type (arg));
  else if (addr_p)
    arg = build_pointer_type (arg);

  sub_strict |= maybe_adjust_types_for_deduction (strict, &parm, &arg, NULL);

  /* We don't copy orig_targs for this because if we have already deduced
     some template args from previous args, unify would complain when we
     try to deduce a template parameter for the same argument, even though
     there isn't really a conflict.  */
  nargs = TREE_VEC_LENGTH (targs);
  tempargs = make_tree_vec (nargs);

  /* Deduce into the scratch vector; a unify failure means this overload
     simply doesn't match.  */
  if (unify (tparms, tempargs, parm, arg, sub_strict, explain_p))
    return 0;

  /* First make sure we didn't deduce anything that conflicts with
     explicitly specified args.  */
  for (i = nargs; i--; )
    {
      tree elt = TREE_VEC_ELT (tempargs, i);
      tree oldelt = TREE_VEC_ELT (orig_targs, i);

      if (!elt)
	/*NOP*/;
      else if (uses_template_parms (elt))
	/* Since we're unifying against ourselves, we will fill in
	   template args used in the function parm list with our own
	   template parms.  Discard them.  */
	TREE_VEC_ELT (tempargs, i) = NULL_TREE;
      else if (oldelt && ARGUMENT_PACK_P (oldelt))
	{
	  /* Check that the argument at each index of the deduced argument pack
	     is equivalent to the corresponding explicitly specified argument.
	     We may have deduced more arguments than were explicitly specified,
	     and that's OK.  */

	  /* We used to assert ARGUMENT_PACK_INCOMPLETE_P (oldelt) here, but
	     that's wrong if we deduce the same argument pack from multiple
	     function arguments: it's only incomplete the first time.  */

	  tree explicit_pack = ARGUMENT_PACK_ARGS (oldelt);
	  tree deduced_pack = ARGUMENT_PACK_ARGS (elt);

	  if (TREE_VEC_LENGTH (deduced_pack)
	      < TREE_VEC_LENGTH (explicit_pack))
	    return 0;

	  for (int j = 0; j < TREE_VEC_LENGTH (explicit_pack); j++)
	    if (!template_args_equal (TREE_VEC_ELT (explicit_pack, j),
				      TREE_VEC_ELT (deduced_pack, j)))
	      return 0;
	}
      else if (oldelt && !template_args_equal (oldelt, elt))
	return 0;
    }

  /* No conflicts: publish the surviving deductions into TARGS for the
     caller.  */
  for (i = nargs; i--; )
    {
      tree elt = TREE_VEC_ELT (tempargs, i);

      if (elt)
	TREE_VEC_ELT (targs, i) = elt;
    }

  return 1;
}
/* PARM is a template class (perhaps with unbound template
parameters). ARG is a fully instantiated type. If ARG can be
bound to PARM, return ARG, otherwise return NULL_TREE. TPARMS and
TARGS are as for unify. */
static tree
try_class_unification (tree tparms, tree targs, tree parm, tree arg,
		       bool explain_p)
{
  tree copy_of_targs;

  /* Only specializations of a primary class template can match.  */
  if (!CLASSTYPE_SPECIALIZATION_OF_PRIMARY_TEMPLATE_P (arg))
    return NULL_TREE;
  else if (TREE_CODE (parm) == BOUND_TEMPLATE_TEMPLATE_PARM)
    /* Matches anything.  */;
  else if (most_general_template (CLASSTYPE_TI_TEMPLATE (arg))
	   != most_general_template (CLASSTYPE_TI_TEMPLATE (parm)))
    /* PARM and ARG come from different class templates; they cannot
       possibly unify.  */
    return NULL_TREE;

  /* We need to make a new template argument vector for the call to
     unify.  If we used TARGS, we'd clutter it up with the result of
     the attempted unification, even if this class didn't work out.
     We also don't want to commit ourselves to all the unifications
     we've already done, since unification is supposed to be done on
     an argument-by-argument basis.  In other words, consider the
     following pathological case:

       template <int I, int J, int K>
       struct S {};

       template <int I, int J>
       struct S<I, J, 2> : public S<I, I, I>, S<J, J, J> {};

       template <int I, int J, int K>
       void f(S<I, J, K>, S<I, I, I>);

       void g() {
	 S<0, 0, 0> s0;
	 S<0, 1, 2> s2;

	 f(s0, s2);
       }

     Now, by the time we consider the unification involving `s2', we
     already know that we must have `f<0, 0, 0>'.  But, even though
     `S<0, 1, 2>' is derived from `S<0, 0, 0>', the code is invalid
     because there are two ways to unify base classes of S<0, 1, 2>
     with S<I, I, I>.  If we kept the already deduced knowledge, we
     would reject the possibility I=1.  */
  copy_of_targs = make_tree_vec (TREE_VEC_LENGTH (targs));

  if (TREE_CODE (parm) == BOUND_TEMPLATE_TEMPLATE_PARM)
    {
      /* A bound template template parameter is matched via its own
	 argument-unification helper.  */
      if (unify_bound_ttp_args (tparms, copy_of_targs, parm, arg, explain_p))
	return NULL_TREE;
      return arg;
    }

  /* If unification failed, we're done.  */
  if (unify (tparms, copy_of_targs, CLASSTYPE_TI_ARGS (parm),
	     CLASSTYPE_TI_ARGS (arg), UNIFY_ALLOW_NONE, explain_p))
    return NULL_TREE;

  return arg;
}
/* Given a template type PARM and a class type ARG, find the unique
base type in ARG that is an instance of PARM. We do not examine
ARG itself; only its base-classes. If there is not exactly one
appropriate base class, return NULL_TREE. PARM may be the type of
a partial specialization, as well as a plain template type. Used
by unify. */
static enum template_base_result
get_template_base (tree tparms, tree targs, tree parm, tree arg,
		   bool explain_p, tree *result)
{
  tree rval = NULL_TREE;
  tree binfo;

  gcc_assert (RECORD_OR_UNION_CODE_P (TREE_CODE (arg)));

  binfo = TYPE_BINFO (complete_type (arg));
  if (!binfo)
    {
      /* The type could not be completed.  */
      *result = NULL_TREE;
      return tbr_incomplete_type;
    }

  /* Walk in inheritance graph order.  The search order is not
     important, and this avoids multiple walks of virtual bases.
     Starting at TREE_CHAIN skips the first node, so ARG itself is
     not examined — only its bases, per the function comment.  */
  for (binfo = TREE_CHAIN (binfo); binfo; binfo = TREE_CHAIN (binfo))
    {
      tree r = try_class_unification (tparms, targs, parm,
				      BINFO_TYPE (binfo), explain_p);

      if (r)
	{
	  /* If there is more than one satisfactory baseclass, then:

	       [temp.deduct.call]

	      If they yield more than one possible deduced A, the type
	      deduction fails.

	     applies.  */
	  if (rval && !same_type_p (r, rval))
	    {
	      *result = NULL_TREE;
	      return tbr_ambiguous_baseclass;
	    }

	  rval = r;
	}
    }

  /* RVAL may be NULL_TREE here (no base matched); the caller
     distinguishes that from the error cases via the return code.  */
  *result = rval;
  return tbr_success;
}
/* Returns the level of DECL, which declares a template parameter. */
static int
template_decl_level (tree decl)
{
  /* A template parameter is declared either as a TYPE_DECL or
     TEMPLATE_DECL (type / template template parameters, whose level is
     recorded on the underlying type) or as a PARM_DECL (non-type
     parameters, whose TEMPLATE_PARM_INDEX lives in DECL_INITIAL).  */
  enum tree_code code = TREE_CODE (decl);

  if (code == TYPE_DECL || code == TEMPLATE_DECL)
    return TEMPLATE_TYPE_LEVEL (TREE_TYPE (decl));

  if (code == PARM_DECL)
    return TEMPLATE_PARM_LEVEL (DECL_INITIAL (decl));

  gcc_unreachable ();
  /* Not reached; pacify compilers that require a return value.  */
  return 0;
}
/* Decide whether ARG can be unified with PARM, considering only the
cv-qualifiers of each type, given STRICT as documented for unify.
Returns nonzero iff the unification is OK on that basis. */
static int
check_cv_quals_for_unify (int strict, tree arg, tree parm)
{
  const int aq = cp_type_quals (arg);
  const int pq = cp_type_quals (parm);

  if (TREE_CODE (parm) == TEMPLATE_TYPE_PARM
      && !(strict & UNIFY_ALLOW_OUTER_MORE_CV_QUAL))
    {
      /* Although a CVR qualifier is ignored when being applied to a
	 substituted template parameter ([8.3.2]/1 for example), that
	 does not allow us to unify "const T" with "int&" because both
	 types are not of the form "cv-list T" [14.8.2.5 temp.deduct.type].
	 It is ok when we're allowing additional CV qualifiers
	 at the outer level [14.8.2.1]/3,1st bullet.  */
      const bool ref_or_fn_p = (TREE_CODE (arg) == REFERENCE_TYPE
				|| TREE_CODE (arg) == FUNCTION_TYPE
				|| TREE_CODE (arg) == METHOD_TYPE);
      if (ref_or_fn_p
	  && (pq & (TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE)))
	return 0;

      /* "restrict" only makes sense on pointers (or a template type
	 parameter that might become one).  */
      if (!POINTER_TYPE_P (arg)
	  && TREE_CODE (arg) != TEMPLATE_TYPE_PARM
	  && (pq & TYPE_QUAL_RESTRICT))
	return 0;
    }

  /* Unless extra qualifiers on the deduced ARG are allowed, every
     qualifier of PARM must already be present on ARG.  */
  if ((aq & pq) != pq
      && !(strict & (UNIFY_ALLOW_MORE_CV_QUAL
		     | UNIFY_ALLOW_OUTER_MORE_CV_QUAL)))
    return 0;

  /* Symmetrically, unless fewer qualifiers are allowed, every
     qualifier of ARG must be present on PARM.  */
  if ((pq & aq) != aq
      && !(strict & (UNIFY_ALLOW_LESS_CV_QUAL
		     | UNIFY_ALLOW_OUTER_LESS_CV_QUAL)))
    return 0;

  return 1;
}
/* Determines the LEVEL and INDEX for the template parameter PARM. */
void
template_parm_level_and_index (tree parm, int* level, int* index)
{
  const enum tree_code code = TREE_CODE (parm);

  /* Type and template template parameters record their position on the
     type node; non-type parameters use the TEMPLATE_PARM_INDEX
     accessors instead.  */
  const bool type_parm_p = (code == TEMPLATE_TYPE_PARM
			    || code == TEMPLATE_TEMPLATE_PARM
			    || code == BOUND_TEMPLATE_TEMPLATE_PARM);

  if (type_parm_p)
    {
      *index = TEMPLATE_TYPE_IDX (parm);
      *level = TEMPLATE_TYPE_LEVEL (parm);
    }
  else
    {
      *index = TEMPLATE_PARM_IDX (parm);
      *level = TEMPLATE_PARM_LEVEL (parm);
    }
}
/* Convenience wrapper used by unify and its helpers: recursively unify
   parameter P against argument A (with strictness S), and on failure
   (nonzero return from unify) immediately propagate failure to the
   enclosing function.  */
#define RECUR_AND_CHECK_FAILURE(TP, TA, P, A, S, EP) \
  do { \
    if (unify (TP, TA, P, A, S, EP)) \
      return 1; \
  } while (0)
/* Unifies the remaining arguments in PACKED_ARGS with the pack
expansion at the end of PACKED_PARMS. Returns 0 if the type
deduction succeeds, 1 otherwise. STRICT is the same as in
fn_type_unification. CALL_ARGS_P is true iff PACKED_ARGS is actually a
function call argument list. We'll need to adjust the arguments to make them
types. SUBR tells us if this is from a recursive call to
type_unification_real, or for comparing two template argument
lists. */
static int
unify_pack_expansion (tree tparms, tree targs, tree packed_parms,
		      tree packed_args, unification_kind_t strict,
		      bool subr, bool explain_p)
{
  /* The pack expansion is the last element of PACKED_PARMS; everything
     before index START was already handled by the caller.  */
  tree parm
    = TREE_VEC_ELT (packed_parms, TREE_VEC_LENGTH (packed_parms) - 1);
  tree pattern = PACK_EXPANSION_PATTERN (parm);
  tree pack, packs = NULL_TREE;
  int i, start = TREE_VEC_LENGTH (packed_parms) - 1;

  /* Add in any args remembered from an earlier partial instantiation.  */
  targs = add_to_template_args (PACK_EXPANSION_EXTRA_ARGS (parm), targs);
  int levels = TMPL_ARGS_DEPTH (targs);

  packed_args = expand_template_argument_pack (packed_args);

  int len = TREE_VEC_LENGTH (packed_args);

  /* Determine the parameter packs we will be deducing from the
     pattern, and record their current deductions.  */
  for (pack = PACK_EXPANSION_PARAMETER_PACKS (parm);
       pack; pack = TREE_CHAIN (pack))
    {
      tree parm_pack = TREE_VALUE (pack);
      int idx, level;

      /* Determine the index and level of this parameter pack.  */
      template_parm_level_and_index (parm_pack, &level, &idx);

      /* Packs belonging to outer template levels are not deduced here.  */
      if (level < levels)
	continue;

      /* Keep track of the parameter packs and their corresponding
	 argument packs.  PACKS is a TREE_LIST: TREE_PURPOSE holds the
	 parameter pack, TREE_VALUE its previously-recorded argument pack
	 (if any), and TREE_TYPE a scratch vector that will collect the
	 per-argument deductions made in the loop below.  */
      packs = tree_cons (parm_pack, TMPL_ARG (targs, level, idx), packs);
      TREE_TYPE (packs) = make_tree_vec (len - start);
    }

  /* Loop through all of the arguments that have not yet been
     unified and unify each with the pattern.  */
  for (i = start; i < len; i++)
    {
      tree parm;
      bool any_explicit = false;
      tree arg = TREE_VEC_ELT (packed_args, i);

      /* For each parameter pack, set its TMPL_ARG to either NULL_TREE
	 or the element of its argument pack at the current index if
	 this argument was explicitly specified.  */
      for (pack = packs; pack; pack = TREE_CHAIN (pack))
	{
	  int idx, level;
	  tree arg, pargs;
	  template_parm_level_and_index (TREE_PURPOSE (pack), &level, &idx);

	  arg = NULL_TREE;
	  if (TREE_VALUE (pack)
	      && (pargs = ARGUMENT_PACK_EXPLICIT_ARGS (TREE_VALUE (pack)))
	      && (i - start < TREE_VEC_LENGTH (pargs)))
	    {
	      any_explicit = true;
	      arg = TREE_VEC_ELT (pargs, i - start);
	    }
	  TMPL_ARG (targs, level, idx) = arg;
	}

      /* If we had explicit template arguments, substitute them into the
	 pattern before deduction.  */
      if (any_explicit)
	{
	  /* Some arguments might still be unspecified or dependent.
	     Substitute under processing_template_decl in that case so
	     tsubst tolerates the remaining dependencies.  */
	  bool dependent;
	  ++processing_template_decl;
	  dependent = any_dependent_template_arguments_p (targs);
	  if (!dependent)
	    --processing_template_decl;
	  parm = tsubst (pattern, targs,
			 explain_p ? tf_warning_or_error : tf_none,
			 NULL_TREE);
	  if (dependent)
	    --processing_template_decl;
	  if (parm == error_mark_node)
	    return 1;
	}
      else
	parm = pattern;

      /* Unify the pattern with the current argument.  */
      if (unify_one_argument (tparms, targs, parm, arg, subr, strict,
			      explain_p))
	return 1;

      /* For each parameter pack, collect the deduced value.  */
      for (pack = packs; pack; pack = TREE_CHAIN (pack))
	{
	  int idx, level;
	  template_parm_level_and_index (TREE_PURPOSE (pack), &level, &idx);
	  TREE_VEC_ELT (TREE_TYPE (pack), i - start) =
	    TMPL_ARG (targs, level, idx);
	}
    }

  /* Verify that the results of unification with the parameter packs
     produce results consistent with what we've seen before, and make
     the deduced argument packs available.  */
  for (pack = packs; pack; pack = TREE_CHAIN (pack))
    {
      tree old_pack = TREE_VALUE (pack);
      tree new_args = TREE_TYPE (pack);
      int i, len = TREE_VEC_LENGTH (new_args);
      int idx, level;
      bool nondeduced_p = false;

      /* By default keep the original deduced argument pack.
	 If necessary, more specific code is going to update the
	 resulting deduced argument later down in this function.  */
      template_parm_level_and_index (TREE_PURPOSE (pack), &level, &idx);
      TMPL_ARG (targs, level, idx) = old_pack;

      /* If NEW_ARGS contains any NULL_TREE entries, we didn't
	 actually deduce anything.  */
      for (i = 0; i < len && !nondeduced_p; ++i)
	if (TREE_VEC_ELT (new_args, i) == NULL_TREE)
	  nondeduced_p = true;
      if (nondeduced_p)
	continue;

      if (old_pack && ARGUMENT_PACK_INCOMPLETE_P (old_pack))
	{
	  /* If we had fewer function args than explicit template args,
	     just use the explicits.  */
	  tree explicit_args = ARGUMENT_PACK_EXPLICIT_ARGS (old_pack);
	  int explicit_len = TREE_VEC_LENGTH (explicit_args);
	  if (len < explicit_len)
	    new_args = explicit_args;
	}

      if (!old_pack)
	{
	  tree result;
	  /* Build the deduced *_ARGUMENT_PACK.  */
	  if (TREE_CODE (TREE_PURPOSE (pack)) == TEMPLATE_PARM_INDEX)
	    {
	      result = make_node (NONTYPE_ARGUMENT_PACK);
	      TREE_TYPE (result) =
		TREE_TYPE (TEMPLATE_PARM_DECL (TREE_PURPOSE (pack)));
	      TREE_CONSTANT (result) = 1;
	    }
	  else
	    result = cxx_make_type (TYPE_ARGUMENT_PACK);

	  SET_ARGUMENT_PACK_ARGS (result, new_args);

	  /* Note the deduced argument packs for this parameter
	     pack.  */
	  TMPL_ARG (targs, level, idx) = result;
	}
      else if (ARGUMENT_PACK_INCOMPLETE_P (old_pack)
	       && (ARGUMENT_PACK_ARGS (old_pack)
		   == ARGUMENT_PACK_EXPLICIT_ARGS (old_pack)))
	{
	  /* We only had the explicitly-provided arguments before, but
	     now we have a complete set of arguments.  */
	  tree explicit_args = ARGUMENT_PACK_EXPLICIT_ARGS (old_pack);

	  SET_ARGUMENT_PACK_ARGS (old_pack, new_args);
	  ARGUMENT_PACK_INCOMPLETE_P (old_pack) = 1;
	  ARGUMENT_PACK_EXPLICIT_ARGS (old_pack) = explicit_args;
	}
      else
	{
	  /* OLD_PACK was complete: the new deductions must agree with
	     it element-for-element.  */
	  tree bad_old_arg = NULL_TREE, bad_new_arg = NULL_TREE;
	  tree old_args = ARGUMENT_PACK_ARGS (old_pack);

	  if (!comp_template_args (old_args, new_args,
				   &bad_old_arg, &bad_new_arg))
	    /* Inconsistent unification of this parameter pack.  */
	    return unify_parameter_pack_inconsistent (explain_p,
						      bad_old_arg,
						      bad_new_arg);
	}
    }

  return unify_success (explain_p);
}
/* Handle unification of the domain of an array. PARM_DOM and ARG_DOM are
INTEGER_TYPEs representing the TYPE_DOMAIN of ARRAY_TYPEs. The other
parameters and return value are as for unify. */
static int
unify_array_domain (tree tparms, tree targs,
		    tree parm_dom, tree arg_dom,
		    bool explain_p)
{
  tree parm_max;
  tree arg_max;
  bool parm_cst;
  bool arg_cst;

  /* Our representation of array types uses "N - 1" as the
     TYPE_MAX_VALUE for an array with "N" elements, if "N" is
     not an integer constant.  We cannot unify arbitrarily
     complex expressions, so we eliminate the MINUS_EXPRs
     here.  */
  parm_max = TYPE_MAX_VALUE (parm_dom);
  parm_cst = TREE_CODE (parm_max) == INTEGER_CST;
  if (!parm_cst)
    {
      gcc_assert (TREE_CODE (parm_max) == MINUS_EXPR);
      parm_max = TREE_OPERAND (parm_max, 0);
    }
  arg_max = TYPE_MAX_VALUE (arg_dom);
  arg_cst = TREE_CODE (arg_max) == INTEGER_CST;
  if (!arg_cst)
    {
      /* The ARG_MAX may not be a simple MINUS_EXPR, if we are
	 trying to unify the type of a variable with the type
	 of a template parameter.  For example:

	   template <unsigned int N>
	   void f (char (&) [N]);
	   int g();
	   void h(int i) {
	     char a[g(i)];
	     f(a);
	   }

	 Here, the type of the ARG will be "int [g(i)]", and
	 may be a SAVE_EXPR, etc.  */
      if (TREE_CODE (arg_max) != MINUS_EXPR)
	return unify_vla_arg (explain_p, arg_dom);
      arg_max = TREE_OPERAND (arg_max, 0);
    }

  /* If only one of the bounds used a MINUS_EXPR, compensate
     by adding one to the other bound.  After this, both bounds are
     expressed on the same "N" scale and can be unified directly.  */
  if (parm_cst && !arg_cst)
    parm_max = fold_build2_loc (input_location, PLUS_EXPR,
				integer_type_node,
				parm_max,
				integer_one_node);
  else if (arg_cst && !parm_cst)
    arg_max = fold_build2_loc (input_location, PLUS_EXPR,
			       integer_type_node,
			       arg_max,
			       integer_one_node);

  /* UNIFY_ALLOW_INTEGER: a template-argument deduced from an array
     bound may be of any integral type.  */
  return unify (tparms, targs, parm_max, arg_max,
		UNIFY_ALLOW_INTEGER, explain_p);
}
/* Returns whether T, a P or A in unify, is a type, template or expression. */
enum pa_kind_t { pa_type, pa_tmpl, pa_expr };
static pa_kind_t
pa_kind (tree t)
{
  /* Classify the pattern of a pack expansion rather than the
     expansion node itself.  */
  tree probe = PACK_EXPANSION_P (t) ? PACK_EXPANSION_PATTERN (t) : t;

  /* Templates come first: template template parameters, unbound class
     templates, and type-template declarations.  */
  if (TREE_CODE (probe) == TEMPLATE_TEMPLATE_PARM
      || TREE_CODE (probe) == UNBOUND_CLASS_TEMPLATE
      || DECL_TYPE_TEMPLATE_P (probe))
    return pa_tmpl;

  /* Otherwise it is either a type or a plain expression.  */
  return TYPE_P (probe) ? pa_type : pa_expr;
}
/* Deduce the value of template parameters. TPARMS is the (innermost)
set of template parameters to a template. TARGS is the bindings
for those template parameters, as determined thus far; TARGS may
include template arguments for outer levels of template parameters
as well. PARM is a parameter to a template function, or a
subcomponent of that parameter; ARG is the corresponding argument.
This function attempts to match PARM with ARG in a manner
consistent with the existing assignments in TARGS. If more values
are deduced, then TARGS is updated.
Returns 0 if the type deduction succeeds, 1 otherwise. The
parameter STRICT is a bitwise or of the following flags:
UNIFY_ALLOW_NONE:
Require an exact match between PARM and ARG.
UNIFY_ALLOW_MORE_CV_QUAL:
Allow the deduced ARG to be more cv-qualified (by qualification
conversion) than ARG.
UNIFY_ALLOW_LESS_CV_QUAL:
Allow the deduced ARG to be less cv-qualified than ARG.
UNIFY_ALLOW_DERIVED:
Allow the deduced ARG to be a template base class of ARG,
or a pointer to a template base class of the type pointed to by
ARG.
UNIFY_ALLOW_INTEGER:
Allow any integral type to be deduced. See the TEMPLATE_PARM_INDEX
case for more information.
UNIFY_ALLOW_OUTER_LEVEL:
This is the outermost level of a deduction. Used to determine validity
of qualification conversions. A valid qualification conversion must
have const qualified pointers leading up to the inner type which
requires additional CV quals, except at the outer level, where const
is not required [conv.qual]. It would be normal to set this flag in
addition to setting UNIFY_ALLOW_MORE_CV_QUAL.
UNIFY_ALLOW_OUTER_MORE_CV_QUAL:
This is the outermost level of a deduction, and PARM can be more CV
qualified at this point.
UNIFY_ALLOW_OUTER_LESS_CV_QUAL:
This is the outermost level of a deduction, and PARM can be less CV
qualified at this point. */
static int
unify (tree tparms, tree targs, tree parm, tree arg, int strict,
bool explain_p)
{
int idx;
tree targ;
tree tparm;
int strict_in = strict;
tsubst_flags_t complain = (explain_p
? tf_warning_or_error
: tf_none);
/* I don't think this will do the right thing with respect to types.
But the only case I've seen it in so far has been array bounds, where
signedness is the only information lost, and I think that will be
okay. */
while (CONVERT_EXPR_P (parm))
parm = TREE_OPERAND (parm, 0);
if (arg == error_mark_node)
return unify_invalid (explain_p);
if (arg == unknown_type_node
|| arg == init_list_type_node)
/* We can't deduce anything from this, but we might get all the
template args from other function args. */
return unify_success (explain_p);
if (parm == any_targ_node || arg == any_targ_node)
return unify_success (explain_p);
/* If PARM uses template parameters, then we can't bail out here,
even if ARG == PARM, since we won't record unifications for the
template parameters. We might need them if we're trying to
figure out which of two things is more specialized. */
if (arg == parm && !uses_template_parms (parm))
return unify_success (explain_p);
/* Handle init lists early, so the rest of the function can assume
we're dealing with a type. */
if (BRACE_ENCLOSED_INITIALIZER_P (arg))
{
tree elt, elttype;
unsigned i;
tree orig_parm = parm;
/* Replace T with std::initializer_list<T> for deduction. */
if (TREE_CODE (parm) == TEMPLATE_TYPE_PARM
&& flag_deduce_init_list)
parm = listify (parm);
if (!is_std_init_list (parm)
&& TREE_CODE (parm) != ARRAY_TYPE)
/* We can only deduce from an initializer list argument if the
parameter is std::initializer_list or an array; otherwise this
is a non-deduced context. */
return unify_success (explain_p);
if (TREE_CODE (parm) == ARRAY_TYPE)
elttype = TREE_TYPE (parm);
else
{
elttype = TREE_VEC_ELT (CLASSTYPE_TI_ARGS (parm), 0);
/* Deduction is defined in terms of a single type, so just punt
on the (bizarre) std::initializer_list<T...>. */
if (PACK_EXPANSION_P (elttype))
return unify_success (explain_p);
}
FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (arg), i, elt)
{
int elt_strict = strict;
if (elt == error_mark_node)
return unify_invalid (explain_p);
if (!BRACE_ENCLOSED_INITIALIZER_P (elt))
{
tree type = TREE_TYPE (elt);
if (type == error_mark_node)
return unify_invalid (explain_p);
/* It should only be possible to get here for a call. */
gcc_assert (elt_strict & UNIFY_ALLOW_OUTER_LEVEL);
elt_strict |= maybe_adjust_types_for_deduction
(DEDUCE_CALL, &elttype, &type, elt);
elt = type;
}
RECUR_AND_CHECK_FAILURE (tparms, targs, elttype, elt, elt_strict,
explain_p);
}
if (TREE_CODE (parm) == ARRAY_TYPE
&& deducible_array_bound (TYPE_DOMAIN (parm)))
{
/* Also deduce from the length of the initializer list. */
tree max = size_int (CONSTRUCTOR_NELTS (arg));
tree idx = compute_array_index_type (NULL_TREE, max, tf_none);
if (idx == error_mark_node)
return unify_invalid (explain_p);
return unify_array_domain (tparms, targs, TYPE_DOMAIN (parm),
idx, explain_p);
}
/* If the std::initializer_list<T> deduction worked, replace the
deduced A with std::initializer_list<A>. */
if (orig_parm != parm)
{
idx = TEMPLATE_TYPE_IDX (orig_parm);
targ = TREE_VEC_ELT (INNERMOST_TEMPLATE_ARGS (targs), idx);
targ = listify (targ);
TREE_VEC_ELT (INNERMOST_TEMPLATE_ARGS (targs), idx) = targ;
}
return unify_success (explain_p);
}
/* If parm and arg aren't the same kind of thing (template, type, or
expression), fail early. */
if (pa_kind (parm) != pa_kind (arg))
return unify_invalid (explain_p);
/* Immediately reject some pairs that won't unify because of
cv-qualification mismatches. */
if (TREE_CODE (arg) == TREE_CODE (parm)
&& TYPE_P (arg)
/* It is the elements of the array which hold the cv quals of an array
type, and the elements might be template type parms. We'll check
when we recurse. */
&& TREE_CODE (arg) != ARRAY_TYPE
/* We check the cv-qualifiers when unifying with template type
parameters below. We want to allow ARG `const T' to unify with
PARM `T' for example, when computing which of two templates
is more specialized, for example. */
&& TREE_CODE (arg) != TEMPLATE_TYPE_PARM
&& !check_cv_quals_for_unify (strict_in, arg, parm))
return unify_cv_qual_mismatch (explain_p, parm, arg);
if (!(strict & UNIFY_ALLOW_OUTER_LEVEL)
&& TYPE_P (parm) && !CP_TYPE_CONST_P (parm))
strict &= ~UNIFY_ALLOW_MORE_CV_QUAL;
strict &= ~UNIFY_ALLOW_OUTER_LEVEL;
strict &= ~UNIFY_ALLOW_DERIVED;
strict &= ~UNIFY_ALLOW_OUTER_MORE_CV_QUAL;
strict &= ~UNIFY_ALLOW_OUTER_LESS_CV_QUAL;
switch (TREE_CODE (parm))
{
case TYPENAME_TYPE:
case SCOPE_REF:
case UNBOUND_CLASS_TEMPLATE:
/* In a type which contains a nested-name-specifier, template
argument values cannot be deduced for template parameters used
within the nested-name-specifier. */
return unify_success (explain_p);
case TEMPLATE_TYPE_PARM:
case TEMPLATE_TEMPLATE_PARM:
case BOUND_TEMPLATE_TEMPLATE_PARM:
tparm = TREE_VALUE (TREE_VEC_ELT (tparms, 0));
if (error_operand_p (tparm))
return unify_invalid (explain_p);
if (TEMPLATE_TYPE_LEVEL (parm)
!= template_decl_level (tparm))
/* The PARM is not one we're trying to unify. Just check
to see if it matches ARG. */
{
if (TREE_CODE (arg) == TREE_CODE (parm)
&& (is_auto (parm) ? is_auto (arg)
: same_type_p (parm, arg)))
return unify_success (explain_p);
else
return unify_type_mismatch (explain_p, parm, arg);
}
idx = TEMPLATE_TYPE_IDX (parm);
targ = TREE_VEC_ELT (INNERMOST_TEMPLATE_ARGS (targs), idx);
tparm = TREE_VALUE (TREE_VEC_ELT (tparms, idx));
if (error_operand_p (tparm))
return unify_invalid (explain_p);
/* Check for mixed types and values. */
if ((TREE_CODE (parm) == TEMPLATE_TYPE_PARM
&& TREE_CODE (tparm) != TYPE_DECL)
|| (TREE_CODE (parm) == TEMPLATE_TEMPLATE_PARM
&& TREE_CODE (tparm) != TEMPLATE_DECL))
gcc_unreachable ();
if (TREE_CODE (parm) == BOUND_TEMPLATE_TEMPLATE_PARM)
{
if ((strict_in & UNIFY_ALLOW_DERIVED)
&& CLASS_TYPE_P (arg))
{
/* First try to match ARG directly. */
tree t = try_class_unification (tparms, targs, parm, arg,
explain_p);
if (!t)
{
/* Otherwise, look for a suitable base of ARG, as below. */
enum template_base_result r;
r = get_template_base (tparms, targs, parm, arg,
explain_p, &t);
if (!t)
return unify_no_common_base (explain_p, r, parm, arg);
arg = t;
}
}
/* ARG must be constructed from a template class or a template
template parameter. */
else if (TREE_CODE (arg) != BOUND_TEMPLATE_TEMPLATE_PARM
&& !CLASSTYPE_SPECIALIZATION_OF_PRIMARY_TEMPLATE_P (arg))
return unify_template_deduction_failure (explain_p, parm, arg);
/* Deduce arguments T, i from TT<T> or TT<i>. */
if (unify_bound_ttp_args (tparms, targs, parm, arg, explain_p))
return 1;
arg = TYPE_TI_TEMPLATE (arg);
/* Fall through to deduce template name. */
}
if (TREE_CODE (parm) == TEMPLATE_TEMPLATE_PARM
|| TREE_CODE (parm) == BOUND_TEMPLATE_TEMPLATE_PARM)
{
/* Deduce template name TT from TT, TT<>, TT<T> and TT<i>. */
/* Simple cases: Value already set, does match or doesn't. */
if (targ != NULL_TREE && template_args_equal (targ, arg))
return unify_success (explain_p);
else if (targ)
return unify_inconsistency (explain_p, parm, targ, arg);
}
else
{
/* If PARM is `const T' and ARG is only `int', we don't have
a match unless we are allowing additional qualification.
If ARG is `const int' and PARM is just `T' that's OK;
that binds `const int' to `T'. */
if (!check_cv_quals_for_unify (strict_in | UNIFY_ALLOW_LESS_CV_QUAL,
arg, parm))
return unify_cv_qual_mismatch (explain_p, parm, arg);
/* Consider the case where ARG is `const volatile int' and
PARM is `const T'. Then, T should be `volatile int'. */
arg = cp_build_qualified_type_real
(arg, cp_type_quals (arg) & ~cp_type_quals (parm), tf_none);
if (arg == error_mark_node)
return unify_invalid (explain_p);
/* Simple cases: Value already set, does match or doesn't. */
if (targ != NULL_TREE && same_type_p (targ, arg))
return unify_success (explain_p);
else if (targ)
return unify_inconsistency (explain_p, parm, targ, arg);
/* Make sure that ARG is not a variable-sized array. (Note
that were talking about variable-sized arrays (like
`int[n]'), rather than arrays of unknown size (like
`int[]').) We'll get very confused by such a type since
the bound of the array is not constant, and therefore
not mangleable. Besides, such types are not allowed in
ISO C++, so we can do as we please here. We do allow
them for 'auto' deduction, since that isn't ABI-exposed. */
if (!is_auto (parm) && variably_modified_type_p (arg, NULL_TREE))
return unify_vla_arg (explain_p, arg);
/* Strip typedefs as in convert_template_argument. */
arg = canonicalize_type_argument (arg, tf_none);
}
/* If ARG is a parameter pack or an expansion, we cannot unify
against it unless PARM is also a parameter pack. */
if ((template_parameter_pack_p (arg) || PACK_EXPANSION_P (arg))
&& !template_parameter_pack_p (parm))
return unify_parameter_pack_mismatch (explain_p, parm, arg);
/* If the argument deduction results is a METHOD_TYPE,
then there is a problem.
METHOD_TYPE doesn't map to any real C++ type the result of
the deduction can not be of that type. */
if (TREE_CODE (arg) == METHOD_TYPE)
return unify_method_type_error (explain_p, arg);
TREE_VEC_ELT (INNERMOST_TEMPLATE_ARGS (targs), idx) = arg;
return unify_success (explain_p);
case TEMPLATE_PARM_INDEX:
tparm = TREE_VALUE (TREE_VEC_ELT (tparms, 0));
if (error_operand_p (tparm))
return unify_invalid (explain_p);
if (TEMPLATE_PARM_LEVEL (parm)
!= template_decl_level (tparm))
{
/* The PARM is not one we're trying to unify. Just check
to see if it matches ARG. */
int result = !(TREE_CODE (arg) == TREE_CODE (parm)
&& cp_tree_equal (parm, arg));
if (result)
unify_expression_unequal (explain_p, parm, arg);
return result;
}
idx = TEMPLATE_PARM_IDX (parm);
targ = TREE_VEC_ELT (INNERMOST_TEMPLATE_ARGS (targs), idx);
if (targ)
{
int x = !cp_tree_equal (targ, arg);
if (x)
unify_inconsistency (explain_p, parm, targ, arg);
return x;
}
/* [temp.deduct.type] If, in the declaration of a function template
with a non-type template-parameter, the non-type
template-parameter is used in an expression in the function
parameter-list and, if the corresponding template-argument is
deduced, the template-argument type shall match the type of the
template-parameter exactly, except that a template-argument
deduced from an array bound may be of any integral type.
The non-type parameter might use already deduced type parameters. */
++processing_template_decl;
tparm = tsubst (TREE_TYPE (parm), targs, 0, NULL_TREE);
--processing_template_decl;
if (tree a = type_uses_auto (tparm))
{
tparm = do_auto_deduction (tparm, arg, a, complain, adc_unify);
if (tparm == error_mark_node)
return 1;
}
if (!TREE_TYPE (arg))
/* Template-parameter dependent expression. Just accept it for now.
It will later be processed in convert_template_argument. */
;
else if (same_type_p (non_reference (TREE_TYPE (arg)),
non_reference (tparm)))
/* OK */;
else if ((strict & UNIFY_ALLOW_INTEGER)
&& CP_INTEGRAL_TYPE_P (tparm))
/* Convert the ARG to the type of PARM; the deduced non-type
template argument must exactly match the types of the
corresponding parameter. */
arg = fold (build_nop (tparm, arg));
else if (uses_template_parms (tparm))
{
/* We haven't deduced the type of this parameter yet. */
if (cxx_dialect >= cxx1z
/* We deduce from array bounds in try_array_deduction. */
&& !(strict & UNIFY_ALLOW_INTEGER))
{
/* Deduce it from the non-type argument. */
tree atype = TREE_TYPE (arg);
RECUR_AND_CHECK_FAILURE (tparms, targs,
tparm, atype,
UNIFY_ALLOW_NONE, explain_p);
}
else
/* Try again later. */
return unify_success (explain_p);
}
else
return unify_type_mismatch (explain_p, tparm, TREE_TYPE (arg));
/* If ARG is a parameter pack or an expansion, we cannot unify
against it unless PARM is also a parameter pack. */
if ((template_parameter_pack_p (arg) || PACK_EXPANSION_P (arg))
&& !TEMPLATE_PARM_PARAMETER_PACK (parm))
return unify_parameter_pack_mismatch (explain_p, parm, arg);
{
bool removed_attr = false;
arg = strip_typedefs_expr (arg, &removed_attr);
}
TREE_VEC_ELT (INNERMOST_TEMPLATE_ARGS (targs), idx) = arg;
return unify_success (explain_p);
case PTRMEM_CST:
{
/* A pointer-to-member constant can be unified only with
another constant. */
if (TREE_CODE (arg) != PTRMEM_CST)
return unify_ptrmem_cst_mismatch (explain_p, parm, arg);
/* Just unify the class member. It would be useless (and possibly
wrong, depending on the strict flags) to unify also
PTRMEM_CST_CLASS, because we want to be sure that both parm and
arg refer to the same variable, even if through different
classes. For instance:
struct A { int x; };
struct B : A { };
Unification of &A::x and &B::x must succeed. */
return unify (tparms, targs, PTRMEM_CST_MEMBER (parm),
PTRMEM_CST_MEMBER (arg), strict, explain_p);
}
case POINTER_TYPE:
{
if (!TYPE_PTR_P (arg))
return unify_type_mismatch (explain_p, parm, arg);
/* [temp.deduct.call]
A can be another pointer or pointer to member type that can
be converted to the deduced A via a qualification
conversion (_conv.qual_).
We pass down STRICT here rather than UNIFY_ALLOW_NONE.
This will allow for additional cv-qualification of the
pointed-to types if appropriate. */
if (TREE_CODE (TREE_TYPE (arg)) == RECORD_TYPE)
/* The derived-to-base conversion only persists through one
level of pointers. */
strict |= (strict_in & UNIFY_ALLOW_DERIVED);
return unify (tparms, targs, TREE_TYPE (parm),
TREE_TYPE (arg), strict, explain_p);
}
case REFERENCE_TYPE:
if (TREE_CODE (arg) != REFERENCE_TYPE)
return unify_type_mismatch (explain_p, parm, arg);
return unify (tparms, targs, TREE_TYPE (parm), TREE_TYPE (arg),
strict & UNIFY_ALLOW_MORE_CV_QUAL, explain_p);
case ARRAY_TYPE:
if (TREE_CODE (arg) != ARRAY_TYPE)
return unify_type_mismatch (explain_p, parm, arg);
if ((TYPE_DOMAIN (parm) == NULL_TREE)
!= (TYPE_DOMAIN (arg) == NULL_TREE))
return unify_type_mismatch (explain_p, parm, arg);
RECUR_AND_CHECK_FAILURE (tparms, targs, TREE_TYPE (parm), TREE_TYPE (arg),
strict & UNIFY_ALLOW_MORE_CV_QUAL, explain_p);
if (TYPE_DOMAIN (parm) != NULL_TREE)
return unify_array_domain (tparms, targs, TYPE_DOMAIN (parm),
TYPE_DOMAIN (arg), explain_p);
return unify_success (explain_p);
case REAL_TYPE:
case COMPLEX_TYPE:
case VECTOR_TYPE:
case INTEGER_TYPE:
case BOOLEAN_TYPE:
case ENUMERAL_TYPE:
case VOID_TYPE:
case NULLPTR_TYPE:
if (TREE_CODE (arg) != TREE_CODE (parm))
return unify_type_mismatch (explain_p, parm, arg);
/* We have already checked cv-qualification at the top of the
function. */
if (!same_type_ignoring_top_level_qualifiers_p (arg, parm))
return unify_type_mismatch (explain_p, parm, arg);
/* As far as unification is concerned, this wins. Later checks
will invalidate it if necessary. */
return unify_success (explain_p);
/* Types INTEGER_CST and MINUS_EXPR can come from array bounds. */
/* Type INTEGER_CST can come from ordinary constant template args. */
case INTEGER_CST:
while (CONVERT_EXPR_P (arg))
arg = TREE_OPERAND (arg, 0);
if (TREE_CODE (arg) != INTEGER_CST)
return unify_template_argument_mismatch (explain_p, parm, arg);
return (tree_int_cst_equal (parm, arg)
? unify_success (explain_p)
: unify_template_argument_mismatch (explain_p, parm, arg));
case TREE_VEC:
{
int i, len, argslen;
int parm_variadic_p = 0;
if (TREE_CODE (arg) != TREE_VEC)
return unify_template_argument_mismatch (explain_p, parm, arg);
len = TREE_VEC_LENGTH (parm);
argslen = TREE_VEC_LENGTH (arg);
/* Check for pack expansions in the parameters. */
for (i = 0; i < len; ++i)
{
if (PACK_EXPANSION_P (TREE_VEC_ELT (parm, i)))
{
if (i == len - 1)
/* We can unify against something with a trailing
parameter pack. */
parm_variadic_p = 1;
else
/* [temp.deduct.type]/9: If the template argument list of
P contains a pack expansion that is not the last
template argument, the entire template argument list
is a non-deduced context. */
return unify_success (explain_p);
}
}
/* If we don't have enough arguments to satisfy the parameters
(not counting the pack expression at the end), or we have
too many arguments for a parameter list that doesn't end in
a pack expression, we can't unify. */
if (parm_variadic_p
? argslen < len - parm_variadic_p
: argslen != len)
return unify_arity (explain_p, TREE_VEC_LENGTH (arg), len);
/* Unify all of the parameters that precede the (optional)
pack expression. */
for (i = 0; i < len - parm_variadic_p; ++i)
{
RECUR_AND_CHECK_FAILURE (tparms, targs,
TREE_VEC_ELT (parm, i),
TREE_VEC_ELT (arg, i),
UNIFY_ALLOW_NONE, explain_p);
}
if (parm_variadic_p)
return unify_pack_expansion (tparms, targs, parm, arg,
DEDUCE_EXACT,
/*subr=*/true, explain_p);
return unify_success (explain_p);
}
case RECORD_TYPE:
case UNION_TYPE:
if (TREE_CODE (arg) != TREE_CODE (parm))
return unify_type_mismatch (explain_p, parm, arg);
if (TYPE_PTRMEMFUNC_P (parm))
{
if (!TYPE_PTRMEMFUNC_P (arg))
return unify_type_mismatch (explain_p, parm, arg);
return unify (tparms, targs,
TYPE_PTRMEMFUNC_FN_TYPE (parm),
TYPE_PTRMEMFUNC_FN_TYPE (arg),
strict, explain_p);
}
else if (TYPE_PTRMEMFUNC_P (arg))
return unify_type_mismatch (explain_p, parm, arg);
if (CLASSTYPE_TEMPLATE_INFO (parm))
{
tree t = NULL_TREE;
if (strict_in & UNIFY_ALLOW_DERIVED)
{
/* First, we try to unify the PARM and ARG directly. */
t = try_class_unification (tparms, targs,
parm, arg, explain_p);
if (!t)
{
/* Fallback to the special case allowed in
[temp.deduct.call]:
If P is a class, and P has the form
template-id, then A can be a derived class of
the deduced A. Likewise, if P is a pointer to
a class of the form template-id, A can be a
pointer to a derived class pointed to by the
deduced A. */
enum template_base_result r;
r = get_template_base (tparms, targs, parm, arg,
explain_p, &t);
if (!t)
{
/* Don't give the derived diagnostic if we're
already dealing with the same template. */
bool same_template
= (CLASSTYPE_TEMPLATE_INFO (arg)
&& (CLASSTYPE_TI_TEMPLATE (parm)
== CLASSTYPE_TI_TEMPLATE (arg)));
return unify_no_common_base (explain_p && !same_template,
r, parm, arg);
}
}
}
else if (CLASSTYPE_TEMPLATE_INFO (arg)
&& (CLASSTYPE_TI_TEMPLATE (parm)
== CLASSTYPE_TI_TEMPLATE (arg)))
/* Perhaps PARM is something like S<U> and ARG is S<int>.
Then, we should unify `int' and `U'. */
t = arg;
else
/* There's no chance of unification succeeding. */
return unify_type_mismatch (explain_p, parm, arg);
return unify (tparms, targs, CLASSTYPE_TI_ARGS (parm),
CLASSTYPE_TI_ARGS (t), UNIFY_ALLOW_NONE, explain_p);
}
else if (!same_type_ignoring_top_level_qualifiers_p (parm, arg))
return unify_type_mismatch (explain_p, parm, arg);
return unify_success (explain_p);
case METHOD_TYPE:
case FUNCTION_TYPE:
{
unsigned int nargs;
tree *args;
tree a;
unsigned int i;
if (TREE_CODE (arg) != TREE_CODE (parm))
return unify_type_mismatch (explain_p, parm, arg);
/* CV qualifications for methods can never be deduced, they must
match exactly. We need to check them explicitly here,
because type_unification_real treats them as any other
cv-qualified parameter. */
if (TREE_CODE (parm) == METHOD_TYPE
&& (!check_cv_quals_for_unify
(UNIFY_ALLOW_NONE,
class_of_this_parm (arg),
class_of_this_parm (parm))))
return unify_cv_qual_mismatch (explain_p, parm, arg);
if (TREE_CODE (arg) == FUNCTION_TYPE
&& type_memfn_quals (parm) != type_memfn_quals (arg))
return unify_cv_qual_mismatch (explain_p, parm, arg);
if (type_memfn_rqual (parm) != type_memfn_rqual (arg))
return unify_type_mismatch (explain_p, parm, arg);
RECUR_AND_CHECK_FAILURE (tparms, targs, TREE_TYPE (parm),
TREE_TYPE (arg), UNIFY_ALLOW_NONE, explain_p);
nargs = list_length (TYPE_ARG_TYPES (arg));
args = XALLOCAVEC (tree, nargs);
for (a = TYPE_ARG_TYPES (arg), i = 0;
a != NULL_TREE && a != void_list_node;
a = TREE_CHAIN (a), ++i)
args[i] = TREE_VALUE (a);
nargs = i;
if (type_unification_real (tparms, targs, TYPE_ARG_TYPES (parm),
args, nargs, 1, DEDUCE_EXACT,
LOOKUP_NORMAL, NULL, explain_p))
return 1;
if (flag_noexcept_type)
{
tree pspec = TYPE_RAISES_EXCEPTIONS (parm);
tree aspec = canonical_eh_spec (TYPE_RAISES_EXCEPTIONS (arg));
if (pspec == NULL_TREE) pspec = noexcept_false_spec;
if (aspec == NULL_TREE) aspec = noexcept_false_spec;
if (TREE_PURPOSE (pspec) && TREE_PURPOSE (aspec)
&& uses_template_parms (TREE_PURPOSE (pspec)))
RECUR_AND_CHECK_FAILURE (tparms, targs, TREE_PURPOSE (pspec),
TREE_PURPOSE (aspec),
UNIFY_ALLOW_NONE, explain_p);
else if (nothrow_spec_p (pspec) && !nothrow_spec_p (aspec))
return unify_type_mismatch (explain_p, parm, arg);
}
return 0;
}
case OFFSET_TYPE:
/* Unify a pointer to member with a pointer to member function, which
deduces the type of the member as a function type. */
if (TYPE_PTRMEMFUNC_P (arg))
{
/* Check top-level cv qualifiers */
if (!check_cv_quals_for_unify (UNIFY_ALLOW_NONE, arg, parm))
return unify_cv_qual_mismatch (explain_p, parm, arg);
RECUR_AND_CHECK_FAILURE (tparms, targs, TYPE_OFFSET_BASETYPE (parm),
TYPE_PTRMEMFUNC_OBJECT_TYPE (arg),
UNIFY_ALLOW_NONE, explain_p);
/* Determine the type of the function we are unifying against. */
tree fntype = static_fn_type (arg);
return unify (tparms, targs, TREE_TYPE (parm), fntype, strict, explain_p);
}
if (TREE_CODE (arg) != OFFSET_TYPE)
return unify_type_mismatch (explain_p, parm, arg);
RECUR_AND_CHECK_FAILURE (tparms, targs, TYPE_OFFSET_BASETYPE (parm),
TYPE_OFFSET_BASETYPE (arg),
UNIFY_ALLOW_NONE, explain_p);
return unify (tparms, targs, TREE_TYPE (parm), TREE_TYPE (arg),
strict, explain_p);
case CONST_DECL:
if (DECL_TEMPLATE_PARM_P (parm))
return unify (tparms, targs, DECL_INITIAL (parm), arg, strict, explain_p);
if (arg != scalar_constant_value (parm))
return unify_template_argument_mismatch (explain_p, parm, arg);
return unify_success (explain_p);
case FIELD_DECL:
case TEMPLATE_DECL:
/* Matched cases are handled by the ARG == PARM test above. */
return unify_template_argument_mismatch (explain_p, parm, arg);
case VAR_DECL:
/* We might get a variable as a non-type template argument in parm if the
corresponding parameter is type-dependent. Make any necessary
adjustments based on whether arg is a reference. */
if (CONSTANT_CLASS_P (arg))
parm = fold_non_dependent_expr (parm);
else if (REFERENCE_REF_P (arg))
{
tree sub = TREE_OPERAND (arg, 0);
STRIP_NOPS (sub);
if (TREE_CODE (sub) == ADDR_EXPR)
arg = TREE_OPERAND (sub, 0);
}
/* Now use the normal expression code to check whether they match. */
goto expr;
case TYPE_ARGUMENT_PACK:
case NONTYPE_ARGUMENT_PACK:
return unify (tparms, targs, ARGUMENT_PACK_ARGS (parm),
ARGUMENT_PACK_ARGS (arg), strict, explain_p);
case TYPEOF_TYPE:
case DECLTYPE_TYPE:
case UNDERLYING_TYPE:
/* Cannot deduce anything from TYPEOF_TYPE, DECLTYPE_TYPE,
or UNDERLYING_TYPE nodes. */
return unify_success (explain_p);
case ERROR_MARK:
/* Unification fails if we hit an error node. */
return unify_invalid (explain_p);
case INDIRECT_REF:
if (REFERENCE_REF_P (parm))
{
bool pexp = PACK_EXPANSION_P (arg);
if (pexp)
arg = PACK_EXPANSION_PATTERN (arg);
if (REFERENCE_REF_P (arg))
arg = TREE_OPERAND (arg, 0);
if (pexp)
arg = make_pack_expansion (arg);
return unify (tparms, targs, TREE_OPERAND (parm, 0), arg,
strict, explain_p);
}
/* FALLTHRU */
default:
/* An unresolved overload is a nondeduced context. */
if (is_overloaded_fn (parm) || type_unknown_p (parm))
return unify_success (explain_p);
gcc_assert (EXPR_P (parm) || TREE_CODE (parm) == TRAIT_EXPR);
expr:
/* We must be looking at an expression. This can happen with
something like:
template <int I>
void foo(S<I>, S<I + 2>);
This is a "nondeduced context":
[deduct.type]
The nondeduced contexts are:
--A type that is a template-id in which one or more of
the template-arguments is an expression that references
a template-parameter.
In these cases, we assume deduction succeeded, but don't
actually infer any unifications. */
if (!uses_template_parms (parm)
&& !template_args_equal (parm, arg))
return unify_expression_unequal (explain_p, parm, arg);
else
return unify_success (explain_p);
}
}
#undef RECUR_AND_CHECK_FAILURE
/* Record that DECL (and every clone of it) may be defined in this
   translation unit if a definition turns out to be required.  */

static void
mark_definable (tree decl)
{
  DECL_NOT_REALLY_EXTERN (decl) = 1;

  /* Constructor/destructor clones share the definability of the
     function they were cloned from.  */
  tree clone;
  FOR_EACH_CLONE (clone, decl)
    DECL_NOT_REALLY_EXTERN (clone) = 1;
}
/* Called if RESULT is explicitly instantiated, or is a member of an
   explicitly instantiated class.

   RESULT is the instantiated declaration.  EXTERN_P is nonzero for an
   "extern template" declaration, in which case no definition is to be
   emitted in this translation unit.  Adjusts the linkage-related flags
   on RESULT accordingly.  */

void
mark_decl_instantiated (tree result, int extern_p)
{
  SET_DECL_EXPLICIT_INSTANTIATION (result);

  /* If this entity has already been written out, it's too late to
     make any modifications.  */
  if (TREE_ASM_WRITTEN (result))
    return;

  /* For anonymous namespace we don't need to do anything.  */
  if (decl_anon_ns_mem_p (result))
    {
      gcc_assert (!TREE_PUBLIC (result));
      return;
    }

  if (TREE_CODE (result) != FUNCTION_DECL)
    /* The TREE_PUBLIC flag for function declarations will have been
       set correctly by tsubst.  */
    TREE_PUBLIC (result) = 1;

  /* This might have been set by an earlier implicit instantiation.  */
  DECL_COMDAT (result) = 0;

  if (extern_p)
    /* "extern template": suppress emission here.  */
    DECL_NOT_REALLY_EXTERN (result) = 0;
  else
    {
      mark_definable (result);
      mark_needed (result);
      /* Always make artificials weak.  */
      if (DECL_ARTIFICIAL (result) && flag_weak)
	comdat_linkage (result);
      /* For WIN32 we also want to put explicit instantiations in
	 linkonce sections.  */
      else if (TREE_PUBLIC (result))
	maybe_make_one_only (result);
    }

  /* If EXTERN_P, then this function will not be emitted -- unless
     followed by an explicit instantiation, at which point its linkage
     will be adjusted.  If !EXTERN_P, then this function will be
     emitted here.  In neither circumstance do we want
     import_export_decl to adjust the linkage.  */
  DECL_INTERFACE_KNOWN (result) = 1;
}
/* Subroutine of more_specialized_fn: determine whether TARGS is missing
   any template arguments that actually matter.  Each undeduced slot is
   filled with error_mark_node, and then the parameter types used for
   partial ordering (the TREE_LIST nodes from ARGS up to, but not
   including, END) are substituted; if the error bubbles up, an
   undeduced argument was needed.  Returns true in that case.  */

static bool
check_undeduced_parms (tree targs, tree args, tree end)
{
  /* Poison every undeduced argument slot so that any use of it during
     substitution is detectable.  */
  bool any_missing = false;
  for (int ix = 0; ix < TREE_VEC_LENGTH (targs); ++ix)
    if (TREE_VEC_ELT (targs, ix) == NULL_TREE)
      {
	TREE_VEC_ELT (targs, ix) = error_mark_node;
	any_missing = true;
      }

  if (!any_missing)
    return false;

  /* Substitute into the partial-ordering parameter types; a resulting
     error means some undeduced argument was really used.  */
  tree substed = tsubst_arg_types (args, targs, end, tf_none, NULL_TREE);
  return substed == error_mark_node;
}
/* Given two function templates PAT1 and PAT2, return:
   1 if PAT1 is more specialized than PAT2 as described in [temp.func.order].
   -1 if PAT2 is more specialized than PAT1.
   0 if neither is more specialized.

   LEN indicates the number of parameters we should consider
   (defaulted parameters should not be considered).

   The 1998 std underspecified function template partial ordering, and
   DR214 addresses the issue.  We take pairs of arguments, one from
   each of the templates, and deduce them against each other.  One of
   the templates will be more specialized if all the *other*
   template's arguments deduce against its arguments and at least one
   of its arguments *does* *not* deduce against the other template's
   corresponding argument.  Deduction is done as for class templates.
   The arguments used in deduction have reference and top level cv
   qualifiers removed.  Iff both arguments were originally reference
   types *and* deduction succeeds in both directions, an lvalue reference
   wins against an rvalue reference and otherwise the template
   with the more cv-qualified argument wins for that pairing (if
   neither is more cv-qualified, they both are equal).  Unlike regular
   deduction, after all the arguments have been deduced in this way,
   we do *not* verify the deduced template argument values can be
   substituted into non-deduced contexts.

   The logic can be a bit confusing here, because we look at deduce1 and
   targs1 to see if pat2 is at least as specialized, and vice versa; if we
   can find template arguments for pat1 to make arg1 look like arg2, that
   means that arg2 is at least as specialized as arg1.  */

int
more_specialized_fn (tree pat1, tree pat2, int len)
{
  tree decl1 = DECL_TEMPLATE_RESULT (pat1);
  tree decl2 = DECL_TEMPLATE_RESULT (pat2);
  tree targs1 = make_tree_vec (DECL_NTPARMS (pat1));
  tree targs2 = make_tree_vec (DECL_NTPARMS (pat2));
  tree tparms1 = DECL_INNERMOST_TEMPLATE_PARMS (pat1);
  tree tparms2 = DECL_INNERMOST_TEMPLATE_PARMS (pat2);
  tree args1 = TYPE_ARG_TYPES (TREE_TYPE (decl1));
  tree args2 = TYPE_ARG_TYPES (TREE_TYPE (decl2));
  tree origs1, origs2;
  /* lose1 set => PAT1 is NOT more specialized; lose2 likewise for PAT2.  */
  bool lose1 = false;
  bool lose2 = false;

  /* Remove the this parameter from non-static member functions.  If
     one is a non-static member function and the other is not a static
     member function, remove the first parameter from that function
     also.  This situation occurs for operator functions where we
     locate both a member function (with this pointer) and non-member
     operator (with explicit first operand).  */
  if (DECL_NONSTATIC_MEMBER_FUNCTION_P (decl1))
    {
      len--; /* LEN is the number of significant arguments for DECL1 */
      args1 = TREE_CHAIN (args1);
      if (!DECL_STATIC_FUNCTION_P (decl2))
	args2 = TREE_CHAIN (args2);
    }
  else if (DECL_NONSTATIC_MEMBER_FUNCTION_P (decl2))
    {
      args2 = TREE_CHAIN (args2);
      if (!DECL_STATIC_FUNCTION_P (decl1))
	{
	  len--;
	  args1 = TREE_CHAIN (args1);
	}
    }

  /* If only one is a conversion operator, they are unordered.  */
  if (DECL_CONV_FN_P (decl1) != DECL_CONV_FN_P (decl2))
    return 0;

  /* Consider the return type for a conversion function */
  if (DECL_CONV_FN_P (decl1))
    {
      args1 = tree_cons (NULL_TREE, TREE_TYPE (TREE_TYPE (decl1)), args1);
      args2 = tree_cons (NULL_TREE, TREE_TYPE (TREE_TYPE (decl2)), args2);
      len++;
    }

  processing_template_decl++;

  /* Remember the full lists; check_undeduced_parms below needs the
     parameters we actually walked over.  */
  origs1 = args1;
  origs2 = args2;

  while (len--
	 /* Stop when an ellipsis is seen.  */
	 && args1 != NULL_TREE && args2 != NULL_TREE)
    {
      tree arg1 = TREE_VALUE (args1);
      tree arg2 = TREE_VALUE (args2);
      int deduce1, deduce2;
      /* -1 means "was not a reference"; otherwise the cv-quals of the
	 referred-to type.  */
      int quals1 = -1;
      int quals2 = -1;
      /* 0 = not a reference, 1 = lvalue reference, 2 = rvalue reference.  */
      int ref1 = 0;
      int ref2 = 0;

      if (TREE_CODE (arg1) == TYPE_PACK_EXPANSION
	  && TREE_CODE (arg2) == TYPE_PACK_EXPANSION)
	{
	  /* When both arguments are pack expansions, we need only
	     unify the patterns themselves.  */
	  arg1 = PACK_EXPANSION_PATTERN (arg1);
	  arg2 = PACK_EXPANSION_PATTERN (arg2);

	  /* This is the last comparison we need to do.  */
	  len = 0;
	}

      /* Strip references, remembering ref-ness and cv-quals for the
	 DR214 tie-breaks below.  */
      if (TREE_CODE (arg1) == REFERENCE_TYPE)
	{
	  ref1 = TYPE_REF_IS_RVALUE (arg1) + 1;
	  arg1 = TREE_TYPE (arg1);
	  quals1 = cp_type_quals (arg1);
	}

      if (TREE_CODE (arg2) == REFERENCE_TYPE)
	{
	  ref2 = TYPE_REF_IS_RVALUE (arg2) + 1;
	  arg2 = TREE_TYPE (arg2);
	  quals2 = cp_type_quals (arg2);
	}

      /* Drop top-level cv-qualifiers.  */
      arg1 = TYPE_MAIN_VARIANT (arg1);
      arg2 = TYPE_MAIN_VARIANT (arg2);

      if (TREE_CODE (arg1) == TYPE_PACK_EXPANSION)
	{
	  int i, len2 = remaining_arguments (args2);
	  tree parmvec = make_tree_vec (1);
	  tree argvec = make_tree_vec (len2);
	  tree ta = args2;

	  /* Setup the parameter vector, which contains only ARG1.  */
	  TREE_VEC_ELT (parmvec, 0) = arg1;

	  /* Setup the argument vector, which contains the remaining
	     arguments.  */
	  for (i = 0; i < len2; i++, ta = TREE_CHAIN (ta))
	    TREE_VEC_ELT (argvec, i) = TREE_VALUE (ta);

	  deduce1 = (unify_pack_expansion (tparms1, targs1, parmvec,
					   argvec, DEDUCE_EXACT,
					   /*subr=*/true, /*explain_p=*/false)
		     == 0);

	  /* We cannot deduce in the other direction, because ARG1 is
	     a pack expansion but ARG2 is not.  */
	  deduce2 = 0;
	}
      else if (TREE_CODE (arg2) == TYPE_PACK_EXPANSION)
	{
	  int i, len1 = remaining_arguments (args1);
	  tree parmvec = make_tree_vec (1);
	  tree argvec = make_tree_vec (len1);
	  tree ta = args1;

	  /* Setup the parameter vector, which contains only ARG1.  */
	  TREE_VEC_ELT (parmvec, 0) = arg2;

	  /* Setup the argument vector, which contains the remaining
	     arguments.  */
	  for (i = 0; i < len1; i++, ta = TREE_CHAIN (ta))
	    TREE_VEC_ELT (argvec, i) = TREE_VALUE (ta);

	  deduce2 = (unify_pack_expansion (tparms2, targs2, parmvec,
					   argvec, DEDUCE_EXACT,
					   /*subr=*/true, /*explain_p=*/false)
		     == 0);

	  /* We cannot deduce in the other direction, because ARG2 is
	     a pack expansion but ARG1 is not.*/
	  deduce1 = 0;
	}
      else
	{
	  /* The normal case, where neither argument is a pack
	     expansion.  */
	  deduce1 = (unify (tparms1, targs1, arg1, arg2,
			    UNIFY_ALLOW_NONE, /*explain_p=*/false)
		     == 0);
	  deduce2 = (unify (tparms2, targs2, arg2, arg1,
			    UNIFY_ALLOW_NONE, /*explain_p=*/false)
		     == 0);
	}

      /* If we couldn't deduce arguments for tparms1 to make arg1 match
	 arg2, then arg2 is not as specialized as arg1.  */
      if (!deduce1)
	lose2 = true;
      if (!deduce2)
	lose1 = true;

      /* "If, for a given type, deduction succeeds in both directions
	 (i.e., the types are identical after the transformations above)
	 and both P and A were reference types (before being replaced with
	 the type referred to above):
	 - if the type from the argument template was an lvalue reference and
	 the type from the parameter template was not, the argument type is
	 considered to be more specialized than the other; otherwise,
	 - if the type from the argument template is more cv-qualified
	 than the type from the parameter template (as described above),
	 the argument type is considered to be more specialized than the other;
	 otherwise,
	 - neither type is more specialized than the other."  */
      if (deduce1 && deduce2)
	{
	  if (ref1 && ref2 && ref1 != ref2)
	    {
	      /* Both were references with different value categories;
		 the lvalue reference (smaller ref code) wins.  */
	      if (ref1 > ref2)
		lose1 = true;
	      else
		lose2 = true;
	    }
	  else if (quals1 != quals2 && quals1 >= 0 && quals2 >= 0)
	    {
	      /* The more cv-qualified of the pair wins; if neither
		 set of quals contains the other, both lose.  */
	      if ((quals1 & quals2) == quals2)
		lose2 = true;
	      if ((quals1 & quals2) == quals1)
		lose1 = true;
	    }
	}

      if (lose1 && lose2)
	/* We've failed to deduce something in either direction.
	   These must be unordered.  */
	break;

      if (TREE_CODE (arg1) == TYPE_PACK_EXPANSION
	  || TREE_CODE (arg2) == TYPE_PACK_EXPANSION)
	/* We have already processed all of the arguments in our
	   handing of the pack expansion type.  */
	len = 0;

      args1 = TREE_CHAIN (args1);
      args2 = TREE_CHAIN (args2);
    }

  /* "In most cases, all template parameters must have values in order for
     deduction to succeed, but for partial ordering purposes a template
     parameter may remain without a value provided it is not used in the
     types being used for partial ordering."

     Thus, if we are missing any of the targs1 we need to substitute into
     origs1, then pat2 is not as specialized as pat1.  This can happen when
     there is a nondeduced context.  */
  if (!lose2 && check_undeduced_parms (targs1, origs1, args1))
    lose2 = true;
  if (!lose1 && check_undeduced_parms (targs2, origs2, args2))
    lose1 = true;

  processing_template_decl--;

  /* If both deductions succeed, the partial ordering selects the more
     constrained template.  */
  if (!lose1 && !lose2)
    {
      tree c1 = get_constraints (DECL_TEMPLATE_RESULT (pat1));
      tree c2 = get_constraints (DECL_TEMPLATE_RESULT (pat2));
      lose1 = !subsumes_constraints (c1, c2);
      lose2 = !subsumes_constraints (c2, c1);
    }

  /* All things being equal, if the next argument is a pack expansion
     for one function but not for the other, prefer the
     non-variadic function.  FIXME this is bogus; see c++/41958.  */
  if (lose1 == lose2
      && args1 && TREE_VALUE (args1)
      && args2 && TREE_VALUE (args2))
    {
      lose1 = TREE_CODE (TREE_VALUE (args1)) == TYPE_PACK_EXPANSION;
      lose2 = TREE_CODE (TREE_VALUE (args2)) == TYPE_PACK_EXPANSION;
    }

  /* Exactly one surviving "loser" flag yields an ordering.  */
  if (lose1 == lose2)
    return 0;
  else if (!lose1)
    return 1;
  else
    return -1;
}
/* Determine which of two partial specializations of TMPL is more
   specialized.

   PAT1 is a TREE_LIST whose TREE_VALUE is the TEMPLATE_DECL corresponding
   to the first partial specialization.  The TREE_PURPOSE is the
   innermost set of template parameters for the partial
   specialization.  PAT2 is similar, but for the second template.

   Return 1 if the first partial specialization is more specialized;
   -1 if the second is more specialized; 0 if neither is more
   specialized.

   See [temp.class.order] for information about determining which of
   two templates is more specialized.  */

static int
more_specialized_partial_spec (tree tmpl, tree pat1, tree pat2)
{
  tree targs;
  int winner = 0;
  bool any_deductions = false;

  tree tmpl1 = TREE_VALUE (pat1);
  tree tmpl2 = TREE_VALUE (pat2);
  tree specargs1 = TI_ARGS (get_template_info (DECL_TEMPLATE_RESULT (tmpl1)));
  tree specargs2 = TI_ARGS (get_template_info (DECL_TEMPLATE_RESULT (tmpl2)));

  /* Just like what happens for functions, if we are ordering between
     different template specializations, we may encounter dependent
     types in the arguments, and we need our dependency check functions
     to behave correctly.  */
  ++processing_template_decl;

  /* If PAT1's arguments can be produced from PAT2, PAT1 is at least as
     specialized; count it in PAT1's favor (and vice versa below).  */
  targs = get_partial_spec_bindings (tmpl, tmpl1, specargs2);
  if (targs)
    {
      --winner;
      any_deductions = true;
    }

  targs = get_partial_spec_bindings (tmpl, tmpl2, specargs1);
  if (targs)
    {
      ++winner;
      any_deductions = true;
    }
  --processing_template_decl;

  /* If both deductions succeed, the partial ordering selects the more
     constrained template.  */
  if (!winner && any_deductions)
    return more_constrained (tmpl1, tmpl2);

  /* In the case of a tie where at least one of the templates
     has a parameter pack at the end, the template with the most
     non-packed parameters wins.
     NOTE(review): the "!winner && any_deductions" return just above
     appears to make this whole branch unreachable (it requires the same
     winner == 0 && any_deductions) — confirm whether the variadic
     tie-break was meant to run before the constraint check.  */
  if (winner == 0
      && any_deductions
      && (template_args_variadic_p (TREE_PURPOSE (pat1))
	  || template_args_variadic_p (TREE_PURPOSE (pat2))))
    {
      tree args1 = INNERMOST_TEMPLATE_ARGS (TREE_PURPOSE (pat1));
      tree args2 = INNERMOST_TEMPLATE_ARGS (TREE_PURPOSE (pat2));
      int len1 = TREE_VEC_LENGTH (args1);
      int len2 = TREE_VEC_LENGTH (args2);

      /* We don't count the pack expansion at the end.  */
      if (template_args_variadic_p (TREE_PURPOSE (pat1)))
	--len1;
      if (template_args_variadic_p (TREE_PURPOSE (pat2)))
	--len2;

      if (len1 > len2)
	return 1;
      else if (len1 < len2)
	return -1;
    }

  return winner;
}
/* Return the template arguments that will produce the function signature
   DECL from the function template FN, with the explicit template
   arguments EXPLICIT_ARGS.  If CHECK_RETTYPE is true, the return type must
   also match.  Return NULL_TREE if no satisfactory arguments could be
   found.  */

static tree
get_bindings (tree fn, tree decl, tree explicit_args, bool check_rettype)
{
  gcc_assert (decl != DECL_TEMPLATE_RESULT (fn));

  tree targs = make_tree_vec (DECL_NTPARMS (fn));
  tree decl_type = TREE_TYPE (decl);

  /* Never do unification on the 'this' parameter.  */
  tree parm_types = skip_artificial_parms_for (decl,
					       TYPE_ARG_TYPES (decl_type));

  /* Flatten the parameter-type list into the array form that
     fn_type_unification expects; stop at the terminating void node.  */
  unsigned int max_args = list_length (parm_types);
  tree *arg_vec = XALLOCAVEC (tree, max_args);
  unsigned int count = 0;
  for (tree p = parm_types;
       p != NULL_TREE && p != void_list_node;
       p = TREE_CHAIN (p))
    arg_vec[count++] = TREE_VALUE (p);

  /* The return type participates in deduction for conversion functions,
     or whenever the caller asked for it.  */
  tree return_type = (check_rettype || DECL_CONV_FN_P (fn)
		      ? TREE_TYPE (decl_type) : NULL_TREE);

  if (fn_type_unification (fn, explicit_args, targs,
			   arg_vec, count, return_type,
			   DEDUCE_EXACT, LOOKUP_NORMAL, /*explain_p=*/false,
			   /*decltype*/false)
      == error_mark_node)
    return NULL_TREE;

  return targs;
}
/* Return the innermost template arguments that, when applied to a partial
   specialization SPEC_TMPL of TMPL, yield the ARGS.

   For example, suppose we have:

     template <class T, class U> struct S {};
     template <class T> struct S<T*, int> {};

   Then, suppose we want to get `S<double*, int>'.  SPEC_TMPL will be the
   partial specialization and the ARGS will be {double*, int}.  The resulting
   vector will be {double}, indicating that `T' is bound to `double'.

   Returns NULL_TREE if deduction fails or the deduced arguments do not
   reproduce ARGS.  */

static tree
get_partial_spec_bindings (tree tmpl, tree spec_tmpl, tree args)
{
  tree tparms = DECL_INNERMOST_TEMPLATE_PARMS (spec_tmpl);
  tree spec_args
    = TI_ARGS (get_template_info (DECL_TEMPLATE_RESULT (spec_tmpl)));
  int i, ntparms = TREE_VEC_LENGTH (tparms);
  tree deduced_args;
  tree innermost_deduced_args;

  /* DEDUCED_ARGS shares its innermost level with
     INNERMOST_DEDUCED_ARGS, so filling in the latter during
     unification fills in the former.  */
  innermost_deduced_args = make_tree_vec (ntparms);
  if (TMPL_ARGS_HAVE_MULTIPLE_LEVELS (args))
    {
      deduced_args = copy_node (args);
      SET_TMPL_ARGS_LEVEL (deduced_args,
			   TMPL_ARGS_DEPTH (deduced_args),
			   innermost_deduced_args);
    }
  else
    deduced_args = innermost_deduced_args;

  /* Before C++17 there is no array-bound deduction to retry, so
     pretend we already tried it.  */
  bool tried_array_deduction = (cxx_dialect < cxx1z);
 again:
  if (unify (tparms, deduced_args,
	     INNERMOST_TEMPLATE_ARGS (spec_args),
	     INNERMOST_TEMPLATE_ARGS (args),
	     UNIFY_ALLOW_NONE, /*explain_p=*/false))
    return NULL_TREE;

  /* Every template parameter of the partial specialization must have
     been deduced; otherwise, optionally retry with array-bound
     deduction (C++17) before giving up.  */
  for (i = 0; i < ntparms; ++i)
    if (! TREE_VEC_ELT (innermost_deduced_args, i))
      {
	if (!tried_array_deduction)
	  {
	    try_array_deduction (tparms, innermost_deduced_args,
				 INNERMOST_TEMPLATE_ARGS (spec_args));
	    tried_array_deduction = true;
	    if (TREE_VEC_ELT (innermost_deduced_args, i))
	      goto again;
	  }
	return NULL_TREE;
      }

  /* Push an instantiation context so that the substitution below
     counts against the recursion depth limit.  */
  tree tinst = build_tree_list (spec_tmpl, deduced_args);
  if (!push_tinst_level (tinst))
    {
      excessive_deduction_depth = true;
      return NULL_TREE;
    }

  /* Verify that nondeduced template arguments agree with the type
     obtained from argument deduction.

     For example:

       struct A { typedef int X; };
       template <class T, class U> struct C {};
       template <class T> struct C<T, typename T::X> {};

     Then with the instantiation `C<A, int>', we can deduce that
     `T' is `A' but unify () does not check whether `typename T::X'
     is `int'.  */
  spec_args = tsubst (spec_args, deduced_args, tf_none, NULL_TREE);
  if (spec_args != error_mark_node)
    spec_args = coerce_template_parms (DECL_INNERMOST_TEMPLATE_PARMS (tmpl),
				       INNERMOST_TEMPLATE_ARGS (spec_args),
				       tmpl, tf_none, false, false);

  pop_tinst_level ();

  if (spec_args == error_mark_node
      /* We only need to check the innermost arguments; the other
	 arguments will always agree.  */
      || !comp_template_args_porder (INNERMOST_TEMPLATE_ARGS (spec_args),
				     INNERMOST_TEMPLATE_ARGS (args)))
    return NULL_TREE;

  /* Now that we have bindings for all of the template arguments,
     ensure that the arguments deduced for the template template
     parameters have compatible template parameter lists.  See the use
     of template_template_parm_bindings_ok_p in fn_type_unification
     for more information.  */
  if (!template_template_parm_bindings_ok_p (tparms, deduced_args))
    return NULL_TREE;

  return deduced_args;
}
// Compare two function templates T1 and T2 by deducing bindings from
// one against the other.  Returns 1 if T1 is more specialized, -1 if
// T2 is more specialized, and 0 if neither wins.  When deduction
// succeeds in both directions, constraints act as the tie-breaker.
static int
more_specialized_inst (tree t1, tree t2)
{
  // Can each template's signature be produced from the other?
  bool t1_at_least = (get_bindings (t1, DECL_TEMPLATE_RESULT (t2),
				    NULL_TREE, true) != NULL_TREE);
  bool t2_at_least = (get_bindings (t2, DECL_TEMPLATE_RESULT (t1),
				    NULL_TREE, true) != NULL_TREE);

  // Both deductions succeeded: one may still be more constrained.
  if (t1_at_least && t2_at_least)
    return more_constrained (t1, t2);
  if (t1_at_least)
    return -1;
  if (t2_at_least)
    return 1;
  return 0;
}
/* TEMPLATES is a TREE_LIST.  Each TREE_VALUE is a TEMPLATE_DECL.
   Return the TREE_LIST node with the most specialized template, if
   any.  If there is no most specialized template, the error_mark_node
   is returned.

   Note that this function does not look at, or modify, the
   TREE_PURPOSE or TREE_TYPE of any of the nodes.  Since the node
   returned is one of the elements of INSTANTIATIONS, callers may
   store information in the TREE_PURPOSE or TREE_TYPE of the nodes,
   and retrieve it from the value returned.  */

tree
most_specialized_instantiation (tree templates)
{
  tree fn, champ;

  ++processing_template_decl;

  /* Tournament pass: CHAMP is the best candidate seen so far.  */
  champ = templates;
  for (fn = TREE_CHAIN (templates); fn; fn = TREE_CHAIN (fn))
    {
      int fate = more_specialized_inst (TREE_VALUE (champ), TREE_VALUE (fn));
      if (fate == -1)
	champ = fn;
      else if (!fate)
	{
	  /* Equally specialized, move to next function.  If there
	     is no next function, nothing's most specialized.  */
	  fn = TREE_CHAIN (fn);
	  champ = fn;
	  if (!fn)
	    break;
	}
    }

  /* Verification pass: the tournament only compared CHAMP against
     candidates after the point where it became champion, so it must
     still beat everything earlier in the list.  */
  if (champ)
    /* Now verify that champ is better than everything earlier in the
       instantiation list.  */
    for (fn = templates; fn != champ; fn = TREE_CHAIN (fn)) {
      if (more_specialized_inst (TREE_VALUE (champ), TREE_VALUE (fn)) != 1)
	{
	  champ = NULL_TREE;
	  break;
	}
    }

  processing_template_decl--;

  if (!champ)
    return error_mark_node;

  return champ;
}
/* If DECL is a specialization of some template, return the most
   general such template.  Otherwise, returns NULL_TREE.
   For example, given:
     template <class T> struct S { template <class U> void f(U); };
   if TMPL is `template <class U> void S<int>::f(U)' this will return
   the full template.  This function will not trace past partial
   specializations, however.  For example, given in addition:
     template <class T> struct S<T*> { template <class U> void f(U); };
   if TMPL is `template <class U> void S<int*>::f(U)' this will return
   `template <class T> template <class U> S<T*>::f(U)'.  */
tree
most_general_template (tree decl)
{
  /* If handed a declaration rather than a TEMPLATE_DECL, climb to its
     template via the attached template info.  */
  if (TREE_CODE (decl) != TEMPLATE_DECL)
    {
      if (tree tinfo = get_template_info (decl))
	decl = TI_TEMPLATE (tinfo);
      /* The TI_TEMPLATE can be an IDENTIFIER_NODE for a template
	 friend, or a FIELD_DECL for a capture pack; give up then.  */
      if (TREE_CODE (decl) != TEMPLATE_DECL)
	return NULL_TREE;
    }

  /* Walk the chain of progressively more general templates, stopping
     at any of the barriers below.  */
  for (;;)
    {
      if (!DECL_LANG_SPECIFIC (decl) || !DECL_TEMPLATE_INFO (decl))
	break;

      /* The DECL_TI_TEMPLATE can be an IDENTIFIER_NODE in some cases.
	 (See cp-tree.h for details.)  */
      tree next = DECL_TI_TEMPLATE (decl);
      if (TREE_CODE (next) != TEMPLATE_DECL)
	break;

      /* Do not trace past a partial specialization of a class
	 template (alias templates excepted).  */
      tree type = TREE_TYPE (decl);
      if (CLASS_TYPE_P (type)
	  && !TYPE_DECL_ALIAS_P (TYPE_NAME (type))
	  && CLASSTYPE_TEMPLATE_SPECIALIZATION (type))
	break;

      /* Stop if we run into an explicitly specialized class template.  */
      if (!DECL_NAMESPACE_SCOPE_P (decl)
	  && DECL_CONTEXT (decl)
	  && CLASSTYPE_TEMPLATE_SPECIALIZATION (DECL_CONTEXT (decl)))
	break;

      decl = next;
    }

  return decl;
}
/* Return the most specialized of the template partial specializations
   which can produce TARGET, a specialization of some class or variable
   template. The value returned is actually a TREE_LIST; the TREE_VALUE is
   a TEMPLATE_DECL node corresponding to the partial specialization, while
   the TREE_PURPOSE is the set of template arguments that must be
   substituted into the template pattern in order to generate TARGET.
   If the choice of partial specialization is ambiguous, a diagnostic
   is issued, and the error_mark_node is returned. If there are no
   partial specializations matching TARGET, then NULL_TREE is
   returned, indicating that the primary template should be used. */
static tree
most_specialized_partial_spec (tree target, tsubst_flags_t complain)
{
  tree list = NULL_TREE;
  tree t;
  tree champ;
  int fate;
  bool ambiguous_p;
  tree outer_args = NULL_TREE;
  tree tmpl, args;

  /* Step 1: recover the template and argument list that produced
     TARGET, for each of the three supported forms of TARGET.  */
  if (TYPE_P (target))
    {
      tree tinfo = CLASSTYPE_TEMPLATE_INFO (target);
      tmpl = TI_TEMPLATE (tinfo);
      args = TI_ARGS (tinfo);
    }
  else if (TREE_CODE (target) == TEMPLATE_ID_EXPR)
    {
      tmpl = TREE_OPERAND (target, 0);
      args = TREE_OPERAND (target, 1);
    }
  else if (VAR_P (target))
    {
      tree tinfo = DECL_TEMPLATE_INFO (target);
      tmpl = TI_TEMPLATE (tinfo);
      args = TI_ARGS (tinfo);
    }
  else
    gcc_unreachable ();

  /* Partial specializations hang off the most general template, not
     necessarily off TMPL itself.  */
  tree main_tmpl = most_general_template (tmpl);

  /* For determining which partial specialization to use, only the
     innermost args are interesting. */
  if (TMPL_ARGS_HAVE_MULTIPLE_LEVELS (args))
    {
      outer_args = strip_innermost_template_args (args, 1);
      args = INNERMOST_TEMPLATE_ARGS (args);
    }

  /* Step 2: collect in LIST every partial specialization whose pattern
     can be deduced to match ARGS (and, with concepts enabled, whose
     constraints are satisfied).  */
  for (t = DECL_TEMPLATE_SPECIALIZATIONS (main_tmpl); t; t = TREE_CHAIN (t))
    {
      tree spec_args;
      tree spec_tmpl = TREE_VALUE (t);

      if (outer_args)
	{
	  /* Substitute in the template args from the enclosing class. */
	  ++processing_template_decl;
	  spec_tmpl = tsubst (spec_tmpl, outer_args, tf_none, NULL_TREE);
	  --processing_template_decl;
	}

      if (spec_tmpl == error_mark_node)
	return error_mark_node;

      spec_args = get_partial_spec_bindings (tmpl, spec_tmpl, args);
      if (spec_args)
	{
	  if (outer_args)
	    spec_args = add_to_template_args (outer_args, spec_args);

	  /* Keep the candidate only if the constraints are satisfied,
	     or if we're not compiling with concepts. */
	  if (!flag_concepts
	      || constraints_satisfied_p (spec_tmpl, spec_args))
	    {
	      list = tree_cons (spec_args, TREE_VALUE (t), list);
	      TREE_TYPE (list) = TREE_TYPE (t);
	    }
	}
    }

  /* No matching partial specialization: use the primary template.  */
  if (! list)
    return NULL_TREE;

  ambiguous_p = false;
  t = list;
  champ = t;
  t = TREE_CHAIN (t);
  /* Step 3: tournament pass, keeping a running champion.  A tie
     (fate == 0) skips past both candidates; a tie at the end of the
     list means nothing is most specialized.  */
  for (; t; t = TREE_CHAIN (t))
    {
      fate = more_specialized_partial_spec (tmpl, champ, t);
      if (fate == 1)
	;
      else
	{
	  if (fate == 0)
	    {
	      t = TREE_CHAIN (t);
	      if (! t)
		{
		  ambiguous_p = true;
		  break;
		}
	    }
	  champ = t;
	}
    }

  /* Step 4: verification pass — the champion must also beat every
     candidate that appeared before it took the title.  */
  if (!ambiguous_p)
    for (t = list; t && t != champ; t = TREE_CHAIN (t))
      {
	fate = more_specialized_partial_spec (tmpl, champ, t);
	if (fate != 1)
	  {
	    ambiguous_p = true;
	    break;
	  }
      }

  /* Step 5: on ambiguity, report all candidates (unless diagnostics
     are suppressed) and return error_mark_node.  */
  if (ambiguous_p)
    {
      const char *str;
      char *spaces = NULL;
      if (!(complain & tf_error))
	return error_mark_node;
      if (TYPE_P (target))
	error ("ambiguous template instantiation for %q#T", target);
      else
	error ("ambiguous template instantiation for %q#D", target);
      str = ngettext ("candidate is:", "candidates are:", list_length (list));
      for (t = list; t; t = TREE_CHAIN (t))
	{
	  tree subst = build_tree_list (TREE_VALUE (t), TREE_PURPOSE (t));
	  inform (DECL_SOURCE_LOCATION (TREE_VALUE (t)),
		  "%s %#S", spaces ? spaces : str, subst);
	  /* Align subsequent candidates under the first line.  */
	  spaces = spaces ? spaces : get_spaces (str);
	}
      free (spaces);
      return error_mark_node;
    }

  return champ;
}
/* Explicitly instantiate DECL, as in "template int f<int>(int);".
   STORAGE is the storage-class specifier attached to the directive
   (NULL_TREE if there was none).  */
void
do_decl_instantiation (tree decl, tree storage)
{
  tree result = NULL_TREE;
  int extern_p = 0;

  if (!decl || decl == error_mark_node)
    /* An error occurred, for which grokdeclarator has already issued
       an appropriate message. */
    return;
  else if (! DECL_LANG_SPECIFIC (decl))
    {
      error ("explicit instantiation of non-template %q#D", decl);
      return;
    }

  /* A VAR_DECL may still be a specialization of a variable template.  */
  bool var_templ = (DECL_TEMPLATE_INFO (decl)
		    && variable_template_p (DECL_TI_TEMPLATE (decl)));

  if (VAR_P (decl) && !var_templ)
    {
      /* There is an asymmetry here in the way VAR_DECLs and
	 FUNCTION_DECLs are handled by grokdeclarator. In the case of
	 the latter, the DECL we get back will be marked as a
	 template instantiation, and the appropriate
	 DECL_TEMPLATE_INFO will be set up. This does not happen for
	 VAR_DECLs so we do the lookup here. Probably, grokdeclarator
	 should handle VAR_DECLs as it currently handles
	 FUNCTION_DECLs. */
      if (!DECL_CLASS_SCOPE_P (decl))
	{
	  error ("%qD is not a static data member of a class template", decl);
	  return;
	}
      result = lookup_field (DECL_CONTEXT (decl), DECL_NAME (decl), 0, false);
      if (!result || !VAR_P (result))
	{
	  error ("no matching template for %qD found", decl);
	  return;
	}
      if (!same_type_p (TREE_TYPE (result), TREE_TYPE (decl)))
	{
	  error ("type %qT for explicit instantiation %qD does not match "
		 "declared type %qT", TREE_TYPE (result), decl,
		 TREE_TYPE (decl));
	  return;
	}
    }
  else if (TREE_CODE (decl) != FUNCTION_DECL && !var_templ)
    {
      error ("explicit instantiation of %q#D", decl);
      return;
    }
  else
    result = decl;

  /* Check for various error cases. Note that if the explicit
     instantiation is valid the RESULT will currently be marked as an
     *implicit* instantiation; DECL_EXPLICIT_INSTANTIATION is not set
     until we get here. */
  if (DECL_TEMPLATE_SPECIALIZATION (result))
    {
      /* DR 259 [temp.spec].
	 Both an explicit instantiation and a declaration of an explicit
	 specialization shall not appear in a program unless the explicit
	 instantiation follows a declaration of the explicit specialization.
	 For a given set of template parameters, if an explicit
	 instantiation of a template appears after a declaration of an
	 explicit specialization for that template, the explicit
	 instantiation has no effect. */
      return;
    }
  else if (DECL_EXPLICIT_INSTANTIATION (result))
    {
      /* [temp.spec]
	 No program shall explicitly instantiate any template more
	 than once.
	 We check DECL_NOT_REALLY_EXTERN so as not to complain when
	 the first instantiation was `extern' and the second is not,
	 and EXTERN_P for the opposite case.
	 NOTE(review): EXTERN_P is still 0 at this point -- it is only
	 assigned from STORAGE further below -- so "!extern_p" cannot
	 be false here; confirm whether the storage-class handling was
	 meant to run before these checks.  */
      if (DECL_NOT_REALLY_EXTERN (result) && !extern_p)
	permerror (input_location, "duplicate explicit instantiation of %q#D", result);
      /* If an "extern" explicit instantiation follows an ordinary
	 explicit instantiation, the template is instantiated. */
      if (extern_p)
	return;
    }
  else if (!DECL_IMPLICIT_INSTANTIATION (result))
    {
      error ("no matching template for %qD found", result);
      return;
    }
  else if (!DECL_TEMPLATE_INFO (result))
    {
      permerror (input_location, "explicit instantiation of non-template %q#D", result);
      return;
    }

  /* Interpret the storage-class specifier; only "extern" is valid on
     an explicit instantiation of a declaration.  */
  if (storage == NULL_TREE)
    ;
  else if (storage == ridpointers[(int) RID_EXTERN])
    {
      if (!in_system_header_at (input_location) && (cxx_dialect == cxx98))
	pedwarn (input_location, OPT_Wpedantic,
		 "ISO C++ 1998 forbids the use of %<extern%> on explicit "
		 "instantiations");
      extern_p = 1;
    }
  else
    error ("storage class %qD applied to template instantiation", storage);

  check_explicit_instantiation_namespace (result);
  mark_decl_instantiated (result, extern_p);
  /* An "extern template" directive only declares; don't emit a
     definition for it.  */
  if (! extern_p)
    instantiate_decl (result, /*defer_ok=*/true,
		      /*expl_inst_class_mem_p=*/false);
}
/* Record that class type T has been the subject of an explicit
   instantiation.  EXTERN_P is nonzero for an "extern template"
   directive, in which case T is interface-only and no code or debug
   info is emitted for it here.  */
static void
mark_class_instantiated (tree t, int extern_p)
{
  SET_CLASSTYPE_EXPLICIT_INSTANTIATION (t);
  SET_CLASSTYPE_INTERFACE_KNOWN (t);
  CLASSTYPE_INTERFACE_ONLY (t) = extern_p;
  TYPE_DECL_SUPPRESS_DEBUG (TYPE_NAME (t)) = extern_p;

  /* An extern instantiation only declares; nothing more to do.  */
  if (extern_p)
    return;

  CLASSTYPE_DEBUG_REQUESTED (t) = 1;
  rest_of_type_compilation (t, 1);
}
/* Called from do_type_instantiation through binding_table_foreach to
   do recursive instantiation for the type bound in ENTRY.  DATA points
   at the storage-class tree to forward.  */
static void
bt_instantiate_type_proc (binding_entry entry, void *data)
{
  tree type = entry->type;

  /* Only class-like types can be instantiated.  */
  if (!MAYBE_CLASS_TYPE_P (type))
    return;
  /* Skip dependent nested classes; we have no arguments for them.  */
  if (uses_template_parms (CLASSTYPE_TI_ARGS (type)))
    return;

  do_type_instantiation (TYPE_MAIN_DECL (type), *(tree *) data, 0);
}
/* Called from do_type_instantiation to instantiate a member
   (a member function or a static member variable) of an
   explicitly instantiated class template.  EXTERN_P is nonzero for an
   "extern template" directive, in which case the member is only
   marked, not instantiated.  */
static void
instantiate_class_member (tree decl, int extern_p)
{
  mark_decl_instantiated (decl, extern_p);
  if (extern_p)
    return;
  instantiate_decl (decl, /*defer_ok=*/true, /*expl_inst_class_mem_p=*/true);
}
/* Perform an explicit instantiation of template class T. STORAGE, if
   non-null, is the RID for extern, inline or static. COMPLAIN is
   nonzero if this is called from the parser, zero if called recursively,
   since the standard is unclear (as detailed below). */
void
do_type_instantiation (tree t, tree storage, tsubst_flags_t complain)
{
  int extern_p = 0;
  /* Set for "inline": mark the class but do not instantiate members.  */
  int nomem_p = 0;
  /* Set for "static": skip member functions below.  */
  int static_p = 0;
  int previous_instantiation_extern_p = 0;

  if (TREE_CODE (t) == TYPE_DECL)
    t = TREE_TYPE (t);

  /* T must be a class specialization of some template.  */
  if (! CLASS_TYPE_P (t) || ! CLASSTYPE_TEMPLATE_INFO (t))
    {
      tree tmpl =
	(TYPE_TEMPLATE_INFO (t)) ? TYPE_TI_TEMPLATE (t) : NULL;
      if (tmpl)
	error ("explicit instantiation of non-class template %qD", tmpl);
      else
	error ("explicit instantiation of non-template type %qT", t);
      return;
    }

  /* Instantiating T requires its definition to be available.  */
  complete_type (t);

  if (!COMPLETE_TYPE_P (t))
    {
      if (complain & tf_error)
	error ("explicit instantiation of %q#T before definition of template",
	       t);
      return;
    }

  /* Decode the storage-class keyword, pedwarning where the standard
     forbids it (system headers are exempt).  */
  if (storage != NULL_TREE)
    {
      if (!in_system_header_at (input_location))
	{
	  if (storage == ridpointers[(int) RID_EXTERN])
	    {
	      if (cxx_dialect == cxx98)
		pedwarn (input_location, OPT_Wpedantic,
			 "ISO C++ 1998 forbids the use of %<extern%> on "
			 "explicit instantiations");
	    }
	  else
	    pedwarn (input_location, OPT_Wpedantic,
		     "ISO C++ forbids the use of %qE"
		     " on explicit instantiations", storage);
	}

      if (storage == ridpointers[(int) RID_INLINE])
	nomem_p = 1;
      else if (storage == ridpointers[(int) RID_EXTERN])
	extern_p = 1;
      else if (storage == ridpointers[(int) RID_STATIC])
	static_p = 1;
      else
	{
	  error ("storage class %qD applied to template instantiation",
		 storage);
	  extern_p = 0;
	}
    }

  if (CLASSTYPE_TEMPLATE_SPECIALIZATION (t))
    {
      /* DR 259 [temp.spec].
	 Both an explicit instantiation and a declaration of an explicit
	 specialization shall not appear in a program unless the explicit
	 instantiation follows a declaration of the explicit specialization.
	 For a given set of template parameters, if an explicit
	 instantiation of a template appears after a declaration of an
	 explicit specialization for that template, the explicit
	 instantiation has no effect. */
      return;
    }
  else if (CLASSTYPE_EXPLICIT_INSTANTIATION (t))
    {
      /* [temp.spec]
	 No program shall explicitly instantiate any template more
	 than once.
	 If PREVIOUS_INSTANTIATION_EXTERN_P, then the first explicit
	 instantiation was `extern'. If EXTERN_P then the second is.
	 These cases are OK. */
      previous_instantiation_extern_p = CLASSTYPE_INTERFACE_ONLY (t);

      if (!previous_instantiation_extern_p && !extern_p
	  && (complain & tf_error))
	permerror (input_location, "duplicate explicit instantiation of %q#T", t);

      /* If we've already instantiated the template, just return now. */
      if (!CLASSTYPE_INTERFACE_ONLY (t))
	return;
    }

  check_explicit_instantiation_namespace (TYPE_NAME (t));
  mark_class_instantiated (t, extern_p);

  /* "inline" requested: members are not instantiated.  */
  if (nomem_p)
    return;

  {
    tree tmp;

    /* In contrast to implicit instantiation, where only the
       declarations, and not the definitions, of members are
       instantiated, we have here:

	 [temp.explicit]

	 The explicit instantiation of a class template specialization
	 implies the instantiation of all of its members not
	 previously explicitly specialized in the translation unit
	 containing the explicit instantiation.

       Of course, we can't instantiate member template classes, since
       we don't have any arguments for them. Note that the standard
       is unclear on whether the instantiation of the members are
       *explicit* instantiations or not. However, the most natural
       interpretation is that it should be an explicit instantiation. */
    if (! static_p)
      for (tmp = TYPE_METHODS (t); tmp; tmp = DECL_CHAIN (tmp))
	if (TREE_CODE (tmp) == FUNCTION_DECL
	    && DECL_TEMPLATE_INSTANTIATION (tmp)
	    && user_provided_p (tmp))
	  instantiate_class_member (tmp, extern_p);

    for (tmp = TYPE_FIELDS (t); tmp; tmp = DECL_CHAIN (tmp))
      if (VAR_P (tmp) && DECL_TEMPLATE_INSTANTIATION (tmp))
	instantiate_class_member (tmp, extern_p);

    /* Recurse into non-dependent nested classes.  */
    if (CLASSTYPE_NESTED_UTDS (t))
      binding_table_foreach (CLASSTYPE_NESTED_UTDS (t),
			     bt_instantiate_type_proc, &storage);
  }
}
/* Given a function DECL, which is a specialization of TMPL, modify
   DECL to be a re-instantiation of TMPL with the same template
   arguments. TMPL should be the template into which tsubst'ing
   should occur for DECL, not the most general template.

   One reason for doing this is a scenario like this:

     template <class T>
     void f(const T&, int i);

     void g() { f(3, 7); }

     template <class T>
     void f(const T& t, const int i) { }

   Note that when the template is first instantiated, with
   instantiate_template, the resulting DECL will have no name for the
   first parameter, and the wrong type for the second. So, when we go
   to instantiate the DECL, we regenerate it. */
static void
regenerate_decl_from_template (tree decl, tree tmpl, tree args)
{
  /* The arguments used to instantiate DECL, from the most general
     template. */
  tree code_pattern;

  code_pattern = DECL_TEMPLATE_RESULT (tmpl);

  /* Make sure that we can see identifiers, and compute access
     correctly. */
  push_access_scope (decl);

  if (TREE_CODE (decl) == FUNCTION_DECL)
    {
      tree decl_parm;
      tree pattern_parm;
      tree specs;
      int args_depth;
      int parms_depth;

      /* Use only as many levels of ARGS as TMPL has parameter levels;
	 ARGS may carry extra outer levels (e.g. for a friend defined
	 inside a class template).  */
      args_depth = TMPL_ARGS_DEPTH (args);
      parms_depth = TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (tmpl));
      if (args_depth > parms_depth)
	args = get_innermost_template_args (args, parms_depth);

      /* Re-substitute the exception specification, which may depend on
	 template parameters.  */
      specs = tsubst_exception_specification (TREE_TYPE (code_pattern),
					      args, tf_error, NULL_TREE,
					      /*defer_ok*/false);
      if (specs && specs != error_mark_node)
	TREE_TYPE (decl) = build_exception_variant (TREE_TYPE (decl),
						    specs);

      /* Merge parameter declarations. */
      decl_parm = skip_artificial_parms_for (decl,
					     DECL_ARGUMENTS (decl));
      pattern_parm
	= skip_artificial_parms_for (code_pattern,
				     DECL_ARGUMENTS (code_pattern));
      /* Walk the non-pack parameters in lockstep, copying names, types
	 and attributes from the pattern onto DECL's parameters.  */
      while (decl_parm && !DECL_PACK_P (pattern_parm))
	{
	  tree parm_type;
	  tree attributes;

	  if (DECL_NAME (decl_parm) != DECL_NAME (pattern_parm))
	    DECL_NAME (decl_parm) = DECL_NAME (pattern_parm);
	  parm_type = tsubst (TREE_TYPE (pattern_parm), args, tf_error,
			      NULL_TREE);
	  /* Apply array/function-to-pointer decay as for any parameter.  */
	  parm_type = type_decays_to (parm_type);
	  if (!same_type_p (TREE_TYPE (decl_parm), parm_type))
	    TREE_TYPE (decl_parm) = parm_type;
	  attributes = DECL_ATTRIBUTES (pattern_parm);
	  if (DECL_ATTRIBUTES (decl_parm) != attributes)
	    {
	      DECL_ATTRIBUTES (decl_parm) = attributes;
	      cplus_decl_attributes (&decl_parm, attributes, /*flags=*/0);
	    }
	  decl_parm = DECL_CHAIN (decl_parm);
	  pattern_parm = DECL_CHAIN (pattern_parm);
	}
      /* Merge any parameters that match with the function parameter
	 pack. */
      if (pattern_parm && DECL_PACK_P (pattern_parm))
	{
	  int i, len;
	  tree expanded_types;
	  /* Expand the TYPE_PACK_EXPANSION that provides the types for
	     the parameters in this function parameter pack. */
	  expanded_types = tsubst_pack_expansion (TREE_TYPE (pattern_parm),
						  args, tf_error, NULL_TREE);
	  len = TREE_VEC_LENGTH (expanded_types);
	  for (i = 0; i < len; i++)
	    {
	      tree parm_type;
	      tree attributes;

	      if (DECL_NAME (decl_parm) != DECL_NAME (pattern_parm))
		/* Rename the parameter to include the index. */
		DECL_NAME (decl_parm) =
		  make_ith_pack_parameter_name (DECL_NAME (pattern_parm), i);
	      parm_type = TREE_VEC_ELT (expanded_types, i);
	      parm_type = type_decays_to (parm_type);
	      if (!same_type_p (TREE_TYPE (decl_parm), parm_type))
		TREE_TYPE (decl_parm) = parm_type;
	      attributes = DECL_ATTRIBUTES (pattern_parm);
	      if (DECL_ATTRIBUTES (decl_parm) != attributes)
		{
		  DECL_ATTRIBUTES (decl_parm) = attributes;
		  cplus_decl_attributes (&decl_parm, attributes, /*flags=*/0);
		}
	      decl_parm = DECL_CHAIN (decl_parm);
	    }
	}
      /* Merge additional specifiers from the CODE_PATTERN. */
      if (DECL_DECLARED_INLINE_P (code_pattern)
	  && !DECL_DECLARED_INLINE_P (decl))
	DECL_DECLARED_INLINE_P (decl) = 1;
    }
  else if (VAR_P (decl))
    {
      /* For a variable, re-substitute the initializer, and the type
	 when the declared type originally had an unknown array bound.  */
      DECL_INITIAL (decl) =
	tsubst_expr (DECL_INITIAL (code_pattern), args,
		     tf_error, DECL_TI_TEMPLATE (decl),
		     /*integral_constant_expression_p=*/false);
      if (VAR_HAD_UNKNOWN_BOUND (decl))
	TREE_TYPE (decl) = tsubst (TREE_TYPE (code_pattern), args,
				   tf_error, DECL_TI_TEMPLATE (decl));
    }
  else
    gcc_unreachable ();

  pop_access_scope (decl);
}
/* Return the TEMPLATE_DECL into which DECL_TI_ARGS(DECL) should be
   substituted to get DECL.  */
tree
template_for_substitution (tree decl)
{
  tree tmpl = DECL_TI_TEMPLATE (decl);

  /* We want the template whose DECL_TEMPLATE_RESULT actually carries
     the pattern for the instantiation; that is not always the most
     general template.  Consider, for example:

       template <class T>
       struct S { template <class U> void f();
		  template <> void f<int>(); };

     For an instantiation of S<double>::f<int> the pattern should come
     from the specialization S<T>::f<int>, not from S<T>::f<U>.

     Walk outward while the current template cannot supply a pattern:
     either it is itself an instantiation (an instantiation never has a
     definition), or it is a friend pseudo-instantiation whose result
     has no DECL_INITIAL.  The latter covers:

       template <class T> struct S {
	 template <class U> friend void f() {};
       };

     where S<int>::f<U> is not, as far as the language is concerned, an
     instantiation of S<T>::f<U>, yet that is where the pattern lives;
     whereas a friend defined outside the class has a DECL_INITIAL and
     needs no further walking.

     Note we cannot simply keep walking until some template has a
     definition, since a specialization may have been declared but
     never defined — the walk must stop exactly where the two
     conditions below say.  */
  for (;;)
    {
      if (!DECL_TEMPLATE_INSTANTIATION (tmpl)
	  && !(TREE_CODE (decl) == FUNCTION_DECL
	       && DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION (tmpl)
	       && !DECL_INITIAL (DECL_TEMPLATE_RESULT (tmpl))))
	break;
      /* Fetch the more general template.  */
      tmpl = DECL_TI_TEMPLATE (tmpl);
    }

  return tmpl;
}
/* Returns true if we need to instantiate this template instance even if we
   know we aren't going to emit it.  */
bool
always_instantiate_p (tree decl)
{
  /* Functions: always instantiate those declared inline, so the
     inliner can see their bodies, and those whose return type uses
     'auto'.  An explicit instantiation declaration prohibits implicit
     instantiation of non-inline functions; at high optimization
     levels we would normally inline those too, but "extern template"
     forbids it, hence the check of DECL_DECLARED_INLINE_P rather than
     possibly_inlined_p.  */
  if (TREE_CODE (decl) == FUNCTION_DECL)
    return (DECL_DECLARED_INLINE_P (decl)
	    || type_uses_auto (TREE_TYPE (TREE_TYPE (decl))));

  /* Variables: instantiate potentially-constant ones so that their
     initializers are available in integral constant expressions.  */
  return VAR_P (decl) && decl_maybe_constant_var_p (decl);
}
/* If FN has a noexcept-specifier that hasn't been instantiated yet,
   instantiate it now, modifying TREE_TYPE (fn). */
void
maybe_instantiate_noexcept (tree fn)
{
  tree fntype, spec, noex, clone;

  /* Don't instantiate a noexcept-specification from template context. */
  if (processing_template_decl)
    return;

  /* Clones share the specification of the function they were cloned
     from; operate on that function.  */
  if (DECL_CLONED_FUNCTION_P (fn))
    fn = DECL_CLONED_FUNCTION (fn);
  fntype = TREE_TYPE (fn);
  spec = TYPE_RAISES_EXCEPTIONS (fntype);

  /* Nothing to do without a specification carrying a noexcept operand.  */
  if (!spec || !TREE_PURPOSE (spec))
    return;

  noex = TREE_PURPOSE (spec);

  if (TREE_CODE (noex) == DEFERRED_NOEXCEPT)
    {
      if (DEFERRED_NOEXCEPT_PATTERN (noex) == NULL_TREE)
	/* No pattern: this is a defaulted function; compute its
	   implicit exception specification.  */
	spec = get_defaulted_eh_spec (fn);
      else if (push_tinst_level (fn))
	{
	  /* Substitute the deferred operand in FN's context, with
	     access checks performed immediately rather than deferred.  */
	  push_access_scope (fn);
	  push_deferring_access_checks (dk_no_deferred);
	  input_location = DECL_SOURCE_LOCATION (fn);
	  noex = tsubst_copy_and_build (DEFERRED_NOEXCEPT_PATTERN (noex),
					DEFERRED_NOEXCEPT_ARGS (noex),
					tf_warning_or_error, fn,
					/*function_p=*/false,
					/*integral_constant_expression_p=*/true);
	  pop_deferring_access_checks ();
	  pop_access_scope (fn);
	  pop_tinst_level ();
	  spec = build_noexcept_spec (noex, tf_warning_or_error);
	  if (spec == error_mark_node)
	    spec = noexcept_false_spec;
	}
      else
	/* Instantiation-depth limit exceeded: degrade to noexcept(false).  */
	spec = noexcept_false_spec;

      TREE_TYPE (fn) = build_exception_variant (fntype, spec);
    }

  /* Propagate FN's (possibly updated) exception specification to its
     clones.  */
  FOR_EACH_CLONE (clone, fn)
    {
      if (TREE_TYPE (clone) == fntype)
	TREE_TYPE (clone) = TREE_TYPE (fn);
      else
	TREE_TYPE (clone) = build_exception_variant (TREE_TYPE (clone), spec);
    }
}
/* Produce the definition of D, a _DECL generated from a template. If
DEFER_OK is true, then we don't have to actually do the
instantiation now; we just have to do it sometime. Normally it is
an error if this is an explicit instantiation but D is undefined.
EXPL_INST_CLASS_MEM_P is true iff D is a member of an explicitly
instantiated class template. */
tree
instantiate_decl (tree d, bool defer_ok, bool expl_inst_class_mem_p)
{
tree tmpl = DECL_TI_TEMPLATE (d);
tree gen_args;
tree args;
tree td;
tree code_pattern;
tree spec;
tree gen_tmpl;
bool pattern_defined;
location_t saved_loc = input_location;
int saved_unevaluated_operand = cp_unevaluated_operand;
int saved_inhibit_evaluation_warnings = c_inhibit_evaluation_warnings;
bool external_p;
bool deleted_p;
/* This function should only be used to instantiate templates for
functions and static member variables. */
gcc_assert (VAR_OR_FUNCTION_DECL_P (d));
/* A concept is never instantiated. */
gcc_assert (!DECL_DECLARED_CONCEPT_P (d));
/* Variables are never deferred; if instantiation is required, they
are instantiated right away. That allows for better code in the
case that an expression refers to the value of the variable --
if the variable has a constant value the referring expression can
take advantage of that fact. */
if (VAR_P (d))
defer_ok = false;
/* Don't instantiate cloned functions. Instead, instantiate the
functions they cloned. */
if (TREE_CODE (d) == FUNCTION_DECL && DECL_CLONED_FUNCTION_P (d))
d = DECL_CLONED_FUNCTION (d);
if (DECL_TEMPLATE_INSTANTIATED (d)
|| (TREE_CODE (d) == FUNCTION_DECL
&& DECL_DEFAULTED_FN (d) && DECL_INITIAL (d))
|| DECL_TEMPLATE_SPECIALIZATION (d))
/* D has already been instantiated or explicitly specialized, so
there's nothing for us to do here.
It might seem reasonable to check whether or not D is an explicit
instantiation, and, if so, stop here. But when an explicit
instantiation is deferred until the end of the compilation,
DECL_EXPLICIT_INSTANTIATION is set, even though we still need to do
the instantiation. */
return d;
/* Check to see whether we know that this template will be
instantiated in some other file, as with "extern template"
extension. */
external_p = (DECL_INTERFACE_KNOWN (d) && DECL_REALLY_EXTERN (d));
/* In general, we do not instantiate such templates. */
if (external_p && !always_instantiate_p (d))
return d;
gen_tmpl = most_general_template (tmpl);
gen_args = DECL_TI_ARGS (d);
if (tmpl != gen_tmpl)
/* We should already have the extra args. */
gcc_assert (TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (gen_tmpl))
== TMPL_ARGS_DEPTH (gen_args));
/* And what's in the hash table should match D. */
gcc_assert ((spec = retrieve_specialization (gen_tmpl, gen_args, 0)) == d
|| spec == NULL_TREE);
/* This needs to happen before any tsubsting. */
if (! push_tinst_level (d))
return d;
timevar_push (TV_TEMPLATE_INST);
/* Set TD to the template whose DECL_TEMPLATE_RESULT is the pattern
for the instantiation. */
td = template_for_substitution (d);
args = gen_args;
if (VAR_P (d))
{
/* Look up an explicit specialization, if any. */
tree tid = lookup_template_variable (gen_tmpl, gen_args);
tree elt = most_specialized_partial_spec (tid, tf_warning_or_error);
if (elt && elt != error_mark_node)
{
td = TREE_VALUE (elt);
args = TREE_PURPOSE (elt);
}
}
code_pattern = DECL_TEMPLATE_RESULT (td);
/* We should never be trying to instantiate a member of a class
template or partial specialization. */
gcc_assert (d != code_pattern);
if ((DECL_NAMESPACE_SCOPE_P (d) && !DECL_INITIALIZED_IN_CLASS_P (d))
|| DECL_TEMPLATE_SPECIALIZATION (td))
/* In the case of a friend template whose definition is provided
outside the class, we may have too many arguments. Drop the
ones we don't need. The same is true for specializations. */
args = get_innermost_template_args
(args, TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (td)));
if (TREE_CODE (d) == FUNCTION_DECL)
{
deleted_p = DECL_DELETED_FN (code_pattern);
pattern_defined = ((DECL_SAVED_TREE (code_pattern) != NULL_TREE
&& DECL_INITIAL (code_pattern) != error_mark_node)
|| DECL_DEFAULTED_OUTSIDE_CLASS_P (code_pattern)
|| deleted_p);
}
else
{
deleted_p = false;
if (DECL_CLASS_SCOPE_P (code_pattern))
pattern_defined = (! DECL_IN_AGGR_P (code_pattern)
|| DECL_INLINE_VAR_P (code_pattern));
else
pattern_defined = ! DECL_EXTERNAL (code_pattern);
}
/* We may be in the middle of deferred access check. Disable it now. */
push_deferring_access_checks (dk_no_deferred);
/* Unless an explicit instantiation directive has already determined
the linkage of D, remember that a definition is available for
this entity. */
if (pattern_defined
&& !DECL_INTERFACE_KNOWN (d)
&& !DECL_NOT_REALLY_EXTERN (d))
mark_definable (d);
DECL_SOURCE_LOCATION (td) = DECL_SOURCE_LOCATION (code_pattern);
DECL_SOURCE_LOCATION (d) = DECL_SOURCE_LOCATION (code_pattern);
input_location = DECL_SOURCE_LOCATION (d);
/* If D is a member of an explicitly instantiated class template,
and no definition is available, treat it like an implicit
instantiation. */
if (!pattern_defined && expl_inst_class_mem_p
&& DECL_EXPLICIT_INSTANTIATION (d))
{
/* Leave linkage flags alone on instantiations with anonymous
visibility. */
if (TREE_PUBLIC (d))
{
DECL_NOT_REALLY_EXTERN (d) = 0;
DECL_INTERFACE_KNOWN (d) = 0;
}
SET_DECL_IMPLICIT_INSTANTIATION (d);
}
/* Defer all other templates, unless we have been explicitly
forbidden from doing so. */
if (/* If there is no definition, we cannot instantiate the
template. */
! pattern_defined
/* If it's OK to postpone instantiation, do so. */
|| defer_ok
/* If this is a static data member that will be defined
elsewhere, we don't want to instantiate the entire data
member, but we do want to instantiate the initializer so that
we can substitute that elsewhere. */
|| (external_p && VAR_P (d))
/* Handle here a deleted function too, avoid generating
its body (c++/61080). */
|| deleted_p)
{
/* The definition of the static data member is now required so
we must substitute the initializer. */
if (VAR_P (d)
&& !DECL_INITIAL (d)
&& DECL_INITIAL (code_pattern))
{
tree ns;
tree init;
bool const_init = false;
bool enter_context = DECL_CLASS_SCOPE_P (d);
ns = decl_namespace_context (d);
push_nested_namespace (ns);
if (enter_context)
push_nested_class (DECL_CONTEXT (d));
init = tsubst_expr (DECL_INITIAL (code_pattern),
args,
tf_warning_or_error, NULL_TREE,
/*integral_constant_expression_p=*/false);
/* If instantiating the initializer involved instantiating this
again, don't call cp_finish_decl twice. */
if (!DECL_INITIAL (d))
{
/* Make sure the initializer is still constant, in case of
circular dependency (template/instantiate6.C). */
const_init
= DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (code_pattern);
cp_finish_decl (d, init, /*init_const_expr_p=*/const_init,
/*asmspec_tree=*/NULL_TREE,
LOOKUP_ONLYCONVERTING);
}
if (enter_context)
pop_nested_class ();
pop_nested_namespace (ns);
}
/* We restore the source position here because it's used by
add_pending_template. */
input_location = saved_loc;
if (at_eof && !pattern_defined
&& DECL_EXPLICIT_INSTANTIATION (d)
&& DECL_NOT_REALLY_EXTERN (d))
/* [temp.explicit]
The definition of a non-exported function template, a
non-exported member function template, or a non-exported
member function or static data member of a class template
shall be present in every translation unit in which it is
explicitly instantiated. */
permerror (input_location, "explicit instantiation of %qD "
"but no definition available", d);
/* If we're in unevaluated context, we just wanted to get the
constant value; this isn't an odr use, so don't queue
a full instantiation. */
if (cp_unevaluated_operand != 0)
goto out;
/* ??? Historically, we have instantiated inline functions, even
when marked as "extern template". */
if (!(external_p && VAR_P (d)))
add_pending_template (d);
goto out;
}
/* Tell the repository that D is available in this translation unit
-- and see if it is supposed to be instantiated here. */
if (TREE_PUBLIC (d) && !DECL_REALLY_EXTERN (d) && !repo_emit_p (d))
{
/* In a PCH file, despite the fact that the repository hasn't
requested instantiation in the PCH it is still possible that
an instantiation will be required in a file that includes the
PCH. */
if (pch_file)
add_pending_template (d);
/* Instantiate inline functions so that the inliner can do its
job, even though we'll not be emitting a copy of this
function. */
if (!(TREE_CODE (d) == FUNCTION_DECL && possibly_inlined_p (d)))
goto out;
}
bool push_to_top, nested;
tree fn_context;
fn_context = decl_function_context (d);
nested = current_function_decl != NULL_TREE;
push_to_top = !(nested && fn_context == current_function_decl);
vec<tree> omp_privatization_save;
if (nested)
save_omp_privatization_clauses (omp_privatization_save);
if (push_to_top)
push_to_top_level ();
else
{
push_function_context ();
cp_unevaluated_operand = 0;
c_inhibit_evaluation_warnings = 0;
}
/* Mark D as instantiated so that recursive calls to
instantiate_decl do not try to instantiate it again. */
DECL_TEMPLATE_INSTANTIATED (d) = 1;
/* Regenerate the declaration in case the template has been modified
by a subsequent redeclaration. */
regenerate_decl_from_template (d, td, args);
/* We already set the file and line above. Reset them now in case
they changed as a result of calling regenerate_decl_from_template. */
input_location = DECL_SOURCE_LOCATION (d);
if (VAR_P (d))
{
tree init;
bool const_init = false;
/* Clear out DECL_RTL; whatever was there before may not be right
since we've reset the type of the declaration. */
SET_DECL_RTL (d, NULL);
DECL_IN_AGGR_P (d) = 0;
/* The initializer is placed in DECL_INITIAL by
regenerate_decl_from_template so we don't need to
push/pop_access_scope again here. Pull it out so that
cp_finish_decl can process it. */
init = DECL_INITIAL (d);
DECL_INITIAL (d) = NULL_TREE;
DECL_INITIALIZED_P (d) = 0;
/* Clear DECL_EXTERNAL so that cp_finish_decl will process the
initializer. That function will defer actual emission until
we have a chance to determine linkage. */
DECL_EXTERNAL (d) = 0;
/* Enter the scope of D so that access-checking works correctly. */
bool enter_context = DECL_CLASS_SCOPE_P (d);
if (enter_context)
push_nested_class (DECL_CONTEXT (d));
const_init = DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (code_pattern);
cp_finish_decl (d, init, const_init, NULL_TREE, 0);
if (enter_context)
pop_nested_class ();
if (variable_template_p (gen_tmpl))
note_variable_template_instantiation (d);
}
else if (TREE_CODE (d) == FUNCTION_DECL && DECL_DEFAULTED_FN (code_pattern))
synthesize_method (d);
else if (TREE_CODE (d) == FUNCTION_DECL)
{
hash_map<tree, tree> *saved_local_specializations;
tree tmpl_parm;
tree spec_parm;
tree block = NULL_TREE;
tree lambda_ctx = NULL_TREE;
/* Save away the current list, in case we are instantiating one
template from within the body of another. */
saved_local_specializations = local_specializations;
/* Set up the list of local specializations. */
local_specializations = new hash_map<tree, tree>;
/* Set up context. */
if (DECL_OMP_DECLARE_REDUCTION_P (code_pattern)
&& TREE_CODE (DECL_CONTEXT (code_pattern)) == FUNCTION_DECL)
block = push_stmt_list ();
else
{
if (push_to_top && LAMBDA_FUNCTION_P (d))
{
/* When instantiating a lambda's templated function
operator, we need to push the non-lambda class scope
of the lambda itself so that the nested function
stack is sufficiently correct to deal with this
capture. */
lambda_ctx = DECL_CONTEXT (d);
do
lambda_ctx = decl_type_context (TYPE_NAME (lambda_ctx));
while (lambda_ctx && LAMBDA_TYPE_P (lambda_ctx));
if (lambda_ctx)
push_nested_class (lambda_ctx);
}
start_preparsed_function (d, NULL_TREE, SF_PRE_PARSED);
}
/* Some typedefs referenced from within the template code need to be
access checked at template instantiation time, i.e now. These
types were added to the template at parsing time. Let's get those
and perform the access checks then. */
perform_typedefs_access_check (DECL_TEMPLATE_RESULT (td),
args);
/* Create substitution entries for the parameters. */
tmpl_parm = DECL_ARGUMENTS (code_pattern);
spec_parm = DECL_ARGUMENTS (d);
if (DECL_NONSTATIC_MEMBER_FUNCTION_P (d))
{
register_local_specialization (spec_parm, tmpl_parm);
spec_parm = skip_artificial_parms_for (d, spec_parm);
tmpl_parm = skip_artificial_parms_for (code_pattern, tmpl_parm);
}
for (; tmpl_parm; tmpl_parm = DECL_CHAIN (tmpl_parm))
{
if (!DECL_PACK_P (tmpl_parm))
{
register_local_specialization (spec_parm, tmpl_parm);
spec_parm = DECL_CHAIN (spec_parm);
}
else
{
/* Register the (value) argument pack as a specialization of
TMPL_PARM, then move on. */
tree argpack = extract_fnparm_pack (tmpl_parm, &spec_parm);
register_local_specialization (argpack, tmpl_parm);
}
}
gcc_assert (!spec_parm);
/* Substitute into the body of the function. */
if (DECL_OMP_DECLARE_REDUCTION_P (code_pattern))
tsubst_omp_udr (DECL_SAVED_TREE (code_pattern), args,
tf_warning_or_error, tmpl);
else
{
tsubst_expr (DECL_SAVED_TREE (code_pattern), args,
tf_warning_or_error, tmpl,
/*integral_constant_expression_p=*/false);
/* Set the current input_location to the end of the function
so that finish_function knows where we are. */
input_location
= DECL_STRUCT_FUNCTION (code_pattern)->function_end_locus;
/* Remember if we saw an infinite loop in the template. */
current_function_infinite_loop
= DECL_STRUCT_FUNCTION (code_pattern)->language->infinite_loop;
}
/* We don't need the local specializations any more. */
delete local_specializations;
local_specializations = saved_local_specializations;
/* Finish the function. */
if (DECL_OMP_DECLARE_REDUCTION_P (code_pattern)
&& TREE_CODE (DECL_CONTEXT (code_pattern)) == FUNCTION_DECL)
DECL_SAVED_TREE (d) = pop_stmt_list (block);
else
{
d = finish_function (0);
expand_or_defer_fn (d);
}
if (lambda_ctx)
pop_nested_class ();
if (DECL_OMP_DECLARE_REDUCTION_P (code_pattern))
cp_check_omp_declare_reduction (d);
}
/* We're not deferring instantiation any more. */
TI_PENDING_TEMPLATE_FLAG (DECL_TEMPLATE_INFO (d)) = 0;
if (push_to_top)
pop_from_top_level ();
else
pop_function_context ();
if (nested)
restore_omp_privatization_clauses (omp_privatization_save);
out:
pop_deferring_access_checks ();
timevar_pop (TV_TEMPLATE_INST);
pop_tinst_level ();
input_location = saved_loc;
cp_unevaluated_operand = saved_unevaluated_operand;
c_inhibit_evaluation_warnings = saved_inhibit_evaluation_warnings;
return d;
}
/* Run through the list of templates that we wish we could
   instantiate, and instantiate any we can. RETRIES is the
   number of times we retry pending template instantiation. */
void
instantiate_pending_templates (int retries)
{
  int reconsider;
  location_t saved_loc = input_location;
  /* Instantiating templates may trigger vtable generation. This in turn
     may require further template instantiations. We place a limit here
     to avoid infinite loop. */
  if (pending_templates && retries >= max_tinst_depth)
    {
      tree decl = pending_templates->tinst->decl;
      fatal_error (input_location,
		   "template instantiation depth exceeds maximum of %d"
		   " instantiating %q+D, possibly from virtual table generation"
		   " (use -ftemplate-depth= to increase the maximum)",
		   max_tinst_depth, decl);
      if (TREE_CODE (decl) == FUNCTION_DECL)
	/* Pretend that we defined it. */
	DECL_INITIAL (decl) = error_mark_node;
      return;
    }
  /* Sweep the pending list repeatedly; stop when a full pass completes
     no further instantiation (RECONSIDER stays 0).  */
  do
    {
      struct pending_template **t = &pending_templates;
      struct pending_template *last = NULL;
      reconsider = 0;
      while (*t)
	{
	  tree instantiation = reopen_tinst_level ((*t)->tinst);
	  bool complete = false;
	  if (TYPE_P (instantiation))
	    {
	      /* The pending entry is a class type: instantiate the class,
		 and for a template instantiation also all its non-artificial
		 member functions.  */
	      tree fn;
	      if (!COMPLETE_TYPE_P (instantiation))
		{
		  instantiate_class_template (instantiation);
		  if (CLASSTYPE_TEMPLATE_INSTANTIATION (instantiation))
		    for (fn = TYPE_METHODS (instantiation);
			 fn;
			 fn = TREE_CHAIN (fn))
		      if (! DECL_ARTIFICIAL (fn))
			instantiate_decl (fn,
					  /*defer_ok=*/false,
					  /*expl_inst_class_mem_p=*/false);
		  if (COMPLETE_TYPE_P (instantiation))
		    reconsider = 1;
		}
	      complete = COMPLETE_TYPE_P (instantiation);
	    }
	  else
	    {
	      /* The pending entry is a function or variable.  */
	      if (!DECL_TEMPLATE_SPECIALIZATION (instantiation)
		  && !DECL_TEMPLATE_INSTANTIATED (instantiation))
		{
		  instantiation
		    = instantiate_decl (instantiation,
					/*defer_ok=*/false,
					/*expl_inst_class_mem_p=*/false);
		  if (DECL_TEMPLATE_INSTANTIATED (instantiation))
		    reconsider = 1;
		}
	      complete = (DECL_TEMPLATE_SPECIALIZATION (instantiation)
			  || DECL_TEMPLATE_INSTANTIATED (instantiation));
	    }
	  if (complete)
	    /* If INSTANTIATION has been instantiated, then we don't
	       need to consider it again in the future. */
	    *t = (*t)->next;
	  else
	    {
	      last = *t;
	      t = &(*t)->next;
	    }
	  /* Reset the instantiation context opened by reopen_tinst_level
	     before looking at the next pending entry.  */
	  tinst_depth = 0;
	  current_tinst_level = NULL;
	}
      /* LAST is the final entry still pending, or NULL if none remain.  */
      last_pending_template = last;
    }
  while (reconsider);
  input_location = saved_loc;
}
/* Substitute ARGVEC into T, which is a list of initializers for
   either base class or a non-static data member. The TREE_PURPOSEs
   are DECLs, and the TREE_VALUEs are the initializer values. Used by
   instantiate_decl. */
static tree
tsubst_initializer_list (tree t, tree argvec)
{
  tree inits = NULL_TREE;
  for (; t; t = TREE_CHAIN (t))
    {
      tree decl;
      tree init;
      tree expanded_bases = NULL_TREE;
      tree expanded_arguments = NULL_TREE;
      /* LEN is 1 for an ordinary mem-initializer, or the number of bases
	 produced by expanding a base-class pack expansion.  */
      int i, len = 1;
      if (TREE_CODE (TREE_PURPOSE (t)) == TYPE_PACK_EXPANSION)
	{
	  tree expr;
	  tree arg;
	  /* Expand the base class expansion type into separate base
	     classes. */
	  expanded_bases = tsubst_pack_expansion (TREE_PURPOSE (t), argvec,
						  tf_warning_or_error,
						  NULL_TREE);
	  if (expanded_bases == error_mark_node)
	    continue;
	  /* We'll be building separate TREE_LISTs of arguments for
	     each base. */
	  len = TREE_VEC_LENGTH (expanded_bases);
	  expanded_arguments = make_tree_vec (len);
	  for (i = 0; i < len; i++)
	    TREE_VEC_ELT (expanded_arguments, i) = NULL_TREE;
	  /* Build a dummy EXPR_PACK_EXPANSION that will be used to
	     expand each argument in the TREE_VALUE of t. */
	  expr = make_node (EXPR_PACK_EXPANSION);
	  PACK_EXPANSION_LOCAL_P (expr) = true;
	  PACK_EXPANSION_PARAMETER_PACKS (expr) =
	    PACK_EXPANSION_PARAMETER_PACKS (TREE_PURPOSE (t));
	  if (TREE_VALUE (t) == void_type_node)
	    /* VOID_TYPE_NODE is used to indicate
	       value-initialization. */
	    {
	      for (i = 0; i < len; i++)
		TREE_VEC_ELT (expanded_arguments, i) = void_type_node;
	    }
	  else
	    {
	      /* Substitute parameter packs into each argument in the
		 TREE_LIST. */
	      in_base_initializer = 1;
	      for (arg = TREE_VALUE (t); arg; arg = TREE_CHAIN (arg))
		{
		  tree expanded_exprs;
		  /* Expand the argument. */
		  SET_PACK_EXPANSION_PATTERN (expr, TREE_VALUE (arg));
		  expanded_exprs
		    = tsubst_pack_expansion (expr, argvec,
					     tf_warning_or_error,
					     NULL_TREE);
		  if (expanded_exprs == error_mark_node)
		    continue;
		  /* Prepend each of the expanded expressions to the
		     corresponding TREE_LIST in EXPANDED_ARGUMENTS. */
		  for (i = 0; i < len; i++)
		    {
		      TREE_VEC_ELT (expanded_arguments, i) =
			tree_cons (NULL_TREE,
				   TREE_VEC_ELT (expanded_exprs, i),
				   TREE_VEC_ELT (expanded_arguments, i));
		    }
		}
	      in_base_initializer = 0;
	      /* Reverse all of the TREE_LISTs in EXPANDED_ARGUMENTS,
		 since we built them backwards. */
	      for (i = 0; i < len; i++)
		{
		  TREE_VEC_ELT (expanded_arguments, i) =
		    nreverse (TREE_VEC_ELT (expanded_arguments, i));
		}
	    }
	}
      /* Now emit one substituted initializer per (possibly expanded)
	 base or member.  */
      for (i = 0; i < len; ++i)
	{
	  if (expanded_bases)
	    {
	      decl = TREE_VEC_ELT (expanded_bases, i);
	      decl = expand_member_init (decl);
	      init = TREE_VEC_ELT (expanded_arguments, i);
	    }
	  else
	    {
	      tree tmp;
	      decl = tsubst_copy (TREE_PURPOSE (t), argvec,
				  tf_warning_or_error, NULL_TREE);
	      decl = expand_member_init (decl);
	      /* If the name did not resolve to a DECL it presumably names
		 a base class; substitute in base-initializer context.  */
	      if (decl && !DECL_P (decl))
		in_base_initializer = 1;
	      init = TREE_VALUE (t);
	      tmp = init;
	      if (init != void_type_node)
		init = tsubst_expr (init, argvec,
				    tf_warning_or_error, NULL_TREE,
				    /*integral_constant_expression_p=*/false);
	      if (init == NULL_TREE && tmp != NULL_TREE)
		/* If we had an initializer but it instantiated to nothing,
		   value-initialize the object. This will only occur when
		   the initializer was a pack expansion where the parameter
		   packs used in that expansion were of length zero. */
		init = void_type_node;
	      in_base_initializer = 0;
	    }
	  if (decl)
	    {
	      /* Accumulate the result list (built in reverse order).  */
	      init = build_tree_list (decl, init);
	      TREE_CHAIN (init) = inits;
	      inits = init;
	    }
	}
    }
  return inits;
}
/* Set CURRENT_ACCESS_SPECIFIER based on the protection of DECL.  */
static void
set_current_access_from_decl (tree decl)
{
  /* Map DECL's protection flags onto the matching access-specifier node;
     anything neither private nor protected is public.  */
  current_access_specifier
    = (TREE_PRIVATE (decl) ? access_private_node
       : TREE_PROTECTED (decl) ? access_protected_node
       : access_public_node);
}
/* Instantiate an enumerated type. TAG is the template type, NEWTAG
   is the instantiation (which should have been created with
   start_enum) and ARGS are the template arguments to use. */
static void
tsubst_enum (tree tag, tree newtag, tree args)
{
  tree e;
  /* A scoped enum gets its own scope for the enumerators.  */
  if (SCOPED_ENUM_P (newtag))
    begin_scope (sk_scoped_enum, newtag);
  for (e = TYPE_VALUES (tag); e; e = TREE_CHAIN (e))
    {
      tree value;
      tree decl;
      decl = TREE_VALUE (e);
      /* Note that in a template enum, the TREE_VALUE is the
	 CONST_DECL, not the corresponding INTEGER_CST. */
      value = tsubst_expr (DECL_INITIAL (decl),
			   args, tf_warning_or_error, NULL_TREE,
			   /*integral_constant_expression_p=*/true);
      /* Give this enumeration constant the correct access. */
      set_current_access_from_decl (decl);
      /* Actually build the enumerator itself. Here we're assuming that
	 enumerators can't have dependent attributes. */
      build_enumerator (DECL_NAME (decl), value, newtag,
			DECL_ATTRIBUTES (decl), DECL_SOURCE_LOCATION (decl));
    }
  if (SCOPED_ENUM_P (newtag))
    finish_scope ();
  /* Close out the newly built enumeration type.  */
  finish_enum_value_list (newtag);
  finish_enum (newtag);
  /* Point the instantiated type at the template's source location rather
     than the point of instantiation.  */
  DECL_SOURCE_LOCATION (TYPE_NAME (newtag))
    = DECL_SOURCE_LOCATION (TYPE_NAME (tag));
}
/* DECL is a FUNCTION_DECL that is a template specialization. Return
   its type -- but without substituting the innermost set of template
   arguments. So, innermost set of template parameters will appear in
   the type. */
tree
get_mostly_instantiated_function_type (tree decl)
{
  /* For a function, DECL_TI_TEMPLATE is partially instantiated; its type
     is exactly what we want.  */
  tree partial_tmpl = DECL_TI_TEMPLATE (decl);
  return TREE_TYPE (partial_tmpl);
}
/* Return truthvalue if we're processing a template different from
   the last one involved in diagnostics. */
bool
problematic_instantiation_changed (void)
{
  /* Compare the live instantiation context against the snapshot taken by
     record_last_problematic_instantiation.  */
  if (current_tinst_level == last_error_tinst_level)
    return false;
  return true;
}
/* Remember current template involved in diagnostics. */
void
record_last_problematic_instantiation (void)
{
  /* Snapshot the instantiation context so that
     problematic_instantiation_changed can later tell whether diagnostics
     have moved on to a different instantiation.  */
  last_error_tinst_level = current_tinst_level;
}
/* Accessor for the current template-instantiation level (NULL when no
   instantiation is in progress).  */
struct tinst_level *
current_instantiation (void)
{
  struct tinst_level *level = current_tinst_level;
  return level;
}
/* Return TRUE if current_function_decl is being instantiated, false
otherwise. */
bool
instantiating_current_function_p (void)
{
return (current_instantiation ()
&& current_instantiation ()->decl == current_function_decl);
}
/* [temp.param] Check that template non-type parm TYPE is of an allowable
   type. Return zero for ok, nonzero for disallowed. Issue error and
   warning messages under control of COMPLAIN. */
static int
invalid_nontype_parm_type_p (tree type, tsubst_flags_t complain)
{
  /* Gather every permitted category into a single predicate: integral
     and enumeration types, pointers, pointers to members, template type
     parameters, typename- and decltype-types, std::nullptr_t, and (from
     C++11 on) a bound template template parameter, which could later be
     instantiated to a valid nontype parm type via an alias template.  */
  bool allowed = (INTEGRAL_OR_ENUMERATION_TYPE_P (type)
		  || POINTER_TYPE_P (type)
		  || TYPE_PTRMEM_P (type)
		  || TREE_CODE (type) == TEMPLATE_TYPE_PARM
		  || TREE_CODE (type) == TYPENAME_TYPE
		  || TREE_CODE (type) == DECLTYPE_TYPE
		  || TREE_CODE (type) == NULLPTR_TYPE
		  || (cxx_dialect >= cxx11
		      && TREE_CODE (type) == BOUND_TEMPLATE_TEMPLATE_PARM));
  if (allowed)
    return 0;
  if (complain & tf_error)
    {
      if (type == error_mark_node)
	inform (input_location, "invalid template non-type parameter");
      else
	error ("%q#T is not a valid type for a template non-type parameter",
	       type);
    }
  return 1;
}
/* Returns TRUE if TYPE is dependent, in the sense of [temp.dep.type].
   Assumes that TYPE really is a type, and not the ERROR_MARK_NODE.*/
static bool
dependent_type_p_r (tree type)
{
  tree scope;
  /* [temp.dep.type]
     A type is dependent if it is:
     -- a template parameter. Template template parameters are types
	for us (since TYPE_P holds true for them) so we handle
	them here. */
  if (TREE_CODE (type) == TEMPLATE_TYPE_PARM
      || TREE_CODE (type) == TEMPLATE_TEMPLATE_PARM)
    return true;
  /* -- a qualified-id with a nested-name-specifier which contains a
     class-name that names a dependent type or whose unqualified-id
     names a dependent type. */
  if (TREE_CODE (type) == TYPENAME_TYPE)
    return true;
  /* An alias template specialization can be dependent even if the
     resulting type is not. */
  if (dependent_alias_template_spec_p (type))
    return true;
  /* -- a cv-qualified type where the cv-unqualified type is
     dependent.
     No code is necessary for this bullet; the code below handles
     cv-qualified types, and we don't want to strip aliases with
     TYPE_MAIN_VARIANT because of DR 1558. */
  /* -- a compound type constructed from any dependent type. */
  if (TYPE_PTRMEM_P (type))
    return (dependent_type_p (TYPE_PTRMEM_CLASS_TYPE (type))
	    || dependent_type_p (TYPE_PTRMEM_POINTED_TO_TYPE
				 (type)));
  else if (TYPE_PTR_P (type)
	   || TREE_CODE (type) == REFERENCE_TYPE)
    return dependent_type_p (TREE_TYPE (type));
  else if (TREE_CODE (type) == FUNCTION_TYPE
	   || TREE_CODE (type) == METHOD_TYPE)
    {
      tree arg_type;
      /* A function type is dependent if its return type or any of its
	 parameter types is dependent.  */
      if (dependent_type_p (TREE_TYPE (type)))
	return true;
      for (arg_type = TYPE_ARG_TYPES (type);
	   arg_type;
	   arg_type = TREE_CHAIN (arg_type))
	if (dependent_type_p (TREE_VALUE (arg_type)))
	  return true;
      if (cxx_dialect >= cxx1z)
	{
	  /* A value-dependent noexcept-specifier makes the type dependent. */
	  tree spec = TYPE_RAISES_EXCEPTIONS (type);
	  if (spec && TREE_PURPOSE (spec)
	      && value_dependent_expression_p (TREE_PURPOSE (spec)))
	    return true;
	}
      return false;
    }
  /* -- an array type constructed from any dependent type or whose
     size is specified by a constant expression that is
     value-dependent.
     We checked for type- and value-dependence of the bounds in
     compute_array_index_type, so TYPE_DEPENDENT_P is already set. */
  if (TREE_CODE (type) == ARRAY_TYPE)
    {
      if (TYPE_DOMAIN (type)
	  && dependent_type_p (TYPE_DOMAIN (type)))
	return true;
      return dependent_type_p (TREE_TYPE (type));
    }
  /* -- a template-id in which either the template name is a template
     parameter ... */
  if (TREE_CODE (type) == BOUND_TEMPLATE_TEMPLATE_PARM)
    return true;
  /* ... or any of the template arguments is a dependent type or
     an expression that is type-dependent or value-dependent. */
  else if (CLASS_TYPE_P (type) && CLASSTYPE_TEMPLATE_INFO (type)
	   && (any_dependent_template_arguments_p
	       (INNERMOST_TEMPLATE_ARGS (CLASSTYPE_TI_ARGS (type)))))
    return true;
  /* All TYPEOF_TYPEs, DECLTYPE_TYPEs, and UNDERLYING_TYPEs are
     dependent; if the argument of the `typeof' expression is not
     type-dependent, then it should already been have resolved. */
  if (TREE_CODE (type) == TYPEOF_TYPE
      || TREE_CODE (type) == DECLTYPE_TYPE
      || TREE_CODE (type) == UNDERLYING_TYPE)
    return true;
  /* A template argument pack is dependent if any of its packed
     arguments are. */
  if (TREE_CODE (type) == TYPE_ARGUMENT_PACK)
    {
      tree args = ARGUMENT_PACK_ARGS (type);
      int i, len = TREE_VEC_LENGTH (args);
      for (i = 0; i < len; ++i)
	if (dependent_template_arg_p (TREE_VEC_ELT (args, i)))
	  return true;
    }
  /* All TYPE_PACK_EXPANSIONs are dependent, because parameter packs must
     be template parameters. */
  if (TREE_CODE (type) == TYPE_PACK_EXPANSION)
    return true;
  /* A type carrying a dependent attribute is itself dependent.  */
  if (any_dependent_type_attributes_p (TYPE_ATTRIBUTES (type)))
    return true;
  /* The standard does not specifically mention types that are local
     to template functions or local classes, but they should be
     considered dependent too. For example:
     template <int I> void f() {
       enum E { a = I };
       S<sizeof (E)> s;
     }
     The size of `E' cannot be known until the value of `I' has been
     determined. Therefore, `E' must be considered dependent. */
  scope = TYPE_CONTEXT (type);
  if (scope && TYPE_P (scope))
    return dependent_type_p (scope);
  /* Don't use type_dependent_expression_p here, as it can lead
     to infinite recursion trying to determine whether a lambda
     nested in a lambda is dependent (c++/47687). */
  else if (scope && TREE_CODE (scope) == FUNCTION_DECL
	   && DECL_LANG_SPECIFIC (scope)
	   && DECL_TEMPLATE_INFO (scope)
	   && (any_dependent_template_arguments_p
	       (INNERMOST_TEMPLATE_ARGS (DECL_TI_ARGS (scope)))))
    return true;
  /* Other types are non-dependent. */
  return false;
}
/* Returns TRUE if TYPE is dependent, in the sense of
   [temp.dep.type]. Note that a NULL type is considered dependent. */
bool
dependent_type_p (tree type)
{
  /* If there are no template parameters in scope, then there can't be
     any dependent types. */
  if (!processing_template_decl)
    {
      /* If we are not processing a template, then nobody should be
	 providing us with a dependent type. */
      gcc_assert (type);
      gcc_assert (TREE_CODE (type) != TEMPLATE_TYPE_PARM || is_auto (type));
      return false;
    }
  /* If the type is NULL, we have not computed a type for the entity
     in question; in that case, the type is dependent. */
  if (!type)
    return true;
  /* Erroneous types can be considered non-dependent. */
  if (type == error_mark_node)
    return false;
  /* If we have not already computed the appropriate value for TYPE,
     do so now. */
  /* The answer is memoized on the type node itself: dependent_type_p_r
     runs at most once per type, and TYPE_DEPENDENT_P caches its result
     for subsequent queries.  */
  if (!TYPE_DEPENDENT_P_VALID (type))
    {
      TYPE_DEPENDENT_P (type) = dependent_type_p_r (type);
      TYPE_DEPENDENT_P_VALID (type) = 1;
    }
  return TYPE_DEPENDENT_P (type);
}
/* Returns TRUE if SCOPE is a dependent scope, in which we can't do any
   lookup. In other words, a dependent type that is not the current
   instantiation. */
bool
dependent_scope_p (tree scope)
{
  /* Only a type can be a dependent scope.  */
  if (!scope || !TYPE_P (scope))
    return false;
  /* A non-dependent type is always a usable scope.  */
  if (!dependent_type_p (scope))
    return false;
  /* A dependent type that is currently open is the current
     instantiation, so lookup inside it is still possible.  */
  return !currently_open_class (scope);
}
/* T is a SCOPE_REF; return whether we need to consider it
   instantiation-dependent so that we can check access at instantiation
   time even though we know which member it resolves to. */
static bool
instantiation_dependent_scope_ref_p (tree t)
{
  tree scope = TREE_OPERAND (t, 0);
  tree member = TREE_OPERAND (t, 1);
  /* The reference is NOT instantiation-dependent only when the member
     has resolved to a declaration, the scope is a class, and the member
     is already known to be accessible in the template context.  */
  return !(DECL_P (member)
	   && CLASS_TYPE_P (scope)
	   && accessible_in_template_p (scope, member));
}
/* Returns TRUE if the EXPRESSION is value-dependent, in the sense of
   [temp.dep.constexpr]. EXPRESSION is already known to be a constant
   expression. */
/* Note that this predicate is not appropriate for general expressions;
   only constant expressions (that satisfy potential_constant_expression)
   can be tested for value dependence. */
bool
value_dependent_expression_p (tree expression)
{
  if (!processing_template_decl || expression == NULL_TREE)
    return false;
  /* A name declared with a dependent type. */
  if (DECL_P (expression) && type_dependent_expression_p (expression))
    return true;
  /* Dispatch on the kind of expression; cases that fall out of the
     switch are handled generically at the bottom.  */
  switch (TREE_CODE (expression))
    {
    case BASELINK:
      /* A dependent member function of the current instantiation. */
      return dependent_type_p (BINFO_TYPE (BASELINK_BINFO (expression)));
    case FUNCTION_DECL:
      /* A dependent member function of the current instantiation. */
      if (DECL_CLASS_SCOPE_P (expression)
	  && dependent_type_p (DECL_CONTEXT (expression)))
	return true;
      break;
    case IDENTIFIER_NODE:
      /* A name that has not been looked up -- must be dependent. */
      return true;
    case TEMPLATE_PARM_INDEX:
      /* A non-type template parm. */
      return true;
    case CONST_DECL:
      /* A non-type template parm. */
      if (DECL_TEMPLATE_PARM_P (expression))
	return true;
      /* An enumerator is value-dependent if its initializer is.  */
      return value_dependent_expression_p (DECL_INITIAL (expression));
    case VAR_DECL:
      /* A constant with literal type and is initialized
	 with an expression that is value-dependent.
	 Note that a non-dependent parenthesized initializer will have
	 already been replaced with its constant value, so if we see
	 a TREE_LIST it must be dependent. */
      if (DECL_INITIAL (expression)
	  && decl_constant_var_p (expression)
	  && (TREE_CODE (DECL_INITIAL (expression)) == TREE_LIST
	      /* cp_finish_decl doesn't fold reference initializers. */
	      || TREE_CODE (TREE_TYPE (expression)) == REFERENCE_TYPE
	      || type_dependent_expression_p (DECL_INITIAL (expression))
	      || value_dependent_expression_p (DECL_INITIAL (expression))))
	return true;
      if (DECL_HAS_VALUE_EXPR_P (expression))
	{
	  tree value_expr = DECL_VALUE_EXPR (expression);
	  if (type_dependent_expression_p (value_expr))
	    return true;
	}
      return false;
    case DYNAMIC_CAST_EXPR:
    case STATIC_CAST_EXPR:
    case CONST_CAST_EXPR:
    case REINTERPRET_CAST_EXPR:
    case CAST_EXPR:
      /* These expressions are value-dependent if the type to which
	 the cast occurs is dependent or the expression being casted
	 is value-dependent. */
      {
	tree type = TREE_TYPE (expression);
	if (dependent_type_p (type))
	  return true;
	/* A functional cast has a list of operands. */
	expression = TREE_OPERAND (expression, 0);
	if (!expression)
	  {
	    /* If there are no operands, it must be an expression such
	       as "int()". This should not happen for aggregate types
	       because it would form non-constant expressions. */
	    gcc_assert (cxx_dialect >= cxx11
			|| INTEGRAL_OR_ENUMERATION_TYPE_P (type));
	    return false;
	  }
	if (TREE_CODE (expression) == TREE_LIST)
	  return any_value_dependent_elements_p (expression);
	return value_dependent_expression_p (expression);
      }
    case SIZEOF_EXPR:
      if (SIZEOF_EXPR_TYPE_P (expression))
	return dependent_type_p (TREE_TYPE (TREE_OPERAND (expression, 0)));
      /* FALLTHRU */
    case ALIGNOF_EXPR:
    case TYPEID_EXPR:
      /* A `sizeof' expression is value-dependent if the operand is
	 type-dependent or is a pack expansion. */
      expression = TREE_OPERAND (expression, 0);
      if (PACK_EXPANSION_P (expression))
	return true;
      else if (TYPE_P (expression))
	return dependent_type_p (expression);
      return instantiation_dependent_uneval_expression_p (expression);
    case AT_ENCODE_EXPR:
      /* An 'encode' expression is value-dependent if the operand is
	 type-dependent. */
      expression = TREE_OPERAND (expression, 0);
      return dependent_type_p (expression);
    case NOEXCEPT_EXPR:
      expression = TREE_OPERAND (expression, 0);
      return instantiation_dependent_uneval_expression_p (expression);
    case SCOPE_REF:
      /* All instantiation-dependent expressions should also be considered
	 value-dependent. */
      return instantiation_dependent_scope_ref_p (expression);
    case COMPONENT_REF:
      return (value_dependent_expression_p (TREE_OPERAND (expression, 0))
	      || value_dependent_expression_p (TREE_OPERAND (expression, 1)));
    case NONTYPE_ARGUMENT_PACK:
      /* A NONTYPE_ARGUMENT_PACK is value-dependent if any packed argument
	 is value-dependent. */
      {
	tree values = ARGUMENT_PACK_ARGS (expression);
	int i, len = TREE_VEC_LENGTH (values);
	for (i = 0; i < len; ++i)
	  if (value_dependent_expression_p (TREE_VEC_ELT (values, i)))
	    return true;
	return false;
      }
    case TRAIT_EXPR:
      {
	/* A trait is value-dependent if either of its type operands is
	   dependent (the second operand may be absent).  */
	tree type2 = TRAIT_EXPR_TYPE2 (expression);
	return (dependent_type_p (TRAIT_EXPR_TYPE1 (expression))
		|| (type2 ? dependent_type_p (type2) : false));
      }
    case MODOP_EXPR:
      return ((value_dependent_expression_p (TREE_OPERAND (expression, 0)))
	      || (value_dependent_expression_p (TREE_OPERAND (expression, 2))));
    case ARRAY_REF:
      return ((value_dependent_expression_p (TREE_OPERAND (expression, 0)))
	      || (value_dependent_expression_p (TREE_OPERAND (expression, 1))));
    case ADDR_EXPR:
      {
	tree op = TREE_OPERAND (expression, 0);
	return (value_dependent_expression_p (op)
		|| has_value_dependent_address (op));
      }
    case REQUIRES_EXPR:
      /* Treat all requires-expressions as value-dependent so
	 we don't try to fold them. */
      return true;
    case TYPE_REQ:
      return dependent_type_p (TREE_OPERAND (expression, 0));
    case CALL_EXPR:
      {
	if (value_dependent_expression_p (CALL_EXPR_FN (expression)))
	  return true;
	tree fn = get_callee_fndecl (expression);
	int i, nargs;
	nargs = call_expr_nargs (expression);
	for (i = 0; i < nargs; ++i)
	  {
	    tree op = CALL_EXPR_ARG (expression, i);
	    /* In a call to a constexpr member function, look through the
	       implicit ADDR_EXPR on the object argument so that it doesn't
	       cause the call to be considered value-dependent. We also
	       look through it in potential_constant_expression. */
	    if (i == 0 && fn && DECL_DECLARED_CONSTEXPR_P (fn)
		&& DECL_NONSTATIC_MEMBER_FUNCTION_P (fn)
		&& TREE_CODE (op) == ADDR_EXPR)
	      op = TREE_OPERAND (op, 0);
	    if (value_dependent_expression_p (op))
	      return true;
	  }
	return false;
      }
    case TEMPLATE_ID_EXPR:
      /* If a TEMPLATE_ID_EXPR involves a dependent name, it will be
	 type-dependent. */
      return type_dependent_expression_p (expression)
	|| variable_concept_p (TREE_OPERAND (expression, 0));
    case CONSTRUCTOR:
      {
	unsigned ix;
	tree val;
	/* A braced initializer is value-dependent if its type or any of
	   its element values is dependent.  */
	if (dependent_type_p (TREE_TYPE (expression)))
	  return true;
	FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (expression), ix, val)
	  if (value_dependent_expression_p (val))
	    return true;
	return false;
      }
    case STMT_EXPR:
      /* Treat a GNU statement expression as dependent to avoid crashing
	 under instantiate_non_dependent_expr; it can't be constant. */
      return true;
    default:
      /* A constant expression is value-dependent if any subexpression is
	 value-dependent. */
      switch (TREE_CODE_CLASS (TREE_CODE (expression)))
	{
	case tcc_reference:
	case tcc_unary:
	case tcc_comparison:
	case tcc_binary:
	case tcc_expression:
	case tcc_vl_exp:
	  {
	    int i, len = cp_tree_operand_length (expression);
	    for (i = 0; i < len; i++)
	      {
		tree t = TREE_OPERAND (expression, i);
		/* In some cases, some of the operands may be missing.
		   (For example, in the case of PREDECREMENT_EXPR, the
		   amount to increment by may be missing.) That doesn't
		   make the expression dependent. */
		if (t && value_dependent_expression_p (t))
		  return true;
	      }
	  }
	  break;
	default:
	  break;
	}
      break;
    }
  /* The expression is not value-dependent. */
  return false;
}
/* Returns TRUE if the EXPRESSION is type-dependent, in the sense of
[temp.dep.expr]. Note that an expression with no type is
considered dependent. Other parts of the compiler arrange for an
expression with type-dependent subexpressions to have no type, so
this function doesn't have to be fully recursive. */
bool
type_dependent_expression_p (tree expression)
{
if (!processing_template_decl)
return false;
if (expression == NULL_TREE || expression == error_mark_node)
return false;
/* An unresolved name is always dependent. */
if (identifier_p (expression)
|| TREE_CODE (expression) == USING_DECL
|| TREE_CODE (expression) == WILDCARD_DECL)
return true;
/* A fold expression is type-dependent. */
if (TREE_CODE (expression) == UNARY_LEFT_FOLD_EXPR
|| TREE_CODE (expression) == UNARY_RIGHT_FOLD_EXPR
|| TREE_CODE (expression) == BINARY_LEFT_FOLD_EXPR
|| TREE_CODE (expression) == BINARY_RIGHT_FOLD_EXPR)
return true;
/* Some expression forms are never type-dependent. */
if (TREE_CODE (expression) == PSEUDO_DTOR_EXPR
|| TREE_CODE (expression) == SIZEOF_EXPR
|| TREE_CODE (expression) == ALIGNOF_EXPR
|| TREE_CODE (expression) == AT_ENCODE_EXPR
|| TREE_CODE (expression) == NOEXCEPT_EXPR
|| TREE_CODE (expression) == TRAIT_EXPR
|| TREE_CODE (expression) == TYPEID_EXPR
|| TREE_CODE (expression) == DELETE_EXPR
|| TREE_CODE (expression) == VEC_DELETE_EXPR
|| TREE_CODE (expression) == THROW_EXPR
|| TREE_CODE (expression) == REQUIRES_EXPR)
return false;
/* The types of these expressions depends only on the type to which
the cast occurs. */
if (TREE_CODE (expression) == DYNAMIC_CAST_EXPR
|| TREE_CODE (expression) == STATIC_CAST_EXPR
|| TREE_CODE (expression) == CONST_CAST_EXPR
|| TREE_CODE (expression) == REINTERPRET_CAST_EXPR
|| TREE_CODE (expression) == IMPLICIT_CONV_EXPR
|| TREE_CODE (expression) == CAST_EXPR)
return dependent_type_p (TREE_TYPE (expression));
/* The types of these expressions depends only on the type created
by the expression. */
if (TREE_CODE (expression) == NEW_EXPR
|| TREE_CODE (expression) == VEC_NEW_EXPR)
{
/* For NEW_EXPR tree nodes created inside a template, either
the object type itself or a TREE_LIST may appear as the
operand 1. */
tree type = TREE_OPERAND (expression, 1);
if (TREE_CODE (type) == TREE_LIST)
/* This is an array type. We need to check array dimensions
as well. */
return dependent_type_p (TREE_VALUE (TREE_PURPOSE (type)))
|| value_dependent_expression_p
(TREE_OPERAND (TREE_VALUE (type), 1));
else
return dependent_type_p (type);
}
if (TREE_CODE (expression) == SCOPE_REF)
{
tree scope = TREE_OPERAND (expression, 0);
tree name = TREE_OPERAND (expression, 1);
/* 14.6.2.2 [temp.dep.expr]: An id-expression is type-dependent if it
contains an identifier associated by name lookup with one or more
declarations declared with a dependent type, or...a
nested-name-specifier or qualified-id that names a member of an
unknown specialization. */
return (type_dependent_expression_p (name)
|| dependent_scope_p (scope));
}
if (TREE_CODE (expression) == TEMPLATE_DECL
&& !DECL_TEMPLATE_TEMPLATE_PARM_P (expression))
return uses_outer_template_parms (expression);
if (TREE_CODE (expression) == STMT_EXPR)
expression = stmt_expr_value_expr (expression);
if (BRACE_ENCLOSED_INITIALIZER_P (expression))
{
tree elt;
unsigned i;
FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (expression), i, elt)
{
if (type_dependent_expression_p (elt))
return true;
}
return false;
}
/* A static data member of the current instantiation with incomplete
array type is type-dependent, as the definition and specializations
can have different bounds. */
if (VAR_P (expression)
&& DECL_CLASS_SCOPE_P (expression)
&& dependent_type_p (DECL_CONTEXT (expression))
&& VAR_HAD_UNKNOWN_BOUND (expression))
return true;
/* An array of unknown bound depending on a variadic parameter, eg:
template<typename... Args>
void foo (Args... args)
{
int arr[] = { args... };
}
template<int... vals>
void bar ()
{
int arr[] = { vals... };
}
If the array has no length and has an initializer, it must be that
we couldn't determine its length in cp_complete_array_type because
it is dependent. */
if (VAR_P (expression)
&& TREE_TYPE (expression) != NULL_TREE
&& TREE_CODE (TREE_TYPE (expression)) == ARRAY_TYPE
&& !TYPE_DOMAIN (TREE_TYPE (expression))
&& DECL_INITIAL (expression))
return true;
/* A function or variable template-id is type-dependent if it has any
dependent template arguments. */
if (VAR_OR_FUNCTION_DECL_P (expression)
&& DECL_LANG_SPECIFIC (expression)
&& DECL_TEMPLATE_INFO (expression))
{
/* Consider the innermost template arguments, since those are the ones
that come from the template-id; the template arguments for the
enclosing class do not make it type-dependent unless they are used in
the type of the decl. */
if (PRIMARY_TEMPLATE_P (DECL_TI_TEMPLATE (expression))
&& (any_dependent_template_arguments_p
(INNERMOST_TEMPLATE_ARGS (DECL_TI_ARGS (expression)))))
return true;
}
/* Otherwise, if the decl isn't from a dependent scope, it can't be
type-dependent. Checking this is important for functions with auto
return type, which looks like a dependent type. */
if (TREE_CODE (expression) == FUNCTION_DECL
&& undeduced_auto_decl (expression)
&& (!DECL_CLASS_SCOPE_P (expression)
|| !dependent_type_p (DECL_CONTEXT (expression)))
&& (!DECL_LANG_SPECIFIC (expression)
|| !DECL_FRIEND_CONTEXT (expression)
|| !dependent_type_p (DECL_FRIEND_CONTEXT (expression)))
&& !DECL_LOCAL_FUNCTION_P (expression))
{
return false;
}
/* Always dependent, on the number of arguments if nothing else. */
if (TREE_CODE (expression) == EXPR_PACK_EXPANSION)
return true;
if (TREE_TYPE (expression) == unknown_type_node)
{
if (TREE_CODE (expression) == ADDR_EXPR)
return type_dependent_expression_p (TREE_OPERAND (expression, 0));
if (TREE_CODE (expression) == COMPONENT_REF
|| TREE_CODE (expression) == OFFSET_REF)
{
if (type_dependent_expression_p (TREE_OPERAND (expression, 0)))
return true;
expression = TREE_OPERAND (expression, 1);
if (identifier_p (expression))
return false;
}
/* SCOPE_REF with non-null TREE_TYPE is always non-dependent. */
if (TREE_CODE (expression) == SCOPE_REF)
return false;
if (BASELINK_P (expression))
{
if (BASELINK_OPTYPE (expression)
&& dependent_type_p (BASELINK_OPTYPE (expression)))
return true;
expression = BASELINK_FUNCTIONS (expression);
}
if (TREE_CODE (expression) == TEMPLATE_ID_EXPR)
{
if (any_dependent_template_arguments_p
(TREE_OPERAND (expression, 1)))
return true;
expression = TREE_OPERAND (expression, 0);
if (identifier_p (expression))
return true;
}
gcc_assert (TREE_CODE (expression) == OVERLOAD
|| TREE_CODE (expression) == FUNCTION_DECL);
while (expression)
{
if (type_dependent_expression_p (OVL_CURRENT (expression)))
return true;
expression = OVL_NEXT (expression);
}
return false;
}
gcc_assert (TREE_CODE (expression) != TYPE_DECL);
/* Dependent type attributes might not have made it from the decl to
the type yet. */
if (DECL_P (expression)
&& any_dependent_type_attributes_p (DECL_ATTRIBUTES (expression)))
return true;
return (dependent_type_p (TREE_TYPE (expression)));
}
/* [temp.dep.expr]/5: A class member access expression (5.2.5) is
   type-dependent if the expression refers to a member of the current
   instantiation and the type of the referenced member is dependent, or the
   class member access expression refers to a member of an unknown
   specialization.
   This function returns true if the OBJECT in such a class member access
   expression is of an unknown specialization.  */

bool
type_dependent_object_expression_p (tree object)
{
  /* An IDENTIFIER_NODE can sometimes have a TREE_TYPE, but it's still
     dependent.  */
  if (TREE_CODE (object) == IDENTIFIER_NODE)
    return true;
  tree scope = TREE_TYPE (object);
  if (scope == NULL_TREE)
    return true;
  return dependent_scope_p (scope);
}
/* walk_tree callback function for instantiation_dependent_expression_p,
   below.  Returns non-zero if a dependent subexpression is found.  A
   non-null return value stops the walk immediately.  */

static tree
instantiation_dependent_r (tree *tp, int *walk_subtrees,
			   void * /*data*/)
{
  if (TYPE_P (*tp))
    {
      /* We don't have to worry about decltype currently because decltype
	 of an instantiation-dependent expr is a dependent type.  This
	 might change depending on the resolution of DR 1172.  */
      *walk_subtrees = false;
      return NULL_TREE;
    }
  enum tree_code code = TREE_CODE (*tp);
  switch (code)
    {
      /* Don't treat an argument list as dependent just because it has no
	 TREE_TYPE.  */
    case TREE_LIST:
    case TREE_VEC:
      return NULL_TREE;

    case TEMPLATE_PARM_INDEX:
      /* A template parameter is dependent by definition.  */
      return *tp;

      /* Handle expressions with type operands.  */
    case SIZEOF_EXPR:
    case ALIGNOF_EXPR:
    case TYPEID_EXPR:
    case AT_ENCODE_EXPR:
      {
	tree op = TREE_OPERAND (*tp, 0);
	if (code == SIZEOF_EXPR && SIZEOF_EXPR_TYPE_P (*tp))
	  op = TREE_TYPE (op);
	if (TYPE_P (op))
	  {
	    if (dependent_type_p (op))
	      return *tp;
	    else
	      {
		/* The operand type is known; nothing underneath can make
		   this expression dependent.  */
		*walk_subtrees = false;
		return NULL_TREE;
	      }
	  }
	break;
      }

    case COMPONENT_REF:
      if (identifier_p (TREE_OPERAND (*tp, 1)))
	/* In a template, finish_class_member_access_expr creates a
	   COMPONENT_REF with an IDENTIFIER_NODE for op1 even if it isn't
	   type-dependent, so that we can check access control at
	   instantiation time (PR 42277).  See also Core issue 1273.  */
	return *tp;
      break;

    case SCOPE_REF:
      if (instantiation_dependent_scope_ref_p (*tp))
	return *tp;
      else
	break;

      /* Treat statement-expressions as dependent.  */
    case BIND_EXPR:
      return *tp;

      /* Treat requires-expressions as dependent. */
    case REQUIRES_EXPR:
      return *tp;

    case CALL_EXPR:
      /* Treat calls to function concepts as dependent. */
      if (function_concept_check_p (*tp))
	return *tp;
      break;

    case TEMPLATE_ID_EXPR:
      /* And variable concepts.  */
      if (variable_concept_p (TREE_OPERAND (*tp, 0)))
	return *tp;
      break;

    default:
      break;
    }

  /* Fall back to ordinary type-dependence for everything else.  */
  if (type_dependent_expression_p (*tp))
    return *tp;
  else
    return NULL_TREE;
}
/* Returns TRUE if the EXPRESSION is instantiation-dependent, in the
   sense defined by the ABI:

   "An expression is instantiation-dependent if it is type-dependent
   or value-dependent, or it has a subexpression that is type-dependent
   or value-dependent."

   Except don't actually check value-dependence for unevaluated expressions,
   because in sizeof(i) we don't care about the value of i.  Checking
   type-dependence will in turn check value-dependence of array bounds/template
   arguments as needed.  */

bool
instantiation_dependent_uneval_expression_p (tree expression)
{
  if (!processing_template_decl || expression == error_mark_node)
    return false;

  /* Walk every subexpression; the callback returns the first dependent
     subtree it encounters, if any.  */
  tree found = cp_walk_tree_without_duplicates (&expression,
						instantiation_dependent_r,
						NULL);
  return found != NULL_TREE;
}
/* As above, but also check value-dependence of the expression as a whole.  */

bool
instantiation_dependent_expression_p (tree expression)
{
  if (instantiation_dependent_uneval_expression_p (expression))
    return true;
  return value_dependent_expression_p (expression);
}
/* Like type_dependent_expression_p, but it also works while not processing
   a template definition, i.e. during substitution or mangling.  */

bool
type_dependent_expression_p_push (tree expr)
{
  /* Temporarily pretend we are inside a template so the dependency
     predicates do their full analysis.  */
  ++processing_template_decl;
  const bool dependent = type_dependent_expression_p (expr);
  --processing_template_decl;
  return dependent;
}
/* Returns TRUE if ARGS contains a type-dependent expression. */
bool
any_type_dependent_arguments_p (const vec<tree, va_gc> *args)
{
unsigned int i;
tree arg;
FOR_EACH_VEC_SAFE_ELT (args, i, arg)
{
if (type_dependent_expression_p (arg))
return true;
}
return false;
}
/* Returns TRUE if LIST (a TREE_LIST whose TREE_VALUEs are
   expressions) contains any type-dependent expressions.  */

bool
any_type_dependent_elements_p (const_tree list)
{
  while (list)
    {
      if (type_dependent_expression_p (TREE_VALUE (list)))
	return true;
      list = TREE_CHAIN (list);
    }
  return false;
}
/* Returns TRUE if LIST (a TREE_LIST whose TREE_VALUEs are
   expressions) contains any value-dependent expressions.  */

bool
any_value_dependent_elements_p (const_tree list)
{
  while (list)
    {
      if (value_dependent_expression_p (TREE_VALUE (list)))
	return true;
      list = TREE_CHAIN (list);
    }
  return false;
}
/* Returns TRUE if the ARG (a template argument) is dependent.  */

bool
dependent_template_arg_p (tree arg)
{
  if (!processing_template_decl)
    return false;

  /* Assume a template argument that was wrongly written by the user
     is dependent. This is consistent with what
     any_dependent_template_arguments_p [that calls this function]
     does.  */
  if (!arg || arg == error_mark_node)
    return true;

  if (TREE_CODE (arg) == ARGUMENT_PACK_SELECT)
    /* Look through the selection at the underlying argument.  */
    arg = ARGUMENT_PACK_SELECT_ARG (arg);

  /* A template template parameter is always dependent.  */
  if (TREE_CODE (arg) == TEMPLATE_TEMPLATE_PARM)
    return true;
  if (TREE_CODE (arg) == TEMPLATE_DECL)
    {
      if (DECL_TEMPLATE_PARM_P (arg))
	return true;
      /* A member template of a dependent class is not necessarily
	 type-dependent, but it is a dependent template argument because it
	 will be a member of an unknown specialization to that template.  */
      tree scope = CP_DECL_CONTEXT (arg);
      return TYPE_P (scope) && dependent_type_p (scope);
    }
  else if (ARGUMENT_PACK_P (arg))
    {
      /* A pack argument is dependent if any of its elements is.  */
      tree args = ARGUMENT_PACK_ARGS (arg);
      int i, len = TREE_VEC_LENGTH (args);
      for (i = 0; i < len; ++i)
	{
	  if (dependent_template_arg_p (TREE_VEC_ELT (args, i)))
	    return true;
	}
      return false;
    }
  else if (TYPE_P (arg))
    /* Type arguments use the type-dependency predicate.  */
    return dependent_type_p (arg);
  else
    /* Non-type arguments are dependent if either their type or their
       value is.  */
    return (type_dependent_expression_p (arg)
	    || value_dependent_expression_p (arg));
}
/* Returns true if ARGS (a collection of template arguments) contains
   any types that require structural equality testing.  */

bool
any_template_arguments_need_structural_equality_p (tree args)
{
  int i;
  int j;

  if (!args)
    return false;
  if (args == error_mark_node)
    return true;

  /* Walk every argument at every level.  */
  for (i = 0; i < TMPL_ARGS_DEPTH (args); ++i)
    {
      tree level = TMPL_ARGS_LEVEL (args, i + 1);
      for (j = 0; j < TREE_VEC_LENGTH (level); ++j)
	{
	  tree arg = TREE_VEC_ELT (level, j);
	  tree packed_args = NULL_TREE;
	  int k, len = 1;

	  if (ARGUMENT_PACK_P (arg))
	    {
	      /* Look inside the argument pack.  */
	      packed_args = ARGUMENT_PACK_ARGS (arg);
	      len = TREE_VEC_LENGTH (packed_args);
	    }

	  for (k = 0; k < len; ++k)
	    {
	      if (packed_args)
		arg = TREE_VEC_ELT (packed_args, k);

	      if (error_operand_p (arg))
		return true;
	      else if (TREE_CODE (arg) == TEMPLATE_DECL)
		/* Skip template template arguments.  */
		continue;
	      else if (TYPE_P (arg) && TYPE_STRUCTURAL_EQUALITY_P (arg))
		return true;
	      else if (!TYPE_P (arg) && TREE_TYPE (arg)
		       && TYPE_STRUCTURAL_EQUALITY_P (TREE_TYPE (arg)))
		/* A non-type argument whose type needs structural
		   comparison.  */
		return true;
	    }
	}
    }

  return false;
}
/* Returns true if ARGS (a collection of template arguments) contains
   any dependent arguments.  */

bool
any_dependent_template_arguments_p (const_tree args)
{
  if (!args)
    return false;
  if (args == error_mark_node)
    return true;

  /* Examine each argument at each level; one dependent argument is
     enough.  */
  const int depth = TMPL_ARGS_DEPTH (args);
  for (int lvl = 1; lvl <= depth; ++lvl)
    {
      const_tree level = TMPL_ARGS_LEVEL (args, lvl);
      const int len = TREE_VEC_LENGTH (level);
      for (int j = 0; j < len; ++j)
	if (dependent_template_arg_p (TREE_VEC_ELT (level, j)))
	  return true;
    }

  return false;
}
/* Returns TRUE if the template TMPL is type-dependent.  */

bool
dependent_template_p (tree tmpl)
{
  /* For an overload set, any dependent member makes the whole set
     dependent.  */
  if (TREE_CODE (tmpl) == OVERLOAD)
    {
      for (; tmpl; tmpl = OVL_NEXT (tmpl))
	if (dependent_template_p (OVL_CURRENT (tmpl)))
	  return true;
      return false;
    }

  /* Template template parameters are dependent.  */
  if (DECL_TEMPLATE_TEMPLATE_PARM_P (tmpl))
    return true;
  if (TREE_CODE (tmpl) == TEMPLATE_TEMPLATE_PARM)
    return true;
  /* So are names that have not been looked up.  */
  if (TREE_CODE (tmpl) == SCOPE_REF)
    return true;
  if (identifier_p (tmpl))
    return true;
  return false;
}
/* Returns TRUE if the specialization TMPL<ARGS> is dependent.  */

bool
dependent_template_id_p (tree tmpl, tree args)
{
  if (dependent_template_p (tmpl))
    return true;
  return any_dependent_template_arguments_p (args);
}
/* Returns TRUE if OMP_FOR with DECLV, INITV, CONDV and INCRV vectors
   are dependent.  */

bool
dependent_omp_for_p (tree declv, tree initv, tree condv, tree incrv)
{
  int i;

  /* Dependence is only meaningful inside a template.  */
  if (!processing_template_decl)
    return false;

  /* Check each collapsed loop; any dependent piece makes the whole
     OMP_FOR dependent.  */
  for (i = 0; i < TREE_VEC_LENGTH (declv); i++)
    {
      tree decl = TREE_VEC_ELT (declv, i);
      tree init = TREE_VEC_ELT (initv, i);
      tree cond = TREE_VEC_ELT (condv, i);
      tree incr = TREE_VEC_ELT (incrv, i);

      if (type_dependent_expression_p (decl)
	  || TREE_CODE (decl) == SCOPE_REF)
	return true;

      if (init && type_dependent_expression_p (init))
	return true;

      if (type_dependent_expression_p (cond))
	return true;

      /* For a comparison, also look at both operands.  */
      if (COMPARISON_CLASS_P (cond)
	  && (type_dependent_expression_p (TREE_OPERAND (cond, 0))
	      || type_dependent_expression_p (TREE_OPERAND (cond, 1))))
	return true;

      if (TREE_CODE (incr) == MODOP_EXPR)
	{
	  /* Compound assignment: check the target and the rhs.  */
	  if (type_dependent_expression_p (TREE_OPERAND (incr, 0))
	      || type_dependent_expression_p (TREE_OPERAND (incr, 2)))
	    return true;
	}
      else if (type_dependent_expression_p (incr))
	return true;
      else if (TREE_CODE (incr) == MODIFY_EXPR)
	{
	  /* Plain assignment: check the target, and for a binary rhs,
	     both of its operands.  */
	  if (type_dependent_expression_p (TREE_OPERAND (incr, 0)))
	    return true;
	  else if (BINARY_CLASS_P (TREE_OPERAND (incr, 1)))
	    {
	      tree t = TREE_OPERAND (incr, 1);
	      if (type_dependent_expression_p (TREE_OPERAND (t, 0))
		  || type_dependent_expression_p (TREE_OPERAND (t, 1)))
		return true;
	    }
	}
    }

  return false;
}
/* TYPE is a TYPENAME_TYPE.  Returns the ordinary TYPE to which the
   TYPENAME_TYPE corresponds.  Returns the original TYPENAME_TYPE if
   no such TYPE can be found.  Note that this function peers inside
   uninstantiated templates and therefore should be used only in
   extremely limited situations.  ONLY_CURRENT_P restricts this
   peering to the currently open classes hierarchy (which is required
   when comparing types).  */

tree
resolve_typename_type (tree type, bool only_current_p)
{
  tree scope;
  tree name;
  tree decl;
  int quals;
  tree pushed_scope;
  tree result;

  gcc_assert (TREE_CODE (type) == TYPENAME_TYPE);

  scope = TYPE_CONTEXT (type);
  /* Usually the non-qualified identifier of a TYPENAME_TYPE is
     TYPE_IDENTIFIER (type). But when 'type' is a typedef variant of
     a TYPENAME_TYPE node, then TYPE_NAME (type) is set to the TYPE_DECL representing
     the typedef. In that case TYPE_IDENTIFIER (type) is not the non-qualified
     identifier of the TYPENAME_TYPE anymore.
     So by getting the TYPE_IDENTIFIER of the _main declaration_ of the
     TYPENAME_TYPE instead, we avoid messing up with a possible
     typedef variant case.  */
  name = TYPE_IDENTIFIER (TYPE_MAIN_VARIANT (type));

  /* If the SCOPE is itself a TYPENAME_TYPE, then we need to resolve
     it first before we can figure out what NAME refers to.  */
  if (TREE_CODE (scope) == TYPENAME_TYPE)
    {
      if (TYPENAME_IS_RESOLVING_P (scope))
	/* Given a class template A with a dependent base with nested type C,
	   typedef typename A::C::C C will land us here, as trying to resolve
	   the initial A::C leads to the local C typedef, which leads back to
	   A::C::C.  So we break the recursion now.  */
	return type;
      else
	scope = resolve_typename_type (scope, only_current_p);
    }
  /* If we don't know what SCOPE refers to, then we cannot resolve the
     TYPENAME_TYPE.  */
  if (!CLASS_TYPE_P (scope))
    return type;
  /* If this is a typedef, we don't want to look inside (c++/11987).  */
  if (typedef_variant_p (type))
    return type;
  /* If SCOPE isn't the template itself, it will not have a valid
     TYPE_FIELDS list.  */
  if (same_type_p (scope, CLASSTYPE_PRIMARY_TEMPLATE_TYPE (scope)))
    /* scope is either the template itself or a compatible instantiation
       like X<T>, so look up the name in the original template.  */
    scope = CLASSTYPE_PRIMARY_TEMPLATE_TYPE (scope);
  /* We shouldn't have built a TYPENAME_TYPE with a non-dependent scope.  */
  gcc_checking_assert (uses_template_parms (scope));
  /* If scope has no fields, it can't be a current instantiation.  Check this
     before currently_open_class to avoid infinite recursion (71515).  */
  if (!TYPE_FIELDS (scope))
    return type;
  /* If the SCOPE is not the current instantiation, there's no reason
     to look inside it.  */
  if (only_current_p && !currently_open_class (scope))
    return type;
  /* Enter the SCOPE so that name lookup will be resolved as if we
     were in the class definition.  In particular, SCOPE will no
     longer be considered a dependent type.  */
  pushed_scope = push_scope (scope);
  /* Look up the declaration.  */
  decl = lookup_member (scope, name, /*protect=*/0, /*want_type=*/true,
			tf_warning_or_error);

  result = NULL_TREE;

  /* For a TYPENAME_TYPE like "typename X::template Y<T>", we want to
     find a TEMPLATE_DECL.  Otherwise, we want to find a TYPE_DECL.  */
  if (!decl)
    /*nop*/;
  else if (identifier_p (TYPENAME_TYPE_FULLNAME (type))
	   && TREE_CODE (decl) == TYPE_DECL)
    {
      result = TREE_TYPE (decl);
      if (result == error_mark_node)
	result = NULL_TREE;
    }
  else if (TREE_CODE (TYPENAME_TYPE_FULLNAME (type)) == TEMPLATE_ID_EXPR
	   && DECL_CLASS_TEMPLATE_P (decl))
    {
      tree tmpl;
      tree args;
      /* Obtain the template and the arguments.  */
      tmpl = TREE_OPERAND (TYPENAME_TYPE_FULLNAME (type), 0);
      args = TREE_OPERAND (TYPENAME_TYPE_FULLNAME (type), 1);
      /* Instantiate the template.  */
      result = lookup_template_class (tmpl, args, NULL_TREE, NULL_TREE,
				      /*entering_scope=*/0,
				      tf_error | tf_user);
      if (result == error_mark_node)
	result = NULL_TREE;
    }

  /* Leave the SCOPE.  */
  if (pushed_scope)
    pop_scope (pushed_scope);

  /* If we failed to resolve it, return the original typename.  */
  if (!result)
    return type;

  /* If lookup found a typename type, resolve that too.  */
  if (TREE_CODE (result) == TYPENAME_TYPE && !TYPENAME_IS_RESOLVING_P (result))
    {
      /* Ill-formed programs can cause infinite recursion here, so we
	 must catch that.  */
      TYPENAME_IS_RESOLVING_P (result) = 1;
      result = resolve_typename_type (result, only_current_p);
      TYPENAME_IS_RESOLVING_P (result) = 0;
    }

  /* Qualify the resulting type: reapply any cv-qualifiers carried by the
     original TYPENAME_TYPE.  */
  quals = cp_type_quals (type);
  if (quals)
    result = cp_build_qualified_type (result, cp_type_quals (result) | quals);

  return result;
}
/* EXPR is an expression which is not type-dependent.  Return a proxy
   for EXPR that can be used to compute the types of larger
   expressions containing EXPR.  */

tree
build_non_dependent_expr (tree expr)
{
  tree inner_expr;

  /* When checking, try to get a constant value for all non-dependent
     expressions in order to expose bugs in *_dependent_expression_p
     and constexpr.  This can affect code generation, see PR70704, so
     only do this for -fchecking=2.  The result is discarded; only the
     checking side effects matter here.  */
  if (flag_checking > 1
      && cxx_dialect >= cxx11
      /* Don't do this during nsdmi parsing as it can lead to
	 unexpected recursive instantiations.  */
      && !parsing_nsdmi ()
      /* Don't do this during concept expansion either and for
	 the same reason.  */
      && !expanding_concept ())
    fold_non_dependent_expr (expr);

  /* Preserve OVERLOADs; the functions must be available to resolve
     types.  */
  inner_expr = expr;
  if (TREE_CODE (inner_expr) == STMT_EXPR)
    inner_expr = stmt_expr_value_expr (inner_expr);
  if (TREE_CODE (inner_expr) == ADDR_EXPR)
    inner_expr = TREE_OPERAND (inner_expr, 0);
  if (TREE_CODE (inner_expr) == COMPONENT_REF)
    inner_expr = TREE_OPERAND (inner_expr, 1);
  if (is_overloaded_fn (inner_expr)
      || TREE_CODE (inner_expr) == OFFSET_REF)
    return expr;
  /* There is no need to return a proxy for a variable.  */
  if (VAR_P (expr))
    return expr;
  /* Preserve string constants; conversions from string constants to
     "char *" are allowed, even though normally a "const char *"
     cannot be used to initialize a "char *".  */
  if (TREE_CODE (expr) == STRING_CST)
    return expr;
  /* Preserve void and arithmetic constants, as an optimization -- there is no
     reason to create a new node.  */
  if (TREE_CODE (expr) == VOID_CST
      || TREE_CODE (expr) == INTEGER_CST
      || TREE_CODE (expr) == REAL_CST)
    return expr;
  /* Preserve THROW_EXPRs -- all throw-expressions have type "void".
     There is at least one place where we want to know that a
     particular expression is a throw-expression: when checking a ?:
     expression, there are special rules if the second or third
     argument is a throw-expression.  */
  if (TREE_CODE (expr) == THROW_EXPR)
    return expr;

  /* Don't wrap an initializer list, we need to be able to look inside.  */
  if (BRACE_ENCLOSED_INITIALIZER_P (expr))
    return expr;

  /* Don't wrap a dummy object, we need to be able to test for it.  */
  if (is_dummy_object (expr))
    return expr;

  /* For COND_EXPR and COMPOUND_EXPR, wrap the operands rather than the
     whole expression, so the outer structure stays visible.  */
  if (TREE_CODE (expr) == COND_EXPR)
    return build3 (COND_EXPR,
		   TREE_TYPE (expr),
		   TREE_OPERAND (expr, 0),
		   (TREE_OPERAND (expr, 1)
		    ? build_non_dependent_expr (TREE_OPERAND (expr, 1))
		    : build_non_dependent_expr (TREE_OPERAND (expr, 0))),
		   build_non_dependent_expr (TREE_OPERAND (expr, 2)));
  if (TREE_CODE (expr) == COMPOUND_EXPR
      && !COMPOUND_EXPR_OVERLOADED (expr))
    return build2 (COMPOUND_EXPR,
		   TREE_TYPE (expr),
		   TREE_OPERAND (expr, 0),
		   build_non_dependent_expr (TREE_OPERAND (expr, 1)));

  /* If the type is unknown, it can't really be non-dependent */
  gcc_assert (TREE_TYPE (expr) != unknown_type_node);

  /* Otherwise, build a NON_DEPENDENT_EXPR.  */
  return build1 (NON_DEPENDENT_EXPR, TREE_TYPE (expr), expr);
}
/* ARGS is a vector of expressions as arguments to a function call.
   Replace the arguments with equivalent non-dependent expressions.
   This modifies ARGS in place.  */

void
make_args_non_dependent (vec<tree, va_gc> *args)
{
  if (!args)
    return;
  for (unsigned int ix = 0; ix < args->length (); ++ix)
    {
      tree arg = (*args)[ix];
      tree replacement = build_non_dependent_expr (arg);
      /* Only write back when a proxy was actually created.  */
      if (replacement != arg)
	(*args)[ix] = replacement;
    }
}
/* Returns a type which represents 'auto' or 'decltype(auto)'.  We use a
   TEMPLATE_TYPE_PARM with a level one deeper than the actual template
   parms.  If set_canonical is true, we set TYPE_CANONICAL on it.  */

static tree
make_auto_1 (tree name, bool set_canonical)
{
  tree au = cxx_make_type (TEMPLATE_TYPE_PARM);
  TYPE_NAME (au) = build_decl (input_location,
			       TYPE_DECL, name, au);
  TYPE_STUB_DECL (au) = TYPE_NAME (au);
  /* Index 0 at a level one deeper than the current template parms (see
     the function comment above).  */
  TEMPLATE_TYPE_PARM_INDEX (au) = build_template_parm_index
    (0, processing_template_decl + 1, processing_template_decl + 1,
     TYPE_NAME (au), NULL_TREE);
  if (set_canonical)
    TYPE_CANONICAL (au) = canonical_type_parameter (au);
  DECL_ARTIFICIAL (TYPE_NAME (au)) = 1;
  SET_DECL_TEMPLATE_PARM_P (TYPE_NAME (au));
  return au;
}
/* Return a fresh placeholder type representing 'decltype(auto)'.  */

tree
make_decltype_auto (void)
{
  return make_auto_1 (decltype_auto_identifier, /*set_canonical=*/true);
}
/* Return a fresh placeholder type representing 'auto'.  */

tree
make_auto (void)
{
  return make_auto_1 (auto_identifier, /*set_canonical=*/true);
}
/* Return a C++17 deduction placeholder for class template TMPL.  */

tree
make_template_placeholder (tree tmpl)
{
  /* An auto named after the template, with the template recorded so
     template_placeholder_p can recognize it.  */
  tree placeholder = make_auto_1 (DECL_NAME (tmpl), true);
  CLASS_PLACEHOLDER_TEMPLATE (placeholder) = tmpl;
  return placeholder;
}
/* True iff T is a C++17 class template deduction placeholder.  */

bool
template_placeholder_p (tree t)
{
  if (!is_auto (t))
    return false;
  return CLASS_PLACEHOLDER_TEMPLATE (t) != NULL_TREE;
}
/* Make a "constrained auto" type-specifier. This is an
   auto type with constraints that must be associated after
   deduction.  The constraint is formed from the given
   CONC and its optional sequence of arguments, which are
   non-null if written as partial-concept-id.  */

tree
make_constrained_auto (tree con, tree args)
{
  /* Don't set the canonical type yet; it depends on the constraint,
     which is attached below.  */
  tree type = make_auto_1 (auto_identifier, false);

  /* Build the constraint. */
  tree tmpl = DECL_TI_TEMPLATE (con);
  tree expr;
  if (VAR_P (con))
    /* Variable concept: check the template directly.  */
    expr = build_concept_check (tmpl, type, args);
  else
    /* Function concept: wrap the template in an overload set first.  */
    expr = build_concept_check (build_overload (tmpl, NULL_TREE), type, args);

  tree constr = normalize_expression (expr);
  PLACEHOLDER_TYPE_CONSTRAINTS (type) = constr;

  /* Our canonical type depends on the constraint.  */
  TYPE_CANONICAL (type) = canonical_type_parameter (type);

  /* Attach the constraint to the type declaration. */
  tree decl = TYPE_NAME (type);
  return decl;
}
/* Given type ARG, return std::initializer_list<ARG>.  */

static tree
listify (tree arg)
{
  /* Look up std::initializer_list in namespace std.  */
  tree std_init_list = namespace_binding
    (get_identifier ("initializer_list"), std_node);
  tree argvec;
  if (!std_init_list || !DECL_CLASS_TEMPLATE_P (std_init_list))
    {
      /* Not declared (or not a class template); the user must include
	 the header.  */
      error ("deducing from brace-enclosed initializer list requires "
	     "#include <initializer_list>");
      return error_mark_node;
    }
  argvec = make_tree_vec (1);
  TREE_VEC_ELT (argvec, 0) = arg;
  /* Instantiate initializer_list<ARG>.  */
  return lookup_template_class (std_init_list, argvec, NULL_TREE,
				NULL_TREE, 0, tf_warning_or_error);
}
/* Replace auto in TYPE with std::initializer_list<auto>.  */

static tree
listify_autos (tree type, tree auto_node)
{
  tree init_auto = listify (auto_node);
  tree argvec = make_tree_vec (1);
  TREE_VEC_ELT (argvec, 0) = init_auto;
  if (processing_template_decl)
    /* Prepend the current template's args so the argument vector has
       the depth tsubst expects inside a template.  */
    argvec = add_to_template_args (current_template_args (), argvec);
  return tsubst (type, argvec, tf_warning_or_error, NULL_TREE);
}
/* Hash traits for hashing possibly constrained 'auto'
   TEMPLATE_TYPE_PARMs for use by do_auto_deduction.  Constrained autos
   hash/compare by their constraint; unconstrained autos by identity
   (see the definitions below).  */

struct auto_hash : default_hash_traits<tree>
{
  static inline hashval_t hash (tree);
  static inline bool equal (tree, tree);
};
/* Hash the 'auto' T.  */

inline hashval_t
auto_hash::hash (tree t)
{
  tree constraint = PLACEHOLDER_TYPE_CONSTRAINTS (t);
  /* Unconstrained autos are all separate, so just hash the pointer.  */
  if (constraint == NULL_TREE)
    return iterative_hash_object (t, 0);
  /* Matching constrained-type-specifiers denote the same template
     parameter, so hash the constraint.  */
  return hash_placeholder_constraint (constraint);
}
/* Compare two 'auto's.  */

inline bool
auto_hash::equal (tree t1, tree t2)
{
  if (t1 == t2)
    return true;

  /* Two unconstrained autos are distinct; only constrained ones can
     compare equal, and only via their constraints.  */
  tree c1 = PLACEHOLDER_TYPE_CONSTRAINTS (t1);
  if (!c1)
    return false;
  tree c2 = PLACEHOLDER_TYPE_CONSTRAINTS (t2);
  if (!c2)
    return false;

  return equivalent_placeholder_constraints (c1, c2);
}
/* for_each_template_parm callback for extract_autos: if t is a (possibly
   constrained) auto, add it to the vector.  DATA is the hash table
   accumulating the distinct autos.  */

static int
extract_autos_r (tree t, void *data)
{
  hash_table<auto_hash> &hash = *(hash_table<auto_hash>*)data;
  if (is_auto_or_concept (t))
    {
      /* All the autos were built with index 0; fix that up now.  */
      tree *p = hash.find_slot (t, INSERT);
      unsigned idx;
      if (*p)
	/* If this is a repeated constrained-type-specifier, use the index we
	   chose before.  */
	idx = TEMPLATE_PARM_IDX (TEMPLATE_TYPE_PARM_INDEX (*p));
      else
	{
	  /* Otherwise this is new, so use the current count.  */
	  *p = t;
	  idx = hash.elements () - 1;
	}
      TEMPLATE_PARM_IDX (TEMPLATE_TYPE_PARM_INDEX (t)) = idx;
    }

  /* Always keep walking.  */
  return 0;
}
/* Return a TREE_VEC of the 'auto's used in type under the Concepts TS, which
   says they can appear anywhere in the type.  */

static tree
extract_autos (tree type)
{
  hash_set<tree> visited;
  hash_table<auto_hash> hash (2);

  /* Collect the distinct autos; extract_autos_r assigns each one a
     unique index.  */
  for_each_template_parm (type, extract_autos_r, &hash, &visited, true);

  /* Place each auto at the slot matching its assigned index.  */
  tree tree_vec = make_tree_vec (hash.elements());
  for (hash_table<auto_hash>::iterator iter = hash.begin();
       iter != hash.end(); ++iter)
    {
      tree elt = *iter;
      unsigned i = TEMPLATE_PARM_IDX (TEMPLATE_TYPE_PARM_INDEX (elt));
      TREE_VEC_ELT (tree_vec, i)
	= build_tree_list (NULL_TREE, TYPE_NAME (elt));
    }

  return tree_vec;
}
/* The stem for deduction guide names.  */
const char *const dguide_base = "__dguide_";

/* Return the name for a deduction guide for class template TMPL.  */

tree
dguide_name (tree tmpl)
{
  tree type = (TYPE_P (tmpl) ? tmpl : TREE_TYPE (tmpl));
  tree tname = TYPE_IDENTIFIER (type);
  /* Build "__dguide_<name>" on the stack; get_identifier copies it.  */
  char *buf = (char *) alloca (1 + strlen (dguide_base)
			       + IDENTIFIER_LENGTH (tname));
  memcpy (buf, dguide_base, strlen (dguide_base));
  memcpy (buf + strlen (dguide_base), IDENTIFIER_POINTER (tname),
	  IDENTIFIER_LENGTH (tname) + 1);
  tree dname = get_identifier (buf);
  /* Record the class type on the identifier; dguide_name_p and
     copy_guide_p read it back.  */
  TREE_TYPE (dname) = type;
  return dname;
}
/* True if NAME is the name of a deduction guide.  */

bool
dguide_name_p (tree name)
{
  /* dguide_name stores the class type on the identifier; a plain name
     has no TREE_TYPE.  */
  if (!TREE_TYPE (name))
    return false;
  return strncmp (IDENTIFIER_POINTER (name), dguide_base,
		  strlen (dguide_base)) == 0;
}
/* True if FN is a deduction guide.  */

bool
deduction_guide_p (const_tree fn)
{
  if (!DECL_P (fn))
    return false;
  tree name = DECL_NAME (fn);
  return name && dguide_name_p (name);
}
/* True if FN is the copy deduction guide, i.e. A(A)->A.  */

bool
copy_guide_p (const_tree fn)
{
  gcc_assert (deduction_guide_p (fn));
  /* Only implicit guides can be the copy guide.  */
  if (!DECL_ARTIFICIAL (fn))
    return false;
  tree parms = FUNCTION_FIRST_USER_PARMTYPE (DECL_TI_TEMPLATE (fn));
  /* Exactly one parameter, whose type is the class type recorded on the
     guide's name by dguide_name.  */
  return (TREE_CHAIN (parms) == void_list_node
	  && same_type_p (TREE_VALUE (parms), TREE_TYPE (DECL_NAME (fn))));
}
/* True if FN is a guide generated from a constructor template.  */

bool
template_guide_p (const_tree fn)
{
  gcc_assert (deduction_guide_p (fn));
  /* User-written guides don't count.  */
  if (!DECL_ARTIFICIAL (fn))
    return false;
  tree ctor = DECL_ABSTRACT_ORIGIN (fn);
  if (!ctor)
    return false;
  /* The guide came from a template iff the originating constructor's
     template is a primary template.  */
  return PRIMARY_TEMPLATE_P (DECL_TI_TEMPLATE (ctor));
}
/* OLDDECL is a _DECL for a template parameter.  Return a similar parameter at
   LEVEL:INDEX, using tsubst_args and complain for substitution into non-type
   template parameter types.  Note that the handling of template template
   parameters relies on current_template_parms being set appropriately for the
   new template.  */

static tree
rewrite_template_parm (tree olddecl, unsigned index, unsigned level,
		       tree tsubst_args, tsubst_flags_t complain)
{
  tree oldidx = get_template_parm_index (olddecl);

  tree newtype;
  if (TREE_CODE (olddecl) == TYPE_DECL
      || TREE_CODE (olddecl) == TEMPLATE_DECL)
    {
      /* Type and template template parameters get a fresh parameter type
	 of the same kind as the old one.  */
      tree oldtype = TREE_TYPE (olddecl);
      newtype = cxx_make_type (TREE_CODE (oldtype));
      TYPE_MAIN_VARIANT (newtype) = newtype;
      if (TREE_CODE (oldtype) == TEMPLATE_TYPE_PARM)
	TEMPLATE_TYPE_PARM_FOR_CLASS (newtype)
	  = TEMPLATE_TYPE_PARM_FOR_CLASS (oldtype);
    }
  else
    /* A non-type parameter's type may itself mention template
       parameters, so substitute into it.  */
    newtype = tsubst (TREE_TYPE (olddecl), tsubst_args,
		      complain, NULL_TREE);

  tree newdecl
    = build_decl (DECL_SOURCE_LOCATION (olddecl), TREE_CODE (olddecl),
		  DECL_NAME (olddecl), newtype);
  SET_DECL_TEMPLATE_PARM_P (newdecl);

  tree newidx;
  if (TREE_CODE (olddecl) == TYPE_DECL
      || TREE_CODE (olddecl) == TEMPLATE_DECL)
    {
      /* Wire the new decl/type/index triple together at LEVEL:INDEX,
	 preserving the pack flag of the old parameter.  */
      newidx = TEMPLATE_TYPE_PARM_INDEX (newtype)
	= build_template_parm_index (index, level, level,
				     newdecl, newtype);
      TEMPLATE_PARM_PARAMETER_PACK (newidx)
	= TEMPLATE_PARM_PARAMETER_PACK (oldidx);
      TYPE_STUB_DECL (newtype) = TYPE_NAME (newtype) = newdecl;
      TYPE_CANONICAL (newtype) = canonical_type_parameter (newtype);

      if (TREE_CODE (olddecl) == TEMPLATE_DECL)
	{
	  DECL_TEMPLATE_RESULT (newdecl)
	    = build_decl (DECL_SOURCE_LOCATION (olddecl), TYPE_DECL,
			  DECL_NAME (olddecl), newtype);
	  DECL_ARTIFICIAL (DECL_TEMPLATE_RESULT (newdecl)) = true;
	  // First create a copy (ttargs) of tsubst_args with an
	  // additional level for the template template parameter's own
	  // template parameters (ttparms).
	  tree ttparms = (INNERMOST_TEMPLATE_PARMS
			  (DECL_TEMPLATE_PARMS (olddecl)));
	  const int depth = TMPL_ARGS_DEPTH (tsubst_args);
	  tree ttargs = make_tree_vec (depth + 1);
	  for (int i = 0; i < depth; ++i)
	    TREE_VEC_ELT (ttargs, i) = TREE_VEC_ELT (tsubst_args, i);
	  TREE_VEC_ELT (ttargs, depth)
	    = template_parms_level_to_args (ttparms);
	  // Substitute ttargs into ttparms to fix references to
	  // other template parameters.
	  ttparms = tsubst_template_parms_level (ttparms, ttargs,
						 complain|tf_partial);
	  // Now substitute again with args based on tparms, to reduce
	  // the level of the ttparms.
	  ttargs = current_template_args ();
	  ttparms = tsubst_template_parms_level (ttparms, ttargs,
						 complain);
	  // Finally, tack the adjusted parms onto tparms.
	  ttparms = tree_cons (size_int (depth), ttparms,
			       current_template_parms);
	  DECL_TEMPLATE_PARMS (newdecl) = ttparms;
	}
    }
  else
    {
      /* Non-type parameter: build a parallel decl for the constant
	 (mirroring the old TEMPLATE_PARM_DECL) plus a matching index,
	 marking both decls constant and read-only.  */
      tree oldconst = TEMPLATE_PARM_DECL (oldidx);
      tree newconst
	= build_decl (DECL_SOURCE_LOCATION (oldconst),
		      TREE_CODE (oldconst),
		      DECL_NAME (oldconst), newtype);
      TREE_CONSTANT (newconst) = TREE_CONSTANT (newdecl)
	= TREE_READONLY (newconst) = TREE_READONLY (newdecl) = true;
      SET_DECL_TEMPLATE_PARM_P (newconst);
      newidx = build_template_parm_index (index, level, level,
					  newconst, newtype);
      TEMPLATE_PARM_PARAMETER_PACK (newidx)
	= TEMPLATE_PARM_PARAMETER_PACK (oldidx);
      DECL_INITIAL (newdecl) = DECL_INITIAL (newconst) = newidx;
    }

  return newdecl;
}
/* Returns a C++17 class deduction guide template based on the constructor
   CTOR.  As a special case, CTOR can be a RECORD_TYPE for an implicit default
   guide, or REFERENCE_TYPE for an implicit copy/move guide.

   OUTER_ARGS, if non-null, are the template arguments of the enclosing
   class scope to substitute into the constructor's template first.
   Returns a TEMPLATE_DECL for the synthesized guide.  */
static tree
build_deduction_guide (tree ctor, tree outer_args, tsubst_flags_t complain)
{
  tree type, tparms, targs, fparms, fargs, ci;
  bool memtmpl = false;
  bool explicit_p;
  location_t loc;

  if (TYPE_P (ctor))
    {
      /* Implicit guide: a RECORD_TYPE gives the default guide,
	 a REFERENCE_TYPE gives the copy/move guide taking the class
	 type itself as the sole parameter.  */
      type = ctor;
      bool copy_p = TREE_CODE (type) == REFERENCE_TYPE;
      if (copy_p)
	{
	  type = TREE_TYPE (type);
	  fparms = tree_cons (NULL_TREE, type, void_list_node);
	}
      else
	fparms = void_list_node;

      tree ctmpl = CLASSTYPE_TI_TEMPLATE (type);
      tparms = DECL_TEMPLATE_PARMS (ctmpl);
      targs = CLASSTYPE_TI_ARGS (type);
      ci = NULL_TREE;
      fargs = NULL_TREE;
      loc = DECL_SOURCE_LOCATION (ctmpl);
      explicit_p = false;
    }
  else
    {
      /* Guide derived from an actual constructor (or its template).  */
      ++processing_template_decl;

      tree fn_tmpl
	= (TREE_CODE (ctor) == TEMPLATE_DECL ? ctor
	   : DECL_TI_TEMPLATE (ctor));
      if (outer_args)
	fn_tmpl = tsubst (fn_tmpl, outer_args, complain, ctor);
      ctor = DECL_TEMPLATE_RESULT (fn_tmpl);

      type = DECL_CONTEXT (ctor);

      tparms = DECL_TEMPLATE_PARMS (fn_tmpl);
      /* If type is a member class template, DECL_TI_ARGS (ctor) will have
	 fully specialized args for the enclosing class.  Strip those off, as
	 the deduction guide won't have those template parameters.  */
      targs = get_innermost_template_args (DECL_TI_ARGS (ctor),
					   TMPL_PARMS_DEPTH (tparms));
      /* Discard the 'this' parameter.  */
      fparms = FUNCTION_ARG_CHAIN (ctor);
      fargs = TREE_CHAIN (DECL_ARGUMENTS (ctor));
      ci = get_constraints (ctor);
      loc = DECL_SOURCE_LOCATION (ctor);
      /* An explicit constructor yields an explicit guide.  */
      explicit_p = DECL_NONCONVERTING_P (ctor);

      if (PRIMARY_TEMPLATE_P (fn_tmpl))
	{
	  memtmpl = true;

	  /* For a member template constructor, we need to flatten the two
	     template parameter lists into one, and then adjust the function
	     signature accordingly.  This gets...complicated.  */
	  tree save_parms = current_template_parms;

	  /* For a member template we should have two levels of parms/args, one
	     for the class and one for the constructor.  We stripped
	     specialized args for further enclosing classes above.  */
	  const int depth = 2;
	  gcc_assert (TMPL_ARGS_DEPTH (targs) == depth);

	  /* Template args for translating references to the two-level template
	     parameters into references to the one-level template parameters we
	     are creating.  */
	  tree tsubst_args = copy_node (targs);
	  TMPL_ARGS_LEVEL (tsubst_args, depth)
	    = copy_node (TMPL_ARGS_LEVEL (tsubst_args, depth));

	  /* Template parms for the constructor template.  */
	  tree ftparms = TREE_VALUE (tparms);
	  unsigned flen = TREE_VEC_LENGTH (ftparms);
	  /* Template parms for the class template.  */
	  tparms = TREE_CHAIN (tparms);
	  tree ctparms = TREE_VALUE (tparms);
	  unsigned clen = TREE_VEC_LENGTH (ctparms);

	  /* Template parms for the deduction guide start as a copy of the
	     template parms for the class.  We set current_template_parms for
	     lookup_template_class_1.  */
	  current_template_parms = tparms = copy_node (tparms);
	  tree new_vec = TREE_VALUE (tparms) = make_tree_vec (flen + clen);
	  for (unsigned i = 0; i < clen; ++i)
	    TREE_VEC_ELT (new_vec, i) = TREE_VEC_ELT (ctparms, i);

	  /* Now we need to rewrite the constructor parms to append them to the
	     class parms.  Each rewritten parm is recorded in tsubst_args so
	     that later parms (and the signature below) see the new level.  */
	  for (unsigned i = 0; i < flen; ++i)
	    {
	      unsigned index = i + clen;
	      unsigned level = 1;
	      tree oldelt = TREE_VEC_ELT (ftparms, i);
	      tree olddecl = TREE_VALUE (oldelt);
	      tree newdecl = rewrite_template_parm (olddecl, index, level,
						    tsubst_args, complain);
	      tree newdef = tsubst_template_arg (TREE_PURPOSE (oldelt),
						 tsubst_args, complain, ctor);
	      tree list = build_tree_list (newdef, newdecl);
	      TEMPLATE_PARM_CONSTRAINTS (list)
		= tsubst_constraint_info (TEMPLATE_PARM_CONSTRAINTS (oldelt),
					  tsubst_args, complain, ctor);
	      TREE_VEC_ELT (new_vec, index) = list;
	      TMPL_ARG (tsubst_args, depth, i) = template_parm_to_arg (list);
	    }

	  /* Now we have a final set of template parms to substitute into the
	     function signature.  */
	  targs = template_parms_to_args (tparms);
	  fparms = tsubst_arg_types (fparms, tsubst_args, NULL_TREE,
				     complain, ctor);
	  fargs = tsubst (fargs, tsubst_args, complain, ctor);
	  if (ci)
	    ci = tsubst_constraint_info (ci, tsubst_args, complain, ctor);

	  current_template_parms = save_parms;
	}
      --processing_template_decl;
    }

  if (!memtmpl)
    {
      /* Copy the parms so we can set DECL_PRIMARY_TEMPLATE.  */
      tparms = copy_node (tparms);
      INNERMOST_TEMPLATE_PARMS (tparms)
	= copy_node (INNERMOST_TEMPLATE_PARMS (tparms));
    }

  /* Build the guide: a function declaration returning the class type,
     wrapped in a TEMPLATE_DECL carrying the (possibly flattened) parms.  */
  tree fntype = build_function_type (type, fparms);
  tree ded_fn = build_lang_decl_loc (loc,
				     FUNCTION_DECL,
				     dguide_name (type), fntype);
  DECL_ARGUMENTS (ded_fn) = fargs;
  DECL_ARTIFICIAL (ded_fn) = true;
  DECL_NONCONVERTING_P (ded_fn) = explicit_p;
  tree ded_tmpl = build_template_decl (ded_fn, tparms, /*member*/false);
  DECL_ARTIFICIAL (ded_tmpl) = true;
  DECL_TEMPLATE_RESULT (ded_tmpl) = ded_fn;
  TREE_TYPE (ded_tmpl) = TREE_TYPE (ded_fn);
  DECL_TEMPLATE_INFO (ded_fn) = build_template_info (ded_tmpl, targs);
  DECL_PRIMARY_TEMPLATE (ded_tmpl) = ded_tmpl;
  if (DECL_P (ctor))
    DECL_ABSTRACT_ORIGIN (ded_fn) = ctor;
  if (ci)
    set_constraints (ded_tmpl, ci);

  return ded_tmpl;
}
/* Deduce template arguments for the class template placeholder PTYPE for
   template TMPL based on the initializer INIT, and return the resulting
   type.

   FLAGS are LOOKUP_* flags; LOOKUP_ONLYCONVERTING marks a
   copy-initialization context, where explicit guides are pruned.
   Returns error_mark_node on failure.  */
static tree
do_class_deduction (tree ptype, tree tmpl, tree init, int flags,
		    tsubst_flags_t complain)
{
  if (!DECL_CLASS_TEMPLATE_P (tmpl))
    {
      /* We should have handled this in the caller.  */
      if (DECL_TEMPLATE_TEMPLATE_PARM_P (tmpl))
	return ptype;
      if (complain & tf_error)
	error ("non-class template %qT used without template arguments", tmpl);
      return error_mark_node;
    }

  tree type = TREE_TYPE (tmpl);

  /* Build the argument vector the guides will be called with.  A braced
     initializer is unpacked into its elements unless the class has a
     list constructor or is std::initializer_list itself.  */
  vec<tree,va_gc> *args;
  if (init == NULL_TREE
      || TREE_CODE (init) == TREE_LIST)
    args = make_tree_vector_from_list (init);
  else if (BRACE_ENCLOSED_INITIALIZER_P (init)
	   && !TYPE_HAS_LIST_CTOR (type)
	   && !is_std_init_list (type))
    args = make_tree_vector_from_ctor (init);
  else
    args = make_tree_vector_single (init);

  /* Start with any user-declared deduction guides found by name lookup.  */
  tree dname = dguide_name (tmpl);
  tree cands = lookup_qualified_name (CP_DECL_CONTEXT (tmpl), dname,
				      /*type*/false, /*complain*/false,
				      /*hidden*/false);
  if (cands == error_mark_node)
    cands = NULL_TREE;

  tree outer_args = NULL_TREE;
  if (DECL_CLASS_SCOPE_P (tmpl)
      && CLASSTYPE_TEMPLATE_INFO (DECL_CONTEXT (tmpl)))
    {
      /* Member class template: remember the enclosing class's args so the
	 guides can be substituted, and deduce against the most general
	 template.  */
      outer_args = CLASSTYPE_TI_ARGS (DECL_CONTEXT (tmpl));
      type = TREE_TYPE (most_general_template (tmpl));
    }

  /* Synthesize an implicit guide from every constructor.  */
  bool saw_ctor = false;
  if (CLASSTYPE_METHOD_VEC (type))
    // FIXME cache artificial deduction guides
    for (tree fns = CLASSTYPE_CONSTRUCTORS (type); fns; fns = OVL_NEXT (fns))
      {
	tree fn = OVL_CURRENT (fns);
	tree guide = build_deduction_guide (fn, outer_args, complain);
	cands = ovl_cons (guide, cands);
	saw_ctor = true;
      }

  /* Implicit default guide (only if there were no constructors) and
     implicit copy/move guide for a single argument.  */
  if (!saw_ctor && args->length() == 0)
    {
      tree guide = build_deduction_guide (type, outer_args, complain);
      cands = ovl_cons (guide, cands);
    }
  if (args->length() == 1)
    {
      tree guide = build_deduction_guide (build_reference_type (type),
					  outer_args, complain);
      cands = ovl_cons (guide, cands);
    }

  /* Prune explicit deduction guides in copy-initialization context.  */
  tree old_cands = cands;
  if (flags & LOOKUP_ONLYCONVERTING)
    {
      /* Quick scan: only rebuild the list if any candidate is explicit.  */
      tree t = cands;
      for (; t; t = OVL_NEXT (t))
	if (DECL_NONCONVERTING_P (STRIP_TEMPLATE (OVL_CURRENT (t))))
	  break;
      if (t)
	{
	  tree pruned = NULL_TREE;
	  for (t = cands; t; t = OVL_NEXT (t))
	    {
	      tree f = OVL_CURRENT (t);
	      if (!DECL_NONCONVERTING_P (STRIP_TEMPLATE (f)))
		pruned = build_overload (f, pruned);
	    }
	  cands = pruned;
	  if (cands == NULL_TREE)
	    {
	      error ("cannot deduce template arguments for copy-initialization"
		     " of %qT, as it has no non-explicit deduction guides or "
		     "user-declared constructors", type);
	      return error_mark_node;
	    }
	}
    }

  /* Do overload resolution on the guides; only the deduced return type is
     wanted, so this is unevaluated and uses tf_decltype.  */
  ++cp_unevaluated_operand;
  tree t = build_new_function_call (cands, &args, /*koenig*/false,
				    tf_decltype);

  if (t == error_mark_node && (complain & tf_warning_or_error))
    {
      /* Redo the call with errors enabled so the user sees why each
	 candidate was rejected.  */
      error ("class template argument deduction failed:");

      t = build_new_function_call (cands, &args, /*koenig*/false,
				   complain | tf_decltype);

      if (old_cands != cands)
	inform (input_location, "explicit deduction guides not considered "
		"for copy-initialization");
    }

  --cp_unevaluated_operand;
  release_tree_vector (args);

  return TREE_TYPE (t);
}
/* Replace occurrences of 'auto' in TYPE with the appropriate type deduced
   from INIT.  AUTO_NODE is the TEMPLATE_TYPE_PARM used for 'auto' in TYPE.

   Convenience overload: forwards to the full overload below with the
   default diagnostics (tf_warning_or_error) and an unspecified deduction
   context.  */
tree
do_auto_deduction (tree type, tree init, tree auto_node)
{
  return do_auto_deduction (type, init, auto_node,
                            tf_warning_or_error,
                            adc_unspecified);
}
/* Replace occurrences of 'auto' in TYPE with the appropriate type deduced
   from INIT.  AUTO_NODE is the TEMPLATE_TYPE_PARM used for 'auto' in TYPE.
   The CONTEXT determines the context in which auto deduction is performed
   and is used to control error diagnostics.  FLAGS are the LOOKUP_* flags.
   OUTER_TARGS are used during template argument deduction
   (context == adc_unify) to properly substitute the result, and is ignored
   in other contexts.

   For partial-concept-ids, extra args may be appended to the list of deduced
   template arguments prior to determining constraint satisfaction.

   Returns error_mark_node on failure, or TYPE unchanged when deduction
   must be deferred (dependent initializer, missing type).  */
tree
do_auto_deduction (tree type, tree init, tree auto_node,
                   tsubst_flags_t complain, auto_deduction_context context,
                   tree outer_targs, int flags)
{
  tree targs;

  if (init == error_mark_node)
    return error_mark_node;

  if (init && type_dependent_expression_p (init)
      && context != adc_unify)
    /* Defining a subset of type-dependent expressions that we can deduce
       from ahead of time isn't worth the trouble.  */
    return type;

  /* C++17 class template argument deduction: a placeholder for a class
     template is handled entirely by do_class_deduction.  */
  if (tree tmpl = CLASS_PLACEHOLDER_TEMPLATE (auto_node))
    return do_class_deduction (type, tmpl, init, flags, complain);

  if (TREE_TYPE (init) == NULL_TREE)
    /* Nothing we can do with this, even in deduction context.  */
    return type;

  /* [dcl.spec.auto]: Obtain P from T by replacing the occurrences of auto
     with either a new invented type template parameter U or, if the
     initializer is a braced-init-list (8.5.4), with
     std::initializer_list<U>.  */
  if (BRACE_ENCLOSED_INITIALIZER_P (init))
    {
      if (!DIRECT_LIST_INIT_P (init))
	type = listify_autos (type, auto_node);
      else if (CONSTRUCTOR_NELTS (init) == 1)
	/* auto x{e} deduces from the single element e.  */
	init = CONSTRUCTOR_ELT (init, 0)->value;
      else
	{
	  if (complain & tf_warning_or_error)
	    {
	      if (permerror (input_location, "direct-list-initialization of "
			     "%<auto%> requires exactly one element"))
		inform (input_location,
			"for deduction to %<std::initializer_list%>, use copy-"
			"list-initialization (i.e. add %<=%> before the %<{%>)");
	    }
	  type = listify_autos (type, auto_node);
	}
    }

  if (type == error_mark_node)
    return error_mark_node;

  init = resolve_nondeduced_context (init, complain);

  if (context == adc_decomp_type
      && auto_node == type
      && init != error_mark_node
      && TREE_CODE (TREE_TYPE (init)) == ARRAY_TYPE)
    /* [dcl.decomp]/1 - if decomposition declaration has no ref-qualifiers
       and initializer has array type, deduce cv-qualified array type.  */
    return cp_build_qualified_type_real (TREE_TYPE (init), TYPE_QUALS (type),
					 complain);
  else if (AUTO_IS_DECLTYPE (auto_node))
    {
      /* decltype(auto): deduce exactly like decltype(init).  An
	 unparenthesized id-expression or member access keeps its declared
	 type; parenthesized forms pick up ref-ness.  */
      bool id = (DECL_P (init)
		 || ((TREE_CODE (init) == COMPONENT_REF
		      || TREE_CODE (init) == SCOPE_REF)
		     && !REF_PARENTHESIZED_P (init)));
      targs = make_tree_vec (1);
      TREE_VEC_ELT (targs, 0)
	= finish_decltype_type (init, id, tf_warning_or_error);
      if (type != auto_node)
	{
	  if (complain & tf_error)
	    error ("%qT as type rather than plain %<decltype(auto)%>", type);
	  return error_mark_node;
	}
    }
  else
    {
      /* Plain auto: deduce via template argument deduction against an
	 invented parameter list.  */
      tree parms = build_tree_list (NULL_TREE, type);
      tree tparms;

      if (flag_concepts)
	tparms = extract_autos (type);
      else
	{
	  tparms = make_tree_vec (1);
	  TREE_VEC_ELT (tparms, 0)
	    = build_tree_list (NULL_TREE, TYPE_NAME (auto_node));
	}

      targs = make_tree_vec (TREE_VEC_LENGTH (tparms));
      int val = type_unification_real (tparms, targs, parms, &init, 1, 0,
				       DEDUCE_CALL, LOOKUP_NORMAL,
				       NULL, /*explain_p=*/false);
      if (val > 0)
	{
	  if (processing_template_decl)
	    /* Try again at instantiation time.  */
	    return type;
	  if (type && type != error_mark_node
	      && (complain & tf_error))
	    /* If type is error_mark_node a diagnostic must have been
	       emitted by now.  Also, having a mention to '<type error>'
	       in the diagnostic is not really useful to the user.  */
	    {
	      if (cfun && auto_node == current_function_auto_return_pattern
		  && LAMBDA_FUNCTION_P (current_function_decl))
		error ("unable to deduce lambda return type from %qE", init);
	      else
		error ("unable to deduce %qT from %qE", type, init);
	      /* Rerun deduction with explain_p to show why it failed.  */
	      type_unification_real (tparms, targs, parms, &init, 1, 0,
				     DEDUCE_CALL, LOOKUP_NORMAL,
				     NULL, /*explain_p=*/true);
	    }
	  return error_mark_node;
	}
    }

  /* Check any placeholder constraints against the deduced type.  */
  if (flag_concepts && !processing_template_decl)
    if (tree constr = PLACEHOLDER_TYPE_CONSTRAINTS (auto_node))
      {
	/* Use the deduced type to check the associated constraints.  If we
	   have a partial-concept-id, rebuild the argument list so that
	   we check using the extra arguments.  */
	gcc_assert (TREE_CODE (constr) == CHECK_CONSTR);
	tree cargs = CHECK_CONSTR_ARGS (constr);
	if (TREE_VEC_LENGTH (cargs) > 1)
	  {
	    cargs = copy_node (cargs);
	    TREE_VEC_ELT (cargs, 0) = TREE_VEC_ELT (targs, 0);
	  }
	else
	  cargs = targs;
	if (!constraints_satisfied_p (constr, cargs))
	  {
	    if (complain & tf_warning_or_error)
	      {
		/* Tailor the diagnostic to where the placeholder appeared.  */
		switch (context)
		  {
		  case adc_unspecified:
		  case adc_unify:
		    error("placeholder constraints not satisfied");
		    break;
		  case adc_variable_type:
		  case adc_decomp_type:
		    error ("deduced initializer does not satisfy "
			   "placeholder constraints");
		    break;
		  case adc_return_type:
		    error ("deduced return type does not satisfy "
			   "placeholder constraints");
		    break;
		  case adc_requirement:
		    error ("deduced expression type does not satisfy "
			   "placeholder constraints");
		    break;
		  }
		diagnose_constraints (input_location, constr, targs);
	      }
	    return error_mark_node;
	  }
      }

  if (processing_template_decl && context != adc_unify)
    outer_targs = current_template_args ();
  targs = add_to_template_args (outer_targs, targs);
  return tsubst (type, targs, complain, NULL_TREE);
}
/* Substitutes LATE_RETURN_TYPE for 'auto' in TYPE and returns the
   result.

   If TYPE is not 'auto', it is returned unchanged.  If there is no
   trailing return type, the 'auto' may still need its template level
   fixed up (abbreviated function templates).  */
tree
splice_late_return_type (tree type, tree late_return_type)
{
  if (is_auto (type))
    {
      if (late_return_type)
	return late_return_type;

      tree idx = get_template_parm_index (type);
      if (TEMPLATE_PARM_LEVEL (idx) <= processing_template_decl)
	/* In an abbreviated function template we didn't know we were dealing
	   with a function template when we saw the auto return type, so update
	   it to have the correct level.  */
	return make_auto_1 (TYPE_IDENTIFIER (type), true);
    }
  return type;
}
/* Returns true iff TYPE is a TEMPLATE_TYPE_PARM representing 'auto',
   'decltype(auto)', or a class template placeholder.  */

bool
is_auto (const_tree type)
{
  if (TREE_CODE (type) != TEMPLATE_TYPE_PARM)
    return false;
  return (TYPE_IDENTIFIER (type) == auto_identifier
	  || TYPE_IDENTIFIER (type) == decltype_auto_identifier
	  || CLASS_PLACEHOLDER_TEMPLATE (type));
}
/* for_each_template_parm callback for type_uses_auto.

   TP is the template parameter being visited; DATA is unused.  Returns
   nonzero (stopping the walk) when TP is a generic placeholder.  */
int
is_auto_r (tree tp, void */*data*/)
{
  return is_auto_or_concept (tp);
}
/* Returns the TEMPLATE_TYPE_PARM in TYPE representing `auto' iff TYPE
   contains a use of `auto'.  Returns NULL_TREE otherwise.  */

tree
type_uses_auto (tree type)
{
  if (type == NULL_TREE)
    return NULL_TREE;

  if (!flag_concepts)
    return find_type_usage (type, is_auto);

  /* The Concepts TS allows multiple autos in one type-specifier; just
     return the first one we find, do_auto_deduction will collect all of
     them.  */
  if (!uses_template_parms (type))
    return NULL_TREE;
  return for_each_template_parm (type, is_auto_r, /*data*/NULL,
				 /*visited*/NULL, /*nondeduced*/true);
}
/* Returns true iff TYPE is a TEMPLATE_TYPE_PARM representing 'auto',
   'decltype(auto)' or a concept.

   Currently just forwards to is_auto; the concept case is not yet
   distinguished here (see the trailing comment).  */
bool
is_auto_or_concept (const_tree type)
{
  return is_auto (type); // or concept
}
/* Returns the TEMPLATE_TYPE_PARM in TYPE representing a generic type (`auto' or
   a concept identifier) iff TYPE contains a use of a generic type.  Returns
   NULL_TREE otherwise.

   Unlike type_uses_auto, this always does a plain type walk and does not
   special-case -fconcepts.  */
tree
type_uses_auto_or_concept (tree type)
{
  return find_type_usage (type, is_auto_or_concept);
}
/* For a given template T, return the vector of typedefs referenced
   in T for which access check is needed at T instantiation time.
   T is either a FUNCTION_DECL or a RECORD_TYPE.
   Those typedefs were added to T by the function
   append_type_to_template_for_access_check.  Returns NULL when T has no
   template info or records no such typedefs.  */

vec<qualified_typedef_usage_t, va_gc> *
get_types_needing_access_check (tree t)
{
  if (t == NULL_TREE || t == error_mark_node)
    return NULL;

  tree ti = get_template_info (t);
  if (!ti)
    return NULL;

  if (!CLASS_TYPE_P (t) && TREE_CODE (t) != FUNCTION_DECL)
    return NULL;

  if (!TI_TEMPLATE (ti))
    return NULL;

  return TI_TYPEDEFS_NEEDING_ACCESS_CHECKING (ti);
}
/* Append the typedef TYPE_DECL used in template T to a list of typedefs
   tied to T.  That list of typedefs will be access checked at
   T instantiation time.
   T is either a FUNCTION_DECL or a RECORD_TYPE.
   TYPE_DECL is a TYPE_DECL node representing a typedef.
   SCOPE is the scope through which TYPE_DECL is accessed.
   LOCATION is the location of the usage point of TYPE_DECL.

   This function is a subroutine of
   append_type_to_template_for_access_check.  It does no duplicate
   checking; the caller is responsible for that.  */
static void
append_type_to_template_for_access_check_1 (tree t,
					    tree type_decl,
					    tree scope,
					    location_t location)
{
  qualified_typedef_usage_t typedef_usage;
  tree ti;

  if (!t || t == error_mark_node)
    return;

  gcc_assert ((TREE_CODE (t) == FUNCTION_DECL
	       || CLASS_TYPE_P (t))
	      && type_decl
	      && TREE_CODE (type_decl) == TYPE_DECL
	      && scope);

  if (!(ti = get_template_info (t)))
    return;

  gcc_assert (TI_TEMPLATE (ti));

  /* Record the usage (decl, scope, location) in the template's vector.  */
  typedef_usage.typedef_decl = type_decl;
  typedef_usage.context = scope;
  typedef_usage.locus = location;

  vec_safe_push (TI_TYPEDEFS_NEEDING_ACCESS_CHECKING (ti), typedef_usage);
}
/* Record that the typedef TYPE_DECL, accessed through SCOPE at LOCATION,
   is used inside template TEMPL (a class type, FUNCTION_DECL, or
   TEMPLATE_DECL), so its access can be checked when TEMPL is
   instantiated.

   Example:

     class C { typedef int myint; };
     template<class U> struct S { C::myint mi; };
     S<char> s;

   At S<char> instantiation time we must check access to C::myint through
   scope C (which here would be an error, since myint is private).  This
   function records (C::myint, C, location) on the list attached to S; the
   list is walked and checked at instantiation time.

   Duplicate (typedef, scope) pairs are not recorded twice.  */
void
append_type_to_template_for_access_check (tree templ,
					  tree type_decl,
					  tree scope,
					  location_t location)
{
  gcc_assert (type_decl && (TREE_CODE (type_decl) == TYPE_DECL));

  /* Bail out if this (typedef, scope) pair is already recorded.  */
  unsigned ix;
  qualified_typedef_usage_t *use;
  FOR_EACH_VEC_SAFE_ELT (get_types_needing_access_check (templ), ix, use)
    if (use->typedef_decl == type_decl && scope == use->context)
      return;

  append_type_to_template_for_access_check_1 (templ, type_decl,
					      scope, location);
}
/* Convert the generic type parameters in PARM that match the types given in the
   range [START_IDX, END_IDX) from the current_template_parms into generic type
   packs.

   Builds a replacement argument vector in which the selected parameters
   become freshly-created parameter-pack types, then substitutes PARM with
   it.  NOTE(review): also mutates current_template_parms in place for the
   converted entries (the TREE_VALUE reassignment below).  */
tree
convert_generic_types_to_packs (tree parm, int start_idx, int end_idx)
{
  tree current = current_template_parms;
  int depth = TMPL_PARMS_DEPTH (current);
  current = INNERMOST_TEMPLATE_PARMS (current);
  tree replacement = make_tree_vec (TREE_VEC_LENGTH (current));

  /* Parameters before the range are passed through unchanged.  */
  for (int i = 0; i < start_idx; ++i)
    TREE_VEC_ELT (replacement, i)
      = TREE_TYPE (TREE_VALUE (TREE_VEC_ELT (current, i)));

  for (int i = start_idx; i < end_idx; ++i)
    {
      /* Create a distinct parameter pack type from the current parm and add it
	 to the replacement args to tsubst below into the generic function
	 parameter.  */
      tree o = TREE_TYPE (TREE_VALUE
			  (TREE_VEC_ELT (current, i)));
      tree t = copy_type (o);
      TEMPLATE_TYPE_PARM_INDEX (t)
	= reduce_template_parm_level (TEMPLATE_TYPE_PARM_INDEX (o),
				      o, 0, 0, tf_none);
      TREE_TYPE (TEMPLATE_TYPE_DECL (t)) = t;
      TYPE_STUB_DECL (t) = TYPE_NAME (t) = TEMPLATE_TYPE_DECL (t);
      TYPE_MAIN_VARIANT (t) = t;
      TEMPLATE_TYPE_PARAMETER_PACK (t) = true;
      TYPE_CANONICAL (t) = canonical_type_parameter (t);
      TREE_VEC_ELT (replacement, i) = t;
      /* Point the current parm entry at the new pack's decl chain.  */
      TREE_VALUE (TREE_VEC_ELT (current, i)) = TREE_CHAIN (t);
    }

  /* Parameters after the range are also passed through unchanged.  */
  for (int i = end_idx, e = TREE_VEC_LENGTH (current); i < e; ++i)
    TREE_VEC_ELT (replacement, i)
      = TREE_TYPE (TREE_VALUE (TREE_VEC_ELT (current, i)));

  /* If there are more levels then build up the replacement with the outer
     template parms.  */
  if (depth > 1)
    replacement = add_to_template_args (template_parms_to_args
					(TREE_CHAIN (current_template_parms)),
					replacement);

  return tsubst (parm, replacement, tf_none, NULL_TREE);
}
/* Entries in the decl_constraint hash table: a declaration DECL keyed by
   its DECL_UID, mapped to its constraint information CI.  */
struct GTY((for_user)) constr_entry
{
  tree decl;   /* Key: the (non-template) declaration.  */
  tree ci;     /* Value: its constraint-info.  */
};

/* Hashing function and equality for constraint entries.  Hash on the
   declaration's UID; two entries are equal iff they name the same decl.  */
struct constr_hasher : ggc_ptr_hash<constr_entry>
{
  static hashval_t hash (constr_entry *e)
  {
    return (hashval_t)DECL_UID (e->decl);
  }

  static bool equal (constr_entry *e1, constr_entry *e2)
  {
    return e1->decl == e2->decl;
  }
};

/* A mapping from declarations to constraint information.  Note that
   both templates and their underlying declarations are mapped to the
   same constraint information.

   FIXME: This is defined in pt.c because garbage collection
   code is not being generated for constraint.cc.  */
static GTY (()) hash_table<constr_hasher> *decl_constraints;
/* Returns the template constraints of declaration T.  If T is not
   constrained, return NULL_TREE.  Note that T must be non-null.  */

tree
get_constraints (tree t)
{
  if (!flag_concepts)
    return NULL_TREE;

  gcc_assert (DECL_P (t));
  if (TREE_CODE (t) == TEMPLATE_DECL)
    t = DECL_TEMPLATE_RESULT (t);

  /* Look up the underlying declaration in the table.  */
  constr_entry key = { t, NULL_TREE };
  constr_entry *entry = decl_constraints->find (&key);
  return entry ? entry->ci : NULL_TREE;
}
/* Associate the given constraint information CI with the declaration
   T.  If T is a template, then the constraints are associated with
   its underlying declaration.  Don't build associations if CI is
   NULL_TREE.  */

void
set_constraints (tree t, tree ci)
{
  if (ci == NULL_TREE)
    return;

  gcc_assert (t && flag_concepts);
  if (TREE_CODE (t) == TEMPLATE_DECL)
    t = DECL_TEMPLATE_RESULT (t);

  /* A declaration may be constrained only once.  */
  gcc_assert (!get_constraints (t));

  constr_entry key = { t, ci };
  constr_entry **slot = decl_constraints->find_slot (&key, INSERT);
  constr_entry *entry = ggc_alloc<constr_entry> ();
  *entry = key;
  *slot = entry;
}
/* Remove the associated constraints of the declaration T.  */

void
remove_constraints (tree t)
{
  gcc_assert (DECL_P (t));
  if (TREE_CODE (t) == TEMPLATE_DECL)
    t = DECL_TEMPLATE_RESULT (t);

  /* Clear the slot if present; a miss is harmless.  */
  constr_entry key = { t, NULL_TREE };
  if (constr_entry **slot = decl_constraints->find_slot (&key, NO_INSERT))
    decl_constraints->clear_slot (slot);
}
/* Memoized satisfaction results for declarations.  This
   maps the pair (constraint_info, arguments) to the result computed
   by constraints_satisfied_p.  */
struct GTY((for_user)) constraint_sat_entry
{
  tree ci;       /* Key part 1: the constraint-info.  */
  tree args;     /* Key part 2: the template arguments.  */
  tree result;   /* Cached truth-value node.  */
};

/* Hashing function and equality for constraint entries.  The constraint
   info is hashed by identity, the arguments structurally.  */
struct constraint_sat_hasher : ggc_ptr_hash<constraint_sat_entry>
{
  static hashval_t hash (constraint_sat_entry *e)
  {
    hashval_t val = iterative_hash_object(e->ci, 0);
    return iterative_hash_template_arg (e->args, val);
  }

  static bool equal (constraint_sat_entry *e1, constraint_sat_entry *e2)
  {
    return e1->ci == e2->ci && comp_template_args (e1->args, e2->args);
  }
};

/* Memoized satisfaction results for concept checks: (concept template,
   arguments) -> cached result.  */
struct GTY((for_user)) concept_spec_entry
{
  tree tmpl;     /* Key part 1: the concept's template.  */
  tree args;     /* Key part 2: the template arguments.  */
  tree result;   /* Cached result.  */
};

/* Hashing function and equality for constraint entries.  Argument
   comparison runs with comparing_specializations set so parameters at
   the same position compare equal.  */
struct concept_spec_hasher : ggc_ptr_hash<concept_spec_entry>
{
  static hashval_t hash (concept_spec_entry *e)
  {
    return hash_tmpl_and_args (e->tmpl, e->args);
  }

  static bool equal (concept_spec_entry *e1, concept_spec_entry *e2)
  {
    ++comparing_specializations;
    bool eq = e1->tmpl == e2->tmpl && comp_template_args (e1->args, e2->args);
    --comparing_specializations;
    return eq;
  }
};

static GTY (()) hash_table<constraint_sat_hasher> *constraint_memos;
static GTY (()) hash_table<concept_spec_hasher> *concept_memos;
/* Search for a memoized satisfaction result.  Returns one of the
   truth value nodes if previously memoized, or NULL_TREE otherwise.  */

tree
lookup_constraint_satisfaction (tree ci, tree args)
{
  constraint_sat_entry key = { ci, args, NULL_TREE };
  constraint_sat_entry *entry = constraint_memos->find (&key);
  return entry ? entry->result : NULL_TREE;
}
/* Memoize the result of a satisfaction test.  Returns the saved result.  */

tree
memoize_constraint_satisfaction (tree ci, tree args, tree result)
{
  /* Allocate a GC-managed copy of the entry and store it in the table.  */
  constraint_sat_entry *entry = ggc_alloc<constraint_sat_entry> ();
  entry->ci = ci;
  entry->args = args;
  entry->result = result;

  constraint_sat_entry **slot = constraint_memos->find_slot (entry, INSERT);
  *slot = entry;
  return result;
}
/* Search for a memoized satisfaction result for a concept.  */

tree
lookup_concept_satisfaction (tree tmpl, tree args)
{
  concept_spec_entry key = { tmpl, args, NULL_TREE };
  concept_spec_entry *entry = concept_memos->find (&key);
  return entry ? entry->result : NULL_TREE;
}
/* Memoize the result of a concept check.  Returns the saved result.  */

tree
memoize_concept_satisfaction (tree tmpl, tree args, tree result)
{
  /* Allocate a GC-managed copy of the entry and store it in the table.  */
  concept_spec_entry *entry = ggc_alloc<concept_spec_entry> ();
  entry->tmpl = tmpl;
  entry->args = args;
  entry->result = result;

  concept_spec_entry **slot = concept_memos->find_slot (entry, INSERT);
  *slot = entry;
  return result;
}
/* Cache of normalized concept expansions, keyed like concept_memos.  */
static GTY (()) hash_table<concept_spec_hasher> *concept_expansions;

/* Returns a prior concept specialization.  This returns the substituted
   and normalized constraints defined by the concept.

   Returns NULL_TREE when (TMPL, ARGS) has not been expanded before.  */
tree
get_concept_expansion (tree tmpl, tree args)
{
  concept_spec_entry elt = { tmpl, args, NULL_TREE };
  concept_spec_entry* found = concept_expansions->find (&elt);
  if (found)
    return found->result;
  else
    return NULL_TREE;
}
/* Save a concept expansion for later.  Returns DEF.  */

tree
save_concept_expansion (tree tmpl, tree args, tree def)
{
  /* Allocate a GC-managed copy of the entry and store it in the table.  */
  concept_spec_entry *entry = ggc_alloc<concept_spec_entry> ();
  entry->tmpl = tmpl;
  entry->args = args;
  entry->result = def;

  concept_spec_entry **slot = concept_expansions->find_slot (entry, INSERT);
  *slot = entry;
  return def;
}
/* Hash the ordered pair of CHECK_CONSTRs (T1, T2) for the subsumption
   table: fold in each constraint's concept and arguments in order, so
   the hash is order-sensitive (matching comp_subsumption_args).  */
static hashval_t
hash_subsumption_args (tree t1, tree t2)
{
  gcc_assert (TREE_CODE (t1) == CHECK_CONSTR);
  gcc_assert (TREE_CODE (t2) == CHECK_CONSTR);
  int val = 0;
  val = iterative_hash_object (CHECK_CONSTR_CONCEPT (t1), val);
  val = iterative_hash_template_arg (CHECK_CONSTR_ARGS (t1), val);
  val = iterative_hash_object (CHECK_CONSTR_CONCEPT (t2), val);
  val = iterative_hash_template_arg (CHECK_CONSTR_ARGS (t2), val);
  return val;
}
/* Compare the constraints of two subsumption entries.  The LEFT1 and
   LEFT2 arguments comprise the first subsumption pair and the RIGHT1
   and RIGHT2 arguments comprise the second.  These are all CHECK_CONSTRs.
   Concepts are compared by identity first (cheap), arguments
   structurally afterwards.  */
static bool
comp_subsumption_args (tree left1, tree left2, tree right1, tree right2)
{
  return (CHECK_CONSTR_CONCEPT (left1) == CHECK_CONSTR_CONCEPT (right1)
	  && CHECK_CONSTR_CONCEPT (left2) == CHECK_CONSTR_CONCEPT (right2)
	  && comp_template_args (CHECK_CONSTR_ARGS (left1),
				 CHECK_CONSTR_ARGS (right1))
	  && comp_template_args (CHECK_CONSTR_ARGS (left2),
				 CHECK_CONSTR_ARGS (right2)));
}
/* Key/value pair for learning and memoizing subsumption results.  This
   associates a pair of check constraints (including arguments) with
   a boolean value indicating the result.  */
struct GTY((for_user)) subsumption_entry
{
  tree t1;       /* Key part 1: first CHECK_CONSTR.  */
  tree t2;       /* Key part 2: second CHECK_CONSTR.  */
  bool result;   /* Cached answer: does t1 subsume t2?  */
};

/* Hashing function and equality for constraint entries.  Comparison is
   performed with comparing_specializations set, mirroring the other
   memo tables above.  */
struct subsumption_hasher : ggc_ptr_hash<subsumption_entry>
{
  static hashval_t hash (subsumption_entry *e)
  {
    return hash_subsumption_args (e->t1, e->t2);
  }

  static bool equal (subsumption_entry *e1, subsumption_entry *e2)
  {
    ++comparing_specializations;
    bool eq = comp_subsumption_args(e1->t1, e1->t2, e2->t1, e2->t2);
    --comparing_specializations;
    return eq;
  }
};

static GTY (()) hash_table<subsumption_hasher> *subsumption_table;
/* Search for a previously cached subsumption result.  Returns a pointer
   to the cached boolean, or null on a cache miss.  */

bool*
lookup_subsumption_result (tree t1, tree t2)
{
  subsumption_entry key = { t1, t2, false };
  subsumption_entry *entry = subsumption_table->find (&key);
  return entry ? &entry->result : NULL;
}
/* Save a subsumption result.  Returns RESULT.  */

bool
save_subsumption_result (tree t1, tree t2, bool result)
{
  /* Allocate a GC-managed copy of the entry and store it in the table.  */
  subsumption_entry *entry = ggc_alloc<subsumption_entry> ();
  entry->t1 = t1;
  entry->t2 = t2;
  entry->result = result;

  subsumption_entry **slot = subsumption_table->find_slot (entry, INSERT);
  *slot = entry;
  return result;
}
/* Set up the hash table for constraint association.  A no-op unless
   concepts support (-fconcepts) is enabled; the initial size 37 is just
   a small prime for the GC-managed tables.  */
void
init_constraint_processing (void)
{
  if (!flag_concepts)
    return;

  decl_constraints = hash_table<constr_hasher>::create_ggc(37);
  constraint_memos = hash_table<constraint_sat_hasher>::create_ggc(37);
  concept_memos = hash_table<concept_spec_hasher>::create_ggc(37);
  concept_expansions = hash_table<concept_spec_hasher>::create_ggc(37);
  subsumption_table = hash_table<subsumption_hasher>::create_ggc(37);
}
/* Set up the hash tables for template instantiations.  Called once at
   front-end initialization; creates the GC-managed specialization
   tables.  */
void
init_template_processing (void)
{
  decl_specializations = hash_table<spec_hasher>::create_ggc (37);
  type_specializations = hash_table<spec_hasher>::create_ggc (37);
}
/* Print stats about the template hash tables for -fstats.  Output goes
   to stderr: table size, element count, and collision ratio for both
   specialization tables.  */
void
print_template_statistics (void)
{
  fprintf (stderr, "decl_specializations: size %ld, %ld elements, "
	   "%f collisions\n", (long) decl_specializations->size (),
	   (long) decl_specializations->elements (),
	   decl_specializations->collisions ());
  fprintf (stderr, "type_specializations: size %ld, %ld elements, "
	   "%f collisions\n", (long) type_specializations->size (),
	   (long) type_specializations->elements (),
	   type_specializations->collisions ());
}
#include "gt-cp-pt.h"
|
DRB047-doallchar-orig-no.c | /*
Copyright (C) 1991-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>.
*/
/*
This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it.
*/
/*
glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default.
*/
/*
wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is
synchronized with ISO/IEC 10646:2017, fifth edition, plus
the following additions from Amendment 1 to the fifth edition:
- 56 emoji characters
- 285 hentaigana
- 3 additional Zanabazar Square characters
*/
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
/*
One dimension array computation
with finer granularity than traditional 4 bytes.
Dynamic tools monitoring 4-byte elements may wrongfully report a race condition.
*/
/* Shared array updated element-wise; char granularity exercises 1-byte
   accesses (finer than the usual 4-byte word).  */
char a[100];

int main()
{
  int i;
  int status;

  /* Fill a[] with its index values; iterations touch disjoint bytes,
     so the loop is safely parallel.  */
  #pragma cetus private(i)
  #pragma loop name main#0
  #pragma cetus parallel
  #pragma omp parallel for private(i)
  for (i = 0; i < 100; i++)
    {
      a[i] = i;
    }

  /* Increment every element in parallel; again no two iterations share
     an element, so there is no race.  */
  #pragma cetus private(i)
  #pragma loop name main#1
  #pragma cetus parallel
  #pragma omp parallel for private(i)
  for (i = 0; i < 100; i++)
    {
      a[i] = a[i] + 1;
    }

  /* Sequential output of the results.  */
  #pragma cetus private(i)
  #pragma loop name main#2
  for (i = 0; i < 100; i++)
    {
      printf("%c\n", a[i]);
    }

  status = 0;
  return status;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.